code
stringlengths
2
1.05M
repo_name
stringlengths
5
110
path
stringlengths
3
922
language
stringclasses
1 value
license
stringclasses
15 values
size
int64
2
1.05M
# Run the compiled test suite under PhantomJS, but only when the build
# targets the ghcjs compiler (GHCVER is presumably set by the CI matrix —
# TODO confirm).
if [ "$GHCVER" = "ghcjs" ]; then
  phantomjs --web-security=no jsbits/options.js result/bin/ghcjs-tests.jsexe/index.html
fi
dmjio/hackernews
build/phantomjs.sh
Shell
mit
125
#!/bin/sh
# Regression test harness: runs an Octave script in a scratch directory and
# diffs its coefficient output against the expected values embedded below.

prog=deczky3_socp_test.m

# Octave sources the test depends on; each is copied into the scratch dir.
depends="deczky3_socp_test.m \
test_common.m print_polynomial.m print_pole_zero.m \
armijo_kim.m \
fixResultNaN.m iirA.m iirE.m iirT.m iirP.m local_max.m iir_slb.m \
iir_socp_mmse.m iir_slb_exchange_constraints.m iir_slb_constraints_are_empty.m \
iir_slb_set_empty_constraints.m iir_slb_show_constraints.m \
iir_slb_update_constraints.m xConstraints.m showResponseBands.m \
showResponse.m showResponsePassBands.m showZPplot.m x2tf.m"

tmp=/tmp/$$
here=$(pwd)
if [ $? -ne 0 ]; then echo "Failed pwd"; exit 1; fi

# Report failure, clean up the scratch directory and exit non-zero.
fail()
{
        echo FAILED ${0#$here"/"} $prog 1>&2
        cd $here
        rm -rf $tmp
        exit 1
}

# Report success, clean up the scratch directory and exit zero.
pass()
{
        echo PASSED ${0#$here"/"} $prog
        cd $here
        rm -rf $tmp
        exit 0
}

# Treat HUP/INT/QUIT/TERM as a test failure so the scratch dir is removed.
trap "fail" 1 2 3 15

mkdir $tmp
if [ $? -ne 0 ]; then echo "Failed mkdir"; exit 1; fi
for file in $depends;do \
  cp -R src/$file $tmp; \
  if [ $? -ne 0 ]; then echo "Failed cp "$file; fail; fi \
done
cd $tmp
if [ $? -ne 0 ]; then echo "Failed cd"; fail; fi

#
# the output should look like this
#
cat > test_d2_coef.m << 'EOF'
Ud2=0,Vd2=0,Md2=10,Qd2=6,Rd2=1
d2 = [   0.0034549892, ...
         1.0281357855,   1.0300194560,   1.4047410877,   1.7981205077, ...
         2.1704133399, ...
         2.0034501134,   2.7150949171,   1.7701610817,   0.7330142457, ...
         0.1788165601, ...
         0.4953456248,   0.5900366432,   0.6308488488, ...
         0.3523136329,   1.1006443928,   1.4455070181 ]';
EOF
if [ $? -ne 0 ]; then echo "Failed output cat"; fail; fi

#
# run and see if the results match
#
echo "Running $prog"
octave --no-gui -q $prog >test.out 2>&1
if [ $? -ne 0 ]; then echo "Failed running $prog"; fail; fi
diff -Bb test_d2_coef.m deczky3_socp_test_d2_coef.m
if [ $? -ne 0 ]; then echo "Failed diff -Bb"; fail; fi

#
# this much worked
#
pass
robertgj/DesignOfIIRFilters
test/00/t0063a.sh
Shell
mit
1,830
# or_die.bash: guard functions to be used at the top of recipes.
# see https://github.com/pepaslabs/deploy.sh
#
# usage:
#
#   root_or_die
#
# Each guard calls an is_* predicate (defined elsewhere in this library) and,
# when the condition isn't met, emits an echo_step_error and exits.

# Generic guard failure: report "This <thing> is for <audience> only." and die.
die_x_for_y_only()
{
    local thing="${1}"
    local audience="${2}"
    echo_step_error "This ${thing} is for ${audience} only."
    exit 1
}

# Convenience wrapper for recipe-level guards.
die_recipe_for_x_only()
{
    die_x_for_y_only "recipe" "${1}"
}

# --- user-related guards ---------------------------------------------------

root_or_die()
{
    is_root || die_recipe_for_x_only "root"
}

user_or_die()
{
    is_user || die_recipe_for_x_only "regular users"
}

# --- platform-related guards -----------------------------------------------

linux_or_die()
{
    is_linux || die_recipe_for_x_only "Linux"
}

darwin_or_die()
{
    is_darwin || die_recipe_for_x_only "Darwin"
}

debian_or_die()
{
    is_debian || die_recipe_for_x_only "Debian"
}
pepaslabs/deploy.sh
lib/deploy.sh/or_die.bash
Shell
mit
882
#!/bin/sh
# Shut down the pgame server by invoking run.sh with the ShutdownServer class.
#
# FIX: removed a stray terminal-prompt line
# ("[server@localhost pgame_ydjd]$ cat shutDownServer.sh") that had been
# pasted into the file; the shell would have tried to execute it as a command.
/home/server/sanguo/pgame_ydjd/run.sh com.kueem.pgame.net.netty.ShutdownServer
qifanyang/tool
tool/antdemo/self/shutdownservr.sh
Shell
mit
142
#!/bin/bash
# Example pre-sync hook: prints an informational message and returns
# status 2, exercising the caller's handling of a non-zero hook result.
echo "This is the INFO hook-PRE-sync script example!"
exit 2
SpoddyCoder/clonepi
dev/hook-tests/pre-sync-info.sh
Shell
mit
74
#!/bin/bash
# Fetch (clone or pull) the Ensembl Genomes web packages for the branch named
# in $branch_eg.
#
# FIXES:
#  - shebang was #!/bin/sh but the script uses the bash-only [[ =~ ]] test;
#  - `[ ! ${branch_eg} ]` was unquoted (breaks/misbehaves on empty or spaced
#    values) — replaced with an explicit -z test;
#  - quoted all expansions and guarded the `cd` into the repo.

if [ -z "${branch_eg}" ]; then
    echo "please set branch_eg, for example release/eg/26"
    exit
fi

echo "branch_eg is ${branch_eg}"

read -p "Continue? (y/n) " -n 1
echo
if [[ $REPLY =~ ^[Yy]$ ]]; then
    echo
else
    exit
fi

for repo in \
    eg-web-search \
    eg-web-common \
    ensemblgenomes-api \
    eg-web-plants ; do
    if [ ! -d "$repo" ]; then
        echo "Checking out $repo (branch $branch_eg)"
        git clone --branch "${branch_eg}" "https://github.com/EnsemblGenomes/${repo}"
    else
        echo Already got $repo, attempting to pull...
        # Guard the cd: if it fails, git pull would run in the wrong directory.
        cd "$repo" || exit 1
        git pull
        git status
        cd ../
    fi
    echo
    echo
done
warelab/gramene-ensembl
scripts/misc-scripts/fetch_eg_packages.sh
Shell
mit
710
#!/bin/bash
# Script Name: AtoMiC Certbot Installer
#
# Pulls in the shared installer helpers (python, certbot constants and
# repository configuration, repo add, package update, app install) and then
# runs the certbot install step defined by them. SCRIPTPATH, GREEN/ENDCOLOR
# and APPTITLE are expected to be set by the toolkit before this runs.

echo
echo -e "${GREEN}AtoMiC Certbot Installer Script$ENDCOLOR"

source "$SCRIPTPATH/utils/python/python-installer.sh"
source "$SCRIPTPATH/utils/certbot/certbot-constants.sh"
source "$SCRIPTPATH/utils/certbot/certbot-repository-configurator.sh"
source "$SCRIPTPATH/inc/app-repository-add.sh"
source "$SCRIPTPATH/inc/pkgupdate.sh"
source "$SCRIPTPATH/inc/app-install.sh"

certbot

echo -e "${GREEN}---> $APPTITLE installation complete.$ENDCOLOR"
TommyE123/AtoMiC-ToolKit
utils/certbot/certbot-installer.sh
Shell
mit
495
#!/bin/bash
# One-shot provisioning script for the norx VM: locale, packages,
# Postgres/PostGIS, map-data seed and services. Each stage drops a
# ".done_*" marker file in $hidden_dir so re-runs skip completed stages.
#
# FIXES:
#  - the dataseed completion check was `[ -f '/home/$name/data' ]`: single
#    quotes kept $name literal AND -f never matches a directory (data is
#    created by `mkdir data`), so .done_dataseed was never written and the
#    very long seed step re-ran on every boot. Now `-d "/home/$name/data"`.
#  - same single-quote bug in the services check (`'/home/$name/services'`).

name="norx"
hidden_dir="/home/$name/.$name"

export LANGUAGE=en_US.UTF-8
export LANG=en_US.UTF-8
export LC_ALL=en_US.UTF-8
locale-gen en_US.UTF-8

if [ ! -d "$hidden_dir" ]; then
  sudo -u $name mkdir "$hidden_dir"
fi

if [ ! -f "$hidden_dir/.done_profile" ]; then
  # Set locale to when user logs into the VM through SSH
  echo "Setting default locale for $name account"
  echo '
export LANGUAGE=en_US.UTF-8
export LANG=en_US.UTF-8
export LC_ALL=en_US.UTF-8
' >> /home/$name/.profile
  touch "$hidden_dir/.done_profile"
fi

if [ ! -f "$hidden_dir/.done_packages" ]; then
  echo "Installing needed packages"
  apt-get -y install python-software-properties build-essential

  # Add some repositories
  apt-add-repository -y ppa:sharpie/postgis-nightly
  add-apt-repository -y ppa:mapnik/v2.1.0
  add-apt-repository -y ppa:chris-lea/node.js
  add-apt-repository -y ppa:ubuntugis/ubuntugis-unstable
  apt-get update

  ## Install postgreqsql database
  apt-get -y install postgresql-9.1

  ## Install PostGIS 2.1
  apt-get -y install postgresql-9.1-postgis-2.0

  ## Install gdal
  apt-get -y install libgdal1h libgdal-dev gdal-bin

  # Install mapnik
  sudo apt-get -y install libmapnik mapnik-utils python-mapnik libmapnik-dev

  ## Install some tools needed to install services
  apt-get -y install git subversion unzip zerofree curl

  ## Install node
  apt-get -y install nodejs

  # Install needed Python packages
  apt-get install -y libboost-python-dev python-dev

  # Install python modules
  pip install pillow TileStache pyproj

  echo "Installing Elastic Search server with JDBC-bindings for Postgres"
  apt-get install -y openjdk-7-jre-headless
  wget --quiet https://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-0.90.3.deb -O /tmp/elasticsearch-0.90.3.deb
  dpkg -i /tmp/elasticsearch-0.90.3.deb
  cd /usr/share/elasticsearch/bin
  ./plugin -url http://bit.ly/19iNdvZ -install river-jdbc
  cd ..
  cd plugins/river-jdbc
  wget --quiet http://jdbc.postgresql.org/download/postgresql-9.1-903.jdbc4.jar
  touch "$hidden_dir/.done_packages"
fi

if [ ! -f "$hidden_dir/.done_postgres" ]; then
  echo "Setting up database"
  # Postgres/PostGIS setup
  echo " * Creating PostGIS template"
  ## Create PostGIS template
  sudo -u postgres createdb -E UTF8 template_postgis2
  sudo -u postgres createlang -d template_postgis2 plpgsql
  sudo -u postgres psql -d postgres -c "UPDATE pg_database SET datistemplate='true' WHERE datname='template_postgis2'"
  sudo -u postgres psql -d template_postgis2 -f /usr/share/postgresql/9.1/contrib/postgis-2.0/postgis.sql
  sudo -u postgres psql -d template_postgis2 -f /usr/share/postgresql/9.1/contrib/postgis-2.0/spatial_ref_sys.sql
  sudo -u postgres psql -d template_postgis2 -f /usr/share/postgresql/9.1/contrib/postgis-2.0/rtpostgis.sql
  sudo -u postgres psql -d template_postgis2 -f /usr/share/postgresql/9.1/contrib/postgis-2.0/legacy.sql
  sudo -u postgres psql -d template_postgis2 -c "GRANT ALL ON geometry_columns TO PUBLIC;"
  sudo -u postgres psql -d template_postgis2 -c "GRANT ALL ON geography_columns TO PUBLIC;"
  sudo -u postgres psql -d template_postgis2 -c "GRANT ALL ON spatial_ref_sys TO PUBLIC;"

  echo " * Creating Postgres user '$name' with password 'bengler'"
  sudo -u postgres psql -c "CREATE ROLE root LOGIN INHERIT CREATEDB;"
  sudo -u postgres psql -c "ALTER USER root WITH PASSWORD 'bengler';"
  sudo -u postgres psql -c "CREATE ROLE $name LOGIN INHERIT CREATEDB;"
  sudo -u postgres psql -c "ALTER USER $name WITH PASSWORD 'bengler';"

  echo " * Creating database '$name'"
  sudo -u $name createdb -O $name -E UTF8 -T template_postgis2 $name
  touch "$hidden_dir/.done_postgres"
fi

if [ ! -f "$hidden_dir/.done_dataseed" ]; then
  echo "Seeding map data. This will take a very long time!"
  # Prepare to cook map data
  echo " * Fetching seed code from GitHub"
  cd /home/$name
  sudo -u $name mkdir data
  sudo -u $name git clone git://github.com/bengler/norx_data.git data
  cd data
  echo " * Starting seed. Please be patient...this is going to take a long time!"
  ./seed.sh $name $name bengler
  # FIX: was [ -f '/home/$name/data' ] — literal $name and -f on a directory.
  if [ -d "/home/$name/data" ]; then
    touch "$hidden_dir/.done_dataseed"
    echo " * Seed done!"
  fi
fi

if [ ! -f "$hidden_dir/.done_services" ]; then
  echo "Setting up map services"
  # Set up services that we need running
  cd /home/$name
  sudo -u $name mkdir services
  echo " * Fetching services-repository from GitHub"
  sudo -u $name git clone git://github.com/bengler/norx_services.git services
  cd /home/$name/services
  chown -R $name *
  ./bootstrap.sh $name
  # FIX: was [ -d '/home/$name/services' ] — single quotes kept $name literal.
  if [ -d "/home/$name/services" ]; then
    touch "$hidden_dir/.done_services"
    echo " * Services done!"
  fi
fi
bengler/norx
sh/base_setup.sh
Shell
mit
4,757
#!/bin/bash
# Launch a test CloudFormation stack for the elastic CI stack, for a given
# os/arch pair, using parameters gathered from buildkite-agent meta-data and
# the default VPC of the current AWS account.
set -eu

os="${1:-linux}"
arch="${2:-amd64}"

stack_name="buildkite-aws-stack-test-${os}-${arch}-${BUILDKITE_BUILD_NUMBER}"
stack_queue_name="testqueue-${os}-${arch}-${BUILDKITE_BUILD_NUMBER}"

# download parfait binary
wget -N https://github.com/lox/parfait/releases/download/v1.1.3/parfait_linux_amd64
mv parfait_linux_amd64 parfait
chmod +x ./parfait

# Default VPC plus its subnets/AZs, flattened into comma-separated lists.
vpc_id=$(aws ec2 describe-vpcs --filters "Name=isDefault,Values=true" --query "Vpcs[0].VpcId" --output text)
subnets=$(aws ec2 describe-subnets --filters "Name=vpc-id,Values=$vpc_id" --query "Subnets[*].[SubnetId,AvailabilityZone]" --output text)
subnet_ids=$(awk '{print $1}' <<< "$subnets" | tr ' ' ',' | tr '\n' ',' | sed 's/,$//')
az_ids=$(awk '{print $2}' <<< "$subnets" | tr ' ' ',' | tr '\n' ',' | sed 's/,$//')

image_id=$(buildkite-agent meta-data get "${os}_${arch}_image_id")
echo "Using AMI $image_id for $os/$arch"

service_role="$(buildkite-agent meta-data get service-role-arn)"
echo "Using service role ${service_role}"

# Instance sizing: windows needs a larger box and disk; arm64 needs Graviton.
instance_type="t3.nano"
instance_disk="10"

if [[ "$os" == "windows" ]] ; then
  instance_type="m5.large"
  instance_disk="100"
fi

if [[ "$arch" == "arm64" ]] ; then
  instance_type="m6g.large"
fi

cat << EOF > config.json
[
  { "ParameterKey": "BuildkiteAgentToken", "ParameterValue": "$BUILDKITE_AWS_STACK_AGENT_TOKEN" },
  { "ParameterKey": "BuildkiteQueue", "ParameterValue": "${stack_queue_name}" },
  { "ParameterKey": "KeyName", "ParameterValue": "${AWS_KEYPAIR:-aws-stack-test}" },
  { "ParameterKey": "InstanceType", "ParameterValue": "${instance_type}" },
  { "ParameterKey": "InstanceOperatingSystem", "ParameterValue": "${os}" },
  { "ParameterKey": "VpcId", "ParameterValue": "${vpc_id}" },
  { "ParameterKey": "Subnets", "ParameterValue": "${subnet_ids}" },
  { "ParameterKey": "AvailabilityZones", "ParameterValue": "${az_ids}" },
  { "ParameterKey": "MaxSize", "ParameterValue": "1" },
  { "ParameterKey": "AgentsPerInstance", "ParameterValue": "3" },
  { "ParameterKey": "ECRAccessPolicy", "ParameterValue": "readonly" },
  { "ParameterKey": "RootVolumeSize", "ParameterValue": "${instance_disk}" },
  { "ParameterKey": "EnableDockerUserNamespaceRemap", "ParameterValue": "true" },
  { "ParameterKey": "EnableAgentGitMirrorsExperiment", "ParameterValue": "true" },
  { "ParameterKey": "ScaleInIdlePeriod", "ParameterValue": "60" }
]
EOF

echo "--- Building templates"
make "mappings-for-${os}-${arch}-image" build/aws-stack.yml "IMAGE_ID=$image_id"

echo "--- Validating templates"
make validate

echo "--- Creating stack ${stack_name}"
make create-stack "STACK_NAME=$stack_name" "SERVICE_ROLE=$service_role"

echo "+++ ⌛️ Waiting for update to complete"
./parfait watch-stack "${stack_name}"
buildkite/elastic-ci-stack-for-aws
.buildkite/steps/launch.sh
Shell
mit
2,868
#!/usr/bin/env bash
# Provision a Sonatype Nexus 3 repository manager in Docker, persisting its
# data in a named volume and exposing it on port 8081.
echo "Install nexus repository manager. Port:8081"
sudo docker volume create --name nexus-data
sudo docker run -d --restart=always -p 8081:8081 --name nexus -v nexus-data:/nexus-data sonatype/nexus3
wizardbyron/provisioners
facility/nexus/nexus.sh
Shell
mit
218
##depends:none
# rtg-tools, BSD License
#
# Download rtg-tools 3.8.3, unpack it into the tools directory and create a
# version-independent "rtg" symlink. TMPDIR_PATH and TOOLS_PATH are expected
# to be provided by the bundle framework.
# NOTE(review): there is no error checking here; a failed wget/unzip leaves a
# dangling symlink. Whether the framework handles this is not visible — confirm.
cd ${TMPDIR_PATH}/
wget https://github.com/RealTimeGenomics/rtg-tools/releases/download/3.8.3/rtg-tools-3.8.3-linux-x64.zip
unzip rtg-tools-3.8.3-linux-x64.zip -d ${TOOLS_PATH}
cd ${TOOLS_PATH}
ln -s rtg-tools-3.8.3 rtg
chipster/chipster-tools
setup/bundles/bundle_binaries/modules/external_tools/rtg-tools.bash
Shell
mit
274
#!/bin/bash
# Build the deployable Lambda package: pin node to v4.3.2, regenerate the
# data file, then bundle with node-lambda, excluding dev-only paths.
# NOTE(review): `nvm` is a shell function; this assumes nvm has been sourced
# into this non-interactive shell (e.g. via $NVM_DIR/nvm.sh) — confirm.
nvm use v4.3.2
node data/datagen.js
node-lambda package -p build -n looks-like -e production -f deploy.env -x "alexa data spec test .gitignore build-package.sh"
nstublen/alexa-looks-like-game
build-package.sh
Shell
mit
174
#! /bin/bash
# Write a script (rem-duplicates.sh) containing commented-out `rm` lines for
# every duplicate file found under the given directories, grouped by md5sum.
# Review the generated script, uncomment the deletions you want, then run it.
OUTF=rem-duplicates.sh

echo "#! /bin/sh" > $OUTF
echo "" >> $OUTF

# md5sum everything, sort by hash, keep only repeated hashes, then strip the
# hash column, shell-escape the path and prefix it with "#rm ".
find "$@" -type f -print0 \
  | xargs -0 -n1 md5sum \
  | sort --key=1,32 \
  | uniq -w 32 -d --all-repeated=separate \
  | sed -r 's/^[0-9a-f]*( )*//;s/([^a-zA-Z0-9./_-])/\\\1/g;s/(.+)/#rm \1/' >> $OUTF

chmod a+x $OUTF
kindasimple/play
scripts/dupes/find_dupes.sh
Shell
mit
314
#!/usr/bin/env bash
# Install the latest miniconda package manager
# All must be run as root

if [ "$(/usr/bin/id -u)" -ne 0 ]; then
  echo 'Must be run as root!'
  exit 1
fi

# Distro-speciic Dependencies =====================================
if [ -n "$(type yum 2>/dev/null)" ]; then
  ## CentOS/Fedora ##
  # Requirements which do not ship in minimal/docker distros
  yum install --assumeyes \
    bzip2 \
    ca-certificates
elif [ -n "$(type pacman 2>/dev/null)" ]; then
  ## Arch/Manjaro ##
  true
elif [ -n "$(type apt-get 2>/dev/null)" ]; then
  ## Debian/Ubuntu ##
  true
fi
# =================================================================

# Download & run installer
curl -fsSL -o /tmp/miniconda.sh \
  https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh
/bin/bash /tmp/miniconda.sh -b -p /opt/miniconda3
rm /tmp/miniconda.sh

# Cleanup install artifacts
/opt/miniconda3/bin/conda clean -tipsy

# Configure environment
ln -s /opt/miniconda3/etc/profile.d/conda.sh /etc/profile.d/conda.sh
echo "##### Conda profile setup" >> /etc/profile.d/conda.sh
echo 'export PATH=/opt/miniconda3/bin:$PATH' >> /etc/profile.d/conda.sh
jakebrinkmann/dotfiles
install/conda.sh
Shell
mit
1,173
#!/bin/bash
# Prompt for a line count N and write the last N lines of cols1_2.txt to
# tail_N.txt.
#
# FIXES:
#  - the original wrapped the command in `echo \`...\``: the command
#    substitution ran tail (whose output went to the redirect), then echo
#    printed an empty line — the echo/backticks were pure noise;
#  - `read` without -r mangles backslashes; the variable was unquoted.
echo "Input the number of lines to read from the tail:"
read -r number_of_lines
tail -n "$number_of_lines" cols1_2.txt > "tail_${number_of_lines}.txt"
yasutaka/nlp_100
johnny/第2章/15.sh
Shell
mit
153
#!/bin/bash
# This script generates json configuration files for check-web using values
# from the SSM parameter store.
# The following environment variables must be set:
if [[ -z ${DEPLOY_ENV+x} || -z ${DEPLOYDIR+x} || -z ${AWS_DEFAULT_REGION+x} ]]; then
    echo "DEPLOY_ENV, DEPLOYDIR, and AWS_DEFAULT_REGION must be in the environment. Exiting."
    exit 1
fi

# FIX: without pipefail, `$?` after the aws|jq|sed|python pipeline only
# reflected the python stage, so a failed `aws ssm get-parameters` was
# silently ignored and an empty config installed.
set -o pipefail

SSM_PREFIX="/${DEPLOY_ENV}/check-web"
WORKTMP=$(mktemp)

# fetch_config <param-suffix> <dest-file>
# Retrieve ${SSM_PREFIX}/<param-suffix>, base64-decode its value and install
# it at <dest-file>; exits the script on any retrieval/decode failure.
fetch_config() {
    local param="${SSM_PREFIX}/$1"
    local dest="$2"
    if ! aws ssm get-parameters --region $AWS_DEFAULT_REGION --name "$param" \
            | jq '.Parameters[].Value' | sed 's/["]//g' | python -m base64 -d > "$WORKTMP"; then
        echo "Error retrieving SSM parameter ${param}. Exiting."
        exit 1
    fi
    mv "$WORKTMP" "$dest"
}

# Create user config.js from SSM parameter value:
fetch_config "config" "${DEPLOYDIR}/latest/config.js"

# Create config-server.js from SSM parameter value:
fetch_config "config-server" "${DEPLOYDIR}/latest/config-server.js"

echo "Configuration for env $DEPLOY_ENV complete."
exit 0
meedan/check-web
production/bin/create_configs.sh
Shell
mit
1,252
#!/bin/bash
# Install the python/pip toolchain and libraries for the hours app on CentOS 7.
# NOTE(review): these yum calls have no -y flag and will prompt interactively —
# confirm that is intended for this installer.
sudo yum install python git
sudo yum install python-pip
sudo pip install --upgrade pip
sudo pip install markdown
sudo yum install pycrypto
# git clone https://www.github.com/zamalchi/hoursapp.git
# cd hoursapp
#../launcher.sh
TechSquareInc/hours
installers/centos7-installer.sh
Shell
mit
243
#!/bin/bash
# Regenerate the initial-data fixture from the live database, dumping the
# Cinema, Movie and Show models as indented JSON.
#
# FIX: removed a duplicated '#!/bin/bash' line (only the first line of a
# script acts as a shebang).
python ./manage.py dumpdata --format json \
                            --indent 4 \
                            --output './cinemair/fixtures/initial_data.json' \
                            'cinemas.Cinema' \
                            'movies.Movie' \
                            'shows.Show'
Cinemair/cinemair-server
scripts/regenerate_fixtures.sh
Shell
mit
322
#! /bin/bash
# Scan an Objective-C codebase with ack for patterns that need attention when
# moving from the shared LuaSkin to a threaded LuaSkin; optionally pushd into
# the directory given as $1 first.
#
# FIXES:
#  - `pushd $1` was unquoted (breaks on paths with spaces);
#  - `echo""` relied on word concatenation — normalized to `echo ""`.
if [ -n "$1" ]; then pushd "$1" ; fi

echo ""
echo "[LuaSkin shared] --> [LuaSkin threaded]:"
ack --objc '\[LuaSkin\s+shared\]'

echo ""
echo "refTable -> [skin refTableFor:USERDATA_TAG]"
ack --objc '[lL]uaRef:\w+'
ack --objc 'luaUnref:\w+'

echo ""
echo "Non-unique refTable:"
ack --objc "\w+\s*=\s*\[\w+\s+registerLibrary"

echo ""
echo "Thread related areas of concern:"
ack --objc 'dispatch_get_main_queue'
ack --objc 'performSelectorOnMainThread:'
ack --objc 'CFRunLoopGetMain'
ack --objc '\[NSThread mainThread\]'

echo ""
if [ -n "$1" ]; then popd ; fi
asmagill/hs._asm.luathread
evaluate.sh
Shell
mit
571
#!/bin/bash
#
# <Replace with the description and/or purpose of this shell script.>

# Declare global variables
#
# <Replace with the variables of your shell script.>
HOSTNAME=$(hostname)
DATETIME=$(date)

# Declare functions
#
# <Replace with the functions of your shell script.>

# hello [NAME...] — greet each NAME with its position; with no arguments,
# print "Hello, world!" and return 1.
function hello() {
    if [ $# -eq 0 ]
    then
        echo "Hello, world!"
        return 1
    fi

    local COUNTER=1
    # FIX: was `for NAME in $@` — unquoted $@ word-splits names containing
    # spaces; "$@" keeps each argument intact.
    for NAME in "$@"
    do
        echo "Hello, ${NAME}. You are the number ${COUNTER}"
        COUNTER=$(($COUNTER+1))
    done
    return 0
}

# Main body of the shell script starts here.
#
# <Replace with the main commands of your shell script.>
hello Leo Edu

# Exit with an explicit exit status.
exit 0
leovant/examples
shell/template.sh
Shell
mit
681
#!/usr/bin/env bash # # (c) 2017-2018 Konstantin Gredeskoul # # MIT License, distributed as part of `sym` ruby gem. # • https://github.com/kigster/sym # #============================================================================== # # The purpose of this script is to transparently edit application secrets in # Rails apps or other projects. It simplifies the process of key import, as well # as the direct editing, as well as multi-file encryption/decryption routines. # # The idea is that you set some of the variables below to values specific to your # system and working with encrypted files will become very easy. # # SYMIT__FOLDER is a relative folder to your project root, under which you # might keep ALL of your encrypted files. Alternatively, if you keep encrypted # files sprinkled around your project, just leave it out, because it defaults # to "." — the current folder, and search anything beneath. # # Variables: # # # only search ./config folder # export SYMIT__FOLDER="config" # # # this will be the name of your key in OS-X KeyChain # export SYMIT__KEY="my-org.engineering.dev" # just a name # # # This is the extension given to the encrypted files. Ideally, leave it # # be as ".enc" # export SYMIT__EXTENSION=".enc" # # And then # # symit import key [ insecure ] # import a key and password-protect it (or not) # symit auto application.yml.enc # auto-decrypts it # symit auto application.yml # auto-encrypts it # symit decrypt application.yml # finds application.yml.enc and decrypts that. # # # ...and vola! You are editing the encrypted file with sym from the root of # your Rails application. Neat, no? 
# # Check if we are being sourced in, or run as a script: ( [[ -n ${ZSH_EVAL_CONTEXT} && ${ZSH_EVAL_CONTEXT} =~ :file$ ]] || \ [[ -n $BASH_VERSION && $0 != "$BASH_SOURCE" ]]) && _s_=1 || _s_=0 (( $_s_ )) && _is_sourced=1 (( $_s_ )) || _is_sourced=0 # Set all the defaults function __symit::init() { export SYMIT__EXTENSION=${SYMIT__EXTENSION:-'.enc'} export SYMIT__FOLDER=${SYMIT__FOLDER:-'.'} export SYMIT__KEY=${SYMIT__KEY} export SYMIT__MIN_VERSION='latest' } # Returns name of the current shell, eg 'bash' function __lib::shell::name() { echo $(basename $(printf $SHELL)) } # Returns 'yes' if current shell is BASH function __lib::shell::is_bash() { [[ $(__lib::shell::name) == "bash" ]] && echo yes } # Returns a number representing shell version, eg. # 3 or 4 for BASH v3 and v4 respectively. function __lib::bash::version_number() { echo $BASH_VERSION | awk 'BEGIN{FS="."}{print $1}' } # Enable all colors, but only if the STDOUT is a terminal function __lib::color::setup() { if [[ -t 1 ]]; then export txtblk='\e[0;30m' # Black - Regular export txtred='\e[0;31m' # Red export txtgrn='\e[0;32m' # Green export txtylw='\e[0;33m' # Yellow export txtblu='\e[0;34m' # Blue export txtpur='\e[0;35m' # Purple export txtcyn='\e[0;36m' # Cyan export txtwht='\e[0;37m' # White export bldblk='\e[1;30m' # Black - Bold export bldred='\e[1;31m' # Red export bldgrn='\e[1;32m' # Green export bldylw='\e[1;33m' # Yellow export bldblu='\e[1;34m' # Blue export bldpur='\e[1;35m' # Purple export bldcyn='\e[1;36m' # Cyan export bldwht='\e[1;37m' # White export unkblk='\e[4;30m' # Black - Underline export undred='\e[4;31m' # Red export undgrn='\e[4;32m' # Green export undylw='\e[4;33m' # Yellow export undblu='\e[4;34m' # Blue export undpur='\e[4;35m' # Purple export undcyn='\e[4;36m' # Cyan export undwht='\e[4;37m' # White export bakblk='\e[40m' # Black - Background export bakred='\e[41m' # Red export bakgrn='\e[42m' # Green export bakylw='\e[43m' # Yellow export bakblu='\e[44m' # Blue export 
bakpur='\e[45m' # Purple export bakcyn='\e[46m' # Cyan export bakwht='\e[47m' # White export clr='\e[0m' # Text Reset export txtrst='\e[0m' # Text Reset export rst='\e[0m' # Text Reset fi } # Unset all the colors, in case we a being piped into # something else. function __lib::color::reset() { export txtblk= export txtred= export txtgrn= export txtylw= export txtblu= export txtpur= export txtcyn= export txtwht= export bldblk= export bldred= export bldgrn= export bldylw= export bldblu= export bldpur= export bldcyn= export bldwht= export unkblk= export undred= export undgrn= export undylw= export undblu= export undpur= export undcyn= export undwht= export bakblk= export bakred= export bakgrn= export bakylw= export bakblu= export bakpur= export bakcyn= export bakwht= export clr= export txtrst= export rst= } # Enable or disable the colors based on whether the STDOUT # is a proper terminal, or a pipe. function __lib::stdout::configure() { if [[ -t 1 ]]; then __lib::color::setup else __lib::color::reset fi } __lib::stdout::configure # Check if we are being run as a script, and if so — bail. 
(( $_s_ )) || { printf "${bldred}This script is meant to be sourced into your environment,\n" printf "not run on a command line.${clr} \n\n" printf "Please add 'source $0' to your BASH initialization file,\n" printf "or run the following command:\n\n" printf " \$ ${bldgrn}sym -B ~/.bash_profile${clr}\n\n" printf "${bldblu}Thanks for using Sym!${clr}\n" exit 1 } # Horizontal line, width of the full terminal function __lib::color::hr() { local cols=${1:-${COLUMNS}} local char=${2:-"—"} local color=${3:-${txtylw}} printf "${color}" eval "printf \"%0.s${char}\" {1..${cols}}" printf "${clr}\n" } # Large header, all caps function __lib::color::h1() { local title=$(echo "$*" | tr 'a-z' 'A-Z') len=${#title} printf "${bldylw}${title}\n" __lib::color::hr ${len} '─' } # Smaller header function __lib::color::h2() { printf "${bldpur}$*${clr}\n" } # Shift cursor by N positions to the right function __lib::color::cursor-right-by() { position=$1 printf "\e[${position}C" } # Shift cursor by N positions to the left function __lib::color::cursor-left-by() { position=$1 printf "\e[${position}D" } # Shift cursor by N positions up function __lib::color::cursor-up-by() { position=$1 printf "\e[${position}A" } # Shift cursor by N positions down function __lib::color::cursor-down-by() { position=$1 printf "\e[${position}B" } # Convert a version string such as "1.50.17" to an integer # 101050017 for numeric comparison: function __lib::ver-to-i() { version=${1} echo ${version} | awk 'BEGIN{FS="."}{ printf "1%02d%03.3d%03.3d", $1, $2, $3}' } # Convert a result of __lib::ver-to-i() back to a regular version. function __lib::i-to-ver() { version=${1} /usr/bin/env ruby -e "ver='${version}'; printf %Q{%d.%d.%d}, ver[1..2].to_i, ver[3..5].to_i, ver[6..8].to_i" } # Prints Usage function __symit::usage() { echo __lib::color::h1 "symit" printf " ${bldylw}symit${bldgrn} is a powerful BASH helper, that enhances the CLI encryption tool called ${bldred}Sym${clr}, which is a Ruby Gem. 
Sym has an extensive CLI interface, but it only handles one encryption/decryption operation per invocation. With this script, you can auto decrypt all files in a given folder, you can import the key in a simpler way, and you can save into the environment sym configuration that will be used. It also streamlines editing of encrypted files in a given folder. Symit can be configured either with the ENV variables, or using the CLI flags.\n" printf " The recommended way to use ${bldred}symit${clr} is to set the following environment variables, which removes the need to pass these values via the flags. These variables default to the shown values if not set elsewhere: Perhaps the most critically important variable to set is ${txtylw}SYMIT__KEY${clr}: ${txtylw} export SYMIT__KEY='my-org.my-app.dev' eg: export SYMIT__KEY='github.web.development' ${clr} The ${txtcya}key${clr} can resolve to a file name, or a name of ENV variable, a keychain entry, or be the actual key (not recommended!). See the following link for more info: ${undblu}https://github.com/kigster/sym#resolving-the--k-argument${clr} Additional configuration is available through these variables: ${txtylw} export SYMIT__EXTENSION='${SYMIT__EXTENSION}' export SYMIT__FOLDER='${SYMIT__FOLDER}' export SYMIT__MIN_VERSION='latest' ${clr} The last variable defines the minimum Sym version desired. Set it to 'latest' to have symit auto-upgrade Sym every time it is invoked. 
${clr}\n" __lib::color::h2 "Usage:" printf " ${bldgrn}symit [ action ] [ file-path/pattern ] [ flags ]${clr}\n\n" __lib::color::h2 "Actions:" printf " Action is the first word that defaults to ${bldylw}edit${clr}.\n\n" printf " ${bldcya}Valid actions are below, starting with the Key import or creation:${clr}\n\n" printf " ${bldylw}— generate ${clr}create a new secure key, and copies it to the\n" printf " clipboard (if supported), otherwise prints to STDOUT\n" printf " Key name (set via SYMIT__KEY or -k flag) is required,\n" printf " and is used as the KeyChain entry name for the new key.\n\n" printf " ${bldylw}— import [insecure]\n" printf " ${clr}imports the key from clipboard and adds password\n" printf " encryption unless 'insecure' is passed in. Same as above\n" printf " in relation with the key parameter.\n\n" printf " ${bldcya}The following actions require the file pattern/path argument:${clr}\n" printf " ${bldylw}— edit ${clr}Finds all files, and opens them in $EDITOR\n" printf " ${bldylw}— encrypt ${clr}Encrypts files matching file-path\n" printf " ${bldylw}— decrypt ${clr}Adds the extension to file pattern and decrypts\n" printf " ${bldylw}— auto ${clr}encrypts decrypted file, and vice versa\n" echo __lib::color::h2 "Flags:" printf " -f | --folder DIR ${clr}Top level folder to search.${clr}\n" printf " -k | --key KEY ${clr}Key identifier${clr}\n" printf " -x | --extension EXT ${clr}Default extension of encrypted files.${clr}\n" printf " -n | --dry-run ${clr}Print stuff, but dont do it${clr}\n" printf " -a | --all-files ${clr}If provided ALL FILES are operated on${clr}\n" printf " ${clr}Use with CAUTION!${clr}\n" printf " -v | --verbose ${clr}Print more stuff${clr}\n" printf " -q | --quiet ${clr}Print less stuff${clr}\n" printf " -h | --help ${clr}Show this help message${clr}\n" echo __lib::color::h2 'Encryption key identifier can be:' printf "${clr}" printf ' 1. name of the keychain item storing the keychain (secure) 2. 
name of the environment variable storing the Key (*) 3. name of the file storing the key (*) 4. the key itself (*)' echo printf "${bldred}" printf ' (*) 2-4 are insecure UNLESS the key is encrypted with a password.'; echo printf "${clr}\ Please refer to README about generating password protected keys:\n ${bldblu}${undblu}https://github.com/kigster/sym#generating-the-key--examples${clr}\n\n" echo __lib::color::h1 'Examples:' printf " To import a key securely, first copy the key to your clipboard,\n" printf " and then run the following command, pasting the key when asked:\n\n" printf " ❯ ${bldgrn}symit${bldblu} import key ${clr}\n\n" printf " To encrypt or decrypt ALL files in the 'config' directory:${clr}\n\n" printf " ❯ ${bldgrn}symit${bldblu} encrypt|decrypt -a -f config ${clr}\n\n" printf " To decrypt all *.yml.enc files in the 'config' directory:${clr}\n\n" printf " ❯ ${bldgrn}symit${bldblu} decrypt '*.yml' -f config ${clr}\n\n" printf " To edit an encrypted file ${txtblu}config/application.yml.enc${clr}\n\n" printf " ❯ ${bldgrn}symit${bldblu} application.yml${clr}\n\n" printf " To auto decrypt a file ${txtblu}config/settings/crypt/pass.yml.enc${clr}\n\n" printf " ❯ ${bldgrn}symit${bldblu} auto config/settings/crypt/pass.yml.enc${clr}\n\n" printf " To automatically decide to either encrypt or decrypt a file,\n" printf " based on the file extension use 'auto' command. 
The first line below\n" printf " encrypts the file, second decrypts it, because the file extension is .enc:${clr}\n\n" printf " ❯ ${bldgrn}symit${bldblu} auto config/settings/crypt/pass.yml${clr}\n" printf " ❯ ${bldgrn}symit${bldblu} auto config/settings/crypt/pass.yml.enc${clr}\n\n" printf " To encrypt a file ${txtblu}config/settings.yml${clr}\n" printf " ❯ ${bldgrn}symit${bldblu} encrypt config/settings.yml${clr}\n\n" } function __datum() { date +"%m/%d/%Y.%H:%M:%S" } function __warn() { __lib::color::cursor-left-by 1000 printf "${bldylw}$* ${bldylw}\n" } function __err() { __lib::color::cursor-left-by 1000 printf "${bldred}ERROR: ${txtred}$* ${bldylw}\n" } function __inf() { [[ ${cli__opts__quiet} ]] && return __lib::color::cursor-left-by 1000 printf "${txtblu}$*${clr}\n" } function __dbg() { [[ ${cli__opts__verbose} ]] || return __lib::color::cursor-left-by 1000 printf "${txtgrn}$*${clr}\n" } function __lib::command::print() { __inf "${bldylw}❯ ${bldcya}$*${clr}" } function __symit::sym::installed_version() { __lib::ver-to-i $(gem list | grep sym | awk '{print $2}' | sed 's/(//g;s/)//g') } function __symit::sym::latest_version() { __lib::ver-to-i $(gem query --remote -n '^sym$' | awk '{print $2}' | sed 's/(//g;s/)//g') } function __symit::install::update() { local desired_version=$1 shift local current_version=$2 shift local version_args=$* __inf "updating sym to version ${bldylw}$(__lib::i-to-ver ${desired_version})${clr}..." printf "${bldblu}" >&1 echo y | gem uninstall sym --force -x 2>/dev/null printf "${clr}" >&1 command="gem install sym ${version_args} " eval "${command}" >/dev/null code=$? printf "${clr}" >&2 if [[ ${code} != 0 ]]; then __err "gem install returned ${code}, with command ${bldylw}${command}" return 127 fi current_version=$(__symit::sym::installed_version) __inf "sym version ${bldylw}$(__lib::i-to-ver ${current_version}) was successfully installed." 
} function __symit::install::gem() { if [[ -n ${__symit_last_checked_at} ]]; then now=$(date +'%s') if [[ $(( $now - ${__symit_last_checked_at} )) -lt 3600 ]]; then return fi fi export __symit_last_checked_at=${now:-$(date +'%s')} __inf "Verifying current sym version, please wait..." current_version=$(__symit::sym::installed_version) if [[ -n ${SYMIT__MIN_VERSION} ]]; then if [[ ${SYMIT__MIN_VERSION} -eq 'latest' ]]; then desired_version=$(__symit::sym::latest_version) version_args='' else desired_version=$( __lib::ver-to-i ${SYMIT__MIN_VERSION}) version_args=" --version ${SYMIT__MIN_VERSION}" fi if [[ "${desired_version}" != "${current_version}" ]]; then __symit::install::update "${desired_version}" "${current_version}" "${version_args}" else __inf "${bldgrn}sym${clr} ${txtblu}is on the correct version ${bldylw}$(__lib::i-to-ver ${desired_version})${txtblu} already" fi else if [[ -z ${current_version} ]] ; then __dbg "installing latest version of ${bldylw}sym..." fi fi } function __symit::files() { eval $(__symit::files::cmd) } function __symit::files::cmd() { if [[ -n ${cli__opts__file} && -n ${cli__opts__extension} ]]; then local folder=${cli__opts__folder} local file="${cli__opts__file}" local ext="${cli__opts__extension}" if [[ ${file} =~ '/' ]]; then if [[ ${folder} == '.' 
]]; then folder="$(dirname ${file})" else folder="${folder}/$(dirname ${file})" fi file="$(basename ${file})" fi if [[ "${cli__opts__action}" == "encrypt" ]] ; then printf "find ${folder} -name '${file}' -and -not -name '*${ext}'" elif [[ "${cli__opts__action}" == "auto" ]] ; then printf "find ${folder} -name '${file}'" else # edit, decrypt [[ ${file} =~ "${ext}" ]] || file="${file}${ext}" printf "find ${folder} -name '${file}'" fi fi } function __symit::command() { file=${1} if [[ -n "${cli__opts__key}" && -n "${cli__opts__extension}" ]]; then action="${cli__opts__action}" v="sym__actions__${action}" flags="${!v}" if [[ ${action} =~ "key" ]]; then [[ -n ${cli__opts__verbose} ]] && printf "processing key import action ${bldylw}${action}${clr}\n" >&2 printf "sym ${flags} ${cli__opts__key} " elif [[ ${action} =~ "generate" ]] ; then [[ -n ${cli__opts__verbose} ]] && printf "processing generate key action ${bldylw}${action}${clr}\n" >&2 if [[ -n $(which pbcopy) ]]; then out_key=/tmp/outkey command="sym ${flags} ${cli__opts__key} -q -o ${out_key}; cat ${out_key} | pbcopy; rm -f ${out_key}" printf "${command}" else printf "sym ${flags} ${cli__opts__key} " fi elif [[ -n ${file} ]] ; then ext="${cli__opts__extension}" [[ -z ${ext} ]] && ext='.enc' ext=$(echo ${ext} | sed -E 's/[\*\/,.]//g') if [[ ${action} =~ "encrypt" ]]; then printf "sym ${flags} ${file} -ck ${cli__opts__key} -o ${file}.${ext}" elif [[ ${action} =~ "decrypt" ]]; then new_name=$(echo ${file} | sed "s/\.${ext}//g") [[ "${new_name}" == "${file}" ]] && name="${file}.decrypted" printf "sym ${flags} ${file} -ck ${cli__opts__key} -o ${new_name}" else printf "sym ${flags} ${file} -ck ${cli__opts__key} " fi else printf "printf \"ERROR: not sure how to generate a correct command\\n\"" fi fi } function __symit::cleanup() { unset sym__actions unset cli__opts } function __symit::exit() { code=${1:-0} __symit::cleanup echo -n ${code} } function __symit::print_cli_args() { __dbg "action ${bldylw}: 
${cli__opts__action}${clr}" __dbg "key ${bldylw}: ${cli__opts__key}${clr}" __dbg "file ${bldylw}: ${cli__opts__file}${clr}" __dbg "extension ${bldylw}: ${cli__opts__extension}${clr}" __dbg "folder ${bldylw}: ${cli__opts__folder}${clr}" __dbg "verbose ${bldylw}: ${cli__opts__verbose}${clr}" __dbg "dry_run ${bldylw}: ${cli__opts__dry_run}${clr}" } function __symit::args::needs_file() { if [[ "${cli__opts__action}" == 'edit' || \ "${cli__opts__action}" == 'auto' || \ "${cli__opts__action}" == 'encrypt' || \ "${cli__opts__action}" == 'decrypt' ]]; then printf 'yes' fi } function __symit::validate_args() { if [[ -n $(__symit::args::needs_file) && -z ${cli__opts__file} ]]; then __err "missing file argument, config/application.yml" return $(__symit::exit 2) fi if [[ -z "${cli__opts__key}" ]]; then __err "Key was not defined, pass it with ${bldblu}-k KEY_ID${bldred}" __err "or set it via ${bldgrn}\$SYMIT__KEY${bldred} variable." return $(__symit::exit 4) fi if [[ -z ${cli__opts__extension} ]]; then cli__opts__extension='.enc' fi } function __symit::run() { __symit::cleanup __symit::init cli__opts__verbose='' cli__opts__quiet='' cli__opts__key=${SYMIT__KEY} cli__opts__extension=${SYMIT__EXTENSION} cli__opts__folder=${SYMIT__FOLDER} cli__opts__dry_run='' cli__opts__action=edit cli__opts__file='' sym__actions__generate=' -cpgx ' sym__actions__edit=' -t ' sym__actions__encrypt='-e -f ' sym__actions__decrypt='-d -f ' sym__actions__auto=' -n ' sym__actions__key_secure=' -iqcpx ' sym__actions__key_insecure=' -iqcx ' sym__actions__install='install' if [[ -z $1 ]]; then __symit::usage return $(__symit::exit 0) fi while :; do case $1 in -h|-\?|--help) shift __symit::usage __symit::cleanup return $(__symit::exit 0) ;; -k|--key) shift if [[ -z $1 ]]; then __err "-k/--key requires an argument" && return $(__symit::exit 1) else cli__opts__key=$1 shift fi ;; -x|--extension) shift if [[ -z $1 ]]; then __err "-x/--extension requires an argument" && return $(__symit::exit 1) else 
cli__opts__extension=${1} shift fi ;; -f|--folder) shift if [[ -z $1 ]]; then __err "-f/--folder requires an argument" && return $(__symit::exit 1) else cli__opts__folder=${1} shift fi ;; -a|--all-files) shift cli__opts__file="'*'" ;; -n|--dry-run) shift cli__opts__dry_run="yes" ;; -v|--verbose) shift cli__opts__verbose="yes" ;; -q|--quiet) shift cli__opts__quiet="yes" ;; import|key) shift cli__opts__action="key_secure" ;; insecure) shift if [[ "${cli__opts__action}" == 'key_secure' ]] ; then cli__opts__action="key_insecure" fi ;; --) # End of all options. shift break ;; -?*) __err 'WARN: Unknown option: %s\n' "$1" >&2 return $(__symit::exit 127) shift ;; ?*) param=$1 v="sym__actions__${param}" if [[ ! ${param} =~ '.' && -n "${!v}" ]]; then __dbg "Action ${bldylw}${param}${clr} is a valid action." cli__opts__action=${param} else __dbg "Parameter ${bldylw}${param}${clr} is not a valid action," __dbg "therefore it must be a file pattern." cli__opts__file=${1} fi shift ;; *) # Default case: If no more options then break out of the loop. break shift esac done [[ -n "${cli__opts__verbose}" ]] && __symit::print_cli_args if [[ "${cli__opts__action}" == 'install' ]]; then if [[ -n ${cli__opts__dry_run} ]]; then __dbg "This command verifies that Sym is properly installed," __dbg "and if not found — installs it." return $(__symit::exit 0) else __symit::install::gem return $(__symit::exit 0) fi fi __symit::validate_args code=$? 
if [[ ${code} != 0 ]]; then return $(__symit::exit ${code}) fi __symit::install::gem changed_count=0 if [[ -n "${cli__opts__dry_run}" ]] ; then __lib::color::h1 "DRY RUN" for file in $(__symit::files); do printf " \$ ${bldblu}$(__symit::command ${file})${clr}\n" done else if [[ -n "${cli__opts__file}" ]]; then [[ -n ${cli__opts__verbose} ]] && __dbg $(__symit::files) declare -a file_list for file in $(__symit::files); do local cmd="$(__symit::command ${file})" __lib::command::print "${cmd}" eval "${cmd}" code=$?; [[ ${code} != 0 ]] && __err "command '${bldblu}${cmd}${bldred}' exited with code ${bldylw}${code}" changed_count=$(( ${changed_count} + 1)) done if [[ ${changed_count} == 0 ]]; then printf "${undylw}Bad news:${clr}\n\n" __warn " No files matched your specification. The following 'find' command" __warn " ran to find the file you requested. Please change the name, and " __warn " try again.\n" __warn " ${bldblu}$(__symit::files::cmd)${clr}\n\n" return $(__symit::exit 5) fi else # opts[file] cmd=$(__symit::command) __lib::command::print "${cmd}" eval "${cmd}" code=$?; [[ ${code} != 0 ]] && return $(__symit::exit ${code}) changed_count=$(( ${changed_count} + 1)) fi fi } function symit() { __lib::stdout::configure __symit::run $@ }
kigster/sym
bin/sym.symit.bash
Shell
mit
24,207
#!/bin/bash
# Entry point for the mold-api debug container: fetch the requested branch,
# install the Ruby toolchain and the app's gems, then serve with unicorn.
#
# Required environment:
#   BRANCH - branch of mo-ld/mold-api to deploy

set -e  # previously a failed clone/install fell through and still ran unicorn

: "${BRANCH:?BRANCH must be set to the branch to clone}"

cd /opt

# Start from a clean checkout of the requested branch.
rm -fr mold-api
git clone -b "$BRANCH" https://github.com/mo-ld/mold-api.git
cd mold-api

# Login shell so rvm's profile hooks are loaded; install Ruby and gems.
/bin/bash -l -c "rvm requirements && rvm install 2.3.0 && gem install bundler --no-ri --no-rdoc && bundle install"

# Activate the freshly installed Ruby in *this* shell too.
source /etc/profile.d/rvm.sh
rvm use ruby-2.3.0

# Serve the API in the foreground (keeps the container alive).
cd /opt/mold-api/
unicorn -p 9393 --env production
mo-ld/mold-dock
docker-mold-api/mold-api-entry.debug.sh
Shell
mit
327
#!/bin/bash
# Start one of the project's services under forever.
# Usage: run.sh www|nas

cd ~/dev/

case "$1" in
	www)
		# Web frontend: install server + client deps, then daemonize.
		npm install &>/dev/null
		bower install &>/dev/null
		cd backend && forever start server.js
		;;
	nas)
		npm install &>/dev/null
		cd nas && forever start server.js
		;;
esac
jirojo2/cdps
run.sh
Shell
mit
235
#!/bin/sh ### BEGIN INIT INFO # Provides: unicorn # Required-Start: $remote_fs $syslog # Required-Stop: $remote_fs $syslog # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Short-Description: Manage unicorn server # Description: Start, stop, restart unicorn server for a specific application. ### END INIT INFO set -e # Feel free to change any of the following variables for your app: TIMEOUT=${TIMEOUT-60} APP_ROOT=/home/deployer/apps/home_page/current PID=$APP_ROOT/tmp/pids/unicorn.pid CMD="cd $APP_ROOT; bundle exec unicorn -D -c $APP_ROOT/config/unicorn.rb -E production" AS_USER=deployer set -u OLD_PIN="$PID.oldbin" sig () { test -s "$PID" && kill -$1 `cat $PID` } oldsig () { test -s $OLD_PIN && kill -$1 `cat $OLD_PIN` } run () { if [ "$(id -un)" = "$AS_USER" ]; then eval $1 else su -c "$1" - $AS_USER fi } case "$1" in start) sig 0 && echo >&2 "Already running" && exit 0 run "$CMD" ;; stop) sig QUIT && exit 0 echo >&2 "Not running" ;; force-stop) sig TERM && exit 0 echo >&2 "Not running" ;; restart|reload) sig HUP && echo reloaded OK && exit 0 echo >&2 "Couldn't reload, starting '$CMD' instead" run "$CMD" ;; upgrade) if sig USR2 && sleep 2 && sig 0 && oldsig QUIT then n=$TIMEOUT while test -s $OLD_PIN && test $n -ge 0 do printf '.' && sleep 1 && n=$(( $n - 1 )) done echo if test $n -lt 0 && test -s $OLD_PIN then echo >&2 "$OLD_PIN still exists after $TIMEOUT seconds" exit 1 fi exit 0 fi echo >&2 "Couldn't upgrade, starting '$CMD' instead" run "$CMD" ;; reopen-logs) sig USR1 ;; *) echo >&2 "Usage: $0 <start|stop|restart|upgrade|force-stop|reopen-logs>" exit 1 ;; esac
volontarian/murd.ch
config/unicorn_init.sh
Shell
mit
1,749
#!/bin/bash
# Microsoft IIS Internal IP Leak Validator
# By Luis "connection" Santana
# HackTalk Security
#
# Sends a minimal HTTP/1.0 request (no Host header) and greps the reply
# for a Content-Location header, which affected IIS builds populate with
# the server's internal IP address.
#
# Usage: internalIpLeak.sh <IP> <Port>

if [ $# -ne 2 ]; then
	# Usage errors go to stderr with a non-zero status so this is scriptable.
	echo "Usage: $0 IP Port" >&2
	exit 1
fi

echo "[*] sending Get request to $1 on port $2"
# Quoted expansions: the unquoted originals broke on odd input and the
# bare [*] in echo was a glob pattern.
printf "GET / HTTP/1.0\r\nCONNECTION: CLOSE\r\n\r\n" | nc "$1" "$2" | grep Content-Location:
echo "[*] done"
connection/validation-scripts
HTTP/internalIpLeak.sh
Shell
mit
320
# Predicate helpers shared by the rspec build scripts. Originally produced
# from the rspec-dev repo; regenerate there rather than editing by hand.

# True only on MRI ("CRuby").
function is_mri {
  # RUBY_ENGINE only returns 'ruby' on MRI. MRI 1.8.7 lacks the constant,
  # but all other rubies define it (including JRuby in 1.8 mode).
  if ruby -e "exit(!defined?(RUBY_ENGINE) || RUBY_ENGINE == 'ruby')"; then
    return 0
  fi
  return 1
}

# True on MRI 1.9.2 exactly.
function is_mri_192 {
  is_mri || return 1
  if ruby -e "exit(RUBY_VERSION == '1.9.2')"; then
    return 0
  fi
  return 1
}

# True on MRI newer than 2.0.
function is_mri_2plus {
  is_mri || return 1
  if ruby -e "exit(RUBY_VERSION.to_f > 2.0)"; then
    return 0
  fi
  return 1
}

# True unless we are on one of the legacy maintenance branches.
function rspec_support_compatible {
  if [ "$MAINTENANCE_BRANCH" != "2-99-maintenance" ] && [ "$MAINTENANCE_BRANCH" != "2-14-maintenance" ]; then
    return 0
  fi
  return 1
}

# Doc checks run only where a binstubbed yard exists and the ruby is modern.
function documentation_enforced {
  [ -x ./bin/yard ] || return 1
  is_mri_2plus || return 1
  return 0
}

# Lint runs only where a binstubbed rubocop exists.
function style_and_lint_enforced {
  [ -x ./bin/rubocop ] || return 1
  return 0
}
eugeneius/rspec-core
script/predicate_functions.sh
Shell
mit
1,232
#!/bin/bash
# Jekyll blog helper.
# Usage: blog.sh run [port] | build | deploy

case "$1" in
	run)
		# Serve locally with live reload; default port 8080 unless given.
		port=8080
		if [ $2 ]; then
			port=$2
		fi
		bundle exec jekyll serve --watch --host=0.0.0.0 --port=$port
		;;
	build)
		bundle exec jekyll build --destination=dist
		;;
	deploy)
		# Build, upload the static site, then ask the CDN to refresh.
		bundle exec jekyll build --destination=dist
		cos-upload local:./dist blog:/
		curl -fL -u freshCDN "https://scf.page404.cn/test/freshCDN/"
		;;
	*)
		echo "error param"
		;;
esac
lxy444/lxy444.github.io
blog.sh
Shell
mit
430
#!/bin/bash
# Regenerate the asciidoctor site and publish it on the gh-pages branch.
#
# set -e: previously a failed merge or build still committed and pushed
# (and could leave the repo stranded on gh-pages with a broken tree).
set -e

git checkout gh-pages
git merge master
./gradlew clean asciidoctor
# Copy the generated HTML into the branch root where Pages serves it.
cp -R build/asciidoc/html5/* .
git commit -a -m "Updated files"
git push origin gh-pages
git checkout master
kiview/testcontainers-grails-workshop
update_ghpages.sh
Shell
mit
188
DISTNAME=arch RELVER= BASEURL=http://mirror.vpsfree.cz/archlinux/iso/latest # Using the Bootstrap Image rline=`curl $BASEURL/md5sums.txt | grep bootstrap | grep x86_64` bfile=${rline##* } RELVER=`echo $bfile | awk -F- '{print $3}'` wget -P $DOWNLOAD $BASEURL/$bfile md5s=`md5sum $DOWNLOAD/$bfile` if [ ${rline%% *} != ${md5s%% *} ]; then echo "Bootstrap checksum wrong! Quitting." exit 1 fi cd $INSTALL gzip -dc $DOWNLOAD/$bfile | tar x --preserve-permissions --preserve-order --numeric-owner --one-top-level=$INSTALL INSTALL1=$INSTALL/root.x86_64 sed -ri 's/^#(.*vpsfree\.cz.*)$/\1/' $INSTALL1/etc/pacman.d/mirrorlist sed -ri 's/( unshare --fork --pid )/ /' $INSTALL1/usr/bin/arch-chroot sed -ri 's/^SigLevel = Required DatabaseOptional$/SigLevel = Never/' $INSTALL1/etc/pacman.conf CHROOT="$INSTALL1/bin/arch-chroot $INSTALL1" # Install the base system $CHROOT pacstrap -dG /mnt base openssh INSTALL2=$INSTALL1/mnt # Configure the system #$CHROOT genfstab -p /mnt >> /mnt/etc/fstab cat >> $INSTALL2/etc/fstab <<EOF tmpfs /tmp tmpfs nodev,nosuid 0 0 devpts /dev/pts devpts gid=5,mode=620 0 0 LABEL=/ / ext4 defaults EOF CHROOT2="$CHROOT arch-chroot /mnt" # Downgrade systemd mkdir -p $INSTALL2/root/pkgs cp $BASEDIR/packages/arch/* $INSTALL2/root/pkgs for lpkg in `cd $INSTALL2/root/pkgs && ls -1 *.pkg.tar.xz`; do $CHROOT2 pacman -U --noconfirm /root/pkgs/$lpkg done rm -rf $INSTALL2/root/pkgs $CHROOT2 pacman -Rns --noconfirm linux yes | $CHROOT2 pacman -Scc $CHROOT2 ln -s /usr/share/zoneinfo/Europe/Prague /etc/localtime $CHROOT2 systemctl enable sshd sed -ri 's/^#( *IgnorePkg *=.*)$/\1 libsystemd systemd systemd-sysvcompat python2-systemd/' $INSTALL2/etc/pacman.conf cd $INSTALL rm -f $INSTALL2/etc/machine-id $INSTALL2/root/.bash_history mv $INSTALL2/* $INSTALL rm -r $INSTALL1
pvanek/build-vpsfree-templates
templates/arch.sh
Shell
mit
1,867
#!/usr/bin/env bash
# Sourced from the shell profile: expose nvm (Node Version Manager)
# installed via Homebrew.

export NVM_DIR="$HOME/.nvm"

# Guard the source so shells on machines without nvm still start cleanly
# (the unconditional source printed an error on every new shell).
[ -s "/usr/local/opt/nvm/nvm.sh" ] && . "/usr/local/opt/nvm/nvm.sh"
smyrick/dot-files
bash-profile/tools/nodejs.sh
Shell
mit
79
# Launch the Cumulonimbus jar built by Maven. Works from any cwd;
# all script arguments are forwarded to the application.

# Resolve the directory containing this script (quoted so paths with
# spaces survive; $() instead of legacy backticks).
pushd "$(dirname "$0")" > /dev/null
SCRIPTPATH=$(pwd)
popd > /dev/null

JARPATH="$SCRIPTPATH/target"

if [ ! -d "$JARPATH" ]; then
	echo "Cumulonimbus does not exists. Probably not yet built. Please use \"mvn clean install\""
	exit 1
fi

# "$@" preserves each argument as one word; the original unquoted $@
# re-split arguments containing whitespace.
java -jar "$JARPATH"/cumulonimbus-*.jar "$@"
martinjmares/javaone2015-cloudone
cumulonimbus/start.sh
Shell
mit
276
#!/bin/bash # Mounting is done here instead of etcd because of bug https://bugs.launchpad.net/cloud-init/+bug/1692093 # Once the bug is fixed, replace the below with the cloud init changes replaced in https://github.com/Azure/acs-engine/pull/661. set -x DISK=/dev/sdc PARTITION=${DISK}1 MOUNTPOINT=/var/lib/etcddisk udevadm settle mkdir -p $MOUNTPOINT mount | grep $MOUNTPOINT if [ $? -eq 0 ] then echo "disk is already mounted" exit 0 fi # fill /etc/fstab grep "/dev/sdc1" /etc/fstab if [ $? -ne 0 ] then echo "$PARTITION $MOUNTPOINT auto defaults,nofail 0 2" >> /etc/fstab fi # check if partition exists ls $PARTITION if [ $? -ne 0 ] then # partition does not exist /sbin/sgdisk --new 1 $DISK /sbin/mkfs.ext4 $PARTITION -L etcd_disk -F -E lazy_itable_init=1,lazy_journal_init=1 fi mount $MOUNTPOINT /bin/chown -R etcd:etcd /var/lib/etcddisk
rjtsdl/acs-engine
parts/k8s/kubernetes_mountetcd.sh
Shell
mit
923
#!/usr/bin/env sh git clone --depth=1 https://github.com/eriklarko/golang-hello-world.git /go/src
SaferSocietyGroup/suab
examples/golang-hello/checkout-code.sh
Shell
mit
99
# Launch the PS4 Remote Play app on the attached Android device and
# start a session, then load the device-specific tap coordinates.
adb shell am start -n com.playstation.remoteplay/.RpActivityMain
# Give the activity time to render before injecting input.
sleep 5
# Tap at (850,460) — presumably the app's connect/start button on the
# Xperia Z2 Tablet screen; verify for other devices/resolutions.
adb shell input tap 850 460
# Pull in the per-device control coordinates (portrait layout).
source Xperia_Z2_Tablet/coords_portrait
#source Xperia_Z2_Tablet/coords_landscape
snipem/ps4-remote-play-control
startremoteplay.sh
Shell
mit
183
#!/bin/bash
# Boot the test VM under KVM with an SSH port-forward and a gdb stub.

SMP=4           # virtual CPUs
MEM=2048        # guest RAM in MiB
#NETWORK=e1000
NETWORK=virtio  # paravirtual NIC model
GRAPHIC=        # set to -nographic for a headless run (expanded unquoted on purpose)
GRAPHIC_FLAG=0

# 'command -v' is the portable replacement for 'which'; fail loudly if
# qemu is missing (previously an empty $QEMU made bash try to run "-cpu").
QEMU=$(command -v qemu-system-x86_64)
if [ -z "$QEMU" ]; then
	echo "qemu-system-x86_64 not found in PATH" >&2
	exit 1
fi

# Host tcp/5556 forwards to guest ssh (22); gdb stub listens on tcp/9999.
"$QEMU" -cpu host,level=9 -enable-kvm -smp $SMP -m $MEM $GRAPHIC -hda qemu.img -net nic,model=$NETWORK -net "user,hostfwd=tcp::5556-:22" -gdb tcp::9999 &
#-monitor telnet:127.0.0.1:1234,server,nowait &
# To get into the Qemu console, run `telnet 127.0.0.1 1234`.
Granary/granary2
scripts/vmlaunch.sh
Shell
mit
391
#!/usr/bin/bash seq -f '%.0f' 3 2 8000000 | factor | gawk '/ .* /{next} BEGIN{f=0;t=1;fifo[0]=2} {while(fifo[f]<$2){print fifo[f];fifo[t]=fifo[f]**2;f+=1;t+=1}} {print $2; fifo[t]=$2*$2;t+=1}' | head -500500 | gawk 'BEGIN{s=1}{s=(s*$1)%500500507}END{print s}'
DestructHub/ProjectEuler
Problem500/Bash/solution_1.sh
Shell
mit
260
#!/bin/sh # CYBERWATCH SAS - 2017 # # Security fix for DSA-3154-1 # # Security announcement date: 2015-02-05 00:00:00 UTC # Script generation date: 2017-01-01 21:07:13 UTC # # Operating System: Debian 7 (Wheezy) # Architecture: i386 # # Vulnerable packages fix on version: # - ntp:1:4.2.6.p5+dfsg-2+deb7u2 # # Last versions recommanded by security team: # - ntp:1:4.2.6.p5+dfsg-2+deb7u7 # # CVE List: # - CVE-2014-9750 # - CVE-2014-9751 # # More details: # - https://www.cyberwatch.fr/vulnerabilites # # Licence: Released under The MIT License (MIT), See LICENSE FILE sudo apt-get install --only-upgrade ntp=1:4.2.6.p5+dfsg-2+deb7u7 -y
Cyberwatch/cbw-security-fixes
Debian_7_(Wheezy)/i386/2015/DSA-3154-1.sh
Shell
mit
650
#!/bin/bash
# Generate skeleton <Name>.h / <Name>.cpp for a new SubSystem subclass.
# Usage: CreateSubsystem.sh <ClassName>

if [ -z "$1" ]; then
	# Without a class name the script used to write junk ".h"/".cpp" files.
	echo "Usage: $0 <ClassName>" >&2
	exit 1
fi

HEADER="#pragma once
#include \"../SubSystem.h\"

class $1 : public SubSystem {
public:
	$1();
	~$1();

	virtual void Startup();
	virtual void Update(const double deltaTime);
	virtual void Shutdown();
private:
};
"

BODY="#include \"$1.h\"

$1::$1(){
}

$1::~$1(){
}

void $1::Startup() {
}

void $1::Update(const double deltaTime) {
}

void $1::Shutdown() {
}
"

# Quoted "$1.h"/"$1.cpp" so names with spaces still produce one file each.
echo "$HEADER" > "$1.h"
echo "$BODY" > "$1.cpp"
exnotime/Tephra
src/Core/subsystem/systems/CreateSubsystem.sh
Shell
mit
424
#!/usr/bin/env bash
# Smoke-test that the image named in $1 can run `steamcmd +quit`.
# Output is captured in a throwaway file that is always removed on exit.

cleanup() {
	rm -f $scratch
}

trap cleanup EXIT

scratch=$(mktemp)

docker run --rm $1 steamcmd +quit > $scratch || exit 1
BraisGabin/dst-server
test/steamcmd-path/run.sh
Shell
mit
132
# Request short-lived TURN credentials from the Respoke API.
# ':' is a no-op whose parameter expansions do the work:
#   :=  defaults baseURL when unset/empty
#   :?  aborts with the given message when appToken is missing
: ${baseURL:="https://api.respoke.io/v1"}
: ${appToken:?"appToken required"}

curl -X POST -H "App-Token: $appToken" -H 'Content-type: application/json' \
	$baseURL/turn

# Example response (credentials redacted):
# {
#   "username": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXXXXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX.0000000000",
#   "password": "XXXXXXXXXXXX+XXXXXXXXXXXXXX=",
#   "ttl": 60,
#   "uris": [
#     "turn:54.193.20.11:3478?transport=udp"
#   ]
# }
respoke/docs
examples/turn.sh
Shell
mit
428
# Expose a locally unpacked Gradle distribution on the PATH.
# Sourced by the dotfiles loader (lives in java/path.zsh).
export GRADLE_HOME=/Users/williamwatson/Documents/java-libs/gradle-2.11
export PATH="$GRADLE_HOME/bin:$PATH"
wwatson/dotfiles
java/path.zsh
Shell
mit
109
#!/bin/bash #ssh pi@axiom 'cd ~/git/hrl_autobed_dev/autobed_web/scripts/; touch test_file' touch ~/ros_workspace/git/hrl_autobed_dev/autobed_web/scripts/test_file #./autobed_kill.sh echo "this is a test"
gt-ros-pkg/hrl_autobed_dev
autobed_web/scripts/test.sh
Shell
mit
206
# Delegate to the shared Windows-build requirements script in contrib/.
# NOTE(review): the relative path only resolves when run from this
# directory (building/windows/) — confirm callers always cd here first.
../../contrib/docker/forwindows/requirements.sh
vergecurrency/raspi
building/windows/requirements.sh
Shell
mit
47
#-- #-- Stop FitNesse #-- #-- set INSTALLDIR to the directory in which FitNesse was installed INSTALLDIR=/appl/testautomation/fitnesse #-- pidfile=$INSTALLDIR/fit.pid [[ -f $pidfile ]] && { kill `cat $pidfile` ; rm $pidfile ; }
consag/fitnessefixtures
linuxscripts/stopFitNesse.sh
Shell
epl-1.0
229
#!/bin/bash touch /home/pi/shutdown.log; php /home/pi/BatteryAlert.php >> /home/pi/shutdown.log; ssh [email protected] 'sh /vmfs/volumes/datastore1/scripts/ESXIShutdown.sh' >> /home/pi/shutdown.log; ssh -p 1022 [email protected] 'shutdown -r now' >> /home/pi/shutdown.log; shutdown 0;
zzzz401/APC-Battery-ESXI-Shutdown
PowerFailure.sh
Shell
gpl-2.0
277
#!/bin/bash
#
# Script to make an installable package of the plugin.
#
# Uses xcodebuild, pkgbuild and productbuild.
#

# Create a clean install directory...
if test -d build/Package; then
	sudo chmod -R u+w build/Package
	sudo rm -rf build/Package
fi
mkdir -p build/Package/Root
mkdir -p build/Package/Resources

# Install into this directory...
xcodebuild -project "$PWD/ChiptuneImporter.xcodeproj" \
	-target ChiptuneImporter \
	-configuration Release \
	install \
	DSTROOT="$PWD/build/Package/Root"

# Extract the version number from the project...
# Bug fix: the old check tested $? after the pipeline, which reported
# sed's status (always 0), so the PlistBuddy fallback never ran when
# `git describe` failed. Test the captured value instead.
ver=$(git describe 2>/dev/null | sed 's/release_//')
if [[ -z "$ver" ]]; then
	ver=$(/usr/libexec/PlistBuddy -c "Print:CFBundleShortVersionString" "ChiptuneImporter/ChiptuneImporter-Info.plist");
fi

# Make the package with pkgbuild and the product distribution with productbuild...
echo pkgbuild...
pkgbuild --identifier com.danielecattaneo.chiptuneimporter \
	--version "$ver" \
	--root build/Package/Root \
	--scripts ./PackageResources/Scripts \
	"./ChiptuneImporter.pkg"
productbuild --distribution ./PackageResources/Distribution.xml \
	--resources ./PackageResources/Resources \
	--package-path ./ \
	"./ChiptuneImporter-$ver.pkg"
# The intermediate component package is only an input to productbuild.
rm ./ChiptuneImporter.pkg
shysaur/ChiptuneMDImporter
makepackage.sh
Shell
gpl-2.0
1,381
#!/bin/bash service postgresql start service celeryd start rm -f /var/run/redis_6379.pid service redis_6379 start #service httpd start /usr/sbin/apachectl -D FOREGROUND
fredericlemoine/lsd-web
docker_files/init_docker.sh
Shell
gpl-2.0
170
#!/bin/bash
#
# Create a new repo.
# THIS HAS TO BE RUN ON THE GIT SERVER!
# WARNING:
# This file is maintained within ansible
# All local changes will be lost.

# Figure out the environment we're running in
GITROOT=/var/lib/dist-git/git/rpms

# check if a moron is driving me
if [ ! -d $GITROOT ] ; then
	# we're not on the git server (this check is fragile)
	echo "ERROR: This script has to be run on the git server."
	echo "ERROR: Homer sez 'Duh'."
	exit -9
fi

# Local variables
VERBOSE=0
TEST=
IGNORE=
GIT_SSH_URL="ssh://localhost"

Usage() {
	cat <<EOF
Usage:
	$0 [OPTIONS] <package_name>
	Creates a new repo for <package_name>

Options:
	-h,--help	This help message
	--email-domain DOMAIN	Email domain for git hooks.maildomain
	--pkg-owner-emails EMAILS	Comma separated list of emails for git hooks.mailinglist
EOF
}

# fail with no arguments
if [ $# -eq 0 ]; then
	Usage
	exit -1
fi

# GNU getopt normalizes the long options; failure means bad usage.
OPTS=$(getopt -o h -l help -l email-domain: -l pkg-owner-emails: -l default-branch-author: -- "$@")
if [ $? != 0 ]
then
	exit 1
fi

eval set -- "$OPTS"
while true ; do
	case "$1" in
		-h | --help) Usage; exit 0;;
		--email-domain) EMAIL_DOMAIN=$2; shift 2;;
		--pkg-owner-emails) PKG_OWNER_EMAILS=$2; shift 2;;
		--default-branch-author) AUTHOR=$2; shift 2;;
		--) shift; break;;
	esac
done

# fail when more or none packages are specified
if ! [ $# -eq 1 ]; then
	Usage
	exit -1
fi

PACKAGE=$1

# Defaults when the caller did not supply them.
if [ -z $EMAIL_DOMAIN ]; then
	EMAIL_DOMAIN=fedoraproject.org
fi
if [ -z $PKG_OWNER_EMAILS ]; then
	# NOTE(review): this line looks mangled by email anonymization in this
	# copy — it was presumably PKG_OWNER_EMAILS=<defaults>; confirm against
	# the upstream dist-git sources before relying on it.
	[email protected],[email protected]
fi
if [ -z $AUTHOR ]; then
	AUTHOR="Fedora Release Engineering <[email protected]>"
fi

# Sanity checks before we start doing damage
[ $VERBOSE -gt 1 ] && echo "Checking package $PACKAGE..."
if [ -f $GITROOT/$PACKAGE.git/refs/heads/master ] ; then
	echo "ERROR: Package module $PACKAGE already exists!" >&2
	exit -1
fi

# A cleanup in case gitolite came by this repo
if [ -f $GITROOT/$PACKAGE.git/hooks/update ] ; then
	echo "Gitolite already initialized this repo. Will remove its hooks"
	rm -f $GITROOT/$PACKAGE.git/hooks/update
fi

# "global" permissions check
if [ ! -w $GITROOT ] ; then
	echo "ERROR: You can not write to $GITROOT"
	echo "ERROR: You can not create repos"
	exit -1
fi

# Now start working on creating those branches
# Create a tmpdir to do some git work in
TMPDIR=$(mktemp -d /tmp/tmpXXXXXX)

# First create the master repo
mkdir -p $GITROOT/$PACKAGE.git
pushd $GITROOT/$PACKAGE.git >/dev/null
git init -q --shared --bare
echo "$PACKAGE" > description
# This is used to figure out who to send mail to.
git config --add hooks.mailinglist $PKG_OWNER_EMAILS
git config --add hooks.maildomain $EMAIL_DOMAIN
popd >/dev/null

# Now clone that repo and create the .gitignore and sources file
git init -q $TMPDIR/$PACKAGE
pushd $TMPDIR/$PACKAGE >/dev/null
touch .gitignore sources
git add .
git commit -q -m 'Initial setup of the repo' --author "$AUTHOR"
git remote add origin $GITROOT/$PACKAGE.git
git push -q origin master
popd >/dev/null

# Place the gitolite update hook in place since we're not using our own
ln -s /var/lib/dist-git/gitolite/hooks/common/update $GITROOT/$PACKAGE.git/hooks/update

# Setup our post-receive hooks
mkdir -p $GITROOT/$PACKAGE.git/hooks/post-receive-chained.d
ln -s /usr/share/git-core/mail-hooks/gnome-post-receive-email \
	$GITROOT/$PACKAGE.git/hooks/post-receive-chained.d/post-receive-email
ln -s /usr/share/git-core/post-receive-fedmsg \
	$GITROOT/$PACKAGE.git/hooks/post-receive-chained.d/post-receive-fedmsg

# This one kicks off all the others in post-receive-chained.d
ln -s /usr/share/git-core/post-receive-chained \
	$GITROOT/$PACKAGE.git/hooks/post-receive

rm -rf $TMPDIR
echo "Done."
pombredanne/dist-git
scripts/dist-git/git_package.sh
Shell
gpl-2.0
3,903
#!/bin/sh # script for execution of deployed applications # # Sets up the MCR environment for the current $ARCH and executes # the specified command. # exe_name=$0 exe_dir=`dirname "$0"` echo "------------------------------------------" if [ "x$1" = "x" ]; then echo Usage: echo $0 \<deployedMCRroot\> args else echo Setting up environment variables MCRROOT="$1" echo --- DYLD_LIBRARY_PATH=.:${MCRROOT}/runtime/maci64 ; DYLD_LIBRARY_PATH=${DYLD_LIBRARY_PATH}:${MCRROOT}/bin/maci64 ; DYLD_LIBRARY_PATH=${DYLD_LIBRARY_PATH}:${MCRROOT}/sys/os/maci64; export DYLD_LIBRARY_PATH; echo DYLD_LIBRARY_PATH is ${DYLD_LIBRARY_PATH}; shift 1 args= while [ $# -gt 0 ]; do token=$1 args="${args} \"${token}\"" shift done eval "\"${exe_dir}/ReadTreeFcn.app/Contents/MacOS/ReadTreeFcn\"" $args fi exit
cevo-public/TreePar-MatlabPlugin
Compiled/run_ReadTreeFcn.sh
Shell
gpl-2.0
840
#!/bin/bash # # Johnathon Trumble # [email protected] # March 18, 2015 # # Post-domain-extension configuration script # # CHANGELOG # 03/18/2015 - Tweaked logic to determine if AdminServer is running # 03/19/2015 - Added JOC configuration # Added logging configuration update # 03/23/2015 - Added NodeManager startup # 04/13/2015 - Moved domain edits to the create script # (should save a lot of time) # Source environment settings, exit on error [[ ! -a setScriptEnv.sh ]] && echo "[> Environment setup could not be completed. Ensure you are executing from the scripts directory, or via the fmw_deploy utility <]" && exit 2 || . ./setScriptEnv.sh [[ $? == "2" ]] && echo "[> Halting script execution <]" && exit 2 NM_START_TIMEOUT=120 SOA_CLUSTER="SOA_Cluster" SOA_DISCOVER_PORT="9998" usage(){ echo "~*~* Run as the oracle install user _after_ creating/extending the domain *~*~" echo echo " Fill in all information in the provide[[ ! -a setScriptEnv.sh ]] && echo "[> Environment setup could not be completed. Ensure you are executing from the scripts directory, or via the fmw_deploy utility <]" && exit 2 || . ./setScriptEnv.sh" } punpack(){ APP_HOME=${MSERVER_HOME/mserver\/*/mserver\/applications} [[ -e $TEMPLATE_DIR/$DOMAIN_NAME-packd.jar ]] && rm -f $TEMPLATE_DIR/$DOMAIN_NAME-packd.jar echo ">>> Packing up the domain from $DOMAIN_HOME..." $FMW_HOME/oracle_common/common/bin/pack.sh -managed=true -domain=$DOMAIN_HOME -template=$TEMPLATE_DIR/$DOMAIN_NAME-packd.jar -template_name=$DOMAIN_NAME echo ">>> Unpacking the domain to $MSERVER_HOME..." $FMW_HOME/oracle_common/common/bin/unpack.sh -domain=$MSERVER_HOME -template=$TEMPLATE_DIR/$DOMAIN_NAME-packd.jar -app_dir=$APP_HOME -overwrite_domain=true } # MAIN [[ $1 == "usage" ]] && usage && exit 1 # Get username/password for 'weblogic' user from the operator username="weblogic" #echo "Enter $username's password: " #read -s password password=$ADMIN_PW # Test if the security directory exists yet. 
If not, create it # It should not exist unless you've tried to start the AdminServer once if [[ ! -d $DOMAIN_HOME/servers/AdminServer/security ]] then echo "~~>> Security directory not found - generating..." mkdir -p $DOMAIN_HOME/servers/AdminServer/security fi # Populate the boot.properties file based on user input echo ">> Setting boot identity for domain startup..." cat << EOF > $DOMAIN_HOME/servers/AdminServer/security/boot.properties username=$username password=$password EOF # Edit the nodemanager properties file to allow start scripts # Without this set to true, the environment will fail to be set properly # during server start and the managed servers will start in ADMIN mode echo ">> Editing nodemanager properties..." if [[ -s $FMW_HOME/wlserver_10.3/common/nodemanager/nodemanager.properties ]]; then sed -i 's/StartScriptEnabled=false/StartScriptEnabled=true/g' $FMW_HOME/wlserver_10.3/common/nodemanager/nodemanager.properties else cat << EOF > $FMW_HOME/wlserver_10.3/common/nodemanager/nodemanager.properties #$(date) DomainsFile=$WL_HOME/common/nodemanager/nodemanager.domains LogLimit=0 PropertiesVersion=10.3 DomainsDirRemoteSharingEnabled=false javaHome=$JAVA_HOME AuthenticationEnabled=true NodeManagerHome=$WL_HOME/common/nodemanager JavaHome=$JAVA_HOME/jre LogLevel=INFO DomainsFileEnabled=true StartScriptName=startWebLogic.sh ListenAddress= NativeVersionEnabled=true ListenPort=$NM_PORT LogToStderr=true SecureListener=true LogCount=1 DomainRegistrationEnabled=false StopScriptEnabled=false QuitEnabled=false LogAppend=true StateCheckInterval=500 CrashRecoveryEnabled=false StartScriptEnabled=true LogFile=$DOMAIN_BASE/nodemanager.log LogFormatter=weblogic.nodemanager.server.LogFormatter ListenBacklog=50 EOF fi # Set USER_MEM_ARGS for JDK7 sed -i 's/.*export SUN_JAVA_HOME.*/&\n\nUSER_MEM_ARGS=\"-Xms32m -Xmx200m -XX:MaxPermSize=350m\"\nexport USER_MEM_ARGS/' $DOMAIN_HOME/bin/setDomainEnv.sh # Create logs directory [[ ! 
-d $LOG_DIR ]] && mkdir -p $LOG_DIR # Pack/Unpack operations echo ">> Performing a pack/unpack..." punpack # Disable hostname verification (depreciated) #cd $FMW_HOME/oracle_common/common/bin/ #./wlst.sh $RESP_DIR/disable_hostname_verification.py # Configure log locations and rotation #./wlst.sh $RESP_DIR/update_logging.py # Configure persistent transaction log location #./wlst.sh $RESP_DIR/configure_tlogs.py # Startup Admin Server ADM_PID=$(ps -ef | grep weblogic.Name=AdminServer | grep -v grep | awk '{print $2}') if [[ ! $ADM_PID ]]; then echo "[WARNING] AdminServer must be running. Starting up..." [[ -z $WLS_DOMAIN ]] && . ~/.bash_profile python ~/wls_scripts/servercontrol.py --start=admin [[ $? != "0" ]] && echo "[> Halting script execution <]" && exit 2 fi # Configure JOC using expect /usr/bin/expect << EOD set timeout -1 spawn $FMW_HOME/oracle_common/common/bin/wlst.sh expect "wls:/offline> " send "connect('weblogic','$ADMIN_PW','t3://$ADMIN_SERVER_HOST:7001')\r" expect "*serverConfig> " send "execfile('$FMW_HOME/oracle_common/bin/configure-joc.py')\r" expect "Enter Hostnames*" send "$SOA_HOSTNAMES\r" expect "Do you want to specify a cluster name*" send "y\r" expect "Enter Cluster Name*" send "$SOA_CLUSTER\r" expect "Enter Discover Port*" send "$SOA_DISCOVER_PORT\r" expect "Enter Distribute Mode*" send "true\r" expect "Do you want to exclude any server*" send "n\r" expect "wls:*> " send "exit()\r" expect eof EOD # Start NodeManager NM_PID=$(ps -ef | grep weblogic.NodeManager | grep -v grep | awk '{print $2}') if [[ ! $NM_PID ]]; then echo "Starting up NodeManager..." [[ -z $WLS_DOMAIN ]] && . ~/.bash_profile python ~/wls_scripts/servercontrol.py --start=nodemanager [[ $? 
!= "0" ]] && echo "[> Halting script execution <]" && exit 2 fi # Perform unpack operations on all remote nodes for NODE in ${MACHINE_LIST[*]}; do if [[ $NODE == $(hostname) ]]; then echo ">> Skipping this machine, since operations have already been performed" else ssh -o StrictHostKeyChecking=no -t oracle@$NODE "[[ -d $MEDIA_BASE ]] && exit 1 || exit 0" if [[ $? == 0 ]]; then echo ">>> Media base does not exist on remote host. Ensure that these scripts are accessible via a mount point and try again" exit 2 fi echo ">> Performing remote node setup on node $NODE" ssh -o StrictHostKeyChecking=no -t oracle@$NODE "cd $MEDIA_BASE/scripts; ./post_domain_remote.sh" fi if [[ $? == 2 ]]; then echo "[FATAL] An error occurred during remote node setup. Please inspect the output, correct the error, and try again" echo ">> [NODE IN ERROR]: $NODE" exit 2 fi done # Shut down AdminServer python ~/wls_scripts/servercontrol.py --stop=admin echo "> DONE!"
trumble117/wcc_deploy
fmwsetup/scripts/10-post_domain_config.sh
Shell
gpl-2.0
6,774
# Clean artifacts from the previous Synopsys VCS run.
rm -rf simv* csrc simv.daidir
# Compile the strategy-pattern example (SystemVerilog) into ./simv.
vcs \
	-sverilog \
	-timescale="1ns/1ns" \
	FlyBehavior.sv \
	FlyWithWings.sv \
	FlyNoWay.sv \
	QuackBehavior.sv \
	Quack.sv \
	MuteQuack.sv \
	Squeak.sv \
	Duck.sv \
	MallardDuck.sv \
	DecoyDuck.sv \
	RubberDuck.sv \
	ModelDuck.sv \
	MiniDuckSimulator.sv
# Run the simulation with an automatically chosen random seed.
simv +ntb_random_seed_automatic
tenthousandfailures/systemverilog-design-patterns
strategypattern/example/run_synopsys.sh
Shell
gpl-2.0
365
#!/bin/bash
# Symlink every */*.symlink file from the dotfiles repo into $HOME as a
# dotfile, plus a few app configs under ~/.config.

DOTFILES=$HOME/.dotfiles

echo "creating symlinks"
# Iterate the glob directly instead of parsing `ls` output so paths with
# spaces or glob characters survive. (Without globstar, ** == *, same
# set of matches as before.)
for file in **/*.symlink; do
	[ -e "$file" ] || continue  # skip the literal pattern when nothing matches
	target="$HOME/.$(basename "$file" ".symlink")"
	echo "creating symlink for $file"
	ln -s "$DOTFILES/$file" "$target"
done

# -p creates missing parents and makes reruns idempotent (plain mkdir
# failed when ~/.config was absent or the dir already existed).
mkdir -p "$HOME/.config/matplotlib"
ln -s "$DOTFILES/config/matplotlib/matplotlibrc" "$HOME/.config/matplotlib/matplotlibrc"
mkdir -p "$HOME/.config/kitty"
ln -s "$DOTFILES/kitty/kitty.conf" "$HOME/.config/kitty/kitty.conf"
romain-fontugne/dotfiles
install/link.sh
Shell
gpl-2.0
465
#!/bin/bash BASE_DIR=$(dirname $0) cd $BASE_DIR CURRENT_DIR=`pwd` lnif() { if [ -e "$1" ]; then ln -sf "$1" "$2" fi } today=`date +%Y%m%d` list=( \ bashrc \ zshrc \ profile.robc \ profile.devel \ byobu \ dot_emacs \ fetchmailrc \ gitconfig \ inputrc \ indent.pro \ mailcap \ msmtprc \ muttrc \ netrc \ procmailrc \ screenrc \ tmux.conf \ ackrc \ w3m \ vmailrc \ lynx.cfg \ lynx.lss \ quiltrc \ globalrc \ ) # Download plantuml.jar wget http://sourceforge.net/projects/plantuml/files/plantuml.jar/download -O lib/plantuml.jar for i in ${list[@]}; do echo "Install $i"; lnif $CURRENT_DIR/$i $HOME/.$i; echo "Link .$i -> $CURRENT_DIR/$i"; done # Install tmuxen bin=$HOME/.bin lib=$HOME/.lib cwd=$(pwd) if [ -d $bin ]; then echo "Backup $bin to $bin.$today" mv -f $bin $bin.$today fi if [ -d $lib ]; then echo "Backup $lib to $lib.$today" mv -f $lib $lib.$today fi mkdir $bin mkdir $lib echo "Installing tmuxen to $bin" ln -sf $cwd/tmuxen $bin/tmuxen echo "Symlinking _tmux.conf to $HOME/.tmux.conf" ln -sf $cwd/_tmux.conf $HOME/.tmux.conf echo "Installing utils to $bin" for i in `ls bin`; do echo "Install $i"; ln -sf $cwd/bin/$i $bin/$i; done echo "Installing libs to $lib" for i in `ls lib`; do echo "Install $i"; ln -sf $cwd/lib/$i $lib/$i; done echo "Install Done!"
robbie-cao/xnix-config
install.sh
Shell
gpl-2.0
1,578
#!/bin/bash [ "${FNRDEBUG:-0}" -gt 1 ] && set -x # Synopsis {{{1 # The 'multi' source outputs at most $MAXREC (500) records to the tree # list widget. Each row displays its own icon, which is chosen in # alphabetical order from the icon cache. The row label is the icon # name. Activating a row brings up a demo Xdialog message showing the # numbered icon name. Handled error cases: # 1. Invalid/not found ICONCACHE directory. # 2. Empty icon cache. # Source title and error messages can be localized by installing a # suitable .mo file, findnrun-plugin-multi.mo # User may change. {{{1 MAXREC=500 # output records # Source declaration, manually copy to ~/.findnrunrc {{{1 # And add source 'multi' to variable SOURCES in ~/.findnrunrc. # Uncomment each line when copying. #SOURCE_multi='multi:multi::multi:' #TAP_multi='/usr/share/findnrun/doc/examples/multi-field-tap.sh "${term}" | findnrun-formatter --' #DRAIN_multi='show() { Xdialog --msgbox "$*" 0x0 ;} ; show' #TITLE_multi='multi-field example' # No change below. {{{1 term=$1 # search term # Trap {{{1 TMPF="/tmp/.${0##*/}.tmp.$$" trap 'rm -f "${TMPF:-/tmp/dummy}"*' HUP INT QUIT TERM ABRT 0 # i18n Localization {{{1 TEXTDOMAIN="findnrun-plugin-multi" # Load findnrun settings. {{{1 ICONCACHE= . ${HOME}/.findnrunrc # Handle errors upfront. {{{1 # But only when the input search field is "", which happens in two # cases: 1) the first time the source plugin is started, typically by # pressing F3; and 2) when the user clears the search input field. if [ -z "${term}" ]; then if ! [ -d "${ICONCACHE}" ]; then # Print error directly onto the list widget. 
{{{ printf "$(gettext \ "%sError: invalid setting: ICONCACHE.\n%sPlease try restarting Findnrun.")" \ '||' '||' # 2 records #}}} exit fi # Our tap search targets the filename template findnrun-*.png only set +f # enable * expansion line= printf "%s\n" "${ICONCACHE}/findnrun"-*.png >"${TMPF}" && # list icon filenames read line < "${TMPF}" # read first filename # Filename ends with '*.png' if there were no matches. if case "${line}" in *-\*.png) true ;; *) false ;; esac; then # Error: no matching filename. {{{ # i18n Please translate just the first gettext in # i18n TEXTDOMAIN=findnrun-plugin-multi. The second gettext is # i18n re-cycled from TEXTDOMAIN=findnrun by design to keep translated # i18n text in sync. # i18n Fyi, $TITLE is the value of $TITLE_multi in ~/.findnrunrc and # i18n here it's already translated, provided that a translation for # i18n $TITLE_multi exists in TEXTDOMAIN=findnrun-plugin-multi. printf "$(gettext \ "%sPlugin '%s' found no icons to display.\n%sPlease untick [%s] and tick it again.\n%sThen clear the search input field to refresh search results.")" \ '||' "${TITLE}" '||' "$(gettext -d findnrun "_Show all icons")" '||' # 3 records #}}} exit fi fi # Implement the search. {{{1 # Output tabular data {{{2 # Fill six columns: # <icon-filename> '|' <tap-reserved> '|' <label> '|' <tap-data> '|' <comment> '|' <categories> d=/dev/fd # This is one way to fill six columns worth of data. Each here-doc below # contains a column's worth of data. Sub-shells run in some here-docs to # create dynamic data. Do not indent outside of an opening '$(' and its # closing ')' or you will get spurious spaces in column data. paste -d '|' $d/4 4<<EOF4 $d/5 5<<EOF5 $d/6 6<<EOF6 $d/7 7<<EOF7 $d/8 8<<EOF8 $d/9 9<<EOF9 $( # Save icon names to ${TMPF}.4 set +f # enable * expansion # List icon filenames that match the search term. printf "%s\n" "${ICONCACHE}/findnrun-"*"$1"*.png >"${TMPF}.4" && # Format filenames. awk ' { # Exit on reaching the maximum output record count. 
if( ++n > '"${MAXREC}"') exit # Take basename and print to this column (EOF4). gsub(/^.*\/|\.png$/, ""); print # Strip name template prefix and print to file for column EOF6. print substr($0, 1+length("findnrun-")) > "'"${TMPF}.6"'" } ' "${TMPF}.4" # Below leave 2nd column (EOF) empty - it is tap-reserved. ) EOF4 EOF5 $( # Print labels courtesy of column EOF4. cat "${TMPF}.6" ) EOF6 $( # Source drain will process these data. # Output a numbered list of icon names for Xdialog to show on row activation. cat -n "${TMPF}.6" ) EOF7 comments row 1 comments row 2 etc. EOF8 categories row 1 categories row 2 etc. EOF9
step-/find-n-run
usr/share/findnrun/doc/examples/multi-field-tap.sh
Shell
gpl-2.0
4,407
#!/bin/bash # Iridium browser OSX signing script for outside of Mac App Store # Copyright (C) 2015 struktur AG <[email protected]> # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA #======= Don't change this part unless you know what you are doing ======= if [ -z "$6" ] then echo echo "sign.sh usage: sign.sh signing_identity bundle_id bundle_version app_file_path output_name_without_.app output_dir" echo echo "Please keep in mind that bundle_version should correspond to the version you are setting in version patch!" 
echo echo "You can check your identities with: security -q find-identity" echo security -q find-identity echo exit 1 fi IDENTITY=$1 BUNDLE_ID=$2 VERSION=$3 INPUT_FILE_PATH=$4 OUTPUT_FILE_NAME=$5 OUTPUT_DIR=$6 echo echo "Iridium signing script" echo echo " Please make sure you have setup signing and packaging identities in script correctly" echo " You can check your identities with: security -q find-identity -p codesigning -v" echo echo " Your current settings:" echo " signing identity - $IDENTITY" echo " bundle id - $BUNDLE_ID" echo " version - $VERSION" echo echo echo "Copying app" rm -rf $OUTPUT_DIR/$OUTPUT_FILE_NAME.app cp -p -a $INPUT_FILE_PATH $OUTPUT_DIR/$OUTPUT_FILE_NAME.app echo echo "Signing" codesign -s $IDENTITY "$OUTPUT_DIR/$OUTPUT_FILE_NAME.app/Contents/Versions/$VERSION/Iridium Helper.app" codesign -s $IDENTITY "$OUTPUT_DIR/$OUTPUT_FILE_NAME.app/Contents/Versions/$VERSION/Iridium Helper EH.app" codesign -s $IDENTITY "$OUTPUT_DIR/$OUTPUT_FILE_NAME.app/Contents/Versions/$VERSION/Iridium Helper NP.app" codesign -s $IDENTITY "$OUTPUT_DIR/$OUTPUT_FILE_NAME.app/Contents/Versions/$VERSION/Iridium Framework.framework/Libraries/exif.so" codesign -s $IDENTITY "$OUTPUT_DIR/$OUTPUT_FILE_NAME.app/Contents/Versions/$VERSION/Iridium Framework.framework/Resources/app_mode_loader.app" codesign -s $IDENTITY "$OUTPUT_DIR/$OUTPUT_FILE_NAME.app/Contents/Versions/$VERSION/Iridium Framework.framework/Helpers/crashpad_handler" codesign -s $IDENTITY "$OUTPUT_DIR/$OUTPUT_FILE_NAME.app/Contents/Versions/$VERSION/Iridium Framework.framework/Internet Plug-Ins/nacl_irt_x86_64.nexe" codesign -s $IDENTITY "$OUTPUT_DIR/$OUTPUT_FILE_NAME.app/Contents/Versions/$VERSION/Iridium Framework.framework/Iridium Framework" codesign -s $IDENTITY -i "$BUNDLE_ID" "$OUTPUT_DIR/$OUTPUT_FILE_NAME.app" # validate code signatures echo echo "Validating code signature and resources" spctl --assess -vvvv "$OUTPUT_DIR/$OUTPUT_FILE_NAME.app/Contents/Versions/$VERSION/Iridium Helper.app" spctl --assess 
-vvvv "$OUTPUT_DIR/$OUTPUT_FILE_NAME.app/Contents/Versions/$VERSION/Iridium Helper EH.app" spctl --assess -vvvv "$OUTPUT_DIR/$OUTPUT_FILE_NAME.app/Contents/Versions/$VERSION/Iridium Helper NP.app" spctl --assess -vvvv "$OUTPUT_DIR/$OUTPUT_FILE_NAME.app/Contents/Versions/$VERSION/Iridium Framework.framework/Libraries/exif.so" spctl --assess -vvvv "$OUTPUT_DIR/$OUTPUT_FILE_NAME.app/Contents/Versions/$VERSION/Iridium Framework.framework/Resources/app_mode_loader.app" spctl --assess -vvvv "$OUTPUT_DIR/$OUTPUT_FILE_NAME.app/Contents/Versions/$VERSION/Iridium Framework.framework/Helpers/crashpad_handler" spctl --assess -vvvv "$OUTPUT_DIR/$OUTPUT_FILE_NAME.app/Contents/Versions/$VERSION/Iridium Framework.framework/Internet Plug-Ins/nacl_irt_x86_64.nexe" spctl --assess -t exec -vvvv "$OUTPUT_DIR/$OUTPUT_FILE_NAME.app/Contents/Versions/$VERSION/Iridium Framework.framework/Iridium Framework" spctl --assess -vvvv "$OUTPUT_DIR/$OUTPUT_FILE_NAME.app"
guorendong/iridium-browser-osx
code_signing/sign.sh
Shell
gpl-2.0
4,154
## setting up dotfiles set -u # zsh ln -ivs $PWD/zshrc ~/.zshrc ln -ivs $PWD/zshenv ~/.zshenv ln -ivs $PWD/git-prompt.sh ~/.git-prompt.sh ln -ivs $PWD/zprofile ~/.zprofile # emacs mkdir -vp ~/.emacs.d/ ln -ivs $PWD/init.el ~/.emacs.d/init.el test -e ~/.emacs && echo '!!!warning .emacs file exists. should be removed' # keyboard layouts mkdir -vp ~/Library/Keyboard\ Layouts cp -iv $PWD/Dvorak-JP.icns ~/Library/Keyboard\ Layouts cp -iv $PWD/Dvorak-JP.keylayout ~/Library/Keyboard\ Layouts cp -iv $PWD/Dvorak-R.icns ~/Library/Keyboard\ Layouts cp -iv $PWD/Dvorak-R.keylayout ~/Library/Keyboard\ Layouts # VSCode VSCODE_USER_PATH=~/Library/Application\ Support/Code/User mkdir -vp $VSCODE_USER_PATH ln -ivs $PWD/vscode-keybindings.json "$VSCODE_USER_PATH/keybindings.json" ln -ivs $PWD/vscode-settings.json "$VSCODE_USER_PATH/settings.json"
ryna4c2e/dotfiles
setup.sh
Shell
gpl-2.0
854
sudo defaults write /System/Library/LaunchDaemons/com.apple.coreservices.appleevents ExitTimeOut -int 5 sudo defaults write /System/Library/LaunchDaemons/com.apple.securityd ExitTimeOut -int 5 sudo defaults write /System/Library/LaunchDaemons/com.apple.mDNSResponder ExitTimeOut -int 5 sudo defaults write /System/Library/LaunchDaemons/com.apple.diskarbitrationd ExitTimeOut -int 5 sudo defaults write /System/Library/LaunchAgents/com.apple.coreservices.appleid.authentication ExitTimeOut -int 5
erikdejonge/devenv
setfastshutdown.sh
Shell
gpl-2.0
496
find_cmd_val() { if [ -n "$QUERY_STRING" ] then query1=$(echo "$QUERY_STRING" | cut -d"&" -f1) cmd=$(echo "$query1" | cut -d"=" -f1) if [ "$cmd" = "cmd" ] then echo "$(echo "$query1" | cut -d"=" -f2)" else echo "Unknown cmd" fi else echo "No query string" fi } print_footer() { echo "<p>Footer - All environment variables:<br>" for p in $(printenv); do echo "$p" "<br>"; done echo "Parsing cmd result: " "$(find_cmd_val)" "<br>" if [ ${CONTENT_LENGTH=0} -gt 0 ] then cat <&0 >../tmp/tmp.dat echo "Got content via POST and saved it in tmp/tmp.dat<br>" fi echo "<br><br><br></p>" } cat << EOF <!doctype html> <html> <head> <meta charset="utf-8"> <meta http-equiv="Content-Type" content="text/html; charset=UTF-8"> <!-- <meta http-equiv="refresh" content="0"> --> <link href="/OL91/style/style.css" rel="stylesheet" type="text/css" /> <title>Upgrade</title> </head> <body> <header> </header> <div id="head"> <h1><img src="/OL91/img/header_logo_gradient_75x75.jpg" alt="Logo" width="75" height="75" border="0">&nbsp;OL 91</h1> </div> <div id="statusbar"> <h2>Upgrade</h2> </div> <div id="container"> <div id="navbar"> <form action="/OL91/cgi-bin/upload.sh" method="post" enctype="multipart/form-data" id="upload"> <input type="file" name="filename"> <input type="submit" value="Upgrade"> </form> </div> <div id="navbar"> <ul> <li class="backMain"><a href="/OL91/cgi-bin/main.sh">Back to main page ..</a></li> </ul> </div> </div> <footer> </footer> </body> </html> EOF
ReinhardProbst/WBM
htdocs/OL91/cgi-bin/upgrade.sh
Shell
gpl-2.0
1,879
#!/bin/sh # Copyright (C) 1999-2005 ImageMagick Studio LLC # # This program is covered by multiple licenses, which are described in # LICENSE. You should have received a copy of LICENSE with this # package; otherwise see http://www.imagemagick.org/script/license.php. . ${srcdir}/tests/common.shi ${RUNENV} ${MEMCHECK} ./rwfile ${SRCDIR}/input_bilevel.miff PNM
atmark-techno/atmark-dist
user/imagemagick/tests/rwfile_PNM_bilevel.sh
Shell
gpl-2.0
362
#!/usr/bin/env bash # Author: cbweaver (https://github.com/cbweaver) # Description: Handle output to stdout and stderr reset_all="\033[0m" reset_bold="\033[21m" bold="\033[1m" black="\033[0;30m" dark_gray="\033[1;30m" blue="\033[0;34m" light_blue="\033[1;34m" green="\033[0;32m" light_green="\033[1;32m" cyan="\033[0;36m" light_cyan="\033[1;36m" red="\033[0;31m" light_red="\033[1;31m" purple="\033[0;35m" light_purple="\033[1;35m" brown="\033[0;33m" yellow="\033[1;33m" light_gray="\033[0;37m" white="\033[1;37m" # Purpose: Format and print a given message # Arguments: # 1. message_type # 2. message function msg { if [[ $quiet = "false" ]]; then if [[ $# -eq 2 ]]; then case $1 in ERROR ) echo -e "$light_red$2$reset_all" >&2 ;; PROMPT ) echo -en "$light_cyan$2$reset_all" ;; COMMENT ) echo -e "$yellow$2$reset_all" ;; SUCCESS ) echo -e "$light_green$2$reset_all" ;; PLAIN ) echo -e "$white$1$reset_all" ;; * ) echo -e "$2" ;; esac else echo "$1" fi fi } # test all colors # Comment this section out while not testing. #message="Here's some text in a special color!" #echo -e "black: $black$message$reset_all" #echo -e "dark_gray: $dark_gray$message$reset_all" #echo -e "blue: $blue$message$reset_all" #echo -e "light_blue: $light_blue$message$reset_all" #echo -e "green: $green$message$reset_all" #echo -e "light_green: $light_green$message$reset_all" #echo -e "cyan: $cyan$message$reset_all" #echo -e "light_cyan: $light_cyan$message$reset_all" #echo -e "red: $red$message$reset_all" #echo -e "light_red: $light_red$message$reset_all" #echo -e "purple: $purple$message$reset_all" #echo -e "light_purple: $light_purple$message$reset_all" #echo -e "brown: $brown$message$reset_all" #echo -e "yellow: $yellow$message$reset_all" #echo -e "light_gray: $light_gray$message$reset_all" #echo -e "white: $white$message$reset_all"
cbweaver/cms-manager
lib/output.sh
Shell
gpl-2.0
1,984
. ./init.sh # commits every 1000 rows # sql thread stops normally (simulating offline backup job) mysql $S1 test -e "set global relay_log_purge=0" mysql $S2 test -e "set global relay_log_purge=0" mysql $S3 test -e "set global relay_log_purge=0" mysql $S4 test -e "set global relay_log_purge=0" mysql $S3 test -e "flush logs" mysql $S3 test -e "flush logs" perl insert.pl $MP $MYSQL_USER $MYSQL_PWD 2 1000 0 mysql $S2 test -e "stop slave sql_thread" mysql $S2 test -e "insert into t1 values (99950, 100, 100)" check_sql_stop $0 $S2P perl tran_insert.pl $MP $MYSQL_USER $MYSQL_PWD 1001 100000 1000 ./run_bg.sh & wait_until_manager_start $0 check_sql_stop $0 $S2P masterha_check_status --conf=$CONF > /dev/null fail_if_nonzero $0 $? masterha_stop --conf=$CONF > /dev/null check_sql_stop $0 $S2P ./kill_m.sh ./run.sh fail_if_zero $0 $? mysql $S1 test -e "insert into t1 values(10000003, 300, 'bbbaaaaaaa');" check_master $0 $S2P $MP check_count $0 $S2P 99001 ./check $0 100001 "h=127.0.0.1,P=$S1P h=127.0.0.1,P=$S3P h=127.0.0.1,P=$S4P"
cenalulu/mha4mysql-manager-dp
tests/t/t_large_data_sql_fail.sh
Shell
gpl-2.0
1,047
#!/bin/sh pwd=`pwd` contege="${pwd}/pre-built/ConTeGe.jar" contegeLibs="${pwd}/lib/scala-lib-2.11.8.jar:${pwd}/lib/asm-tree-4.0.jar:${pwd}/lib/asm-4.0.jar:${pwd}/lib/tools.jar:${pwd}/lib/testSkeleton.jar:${pwd}/lib/commons-io-2.0.1.jar:${pwd}/lib/jpf.jar:${pwd}/lib/bcel-5.2.jar" contegeOwnLibs="${pwd}/ownLibs/javaModel.jar:${pwd}/ownLibs/clinitRewriter.jar" # example: test XYSeries class from jfreechart-0.9.8 --- replace this with your own class under test bmBase="${pwd}/benchmarks/pldi2012/" bm="XYSeries" testedJar="${bmBase}/${bm}/jar/jfreechart-0.9.8_rewritten.jar" testedJarLibs="${bmBase}/${bm}/lib/jcommon-0.8.0.jar:${bmBase}/${bm}/clinit.jar" testedJarEnvTypes="${bmBase}/${bm}/env_types.txt" # ClassTester arguments: # # 0: the class under test (CUT) # 1: file with names of auxiliary types # 2: random seed # 3: max. nb of tries to generate suffixes (i.e. how long it should run) # 4: result file (only written when a thread safety violation is found) # 5: whether to reset static state before each test (only works when classes have been instrumented with ClinitRewriter) seed=3 maxSuffixGenTries=100 cmd="java -cp ${contegeLibs}:${contege}:${contegeOwnLibs}:${testedJar}:${testedJarLibs} contege.ClassTester org.jfree.data.XYSeries ${testedJarEnvTypes} ${seed} ${maxSuffixGenTries} result.out false" echo "${cmd}\n" eval ${cmd} cat result.out
michaelpradel/ConTeGe
scripts/ClassTester_pre-built.sh
Shell
gpl-2.0
1,368
#!/usr/bin/env bash cd .. java -jar video/target/video-0.0.1.jar serve video/media/sample-thor.mp4 "C:/Program Files/VideoLAN/VLC"
at15/cs433
video/serve-stream.sh
Shell
gpl-2.0
130
#!/bin/sh # -------------------------------------------------------------------------- # install script for Osx # -------------------------------------------------------------------------- # Processing Wrapper for the Oculus Rift library # http://github.com/xohm/SimpleOculusRift # -------------------------------------------------------------------------- # prog: Max Rheiner / Interaction Design / zhdk / http://iad.zhdk.ch/ # date: 06/24/2014 (m/d/y) # ---------------------------------------------------------------------------- # Change P5_Path to the folder where Processing stores the libraries # On Osx it should be in '~Documents/Processing' (Processing 1.5.1) # ---------------------------------------------------------------------------- # copy the libs/doc/examples to the processing folders P5_Path=~/Documents/Processing # check if libraries folder exists if [ ! -d $P5_Path/libraries ]; then mkdir $P5_Path/libraries fi # copie the files cp -r ./dist/all/SimpleOculusRift $P5_Path/libraries/ # remove all subversion folders cd $P5_Path/libraries/SimpleOculusRift rm -rf `find . -type d -name .svn` rm -rf `find . -type d -name .git` echo "--- installed SimpleOculusRift ---"
xohm/SimpleOculusRift
installOsx.sh
Shell
gpl-2.0
1,202
#!/bin/bash # For RHEL or Fedora # installing kubeadm # https://kubernetes.io/docs/setup/independent/install-kubeadm/ # kubeadm config print init-defaults # https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1 cat <<EOF > /etc/yum.repos.d/kubernetes.repo [kubernetes] name=Kubernetes baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64 enabled=1 gpgcheck=1 repo_gpgcheck=1 gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg exclude=kube* EOF # Set SELinux in permissive mode (effectively disabling it) setenforce 0 sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes systemctl enable kubelet && systemctl start kubelet systemctl enable docker.service && systemctl start docker.service # turn off swap swapoff -a # comment the line with swap in /etc/fstab file or run command as below sed -i -e /swap/d /etc/fstab # flush all iptables on master and worker nodes iptable -F && iptables -X ip6tables -F && ip6tables -X systemctl stop firewalld && systemctl disable firewalld # or set the following firewall rules on master node firewall-cmd --permanent --add-port=6443/tcp firewall-cmd --permanent --add-port=2379-2380/tcp firewall-cmd --permanent --add-port=10250/tcp firewall-cmd --permanent --add-port=10251/tcp firewall-cmd --permanent --add-port=10252/tcp firewall-cmd --permanent --add-port=10255/tcp firewall-cmd --reload # set the following firewall rules on worker nodes firewall-cmd --permanent --add-port=10250/tcp firewall-cmd --permanent --add-port=10255/tcp firewall-cmd --permanent --add-port=30000-32767/tcp firewall-cmd --permanent --add-port=6783/tcp firewall-cmd --reload # Some users on RHEL/CentOS 7 have reported issues with traffic being routed # incorrectly due to iptables being bypassed. 
You should ensure # net.bridge.bridge-nf-call-iptables is set to 1 in your sysctl config, e.g. cat <<EOF > /etc/sysctl.d/k8s.conf net.bridge.bridge-nf-call-ip6tables = 1 net.bridge.bridge-nf-call-iptables = 1 EOF sysctl --system
jijianwen/Learn
Linux/container/geektime/11_deploy_k8s_cluster/install_kubeadm.sh
Shell
gpl-2.0
2,155
git mv $1 archive/ cp $2 ~/sources/kinoraw_repo/ cd ~/sources/kinoraw_repo/ git add $2 git rm $1 git commit -a -m "new kinoraw_tools release" git push
kinoraw/kinoraw_tools
commit_release.sh
Shell
gpl-2.0
150
#!/bin/bash [ -z "$RTAGS_DISABLED" ] && [ -x "$(which rc)" ] && rc --silent --compile "$@" & [ -n "$RTAGS_RMAKE" ] && exit 0 compiler="$1" shift $compiler "$@" exit $?
hitmoon/emacs-conf
emacs.d/elpa/rtags-20200221.36/rtags-2.38/bin/rtags-gcc-prefix.sh
Shell
gpl-2.0
169
#!/usr/bin/env bash # Author: Jon Schipp # Nagios Exit Codes OK=0 WARNING=1 CRITICAL=2 UNKNOWN=3 usage() { cat <<EOF Nagios plug-in that checks packet rate for traffic specified with a bpf Options: -i Network interface -f <bpf> Filter in libpcap syntax -t <int> Time interval in seconds (def: 1) -w <int> Warning threshold -c <int> Critical threshold EOF } argcheck() { if [ $ARGC -lt $1 ]; then echo "Please specify an argument!, try $0 -h for more information" exit 1 fi } depend_check(){ bin=$(which tcpdump) [[ -f $bin ]] || { echo "UNKNOWN: $bin not found in ${PATH}" && exit $UNKNOWN; } [[ -d /tmp ]] && DIR=/tmp && return [[ -d /var/tmp ]] && DIR=/var/tmp && return DIR=. } check_bpf () { [ "$1" ] || { echo "No BPF specified, use \`\`-f''" && exit $UNKNOWN; } exp='\0324\0303\0262\0241\02\0\04\0\0\0\0\0\0\0\0\0\0377\0377\0\0\01\0\0\0' echo -en "$exp" | tcpdump -r - "$*" >/dev/null 2>&1 || { echo "UNKNOWN: Invalid BPF" && exit $UNKNOWN; } } get_packets() { timeout -s SIGINT $TIME tcpdump -nni $INT "$FILTER" 2>/dev/null > $BEFORE timeout -s SIGINT $TIME tcpdump -nni $INT "$FILTER" 2>/dev/null > $AFTER ! [ -f $BEFORE ] && echo "UNKNOWN: $BEFORE doesn't exist!" && exit $UNKNOWN ! [ -f $AFTER ] && echo "UNKNOWN: $AFTER doesn't exist!" 
&& exit $UNKNOWN } get_counts() { START=$(cat $BEFORE | wc -l) STOP=$(cat $AFTER | wc -l) [[ $START -gt $STOP ]] && RESULT=$((START-STOP)) [[ $STOP -gt $START ]] && RESULT=$((STOP-START)) } traffic_calculation() { if [ $1 -gt $CRIT ]; then exit $CRITICAL elif [ $1 -gt $WARN ]; then exit $WARNING else exit $OK fi } PPS=0 BPS=0 LINERATE=0 TIME=1 WARN=0 CRIT=0 ARGC=$# BEFORE=$DIR/check_traffic1.txt AFTER=$DIR/check_traffic2.txt # Print warning and exit if less than n arguments specified argcheck 1 depend_check # option and argument handling while getopts "hi:c:f:t:w:" OPTION do case $OPTION in h) usage exit ;; i) INT=$OPTARG ;; f) FILTER="$OPTARG" ;; t) TIME=$OPTARG ;; c) CRIT=$OPTARG ;; w) WARN=$OPTARG ;; *) exit $UNKNOWN ;; esac done [ -d /sys/class/net/$INT ] || { "UNKNOWN: $INT does not exist" && exit $UNKNOWN; } [ -d /proc ] && check_bpf "$FILTER" get_packets get_counts echo "Traffic rate is ~${RESULT}/${TIME}" traffic_calculation $RESULT
jonschipp/nagios-plugins
check_traffic.sh
Shell
gpl-2.0
2,422
#!/usr/bin/env bash # # XStream Complete Test Suite, version 1 # script to run the complete collection of test suites. ########################################## # # # # # # readonly XDIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ) source "${XDIR}/TestFramework/TestSuite.sh" ${XDIR}; function XStreamTestSuite { TestSuite } bash ./TestFramework/TestCase.sh clean bash ./TestFramework/TestCase.sh cleanTest bash ./TestFramework/TestCase.sh setup XStreamTestSuite
ncdesouza/xstream
frontend/test/XStreamTestSuite.sh
Shell
gpl-2.0
479
#!/bin/sh set -ex # Build jcgo.jar and "auxbin" .jar files; generate Java source files of "rflg_out". # # Prerequisites: # * Oracle JDK 1.6.0 (or 1.4.2_19) # * curl ftp://ftp.gnu.org/gnu/classpath/classpath-0.93.tar.gz | tar zxf - # * mkdir -p contrib/swt # * unzip swt-3.8-win32-win32-x86.zip src.zip -d contrib/swt/swt-win32-win32-x86 # * (cd contrib/swt/swt-win32-win32-x86; unzip src.zip -d src) SWT_SRC_RELPATH=contrib/swt/swt-win32-win32-x86/src # Set current working directory to JCGO root: cd $(dirname "$0")/.. # jcgo.jar: mkdir -p .build_tmp/jtr/bin echo "Main-Class: com.ivmaisoft.jcgo.Main" > .build_tmp/jtr/MANIFEST.MF javac -d .build_tmp/jtr/bin -source 1.3 -target 1.3 jtrsrc/com/ivmaisoft/jcgo/*.java jar cfm jcgo.jar .build_tmp/jtr/MANIFEST.MF -C .build_tmp/jtr/bin com # auxbin/jre/GenRefl.jar: GENREFL_JAR=auxbin/jre/GenRefl.jar mkdir -p .build_tmp/genrefl/bin auxbin/jre echo "Main-Class: com.ivmaisoft.jcgorefl.GenRefl" > .build_tmp/genrefl/MANIFEST.MF javac -d .build_tmp/genrefl/bin -source 1.3 -target 1.3 \ reflgen/com/ivmaisoft/jcgorefl/GenRefl.java jar cfm $GENREFL_JAR .build_tmp/genrefl/MANIFEST.MF -C .build_tmp/genrefl/bin com # auxbin/jre/JPropJav.jar: JPROPJAV_JAR=auxbin/jre/JPropJav.jar mkdir -p .build_tmp/jpropjav/bin auxbin/jre echo "Main-Class: com.ivmaisoft.jpropjav.Main" > .build_tmp/jpropjav/MANIFEST.MF javac -d .build_tmp/jpropjav/bin -source 1.3 -target 1.3 \ miscsrc/jpropjav/com/ivmaisoft/jpropjav/*.java # Note: ignore warning about deprecated API usage. 
jar cfm $JPROPJAV_JAR .build_tmp/jpropjav/MANIFEST.MF -C .build_tmp/jpropjav/bin com # auxbin/jre/TraceJni.jar: mkdir -p .build_tmp/tracejni/bin auxbin/jre echo "Main-Class: com.ivmaisoft.jcgorefl.TraceJni" > .build_tmp/tracejni/MANIFEST.MF javac -d .build_tmp/tracejni/bin -source 1.3 -target 1.3 \ reflgen/com/ivmaisoft/jcgorefl/TraceJni.java jar cfm auxbin/jre/TraceJni.jar .build_tmp/tracejni/MANIFEST.MF -C .build_tmp/tracejni/bin com # rflg_out (reflection info generation): mkdir -p rflg_out java -Dline.separator=$'\n' -jar $GENREFL_JAR -d rflg_out reflgen/*.dat # rflg_out (convert property files to Java source): java -Dline.separator=$'\n' -jar $JPROPJAV_JAR -d rflg_out \ -sourcepath goclsp/clsp_fix/resource -sourcepath classpath-0.93/resource \ -sourcepath $SWT_SRC_RELPATH @goclsp/clsp_res/jreslist.in # Translated C code for GenRefl: mkdir -p .build_tmp/genrefl/jcgo_Out java -jar jcgo.jar -d .build_tmp/genrefl/jcgo_Out -src reflgen -src goclsp/clsp_asc \ com.ivmaisoft.jcgorefl.GenRefl @stdpaths.in # Translated C code for JPropJav: mkdir -p .build_tmp/jpropjav/jcgo_Out java -jar jcgo.jar -d .build_tmp/jpropjav/jcgo_Out -src miscsrc/jpropjav -src goclsp/clsp_asc \ com.ivmaisoft.jpropjav.Main @stdpaths.in # Translated C code for jcgo native binary: mkdir -p .build_tmp/jtr/jcgo_Out java -Xss1M -jar jcgo.jar -d .build_tmp/jtr/jcgo_Out -src jtrsrc -src goclsp/clsp_asc \ com.ivmaisoft.jcgo.Main @stdpaths.in
ivmai/JCGO
mkjcgo/build-java.sh
Shell
gpl-2.0
2,976
#!/bin/bash #Bitscout project #Copyright Kaspersky Lab . ./scripts/functions UNPACKED_INITRD=0 if [ ! -d "./build.$GLOBAL_BASEARCH/initrd" ] then statusprint "Unpacking initrd.." scripts/initrd_unpack.sh UNPACKED_INITRD=1 fi if [ $GLOBAL_TARGET = "iso" ]; then statusprint "Fixing casper find_livefs method.." if [ -f "./build.$GLOBAL_BASEARCH/initrd/main/scripts/casper" ] then if ! grep -q "${PROJECTNAME}-${GLOBAL_BUILDID}" ./build.$GLOBAL_BASEARCH/initrd/main/scripts/casper then sudo sed -i 's/^\( *\)\(mount -t ${fstype} -o ro,noatime "${devname}" $mountpoint || return 1\)/\1blkid "${devname}" | grep -q "'"${PROJECTNAME}-${GLOBAL_BUILDID}"'" || return 1\n\1\2/' ./build.$GLOBAL_BASEARCH/initrd/main/scripts/casper fi fi fi if [ $UNPACKED_INITRD -eq 1 ] then statusprint "Packing initrd.." scripts/initrd_pack.sh UNPACKED_INITRD=0 fi exit 0;
vitaly-kamluk/bitscout
scripts/initrd_findlivefs_fix.sh
Shell
gpl-2.0
892
#!/bin/bash export PATH=/data/apps/bin:$PATH cd /data/Lacuna-Server/bin perl clean_up_empires.pl >>/tmp/hourly.log 2>>/tmp/hourly.log perl sanitize_ss.pl >>/tmp/hourly.log 2>>/tmp/hourly.log perl summarize_economy.pl >>/tmp/hourly.log 2>>/tmp/hourly.log perl generate_news_feeds.pl >>/tmp/news_feeds.log 2>>/tmp/news_feeds.log perl tick_parliament.pl >>/tmp/hourly.log 2>>/tmp/hourly.log perl add_missions.pl >>/tmp/hourly.log 2>>/tmp/hourly.log perl clean_up_market.pl >>/tmp/hourly.log 2>>/tmp/hourly.log perl jackpot/hourly_update.pl >>/tmp/hourly.log 2>>/tmp/hourly.log perl trelvestian/hourly_update.pl >>/tmp/hourly.log 2>>/tmp/hourly.log perl diablotin/hourly_update.pl >>/tmp/hourly.log 2>>/tmp/hourly.log perl saben/hourly_update.pl >>/tmp/hourly.log 2>>/tmp/hourly.log perl delambert/hourly_update.pl >>/tmp/hourly.log 2>>/tmp/hourly.log perl cult/hourly_update.pl >>/tmp/hourly.log 2>>/tmp/hourly.log perl tick_fissures.pl >>/tmp/tick_fissures.log 2>>/tmp/tick_fissures.log perl test_weather.pl >>/tmp/test_weather.csv 2>/tmp/test_weather.log perl trelvestian/send_attack.pl >>/tmp/attack_trel.log 2>>/tmp/attack_trel.log &
Imzogelmo/Lacuna-Server-Open
bin/run_hourly.sh
Shell
gpl-2.0
1,135
# ---------------------------------------------------------------------------- # Mostra a classificação e jogos do torneio Libertadores da América. # Opções: # <número> | <fase>: Mostra jogos da fase selecionada # fases: pre ou primeira, grupos ou segunda, oitavas # -g <número>: Jogos da segunda fase do grupo selecionado # -c [número]: Mostra a classificação, nos grupos da segunda fase # -cg <número> ou -gc <número>: Classificação e jogos do grupo selecionado. # # As fases podem ser: # pré, pre, primeira ou 1, para a fase pré-libertadores # grupos, segunda ou 2, para a fase de grupos da libertadores # oitavas ou 3 # quartas ou 4 # semi, semi-final ou 5 # final ou 6 # # Nomenclatura: # PG - Pontos Ganhos # J - Jogos # V - Vitórias # E - Empates # D - Derrotas # GP - Gols Pró # GC - Gols Contra # SG - Saldo de Gols # (%) - Aproveitamento (pontos) # # Obs.: Se a opção for --atualiza, o cache usado é renovado # # Uso: zzlibertadores [ fase | -c [número] | -g <número> ] # Ex.: zzlibertadores 2 # Jogos da Fase 2 (Grupos) # zzlibertadores -g 5 # Jogos do grupo 5 da fase 2 # zzlibertadores -c # Classificação de todos os grupos # zzlibertadores -c 3 # Classificação no grupo 3 # zzlibertadores -cg 7 # Classificação e jogos do grupo 7 # # Autor: Itamar <itamarnet (a) yahoo com br> # Desde: 2013-03-17 # Versão: 15 # Licença: GPL # Requisitos: zzecho zzpad zzdatafmt # ---------------------------------------------------------------------------- zzlibertadores () { zzzz -h libertadores "$1" && return local ano=$(date +%Y) local cache=$(zztool cache libertadores) local url="http://esporte.uol.com.br/futebol/campeonatos/libertadores/jogos/" local awk_jogo=' NR % 3 ~ /^[12]$/ { if ($1 ~ /^[0-9-]{1,}$/ && $2 ~ /^[0-9-]{1,}$/) { penais[NR % 3]=$1; placar[NR % 3]=$2; $1=""; $2="" } else if ($1 ~ /^[0-9-]{1,}$/ && $2 !~ /^[0-9-]{1,}$/) { penais[NR % 3]=""; placar[NR % 3]=$1; $1="" } sub(/^ */,"");sub(/ *$/,"") time[NR % 3]=" " $0 " " } NR % 3 == 0 { if (length(penais[1])>0 && 
length(penais[2])>0) { placar[1] = placar[1] " ( " penais[1] placar[2] = penais[2] " ) " placar[2] } else { penais[1]="";penais[2]="" } sub(/ *$/,""); print time[1] placar[1] "|" placar[2] time[2] "|" $0 placar[1]="";placar[2]="" } ' local sed_mata=' 1d; $d /Confronto/d;/^ *$/d; s/pós[ -]jogo *//; s/^ *//; s/__*//g; s/ [A-Z][A-Z][A-Z]//; ' local time1 time2 horario linha test -n "$1" || { zztool -e uso libertadores; return 1; } # Tempo de resposta do site está elevando, usando cache para minimizar efeito test "$1" = "--atualiza" && { zztool cache rm libertadores; shift; } if ! test -s "$cache" || test $(head -n 1 "$cache") != $(zzdatafmt --iso hoje) then zzdatafmt --iso hoje > "$cache" $ZZWWWDUMP "$url" >> "$cache" fi # Mostrando os jogos # Escolhendo as fases # Fase 1 (Pré-libertadores) case "$1" in 1 | pr[eé] | primeira) sed -n '/PRIMEIRA FASE/,/FASE DE GRUPOS/p' "$cache" | sed "$sed_mata" | awk "$awk_jogo" | while read linha do time1=$( echo $linha | cut -d"|" -f 1 ) time2=$( echo $linha | cut -d"|" -f 2 ) horario=$(echo $linha | cut -d"|" -f 3 ) echo "$(zzpad -l 28 $time1) X $(zzpad -r 28 $time2) $horario" done ;; # Fase 2 (Fase de Grupos) 2 | grupos | segunda) for grupo in 1 2 3 4 5 6 7 8 do zzlibertadores -g $grupo echo done ;; 3 | oitavas) sed -n '/^OITAVAS DE FINAL/,/^ *\*/p' "$cache" | sed "$sed_mata" | sed 's/.*\([0-9]º\)/\1/' | awk "$awk_jogo" | while read linha do time1=$( echo $linha | cut -d"|" -f 1 ) time2=$( echo $linha | cut -d"|" -f 2 ) horario=$(echo $linha | cut -d"|" -f 3 ) echo "$(zzpad -l 28 $time1) X $(zzpad -r 28 $time2) $horario" done ;; 4 | quartas | 5 | semi | semi-final | 6 | final) case $1 in 4 | quartas) sed -n '/^QUARTAS DE FINAL/,/^OITAVAS DE FINAL/p' "$cache";; 5 | semi | semi-final) sed -n '/^SEMIFINAIS/,/^QUARTAS DE FINAL/p' "$cache";; 6 | final) sed -n '/^FINAL/,/^SEMIFINAIS/p' "$cache";; esac | sed "$sed_mata" | sed 's/.*Vencedor/Vencedor/' | awk "$awk_jogo" | while read linha do time1=$( echo $linha | cut -d"|" -f 1 ) time2=$( 
echo $linha | cut -d"|" -f 2 ) horario=$(echo $linha | cut -d"|" -f 3 ) echo "$(zzpad -l 28 $time1) X $(zzpad -r 28 $time2) $horario" done ;; esac # Escolhendo o grupo para os jogos if test "$1" = "-g" && zztool testa_numero $2 && test $2 -le 8 -a $2 -ge 1 then echo "Grupo $2" sed -n "/^ *Grupo $2/,/Grupo /p" "$cache"| sed ' 1d; /°/d; /Rodada [2-9]/d; /Classificados para as oitavas de final/,$d ' | sed "$sed_mata" | awk "$awk_jogo" | sed 's/\(h[0-9][0-9]\).*$/\1/' | while read linha do time1=$( echo $linha | cut -d"|" -f 1 ) time2=$( echo $linha | cut -d"|" -f 2 ) horario=$(echo $linha | cut -d"|" -f 3 ) echo "$(zzpad -l 28 $time1) X $(zzpad -r 28 $time2) $horario" done fi # Mostrando a classificação (Fase de grupos) if test "$1" = "-c" -o "$1" = "-cg" -o "$1" = "-gc" then if zztool testa_numero $2 && test $2 -le 8 -a $2 -ge 1 then grupo="$2" sed -n "/^ *Grupo $2/,/Rodada 1/p" "$cache" | sed -n '/PG/p;/°/p' | sed 's/ LDU / ldu /g'| sed 's/[^-][A-Z][A-Z][A-Z] //;s/ [A-Z][A-Z][A-Z]//' | sed 's/ ldu / LDU /g'| awk -v cor_awk="$ZZCOR" '{ if (NF < 10) { print } if (NF == 10) { printf "%-28s", $1 for (i=2;i<=10;i++) { printf " %3s", $i } print "" } if (NF > 10) { if (cor_awk==1 && ($1 == "1°" || $1 == "2°")) { printf "\033[42;30m" } time="" for (i=1;i<NF-8;i++) { time=time " " $i } printf "%-28s", time for (i=NF-8;i<=NF;i++) { printf " %3s", $i } if (cor_awk==1) { printf "\033[m\n" } else {print ""} } }' test "$1" = "-cg" -o "$1" = "-gc" && { echo; zzlibertadores -g $2 | sed '1d'; } else for grupo in 1 2 3 4 5 6 7 8 do zzlibertadores -c $grupo -n test "$1" = "-cg" -o "$1" = "-gc" && { echo; zzlibertadores -g $grupo | sed '1d'; } echo done fi if test $ZZCOR -eq 1 then test "$3" != "-n" && { echo ""; zzecho -f verde -l preto " Oitavas de Final "; } fi fi }
gmgall/funcoeszz
zz/zzlibertadores.sh
Shell
gpl-2.0
6,275
#!/bin/bash
#
# video.is.AV1.sh — exit 0 if FILE contains an AV1 video stream, 1 otherwise.
#
# usage:
#   video.is.AV1.sh FILE='xyz.mkv'
#
# THIS ALLOWS INJECTING VARS into the local namespace
# might not be very secure, be careful how you declare & check variables
for ARGUMENT in "$@"; do
    # Split KEY=VALUE with parameter expansion (no cut forks);
    # VALUE keeps any '=' characters after the first.
    KEY=${ARGUMENT%%=*}
    VALUE=${ARGUMENT#*=}
    declare "$KEY"="$VALUE"
done

: "${FILE:=""}"

# ffprobe writes stream info to stderr; match an AV1 video stream line.
# (egrep is deprecated; grep -E is the portable spelling.)
MATCHED=$(ffprobe "$FILE" 2>&1 | grep -E 'Stream.+Video: av1 ' | tr '\n' ' ')

if [ -z "$MATCHED" ]; then
    exit 1
fi

exit 0
quamis/scripts-sh
video/video.is.AV1.sh
Shell
gpl-2.0
466
#!/bin/sh
#
# Copyright (C) 2010 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#  This shell script is used to rebuild the prebuilt STLport binaries from
#  their sources. It requires a working NDK installation.
#

# include common function and variable definitions
. `dirname $0`/prebuilt-common.sh
. `dirname $0`/builder-funcs.sh

PROGRAM_PARAMETERS=""

PROGRAM_DESCRIPTION=\
"Rebuild the prebuilt STLport binaries for the Android NDK.

This script is called when packaging a new NDK release. It will simply
rebuild the STLport static and shared libraries from sources.

This requires a temporary NDK installation containing platforms and
toolchain binaries for all target architectures.

By default, this will try with the current NDK directory, unless you use
the --ndk-dir=<path> option.

The output will be placed in appropriate sub-directories of
<ndk>/$STLPORT_SUBDIR, but you can override this with the --out-dir=<path>
option.
"

PACKAGE_DIR=
register_var_option "--package-dir=<path>" PACKAGE_DIR "Put prebuilt tarballs into <path>."

NDK_DIR=
register_var_option "--ndk-dir=<path>" NDK_DIR "Specify NDK root path for the build."

BUILD_DIR=
OPTION_BUILD_DIR=
register_var_option "--build-dir=<path>" OPTION_BUILD_DIR "Specify temporary build dir."

OUT_DIR=
register_var_option "--out-dir=<path>" OUT_DIR "Specify output directory directly."

ABIS="$PREBUILT_ABIS"
register_var_option "--abis=<list>" ABIS "Specify list of target ABIs."

NO_MAKEFILE=
register_var_option "--no-makefile" NO_MAKEFILE "Do not use makefile to speed-up build"

register_jobs_option

extract_parameters "$@"

ABIS=$(commas_to_spaces $ABIS)

# Handle NDK_DIR
if [ -z "$NDK_DIR" ] ; then
    NDK_DIR=$ANDROID_NDK_ROOT
    log "Auto-config: --ndk-dir=$NDK_DIR"
else
    if [ ! -d "$NDK_DIR" ] ; then
        echo "ERROR: NDK directory does not exists: $NDK_DIR"
        exit 1
    fi
fi

if [ -z "$OPTION_BUILD_DIR" ]; then
    BUILD_DIR=$NDK_TMPDIR/build-stlport
else
    BUILD_DIR=$OPTION_BUILD_DIR
fi
mkdir -p "$BUILD_DIR"
fail_panic "Could not create build directory: $BUILD_DIR"

GABIXX_SRCDIR=$ANDROID_NDK_ROOT/$GABIXX_SUBDIR
GABIXX_CFLAGS="-fPIC -O2 -DANDROID -D__ANDROID__ -I$GABIXX_SRCDIR/include"
GABIXX_CXXFLAGS="-fno-exceptions -frtti"
GABIXX_SOURCES=$(cd $ANDROID_NDK_ROOT/$GABIXX_SUBDIR && ls src/*.cc)
GABIXX_LDFLAGS=

# Location of the STLPort source tree
STLPORT_SRCDIR=$ANDROID_NDK_ROOT/$STLPORT_SUBDIR
STLPORT_CFLAGS="-DGNU_SOURCE -fPIC -O2 -I$STLPORT_SRCDIR/stlport -DANDROID -D__ANDROID__"
STLPORT_CFLAGS=$STLPORT_CFLAGS" -I$ANDROID_NDK_ROOT/$GABIXX_SUBDIR/include"
STLPORT_CXXFLAGS="-fuse-cxa-atexit -fno-exceptions -frtti"
STLPORT_SOURCES=\
"src/dll_main.cpp \
src/fstream.cpp \
src/strstream.cpp \
src/sstream.cpp \
src/ios.cpp \
src/stdio_streambuf.cpp \
src/istream.cpp \
src/ostream.cpp \
src/iostream.cpp \
src/codecvt.cpp \
src/collate.cpp \
src/ctype.cpp \
src/monetary.cpp \
src/num_get.cpp \
src/num_put.cpp \
src/num_get_float.cpp \
src/num_put_float.cpp \
src/numpunct.cpp \
src/time_facets.cpp \
src/messages.cpp \
src/locale.cpp \
src/locale_impl.cpp \
src/locale_catalog.cpp \
src/facets_byname.cpp \
src/complex.cpp \
src/complex_io.cpp \
src/complex_trig.cpp \
src/string.cpp \
src/bitset.cpp \
src/allocators.cpp \
src/c_locale.c \
src/cxa.c"

# If the --no-makefile flag is not used, we're going to put all build
# commands in a temporary Makefile that we will be able to invoke with
# -j$NUM_JOBS to build stuff in parallel.
#
if [ -z "$NO_MAKEFILE" ]; then
    MAKEFILE=$BUILD_DIR/Makefile
else
    MAKEFILE=
fi

# Build both the static and shared STLport (plus GAbi++) libraries for
# one ABI into $DSTDIR (defaults to <ndk>/$STLPORT_SUBDIR/libs/<abi>).
build_stlport_libs_for_abi ()
{
    local ARCH BINPREFIX SYSROOT
    local ABI=$1
    local BUILDDIR="$2"
    local DSTDIR="$3"
    local SRC OBJ OBJECTS CFLAGS CXXFLAGS

    mkdir -p "$BUILDDIR"

    # If the output directory is not specified, use default location
    if [ -z "$DSTDIR" ]; then
        DSTDIR=$NDK_DIR/$STLPORT_SUBDIR/libs/$ABI
    fi

    mkdir -p "$DSTDIR"

    builder_begin_android $ABI "$BUILDDIR" "$MAKEFILE"
    builder_set_dstdir "$DSTDIR"

    builder_set_srcdir "$GABIXX_SRCDIR"
    builder_cflags "$GABIXX_CFLAGS"
    builder_cxxflags "$GABIXX_CXXFLAGS"
    builder_ldflags "$GABIXX_LDFLAGS"
    builder_sources $GABIXX_SOURCES

    builder_set_srcdir "$STLPORT_SRCDIR"
    builder_reset_cflags
    builder_cflags "$STLPORT_CFLAGS"
    builder_reset_cxxflags
    builder_cxxflags "$STLPORT_CXXFLAGS"
    builder_sources $STLPORT_SOURCES

    log "Building $DSTDIR/libstlport_static.a"
    builder_static_library libstlport_static

    log "Building $DSTDIR/libstlport_shared.so"
    builder_shared_library libstlport_shared
    builder_end
}

for ABI in $ABIS; do
    build_stlport_libs_for_abi $ABI "$BUILD_DIR/$ABI"
done

# If needed, package files into tarballs
if [ -n "$PACKAGE_DIR" ] ; then
    for ABI in $ABIS; do
        FILES=""
        for LIB in libstlport_static.a libstlport_shared.so; do
            FILES="$FILES $STLPORT_SUBDIR/libs/$ABI/$LIB"
        done
        PACKAGE="$PACKAGE_DIR/stlport-libs-$ABI.tar.bz2"
        log "Packaging: $PACKAGE"
        pack_archive "$PACKAGE" "$NDK_DIR" "$FILES"
        fail_panic "Could not package $ABI STLport binaries!"
        dump "Packaging: $PACKAGE"
    done
fi

if [ -z "$OPTION_BUILD_DIR" ]; then
    log "Cleaning up..."
    rm -rf $BUILD_DIR
else
    log "Don't forget to cleanup: $BUILD_DIR"
fi

log "Done!"
rex-xxx/mt6572_x201
ndk/build/tools/build-stlport.sh
Shell
gpl-2.0
5,885
#! /bin/sh -e
# tup - A file-based build system
#
# Copyright (C) 2009-2020 Mike Shal <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

# Similar to t6003, only we forget to update the shell script. Try to remove an
# existing target and see that the shell script runs again.
. ./tup.sh

# Rule declares two outputs; script produces both.
cat > Tupfile << HERE
: |> sh ok.sh |> a b
HERE
cat > ok.sh << HERE
touch a
touch b
HERE
tup touch ok.sh Tupfile
update
check_exist a b

# Drop output 'b' from the rule but "forget" to update ok.sh:
# the update must fail because the script still creates b.
cat > Tupfile << HERE
: |> sh ok.sh |> a
HERE
tup touch Tupfile
update_fail

# Fix the script so it only creates 'a'; the update should now
# succeed and tup should remove the stale output b.
cat > ok.sh << HERE
touch a
HERE
tup touch ok.sh
update
check_exist a
check_not_exist b

eotup
ppannuto/tup
test/t6025-change-multi-output3.sh
Shell
gpl-2.0
1,214
#!/bin/sh
# file: simulate_mti.sh
#
# (c) Copyright 2008 - 2011 Xilinx, Inc. All rights reserved.
#
# This file contains confidential and proprietary information
# of Xilinx, Inc. and is protected under U.S. and
# international copyright and other intellectual property
# laws.
#
# DISCLAIMER
# This disclaimer is not a license and does not grant any
# rights to the materials distributed herewith. Except as
# otherwise provided in a valid license issued to you by
# Xilinx, and to the maximum extent permitted by applicable
# law: (1) THESE MATERIALS ARE MADE AVAILABLE "AS IS" AND
# WITH ALL FAULTS, AND XILINX HEREBY DISCLAIMS ALL WARRANTIES
# AND CONDITIONS, EXPRESS, IMPLIED, OR STATUTORY, INCLUDING
# BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, NON-
# INFRINGEMENT, OR FITNESS FOR ANY PARTICULAR PURPOSE; and
# (2) Xilinx shall not be liable (whether in contract or tort,
# including negligence, or under any other theory of
# liability) for any loss or damage of any kind or nature
# related to, arising under or in connection with these
# materials, including for any direct, or any indirect,
# special, incidental, or consequential loss or damage
# (including loss of data, profits, goodwill, or any type of
# loss or damage suffered as a result of any action brought
# by a third party) even if such damage or loss was
# reasonably foreseeable or Xilinx had been advised of the
# possibility of the same.
#
# CRITICAL APPLICATIONS
# Xilinx products are not designed or intended to be fail-
# safe, or for use in any application requiring fail-safe
# performance, such as life-support or safety devices or
# systems, Class III medical devices, nuclear facilities,
# applications related to the deployment of airbags, or any
# other applications that could lead to death, personal
# injury, or severe property or environmental damage
# (individually and collectively, "Critical
# Applications"). Customer assumes the sole risk and
# liability of any use of Xilinx products in Critical
# Applications, subject only to applicable laws and
# regulations governing limitations on product liability.
#
# THIS COPYRIGHT NOTICE AND DISCLAIMER MUST BE RETAINED AS
# PART OF THIS FILE AT ALL TIMES.
#

# set up the working directory
set work work
vlib work

# compile all of the files
vlog -work work $XILINX/verilog/src/glbl.v
vlog -work work ../../implement/results/routed.v
vlog -work work sdram_clk_gen_tb.v

# run the timing simulation against the routed netlist + SDF delays
vsim -c -t ps +transport_int_delays -voptargs="+acc" -L secureip -L simprims_ver -sdfmax sdram_clk_gen_tb/dut=../../implement/results/routed.sdf +no_notifier work.sdram_clk_gen_tb work.glbl
orbancedric/DeepGate
other/Mojo Projects/Mojo-SDRAM/ipcore_dir/sdram_clk_gen/simulation/timing/simulate_mti.sh
Shell
gpl-3.0
2,635
#desc:Weekday
# Print the current day of the week as a number (0 = Sunday .. 6 = Saturday).
date +%w
opalenzuela/opendomo-automation
usr/local/opendomo/bin/weekday.sh
Shell
gpl-3.0
23
#!/bin/bash
# Register the Oracle JDK 7 ControlPanel binary as the system-wide
# "itweb-settings" alternative, then point /usr/bin/ControlPanel at it.

# Clear any stale alternatives first.
sudo update-alternatives --remove-all ControlPanel
sudo update-alternatives --remove \
    itweb-settings \
    /usr/lib/jvm/jdk-7-oracle-x64/jre/bin/ControlPanel

# Install the JRE's ControlPanel as the itweb-settings alternative
# (priority 317).
sudo update-alternatives --install \
    /usr/bin/itweb-settings \
    itweb-settings \
    /usr/lib/jvm/jdk-7-oracle-x64/jre/bin/ControlPanel \
    317

# Replace /usr/bin/ControlPanel with a symlink to itweb-settings.
sudo rm -f /usr/bin/ControlPanel
sudo ln -s -f itweb-settings /usr/bin/ControlPanel

exit 0
jmanoel7/my_shell_scripts
bin/configure-ControlPanel-debian.sh
Shell
gpl-3.0
407
#!/bin/sh
# Wrapper that sets up the GNU Radio build-tree environment and runs the
# frequency-modulator QA test.  NOTE: paths are hard-coded to one
# developer's checkout (/home/katsikas/gnuradio) — generated for that tree.
export GR_DONT_LOAD_PREFS=1
export srcdir=/home/katsikas/gnuradio/gnuradio-core/src/python/gnuradio/gr
export PATH=/home/katsikas/gnuradio/build/gnuradio-core/src/python/gnuradio/gr:$PATH
export LD_LIBRARY_PATH=/home/katsikas/gnuradio/build/volk/lib:/home/katsikas/gnuradio/build/gruel/src/lib:/home/katsikas/gnuradio/build/gnuradio-core/src/lib:$LD_LIBRARY_PATH
export PYTHONPATH=/home/katsikas/gnuradio/build/gnuradio-core/src/python:/home/katsikas/gnuradio/build/gnuradio-core/src/lib/swig:$PYTHONPATH
# -B: don't write .pyc files into the source tree
/usr/bin/python -B /home/katsikas/gnuradio/gnuradio-core/src/python/gnuradio/gr/qa_frequency_modulator.py
katsikas/gnuradio
build/gnuradio-core/src/python/gnuradio/gr/qa_frequency_modulator_test.sh
Shell
gpl-3.0
622
# Updates for the 1.67.0 Release

# First positional argument is the PHP binary path; currently unused but
# kept for interface compatibility with the release-script runner.
PHP=${1}

echo "wp --allow-root plugin activate classic-editor"
wp --allow-root plugin activate classic-editor
wp --allow-root core update-db
proudcity/wp-proud-composer
updates/release-1.67.0.sh
Shell
gpl-3.0
175
#!/bin/bash
#
# Install the perf binary plus its perl/python trace-analysis scripts.
#
# NOTE: this script was lifted from perf's Makefile install target.  The
# original used make-style $(VAR) references, which in bash are command
# substitutions (they would try to *execute* INSTALL etc.); they are
# rewritten here as shell variable expansions.

DESTDIR_SQ=/usr
bindir_SQ=/bin
perfexec_instdir_SQ=/libexec
OUTPUT=./
INSTALL=install

# perf binary
$INSTALL -d -m 755 "${DESTDIR_SQ}${bindir_SQ}"
$INSTALL "${OUTPUT}perf" "${DESTDIR_SQ}${bindir_SQ}"

# perl trace scripts
$INSTALL -d -m 755 "${DESTDIR_SQ}${perfexec_instdir_SQ}/scripts/perl/Perf-Trace-Util/lib/Perf/Trace"
$INSTALL -d -m 755 "${DESTDIR_SQ}${perfexec_instdir_SQ}/scripts/perl/bin"
$INSTALL "${OUTPUT}perf-archive" -t "${DESTDIR_SQ}${perfexec_instdir_SQ}"
$INSTALL scripts/perl/Perf-Trace-Util/lib/Perf/Trace/* -t "${DESTDIR_SQ}${perfexec_instdir_SQ}/scripts/perl/Perf-Trace-Util/lib/Perf/Trace"
$INSTALL scripts/perl/*.pl -t "${DESTDIR_SQ}${perfexec_instdir_SQ}/scripts/perl"
$INSTALL scripts/perl/bin/* -t "${DESTDIR_SQ}${perfexec_instdir_SQ}/scripts/perl/bin"

# python trace scripts
$INSTALL -d -m 755 "${DESTDIR_SQ}${perfexec_instdir_SQ}/scripts/python/Perf-Trace-Util/lib/Perf/Trace"
$INSTALL -d -m 755 "${DESTDIR_SQ}${perfexec_instdir_SQ}/scripts/python/bin"
$INSTALL scripts/python/Perf-Trace-Util/lib/Perf/Trace/* -t "${DESTDIR_SQ}${perfexec_instdir_SQ}/scripts/python/Perf-Trace-Util/lib/Perf/Trace"
$INSTALL scripts/python/*.py -t "${DESTDIR_SQ}${perfexec_instdir_SQ}/scripts/python"
$INSTALL scripts/python/bin/* -t "${DESTDIR_SQ}${perfexec_instdir_SQ}/scripts/python/bin"
gatieme/EnergyEfficient
script/script_result/install_perf/install_perf.sh
Shell
gpl-3.0
1,277
#!/bin/bash
# Network Interface Wizard
# CS 126, Lab 8
# Thurman Gillespy, 2/11/17

# get some defaults
NUMETH=$(ip a | grep eth[0-9]: | wc -l)
ETHLIST=$(ip a | grep eth[0-9]: | awk '{print $2}' | sed 's/://')
DEFNAMESERVER=$(cat /etc/resolv.conf | grep name | awk '{print $2}')
# might be more than 1, so pick the first
for n in $DEFNAMESERVER
do
    DEFNAMESERVER=$n
    break; # only do this once
done
DEFGATEWAY=$(ip route | grep default | awk '{print $3}')
NULL=0.0.0.0
ETH=eth0
IP=$NULL
IPADDR=$NULL
DEFMASK=$NULL
NETMASK=$NULL
NAMESERVER=$NULL
GATEWAY=$NULL

# Redraw the banner plus the current configuration values.
splash() {
    clear
    echo "---------------------------------------"
    echo "|                                     |"
    echo "|      Network Interface Wizard       |"
    echo "|                                     |"
    echo "---------------------------------------"
    echo "        Interface: $ETH"
    echo "       IP address: $IPADDR"
    echo "     Network mask: $NETMASK"
    echo "      Name server: $NAMESERVER"
    echo "  Default gateway: $GATEWAY"
    echo "---------------------------------------"
}

# Ask the user which ethN device to configure; sets ETH.
interface() {
    n=1
    echo "There are $NUMETH interfaces."
    for e in $ETHLIST
    do
        echo -e "\t$n: $e"
        let n=n+1
    done
    read -p "Select the interface to edit: " IN
    if [[ -z $IN ]]
    then
        IN=1
    fi
    # if more than one char, just grab the first
    IN=$(echo $IN | cut -c1)
    # is it a number
    NUM=$(echo $IN | grep [0-9] | wc -l )
    if [[ $NUM = 0 ]]
    then
        IN=1
    fi
    # make sure we in range for number of interfaces
    if [[ "$IN" -gt "$NUMETH" ]]
    then
        IN=$NUMETH
    elif [[ $IN = 0 ]]
    then
        IN=1
    fi
    let IN=$IN-1
    ETH="eth$IN"
    splash
}

# Validate a dotted-quad address with ipcalc; returns 0 if valid.
validateIP() {
    PARAM=$1
    # handle <CR> without any data
    if [[ -z $1 ]]
    then
        PARAM="<nothing entered>"
    fi
    ipcalc -s -c $PARAM
    if [[ $? = 0 ]]
    then
        return 0
    else
        echo "$PARAM: invalid IP address."
        return 1
    fi
}

# Prompt until a valid IP address is entered; sets IPADDR.
ipaddr() {
    RESULT=1
    while [[ $RESULT -eq 1 ]]
    do
        read -p "Enter IP address: " IP
        splash
        validateIP $IP
        RESULT=$?
    done
    IPADDR=$IP
    splash
}

# Prompt for a netmask (default derived from IPADDR); sets NETMASK.
netmask() {
    DEFMASK=$(ipcalc -m $IPADDR | sed 's/NETMASK=//')
    RESULT=1
    while [[ $RESULT -eq 1 ]]
    do
        echo "Default netmask: $DEFMASK"
        read -p "Enter netmask. Press <return> for default: " IP
        splash
        if [[ -z "$IP" ]]
        then
            IP=$DEFMASK
        fi
        validateIP $IP
        RESULT=$?
    done
    NETMASK=$IP
    splash
}

# Prompt for a name server (default from /etc/resolv.conf); sets NAMESERVER.
nameserver() {
    RESULT=1
    while [[ $RESULT -eq 1 ]]
    do
        echo -e "Current nameserver: $DEFNAMESERVER"
        read -p "Enter nameserver address. Press <enter> for default: " IP
        splash
        if [[ -z "$IP" ]]
        then
            IP=$DEFNAMESERVER
        fi
        validateIP $IP
        RESULT=$?
    done
    NAMESERVER=$IP
    splash
}

# Prompt for a default gateway (default from the routing table); sets GATEWAY.
gateway() {
    RESULT=1
    while [[ $RESULT -eq 1 ]]
    do
        echo "Default gateway: $DEFGATEWAY"
        read -p "Enter gateway address. Press <enter> for default: " IP
        splash
        if [[ -z "$IP" ]]
        then
            IP=$DEFGATEWAY
        fi
        validateIP $IP
        RESULT=$?
    done
    GATEWAY=$IP
    splash
}

# Main menu loop: save, quit, or re-edit any field.
decision() {
    EXIT=no
    while [[ "$EXIT" = "no" ]]
    do
        splash
        echo " 1. Save configuration"
        echo " 2. Exit without save"
        echo " 3. Edit interface"
        echo " 4. Edit IP address"
        echo " 5. Edit netmask"
        echo " 6. Edit name server"
        echo " 7. Edit default gateway"
        read -p " Select an option: " CHOICE
        case $CHOICE in
            "1"|"S"|"s")
                setlive
                setconfig
                splash
                echo "Configuration saved."
                EXIT=yes
                ;;
            "2"|"x"|"X"|"q"|"Q")
                echo "Configuration not saved."
                EXIT=yes
                ;;
            "3")
                splash
                interface
                ;;
            "4")
                splash
                ipaddr
                ;;
            "5")
                splash
                netmask
                ;;
            "6")
                splash
                nameserver
                ;;
            "7")
                splash
                gateway
                ;;
            * )
                read -p "Invalid input. Press any key to continue." IN
                splash
                ;;
        esac
    done
}

# Apply the address to the live device and update resolv.conf.
setlive() {
    IPFULL=$IPADDR/$NETMASK
    # for lab, just edit the eth1 device
    ETH="eth1"
    ip addr add $IPFULL dev $ETH
    # don't need to update nameserver if accepted default
    # (string compare: -ne is for integers and fails on dotted quads)
    if [[ "$NAMESERVER" != "$DEFNAMESERVER" ]]
    then
        echo "nameserver $NAMESERVER" >> /etc/resolv.conf
    fi
}

# Write an ifcfg-style configuration file for the chosen interface.
setconfig() {
    OUT="/tmp/ifcfg-$ETH"
    echo "#generated by netwiz" > $OUT
    echo "TYPE=Ethernet" >> $OUT
    echo "BOOTPROTO=static" >> $OUT
    echo "IPADDR=$IPADDR" >> $OUT
    echo "NETMASK=$NETMASK" >> $OUT
    echo "DEFROUTE=yes" >> $OUT
    echo "PEERDNS=yes" >> $OUT
    echo "DNS1=$NAMESERVER" >> $OUT
    echo "GATEWAY=$GATEWAY" >> $OUT
    echo "PEERROUTES=yes" >> $OUT
    echo "IPV4_FAILURE_FATAL=no" >> $OUT
    echo "IPV6INIT=yes" >> $OUT
    echo "IPV6_AUTOCONF=yes" >> $OUT
    echo "IPV6_DEFROUTE=yes" >> $OUT
    echo "IPV6_PEERDNS=yes" >> $OUT
    echo "IPV6_PEERROUTES=yes" >> $OUT
    echo "IPV6_FAILURE_FATAL=no" >> $OUT
    echo "NAME=$ETH" >> $OUT
    echo "DEVICE=$ETH" >> $OUT
    echo "ONBOOT=yes" >> $OUT
    echo "NM_CONTROLLED=no" >> $OUT
    # not for our lab
    #mv $OUT > /etc/sysconfig/network-scripts/
}

splash
interface
ipaddr
netmask
nameserver
gateway
decision

# debug
#ip addr show dev eth1
#cat $OUT

exit 0
tgill880/CS126
bin/netwiz.sh
Shell
gpl-3.0
4,865
#!/bin/bash
# Run simulation replicate 20 of the MG94 non-clock analysis for the
# paralog pair YBR191W / YPL079W, capturing stdout to a log file.
python AnalyzeSimulation.py --paralog1 YBR191W --paralog2 YPL079W --simnum 20 > YBR191W_YPL079W_MG94_nonclock_Sim20_PrintScreen.txt
xjw1001001/IGCexpansion
Simulation/ShFiles/MG94_YBR191W_YPL079W_sim20.sh
Shell
gpl-3.0
145
#!/bin/sh
# Flash the ArduPlane firmware to a RevoMini board over USB DFU.

#production binary for bootloader
#dfu-util -a 0 --dfuse-address 0x08010000 -D /tmp/ArduCopter.build/revomini_Revolution.bin

# bare metal binary
#dfu-util -a 0 --dfuse-address 0x08000000:unprotect:force -D /tmp/ArduCopter.build/revomini_Revolution.bin

# :leave jumps to the firmware after download; -R resets the device.
dfu-util -a 0 --dfuse-address 0x08000000:leave -D ../../../../../ArduPlane/revomini_Revolution.bin -R
dgrat/ardupilot
libraries/AP_HAL_F4Light/boards/revomini_Revolution/support/UPLOAD-DFU-plane.sh
Shell
gpl-3.0
367
# Mirror the LibreOffice hyphenation dictionaries (hyph_*.dic) from the
# freedesktop cgit instance into the current directory.
host='https://cgit.freedesktop.org/'

# cgit marks directory links with class 'ls-dir' and file links with
# 'ls-blob'; field 6 of a single-quote split is the href attribute.
for folder in $(curl -s "$host/libreoffice/dictionaries/tree/" | grep 'ls-dir' | cut -d "'" -f 6); do
    for file in $(curl -s "$host$folder" | grep 'ls-blob' | grep 'hyph_.*\.dic' | cut -d "'" -f 6); do
        # rewrite the 'tree' (HTML view) URL to 'plain' (raw) and fetch;
        # -N only re-downloads when the remote file is newer.
        wget -N "$(echo "$host$file" | sed 's/tree/plain/')"
    done
done
kaysinds/PoopDog
dicts/en/pyphen/dictionaries/update.sh
Shell
gpl-3.0
307
#!/bin/sh
# Install the acpi_call kernel module (v0.0.1) through DKMS so it is
# rebuilt automatically on kernel upgrades.

# directory containing this script
DIR="$( cd "$( dirname "$0" )" && pwd )"

# stage the sources where DKMS expects them
mkdir /usr/src/acpi_call-0.0.1
cp $DIR/dkms.conf $DIR/src/* /usr/src/acpi_call-0.0.1

dkms add -m acpi_call -v 0.0.1
dkms build -m acpi_call -v 0.0.1
dkms install -m acpi_call -v 0.0.1
z0rc/debumblebee
install-files/contrib/acpi_call/install.sh
Shell
gpl-3.0
237
#!/bin/bash
# NOTE: the shebang must be the first line of the file to take effect;
# it previously appeared after the header block below.
#########################################
#    GNU GPL License v3                 #
#    Author: Amir Yahyavi              #
#########################################

# Pull in the node-deployment helpers, then launch the SolrCloud nodes.
source deploynodes.sh
startnodes
yahyavi/solrcloud
scripts/startnodes.sh
Shell
gpl-3.0
214
#!/bin/bash
# Rebuild the flattened SXI schematic hierarchy; each command is echoed
# before it runs so the output doubles as a log.

echo rm -rf SXI/flatten
rm -rf SXI/flatten

echo mkdir SXI/flatten
mkdir SXI/flatten

echo cd SXI
cd SXI

echo ../../flatten-hierarchy flatten DriverBoard.*.sch J?.sch TCE.sch
time ../../flatten-hierarchy flatten DriverBoard.*.sch J?.sch TCE.sch
xcthulhu/lambda-geda
tests/flatten_SXI.sh
Shell
gpl-3.0
255
#!/usr/bin/env bash
#
# Rename every "updatefilelist.txt" found beneath the current directory
# (or a given root) to "filelist.txt", keeping each file in place.

# rename_update_filelists [root]
#   root - directory to search (default: .)
rename_update_filelists() {
    local root=${1:-.}
    local file
    # NUL-delimited stream so paths containing spaces/newlines survive;
    # the old `for file in $(find ...)` word-split such paths.
    while IFS= read -r -d '' file; do
        mv -- "$file" "${file%/*}/filelist.txt"
    done < <(find "$root" -name "updatefilelist.txt" -type f -print0)
}

rename_update_filelists "$@"
pleycpl/scripts-shell
change_filename_in_directory.sh
Shell
gpl-3.0
313
#!/bin/bash
# Read the Arduino serial number, then (re)start the ehealth_platform
# container with the serial device and serial number passed through.

# directory containing this script
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"

# get_serialnumber.js writes the detected serial into conf/serialnumber
node $DIR/../scripts/get_serialnumber.js
serialnumber=$(<$DIR/../conf/serialnumber)
echo $serialnumber

# replace any previous container instance
docker rm -f ehealth_platform
docker run --name ehealth_platform --net=host -it --privileged -d -p 127.0.1.1:3000:3000 -v /dev/ttyACM0:/dev/ttyACM0 -e "ARDUINO_SERIAL_NUMBER=$serialnumber" ehealth-platform
mibzzz/ehealth-platform
bin/run_app.sh
Shell
gpl-3.0
379
#! /bin/sh
# Build the video-filter demo with MinGW, linking SDL and FFmpeg libraries.
g++ simplest_ffmpeg_video_filter.cpp -g -o simplest_ffmpeg_video_filter.exe \
    -I /usr/local/include -L /usr/local/lib \
    -lmingw32 -lSDLmain -lSDL -lavformat -lavcodec -lavutil -lavfilter -lswscale
YuxuanLing/trunk
trunk/code/study/ffmpeg/simplest_ffmpeg_video_filter/simplest_ffmpeg_video_filter/compile_mingw.sh
Shell
gpl-3.0
212
# Program the ATmega328P fuse bytes through a USBasp programmer.
# -B5 slows the ISP bit clock for reliability; -v enables verbose output.
# Fuse values written: lfuse=0xFF, hfuse=0xDE, efuse=0xFD
# (presumably external crystal + bootloader section enabled — TODO confirm
# against the ATmega328P datasheet fuse tables before reflashing).
sudo avrdude -B5 -p m328p -c usbasp -P usb -v -U lfuse:w:0xFF:m -U hfuse:w:0xDE:m -U efuse:w:0xFD:m
jsoloPDX/Inductive-Proximity-MIDI-Controller-PSU-Capstone-Project-11
T11-firmware/fuses.sh
Shell
gpl-3.0
100
#!/bin/bash
# ffmpegupreverse
# a script to reverse the installation ONLY of x264 and ffmpeg by my ffmpegup and / or newinstall scripts
# taken from the excellet tutorial found here:
# http://ubuntuforums.org/showthread.php?t=786095&highlight=ffmpeg+x264+latest
# all props to fakeoutdoorsman, not me
# check http://code.google.com/p/x264-ffmpeg-up-to-date/ for updates

# ver 1.1 by rupert plumridge
# 6th November 2010
# added support for Ubuntu Maverick
######################################
# ver 1.0BETA by rupert plumridge
# 16th April 2010
# first version released

# this is a BETA script, so it may not work as expected and may destroy the world, including your computer..use at your own risk.
# THIS SCRIPT DELETES THE SOURCE FILES AND FOLDERS FROM YOUR COMPUTER, BE SURE YOU ARE HAPPY WITH THIS

#User Editable Variables
# please edit the following variable if you haven't installed ffmpeg and x264 via my other script and the source for each app wasn't installed in the location below
INSTALL="/usr/local/src"
# location of log file
LOG=/var/log/ffmpegupreverse.log
# location of the script's lock file
LOCK="/var/run/ffmpegupreverse.pid"

########################################
# do not edit anything beyond this line, unless you know what you are doing
########################################

# first some error checking
set -o nounset
set -o errexit
set -o pipefail

###############
# list of all the functions in the script
# (one function per supported Ubuntu release; the release codename is
# used directly as the function name to dispatch)
###############

#maverick uninstall
maverick () {
    apt-get remove x264 ffmpeg
    rm -rf $INSTALL/x264 >> $LOG
    rm -rf $INSTALL/ffmpeg >> $LOG
}

#lucid uninstall
lucid () {
    apt-get remove x264 ffmpeg
    rm -rf $INSTALL/x264 >> $LOG
    rm -rf $INSTALL/ffmpeg >> $LOG
}

#karmic uninstall
karmic () {
    apt-get remove x264 ffmpeg libtheora
    rm $INSTALL/libtheora-1.1.1.tar.gz >> $LOG
    rm -rf $INSTALL/libtheora-1.1.1 >> $LOG
    rm -rf $INSTALL/x264 >> $LOG
    rm -rf $INSTALL/ffmpeg >> $LOG
}

#jaunty uninstall
jaunty () {
    apt-get remove x264 ffmpeg libopencore-amr libtheora
    rm $INSTALL/libtheora-1.1.1.tar.gz >> $LOG
    rm -rf $INSTALL/libtheora-1.1.1 >> $LOG
    rm $INSTALL/opencore-amr-0.1.2.tar.gz >> $LOG
    rm -rf $INSTALL/opencore-amr-0.1.2 >> $LOG
    rm -rf $INSTALL/x264 >> $LOG
    rm -rf $INSTALL/ffmpeg >> $LOG
}

#intrepid uninstall
intrepid () {
    apt-get remove x264 ffmpeg libopencore-amr libtheora
    rm $INSTALL/libtheora-1.1.1.tar.gz >> $LOG
    rm -rf $INSTALL/libtheora-1.1.1 >> $LOG
    rm $INSTALL/opencore-amr-0.1.2.tar.gz >> $LOG
    rm -rf $INSTALL/opencore-amr-0.1.2 >> $LOG
    rm -rf $INSTALL/x264 >> $LOG
    rm -rf $INSTALL/ffmpeg >> $LOG
}

#hardy uninstall
hardy () {
    apt-get remove x264 ffmpeg libopencore-amr libtheora yasm
    rm $INSTALL/yasm-1.0.0.tar.gz >> $LOG
    rm -rf $INSTALL/yasm-1.0.0 >> $LOG
    rm $INSTALL/libtheora-1.1.1.tar.gz >> $LOG
    rm -rf $INSTALL/libtheora-1.1.1 >> $LOG
    rm $INSTALL/opencore-amr-0.1.2.tar.gz >> $LOG
    rm -rf $INSTALL/opencore-amr-0.1.2 >> $LOG
    rm -rf $INSTALL/x264 >> $LOG
    rm -rf $INSTALL/ffmpeg >> $LOG
}

#exit function
die () {
    echo $@
    exit 1
}

#error function
error () {
    echo $1
    echo $@
    exit 1
}

#this script must be run as root, so lets check that
if [ "$(id -u)" != "0" ]; then
    echo "Exiting. This script must be run as root" 1>&2
    exit 1
fi

#first, lets warn the user use of this script requires some common sense and may mess things up
echo "WARNING, this script removes the packages built by my ffmpegup.sh."
echo
echo "WARNING, this may invovle the removal of packages you may have had previously on your system."
echo
echo "Only proceed if you want to COMPLETELY remove ALL these packages."
read -p "Continue (y/n)?"
[ "$REPLY" == y ] || die "Exiting, nothing has been removed or undone."
echo

#next, lets find out what version of Ubuntu we are running and check it
DISTRO=( $(cat /etc/lsb-release | grep CODE | cut -c 18-) )
OKDISTRO="hardy intrepid jaunty karmic lucid maverick"
if [[ ! $(grep $DISTRO <<< $OKDISTRO) ]]; then die "Exiting. Your distro is not supported, sorry."; fi
read -p "You are running Ubuntu $DISTRO, is this correct (y/n)?"
[ "$REPLY" == y ] || die "Sorry, I think you are using a different distro, exiting to be safe."
echo

# check that the default place to download to and log file location is ok
echo "Is this the location you chose when you ran ffmpegup.sh or fffmpegin.sh?:"
read -p ""$INSTALL" (y/n)?"
[ "$REPLY" == y ] || die "Exiting. Please edit the script changing the INSTALL variable to the location of your choice."
echo
echo "This script logs to:"
echo "$LOG"
read -p "Is this ok (y/n)?"
[ "$REPLY" == y ] || die "Exiting. Please edit the script changing the LOG variable to the location of your choice."
echo

# ok, already, last check before proceeding
echo "OK, we are ready to rumble."
read -p "Shall I proceed, remember, this musn't be stopped (y/n)?"
[ "$REPLY" == y ] || die "Exiting. Bye, did I come on too strong?."
echo
echo "Lets roll!"
echo "script started" > $LOG
echo "uninstalling everything"
echo "uninstalling everything" >> $LOG
# dispatch to the per-release uninstall function named after the codename
$DISTRO || error "Sorry something went wrong, please check the $LOG file."
echo "That's it, all done."
echo "Exiting now, bye. Sorry you didn't like my other scripts :( "
exit
kent1D/mediaspip_scripts
exemples/ffmpegupreversev1.1.sh
Shell
gpl-3.0
5,146
#!/bin/bash
# Configure this host as a salt minion: point it at the master, set its
# minion id, assign base grains/roles and install dependencies.
set -e

# All settings are overridable from the environment (packer variables).
box_name="${BOX_NAME:=localhost}"
os_major_release="${OS_MAJOR_RELEASE:=7}"
os_minor_release="${OS_MINOR_RELEASE:=3}"
salt_short_release="${SALT_SHORT_RELEASE:=2016.11}"
salt_master="${SALT_MASTER:=10.0.2.2}"
enable_mysql_support="${ENABLE_MYSQL_SUPPORT:=0}"

# Ensure salt-minion is enabled in the current runlevel (currently unused).
ensure_minion_starts_on_boot() {
  #systemctl is-enabled salt-minion.service
  rl=`runlevel | cut -f2 -d' '`
  chkconfig salt-minion --level ${rl}
  if [ "$?" == 0 ]; then
    echo "salt-minion already in runlevel ${rl}"
  else
    #chkconfig salt-minion on
    systemctl enable salt-minion.service
  fi
}

# Apply extra states needed before highstate (DB support is optional).
ensure_dependencies() {
  if [ "${enable_mysql_support}" -gt 0 ]; then
    salt-call --local state.sls installers/bootstrap_managed_host/db
  fi
  #yum install -y salt-minion
  #yum install -y swig m2crypto # used by salt recipes?
}

# Seed the minion's "roles" grain; add DB roles when MySQL support is on.
ensure_salt_minion_minimal_roles() {
  roles='as-development,org_as-m,deploy,rbenv'
  if [ "${enable_mysql_support}" -gt 0 ]; then
    roles="${roles},db,mariadb"
  fi
  salt-call --local grains.setval roles ${roles}
}

# Point /etc/salt/minion at the configured master and set the minion id.
bootstrap_salt_minion() {
  sed -i -e "s/^#*\s*master:.*$/master: ${salt_master}/" /etc/salt/minion
  sed -i -e "s/^.*id:.*$/id: ${box_name}/" /etc/salt/minion
}

# Write the minion id file (takes precedence over the config entry).
ensure_minion_id() {
  mkdir -p /etc/salt
  echo "${box_name}" > /etc/salt/minion_id
}

bootstrap_salt_minion
#ensure_minion_starts_on_boot
ensure_salt_minion_minimal_roles
ensure_dependencies
ensure_minion_id
belt/workflows
packer/scripts/setup_salt_minion.sh
Shell
gpl-3.0
1,415
#!/bin/bash # Copyright (c) 2010-2014, Brian Case # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. 
# the code to start multiple jobs came from opengdp under the above licence # ps if anybody wants to complain, i wrote it all, its my code i will mix it if i want ;-) ## @param indir dir the original file(s) will be placed in ## @param outdir dir to place the output in name="gfs" dsname="$name" datadir="/bigdata/coldwave" indir="/bigdata/coldwave/source" outdir="/bigdata/coldwave/done" baseurl="ftp://nomads.ncdc.noaa.gov/GFS/Grid4" tmp="/tmp" limit=14 start=20141020 end=20141131 main() { mkdir -p "${indir}/${dsname}" mkdir -p "${outdir}/${dsname}" getlist "$start" "$end" > "${indir}/${dsname}/mirrorfile" mainloop "${indir}/${dsname}/mirrorfile" } getlist() { local start="$1" local end="$2" local yyyymm local run local hour local yyyymmdd="$start" while [ "$yyyymmdd" -le "$end" ] do yyyymmdd=$(date -d "$yyyymmdd + 1 days" "+%Y%m%d") yyyymm=$(date -d "$yyyymmdd" "+%Y%m") echo "mkdir file:${indir}/${yyyymmdd}" echo "chmod 755 file:${indir}/${yyyymmdd}" ##### loop over the runs ##### for run in 0000 0600 1200 1800 do ##### loop over the hours ##### for hour in 000 003 do if ! [ -f "${indir}/${yyyymmdd}/gfs_4_${yyyymmdd}_${run}_${hour}.grb2" ] then echo "get -O ${indir}/${yyyymmdd} ${baseurl}/${yyyymm}/${yyyymmdd}/gfs_4_${yyyymmdd}_${run}_${hour}.grb2" echo "chmod 644 file:${indir}/${yyyymmdd}/gfs_4_${yyyymmdd}_${run}_${hour}.grb2" fi done done done } ############################################################################### ## @brief frontend to bc for calcing floats ## ## @param ... 
the calculation to perform ## ## @return 0 for success, 1 for failure ## ## @retval stdout the calculation result ## ############################################################################### fcalc () { if [[ $# -gt 0 ]] then bc -q 2>/dev/null <<< "scale=20; $*" fi } ############################################################################### ## @brief print est completion time meter to stdout ## ## @param started time job started UNIXTIME ## @param lines total number of jobs ## @param donelines jobs completed ## ## @return 0 for success ## ############################################################################### comp_meter () { started=$1 lines=$2 donelines=$3 local comp local decdone=$(fcalc "$donelines / $lines") local percdone=$(fcalc "scale = 0; $decdone * 100") local elap=$(($(date +%s) - started)) local comp=$(fcalc "scale=0; $elap / $decdone") ((comp += started)) printf "\r%3.0f%% complete. EST. finish at %s" $percdone "$(date -d "@${comp}")" } ############################################################################### ## @brief function to spawn a new job ## ############################################################################### startjob() { local line="$line" dofile "$line" & } ############################################################################### ## @brief multi proceessing loop ## ## @param mirrorfile the mirror file ## ## @return 0 for success ## ############################################################################### mainloop () { local mirrorfile="$1" local doing local line local lines local donelines local started ((doing=0)) ##### open a fd to a named pipe ##### mkfifo pipe; exec 3<>pipe ##### setup for the est completion time ##### lines=$(grep "${mirrorfile}" -e "^get" | wc -l | cut -d " " -f 1 ) ((donelines=0)) started=$(date +%s) ##### loop over the list ##### while read line ; do ##### if it is a mkdir command do it now ##### if grep -e "^mkdir" <<< "$line" > /dev/null then lftp -e "$line ; exit" 
continue fi ##### under the limit just start a job ##### if [ $doing -lt $limit ] then startjob "$line" ((doing++)) ##### over the limit wait for a job to finish before starting ##### else read <&3 ((doing--)) if grep -e "^get" <<< "$line" > /dev/null then ((donelines++)) fi comp_meter $started $lines $donelines startjob "$line" ((doing++)) fi done < "${mirrorfile}" wait echo } getdate() { sed 's:.*gfs_4_\([0-9]\{4\}\)\([0-9]\{2\}\)\([0-9]\{2\}\)_\([0-9]\{2\}\)00_0\([0-9]\{2\}\).*:\1\2\3:' } getrun() { sed 's:.*gfs_4_\([0-9]\{4\}\)\([0-9]\{2\}\)\([0-9]\{2\}\)_\([0-9]\{2\}\)00_0\([0-9]\{2\}\).*:\4:' } gethour() { sed 's:.*gfs_4_\([0-9]\{4\}\)\([0-9]\{2\}\)\([0-9]\{2\}\)_\([0-9]\{2\}\)00_0\([0-9]\{2\}\).*:\5:' } subtmplftp() {\ local tmpdir="$1" sed "s:get \([-]. \)\{1,\}[-/_.A-Za-z0-9]*:get \1${tmpdir}:g" } ############################################################################### ## @brief function to get the basename of a file ## ## @param input pathname to parse ## ## @return 0 on success ## @retval stdout the basename ## ############################################################################### file_get_basename () { local input="$1" local file="${input##*/}" local base="${file%.*}" local base2="${base%.*}" local ext="${file##*.}" local ext2="${base##*.}" local ext=$(tr "[A-Z]" "[a-z]" <<< "$ext") local ext2=$(tr "[A-Z]" "[a-z]" <<< "$ext2") if [[ "$ext2" == "tar" ]] then ext="${ext2}.${ext}" base="$base2" fi echo "$base" } ############################################################################### ## @brief function to get the extension of a file ## ## @param input pathname to parse ## ## @return 0 on success ## @retval stdout the extension ## ############################################################################### file_get_extension () { local input="$1" local file="${input##*/}" local base="${file%.*}" local base2="${base%.*}" local ext="${file##*.}" local ext2="${base##*.}" local ext=$( tr "[A-Z]" "[a-z]" <<< "$ext") local ext2=$(tr 
"[A-Z]" "[a-z]" <<< "$ext2") if [[ "$ext2" == "tar" ]] then ext="${ext2}.${ext}" base="$base2" fi echo "$ext" } ############################################################################### ## @brief function to get the dir of a file ## ## @param input pathname to parse ## ## @return 0 on success ## @retval stdout the dir ## ############################################################################### file_get_dir () { local input="$1" local dir="${input%/*}" if [[ "$dir" != "$input" ]] then echo "${dir}/" fi } ############################################################################### ## @brief function to test for true ## ## @param value the value to test ## @param default the default return value if no match ## ## @return 0 for true, 1 for false ## ## @details ## case insensitive matching of true/ false / yes / no / 0 (false)/ 1 (true) ## ############################################################################### istrue () { case "$1" in [nN][oO]) false ;; [yY][eE][sS]) true ;; [fF][aA][lL][sS][eE]) false ;; [tT][rR][uU][eE]) true ;; 0) false ;; 1) true ;; *) ##### if there is a second numeric arg return it by itself,##### ##### otherwise return the default of false ##### if [ -n "$2" ] then return $2 else false fi ;; esac } ############################################################################### ## @brief function to proccess a file ## ## @param myline line from a ftp mirror file ## ## @details ## global vars ## @param indir dir the original file(s) will be placed in ## @param outdir dir to place the output in ## @param tmp the temp dir ## ## note this function echoes to fd 3 when finished and is called by mainloop ## multi proccesseing ## ############################################################################### dofile () { local myline=$1 local sourcedir=${indir//\/\///} local sourcedir=${sourcedir//\/\///} local tmpdir local dir="" tmpdir=$(mktemp -d -p "$tmp" "${dsname}XXXXXXXXXX") || { printerror "mktemp" echo >&3 return } if ! 
grep -q -e "^get" <<< "$myline" then echo >&3 rm -rf "${tmpdir}" return fi ##### make an array of files ##### local i=0 local files local line while read line do files[i++]=$(sed 's:.*/::' <<<"$line" | sed 's/.* -o //' | sed 's/%20/ /g') done < <( tr ";" "\n" <<<"$myline" ) ##### get the last file name ##### local file="${files[@]:(-1)}" if ! istrue "$ignoredir" && grep -q -e "$sourcedir/" <<<"$myline" then dir="$(sed "s|.*$sourcedir/\(.*\) [A-Za-z]*:/.*|\1|" <<<"$myline")/" fi local ext=$(file_get_extension "$file") local base=$(file_get_basename "$file") local ts=$(getdate <<< "$myline") local run=$(getrun <<< "$myline") local hour=$(gethour <<< "$myline") lftp -e "$(subtmplftp "${tmpdir}" <<<"$myline") ; exit" > /dev/null 2> /dev/null dofile_sub "$file" "$ext" "$tmpdir" "$ts" "$run" "$hour" ${files[@]} local status=$? if (($status == 0)) then ##### mv the files to the source dir ##### local f for f in "${files[@]}" do if [ -f "${tmpdir}/${f}" ] then mv "${tmpdir}/${f}" "${indir}/${dir}${f}" fi done fi rm -rf "${tmpdir}" echo >&3 "$myline" return $status } #### the following code came mostly from grib_contour and is gpl2 dofile_sub() { local file="$1" local ext="$2" local tmpdir="$3" local ts="$4" local run="$5" local hour="$6" local files=("${@:7}") local gribfile="${tmpdir}/$file" local msg local umsg local vmsg msg=$(getgribindex "$gribfile" "HGT=.* 100000-ISBL") plot "$gribfile" 1000hgt "$msg" 20 "$hour" 03 "$run" "$ts" msg=$(getgribindex "$gribfile" "TMP=.* 100000-ISBL") plot "$gribfile" 1000t "$msg" 1 "$hour" 03 "$run" "$ts" umsg=$(getgribindex "$gribfile" "UGRD=.* 100000-ISBL") vmsg=$(getgribindex "$gribfile" "VGRD=.* 100000-ISBL") windplot "$gribfile" $gribfile 1000wind "$umsg" "$vmsg" 5 "$hour" 03 "$run" "$ts" msg=$(getgribindex "$gribfile" "HGT=.* 92500-ISBL") plot "$gribfile" 925hgt "$msg" 20 "$hour" 03 "$run" "$ts" msg=$(getgribindex "$gribfile" "TMP=.* 92500-ISBL") plot "$gribfile" 925t "$msg" 1 "$hour" 03 "$run" "$ts" umsg=$(getgribindex "$gribfile" 
"UGRD=.* 92500-ISBL") vmsg=$(getgribindex "$gribfile" "VGRD=.* 92500-ISBL") windplot "$gribfile" $gribfile 925wind "$umsg" "$vmsg" 5 "$hour" 03 "$run" "$ts" #msg=$(getgribindex "$gribfile" "HGT=.* 85000-ISBL") #plot "$gribfile" 850hgt "$msg" 20 "$hour" 03 "$run" "$ts" #msg=$(getgribindex "$gribfile" "TMP=.* 85000-ISBL") #plot "$gribfile" 850t "$msg" 1 "$hour" 03 "$run" "$ts" #umsg=$(getgribindex "$gribfile" "UGRD=.* 85000-ISBL") #vmsg=$(getgribindex "$gribfile" "VGRD=.* 85000-ISBL") #windplot "$gribfile" $gribfile 850wind "$umsg" "$vmsg" 5 "$hour" 03 "$run" "$ts" #msg=$(getgribindex "$gribfile" "HGT=.* 70000-ISBL") #plot "$gribfile" 700hgt "$msg" 20 "$hour" 03 "$run" "$ts" #msg=$(getgribindex "$gribfile" "TMP=.* 70000-ISBL") #plot "$gribfile" 700t "$msg" 1 "$hour" 03 "$run" "$ts" #umsg=$(getgribindex "$gribfile" "UGRD=.* 70000-ISBL") #vmsg=$(getgribindex "$gribfile" "VGRD=.* 70000-ISBL") #windplot "$gribfile" $gribfile 700wind "$umsg" "$vmsg" 5 "$hour" 03 "$run" "$ts" msg=$(getgribindex "$gribfile" "HGT=.* 50000-ISBL") plot "$gribfile" 500hgt "$msg" 20 "$hour" 03 "$run" "$ts" msg=$(getgribindex "$gribfile" "TMP=.* 50000-ISBL") plot "$gribfile" 500t "$msg" 1 "$hour" 03 "$run" "$ts" umsg=$(getgribindex "$gribfile" "UGRD=.* 50000-ISBL") vmsg=$(getgribindex "$gribfile" "VGRD=.* 50000-ISBL") windplot "$gribfile" $gribfile 500wind "$umsg" "$vmsg" 10 "$hour" 03 "$run" "$ts" msg=$(getgribindex "$gribfile" "HGT=.* 30000-ISBL") plot "$gribfile" 300hgt "$msg" 20 "$hour" 03 "$run" "$ts" msg=$(getgribindex "$gribfile" "TMP=.* 30000-ISBL") plot "$gribfile" 300t "$msg" 1 "$hour" 03 "$run" "$ts" umsg=$(getgribindex "$gribfile" "UGRD=.* 30000-ISBL") vmsg=$(getgribindex "$gribfile" "VGRD=.* 30000-ISBL") windplot "$gribfile" $gribfile 300wind "$umsg" "$vmsg" 10 "$hour" 03 "$run" "$ts" msg=$(getgribindex "$gribfile" "HGT=.* 20000-ISBL") plot "$gribfile" 200hgt "$msg" 20 "$hour" 03 "$run" "$ts" msg=$(getgribindex "$gribfile" "TMP=.* 20000-ISBL") plot "$gribfile" 200t "$msg" 1 "$hour" 
03 "$run" "$ts" umsg=$(getgribindex "$gribfile" "UGRD=.* 20000-ISBL") vmsg=$(getgribindex "$gribfile" "VGRD=.* 20000-ISBL") windplot "$gribfile" $gribfile 200wind "$umsg" "$vmsg" 10 "$hour" 03 "$run" "$ts" msg=$(getgribindex "$gribfile" "PRMSL=.* 0-MSL") plot "$gribfile" pmsl "$msg" 200 "$hour" 03 "$run" "$ts" } ############################################################################### ## @brief function to plot the data ## ## @param gribfile full path to the grib file to plot ## @param name name of the product ie: 925hgt ## @param gribmsg the grib msg # ## @param interval interval apart the contours are ## @param timee the forcast hour in the run ## @param incr how many hours apart the frames are ## @param extra extra args to pass to grib_contour ## ## @details ## global vars ## @param wwwdisk the base web dir to write to "/var/www/html/" ## @param run the run of the model "00" ## @param dsname name of the dataset ## ############################################################################### plot () { local gribfile="$1" local name="$2" local grbmsg="$3" local interval="$4" local timee="$5" local incr="$6" local run="$7" local ts="$8" local extra="$9" local zip local kml zip="${outdir}/${dsname}/${name}${ts}${run}${timee}.kmz" kml="${name}${ts}${run}${timee}.kml" if [[ -f "$zip" ]] then rm "$zip" fi nice -n 10 grib_contour -g "${gribfile}" -m "$grbmsg" \ -i "$interval" -s "$name" \ -k "$kml" -z "$zip" \ "$extra" appendkml "$name" "$timee" "$incr" "$grid" "$run" "$ts" } ############################################################################### ## @brief function to plot the wind data ## ## @param ufile full path to the u grib file to plot ## @param vfile full path to the u grib file to plot ## @param name name of the product ie: 925hgt ## @param umsg the u grib msg # ## @param vmsg the v grib msg # ## @param interval interval apart the contours are ## @param timee the forcast hour in the run ## @param incr how many hours apart the frames are ## @param 
extra extra args to pass to grib_contour ## ## @details ## global vars ## @param wwwdisk the base web dir to write to "/var/www/html/" ## @param run the run of the model "00" ## @param dsname name of the dataset ## ############################################################################### windplot () { local ufile="$1" local vfile="$2" local name="$3" local umsg="$4" local vmsg="$5" local interval="$6" local timee="$7" local incr="$8" local run="${9}" local ts="${10}" local extra="${11}" local zip local kml zip="${outdir}/${dsname}/${name}${ts}${run}${timee}.kmz" kml="${name}${ts}${run}${timee}.kml" if [[ -f "$zip" ]] then rm "$zip" fi nice -n 10 grib_contour -w -u "${ufile}" -v "${vfile}" \ -U $umsg -V $vmsg \ -i $interval -s $name \ -k $kml -z "$zip" \ $extra appendkml "$name" "$timee" "$incr" "$grid" "$run" "$ts" #appendgeoext $name $timee $incr $grid #appendmapfile "$name" "$zip" } getgribindex() { degrib -in "$1" -I |\ grep "$2" |\ cut -d "," -f 1 |\ head -n 1 } ############################################################################### ## @brief function to create the root kml file ## ## @param name the name of the product ie: 925hgt ## ## @details ## global vars ## @param wwwdisk the base web dir to write to "/var/www/html/" ## @param run the run of the model "00" ## @param www the base url for the website ## @param dsname name of the dataset ## ############################################################################### mkrootkml () { local name="$1" local frames="${outdir}/${dsname}/${name}.kml" cat > $frames << EOF <?xml version="1.0" encoding="UTF-8"?> <kml xmlns="http://earth.google.com/kml/2.2"> <Folder> <ScreenOverlay> <name>Key</name> <visibility>1</visibility> <Icon> <href>${www}/kml/images/${name}.png</href> </Icon> <overlayXY x="0" y="1" xunits="fraction" yunits="fraction"/> <screenXY x="0" y="1" xunits="fraction" yunits="fraction"/> <rotationXY x="0" y="0" xunits="fraction" yunits="fraction"/> <size x="0" y="0" xunits="fraction" 
yunits="fraction"/> </ScreenOverlay> <name>frames</name> <description></description> </Folder> </kml> EOF } ############################################################################### ## @brief function to append the new frame to the root kml ## ## @param name the name of the product ie: 925hgt ## @param hr the forcast hour in the run ## @param incr how many hours apart the frames are ## @param grid optional name to prefix onto the kml name ## ## @details ## global vars ## @param wwwdisk the base web dir to write to "/var/www/html/" ## @param run the run of the model "00" ## @param www the base url for the website ## @param dsname name of the dataset ## ############################################################################### appendkml () { local name="$1" local hr="$2" local incr="$3" local grid="$4" local run="$5" local ts="$6" local frames="${outdir}/${dsname}/${name}.kml" if ! [ -f "$frames" ] then mkrootkml "${name}" fi ##### lock the file ##### local lock="$tmp/grib_contour_hfdkjshfakldf${dsname}.${name}.kml.lock" while ! mkdir "$lock" > /dev/null do sleep 1 done ##### test if the kml is older than 6 hours ##### #local frametime; #local nowtime; #frametime=$(stat --printf "%Y\n" "$frames") #nowtime=$(date "+%s") #if (( frametime + 21600 < nowtime)) #then # mkrootkml "${name}" #fi local zip if [[ "$grid" != "" ]] then zip="${grid}.${name}${ts}${run}${timee}.kmz" else zip="${name}${ts}${run}${timee}.kmz" fi begin=$(date -d "$ts $run GMT $hr hours" "+%FT%TZ" -u) end=$(date -d "$ts $run GMT $hr hours $incr hours" "+%FT%TZ" -u) ed -s $frames << EOF $ -2 a <NetworkLink> <name> ${ts}${run}Z + ${hr} ${grid}.${name}</name> <TimeSpan> <begin>${begin}</begin> <end>${end}</end> </TimeSpan> <visibility>1</visibility> <open>0</open> <Url> <href>${zip}</href> </Url> </NetworkLink> . w EOF ##### remove the lock ##### rmdir "$lock" } main
winkey/grib_contour
share/examples/archive.bash
Shell
gpl-3.0
21,295
#! /bin/sh echo "switch to user lippman" # ensure new user can create file cd /tmp rm setuid_demo rm setgid_demo rm setuid_setgid_demo echo "remove testing file ok"
goodpaperman/apue
04.chapter/permission/setuid_setgid_order_3.sh
Shell
gpl-3.0
166
#!/bin/bash # vim: ts=4:sw=4 . ~/plescripts/plelib.sh . ~/plescripts/networklib.sh . ~/plescripts/global.cfg EXEC_CMD_ACTION=EXEC typeset test_iso_ol7=yes typeset -r ME=$0 typeset -r PARAMS="$*" typeset -r str_usage=\ "Usage : $ME [-skip_test_iso_ol7] Ce script vérifie que le virtual-host remplie les conditions nécessaires au bon fonctionnement de la démo." while [ $# -ne 0 ] do case $1 in -emul) EXEC_CMD_ACTION=NOP shift ;; -skip_test_iso_ol7) test_iso_ol7=no shift ;; -h|-help|help) info "$str_usage" LN exit 1 ;; *) error "Arg '$1' invalid." LN info "$str_usage" exit 1 ;; esac done typeset -i nr_errors=0 function test_if_scripts_exists { line_separator info -n "Directory exists \$HOME/plescripts " if [ ! -d "$HOME/plescripts" ] then info -f "[$KO]" error " must contains all scripts." ((++nr_errors)) else info -f "[$OK]" fi LN } # $1 zip name # Don't work with 12.1 function _test_if_grip_zip_exists { typeset -r zip_name="$1" info "Grid zip $orarel :" info -n "Exist \$HOME/$oracle_install/grid/$zip_name " if [ ! -f "$HOME/$oracle_install/grid/$zip_name" ] then info -f "[$KO]" error " \$HOME/$oracle_install/grid must contains $zip_name" LN ((++nr_errors)) else info -f "[$OK]" LN fi } function test_oracle_binaries_or_zip { line_separator if [[ $orarel =~ 18 ]] then info "Oracle zip $orarel :" info -n "Exist \$HOME/$oracle_install/database/LINUX.X64_180000_db_home.zip " if [ ! -f "$HOME/$oracle_install/database/LINUX.X64_180000_db_home.zip" ] then info -f "[$KO]" error " \$HOME/$oracle_install/database/LINUX.X64_180000_db_home.zip." LN ((++nr_errors)) else info -f "[$OK]" LN fi elif [[ $orarel =~ 12 ]] then info "Oracle $orarel extracted :" info -n "Exist \$HOME/$oracle_install/database/runInstaller " if [ ! -f "$HOME/$oracle_install/database/runInstaller" ] then info -f "[$KO]" error " \$HOME/$oracle_install/database must contains Oracle installer." LN ((++nr_errors)) else info -f "[$OK]" LN fi else error "Release '$orarel' invalid." 
exit 1 fi } function test_grid_binaries_or_zip { line_separator case "$orarel" in 12.2|18.0) _test_if_grip_zip_exists $grid_zip_name ;; 12.1) info "Grid $orarel extracted :" info -n "Exist \$HOME/$oracle_install/grid/runInstaller " if [ ! -f "$HOME/$oracle_install/grid/runInstaller" ] then info -f "[$KO]" error " \$HOME/$oracle_install/grid must contains Grid installer." LN ((++nr_errors)) else info -f "[$OK]" LN fi ;; *) error "Release '$orarel' invalid." exit 1 ;; esac } function _is_exported { typeset -r directory=$1 info -n " - $(replace_paths_by_shell_vars $directory) " typeset -r network=$(right_pad_ip $infra_network) if grep -qE "${directory}\s*${network}.*" /etc/exports 2>/dev/null then info -f "[$OK]" return 0 else ((++nr_errors)) info -f "[$KO]" return 1 fi } function validate_NFS_exports { line_separator typeset -r network=$(right_pad_ip $infra_network) info "Validate NFS exports from $client_hostname on network ${network} :" if ! _is_exported $HOME/plescripts then info " add to /etc/exports : $HOME/plescripts $network/$if_pub_prefix(rw,sync,subtree_check,no_root_squash)" fi if ! _is_exported $HOME/$oracle_install then info " add to /etc/exports : $HOME/oracle_install/$orarel $network/$if_pub_prefix(ro,subtree_check)" fi LN } function ISO_OLinux7_exists { line_separator info -n "ISO Oracle Linux $OL7_LABEL exists $(replace_paths_by_shell_vars $full_linux_iso_name) " if [ ! 
-f "$full_linux_iso_name" ] then info -f "[$KO]" ((++nr_errors)) else info -f "[$OK]" fi LN } function validate_gateway { line_separator info -n "Validate gateway $gateway " if ping -c 1 $gateway >/dev/null 2>&1 then info -f "[$OK]" else info -f "[$KO]" ((++nr_errors)) fi LN } function validate_master_time_server { [ "$master_time_server" == internet ] && return || true line_separator info -n "Time synchronization server : ping of $master_time_server " if ping -c 1 $master_time_server >/dev/null 2>&1 then info -f "[$OK]" else info -f "[$KO]" ((++nr_errors)) fi LN } function validate_nic { line_separator info -n "Validate NIC $if_net_bridgeadapter " if [ "$if_net_bridgeadapter" == "undef" ] then info -f "[$KO]" ((++nr_errors)) else info -f "[$OK]" fi LN } function validate_resolv_conf { line_separator info "Validate resolv.conf " info -n " - Test : search $infra_domain " if grep search /etc/resolv.conf | grep -q $infra_domain then info -f "[$OK]" else info -f "[$KO]" ((++nr_errors)) fi info -n " - Test : nameserver $infra_ip " if grep -q ${infra_ip} /etc/resolv.conf then info -f "[$OK]" else info -f "[$KO]" ((++nr_errors)) fi LN } function _shell_in_path { line_separator info -n "\$PATH contains \$HOME/plescripts/shell " if command_exists stop_vm then info -f "[$OK]" else info -f "[$KO]" ((++nr_errors)) fi LN } function _in_path { typeset option=no if [ "$1" == "-o" ] then option=yes shift fi typeset -r cmd=$1 typeset -r cmd_msg=$2 typeset -r msg=$(printf "%-10s " $cmd) info -n " $msg" if command_exists $cmd then info -f "[$OK]" else if [ $option == yes ] then info -f -n "[${BLUE}optional${NORM}]" else ((++nr_errors)) info -f -n "[$KO]" fi info -f " $cmd_msg" typeset -r distrib=$(grep ^NAME /etc/os-release | cut -d= -f2) if [[ $option == no && $distrib =~ openSUSE ]] then exec_cmd -ci cnf $cmd LN fi fi } function test_tools { _shell_in_path info "Installed :" _in_path VBoxManage "Install VirtualVox" _in_path nc "Install nc" _in_path ssh "Install ssh" _in_path -o git 
"Install git" _in_path -o tmux "Install tmux" LN if command_exists VBoxManage then info -n "$USER in group vboxusers " gid_vboxusers=$(cat /etc/group|grep vboxusers|cut -d: -f3) if id|grep -q $gid_vboxusers then info -f "[$OK]" LN else ((++nr_errors)) if cat /etc/group|grep -qE "^vboxusers.*$USER*" then info -f "[$KO] : disconnect user $USER and connect again." LN else info -f "[$KO] execute : sudo usermod -a -G vboxusers $USER" warning "Take effect on a new connection." LN fi fi fi } function test_if_configure_global_cfg_executed { line_separator info -n "~/plescripts/configure_global.cfg.sh executed " typeset errors_msg typeset -i exec_global=0 typeset hn=$(hostname -s) if [ "$hn" != "$client_hostname" ] then ((++nr_errors)) exec_global=1 errors_msg="\n\tclient_hostname=$client_hostname expected $hn" fi if [ "$USER" != "$common_user_name" ] then ((++nr_errors)) exec_global=1 errors_msg="$errors_msg\n\tcommon_user_name=$common_user_name expected $USER" fi if [ x"$vm_path" == x ] then ((++nr_errors)) exec_global=1 errors_msg="$errors_msg\n\tvm_path not set." fi if [ $exec_global -eq 1 ] then info -f "[$KO]" info "Execute : ./configure_global.cfg.sh" error "Errors :$errors_msg" info "vm_path = $vm_path" else info -f "[$OK]" fi LN } function test_timer_hpet { if [ ! -f /sys/devices/system/clocksource/clocksource0/current_clocksource ] then return fi line_separator typeset -r timer_name=$(cat /sys/devices/system/clocksource/clocksource0/current_clocksource) case "$timer_name" in "hpet") info "Current timer ${GREEN}$timer_name${NORM}." LN ;; kvm-clock) error "Timer ${RED}kvm-clock${NORM} invalid." error "$(hostname -s) must be a physical machine." LN ((++nr_errors)) ;; *) info "Current timer ${RED}$timer_name${NORM}?" if grep -q hpet /sys/devices/system/clocksource/clocksource0/available_clocksource then info " ==> Enable hpet timer for better performances." LN else info " hpet timer not available." 
LN fi ;; esac } typeset -r orarel=${oracle_release%.*.*} test_if_scripts_exists test_oracle_binaries_or_zip test_grid_binaries_or_zip validate_NFS_exports [ $test_iso_ol7 == yes ] && ISO_OLinux7_exists || true validate_gateway validate_nic validate_master_time_server validate_resolv_conf test_tools test_if_configure_global_cfg_executed if [ "$common_user_name" != "no_user_defined" ] then line_separator exec_cmd -c "~/plescripts/shell/set_plescripts_acl.sh" fi test_timer_hpet line_separator if [ $nr_errors -ne 0 ] then error "Configuration failed : $nr_errors errors." LN exit 1 else info "Configuration [$OK]" LN exit 0 fi
PhilippeLeroux/plescripts
validate_config.sh
Shell
gpl-3.0
8,604
# ################################################################################ # Copyright (C) 2014 PLEYNET & Partners <[email protected]> # # A Luxembourgish Company, PLEYNET & Partners, www.pleynet.lu # # This program is free software: you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation, either version 3 of the License, or (at your # option) any later version, with the additional term under section 7(b) # of GPLv3 that the text # "A Luxembourgish Company, PLEYNET & Partners, www.pleynet-jb.com" # must be preserved. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program. If not, see <http://www.gnu.org/licenses/>. # ################################################################################ SRC=$(< ./param-source.txt) NUMPROG=4 LOG="$SRC""Log.sh" UUID=$(cat /proc/sys/kernel/random/uuid | sed -e "s/-//g") if [ echo "Pas assez d'arguments" 1>&2 echo "Les arguments autorisés sont suspend | parttime | fulltime" bash $LOG $UUID $NUMPROG 2 "Pas assez d'arguments" exit 1 fi if [ "$1" != "suspend" -a "$1" != "parttime" -a "$1" != "fulltime" ]; then echo "Argument incorecte" 1>&2 echo "Les arguments autorisés sont suspend | parttime | fulltime" bash $LOG $UUID $NUMPROG 2 "Argument incorecte" exit 2 fi FILESUSPEND="/tmp/DB_suspend" FILEFORCE="/tmp/DB_force" if [ -f "$FILESUSPEND" ]; then rm "$FILESUSPEND" fi if [ -f "$FILEFORCE" ]; then rm "$FILEFORCE" fi if [ "$1" = "suspend" ]; then echo "Suspention des backups" bash $LOG $UUID $NUMPROG 2 "Suspention des backups" echo "" > "$FILESUSPEND" fi if [ "$1" = "fulltime" ]; then echo "Forçage des backups" bash $LOG $UUID $NUMPROG 2 "Forçage des backups" 
echo "" > "$FILEFORCE" fi
Dessoul/DoubleBackup
Changer_mode.sh
Shell
gpl-3.0
2,105
#!/bin/bash -ex # # DeepSea integration test "suites/ceph-test/pynfs.sh" # # This script runs DeepSea stages 0-4 to deploy a Ceph cluster with MDS and # NFS-Ganesha. After stage 4 completes, it runs the PyNFS test suite. # # The script makes no assumptions beyond those listed in qa/README. # # This script takes an optional command-line option, "--fsal", which can # be either "cephfs", "rgw", or "both". If the option is absent, the value # defaults to "cephfs". # # On success, the script returns 0. On failure, for whatever reason, the script # returns non-zero. # # The script produces verbose output on stdout, which can be captured for later # forensic analysis. # BASEDIR=$(pwd) source $BASEDIR/common/common.sh source $BASEDIR/common/nfs-ganesha.sh function usage { set +x echo "${0} - script for testing NFS Ganesha deployment by running PyNFS test suite" echo "for use in SUSE Enterprise Storage testing" echo echo "Usage:" echo " ${0} [-h,--help] [--cli] [--fsal={cephfs,rgw,both}]" echo echo "Options:" echo " --cli Use DeepSea CLI" echo " --fsal Defaults to cephfs" echo " --help Display this usage message" exit 1 } TEMP=$(getopt -o h --long "cli,fsal:,help" \ -n 'pynfs.sh' -- "$@") if [ $? != 0 ] ; then echo "Terminating..." >&2 ; exit 1 ; fi # Note the quotes around TEMP': they are essential! 
eval set -- "$TEMP" # process options CLI="" FSAL=cephfs while true ; do case "$1" in --cli) CLI="cli" ; shift ;; --fsal) FSAL=$2 ; shift ; shift ;; -h|--help) usage ;; # does not return --) shift ; break ;; *) echo "Internal error" ; exit 1 ;; esac done case "$FSAL" in cephfs) break ;; rgw) break ;; both) break ;; *) usage ;; # does not return esac echo "Testing deployment with FSAL ->$FSAL<-" assert_enhanced_getopt install_deps cat_salt_config run_stage_0 "$CLI" run_stage_1 "$CLI" policy_cfg_base policy_cfg_mon_flex policy_cfg_storage 1 # one node will be a "client" (no storage role) if [ "$FSAL" = "cephfs" -o "$FSAL" = "both" ] ; then policy_cfg_mds fi if [ "$FSAL" = "rgw" -o "$FSAL" = "both" ] ; then policy_cfg_rgw rgw_demo_users fi policy_cfg_nfs_ganesha cat_policy_cfg run_stage_2 "$CLI" ceph_conf_small_cluster run_stage_3 "$CLI" ceph_cluster_status if [ "$FSAL" = "cephfs" -o "$FSAL" = "both" ] ; then create_all_pools_at_once cephfs_data cephfs_metadata fi nfs_ganesha_no_root_squash run_stage_4 "$CLI" ceph_cluster_status ceph_health_test nfs_ganesha_cat_config_file nfs_ganesha_debug_log # kludge to work around mount hang #nfs_ganesha_showmount_loop if [ "$FSAL" = "cephfs" ] ; then nfs_ganesha_pynfs_test else echo "This test supports CephFS FSAL only. Bailing out." exit 1 fi echo "OK"
LenzGr/DeepSea
qa/suites/ceph-test/pynfs.sh
Shell
gpl-3.0
2,803