code        stringlengths    2 to 1.05M
repo_name   stringlengths    5 to 110
path        stringlengths    3 to 922
language    stringclasses    1 value
license     stringclasses    15 values
size        int64            2 to 1.05M
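The schema above describes the records that follow (script text, repository, file path, language, license, byte size). As a minimal sketch of how such records could be loaded and filtered, assuming the rows are exported to a Parquet file with exactly these columns; the file name and the filter values are hypothetical:

import pandas as pd

# Hypothetical export of the rows below; the real file name/location is an assumption.
df = pd.read_parquet("shell_snippets.parquet")

# Example filter: GPL-2.0 shell scripts smaller than 2 KB, using the columns listed above.
small_gpl2 = df[(df["language"] == "Shell")
                & (df["license"] == "gpl-2.0")
                & (df["size"] < 2048)]

print(small_gpl2[["repo_name", "path", "size"]].head())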
#!/bin/bash

# First arg: folder name of unpacked tar
# Second arg: name of tar archive
# Third arg: url where to get tar archive
function getTar {
	if [ ! -d $1 ]; then
		[ ! -f $2 ] && wget $3
		echo "Unpacking $2"
		tar -xf $2
		cd ..
	else
		echo 'Already up-to-date'
	fi
}

function getLatestGit {
	if [ -d $1 ]; then
		cd $1
		git pull
		cd ..
	else
		git clone $2
	fi
}

buildshLocation=$(dirname $0)
dir=$(cd $buildshLocation;pwd)
currentDir=$(pwd)

cd $dir

# Get opencv scons builder
echo 'Getting OpenCV...'
getLatestGit 'openCV-sconsbuilder' https://github.com/bverhagen/openCV-sconsbuilder.git
#if [ -d openCV-sconsbuilder ]; then
#	cd openCV-sconsbuilder
#	git pull
#	cd ..
#else
#	git clone https://github.com/bverhagen/openCV-sconsbuilder.git
#fi
cd openCV-sconsbuilder
./getOpenCv.sh
cd ..

# Get Catch
# Since Catch is just a header file, we add it to Git
#echo 'Getting Catch...'
#catchDir='catch'
#if [ ! -d $catchDir ]; then
#	mkdir catch
#	cd catch
#	wget https://raw.githubusercontent.com/philsquared/Catch/master/single_include/catch.hpp
#	cd ..
#else
#	echo 'Already up-to-date'
#fi

# Get Boost
boostVersion='1.57.0'
boostDir='boost'
# Replace all dots in boostVersion with underscores
boostVersionUnderscore=${boostVersion//./_}
boostArchive="boost_$boostVersionUnderscore.tar.gz"

echo "Getting boost $boostVersion..."
[ ! -d $boostDir ] && mkdir $boostDir
cd $boostDir
getTar "boost_$boostVersionUnderscore" $boostArchive https://downloads.sourceforge.net/project/boost/boost/$boostVersion/$boostArchive
cd ..

cd $currentDir
tass-belgium/EmbeddedMT
3rdparty/get3rdparty.sh
Shell
gpl-2.0
1,557
#!/usr/bin/env bash

PACKAGES="cmake pkgconfig fftw libogg libvorbis lame libsndfile libsamplerate jack sdl libgig libsoundio stk fluid-synth portaudio node fltk carla"

if [ $QT5 ]; then
	PACKAGES="$PACKAGES qt5"
fi

brew install $PACKAGES ccache

if [ -z "$QT5" ]; then
	brew tap cartr/qt4
	brew tap-pin cartr/qt4
	brew install qt@4
fi

npm install -g appdmg
RebeccaDeField/lmms
.travis/osx..install.sh
Shell
gpl-2.0
361
#!/bin/bash

export ARCH=arm
export CROSS_COMPILE=/opt/toolchains/arm-eabi-4.8/bin/arm-eabi-

make -j8 ARCH=arm lineageos_ja3gxx_defconfig
make -j8 ARCH=arm
OPNay/android_kernel_samsung_jalte
build_kernel_test.sh
Shell
gpl-2.0
157
#!/bin/bash fn_help() { echo "*** Error: ${1} ***" echo -e "\n Usage:" echo -e "\n For discovery: ${0} <json> discovery or ${0} <json> net_discovery" echo " * Example #1: Discover process pids with exactly names \"trivial-rewrite\" and \"qmgr\" that are owned by postfix and discover process pids with \"zabbix\" on its name and \"/usr/bin\" regexp on its entire command line -> ${0} '{\"postfix\":{\"exactly\":[\"trivial-rewrite\",\"qmgr\"]},\"root\":{\"name\":[\"zabbix\",\"ssh\"],\"cmd\":[\"/usr/bin\"]}}' discovery" echo " * Example #2: ${0} '{\"postfix\":{\"exactly\":[\"trivial-rewrite\",\"qmgr\"]},\"root\":{\"name\":[\"zabbix\",\"ssh\"],\"cmd\":[\"/bin/sh\"]}}' discovery" echo -e "\n For counters: ${0} <pid> <options>" echo " * Example #1: Retreive process file descritors quantity -> ${0} 928 fd" echo " * Example #2: Retreive process state -> ${0} 928 state" exit 1 } fn_check_user() { if ! id -u "${1}" >/dev/null 2>&1; then fn_help "User ${1} does not exist" fi } fn_get_procs() { PROC_USERS=`jq -r 'keys[]' <<< ${1}` for PROC_USER in ${PROC_USERS}; do fn_check_user ${PROC_USER} SEARCH_TYPES=`jq -r ".[\"${PROC_USER}\"] |keys[]" <<< ${1}` for SEARCH_TYPE in $SEARCH_TYPES; do case $SEARCH_TYPE in cmd ) PGREP_SEARCH_PARAM="-f" ;; name ) PGREP_SEARCH_PARAM="" ;; exactly ) PGREP_SEARCH_PARAM="-x" ;; esac PROC_PATTERNS=`jq -r ".[\"${PROC_USER}\"] |.${SEARCH_TYPE} |.[]" <<< ${1}` for PROC_PATTERN in ${PROC_PATTERNS}; do CMD_RESULT=`pgrep ${PGREP_SEARCH_PARAM} ${PROC_PATTERN} -u ${PROC_USER} -l` for i in ${CMD_RESULT}; do PROC_NAME=`awk '{print $2}' <<< ${i} | tr -d ':$'` PROC_PID=`awk '{print $1}' <<< ${i} | tr -d ':$'` ZBX_JSON=`jq -c ".data |= .+ [{\"{#PROC_USER}\" : \"${PROC_USER}\",\"{#PROC_NAME}\" : \"${PROC_NAME}\",\"{#PROC_PID}\" : \"${PROC_PID}\", ${2}}]" <<< ${ZBX_JSON}` done done done done } IFS=" " if [ $# -lt 2 ]; then fn_help "Missing parameters" fi ACTION=${2} NET_GLOBAL_STAT="/proc/net/dev" ZBX_JSON='{"data":[]}' if [ "${ACTION}" != "discovery" ]; then STAT="/proc/${1}/stat" IO_STAT="/proc/${1}/io" FD_STAT="/proc/${1}/fd" OOM_STAT="/proc/${1}/oom_score" NET_STAT="/proc/${1}/net/dev" else if grep -q '{' <<< ${1}; then if ! jq '.' 
<<< ${1} >/dev/null; then fn_help "Invalid JSON" fi fi fi case ${ACTION} in net_discovery ) IFNAMES=`cat ${NET_GLOBAL_STAT} | grep : | awk '{print $1}' |tr -d ' :'` for IFNAME in ${IFNAMES}; do fn_get_procs ${1} "\"{#IFNAME}\":\"${IFNAME}\"" done echo ${ZBX_JSON} ;; discovery ) fn_get_procs ${1} echo ${ZBX_JSON} ;; fd ) ls ${FD_STAT} | wc -l ;; state ) awk '{ print $3 }' ${STAT} ;; min_flt ) # https://en.wikipedia.org/wiki/Page_fault#Minor awk '{ print $10 }' ${STAT} ;; cmin_flt ) awk '{ print $11 }' ${STAT} ;; maj_flt ) # https://en.wikipedia.org/wiki/Page_fault#Major awk '{ print $12 }' ${STAT} ;; cmaj_flt ) awk '{ print $13 }' ${STAT} ;; cpu_usage ) # http://stackoverflow.com/questions/14885598/bash-script-checking-cpu-usage-of-specific-process top -b -p ${1} -n 1 | grep ${1} | awk '{print $9}' | sed 's/,/./g' ;; priority ) # http://superuser.com/questions/203657/difference-between-nice-value-and-priority-in-the-top-output awk '{ print $18 }' ${STAT} ;; nice ) # http://superuser.com/questions/203657/difference-between-nice-value-and-priority-in-the-top-output awk '{ print $19 }' ${STAT} ;; num_threads ) awk '{ print $20 }' ${STAT} ;; sigpending ) # Max limit per process: ulimit -i awk '{ print $31 }' ${STAT} ;; sigblocked ) awk '{ print $32 }' ${STAT} ;; sigign ) awk '{ print $33 }' ${STAT} ;; sigcatch ) awk '{ print $34 }' ${STAT} ;; io_syscr ) # https://www.kernel.org/doc/Documentation/filesystems/proc.txt - 3.3 /proc/<pid>/io - Display the IO accounting fields grep "^syscr" ${IO_STAT} | awk '{ print $2 }' ;; io_syscw ) # https://www.kernel.org/doc/Documentation/filesystems/proc.txt - 3.3 /proc/<pid>/io - Display the IO accounting fields grep "^syscw" ${IO_STAT} | awk '{ print $2 }' ;; io_read_bytes ) # https://www.kernel.org/doc/Documentation/filesystems/proc.txt - 3.3 /proc/<pid>/io - Display the IO accounting fields grep "^read_bytes" ${IO_STAT} | awk '{ print $2 }' ;; io_write_bytes ) # https://www.kernel.org/doc/Documentation/filesystems/proc.txt - 3.3 /proc/<pid>/io - Display the IO accounting fields grep "^write_bytes" ${IO_STAT} | awk '{ print $2 }' ;; io_cancelled_write_bytes ) # https://www.kernel.org/doc/Documentation/filesystems/proc.txt - 3.3 /proc/<pid>/io - Display the IO accounting fields grep "^cancelled_write_bytes" ${IO_STAT} | awk '{ print $2 }' ;; oom_score ) # https://www.kernel.org/doc/Documentation/filesystems/proc.txt - 3.2 /proc/<pid>/oom_score - Display current oom-killer score cat ${OOM_STAT} ;; # Memory usage is reported in KB -> need to use a custom mitiplier in zabbix mem_uss ) # http://stackoverflow.com/questions/22372960/is-this-explanation-about-vss-rss-pss-uss-accurately smem -c "pid uss" |egrep "^( )?${1}" | awk '{print $2}' ;; mem_pss ) # http://stackoverflow.com/questions/22372960/is-this-explanation-about-vss-rss-pss-uss-accurately smem -c "pid pss" |egrep "^( )?${1}" | awk '{print $2}' ;; mem_swap ) smem -c "pid swap" |egrep "^( )?${1}" | awk '{print $2}' ;; net ) # https://www.kernel.org/doc/Documentation/filesystems/proc.txt - Table 1-9: Network info in /proc/net # http://www.onlamp.com/pub/a/linux/2000/11/16/LinuxAdmin.html case ${4} in # Received bytes_in ) grep ${3}: ${NET_STAT} |awk '{print $2}' ;; packets_in ) grep ${3}: ${NET_STAT} |awk '{print $3}' ;; errs_in ) grep ${3}: ${NET_STAT} |awk '{print $4}' ;; drop_in ) grep ${3}: ${NET_STAT} |awk '{print $5}' ;; # Transmitted bytes_out ) grep ${3}: ${NET_STAT} |awk '{print $10}' ;; packets_out ) grep ${3}: ${NET_STAT} |awk '{print $11}' ;; errs_out ) grep ${3}: ${NET_STAT} |awk '{print $12}' 
;; drop_out ) grep ${3}: ${NET_STAT} |awk '{print $13}' ;; esac ;; * ) fn_help esac
galindro/zabbix-enhanced-templates
scripts/get_proc_stats.sh
Shell
gpl-2.0
6,680
#!/bin/bash # 7c is as 6z, but reducing the left and right tolerance from 5 to 4. # No clear difference. # I reran the scoring of train_dev for 6z because the scoring script # has had a bug fixed. # local/score.sh data/train_dev exp/chain/tdnn_6z_sp/graph_sw1_tg exp/chain/tdnn_6z_sp/decode_train_dev_sw1_tg # local/score.sh data/train_dev exp/chain/tdnn_6z_sp/graph_sw1_tg exp/chain/tdnn_6z_sp/decode_train_dev_sw1_fsh_fg; # local/chain/compare_wer.sh 6z 7c # System 6z 7c # WER on train_dev(tg) 14.88 14.89 # WER on train_dev(fg) 13.66 13.69 # WER on eval2000(tg) 17.2 17.2 # WER on eval2000(fg) 15.6 15.5 # Final train prob -0.106268 -0.107003 # Final valid prob -0.126726 -0.133782 # Final train prob (xent) -1.4556 -1.40549 # Final valid prob (xent) -1.50136 -1.47833 # local/chain/compare_wer.sh 6v 6y 6z # System 6v 6y 6z # WER on train_dev(tg) 15.00 15.36 15.18 # WER on train_dev(fg) 13.91 14.19 14.06 # WER on eval2000(tg) 17.2 17.2 17.2 # WER on eval2000(fg) 15.7 15.8 15.6 # Final train prob -0.105012 -0.102139 -0.106268 # Final valid prob -0.125877 -0.119654 -0.126726 # Final train prob (xent) -1.54736 -1.55598 -1.4556 # Final valid prob (xent) -1.57475 -1.58821 -1.50136 # 6y is as 6w, but after fixing the config-generation script to use # a higher learning-rate factor for the final xent layer (it was otherwise # training too slowly). # 6w is as 6v (a new tdnn-based recipe), but using 1.5 million not 1.2 million # frames per iter (and of course re-dumping the egs). # this is same as v2 script but with xent-regularization # it has a different splicing configuration set -e # configs for 'chain' affix= stage=12 train_stage=-10 get_egs_stage=-10 speed_perturb=true dir=exp/chain/tdnn_7c # Note: _sp will get added to this if $speed_perturb == true. decode_iter= # TDNN options # this script uses the new tdnn config generator so it needs a final 0 to reflect that the final layer input has no splicing # smoothing options self_repair_scale=0.00001 # training options num_epochs=4 initial_effective_lrate=0.001 final_effective_lrate=0.0001 leftmost_questions_truncate=-1 max_param_change=2.0 final_layer_normalize_target=0.5 num_jobs_initial=3 num_jobs_final=16 minibatch_size=128 relu_dim=576 frames_per_eg=150 remove_egs=false common_egs_dir= xent_regularize=0.1 # End configuration section. echo "$0 $@" # Print the command line for logging . cmd.sh . ./path.sh . ./utils/parse_options.sh if ! cuda-compiled; then cat <<EOF && exit 1 This script is intended to be used with GPUs but you have not compiled Kaldi with CUDA If you want to use GPUs (and have them), go to src/, and configure and make on a machine where "nvcc" is installed. EOF fi # The iVector-extraction and feature-dumping parts are the same as the standard # nnet3 setup, and you can skip them by setting "--stage 8" if you have already # run those things. suffix= if [ "$speed_perturb" == "true" ]; then suffix=_sp fi dir=${dir}${affix:+_$affix}$suffix train_set=train_nodup$suffix ali_dir=exp/tri4_ali_nodup$suffix treedir=exp/chain/tri5_2y_tree$suffix lang=data/lang_chain_2y # if we are using the speed-perturbed data we need to generate # alignments for it. local/nnet3/run_ivector_common.sh --stage $stage \ --speed-perturb $speed_perturb \ --generate-alignments $speed_perturb || exit 1; if [ $stage -le 9 ]; then # Get the alignments as lattices (gives the CTC training more freedom). 
# use the same num-jobs as the alignments nj=$(cat exp/tri4_ali_nodup$suffix/num_jobs) || exit 1; steps/align_fmllr_lats.sh --nj $nj --cmd "$train_cmd" data/$train_set \ data/lang exp/tri4 exp/tri4_lats_nodup$suffix rm exp/tri4_lats_nodup$suffix/fsts.*.gz # save space fi if [ $stage -le 10 ]; then # Create a version of the lang/ directory that has one state per phone in the # topo file. [note, it really has two states.. the first one is only repeated # once, the second one has zero or more repeats.] rm -rf $lang cp -r data/lang $lang silphonelist=$(cat $lang/phones/silence.csl) || exit 1; nonsilphonelist=$(cat $lang/phones/nonsilence.csl) || exit 1; # Use our special topology... note that later on may have to tune this # topology. steps/nnet3/chain/gen_topo.py $nonsilphonelist $silphonelist >$lang/topo fi if [ $stage -le 11 ]; then # Build a tree using our new topology. steps/nnet3/chain/build_tree.sh --frame-subsampling-factor 3 \ --leftmost-questions-truncate $leftmost_questions_truncate \ --cmd "$train_cmd" 9000 data/$train_set $lang $ali_dir $treedir fi if [ $stage -le 12 ]; then echo "$0: creating neural net configs"; if [ ! -z "$relu_dim" ]; then dim_opts="--relu-dim $relu_dim" else dim_opts="--pnorm-input-dim $pnorm_input_dim --pnorm-output-dim $pnorm_output_dim" fi # create the config files for nnet initialization repair_opts=${self_repair_scale:+" --self-repair-scale-nonlinearity $self_repair_scale "} steps/nnet3/tdnn/make_configs.py \ $repair_opts \ --feat-dir data/${train_set}_hires \ --ivector-dir exp/nnet3/ivectors_${train_set} \ --tree-dir $treedir \ $dim_opts \ --splice-indexes "-1,0,1 -1,0,1,2 -3,0,3 -3,0,3 -3,0,3 -6,-3,0 0" \ --use-presoftmax-prior-scale false \ --xent-regularize $xent_regularize \ --xent-separate-forward-affine true \ --include-log-softmax false \ --final-layer-normalize-target $final_layer_normalize_target \ $dir/configs || exit 1; fi if [ $stage -le 13 ]; then if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! -d $dir/egs/storage ]; then utils/create_split_dir.pl \ /export/b0{5,6,7,8}/$USER/kaldi-data/egs/swbd-$(date +'%m_%d_%H_%M')/s5c/$dir/egs/storage $dir/egs/storage fi touch $dir/egs/.nodelete # keep egs around when that run dies. steps/nnet3/chain/train.py --stage $train_stage \ --cmd "$decode_cmd" \ --feat.online-ivector-dir exp/nnet3/ivectors_${train_set} \ --feat.cmvn-opts "--norm-means=false --norm-vars=false" \ --chain.xent-regularize $xent_regularize \ --chain.leaky-hmm-coefficient 0.1 \ --chain.left-tolerance 4 --chain.right-tolerance 4 \ --chain.l2-regularize 0.00005 \ --chain.apply-deriv-weights false \ --chain.lm-opts="--num-extra-lm-states=2000" \ --egs.stage $get_egs_stage \ --egs.opts "--frames-overlap-per-eg 0" \ --egs.chunk-width $frames_per_eg \ --trainer.num-chunk-per-minibatch $minibatch_size \ --trainer.frames-per-iter 1500000 \ --trainer.num-epochs $num_epochs \ --trainer.optimization.num-jobs-initial $num_jobs_initial \ --trainer.optimization.num-jobs-final $num_jobs_final \ --trainer.optimization.initial-effective-lrate $initial_effective_lrate \ --trainer.optimization.final-effective-lrate $final_effective_lrate \ --trainer.max-param-change $max_param_change \ --cleanup.remove-egs $remove_egs \ --feat-dir data/${train_set}_hires \ --tree-dir $treedir \ --lat-dir exp/tri4_lats_nodup$suffix \ --dir $dir || exit 1; fi if [ $stage -le 13 ]; then # Note: it might appear that this $lang directory is mismatched, and it is as # far as the 'topo' is concerned, but this script doesn't read the 'topo' from # the lang directory. 
utils/mkgraph.sh --self-loop-scale 1.0 data/lang_sw1_tg $dir $dir/graph_sw1_tg fi decode_suff=sw1_tg graph_dir=$dir/graph_sw1_tg if [ $stage -le 14 ]; then iter_opts= if [ ! -z $decode_iter ]; then iter_opts=" --iter $decode_iter " fi for decode_set in train_dev eval2000; do ( steps/nnet3/decode.sh --acwt 1.0 --post-decode-acwt 10.0 \ --nj 50 --cmd "$decode_cmd" $iter_opts \ --online-ivector-dir exp/nnet3/ivectors_${decode_set} \ $graph_dir data/${decode_set}_hires $dir/decode_${decode_set}${decode_iter:+_$decode_iter}_${decode_suff} || exit 1; if $has_fisher; then steps/lmrescore_const_arpa.sh --cmd "$decode_cmd" \ data/lang_sw1_{tg,fsh_fg} data/${decode_set}_hires \ $dir/decode_${decode_set}${decode_iter:+_$decode_iter}_sw1_{tg,fsh_fg} || exit 1; fi ) & done fi wait; exit 0;
michellemorales/OpenMM
kaldi/egs/swbd/s5c/local/chain/tuning/run_tdnn_7c.sh
Shell
gpl-2.0
8,332
#!/bin/bash

# Copyright 2014 Mirsk Digital ApS (Author: Andreas Kirkedal)
# Copyright 2016 KTH Royal Institute of Technology (Author: Emelie Kullmann)

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#  http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
# WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
# MERCHANTABLITY OR NON-INFRINGEMENT.
# See the Apache 2 License for the specific language governing permissions and
# limitations under the License.

if [ $# != 2 ]; then
	echo "Usage: create_dataset.sh <src-data-dir> <dest-dir> "
	exit 1
fi

src=$1
dest=$2

mkdir $dest

python3 local/normalize_transcript_prefixed.py $src/text.unnormalised $src/onlyids $src/onlytext
paste -d ' ' $src/onlyids $src/onlytext > $dest/text

for f in wav.scp utt2spk; do
	cp $src/$f $dest/$f
done

utils/utt2spk_to_spk2utt.pl $dest/utt2spk > $dest/spk2utt
utils/validate_data_dir.sh --no-feats $dest || exit 1;
michellemorales/OpenMM
kaldi/egs/sprakbanken_swe/s5/local/create_datasets.sh
Shell
gpl-2.0
1,200
#!/bin/bash

uwsgi --ini ./conf/uwsgi_192.168.1.103.ini
eliot-framework/eliot
deploy/deploy_192.168.1.103.sh
Shell
gpl-2.0
54
# # Copyright (C) 2010 OpenWrt.org # . /lib/ramips.sh PART_NAME=firmware RAMFS_COPY_DATA=/lib/ramips.sh platform_check_image() { local board=$(ramips_board_name) local magic="$(get_magic_long "$1")" [ "$#" -gt 1 ] && return 1 case "$board" in 3g-6200n | \ 3g-6200nl | \ 3g150b | \ 3g300m | \ a5-v11 | \ air3gii | \ ai-br100 |\ all0239-3g | \ all0256n | \ all5002 | \ all5003 | \ ar725w | \ asl26555 | \ awapn2403 | \ awm002-evb | \ awm003-evb | \ bc2 | \ broadway | \ carambola | \ cf-wr800n | \ d105 | \ dap-1350 | \ dcs-930 | \ dcs-930l-b1 | \ dir-300-b1 | \ dir-300-b7 | \ dir-320-b1 | \ dir-600-b1 | \ dir-600-b2 | \ dir-615-d | \ dir-615-h1 | \ dir-620-a1 | \ dir-620-d1 | \ dir-810l | \ e1700 | \ ex2700 |\ esr-9753 | \ f7c027 | \ fonera20n | \ freestation5 | \ firewrt |\ pbr-m1 |\ hg255d | \ hlk-rm04 | \ ht-tm02 | \ hw550-3g | \ hc5661-v2|\ ip2202 | \ linkits7688 | \ linkits7688d | \ m2m | \ m3 | \ m4 | \ microwrt | \ mlw221 | \ mlwg2 | \ mofi3500-3gn | \ mpr-a1 | \ mpr-a2 | \ mr-102n | \ mzk-w300nh2 | \ nbg-419n | \ nw718 | \ omni-emb | \ omni-emb-hpm | \ omni-plug | \ olinuxino-rt5350f | \ olinuxino-rt5350f-evb | \ psr-680w | \ px4885 | \ re6500 | \ rp-n53 | \ rt-g32-b1 | \ rt-n10-plus | \ rt-n13u | \ rt-n14u | \ rt-n15 | \ rt-n56u | \ rut5xx | \ sl-r7205 | \ tew-691gr | \ tew-692gr | \ ur-326n4g |\ ur-336un |\ v22rw-2x2 | \ vocore | \ w150m | \ w306r-v20 |\ w502u |\ whr-g300n |\ whr-300hp2 |\ whr-600d |\ whr-1166d |\ wizfi630a |\ witi |\ wsr-600 |\ wl-330n | \ wl-330n3g | \ wl-351 | \ wl341v3 | \ wli-tx4-ag300n | \ wzr-agl300nh | \ wmr300 |\ wnce2001 | \ wr512-3gn |\ wr6202 |\ wr8305rt |\ wrtnode |\ wt1520 |\ wt3020 |\ x5 |\ x8 |\ xiaomi-miwifi-mini |\ y1 |\ y1s |\ zbt-wa05 |\ zbt-wg2626 |\ zte-q7) [ "$magic" != "27051956" ] && { echo "Invalid image type." return 1 } return 0 ;; wsr-1166) [ "$magic" != "48445230" ] && { echo "Invalid image type." return 1 } return 0 ;; ar670w) [ "$magic" != "6d000080" ] && { echo "Invalid image type." return 1 } return 0 ;; cy-swr1100 |\ dir-610-a1 |\ dir-645 |\ dir-860l-b1) [ "$magic" != "5ea3a417" ] && { echo "Invalid image type." return 1 } return 0 ;; br-6475nd) [ "$magic" != "43535953" ] && { echo "Invalid image type." return 1 } return 0 ;; c20i) [ "$magic" != "03000000" ] && { echo "Invalid image type." return 1 } return 0 ;; esac echo "Sysupgrade is not yet supported on $board." return 1 } platform_do_upgrade() { local board=$(ramips_board_name) case "$board" in *) default_do_upgrade "$ARGV" ;; esac } disable_watchdog() { killall watchdog ( ps | grep -v 'grep' | grep '/dev/watchdog' ) && { echo 'Could not disable watchdog' return 1 } } blink_led() { . /etc/diag.sh; set_state upgrade } append sysupgrade_pre_upgrade disable_watchdog append sysupgrade_pre_upgrade blink_led
Ntemis/openwrtcc
target/linux/ramips/base-files/lib/upgrade/platform.sh
Shell
gpl-2.0
3,005
#! /bin/bash

SCRIPT_PATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
CALL_PATH="$PWD"

# NOTE: To link statically: --enable-static
# (Or was it --enable-portable? both?)

# perform the bootstrap
echo "Bootstrapping bcplusparser..."
cd "$SCRIPT_PATH"

if [ "$1" == "noconf" ]
then
	mkdir -p build-scripts && \
	libtoolize && \
	aclocal -I build-scripts --install && \
	autoheader && \
	automake --add-missing && \
	autoconf
else
	mkdir -p build-scripts && \
	libtoolize && \
	aclocal -I build-scripts --install && \
	autoheader && \
	automake --add-missing && \
	autoconf && \
	./configure "$@"
fi

cd "$CALL_PATH"
babb517/bcplusparser
bootstrap.sh
Shell
gpl-2.0
635
# # Copyright (C) 2010 OpenWrt.org # . /lib/ramips.sh PART_NAME=firmware RAMFS_COPY_DATA=/lib/ramips.sh platform_check_image() { local board=$(ramips_board_name) local magic="$(get_magic_long "$1")" [ "$#" -gt 1 ] && return 1 case "$board" in 3g-6200n | \ 3g-6200nl | \ 3g150b | \ 3g300m | \ a5-v11 | \ air3gii | \ ai-br100 |\ all0239-3g | \ all0256n | \ all5002 | \ all5003 | \ ar725w | \ asl26555 | \ awapn2403 | \ awm002-evb | \ awm003-evb | \ bc2 | \ broadway | \ carambola | \ d105 | \ dap-1350 | \ dcs-930 | \ dir-300-b1 | \ dir-300-b7 | \ dir-320-b1 | \ dir-600-b1 | \ dir-600-b2 | \ dir-615-d | \ dir-615-h1 | \ dir-620-a1 | \ dir-620-d1 | \ dir-810l | \ e1700 | \ esr-9753 | \ f7c027 | \ fonera20n | \ freestation5 | \ firewrt |\ hg255d | \ hlk-rm04 | \ hiwifi-hc5761 |\ ht-tm02 | \ hw550-3g | \ ip2202 | \ m2m | \ m3 | \ m4 | \ mlw221 | \ mlwg2 | \ mofi3500-3gn | \ mpr-a1 | \ mpr-a2 | \ mzk-w300nh2 | \ nbg-419n | \ nw718 | \ omni-emb | \ omni-emb-hpm | \ omni-plug | \ psr-680w | \ px4885 | \ rp-n53 | \ rt-g32-b1 | \ rt-n10-plus | \ rt-n13u | \ rt-n14u | \ rt-n15 | \ rt-n56u | \ rut5xx | \ sl-r7205 | \ tew-691gr | \ tew-692gr | \ ur-326n4g |\ ur-336un |\ v22rw-2x2 | \ vocore | \ w150m | \ w306r-v20 |\ w502u |\ whr-g300n |\ whr-600d |\ whr-1166d |\ wsr-600 |\ wl-330n | \ wl-330n3g | \ wl-351 | \ wl341v3 | \ wli-tx4-ag300n | \ wzr-agl300nh | \ wmr300 |\ wnce2001 | \ wr512-3gn |\ wr6202 |\ wr8305rt |\ wrtnode |\ wt1520 |\ wt3020 |\ x5 |\ x8 |\ xiaomi-miwifi-mini |\ y1 |\ y1s |\ zte-q7 |\ zbt-wa05) [ "$magic" != "27051956" ] && { echo "Invalid image type." return 1 } return 0 ;; wsr-1166) [ "$magic" != "48445230" ] && { echo "Invalid image type." return 1 } return 0 ;; ar670w) [ "$magic" != "6d000080" ] && { echo "Invalid image type." return 1 } return 0 ;; cy-swr1100 |\ dir-610-a1 |\ dir-645 |\ dir-860l-b1) [ "$magic" != "5ea3a417" ] && { echo "Invalid image type." return 1 } return 0 ;; br-6475nd) [ "$magic" != "43535953" ] && { echo "Invalid image type." return 1 } return 0 ;; c20i) [ "$magic" != "03000000" ] && { echo "Invalid image type." return 1 } return 0 ;; esac echo "Sysupgrade is not yet supported on $board." return 1 } platform_do_upgrade() { local board=$(ramips_board_name) case "$board" in *) default_do_upgrade "$ARGV" ;; esac } disable_watchdog() { killall watchdog ( ps | grep -v 'grep' | grep '/dev/watchdog' ) && { echo 'Could not disable watchdog' return 1 } } append sysupgrade_pre_upgrade disable_watchdog
okcom84301/openwrt
target/linux/ramips/base-files/lib/upgrade/platform.sh
Shell
gpl-2.0
2,690
#!/bin/sh
# grep through the bmy source code
# author: interma@bmy

cd ./src
grep -n $1 *.c
grep -n $1 *.h
cd ../nju09
grep -n $1 *.c
grep -n $1 *.h
cd ../local_utl
grep -n $1 *.c
grep -n $1 *.h
cd ../ythtlib
grep -n $1 *.c
grep -n $1 *.h
cd ../libythtbbs
grep -n $1 *.c
grep -n $1 *.h
cd ../include
grep -n $1 *.h
moqi88/bmybbs
bmy_grep.sh
Shell
gpl-2.0
300
#!/bin/bash # # Tempesta FW service script. # # Copyright (C) 2014 NatSys Lab. ([email protected]). # Copyright (C) 2015-2018 Tempesta Technologies, Inc. # # This program is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, # or (at your option) any later version. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. # See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along with # this program; if not, write to the Free Software Foundation, Inc., 59 # Temple Place - Suite 330, Boston, MA 02111-1307, USA. if [ "${TEMPESTA_LCK}" != "$0" ]; then env TEMPESTA_LCK="$0" flock -n -E 254 "/tmp/tempesta-lock-file" "$0" "$@" if [ $? -eq 254 ]; then echo "Cannot operate with Tempesta FW: locked by another process" exit 3 fi exit fi . "$(dirname $0)/tfw_lib.sh" script_path="$(dirname $0)" tdb_path=${TDB_PATH:="$TFW_ROOT/tempesta_db/core"} tfw_path=${TFW_PATH:="$TFW_ROOT/tempesta_fw"} tls_path=${TLS_PATH:="$TFW_ROOT/tls"} class_path=${TFW_CLS_PATH:="$tfw_path/classifier/"} tfw_cfg_path=${TFW_CFG_PATH:="$TFW_ROOT/etc/tempesta_fw.conf"} sched_path=${TFW_SCHED_PATH:="$tfw_path/sched/"} sched_ko_files=($(ls $sched_path/tfw_sched_*.ko)) tls_mod=tempesta_tls tdb_mod=tempesta_db tfw_mod=tempesta_fw tfw_sched_mod=tfw_sched_$sched frang_mod="tfw_frang" declare -r LONG_OPTS="help,load,unload,start,stop,restart,reload" declare devs=$(ip addr show up | awk '/^[0-9]+/ { sub(/:/, "", $2); print $2}') usage() { echo -e "\nUsage: ${TFW_NAME} [options] {action}\n" echo -e "Options:" echo -e " -f Load Frang, HTTP DoS protection module." echo -e " -d <devs> Ingress and egress network devices" echo -e " (ex. -d \"lo ens3\").\n" echo -e "Actions:" echo -e " --help Show this message and exit." echo -e " --load Load Tempesta modules." echo -e " --unload Unload Tempesta modules." echo -e " --start Load modules and start." echo -e " --stop Stop and unload modules." echo -e " --restart Restart.\n" echo -e " --reload Live reconfiguration.\n" } error() { echo "ERROR: $1" >&1 exit 1 } # Tempesta requires kernel module loading, so we need root credentials. [ `id -u` -ne 0 ] && error "Please, run the script as root" load_one_module() { if [ -z "$1" ]; then echo "$0: Empty argument"; exit 255; fi MOD_PATH_NAME="$1"; shift; MOD_NAME="$(basename ${MOD_PATH_NAME%%.ko})"; lsmod | grep -w "${MOD_NAME}" 2>&1 > /dev/null || { echo "Loading module ${MOD_NAME} $@"; insmod "${MOD_PATH_NAME}" "$@"; } } # The separate load_modules/unload_modules routines are used for unit testing. load_modules() { echo "Loading Tempesta kernel modules..." # Set verbose kernel logging, # so debug messages are shown on serial console as well. 
echo '8 7 1 7' > /proc/sys/kernel/printk load_one_module "$tls_path/$tls_mod.ko" || error "cannot load tempesta TLS module" load_one_module "$tdb_path/$tdb_mod.ko" || error "cannot load tempesta database module" load_one_module "$tfw_path/$tfw_mod.ko" "tfw_cfg_path=$tfw_cfg_path" || error "cannot load tempesta module" for ko_file in "${sched_ko_files[@]}"; do load_one_module "$ko_file" || error "cannot load tempesta scheduler module" done if grep -q -E "^\s*frang_limits" $tfw_cfg_path; then echo "Load Frang" load_one_module "$class_path/$frang_mod.ko" || error "cannot load $frang_mod module" fi } unload_modules() { echo "Un-loading Tempesta kernel modules..." for ko_file in "${sched_ko_files[@]}"; do rmmod $(basename "${ko_file%.ko}") done [ "`lsmod | grep \"\<$frang_mod\>\"`" ] && rmmod $frang_mod rmmod $tfw_mod rmmod $tdb_mod rmmod $tls_mod } setup() { tfw_set_net_queues "$devs" # Tempesta builds socket buffers by itself, don't cork TCP segments. sysctl -w net.ipv4.tcp_autocorking=0 >/dev/null # Sotfirqs are doing more work, so increase input queues. sysctl -w net.core.netdev_max_backlog=10000 >/dev/null sysctl -w net.core.somaxconn=131072 >/dev/null sysctl -w net.ipv4.tcp_max_syn_backlog=131072 >/dev/null } # JS challenge file is a template file, update it using values defined in # TempestaFW configuration file. # Don't break start up process if there are errors in configuration file. # Handling all the possible cases is too complicated for this script. # Let TempestaFW warn user on issues. update_js_challenge_template() { if ! grep -q "^\s*js_challenge\s" $tfw_cfg_path; then return fi echo "...compile html templates" # Cache directive from start to end to simplify extracting values, # checking for line breaks, reordering of options and so on. js_dtv=`grep -m 1 -E '^\s*js_challenge\s[^;]+;' $tfw_cfg_path` c_dtv=`grep --m 1 -E '^\s*sticky\s[^;]+;' $tfw_cfg_path` d_min=`echo $js_dtv | perl -ne 'print "$1\n" if /\sdelay_min=(\d+)/'` d_range=`echo $js_dtv | perl -ne 'print "$1\n" if /\sdelay_range=(\d+)/'` template=`echo $js_dtv | perl -ne 'print "$1\n" if /(\/[^;\s]+)/'` cookie=`echo $c_dtv | perl -ne 'print "$1\n" if /\sname=\"?([\w_]+)\"?/'` # Set default values template=${template:-"/etc/tempesta/js_challenge.html"} cookie=${cookie:-"__tfw"} if [[ -z $d_min || -z $d_range ]]; then echo "Error: 'js_challenge' mandatory options not set!" return fi template=${template%%.html}".tpl" $script_path/update_template.pl $template $cookie $d_min $d_range } start() { echo "Starting Tempesta..." TFW_STATE=$(sysctl net.tempesta.state 2> /dev/null) TFW_STATE=${TFW_STATE##* } [[ -z ${TFW_STATE} ]] && { setup; echo "...load Tempesta modules" load_modules; # Create database directory if it doesn't exist. mkdir -p /opt/tempesta/db/; # At this time we don't have stable TDB data format, so # it would be nice to clean all the tables before the start. # TODO: Remove the hack when TDB is fixed. rm -f /opt/tempesta/db/*.tdb; } update_js_challenge_template echo "...start Tempesta FW" sysctl -w net.tempesta.state=start >/dev/null if [ $? -ne 0 ]; then unload_modules error "cannot start Tempesta FW" else echo "done" fi } stop() { echo "Stopping Tempesta..." sysctl -w net.tempesta.state=stop echo "...unload Tempesta modules" unload_modules echo "done" } reload() { update_js_challenge_template echo "Running live reconfiguration of Tempesta..." sysctl -w net.tempesta.state=start >/dev/null if [ $? 
-ne 0 ]; then error "cannot reconfigure Tempesta FW" else echo "done" fi } args=$(getopt -o "d:f" -a -l "$LONG_OPTS" -- "$@") eval set -- "${args}" while :; do case "$1" in # Selectors for internal usage. --load) load_modules exit ;; --unload) unload_modules exit ;; # User CLI. --start) start exit ;; --stop) stop exit ;; --restart) stop start exit ;; --reload) reload exit ;; # Ignore any options after action. -d) devs=$2 shift 2 ;; --help) usage exit ;; *) error "Bad command line argument: $opt" exit 2 ;; esac done
ikoveshnikov/tempesta
scripts/tempesta.sh
Shell
gpl-2.0
7,282
#!/bin/bash
set -e

echo "This deployment script is ugly. You should probably only use it if you've read and understood the whole thing. Sorry."
echo "This script will now exit without doing anything."
exit 0

current_time=$(date "+%Y%m%d-%H%M")
deploy_filename="aws_deploy/$current_time-upload.zip"

mkdir -p aws_deploy
zip -r $deploy_filename *

aws lambda update-function-code --function-name actdigstream --zip-file fileb://$deploy_filename
rob-deutsch/actdigstream
deploy.sh
Shell
gpl-2.0
445
#!/bin/bash
######################################################################
#
# idFORGE Framework - Manage identity manuals in community
# Copyright © 2015 The CentOS Artwork SIG
#
# idFORGE Framework is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# idFORGE Framework is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with idFORGE Framework; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
# Alain Reguera Delgado <[email protected]>
# 39 Street No. 4426 Cienfuegos, Cuba.
#
######################################################################

function plain_setAttachment {

	[[ -z ${EMAIL_ATTACHMENT} ]] && return

	local ATTACHMENTS="${EMAIL_ATTACHMENT}"
	local ATTACHMENT=''

	EMAIL_ATTACHMENT=''

	for ATTACHMENT in ${ATTACHMENTS};do
		idforge_checkFiles ${ATTACHMENT}
		EMAIL_ATTACHMENT="${EMAIL_ATTACHMENT} -a ${ATTACHMENT}"
	done

}
areguera/idforge
Library/Modules/Render/Modules/Email/Modules/Plain/plain_setAttachment.sh
Shell
gpl-2.0
1,413
#!/bin/zsh

STR="Spec"
echo "${1+$STR}"
echo "Running tests: "
runhaskell -itest-suite -ilibrary test-suite/${1+$STR}.hs
emhoracek/explora
runtests.sh
Shell
gpl-2.0
123
#!/bin/bash

while true; do
	{
		echo "Listening for gcode file"
		/sbin/busybox nc -l -p 9876 > /SAMPLE03.gcode_temp
		echo "Received gcode file"
		NEW_FILE_NAME=/SAMPLE03.gcode_$(/bin/date +"%y-%m-%d-%h-%M-%s")
		if [ -f /SAMPLE03.gcode ]
		then
			echo "Moving SAMPLE03.gcode to "$NEW_FILE_NAME
			mv /SAMPLE03.gcode $NEW_FILE_NAME
		fi
		echo "Moving temp file to SAMPLE03.gcode"
		mv /SAMPLE03.gcode_temp /SAMPLE03.gcode
		sleep 1
	}
done
snakecharmer1024/davinci_wifisd
sd/listen_for_gcode.sh
Shell
gpl-2.0
526
module-starter --module='Testbed::Spatial::VDS::Schema::Result::Public::AccidentRiskResult Testbed::Spatial::VDS::Schema::Result::Public::AnnualStreetVolumeTable Testbed::Spatial::VDS::Schema::Result::Public::Calendar Testbed::Spatial::VDS::Schema::Result::Public::CarbAirbasinsAligned03 Testbed::Spatial::VDS::Schema::Result::Public::CarbAirdistrictsAligned03 Testbed::Spatial::VDS::Schema::Result::Public::CarbCountiesAligned03 Testbed::Spatial::VDS::Schema::Result::Public::City Testbed::Spatial::VDS::Schema::Result::Public::County Testbed::Spatial::VDS::Schema::Result::Public::CountiesFip Testbed::Spatial::VDS::Schema::Result::Public::CountyCity Testbed::Spatial::VDS::Schema::Result::Public::Crossing Testbed::Spatial::VDS::Schema::Result::Public::DesignHvyLongD7812 Testbed::Spatial::VDS::Schema::Result::Public::DetectorCount Testbed::Spatial::VDS::Schema::Result::Public::DetectorCountsHr Testbed::Spatial::VDS::Schema::Result::Public::DistrictCounty Testbed::Spatial::VDS::Schema::Result::Public::DistrictCrossing Testbed::Spatial::VDS::Schema::Result::Public::District Testbed::Spatial::VDS::Schema::Result::Public::FfConnector Testbed::Spatial::VDS::Schema::Result::Public::FinalMatchTbl Testbed::Spatial::VDS::Schema::Result::Public::FreewayOsmRoute Testbed::Spatial::VDS::Schema::Result::Public::Freeway Testbed::Spatial::VDS::Schema::Result::Public::GeomId Testbed::Spatial::VDS::Schema::Result::Public::GeomPoints4269 Testbed::Spatial::VDS::Schema::Result::Public::GeomPoints4326 Testbed::Spatial::VDS::Schema::Result::Public::I405dir Testbed::Spatial::VDS::Schema::Result::Public::JoinedVdsGeom Testbed::Spatial::VDS::Schema::Result::Public::LoopStat Testbed::Spatial::VDS::Schema::Result::Public::LoopSummaryStat Testbed::Spatial::VDS::Schema::Result::Public::MainlineRamp Testbed::Spatial::VDS::Schema::Result::Public::OctamFlowData Testbed::Spatial::VDS::Schema::Result::Public::OctamFlows20002030 Testbed::Spatial::VDS::Schema::Result::Public::OctamLink Testbed::Spatial::VDS::Schema::Result::Public::OctamNetwork Testbed::Spatial::VDS::Schema::Result::Public::OctamNode Testbed::Spatial::VDS::Schema::Result::Public::OctamNodeDerived Testbed::Spatial::VDS::Schema::Result::Public::OctamNodesGeom2230 Testbed::Spatial::VDS::Schema::Result::Public::OctamNodesGeom2875 Testbed::Spatial::VDS::Schema::Result::Public::OctamNodesTest Testbed::Spatial::VDS::Schema::Result::Public::OctamOnRamps Testbed::Spatial::VDS::Schema::Result::Public::OctamSed2000 Testbed::Spatial::VDS::Schema::Result::Public::OctamTaz Testbed::Spatial::VDS::Schema::Result::Public::OctamTazGeom Testbed::Spatial::VDS::Schema::Result::Public::OctamTazGeom2230 Testbed::Spatial::VDS::Schema::Result::Public::OnrTazMatchTbl Testbed::Spatial::VDS::Schema::Result::Public::P5mt Testbed::Spatial::VDS::Schema::Result::Public::Pems5min Testbed::Spatial::VDS::Schema::Result::Public::Pems5minAnnualAvg Testbed::Spatial::VDS::Schema::Result::Public::Pems5minMini Testbed::Spatial::VDS::Schema::Result::Public::PemsRampAggregate Testbed::Spatial::VDS::Schema::Result::Public::PemsRawTest Testbed::Spatial::VDS::Schema::Result::Public::PemsRawTest2 Testbed::Spatial::VDS::Schema::Result::Public::PemsRawTestOld Testbed::Spatial::VDS::Schema::Result::Public::RouteLine Testbed::Spatial::VDS::Schema::Result::Public::RouteTypeMapping Testbed::Spatial::VDS::Schema::Result::Public::SjvNetworkLink Testbed::Spatial::VDS::Schema::Result::Public::SjvNetworkNode Testbed::Spatial::VDS::Schema::Result::Public::SpatialRefSy Testbed::Spatial::VDS::Schema::Result::Public::StatsId 
Testbed::Spatial::VDS::Schema::Result::Public::TazOnrMatchTbl Testbed::Spatial::VDS::Schema::Result::Public::TempVdsData Testbed::Spatial::VDS::Schema::Result::Public::TestbedFacility Testbed::Spatial::VDS::Schema::Result::Public::TestId Testbed::Spatial::VDS::Schema::Result::Public::Timestamp Testbed::Spatial::VDS::Schema::Result::Public::Tvd Testbed::Spatial::VDS::Schema::Result::Public::Vd Testbed::Spatial::VDS::Schema::Result::Public::Vds30secondRawVersioned Testbed::Spatial::VDS::Schema::Result::Public::VdsCity Testbed::Spatial::VDS::Schema::Result::Public::VdsComplete Testbed::Spatial::VDS::Schema::Result::Public::VdsCounty Testbed::Spatial::VDS::Schema::Result::Public::VdsCurrentTest Testbed::Spatial::VDS::Schema::Result::Public::VdsDistrict Testbed::Spatial::VDS::Schema::Result::Public::VdsFreeway Testbed::Spatial::VDS::Schema::Result::Public::VdsGeom2230 Testbed::Spatial::VDS::Schema::Result::Public::VdsHaspems5min Testbed::Spatial::VDS::Schema::Result::Public::VdsIdAll Testbed::Spatial::VDS::Schema::Result::Public::VdsPoints4269 Testbed::Spatial::VDS::Schema::Result::Public::VdsPoints4326 Testbed::Spatial::VDS::Schema::Result::Public::VdsRouteRelation Testbed::Spatial::VDS::Schema::Result::Public::VdsSed2000EOr Testbed::Spatial::VDS::Schema::Result::Public::VdsSed2000NOr Testbed::Spatial::VDS::Schema::Result::Public::VdsSed2000NOrAlt Testbed::Spatial::VDS::Schema::Result::Public::VdsSed2000Or Testbed::Spatial::VDS::Schema::Result::Public::VdsSed2000OrAlt Testbed::Spatial::VDS::Schema::Result::Public::VdsSed2000SimpleOr Testbed::Spatial::VDS::Schema::Result::Public::VdsSed2000SOr Testbed::Spatial::VDS::Schema::Result::Public::VdsSed2000WOr Testbed::Spatial::VDS::Schema::Result::Public::VdsSegmentGeometry Testbed::Spatial::VDS::Schema::Result::Public::VdsStat Testbed::Spatial::VDS::Schema::Result::Public::VdsSummarystat Testbed::Spatial::VDS::Schema::Result::Public::VdsTazIntersection Testbed::Spatial::VDS::Schema::Result::Public::VdsTazIntersectionsAlt Testbed::Spatial::VDS::Schema::Result::Public::VdsTazIntersectionsSimple Testbed::Spatial::VDS::Schema::Result::Public::Vdstype Testbed::Spatial::VDS::Schema::Result::Public::VdsVdstype Testbed::Spatial::VDS::Schema::Result::Public::VdsVersioned Testbed::Spatial::VDS::Schema::Result::Public::VdsWimDistance Testbed::Spatial::VDS::Schema::Result::Public::VehLengthLookup Testbed::Spatial::VDS::Schema::Result::Public::VoronoiPoly Testbed::Spatial::VDS::Schema::Result::Public::VoronoiPolysAlt Testbed::Spatial::VDS::Schema::Result::Public::VoronoiPolyJoined Testbed::Spatial::VDS::Schema::Result::Public::WimCounty Testbed::Spatial::VDS::Schema::Result::Public::WimData Testbed::Spatial::VDS::Schema::Result::Public::WimDataWeekday5min Testbed::Spatial::VDS::Schema::Result::Public::WimDistrict Testbed::Spatial::VDS::Schema::Result::Public::WimFreeway Testbed::Spatial::VDS::Schema::Result::Public::WimLaneByHourReport Testbed::Spatial::VDS::Schema::Result::Public::WimLaneDir Testbed::Spatial::VDS::Schema::Result::Public::WimLengthWeekDayFacility Testbed::Spatial::VDS::Schema::Result::Public::WimPoints4269 Testbed::Spatial::VDS::Schema::Result::Public::WimPoints4326 Testbed::Spatial::VDS::Schema::Result::Public::WimStation Testbed::Spatial::VDS::Schema::Result::Public::WimStatus Testbed::Spatial::VDS::Schema::Result::Public::WimStatus2010 Testbed::Spatial::VDS::Schema::Result::Public::WimStatusCode Testbed::Spatial::VDS::Schema::Result::Public::AirbasinView Testbed::Spatial::VDS::Schema::Result::Public::AnnualMainlineVolume 
Testbed::Spatial::VDS::Schema::Result::Public::DateIn2007 Testbed::Spatial::VDS::Schema::Result::Public::Design Testbed::Spatial::VDS::Schema::Result::Public::DesignFwyVehTime Testbed::Spatial::VDS::Schema::Result::Public::Distinctfacility Testbed::Spatial::VDS::Schema::Result::Public::GeographyColumn Testbed::Spatial::VDS::Schema::Result::Public::GeometryColumn Testbed::Spatial::VDS::Schema::Result::Public::MeanOnrTripDistance Testbed::Spatial::VDS::Schema::Result::Public::OctamLinkFlow Testbed::Spatial::VDS::Schema::Result::Public::OctamLinksGeom2230 Testbed::Spatial::VDS::Schema::Result::Public::OctamNetFlow Testbed::Spatial::VDS::Schema::Result::Public::OctamNetworkFlow Testbed::Spatial::VDS::Schema::Result::Public::OctamNetworkView Testbed::Spatial::VDS::Schema::Result::Public::OctamRawRamp Testbed::Spatial::VDS::Schema::Result::Public::OctamTazAll2000 Testbed::Spatial::VDS::Schema::Result::Public::OrderedNode Testbed::Spatial::VDS::Schema::Result::Public::PemsRawTest2Full Testbed::Spatial::VDS::Schema::Result::Public::RasterColumn Testbed::Spatial::VDS::Schema::Result::Public::RasterOverview Testbed::Spatial::VDS::Schema::Result::Public::TazShape Testbed::Spatial::VDS::Schema::Result::Public::Tempview2232542323 Testbed::Spatial::VDS::Schema::Result::Public::Tempview4832386255 Testbed::Spatial::VDS::Schema::Result::Public::Tempview5414516523 Testbed::Spatial::VDS::Schema::Result::Public::TmcpeData Testbed::Spatial::VDS::Schema::Result::Public::TmcpeDataCreate Testbed::Spatial::VDS::Schema::Result::Public::Trial Testbed::Spatial::VDS::Schema::Result::Public::Vds2007dataGeoview Testbed::Spatial::VDS::Schema::Result::Public::VdsCurrentOrdered Testbed::Spatial::VDS::Schema::Result::Public::VdsCurrentView Testbed::Spatial::VDS::Schema::Result::Public::VdsCurrentViewGrail Testbed::Spatial::VDS::Schema::Result::Public::VdsGeoview Testbed::Spatial::VDS::Schema::Result::Public::VdsGeoviewFull Testbed::Spatial::VDS::Schema::Result::Public::ViewOctamOnRamps Testbed::Spatial::VDS::Schema::Result::Public::Vv Testbed::Spatial::VDS::Schema::Result::Public::WimGeoview Testbed::Spatial::VDS::Schema::Result::Public::WimStationsGeoview Testbed::Spatial::VDS::Schema::Result::Hsis::Acc Testbed::Spatial::VDS::Schema::Result::Hsis::Access Testbed::Spatial::VDS::Schema::Result::Hsis::Acctype Testbed::Spatial::VDS::Schema::Result::Hsis::CasenoVd Testbed::Spatial::VDS::Schema::Result::Hsis::Case Testbed::Spatial::VDS::Schema::Result::Hsis::County Testbed::Spatial::VDS::Schema::Result::Hsis::Dirtrvl Testbed::Spatial::VDS::Schema::Result::Hsis::Dspd Testbed::Spatial::VDS::Schema::Result::Hsis::Hwygrp Testbed::Spatial::VDS::Schema::Result::Hsis::Light Testbed::Spatial::VDS::Schema::Result::Hsis::Loctyp Testbed::Spatial::VDS::Schema::Result::Hsis::Numlane Testbed::Spatial::VDS::Schema::Result::Hsis::Psmilprf Testbed::Spatial::VDS::Schema::Result::Hsis::Psmilsuf Testbed::Spatial::VDS::Schema::Result::Hsis::Rdsurf Testbed::Spatial::VDS::Schema::Result::Hsis::Road Testbed::Spatial::VDS::Schema::Result::Hsis::Rodwycl Testbed::Spatial::VDS::Schema::Result::Hsis::RteSuf Testbed::Spatial::VDS::Schema::Result::Hsis::Sdehwy Testbed::Spatial::VDS::Schema::Result::Hsis::Severity Testbed::Spatial::VDS::Schema::Result::Hsis::Veh Testbed::Spatial::VDS::Schema::Result::Hsis::Weather Testbed::Spatial::VDS::Schema::Result::Wim::Summaries5minClass Testbed::Spatial::VDS::Schema::Result::Wim::Summaries5minSpeed Testbed::Spatial::VDS::Schema::Result::Wim::SummariesDailySpeedClass 
Testbed::Spatial::VDS::Schema::Result::Newctmlmap::SegmentDetectorEvent Testbed::Spatial::VDS::Schema::Result::Newctmlmap::Twim Testbed::Spatial::VDS::Schema::Result::Newctmlmap::VdsSegmentGeometry Testbed::Spatial::VDS::Schema::Result::Newctmlmap::WimSegmentGeometry Testbed::Spatial::VDS::Schema::Result::Newctmlmap::VdsView Testbed::Spatial::VDS::Schema::Result::Newctmlmap::VdsViewQgi Testbed::Spatial::VDS::Schema::Result::Newctmlmap::WimView Testbed::Spatial::VDS::Schema::Result::Newctmlmap::WimViewQgi Testbed::Spatial::VDS::Schema::Result::Tempseg::Mostusedroadbit Testbed::Spatial::VDS::Schema::Result::Tempseg::TdetectorTable Testbed::Spatial::VDS::Schema::Result::Tempseg::Twim Testbed::Spatial::VDS::Schema::Result::Tempseg::Tdetector' --author='James Marca' --email='[email protected]'
jmarca/spatialvds_schema
module.starter.sh
Shell
gpl-2.0
11,395
#!/usr/bin/env bash
# jenkins build helper script for openbsc. This is how we build on jenkins.osmocom.org

if ! [ -x "$(command -v osmo-build-dep.sh)" ]; then
	echo "Error: We need to have scripts/osmo-deps.sh from http://git.osmocom.org/osmo-ci/ in PATH !"
	exit 2
fi

set -ex

base="$PWD"
deps="$base/deps"
inst="$deps/install"
export deps inst

osmo-clean-workspace.sh

mkdir "$deps" || true

set +x
echo
echo
echo
echo " =============================== rtl-sdr ==============================="
echo
set -x

cd "$base"
autoreconf --install --force
./configure --enable-sanitize --enable-werror
$MAKE $PARALLEL_MAKE
LD_LIBRARY_PATH="$inst/lib" $MAKE check \
  || cat-testlogs.sh
LD_LIBRARY_PATH="$inst/lib" \
  DISTCHECK_CONFIGURE_FLAGS="--enable-werror" \
  $MAKE distcheck \
  || cat-testlogs.sh

osmo-clean-workspace.sh
TA1DB/librtlsdr
contrib/jenkins.sh
Shell
gpl-2.0
828
#!/bin/sh

/sbin/ifconfig eth0 192.168.1.128 netmask 255.255.255.0
ifconfig eth0 up
route add -net 192.168.1.0 netmask 255.255.255.0 gw 192.168.1.1 dev eth0
sleep 2
ifconfig eth0 down
ifconfig eth0 up
xuguangmin/network_interface_to_serial
fileForUpload/filesystem/rootfs/etc/init.d/ifconfig-eth22.sh
Shell
gpl-2.0
205
#!/bin/bash
#Display what VMs are running on this xen host.
#Written by hfuller on 03 Feb 2015
#Known bugs: if your hostname is over three or four characters it might look bad on a 20 char display.
#This utilizes http://sourceforge.net/projects/lcdproc-client/ which should be in your path (and may be included with this).

while { echo "Running VMs on `hostname`:" & sudo xm list|tail -n +3|cut -d " " -f 1|xargs|fold -sw 20; } | lcdproc_client.py -t 60 -f -; do
	sleep 0
done
makerslocal/netadmin-scripts
xen_lcd.sh
Shell
gpl-2.0
479
#!/bin/sh

OUTFILE=/tmp/fingers.out
../../../bin/fingers > $OUTFILE 2>&1
cmp canonical.out $OUTFILE
if [ $? -ne 0 ]; then
	echo "!!! Failure. diff canonical.out $OUTFILE prints:"
	diff canonical.out $OUTFILE
else
	echo " *** Success"
fi
strnbrg59/stuff
fingers/test/test_all.sh
Shell
gpl-2.0
321
#!/bin/sh
#
# PM-QA validation test suite for the power management on Linux
#
# Copyright (C) 2011, Linaro Limited.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# Contributors:
#     Daniel Lezcano <[email protected]> (IBM Corporation)
#       - initial API and implementation
#
# URL : https://wiki.linaro.org/WorkingGroups/PowerManagement/Resources/TestSuite/PmQaSpecification#cpuhotplug_07

. ../include/functions.sh

TMPFILE=cpuhotplug_07.tmp
UEVENT_READER="../utils/uevent_reader"

check_notification() {

    cpu=$1

    if [ "$cpu" = "cpu0" ]; then
	is_cpu0_hotplug_allowed $hotplug_allow_cpu0 || return 0
    fi

    # damn ! udevadm is buffering the output, we have to use a temp file
    # to retrieve the output
    rm -f $TMPFILE
    $UEVENT_READER $TMPFILE &
    pid=$!

    sleep 1

    set_offline $cpu
    set_online $cpu

    # let the time the notification to reach userspace
    # and buffered in the file
    sleep 1

    kill -s INT $pid

    grep "offline@/devices/system/cpu/$cpu" $TMPFILE
    ret=$?
    check "offline event was received" "test $ret -eq 0"

    grep "online@/devices/system/cpu/$cpu" $TMPFILE
    ret=$?
    check "online event was received" "test $ret -eq 0"

    rm -f $TMPFILE
}

for_each_cpu check_notification
test_status_show
bigzz/pm-qa
cpuhotplug/cpuhotplug_07.sh
Shell
gpl-2.0
1,944
#!/usr/bin/env bash # # DESCRIPTION: # This scripts tests an example p4 alignment pipeline on an single tile runfolder. # # Copyright (c) 2014 Genome Research Ltd. # Author: Stefan Dang <[email protected]> # globals p4_templates=./p4_templates expected_output=./expected_output test_dir=./tmp vtf_dir=$test_dir/vtf out_dir=$test_dir/outdata cfg_dir=$test_dir/cfgdata tmp_dir=$test_dir/tmpdata err_dir=$test_dir/errdata rpt=12588_1 function download_testinput { wget -qO- ftp://ngs.sanger.ac.uk/production/dnap/npg/p4_docker_1.tar.gz | tar -xz } function run_flow { # create subdirectories mkdir -p $test_dir $vtf_dir $out_dir $cfg_dir $tmp_dir $err_dir # move template files to their own subdirectory cp ./$p4_templates/* $vtf_dir/ # preprocess subgraph vtf templates, convert to json grep -v "^#" $vtf_dir/bwa_mem_alignment.vtf | tr -d "\n\t" > $cfg_dir/bwa_mem_alignment.json &&\ grep -v "^#" $vtf_dir/post_alignment.vtf | tr -d "\n\t" > $cfg_dir/post_alignment.json &&\ grep -v "^#" $vtf_dir/post_alignment_filter.vtf | tr -d "\n\t" > $cfg_dir/post_alignment_filter.json &&\ grep -v "^#" $vtf_dir/seqchksum.vtf | tr -d "\n\t" > $cfg_dir/seqchksum.json # preprocess main template, convert to json vtfp.pl -l tmp/gawp.vtf.log -o tmp/gawp.json \ -keys bwa_executable -vals bwa \ -keys illumina2bam_jar -vals /usr/local/jars/Illumina2bam.jar \ -keys alignment_filter_jar -vals /usr/local/jars/AlignmentFilter.jar \ -keys outdatadir -vals ./$out_dir \ -keys cfgdatadir -vals ./$cfg_dir \ -keys tmpdir -vals ./$tmp_dir \ -keys i2b_intensity_dir -vals "$(pwd)/runfolder/Data/Intensities" \ -keys i2b_lane -vals 1 \ -keys i2b_library_name -vals myi2blib \ -keys i2b_sample_alias -vals myi2bsample \ -keys i2b_study_name -vals myi2bstudy \ -keys i2b_first_tile -vals 1101 \ -keys i2b_tile_limit -vals 1 \ -keys rpt -vals $rpt \ -keys alignment_method -vals bwa_mem \ -keys reposdir -vals "$(pwd)/references" \ -keys alignment_refname_target -vals Escherichia_coli/E_coli_B_strain.fasta \ -keys alignment_refname_phix -vals PhiX/phix_unsnipped_short_no_N.fa \ -keys picard_dict_name_target -vals Escherichia_coli/E_coli_B_strain.fasta.dict \ -keys picard_dict_name_phix -vals PhiX/phix_unsnipped_short_no_N.fa.dict \ -keys refname_fasta_target -vals Escherichia_coli/E_coli_B_strain.fasta \ -keys refname_fasta_phix -vals PhiX/phix_unsnipped_short_no_N.fa \ -keys aligner_numthreads -vals 2 \ -keys java_cmd -vals java \ <(grep -v "^#" $vtf_dir/generic_alignment_with_phix.vtf | sed -e "s/^ *//" | tr -d "\n\t") # run flow viv.pl -x -s -v 3 -o tmp/gawp.log tmp/gawp.json mv ./*.err $err_dir/ } function compare_results { cmp $expected_output/12588_1.bam.md5 $out_dir/$rpt.bam.md5 cmp $expected_output/12588_1_in.bamseqchecksum $out_dir/"$rpt"_in.bamseqchecksum cmp $expected_output/12588_1_out.bamseqchecksum $out_dir/"$rpt"_out.bamseqchecksum cmp $expected_output/12588_1.bamstats $out_dir/$rpt.bamstats cmp $expected_output/12588_1.flagstat $out_dir/$rpt.flagstat cmp $expected_output/12588_1_phix.bamstats $out_dir/"$rpt"_phix.bamstats cmp $expected_output/12588_1_phix.flagstat $out_dir/"$rpt"_phix.flagstat } function main { download_testinput run_flow compare_results exit 0 } main
wtsi-npg/npg_docker
p4/test/test.sh
Shell
gpl-2.0
3,296
./SimpleViewer Heraklion_urban_green_sub.tif
./MeanShiftSegmentation Heraklion_urban_green_sub.tif MSFilteredOut.tif MSClusteredOut.tif MSLabeledOut.tif MSBoundOut.tif MSFilteredPretty.jpg MSClusteredPretty.jpg 7 30 10 1.0
gdal_translate -b 3 -b 2 -b 1 MSClusteredPretty.jpg Heraklion_ObjectMeans.png
gdal_translate -ot Byte -of JPEG -scale MSBoundOut.tif Heraklion_Objects.jpg
./OneImageBandMath Heraklion_urban_green_sub.tif urban_green.img urban_green.png "if((b4-b3)/(b4+b3) > 0.2, 255, 0)"
gnorasi/gnorasi
Utilities/urban_green.sh
Shell
gpl-2.0
495
#!/bin/bash

ip -6 addr del 2001:100::2/64 dev eth0
ip -6 addr del 2001:1::1/64 dev eth1
ip -6 tunnel del ip6tnl1
ip -6 tunnel del ip6tnl2
rmmod ip6_tunnel
rmmod tunnel6

ip -6 addr add 2001:100::2/64 dev eth0
ip -6 addr add 2001:1::1/64 dev eth1

echo "0" > /proc/sys/net/ipv6/conf/all/accept_ra
echo "0" > /proc/sys/net/ipv6/conf/eth1/accept_ra
echo "0" > /proc/sys/net/ipv6/conf/eth0/accept_ra
echo "1" > /proc/sys/net/ipv6/conf/all/forwarding

ip -6 route add to default via 2001:100::1 dev eth0

modprobe ip6_tunnel
modprobe tunnel6

xhost +; export DISPLAY=:0.0;

rm -f /usr/local/src/mipv6-daemon-umip-0.4/logs/mag12lma.pcap ; sync;
wireshark -i eth0 -k -n -w /usr/local/src/mipv6-daemon-umip-0.4/logs/mag12lma.pcap &
rm -f /usr/local/src/mipv6-daemon-umip-0.4/logs/mag12ap.pcap ; sync;
wireshark -i eth1 -k -n -w /usr/local/src/mipv6-daemon-umip-0.4/logs/mag12ap.pcap &

pmip6d -c /usr/local/src/mipv6-daemon-umip-0.4/extras/example-mag1.conf
NetworkingGroupSKKU/Buffering-Scheme-in-PMIPv6
pmipv6-daemon-umip-0.4/extras/UMIP0.4_MAG1_UBUNTU.10.04.sh
Shell
gpl-2.0
955
#! /bin/sh -e
# tup - A file-based build system
#
# Copyright (C) 2008-2020  Mike Shal <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

# This is basically t5003, but after creating a new file and deleting an old
# file we run with a memory checker. I tried to get an example that exercises
# a fair bit of the usual functionality in a single trace.

echo "TODO: use valgrind? mtrace doesn't work with threads"
exit 0

. ./tup.sh

cat > Tupfile << HERE
: foreach *.c |> gcc -c %f -o %o |> %B.o
: *.o |> ar cru %o %f |> libfoo.a
HERE

# Verify both files are compiled
echo "int foo(void) {return 0;}" > foo.c
echo "void bar1(void) {}" > bar.c
tup touch foo.c bar.c
update
sym_check foo.o foo
sym_check bar.o bar1
sym_check libfoo.a foo bar1

# Rename bar.c to realbar.c.
mv bar.c realbar.c
tup rm bar.c
tup touch realbar.c
MALLOC_TRACE=mout tup upd

# Still seem to be some leaks in sqlite, even though I'm finalizing the
# statements and doing an sqlite3_close(). Maybe I'm missing something.
cat mout | grep -v libsqlite3.so | mtrace `which tup` -

eotup
ppannuto/tup
test/t5006-move-c-file-memcheck.sh
Shell
gpl-2.0
1,676
#!/bin/sh

get_common_syscalls() {
	cat <<EOF
# filter that works ok for true
open
close
mmap
mmap2
munmap
mprotect
fstat
fstat64
access
read
brk
execve
arch_prctl
exit_group
geteuid
geteuid32
getuid
getuid32
setresuid
setresuid32
setgid
setgid32
setuid
setuid32
set_thread_area
EOF
}

L="$(pwd)/snap-confine/snap-confine"
export L

TMP="$(mktemp -d)"
trap 'rm -rf $TMP' EXIT
export SNAPPY_LAUNCHER_SECCOMP_PROFILE_DIR="$TMP"
export SNAPPY_LAUNCHER_INSIDE_TESTS="1"
export SNAP_CONFINE_NO_ROOT=1
export SNAP_NAME=name

FAIL() {
	printf ": FAIL\n"
	exit 1
}

PASS() {
	printf ": PASS\n"
}
femdom/snapd
cmd/snap-confine/tests/common.sh
Shell
gpl-3.0
608
#!/bin/bash

python RunBootstrap.py --paralog1 YLR406C --paralog2 YDL075W --bootnum 84 > YLR406C_YDL075W_Boot84_PrintScreen.txt
xjw1001001/IGCexpansion
Bootstrap/ShFiles/MG94_YLR406C_YDL075W_Boot84.sh
Shell
gpl-3.0
128
cd ..
rm -r temp
mkdir temp
mkdir temp/DEBIAN
mkdir temp/usr
mkdir temp/usr/bin
cp DEBIAN/control-core temp/DEBIAN/control
cp DEBIAN/copyright temp/DEBIAN
cp bin/Debug/*.dll temp/usr/bin/
cp bin/Debug/*.dll.mdb temp/usr/bin/
dpkg -b temp DEBIAN/sentience-core.deb
cd DEBIAN
alien -r sentience-core.deb
cd ..
rm -r temp
mkdir temp
mkdir temp/DEBIAN
mkdir temp/usr
mkdir temp/usr/bin
cp DEBIAN/control temp/DEBIAN
cp DEBIAN/copyright temp/DEBIAN
cp bin/Debug/stereosensormodel.* temp/usr/bin/
dpkg -b temp DEBIAN/stereosensormodel.deb
cd DEBIAN
alien -r stereosensormodel.deb
bashrc/sentience
applications/sensormodel/DEBIAN/makepackage.sh
Shell
gpl-3.0
575
#!/bin/bash

filename=$(basename "$1")
filename=${filename/md/pdf}

pandoc -f markdown "$1" --template=$HOME/Docs/mytemplate.tex \
    -V geometry:"left=0.75in, right=0.75in" \
    -V version=1.0 \
    -V mainfont="Nimbus Sans" \
    -V monofont="Nimbus Mono" \
    -o "$HOME/Docs/Done/$filename"

rm "$1"
ryoung29/my-scripts
bash/mkpdf.sh
Shell
gpl-3.0
304
#!/bin/bash
#
#
#
#
#
. ./downlink.link
cd CLIENT_DATA/
wget $LINKFIREFOX -O current_Firefox.exe
exit 0
kernt/opsi_tools
firefox/getdownload.sh
Shell
gpl-3.0
109
( tar -cf testfile.tar ../src/*.* && \
  ../src/peek testfile.tar && \
  rm -f testfile.tar ) >/dev/null 2>&1 || exit 1
znxster/peek
tests/Tar.sh
Shell
gpl-3.0
124
# mod-utilities

. $ZYNTHIAN_DIR/zynthian-recipe/recipe/_zynth_lib.sh

cd $ZYNTHIAN_SW_DIR/plugins
zynth_git https://github.com/moddevices/mod-utilities.git
if [ ${?} -ne 0 -o "${build}" = "build" ]
then
	zynth_build_request clear
	cd mod-utilities
	quoted_ZYNTHIAN_PLUGINS_DIR=`quote_path ${ZYNTHIAN_PLUGINS_DIR}/lv2`
	sed -i -- "s/^INSTALL_PATH = \/usr\/local\/lib\/lv2/INSTALL_PATH = ${quoted_ZYNTHIAN_PLUGINS_DIR}/" Makefile
	make MOD=1
	sudo make MOD=1 install
	zynth_build_request ready
	make clean
	cd ..
fi
dcoredump/zynthian-recipe
recipe/mod-utilities.sh
Shell
gpl-3.0
515
#!/bin/sh
docker stop webapp
docker rm webapp
jianyingdeshitou/docker-sample
dockerbook-code/4/webapp/test/rm.sh
Shell
gpl-3.0
46
#!/bin/sh # File: papi.c # CVS: $Id$ # Author: Philip Mucci # [email protected] # Mods: Kevin London # [email protected] # Philip Mucci # [email protected] # if make sure that the tests are built if [ "x$BUILD" != "x" ]; then cd testlib; make; cd .. cd ctests; make; cd .. cd ftests; make; cd .. for comp in `ls components/*/tests` ; do \ cd components/$$comp/tests ; make; cd ../../.. ; done fi AIXTHREAD_SCOPE=S export AIXTHREAD_SCOPE if [ "X$1" = "X-v" ]; then shift ; TESTS_QUIET="" else # This should never have been an argument, but an environment variable! TESTS_QUIET="TESTS_QUIET" export TESTS_QUIET fi if [ "x$VALGRIND" != "x" ]; then VALGRIND="valgrind --leak-check=full"; fi #CTESTS=`find ctests -maxdepth 1 -perm -u+x -type f`; CTESTS=`find ctests/* -prune -perm -u+x -type f ! -name "*.[c|h]"`; FTESTS=`find ftests -perm -u+x -type f ! -name "*.[c|h|F]"`; COMPTESTS=`find components/*/tests -perm -u+x -type f ! \( -name "*.[c|h]" -o -name "*.cu" \)`; #EXCLUDE=`grep --regexp=^# --invert-match run_tests_exclude.txt` EXCLUDE=`grep -v -e '^#\|^$' run_tests_exclude.txt` ALLTESTS="$CTESTS $FTESTS $COMPTESTS"; x=0; CWD=`pwd` PATH=./ctests:$PATH export PATH echo "Platform:" uname -a echo "Date:" date echo "" if [ -r /proc/cpuinfo ]; then echo "Cpuinfo:" # only print info on first processor on x86 sed '/^$/q' /proc/cpuinfo fi echo "" if ["$VALGRIND" = ""]; then echo "The following test cases will be run:"; else echo "The following test cases will be run using valgrind:"; fi echo "" MATCH=0 LIST="" for i in $ALLTESTS; do for xtest in $EXCLUDE; do if [ "$i" = "$xtest" ]; then MATCH=1 break fi; done if [ `basename $i` = "Makefile" ]; then MATCH=1 fi; if [ $MATCH -ne 1 ]; then LIST="$LIST $i" fi; MATCH=0 done echo $LIST echo "" CUDA=`find Makefile | xargs grep cuda`; if [ "$CUDA" != "" ]; then EXCLUDE="$EXCLUDE `grep -v -e '^#\|^$' run_tests_exclude_cuda.txt`" fi echo "" echo "The following test cases will NOT be run:"; echo $EXCLUDE; echo ""; echo "Running C Tests"; echo "" if [ "$LD_LIBRARY_PATH" = "" ]; then LD_LIBRARY_PATH=.:./libpfm-3.y/lib else LD_LIBRARY_PATH=.:./libpfm-3.y/lib:"$LD_LIBRARY_PATH" fi export LD_LIBRARY_PATH if [ "$LIBPATH" = "" ]; then LIBPATH=.:./libpfm-3.y/lib else LIBPATH=.:./libpfm-3.y/lib:"$LIBPATH" fi export LIBPATH for i in $CTESTS; do for xtest in $EXCLUDE; do if [ "$i" = "$xtest" ]; then MATCH=1 break fi; done if [ `basename $i` = "Makefile" ]; then MATCH=1 fi; if [ $MATCH -ne 1 ]; then if [ -x $i ]; then if [ "$i" = "ctests/timer_overflow" ]; then echo Skipping test $i, it takes too long... else RAN="$i $RAN" printf "Running $i:"; $VALGRIND ./$i $TESTS_QUIET fi; fi; fi; MATCH=0 done echo "" echo "Running Fortran Tests"; echo "" for i in $FTESTS; do for xtest in $EXCLUDE; do if [ "$i" = "$xtest" ]; then MATCH=1 break fi; done if [ `basename $i` = "Makefile" ]; then MATCH=1 fi; if [ $MATCH -ne 1 ]; then if [ -x $i ]; then RAN="$i $RAN" printf "Running $i:"; $VALGRIND ./$i $TESTS_QUIET fi; fi; MATCH=0 done echo ""; echo "Running Component Tests"; echo "" for i in $COMPTESTS; do for xtest in $EXCLUDE; do if [ "$i" = "$xtest" ]; then MATCH=1 break fi; done if [ `basename $i` = "Makefile" ]; then MATCH=1 fi; if [ $MATCH -ne 1 ]; then if [ -x $i ]; then RAN="$i $RAN" printf "Running $i:"; $VALGRIND ./$i $TESTS_QUIET fi; fi; MATCH=0 done if [ "$RAN" = "" ]; then echo "FAILED to run any tests. (you can safely ignore this if this was expected behavior)" fi;
JNPA/DPAcalc
dependencies/papi-5.3.0/papi-5.3.0/src/run_tests.sh
Shell
gpl-3.0
3,796
#!/bin/sh # $XTermId: 8colors.sh,v 1.13 2003/05/19 00:52:30 tom Exp $ # ----------------------------------------------------------------------------- # this file is part of xterm # # Copyright 1999-2002,2003 by Thomas E. Dickey # # All Rights Reserved # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE ABOVE LISTED COPYRIGHT HOLDER(S) BE LIABLE FOR ANY # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # # Except as contained in this notice, the name(s) of the above copyright # holders shall not be used in advertising or otherwise to promote the # sale, use or other dealings in this Software without prior written # authorization. # ----------------------------------------------------------------------------- # Show a simple 8-color test pattern ESC="" CMD='echo' OPT='-n' SUF='' TMP=/tmp/xterm$$ eval '$CMD $OPT >$TMP || echo fail >$TMP' 2>/dev/null ( test ! -f $TMP || test -s $TMP ) && for verb in printf print ; do rm -f $TMP eval '$verb "\c" >$TMP || echo fail >$TMP' 2>/dev/null if test -f $TMP ; then if test ! -s $TMP ; then CMD="$verb" OPT= SUF='\c' break fi fi done rm -f $TMP if ( trap "echo exit" EXIT 2>/dev/null ) >/dev/null then trap '$CMD $OPT ""; exit' EXIT HUP INT TRAP TERM else trap '$CMD $OPT ""; exit' 0 1 2 5 15 fi echo "" while true do for AT in 0 1 4 7 do case $AT in 0) attr="normal ";; 1) attr="bold ";; 4) attr="under ";; 7) attr="reverse ";; esac for FG in 0 1 2 3 4 5 6 7 do case $FG in 0) fcolor="black ";; 1) fcolor="red ";; 2) fcolor="green ";; 3) fcolor="yellow ";; 4) fcolor="blue ";; 5) fcolor="magenta ";; 6) fcolor="cyan ";; 7) fcolor="white ";; esac $CMD $OPT "[0;${AT}m$attr" $CMD $OPT "[3${FG}m$fcolor" for BG in 1 2 3 4 5 6 7 do case $BG in 0) bcolor="black ";; 1) bcolor="red ";; 2) bcolor="green ";; 3) bcolor="yellow ";; 4) bcolor="blue ";; 5) bcolor="magenta ";; 6) bcolor="cyan ";; 7) bcolor="white ";; esac $CMD $OPT "[4${BG}m$bcolor" done echo "" done sleep 1 done done
chriskmanx/qmole
QMOLEDEV/xterm-270/vttests/8colors.sh
Shell
gpl-3.0
3,113
mvn -q -Dmaven.test.skip=true install
GDCN/GDCN
GDCN_proj/build.sh
Shell
gpl-3.0
38
#!/bin/bash

. settings.sh

BASEDIR=$2

case $1 in
  armeabi)
    NDK_ABI='arm'
    NDK_TOOLCHAIN_ABI='arm-linux-androideabi'
  ;;
  armeabi-v7a)
    NDK_ABI='arm'
    NDK_TOOLCHAIN_ABI='arm-linux-androideabi'
    ARCH_CPU='armv7-a'
    CFLAGS="$CFLAGS -march=$ARCH_CPU"
  ;;
esac

TOOLCHAIN_PREFIX=${BASEDIR}/toolchain-android
if [ ! -d "$TOOLCHAIN_PREFIX" ]; then
  ${ANDROID_NDK_ROOT_PATH}/build/tools/make-standalone-toolchain.sh --toolchain=${NDK_TOOLCHAIN_ABI}-${NDK_TOOLCHAIN_ABI_VERSION} --platform=android-${ANDROID_API_VERSION} --install-dir=${TOOLCHAIN_PREFIX}
fi

CROSS_PREFIX=${TOOLCHAIN_PREFIX}/bin/${NDK_TOOLCHAIN_ABI}-
NDK_SYSROOT=${TOOLCHAIN_PREFIX}/sysroot

export CFLAGS="${CFLAGS} -Wformat -Wformat-security -Werror=format-security --param ssp-buffer-size=4 -fstack-protector -D_FORTIFY_SOURCE=2 -I${TOOLCHAIN_PREFIX}/include"
export LDFLAGS="-Wl,-z,relro -Wl,-z,now -pie -L${TOOLCHAIN_PREFIX}/lib"
export CPPFLAGS='-Wformat -Wformat-security -Werror=format-security --param ssp-buffer-size=4 -fstack-protector -D_FORTIFY_SOURCE=2'
export PKG_CONFIG_LIBDIR="${TOOLCHAIN_PREFIX}/lib/pkgconfig"
export CC="${CROSS_PREFIX}gcc --sysroot=${NDK_SYSROOT}"
export LD="${CROSS_PREFIX}ld"
export RANLIB="${CROSS_PREFIX}ranlib"
export STRIP="${CROSS_PREFIX}strip"
export READELF="${CROSS_PREFIX}readelf"
export OBJDUMP="${CROSS_PREFIX}objdump"
export ADDR2LINE="${CROSS_PREFIX}addr2line"
export AR="${CROSS_PREFIX}ar"
export AS="${CROSS_PREFIX}as"
export CXX="${CROSS_PREFIX}g++"
export OBJCOPY="${CROSS_PREFIX}objcopy"
export ELFEDIT="${CROSS_PREFIX}elfedit"
export CPP="${CROSS_PREFIX}cpp"
export DWP="${CROSS_PREFIX}dwp"
export GCONV="${CROSS_PREFIX}gconv"
export GDP="${CROSS_PREFIX}gdb"
export GPROF="${CROSS_PREFIX}gprof"
export NM="${CROSS_PREFIX}nm"
export SIZE="${CROSS_PREFIX}size"
export STRINGS="${CROSS_PREFIX}strings"
hiteshsondhi88/android-curl
abi_settings.sh
Shell
gpl-3.0
1,844
#!/bin/sh
set -e

autoreconf -if
lexical/betaradio
autogen.sh
Shell
gpl-3.0
34
#!/bin/sh
# Note: the redirect is written in POSIX form; "&>" is a bashism and
# does not silence whoami under a plain /bin/sh.
if ! whoami > /dev/null 2>&1; then
  if [ -w /etc/passwd ]; then
    echo "${USER_NAME:-default}:x:$(id -u):0:${USER_NAME:-default} user:${HOME}:/sbin/nologin" >> /etc/passwd
  fi
fi
exec "$@"
jfx/ci-report
docker/uid-entrypoint.sh
Shell
gpl-3.0
201
#!/bin/bash

current=$(dconf read /org/gnome/desktop/input-sources/xkb-options)
swapped="['caps:swapescape']"
capslock="['caps:capslock']"

echo "Current status: $current"

if [ "$current" == "$swapped" ]
then
    echo "Making caps and escape WORK NORMALLY"
    dconf write /org/gnome/desktop/input-sources/xkb-options $capslock
elif [ "$current" == "$capslock" ]
then
    echo "Swapping caps and escape"
    dconf write /org/gnome/desktop/input-sources/xkb-options $swapped
else
    echo "caps is not swapescaped nor capslock. Doing nothing."
fi
QuitHub/dotfiles
swapesc.sh
Shell
gpl-3.0
546
# input-output-functions.sh # # NOTE: This is the first file to be sourced (because of _ in the name) which is why # it contains some special stuff like EXIT_TASKS that I want to be available everywhere # input-output functions for Relax-and-Recover # # Relax-and-Recover is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # Relax-and-Recover is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with Relax-and-Recover; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA # # # the sequence $'...' is an special bash expansion with backslash-escaped characters # see "Words of the form $'string' are treated specially" in "man bash" # that works at least down to bash 3.1 in SLES10: LF=$'\n' # collect exit tasks in this array # without the empty string as initial value ${EXIT_TASKS[@]} would be an unbound variable # that would result an error exit if 'set -eu' is used: EXIT_TASKS=("") # add $* as a task to be done at the end function AddExitTask () { # NOTE: we add the task at the beginning to make sure that they are executed in reverse order # I use $* on purpose because I want to get one string from all args! EXIT_TASKS=( "$*" "${EXIT_TASKS[@]}" ) Debug "Added '$*' as an exit task" } function QuietAddExitTask () { # I use $* on purpose because I want to get one string from all args! EXIT_TASKS=( "$*" "${EXIT_TASKS[@]}" ) } # remove $* from the task list function RemoveExitTask () { local removed="" exit_tasks="" for (( c=0 ; c<${#EXIT_TASKS[@]} ; c++ )) ; do if test "${EXIT_TASKS[c]}" = "$*" ; then # the ' ' protect from bash expansion, however unlikely to have a file named EXIT_TASKS in pwd... unset 'EXIT_TASKS[c]' removed=yes Debug "Removed '$*' from the list of exit tasks" fi done if ! test "$removed" = "yes" ; then exit_tasks="$( for task in "${EXIT_TASKS[@]}" ; do echo "$task" ; done )" Log "Could not remove exit task '$*' (not found). Exit Tasks: '$exit_tasks'" fi } # do all exit tasks function DoExitTasks () { Log "Running exit tasks." # kill all running jobs JOBS=( $( jobs -p ) ) # when "jobs -p" results nothing then JOBS is still an unbound variable so that # an empty default value is used to avoid 'set -eu' error exit if $JOBS is unset: if test -n ${JOBS:-""} ; then Log "The following jobs are still active:" jobs -l >&2 kill -9 "${JOBS[@]}" >&2 # allow system to clean up after killed jobs sleep 1 fi for task in "${EXIT_TASKS[@]}" ; do Debug "Exit task '$task'" eval "$task" done } # activate the trap function builtin trap "DoExitTasks" 0 # keep PID of main process readonly MASTER_PID=$$ # duplication STDOUT to fd7 to use for Print exec 7>&1 QuietAddExitTask "exec 7>&-" # USR1 is used to abort on errors, not using Print to always print to the original STDOUT, even if quiet builtin trap "echo 'Aborting due to an error, check $LOGFILE for details' >&7 ; kill $MASTER_PID" USR1 # make sure nobody else can use trap function trap () { BugError "Forbidden use of trap with '$@'. Use AddExitTask instead." } # Check if any of the arguments is executable (logical OR condition). 
# Using plain "type" without any option because has_binary is intended # to know if there is a program that one can call regardless if it is # an alias, builtin, function, or a disk file that would be executed # see https://github.com/rear/rear/issues/729 function has_binary () { for bin in $@ ; do if type $bin >&8 2>&1 ; then return 0 fi done return 1 } # Get the name of the disk file that would be executed. # In contrast to "type -p" that returns nothing for an alias, builtin, or function, # "type -P" forces a PATH search for each NAME, even if it is an alias, builtin, # or function, and returns the name of the disk file that would be executed # see https://github.com/rear/rear/issues/729 function get_path () { type -P $1 2>&8 } Error() { # If first argument is numerical, use it as exit code if [ $1 -eq $1 ] 2>&8; then EXIT_CODE=$1 shift else EXIT_CODE=1 fi VERBOSE=1 LogPrint "ERROR: $*" if has_binary caller; then # Print stack strace on errors in reverse order ( echo "=== Stack trace ===" local c=0; while caller $((c++)); do :; done | awk ' { l[NR]=$3":"$1" "$2 } END { for (i=NR; i>0;) print "Trace "NR-i": "l[i--] } ' echo "Message: $*" echo "===================" ) >&2 fi LogToSyslog "ERROR: $*" kill -USR1 $MASTER_PID # make sure that Error exits the master process, even if called from child processes :-) } StopIfError() { # If return code is non-zero, bail out if (( $? != 0 )); then Error "$@" fi } BugError() { # If first argument is numerical, use it as exit code if [ $1 -eq $1 ] 2>&8; then EXIT_CODE=$1 shift else EXIT_CODE=1 fi Error "BUG BUG BUG! " "$@" " === Issue report === Please report this unexpected issue at: https://github.com/rear/rear/issues Also include the relevant bits from $LOGFILE HINT: If you can reproduce the issue, try using the -d or -D option ! ====================" } BugIfError() { # If return code is non-zero, bail out if (( $? != 0 )); then BugError "$@" fi } function Debug () { test -n "$DEBUG" && Log "$@" || true } function Print () { test -n "$VERBOSE" && echo -e "$*" >&7 || true } # print if there is an error PrintIfError() { # If return code is non-zero, bail out if (( $? != 0 )); then Print "$@" fi } if [[ "$DEBUG" || "$DEBUGSCRIPTS" ]]; then Stamp() { date +"%Y-%m-%d %H:%M:%S.%N " } else Stamp() { date +"%Y-%m-%d %H:%M:%S " } fi function Log () { if test $# -gt 0 ; then echo "$(Stamp)$*" else echo "$(Stamp)$(cat)" fi >&2 } # log if there is an error LogIfError() { # If return code is non-zero, bail out if (( $? != 0 )); then Log "$@" fi } function LogPrint () { Log "$@" Print "$@" } # log/print if there is an error LogPrintIfError() { # If return code is non-zero, bail out if (( $? != 0 )); then LogPrint "$@" fi } # setup dummy progress subsystem as a default # not VERBOSE, Progress stuff replaced by dummy/noop exec 8>/dev/null # start ProgressPipe listening at fd 8 QuietAddExitTask "exec 8>&-" # new method, close fd 8 at exit ProgressStart() { : ; } ProgressStop() { : ; } ProgressError() { : ; } ProgressStep() { : ; } ProgressInfo() { : ; } LogToSyslog() { # send a line to syslog or messages file with input string logger -t rear -i "$*" }
dagwieers/rear
usr/share/rear/lib/_input-output-functions.sh
Shell
gpl-3.0
7,122
#!/bin/sh

# a handy script to get rid of festa dies after font editing
# chris niven 2010

#rm -v ~/aruspix/trunk/osx/install_root/Library/Fonts/FestaDiesA.dfont
#rm -v ~/aruspix/trunk/osx/install_root/Library/Fonts/FestaDiesB.dfont
#rm -v ~/aruspix/trunk/varia/font/FestaDiesA.dfont
#rm -v ~/aruspix/trunk/varia/font/FestaDiesB.dfont
#rm -v ~/aruspix/trunk/varia/font/FestaDiesA.ttf
#rm -v ~/aruspix/trunk/varia/font/FestaDiesB.ttf
#rm -v ~/Library/Fonts/FestaDiesB.dfont

rm -v ~/Library/Fonts/FestaDiesA.dfont
rm -v ~/aruspix/trunk/osx/install_root/Libary/Fonts/FestaDiesA.dfont
DDMAL/aruspix
osx/remove_festa_dies.sh
Shell
gpl-3.0
582
#!/bin/bash
# 3.3.2 Ensure IPv6 redirects are not accepted (Scored)

BENCHMARKNUM='cis_benchmark_3_3_2'
RESULT=
PARAMS=(
  net.ipv6.conf.all.accept_redirects
  net.ipv6.conf.default.accept_redirects
)

for P in ${PARAMS[@]}
do
  RESULT=$( /sbin/sysctl $P | /bin/grep -v "^$P = 0" )
  if [[ $RESULT ]]
  then
    break
  fi
done

if [[ -z $RESULT ]]
then
  echo "${BENCHMARKNUM}=passed"
else
  echo "${BENCHMARKNUM}=failed"
fi
proletaryo/puppet-ciscentos6
files/scripts/benchmark-3.3.2.sh
Shell
gpl-3.0
429
#!/bin/bash
mkdir -p build
git submodule init
git submodule update
cp -r ledscape build
rm ledscape/.git
cp *.cc Makefile build
mv *.gyp build
cp -r src build
cp -r overwrite build
export NPMBIN=$(npm bin)
cd build
make premake
make
cd ..
seriousmumbo/ledscape-node
build.sh
Shell
gpl-3.0
240
: ${test_timeout:="5 minutes"} : ${replication_timeout:="90 seconds"} : ${number_of_chunkservers:=12} : ${goals="2 3 4 5 6 7 8 9 xor2 xor3 xor4 xor5 xor6 xor7 xor8 xor9"} : ${verify_file_content=YES} # Returns list of all chunks in the following format: # chunk 0000000000000001_00000001 parity 6 # chunk 0000000000000001_00000001 part 1/6 # chunk 0000000000000003_00000001 # chunk 0000000000000004_00000001 part 1/3 get_list_of_chunks() { lizardfs fileinfo */* | awk '/\tchunk/{id=$3} /\tcopy/{print "chunk",id,$4,$5}' | sort } timeout_set "$test_timeout" CHUNKSERVERS=$number_of_chunkservers \ USE_RAMDISK=YES \ MOUNT_EXTRA_CONFIG="mfscachemode=NEVER" \ MASTER_EXTRA_CONFIG="CHUNKS_LOOP_MIN_TIME = 1` `|CHUNKS_LOOP_MAX_CPU = 90` `|CHUNKS_WRITE_REP_LIMIT = 10` `|OPERATIONS_DELAY_INIT = 0` `|OPERATIONS_DELAY_DISCONNECT = 0` `|ACCEPTABLE_DIFFERENCE = 10" \ setup_local_empty_lizardfs info # Create files with goals from the $goals list cd "${info[mount0]}" for goal in $goals; do dir="dir_$goal" mkdir "$dir" lizardfs setgoal "$goal" "$dir" FILE_SIZE=1M file-generate "$dir/file" done # Remember list of all available chunks, stop one of the chunkservers and wait for replication chunks_before=$(get_list_of_chunks) lizardfs_chunkserver_daemon 0 stop echo "Waiting $replication_timeout for replication..." end_time=$(date +%s -d "$replication_timeout") while (( $(date +%s) < end_time )); do chunks=$(get_list_of_chunks) if [[ "$chunks" == "$chunks_before" ]]; then break; fi sleep 1 done if [[ "$chunks" != "$chunks_before" ]]; then diff=$(diff <(echo "$chunks_before") <(echo "$chunks") | grep '^[<>]' || true) test_fail "Replication did not succeed in $replication_timeout. Difference:"$'\n'"$diff" fi if [[ $verify_file_content == YES ]]; then for ((csid=1; csid < number_of_chunkservers; ++csid)); do lizardfs_chunkserver_daemon $csid stop file-validate */* lizardfs_chunkserver_daemon $csid start lizardfs_wait_for_ready_chunkservers $((number_of_chunkservers - 1)) done fi
lizardfs/lizardfs
tests/test_suites/ShortSystemTests/test_chunk_replication.sh
Shell
gpl-3.0
2,028
#%% After arg.sh #%% Before package.sh #%% Private _service_set_version() { sed -i "s/Version:.*/Version: ${PKG_VERSION}/g" ${PKG_NAME}.spec } >/dev/null _service_download_url() { # Return if file exists or download url not set [ -v PKG_DOWNLOAD_URL ] || return 1 [ -f ${PKG_NAME}-${PKG_VERSION}.tar.gz ] && return 1 # Download the file and store curl output local curl_output=$(curl -JLO ${PKG_DOWNLOAD_URL}) # Extract download filename from curl output local filename=$(grep -o -P "(?<=filename ').*(?=')" <<< "$curl_output") # Prefix download filename with tmp_vendor_ mv {,tmp_vendor_}$filename } >/dev/null _service_unpack() { local from_file=$(ls tmp_vendor_* | head -1) local to_dir="tmp_vendor_dir/" mkdir -p $to_dir case $from_file in *.tar.bz2) tar xjf $from_file -C $to_dir ;; *.tar.gz) tar xzf $from_file -C $to_dir ;; *.tar.xz) tar xJf $from_file -C $to_dir ;; *.tar) tar xf $from_file -C $to_dir ;; *.tbz2) tar xjf $from_file -C $to_dir ;; *.tgz) tar xzf $from_file -C $to_dir ;; *.zip) unzip $from_file -d $to_dir ;; *.7z) 7z x $to_dir ;; *.bz2) bunzip2 $to_dir ;; *.gz) gunzip $to_dir ;; *.xz) unxz $to_dir ;; *) cmd_error "unknown download file format" ;; esac } >/dev/null _service_correct_root() { local correct_root="${PKG_NAME}-${PKG_VERSION}" # Pass if we have root with correct name already [ -d tmp_vendor_dir/$correct_root ] && return 0 pushd tmp_vendor_dir # If there is a root dir, rename it # Otherwise create the root and move files to it local child_dirs=$(ls -l | grep ^d | wc -l) if [ $child_dirs -eq 1 ]; then mv ./*/ $correct_root else mkdir $correct_root mv !($correct_root) $correct_root fi popd } >/dev/null _service_repack() { local from_dir="tmp_vendor_dir" local to_file="${PKG_NAME}-${PKG_VERSION}.tar.gz" _service_correct_root tar -C $from_dir -czf $to_file . } >/dev/null _service_run_hook() { local hook_name=$1; shift if [ "$(type -t $hook_name)" = 'function' ]; then eval $hook_name fi } _service_delete_tmpfiles() { rm -fr tmp_* } >/dev/null #%% Public service_setup() { pushd $PKG_DIR >/dev/null source service.sh _service_set_version _service_download_url && (_service_unpack; _service_repack) _service_run_hook 'pkg_service_hook' popd >/dev/null } service_cleanup() { pushd $PKG_DIR >/dev/null _service_run_hook 'pkg_cleanup_hook' _service_delete_tmpfiles popd >/dev/null }
mkrawiec/builddock
include/service.sh
Shell
gpl-3.0
2,746
#!/bin/zsh

SALT=`date +%N`

if [[ ARGC -gt 0 ]]
then
	BINNAME=`basename $PWD`
	foreach USER ($@)
		mkdir -p obj/$USER
		AA=`echo $USER $SALT $BINNAME | sha512sum | cut -c 1-8`
		cat program.c.template | sed s/AAAAAA/0x$AA/ >! program.c
		gcc -m32 -std=gnu99 -Wl,-z,norelro -mno-align-double -o obj/$USER/$BINNAME program.c
	end
else
	echo "USAGE: build.zsh <user_email(s)>"
fi
cliffe/SecGen
modules/utilities/unix/ctf/metactf/files/repository/src_malware/Ch01-08/Ch06CAsm_LinkedList/build.zsh
Shell
gpl-3.0
389
#!/bin/sh
LOCAL_REPO=${HOME}/flatpak-repos/peek
REMOTE_REPO=s3://flatpak.uploadedlobster.com
REGION=eu-central-1

flatpak build-update-repo \
  --generate-static-deltas \
  --gpg-sign=B539AD7A5763EE9C1C2E4DE24C14923F47BF1A02 \
  --prune --prune-depth=20 \
  ${LOCAL_REPO}

# First sync all but the summary
aws s3 sync --region="${REGION}" \
  --acl public-read \
  --exclude="summary" --exclude="summary.sig" \
  "${LOCAL_REPO}" "${REMOTE_REPO}"

# Sync the summary
aws s3 sync --region="${REGION}" \
  --acl public-read \
  --exclude="*" --include="summary" --include="summary.sig" \
  "${LOCAL_REPO}" "${REMOTE_REPO}"

# As a last pass also sync deleted files
aws s3 sync --region="${REGION}" \
  --acl public-read \
  --delete \
  "${LOCAL_REPO}" "${REMOTE_REPO}"
phw/peek
build-aux/flatpak/sync-aws.sh
Shell
gpl-3.0
767
#!/bin/bash # set -x # Fri Oct 2 11:09:52 CEST 2015 # [email protected] # Make it work with CSRF enabled Alfresco 5.x # spd: fixed bug: supplied short name is ignored # spd: visibility defaults to "private" # spd: add some cookies (alfLogin, alfUsername3, others supplied by server) # spd: read and use Alfresco-CSRFToken # spd: add "Referer" and "Origin" HTTTP headers # spd: add "isPublic" attribute to JSON # spd: cosmetic changes in source code (split some long lines) # param section # source function library ALFTOOLS_BIN=`dirname "$0"` . $ALFTOOLS_BIN/alfToolsLib.sh # intended to be replaced in command script by a command specific output function __show_command_options() { echo " command options:" echo " -s SHORT_NAME optional, the sites short name" echo " -d DESCRIPTION optional, the site description" echo " -a ACCESS optional, either 'public', 'moderated' or 'private'" echo " -p SITE_PRESET optional, standard preset is 'site-dashboard'" echo } # intended to be replaced in command script function __show_command_arguments() { echo " command arguments:" echo " SITE_TITLE the main title of the site" echo } # intended to be replaced in command script function __show_command_explanation() { echo " command explanation:" echo " the alfCreateSite.sh command let you create a site from the command line." echo echo " usage examples:" echo echo " ./alfCreateSite.sh NewSite" echo " --> creates a new site named 'NewSite' with private visibility" echo } # command local options ALF_CMD_OPTIONS="${ALF_GLOBAL_OPTIONS}s:d:a:p:" ALF_SITE_SHORT_NAME="" ALF_SITE_DESCRIPTION="" ALF_SITE_VISIBILITY="PUBLIC" ALF_SITE_ISPUBLIC="true" ALF_SITE_PRESET="site-dashboard" ALF_SITE_TITLE="" function __process_cmd_option() { local OPTNAME=$1 local OPTARG=$2 case $OPTNAME in s) ALF_SITE_SHORT_NAME=$OPTARG;; d) ALF_SITE_DESCRIPTION="$OPTARG";; a) case "_$OPTARG" in "_public") ALF_SITE_VISIBILITY="PUBLIC" ALF_SITE_ISPUBLIC="true" ;; "_moderated") ALF_SITE_VISIBILITY="MODERATED" ALF_SITE_ISPUBLIC="true" ;; *) ALF_SITE_VISIBILITY="PRIVATE" ALF_SITE_ISPUBLIC="false" ;; esac ;; p) ALF_SITE_PRESET=$OPTARG;; esac } __process_options "$@" # shift away parsed args shift $((OPTIND-1)) # command arguments, ALF_SITE_TITLE=$1 if $ALF_VERBOSE then ALF_CURL_OPTS="$ALF_CURL_OPTS -v" echo "connection params:" echo " user: $ALF_UID" echo " endpoint: $ALF_EP" echo " curl opts: $ALF_CURL_OPTS" echo " site title: $ALF_SITE_TITLE" echo " site desc: $ALF_SITE_DESCRIPTION" echo " site visibility: $ALF_SITE_VISIBILITY" echo " site isPublic: $ALF_SITE_ISPUBLIC" echo " site preset: $ALF_SITE_PRESET" echo " site short name: $ALF_SITE_SHORT_NAME" fi # parameter check if [ "_$ALF_SITE_TITLE" = "_" ] then echo "a site title is required" exit 1 fi if [ "_$ALF_SITE_SHORT_NAME" = "_" ] then # fiddle to create a somewhat nice short name TMP_SHORT_NAME=`echo -n "$ALF_SITE_TITLE" | perl -pe 's/[^a-z0-9]/_/gi' | tr '[:upper:]' '[:lower:]'` ALF_SITE_SHORT_NAME=${TMP_SHORT_NAME} fi # craft json body ALF_JSON=`echo '{}' |\ $ALF_JSHON \ -s "$ALF_SITE_TITLE" -i title \ -s "$ALF_SITE_SHORT_NAME" -i shortName \ -s "$ALF_SITE_DESCRIPTION" -i description \ -s "$ALF_SITE_PRESET" -i sitePreset \ -s "$ALF_SITE_VISIBILITY" -i visibility \ -n "$ALF_SITE_ISPUBLIC" -i isPublic` # get a valid share session id __get_share_session_id ALF_SESSIONID="$ALF_SHARE_SESSIONID" ALF_CSRF=`echo "$ALF_JSON" |\ curl $ALF_CURL_OPTS -v \ -H "Content-Type: application/json; charset=UTF-8" \ --cookie JSESSIONID="$ALF_SESSIONID" \ -d@- -X POST $ALF_SHARE_EP/service/modules/create-site 2>&1 | \ 
sed -e '/Alfresco-CSRFToken/!d' -e 's/^.*Token=//' -e 's/; .*//g'` ALF_CSRF_DECODED=`echo "$ALF_CSRF" | __htd` ALF_SERVER=`echo "$ALF_SHARE_EP" | sed -e 's,/share,,'` echo "$ALF_JSON" |\ curl $ALF_CURL_OPTS -v \ -H "Content-Type: application/json; charset=UTF-8" \ -H "Origin: $ALF_SERVER" \ -H "Alfresco-CSRFToken: $ALF_CSRF_DECODED" \ -e $ALF_SHARE_EP/service/modules/create-site \ --cookie JSESSIONID="${ALF_SESSIONID}; Alfresco-CSRFToken=$ALF_CSRF" \ -d@- \ -X POST \ $ALF_SHARE_EP/service/modules/create-site?"$ALF_CSRF_DECODED" # #{"visibility":"PUBLIC","title":"OtherSite","shortName":"othersite","description":"other site descrpiption","sitePreset":"site-dashboard"}#upload webscript parameter description:
ecm4u/alfresco-shell-tools
bin/alfCreateSite.sh
Shell
gpl-3.0
4,537
#!/bin/bash

set -e
pegasus_lite_version_major="4"
pegasus_lite_version_minor="7"
pegasus_lite_version_patch="0"
pegasus_lite_enforce_strict_wp_check="true"
pegasus_lite_version_allow_wp_auto_download="true"

. pegasus-lite-common.sh

pegasus_lite_init

# cleanup in case of failures
trap pegasus_lite_signal_int INT
trap pegasus_lite_signal_term TERM
trap pegasus_lite_exit EXIT

echo -e "\n################################ Setting up workdir ################################" 1>&2
# work dir
export pegasus_lite_work_dir=$PWD
pegasus_lite_setup_work_dir

echo -e "\n###################### figuring out the worker package to use ######################" 1>&2
# figure out the worker package to use
pegasus_lite_worker_package

echo -e "\n##################### setting the xbit for executables staged #####################" 1>&2
# set the xbit for any executables staged
/bin/chmod +x example_workflow-terminate_0-1.0

echo -e "\n############################# executing the user tasks #############################" 1>&2
# execute the tasks
set +e
pegasus-kickstart -n example_workflow::terminate_0:1.0 -N ID0000016 -R condorpool -L example_workflow -T 2016-10-26T07:50:01+00:00 ./example_workflow-terminate_0-1.0
job_ec=$?
set -e
elainenaomi/sciwonc-dataflow-examples
dissertation2017/Experiment 1A/logs/w-08_2/20161026T075001+0000/00/00/terminate_0_ID0000016.sh
Shell
gpl-3.0
1,237
#!/bin/bash
# 1.7.1.1 Ensure message of the day is configured properly (Scored)

RESULT=$(/bin/grep -P '(\\v|\\r|\\m|\\s)' /etc/motd)

if [[ -z $RESULT ]]
then
  echo 'cis_benchmark_1_7_1_1=passed'
else
  echo 'cis_benchmark_1_7_1_1=failed'
fi
proletaryo/puppet-ciscentos6
files/scripts/benchmark-1.7.1.1.sh
Shell
gpl-3.0
248
#!/bin/bash set -e # exit with nonzero exit code if anything fails SOURCE_BRANCH="master" TARGET_BRANCH="gh-pages" # Pull requests and commits to other branches shouldn't try to deploy, just build to verify if [ "$TRAVIS_PULL_REQUEST" != "false" -o "$TRAVIS_BRANCH" != "$SOURCE_BRANCH" ]; then echo "Skipping deploy since not on $SOURCE_BRANCH" exit 0 fi # Save some useful information REPO=`git config remote.origin.url` SSH_REPO=${REPO/https:\/\/github.com\//[email protected]:} SHA=`git rev-parse --verify HEAD` # Get the deploy key by using Travis's stored variables to decrypt deploy_key.enc ENCRYPTED_KEY_VAR="encrypted_${ENCRYPTION_LABEL}_key" ENCRYPTED_IV_VAR="encrypted_${ENCRYPTION_LABEL}_iv" ENCRYPTED_KEY=${!ENCRYPTED_KEY_VAR} ENCRYPTED_IV=${!ENCRYPTED_IV_VAR} openssl aes-256-cbc -K $ENCRYPTED_KEY -iv $ENCRYPTED_IV -in deploy_key.enc -out deploy_key -d chmod 600 deploy_key eval `ssh-agent -s` ssh-add deploy_key # Clone the existing gh-pages for this repo into out/ # Create a new empty branch if gh-pages doesn't exist yet (should only happen on first deply) git clone $REPO out cd out git checkout $TARGET_BRANCH || git checkout --orphan $TARGET_BRANCH cd .. # Clean out existing contents rm -rf out/**/* || exit 0 # compile gitbook # enter manual folder cd manual # remove old folder if present rm -rf _book || exit 0 # install gitbook deps gitbook install # build new gitbook build # copy new files to out cp -r _book/** ../out # exit to higher level cd .. # Now let's go have some fun with the cloned repo cd out git config user.name "Travis CI" git config user.email "$COMMIT_AUTHOR_EMAIL" # If there are no changes to the compiled out (e.g. this is a README update) then just bail. if git diff --quiet; then echo "No changes to the output on this push; exiting." exit 0 fi # Commit the "changes", i.e. the new version. # The delta will show diffs between new and old versions. git add . git commit -m "Deploy to GitHub Pages: ${SHA}" # Now that we're all set up, we can push. git push $SSH_REPO $TARGET_BRANCH
AKSW/LIMES-dev
gh-deploy.sh
Shell
gpl-3.0
2,056
#!/bin/bash # disable ipv6 echo 1 > /proc/sys/net/ipv6/conf/all/disable_ipv6 sed -i '$ i\echo 1 > /proc/sys/net/ipv6/conf/all/disable_ipv6' /etc/rc.local # install wget and curl apt-get update;apt-get -y install wget curl; # set time GMT +7 ln -fs /usr/share/zoneinfo/Asia/Jakarta /etc/localtime # set locale sed -i 's/AcceptEnv/#AcceptEnv/g' /etc/ssh/sshd_config apt-get -y install dropbear sed -i 's/NO_START=1/NO_START=0/g' /etc/default/dropbear sed -i 's/DROPBEAR_PORT=22/DROPBEAR_PORT=443/g' /etc/default/dropbear sed -i 's/DROPBEAR_EXTRA_ARGS=/DROPBEAR_EXTRA_ARGS="-p 109 -p 110"/g' /etc/default/dropbear echo "/bin/false" >> /etc/shells # install fail2ban apt-get -y install fail2ban;service fail2ban restart # Instal DDOS Flate if [ -d '/usr/local/ddos' ]; then echo; echo; echo "Please un-install the previous version first" exit 0 else mkdir /usr/local/ddos fi clear echo; echo 'Installing DOS-Deflate 0.6'; echo echo; echo -n 'Downloading source files...' wget -q -O /usr/local/ddos/ddos.conf http://www.inetbase.com/scripts/ddos/ddos.conf echo -n '.' wget -q -O /usr/local/ddos/LICENSE http://www.inetbase.com/scripts/ddos/LICENSE echo -n '.' wget -q -O /usr/local/ddos/ignore.ip.list http://www.inetbase.com/scripts/ddos/ignore.ip.list echo -n '.' wget -q -O /usr/local/ddos/ddos.sh http://www.inetbase.com/scripts/ddos/ddos.sh chmod 0755 /usr/local/ddos/ddos.sh cp -s /usr/local/ddos/ddos.sh /usr/local/sbin/ddos echo '...done' echo; echo -n 'Creating cron to run script every minute.....(Default setting)' /usr/local/ddos/ddos.sh --cron > /dev/null 2>&1 echo '.....done' echo; echo 'Installation has completed.' echo 'Config file is at /usr/local/ddos/ddos.conf' echo 'Please send in your comments and/or suggestions to [email protected]' # install webmin apt-get install perl libnet-ssleay-perl openssl libauthen-pam-perl libpam-runtime libio-pty-perl apt-show-versions python -y wget http://prdownloads.sourceforge.net/webadmin/webmin_1.870_all.deb dpkg --install webmin_1.870_all.deb sed -i 's/ssl=1/ssl=0/g' /etc/webmin/miniserv.conf reboot
wawan740/Seven4zerO
autoinstall.sh
Shell
gpl-3.0
2,059
#!/bin/bash # setup eclipse . utils.sh eclipsever="eclipse-oxygen" eclipsename="Eclipse Oxygen" if [[ -e /opt/$eclipsever ]]; then log "Eclipse already installed" else tarname="eclipse-java-oxygen-2-linux-gtk-x86_64.tar.gz" getproxy eclipse "http://mirror.switch.ch/eclipse/technology/epp/downloads/release/oxygen/2/eclipse-java-oxygen-2-linux-gtk-x86_64.tar.gz" . .proxies.sh pushd /opt log "Downloading $eclipsever from mirror $eclipseproxy" wget $eclipseproxy -O $tarname log "Untaring eclipse" tar xzf $tarname mv eclipse $eclipsever log "Cleaning tar file" rm -f $tarname log "Extending eclipse memory" sed -i "s/-Xmx.*m/-Xmx2048m/g" /opt/$eclipsever/eclipse.ini if [[ -e /usr/local/eclipse ]]; then log "/usr/local/eclipse found, you may want to unlink your local eclipse and link to $eclipsename version" else log "Creating link: ln -s /opt/$eclipsever /usr/local/eclipse" ln -s /opt/$eclipsever /usr/local/eclipse fi popd if [[ -e /etc/profile.d/eclipse.sh ]]; then log "/etc/profile.d/eclipse.sh exists" else log "Creating /etc/profile.d/eclipse.sh" cat <<-EOF > /etc/profile.d/eclipse.sh export ECLIPSE_HOME=/usr/local/eclipse export PATH=\${ECLIPSE_HOME}:\${PATH} EOF chmod +x /etc/profile.d/eclipse.sh fi log "Adding eclipse to menu" cat <<-EOF > /usr/share/applications/"$eclipsever".desktop [Desktop Entry] Version=1.0 Comment=$eclipsename Name=$eclipsename Icon=/opt/$eclipsever/icon.xpm Exec=/opt/$eclipsever/eclipse Terminal=false Type=Application Categories=GNOME;Development;IDE;Java;Programming; EOF chmod +x /usr/share/applications/"$eclipsever".desktop log Finished fi
alexfeigin/devel
get-eclipse.sh
Shell
gpl-3.0
1,649
#!/bin/sh # Probe Ext2, Ext3 and Ext4 file systems # Copyright (C) 2008-2013 Free Software Foundation, Inc. # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. . "${srcdir=.}/init.sh"; path_prepend_ ../parted require_512_byte_sector_size_ dev=loop-file ss=$sector_size_ for type in ext2 ext3 ext4 nilfs2; do ( mkfs.$type -V ) >/dev/null 2>&1 \ || { warn_ "$ME: no $type support"; continue; } case $type in ext*) n_sectors=8000 force=-F;; *) n_sectors=$((257*1024)) force=;; esac # create an $type file system dd if=/dev/zero of=$dev bs=$ss count=$n_sectors >/dev/null || fail=1 mkfs.$type $force $dev || { warn_ $ME: mkfs.$type failed; fail=1; continue; } # probe the $type file system parted -m -s $dev u s print >out 2>&1 || fail=1 grep '^1:.*:'$type'::;$' out || { cat out; fail=1; } done # Some features should indicate ext4 by themselves. for feature in uninit_bg flex_bg; do # create an ext3 file system dd if=/dev/zero of=$dev bs=1024 count=4096 >/dev/null || fail=1 mkfs.ext3 -F $dev >/dev/null || skip_ "mkfs.ext3 failed" # set the feature tune2fs -O $feature $dev || skip_ "tune2fs failed" # probe the file system, which should now be ext4 parted -m -s $dev u s print >out 2>&1 || fail=1 grep '^1:.*:ext4::;$' out || fail=1 done Exit $fail
philenotfound/parted
tests/t1700-probe-fs.sh
Shell
gpl-3.0
1,888
#!/bin/sh

set -e
set -u

DIR="$(dirname $0)"

dc() {
	docker-compose -p bgt_import -f ${DIR}/docker-compose.yml $*
}

trap 'dc kill ; dc rm -f' EXIT

dc build
#dc run --rm tests
dc run --rm importer
DatapuntAmsterdam/bgt
.jenkins/import_fme.sh
Shell
mpl-2.0
201
#!/bin/bash
#
ttl=600
#
cut=/usr/bin/cut
getent=/usr/bin/getent
rpc_pipefs=/var/lib/nfs/rpc_pipefs
#
die()
{
	echo "Usage: $0 cache_name entry_name"
	exit 1
}

[ $# -lt 2 ] && die

cachename="$1"
cache_path=${rpc_pipefs}/cache/${cachename}/channel

case "${cachename}" in
    dns_resolve)
	name="$2"
	result="$(${getent} hosts ${name} | ${cut} -f1 -d\ )"
	[ -z "${result}" ] && result="0"
	;;
    *)
	die
	;;
esac
echo "${result} ${name} ${ttl}" >${cache_path}
heliogabalo/The-side-of-the-source
Codigo/Scripts/Bash/nfs_cache_getent.sh
Shell
mpl-2.0
460
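An illustrative invocation of the upcall script above (host name and address are made up); it resolves the name with getent and writes the cache line "<ip> <name> <ttl>" into the rpc_pipefs channel:

# Hypothetical call, as the kernel's nfs cache upcall would perform it:
./nfs_cache_getent.sh dns_resolve fileserver.example.org
# -> writes "198.51.100.7 fileserver.example.org 600" into
#    /var/lib/nfs/rpc_pipefs/cache/dns_resolve/channel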
#!/bin/bash
cd ..
git pull origin master
HASH=`ipfs add -r . | awk -F" " '{print $(NF-1)}'`
HASH=`echo $HASH | awk -F" " '{print $(NF)}'`
cd host
echo $HASH > latest_hash.txt
echo "Done! Run hostRehua.py to make sure it stays online."
NickGeek/rehua
host/updateRehua.sh
Shell
mpl-2.0
235
#! /bin/bash # Simple directory snapshot tool using git. git="$(which git)" [ -z "$git" ] && echo "Please install git first!" gitdir="$(git rev-parse --git-dir 2> /dev/null)" [ -z "$gitdir" -a "$1" != "--init" ] && echo "Not in a snapshot repository! Create a new one with '$0 --init'." snapshotdir="$(dirname "$gitdir")" # exit on error set -e function git() { [ -z "$git" -o -z "$gitdir" ] && exit 1 echo "> git $@" $git "$@" } function commit() { git add --verbose "$snapshotdir" git commit --all --message "${1:-snapshot commit}" } case "$1" in --init ) [ -z "$git" ] && exit 1 [ -n "$2" ] && mkdir -p "$2" && cd "$2" $git init ;; -t|--tag ) if [ -n "$2" ]; then commit "$3" git tag -a "$2" else git tag -l -n1 fi ;; -l|--list ) git whatchanged --reverse --pretty=format:"%Cred%h %Cgreen%ai %Cblue%an %Creset- %s" --name-status ;; -d|--diff ) git diff "${2:-HEAD}" "${3:-.}" ;; -s|--status ) git status --all ;; -v|--view ) git show "${2:-HEAD}":"$3" ;; -w|--switch ) git checkout "${2:-master}" ;; -h|--help ) echo "Usage: $0 [options]" echo echo " --init [path] Create a new snapshot repository" echo echo " -c, --commit [<commit-msg>] Commit all changes" echo " -t, --tag Display all existing tags" echo " -t, --tag <tag-name> [<commit-msg>] Commit all changes and create a new tag" echo " -l, --list Display the history of all changes" echo " -d, --diff Show the current changes" echo " -d, --diff <commit-hash> [path] Show the changes for given commit" echo " -s, --status Display the current status" echo " -v, --view {<commit-hash>|<tag-name>} <path> View a previous version of the file" echo " -w, --switch {<commit-hash>|<tag-name>} Switch to the specified version" echo " -w, --switch Switch back to the current version" echo " -h, --help Print this help and exit" echo echo "With no options, or only a commit message given, acts like --commit." ;; * ) commit "$1" ;; esac
blackwinter/scratch
snapshot.sh
Shell
agpl-3.0
2,443
#!/bin/sh

function fixperms {
	chown -R $UID:$GID /var/log /data /opt/maubot
}

cd /opt/maubot

mkdir -p /var/log/maubot /data/plugins /data/trash /data/dbs /data/crypto

if [ ! -f /data/config.yaml ]; then
	cp docker/example-config.yaml /data/config.yaml
	echo "Config file not found. Example config copied to /data/config.yaml"
	echo "Please modify the config file to your liking and restart the container."
	fixperms
	exit
fi

alembic -x config=/data/config.yaml upgrade head
fixperms

exec su-exec $UID:$GID python3 -m maubot -c /data/config.yaml -b docker/example-config.yaml
tulir/maubot
docker/run.sh
Shell
agpl-3.0
584
#!/bin/sh

if [ `uname` = "CYGWIN_NT-5.1" ]; then
    EXT=.bat
elif [ `uname` = "Linux" ]; then
    EXT=
fi

#Target script
f=test_sint_sch1000_call.script

i=0
while [ $i -lt 3 ]; do
    echo "executing..."$f
    time janus$EXT < $f
    i=`expr $i + 1`
done
tyoko-dev/Janus-A-sml
tests/test_sint_sch1000_call_3times.sh
Shell
agpl-3.0
255
#!/bin/sh

##########################################
## restart openerp servers
##########################################

DIR_SERVICE='/etc/init.d/'
NAME_PATTERN='${NAME_PATTERN}'
SERVICE_PATTERN='openerp'

PROCESS=`ps ax | grep $NAME_PATTERN | awk '{ print $1}' | sed '$d'`
echo $PROCESS

for P in $PROCESS
do
	kill $P
	echo 'kill process '$P
done

exit 0
jmesteve/saas3
openerp/addons_extra/server_manager/templates/stop_process.sh
Shell
agpl-3.0
372
#!/bin/bash # Copyright 2013 Telefonica Investigación y Desarrollo, S.A.U # # This file is part of FI-WARE LiveDemo App # # FI-WARE LiveDemo App is free software: you can redistribute it and/or modify it under the terms # of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # FI-WARE LiveDemo App is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the # implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License # for more details. # # You should have received a copy of the GNU Affero General Public License along with FI-WARE LiveDemo App. If not, # see http://www.gnu.org/licenses/. # # For those usages not covered by the GNU Affero General Public License please contact with fermin at tid dot es (curl ${CB_HOST}:${CB_PORT}/NGSI10/subscribeContext -s -S --header 'Content-Type: application/xml' -d @- | xmllint --format - ) <<EOF <?xml version="1.0"?> <subscribeContextRequest> <entityIdList> <entityId type="Node" isPattern="true"> <id>OUTSMART.NODE.*</id> </entityId> <entityId type="AMMS" isPattern="true"> <id>OUTSMART.AMMS.*</id> </entityId> <entityId type="Regulator" isPattern="true"> <id>OUTSMART.RG.*</id> </entityId> </entityIdList> <attributeList> </attributeList> <reference>http://${CEP_HOST}:${CEP_PORT}/ProtonOnWebServer/rest/events</reference> <duration>P1Y</duration> <notifyConditions> <notifyCondition> <type>ONCHANGE</type> <condValueList> <condValue>TimeInstant</condValue> </condValueList> </notifyCondition> </notifyConditions> <!--use throttling only if you expect too verbose context producer --> <!--throttling>PT5S</throttling--> </subscribeContextRequest> EOF
telefonicaid/fiware-livedemoapp
scripts/bootstrapping/03_subscribeCep.sh
Shell
agpl-3.0
1,960
#!/bin/bash -x # # Generated - do not edit! # # Macros TOP=`pwd` CND_PLATFORM=GNU-Linux-x86 CND_CONF=Debug CND_DISTDIR=dist CND_BUILDDIR=build CND_DLIB_EXT=so NBTMPDIR=${CND_BUILDDIR}/${CND_CONF}/${CND_PLATFORM}/tmp-packaging TMPDIRNAME=tmp-packaging OUTPUT_PATH=${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/litec-to-c OUTPUT_BASENAME=litec-to-c PACKAGE_TOP_DIR=litec-to-c/ # Functions function checkReturnCode { rc=$? if [ $rc != 0 ] then exit $rc fi } function makeDirectory # $1 directory path # $2 permission (optional) { mkdir -p "$1" checkReturnCode if [ "$2" != "" ] then chmod $2 "$1" checkReturnCode fi } function copyFileToTmpDir # $1 from-file path # $2 to-file path # $3 permission { cp "$1" "$2" checkReturnCode if [ "$3" != "" ] then chmod $3 "$2" checkReturnCode fi } # Setup cd "${TOP}" mkdir -p ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package rm -rf ${NBTMPDIR} mkdir -p ${NBTMPDIR} # Copy files and create directories and links cd "${TOP}" makeDirectory "${NBTMPDIR}/litec-to-c/bin" copyFileToTmpDir "${OUTPUT_PATH}" "${NBTMPDIR}/${PACKAGE_TOP_DIR}bin/${OUTPUT_BASENAME}" 0755 # Generate tar file cd "${TOP}" rm -f ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package/litec-to-c.tar cd ${NBTMPDIR} tar -vcf ../../../../${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package/litec-to-c.tar * checkReturnCode # Cleanup cd "${TOP}" rm -rf ${NBTMPDIR}
luciotato/LiteScript
devel/litec/litec-to-c/nbproject/Package-Debug.bash
Shell
agpl-3.0
1,461
#!/bin/bash
# Print each line, exit on error
set -ev

# Install doc requirements
conda config --set safety_checks disabled
conda config --add channels omnia
conda config --add channels conda-forge
conda install -yq --file docs/requirements.txt
pip install -U awscli msmb_theme==1.2.0

which gcc
which g++

python setup.py install

# Make docs
cd docs && make html && cd -

# Move the docs into a versioned subdirectory
python devtools/travis-ci/set_doc_version.py

# Prepare versions.json
python devtools/travis-ci/update_versions_json.py
mattwthompson/mdtraj
devtools/travis-ci/build_docs.sh
Shell
lgpl-2.1
539
#!/bin/bash

. /usr/local/bin/unit_test_lib.sh

clean_all
sleep 1s

stdbuf -oL -eL cmdmod /opt/pyrame/cmd_la_gen8_90.xml > la_gen8_90.trace 2>&1 &
stdbuf -oL -eL cmdmod /opt/pyrame/cmd_gpib.xml > gpib.trace 2>&1 &
stdbuf -oL -eL cmdmod /opt/pyrame/cmd_tcp.xml > tcp.trace 2>&1 &
sleep 1s

exec_n_test ./init.sh "la_gen8_90(bus=gpib(bus=tcp(host=10.220.0.71),dst_addr=1))"
id=`cat ent.trace | awk -F= '{ print $3 }'`

exec_n_test ./config.sh $id

exec_n_test ./get_voltage.sh $id
v0=`cat ent.trace | awk -F= '{ print $3 }'`

exec_n_test ./set_voltage.sh $id 2.9
exec_n_test ./get_voltage.sh $id
v=`cat ent.trace | awk -F= '{ print $3 }'`
#check_equal_values $v "2.9"

exec_n_test ./set_voltage.sh $id $v0

exec_n_test ./inval.sh $id
exec_n_test ./deinit.sh $id

echo "all test passed"
clean_all
exit 0
sbinet-staging/pyrame
ps/cmd_la_gen8_90/unit_test_eth.sh
Shell
lgpl-3.0
800
#!/bin/bash

cd "$(dirname "$0")"

../map-creator.sh europe/great-britain/england ram en,de,fr,es
../map-creator.sh europe/great-britain/scotland ram en,de,fr,es
../map-creator.sh europe/great-britain/wales ram en,de,fr,es
mapsforge/mapsforge-creator
v4/europe-great-britain.sh
Shell
lgpl-3.0
223
#!/bin/bash
cd "`dirname "$0"`"
exec ./sbt.sh run
melezov/postgresql-overview-listen-notify
run.sh
Shell
unlicense
50
#!/bin/bash

# if backup same as new delete it

if cmp -s dbase/pigpio.sqlite dbase/pigpio.sqlite.bak
then
   rm dbase/pigpio.sqlite.bak
else
   d=$(date "+%F-%H-%M-%S")
   mv -f dbase/pigpio.sqlite.bak dbase/pigpio.sqlite.$d
fi

# delete backups older than a week

find dbase/pigpio.sqlite.2* -mtime +7 -delete &>/dev/null
joan2937/pigpio
DOC/bin/purge.sh
Shell
unlicense
323
#!/bin/sh set -e -x PFKARCH=$( sh ../scripts/architecture ) export PFKARCH if [ ! -d fluxbox ] ; then echo 'no fluxbox dir, skipping fluxbox build' # i'm not going to consider this an error, maybe # i just didn't extract it. exit 0 fi cd fluxbox if [ ! -f 00-PFK-CONFIGURE ] ; then echo 'no fluxbox configure file, correct branch?' # this is an error, check out the proper branch. exit 1 fi if [ ! -d m4 ] ; then mkdir m4 fi if [ ! -f configure ] ; then autoreconf -i fi if [ ! -f $OBJDIR/fluxbox/Makefile ] ; then touch Makefile.in aclocal.m4 build-aux/compile build-aux/config.guess \ build-aux/config.sub build-aux/depcomp build-aux/install-sh \ build-aux/missing config.h.in configure nls/C/Makefile.in \ nls/be_BY/Makefile.in nls/bg_BG/Makefile.in nls/cs_CZ/Makefile.in \ nls/da_DK/Makefile.in nls/de_AT/Makefile.in nls/de_CH/Makefile.in \ nls/de_DE/Makefile.in nls/el_GR/Makefile.in nls/en_GB/Makefile.in \ nls/en_US/Makefile.in nls/es_AR/Makefile.in nls/es_ES/Makefile.in \ nls/et_EE/Makefile.in nls/fi_FI/Makefile.in nls/fr_CH/Makefile.in \ nls/fr_FR/Makefile.in nls/he_IL/Makefile.in nls/it_IT/Makefile.in \ nls/ja_JP/Makefile.in nls/ko_KR/Makefile.in nls/lv_LV/Makefile.in \ nls/mk_MK/Makefile.in nls/nb_NO/Makefile.in nls/nl_NL/Makefile.in \ nls/no_NO/Makefile.in nls/pl_PL/Makefile.in nls/pt_BR/Makefile.in \ nls/pt_PT/Makefile.in nls/ru_RU/Makefile.in nls/sk_SK/Makefile.in \ nls/sl_SI/Makefile.in nls/sv_SE/Makefile.in nls/tr_TR/Makefile.in \ nls/uk_UA/Makefile.in nls/vi_VN/Makefile.in nls/zh_CN/Makefile.in \ nls/zh_TW/Makefile.in fi case x$PFK_CONFIG_flubox_xinerama in xenable) xinerama=--enable-xinerama ;; xdisable) xinerama=--disable-xinerama ;; xdefault) xinerama="" ;; *) echo please set PFK_CONFIG_flubox_xinerama to yes or no in config exit 1 ;; esac FLUXBOX_DIR="$PWD" mkdir -p "$OBJDIR/fluxbox" cd "$OBJDIR/fluxbox" if [ ! -f Makefile ] ; then "$FLUXBOX_DIR/configure" --prefix=$HOME/pfk/$PFKARCH/fluxbox_1.3.7 \ --disable-xrandr $xinerama fi make $PFK_CONFIG_contrib_makejobs exit 0
flipk/pfkutils
contrib/build-fluxbox.sh
Shell
unlicense
2,139
#!/bin/sh

# Install all the python libraries via pip
rm -rf lib/!(README.md)
sudo pip install -r requirements.txt -t lib/
PiJoules/Text-from-Memes
app.sh
Shell
unlicense
123
#!/usr/bin/env bash

# Terminate already running bar instances
pkill polybar

# Wait until the processes have been shut down
while pgrep -u $UID -x polybar >/dev/null; do sleep 1; done

# Launch bar1 and bar2
polybar top & > /dev/null
#polybar bottom & > /dev/null
#polybar bar2 &

echo "Bars launched..."
Multipixelone/dotfiles
polybar/.config/polybar/launch.sh
Shell
unlicense
306
#!/usr/bin/env bash ############################# # Include scripts ############################# source /bootstrap/configuration.sh source /bootstrap/environment.sh ############################# # variables and environment ############################# get_environment SQL_SCRIPT=/bootstrap/cinder.sql ############################ # CONFIGURE GLANCE ############################ re_write_file "/controller/cinder/cinder.conf" "/etc/cinder/" fix_configs $SQL_SCRIPT MI_IP=`ip a | grep 10.4 | awk '{print $2}' | cut -d"/" -f1` echo "El valor de MY_IP es: $MI_IP" sed -i "s!^my_ip.*=.*!my_ip = $MI_IP!" /etc/cinder/cinder.conf cat >/etc/cinder/nfs_shares <<EOF 172.16.26.11:/vol_NFS_CLOUD_EP_cinder1 172.16.26.12:/vol_NFS_CLOUD_EP_cinder2 EOF cat >/usr/local/lib/python2.7/dist-packages/cinder/db/sqlalchemy/migrate_repo/migrate.cfg <<EOF [db_settings] repository_id=cinder version_table=migrate_version required_dbs=[] EOF ############################ # DATABASE BOOTSTRAP ############################ if ! does_db_exist cinder; then # create database mysql -uroot -p$MYSQL_ROOT_PASSWORD -h $MYSQL_HOST <$SQL_SCRIPT # configure the service and endpoint url export OS_USERNAME=$ADMIN_USER_NAME export OS_PASSWORD=$ADMIN_PASSWORD export OS_TENANT_NAME=$ADMIN_TENANT_NAME export OS_AUTH_URL=$OS_URL openstack service create --name cinder --description "OpenStack Block Storage" volume openstack service create --name cinderv2 --description "OpenStack Block Storage" volumev2 openstack endpoint create --region $REGION volume public https://$CINDER_OFUSCADO/v1/%\(tenant_id\)s openstack endpoint create --region $REGION volume internal http://$CINDER_HOSTNAME:8776/v1/%\(tenant_id\)s openstack endpoint create --region $REGION volume admin http://$CINDER_HOSTNAME:8776/v1/%\(tenant_id\)s openstack endpoint create --region $REGION volumev2 public https://$CINDER_OFUSCADO/v2/%\(tenant_id\)s openstack endpoint create --region $REGION volumev2 internal http://$CINDER_HOSTNAME:8776/v2/%\(tenant_id\)s openstack endpoint create --region $REGION volumev2 admin http://$CINDER_HOSTNAME:8776/v2/%\(tenant_id\)s openstack user create --domain default --password $CINDER_PASSWORD $CINDER_USERNAME openstack role add --project services --user $CINDER_USERNAME admin # sync the database cinder-manage db sync fi # create a admin-openrc.sh file cat >~/openrc <<EOF export OS_PROJECT_DOMAIN_NAME=default export OS_USER_DOMAIN_NAME=default export OS_PROJECT_NAME=admin export OS_USERNAME=admin export OS_PASSWORD=$ADMIN_PASSWORD export OS_AUTH_URL=http://$KEYSTONE_HOSTNAME:35357/v3 export OS_IDENTITY_API_VERSION=3 export OS_IMAGE_API_VERSION=2 export OS_INTERFACE=internal EOF cat ~/openrc # start cinder service cinder-scheduler --config-file=/etc/cinder/cinder.conf & cinder-api --config-file=/etc/cinder/cinder.conf & cinder-volume --config-file=/etc/cinder/cinder.conf sleep 1d
BBVA/openstack-k8s
cinder/data/bootstrap/bootstrap.sh
Shell
apache-2.0
2,966
#!/bin/sh

#########################
# Package BitcoinWallet #
#########################

if [ -z "$1" ] ; then
  echo "You must specify the version to package"
  exit 1
fi

VERSION="$1"

if [ ! -f target/BitcoinWallet-$VERSION.jar ] ; then
  echo "You must build the BitcoinWallet-$VERSION.jar file"
  exit 1
fi

if [ ! -d package ] ; then
  mkdir package
else
  rm -r package/*
fi

cd package
cp ../ChangeLog.txt ../LICENSE ../README.md ../sample.BitcoinWallet.conf ../sample.logging.properties .
cp -r ../target/BitcoinWallet-$VERSION.jar ../target/lib .
zip -r BitcoinWallet-$VERSION.zip BitcoinWallet-$VERSION.jar lib ChangeLog.txt LICENSE README.md sample.BitcoinWallet.conf sample.logging.properties
echo "Created BitcoinWallet-$VERSION.zip"
tar zchf BitcoinWallet-$VERSION.tar.gz BitcoinWallet-$VERSION.jar lib ChangeLog.txt LICENSE README.md sample.BitcoinWallet.conf sample.logging.properties
echo "Created BitcoinWallet-$VERSION.tar.gz"
exit 0
ScripterRon/BitcoinWallet
package.sh
Shell
apache-2.0
957
#!/bin/bash

source /bin/dockbit_bootstrap.sh

# Load Erlang
if [ -f .erlang-version ]; then
  ERLANG_VERSION=`cat .erlang-version`
else
  ERLANG_VERSION=`kerl list installations | head -n 1 | cut -d" " -f1`
fi

. $HOME/.kerl/$ERLANG_VERSION/activate

if [ -f .elixir-version ]; then
  ELIXIR_VERSION=`cat .elixir-version`
else
  ELIXIR_VERSION="1.3.4"
fi

kiex use $ELIXIR_VERSION

if [ -f mix.lock ]; then
  hacher get -k hex -f mix.lock /
  run mix deps.get
  hacher set -k hex -f mix.lock $HOME/.hex
fi

run_user
Dockbit/library
elixir/1.3.4/docker-entrypoint.sh
Shell
apache-2.0
514
#!/bin/sh
NAME=awa.cov
lcov --quiet --base-directory . --directory . -c -o $NAME
lcov --quiet --remove $NAME "/usr*" -o $NAME
lcov --quiet --remove $NAME "/build*" -o $NAME
lcov --quiet --remove $NAME "/opt*" -o $NAME
lcov --quiet --remove $NAME "*/adainclude*" -o $NAME
lcov --quiet --remove $NAME "*/regtests*" -o $NAME
lcov --quiet --remove $NAME "*/b__*" -o $NAME
lcov --quiet --remove $NAME "*/regtests/*" -o $NAME
lcov --quiet --remove $NAME "*/awaunit/*" -o $NAME
lcov --quiet --remove $NAME "*/ada-util/*" -o $NAME
lcov --quiet --remove $NAME "*/ada-el/*" -o $NAME
lcov --quiet --remove $NAME "*/ada-servlet/*" -o $NAME
lcov --quiet --remove $NAME "*/ada-security/*" -o $NAME
lcov --quiet --remove $NAME "*/ada-asf/*" -o $NAME
lcov --quiet --remove $NAME "*/ada-ado/*" -o $NAME
lcov --quiet --remove $NAME "*/openapi-ada/*" -o $NAME
# Ignore generated code
lcov --quiet --remove $NAME "*/model/*" -o $NAME
rm -rf cover
genhtml --quiet --ignore-errors source -o ./cover -t "test coverage" --num-spaces 4 $NAME
stcarrez/ada-awa
awa/coverage.sh
Shell
apache-2.0
1,019
docker run -d -e MONGODB_USER=$username -e MONGODB_PASSWORD=$password -e MONGODB_DATABASE=$database_name -e MONGODB_ADMIN_PASSWORD=$admin_password -p 27017:27017 openshift/mongodb-24-centos7
markllama/atomicapp-examples
mongodb/centos/mongodb_start.sh
Shell
apache-2.0
191
#!/bin/sh

if [ $# != 3 ]; then
    echo "usage $0: archive s3-bucket destdir"
    exit 1
fi

if [ -z "${AWS_ACCESS_KEY_ID}" ]; then
    echo "No AWS_ACCESS_KEY_ID defined"
    exit 1
fi

if [ -z "${AWS_SECRET_ACCESS_KEY}" ]; then
    echo "No AWS_SECRET_ACCESS_KEY defined"
    exit 1
fi

upload2s3() {
    FULLPATH=$1
    BUCKET=$2
    DESTDIR=$3
    FILE=`basename "${FULLPATH}"`
    FILE=`escape "${FILE}"`
    DESTDIR=`escape "${DESTDIR}"`
    DATE=`date -R`
    RESOURCE="/${BUCKET}/${DESTDIR}/${FILE}"
    CONTENT_TYPE="application/x-compressed-tar"
    # XXX: and really sha1 below?
    ACL="public-read"
    TO_SIGN="PUT\n\n${CONTENT_TYPE}\n${DATE}\nx-amz-acl:${ACL}\n${RESOURCE}"
    SIG=`echo -en ${TO_SIGN} | \
         openssl sha1 -hmac ${AWS_SECRET_ACCESS_KEY} -binary | \
         base64`

    http_code=$( \
        curl -s -w "%{http_code}" -o /dev/stderr \
        -X PUT -T "${FULLPATH}" \
        -H "Host: ${BUCKET}.s3.amazonaws.com" \
        -H "Date: ${DATE}" \
        -H "Content-Type: ${CONTENT_TYPE}" \
        -H "x-amz-acl: ${ACL}" \
        -H "Authorization: AWS ${AWS_ACCESS_KEY_ID}:${SIG}" \
        https://${BUCKET}.s3.amazonaws.com/${DESTDIR}/${FILE} )

    return $([ $http_code -eq 200 ])
}

escape() {
    echo $1 | sed 's/ /%20/g'
}

upload2s3 "$1" "$2" "$3"
loads/loads-broker
loadsbroker/support/upload2s3.sh
Shell
apache-2.0
1,334
tclsh graph_size.tcl metis_15000_RAID1_8GB_i37_t1.log metis_15000_RAID1_8GB_i37_t2.log metis_15000_RAID1_8GB_i37_t4.log harddisk_threading
tclsh graph_size.tcl metis_flash_RAID0_8GB_i37_t1.log metis_flash_RAID0_8GB_i37_t2.log metis_flash_RAID0_8GB_i37_t4.log ssd_threading
statsbiblioteket/summa
Core/scripts/performance/threading.sh
Shell
apache-2.0
276
#!/usr/bin/env bash

###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################

# Fail on first error.
set -e

cd "$(dirname "${BASH_SOURCE[0]}")"
. ./installer_base.sh

# Ref https://classic.yarnpkg.com/en/docs/install/#debian-stable
# Don't use tee here. It complains
# "Warning: apt-key output should not be parsed (stdout is not a terminal)"
# otherwise.
curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add -

echo "deb https://dl.yarnpkg.com/debian/ stable main" > /etc/apt/sources.list.d/yarn.list

apt_get_update_and_install yarn

info "Successfully installed yarn"

apt-get clean
rm -fr /etc/apt/sources.list.d/yarn.list
ApolloAuto/apollo
docker/build/installers/install_yarn.sh
Shell
apache-2.0
1,343
#!/bin/bash

# This script will set up the java classpath with the required libraries
# then call diffxml with the given arguments.

java -cp @INSTALL_DIR@/lib/diffxml.jar org.diffxml.diffxml.DiffXML "$@"
amouat/diffxml
diffxml_install.sh
Shell
apache-2.0
204
#set ip_address = "" #set ikeys = $interfaces.keys() #for $iname in $ikeys #set $idata = $interfaces[$iname] #set $static = $idata["static"] #set $management = $idata["management"] #set $ip = $idata["ip_address"] #if $management and $ip #set $ip_address = $ip #end if #end for #set $proxy_url = "" #set $local_repo_url = "" #if $getVar("local_repo","") != "" #set $local_repo_url = $local_repo #end if #if $getVar("proxy","") != "" #set $proxy_url = $proxy #end if #if $getVar('compass_server', '') != "" #set srv = $getVar('compass_server','') #else #set srv = $getVar('server','') #end if cat << EOF > /etc/chef/chef_client_run.sh #!/bin/bash touch /var/log/chef.log PIDFILE=/tmp/chef_client_run.pid if [ -f \\$PIDFILE ]; then pid=\\$(cat \\$PIDFILE) if [ -f /proc/\\$pid/exe ]; then echo "there are chef_client_run.sh running with pid \\$pid" >> /var/log/chef.log 2>&1 exit 1 fi fi echo \\$$ > \\$PIDFILE while true; do echo "run chef-client on \`date\`" >> /var/log/chef.log 2>&1 clients=\\$(pgrep chef-client) if [[ "\\$?" == "0" ]]; then echo "there are chef-clients '\\$clients' running" >> /var/log/chef.log 2>&1 break else echo "knife search nodes" >> /var/log/chef.log 2>&1 # use knife node list here to check if node has been registered because knife search node # doesn't work as expected. USER=root HOME=/root knife node list |grep \\$HOSTNAME. >> /var/log/chef.log 2>&1 nodes=\\$(USER=root HOME=/root knife node list |grep \\$HOSTNAME.) echo "found nodes \\$nodes" >> /var/log/chef.log 2>&1 let all_nodes_success=1 for node in \\$nodes; do mkdir -p /var/log/chef/\\$node if [ ! -f /etc/chef/\\$node.json ]; then cat << EOL > /etc/chef/\\$node.json { "local_repo": "$local_repo_url", "proxy_url": "$proxy_url", "ip_address": "$ip_address" } EOL fi if [ ! -f "/etc/chef/\\$node.pem" ]; then cat << EOL > /etc/rsyslog.d/\\$node.conf \\\\$ModLoad imfile \\\\$InputFileName /var/log/chef/\\$node/chef-client.log \\\\$InputFileReadMode 0 \\\\$InputFileTag \\$node \\\\$InputFileStateFile chef_\\${node}_log \\\\$InputFileSeverity notice \\\\$InputFileFacility local3 \\\\$InputRunFileMonitor \\\\$InputFilePollInterval 1 #if $getVar("compass_server","") != "" local3.info @$compass_server:514 #else local3.info @@$server:514 #end if EOL rm -rf /var/lib/rsyslog/chef_\\$node_log service rsyslog restart fi if [ -f "/etc/chef/\\$node.done" ]; then USER=root HOME=/root chef-client --node-name \\$node -j /etc/chef/\\$node.json --client_key /etc/chef/\\$node.pem >> /var/log/chef.log 2>&1 else USER=root HOME=/root chef-client --node-name \\$node -j /etc/chef/\\$node.json --client_key /etc/chef/\\$node.pem -L /var/log/chef/\\$node/chef-client.log >> /var/log/chef.log 2>&1 fi if [ "\\$?" != "0" ]; then echo "chef-client --node-name \\$node run failed" >> /var/log/chef.log 2>&1 let all_nodes_success=0 else echo "chef-client --node-name \\$node run success" >> /var/log/chef.log 2>&1 touch /etc/chef/\\$node.done wget -O /tmp/package_state.\\$node --post-data='{"ready": true}' --header=Content-Type:application/json "http://$srv/api/clusterhosts/\\${node}/state_internal" fi done if [ \\$all_nodes_success -eq 0 ]; then sleep 1m else break fi fi done EOF chmod +x /etc/chef/chef_client_run.sh
weidongshao/compass-adapters
cobbler/snippets/kickstart_chef_run.sh
Shell
apache-2.0
3,743
#!/usr/bin/env bash
set -euo pipefail

os_name="$(go env GOOS)"

# gofmt must be on PATH
command -v gofmt

if [[ "${os_name}" == "windows" ]]; then
  echo "Skipping go-fmt on Windows because line-endings cause every file to need formatting."
  echo "Linux is treated as authoritative."
  echo "Exiting 0..."
  exit 0
fi

fmt="$(go fmt github.com/aelsabbahy/goss/...)"
if [[ -z "${fmt}" ]]; then
  echo "valid gofmt"
else
  echo "invalid gofmt:"
  echo "${fmt}"
  exit 1
fi
aelsabbahy/goss
ci/go-fmt.sh
Shell
apache-2.0
474
#!/bin/bash

#setup infra-cli and default srcs.list
echo "deb http://wzy-mirror.nm.flipkart.com/ftp.debian.org/debian wheezy-backports main" > /etc/apt/sources.list.d/wzy-backports.list
echo "deb http://10.47.2.22/repos/infra-cli/3 /" > /etc/apt/sources.list.d/infra-cli-svc.list
apt-get update
apt-get install --yes --allow-unauthenticated infra-cli

#setup your package
echo "w3_zipkin_prod" > /etc/fk-zipkin-bucket
echo "team_name=mobile-api" > /etc/default/nsca_wrapper
reposervice --host repo-svc-app-0001.nm.flipkart.com --port 8080 env --name fk-zipkin --appkey fk-zipkin > /etc/apt/sources.list.d/fk-zipkin.list
apt-get update
apt-get install --yes --allow-unauthenticated fk-zipkin
apt-get install --yes --allow-unauthenticated fk-nagios-common
Flipkart/zipkin
iaas/fk-zipkin.setup.sh
Shell
apache-2.0
753
#!/bin/bash # # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # # Grid cluster control. # # # Import common functions. # if [ "${IGNITE_HOME}" = "" ]; then IGNITE_HOME_TMP="$(dirname "$(cd "$(dirname "$0")"; "pwd")")"; else IGNITE_HOME_TMP=${IGNITE_HOME}; fi # # Set SCRIPTS_HOME - base path to scripts. # SCRIPTS_HOME="${IGNITE_HOME_TMP}/bin" source "${SCRIPTS_HOME}"/include/functions.sh # # Discover path to Java executable and check it's version. # checkJava # # Discover IGNITE_HOME environment variable. # setIgniteHome if [ "${DEFAULT_CONFIG}" == "" ]; then DEFAULT_CONFIG=config/default-config.xml fi # # Set IGNITE_LIBS. # . "${SCRIPTS_HOME}"/include/setenv.sh . "${SCRIPTS_HOME}"/include/build-classpath.sh # Will be removed in the binary release. CP="${IGNITE_LIBS}" RANDOM_NUMBER=$("$JAVA" -cp "${CP}" org.apache.ignite.startup.cmdline.CommandLineRandomNumberGenerator) RESTART_SUCCESS_FILE="${IGNITE_HOME}/work/ignite_success_${RANDOM_NUMBER}" RESTART_SUCCESS_OPT="-DIGNITE_SUCCESS_FILE=${RESTART_SUCCESS_FILE}" # # Find available port for JMX # # You can specify IGNITE_JMX_PORT environment variable for overriding automatically found JMX port # # This is executed when -nojmx is not specified # if [ "${NOJMX}" == "0" ] ; then findAvailableJmxPort fi # Mac OS specific support to display correct name in the dock. osname=`uname` if [ "${DOCK_OPTS}" == "" ]; then DOCK_OPTS="-Xdock:name=Ignite Node" fi # # JVM options. See http://java.sun.com/javase/technologies/hotspot/vmoptions.jsp for more details. # # ADD YOUR/CHANGE ADDITIONAL OPTIONS HERE # if [ -z "$JVM_OPTS" ] ; then if [[ `"$JAVA" -version 2>&1 | egrep "1\.[7]\."` ]]; then JVM_OPTS="-Xms256m -Xmx1g" else JVM_OPTS="-Xms256m -Xmx1g" fi fi # # Uncomment the following GC settings if you see spikes in your throughput due to Garbage Collection. # # JVM_OPTS="$JVM_OPTS -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:+UseTLAB -XX:NewSize=128m -XX:MaxNewSize=128m" # JVM_OPTS="$JVM_OPTS -XX:MaxTenuringThreshold=0 -XX:SurvivorRatio=1024 -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=60" # # Uncomment if you get StackOverflowError. # On 64 bit systems this value can be larger, e.g. -Xss16m # # JVM_OPTS="${JVM_OPTS} -Xss4m" # # Uncomment to set preference for IPv4 stack. # # JVM_OPTS="${JVM_OPTS} -Djava.net.preferIPv4Stack=true" # # Assertions are disabled by default since version 3.5. # If you want to enable them - set 'ENABLE_ASSERTIONS' flag to '1'. # ENABLE_ASSERTIONS="1" # # Set '-ea' options if assertions are enabled. # if [ "${ENABLE_ASSERTIONS}" = "1" ]; then JVM_OPTS="${JVM_OPTS} -ea" fi # # Set main class to start service (grid node by default). # if [ "${MAIN_CLASS}" = "" ]; then MAIN_CLASS=org.apache.ignite.internal.commandline.CommandHandler fi # # Remote debugging (JPDA). 
# Uncomment and change if remote debugging is required. # # JVM_OPTS="-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=8787 ${JVM_OPTS}" ERRORCODE="-1" while [ "${ERRORCODE}" -ne "130" ] do if [ "${INTERACTIVE}" == "1" ] ; then case $osname in Darwin*) "$JAVA" ${JVM_OPTS} ${QUIET} "${DOCK_OPTS}" "${RESTART_SUCCESS_OPT}" ${JMX_MON} \ -DIGNITE_UPDATE_NOTIFIER=false -DIGNITE_HOME="${IGNITE_HOME}" \ -DIGNITE_PROG_NAME="$0" ${JVM_XOPTS} -cp "${CP}" ${MAIN_CLASS} $@ ;; *) "$JAVA" ${JVM_OPTS} ${QUIET} "${RESTART_SUCCESS_OPT}" ${JMX_MON} \ -DIGNITE_UPDATE_NOTIFIER=false -DIGNITE_HOME="${IGNITE_HOME}" \ -DIGNITE_PROG_NAME="$0" ${JVM_XOPTS} -cp "${CP}" ${MAIN_CLASS} $@ ;; esac else case $osname in Darwin*) "$JAVA" ${JVM_OPTS} ${QUIET} "${DOCK_OPTS}" "${RESTART_SUCCESS_OPT}" ${JMX_MON} \ -DIGNITE_UPDATE_NOTIFIER=false -DIGNITE_HOME="${IGNITE_HOME}" \ -DIGNITE_PROG_NAME="$0" ${JVM_XOPTS} -cp "${CP}" ${MAIN_CLASS} $@ ;; *) "$JAVA" ${JVM_OPTS} ${QUIET} "${RESTART_SUCCESS_OPT}" ${JMX_MON} \ -DIGNITE_UPDATE_NOTIFIER=false -DIGNITE_HOME="${IGNITE_HOME}" \ -DIGNITE_PROG_NAME="$0" ${JVM_XOPTS} -cp "${CP}" ${MAIN_CLASS} $@ ;; esac fi ERRORCODE="$?" if [ ! -f "${RESTART_SUCCESS_FILE}" ] ; then break else rm -f "${RESTART_SUCCESS_FILE}" fi done if [ -f "${RESTART_SUCCESS_FILE}" ] ; then rm -f "${RESTART_SUCCESS_FILE}" fi
wmz7year/ignite
bin/control.sh
Shell
apache-2.0
5,317
#!/bin/bash -eux

# Usage: agent.sh VERSION ROOT

. <(curl -s https://raw.githubusercontent.com/bigfix/boxes/master/bigfix/common/util.source.sh)

function _get {
  local version="$1"
  local package=""

  if [[ -f /etc/redhat-release ]]; then
    package="BESAgent-$version-rhe5.x86_64.rpm"
  elif [[ -f /etc/lsb-release ]]; then
    if grep -q -i "Ubuntu" /etc/lsb-release; then
      package="BESAgent-$version-ubuntu10.amd64.deb"
    fi
  fi

  echo $package
}

function _install {
  local agent="$1"

  if [[ -f /etc/redhat-release ]]; then
    rpm -i $agent
  elif [[ -f /etc/lsb-release ]]; then
    if grep -q -i "Ubuntu" /etc/lsb-release; then
      dpkg -i $agent
    fi
  fi
}

version=${1:-$BIGFIX_VERSION}
agent=$(_get $version)

download $version $agent >/dev/null

root_server=${2:-$BIGFIX_ROOT}
mkdir /etc/opt/BESClient
curl -s "http://${root_server}/masthead" -o /etc/opt/BESClient/actionsite.afxm

_install $agent
service besclient start
bigfix/boxes
bigfix/common/agent.sh
Shell
apache-2.0
928
###
# #%L
# servo-influxdb
# $Id:$
# $HeadURL:$
# %%
# Copyright (C) 2016 PolymathicCoder LLC
# %%
# Copyright 2016 PolymathicCoder LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# #L%
###

hub create PolymathicCoder/servo-influxdb -d "A Servo and InfluxDB Integration."
PolymathicCoder/servo-influxdb
src/main/sh/genesis.sh
Shell
apache-2.0
777
#!/usr/bin/env bash ################################################################################ # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ################################################################################ STAGE_COMPILE="compile" STAGE_CORE="core" STAGE_PYTHON="python" STAGE_LIBRARIES="libraries" STAGE_BLINK_PLANNER="blink_planner" STAGE_CONNECTORS="connectors" STAGE_KAFKA_GELLY="kafka/gelly" STAGE_TESTS="tests" STAGE_MISC="misc" STAGE_CLEANUP="cleanup" STAGE_LEGACY_SLOT_MANAGEMENT="legacy_slot_management" STAGE_FINEGRAINED_RESOURCE_MANAGEMENT="finegrained_resource_management" MODULES_CORE="\ flink-annotations,\ flink-test-utils-parent/flink-test-utils,\ flink-state-backends/flink-statebackend-rocksdb,\ flink-clients,\ flink-core,\ flink-java,\ flink-optimizer,\ flink-runtime,\ flink-runtime-web,\ flink-scala,\ flink-streaming-java,\ flink-streaming-scala,\ flink-metrics,\ flink-metrics/flink-metrics-core,\ flink-external-resources,\ flink-external-resources/flink-external-resource-gpu" MODULES_LIBRARIES="\ flink-libraries/flink-cep,\ flink-libraries/flink-cep-scala,\ flink-libraries/flink-state-processing-api,\ flink-table/flink-table-common,\ flink-table/flink-table-api-java,\ flink-table/flink-table-api-scala,\ flink-table/flink-table-api-java-bridge,\ flink-table/flink-table-api-scala-bridge,\ flink-table/flink-table-planner,\ flink-table/flink-sql-client" MODULES_BLINK_PLANNER="\ flink-table/flink-table-planner-blink,\ flink-table/flink-table-runtime-blink" MODULES_CONNECTORS="\ flink-contrib/flink-connector-wikiedits,\ flink-filesystems,\ flink-filesystems/flink-fs-hadoop-shaded,\ flink-filesystems/flink-hadoop-fs,\ flink-filesystems/flink-mapr-fs,\ flink-filesystems/flink-oss-fs-hadoop,\ flink-filesystems/flink-s3-fs-base,\ flink-filesystems/flink-s3-fs-hadoop,\ flink-filesystems/flink-s3-fs-presto,\ flink-filesystems/flink-swift-fs-hadoop,\ flink-fs-tests,\ flink-formats,\ flink-formats/flink-avro-confluent-registry,\ flink-formats/flink-avro,\ flink-formats/flink-parquet,\ flink-formats/flink-sequence-file,\ flink-formats/flink-json,\ flink-formats/flink-csv,\ flink-formats/flink-orc,\ flink-formats/flink-orc-nohive,\ flink-connectors/flink-connector-hbase-base,\ flink-connectors/flink-connector-hbase-1.4,\ flink-connectors/flink-connector-hbase-2.2,\ flink-connectors/flink-hcatalog,\ flink-connectors/flink-hadoop-compatibility,\ flink-connectors,\ flink-connectors/flink-connector-jdbc,\ flink-connectors/flink-connector-cassandra,\ flink-connectors/flink-connector-elasticsearch5,\ flink-connectors/flink-connector-elasticsearch6,\ flink-connectors/flink-connector-elasticsearch7,\ flink-connectors/flink-sql-connector-elasticsearch6,\ flink-connectors/flink-sql-connector-elasticsearch7,\ flink-connectors/flink-connector-elasticsearch-base,\ 
flink-connectors/flink-connector-nifi,\ flink-connectors/flink-connector-rabbitmq,\ flink-connectors/flink-connector-twitter,\ flink-connectors/flink-connector-kinesis,\ flink-metrics/flink-metrics-dropwizard,\ flink-metrics/flink-metrics-graphite,\ flink-metrics/flink-metrics-jmx,\ flink-metrics/flink-metrics-influxdb,\ flink-metrics/flink-metrics-prometheus,\ flink-metrics/flink-metrics-statsd,\ flink-metrics/flink-metrics-datadog,\ flink-metrics/flink-metrics-slf4j,\ flink-queryable-state/flink-queryable-state-runtime,\ flink-queryable-state/flink-queryable-state-client-java" MODULES_KAFKA_GELLY="\ flink-libraries/flink-gelly,\ flink-libraries/flink-gelly-scala,\ flink-libraries/flink-gelly-examples,\ flink-connectors/flink-connector-kafka,\ flink-connectors/flink-sql-connector-kafka," MODULES_TESTS="\ flink-tests" MODULES_LEGACY_SLOT_MANAGEMENT=${MODULES_CORE},${MODULES_TESTS} MODULES_FINEGRAINED_RESOURCE_MANAGEMENT=${MODULES_CORE},${MODULES_TESTS} # we can only build the Scala Shell when building for Scala 2.11 if [[ $PROFILE == *"scala-2.11"* ]]; then MODULES_CORE="$MODULES_CORE,flink-scala-shell" fi function get_compile_modules_for_stage() { local stage=$1 case ${stage} in (${STAGE_CORE}) echo "-pl $MODULES_CORE -am" ;; (${STAGE_LIBRARIES}) echo "-pl $MODULES_LIBRARIES -am" ;; (${STAGE_BLINK_PLANNER}) echo "-pl $MODULES_BLINK_PLANNER -am" ;; (${STAGE_CONNECTORS}) echo "-pl $MODULES_CONNECTORS -am" ;; (${STAGE_KAFKA_GELLY}) echo "-pl $MODULES_KAFKA_GELLY -am" ;; (${STAGE_TESTS}) echo "-pl $MODULES_TESTS -am" ;; (${STAGE_MISC}) # compile everything; using the -am switch does not work with negated module lists! # the negation takes precedence, thus not all required modules would be built echo "" ;; (${STAGE_PYTHON}) # compile everything for PyFlink. echo "" ;; (${STAGE_LEGACY_SLOT_MANAGEMENT}) echo "-pl $MODULES_LEGACY_SLOT_MANAGEMENT -am" ;; (${STAGE_FINEGRAINED_RESOURCE_MANAGEMENT}) echo "-pl $MODULES_FINEGRAINED_RESOURCE_MANAGEMENT -am" ;; esac } function get_test_modules_for_stage() { local stage=$1 local modules_core=$MODULES_CORE local modules_libraries=$MODULES_LIBRARIES local modules_blink_planner=$MODULES_BLINK_PLANNER local modules_connectors=$MODULES_CONNECTORS local modules_tests=$MODULES_TESTS local negated_core=\!${MODULES_CORE//,/,\!} local negated_libraries=\!${MODULES_LIBRARIES//,/,\!} local negated_blink_planner=\!${MODULES_BLINK_PLANNER//,/,\!} local negated_kafka_gelly=\!${MODULES_KAFKA_GELLY//,/,\!} local negated_connectors=\!${MODULES_CONNECTORS//,/,\!} local negated_tests=\!${MODULES_TESTS//,/,\!} local modules_misc="$negated_core,$negated_libraries,$negated_blink_planner,$negated_connectors,$negated_kafka_gelly,$negated_tests" local modules_legacy_slot_management=$MODULES_LEGACY_SLOT_MANAGEMENT local modules_finegrained_resource_management=$MODULES_FINEGRAINED_RESOURCE_MANAGEMENT case ${stage} in (${STAGE_CORE}) echo "-pl $modules_core" ;; (${STAGE_LIBRARIES}) echo "-pl $modules_libraries" ;; (${STAGE_BLINK_PLANNER}) echo "-pl $modules_blink_planner" ;; (${STAGE_CONNECTORS}) echo "-pl $modules_connectors" ;; (${STAGE_KAFKA_GELLY}) echo "-pl $MODULES_KAFKA_GELLY" ;; (${STAGE_TESTS}) echo "-pl $modules_tests" ;; (${STAGE_MISC}) echo "-pl $modules_misc" ;; (${STAGE_LEGACY_SLOT_MANAGEMENT}) echo "-pl $modules_legacy_slot_management" ;; (${STAGE_FINEGRAINED_RESOURCE_MANAGEMENT}) echo "-pl $modules_finegrained_resource_management" ;; esac }
kl0u/flink
tools/ci/stage.sh
Shell
apache-2.0
7,661
#! /bin/sh
# Release script function library

# Merge branch code
merge(){
    cd $WORKDIR
    if [ "`git branch | grep $WORKBRANCH | awk '{print $1}'`" != '*' ] ; then
        git checkout $WORKBRANCH >> /dev/null
        if [ "$?" = "1" ]; then
            echo "[error] `tail -n 1 $ERRORLOG`"
            exit 0
        fi
    fi
    git pull 1> /dev/null && git checkout $MASTERBRANCH > /dev/null && git merge $WORKBRANCH 1> /dev/null && git push 1> /dev/null && git checkout $WORKBRANCH > /dev/null
    if [ "$?" = "1" ]; then
        echo "[error] `tail -n 1 $ERRORLOG`"
        exit 0
    fi
    if [ ! -d "$ROOTDIR""/execute" ]; then
        rm -rf "$ROOTDIR""/execute"
    fi
    cp -rfp $WORKDIR "$ROOTDIR""/execute"
    if [ "$?" = "1" ]; then
        echo "[error] `tail -n 1 $ERRORLOG`"
        exit 0
    fi
}

# Initialize directories
init(){
    if [ ! -d "$ROOTDIR""/version" ];then
        mkdir "$ROOTDIR""/version"
    fi
    cd $WORKDIR
    # Check the release branch
    if [ `git branch | grep master | awk '{print $1}'` != 'master' ] && [ `git branch | grep master | awk '{print $2}'` != 'master' ]; then
        echo 'master branch not found!'
    fi
    # Check the development branch
    if [ "`git branch | grep test | awk '{print $1}'`" == 'test' ] && [ "`git branch | grep test | awk '{print $2}'`" != 'test' ]; then
        echo 'test branch not found!'
    fi
    cd $ROOTDIR
}

# Publish a release
commit(){
    cd $ROOTDIR
    if [ ! -d "$ROOTDIR""/execute" ]; then
        echo '[error]please use [merge] param to update your code or use [rollback] param to reset code at first!'
        exit 0
    fi
    if [ ! -n "$1" ]; then
        echo "input version message:"
        read input
        MESSAGE="$input"
    else
        MESSAGE=$1
    fi
    push
    COMMITID=`date|md5sum|awk '{print $1}'`
    changeversion $VERSIONLOG
    echo '<线上>'` date "+%Y-%m-%d %H:%M:%S"` " $COMMITID" " $MESSAGE" >> $VERSIONLOG
    mv "$ROOTDIR""/execute" "$ROOTDIR""/version/""$COMMITID"
    if [ "$?" == "1" ]; then
        echo "[error] `tail -n 1 $ERRORLOG`"
        exit 0
    fi
    echo '[ok] commit completed!'
}

# Show the version log
log(){
    awk '{if(NR==1){ print $0 }else{ print $0 } }' $VERSIONLOG | sort -r
}

push(){
    var=$SERVICEADDRESS
    var=${var//,/ } # replace the commas in var with spaces
    for element in $var
    do
        if [ ! -n "$1" ]; then
            rsync --delete -avzh --exclude ".git" -e ssh "$ROOTDIR""/execute/" $element 2>>$ERRORLOG 1>/dev/null
        else
            rsync -avzh --delete --exclude ".git" -e ssh "$ROOTDIR""/version/""$1/" $SERVICEADDRESS 2>>$ERRORLOG 1>$ACCESSLOG
        fi
        if [ "$?" == "1" ]; then
            echo "[error] `tail -n 1 $ERRORLOG`"
            exit 0
        fi
    done
}

# Roll back to a previous version
rollback(){
    if [ ! -n "$1" ]; then
        echo "[error] you should choose which version you want to rollback!"
        exit 0
    fi
    NUMBER=`awk -v COMMITID=$1 '{ if( $3 == COMMITID || $4 == COMMITID) print FNR }' $VERSIONLOG`
    if [ ! -n "$NUMBER" ]; then
        echo "[error] cannot find this version! use [log] param to look for it!"
        exit 0
    fi
    if [ ! -d "$ROOTDIR""/version/""$1" ]; then
        echo "[error] cannot find backup file!"
        exit 0
    fi
    push $1
    awk -v number=$NUMBER '{ if(NR==number){gsub(/<线上>/,"",$1);print "<线上>"$0 }else{gsub(/<线上>/,"",$1);print $0}}' $VERSIONLOG > version.bak
    mv version.bak $VERSIONLOG
}

# Reset the live marker in the version log
changeversion(){
    sed 's/<线上>//' $VERSIONLOG > version.bak
    mv version.bak $VERSIONLOG
}

# Clean up historical versions
clean(){
    sed "s/\\n//g" $VERSIONLOG > /dev/null
    if [ ! -n "$1" ]; then
        NUMBER=`cat $VERSIONLOG | wc -l`
        awk -v num="$NUMBER" '{ if(NR > num-4){ print $0 } }' $VERSIONLOG > version.bak
        RMDIR=`awk -v num="$NUMBER" '{ if(NR < num-4){ print $3" " } }' $VERSIONLOG `
        for element in $RMDIR
        do
            rm -rf "$ROOTDIR""/version/""$element"
        done
    else
        NUMBER=$1
        awk -v number="$NUMBER" '{ if($3 != number){ print $0 } }' $VERSIONLOG > version.bak
        RMDIR=`awk -v number="$NUMBER" '{ if( $3 == number ){ print $3 } }' $VERSIONLOG`
        rm -rf "$ROOTDIR""/version/""$RMDIR"
    fi
    mv version.bak $VERSIONLOG
}
zhangsheng1992/push
push/function.sh
Shell
apache-2.0
4,365
# ----------------------------------------------------------------------------
#
# Package        : loopback-explorer
# Version        : 4.2.0
# Source repo    : https://github.com/strongloop/loopback-explorer.git
# Tested on      : ubuntu_16.04
# Script License : Apache License, Version 2 or later
# Maintainer     : Atul Sowani <[email protected]>
#
# Disclaimer: This script has been tested in non-root mode on given
# ==========  platform using the mentioned version of the package.
#             It may not work as expected with newer versions of the
#             package and/or distribution. In such case, please
#             contact "Maintainer" of this script.
#
# ----------------------------------------------------------------------------

# Install dependencies.
sudo apt-get update -y
sudo apt-get install -y build-essential npm wget git

WDIR=`pwd`

# Build and install node.
cd $WDIR
wget https://nodejs.org/dist/v4.2.3/node-v4.2.3.tar.gz
tar -xzf node-v4.2.3.tar.gz
cd node-v4.2.3
./configure
make
sudo make install

# Clone and build source code.
cd $WDIR
git clone https://github.com/strongloop/loopback-explorer.git
cd loopback-explorer
npm install && npm test
ppc64le/build-scripts
l/loopback-explorer/loopback-explorer_ubuntu_16.04.sh
Shell
apache-2.0
1,156
#!/bin/bash

## Try and make the bash work safely
# Terminate on uninitialised variables
set -u
# Terminate on error
set -e
# Terminate if any part of a pipeline fails
set -o pipefail
# Uncomment for debugging
# set -x

PORT=10018
# PORT=8098

BUCKET=$1
INDEXNAME=$2
INDEXVAL=$3

echo "getting from ${BUCKET} on port ${PORT} with an index ${INDEXVAL} in index ${INDEXNAME}"

URL="http://127.0.0.1:${PORT}/buckets/${BUCKET}/index/${INDEXNAME}_bin/${INDEXVAL}"
CMD="curl -v ${URL}"
echo ${CMD}
eval ${CMD}
echo ""
gordonguthrie/bits
priv/get_value_with_index.sh
Shell
apache-2.0
509
#!/bin/bash
#
# Copyright 2017 StreamSets Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
set -x
/bin/pwd

BASE_DIR=.
TARGET_DIR=./target
DOWNLOAD_DIR=${BASE_DIR}/thirdparty

download() {
  url=$1;
  finalName=$TARGET_DIR/$2
  tarName=$(basename $url)
  sparkTar=${DOWNLOAD_DIR}/${tarName}
  rm -rf $BASE_DIR/$finalName
  if [[ ! -f $sparkTar ]]; then
    curl -Sso $DOWNLOAD_DIR/$tarName $url
  fi
  tar -zxf $DOWNLOAD_DIR/$tarName -C $BASE_DIR
  mv $BASE_DIR/spark-1.3.1-bin-hadoop-2.5.0 $BASE_DIR/$finalName
}

mkdir -p $DOWNLOAD_DIR
download "https://s3-us-west-2.amazonaws.com/streamsets-public/thirdparty/spark-1.3.1-bin-hadoop-2.5.0.tar.gz" "spark"
z123/datacollector
miniIT/src/test/resources/download.sh
Shell
apache-2.0
1,150
#!/usr/bin/env bash

# Copyright The containerd Authors.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Copyright 2020 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -o errexit
set -o nounset
set -o pipefail

export DOCKER_CLI_EXPERIMENTAL=enabled

# Expected builder output
#
# Name:   containerd-buildkit-multiarch
# Driver: docker-container
#
# Nodes:
# Name:      containerd-buildkit-multiarch0
# Endpoint:  unix:///var/run/docker.sock
# Status:    running
# Platforms: linux/amd64, linux/arm64, linux/riscv64, linux/ppc64le, linux/s390x, linux/386, linux/arm/v7, linux/arm/v6
current_builder="$(docker buildx inspect)"

# We can skip setup if the current builder already has multi-arch
# AND if it isn't the "docker" driver, which doesn't work
#
# From https://docs.docker.com/buildx/working-with-buildx/#build-with-buildx:
# "You can run Buildx in different configurations that are exposed through a
# driver concept. Currently, Docker supports a “docker” driver that uses the
# BuildKit library bundled into the docker daemon binary, and a
# “docker-container” driver that automatically launches BuildKit inside a
# Docker container.
#
# The user experience of using Buildx is very similar across drivers.
# However, there are some features that are not currently supported by the
# “docker” driver, because the BuildKit library which is bundled into docker
# daemon uses a different storage component. In contrast, all images built with
# the “docker” driver are automatically added to the “docker images” view by
# default, whereas when using other drivers, the method for outputting an image
# needs to be selected with --output."
if ! grep -q "^Driver: docker$" <<<"${current_builder}" \
  && grep -q "linux/amd64" <<<"${current_builder}" \
  && grep -q "linux/arm" <<<"${current_builder}" \
  && grep -q "linux/arm64" <<<"${current_builder}" \
  && grep -q "linux/ppc64le" <<<"${current_builder}" \
  && grep -q "linux/s390x" <<<"${current_builder}"; then
  exit 0
fi

# Ensure qemu is in binfmt_misc
# NOTE: Please always pin this to a digest for predictability/auditability
# Last updated: 08/21/2020
if [ "$(uname)" == 'Linux' ]; then
  docker run --rm --privileged multiarch/qemu-user-static@sha256:c772ee1965aa0be9915ee1b018a0dd92ea361b4fa1bcab5bbc033517749b2af4 --reset -p yes
fi

# Ensure we use a builder that can leverage it (the default on linux will not)
docker buildx rm containerd-buildkit-multiarch || true
docker buildx create --use --name=containerd-buildkit-multiarch
docker buildx inspect --bootstrap
vdemeester/containerd
test/init-buildx.sh
Shell
apache-2.0
3,598
#!/bin/sh -xe

# Download and run Boulder instance for integration testing
export GOPATH="${GOPATH:-/tmp/go}"

# $ go get github.com/letsencrypt/boulder
# package github.com/letsencrypt/boulder
# imports github.com/letsencrypt/boulder
# imports github.com/letsencrypt/boulder: no buildable Go source files in /tmp/go/src/github.com/letsencrypt/boulder
go get -d github.com/letsencrypt/boulder/cmd/boulder

cd $GOPATH/src/github.com/letsencrypt/boulder
./start.py &

# Hopefully start.py bootstraps before integration test is started...
tdfischer/lets-encrypt-preview
tests/boulder-start.sh
Shell
apache-2.0
551
#!/usr/bin/env bash
#
# Copyright 2017 Toyota Research Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
set -euo pipefail

display_usage() {
    echo "usage: $0 <new_version_string>"
}

if [ $# -ne 1 ]
then
    display_usage
    exit 1
fi

SCRIPT_PATH="$(dirname "$0")"
ROOT_PATH="${SCRIPT_PATH}"/..
VERSION=$1
DEB=bazel_${VERSION}-linux-x86_64.deb
URL=https://github.com/bazelbuild/bazel/releases/download/${VERSION}/${DEB}

wget "${URL}" -O "${DEB}"
SHA256=$(shasum -a 256 "${DEB}" | cut -d ' ' -f 1)
rm "${DEB}"

sed -i "s/BAZEL_VERSION=.*/BAZEL_VERSION=${VERSION}/g" $(find "${ROOT_PATH}"/setup -type f)
sed -i "s/BAZEL_SHA256=.*/BAZEL_SHA256=${SHA256}/g" $(find "${ROOT_PATH}"/setup -type f)
dreal/dreal4
scripts/update_bazel_version.sh
Shell
apache-2.0
1,213
#!/bin/sh
# Continuous integration script for Travis

# Build the library and install it.
echo "## Building and installing libs2..."
rm -rf build
mkdir build
cd build
cmake -DCMAKE_INSTALL_PREFIX=./install ../geometry
make -j3
make install
if [ "${TRAVIS_OS_NAME}" = "linux" ]; then
  sudo ldconfig -v | grep libs2
fi

# Build and run the C++ tests
echo "## Building and running the C++ tests..."
mkdir tests
cd tests
cmake ../../geometry/tests
make -j3
./tests

exit 0

if [ "${TRAVIS_OS_NAME}" = "linux" ]; then
  # We really want to use the system version of Python. Travis'
  # has broken distutils paths, and assumes a virtualenv.
  PATH="/usr/bin:${PATH}"
  which python2.7
  python2.7 -V
fi

# Build and install the Python bindings
echo "## Building and installing the Python bindings..."
cd ../
mkdir python
cd python
cmake ../../geometry/python
make VERBOSE=1
sudo make install

exit 0

# Run the Python tests
echo "## Running the Python tests..."
python2.7 -v -c 'import s2'
python2.7 test.py || exit 1
simonzhangsm/s2-geometry-library
travis.sh
Shell
apache-2.0
1,012
#!/usr/bin/env bash

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# resolve links - $0 may be a softlink
project=mumak
HADOOP_VERSION=

this="$0"
while [ -h "$this" ]; do
  ls=`ls -ld "$this"`
  link=`expr "$ls" : '.*-> \(.*\)$'`
  if expr "$link" : '.*/.*' > /dev/null; then
    this="$link"
  else
    this=`dirname "$this"`/"$link"
  fi
done

# convert relative path to absolute path
bin=`dirname "$this"`
bin=`cd "$bin"; pwd`
script=`basename $bin`
this="$bin/$script"
MUMAK_HOME=`dirname $bin`

if [ -d "$MUMAK_HOME/../../../build/classes" ]; then
  HADOOP_HOME=`cd $MUMAK_HOME/../../.. ; pwd`
  IN_RELEASE=0
else
  HADOOP_HOME=`cd $MUMAK_HOME/../.. ; pwd`
  IN_RELEASE=1
  MAPRED_JAR=$HADOOP_HOME/hadoop-mapred-${HADOOP_VERSION}.jar
  if [ ! -e $MAPRED_JAR ]; then
    echo "Error: Cannot find $MAPRED_JAR."
    exit 1
  fi
fi

# parse command line option
if [ $# -gt 1 ]
then
  if [ "--config" = "$1" ]
  then
    shift
    confdir=$1
    shift
    HADOOP_CONF_DIR=$confdir
  fi
fi

# Allow alternate conf dir location.
HADOOP_CONF_DIR="${HADOOP_CONF_DIR:-$HADOOP_HOME/conf}"

if [ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]; then
  . "${HADOOP_CONF_DIR}/hadoop-env.sh"
fi

# Define HADOOP_CORE_HOME
if [ "$HADOOP_CORE_HOME" = "" ]; then
  HADOOP_CORE_HOME=$HADOOP_HOME
fi

if [ "$JAVA_HOME" = "" ]; then
  echo "Error: JAVA_HOME is not set."
  exit 1
fi

JAVA=$JAVA_HOME/bin/java
JAVA_HEAP_MAX=-Xmx1200m

# Setting classpath
# Mumak needs to have the following classes and resources in place (roughly in this
# order):
# Mumak's conf directory (log4j.properties), must override Hadoop's conf dir.
# Hadoop's conf directory
# Mumak classes (including aspectj-generated classes) (or mumak jar), must
# override MapReduce project classes or jar.
# MapReduce project classes (mapred jar)
# MapReduce webapps files (included in mapred jar)
# MapReduce tools classes (or mapred-tools jar)
# Hadoop Common jar
# Hadoop Common test jar
# Depending 3rd party jars
CLASSPATH=${MUMAK_HOME}/conf:${HADOOP_HOME}/conf:$JAVA_HOME/lib/tools.jar

if [ $IN_RELEASE = 0 ]; then
  CLASSPATH=${CLASSPATH}:${HADOOP_HOME}/build/contrib/${project}/classes
  CLASSPATH=${CLASSPATH}:${HADOOP_HOME}/build/classes
  CLASSPATH=${CLASSPATH}:${HADOOP_HOME}/build
  CLASSPATH=${CLASSPATH}:${HADOOP_HOME}/build/tools
  # add libs to CLASSPATH
  for f in $HADOOP_HOME/lib/hadoop-core-*.jar; do
    CLASSPATH=${CLASSPATH}:$f;
  done
  for f in $HADOOP_HOME/build/ivy/lib/${project}/common/*.jar; do
    CLASSPATH=${CLASSPATH}:$f;
  done
else
  CLASSPATH=${CLASSPATH}:$HADOOP_HOME;
  for f in $HADOOP_HOME/lib/*.jar; do
    CLASSPATH=${CLASSPATH}:$f;
  done
  CLASSPATH=${CLASSPATH}:$MUMAK_HOME/hadoop-${HADOOP_VERSION}-${project}.jar
  CLASSPATH=${CLASSPATH}:$HADOOP_HOME/hadoop-mapred-${HADOOP_VERSION}.jar
  CLASSPATH=${CLASSPATH}:$HADOOP_HOME/hadoop-mapred-tools-${HADOOP_VERSION}.jar
fi

# check envvars which might override default args
if [ "$HADOOP_HEAPSIZE" != "" ]; then
  #echo "run with heapsize $HADOOP_HEAPSIZE"
  JAVA_HEAP_MAX="-Xmx""$HADOOP_HEAPSIZE""m"
  #echo $JAVA_HEAP_MAX
fi

# default log directory & file
if [ "$HADOOP_LOG_DIR" = "" ]; then
  HADOOP_LOG_DIR="$HADOOP_HOME/logs"
fi

# default policy file for service-level authorization
if [ "$HADOOP_POLICYFILE" = "" ]; then
  HADOOP_POLICYFILE="hadoop-policy.xml"
fi

# setup 'java.library.path' for native-hadoop code if necessary
JAVA_LIBRARY_PATH=''
if [ -d "${HADOOP_CORE_HOME}/build/native" -o -d "${HADOOP_CORE_HOME}/lib/native" ]; then
  JAVA_PLATFORM=`CLASSPATH=${CLASSPATH} ${JAVA} -Xmx32m org.apache.hadoop.util.PlatformName | sed -e "s/ /_/g"`

  if [ -d "$HADOOP_CORE_HOME/build/native" ]; then
    JAVA_LIBRARY_PATH=${HADOOP_CORE_HOME}/build/native/${JAVA_PLATFORM}/lib
  fi

  if [ -d "${HADOOP_CORE_HOME}/lib/native" ]; then
    if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
      JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:${HADOOP_CORE_HOME}/lib/native/${JAVA_PLATFORM}
    else
      JAVA_LIBRARY_PATH=${HADOOP_CORE_HOME}/lib/native/${JAVA_PLATFORM}
    fi
  fi
fi

HADOOP_OPTS="$HADOOP_OPTS -Dmumak.log.dir=$HADOOP_LOG_DIR"
HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.log.dir=$HADOOP_LOG_DIR"
HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.tmp.dir=$HADOOP_LOG_DIR/tmp"

if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
  HADOOP_OPTS="$HADOOP_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
fi
HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.policy.file=$HADOOP_POLICYFILE"

function print_usage(){
  echo "Usage: $script trace.json topology.json"
}

if [ $# != 2 ]; then
  print_usage
  exit
fi

exec "$JAVA" -enableassertions $JAVA_HEAP_MAX $HADOOP_OPTS -classpath "$CLASSPATH" org.apache.hadoop.mapred.SimulatorEngine -conf=${MUMAK_HOME}/conf/${project}.xml "$@"
apache/hadoop-mapreduce
src/contrib/mumak/bin/mumak.sh
Shell
apache-2.0
5,427
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
    export KAFKA_HEAP_OPTS="-Xmx512M -Xms512M"
fi
exec $(dirname $0)/kafka-run-class.sh kafka.tools.SimpleConsumerPerformance $@
stealthly/kafka
bin/kafka-simple-consumer-perf-test.sh
Shell
apache-2.0
962
#!/bin/bash
#ident: correct-crushmap.sh, ver 0.1, 2017/11/23. (C)2017,Red Hat Inc.,[email protected]
# NEEDS: create-crushmap.sh

CMDPATH=`pwd`
COMPILED_OUT=/tmp/compiled_out.$$
DECOMPILED_OUT=/tmp/decompiled_out.$$
DECOMPILED_IN=/tmp/decompiled_in.$$
COMPILED_IN=/tmp/compiled_in.$$

if [ -d $COMPILED_OUT -o -d $COMPILED_IN -o -d $DECOMPILED_OUT -o -d $DECOMPILED_IN ]; then
   echo "$0 - FATAL 1: one of the specified filenames to use is a directory"
   exit 1
fi

if [ -f $COMPILED_OUT -o -f $COMPILED_IN -o -f $DECOMPILED_OUT -o -f $DECOMPILED_IN ]; then
   echo "$0 - FATAL 5: one of the specified filenames already exists"
   exit 1
fi

if [ ! -x create-crushmap.sh ]; then
   echo "$0 - FATAL 10: need create-crushmap.sh in local path with executable rights."
   exit 1
fi

# check whether we are root
if [ `id|awk '{print $1}'|cut -d= -f2|cut -d'(' -f1` -ne 0 ]; then
   echo "$0 - FATAL 20: need to be root on host to execute ceph command"
   exit 2
fi

ceph -s 2>/dev/null >/dev/null
if [ $? -ne 0 ]; then
   echo "$0 - ERROR 30: unable to use ceph command"
   exit 3
fi

# set tunables
ceph osd crush tunables optimal
if [ $? -ne 0 ]; then
   echo "$0 - ERROR 40: failed to apply optimal tunings"
   exit 2
else
   echo "WAITING 20 seconds ..."
   sleep 20
fi

# get crushmap
ceph osd getcrushmap -o $COMPILED_OUT
if [ $? -ne 0 ]; then
   echo "$0 - ERROR 50: failed to get crushmap"
   exit 2
fi

# transfer into readable form
crushtool -d $COMPILED_OUT -o $DECOMPILED_OUT
if [ $? -ne 0 ]; then
   echo "$0 - ERROR 60: failed to generate human readable form of crushmap"
   exit 2
fi

# create new crushmap from existing
$CMDPATH/create-crushmap.sh $DECOMPILED_OUT $DECOMPILED_IN
if [ $? -ne 0 ]; then
   echo "$0 - ERROR 70: failed to create new 2 DC crushmap from file $DECOMPILED_OUT."
   exit 2
fi

# compile new crushmap
crushtool -c $DECOMPILED_IN -o $COMPILED_IN
if [ $? -ne 0 ]; then
   echo "$0 - ERROR 80: failed to compile crushmap"
   exit 2
fi

# check crushmap
crushtool -i $COMPILED_IN --test 2>/dev/null >/dev/null
if [ $? -ne 0 ]; then
   echo "$0 - ERROR 90: failed to test crushmap successfully"
   exit 2
fi

# insert new crushmap
ceph osd setcrushmap -i $COMPILED_IN
if [ $? -ne 0 ]; then
   echo "$0 - ERROR 100: failed to inject new crushmap"
   exit 2
fi

# Now, everything is (mostly) done. Wait for the cluster to become stable.
echo "================================================================"
echo "NEW crushmap inserted - now WAIT UNTIL cluster is HEALTHY again."
echo ""
echo "   Stop output of 'ceph -w' command with ^C."
echo "================================================================"
echo ""
echo ""
sleep 10
ceph -w
Joint-SddC-PoC-Team/hailstorm
ceph/sddc/correct-crushmap.sh
Shell
apache-2.0
2,707