| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 2–1.05M | stringlengths 5–110 | stringlengths 3–922 | stringclasses 1 value | stringclasses 15 values | int64 2–1.05M |
#!/bin/bash
#
# Copyright 2014 Gaurav Kumar. Apache 2.0
# Recipe for Fisher/Callhome-Spanish
# Made to integrate KALDI with JOSHUA for end-to-end ASR and SMT
. cmd.sh
. path.sh
mfccdir=`pwd`/mfcc
set -e
stage=1
# call the next line with the directory where the Spanish Fisher data is
# (the values below are just an example). This should contain
# subdirectories named as follows:
# DISC1 DISC2
sfisher_speech=/export/a16/gkumar/corpora/LDC2010S01
sfisher_transcripts=/export/a16/gkumar/corpora/LDC2010T04
spanish_lexicon=/export/a16/gkumar/corpora/LDC96L16
split=local/splits/split_fisher
callhome_speech=/export/a16/gkumar/corpora/LDC96S35
callhome_transcripts=/export/a16/gkumar/corpora/LDC96T17
split_callhome=local/splits/split_callhome
if [ $stage -lt 1 ]; then
local/fsp_data_prep.sh $sfisher_speech $sfisher_transcripts
local/callhome_data_prep.sh $callhome_speech $callhome_transcripts
# The lexicon is created using the LDC spanish lexicon, the words from the
# fisher spanish corpus. Additional (most frequent) words are added from the
# ES gigaword corpus to bring the total to 64k words. The ES frequency sorted
# wordlist is downloaded if it is not available.
local/fsp_prepare_dict.sh $spanish_lexicon
# Added c, j, v to the non-silence phones manually
utils/prepare_lang.sh data/local/dict "<unk>" data/local/lang data/lang
# Make sure that you do not use your test and your dev sets to train the LM
# Some form of cross-validation is possible where you decode your dev set based on an
# LM that is trained on everything but that conversation
# When in doubt about what your data partitions should be, use local/fsp_ideal_data_partitions.pl
# to get the numbers. Depending on your needs, you might have to change the size of
# the splits within that file. The default partitions are based on the Kaldi + Joshua
# requirements, which means that I have very large dev and test sets
local/fsp_train_lms.sh $split
local/fsp_create_test_lang.sh
utils/fix_data_dir.sh data/local/data/train_all
steps/make_mfcc.sh --nj 20 --cmd "$train_cmd" data/local/data/train_all exp/make_mfcc/train_all $mfccdir || exit 1;
utils/fix_data_dir.sh data/local/data/train_all
utils/validate_data_dir.sh data/local/data/train_all
cp -r data/local/data/train_all data/train_all
# For the CALLHOME corpus
utils/fix_data_dir.sh data/local/data/callhome_train_all
steps/make_mfcc.sh --nj 20 --cmd "$train_cmd" data/local/data/callhome_train_all exp/make_mfcc/callhome_train_all $mfccdir || exit 1;
utils/fix_data_dir.sh data/local/data/callhome_train_all
utils/validate_data_dir.sh data/local/data/callhome_train_all
cp -r data/local/data/callhome_train_all data/callhome_train_all
# Creating data partitions for the pipeline
# We need datasets for both the ASR and SMT system
# We have 257455 utterances left, so the partitions are roughly as follows
# ASR Train : 100k utterances
# ASR Tune : 17455 utterances
# ASR Eval : 20k utterances
# MT Train : 100k utterances
# MT Tune : Same as the ASR eval set (Use the lattices from here)
# MT Eval : 20k utterances
# The dev and the test sets need to be carefully chosen so that there is no conversation/speaker
# overlap. This has been set up, and the script local/fsp_ideal_data_partitions provides the numbers that are needed below.
# As noted above, the LM has not been trained on the dev and the test sets.
#utils/subset_data_dir.sh --first data/train_all 158126 data/dev_and_test
#utils/subset_data_dir.sh --first data/dev_and_test 37814 data/asr_dev_and_test
#utils/subset_data_dir.sh --last data/dev_and_test 120312 data/mt_train_and_test
#utils/subset_data_dir.sh --first data/asr_dev_and_test 17662 data/dev
#utils/subset_data_dir.sh --last data/asr_dev_and_test 20152 data/test
#utils/subset_data_dir.sh --first data/mt_train_and_test 100238 data/mt_train
#utils/subset_data_dir.sh --last data/mt_train_and_test 20074 data/mt_test
#rm -r data/dev_and_test
#rm -r data/asr_dev_and_test
#rm -r data/mt_train_and_test
local/create_splits.sh $split
local/callhome_create_splits.sh $split_callhome
fi
if [ $stage -lt 2 ]; then
# Now compute CMVN stats for the train, dev and test subsets
steps/compute_cmvn_stats.sh data/dev exp/make_mfcc/dev $mfccdir
steps/compute_cmvn_stats.sh data/test exp/make_mfcc/test $mfccdir
steps/compute_cmvn_stats.sh data/dev2 exp/make_mfcc/dev2 $mfccdir
#steps/compute_cmvn_stats.sh data/mt_train exp/make_mfcc/mt_train $mfccdir
#steps/compute_cmvn_stats.sh data/mt_test exp/make_mfcc/mt_test $mfccdir
#n=$[`cat data/train_all/segments | wc -l` - 158126]
#utils/subset_data_dir.sh --last data/train_all $n data/train
steps/compute_cmvn_stats.sh data/train exp/make_mfcc/train $mfccdir
steps/compute_cmvn_stats.sh data/callhome_dev exp/make_mfcc/callhome_dev $mfccdir
steps/compute_cmvn_stats.sh data/callhome_test exp/make_mfcc/callhome_test $mfccdir
steps/compute_cmvn_stats.sh data/callhome_train exp/make_mfcc/callhome_train $mfccdir
# Again from Dan's recipe : Reduced monophone training data
# Now-- there are 1.6 million utterances, and we want to start the monophone training
# on relatively short utterances (easier to align), but not only the very shortest
# ones (mostly uh-huh). So take the 100k shortest ones, and then take 10k random
# utterances from those.
utils/subset_data_dir.sh --shortest data/train 90000 data/train_100kshort
utils/subset_data_dir.sh data/train_100kshort 10000 data/train_10k
local/remove_dup_utts.sh 100 data/train_10k data/train_10k_nodup
utils/subset_data_dir.sh --speakers data/train 30000 data/train_30k
utils/subset_data_dir.sh --speakers data/train 90000 data/train_100k
fi
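# Monophone training on the small no-dup subset, then delta-feature triphone
# training (tri1, tri2) on the 30k subset; dev-set decoding runs in background subshells.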
steps/train_mono.sh --nj 10 --cmd "$train_cmd" \
data/train_10k_nodup data/lang exp/mono0a
steps/align_si.sh --nj 30 --cmd "$train_cmd" \
data/train_30k data/lang exp/mono0a exp/mono0a_ali || exit 1;
steps/train_deltas.sh --cmd "$train_cmd" \
2500 20000 data/train_30k data/lang exp/mono0a_ali exp/tri1 || exit 1;
(utils/mkgraph.sh data/lang_test exp/tri1 exp/tri1/graph
steps/decode.sh --nj 25 --cmd "$decode_cmd" --config conf/decode.config \
exp/tri1/graph data/dev exp/tri1/decode_dev)&
steps/align_si.sh --nj 30 --cmd "$train_cmd" \
data/train_30k data/lang exp/tri1 exp/tri1_ali || exit 1;
steps/train_deltas.sh --cmd "$train_cmd" \
2500 20000 data/train_30k data/lang exp/tri1_ali exp/tri2 || exit 1;
(
utils/mkgraph.sh data/lang_test exp/tri2 exp/tri2/graph || exit 1;
steps/decode.sh --nj 25 --cmd "$decode_cmd" --config conf/decode.config \
exp/tri2/graph data/dev exp/tri2/decode_dev || exit 1;
)&
steps/align_si.sh --nj 30 --cmd "$train_cmd" \
data/train_100k data/lang exp/tri2 exp/tri2_ali || exit 1;
# Train tri3a, which is LDA+MLLT, on 100k data.
steps/train_lda_mllt.sh --cmd "$train_cmd" \
--splice-opts "--left-context=3 --right-context=3" \
3000 40000 data/train_100k data/lang exp/tri2_ali exp/tri3a || exit 1;
(
utils/mkgraph.sh data/lang_test exp/tri3a exp/tri3a/graph || exit 1;
steps/decode.sh --nj 25 --cmd "$decode_cmd" --config conf/decode.config \
exp/tri3a/graph data/dev exp/tri3a/decode_dev || exit 1;
)&
# Next we'll use fMLLR and train with SAT (i.e. on
# fMLLR features)
steps/align_fmllr.sh --nj 30 --cmd "$train_cmd" \
data/train_100k data/lang exp/tri3a exp/tri3a_ali || exit 1;
steps/train_sat.sh --cmd "$train_cmd" \
4000 60000 data/train_100k data/lang exp/tri3a_ali exp/tri4a || exit 1;
(
utils/mkgraph.sh data/lang_test exp/tri4a exp/tri4a/graph
steps/decode_fmllr.sh --nj 25 --cmd "$decode_cmd" --config conf/decode.config \
exp/tri4a/graph data/dev exp/tri4a/decode_dev
)&
steps/align_fmllr.sh --nj 30 --cmd "$train_cmd" \
data/train data/lang exp/tri4a exp/tri4a_ali || exit 1;
# Reduce the number of gaussians
steps/train_sat.sh --cmd "$train_cmd" \
5000 120000 data/train data/lang exp/tri4a_ali exp/tri5a || exit 1;
(
utils/mkgraph.sh data/lang_test exp/tri5a exp/tri5a/graph
steps/decode_fmllr.sh --nj 25 --cmd "$decode_cmd" --config conf/decode.config \
exp/tri5a/graph data/dev exp/tri5a/decode_dev
)&
steps/decode_fmllr.sh --nj 25 --cmd "$decode_cmd" --config conf/decode.config \
exp/tri5a/graph data/test exp/tri5a/decode_test
# Decode CALLHOME
(
steps/decode_fmllr.sh --nj 25 --cmd "$decode_cmd" --config conf/decode.config \
exp/tri5a/graph data/callhome_test exp/tri5a/decode_callhome_test
steps/decode_fmllr.sh --nj 25 --cmd "$decode_cmd" --config conf/decode.config \
exp/tri5a/graph data/callhome_dev exp/tri5a/decode_callhome_dev
steps/decode_fmllr.sh --nj 25 --cmd "$decode_cmd" --config conf/decode.config \
exp/tri5a/graph data/callhome_train exp/tri5a/decode_callhome_train
) &
steps/align_fmllr.sh \
--boost-silence 0.5 --nj 32 --cmd "$train_cmd" \
data/train data/lang exp/tri5a exp/tri5a_ali
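# SGMM stage: estimate a universal background model (UBM) on the SAT alignments,
# then train an SGMM2 system on top of it and build its decoding graph.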
steps/train_ubm.sh \
--cmd "$train_cmd" 750 \
data/train data/lang exp/tri5a_ali exp/ubm5
steps/train_sgmm2.sh \
--cmd "$train_cmd" 5000 18000 \
data/train data/lang exp/tri5a_ali exp/ubm5/final.ubm exp/sgmm5
utils/mkgraph.sh data/lang_test exp/sgmm5 exp/sgmm5/graph
(
steps/decode_sgmm2.sh --nj 13 --cmd "$decode_cmd" --num-threads 5 \
--config conf/decode.config --scoring-opts "--min-lmwt 8 --max-lmwt 16" --transform-dir exp/tri5a/decode_dev \
exp/sgmm5/graph data/dev exp/sgmm5/decode_dev
)&
steps/align_sgmm2.sh \
--nj 32 --cmd "$train_cmd" --transform-dir exp/tri5a_ali \
--use-graphs true --use-gselect true \
data/train data/lang exp/sgmm5 exp/sgmm5_ali
steps/make_denlats_sgmm2.sh \
--nj 32 --sub-split 32 --num-threads 4 \
--beam 10.0 --lattice-beam 6 --cmd "$decode_cmd" --transform-dir exp/tri5a_ali \
data/train data/lang exp/sgmm5_ali exp/sgmm5_denlats
steps/train_mmi_sgmm2.sh \
--cmd "$train_cmd" --drop-frames true --transform-dir exp/tri5a_ali --boost 0.1 \
data/train data/lang exp/sgmm5_ali exp/sgmm5_denlats \
exp/sgmm5_mmi_b0.1
(
utils/mkgraph.sh data/lang_test exp/tri5a exp/tri5a/graph
steps/decode_fmllr_extra.sh --nj 13 --cmd "$decode_cmd" --num-threads 4 --parallel-opts " -pe smp 4" \
--config conf/decode.config --scoring-opts "--min-lmwt 8 --max-lmwt 12"\
exp/tri5a/graph data/dev exp/tri5a/decode_dev
utils/mkgraph.sh data/lang_test exp/sgmm5 exp/sgmm5/graph
steps/decode_sgmm2.sh --nj 13 --cmd "$decode_cmd" --num-threads 5 \
--config conf/decode.config --scoring-opts "--min-lmwt 8 --max-lmwt 16" --transform-dir exp/tri5a/decode_dev \
exp/sgmm5/graph data/dev exp/sgmm5/decode_dev
for iter in 1 2 3 4; do
decode=exp/sgmm5_mmi_b0.1/decode_dev_it$iter
mkdir -p $decode
steps/decode_sgmm2_rescore.sh \
--cmd "$decode_cmd" --iter $iter --transform-dir exp/tri5a/decode_dev \
data/lang_test data/dev/ exp/sgmm5/decode_dev $decode
done
) &
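# nnet2 DNN stage: choose the CPU or GPU parallel options below, then train a
# p-norm ensemble network on the tri5a (fMLLR/SAT) alignments and decode the dev set.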
dnn_cpu_parallel_opts=(--minibatch-size 128 --max-change 10 --num-jobs-nnet 8 --num-threads 16 \
--parallel-opts "-pe smp 16" --cmd "queue.pl -l arch=*64 --mem 2G")
dnn_gpu_parallel_opts=(--minibatch-size 512 --max-change 40 --num-jobs-nnet 4 --num-threads 1 \
--parallel-opts "-l gpu=1" --cmd "queue.pl -l arch=*64 --mem 2G")
steps/nnet2/train_pnorm_ensemble.sh \
--mix-up 5000 --initial-learning-rate 0.008 --final-learning-rate 0.0008\
--num-hidden-layers 4 --pnorm-input-dim 2000 --pnorm-output-dim 200\
--cmd "$train_cmd" \
"${dnn_gpu_parallel_opts[@]}" \
--ensemble-size 4 --initial-beta 0.1 --final-beta 5 \
data/train data/lang exp/tri5a_ali exp/tri6a_dnn
(
steps/nnet2/decode.sh --nj 13 --cmd "$decode_cmd" --num-threads 4 --parallel-opts " -pe smp 4" \
--scoring-opts "--min-lmwt 8 --max-lmwt 16" --transform-dir exp/tri5a/decode_dev exp/tri5a/graph data/dev exp/tri6a_dnn/decode_dev
) &
wait
exit 0;
| michellemorales/OpenMM | kaldi/egs/fisher_callhome_spanish/s5/run.sh | Shell | gpl-2.0 | 11,845 |
#!/bin/bash
#CXX='/usr/bin/g++'
./configure --enable-sse --enable-ffmpeg --enable-mp3 --enable-sndfile
| claytonotey/sbsmsview | build/macosx.sh | Shell | gpl-2.0 | 105 |
#!/bin/bash
# Simple script to list the differences between two hugepagecapability results
# Print usage of command
usage() {
echo "diff-hugepagecapability.sh (c) Mel Gorman 2005"
echo
echo "Usage: diff-hugepagecapability.sh File1 File2"
echo " -h, --help Print this help message"
echo
exit
}
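# Example invocation (paths are illustrative only); a third result set may be
# passed to add an extra comparison column:
#   ./diff-hugepagecapability.sh kernel-a/log.txt kernel-b/log.txt [kernel-c/log.txt]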
FILE1=$1
FILE2=$2
FILE3=$3
# Parse command line arguments
ARGS=`getopt -o h --long help -n bench-hugepagecapability.sh -- "$@"`
# Cycle through arguments
eval set -- "$ARGS"
while true ; do
case "$1" in
-h|--help) usage;;
*) shift 1; break;;
esac
done
if [ "$FILE1" = "" ] || [ ! -e "$FILE1" ]; then
echo "File1 ($FILE1) does not exist or was not specified"
usage
fi
if [ -d "$FILE1" ]; then
if [ -d "$FILE1/hugetlb-capability" ]; then
FILE1=$FILE1/hugetlb-capability/log.txt
fi
fi
if [ "$FILE2" = "" ] || [ ! -e "$FILE2" ]; then
echo "File2 ($FILE2) does not exist or was not specified"
usage
fi
if [ -d "$FILE2" ]; then
if [ -d "$FILE2/hugetlb-capability" ]; then
FILE2=$FILE2/hugetlb-capability/log.txt
fi
fi
IFS="
"
PASS1_1=`grep "Number huge pages before pass 1" "$FILE1" | awk -F : '{print $2}' | tr -d " "`
PASS1_2=`grep "Number huge pages before pass 1" "$FILE2" | awk -F : '{print $2}' | tr -d " "`
BEFOREDD_1=`grep "Number huge pages at rest before dd of large file" "$FILE1" | awk -F : '{print $2}' | awk '{print $1}' | tr -d " "`
BEFOREDD_2=`grep "Number huge pages at rest before dd of large file" "$FILE2" | awk -F : '{print $2}' | awk '{print $1}' | tr -d " "`
AFTERDD_1=`grep "Number huge pages at rest after dd of large file" "$FILE1" | awk -F : '{print $2}' | awk '{print $1}' | tr -d " "`
AFTERDD_2=`grep "Number huge pages at rest after dd of large file" "$FILE2" | awk -F : '{print $2}' | awk '{print $1}' | tr -d " "`
NAME1=`head -5 "$FILE1" | grep ^Linux | awk '{print $3}'`
NAME2=`head -5 "$FILE2" | grep ^Linux | awk '{print $3}'`
WIDTH1=`echo $NAME1 | wc -c`
WIDTH2=`echo $NAME2 | wc -c`
if [ "$FILE3" != "" ]; then
PASS1_3=`grep "Number huge pages before pass 1" "$FILE3" | awk -F : '{print $2}' | tr -d " "`
BEFOREDD_3=`grep "Number huge pages at rest before dd of large file" "$FILE3" | awk -F : '{print $2}' | awk '{print $1}' | tr -d " "`
AFTERDD_3=`grep "Number huge pages at rest after dd of large file" "$FILE3" | awk -F : '{print $2}' | awk '{print $1}' | tr -d " "`
NAME3=`head -5 "$FILE3" | grep ^Linux | awk '{print $3}'`
WIDTH3=`echo $NAME3 | wc -c`
printf " %${WIDTH1}s %${WIDTH2}s %${WIDTH3}s\n" $NAME1 $NAME2 $NAME3
printf "During compile: %${WIDTH1}s %${WIDTH2}s %${WIDTH3}s\n" $PASS1_1 $PASS1_2 $PASS1_3
printf "At rest before dd of large file: %${WIDTH1}s %${WIDTH2}s %${WIDTH3}s\n" $BEFOREDD_1 $BEFOREDD_2 $BEFOREDD_3
printf "At rest after dd of large file: %${WIDTH1}s %${WIDTH2}s %${WIDTH3}s\n" $AFTERDD_1 $AFTERDD_2 $AFTERDD_3
else
printf " %${WIDTH1}s %${WIDTH2}s\n" $NAME1 $NAME2
printf "During compile: %${WIDTH1}s %${WIDTH2}s\n" $PASS1_1 $PASS1_2
printf "At rest before dd of large file: %${WIDTH1}s %${WIDTH2}s\n" $BEFOREDD_1 $BEFOREDD_2
printf "At rest after dd of large file: %${WIDTH1}s %${WIDTH2}s\n" $AFTERDD_1 $AFTERDD_2
fi
| kosaki/vmregress | bin/diff-hugepagecapability.sh | Shell | gpl-2.0 | 3,286 |
#
# $Id: build-checklanguage.sh,v 1.1 2015/02/05 22:25:27 harry Exp $
#
#
# Checks whether the language files are consistent, i.e. whether the keys of
# a language file $1 are also contained in the language file $2.
#
# {LANG_FILE_DIRECTORY}/language/${1}/${1}.${FILE}${EXTENTION}.ini
# {LANG_FILE_DIRECTORY}/language/${2}/${2}.${FILE}${EXTENTION}.ini
#
# @param $1 Language of the source file
# @param $2 Language to be checked
# @param $3 Extension: "" or sys
# Environment:
# $FAST
# LANG_FILE_NAME
# LANG_FILE_DIRECTORY
# ZIP_FILE_NAME
#
function checkLanguageFile(){
if [ "${FAST}" == "1" ]
then
echo "Ignore check language file"
return
fi
if [ -z "${LANG_FILE_NAME}" ]
then
local FILE=${ZIP_FILE_NAME}
else
local FILE=${LANG_FILE_NAME}
fi
local LANGUAGE_1=${1}
local LANGUAGE_2=${2}
local EXTENTION=${3}
if [ -n "${EXTENTION}" ]
then
EXTENTION=".${EXTENTION}"
fi
if [ -z "${LANG_FILE_DIRECTORY}" ]
then
LANG_FILE_DIRECTORY="."
fi
for dir in ${LANG_FILE_DIRECTORY}
do
FILE_1=${dir}/language/${LANGUAGE_1}/${LANGUAGE_1}.${FILE}${EXTENTION}.ini
FILE_2=${dir}/language/${LANGUAGE_2}/${LANGUAGE_2}.${FILE}${EXTENTION}.ini
if [ ! -r ${FILE_1} -o ! -r ${FILE_2} ]
then
check_exit_code 1 "Datei [${FILE_1}] oder [${FILE_2}] wurde nicht gefunden"
fi
echo " - [${FILE_2}] mit den Labels aus [${FILE_1}] prüfen"
local LABELS=$(cat ${FILE_1} | grep -v '^#' | grep '=' | grep -v ';' | grep '^[A-Z_][A-Z_][A-Z_]' | cut -d '=' -f 1)
local i
for i in $LABELS
do
grep -q "^${i}=" ${FILE_1}
if [ $? -ne 0 ]
then
echo "Not found label [${i}] in [${FILE_1}]"
ERROR_COUNTER=$((ERROR_COUNTER+1))
else
grep -q "^${i}=" ${FILE_2}
if [ $? -ne 0 ]
then
printf "Not found label %-40s in %s \n" "[${i}]" "[${FILE_2}]"
ERROR_COUNTER=$((ERROR_COUNTER+1))
fi
fi
done
done
}
function checkAllLanguageFiles(){
echo "* Prüfe die Sprachfiles auf Konsistenz."
local LANGUAGE_1="en-GB"
local LANGUAGE_2="de-DE"
checkLanguageFile ${LANGUAGE_1} ${LANGUAGE_2}
checkLanguageFile ${LANGUAGE_2} ${LANGUAGE_1}
checkLanguageFile ${LANGUAGE_1} ${LANGUAGE_2} "sys"
checkLanguageFile ${LANGUAGE_2} ${LANGUAGE_1} "sys"
if [ "${IGNORE_LANG_ERROR}" != "1" ]
then
check_exit_code ${ERROR_COUNTER} "Language files are not consistent. [${ERROR_COUNTER}] errors were found."
fi
}
changeIntoWorkingDirectory
checkAllLanguageFiles
| harryklein/pkg_mosimage | com_mosimage/bin/build-checklanguage.sh | Shell | gpl-2.0 | 2,554 |
#!/usr/bin/env bash
#== Variables ==
#== Functionality ==
webserver_install() {
apt-get install -y \
apache2 php5 \
libapache2-mod-php5 \
php5-cli php5-common php5-dev \
php5-pgsql php5-sqlite php5-gd \
php5-curl php5-memcached \
php5-imap php5-mysqlnd php5-intl \
php5-xmlrpc php5-xsl php5-imagick \
php5-mcrypt php-apc php-pear
}
webserver_setup() {
local DOMAIN='gozma14.local'
echo "<VirtualHost *:80>
ServerName ${DOMAIN}
DocumentRoot /var/www/html
AllowEncodedSlashes On
<Directory /var/www/html>
Options +Indexes +FollowSymLinks
DirectoryIndex index.php index.html
Order allow,deny
Allow from all
AllowOverride All
</Directory>
ErrorLog \${APACHE_LOG_DIR}/error.log
CustomLog \${APACHE_LOG_DIR}/access.log combined
</VirtualHost>" > /etc/apache2/sites-available/000-default.conf
if [ ! -f "/etc/apache2/conf-available/fqdn.conf" ];
then
echo "ServerName localhost" > /etc/apache2/conf-available/fqdn.conf
ln -s /etc/apache2/conf-available/fqdn.conf /etc/apache2/conf-enabled/fqdn.conf
fi
sed -i "s/memory_limit = .*/memory_limit = 256M/" /etc/php5/apache2/php.ini
sed -i "s/post_max_size = .*/post_max_size = 64M/" /etc/php5/apache2/php.ini
sed -i "s/upload_max_filesize = .*/upload_max_filesize = 32M/" /etc/php5/apache2/php.ini
sed -i "s/expose_php = .*/expose_php = Off/" /etc/php5/apache2/php.ini
a2enmod expires
a2enmod headers
a2enmod include
a2enmod rewrite
php5enmod mcrypt
}
webserver_ownership() {
sed -ri 's/^(export APACHE_RUN_USER=)(.*)$/\1vagrant/' /etc/apache2/envvars
sed -ri 's/^(export APACHE_RUN_GROUP=)(.*)$/\1vagrant/' /etc/apache2/envvars
chown -R vagrant:vagrant /var/lock/apache2
chown -R vagrant:vagrant /var/log/apache2
chown -R vagrant:vagrant /var/www
}
#== Provisioning Script ==
export DEBIAN_FRONTEND=noninteractive
webserver_install
webserver_setup
webserver_ownership
# Restart service
service apache2 restart
| trsenna/gozma14 | vagrant/provision/provision-02--webserver.sh | Shell | gpl-2.0 | 2,000 |
#!/bin/sh
# input parameter 1 is the base folder
# input parameter 2 is the output xml file
BASE_FOLDER=${1:-content}
OUTPUT_NAME=${2:-duplicates}
du -a -b -S $BASE_FOLDER | awk -f ci2.awk | sort - | awk -f ci3.awk >${OUTPUT_NAME}.xml
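# Example (names other than the built-in defaults are illustrative only):
#   ./ci_s.sh                 # scans ./content and writes duplicates.xml
#   ./ci_s.sh photos report   # would scan ./photos and write report.xml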
| NigelWhitley/duplicates | ci_s.sh | Shell | gpl-2.0 | 236 |
#!/bin/bash
# -*- mode: shell-script; indent-tabs-mode: nil; sh-basic-offset: 4; -*-
# ex: ts=8 sw=4 sts=4 et filetype=sh
check() {
local _rootdev
# if we don't have dmraid installed on the host system, no point
# in trying to support it in the initramfs.
type -P dmraid >/dev/null || return 1
[[ $hostonly ]] || [[ $mount_needs ]] && {
for fs in "${host_fs_types[@]}"; do
[[ $fs = *_raid_member ]] && return 0
done
return 255
}
return 0
}
depends() {
echo dm rootfs-block
return 0
}
install() {
local _i
check_dmraid() {
local dev=$1 fs=$2 holder DEVPATH DM_NAME
[[ "$fs" != *_raid_member ]] && return 1
DEVPATH=$(udevadm info --query=property --name=$dev \
| while read line; do
[[ ${line#DEVPATH} = $line ]] && continue
eval "$line"
echo $DEVPATH
break
done)
for holder in /sys/$DEVPATH/holders/*; do
[[ -e $holder ]] || continue
DM_NAME=$(udevadm info --query=property --path=$holder \
| while read line; do
[[ ${line#DM_NAME} = $line ]] && continue
eval "$line"
echo $DM_NAME
break
done)
done
[[ ${DM_NAME} ]] || return 1
if ! [[ $kernel_only ]]; then
echo " rd.dm.uuid=${DM_NAME} " >> "${initdir}/etc/cmdline.d/90dmraid.conf"
fi
return 0
}
for_each_host_dev_fs check_dmraid
dracut_install dmraid
dracut_install -o kpartx
inst $(command -v partx) /sbin/partx
inst "$moddir/dmraid.sh" /sbin/dmraid_scan
inst_rules 64-md-raid.rules
inst_libdir_file "libdmraid-events*.so*"
inst_rules "$moddir/61-dmraid-imsm.rules"
#inst "$moddir/dmraid-cleanup.sh" /sbin/dmraid-cleanup
inst_hook pre-trigger 30 "$moddir/parse-dm.sh"
}
| zfsonlinux/dracut | modules.d/90dmraid/module-setup.sh | Shell | gpl-2.0 | 1,976 |
#!/bin/sh
# Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
if [ "$MYSQL_HOME" = "" ] ; then
source ../env.properties
echo MYSQL_HOME=$MYSQL_HOME
fi
#set -x
cwd="$(pwd)"
mylogdir="$cwd/ndblog"
mysock="/tmp/mysql.sock"
#mysock="$mylogdir/mysql.sock"
echo shut down mysqld...
#"$MYSQL_BIN/mysqladmin" shutdown
"$MYSQL_BIN/mysqladmin" --socket="$mysock" shutdown
# need some extra time
for ((i=0; i<5; i++)) ; do printf "." ; sleep 1 ; done ; echo
#echo
#ps -efa | grep mysqld
#set +x
| hasegaw/ref-mysql-labs-tplc | storage/ndb/test/crund/scripts/stop_mysqld.sh | Shell | gpl-2.0 | 1,177 |
#!/bin/bash
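# Pipe the raw byte stream from eeglraw into dieharder: -g 200 selects the
# stdin raw-input generator and -a runs the full battery of randomness tests.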
eeglraw | dieharder -g 200 -a
| aquila62/eegl | cplusplus/tstdie.sh | Shell | gpl-2.0 | 42 |
#!/bin/bash
usage(){
echo "It's your responsibility to be sure the buffer size specified in the"
echo "passed in conf file is the same as the value passed in for -b here (which"
echo "is only used for naming the output priors file but could still be confusing."
echo ""
echo "-b : buffer size (required)"
echo "-c : msbayes conf file (required)"
echo "-o : Output dir <default is ./priors>"
echo "-n : number of 250k priors files <default 1>"
echo "-s : msbayes sort sumstats <default 7>"
echo "-t : Do test run. Run fast and generate 1 small priors file"
}
PRIORS_FILES=1
BUFFER_SIZE=-1
PRIORS_SIZE=250000
MSBAYES_SORT_STATS=7
OUTDIR=./priors
while getopts b:c:n:o:s:t flag; do
case $flag in
b)
echo "Doing Buffer Size: $OPTARG";
BUFFER_SIZE=$OPTARG
;;
c)
echo "Using config: $OPTARG";
MSBAYES_CONF=$OPTARG
;;
n)
echo "Generating this many 250k priors files: $OPTARG";
PRIORS_FILES=$OPTARG;
;;
o)
echo "Output directory: $OPTARG";
OUTDIR=$OPTARG;
;;
s)
echo "Using msbayes sort version: $OPTARG";
MSBAYES_SORT_STATS=$OPTARG
;;
t)
echo "Doing TEST run. Set defaults so it'll run quick."
echo "Generate 1 prior file with 1000 draws, automatically"
echo "cram it into /tmp"
PRIORS_SIZE=1000
OUTDIR=/tmp
;;
?) usage; exit;
;;
:) echo "Missing option argument for -$OPTARG" >&2; exit 1;;
esac
done
if [ ! -f "$MSBAYES_CONF" ] || [ "$BUFFER_SIZE" == "-1" ]; then
echo "Problem with input either conf file is bad or no buffer size provided: $MSBAYES_CONF - $BUFFER_SIZE"
exit -1
fi
for i in `seq 1 $PRIORS_FILES`;
do
# Seed with random value because the perl seed will generate
# identical priors if you run it too close together.
./msbayes.pl -s $MSBAYES_SORT_STATS -r $PRIORS_SIZE -c $MSBAYES_CONF -S $RANDOM -o $OUTDIR/buffer_"$BUFFER_SIZE"_$i.prior &
done
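# Example invocation (values are illustrative only):
#   ./make_priors.sh -b 500 -c my_msbayes.conf -n 4 -o ./priors
# which backgrounds four msbayes.pl runs writing priors/buffer_500_1.prior .. buffer_500_4.prior.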
| Hickerlab/msBayes | scripts/make_priors.sh | Shell | gpl-2.0 | 2,075 |
#!/bin/sh
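# Resize the 196x196 master icon (icon-196.png) into the platform-specific
# sizes below using ImageMagick's convert.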
convert -resize 62x62 icon-196.png windows-phone/icon-62-tile.png
convert -resize 48x48 icon-196.png windows-phone/icon-48.png
convert -resize 173x173 icon-196.png windows-phone/icon-173-tile.png
convert -resize 57x57 icon-196.png ios/icon-57.png
convert -resize 113x113 icon-196.png ios/icon-57-2x.png
convert -resize 72x72 icon-196.png ios/icon-72.png
convert -resize 144x144 icon-196.png ios/icon-72-2x.png
convert -resize 128x128 icon-196.png bada/icon-128.png
convert -resize 64x64 icon-196.png webos/icon-64.png
convert -resize 80x80 icon-196.png blackberry/icon-80.png
convert -resize 128x128 icon-196.png tizen/icon-128.png
convert -resize 48x48 icon-196.png android/icon-48-mdpi.png
convert -resize 36x36 icon-196.png android/icon-36-ldpi.png
convert -resize 72x72 icon-196.png android/icon-72-hdpi.png
convert -resize 96x96 icon-196.png android/icon-96-xhdpi.png
convert -resize 50x50 icon-196.png bada-wac/icon-50-type3.png
convert -resize 80x80 icon-196.png bada-wac/icon-80-type4.png
convert -resize 48x48 icon-196.png bada-wac/icon-48-type5.png
| bmatusiak/repairshopr-mobile-app | www/res/icon/iconbuild.sh | Shell | gpl-2.0 | 1,069 |
convert images/OCS-291.png -crop 1649x4962+0+0 +repage images/OCS-291-A.png
convert images/OCS-291.png -crop 1681x4962+1649+0 +repage images/OCS-291-B.png
#/OCS-291.png
#
#
#
| jonnymwalker/Staroslavjanskij-Slovar | scripts/middlesplit.OCS-291.sh | Shell | gpl-2.0 | 175 |
#!/bin/sh
mkdir -p po m4
echo "gettextize..."
echo "no" | gettextize --force --copy --no-changelog
echo "intltoolize..."
intltoolize --copy --force --automake
echo "libtoolize"
libtoolize
echo "aclocal..."
aclocal -I m4
echo "autoconf..."
autoconf
echo "autoheader..."
autoheader
echo "automake..."
automake --add-missing --copy --gnu
| rickyrockrat/la2vcd2 | autogen.sh | Shell | gpl-2.0 | 345 |
#!/bin/bash
. $(dirname $0)/include.rc
. $(dirname $0)/volume.rc
function pidgrep()
{
ps ax | grep "$1" | awk '{print $1}' | head -1
}
## Start glusterd
glusterd;
pidof glusterd;
$CLI volume info;
mkdir -p $B0/brick1/brick;
mkdir -p $B0/brick2/brick;
## Let's create volume
$CLI volume create $V0 stripe 2 replica 2 $H0:$B0/brick1/brick $H1:$B0/brick1/brick $H0:$B0/brick2/brick $H1:$B0/brick2/brick;
## Start volume and verify
$CLI volume start $V0;
| raghavendra-talur/gluster-helpscripts | createstriperep.sh | Shell | gpl-2.0 | 463 |
#!/bin/bash
# Copyright 2016, 2017 Richard Rodrigues, Nyle Rodgers, Mark Williams,
# Virginia Tech
#
# This file is part of Coremic.
#
# Coremic is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Coremic is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Coremic. If not, see <http://www.gnu.org/licenses/>.
INTEREST=$1
OUT=$2
OUTPUT=$3
# Directory of script
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
VISUALIZE_CORE="python2 $DIR/visualize_core.py"
RASTER="png"
VECTOR="svg"
$VISUALIZE_CORE $INTEREST $OUT $OUTPUT.$RASTER $OUTPUT.$VECTOR
for frac in 75 80 85 90 95 100; do
$VISUALIZE_CORE -n $frac -x $frac $INTEREST $OUT ${OUTPUT}_$frac.$RASTER ${OUTPUT}_$frac.$VECTOR
done
| richrr/coremicro | scripts/tree_multiple.sh | Shell | gpl-2.0 | 1,142 |
#! /bin/sh -e
# tup - A file-based build system
#
# Copyright (C) 2016-2020 Mike Shal <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# Check a rule with escaped quote in a Tupfile
. ./tup.sh
check_no_windows unlinkat_quote
cat > Tupfile << HERE
: |> touch 'a"' ' b' |> a\" \ b
HERE
update
tup_object_exist . 'a"'
tup_object_exist . ' b'
eotup
| ppannuto/tup | test/t2200-tupfile-space-escaped.sh | Shell | gpl-2.0 | 961 |
#!/bin/bash
# TYPE: Bash script.
# PURPOSE: This bash shell script allows you to easily remove GNU/Linux kernels.
# The script should be copied to /usr/bin/removekernel and set as executable.
# It is a poor man's alternative to Ubuntu Tweak.
# REQUIRES: bash, Ubuntu 12.04 LTS or newer
# REVISED: 20130302
# REVISED: 20130516
echo "Please press one of the 4 numbers below and then hit the <ENTER> key to execute"
select CHOICE in list-installed-kernels remove-a-specific-kernel remove-all-previous-kernels quit
do
case "$CHOICE" in
list-installed-kernels)
echo "List of currently installed Linux kernels"
dpkg --list | grep linux-image | egrep '^[r,i]i' | cut -d" " -f3
;;
remove-a-specific-kernel)
echo "List of currently installed Linux kernels"
dpkg --list | grep linux-image | egrep '^[r,i]i' | cut -d" " -f3
echo "Please enter kernel package to uninstall from your pc (for example: linux-image-3.9.0-030900rc5-generic) "
read KERNELVERSION
apt-cache search $KERNELVERSION|cut -d" " -f1|xargs sudo apt-get remove -y
sudo apt autoremove
;;
remove-all-previous-kernels)
echo "Will now purge all the following kernel packages:"
dpkg -l 'linux-*' | grep -v libc| sed '/^ii/!d;/'"$(uname -r | sed "s/\(.*\)-\([^0-9]\+\)/\1/")"'/d;s/^[^ ]* [^ ]* \([^ ]*\).*/\1/;/[0-9]/!d'
dpkg -l 'linux-*' | grep -v libc| sed '/^ii/!d;/'"$(uname -r | sed "s/\(.*\)-\([^0-9]\+\)/\1/")"'/d;s/^[^ ]* [^ ]* \([^ ]*\).*/\1/;/[0-9]/!d' | xargs sudo apt-get -y purge
;;
quit)
exit
;;
esac
done
| MarkRijckenberg/shell-scripts | kernelremoval.bash | Shell | gpl-3.0 | 1,666 |
#Let's get sound working
sudo apt-get update
sudo apt-get install unzip -y
wget "https://github.com/Grippentech/Asus-E200HA-Linux-Post-Install-Script/releases/download/0.3/kernel.zip"
unzip kernel.zip
cd kernel
sudo dpkg -i *.deb
tar -xvf chtcx2072x.tar
cd chtcx2072x
sudo mkdir /usr/share/alsa/ucm/chtcx2072x/
sudo mv chtcx2072x.conf /usr/share/alsa/ucm/chtcx2072x/
sudo mv HiFi.conf /usr/share/alsa/ucm/chtcx2072x/
#Now cleanup the files
sudo apt autoremove #Remove old versions of kernel
cd ..
cd ..
sudo rm -r kernel
sudo rm kernel.zip
#Now let's fix Backlight Controls up and down. THIS SEEMS TO BE CAUSING ISSUES. REMOVED TEMPORARILY.
#sudo touch /usr/share/X11/xorg.conf.d/20-intel.conf
#sudo sh -c "{ echo 'Section "Device"
# Identifier "card0"
# Driver "intel"
# Option "Backlight" "intel_backlight"
# BusID "PCI:0:2:0"
#EndSection'; } >> /usr/share/X11/xorg.conf.d/20-intel.conf"
#Now let's take care of Linux Cherry Trail Suspend Issues by disabling Sleep/Hibernation
sudo systemctl mask sleep.target suspend.target hibernate.target hybrid-sleep.target #Disable Suspend, broken on CherryTrail
sudo sh -c 'echo -e "[Login]\nHandleLidSwitch=ignore\nHandleLidSwitchDocked=ignore" >> /etc/systemd/logind.conf' #Disable Sleep/Hibernate when Lid is Closed
#Let's now install tlp and thermald to save you some battery life
sudo apt-get install tlp thermald -y
#Change fstab a little bit for better eMMC support, eMMC Conservation settings
sudo sed -i -e 's/errors=remount-ro 0/discard,noatime,errors=remount-ro 0/g' /etc/fstab
sleep 15 #wait 15 seconds
sudo reboot now #Reboot to reenable Trackpad
| Grippentech/Asus-E200HA-Linux-Post-Install-Script | e200ha_with_sound.sh | Shell | gpl-3.0 | 1,653 |
#!/bin/bash
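# Run the PHPUnit test suite from the vendored binary against the test directory.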
vendor/bin/phpunit test
| blanketastronomer/yaml-to-manifest | run_tests.sh | Shell | gpl-3.0 | 38 |
#!/bin/bash
# Source this script to set up the DD4Hep installation that this script is part of.
#
# This script if for the csh like shells, see thisdd4hep.csh for csh like shells.
#
# Author: Pere Mato. F. Gaede, M.Frank
#-------------------------------------------------------------------------------
#
#echo " ### thisdd4hep.sh: initialize the environment for DD4hep ! "
#
#-----------------------------------------------------------------------------
dd4hep_parse_this() {
package=${2};
if [ "x${1}" = "x" ]; then
if [ ! -f bin/this${package}.sh ]; then
echo ERROR: must "cd where/${package}/is" before calling ". bin/this${package}.sh" for this version of bash!;
return 1;
fi
THIS="${PWD}";
else
# get param to "."
THIS=$(dirname $(dirname ${1}));
#if [ ! -f ${THIS}/bin/this${package}.sh ]; then
# THIS=$(dirname ${package});
#fi;
fi;
THIS=$(cd ${THIS} > /dev/null; pwd);
}
#-----------------------------------------------------------------------------
dd4hep_add_path() {
path_name=${1};
path_prefix=${2};
eval path_value=\$$path_name;
if [ ${path_value} ]; then
path_value=${path_prefix}:${path_value};
else
path_value=${path_prefix};
fi;
eval export ${path_name}=${path_value};
## echo "dd4hep_add_path: ${path_name}=${path_value}";
}
#-----------------------------------------------------------------------------
dd4hep_add_library_path() {
path_prefix=${1};
if [ @USE_DYLD@ ];
then
if [ ${DYLD_LIBRARY_PATH} ]; then
export DYLD_LIBRARY_PATH=${path_prefix}:$DYLD_LIBRARY_PATH;
else
export DYLD_LIBRARY_PATH=${path_prefix};
fi;
else
if [ ${LD_LIBRARY_PATH} ]; then
export LD_LIBRARY_PATH=${path_prefix}:$LD_LIBRARY_PATH;
else
export LD_LIBRARY_PATH=${path_prefix};
fi;
fi;
}
#-----------------------------------------------------------------------------
#
dd4hep_parse_this ${BASH_ARGV[0]} DD4hep;
#
# These 3 are the main configuration variables: ROOT, Geant4 and XercesC
# --> LCIO & Co. are handled elsewhere!
export ROOTSYS=@ROOT_ROOT@;
export Geant4_DIR=@Geant4_DIR@;
export XERCESCINSTALL=@XERCESC_ROOT_DIR@;
#
#----DD4hep installation directory--------------------------------------------
export DD4hepINSTALL=${THIS};
export DD4hep_DIR=${THIS};
export DD4hep_ROOT=${THIS};
#
#----------- source the ROOT environment first ------------------------------
ROOTENV_INIT=${ROOTSYS}/bin/thisroot.sh;
test -r ${ROOTENV_INIT} && { cd $(dirname ${ROOTENV_INIT}); . ./$(basename ${ROOTENV_INIT}) ; cd $OLDPWD ; }
#
#----Geant4 LIBRARY_PATH------------------------------------------------------
if [ ${Geant4_DIR} ]; then
G4LIB_DIR=`dirname ${Geant4_DIR}`;
export G4INSTALL=`dirname ${G4LIB_DIR}`;
export G4ENV_INIT=${G4INSTALL}/bin/geant4.sh
# ---------- initialize geant4 environment
test -r ${G4ENV_INIT} && { cd $(dirname ${G4ENV_INIT}) ; . ./$(basename ${G4ENV_INIT}) ; cd $OLDPWD ; }
#---- if geant4 was built with external CLHEP we have to extend the dynamic search path
if [ @GEANT4_USE_CLHEP@ ] ; then
dd4hep_add_library_path @CLHEP_LIBRARY_PATH@;
export CLHEP_DIR=@CLHEP_DIR@
fi;
dd4hep_add_library_path ${G4LIB_DIR};
unset G4ENV_INIT;
unset G4LIB_DIR;
fi;
#
#----XercesC LIBRARY_PATH-----------------------------------------------------
if [ ${XERCESCINSTALL} ]; then
#dd4hep_add_path PATH ${XERCESCINSTALL}/bin;
dd4hep_add_library_path ${XERCESCINSTALL}/lib;
fi;
#
#----PATH---------------------------------------------------------------------
dd4hep_add_path PATH ${THIS}/bin;
#----LIBRARY_PATH-------------------------------------------------------------
dd4hep_add_library_path ${THIS}/lib;
#----PYTHONPATH---------------------------------------------------------------
dd4hep_add_path PYTHONPATH ${THIS}/python;
#----ROOT_INCLUDE_PATH--------------------------------------------------------
dd4hep_add_path ROOT_INCLUDE_PATH ${THIS}/include;
#-----------------------------------------------------------------------------
#
unset ROOTENV_INIT;
unset THIS;
#-----------------------------------------------------------------------------
| vvolkl/DD4hep | cmake/thisdd4hep.sh | Shell | gpl-3.0 | 4,216 |
#!/bin/sh
set -e
echo "Creating Build Directory"
rm -rf build
mkdir build
echo "Compiling"
javac -classpath lib/lwjgl.jar:lib/lwjgl_util.jar:lib/slick-util.jar src/*.java -d build
cd build
echo "Creating Jar"
jar -cfm ../MineCraft.jar ../src/manifest.txt *.class
echo "Done"
| wyndwarrior/MineCraftMini | build.sh | Shell | gpl-3.0 | 279 |
#!/bin/bash
#
# runTests.sh [--test_speed]
# --test_speed = Run speed tests.
#
# The following prerequisites must exist for proper
# operation of this script:
# pyflakes
# coverage
# lizard # For cyclomatic complexity.
# # https://github.com/terryyin/lizard
#
# Install the above python packages in the normal way.
#
#
# Recommended way to operate:
# script # Start collecting all output
# ./runTests.sh ; exit # run this script then exit script shell
# vim typescript # Default output of "script" goes into typescript
#
# Errors are expected! They get noted as CMD_FAIL commands.
# However, there should be no tracebacks.
#
# An unfortunate reason that this script runs so many tests rests in the
# inability of coverage to handle subprocesses properly!
# Much time was spent attempting to encapsulate the repetitious
# nature of these tests into python scripts.
#
# The fantastic coverage utility does not accurately catch all
# coverage. In the output reports I can see some code noted as
# not covered when in fact I know the code
# *was* covered. Thus, the coverage reports a lower value
# than the actual coverage.
#
# The output uses the following bash functions to provide
# feedback on progress:
# ECHO "..." - The string simply gets echoed. Just a comment.
# CMD "..." - The command gets run. The exit status does not matter.
# CMD_PASS "..." - A 0 status code indicates a successful execution.
# CMD_FAIL "..." - A non-zero status code indicates a successful execution.
# On all these the line number provides a convenient link to where
# the commands get executed.
#
# Notice that programs started in background do not get handled with
# CMD-style tracking. This is OK.
# All background processes get killed with an "@EXIT" from the processes
# that send messages.
#
# TODO:
# Provide stricter checking of these commands!
# Just because a zero status indicates success does NOT mean
# the output was correct. LOTS more validation work for this
# to become really solid.
#
# Uncomment next two lines for debugging and tracing of this script.
#set -x
#PS4='$LINENO: '
# Assume no speed tests
TEST_SPEED=false
for ndx in "$@"
do
case $ndx in
--test_speed|--test)
TEST_SPEED=true
shift # get past arg
;;
*)
echo "Unknown option: \"$ndx\" "
exit 1
esac
done
# Start a timer for this entire script. The end produces a duration.
SECONDS=0
# Future versions may use python3?
alias python=/usr/bin/python2.7
export PYTHON=/usr/bin/python2.7
# Function to echo a command and then execute it.
# Do NOT try to start backgrounded commands with this function.
CMD () {
# BASH_LINENO is an array. Use the top of the stack == 0
echo
echo "CMD ${BASH_LINENO[0]}: $*"
$1
ret_code=$?
echo return code $ret_code
return $ret_code
}
trap "echo +++ signal received +++; exit" SIGHUP SIGINT SIGTERM
# Total number of tested commands
TEST_COUNT=0
# Total count of errors
ERROR_COUNT=0
EXPECTED_PASS_BUT_FAIL=0
EXPECTED_FAIL_BUT_PASS=0
SLEEP=/usr/bin/sleep
# function to echo command and then execute it.
# Expect the command to pass with return code 0
CMD_PASS () {
(( TEST_COUNT = TEST_COUNT + 1 ))
# BASH_LINENO is an array. Use the top of the stack == 0
echo
echo "CMD_PASS ${BASH_LINENO[0]}: $*"
$1
return_code=$?
if [ $return_code -ne 0 ]
then
echo "+++ERROR: Expected 0 return code, got $return_code"
(( ERROR_COUNT = ERROR_COUNT + 1 ))
(( EXPECTED_PASS_BUT_FAIL = EXPECTED_PASS_BUT_FAIL + 1 ))
fi
}
# function to echo command and then execute it.
# Expect the command to fail with return code != 0
CMD_FAIL () {
(( TEST_COUNT = TEST_COUNT + 1 ))
# BASH_LINENO is an array. Use the top of the stack == 0
echo
echo "CMD_FAIL ${BASH_LINENO[0]}: $*"
$1
return_code=$?
if [ $return_code -eq 0 ]
then
echo "+++ERROR: Expected != 0 return code, got $return_code"
(( ERROR_COUNT = ERROR_COUNT + 1 ))
(( EXPECTED_FAIL_BUT_PASS = EXPECTED_FAIL_BUT_PASS + 1 ))
fi
}
# function to simply echo with a line number
ECHO () {
# BASH_LINENO is an array. Use the top of the stack == 0
echo "ECHO ${BASH_LINENO[0]}: $*"
}
KILL_LOG_COLLECTOR() {
# This test script uses log collector at port 5570 only.
echo "KILL_LOG_COLLECTOR ${BASH_LINENO[0]} $*"
./listening 5570
ret_code=$?
if [ $ret_code -ne 0 ]
then
CMD "./logCmd.py @EXIT"
CMD "$SLEEP 1"
# Test once again
./listening 5570
ret_code=$?
if [ $ret_code -ne 0 ]
then
pid=$(./listening 5570 | awk '{print $2;}' )
kill -9 $pid
fi
fi
}
KILL_ASYNC_SERVER() {
# Kill the async message server if it is running.
echo "KILL_ASYNC_SERVER ${BASH_LINENO[0]} $*"
CMD "./listening 5590"
RET=$?
if [ $RET -ne 0 ]
then
kill $(./listening 5590 | awk '{print $2;}' )
sleep 1
fi
}
# Run various python metric utilities
#CMD "pyflakes *.py"
#CMD "pep8 *.py"
#CMD "pylint *.py"
#CMD "lizard ."
# Env var for tracking subprocesses
# Ref: http://coverage.readthedocs.org/en/coverage-4.0.3/subprocess.html
export COVERAGE_PROCESS_START=$PWD/.coveragerc
ECHO " export COVERAGE_PROCESS_START=$COVERAGE_PROCESS_START"
export CPS=$COVERAGE_PROCESS_START
ECHO "CPS=$CPS"
export PYTHONPATH=$PYTHONPATH:$PWD
ECHO "PYTHONPATH=$PYTHONPATH"
../wc.sh # How big is this getting?
export BASE_DIR=$PWD
ECHO "BASE_DIR=$BASE_DIR"
export LIB_DIR=$BASE_DIR
ECHO "LIB_DIR=$LIB_DIR"
export TOOLS_DIR=$BASE_DIR
ECHO "TOOLS_DIR=$TOOLS_DIR"
export TEST_DIR=$BASE_DIR/test
ECHO "TEST_DIR=$TEST_DIR"
export DATA_DIR=$BASE_DIR/data
ECHO "DATA_DIR=$DATA_DIR"
export SCRIPTS_DIR=$BASE_DIR/scripts
ECHO "SCRIPTS_DIR=$SCRIPTS_DIR"
export GEN_DATA=$TEST_DIR/genData.py
ECHO "GEN_DATA=$GEN_DATA"
ECHO Remove all logs.log
ECHO "rm $(find . -name logs.log)"
ECHO Remove all .coverage.*
rm $(find . -name '.coverage.*' -type f)
ECHO Remove .coverage_html/*
CMD "rm -rf .coverage_html"
ECHO "Before starting, make sure the logCollector is not running"
CMD "$TOOLS_DIR/listening 5570 5571 5572 5573 5574 5575"
KILL_LOG_COLLECTOR
#
#
export COVERAGE=1
CMD "coverage erase "
# Generate a "standard" log of data frequently used in testing.
export DATA_LOG=$DATA_DIR/data.data
CMD "$GEN_DATA >$DATA_LOG"
$GEN_DATA >$DATA_LOG # CMD does not handle redirection properly.
ECHO "Kill async message server if running"
CMD KILL_ASYNC_SERVER
ECHO "=============== Run unit tests ================"
ECHO "logCollector still going before testLogging.py ?"
CMD_PASS "coverage run --branch --parallel-mode $TOOLS_DIR/listeningPort.py "
CMD_PASS "coverage run --branch --parallel-mode $TEST_DIR/testLogging.py "
ECHO "Give time for logCollector to stop"
CMD "$SLEEP 2"
ECHO "logCollector still going after testLogging.py ?"
CMD_PASS "coverage run --branch --parallel-mode $TOOLS_DIR/listeningPort.py 5570 5571 5572 5573 5574"
ECHO "=============== End of unit tests ================"
ECHO "Need to get a timed alarm in case the collector does not start."
ECHO "echo ==== Starting logCollector ===="
coverage run --branch --parallel-mode $LIB_DIR/logCollector.py &
COL_PID=$!
CMD "$SLEEP 2" # Time to get log collector started
ps ax | grep logCollector
ECHO "the logCollector must be running in port 5570"
CMD_PASS "coverage run --branch --parallel-mode $TOOLS_DIR/listeningPort.py 5570 5571 5572 5573 5574"
ECHO "Test the operations of 'kill -USR[12] <pid>"
CMD_PASS "kill -USR1 $COL_PID"
CMD_PASS "kill -USR2 $COL_PID"
CMD_PASS "kill -USR2 $COL_PID"
CMD_PASS "kill -USR2 $COL_PID"
CMD_PASS "kill -USR2 $COL_PID"
CMD_PASS "kill -USR2 $COL_PID"
CMD_PASS "kill -USR2 $COL_PID"
ECHO "Run a few simple logs to the logCollector"
CMD_PASS "coverage run --branch --parallel-mode $LIB_DIR/loggingClientTask.py "
ECHO "Test the listening port utility"
CMD_FAIL "coverage run --branch --parallel-mode $TOOLS_DIR/listeningPort.py --help "
CMD_PASS "coverage run --branch --parallel-mode $TOOLS_DIR/listeningPort.py 5570 "
CMD_PASS "coverage run --branch --parallel-mode $TOOLS_DIR/listeningPort.py --short 5570 "
CMD_PASS "coverage run --branch --parallel-mode $TOOLS_DIR/listeningPort.py --short "
CMD_PASS "coverage run --branch --parallel-mode $TOOLS_DIR/listeningPort.py --pid 5570 "
CMD_PASS "coverage run --branch --parallel-mode $TOOLS_DIR/listeningPort.py --pid "
CMD_PASS "coverage run --branch --parallel-mode $TOOLS_DIR/listeningPort.py --proc "
CMD_PASS "coverage run --branch --parallel-mode $TOOLS_DIR/listeningPort.py --proc 5570 "
CMD_PASS "coverage run --branch --parallel-mode $TOOLS_DIR/listeningPort.py --proc 6666 "
CMD_FAIL "coverage run --branch --parallel-mode $TOOLS_DIR/listeningPort.py --bogus 6666 "
CMD_FAIL "coverage run --branch --parallel-mode $TOOLS_DIR/listeningPort.py bogus-port "
ECHO "kill logCollector and restart with output to /dev/null for Speed test"
CMD_PASS "coverage run --branch --parallel-mode $LIB_DIR/logCmd.py @EXIT "
ECHO " coverage run --branch --parallel-mode $LIB_DIR/logCollector.py --log-file=/dev/null "
$LIB_DIR/logCollector.py --log-file=/dev/null &
ECHO "Only run speed test if command line uses --speed-test"
if [ $TEST_SPEED = "true" ]
then
CMD_PASS "coverage run --branch --parallel-mode $LIB_DIR/loggingSpeedTest.py "
ECHO "Speed test may keep the log collector busy for awhile."
ECHO "Stop logCollector with /dev/null output, open again with echo"
CMD_PASS "coverage run --branch --parallel-mode $LIB_DIR/logCmd.py @EXIT"
CMD "sleep 3"
KILL_LOG_COLLECTOR
fi
ECHO "logCollector still going...? Should have been killed."
KILL_LOG_COLLECTOR
CMD_PASS "$TOOLS_DIR/listening 5570 5571 5572 5573 5574"
coverage run --branch --parallel-mode $LIB_DIR/logCollector.py &
CMD_PASS "coverage run --branch --parallel-mode $LIB_DIR/loggingLoopApp.py 5 "
ECHO "Passing logCmd"
CMD_PASS "coverage run --branch --parallel-mode $LIB_DIR/logCmd.py Testing a new log config option."
ECHO Misc logCmd testing
CMD_FAIL "coverage run --branch --parallel-mode $LIB_DIR/logCmd.py --help"
CMD_FAIL "coverage run --branch --parallel-mode $LIB_DIR/logCmd.py --xxx stuff"
CMD_FAIL "coverage run --branch --parallel-mode $LIB_DIR/logCmd.py --port=XYZ stuff"
CMD_FAIL "coverage run --branch --parallel-mode $LIB_DIR/logCmd.py --level=XYZ"
CMD_PASS "coverage run --branch --parallel-mode $LIB_DIR/logCmd.py --level=DEBUG Should be at debug level"
CMD_PASS "coverage run --branch --parallel-mode $GEN_DATA "
CMD_PASS "coverage run --branch --parallel-mode $GEN_DATA --happy "
CMD_PASS "coverage run --branch --parallel-mode $GEN_DATA --missing "
CMD_PASS "coverage run --branch --parallel-mode $GEN_DATA --mixed "
CMD_FAIL "coverage run --branch --parallel-mode $GEN_DATA --help "
CMD_PASS "coverage run --branch --parallel-mode $GEN_DATA --config=datedFilter "
CMD_PASS "coverage run --branch --parallel-mode $GEN_DATA --config=csvFilter "
CMD_PASS "coverage run --branch --parallel-mode $GEN_DATA --config=baseFilter "
CMD_FAIL "coverage run --branch --parallel-mode $GEN_DATA --config=bogusFilter "
ECHO An invalid filter
CMD_FAIL "coverage run --branch --parallel-mode $GEN_DATA ----config=bogusFilter " # An invalid filter
ECHO Coverage for apiLoggerInit
CMD_PASS "coverage run --branch --parallel-mode $LIB_DIR/apiLoggerInit.py "
CMD_PASS "coverage run --branch --parallel-mode $LIB_DIR/apiLoggerInit.py "
ECHO "logCollector still going?"
CMD_PASS "coverage run --branch --parallel-mode $TOOLS_DIR/listeningPort.py 5570 5571 5572 5573 5574"
ECHO Multiple runs passing various flags both valid and bogus.
CMD_PASS "coverage run --branch --parallel-mode $LIB_DIR/logFilterApp.py --in-file=$DATA_LOG --JSON "
CMD_PASS "coverage run --branch --parallel-mode $LIB_DIR/logFilterApp.py --in-file=$DATA_LOG --JSON --level=ERROR "
CMD_PASS "coverage run --branch --parallel-mode $LIB_DIR/logFilterApp.py --in-file=$DATA_LOG --CSV "
CMD_PASS "coverage run --branch --parallel-mode $LIB_DIR/logFilterApp.py --in-file=$DATA_LOG --CSV --level=ERROR "
CMD_FAIL "coverage run --branch --parallel-mode $LIB_DIR/logFilterApp.py --Xin-file=$DATA_LOG --CSV --level=ERROR "
CMD_FAIL "coverage run --branch --parallel-mode $LIB_DIR/logFilterApp.py --help "
ECHO Expect ERRORs
CMD_FAIL "coverage run --branch --parallel-mode $LIB_DIR/logFilterApp.py --in-file=bogus_file --CSV --level=ERROR "
ECHO Expect ERRORs - bogus log level
CMD_FAIL "coverage run --branch --parallel-mode $LIB_DIR/logFilterApp.py --in-file=bogus_file --CSV --level=Bogus "
ECHO Expect ERRORs
CMD_PASS "coverage run --branch --parallel-mode $LIB_DIR/logFilterApp.py --in-file=/dev/null --CSV --level=ERROR "
ECHO Expect ERRORs
CMD_PASS "coverage run --branch --parallel-mode $LIB_DIR/logFilterApp.py --in-file=/dev/null --JSON --level=ERROR "
ECHO Expect ERRORS
CMD_PASS "coverage run --branch --parallel-mode $LIB_DIR/logFilterApp.py --config=$DATA_DIR/csv.conf --in-file=/dev/null --JSON --level=ERROR "
CMD_PASS "coverage run --branch --parallel-mode $LIB_DIR/logFilterApp.py --config=$DATA_DIR/happy.conf "
CMD_PASS "coverage run --branch --parallel-mode $LIB_DIR/logFilterApp.py --config=$DATA_DIR/mixed.conf "
ECHO Expect ERRORS
CMD_FAIL "coverage run --branch --parallel-mode $LIB_DIR/logFilterApp.py --config=$DATA_DIR/bad.conf "
ECHO Expect ERRORS
CMD_FAIL "coverage run --branch --parallel-mode $LIB_DIR/logFilterApp.py --config=$DATA_DIR/bad2.conf "
ECHO Expect ERRORS
CMD_FAIL "coverage run --branch --parallel-mode $LIB_DIR/logFilterApp.py --config=$DATA_DIR/bad3.conf "
ECHO Expect ERRORS
CMD_FAIL "coverage run --branch --parallel-mode $LIB_DIR/logFilterApp.py --config=$DATA_DIR/no_start.conf "
ECHO Expect ERRORS
CMD_FAIL "coverage run --branch --parallel-mode $LIB_DIR/logFilterApp.py --config=$DATA_DIR/no_end.conf "
ECHO Expect ERRORS
CMD_FAIL "coverage run --branch --parallel-mode $LIB_DIR/logFilterApp.py --config=$DATA_DIR/no_end1.conf "
ECHO Expect ERRORS
CMD_FAIL "coverage run --branch --parallel-mode $LIB_DIR/logFilterApp.py --config=$DATA_DIR/no_start1.conf "
ECHO Expect ERRORS
CMD_FAIL "coverage run --branch --parallel-mode $LIB_DIR/logFilterApp.py --config=$DATA_DIR/bad_start.conf "
ECHO Expect ERRORS
CMD_FAIL "coverage run --branch --parallel-mode $LIB_DIR/logFilterApp.py --config=$DATA_DIR/bad_end.conf "
ECHO Expect ERRORS
CMD_FAIL "coverage run --branch --parallel-mode $LIB_DIR/logFilterApp.py --config=$DATA_DIR/end_before_start.conf "
ECHO "No infile. Reads from stdin"
cat happy.data | coverage run --branch --parallel-mode $LIB_DIR/logFilterApp.py
CMD_PASS "coverage run --branch --parallel-mode $LIB_DIR/logFilterApp.py --in-file=/dev/null --in-file=$DATA_LOG --JSON "
CMD_PASS "coverage run --branch --parallel-mode $LIB_DIR/logFilterApp.py --start=1970-01-01T00:00:00.000 --in-file=/dev/null --in-file=$DATA_LOG --JSON "
CMD_PASS "coverage run --branch --parallel-mode $LIB_DIR/logFilterApp.py --start=1970-01-01T00:00:00.000 --end=2020-01-01T00:00:00.000 --in-file=/dev/null --in-file=$DATA_LOG --JSON "
ECHO
CMD_PASS "coverage run --branch --parallel-mode $LIB_DIR/logFilterApp.py --end=2020-01-01T00:00:00.000 --in-file=/dev/null --in-file=$DATA_LOG --JSON "
ECHO Syntax error on end date
CMD_FAIL "coverage run --branch --parallel-mode $LIB_DIR/logFilterApp.py --end=2017-01-01:00:00:00.000 --in-file=/dev/null --in-file=$DATA_LOG --JSON "
ECHO Permission denied on output file.
CMD_FAIL "coverage run --branch --parallel-mode $LIB_DIR/logFilterApp.py --in-file=/ --out-file=$DATA_LOG --JSON "
ECHO Permission denied on input file.
CMD_FAIL "coverage run --branch --parallel-mode $LIB_DIR/logFilterApp.py --in-file=/dev/null --in-file=/var/log/messages --JSON "
CMD_PASS "coverage run --branch --parallel-mode $LIB_DIR/logFilterApp.py --in-file=/dev/null --in-file=$DATA_LOG --CSV "
ECHO Permission denied on output file.
CMD_FAIL "coverage run --branch --parallel-mode $LIB_DIR/logFilterApp.py --in-file=/ --out-file=$DATA_LOG --CSV "
ECHO Permission denied on input file.
CMD_FAIL "coverage run --branch --parallel-mode $LIB_DIR/logFilterApp.py --in-file=/dev/null --in-file=/var/log/messages --CSV "
ECHO "Filter on dates as well."
ECHO "These test depend on the dates as set in ./test/getData.py and the $DATA_LOG file"
ECHO "start only"
CMD_PASS "coverage run --branch --parallel-mode $LIB_DIR/logFilterApp.py --in-file=$DATA_LOG --JSON --start=2016-03-14T08:00:00.000 "
ECHO Syntax Error for start only
CMD_FAIL "coverage run --branch --parallel-mode $LIB_DIR/logFilterApp.py --in-file=$DATA_LOG --JSON --start=2016-03-14:08:00:00.000 "
ECHO start and end dates
CMD_PASS "coverage run --branch --parallel-mode $LIB_DIR/logFilterApp.py --in-file=$DATA_LOG --JSON --start=2016-03-14T08:00:00.000 end=2016-03-14T08:05:15.876 "
ECHO "Work with dirSvc - Directory Service"
CMD_FAIL "coverage run --branch --parallel-mode $LIB_DIR/dirSvc.py --help"
ECHO Pass invalid run time option
CMD_FAIL "coverage run --branch --parallel-mode $LIB_DIR/dirSvc.py --FooBar"
ECHO Pass invalid port number
CMD_FAIL "coverage run --branch --parallel-mode $LIB_DIR/dirSvc.py --clear --port=XYZ"
CMD_FAIL "coverage run --branch --parallel-mode $LIB_DIR/dirSvc.py --noisy --memory-file=/ --port=1234"
CMD_PASS "coverage run --branch --parallel-mode $TOOLS_DIR/listeningPort.py 5570 5571 5572 5573 5574"
# Start the directory server in the background.
ECHO coverage run --branch --parallel-mode $LIB_DIR/dirSvc.py
coverage run --branch --parallel-mode $LIB_DIR/dirSvc.py --noisy &
# Start a logCollector in the background as well
coverage run --branch --parallel-mode $LIB_DIR/logCollector.py --noisy &
CMD_PASS "coverage run --branch --parallel-mode $TOOLS_DIR/listeningPort.py 5570 5571 5572 5573 5574"
echo " If dirClient passes, it means it could send the params to dirSvc."
echo " Passing does not mean the parameter is valid!"
CMD_FAIL "coverage run --branch --parallel-mode $LIB_DIR/dirClient.py --help"
CMD_PASS "coverage run --branch --parallel-mode $LIB_DIR/dirClient.py --noisy foobar"
CMD_PASS "coverage run --branch --parallel-mode $LIB_DIR/dirClient.py --noisy abc def ghi jkl"
CMD_PASS "coverage run --branch --parallel-mode $LIB_DIR/dirClient.py --noisy @DIR"
CMD_PASS "coverage run --branch --parallel-mode $LIB_DIR/dirClient.py --noisy @CLEAR"
CMD_PASS "coverage run --branch --parallel-mode $LIB_DIR/dirClient.py --noisy @CLEAR_DIRECTORY"
CMD_PASS "coverage run --branch --parallel-mode $LIB_DIR/dirClient.py --noisy @PERSIST"
CMD_FAIL "coverage run --branch --parallel-mode $LIB_DIR/dirClient.py --node=abc @PERSIST"
CMD_PASS "coverage run --branch --parallel-mode $LIB_DIR/dirClient.py --memory-file=/ "
CMD_PASS "coverage run --branch --parallel-mode $LIB_DIR/dirClient.py --clear "
CMD_PASS "coverage run --branch --parallel-mode $LIB_DIR/dirClient.py --noisy @MEMORY_FILENAME"
CMD_PASS "coverage run --branch --parallel-mode $LIB_DIR/dirClient.py --noisy @DOES_NOT_EXIST"
CMD_PASS "coverage run --branch --parallel-mode $LIB_DIR/dirClient.py --noisy --port=5599 stuff"
ECHO "Verify that abc gets deleted from the directory"
CMD_PASS "coverage run --branch --parallel-mode $LIB_DIR/dirClient.py --noisy abc | grep abc"
ECHO "Delete a name from the directory"
CMD_PASS "coverage run --branch --parallel-mode $LIB_DIR/dirClient.py --noisy ~abc"
CMD_PASS "$LIB_DIR/dirClient.py --noisy abc | grep abc"
STATUS=$?
if [ $STATUS -ne 0 ]
then
ECHO 'Dir delete of abc failed.'
fi
ECHO "Try to delete a bogus name from the directory"
CMD_PASS "coverage run --branch --parallel-mode $LIB_DIR/dirClient.py ~bogusName"
CMD_PASS "coverage run --branch --parallel-mode $LIB_DIR/dirClient.py --noisy @DIR"
ECHO "Various commands to the driver dirClient for coverage purposes."
ECHO "A request for help is considered a failure"
CMD_FAIL "coverage run --branch --parallel-mode $LIB_DIR/dirClient.py --help abc def"
CMD_FAIL "coverage run --branch --parallel-mode $LIB_DIR/dirClient.py --port=XYZ"
CMD_PASS "coverage run --branch --parallel-mode $LIB_DIR/dirClient.py --noisy --clear"
CMD_FAIL "coverage run --branch --parallel-mode $LIB_DIR/dirClient.py --bogusArg"
CMD "$TOOLS_DIR/listeningPort.py 5570 5571 5572 5573 5574 5575"
# An orderly exit so coverage can collect the runs.
CMD_PASS "coverage run --branch --parallel-mode $LIB_DIR/dirClient.py --noisy @EXIT"
CMD "$TOOLS_DIR/listeningPort.py 5570 5571 5572 5573 5574 5575"
CMD_PASS "coverage run --branch --parallel-mode $LIB_DIR/logCmd.py @EXIT"
KILL_LOG_COLLECTOR
CMD "$TOOLS_DIR/listeningPort.py 5570 5571 5572 5573 5574 5575"
ECHO Log Filter with configuration file. Notice in-file override
CMD_PASS "coverage run --branch --parallel-mode $LIB_DIR/logFilterApp.py --config=$DATA_DIR/mixed.conf --in-file=$DATA_DIR/mixed.data "
ECHO Log Filter with configuration file. Read from stdin
CMD "coverage run --branch --parallel-mode $LIB_DIR/logFilterApp.py --config=$DATA_DIR/mixed.conf < $DATA_DIR/happy.data "
coverage run --branch --parallel-mode $LIB_DIR/logFilterApp.py --config=$DATA_DIR/mixed.conf < $DATA_DIR/happy.data
ECHO Log Filter with invalid configuration file. Has bad syntax
CMD_FAIL "coverage run --branch --parallel-mode $LIB_DIR/logFilterApp.py --config=$DATA_DIR/bad --in-file=$DATA_DIR/mixed.data "
ECHO Log Filter with configuration file. Uses invalid in-file.
CMD_FAIL "coverage run --branch --parallel-mode $LIB_DIR/logFilterApp.py --config=$DATA_DIR/mixed.conf --in-file=$DATA_DIR/does_not_exist.data "
ECHO Log Filter with non-existent configuration file.
CMD_FAIL "coverage run --branch --parallel-mode $LIB_DIR/logFilterApp.py --config=$DATA_DIR/does-not-exist.conf "
ECHO Log Filter with configuration file. Uses out_file.
TMP=/tmp/$$.json
export TEST_DIR=$PWD
CMD_PASS "coverage run --branch --parallel-mode $LIB_DIR/logFilterApp.py --config=$DATA_DIR/mixed.conf --in-file=$DATA_DIR/mixed.data --out-file=$TMP "
# Should have something
[ -s $TMP ]
iszero=$?
if [ $iszero -eq 1 ]
then
echo '==============================================================================='
echo logFilterApp with --in-file and --out-file should have produced output, but did not.
echo '==============================================================================='
fi
rm $TMP # Clean up tmp file.
ECHO Same outfile, but with CSV
CMD_PASS "coverage run --branch --parallel-mode $LIB_DIR/logFilterApp.py --config=$DATA_DIR/mixed.conf --CSV --in-file=$DATA_DIR/mixed.data --out-file=$TMP "
# Should have something
[ -s $TMP ]
iszero=$?
if [ $iszero -eq 1 ]
then
echo '==============================================================================='
echo logFilterApp with --in-file and --out-file should have produced output, but did not.
echo '==============================================================================='
fi
CMD "rm $TMP" # Clean up tmp file.
ECHO "Test various command line options for the logCollector"
ECHO "Set log file to ./abc.log"
coverage run --branch --parallel-mode $LIB_DIR/logCollector.py --config=$DATA_DIR/logRC.conf --log-file=$DATA_DIR/abc.log --trunc &
CMD_PASS "$LIB_DIR/logCmd.py Testing a new log config option."
CMD_PASS "$LIB_DIR/logCmd.py @EXIT"
if [ -f ./abc.log ]
then
ECHO Expected logfile exists.
rm ./abc.log
fi
export TMP_CONF=/tmp/$$.conf # Work configuration file
export TMP_LOG=./zzz.log # Test log file in local dir.
ECHO " Create a logCollector config file in $TMP_CONF "
cat >$TMP_CONF <<EOF
{
"append": True,
"log-file": '$TMP_LOG',
"noisy": False,
"port": 5570,
}
EOF
coverage run --branch --parallel-mode $LIB_DIR/logCollector.py --config=$TMP_CONF &
CMD "sleep 1" # Let the collector get started.
CMD_PASS "$LIB_DIR/logCmd.py Testing log config in $TMP_CONF "
CMD_PASS "$LIB_DIR/logCmd.py Another testing log config in $TMP_CONF "
CMD "sleep 1"
CMD_PASS "$LIB_DIR/logCmd.py @EXIT"
if [ -f $TMP_LOG ]
then
ECHO Expected logfile $TMP_LOG exists.
CMD "cat $TMP_LOG"
if [ -s $TMP_LOG ]
then
ECHO Size of $TMP_LOG is non-zero
CMD "cat $TMP_LOG"
else
ECHO ERROR: Expected non-zero log size for $TMP_LOG
fi
else
ECHO FAIL - Expected log file at $TMP_LOG, not found
fi
CMD "rm $TMP_CONF"
CMD "rm $TMP_LOG"
CMD "$TOOLS_DIR/listening 5570 5571 5572 5573 5574 5575"
ECHO ""
ECHO "Cover signal interrupt handlers in logCollector"
ECHO ""
ECHO "Use kill -INT pid"
coverage run --branch --parallel-mode $LIB_DIR/logCollector.py&
PID=$!
CMD_PASS "sleep 2" # Let the log collector start
ECHO PID logCollector=$PID
CMD_PASS "kill -INT $PID"
ECHO "Use kill -TERM pid"
coverage run --branch --parallel-mode $LIB_DIR/logCollector.py&
PID=$!
CMD_PASS "sleep 2" # Let the log collector start
ECHO PID logCollector=$PID
CMD_PASS "kill -TERM $PID"
ECHO "Use kill -USR1 pid"
coverage run --branch --parallel-mode $LIB_DIR/logCollector.py&
PID=$!
CMD_PASS "sleep 2" # Let the log collector start
ECHO PID logCollector=$PID
CMD_PASS "kill -USR1 $PID"
CMD_PASS "coverage run --branch --parallel-mode $LIB_DIR/logCmd.py @EXIT "
CMD "$TOOLS_DIR/listening 5570 5571 5572 5573 5574 5575"
ECHO "Various options to logCollector"
ECHO "help option passed"
coverage run --branch --parallel-mode $LIB_DIR/logCollector.py --help
coverage run --branch --parallel-mode $LIB_DIR/logCollector.py -a &
PID=$!
CMD_PASS "sleep 1" # Let the log collector start
ECHO PID logCollector=$PID
CMD_PASS "coverage run --branch --parallel-mode $LIB_DIR/logCmd.py @EXIT "
ECHO "Non-numeric port test"
coverage run --branch --parallel-mode $LIB_DIR/logCollector.py --port=XYZ
ECHO "Bogus options --BOGUS"
coverage run --branch --parallel-mode $LIB_DIR/logCollector.py --BOGUS=XYZ
ECHO "Bogus configuration file"
coverage run --branch --parallel-mode $LIB_DIR/logCollector.py --config=/XYZ/does_not_exist
PID=$!
coverage run --branch --parallel-mode $LIB_DIR/logCollector.py --quiet &
PID=$!
CMD_PASS "sleep 1" # Let the log collector start
ECHO PID logCollector=$PID
CMD_PASS "coverage run --branch --parallel-mode $LIB_DIR/logCmd.py @EXIT "
coverage run --branch --parallel-mode $LIB_DIR/logCollector.py --port=5572 &
PID=$!
CMD_PASS "sleep 1" # Let the log collector start
ECHO PID logCollector=$PID
CMD_PASS "coverage run --branch --parallel-mode $LIB_DIR/logCmd.py --port=5572 @EXIT "
CMD "$TOOLS_DIR/listening 5570 5571 5572 5573 5574 5575"
ECHO "============================================================"
ECHO "============================================================"
ECHO "Start using mongodb for log storage"
ECHO "============================================================"
ECHO "============================================================"
mongod &
ret_code=$?
if [ $ret_code -ne 100 ]
then
ECHO "mongod already running"
fi
CMD "sleep 3" # Let mongo get started
CMD "mongo logs --eval 'db.logs.count()' "
ECHO ""
ECHO "Drop the entries in the log collection"
mongo <<HERE
use logs
db.logs.drop()
HERE
CMD "mongo logs --eval 'db.logs.count()' "
ECHO "Start logCollector with JSON format into database 'logs' "
coverage run --branch --parallel-mode $LIB_DIR/logCollector.py --mongo --format=JSON --mongo-database=logs &
CMD "sleep 3"
ECHO "First - clear the logs in the MongoDB"
mongo <<HERE
use logs
db.logs.drop()
HERE
ECHO "Send a simple message"
CMD_PASS "./logCmd.py sw1=ON, pump02=OFF, light42=UNKNOWN"
ECHO "=================== LOTS more on MongoDB =========="
CMD "$SLEEP 1"
ECHO "Kill logCollector if still running"
KILL_LOG_COLLECTOR
ECHO ""
ECHO '============ Testing dbquery ============'
ECHO "Some error cases for dbquery"
CMD_FAIL "coverage run --branch --parallel-mode $LIB_DIR/dbquery.py logs --config=$DATA_DIR/does-not-exist.conf "
CMD_FAIL "coverage run --branch --parallel-mode $LIB_DIR/dbquery.py --help"
CMD_FAIL "coverage run --branch --parallel-mode $LIB_DIR/dbquery.py logs --port=ABC"
ECHO ""
ECHO "Simple queries"
ECHO "Use CMD_PASS prefix with no double quotes"
CMD_PASS coverage run --branch --parallel-mode $LIB_DIR/dbquery.py logs 'db.logs.count()'
CMD_PASS coverage run --branch --parallel-mode $LIB_DIR/dbquery.py logs 'db.logs.find()'
ECHO Without CMD but with coverage. Uses single quotes.
coverage run --branch --parallel-mode $LIB_DIR/dbquery.py logs 'db.logs.count()'
CMD coverage run --branch --parallel-mode $LIB_DIR/dbquery.py logs 'db.logs.count()'
ECHO "Insert some standard logs"
mongo < $SCRIPTS_DIR/insertData.dat
ECHO count Without CMD
coverage run --branch --parallel-mode $LIB_DIR/dbquery.py logs 'db.logs.count()'
ECHO "find first: Foo"
coverage run --branch --parallel-mode $LIB_DIR/dbquery.py logs 'db.logs.find({"first":"Foo"})'
ECHO Now with simple CMD in front
CMD coverage run --branch --parallel-mode $LIB_DIR/dbquery.py logs 'db.logs.find({"first":"Foo"})'
ECHO "A find first:Foo" without CMD
coverage run --branch --parallel-mode $LIB_DIR/dbquery.py logs 'db.logs.find({"first":"Foo"})'
CMD "coverage combine "
CMD "coverage report -m "
CMD "coverage html -d .coverage_html "
ECHO Paste into browser for details: file://$PWD/.coverage_html/index.html
echo
echo
echo =================================================
echo
echo Total number of tests: $TEST_COUNT
echo
echo Error count: $ERROR_COUNT
echo Expected to pass but failed: $EXPECTED_PASS_BUT_FAIL
echo Expected to fail but passed: $EXPECTED_FAIL_BUT_PASS
echo
echo =================================================
duration=$SECONDS
echo "Total elapsed time: $(($duration / 60)) minutes and $(($duration % 60)) seconds."
|
TrailingDots/async_py_messaging
|
async_py_messaging/runTests.sh
|
Shell
|
gpl-3.0
| 29,643 |
#!/usr/bin/expect
set timeout 10
expect "Hello"
puts "World!"
|
torchhound/projects
|
bash/exp.sh
|
Shell
|
gpl-3.0
| 61 |
#!/bin/bash
cd "${0%/*}"
DYLD_FALLBACK_LIBRARY_PATH=/Library/Frameworks/Mono.framework/Versions/Current/lib:/lib:/usr/lib mono BLauncher.exe
|
SirSengir/Starliners
|
Deploy/MacApp/Starliners/Contents/MacOS/BLauncher.sh
|
Shell
|
gpl-3.0
| 141 |
#!/bin/bash
echo "---------- $0 start ----------"
set -e
set -x
if [ $EUID == 0 ]; then
echo "Please do not run this script as root; don't sudo it!"
exit 1
fi
OPT="/opt"
# Ardupilot Tools
ARDUPILOT_TOOLS="Tools/autotest"
ASSUME_YES=false
QUIET=false
sep="##############################################"
OPTIND=1 # Reset in case getopts has been used previously in the shell.
while getopts "yq" opt; do
case "$opt" in
\?)
exit 1
;;
y) ASSUME_YES=true
;;
q) QUIET=true
;;
esac
done
APT_GET="sudo apt-get"
if $ASSUME_YES; then
APT_GET="$APT_GET --assume-yes"
fi
if $QUIET; then
APT_GET="$APT_GET -qq"
fi
# update apt package list
$APT_GET update
function package_is_installed() {
dpkg-query -W -f='${Status}' "$1" 2>/dev/null | grep -c "ok installed"
}
function heading() {
echo "$sep"
echo $*
echo "$sep"
}
# Install lsb-release as it is needed to check Ubuntu version
if ! package_is_installed "lsb-release"; then
heading "Installing lsb-release"
$APT_GET install lsb-release
echo "Done!"
fi
# Checking Ubuntu release to adapt software version to install
RELEASE_CODENAME=$(lsb_release -c -s)
PYTHON_V="python" # starting from ubuntu 20.04, python isn't symlink to default python interpreter
PIP=pip2
if [ ${RELEASE_CODENAME} == 'xenial' ]; then
SITLFML_VERSION="2.3v5"
SITLCFML_VERSION="2.3"
elif [ ${RELEASE_CODENAME} == 'disco' ]; then
SITLFML_VERSION="2.5"
SITLCFML_VERSION="2.5"
elif [ ${RELEASE_CODENAME} == 'eoan' ]; then
SITLFML_VERSION="2.5"
SITLCFML_VERSION="2.5"
elif [ ${RELEASE_CODENAME} == 'focal' ] || [ ${RELEASE_CODENAME} == 'ulyssa' ]; then
SITLFML_VERSION="2.5"
SITLCFML_VERSION="2.5"
PYTHON_V="python3"
PIP=pip3
elif [ ${RELEASE_CODENAME} == 'groovy' ] || [ ${RELEASE_CODENAME} == 'hirsute' ]; then
SITLFML_VERSION="2.5"
SITLCFML_VERSION="2.5"
PYTHON_V="python3"
PIP=pip3
elif [ ${RELEASE_CODENAME} == 'trusty' ]; then
SITLFML_VERSION="2"
SITLCFML_VERSION="2"
else
# We assume APT based system, so let's try with apt-cache first.
SITLCFML_VERSION=$(apt-cache search -n '^libcsfml-audio' | cut -d" " -f1 | head -1 | grep -Eo '[+-]?[0-9]+([.][0-9]+)?')
SITLFML_VERSION=$(apt-cache search -n '^libsfml-audio' | cut -d" " -f1 | head -1 | grep -Eo '[+-]?[0-9]+([.][0-9]+)?')
# If we cannot retrieve the number with apt-cache, try a last time with dpkg-query
re='^[+-]?[0-9]+([.][0-9]+)?$'
if ! [[ $SITLCFML_VERSION =~ $re ]] || ! [[ $SITLFML_VERSION =~ $re ]] ; then
# Extract the floating point number that is the version of the libcsfml package.
SITLCFML_VERSION=$(dpkg-query --search libcsfml-audio | cut -d":" -f1 | grep libcsfml-audio | head -1 | grep -Eo '[+-]?[0-9]+([.][0-9]+)?')
# And same for libsfml-audio.
SITLFML_VERSION=$(dpkg-query --search libsfml-audio | cut -d":" -f1 | grep libsfml-audio | head -1 | grep -Eo '[+-]?[0-9]+([.][0-9]+)?')
fi
fi
# Check whether the specific ARM pkg-config package is available or whether we should emulate the effect of installing it.
# Check if we need to manually install libtool-bin
ARM_PKG_CONFIG_NOT_PRESENT=0
if [ -z "$(apt-cache search -n '^pkg-config-arm-linux-gnueabihf')" ]; then
ARM_PKG_CONFIG_NOT_PRESENT=$(dpkg-query --search pkg-config-arm-linux-gnueabihf |& grep -c "dpkg-query:")
fi
if [ "$ARM_PKG_CONFIG_NOT_PRESENT" -eq 1 ]; then
INSTALL_PKG_CONFIG=""
# No need to install Ubuntu's pkg-config-arm-linux-gnueabihf, instead install the base pkg-config.
$APT_GET install pkg-config
if [ -f /usr/share/pkg-config-crosswrapper ]; then
# We are on non-Ubuntu so simulate effect of installing pkg-config-arm-linux-gnueabihf.
sudo ln -s /usr/share/pkg-config-crosswrapper /usr/bin/arm-linux-gnueabihf-pkg-config
else
echo "Warning: unable to link to pkg-config-crosswrapper"
fi
else
# Package is available so install it later.
INSTALL_PKG_CONFIG="pkg-config-arm-linux-gnueabihf"
fi
# Lists of packages to install
BASE_PKGS="build-essential ccache g++ gawk git make wget"
PYTHON_PKGS="future lxml pymavlink MAVProxy pexpect flake8 geocoder"
# add some Python packages required for commonly-used MAVProxy modules and hex file generation:
if [[ $SKIP_AP_EXT_ENV -ne 1 ]]; then
PYTHON_PKGS="$PYTHON_PKGS pygame intelhex"
fi
ARM_LINUX_PKGS="g++-arm-linux-gnueabihf $INSTALL_PKG_CONFIG"
# python-wxgtk packages are added to SITL_PKGS below
SITL_PKGS="libtool libxml2-dev libxslt1-dev ${PYTHON_V}-dev ${PYTHON_V}-pip ${PYTHON_V}-setuptools ${PYTHON_V}-numpy ${PYTHON_V}-pyparsing ${PYTHON_V}-psutil"
# add some packages required for commonly-used MAVProxy modules:
if [[ $SKIP_AP_GRAPHIC_ENV -ne 1 ]]; then
SITL_PKGS="$SITL_PKGS xterm ${PYTHON_V}-matplotlib ${PYTHON_V}-serial ${PYTHON_V}-scipy ${PYTHON_V}-opencv libcsfml-dev libcsfml-audio${SITLCFML_VERSION} libcsfml-dev libcsfml-graphics${SITLCFML_VERSION} libcsfml-network${SITLCFML_VERSION} libcsfml-system${SITLCFML_VERSION} libcsfml-window${SITLCFML_VERSION} libsfml-audio${SITLFML_VERSION} libsfml-dev libsfml-graphics${SITLFML_VERSION} libsfml-network${SITLFML_VERSION} libsfml-system${SITLFML_VERSION} libsfml-window${SITLFML_VERSION} ${PYTHON_V}-yaml"
fi
if [[ $SKIP_AP_COV_ENV -ne 1 ]]; then
# Coverage utilities
COVERAGE_PKGS="lcov gcovr"
fi
# ArduPilot official Toolchain for STM32 boards
function install_arm_none_eabi_toolchain() {
# GNU Tools for ARM Embedded Processors
# (see https://launchpad.net/gcc-arm-embedded/)
ARM_ROOT="gcc-arm-none-eabi-10-2020-q4-major"
ARM_TARBALL="$ARM_ROOT-x86_64-linux.tar.bz2"
ARM_TARBALL_URL="https://firmware.ardupilot.org/Tools/STM32-tools/$ARM_TARBALL"
if [ ! -d $OPT/$ARM_ROOT ]; then
(
cd $OPT;
heading "Installing toolchain for STM32 Boards"
echo "Downloading from ArduPilot server"
sudo wget $ARM_TARBALL_URL
echo "Installing..."
sudo tar xjf ${ARM_TARBALL}
echo "... Cleaning"
sudo rm ${ARM_TARBALL};
)
fi
echo "Registering STM32 Toolchain for ccache"
sudo ln -s -f $CCACHE_PATH /usr/lib/ccache/arm-none-eabi-g++
sudo ln -s -f $CCACHE_PATH /usr/lib/ccache/arm-none-eabi-gcc
echo "Done!"
}
function maybe_prompt_user() {
if $ASSUME_YES; then
return 0
else
read -p "$1"
if [[ $REPLY =~ ^[Yy]$ ]]; then
return 0
else
return 1
fi
fi
}
heading "Add user to dialout group to allow managing serial ports"
sudo usermod -a -G dialout $USER
echo "Done!"
# Add back python symlink to python interpreter on Ubuntu >= 20.04
if [ ${RELEASE_CODENAME} == 'focal' ] || [ ${RELEASE_CODENAME} == 'ulyssa' ]; then
BASE_PKGS+=" python-is-python3"
SITL_PKGS+=" libpython3-stdlib" # for argparse
elif [ ${RELEASE_CODENAME} == 'groovy' ] || [ ${RELEASE_CODENAME} == 'hirsute' ]; then
BASE_PKGS+=" python-is-python3"
SITL_PKGS+=" libpython3-stdlib" # for argparse
else
SITL_PKGS+=" python-argparse"
fi
# Check for graphical package for MAVProxy
if [[ $SKIP_AP_GRAPHIC_ENV -ne 1 ]]; then
if [ ${RELEASE_CODENAME} == 'groovy' ] || [ ${RELEASE_CODENAME} == 'hirsute' ]; then
SITL_PKGS+=" python3-wxgtk4.0"
SITL_PKGS+=" fonts-freefont-ttf libfreetype6-dev libjpeg8-dev libpng16-16 libportmidi-dev libsdl-image1.2-dev libsdl-mixer1.2-dev libsdl-ttf2.0-dev libsdl1.2-dev" # for pygame
elif [ ${RELEASE_CODENAME} == 'focal' ] || [ ${RELEASE_CODENAME} == 'ulyssa' ]; then
SITL_PKGS+=" python3-wxgtk4.0"
SITL_PKGS+=" fonts-freefont-ttf libfreetype6-dev libjpeg8-dev libpng16-16 libportmidi-dev libsdl-image1.2-dev libsdl-mixer1.2-dev libsdl-ttf2.0-dev libsdl1.2-dev" # for pygame
elif apt-cache search python-wxgtk3.0 | grep wx; then
SITL_PKGS+=" python-wxgtk3.0"
else
# we only support back to trusty:
SITL_PKGS+=" python-wxgtk2.8"
SITL_PKGS+=" fonts-freefont-ttf libfreetype6-dev libjpeg8-dev libpng12-0 libportmidi-dev libsdl-image1.2-dev libsdl-mixer1.2-dev libsdl-ttf2.0-dev libsdl1.2-dev" # for pygame
fi
fi
# Check if we need to manually install realpath
RP=$(apt-cache search -n '^realpath$')
if [ -n "$RP" ]; then
BASE_PKGS+=" realpath"
fi
# Check if we need to manually install libtool-bin
LBTBIN=$(apt-cache search -n '^libtool-bin')
if [ -n "$LBTBIN" ]; then
SITL_PKGS+=" libtool-bin"
fi
# Install all packages
$APT_GET install $BASE_PKGS $SITL_PKGS $PX4_PKGS $ARM_LINUX_PKGS $COVERAGE_PKGS
$PIP install --user -U $PYTHON_PKGS
if [[ -z "${DO_AP_STM_ENV}" ]] && maybe_prompt_user "Install ArduPilot STM32 toolchain [N/y]?" ; then
DO_AP_STM_ENV=1
fi
heading "Removing modemmanager package that could conflict with firmware uploading"
if package_is_installed "modemmanager"; then
$APT_GET remove modemmanager
fi
echo "Done!"
CCACHE_PATH=$(which ccache)
if [[ $DO_AP_STM_ENV -eq 1 ]]; then
install_arm_none_eabi_toolchain
fi
heading "Check if we are inside docker environment..."
IS_DOCKER=false
if [[ -f /.dockerenv ]] || grep -Eq '(lxc|docker)' /proc/1/cgroup ; then
IS_DOCKER=true
fi
echo "Done!"
SHELL_LOGIN=".profile"
if $IS_DOCKER; then
echo "Inside docker, we add the tools path into .bashrc directly"
SHELL_LOGIN=".bashrc"
fi
heading "Adding ArduPilot Tools to environment"
SCRIPT_DIR=$(dirname $(realpath ${BASH_SOURCE[0]}))
ARDUPILOT_ROOT=$(realpath "$SCRIPT_DIR/../../")
if [[ $DO_AP_STM_ENV -eq 1 ]]; then
exportline="export PATH=$OPT/$ARM_ROOT/bin:\$PATH";
grep -Fxq "$exportline" ~/$SHELL_LOGIN 2>/dev/null || {
if maybe_prompt_user "Add $OPT/$ARM_ROOT/bin to your PATH [N/y]?" ; then
echo $exportline >> ~/$SHELL_LOGIN
eval $exportline
else
echo "Skipping adding $OPT/$ARM_ROOT/bin to PATH."
fi
}
fi
exportline2="export PATH=$ARDUPILOT_ROOT/$ARDUPILOT_TOOLS:\$PATH";
grep -Fxq "$exportline2" ~/$SHELL_LOGIN 2>/dev/null || {
if maybe_prompt_user "Add $ARDUPILOT_ROOT/$ARDUPILOT_TOOLS to your PATH [N/y]?" ; then
echo $exportline2 >> ~/$SHELL_LOGIN
eval $exportline2
else
echo "Skipping adding $ARDUPILOT_ROOT/$ARDUPILOT_TOOLS to PATH."
fi
}
if [[ $SKIP_AP_COMPLETION_ENV -ne 1 ]]; then
exportline3="source $ARDUPILOT_ROOT/Tools/completion/completion.bash";
grep -Fxq "$exportline3" ~/$SHELL_LOGIN 2>/dev/null || {
if maybe_prompt_user "Add ArduPilot Bash Completion to your bash shell [N/y]?" ; then
echo $exportline3 >> ~/.bashrc
eval $exportline3
else
echo "Skipping adding ArduPilot Bash Completion."
fi
}
fi
exportline4="export PATH=/usr/lib/ccache:\$PATH";
grep -Fxq "$exportline4" ~/$SHELL_LOGIN 2>/dev/null || {
if maybe_prompt_user "Append CCache to your PATH [N/y]?" ; then
echo $exportline4 >> ~/$SHELL_LOGIN
eval $exportline4
else
echo "Skipping appending CCache to PATH."
fi
}
echo "Done!"
if [[ $SKIP_AP_GIT_CHECK -ne 1 ]]; then
if [ -d ".git" ]; then
heading "Update git submodules"
cd $ARDUPILOT_ROOT
git submodule update --init --recursive
echo "Done!"
fi
fi
echo "---------- $0 end ----------"
|
khancyr/ardupilot
|
Tools/environment_install/install-prereqs-ubuntu.sh
|
Shell
|
gpl-3.0
| 11,163 |
#!/bin/bash
# Copyright 2015-2021 Gilbert Standen
# This file is part of Orabuntu-LXC.
# Orabuntu-LXC is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Orabuntu-LXC is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Orabuntu-LXC. If not, see <http://www.gnu.org/licenses/>.
clear
echo ''
echo "=============================================="
echo "Display Install Settings... "
echo "=============================================="
echo ''
PreSeed=$1
LXDCluster=$2
GRE=$3
Release=$4
MultiHost=$5
LXDStorageDriver=$6
echo "PreSeed = "$1
echo "LXDCluster = "$2
echo "GRE = "$3
echo "Release = "$4
echo "StorageDriver = "$6
# Reference
# /etc/network/openvswitch/preseed.sw1a.zfs.001.lxd.cluster
# /etc/network/openvswitch/preseed.sw1a.zfs.002.lxd.cluster
# /etc/network/openvswitch/preseed.sw1a.btr.001.lxd.cluster
# /etc/network/openvswitch/preseed.sw1a.btr.002.lxd.cluster
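# For illustration only (not part of this repo, and the actual preseed.sw1a.* files
# are site-specific): a file fed to "lxd init --preseed" is a YAML document, typically
# built from top-level keys such as:
#   config:        server-level settings (for example core.https_address)
#   storage_pools: the pool name plus its driver (zfs or btrfs in the cases above)
#   networks:      bridge definitions
#   profiles:      a default profile wiring the root disk and NIC to the pool/bridge
#   cluster:       cluster name and address when forming an LXD cluster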
echo ''
echo "=============================================="
echo "Done: Display Install Settings. "
echo "=============================================="
echo ''
sleep 5
clear
if [ $PreSeed = 'Y' ] && [ $Release -ge 7 ]
then
echo ''
echo "=============================================="
echo "Run LXD Init (takes awhile...patience...) "
echo "=============================================="
echo ''
m=1; n=1
while [ $m -eq 1 ]
do
if [ $LXDCluster = 'N' ]
then
cat /etc/network/openvswitch/preseed.sw1a.olxc.001.lxd | lxd init --preseed
if [ $? -ne 0 ]
then
m=1
else
m=0
fi
elif [ $LXDCluster = 'Y' ]
then
if [ $GRE = 'N' ]
then
if [ $n -le 10 ]
then
if [ $LXDStorageDriver = 'zfs' ]
then
cat /etc/network/openvswitch/preseed.sw1a.zfs.001.lxd.cluster | lxd init --preseed
if [ $? -ne 0 ]
then
m=1
echo ''
echo 'Re-trying LXD cluster formation ...'
echo 'Sleeping 2 minutes in case you need to edit the preseed cluster file or diagnose other cluster errors before the next cluster formation attempt.'
sleep 120
else
m=0
fi
n=$((n+1))
elif [ $LXDStorageDriver = 'btrfs' ]
then
cat /etc/network/openvswitch/preseed.sw1a.btr.001.lxd.cluster | lxd init --preseed
if [ $? -ne 0 ]
then
m=1
else
m=0
fi
fi
else
sudo lxd init
if [ $? -ne 0 ]
then
m=1
else
m=0
fi
fi
elif [ $GRE = 'Y' ]
then
if [ $n -le 10 ]
then
if [ $LXDStorageDriver = 'zfs' ]
then
cat /etc/network/openvswitch/preseed.sw1a.zfs.002.lxd.cluster | lxd init --preseed
if [ $? -ne 0 ]
then
m=1
echo ''
echo 'Re-trying LXD cluster formation ...'
echo 'Sleeping 2 minutes in case you need to edit the preseed cluster file or diagnose other cluster errors before the next cluster formation attempt.'
echo ''
sleep 120
else
m=0
fi
n=$((n+1))
elif [ $LXDStorageDriver = 'btrfs' ]
then
cat /etc/network/openvswitch/preseed.sw1a.btr.002.lxd.cluster | lxd init --preseed
if [ $? -ne 0 ]
then
m=1
else
m=0
fi
n=$((n+1))
fi
else
sudo lxd init
if [ $? -ne 0 ]
then
m=1
else
m=0
fi
fi
fi
fi
done
echo "/var/lib/snapd/snap/bin/lxc cluster list" | sg lxd
echo ''
echo "=============================================="
echo "Done: Run LXD Init. "
echo "=============================================="
echo ''
sleep 5
clear
elif [ $PreSeed = 'E' ]
then
m=1
while [ $m -eq 1 ]
do
sleep 120
sudo cat /etc/network/openvswitch/lxd-init-node1.sh
sudo chmod +x /etc/network/openvswitch/lxd-init-node1.sh
/etc/network/openvswitch/lxd-init-node1.sh
if [ $? -ne 0 ]
then
m=1
else
m=0
fi
done
echo ''
echo "=============================================="
echo "Done: Run LXD Init. "
echo "=============================================="
echo ''
sleep 5
clear
else
lxd init --auto
echo ''
echo "=============================================="
echo "Done: Run LXD Init. "
echo "=============================================="
echo ''
sleep 5
clear
fi
echo ''
echo "=============================================="
echo "Done: Configure LXD "
echo "=============================================="
echo ''
sleep 5
clear
|
gstanden/orabuntu-lxc
|
uekulele/archives/lxd_install_uekulele.sh
|
Shell
|
gpl-3.0
| 5,062 |
#!/bin/bash
# Convert a 1024x1024 png to the required icns file. Only for macOS.
# Before proceeding, make sure you have an image named 'Icon1024.png'
# at 1024x1024 pixels (px) and 72 pixels per inch (ppi).
cd $PWD
mkdir MyIcon.iconset
sips -z 16 16 Icon1024.png --out MyIcon.iconset/icon_16x16.png
sips -z 32 32 Icon1024.png --out MyIcon.iconset/[email protected]
sips -z 32 32 Icon1024.png --out MyIcon.iconset/icon_32x32.png
sips -z 64 64 Icon1024.png --out MyIcon.iconset/[email protected]
sips -z 128 128 Icon1024.png --out MyIcon.iconset/icon_128x128.png
sips -z 256 256 Icon1024.png --out MyIcon.iconset/[email protected]
sips -z 256 256 Icon1024.png --out MyIcon.iconset/icon_256x256.png
sips -z 512 512 Icon1024.png --out MyIcon.iconset/[email protected]
sips -z 512 512 Icon1024.png --out MyIcon.iconset/icon_512x512.png
cp Icon1024.png MyIcon.iconset/[email protected]
iconutil -c icns MyIcon.iconset -o videomass.icns
rm -R MyIcon.iconset
|
jeanslack/Videomass
|
develop/tools/create_ICNS.sh
|
Shell
|
gpl-3.0
| 982 |
#!/bin/sh -e
DJANGO_CA_UWSGI_INI=${DJANGO_CA_UWSGI_INI:-/usr/src/django-ca/uwsgi/uwsgi.ini}
DJANGO_CA_UWSGI_PARAMS=${DJANGO_CA_UWSGI_PARAMS:-}
DJANGO_CA_LIB_DIR=${DJANGO_CA_LIB_DIR:-/var/lib/django-ca}
if [ ! -e ${DJANGO_CA_UWSGI_INI} ]; then
echo "${DJANGO_CA_UWSGI_INI}: No such file or directory."
exit 1
fi
DJANGO_CA_SECRET_KEY=${DJANGO_CA_SECRET_KEY:-}
DJANGO_CA_SECRET_KEY_FILE=${DJANGO_CA_SECRET_KEY_FILE:-/var/lib/django-ca/certs/ca/shared/secret_key}
if [ -z "${DJANGO_CA_SECRET_KEY}" ]; then
KEY_DIR=`dirname $DJANGO_CA_SECRET_KEY_FILE`
if [ ! -e "${KEY_DIR}" ]; then
mkdir -p ${KEY_DIR}
chmod go-rwx ${KEY_DIR}
fi
if [ ! -e "${DJANGO_CA_SECRET_KEY_FILE}" ]; then
echo "Create secret key at ${DJANGO_CA_SECRET_KEY_FILE}..."
python <<EOF
import random, string
key = ''.join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(32))
with open('${DJANGO_CA_SECRET_KEY_FILE}', 'w') as stream:
stream.write(key)
EOF
fi
chmod go-rwx ${DJANGO_CA_SECRET_KEY_FILE}
# Export DJANGO_CA_SECRET_KEY_FILE so that django-ca itself will pick it up.
export DJANGO_CA_SECRET_KEY_FILE
fi
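# WAIT_FOR_CONNECTIONS is assumed to hold space-separated "host:port" pairs, e.g.
# (illustrative values only, not from the original deployment):
#   WAIT_FOR_CONNECTIONS="postgres:5432 redis:6379"
# Each pair is rewritten to "host port" below and probed with nc until it accepts
# connections, so uwsgi only starts once its backing services are reachable.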
if [ -n "${WAIT_FOR_CONNECTIONS}" ]; then
for conn in ${WAIT_FOR_CONNECTIONS}; do
conn=${conn/:/ }
while ! nc -z $conn; do
echo "Wait for $conn..."
sleep 0.1 # wait for 1/10 of the second before check again
done
done
fi
set -x
python manage.py migrate --noinput
python manage.py collectstatic --no-input &
python manage.py cache_crls &
python manage.py regenerate_ocsp_keys &
uwsgi --ini ${DJANGO_CA_UWSGI_INI} ${DJANGO_CA_UWSGI_PARAMS} "$@"
|
mathiasertl/django-ca
|
scripts/uwsgi.sh
|
Shell
|
gpl-3.0
| 1,691 |
#!/bin/bash
# Start websockify and the inbuilt PHP server
websockify/run 8800 localhost:6600 &
cd www
php -S localhost:8000
|
anthony-mills/raspberrypi-carputer
|
start.sh
|
Shell
|
gpl-3.0
| 124 |
#!/bin/bash
REVISION=1
if [ -n "$1" ]; then
REVISION=$1
fi
CURRENTVERSION=$(git describe |sed -s 's/-.*$//')
#CURRENTVERSION=$(git tag|tail -1)
NEXTVERSION=""
function incrementVersion {
PRE=$(echo $CURRENTVERSION|sed -e 's/\.[0-9]\+$//'|sed -e 's/^v//')
LAST=$(echo $CURRENTVERSION|sed -e 's/^v[0-9]\+\.[0-9]\+\.//')
NEXTVERSION="$PRE"".""$(( $LAST + 1 ))"
}
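# Worked example of the arithmetic above (hypothetical tag, for illustration only):
# with CURRENTVERSION=v1.2.3, PRE becomes "1.2", LAST becomes "3",
# so NEXTVERSION is set to "1.2.4".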
function applyTemplate {
local TMPL=$1
local OUT=$2
local TMPFILE="$OUT""~"
cp -af $TMPL $TMPFILE
sed -i -e "s/%%%VERSION%%%/$NEXTVERSION/g" $TMPFILE
sed -i -e "s/%%%REVISION%%%/$REVISION/g" $TMPFILE
mv -f $TMPFILE $OUT
}
incrementVersion
echo "Current version is $NEXTVERSION-$REVISION"
applyTemplate configure.ac.tmpl configure.ac
applyTemplate debian/changelog.tmpl debian/changelog
echo $NEXTVERSION > package_version
echo $REVISION > package_revision
git submodule update --init --recursive
autoreconf -f -i
|
mydlp/mydlp-endpoint-linux
|
bootstrap.sh
|
Shell
|
gpl-3.0
| 918 |
#!/bin/bash
BASEDIR=$(cd $(dirname $0); pwd)
. $BASEDIR/virtualenv/bin/activate
export PYTHONPATH=$BASEDIR:$PYTHONPATH
python $BASEDIR/garnish.py "$@"
|
hgdeoro/GarnishMyPic
|
gmp.sh
|
Shell
|
gpl-3.0
| 155 |
#!/bin/sh
# Run this to generate all the initial makefiles, etc.
srcdir=`dirname $0`
test -z "$srcdir" && srcdir=.
DIE=0
if [ -n "$GNOME2_DIR" ]; then
ACLOCAL_FLAGS="-I $GNOME2_DIR/share/aclocal $ACLOCAL_FLAGS"
LD_LIBRARY_PATH="$GNOME2_DIR/lib:$LD_LIBRARY_PATH"
PATH="$GNOME2_DIR/bin:$PATH"
export PATH
export LD_LIBRARY_PATH
fi
(test -f $srcdir/configure.ac) || {
echo -n "**Error**: Directory "\`$srcdir\'" does not look like the"
echo " top-level package directory"
exit 1
}
(autoconf --version) < /dev/null > /dev/null 2>&1 || {
echo
echo "**Error**: You must have \`autoconf' installed."
echo "Download the appropriate package for your distribution,"
echo "or get the source tarball at ftp://ftp.gnu.org/pub/gnu/"
DIE=1
}
(grep "^IT_PROG_INTLTOOL" $srcdir/configure.ac >/dev/null) && {
(intltoolize --version) < /dev/null > /dev/null 2>&1 || {
echo
echo "**Error**: You must have \`intltool' installed."
echo "You can get it from:"
echo " ftp://ftp.gnome.org/pub/GNOME/"
DIE=1
}
}
(grep "^AM_PROG_XML_I18N_TOOLS" $srcdir/configure.ac >/dev/null) && {
(xml-i18n-toolize --version) < /dev/null > /dev/null 2>&1 || {
echo
echo "**Error**: You must have \`xml-i18n-toolize' installed."
echo "You can get it from:"
echo " ftp://ftp.gnome.org/pub/GNOME/"
DIE=1
}
}
(grep "^AM_PROG_LIBTOOL" $srcdir/configure.ac >/dev/null) && {
(libtool --version) < /dev/null > /dev/null 2>&1 || {
echo
echo "**Error**: You must have \`libtool' installed."
echo "You can get it from: ftp://ftp.gnu.org/pub/gnu/"
DIE=1
}
}
(grep "^AM_GLIB_GNU_GETTEXT" $srcdir/configure.ac >/dev/null) && {
(grep "sed.*POTFILES" $srcdir/configure.ac) > /dev/null || \
(glib-gettextize --version) < /dev/null > /dev/null 2>&1 || {
echo
echo "**Error**: You must have \`glib' installed."
echo "You can get it from: ftp://ftp.gtk.org/pub/gtk"
DIE=1
}
}
(automake --version) < /dev/null > /dev/null 2>&1 || {
echo
echo "**Error**: You must have \`automake' installed."
echo "You can get it from: ftp://ftp.gnu.org/pub/gnu/"
DIE=1
NO_AUTOMAKE=yes
}
# if no automake, don't bother testing for aclocal
test -n "$NO_AUTOMAKE" || (aclocal --version) < /dev/null > /dev/null 2>&1 || {
echo
echo "**Error**: Missing \`aclocal'. The version of \`automake'"
echo "installed doesn't appear recent enough."
echo "You can get automake from ftp://ftp.gnu.org/pub/gnu/"
DIE=1
}
if test "$DIE" -eq 1; then
exit 1
fi
if test -z "$*"; then
echo "**Warning**: I am going to run \`configure' with no arguments."
echo "If you wish to pass any to it, please specify them on the"
echo \`$0\'" command line."
echo
fi
case $CC in
xlc )
am_opt=--include-deps;;
esac
for coin in `find $srcdir -path $srcdir/CVS -prune -o -name configure.ac -print`
do
dr=`dirname $coin`
if test -f $dr/NO-AUTO-GEN; then
echo skipping $dr -- flagged as no auto-gen
else
echo processing $dr
( cd $dr
aclocalinclude="$ACLOCAL_FLAGS"
if grep "^AM_GLIB_GNU_GETTEXT" configure.ac >/dev/null; then
echo "Creating $dr/aclocal.m4 ..."
test -r $dr/aclocal.m4 || touch $dr/aclocal.m4
echo "Running glib-gettextize... Ignore non-fatal messages."
echo "no" | glib-gettextize --force --copy
echo "Making $dr/aclocal.m4 writable ..."
test -r $dr/aclocal.m4 && chmod u+w $dr/aclocal.m4
fi
if grep "^IT_PROG_INTLTOOL" configure.ac >/dev/null; then
echo "Running intltoolize..."
intltoolize --copy --force --automake
fi
if grep "^AM_PROG_XML_I18N_TOOLS" configure.ac >/dev/null; then
echo "Running xml-i18n-toolize..."
xml-i18n-toolize --copy --force --automake
fi
if grep "^AM_PROG_LIBTOOL" configure.ac >/dev/null; then
if test -z "$NO_LIBTOOLIZE" ; then
echo "Running libtoolize..."
libtoolize --force --copy
fi
fi
echo "Running aclocal $aclocalinclude ..."
aclocal $aclocalinclude
if grep "^A[CM]_CONFIG_HEADER" configure.ac >/dev/null; then
echo "Running autoheader..."
autoheader
fi
echo "Running automake --gnu $am_opt ..."
automake --add-missing --copy --gnu $am_opt
echo "Running autoconf ..."
autoconf
)
fi
done
conf_flags="--enable-maintainer-mode"
if test x$NOCONFIGURE = x; then
echo Running $srcdir/configure $conf_flags "$@" ...
$srcdir/configure $conf_flags "$@" \
&& echo Now type \`make\' to compile. || exit 1
else
echo Skipping configure process.
fi
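# Illustrative invocations (values are examples only; any arguments are passed
# straight through to configure, as the warning above notes):
#   ./autogen.sh --prefix=/usr/local
#   NOCONFIGURE=1 ./autogen.sh   # regenerate the build system without running configure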
|
upcomingnewton/gtk_oyranos
|
autogen.sh
|
Shell
|
gpl-3.0
| 4,514 |
#!/bin/bash
# Script to sign an installer file using GPG secure key downloaded from Azure.
# The keys are downloaded in the Azure Pipelines file. This script depends on
# several environment variables used by the Azure Pipelines build.
# Enable "strict mode", see
# http://redsymbol.net/articles/unofficial-bash-strict-mode/
set -euo pipefail
IFS=$'\n\t'
if [ -z $AGENT_TEMPDIRECTORY ]; then
echo "AGENT_TEMPDIRECTORY environment variable not set"
exit 1
fi
if [ -z $SIGNDATAPW ]; then
echo "SIGNDATAPW not set, will not sign anything"
exit 0
fi
ATMP=`cygpath -u "$AGENT_TEMPDIRECTORY"`
if ! [ -d $ATMP ]; then
echo "AGENT_TEMPDIRECTORY is not a valid directory: $AGENT_TEMPDIRECTORY"
exit 1
fi
KEYA=$ATMP/al2_sign.tar.gpg
if [ -r $KEYA ]; then
export GNUPGHOME=$ATMP/$BUILD_BUILDID.gnupg
mkdir -p $GNUPGHOME
gpg --decrypt --quiet --batch --yes --passphrase $SIGNDATAPW "$KEYA" |\
tar xv -C $GNUPGHOME
PUBKEY=$GNUPGHOME/al2_sign_pub.asc
SECKEY=$GNUPGHOME/al2_sign_sec.bin
gpg --import $PUBKEY
[ $? -ne 0 ] && echo "Public key import failed." && exit 1
gpg --allow-secret-key-import --import $SECKEY
[ $? -ne 0 ] && echo "Private key import failed." && exit 1
else
# If I configured the pipeline correctly, the keys will not be downloaded
# for pull requests outside the main repository -- need to handle this
# case gracefully and not fail the build...
echo "No signing keys found, will not sign anything"
exit 0
fi
EXIT_CODE=0;
PKG=`ls ActivityLog2Setup-*.exe 2>/dev/null`
if [ -z $PKG ]; then
echo "Could not find installer file to sign"
EXIT_CODE=1
else
echo "Will sign $PKG"
SIG=`echo $PKG | sed 's/\\.exe/.sig/'`
echo "Signature file will be $SIG"
if [ -e $SIG ]; then
echo "Removing previous signature file..."
rm "$SIG"
fi
gpg --detach-sign --armor --output $SIG $PKG
if [ $? -ne 0 ]; then
echo "Failed to create signature"
EXIT_CODE=1
else
echo "Created signature."
fi
fi
rm -rf "$GNUPGHOME"
exit $EXIT_CODE
|
alex-hhh/ActivityLog2
|
etc/scripts/sign-release.sh
|
Shell
|
gpl-3.0
| 2,103 |
SAMBA4_GIT_REV=
SAMBA4_GIT_VER=4.0.0alpha19
SAMBA4_RELEASE=4.0.0alpha19
|
inverse-inc/openchange.old
|
script/samba4_ver.sh
|
Shell
|
gpl-3.0
| 73 |
#!/bin/bash
# By: Liptan Biswas
yum -y install wget nano
# Check if user has root privileges
if [[ $EUID -ne 0 ]]; then
echo "You must run the script as root or using sudo"
exit 1
fi
mkdir /usr/local/tomcat
groupadd tomcat
useradd -M -s /bin/nologin -g tomcat -d /usr/local/tomcat tomcat
wget --header 'Cookie: oraclelicense=a' http://download.oracle.com/otn-pub/java/jdk/9.0.4+11/c2514751926b4512b076cc82f959763f/jdk-9.0.4_linux-x64_bin.rpm
rpm -Uvh jdk-9.0.4_linux-x64_bin.rpm
echo "export JAVA_HOME=/usr/java/jdk-9.0.4" >> ~/.bash_profile
echo "export JRE_HOME=/usr/java/jdk-9.0.4/jre" >> ~/.bash_profile
source ~/.bash_profile
java -version
cd /usr/local/tomcat
wget http://www-us.apache.org/dist/tomcat/tomcat-9/v9.0.4/bin/apache-tomcat-9.0.4.tar.gz
tar -xf apache-tomcat-*.tar.gz --strip-components=1
cd /usr/local/tomcat
chgrp -R tomcat conf
chmod g+rwx conf
chmod g+r conf/*
chown -R tomcat webapps/ work/ temp/ logs/
chown -R tomcat:tomcat *
chown -R tomcat:tomcat /usr/local/tomcat
cat > /etc/systemd/system/tomcat.service <<'EOF'
# Systemd unit file for tomcat
[Unit]
Description=Apache Tomcat Web Application Container
After=syslog.target network.target
[Service]
Type=forking
Environment=JAVA_HOME=/usr/java/jdk-9.0.4
Environment=CATALINA_PID=/usr/local/tomcat/temp/tomcat.pid
Environment=CATALINA_HOME=/usr/local/tomcat
Environment=CATALINA_BASE=/usr/local/tomcat
Environment='CATALINA_OPTS=-Xms512M -Xmx1024M -server -XX:+UseParallelGC'
Environment='JAVA_OPTS=-Djava.awt.headless=true -Djava.security.egd=file:/dev/./urandom'
ExecStart=/usr/local/tomcat/bin/startup.sh
ExecStop=/bin/kill -15 $MAINPID
User=tomcat
Group=tomcat
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl start tomcat
systemctl enable tomcat
echo "[mariadb]
name = MariaDB
baseurl = http://yum.mariadb.org/10.2/centos7-amd64
gpgkey=https://yum.mariadb.org/RPM-GPG-KEY-MariaDB
gpgcheck=1" | sudo tee /etc/yum.repos.d/mariadb.repo
sudo yum -y install MariaDB-server MariaDB-client
sudo systemctl start mariadb
sudo systemctl enable mariadb
sed -i 's/^SELINUX=.*/SELINUX=disabled/g' /etc/sysconfig/selinux
setenforce 0
echo "Tomcat is installed in /usr/local/tomcat"
echo "Open in web browser:"
echo "http://server_IP_address:8080"
echo "MySQL is also installed with no root password. Secure MySQL by running 'mysql_secure_installation'"
|
liptanbiswas/OneClickInstall
|
centos7/tomcat.sh
|
Shell
|
gpl-3.0
| 2,369 |
#!/bin/sh
# This script intends to decrease the effort of updating the package.
PACKAGE="intellij-idea-community"
DISTRIBUTION="impish"
main() {
last_tag=$(git describe --abbrev=0 --tags)
old="${last_tag#?}"
new="$1"
name="$(git config --get user.name)"
email="$(git config --get user.email)"
git checkout -b version-"$new"
mv "$PACKAGE"_"$old" "$PACKAGE"_"$new"
mv "$PACKAGE"_"$old".orig.tar.gz "$PACKAGE"_"$new".orig.tar.gz
cd "$PACKAGE"_"$new" || exit
# Update the debian/changelog file with dch
NAME="$name" EMAIL="$email" dch \
--newversion "$new"-1 \
--distribution "$DISTRIBUTION" \
"Upstream version $new"
sed -i "s/$old/$new/g" ./debian/preinst
sed -i "s/$old/$new/g" ./debian/postinst
debuild -us -uc
cd ..
rm "$PACKAGE"_"$old"-*
# disabled until it is only called with an specific argument
# sudo dpkg -i "$PACKAGE"_"$new"-1_all.deb
}
main "$1"
|
mmk2410/intellij-idea-community
|
update-new-version.sh
|
Shell
|
gpl-3.0
| 944 |
#!/bin/bash
# Simple script to list version numbers of critical development tools
export LC_ALL=C
bash --version | head -n1 | cut -d" " -f2-4
echo "/bin/sh -> 'readlink -f /bin/sh'"
echo -n "Binutils: "; ld --version | head -n1 | cut -d" " -f3-
bison --version | head -n1
if [ -h /usr/bin/yacc ]; then
echo "/usr/bin/yacc -> 'readlink -f /usr/bin/yacc'";
elif [ -x /usr/bin/yacc ]; then
echo yacc is $(/usr/bin/yacc --version | head -n1)
else
echo "yacc not found"
fi
bzip2 --version 2>&1 < /dev/null | head -n1 | cut -d" " -f1,6-
echo -n "Coreutils: "; chown --version | head -n1 | cut -d")" -f2
diff --version | head -n1
find --version | head -n1
gawk --version | head -n1
if [ -h /usr/bin/awk ]; then
echo "/usr/bin/awk -> 'readlink -f /usr/bin/awk'";
elif [ -x /usr/bin/awk ]; then
echo awk is $(/usr/bin/awk --version | head -n1)
else
echo "awk not found"
fi
gcc --version | head -n1
g++ --version | head -n1
ldd --version | head -n1 | cut -d" " -f2 # glibc version
grep --version | head -n1
gzip --version | head -n1
cat /proc/version
m4 --version | head -n1
make --version | head -n1
patch --version | head -n1
echo Perl $(perl -V:version)
sed --version | head -n1
tar --version | head -n1
makeinfo --version | head -n1
xz --version | head -n1
echo 'int main(){}' > dummy.c && g++ -o dummy dummy.c
if [ -x dummy ]
then echo "g++ compilation ok";
else echo "g++ compilation failed"; fi
rm -f dummy.c dummy
|
Drakesinger/LFS
|
Scripts/version-check.sh
|
Shell
|
gpl-3.0
| 1,444 |
#!/bin/bash
set -eu
. `dirname $0`/../secrets.sh
if [ -z "${ETHERHOUSE_HOST}" ]; then
ETHERHOUSE_HOST=etherhouse.xkyle.com
fi
curl "${ETHERHOUSE_HOST}/off?id=0&api_key=${APIKEY0}"
curl "${ETHERHOUSE_HOST}/off?id=1&api_key=${APIKEY1}"
curl "${ETHERHOUSE_HOST}/off?id=2&api_key=${APIKEY2}"
curl "${ETHERHOUSE_HOST}/off?id=3&api_key=${APIKEY3}"
curl "${ETHERHOUSE_HOST}/off?id=4&api_key=${APIKEY4}"
curl "${ETHERHOUSE_HOST}/off?id=5&api_key=${APIKEY5}"
curl "${ETHERHOUSE_HOST}/off?id=6&api_key=${APIKEY6}"
curl "${ETHERHOUSE_HOST}/off?id=7&api_key=${APIKEY7}"
|
solarkennedy/ether_housed
|
tools/all_off.sh
|
Shell
|
gpl-3.0
| 561 |
#!/bin/bash
echo 'fixing bluebird'
find . -path "*/bluebird/js/*.js" -exec sed -i '' s/process.versions.node.split/process.version.node.split/ {} +
|
tradle/tim
|
scripts/fix-bluebird.sh
|
Shell
|
gpl-3.0
| 148 |
#!/bin/bash
# config-existing-irods.sh
# Author: Michael Stealey <[email protected]>
SERVICE_ACCOUNT_CONFIG_FILE="/etc/irods/service_account.config"
IRODS_HOME_DIR="/var/lib/irods"
# Get environment variables from iRODS setup
while read line; do export $line; done < <(cat ${SERVICE_ACCOUNT_CONFIG_FILE})
# get service account name
MYACCTNAME=`echo "${IRODS_SERVICE_ACCOUNT_NAME}" | sed -e "s/\///g"`
# get group name
MYGROUPNAME=`echo "${IRODS_SERVICE_GROUP_NAME}" | sed -e "s/\///g"`
##################################
# Set up Service Group and Account
##################################
# Group
set +e
CHECKGROUP=`getent group $MYGROUPNAME `
set -e
if [ "$CHECKGROUP" = "" ] ; then
# new group
echo "Creating Service Group: $MYGROUPNAME "
/usr/sbin/groupadd -r $MYGROUPNAME
else
# use existing group
echo "Existing Group Detected: $MYGROUPNAME "
fi
# Account
set +e
CHECKACCT=`getent passwd $MYACCTNAME `
set -e
if [ "$CHECKACCT" = "" ] ; then
# new account
echo "Creating Service Account: $MYACCTNAME at $IRODS_HOME_DIR "
/usr/sbin/useradd -r -d $IRODS_HOME_DIR -M -s /bin/bash -g $MYGROUPNAME -c "iRODS Administrator" $MYACCTNAME
else
# use existing account
# leave user settings and files as is
echo "Existing Account Detected: $MYACCTNAME "
fi
#############
# Permissions
#############
chown -R ${MYACCTNAME}:${MYGROUPNAME} ${IRODS_HOME_DIR}
chown -R ${MYACCTNAME}:${MYGROUPNAME} /etc/irods
# set permissions on iRODS authentication mechanisms
chmod 4755 ${IRODS_HOME_DIR}/iRODS/server/bin/PamAuthCheck
chmod 4755 /usr/bin/genOSAuth
# start iRODS server as user irods
su irods <<'EOF'
sed -i 's/"irods_host".*/"irods_host": "localhost",/g' /var/lib/irods/.irods/irods_environment.json
/var/lib/irods/iRODS/irodsctl restart
while read line; do iadmin modresc ${line} host `hostname`; done < <(ilsresc)
EOF
# OPTIONAL: Install irods-dev
#rpm -i $(ls -l | tr -s ' ' | grep irods-dev | cut -d ' ' -f 9)
# Install irods-runtime
#rpm -i $(ls -l | tr -s ' ' | grep irods-runtime | cut -d ' ' -f 9)
# Install irods-microservice-plugins
#rpm -i $(ls -l | tr -s ' ' | grep irods-microservice-plugins | cut -d ' ' -f 9)
# Keep container in a running state
/usr/bin/tail -f /dev/null
|
mjstealey/appstack-irods-icat-4.1.x-pgsql
|
scripts/config-existing-irods.sh
|
Shell
|
gpl-3.0
| 2,225 |
#! /bin/bash
# mandatory arguments
[ "$#" -eq 3 ] || exit "3 arguments required, $# provided - <prefix of nodes> <number of nodes> <path image to use>"
NAME_PREFIX="$1"
NODE_COUNT="$2"
IMAGE="$3"
echo "Preparing Images..."
for (( i=1; i<=$NODE_COUNT; i++ ))
do
flash --hostname ${NAME_PREFIX}${i} ${IMAGE}
done
|
gregdaynes/Docker-Bramble
|
init.sh
|
Shell
|
gpl-3.0
| 318 |
#!/usr/bin/env bash
#A support function used to append export commands to .bashrc, .zshrc, etc.
#At this point it can be used for any command on any file, but it's mainly used for appending export commands.
#Author: Toberumono (https://github.com/Toberumono)
should_reopen=""
update_rc() {
local client="$1"
shift
local file_path="$1"
shift
local added=false
for p in "$@"; do
local add_path=$(grep -F -e "$p" "$file_path")
if [ "$add_path" != "" ]; then
echo "$p is already in $file_path."
continue;
fi
if ( ! $added ); then
echo "" >> "$file_path"
echo "# This adds the necessary $client paths." >> "$file_path"
[ "$should_reopen" == "" ] && should_reopen="$file_path" || should_reopen=$should_reopen" $file_path"
added=true
fi
echo "Adding $p to $file_path."; echo "$p" >> "$file_path"
done
}
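# Illustrative usage (hypothetical client name, file and export line; update_rc is
# normally called from the project's installer scripts):
#   update_rc "Homebrew" "$HOME/.bashrc" 'export PATH="/usr/local/bin:$PATH"'
# Lines already present in the target file are skipped; anything appended is recorded
# in $should_reopen so the caller can prompt the user to re-source or reopen that file.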
|
Toberumono/Miscellaneous
|
common/update_rc.sh
|
Shell
|
gpl-3.0
| 839 |
#!/usr/bin/env bash
gnome-terminal -e 'java -mx1000m -jar tools/tika-app-1.24.1.jar --port 9998' #> /dev/null 2>&1 &
gnome-terminal -e 'java -mx2000m -cp "tools/stanford-corenlp-4.1.0/*" edu.stanford.nlp.pipeline.StanfordCoreNLPServer' #> /dev/null 2>&1 &
|
ross-spencer/nerlinks
|
start-tools.sh
|
Shell
|
gpl-3.0
| 260 |
#!/bin/bash
#-------------------------------------------------------------------------------
#
# script_name
#
#-------------------------------------------------------------------------------
# Help
if [ -z "$HELP" ]
then
export HELP="
This script does something useful (hopefully).
--------------------------------------------------------------------------------
Tested under: Whatever you're using to develop
Licensed under: GPLv3
See the project page at: http://domain/project
Report issues here: http://domain/issue_tracker
"
fi
if [ -z "$USAGE" ]
then
export USAGE="
usage: script_name [ -h | --help ] | Show usage information
--------------------------------------------------------------------------------
"
fi
#-------------------------------------------------------------------------------
# Parameters
STATUS=0
SCRIPT_DIR="$(cd "$(dirname "$([ `readlink "$0"` ] && echo "`readlink "$0"`" || echo "$0")")"; pwd -P)"
SHELL_LIB_DIR="$SCRIPT_DIR/lib/shell"
source "$SHELL_LIB_DIR/load.sh" || exit 1
#---
PARAMS=`normalize_params "$@"`
parse_flag '-h|--help' HELP_WANTED
# Standard help message.
if [ "$HELP_WANTED" ]
then
echo "$HELP"
echo "$USAGE"
exit 0
fi
if [ $STATUS -ne 0 ]
then
echo "$USAGE"
exit $STATUS
fi
ARGS=`get_args "$PARAMS"`
#-------------------------------------------------------------------------------
# Start
exit $STATUS
|
coralnexus/bash-lib
|
starter.sh
|
Shell
|
gpl-3.0
| 1,394 |
#!/bin/bash
NAME=$1
PORT=$2
TAG=penginecore:latest
./Scripts/docker-rm.sh $NAME
./Scripts/docker-create.sh $NAME $PORT
./Scripts/docker-refresh-app-volume.sh $NAME
./Scripts/docker-start.sh $NAME
|
jaylittle/presentation-engine
|
Scripts/docker-recreate.sh
|
Shell
|
gpl-3.0
| 196 |
function refresh_docker_images {
# Arguments:
# $1 - Path to top level Consul source
# $2 - Which make target to invoke (optional)
#
# Return:
# 0 - success
# * - failure
if ! test -d "$1"
then
err "ERROR: '$1' is not a directory. refresh_docker_images must be called with the path to the top level source as the first argument'"
return 1
fi
local sdir="$1"
local targets="$2"
test -n "${targets}" || targets="docker-images"
make -C "${sdir}" ${targets}
return $?
}
function build_ui {
# Arguments:
# $1 - Path to the top level Consul source
# $2 - The docker image to run the build within (optional)
# $3 - Version override
#
# Returns:
# 0 - success
# * - error
#
# Notes:
# Use the GIT_COMMIT environment variable to pass off to the build
if ! test -d "$1"
then
err "ERROR: '$1' is not a directory. build_ui must be called with the path to the top level source as the first argument'"
return 1
fi
local image_name=${UI_BUILD_CONTAINER_DEFAULT}
if test -n "$2"
then
image_name="$2"
fi
local sdir="$1"
local ui_dir="${1}/ui-v2"
# parse the version
version=$(parse_version "${sdir}")
if test -n "$3"
then
version="$3"
fi
local commit_hash="${GIT_COMMIT}"
if test -z "${commit_hash}"
then
commit_hash=$(git rev-parse --short HEAD)
fi
# make sure we run within the ui dir
pushd ${ui_dir} > /dev/null
status "Creating the UI Build Container with image: ${image_name} and version '${version}'"
local container_id=$(docker create -it -e "CONSUL_GIT_SHA=${commit_hash}" -e "CONSUL_VERSION=${version}" ${image_name})
local ret=$?
if test $ret -eq 0
then
status "Copying the source from '${ui_dir}' to /consul-src within the container"
(
tar -c $(ls -A | grep -v "^(node_modules\|dist\|tmp)") | docker cp - ${container_id}:/consul-src &&
status "Running build in container" && docker start -i ${container_id} &&
rm -rf ${1}/ui-v2/dist &&
status "Copying back artifacts" && docker cp ${container_id}:/consul-src/dist ${1}/ui-v2/dist
)
ret=$?
docker rm ${container_id} > /dev/null
fi
if test ${ret} -eq 0
then
local ui_vers=$(ui_version "${1}/ui-v2/dist/index.html")
if test "${version}" != "${ui_vers}"
then
err "ERROR: UI version mismatch. Expecting: '${version}' found '${ui_vers}'"
ret=1
else
rm -rf ${1}/pkg/web_ui/v2
mkdir -p ${1}/pkg/web_ui
cp -r ${1}/ui-v2/dist ${1}/pkg/web_ui/v2
fi
fi
popd > /dev/null
return $ret
}
function build_ui_legacy {
# Arguments:
# $1 - Path to the top level Consul source
# $2 - The docker image to run the build within (optional)
#
# Returns:
# 0 - success
# * - error
if ! test -d "$1"
then
err "ERROR: '$1' is not a directory. build_ui_legacy must be called with the path to the top level source as the first argument'"
return 1
fi
local sdir="$1"
local ui_legacy_dir="${sdir}/ui"
local image_name=${UI_LEGACY_BUILD_CONTAINER_DEFAULT}
if test -n "$2"
then
image_name="$2"
fi
pushd ${ui_legacy_dir} > /dev/null
status "Creating the Legacy UI Build Container with image: ${image_name}"
rm -r ${sdir}/pkg/web_ui/v1 >/dev/null 2>&1
mkdir -p ${sdir}/pkg/web_ui/v1
local container_id=$(docker create -it ${image_name})
local ret=$?
if test $ret -eq 0
then
status "Copying the source from '${ui_legacy_dir}' to /consul-src/ui within the container"
(
docker cp . ${container_id}:/consul-src/ui &&
status "Running build in container" &&
docker start -i ${container_id} &&
status "Copying back artifacts" &&
docker cp ${container_id}:/consul-src/pkg/web_ui/v1/. ${sdir}/pkg/web_ui/v1
)
ret=$?
docker rm ${container_id} > /dev/null
fi
popd > /dev/null
return $ret
}
function build_assetfs {
# Arguments:
# $1 - Path to the top level Consul source
# $2 - The docker image to run the build within (optional)
#
# Returns:
# 0 - success
# * - error
#
# Note:
# The GIT_COMMIT, GIT_DIRTY and GIT_DESCRIBE environment variables will be used if present
if ! test -d "$1"
then
err "ERROR: '$1' is not a directory. build_assetfs must be called with the path to the top level source as the first argument'"
return 1
fi
local sdir="$1"
local image_name=${GO_BUILD_CONTAINER_DEFAULT}
if test -n "$2"
then
image_name="$2"
fi
pushd ${sdir} > /dev/null
status "Creating the Go Build Container with image: ${image_name}"
local container_id=$(docker create -it -e GIT_COMMIT=${GIT_COMMIT} -e GIT_DIRTY=${GIT_DIRTY} -e GIT_DESCRIBE=${GIT_DESCRIBE} ${image_name} make static-assets ASSETFS_PATH=bindata_assetfs.go)
local ret=$?
if test $ret -eq 0
then
status "Copying the sources from '${sdir}/(pkg/web_ui|GNUmakefile)' to /go/src/github.com/hashicorp/consul/pkg"
(
tar -c pkg/web_ui GNUmakefile | docker cp - ${container_id}:/go/src/github.com/hashicorp/consul &&
status "Running build in container" && docker start -i ${container_id} &&
status "Copying back artifacts" && docker cp ${container_id}:/go/src/github.com/hashicorp/consul/bindata_assetfs.go ${sdir}/agent/bindata_assetfs.go
)
ret=$?
docker rm ${container_id} > /dev/null
fi
popd >/dev/null
return $ret
}
function build_consul_post {
# Arguments
# $1 - Path to the top level Consul source
# $2 - Subdirectory under pkg/bin (Optional)
#
# Returns:
# 0 - success
# * - error
#
# Notes:
# pkg/bin is where to place binary packages
# pkg.bin.new is where the just built binaries are located
# bin is where to place the local systems versions
if ! test -d "$1"
then
err "ERROR: '$1' is not a directory. build_consul_post must be called with the path to the top level source as the first argument'"
return 1
fi
local sdir="$1"
local extra_dir_name="$2"
local extra_dir=""
if test -n "${extra_dir_name}"
then
extra_dir="${extra_dir_name}/"
fi
pushd "${sdir}" > /dev/null
# recreate the pkg dir
rm -r pkg/bin/${extra_dir}* 2> /dev/null
mkdir -p pkg/bin/${extra_dir} 2> /dev/null
# move all files in pkg.new into pkg
cp -r pkg.bin.new/${extra_dir}* pkg/bin/${extra_dir}
rm -r pkg.bin.new
DEV_PLATFORM="./pkg/bin/${extra_dir}$(go env GOOS)_$(go env GOARCH)"
for F in $(find ${DEV_PLATFORM} -mindepth 1 -maxdepth 1 -type f 2>/dev/null)
do
# recreate the bin dir
rm -r bin/* 2> /dev/null
mkdir -p bin 2> /dev/null
cp ${F} bin/
cp ${F} ${MAIN_GOPATH}/bin
done
popd > /dev/null
return 0
}
function build_consul {
# Arguments:
# $1 - Path to the top level Consul source
# $2 - Subdirectory to put binaries in under pkg/bin (optional - must specify if needing to specify the docker image)
# $3 - The docker image to run the build within (optional)
#
# Returns:
# 0 - success
# * - error
#
# Note:
# The GOLDFLAGS and GOTAGS environment variables will be used if set
# If the CONSUL_DEV environment var is truthy only the local platform/architecture is built.
# If the XC_OS or the XC_ARCH environment vars are present then only those platforms/architectures
# will be built. Otherwise all supported platform/architectures are built
if ! test -d "$1"
then
err "ERROR: '$1' is not a directory. build_consul must be called with the path to the top level source as the first argument'"
return 1
fi
local sdir="$1"
local extra_dir_name="$2"
local extra_dir=""
local image_name=${GO_BUILD_CONTAINER_DEFAULT}
if test -n "$3"
then
image_name="$3"
fi
pushd ${sdir} > /dev/null
status "Creating the Go Build Container with image: ${image_name}"
if is_set "${CONSUL_DEV}"
then
if test -z "${XC_OS}"
then
XC_OS=$(go env GOOS)
fi
if test -z "${XC_ARCH}"
then
XC_ARCH=$(go env GOARCH)
fi
fi
XC_OS=${XC_OS:-"solaris darwin freebsd linux windows"}
XC_ARCH=${XC_ARCH:-"386 amd64 arm arm64"}
if test -n "${extra_dir_name}"
then
extra_dir="${extra_dir_name}/"
fi
local container_id=$(docker create -it -e CGO_ENABLED=0 ${image_name} gox -os="${XC_OS}" -arch="${XC_ARCH}" -osarch="!darwin/arm !darwin/arm64" -ldflags "${GOLDFLAGS}" -output "pkg/bin/${extra_dir}{{.OS}}_{{.Arch}}/consul" -tags="${GOTAGS}")
ret=$?
if test $ret -eq 0
then
status "Copying the source from '${sdir}' to /go/src/github.com/hashicorp/consul"
(
tar -c $(ls | grep -v "^(ui\|ui-v2\|website\|bin\|pkg\|.git)") | docker cp - ${container_id}:/go/src/github.com/hashicorp/consul &&
status "Running build in container" &&
docker start -i ${container_id} &&
status "Copying back artifacts" &&
docker cp ${container_id}:/go/src/github.com/hashicorp/consul/pkg/bin pkg.bin.new
)
ret=$?
docker rm ${container_id} > /dev/null
if test $ret -eq 0
then
build_consul_post "${sdir}" "${extra_dir_name}"
ret=$?
else
rm -r pkg.bin.new 2> /dev/null
fi
fi
popd > /dev/null
return $ret
}
function build_consul_local {
# Arguments:
# $1 - Path to the top level Consul source
# $2 - Space separated string of OSes to build. If empty will use env vars for determination.
# $3 - Space separated string of architectures to build. If empty will use env vars for determination.
# $4 - Subdirectory to put binaries in under pkg/bin (optional)
#
# Returns:
# 0 - success
# * - error
#
# Note:
# The GOLDFLAGS and GOTAGS environment variables will be used if set
# If the CONSUL_DEV environment var is truthy only the local platform/architecture is built.
# If the XC_OS or the XC_ARCH environment vars are present then only those platforms/architectures
# will be built. Otherwise all supported platform/architectures are built
# The NOGOX environment variable will be used if present. This will prevent using gox and instead
# build with go install
if ! test -d "$1"
then
err "ERROR: '$1' is not a directory. build_consul must be called with the path to the top level source as the first argument'"
return 1
fi
local sdir="$1"
local build_os="$2"
local build_arch="$3"
local extra_dir_name="$4"
local extra_dir=""
if test -n "${extra_dir_name}"
then
extra_dir="${extra_dir_name}/"
fi
pushd ${sdir} > /dev/null
if is_set "${CONSUL_DEV}"
then
if test -z "${XC_OS}"
then
XC_OS=$(go env GOOS)
fi
if test -z "${XC_ARCH}"
then
XC_ARCH=$(go env GOARCH)
fi
fi
XC_OS=${XC_OS:-"solaris darwin freebsd linux windows"}
XC_ARCH=${XC_ARCH:-"386 amd64 arm arm64"}
if test -z "${build_os}"
then
build_os="${XC_OS}"
fi
if test -z "${build_arch}"
then
build_arch="${XC_ARCH}"
fi
local use_gox=1
is_set "${NOGOX}" && use_gox=0
which gox > /dev/null || use_gox=0
status_stage "==> Building Consul - OSes: ${build_os}, Architectures: ${build_arch}"
mkdir pkg.bin.new 2> /dev/null
if is_set "${use_gox}"
then
status "Using gox for concurrent compilation"
CGO_ENABLED=0 gox \
-os="${build_os}" \
-arch="${build_arch}" \
-osarch="!darwin/arm !darwin/arm64" \
-ldflags="${GOLDFLAGS}" \
-output "pkg.bin.new/${extra_dir}{{.OS}}_{{.Arch}}/consul" \
-tags="${GOTAGS}" \
.
if test $? -ne 0
then
err "ERROR: Failed to build Consul"
rm -r pkg.bin.new
return 1
fi
else
status "Building sequentially with go install"
for os in ${build_os}
do
for arch in ${build_arch}
do
outdir="pkg.bin.new/${extra_dir}${os}_${arch}"
osarch="${os}/${arch}"
if test "${osarch}" == "darwin/arm" -o "${osarch}" == "darwin/arm64" -o "${osarch}" == "freebsd/arm64" -o "${osarch}" == "windows/arm" -o "${osarch}" == "windows/arm64"
then
continue
fi
if test "${os}" == "solaris" -a "${arch}" != "amd64"
then
continue
fi
echo "---> ${osarch}"
mkdir -p "${outdir}"
GOBIN_EXTRA=""
if test "${os}" != "$(go env GOOS)" -o "${arch}" != "$(go env GOARCH)"
then
GOBIN_EXTRA="${os}_${arch}/"
fi
CGO_ENABLED=0 GOOS=${os} GOARCH=${arch} go install -ldflags "${GOLDFLAGS}" -tags "${GOTAGS}" && cp "${MAIN_GOPATH}/bin/${GOBIN_EXTRA}consul" "${outdir}/consul"
if test $? -ne 0
then
err "ERROR: Failed to build Consul for ${osarch}"
rm -r pkg.bin.new
return 1
fi
done
done
fi
build_consul_post "${sdir}" "${extra_dir_name}"
if test $? -ne 0
then
err "ERROR: Failed postprocessing Consul binaries"
return 1
fi
return 0
}
|
calgaryscientific/consul
|
build-support/functions/20-build.sh
|
Shell
|
mpl-2.0
| 13,723 |
#!/bin/bash
set -e
set -o pipefail
COVERAGE_FILES_PATTERN='./src/packages/*/coverage/lcov.info'
CC_COVERAGE_DIR='./coverage'
CC_REPORTER="${CC_COVERAGE_DIR}/cc-test-reporter"
### Check whether the upload should be run or not. ###
if [ "$TRAVIS_BRANCH" != 'master' ]; then
echo 'The build is not running on branch "master". Coverage report will not be uploaded.'
exit 0
fi
if [ "$TRAVIS_TEST_RESULT" != 0 ]; then
echo 'Tests failed. Coverage report will not be uploaded.'
exit 0
fi
if [ -z "${CC_TEST_REPORTER_ID}" ]; then
>&2 echo '"CC_TEST_REPORTER_ID" is not set, coverage report cannot be uploaded.'
exit 1
fi
# Fetch Code Climate reporter
if [ ! -f "$CC_REPORTER" ]; then
mkdir -p "$CC_COVERAGE_DIR"
curl -L https://codeclimate.com/downloads/test-reporter/test-reporter-latest-linux-amd64 > "$CC_REPORTER"
chmod +x "$CC_REPORTER"
fi
# Prepare
"$CC_REPORTER" before-build
# Format coverage files
find . -type f -wholename "$COVERAGE_FILES_PATTERN" -print0 |
while IFS= read -r -d '' file; do
src_package_name="$(echo "$file" | cut -d'/' -f 4)"
"$CC_REPORTER" format-coverage -t lcov -o "${CC_COVERAGE_DIR}/codeclimate.${src_package_name}.json" "$file"
echo "Formatted ${CC_COVERAGE_DIR}/codeclimate.${src_package_name}.json from ${file}."
done
# Merge all coverage parts
# (the count is taken here because the while loop above runs in a pipeline subshell,
# so a counter incremented inside it would not be visible outside it)
coverage_files_count="$(find . -type f -wholename "$COVERAGE_FILES_PATTERN" | wc -l)"
if [ "$coverage_files_count" != 0 ]; then
echo "Merging $coverage_files_count coverage files"
"$CC_REPORTER" sum-coverage "${CC_COVERAGE_DIR}"/codeclimate.*.json -p "$coverage_files_count"
else
>&2 echo 'No coverage files found to merge.'
exit 2
fi
# Upload report
"$CC_REPORTER" upload-coverage
# Cleanup
rm -r "$CC_COVERAGE_DIR"
|
MikkCZ/pontoon-tools
|
scripts/upload-coverage-to-codeclimate.sh
|
Shell
|
mpl-2.0
| 1,846 |
#!/bin/csh -f
#
# svn $Id: job_psas_sen.sh 709 2014-01-23 20:09:38Z arango $
#######################################################################
# Copyright (c) 2002-2014 The ROMS/TOMS Group #
# Licensed under a MIT/X style license #
# See License_ROMS.txt #
#######################################################################
# #
# Strong/Weak constraint 4D-PSAS observation impact or sensitivity #
# job script: #
# #
# This script NEEDS to be run before any run: #
# #
# (1) It copies a new clean nonlinear model initial conditions #
# file. The nonlinear model is initialized from the #
# background or reference state. #
# (2) It copies Lanczos vectors from previous 4D-PSAS run. They #
# are stored in 4D-Var data assimilation file. #
# (3) It copies the adjoint sensitivity functional file for the #
# observation impact or sensitivity. #
# (4) Specify model, initial conditions, boundary conditions, and #
# surface forcing error covariance input standard deviations #
# files. #
# (5) Specify model, initial conditions, boundary conditions, and #
# surface forcing error covariance input/output normalization #
# factors files. #
# (6) Copy a clean copy of the observations NetCDF file. #
# (7) Create 4D-Var input script "psas.in" from template and #
# specify the error covariance standard deviation, error #
# covariance normalization factors, and observation files to #
# be used. #
# #
#######################################################################
# Set path definition to one directory up in the tree.
set Dir=`dirname ${PWD}`
# Set string manipulations perl script.
set SUBSTITUTE=${ROMS_ROOT}/ROMS/Bin/substitute
# Copy nonlinear model initial conditions file.
cp -p ${Dir}/Data/wc13_ini.nc wc13_ini.nc
# Copy Lanczos vectors from previous 4D-PSAS run. They are stored
# in 4D-Var data assimilation file.
cp -p ${Dir}/PSAS/wc13_mod.nc wc13_lcz.nc
# Copy adjoint sensitivity functional.
cp -p ${Dir}/Data/wc13_ads.nc wc13_ads.nc
# Set model, initial conditions, boundary conditions and surface
# forcing error covariance standard deviations files.
set STDnameM=${Dir}/Data/wc13_std_m.nc
set STDnameI=${Dir}/Data/wc13_std_i.nc
set STDnameB=${Dir}/Data/wc13_std_b.nc
set STDnameF=${Dir}/Data/wc13_std_f.nc
# Set model, initial conditions, boundary conditions and surface
# forcing error covariance normalization factors files.
set NRMnameM=${Dir}/Data/wc13_nrm_m.nc
set NRMnameI=${Dir}/Data/wc13_nrm_i.nc
set NRMnameB=${Dir}/Data/wc13_nrm_b.nc
set NRMnameF=${Dir}/Data/wc13_nrm_f.nc
# Set observations file.
set OBSname=wc13_obs.nc
# Get a clean copy of the observation file. This is really
# important since this file is modified.
cp -p ${Dir}/Data/${OBSname} .
# Modify 4D-Var template input script and specify above files.
set PSAS=psas.in
if (-e $PSAS) then
/bin/rm $PSAS
endif
cp s4dvar.in $PSAS
$SUBSTITUTE $PSAS ocean_std_m.nc $STDnameM
$SUBSTITUTE $PSAS ocean_std_i.nc $STDnameI
$SUBSTITUTE $PSAS ocean_std_b.nc $STDnameB
$SUBSTITUTE $PSAS ocean_std_f.nc $STDnameF
$SUBSTITUTE $PSAS ocean_nrm_m.nc $NRMnameM
$SUBSTITUTE $PSAS ocean_nrm_i.nc $NRMnameI
$SUBSTITUTE $PSAS ocean_nrm_b.nc $NRMnameB
$SUBSTITUTE $PSAS ocean_nrm_f.nc $NRMnameF
$SUBSTITUTE $PSAS ocean_obs.nc $OBSname
$SUBSTITUTE $PSAS ocean_hss.nc wc13_hss.nc
$SUBSTITUTE $PSAS ocean_lcz.nc wc13_lcz.nc
$SUBSTITUTE $PSAS ocean_mod.nc wc13_mod.nc
$SUBSTITUTE $PSAS ocean_err.nc wc13_err.nc
|
theelectricbrain/ROMS-Tidal-Array
|
ROMS/Bin/job_psas_sen.sh
|
Shell
|
agpl-3.0
| 4,275 |
#!/usr/bin/env bash
################################################################################
# THIS FILE IS 100% GENERATED BY ZPROJECT; DO NOT EDIT EXCEPT EXPERIMENTALLY #
# Please refer to the README for information about making permanent changes. #
################################################################################
# Use directory of current script as the build directory and working directory
cd "$( dirname "${BASH_SOURCE[0]}" )"
ANDROID_BUILD_DIR="$(pwd)"
# Get access to android_build functions and variables
source ${ANDROID_BUILD_DIR}/android_build_helper.sh
# Choose a C++ standard library implementation from the ndk
ANDROID_BUILD_CXXSTL="gnustl_shared_48"
# Set up android build environment and set ANDROID_BUILD_OPTS array
android_build_env
android_build_opts
# Use a temporary build directory
cache="/tmp/android_build/${TOOLCHAIN_NAME}"
mkdir -p "${cache}"
##
# Make sure zyre is built and copy the prefix
(android_build_verify_so "libzyre.so" &> /dev/null) || {
# Use a default value assuming the zyre project sits alongside this one
test -z "$ZYRE_ROOT" && ZYRE_ROOT="$(cd ../../../zyre && pwd)"
if [ ! -d "$ZYRE_ROOT" ]; then
echo "The ZYRE_ROOT directory does not exist"
echo " ${ZYRE_ROOT}"
exit 1
fi
(${ZYRE_ROOT}/builds/qt-android/build.sh) || exit 1
UPSTREAM_PREFIX=${ZYRE_ROOT}/builds/qt-android/prefix/${TOOLCHAIN_NAME}
cp -r ${UPSTREAM_PREFIX}/* ${ANDROID_BUILD_PREFIX}
}
##
# Build drops from local source
(android_build_verify_so "libdrops.so" "libzyre.so" &> /dev/null) || {
rm -rf "${cache}/drops"
(cp -r ../.. "${cache}/drops" && cd "${cache}/drops" \
&& make clean && rm configure config.status)
export LIBTOOL_EXTRA_LDFLAGS='-avoid-version'
(cd "${cache}/drops" && ./autogen.sh \
&& ./configure "${ANDROID_BUILD_OPTS[@]}" \
&& make \
&& make install) || exit 1
}
##
# Verify shared libraries in prefix
android_build_verify_so "libzmq.so"
android_build_verify_so "libczmq.so"
android_build_verify_so "libzyre.so"
android_build_verify_so "libdrops.so" "libzyre.so"
################################################################################
# THIS FILE IS 100% GENERATED BY ZPROJECT; DO NOT EDIT EXCEPT EXPERIMENTALLY #
# Please refer to the README for information about making permanent changes. #
################################################################################
|
edgenet/drops
|
builds/qt-android/build.sh
|
Shell
|
lgpl-3.0
| 2,487 |
export VIRTUAL_ENV_DISABLE_PROMPT=yes
infoline-virtualenv() {
if [ -n "$VIRTUAL_ENV" ]; then
print -n -- ${VIRTUAL_ENV:t}
fi
}
infoline_left+=infoline-virtualenv
|
hevi9/infoline-zsh-theme
|
infoline-virtualenv.zsh
|
Shell
|
lgpl-3.0
| 171 |
#!/bin/bash
set -eux
file=${1:-"instackenv.json"}
jq '.["nodes"][] | "sudo vbmc add --username " + ."pm_user" + " --password " + ."pm_password" + " --port " + ."pm_port" + " --address " + ."pm_addr" + " --libvirt-uri \"qemu+ssh://root@" + "192.168.23.1" + "/system?&keyfile=/root/.ssh/id_rsa_virt_power&no_verify=1&no_tty=1\" " + (."name"|sub("-";"_"))' $file -r
echo
jq '.["nodes"][] | "vbmc add --username " + ."pm_user" + " --password " + ."pm_password" + " --port " + ."pm_port" + " --address " + ."pm_addr" + " --libvirt-uri \"qemu:///session\" " + (."name"|sub("-";"_"))' $file -r
set +x
for node in $(sudo vbmc list | grep down | awk '{print $2}'); do
echo sudo vbmc start $node
done
|
slagle/tripleo
|
scripts/instackenv-to-vbmc.sh
|
Shell
|
apache-2.0
| 699 |
#!/bin/bash
#
# Simple script to enable maintenance-login before sealing up the template
#
######################################################################
PROGNAME="$( basename "${0}" )"
CHROOT="${CHROOT:-/mnt/ec2-root}"
ROOTPWSTRING="${PWSTRING:-UNDEF}"
MAINTUSER="${MAINTUSER:-root}"
# Error handler function
function err_exit {
local ERRSTR
local ISNUM
local SCRIPTEXIT
ERRSTR="${1}"
ISNUM='^[0-9]+$'
SCRIPTEXIT="${2:-1}"
if [[ ${DEBUG} == true ]]
then
# Our output channels
logger -i -t "${PROGNAME}" -p kern.crit -s -- "${ERRSTR}"
else
logger -i -t "${PROGNAME}" -p kern.crit -- "${ERRSTR}"
fi
# Only exit if requested exit is numerical
if [[ ${SCRIPTEXIT} =~ ${ISNUM} ]]
then
exit "${SCRIPTEXIT}"
fi
}
# Print out a basic usage message
function UsageMsg {
local SCRIPTEXIT
local PART
SCRIPTEXIT="${1:-1}"
(
echo "Usage: ${0} [GNU long option] [option] ..."
echo " Options:"
printf '\t%-4s%s\n' '-h' 'Print this message'
printf '\t%-4s%s\n' '-m' 'Template maintenance user (default: "root")'
printf '\t%-4s%s\n' '-p' 'Password to assign to template maintenance user'
printf '\t%-6s%s\n' '' 'Default layout:'
for PART in ${DEFGEOMARR[*]}
do
printf '\t%-8s%s\n' '' "${PART}"
done
echo " GNU long options:"
printf '\t%-20s%s\n' '--help' 'See "-h" short-option'
printf '\t%-20s%s\n' '--maintuser' 'See "-m" short-option'
printf '\t%-20s%s\n' '--password' 'See "-p" short-option'
)
exit "${SCRIPTEXIT}"
}
function SetPassString {
local PROTECTPWLOGGING
# Suppress trace-logging (if tracing is set)
if [[ $- =~ "x" ]]
then
PROTECTPWLOGGING=TRUE
set +x
fi
# Set password for the selected user
printf "Setting password for %s... " "${MAINTUSER}"
echo "${ROOTPWSTRING}" | chroot "${CHROOT}" /bin/passwd --stdin "${MAINTUSER}" || \
err_exit "Failed setting password for ${MAINTUSER}" 1
echo "Success"
# Probably superfluous w/in a function...
if [[ ${PROTECTPWLOGGING:-} == "TRUE" ]]
then
set -x
fi
}
function AllowRootSsh {
local SSHDCFGFILE
local CFGITEM
SSHDCFGFILE="${CHROOT}/etc/ssh/sshd_config"
CFGITEM="PermitRootLogin"
printf "Allow remote-login for root... "
if [[ $( grep -q "^${CFGITEM}" "${SSHDCFGFILE}" )$? -eq 0 ]]
then
sed -i "/^${CFGITEM}/s/[ ][ ]*.*$/ yes/" "${SSHDCFGFILE}" || \
err_exit "Failed changing ${CFGITEM} value in ${SSHDCFGFILE}" 1
echo "Change ${CFGITEM} value in ${SSHDCFGFILE}"
else
echo "PermitRootLogin yes" > "${SSHDCFGFILE}" || \
err_exit "Failed adding ${CFGITEM} to ${SSHDCFGFILE}" 1
echo "Added ${CFGITEM} to ${SSHDCFGFILE}"
fi
}
function EnableProvUser {
# Create maintenance user
printf 'Creating %s in chroot [%s]... ' "${MAINTUSER}" "${CHROOT}"
chroot "${CHROOT}" useradd -c "Maintenance User Account" -m \
-s /bin/bash "${MAINTUSER}" || err_exit "Failed creating ${MAINTUSER}" 1
echo "Success!"
# Apply SELinux context to maintenance user
printf 'Setting SELinux context on %s in chroot [%s]... ' "${MAINTUSER}" "${CHROOT}"
chroot "${CHROOT}" semanage login -a -s unconfined_u "${MAINTUSER}" || \
err_exit "Failed setting SELinux context for ${MAINTUSER}" 1
echo "Success!"
# Give maintenance user privileges
printf 'Adding %s to sudoers... ' "${MAINTUSER}"
printf '%s\tALL=(ALL)\tNOPASSWD:ALL\n' "${MAINTUSER}" > \
"${CHROOT}/etc/sudoers.d/user_${MAINTUSER}" || \
err_exit "Failed adding ${MAINTUSER} to sudoers" 1
echo "Success!"
# Set password
SetPassString
}
######################
## Main program-flow
######################
OPTIONBUFR=$(getopt -o hm:p: --long help,maintuser:,password: -n "${PROGNAME}" -- "$@")
eval set -- "${OPTIONBUFR}"
###################################
# Parse contents of ${OPTIONBUFR}
###################################
while true
do
case "$1" in
-h|--help)
UsageMsg 0
;;
-m|--maintuser)
case "$2" in
"")
err_exit "Error: option required but not specified" 1
shift 2;
exit 1
;;
*)
MAINTUSER=${2}
shift 2;
;;
esac
;;
-p|--password)
case "$2" in
"")
err_exit "Error: option required but not specified" 1
shift 2;
exit 1
;;
*)
ROOTPWSTRING=${2}
shift 2;
;;
esac
;;
--)
shift
break
;;
*)
err_exit "Internal error!"
exit 1
;;
esac
done
# Exit if password not passed
if [[ ${ROOTPWSTRING} == UNDEF ]]
then
err_exit "No password string passed to script. ABORTING!" 1
fi
# Configure for direct-root or sudo-to-root
if [[ ${MAINTUSER} == root ]]
then
# Set root's password
SetPassString
# Set up SSH to allow direct-root
AllowRootSsh
else
EnableProvUser
fi
|
ferricoxide/AMIgen7
|
SetRootPW.sh
|
Shell
|
apache-2.0
| 5,161 |
#!/bin/sh
set -e
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
install_framework()
{
if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
local source="${BUILT_PRODUCTS_DIR}/$1"
elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
elif [ -r "$1" ]; then
local source="$1"
fi
local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
if [ -L "${source}" ]; then
echo "Symlinked..."
source="$(readlink "${source}")"
fi
# use filter instead of exclude so missing patterns don't throw errors
echo "rsync -av --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
rsync -av --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
local basename
basename="$(basename -s .framework "$1")"
binary="${destination}/${basename}.framework/${basename}"
if ! [ -r "$binary" ]; then
binary="${destination}/${basename}"
fi
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
strip_invalid_archs "$binary"
fi
# Resign the code if required by the build settings to avoid unstable apps
code_sign_if_enabled "${destination}/$(basename "$1")"
# Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]})
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
code_sign_if_enabled "${destination}/${lib}"
done
fi
}
# Signs a framework with the provided identity
code_sign_if_enabled() {
if [ -n "${EXPANDED_CODE_SIGN_IDENTITY}" -a "${CODE_SIGNING_REQUIRED}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
# Use the current code_sign_identity
echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
echo "/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS} --preserve-metadata=identifier,entitlements \"$1\""
/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS} --preserve-metadata=identifier,entitlements "$1"
fi
}
# Strip invalid architectures
strip_invalid_archs() {
binary="$1"
# Get architectures for current file
archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | rev)"
stripped=""
for arch in $archs; do
if ! [[ "${VALID_ARCHS}" == *"$arch"* ]]; then
# Strip non-valid architectures in-place
lipo -remove "$arch" -output "$binary" "$binary" || exit 1
stripped="$stripped $arch"
fi
done
if [[ "$stripped" ]]; then
echo "Stripped $binary of architectures:$stripped"
fi
}
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_framework "$BUILT_PRODUCTS_DIR/Bolts/Bolts.framework"
install_framework "$BUILT_PRODUCTS_DIR/Firebase/Firebase.framework"
install_framework "$BUILT_PRODUCTS_DIR/JSMessagesViewController/JSMessagesViewController.framework"
install_framework "$BUILT_PRODUCTS_DIR/JSQSystemSoundPlayer/JSQSystemSoundPlayer.framework"
install_framework "$BUILT_PRODUCTS_DIR/MBProgressHUD/MBProgressHUD.framework"
install_framework "$BUILT_PRODUCTS_DIR/Mantle/Mantle.framework"
install_framework "$BUILT_PRODUCTS_DIR/Parse/Parse.framework"
install_framework "$BUILT_PRODUCTS_DIR/PhoneNumberKit/PhoneNumberKit.framework"
install_framework "$BUILT_PRODUCTS_DIR/SwiftHTTP/SwiftHTTP.framework"
install_framework "$BUILT_PRODUCTS_DIR/SwiftQRCode/SwiftQRCode.framework"
install_framework "$BUILT_PRODUCTS_DIR/TPKeyboardAvoiding/TPKeyboardAvoiding.framework"
install_framework "$BUILT_PRODUCTS_DIR/VK-ios-sdk/VK_ios_sdk.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_framework "$BUILT_PRODUCTS_DIR/Bolts/Bolts.framework"
install_framework "$BUILT_PRODUCTS_DIR/Firebase/Firebase.framework"
install_framework "$BUILT_PRODUCTS_DIR/JSMessagesViewController/JSMessagesViewController.framework"
install_framework "$BUILT_PRODUCTS_DIR/JSQSystemSoundPlayer/JSQSystemSoundPlayer.framework"
install_framework "$BUILT_PRODUCTS_DIR/MBProgressHUD/MBProgressHUD.framework"
install_framework "$BUILT_PRODUCTS_DIR/Mantle/Mantle.framework"
install_framework "$BUILT_PRODUCTS_DIR/Parse/Parse.framework"
install_framework "$BUILT_PRODUCTS_DIR/PhoneNumberKit/PhoneNumberKit.framework"
install_framework "$BUILT_PRODUCTS_DIR/SwiftHTTP/SwiftHTTP.framework"
install_framework "$BUILT_PRODUCTS_DIR/SwiftQRCode/SwiftQRCode.framework"
install_framework "$BUILT_PRODUCTS_DIR/TPKeyboardAvoiding/TPKeyboardAvoiding.framework"
install_framework "$BUILT_PRODUCTS_DIR/VK-ios-sdk/VK_ios_sdk.framework"
fi
|
groschovskiy/lerigos_music
|
Mobile/iOS/Beta/Pods/Target Support Files/Pods-Lerigos Music/Pods-Lerigos Music-frameworks.sh
|
Shell
|
apache-2.0
| 5,337 |
# Common routines
# TODO: get token from target peer more elegantly
# peername -> orgname -> orgtoken
get_token() {
if [ "$#" -eq 1 ] ; then
local peer=$1
if [[ "$peer" =~ org1 ]] ; then
find $CREDENTIAL_STORE_DIR/ -type f | grep -i org1 | head -n1 | xargs cat
elif [[ "$peer" =~ org2 ]] ; then
find $CREDENTIAL_STORE_DIR/ -type f | grep -i org2 | head -n1 | xargs cat
else
echo "Failed to get token for $peer" >&2
exit 1
fi
else
local username=$1
local orgname=$2
if [ -s $CREDENTIAL_STORE_DIR-$orgname/token.$username ] ; then
cat $CREDENTIAL_STORE_DIR-$orgname/token.$username
else
echo "Token not found" >&2
return 1
fi
fi
}
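# Hedged examples of the two call forms above (the peer, user, and org names are
# illustrative assumptions, not values shipped with this repo):
#   token=$(get_token peer0.org1.example.com)   # peer-name form, matched against org1/org2
#   token=$(get_token user1 org1)               # username + orgname form, reads token.<user>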
request_token() {
local username=$1
local orgname=$2
if [ -s $CREDENTIAL_STORE_DIR-$orgname/tmp.$username ] ; then
echo "token ' $CREDENTIAL_STORE_DIR-$orgname/tmp.$username' already exists."
else
echo requestToken "$username" "$orgname"
requestToken "$username" "$orgname"
echo ''
fi
}
get_rand() {
od -An -N1 -i /dev/random | tr -d ' '
}
mkdir_if_not_exist() {
local dir=$1
[ ! -d "$dir" ] && mkdir -p $dir
}
# API routines
requestToken() {
local username=$1
local orgname=$2
curl -s -X POST \
http://$HOST:$PORT/users \
-H "content-type: application/x-www-form-urlencoded" \
-d "username=${username}&orgName=${orgname}"
}
createChannel() {
local channelname=$1
local channelconfigpath=$2
local token=$3
curl -s -X POST \
http://$HOST:$PORT/channels \
-H "Accept:$ACCEPT" \
-H "authorization: Bearer $token" \
-H "content-type: application/json" \
-d "{
\"channelName\":\"$channelname\",
\"channelConfigPath\":\"$channelconfigpath\"
}"
echo
}
joinChannel() {
local channel=$1
local peers="$2"
local token=$3
curl -s -X POST \
http://$HOST:$PORT/channels/${channel}/peers \
-H "authorization: Bearer $token" \
-H "content-type: application/json" \
-d "{
\"peers\": $peers
}"
echo
}
installChaincode() {
local token=$1
local peers="$2" # TODO: format?
local chaincodename=$3
local chaincodepath=$4
local chaincodetype=$5
local chaincodeversion=$6
curl -s -X POST \
http://$HOST:$PORT/chaincodes \
-H "Accept:$ACCEPT" \
-H "authorization: Bearer $token" \
-H "content-type: application/json" \
-d "{
\"peers\": $peers,
\"chaincodeName\":\"$chaincodename\",
\"chaincodePath\":\"$chaincodepath\",
\"chaincodeType\": \"$chaincodetype\",
\"chaincodeVersion\":\"$chaincodeversion\"
}"
echo ""
}
instantiateChaincode() {
local token=$1
local channel=$2
local peers="$3"
local chaincodename=$4
local chaincodetype=$5
local chaincodeversion=$6
local args="$7" # TODO: format?
curl -s -X POST \
http://$HOST:$PORT/channels/${channel}/chaincodes \
-H "Accept:$ACCEPT" \
-H "authorization: Bearer $token" \
-H "content-type: application/json" \
-d "{
\"peers\": $peers,
\"chaincodeName\":\"$chaincodename\",
\"chaincodeVersion\":\"$chaincodeversion\",
\"chaincodeType\": \"$chaincodetype\",
\"args\": $args
}"
echo
}
invokeChaincode() {
local token=$1
local channel=$2
local ccid=$3
local peers="$4" # TODO: format?
local fcn=$5
local args="$6" # TODO: format?
local newif="$7" # using submitTransaction() if true
local peers_str=
if [ "$peers" ] ; then
peers_str="\"peers\": $peers,"
fi
local newif_str=
if [ "$newif" ] ; then
newif_str="\"newif\": true,"
fi
curl -s -X POST \
http://$HOST:$PORT/channels/${channel}/chaincodes/${ccid} \
-H "Accept:$ACCEPT" \
-H "authorization: Bearer ${token}" \
-H "content-type: application/json" \
-d "{
$peers_str $newif_str
\"fcn\": \"$fcn\",
\"args\": $args
}"
echo
}
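# Hedged example of an invoke call (channel, chaincode id, peer list, and args are
# illustrative assumptions and must match your own network definition):
#   invokeChaincode "$TOKEN" mychannel mycc '["peer0.org1.example.com"]' move '["a","b","10"]'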
queryChaincode() {
local channel=$1
local peer=$2
local token=$3
local ccid=$4
local arg=$5
curl -s -G --data-urlencode "args=[\"$arg\"]" \
-H "Accept:$ACCEPT" \
-H "authorization: Bearer $token" \
-H "content-type: application/x-www-form-urlencoded" \
"http://$HOST:$PORT/channels/${channel}/chaincodes/${ccid}?peer=${peer}&fcn=query"
echo
}
queryBlockByNumber() {
local channel=$1
local peer=$2
local token=$3
local number=$4
curl -s -X GET \
"http://$HOST:$PORT/channels/${channel}/blocks/${number}?peer=${peer}" \
-H "Accept:$ACCEPT" \
-H "authorization: Bearer $token" \
-H "content-type: application/json"
echo
}
queryBlocks() {
local channel=$1
local peer=$2
local token=$3
curl -s -X GET \
"http://$HOST:$PORT/channels/${channel}/blocks?peer=${peer}" \
-H "Accept:$ACCEPT" \
-H "authorization: Bearer $token" \
-H "content-type: application/json"
echo
}
queryTransactionByTxID() {
local channel=$1
local peer=$2
local token=$3
local txid=$4
curl -s -X GET http://$HOST:$PORT/channels/${channel}/transactions/${txid}?peer=${peer} \
-H "Accept:$ACCEPT" \
-H "authorization: Bearer $token" \
-H "content-type: application/json"
echo
}
queryChannels() {
local peer=$1
local token=$2
curl -s -X GET \
"http://$HOST:$PORT/channels?peer=${peer}" \
-H "Accept:$ACCEPT" \
-H "authorization: Bearer $token" \
-H "content-type: application/json"
echo
}
queryChainInfo() {
local channel=$1
local peer=$2
local token=$3
curl -s -X GET \
"http://$HOST:$PORT/channels/${channel}?peer=${peer}" \
-H "Accept:$ACCEPT" \
-H "authorization: Bearer $token" \
-H "content-type: application/json"
echo
}
# Why no argument for channel?
queryInstalledChaincodes() {
local channel=$1
local peer=$2
local token=$3
curl -s -X GET \
"http://$HOST:$PORT/chaincodes?peer=${peer}" \
-H "Accept:$ACCEPT" \
-H "authorization: Bearer $token" \
-H "content-type: application/json"
echo
}
queryInstantiatedChaincodes() {
local channel=$1
local peer=$2
local token=$3
curl -s -X GET \
"http://$HOST:$PORT/channels/${channel}/chaincodes?peer=${peer}" \
-H "Accept:$ACCEPT" \
-H "authorization: Bearer $token" \
-H "content-type: application/json"
echo
}
filter_raw_data() {
local file=$1
if [ "$file" ] ; then
# cat $file | ruby -ne 'puts $_.gsub(/\[\d+,[\d,]+\]/, "\"[...]\"")'
cat $file | ruby -ne 'puts $_.gsub(/(\[\d+,\d+,[\d,]+\])/) {|num| "\"" + num[1..-2].split(",").map {|x| "%02x" % x.to_i}.join + "\""}'
else
while read line ; do
echo $line | ruby -ne 'puts $_.gsub(/(\[\d+,\d+,[\d,]+\])/) {|num| "\"" + num[1..-2].split(",").map {|x| "%02x" % x.to_i}.join + "\""}'
done
fi
}
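# Hedged illustration of the rewrite performed above (the input line is made up):
#   echo '{"data":[3,0,10,255]}' | filter_raw_data
# would turn the byte array into a quoted hex string: {"data":"03000aff"}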
setup_directory() {
local config=$1
RUN_DIR=$(jq ".run_dir" $config | tr -d \")
RUN_DIR_OUTSIDE_DOCKER=$(jq ".run_dir_outside_docker" $config | tr -d \")
if [ "$RUN_DIR_OUTSIDE_DOCKER" != null ] ; then
echo "We have RUN_DIR_OUTSIDE_DOCKER ($RUN_DIR_OUTSIDE_DOCKER) setting ..."
RUN_DIR=$RUN_DIR_OUTSIDE_DOCKER
fi
BC_DATA_DIR=$(jq ".bc_data_dir" $config | tr -d \")
BC_DATA_DIR_OUTSIDE_DOCKER=$(jq ".bc_data_dir_outside_docker" $config | tr -d \")
if [ "$BC_DATA_DIR_OUTSIDE_DOCKER" != null ] ; then
echo "We have BC_DATA_DIR_OUTSIDE_DOCKER ($BC_DATA_DIR_OUTSIDE_DOCKER) setting ..."
BC_DATA_DIR=$BC_DATA_DIR_OUTSIDE_DOCKER
fi
CREDENTIAL_STORE_DIR=$(jq ".credential_store_dir" $config | tr -d \")
CREDENTIAL_STORE_DIR_OUTSIDE_DOCKER=$(jq ".credential_store_dir_outside_docker" $config | tr -d \")
if [ "$CREDENTIAL_STORE_DIR_OUTSIDE_DOCKER" != null ] ; then
CREDENTIAL_STORE_DIR=$CREDENTIAL_STORE_DIR_OUTSIDE_DOCKER
fi
SCRIPT_DIR=$(dirname $BASH_SOURCE)
echo RUN_DIR: $RUN_DIR
echo BC_DATA_DIR: $BC_DATA_DIR
echo SCRIPT_DIR: $SCRIPT_DIR
echo CREDENTIAL_STORE_DIR: $CREDENTIAL_STORE_DIR
mkdir_if_not_exist "$RUN_DIR"
mkdir_if_not_exist "$BC_DATA_DIR"
mkdir_if_not_exist "$RUN_DIR/channels"
}
getIndexPage() {
curl -s -X GET \
"http://$HOST:$PORT/summary" \
-H "Accept:$ACCEPT" \
-H "content-type: application/json"
echo
}
getSummaryPage() {
local token=$1
curl -s -X GET \
"http://$HOST:$PORT/summary" \
-H "Accept:$ACCEPT" \
-H "authorization: Bearer $token" \
-H "content-type: application/json"
echo
}
# need service discovery support
getServices() {
local token=$1
local channel=$2
local peer=$3
local cc=$4
local config=$5
local local=$6
curl -s -X GET \
"http://$HOST:$PORT/services?channel=${channel}&peer=${peer}&cc=${cc}&config=${config}&local=${local}" \
-H "Accept:$ACCEPT" \
-H "authorization: Bearer $token" \
-H "content-type: application/json"
echo
}
jq --version > /dev/null 2>&1
if [ $? -ne 0 ]; then
echo "Please Install 'jq' https://stedolan.github.io/jq/ to execute this script"
echo
exit 1
fi
|
Naoya-Horiguchi/hl-tools
|
dev_app/scripts/common.sh
|
Shell
|
apache-2.0
| 8,504 |
#!/bin/bash
# Author: yeho <lj2007331 AT gmail.com>
# BLOG: https://linuxeye.com
#
# Notes: OneinStack for CentOS/RedHat 6+ Debian 8+ and Ubuntu 14+
#
# Project home page:
# https://oneinstack.com
# https://github.com/oneinstack/oneinstack
Install_Percona55() {
pushd ${oneinstack_dir}/src > /dev/null
id -u mysql >/dev/null 2>&1
[ $? -ne 0 ] && useradd -M -s /sbin/nologin mysql
[ ! -d "${percona_install_dir}" ] && mkdir -p ${percona_install_dir}
mkdir -p ${percona_data_dir};chown mysql.mysql -R ${percona_data_dir}
if [ "${dbinstallmethod}" == "1" ]; then
perconaVerStr1=$(echo ${percona55_ver} | sed "s@-@-rel@")
tar xzf ./Percona-Server-${perconaVerStr1}-Linux.${SYS_BIT_b}.${sslLibVer}.tar.gz
mv Percona-Server-${perconaVerStr1}-Linux.${SYS_BIT_b}.${sslLibVer}/* ${percona_install_dir}
sed -i 's@executing mysqld_safe@executing mysqld_safe\nexport LD_PRELOAD=/usr/local/lib/libjemalloc.so@' ${percona_install_dir}/bin/mysqld_safe
sed -i "s@/usr/local/Percona-Server-${perconaVerStr1}-Linux.${SYS_BIT_b}.${sslLibVer}@${percona_install_dir}@g" ${percona_install_dir}/bin/mysqld_safe
elif [ "${dbinstallmethod}" == "2" ]; then
tar xzf percona-server-${percona55_ver}.tar.gz
pushd percona-server-${percona55_ver}
[ "${armplatform}" == "y" ] && patch -p1 < ../mysql-5.5-fix-arm-client_plugin.patch
cmake . -DCMAKE_INSTALL_PREFIX=${percona_install_dir} \
-DMYSQL_DATADIR=${percona_data_dir} \
-DSYSCONFDIR=/etc \
-DWITH_INNOBASE_STORAGE_ENGINE=1 \
-DWITH_PARTITION_STORAGE_ENGINE=1 \
-DWITH_FEDERATED_STORAGE_ENGINE=1 \
-DWITH_BLACKHOLE_STORAGE_ENGINE=1 \
-DWITH_MYISAM_STORAGE_ENGINE=1 \
-DWITH_ARCHIVE_STORAGE_ENGINE=1 \
-DWITH_READLINE=1 \
-DENABLE_DTRACE=0 \
-DENABLED_LOCAL_INFILE=1 \
-DDEFAULT_CHARSET=utf8mb4 \
-DDEFAULT_COLLATION=utf8mb4_general_ci \
-DEXTRA_CHARSETS=all \
-DCMAKE_EXE_LINKER_FLAGS='-ljemalloc'
make -j ${THREAD}
make install
popd
fi
if [ -d "${percona_install_dir}/support-files" ]; then
sed -i "s+^dbrootpwd.*+dbrootpwd='${dbrootpwd}'+" ../options.conf
echo "${CSUCCESS}Percona installed successfully! ${CEND}"
if [ "${dbinstallmethod}" == "1" ]; then
rm -rf Percona-Server-${perconaVerStr1}-Linux.${SYS_BIT_b}.${sslLibVer}
elif [ "${dbinstallmethod}" == "2" ]; then
rm -rf percona-server-${percona55_ver}
fi
else
rm -rf ${percona_install_dir}
echo "${CFAILURE}Percona install failed, Please contact the author! ${CEND}" && lsb_release -a
kill -9 $$
fi
/bin/cp ${percona_install_dir}/support-files/mysql.server /etc/init.d/mysqld
sed -i "s@^basedir=.*@basedir=${percona_install_dir}@" /etc/init.d/mysqld
sed -i "s@^datadir=.*@datadir=${percona_data_dir}@" /etc/init.d/mysqld
chmod +x /etc/init.d/mysqld
[ "${PM}" == 'yum' ] && { chkconfig --add mysqld; chkconfig mysqld on; }
[ "${PM}" == 'apt-get' ] && update-rc.d mysqld defaults
popd
# my.cnf
cat > /etc/my.cnf << EOF
[client]
port = 3306
socket = /tmp/mysql.sock
[mysql]
prompt="Percona [\\d]> "
no-auto-rehash
[mysqld]
port = 3306
socket = /tmp/mysql.sock
basedir = ${percona_install_dir}
datadir = ${percona_data_dir}
pid-file = ${percona_data_dir}/mysql.pid
user = mysql
bind-address = 0.0.0.0
server-id = 1
init-connect = 'SET NAMES utf8mb4'
character-set-server = utf8mb4
skip-name-resolve
#skip-networking
back_log = 300
max_connections = 1000
max_connect_errors = 6000
open_files_limit = 65535
table_open_cache = 128
max_allowed_packet = 500M
binlog_cache_size = 1M
max_heap_table_size = 8M
tmp_table_size = 16M
read_buffer_size = 2M
read_rnd_buffer_size = 8M
sort_buffer_size = 8M
join_buffer_size = 8M
key_buffer_size = 4M
thread_cache_size = 8
query_cache_type = 1
query_cache_size = 8M
query_cache_limit = 2M
ft_min_word_len = 4
log_bin = mysql-bin
binlog_format = mixed
expire_logs_days = 7
log_error = ${percona_data_dir}/mysql-error.log
slow_query_log = 1
long_query_time = 1
slow_query_log_file = ${percona_data_dir}/mysql-slow.log
performance_schema = 0
#lower_case_table_names = 1
skip-external-locking
default_storage_engine = InnoDB
innodb_file_per_table = 1
innodb_open_files = 500
innodb_buffer_pool_size = 64M
innodb_write_io_threads = 4
innodb_read_io_threads = 4
innodb_thread_concurrency = 0
innodb_purge_threads = 1
innodb_flush_log_at_trx_commit = 2
innodb_log_buffer_size = 2M
innodb_log_file_size = 32M
innodb_log_files_in_group = 3
innodb_max_dirty_pages_pct = 90
innodb_lock_wait_timeout = 120
bulk_insert_buffer_size = 8M
myisam_sort_buffer_size = 8M
myisam_max_sort_file_size = 10G
myisam_repair_threads = 1
interactive_timeout = 28800
wait_timeout = 28800
[mysqldump]
quick
max_allowed_packet = 500M
[myisamchk]
key_buffer_size = 8M
sort_buffer_size = 8M
read_buffer = 4M
write_buffer = 4M
EOF
sed -i "s@max_connections.*@max_connections = $((${Mem}/3))@" /etc/my.cnf
if [ ${Mem} -gt 1500 -a ${Mem} -le 2500 ]; then
sed -i 's@^thread_cache_size.*@thread_cache_size = 16@' /etc/my.cnf
sed -i 's@^query_cache_size.*@query_cache_size = 16M@' /etc/my.cnf
sed -i 's@^myisam_sort_buffer_size.*@myisam_sort_buffer_size = 16M@' /etc/my.cnf
sed -i 's@^key_buffer_size.*@key_buffer_size = 16M@' /etc/my.cnf
sed -i 's@^innodb_buffer_pool_size.*@innodb_buffer_pool_size = 128M@' /etc/my.cnf
sed -i 's@^tmp_table_size.*@tmp_table_size = 32M@' /etc/my.cnf
sed -i 's@^table_open_cache.*@table_open_cache = 256@' /etc/my.cnf
elif [ ${Mem} -gt 2500 -a ${Mem} -le 3500 ]; then
sed -i 's@^thread_cache_size.*@thread_cache_size = 32@' /etc/my.cnf
sed -i 's@^query_cache_size.*@query_cache_size = 32M@' /etc/my.cnf
sed -i 's@^myisam_sort_buffer_size.*@myisam_sort_buffer_size = 32M@' /etc/my.cnf
sed -i 's@^key_buffer_size.*@key_buffer_size = 64M@' /etc/my.cnf
sed -i 's@^innodb_buffer_pool_size.*@innodb_buffer_pool_size = 512M@' /etc/my.cnf
sed -i 's@^tmp_table_size.*@tmp_table_size = 64M@' /etc/my.cnf
sed -i 's@^table_open_cache.*@table_open_cache = 512@' /etc/my.cnf
elif [ ${Mem} -gt 3500 ]; then
sed -i 's@^thread_cache_size.*@thread_cache_size = 64@' /etc/my.cnf
sed -i 's@^query_cache_size.*@query_cache_size = 64M@' /etc/my.cnf
sed -i 's@^myisam_sort_buffer_size.*@myisam_sort_buffer_size = 64M@' /etc/my.cnf
sed -i 's@^key_buffer_size.*@key_buffer_size = 256M@' /etc/my.cnf
sed -i 's@^innodb_buffer_pool_size.*@innodb_buffer_pool_size = 1024M@' /etc/my.cnf
sed -i 's@^tmp_table_size.*@tmp_table_size = 128M@' /etc/my.cnf
sed -i 's@^table_open_cache.*@table_open_cache = 1024@' /etc/my.cnf
fi
${percona_install_dir}/scripts/mysql_install_db --user=mysql --basedir=${percona_install_dir} --datadir=${percona_data_dir}
[ "${Wsl}" == true ] && chmod 600 /etc/my.cnf
chown mysql.mysql -R ${percona_data_dir}
[ -d "/etc/mysql" ] && /bin/mv /etc/mysql{,_bk}
service mysqld start
[ -z "$(grep ^'export PATH=' /etc/profile)" ] && echo "export PATH=${percona_install_dir}/bin:\$PATH" >> /etc/profile
[ -n "$(grep ^'export PATH=' /etc/profile)" -a -z "$(grep ${percona_install_dir} /etc/profile)" ] && sed -i "s@^export PATH=\(.*\)@export PATH=${percona_install_dir}/bin:\1@" /etc/profile
. /etc/profile
${percona_install_dir}/bin/mysql -e "grant all privileges on *.* to root@'127.0.0.1' identified by \"${dbrootpwd}\" with grant option;"
${percona_install_dir}/bin/mysql -e "grant all privileges on *.* to root@'localhost' identified by \"${dbrootpwd}\" with grant option;"
${percona_install_dir}/bin/mysql -uroot -p${dbrootpwd} -e "delete from mysql.user where Password='';"
${percona_install_dir}/bin/mysql -uroot -p${dbrootpwd} -e "delete from mysql.db where User='';"
${percona_install_dir}/bin/mysql -uroot -p${dbrootpwd} -e "delete from mysql.proxies_priv where Host!='localhost';"
${percona_install_dir}/bin/mysql -uroot -p${dbrootpwd} -e "drop database test;"
${percona_install_dir}/bin/mysql -uroot -p${dbrootpwd} -e "reset master;"
rm -rf /etc/ld.so.conf.d/{mysql,mariadb,percona,alisql}*.conf
echo "${percona_install_dir}/lib" > /etc/ld.so.conf.d/z-percona.conf
ldconfig
service mysqld stop
}
|
lj2007331/oneinstack
|
include/percona-5.5.sh
|
Shell
|
apache-2.0
| 8,168 |
java -cp target/uber-SimpleCounter-1.0-SNAPSHOT.jar com.shapira.examples.producer.simplecounter.SimpleCounter localhost:9092 v1 new async 500 10
|
gwenshap/kafka-examples
|
SimpleCounter/run.sh
|
Shell
|
apache-2.0
| 145 |
#!/usr/bin/env bash
# Just in case
if ! yum list installed | grep puppet; then
sudo rpm -ivh http://yum.puppetlabs.com/puppetlabs-release-el-6.noarch.rpm
sudo yum install -y puppet
fi
module_name="bi_java"
#
# Make local modules dir look like a real install
#
puppet_dirs="manifests templates lib data files"
if [ ! -d "/vagrant/modules/$module_name" ]; then
mkdir -p /vagrant/modules/$module_name
fi
for d in $puppet_dirs; do
if [ -d "/vagrant/$d" ]; then
if [ ! -h "/vagrant/modules/$module_name/$d" ]; then
echo "Linking /vagrant/modules/$module_name/$d -> /vagrant/$d"
ln -s /vagrant/$d /vagrant/modules/$module_name/$d
fi
fi
done
|
boundedinfinity/bi-puppet-java
|
scripts/provision.bash
|
Shell
|
apache-2.0
| 700 |
git clone https://github.com/Alexander-Minyushkin/aistreamer.git
cd aistreamer/
cd worker/
sudo apt-get update
sudo apt-get install python-pip
sudo pip install --upgrade pip
sudo pip install "google-cloud<0.34.0"
sudo pip install luigi
sudo pip install -r requirements.txt
python main.py pubsub_pull
|
Alexander-Minyushkin/aistreamer
|
worker/startup-script.sh
|
Shell
|
apache-2.0
| 298 |
#!/bin/bash
# Strict mode
set -euo pipefail
IFS=$'\n\t'
# Create a self signed certificate for the user if one doesn't exist
if [ ! -f $PEM_FILE ]; then
openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout $PEM_FILE -out $PEM_FILE \
-subj "/C=XX/ST=XX/L=XX/O=dockergenerated/CN=dockergenerated"
fi
# Create the hash to pass to the IPython notebook, but don't export it so it doesn't appear
# as an environment variable within IPython kernels themselves
HASH=$(python -c "from IPython.lib import passwd; print(passwd('${PASSWORD}'))")
unset PASSWORD
CERTFILE_OPTION="--certfile=$PEM_FILE"
if [ $USE_HTTP -ne 0 ]; then
CERTFILE_OPTION=""
fi
ipython notebook --no-browser --port 8888 --ip=* $CERTFILE_OPTION --NotebookApp.password="$HASH" --matplotlib=inline
|
rethore/docker_openmdao
|
0.12.0/notebook.sh
|
Shell
|
apache-2.0
| 777 |
#!/bin/bash
#
# Copyright 2017 Istio Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
SCRIPTDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# only ask if in interactive mode
if [[ -t 0 ]]; then
echo -n "namespace ? [default] "
read NAMESPACE
fi
if [[ -z ${NAMESPACE} ]]; then
NAMESPACE=default
fi
echo "using NAMESPACE=${NAMESPACE}"
for rule in $(istioctl get -n ${NAMESPACE} routerules); do
istioctl delete -n ${NAMESPACE} routerule $rule
done
#istioctl delete mixer-rule ratings-ratelimit
export OUTPUT=$(mktemp)
echo "Application cleanup may take up to one minute"
kubectl delete -n ${NAMESPACE} -f $SCRIPTDIR/bookinfo.yaml >${OUTPUT} 2>&1
ret=$?
function cleanup() {
rm -f ${OUTPUT}
}
trap cleanup EXIT
if [[ ${ret} -eq 0 ]]; then
cat ${OUTPUT}
else
# ignore NotFound errors
OUT2=$(grep -v NotFound ${OUTPUT})
if [[ ! -z ${OUT2} ]]; then
cat ${OUTPUT}
exit ${ret}
fi
fi
echo "Application cleanup successful"
|
controlplaneio/theseus
|
istio-0.2.4/samples/bookinfo/kube/cleanup.sh
|
Shell
|
apache-2.0
| 1,474 |
#!/bin/bash
# Get configuration and endpoint
# Get configuration and extraction type
while getopts c:e:r: option
do
case "${option}"
in
c) conf_name=${OPTARG};;
e) endpoint=${OPTARG};;
r) repo_path=${OPTARG};;
esac
done
if [ ${conf_name+x} ]; then
echo "conf_name: "${conf_name}
else
echo "conf_name not set. Use -c to set conf_name please."
exit -1
fi
if [ ${repo_path+x} ]; then
echo "repo_path: "${repo_path}
else
echo "repo_path not set. Use -r to set repo_path please."
exit -1
fi
if [ ${endpoint+x} ]; then
echo "endpoint: "${endpoint}
else
echo "endpoint not set. Use -e to set endpoint please."
exit -1
fi
cd ${repo_path}
#mkdir "./logs"
# Initialize path environment variables
source ~/.bashrc
# Make sure path are consistent by moving to repo root.
repo_path=$(git rev-parse --show-toplevel)
cd ${repo_path}
# Start and keep API alive
cmd="python ./www/run_search_api.py"
args=" -c ./conf/generated/conf_search_"${conf_name}".json -e "${endpoint}
#log="./logs/log_searchapi_"${endpoint}
#bash ./scripts/keep_alive_process.sh --cmd="${cmd}" --args="${args}" --log="${log}"
bash ./scripts/keep_alive_process.sh --cmd="${cmd}" --args="${args}"
echo "Search process failed. Restarting docker container..."
exit 1
|
ColumbiaDVMM/ColumbiaImageSearch
|
scripts/run_search.sh
|
Shell
|
apache-2.0
| 1,257 |
#!/bin/bash
# Submit request to launch docker container
curl -X POST -H "Content-Type: application/json" mesos.appsoma.com:8080/v2/apps -d '
{
"id": "/simple-webpage",
"instances": 3,
"cpus": 0.5,
"mem": 256,
"ports": [
31111
],
"healthChecks": [
{
"path": "/",
"protocol": "HTTP",
"portIndex": 0,
"gracePeriodSeconds": 300,
"intervalSeconds": 10,
"timeoutSeconds": 20,
"maxConsecutiveFailures": 3
}
],
"uris": [ "file:///home/oink54321/data/haproxy/simple_webpage.js" ],
"cmd": "/home/oink54321/.nvm/v0.10.31/bin/npm install express process request && /home/oink54321/.nvm/v0.10.31/bin/node simple_webpage.js"
}'
|
appsoma/devops
|
webpage_launcher.bash
|
Shell
|
apache-2.0
| 651 |
#!/bin/bash
#
# Copyright 2017 Goldman Sachs.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
## Variable Setup
DB2_VERSION=$1
# These props are also defined in docker-db2-creds.yaml. Copy this from there
INSTANCE_PORT=50000
INSTANCE_DBNAME="dbdeploy"
INSTANCE_SCHEMAS="dbdeploy01 dbdeploy02 dbdeploy03"
INSTANCE_USERID="db2inst1" # note - this user ID is hardcoded by the container
INSTANCE_PASSWORD="db2inst1-pwd"
CONTAINER_NAME=obevo-db2-instance
## Now start the setup
docker pull ibmcom/db2express-c
OLD_CONTAINER_ID=$(docker ps -aqf "name=$CONTAINER_NAME")
if [ ! -z "$OLD_CONTAINER_ID" ]
then
echo "Shutting down old container"
docker stop $OLD_CONTAINER_ID
docker rm $OLD_CONTAINER_ID
fi
echo "Starting new container"
docker run --name $CONTAINER_NAME -d -i -t -p $INSTANCE_PORT:$INSTANCE_PORT -e DB2INST1_PASSWORD=$INSTANCE_PASSWORD -e LICENSE=accept ibmcom/db2express-c:10.5.0.5-3.10.0 db2start
export CONTAINER_ID=$(docker ps -aqf "name=$CONTAINER_NAME")
echo "Creating the database (may take a few seconds)"
docker exec $CONTAINER_ID bash -c "su - $INSTANCE_USERID -c 'db2 create db $INSTANCE_DBNAME'"
for SCHEMA in $INSTANCE_SCHEMAS; do
SCHEMAS_CREATE_COMMAND="$SCHEMAS_CREATE_COMMAND db2 create schema $SCHEMA;"
done
echo "Logging into the database to create the schema"
docker exec $CONTAINER_ID bash -c "su - $INSTANCE_USERID -c 'db2 connect to $INSTANCE_DBNAME; $SCHEMAS_CREATE_COMMAND'"
|
shantstepanian/obevo
|
obevo-db-impls/obevo-db-db2/db2-setup.sh
|
Shell
|
apache-2.0
| 1,931 |
#!/bin/bash
#
# Initialize openvswitch configuration database.
#
# Turn off network manager so it doesn't interfere with IF configurations.
#
nmcli nm enable false
#
# eth1 is bridged to an external port.
#
# Set eth1 as un-numbered interface.
#
sudo ifconfig eth1 0.0.0.0
sudo ifconfig eth2 0.0.0.0
#
# Set up veth pair
#
#sudo ip link add name veth0 type veth peer name veth1
#sudo ifconfig veth0 up
#sudo ifconfig veth1 192.168.1.3
#
# Create single tagged vlan IF
#
#sudo ip link add name eth1.999 link eth1 type vlan id 999 proto 802.1q
#sudo ifconfig eth1.999 up
#
# Push another tag to create double tagged vlan IF
#
sudo ip link add name eth1.100 link eth1 type vlan id 100 proto 802.1ad
sudo ifconfig eth1.100 up
#
# Create provider test bridge.
#
sudo ip link add name br0 type bridge
sudo ifconfig br0 up
#
# Add interfaces to bridge
#
sudo ip link set dev eth2 master br0
sudo ip link set dev eth1.100 master br0
|
tfherbert/ovs_entpnt
|
scripts/VMs/lubuntu2/init.sh
|
Shell
|
apache-2.0
| 926 |
docker rm camunda-database-size-calculator_db_1 --force
docker rm camunda-database-size-calculator_camunda_1 --force
docker rmi camunda-database-size-calculator_db
docker rmi camunda-database-size-calculator_camunda
|
camunda/camunda-consulting
|
snippets/camunda-database-size-calculator/clean.sh
|
Shell
|
apache-2.0
| 215 |
#!/bin/bash
#Used for all nodes, set different value on each node
NODE_HOST_NAME=controller-03
NODE_HOST_NAME_FQDN=controller-03.com
#Used for all keepalived nodes, set different value on each node
KEEPALIVED_ROUTER_ID=ka01
KEEPALIVED_INTERNAL_NET_PRIORITY=150
KEEPALIVED_EXTERNAL_NET_PRIORITY=150
#Used for all keepalived nodes, set value as "MASTER" or "BACKUP"
KEEPALIVED_STATE=MASTER
#Used for all keepalived nodes, set same value on each node
KEEPALIVED_INTERNAL_NET_VIRTUAL_ROUTER_ID=51
KEEPALIVED_EXTERNAL_NET_VIRTUAL_ROUTER_ID=61
KEEPALIVED_INTERNAL_NET_INTERFACE_NAME=eth0
KEEPALIVED_EXTERNAL_NET_INTERFACE_NAME=eth1
[email protected]
#Used for all nodes, set same value on each node
KEEPALIVED_INTERNAL_NET_VIRTUAL_IP=10.10.20.222
KEEPALIVED_EXTERNAL_NET_VIRTUAL_IP=10.10.10.222
MY_DNS1=211.155.27.88
MY_DNS2=211.155.23.88
#Used for all haproxy nodes, set same value on each node
CONTROLLER_NODE_01_INTERNAL_IP=172.16.0.48
CONTROLLER_NODE_02_INTERNAL_IP=172.16.0.49
CONTROLLER_NODE_03_INTERNAL_IP=172.16.0.50
#Used for all database nodes, set value as "FirstNode" or "FollowNode"
DB_NODE_TYPE=FollowNode
#Used for all database nodes, set different value on each node
DB_NODE_SELF_IP=10.10.20.14
#Used for all nodes, set same value on each node
DB_NODE_01_IP=10.10.20.12
DB_NODE_02_IP=10.10.20.13
DB_NODE_03_IP=10.10.20.14
#Used for all message queue nodes, set value as "FirstNode" or "FollowNode"
MQ_NODE_TYPE=FollowNode
#Used for all message queue nodes, set same value on each node
MQ_NODE_01_HOST_NAME=controller-01
MQ_NODE_02_HOST_NAME=controller-02
MQ_NODE_03_HOST_NAME=controller-03
#Used for all nodes, set same value on each node
MQ_NODE_01_IP=10.10.20.12
MQ_NODE_02_IP=10.10.20.13
MQ_NODE_03_IP=10.10.20.14
RABBIT_HOSTS=$MQ_NODE_01_IP:5672,$MQ_NODE_02_IP:5672,$MQ_NODE_03_IP:5672
#Used for all nodes, set same value on each node
CONTROLLER_NODE_MANAGEMENT_IP=$KEEPALIVED_INTERNAL_NET_VIRTUAL_IP
CONTROLLER_NODE_EXTERNAL_NET_IP=$KEEPALIVED_EXTERNAL_NET_VIRTUAL_IP
CONTROLLER_NODE_EXTERNAL_NET_GATEWAY=xxx.xxx.xxx.xxx
######
#Used for all nodes, set same value on each node, one of : neutron, nova-network.
NETWORK_API_CLASS=nova-network
#Used for all nodes, set same value on each node, one of : FlatDHCPManager, VlanManager. When:( nova-network )
NETWORK_MANAGER=FlatDHCPManager
#Used for all nodes, set same value on each node. When:( nova-network )
NETWORK_SIZE=255
PUBLIC_INTERFACE=eth0
FIXED_RANGE_V4=10.10.60.0/24
FLOATING_IP_ALLOCATION_POOLS=10.10.10.100-10.10.10.120,10.10.10.130-10.10.10.150,10.10.10.160-10.10.10.180
#Used for all nodes, set same value on each node. When:( nova-network + FlatDHCPManager )
FLAT_NETWORK_BRIDGE=br100
FLAT_INTERFACE=eth1
#Used for all nodes, set same value on each node. When:( nova-network + VlanManager )
VLAN_INTERFACE=eth0
NUM_NETWORKS=1
VLAN_START=100
#Used for all nodes, set same value on each node. When:( neutron )
CORE_PLUGIN=ml2
SERVICE_PLUGINS=router
ALLOW_OVERLAPPING_IPS=True
VERBOSE=True
MECHANISM_DRIVERS=openvswitch
METADATA_PROXY_SHARED_SECRET=howareyouOpenStack_Nwg
NEUTRON_EXT_NET_CIDR=192.168.11.0/24
NEUTRON_EXT_NET_GATEWAY=192.168.11.1
NEUTRON_EXT_NET_IP_POOL_START=192.168.11.108
NEUTRON_EXT_NET_IP_POOL_END=192.168.11.122
NEUTRON_DNS_NAMESERVER_1=$MY_DNS1
NEUTRON_DNS_NAMESERVER_2=$MY_DNS2
NEUTRON_INT_NET_CIDR=10.10.10.0/24
NEUTRON_INT_NET_GATEWAY=10.10.10.1
NEUTRON_INT_NET_IP_POOL_START=10.10.10.2
NEUTRON_INT_NET_IP_POOL_END=10.10.10.254
#Used for all nodes, set same value on each node, one of : gre, vxlan. When:( neutron )
TYPE_DRIVERS=gre
TENANT_NETWORK_TYPES=gre
#Used for all Network nodes and Compute nodes, set different value on each node. When:( neutron )
INSTANCE_TUNNELS_INTERFACE_IP_ADDRESS=172.16.0.xx
#Used for all Network nodes, set same value on each node. When:( neutron )
EXTERNAL_NETWORK_INTERFACE_NAME=eth1
######
#Used for all corosync nodes, set same value on each node
COROSYNC_NODE_01_HOSTNAME=HK01
COROSYNC_NODE_02_HOSTNAME=HK02
COROSYNC_NODE_01_IP=172.16.0.12
COROSYNC_NODE_02_IP=172.16.0.13
COROSYNC_BINDNETADDR=172.16.0.0
#Used for all controller nodes, set value as "Yes" or "No"
FIRST_KEYSTONE_NODE=No
FIRST_GLANCE_NODE=No
FIRST_CINDER_NODE=No
FIRST_NOVA_NODE=No
FIRST_NEUTRON_NODE=No
#Used for all controller nodes, set same value on each node
FIRST_CONTROLLER_NODE_INTERNAL_NET_IP=10.10.20.12
FIRST_KEYSTONE_NODE_INTERNAL_NET_IP=$FIRST_CONTROLLER_NODE_INTERNAL_NET_IP
#Used for all controller nodes, set different value on each node
CONTROLLER_NODE_NOVA_MY_IP=10.10.20.14
CONTROLLER_NODE_VNCSERVER_LISTEN=10.10.20.14
CONTROLLER_NODE_VNCSERVER_PROXYCLIENT_ADDRESS=10.10.20.14
#Used for all compute nodes, set same value on each node, one of : kvm, qemu
COMPUTE_NODE_LIBVIRT_TYPE=kvm
#Used for all compute nodes, set different value on each node
COMPUTE_NODE_NOVA_MY_IP=172.16.0.xx
COMPUTE_NODE_VNCSERVER_PROXYCLIENT_ADDRESS=172.16.0.xx
#Used for all nodes, set same value on each node
DATABASE_IP=$CONTROLLER_NODE_MANAGEMENT_IP
MESSAGE_QUEUE_IP=$CONTROLLER_NODE_MANAGEMENT_IP
GLANCE_HOST_IP=$CONTROLLER_NODE_MANAGEMENT_IP
CINDER_HOST_IP=$CONTROLLER_NODE_MANAGEMENT_IP
MYSQL_ROOT_PASSWORD=root123
KEYSTONE_ADMIN_TOKEN=helloOpenStack_Nwg
KEYSTONE_HOST_IP=$CONTROLLER_NODE_MANAGEMENT_IP
KEYSTONE_EXT_HOST_IP=$CONTROLLER_NODE_EXTERNAL_NET_IP
KEYSTONE_ADMIN_PASSWORD=adminpass123
KEYSTONE_SERVICE_PASSWORD=servicepass123
KEYSTONE_USER=keystone123
KEYSTONE_PASS=keystonepass123
GLANCE_USER=glance123
GLANCE_PASS=glancepass123
NEUTRON_USER=neutron123
NEUTRON_PASS=neutronpass123
NOVA_USER=nova123
NOVA_PASS=novapass123
CINDER_USER=cinder123
CINDER_PASS=cinderpass123
CINDER_VOLUMES_FILE=/root/OpenStack-Install-HA/my-cinder-volumes-file
CINDER_VOLUMES_FILE_SIZE=2G
NEW_UBUNTU_APT_SOURCES=192.168.11.63
#Used for all nodes, set same value on each node, one of : Yes, No.
USE_OTHER_UBUNTU_APT_SOURCES=Yes
#------ Here ------
#CONTROLLER_NODE_NOVA_URL_IP=$CONTROLLER_NODE_MANAGEMENT_IP
#CONTROLLER_NODE_NOVNCPROXY_BASE_URL_IP=$CONTROLLER_NODE_EXTERNAL_NET_IP
#CONTROLLER_NODE_NEUTRON_URL_IP=$CONTROLLER_NODE_MANAGEMENT_IP
#NOVA_METADATA_IP=$CONTROLLER_NODE_MANAGEMENT_IP
#NETWORK_NODE_MANAGEMENT_IP=172.16.0.52
#NETWORK_NODE_VM_NETWORK_IP=10.20.20.52
#NETWORK_NODE_EXTERNAL_NET_IP=192.168.11.18
#COMPUTE_NODE_MANAGEMENT_IP=172.16.0.53
#COMPUTE_NODE_VM_NETWORK_IP=10.20.20.53
#COMPUTE_NODE_NOVA_URL_IP=$CONTROLLER_NODE_NOVA_URL_IP
#COMPUTE_NODE_NOVNCPROXY_BASE_URL_IP=$CONTROLLER_NODE_NOVNCPROXY_BASE_URL_IP
#COMPUTE_NODE_NEUTRON_URL_IP=$CONTROLLER_NODE_NEUTRON_URL_IP
|
wingon-niu/OpenStackDeploy
|
DeployScripts/conf_orig/102-Server-5-env.sh
|
Shell
|
apache-2.0
| 6,557 |
#!/bin/bash
set -e
# Workaround
docker restart kubernetes-controller-manager
docker restart kubernetes-scheduler
# --------------------------------------------------------------------------------------------
source /root/demo-openrc
source /vagrant/cache/env.rc
# Create openstack server 'testvm'
nova boot --flavor m1.nano --image cirros --nic net-id=$DEMO_NET_ID --config-drive=true testvm
OS_VM1_IP=$(openstack server show testvm -f value -c addresses | awk -F '=' '{print $2}')
# Create k8s deployment 'demo'
kubectl run demo --image=demo:demo
K8S_POD1_NAME=$(kubectl get pods -l run=demo -o jsonpath='{.items[].metadata.name}')
kubectl get po $K8S_POD1_NAME
# Create k8s service 'demo'
kubectl expose deployment demo --port=80 --target-port=8080
K8S_SVC1_IP=$(kubectl get services demo -o json | jq -r '.spec.clusterIP')
# Scale k8s deployment 'demo'
kubectl scale deployment demo --replicas=2
# Check k8s endpoints of pods
K8S_POD1_IP=$(kubectl get endpoints demo -o json | jq -r '.subsets[].addresses[0].ip')
K8S_POD2_IP=$(kubectl get endpoints demo -o json | jq -r '.subsets[].addresses[1].ip')
# Check network traffic between openstack server, k8s service, and k8s pods
kubectl exec $K8S_POD1_NAME -- curl -s http://127.0.0.1:8080
kubectl exec $K8S_POD1_NAME -- curl -s http://$K8S_POD1_IP:8080
kubectl exec $K8S_POD1_NAME -- curl -s http://$K8S_POD2_IP:8080
kubectl exec $K8S_POD1_NAME -- curl -s http://$K8S_SVC1_IP
kubectl exec $K8S_POD1_NAME -- curl -s http://$K8S_SVC1_IP
kubectl exec $K8S_POD1_NAME -- ping -c 1 $OS_VM1_IP
# Enter interactive mode
kubectl exec -it $K8S_POD1_NAME -- bash
<<COMMANDS
export PS1='POD # '
ssh cirros@$OS_VM1_IP # password: cubswin:)
export PS1='VM # '
hostname
curl http://$K8S_POD1_IP:8080
curl http://$K8S_POD2_IP:8080
curl http://$K8S_SVC1_IP
curl http://$K8S_SVC1_IP
exit # logout vm
exit # logout pod
COMMANDS
|
jinsenglin/openstack
|
vagrant-up-3-node-openstack/o/vagrant/run-e2e-tests.sh
|
Shell
|
apache-2.0
| 1,884 |
#!/bin/sh
set -e -u
set -o pipefail
resource_dir=/opt/resource
run() {
export TMPDIR=$(mktemp -d ${TMPDIR_ROOT}/git-tests.XXXXXX)
echo -e 'running \e[33m'"$@"$'\e[0m...'
eval "$@" 2>&1 | sed -e 's/^/ /g'
echo ""
}
init_repo() {
(
set -e
cd $(mktemp -d $TMPDIR/repo.XXXXXX)
git init -q
# start with an initial commit
git \
-c user.name='test' \
-c user.email='[email protected]' \
commit -q --allow-empty -m "init"
# create some bogus branch
git checkout -b bogus
git \
-c user.name='test' \
-c user.email='[email protected]' \
commit -q --allow-empty -m "commit on other branch"
# back to master
git checkout master
# print resulting repo
pwd
)
}
make_commit_to_file_on_branch() {
local repo=$1
local file=$2
local branch=$3
local msg=${4-}
# ensure branch exists
if ! git -C $repo rev-parse --verify $branch > /dev/null 2>&1; then
git -C $repo branch $branch master
fi
# switch to branch
git -C $repo checkout -q $branch
# modify file and commit
echo x >> $repo/$file
git -C $repo add $file
git -C $repo \
-c user.name='test' \
-c user.email='[email protected]' \
commit -q -m "commit $(wc -l $repo/$file) $msg"
}
make_commit_to_file() {
make_commit_to_file_on_branch $1 $2 master "${3-}"
}
make_commit_to_branch() {
make_commit_to_file_on_branch $1 some-file $2
}
make_commit() {
make_commit_to_file $1 some-file
}
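# Hedged usage sketch for the helpers above (branch and variable names are illustrative):
#   repo=$(init_repo)
#   make_commit "$repo"                       # commit to some-file on master
#   make_commit_to_branch "$repo" feature-x   # commit to some-file on branch feature-x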
check_uri() {
jq -n "{
source: {
uri: $(echo $1 | jq -R .)
},
version: {increment: \"1\", branches: \"bogus master\"}
}" | ${resource_dir}/check | tee /dev/stderr
}
check_uri_with_branch_regexp() {
jq -n "{
source: {
uri: $(echo $1 | jq -R .),
branch_regexp: \"feature\"
},
version: {increment: \"1\", branches: \"bogus master\"}
}" | ${resource_dir}/check | tee /dev/stderr
}
check_uri_with_max_branches() {
jq -n "{
source: {
uri: $(echo $1 | jq -R .),
max_branches: 1
},
version: {increment: \"1\", branches: \"bogus master\"}
}" | ${resource_dir}/check | tee /dev/stderr
}
check_uri_first_time() {
jq -n "{
source: {
uri: $(echo $1 | jq -R .)
},
version: null
}" | ${resource_dir}/check | tee /dev/stderr
}
check_uri_with_key() {
jq -n "{
source: {
private_key: $(cat $2 | jq -s -R .)
}
}" | ${resource_dir}/check | tee /dev/stderr
}
get_version() {
jq -n "{
source: {
uri: $(echo $1 | jq -R .)
},
version: {
increment: \"1\",
branches: \"bogus master\"
}
}" | ${resource_dir}/in "$2" | tee /dev/stderr
}
|
pivotaltracker/git-branches-resource
|
test/helpers.sh
|
Shell
|
apache-2.0
| 2,652 |
../ann -w xor-test-weights.xml -t xor_train2.txt -x 100 -e 1000 -r 0.008 -m 0.006 -l S2 S7 S2; ../ann -w xor-test-weights.xml -i xor_input.txt
|
busysteve/ANN
|
xor/test-xor2.sh
|
Shell
|
apache-2.0
| 143 |
#!/bin/sh
PROG=./ei
EXPECT_DIR=test/expect
ACTUAL_DIR=test/actual
FILE=$1
SRC=test/src/$FILE
EXPECT=$EXPECT_DIR/$FILE
ACTUAL=$ACTUAL_DIR/$FILE
if ! [ -e $SRC ]; then
echo "error: $SRC not found"
exit 1
fi
if ! [ -e $EXPECT ]; then
echo "error: $EXPECT not found"
exit 1
fi
mkdir -p $ACTUAL_DIR
$PROG fmt $SRC > $ACTUAL
DIFF=`diff $EXPECT $ACTUAL`
if [ "$DIFF" = "" ]; then
exit 0
else
echo "$DIFF"
exit 1
fi
|
shiguredo/eryngii
|
test.sh
|
Shell
|
apache-2.0
| 441 |
#!/usr/bin/env bash
t1=$(curl -s --data-binary @logo.png https://telecom-tower-renderer.appspot.com/renderImage)
t2=$(curl -s -d '{"text":"A", "fgColor":"#0000ff", "bgColor":"#000000", "fontSize":6}' https://telecom-tower-renderer.appspot.com/renderText)
t3=$(curl -s -d '{"len":11, "bgColor":"#000000"}' https://telecom-tower-renderer.appspot.com/renderSpace)
t4=$(curl -s -d '{"text":"B", "fgColor":"#003311", "bgColor":"#000000", "fontSize":8}' https://telecom-tower-renderer.appspot.com/renderText)
t5=$(echo "[$t1, $t2, $t3, $t4]" | curl -s -d @- https://telecom-tower-renderer.appspot.com/join)
echo "----- IMAGE -----"
echo $t1
echo "----- LETTER IN 6x8 FONT -----"
echo $t2
echo "----- SPACE -----"
echo $t3
echo "----- LETTER IN 8x8 FONT -----"
echo $t4
echo "----- EVERYTHING JOINED -----"
echo $t5
|
heia-fr/telecom-tower-renderer
|
test.sh
|
Shell
|
apache-2.0
| 811 |
#!/bin/bash
echo ">> postbuild"
set -e
source /opt/qnib/gocd/helpers/gocd-functions.sh
# Create BUILD_IMG_NAME, which includes the git-hash and the revision of the pipeline
assemble_build_img_name
mkdir -p binpkg
if [ -d docker ];then
DOCKER_DIR=true
fi
if [ -x postbuild.sh ];then
echo ">>>> Run postbuild script"
if [ -d docker ];then
./docker/postbuild.sh
else
./postbuild.sh
fi
fi
if [ "${GOCD_LOCAL_DOCKERENGINE}" != "true" ] && [ "${GOCD_CLEAN_IMAGES}" == "true" ];then
echo "> Remove unstarted containers"
docker rm $(docker ps -qa)
echo "> Remove unused images"
docker rmi $(docker images -qa)
fi
|
qnib/alpn-gocd-agent
|
opt/qnib/gocd/tasks/docker/postbuild.sh
|
Shell
|
apache-2.0
| 665 |
#!/bin/bash
set -e
# Runs codecleaver tests
pushd $(dirname $0) > /dev/null
./codecleaver.sh < test/test.codecleaver > /dev/null
if diff test/expected-results.txt out/actual-results.txt > out/results.diff ; then
echo Tests Passed.
else
echo Tests Failed. See out/results.diff for details.
fi
popd > /dev/null
|
ericmckean/codecleaver
|
run_tests.sh
|
Shell
|
apache-2.0
| 320 |
#!/bin/sh
. ./include.sh
REDIRECT=/dev/null
$PYTHON $examples_src/grib_clone.py
rm -f out.clone.grib
|
0x1mason/GribApi.XP
|
grib_api/examples/python/grib_clone.sh
|
Shell
|
apache-2.0
| 103 |
#!/bin/bash
cd ../OracleJava
docker rmi "docker-001.bluegarden.tst:5000/serverjre:8"
time docker build -t "docker-001.bluegarden.tst:5000/serverjre:8" .
time docker push "docker-001.bluegarden.tst:5000/serverjre:8"
cd /opt/sw/support/git/public/rdbms_support/docker/bin
|
testor321/rdbms_support
|
docker/bin/build_jre8.sh
|
Shell
|
apache-2.0
| 270 |
#!/bin/bash
if [[ -z "$1" || -z "$2" ]]; then
echo "Usage: $0 <directory> <script>" >&2
exit
fi
if [[ ! -e "$1" ]]; then
echo "The file or directory is not valid." >&2
exit
fi
WATCHDIR="$1"
shift
SCRIPT="$*"
HASH=""
function updatehash() {
HASH=`find $WATCHDIR -not -iname ".*" -exec stat -f "%N %z %m" -L {} \; | md5`
}
updatehash
while true; do
OLDHASH=$HASH
updatehash
if [ "$HASH" != "$OLDHASH" ]; then
echo `date -j "+%Y-%m-%d %r"` $SCRIPT
$SCRIPT
tput bel
updatehash
fi
sleep 0.2
done
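# Hypothetical invocation (directory and command are illustrative assumptions):
#   ./onchange.sh ./src "make test"
# watches ./src and re-runs `make test` whenever any file's size or mtime
# changes (note: stat -f and md5 above are the BSD/macOS variants).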
|
romannurik/env
|
bin/onchange.sh
|
Shell
|
apache-2.0
| 529 |
perl ta_pipe.pl -in ABA.xls -o ABA -ref gokegg_ref -pf sly -komap /Bio/Database/Database/kegg/data/map_class/plant_ko_map.tab -dc 12
|
BaconKwan/Perl_programme
|
trends_analysis/run_ta_pipe.sh
|
Shell
|
apache-2.0
| 133 |
#!/bin/bash -eux
# Copyright 2018 Big Switch Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# env variables to be supplied by BOSI
fqdn={fqdn}
is_controller={is_controller}
phy1_name={phy1_name}
phy1_nics={phy1_nics}
system_desc={system_desc}
# constants for this job
SERVICE_FILE_1='/usr/lib/systemd/system/send_lldp.service'
SERVICE_FILE_MULTI_USER_1='/etc/systemd/system/multi-user.target.wants/send_lldp.service'
# new vars with evaluated results
HOSTNAME=`hostname -f`
# system name for LLDP depends on whether it's a controller or compute node
SYSTEMNAME=${{HOSTNAME}}-${{phy1_name}}
if [[ $is_controller == true ]]; then
SYSTEMNAME=${{HOSTNAME}}
fi
# Make sure only root can run this script
if [ "$(id -u)" != "0" ]; then
echo -e "Please run as root"
exit 1
fi
# if service file exists, stop and disable the service. else, return true
systemctl stop send_lldp || true
systemctl disable send_lldp || true
# rewrite service file
echo "
[Unit]
Description=BSN send_lldp for DPDK physnet
After=syslog.target network.target
[Service]
Type=simple
ExecStart=/bin/python /usr/lib/python2.7/site-packages/networking_bigswitch/bsnlldp/send_lldp.py \
--system-desc ${{system_desc}} \
--system-name ${{SYSTEMNAME}} \
-i 10 \
--network_interface ${{phy1_nics}} \
--sriov
Restart=always
StartLimitInterval=60s
StartLimitBurst=3
[Install]
WantedBy=multi-user.target
" > $SERVICE_FILE_1
# symlink multi user file
ln -sf $SERVICE_FILE_1 $SERVICE_FILE_MULTI_USER_1
# reload service files
systemctl daemon-reload
# start services as required
systemctl enable send_lldp
systemctl start send_lldp
echo "Finished updating with DPDK LLDP scripts."
|
bigswitch/bosi
|
etc/t5/bash_template/redhat_7_dpdk.sh
|
Shell
|
apache-2.0
| 2,237 |
#!/bin/bash
# ----------------------------------------------------------------------------
#
# Package : mongo-cxx-driver
# Version : 1.1.0
# Source repo : https://github.com/mongodb/mongo-cxx-driver.git
# Tested on : rhel_7.3
# Script License: Apache License, Version 2 or later
# Maintainer : Atul Sowani <[email protected]>
#
# Disclaimer: This script has been tested in non-root mode on given
# ========== platform using the mentioned version of the package.
# It may not work as expected with newer versions of the
# package and/or distribution. In such case, please
# contact "Maintainer" of this script.
#
# ----------------------------------------------------------------------------
# Install dependencies.
sudo yum update -y
sudo yum install -y gcc g++ automake autoconf libtool make libboost* wget \
gcc-c++ boost* tar bzip2 which openssl-devel cyrus-sasl-devel curl \
libcurl libcurl-devel python-devel.ppc64le libxml2-devel.ppc64le \
libxslt-devel.ppc64le boost-devel.ppc64le git
wget -cq ftp://rpmfind.net/linux/epel/7/ppc64le/s/scons-*.rpm -O scons-latest.rpm
sudo rpm -ivh scons-latest.rpm
# Build and test code.
git clone https://github.com/mongodb/mongo-cxx-driver.git
cd mongo-cxx-driver
git checkout legacy-1.1.0
scons --prefix=$HOME/mongo-client-install --ssl install
scons build-unit
scons unit
|
ppc64le/build-scripts
|
m/mongo-cxx-driver/mongo-cxx-driver_rhel_7.3.sh
|
Shell
|
apache-2.0
| 1,373 |
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
current_dir=`dirname "$0"`
current_dir=`cd "$current_dir"; pwd`
root_dir=${current_dir}/../../../../../
workload_config=${root_dir}/conf/workloads/ml/pca.conf
. "${root_dir}/bin/functions/load_bench_config.sh"
enter_bench PCA ${workload_config} ${current_dir}
show_bannar start
rmr_hdfs $OUTPUT_HDFS || true
SIZE=`dir_size $INPUT_HDFS`
START_TIME=`timestamp`
run_spark_job com.intel.hibench.sparkbench.ml.PCAExample ${INPUT_HDFS} ${MAX_RESULT_SIZE_PCA}
END_TIME=`timestamp`
gen_report ${START_TIME} ${END_TIME} ${SIZE}
show_bannar finish
leave_bench
|
kimihe/Swallow
|
swallow-benchmark/HiBench-master/bin/workloads/ml/pca/spark/run.sh
|
Shell
|
apache-2.0
| 1,347 |
#!/bin/bash
find . \( -path ./vendor -o -path ./.glide \) -prune -o -name "*.go" -exec goimports -w {} \;
if [[ -n "$(git -c core.fileMode=false status --porcelain)" ]]; then
echo "goimports modified code; requires attention!"
if [[ "${CI}" == "true" ]]; then
exit 1
fi
fi
|
owulveryck/toscalib
|
scripts/fmt.sh
|
Shell
|
apache-2.0
| 295 |
#!/bin/bash
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
start=$(date +"%T")
echo "Start time : $start"
SCRIPT_PATH=`dirname $0`
source ${SCRIPT_PATH}/setup.sh
# No need to change below this line
SS_DIR=${DEV_DATA_DIR}/ssdata
SS_REVERB_DIR=${DEV_DATA_DIR}/ssdata_reverb
# Download and unarchive SS data.
# ssdata.tar.gz should have a directory ssdata which has
# train, validation, eval subdirectories underneath
# and under those there should be example wav files that will be used
# as train, validation and eval mixtures.
mkdir -p ${DOWNLOAD_DIR}
mkdir -p ${DEV_DATA_DIR}
if [ ! -s ${DOWNLOAD_DIR}/ssdata.tar.gz ]; then
curl --output ${DOWNLOAD_DIR}/ssdata.tar.gz ${SSDATA_URL}
else
echo "${DOWNLOAD_DIR}/ssdata.tar.gz exists, skipping download."
fi
if [ ! -d ${SS_DIR} ]; then
tar xzf ${DOWNLOAD_DIR}/ssdata.tar.gz -C ${DEV_DATA_DIR}
else
echo "${SS_DIR} directory exists, skipping unarchiving."
fi
# Download and unarchive SSREVERB data.
# ssdata_reverb.tar.gz should have a top level directory called ssdata_reverb
# and then train, validation and eval subdirectories and
# under those the same structure as ssdata.
if [ ! -s ${DOWNLOAD_DIR}/ssdata_reverb.tar.gz ]; then
curl --output ${DOWNLOAD_DIR}/ssdata_reverb.tar.gz ${SSDATA_REVERB_URL}
else
echo "${DOWNLOAD_DIR}/ssdata_reverb.tar.gz exists, skipping download."
fi
if [ ! -d ${SS_REVERB_DIR} ]; then
tar xzf ${DOWNLOAD_DIR}/ssdata_reverb.tar.gz -C ${DEV_DATA_DIR}
else
echo "${SS_REVERB_DIR} directory exists, skipping unarchiving."
fi
unarchive_time=$(date +"%T")
echo "Start time : $start, download and unarchive finish time: $unarchive_time"
|
google-research/sound-separation
|
datasets/fuss/get_dev_data.sh
|
Shell
|
apache-2.0
| 2,166 |
#!/bin/bash
KUBEVIRT_DIR="$(
cd "$(dirname "$BASH_SOURCE[0]")/../"
pwd
)"
OUT_DIR=$KUBEVIRT_DIR/_out
VENDOR_DIR=$KUBEVIRT_DIR/vendor
CMD_OUT_DIR=$OUT_DIR/cmd
TESTS_OUT_DIR=$OUT_DIR/tests
APIDOCS_OUT_DIR=$OUT_DIR/apidocs
MANIFESTS_OUT_DIR=$OUT_DIR/manifests
PYTHON_CLIENT_OUT_DIR=$OUT_DIR/client-python
function build_func_tests() {
mkdir -p ${TESTS_OUT_DIR}/
ginkgo build ${KUBEVIRT_DIR}/tests
mv ${KUBEVIRT_DIR}/tests/tests.test ${TESTS_OUT_DIR}/
}
# For backward compatibility
KUBEVIRT_PROVIDER=${KUBEVIRT_PROVIDER:-${PROVIDER}}
KUBEVIRT_PROVIDER=${KUBEVIRT_PROVIDER:-k8s-1.9.3}
# For backward compatibility
KUBEVIRT_NUM_NODES=${KUBEVIRT_NUM_NODES:-${VAGRANT_NUM_NODES}}
KUBEVIRT_NUM_NODES=${KUBEVIRT_NUM_NODES:-1}
# If on a developer setup, expose ocp on 8443, so that the openshift web console can be used (the port is important because of auth redirects)
if [ -z "${JOB_NAME}" ]; then
KUBEVIRT_PROVIDER_EXTRA_ARGS="${KUBEVIRT_PROVIDER_EXTRA_ARGS} --ocp-port 8443"
fi
# If run on Jenkins, let us create isolated environments based on the job and
# the executor number
provider_prefix=${JOB_NAME:-${KUBEVIRT_PROVIDER}}${EXECUTOR_NUMBER}
job_prefix=${JOB_NAME:-kubevirt}${EXECUTOR_NUMBER}
# Populate an environment variable with the version info needed.
# It should be used for everything which needs a version when building (not generating)
# IMPORTANT:
# RIGHT NOW ONLY RELEVANT FOR BUILDING, GENERATING CODE OUTSIDE OF GIT
# IS NOT NEEDED NOR RECOMMENDED AT THIS STAGE.
function kubevirt_version() {
if [ -n "${KUBEVIRT_VERSION}" ]; then
echo ${KUBEVIRT_VERSION}
elif [ -d ${KUBEVIRT_DIR}/.git ]; then
echo "$(git describe --always --tags)"
else
echo "undefined"
fi
}
KUBEVIRT_VERSION="$(kubevirt_version)"
|
fabiand/kubevirt
|
hack/common.sh
|
Shell
|
apache-2.0
| 1,783 |
#! /bin/sh
# NB: This file, and the Windows version, should be generated from common source.
HARNESS_ROOT_DIR="${HARNESS_ROOT_DIR:-.}"
LIBRARY="${HARNESS_ROOT_DIR}"/library
target=`echo "$1" | sed -e 's/\.nqp$//'`
rm -f "$target.pir" "$target.pbc" "$target.t.pbc"
parrot-nqp --target=pir --output="$target.pir" "$target.nqp"
parrot --output="$target.pbc" "$target.pir"
pbc_merge --output="$target.t.pbc" "${LIBRARY}"/krt0.pbc "$target.pbc"
parrot -L"${LIBRARY}" "$target.t.pbc"
|
Whiteknight/kakapo
|
t/harness-nqp.sh
|
Shell
|
artistic-2.0
| 480 |
#!/bin/bash
set -e
function error {
echo error: "$@"
exit 1
}
function itime {
/usr/bin/time "$@"
}
BUILD_GO=y
INTEGRATION=y
FAST=n
SHOW_HELP=n
SETUP_GOPATH=n
TEMP=`getopt -o h --long help,no-go,no-integration,fast,setup-gopath -n test.bash -- "$@"`
eval set -- "$TEMP"
while true; do
case "$1" in
-h|--help) SHOW_HELP=y ; shift ;;
--no-go) BUILD_GO=n ; shift ;;
--no-integration) INTEGRATION=n ; shift ;;
--fast) FAST=y ; shift ;;
--setup-gopath) SETUP_GOPATH=y ; shift ;;
--) shift ; break ;;
esac
done
if [ "$SHOW_HELP" = y ]; then
echo "usage: test.bash -h|--help"
echo " test.bash [--no-go] [--no-integration] [--fast] [--setup-gopath]"
exit 0
fi
function do_cmd {
echo "[+] $@"
eval "$@"
}
PROJECT_ROOT="$PWD"
if [ "$SETUP_GOPATH" = y ]; then
do_cmd rm -rf gopath
do_cmd mkdir -p gopath/src/github.com/unigornel
do_cmd ln -s ../../../.. gopath/src/github.com/unigornel/unigornel
export GOPATH="$PWD/gopath"
do_cmd cd gopath/src/github.com/unigornel/unigornel
fi
# Build Go
if [ "$BUILD_GO" = y ]; then
pushd go/src
export GOROOT_BOOTSTRAP
[ -z "$GOROOT_BOOTSTRAP" ] && error "GOROOT_BOOTSTRAP not set"
[ "$FAST" = y ] && fast_opt=--no-clean || fast_opt=
do_cmd GOOS=unigornel GOARCH=amd64 ./make.bash $fast_opt
popd
fi
# Build unigornel
pushd unigornel
do_cmd go get -v
do_cmd go build -o unigornel
cat > .unigornel.yaml <<EOF
goroot: $PWD/../go
minios: $PWD/../minios
libraries: $PWD/../libraries.yaml
EOF
eval $(./unigornel env -c .unigornel.yaml)
export PATH="$PWD:$PATH"
popd
# Run integration tests
if [ "$INTEGRATION" = y ]; then
unigornel_root="$PWD"
pushd integration_tests
do_cmd go get -v
go build -o "test"
./test -junit "integration_tests.xml"
popd
fi
|
unigornel/unigornel
|
test.bash
|
Shell
|
bsd-2-clause
| 1,940 |
#!/bin/bash
mosquitto_sub -t "alert/#"
|
mchmarny/mqtt-pub-sub-demo
|
mon.sh
|
Shell
|
bsd-2-clause
| 39 |
#!/bin/bash
#
# vim: set ts=4 sw=4 et:
#
# Passed arguments:
# $1 - pkgname [REQUIRED]
# $2 - cross target [OPTIONAL]
if [ $# -lt 1 -o $# -gt 2 ]; then
echo "$(basename $0): invalid number of arguments: pkgname [cross-target]"
exit 1
fi
PKGNAME="$1"
XBPS_CROSS_BUILD="$2"
for f in $XBPS_SHUTILSDIR/*.sh; do
. $f
done
setup_pkg "$PKGNAME" $XBPS_CROSS_BUILD
for f in $XBPS_COMMONDIR/environment/extract/*.sh; do
source_file "$f"
done
XBPS_EXTRACT_DONE="${XBPS_STATEDIR}/${sourcepkg}_${XBPS_CROSS_BUILD}_extract_done"
if [ -f $XBPS_EXTRACT_DONE ]; then
exit 0
fi
# Run pre-extract hooks
run_pkg_hooks pre-extract
# If template defines pre_extract(), use it.
if declare -f pre_extract >/dev/null; then
run_func pre_extract
fi
# If template defines do_extract() use it rather than the hooks.
if declare -f do_extract >/dev/null; then
[ ! -d "$wrksrc" ] && mkdir -p $wrksrc
cd $wrksrc
run_func do_extract
else
if [ -n "$build_style" ]; then
if [ ! -r $XBPS_BUILDSTYLEDIR/${build_style}.sh ]; then
msg_error "$pkgver: cannot find build helper $XBPS_BUILDSTYLEDIR/${build_style}.sh!\n"
fi
. $XBPS_BUILDSTYLEDIR/${build_style}.sh
fi
# If the build_style script declares do_extract(), use it rather than hooks.
if declare -f do_extract >/dev/null; then
run_func do_extract
else
# Run do-extract hooks
run_pkg_hooks "do-extract"
fi
fi
touch -f $XBPS_EXTRACT_DONE
[ -d $wrksrc ] && cd $wrksrc
# If template defines post_extract(), use it.
if declare -f post_extract >/dev/null; then
run_func post_extract
fi
# Run post-extract hooks
run_pkg_hooks post-extract
exit 0
|
radare/void-packages
|
common/xbps-src/libexec/xbps-src-doextract.sh
|
Shell
|
bsd-2-clause
| 1,694 |
#!/bin/bash
if [ "$(whoami)" = root ]; then
ln -s $PWD/bin/dotlang /usr/local/bin
else
ln -s $PWD/bin/dotlang ~/bin/
fi
|
zielmicha/dotlang
|
bin/install.sh
|
Shell
|
bsd-2-clause
| 128 |
#!/bin/bash
#
# Zabbix-put
#
# Copyright 2012, Michal Belica <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
ZABBIX_GET="zabbix_get"
LINESIZE=768
prog="zabbix_put"
function fail {
echo "$prog FAIL: $1" >&2
exit 1
}
function usage {
echo "use: $prog <source_file> <destination_host> <destination_dir>"
exit 0
}
function cleanup {
[[ -f "$tmpf" ]] && rm -f "$tmpf"
}
[[ "$1" == "-h" ]] && usage
SRCFILE="$1"
DSTHOST="$2"
DSTDIR="$3"
[[ -n "$SRCFILE" && -n "$DSTHOST" && -n "$DSTDIR" ]] || fail "wrong command line arguments, use -h for help"
if [[ ! -r "$SRCFILE" ]] ; then
echo "$SRCFILE missing"
exit 1
fi
fname=`basename ${SRCFILE}`
tmpf=`mktemp` || exit 1
trap cleanup exit
bzip2 -c9 "$SRCFILE" | base64 -w "$LINESIZE" > "$tmpf" || exit 1
cat "$tmpf" | while read line ; do
out=`"$ZABBIX_GET" -s "$DSTHOST" -k "system.run[echo \"$line\" >> \"${DSTDIR}/${fname}.bzip2.b64\" ; echo .]"` \
|| fail "remote command failed"
[[ "$out" == "." ]] || fail "remote command failed"
echo -n "$out"
done
[[ "$?" == "0" ]] || exit $?
echo
"$ZABBIX_GET" -s "$DSTHOST" -k \
"system.run[base64 -id \"${DSTDIR}/${fname}.bzip2.b64\" | bunzip2 > \"${DSTDIR}/${fname}\" ; rm \"${DSTDIR}/${fname}.bzip2.b64\" ; md5sum \"${DSTDIR}/${fname}\"]"
md5sum "$SRCFILE"
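# Hypothetical example run (host and paths are assumptions): push a local file
# to a Zabbix-monitored host whose agent allows system.run, then compare the
# two md5sum lines printed above (remote first, local second):
#   ./zabbix_put.sh backup.tar.gz agent-host.example.com /tmp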
|
beli-sk/zabbix-put
|
zabbix_put.sh
|
Shell
|
bsd-2-clause
| 2,550 |
#!/bin/bash
set -o verbose #echo on
tar -xvf originalrepo.tar
echo ~~~ Every recipe aims to keep what is in the c directory while pruning away everything else.
echo
sleep 3
echo ~~~ Filter and relocate a subdirectory to the root
sleep 3
rm -rf filteredrepo.filtersubdir
git clone --no-hardlinks originalrepo filteredrepo.filtersubdir
cd filteredrepo.filtersubdir
git filter-branch --subdirectory-filter c HEAD
git reflog expire --expire=now --all
git reset --hard
git gc --aggressive --prune=now
cd ..
echo
echo ~~~ Filter and relocate a subdirectory to the root while keeping tags and branches
sleep 3
rm -rf filteredrepo.filtersubdirall
git clone --no-hardlinks originalrepo filteredrepo.filtersubdirall
cd filteredrepo.filtersubdirall
git filter-branch --subdirectory-filter c HEAD -- --all
git reflog expire --expire=now --all
git reset --hard
git gc --aggressive --prune=now
cd ..
echo
echo ~~~ Keep only the references to the c directory, removing all a and b files
sleep 3
rm -rf filteredrepo.treefilter
git clone --no-hardlinks originalrepo filteredrepo.treefilter
cd filteredrepo.treefilter
git filter-branch --tree-filter "rm -rf a b" --prune-empty HEAD
git reflog expire --expire=now --all
git reset --hard
git gc --aggressive --prune=now
cd ..
echo
echo ~~~ Keep the subdirectory that matches the given pattern
sleep 3
rm -rf filteredrepo.indexfilter
git clone --no-hardlinks originalrepo filteredrepo.indexfilter
cd filteredrepo.indexfilter
git filter-branch --index-filter "git rm -r -f --cached --ignore-unmatch a b" --prune-empty HEAD
git reflog expire --expire=now --all
git reset --hard
git gc --aggressive --prune=now
cd ..
echo
echo ~~~ Keep the subdirectory that matches the given pattern while re-writing tags and branches
sleep 3
rm -rf filteredrepo.indexfiltercatall
git clone --no-hardlinks originalrepo filteredrepo.indexfiltercatall
cd filteredrepo.indexfiltercatall
git filter-branch --index-filter "git rm -r -f --cached --ignore-unmatch a b" --prune-empty --tag-name-filter cat -- --all
git reflog expire --expire=now --all
git reset --hard
git gc --aggressive --prune=now
cd ..
set +o verbose #echo off
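# Hypothetical follow-up check (not part of the original demo): confirm that a
# filtered clone now only tracks what used to live under the c directory:
#   cd filteredrepo.filtersubdir && git ls-files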
|
youpeiban/youpeiban.github.io
|
examples/filter-branch/test-filter-branch.sh
|
Shell
|
bsd-2-clause
| 2,155 |
sudo mkdir -p /opt/oracle
sudo rm -rf /opt/oracle/instantclient
find ~/ | grep 'instantclient' | grep 'zip$' | xargs -L 1 sudo unzip -d /opt/oracle/
find /opt/oracle | grep 'sqlplus$' | sed s/sqlplus// | xargs -I {} -L 1 sudo ln -s {} /opt/oracle/instantclient
echo 'export DYLD_LIBRARY_PATH=/opt/oracle/instantclient' >> ~/.bash_profile
echo 'export PATH=/opt/oracle/instantclient:$PATH' >> ~/.bash_profile
echo 'export ORACLE_HOME=/opt/oracle/instantclient' >> ~/.bash_profile
find /opt/oracle | grep 'libclntsh' | xargs -I {} -L 1 sudo ln -s {} /opt/oracle/instantclient/libclntsh.dylib
|
klkane/macosx_oracle_instantclient_installer
|
installer.sh
|
Shell
|
bsd-2-clause
| 590 |
#!/bin/bash
EXTERNALS_PATH=externals
TOOLS_PATH=tools
if [ ! -d "client_src" ] || [ ! -d "landing_src" ]
then
echo "Usage: bin/setup_closure.sh"
echo " Download and set up Closure compiler."
echo " This script should be run from the root of the git repo."
echo
  exit 1
fi
function __create_folder__ {
FOLDER=$1
TAB=$2
if [ ! -d $FOLDER ]
then
echo "${TAB}Creating folder: $FOLDER"
mkdir $FOLDER
fi
}
function __setup_closure__ {
EXTERNALS_SUBPATH=$EXTERNALS_PATH/closure-latest
TOOLS_SUBPATH=$TOOLS_PATH/closure
echo "# Downloading Closure..."
if [ ! -d "$EXTERNALS_SUBPATH" ]
then
__create_folder__ $EXTERNALS_SUBPATH
curl --insecure --location http://dl.google.com/closure-compiler/compiler-latest.zip > $EXTERNALS_SUBPATH/compiler-latest.zip
echo " Extracting license..."
unzip $EXTERNALS_SUBPATH/compiler-latest.zip COPYING -d $EXTERNALS_SUBPATH &&\
mv $EXTERNALS_SUBPATH/COPYING $EXTERNALS_SUBPATH/LICENSE
echo "You may delete downloaded files in this folder without affecting the topic model visualizations." > $EXTERNALS_SUBPATH/safe-to-delete.txt
else
echo " Already downloaded: $EXTERNALS_SUBPATH/compiler-latest.zip"
fi
echo
echo "# Setting up Closure..."
if [ ! -d "$TOOLS_SUBPATH" ]
then
__create_folder__ $TOOLS_SUBPATH " "
echo " Uncompressing..."
unzip $EXTERNALS_SUBPATH/compiler-latest.zip compiler.jar -d $TOOLS_SUBPATH
else
echo " Already available: $TOOLS_SUBPATH"
fi
echo
}
__create_folder__ $EXTERNALS_PATH
__create_folder__ $TOOLS_PATH
__setup_closure__
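# Hypothetical smoke test (not part of the original setup script; file names
# are assumptions): compile a single file with the downloaded compiler jar:
#   java -jar tools/closure/compiler.jar --js client_src/main.js --js_output_file main.min.js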
|
uwdata/termite-visualizations
|
bin/setup_closure.sh
|
Shell
|
bsd-3-clause
| 1,556 |
#!/bin/bash
source $(dirname $0)/webapi-service-tests.spec
SRC_ROOT=$(cd $(dirname $0);pwd)
BUILD_ROOT=/tmp/${name}-${path_flag}_pack
BUILD_DEST=/tmp/${name}-${path_flag}
usage="Usage: ./pack.sh [-t <package type: apk | cordova>] [-a <apk runtime arch: x86 | arm>] [-m <package mode: embedded | shared>] [-v <sub version: 3.6 | 4.x>] [-p <local | npm>]
[-t apk] option was set as default.
[-a x86] option was set as default.
[-m embedded] option was set as default.
[-v 3.6] option was set as default.
[-p local] option was set as default.
"
dest_dir=$SRC_ROOT
pack_type="apk"
arch="x86"
pack_mode="embedded"
sub_version="3.6"
crosswalk_version=""
crosswalk_branch=""
plugin_location="local"
while getopts a:t:m:d:v:p: o
do
case "$o" in
a) arch=$OPTARG;;
t) pack_type=$OPTARG;;
m) pack_mode=$OPTARG;;
d) dest_dir=$OPTARG;;
v) sub_version=$OPTARG;;
p) plugin_location=$OPTARG;;
*) echo "$usage"
exit 1;;
esac
done
main_version=$(cat ../../VERSION | awk 'NR==2')
for((i=1;i<=4;i++))
do
crosswalk_version=$(echo $main_version|cut -d "\"" -f$i)
done
crosswalk_branch_tmp=$(cat ../../VERSION | awk 'NR==3')
for((i=1;i<=4;i++))
do
crosswalk_branch=$(echo $crosswalk_branch_tmp|cut -d "\"" -f$i)
done
rm -rf $dest_dir/$name-$version-$sub_version.$pack_type.zip
# clean
function clean_workspace(){
echo "cleaning workspace... >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>"
rm -rf $BUILD_ROOT
rm -rf $BUILD_DEST
}
echo "cleaning workspace... >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>"
clean_workspace
mkdir -p $BUILD_ROOT $BUILD_DEST
if [ $pack_type == "cordova" ]; then
for list in $LIST;do
python $SRC_ROOT/../../tools/build/pack.py -t ${pack_type}-aio -m $pack_mode -d $BUILD_DEST --sub-version $sub_version --pack-type $plugin_location -s $SRC_ROOT/../../webapi/$list
if [ -d $BUILD_DEST/opt/$list/HOST_RESOURCES ]; then
mkdir -p $BUILD_ROOT/opt/$name/opt/$list
mv $BUILD_DEST/opt/$list/HOST_RESOURCES/* $BUILD_ROOT/opt/$name/opt/$list
fi
done
else
for list in $LIST;do
python $SRC_ROOT/../../tools/build/pack.py -t ${pack_type}-aio -m $pack_mode -a $arch -d $BUILD_DEST -s $SRC_ROOT/../../webapi/$list
if [ -d $BUILD_DEST/opt/$list/HOST_RESOURCES ]; then
mkdir -p $BUILD_ROOT/opt/$name/opt/$list
mv $BUILD_DEST/opt/$list/HOST_RESOURCES/* $BUILD_ROOT/opt/$name/opt/$list
fi
done
fi
if [ -d $BUILD_ROOT/opt/$name/opt ]; then
mkdir -p $BUILD_ROOT/opt/$name/apps
mv `find $BUILD_DEST -name '*apk'` $BUILD_ROOT/opt/$name/apps
cp -a $SRC_ROOT/../../tools/resources/bdd/bddrunner $BUILD_ROOT/opt/$name
cp -a $SRC_ROOT/../../tools/resources/bdd/data.conf $BUILD_ROOT/opt/$name
cp -a $SRC_ROOT/../../tools/resources/xsl/* $BUILD_ROOT/opt/$name
fi
## create apk ##
cp -a $SRC_ROOT/icon.png $BUILD_ROOT/
if [ $pack_type == "apk" ];then
#cp -ar $SRC_ROOT/../../tools/crosswalk $BUILD_ROOT/crosswalk
cd $BUILD_ROOT
#python make_apk.py --package=org.xwalk.$appname --name=$appname --app-url=http://127.0.0.1:8080/index.html --icon=$BUILD_ROOT/icon.png --mode=$pack_mode --arch=$arch --enable-remote-debugging
crosswalk-pkg --android=$pack_mode --crosswalk=$crosswalk_version --manifest=''\{\"name\":\"$appname\"\,\"xwalk_package_id\":\"org.xwalk."$appname"\",\"start_url\":\"http://127.0.0.1:8080/index.html\"\}'' -p android --targets=$arch $BUILD_DEST
elif [ $pack_type == "cordova" ];then
if [ $sub_version == "4.x" ]; then
cp -ar $SRC_ROOT/../../tools/cordova_plugins $BUILD_ROOT/cordova_plugins
cd $BUILD_ROOT
cordova create $appname org.xwalk.$appname $appname
sed -i "s/<widget/<widget android-activityName=\"$appname\"/g" $BUILD_ROOT/$appname/config.xml
sed -i "s/<\/widget>/ <allow-navigation href=\"*\" \/>\\n<\/widget>/g" $BUILD_ROOT/$appname/config.xml
cd $BUILD_ROOT/$appname
cordova platform add android
for plugin in `ls $BUILD_ROOT/cordova_plugins`
do
if [ $plugin == "cordova-plugin-crosswalk-webview" ]; then
version_cmd=""
plugin_crosswalk_source=$BUILD_ROOT/cordova_plugins/$plugin
if [ $crosswalk_branch == "beta" ]; then
if [ $pack_mode == "shared" ]; then
version_cmd="--variable XWALK_VERSION="org.xwalk:xwalk_shared_library_beta:$crosswalk_version""
else
version_cmd="--variable XWALK_VERSION="org.xwalk:xwalk_core_library_beta:$crosswalk_version""
fi
else
version_cmd="--variable XWALK_VERSION="$crosswalk_version""
fi
if [ $plugin_location == 'npm' ]; then
plugin_crosswalk_source="cordova-plugin-crosswalk-webview"
fi
echo $version_cmd
echo $plugin_crosswalk_source
cordova plugin add $plugin_crosswalk_source $version_cmd --variable XWALK_MODE="$pack_mode"
else
cordova plugin add $BUILD_ROOT/cordova_plugins/$plugin
fi
done
cd $BUILD_ROOT/$appname/www
cat > index.html << EOF
<!doctype html>
<head>
<meta http-equiv="Refresh" content="1; url=http://127.0.0.1:8080/index.html">
</head>
EOF
cd $BUILD_ROOT/$appname
cordova build android -- --gradleArg=-PcdvBuildArch=$arch
elif [ $sub_version == "3.6" ]; then
cp -ar $SRC_ROOT/../../tools/cordova $BUILD_ROOT/cordova
cp -ar $SRC_ROOT/../../tools/cordova_plugins $BUILD_ROOT/cordova_plugins
cd $BUILD_ROOT/cordova
if [ $pack_mode == "shared" ]; then
bin/create $appname org.xwalk.$appname $appname --xwalk-shared-library
else
if [ $pack_mode != "embedded" ]; then
echo "package mode can only be embedded or shared, now take embedded as default.... >>>>>>>>>>>>>>>>>>>>>>>>>"
fi
bin/create $appname org.xwalk.$appname $appname
fi
cd $BUILD_ROOT/cordova/$appname
for plugin in `ls $BUILD_ROOT/cordova_plugins`
do
plugman install --platform android --project ./ --plugin $BUILD_ROOT/cordova_plugins/$plugin
done
cd $BUILD_ROOT/cordova/$appname/assets/www
cat > index.html << EOF
<!doctype html>
<head>
<meta http-equiv="Refresh" content="1; url=http://127.0.0.1:8080/index.html">
</head>
EOF
cd $BUILD_ROOT/cordova/$appname
./cordova/build
else
echo "package sub version can only be 3.6 or 4.x, now exit.... >>>>>>>>>>>>>>>>>>>>>>>>>"
clean_workspace
exit 1
fi
fi
if [ $? -ne 0 ];then
echo "Create $name.apk fail.... >>>>>>>>>>>>>>>>>>>>>>>>>"
clean_workspace
exit 1
fi
## copy tests.xml and inst.py ##
mkdir -p $BUILD_ROOT/opt/$name
cp $SRC_ROOT/inst.py $BUILD_ROOT/opt/$name/inst.py
for list in $LIST;do
suite=`basename $list`
cp $SRC_ROOT/../../webapi/$list/tests.xml $BUILD_ROOT/opt/$name/$suite.tests.xml
sed -i "s/<suite/<suite widget=\"$name\"/g" $BUILD_ROOT/opt/$name/$suite.tests.xml
cp $SRC_ROOT/../../webapi/$list/tests.full.xml $BUILD_ROOT/opt/$name/$suite.tests.full.xml
sed -i "s/<suite/<suite widget=\"$name\"/g" $BUILD_ROOT/opt/$name/$suite.tests.full.xml
done
## create zip package ##
if [ $pack_type == "apk" ];then
mv $BUILD_ROOT/*.apk $BUILD_ROOT/opt/$name/
mv $BUILD_ROOT/opt/$name/org.xwalk.webapi_service_tests*.apk $BUILD_ROOT/opt/$name/$appname.apk
elif [ $pack_type == "cordova" ];then
if [ $sub_version == "3.6" ]; then
if [ -f $BUILD_ROOT/cordova/$appname/bin/$appname-debug.apk ];then
mv $BUILD_ROOT/cordova/$appname/bin/$appname-debug.apk $BUILD_ROOT/opt/$name/$appname.apk
fi
elif [ $sub_version == "4.x" ]; then
apk_name_arch="armv7"
if [ $arch == 'x86' ]; then
apk_name_arch="x86"
else
if [ $arch != 'arm' ]; then
echo "apk runtime arch can only be x86 or arm, now take arm as default.... >>>>>>>>>>>>>>>>>>>>>>>>>"
exit 1
fi
fi
dir_source=$BUILD_ROOT/$appname/platforms/android/build/outputs/apk
apk_source1=$dir_source/$appname-$apk_name_arch-debug.apk
apk_source2=$dir_source/android-$apk_name_arch-debug.apk
apk_dest=$BUILD_ROOT/opt/$name/$appname.apk
if [ -f $apk_source1 ];then
mv $apk_source1 $apk_dest
elif [ -f $apk_source2 ];then
mv $apk_source2 $apk_dest
else
echo "Copy apk failed, " + $apk_source1 + " does not exist"
clean_workspace
exit 1
fi
fi
fi
cd $BUILD_ROOT
zip -Drq $BUILD_ROOT/$name-$version-$sub_version.$pack_type.zip opt/
if [ $? -ne 0 ];then
echo "Create zip package fail... >>>>>>>>>>>>#>>>>>>>>>>>>>"
clean_workspace
exit 1
fi
# copy zip file
echo "copy package from workspace... >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>"
cd $SRC_ROOT
mkdir -p $dest_dir
cp -f $BUILD_ROOT/$name-$version-$sub_version.$pack_type.zip $dest_dir
# clean workspace
clean_workspace
# validate
echo "checking result... >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>"
if [ ! -f $dest_dir/$name-$version-$sub_version.$pack_type.zip ];then
echo "------------------------------ FAILED to build $name packages --------------------------"
exit 1
fi
echo "------------------------------ Done to build $name packages --------------------------"
cd $dest_dir
ls *.zip 2>/dev/null
|
wanghongjuan/crosswalk-test-suite
|
misc/webapi-service-tests/pack.sh
|
Shell
|
bsd-3-clause
| 9,653 |
# SPDX-License-Identifier: BSD-3-Clause
source helpers.sh
pcr_ids=0
alg_pcr_policy=sha256
file_pcr_value=pcr.bin
file_policy=policy.data
file_authorized_policy_1=auth_policy_1.data
file_authorized_policy_2=auth_policy_2.data
file_session_file=session.dat
file_private_key=private.pem
file_public_key=public.pem
file_verifying_key_public=verifying_key_public
file_verifying_key_name=verifying_key_name
file_verifying_key_ctx=verifying_key_ctx
file_policyref=policyref
cleanup() {
rm -f $file_pcr_value $file_policy $file_session_file $file_private_key \
$file_public_key $file_verifying_key_public $file_verifying_key_name \
$file_verifying_key_ctx $file_policyref $file_authorized_policy_1 \
$file_authorized_policy_2
tpm2 flushcontext $file_session_file 2>/dev/null || true
if [ "${1}" != "no-shutdown" ]; then
shut_down
fi
}
trap cleanup EXIT
start_up
cleanup "no-shutdown"
generate_policy_authorize () {
tpm2 startauthsession -Q -S $file_session_file
tpm2 policyauthorize -Q -S $file_session_file -L $3 -i $1 -q $2 -n $4
tpm2 flushcontext $file_session_file
rm $file_session_file
}
openssl genrsa -out $file_private_key 2048 2>/dev/null
openssl rsa -in $file_private_key -out $file_public_key -pubout 2>/dev/null
tpm2 loadexternal -G rsa -C n -u $file_public_key -c $file_verifying_key_ctx \
-n $file_verifying_key_name
dd if=/dev/urandom of=$file_policyref bs=1 count=32 2>/dev/null
tpm2 pcrread -Q -o $file_pcr_value ${alg_pcr_policy}:${pcr_ids}
tpm2 startauthsession -Q -S $file_session_file
tpm2 policypcr -Q -S $file_session_file -l ${alg_pcr_policy}:${pcr_ids} \
-f $file_pcr_value -L $file_policy
tpm2 flushcontext $file_session_file
rm $file_session_file
generate_policy_authorize $file_policy $file_policyref \
$file_authorized_policy_1 $file_verifying_key_name
tpm2 pcrextend \
0:sha256=e7011b851ee967e2d24e035ae41b0ada2decb182e4f7ad8411f2bf564c56fd6f
tpm2 pcrread -Q -o $file_pcr_value ${alg_pcr_policy}:${pcr_ids}
tpm2 startauthsession -Q -S $file_session_file
tpm2 policypcr -Q -S $file_session_file -l ${alg_pcr_policy}:${pcr_ids} \
-f $file_pcr_value -L $file_policy
tpm2 flushcontext $file_session_file
rm $file_session_file
generate_policy_authorize $file_policy $file_policyref \
$file_authorized_policy_2 $file_verifying_key_name
diff $file_authorized_policy_1 $file_authorized_policy_2
exit 0
|
01org/tpm2.0-tools
|
test/integration/tests/abrmd_policyauthorize.sh
|
Shell
|
bsd-3-clause
| 2,385 |
#!/bin/sh -ex
mirage configure config_server.ml --xen --no-opam
make
mirage configure config_client.ml --xen --no-opam
make
|
edwintorok/ocaml-conduit
|
tests/mirage/vchan/build.sh
|
Shell
|
isc
| 124 |
#!/bin/bash
# Requires wget `brew install wget`
cd "$( dirname "${BASH_SOURCE[0]}" )"
rm -f foosey.db
wget http://api.foosey.futbol/foosey.db
|
brikr/foosey
|
backend/dbFresh.sh
|
Shell
|
mit
| 145 |
#!/usr/bin/env bash
# because this script is being source-ed via .travis.yaml,
# we need to restore the original options so that that we don't interfere with
# travis' internals
readonly ORIGINAL_SHELL_OPTIONS=$(set +o)
# this script is extra noisy and used in many places during the build so we suppress the trace with +x to reduce the noise
set -u -e -o pipefail
# sets and optionally prints environmental variable
# usage: setEnvVar variableName variableValue
function setEnvVar() {
local name=$1
local value=$2
if [[ ${print} == "print" ]]; then
echo ${name}=${value}
fi
export ${name}=${value}
}
# use BASH_SOURCE so that we get the right path when this script is called AND source-d
readonly thisDir=$(cd $(dirname ${BASH_SOURCE[0]}); pwd)
readonly print=${1:-}
# print bash version just so that we know what is running all the scripts
if [[ ${print} == "print" ]]; then
bash --version
fi
#######################
# CUSTOM GLOBALS #
#######################
setEnvVar NODE_VERSION 6.9.5
setEnvVar NPM_VERSION 3.10.7 # do not upgrade to >3.10.8 unless https://github.com/npm/npm/issues/14042 is resolved
setEnvVar YARN_VERSION 0.21.3
setEnvVar CHROMIUM_VERSION 433059 # Chrome 53 linux stable, see https://www.chromium.org/developers/calendar
setEnvVar SAUCE_CONNECT_VERSION 4.3.11
setEnvVar PROJECT_ROOT $(cd ${thisDir}/../..; pwd)
if [[ ${TRAVIS:-} ]]; then
case ${CI_MODE} in
js)
setEnvVar KARMA_JS_BROWSERS ChromeNoSandbox
;;
saucelabs_required)
setEnvVar KARMA_JS_BROWSERS `node -e "console.log(require('/home/travis/build/angular/angular/browser-providers.conf').sauceAliases.CI_REQUIRED.join(','))"`
;;
browserstack_required)
setEnvVar KARMA_JS_BROWSERS `node -e "console.log(require('/home/travis/build/angular/angular/browser-providers.conf').browserstackAliases.CI_REQUIRED.join(','))"`
;;
saucelabs_optional)
setEnvVar KARMA_JS_BROWSERS `node -e "console.log(require('/home/travis/build/angular/angular/browser-providers.conf').sauceAliases.CI_OPTIONAL.join(','))"`
;;
browserstack_optional)
setEnvVar KARMA_JS_BROWSERS `node -e "console.log(require('/home/travis/build/angular/angular/browser-providers.conf').browserstackAliases.CI_OPTIONAL.join(','))"`
;;
aio)
# Due to network latency/server performance, the min accepted PWA score
# on previews is a little lower than on staging.
setEnvVar MIN_PWA_SCORE_PREVIEW 93
setEnvVar MIN_PWA_SCORE_STAGING 95
;;
esac
else
setEnvVar KARMA_JS_BROWSERS Chrome
fi
if [[ ${TRAVIS:-} ]]; then
# used by xvfb that is used by Chromium
setEnvVar DISPLAY :99.0
  # Use a newer version of GCC that is required to compile native npm modules for Node v4+ on Ubuntu Precise
# more info: https://docs.travis-ci.com/user/languages/javascript-with-nodejs#Node.js-v4-(or-io.js-v3)-compiler-requirements
setEnvVar CXX g++-4.8
# If NGBUILDS_IO_KEY not set yet, export the NGBUILDS_IO_KEY using the JWT token that Travis generated and exported for SAUCE_ACCESS_KEY.
# This is a workaround for travis-ci/travis-ci#7223
# WARNING: NGBUILDS_IO_KEY should NOT be printed
export NGBUILDS_IO_KEY=${NGBUILDS_IO_KEY:-$SAUCE_ACCESS_KEY}
# Personal token generated by mary-poppins, with only `read_org` permission
export GITHUB_TEAM_MEMBERSHIP_CHECK_KEY=35fc4093c1f29a2ddaf60cce5d57065454180bf6
# Used by karma and karma-chrome-launcher
# In order to have a meaningful SauceLabs badge on the repo page,
# the angular2-ci account is used only when pushing commits to master;
# in all other cases, the regular angular-ci account is used.
if [ "${TRAVIS_PULL_REQUEST}" = "false" ] && [ "${TRAVIS_BRANCH}" = "master" ]; then
setEnvVar SAUCE_USERNAME angular2-ci
    # - not using setEnvVar so that we don't print the key
# - we overwrite the value set by Travis JWT addon here to work around travis-ci/travis-ci#7223 for NGBUILDS_IO_KEY
export SAUCE_ACCESS_KEY=693ebc16208a-0b5b-1614-8d66-a2662f4e
else
setEnvVar SAUCE_USERNAME angular-ci
    # - not using setEnvVar so that we don't print the key
# - we overwrite the value set by Travis JWT addon here to work around travis-ci/travis-ci#7223 for NGBUILDS_IO_KEY
export SAUCE_ACCESS_KEY=9b988f434ff8-fbca-8aa4-4ae3-35442987
fi
setEnvVar BROWSER_STACK_USERNAME angularteam1
  # not using setEnvVar so that we don't print the key
export BROWSER_STACK_ACCESS_KEY=BWCd4SynLzdDcv8xtzsB
setEnvVar CHROME_BIN ${HOME}/.chrome/chromium/chrome-linux/chrome
setEnvVar BROWSER_PROVIDER_READY_FILE /tmp/angular-build/browser-provider-tunnel-init.lock
fi
#######################
# PREEXISTING GLOBALS #
#######################
# Prepend `~/.yarn/bin` to the PATH
setEnvVar PATH $HOME/.yarn/bin:$PATH
# Append dist/all to the NODE_PATH so that the cjs module resolver finds the packages that use
# absolute module ids (e.g. @angular/core)
setEnvVar NODE_PATH ${NODE_PATH:-}:${PROJECT_ROOT}/dist/all:${PROJECT_ROOT}/dist/tools
setEnvVar LOGS_DIR /tmp/angular-build/logs
# strip leading "/home/travis/build/angular/angular/" or "./" path. Could this be done in one shot?
CURRENT_SHELL_SOURCE_FILE=${BASH_SOURCE#${PROJECT_ROOT}/}
export CURRENT_SHELL_SOURCE_FILE=${CURRENT_SHELL_SOURCE_FILE#./}
# Prefix xtrace output with file name/line and optionally function name
# http://wiki.bash-hackers.org/scripting/debuggingtips#making_xtrace_more_useful
# TODO(i): I couldn't figure out how to set this via `setEnvVar` so I just set it manually
export PS4='+(${CURRENT_SHELL_SOURCE_FILE}:${LINENO}): ${FUNCNAME[0]:+${FUNCNAME[0]}(): }'
if [[ ${print} == "print" ]]; then
echo PS4=${PS4}
fi
eval "${ORIGINAL_SHELL_OPTIONS}"
|
diestrin/angular
|
scripts/ci/env.sh
|
Shell
|
mit
| 5,723 |
#!/bin/sh
set -e
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
install_framework()
{
if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
local source="${BUILT_PRODUCTS_DIR}/$1"
elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
elif [ -r "$1" ]; then
local source="$1"
fi
local destination="${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
if [ -L "${source}" ]; then
echo "Symlinked..."
source="$(readlink "${source}")"
fi
  # use filter instead of exclude so missing patterns don't throw errors
echo "rsync -av --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
rsync -av --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
local basename
basename="$(basename -s .framework "$1")"
binary="${destination}/${basename}.framework/${basename}"
if ! [ -r "$binary" ]; then
binary="${destination}/${basename}"
fi
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
strip_invalid_archs "$binary"
fi
# Resign the code if required by the build settings to avoid unstable apps
code_sign_if_enabled "${destination}/$(basename "$1")"
# Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]})
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
code_sign_if_enabled "${destination}/${lib}"
done
fi
}
# Signs a framework with the provided identity
code_sign_if_enabled() {
if [ -n "${EXPANDED_CODE_SIGN_IDENTITY}" -a "${CODE_SIGNING_REQUIRED}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
# Use the current code_sign_identitiy
echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
echo "/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} --preserve-metadata=identifier,entitlements \"$1\""
/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} --preserve-metadata=identifier,entitlements "$1"
fi
}
# Strip invalid architectures
strip_invalid_archs() {
binary="$1"
# Get architectures for current file
archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | rev)"
stripped=""
for arch in $archs; do
if ! [[ "${VALID_ARCHS}" == *"$arch"* ]]; then
# Strip non-valid architectures in-place
lipo -remove "$arch" -output "$binary" "$binary" || exit 1
stripped="$stripped $arch"
fi
done
if [[ "$stripped" ]]; then
echo "Stripped $binary of architectures:$stripped"
fi
}
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_framework "Pods/FormatterKit.framework"
install_framework "Pods/HackerNewsKit.framework"
install_framework "Pods/JHChainableAnimations.framework"
install_framework "Pods/MBProgressHUD.framework"
install_framework "Pods/MWFeedParser.framework"
install_framework "Pods/OCMock.framework"
install_framework "Pods/ObjectiveSugar.framework"
install_framework "Pods/Realm.framework"
install_framework "Pods/SFAdditions.framework"
install_framework "Pods/SFSlideOutMenu.framework"
install_framework "Pods/SWTableViewCell.framework"
install_framework "Pods/TTTAttributedLabel.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_framework "Pods/FormatterKit.framework"
install_framework "Pods/HackerNewsKit.framework"
install_framework "Pods/JHChainableAnimations.framework"
install_framework "Pods/MBProgressHUD.framework"
install_framework "Pods/MWFeedParser.framework"
install_framework "Pods/OCMock.framework"
install_framework "Pods/ObjectiveSugar.framework"
install_framework "Pods/Realm.framework"
install_framework "Pods/SFAdditions.framework"
install_framework "Pods/SFSlideOutMenu.framework"
install_framework "Pods/SWTableViewCell.framework"
install_framework "Pods/TTTAttributedLabel.framework"
fi
|
skyefreeman/Cognition
|
Pods/Target Support Files/Pods/Pods-frameworks.sh
|
Shell
|
mit
| 4,646 |
#!/bin/sh -x
IDX=$1
INPUT_SHARED_1_1=$2
INPUT_SHARED_1_2=$3
INPUT_SHARED_1_3=$4
INPUT_SHARED_1_4=$5
INPUT_SHARED_1_5=$6
OUTPUT_1_1_I="output_1_1_$IDX.txt"
OUTPUT_1_2_I="output_1_2_$IDX.txt"
OUTPUT_1_3_I="output_1_3_$IDX.txt"
cat $INPUT_SHARED_1_1 >> $OUTPUT_1_1_I; echo "1:$IDX" >> $OUTPUT_1_1_I
cat $INPUT_SHARED_1_2 >> $OUTPUT_1_2_I; echo "1:$IDX" >> $OUTPUT_1_2_I
cat $INPUT_SHARED_1_3 >> $OUTPUT_1_3_I; echo "1:$IDX" >> $OUTPUT_1_3_I
# pattern to select suitable profiles
regex_1='.*sander.MPI -O -i mdshort.in'
regex_2='.*sander.MPI -O -i min.in'
regex_3='pyCoCo'
regex_4='python postexec.py .*'
regex="$regex_1"
# select and count profiles
profiles=`grep -le "$regex" $HOME/bin.rp/samples/*.prof`
n_profiles=`echo $profiles | wc -w`
# select a random profile
n_random=`awk "BEGIN{srand();print int($n_profiles * rand()) + 1;}"`
profile=`for profile in $profiles; do echo $profile; done | head -n $n_random | tail -n 1`
samples="${profile%.prof}.json"
test -f "$samples" || echo "$samples does not exist"
test -f "$samples" || exit 1
echo "sampling $samples"
. $HOME/bin.rp/ve/bin/activate
radical-synapse-emulate -i "$samples" || true
|
radical-experiments/AIMES-Swift
|
viveks_workflow/stage_1.sh
|
Shell
|
mit
| 1,153 |
#!/usr/bin/env zsh
hash gh 2>/dev/null || { echo "Please install gh before installing gh extensions." && exit 0; }
extensions=(
davidraviv/gh-clean-branches # Safely delete local branches that have no remotes and no hanging changes.
)
echo "${extensions[@]///}" | xargs -n1 gh extension install
|
KenPowers/dotfiles
|
vendor/packages/install-gh.sh
|
Shell
|
mit
| 300 |