code stringlengths 2–1.05M | repo_name stringlengths 5–110 | path stringlengths 3–922 | language stringclasses 1 value | license stringclasses 15 values | size int64 2–1.05M |
---|---|---|---|---|---|
#!/bin/sh
set -eux
find "$LIQUIDSOAP_DIRECTORY" -type f -name '*.mp3' \
>/usr/share/liquidsoap/playlist.m3u
cat <<__EOC__ | exec liquidsoap -
output.icecast (
%mp3,
host = "133.130.91.27",
port = ${ICECAST2_SERVER_PORT},
password = "${ICECAST2_SOURCE_PASSWORD}",
mount = "/${LIQUIDSOAP_MOUNTPOINT}",
name = "/${LIQUIDSOAP_MOUNTPOINT}",
encoding = "UTF-8",
mksafe(playlist("/usr/share/liquidsoap/playlist.m3u"))
)
__EOC__
| 10sr/machine-setups | _conoha/docker-compose/icecast2/entrypoint.sh | Shell | unlicense | 461 |
#!/bin/bash
set -eo pipefail
gem install bundler
rm -rf lib
cd function
rm -f Gemfile.lock
bundle config set path '../lib'
bundle install
| awsdocs/aws-doc-sdk-examples | lambda_functions/blank-ruby/2-build-layer.sh | Shell | apache-2.0 | 137 |
#!/bin/sh
python -c "
import sys, django, os
sys.stderr.write('Using Python version {0} from {1}\n'.format(sys.version[:5], sys.executable))
sys.stderr.write('Using Django version {0} from {1}\n'.format(
django.get_version(), os.path.dirname(os.path.abspath(django.__file__))))
"
exec python -Wd example/manage.py test "$@"
| edoburu/django-fluent-comments | runtests.sh | Shell | apache-2.0 | 330 |
#!/usr/bin/env bash
./docker-run.sh 6
| bmoliveira/snake-yaml | docker-run-jdk6.sh | Shell | apache-2.0 | 39 |
#!/bin/bash
# Copyright 2012-2013 Johns Hopkins University (Author: Daniel Povey);
# Arnab Ghoshal
# 2014 Guoguo Chen
# 2015 Hainan Xu
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
# WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
# MERCHANTABILITY OR NON-INFRINGEMENT.
# See the Apache 2 License for the specific language governing permissions and
# limitations under the License.
# This script prepares a directory such as data/lang/, in the standard format,
# given a source directory containing a dictionary lexicon.txt in a form like:
# word phone1 phone2 ... phoneN
# per line (alternate prons would be separate lines), or a dictionary with probabilities
# called lexiconp.txt in a form:
# word pron-prob phone1 phone2 ... phoneN
# (with 0.0 < pron-prob <= 1.0); note: if lexiconp.txt exists, we use it even if
# lexicon.txt exists.
# and also files silence_phones.txt, nonsilence_phones.txt, optional_silence.txt
# and extra_questions.txt
# Here, silence_phones.txt and nonsilence_phones.txt are lists of silence and
# non-silence phones respectively (where silence includes various kinds of
# noise, laugh, cough, filled pauses etc., and nonsilence phones includes the
# "real" phones.)
# In each line of those files is a list of phones, and the phones on each line
# are assumed to correspond to the same "base phone", i.e. they will be
# different stress or tone variations of the same basic phone.
# The file "optional_silence.txt" contains just a single phone (typically SIL)
# which is used for optional silence in the lexicon.
# extra_questions.txt might be empty; typically will consist of lists of phones,
# all members of each list with the same stress or tone; and also possibly a
# list for the silence phones. This will augment the automatically generated
# questions (note: the automatically generated ones will treat all the
# stress/tone versions of a phone the same, so will not "get to ask" about
# stress or tone).
#
# This script adds word-position-dependent phones and constructs a host of other
# derived files, that go in data/lang/.
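# As a purely illustrative example (hypothetical word and phones, not taken from
# any particular dictionary), the two input formats would look like:
# lexicon.txt:   YES Y EH S
# lexiconp.txt:  YES 1.0 Y EH S
# where 1.0 is the pronunciation probability of that entry.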
# Begin configuration section.
num_sil_states=5
num_nonsil_states=3
num_word_disambig_syms=1
position_dependent_phones=true
# set position_dependent_phones to false when position-dependent phones and word_boundary.txt
# have already been generated by another source
share_silence_phones=false # if true, then share pdfs of different silence
# phones together.
sil_prob=0.5
phone_symbol_table= # if set, use a specified phones.txt file.
# end configuration sections
. utils/parse_options.sh
if [ $# -ne 4 ]; then
echo "usage: utils/prepare_lang.sh <dict-src-dir> <oov-dict-entry> <tmp-dir> <lang-dir>"
echo "e.g.: utils/prepare_lang.sh data/local/dict <SPOKEN_NOISE> data/local/lang data/lang"
echo "<dict-src-dir> should contain the following files:"
echo " extra_questions.txt lexicon.txt nonsilence_phones.txt optional_silence.txt silence_phones.txt"
echo "See http://kaldi-asr.org/doc/data_prep.html#data_prep_lang_creating for more info."
echo "options: "
echo " --num-sil-states <number of states> # default: 5, #states in silence models."
echo " --num-nonsil-states <number of states> # default: 3, #states in non-silence models."
echo " --position-dependent-phones (true|false) # default: true; if true, use _B, _E, _S & _I"
echo " # markers on phones to indicate word-internal positions. "
echo " --share-silence-phones (true|false) # default: false; if true, share pdfs of "
echo " # all non-silence phones. "
echo " --sil-prob <probability of silence> # default: 0.5 [must have 0 <= silprob < 1]"
echo " --phone-symbol-table <filename> # default: \"\"; if not empty, use the provided "
echo " # phones.txt as phone symbol table. This is useful "
echo " # if you use a new dictionary for the existing setup."
exit 1;
fi
srcdir=$1
oov_word=$2
tmpdir=$3
dir=$4
mkdir -p $dir $tmpdir $dir/phones
silprob=false
[ -f $srcdir/lexiconp_silprob.txt ] && silprob=true
[ -f path.sh ] && . ./path.sh
! utils/validate_dict_dir.pl $srcdir && \
echo "*Error validating directory $srcdir*" && exit 1;
if [[ ! -f $srcdir/lexicon.txt ]]; then
echo "**Creating $dir/lexicon.txt from $dir/lexiconp.txt"
perl -ape 's/(\S+\s+)\S+\s+(.+)/$1$2/;' < $srcdir/lexiconp.txt > $srcdir/lexicon.txt || exit 1;
fi
if [[ ! -f $srcdir/lexiconp.txt ]]; then
echo "**Creating $srcdir/lexiconp.txt from $srcdir/lexicon.txt"
perl -ape 's/(\S+\s+)(.+)/${1}1.0\t$2/;' < $srcdir/lexicon.txt > $srcdir/lexiconp.txt || exit 1;
fi
if ! utils/validate_dict_dir.pl $srcdir >&/dev/null; then
utils/validate_dict_dir.pl $srcdir # show the output.
echo "Validation failed (second time)"
exit 1;
fi
# phones.txt file provided, we will do some sanity check here.
if [[ ! -z $phone_symbol_table ]]; then
# Checks if we have position dependent phones
n1=`cat $phone_symbol_table | grep -v -E "^#[0-9]+$" | cut -d' ' -f1 | sort -u | wc -l`
n2=`cat $phone_symbol_table | grep -v -E "^#[0-9]+$" | cut -d' ' -f1 | sed 's/_[BIES]$//g' | sort -u | wc -l`
$position_dependent_phones && [ $n1 -eq $n2 ] &&\
echo "$0: Position dependent phones requested, but not in provided phone symbols" && exit 1;
! $position_dependent_phones && [ $n1 -ne $n2 ] &&\
echo "$0: Position dependent phones not requested, but appear in the provided phones.txt" && exit 1;
# Checks if the phone sets match.
cat $srcdir/{,non}silence_phones.txt | awk -v f=$phone_symbol_table '
BEGIN { while ((getline < f) > 0) { sub(/_[BEIS]$/, "", $1); phones[$1] = 1; }}
{ for (x = 1; x <= NF; ++x) { if (!($x in phones)) {
print "Phone appears in the lexicon but not in the provided phones.txt: "$x; exit 1; }}}' || exit 1;
fi
if $position_dependent_phones; then
# Create $tmpdir/lexiconp.txt from $srcdir/lexiconp.txt (or
# $tmpdir/lexiconp_silprob.txt from $srcdir/lexiconp_silprob.txt) by
# adding the markers _B, _E, _S, _I depending on word position.
# In this recipe, these markers apply to silence also.
# Do this starting from lexiconp.txt only.
if "$silprob"; then
perl -ane '@A=split(" ",$_); $w = shift @A; $p = shift @A; $silword_p = shift @A;
$wordsil_f = shift @A; $wordnonsil_f = shift @A; @A>0||die;
if(@A==1) { print "$w $p $silword_p $wordsil_f $wordnonsil_f $A[0]_S\n"; }
else { print "$w $p $silword_p $wordsil_f $wordnonsil_f $A[0]_B ";
for($n=1;$n<@A-1;$n++) { print "$A[$n]_I "; } print "$A[$n]_E\n"; } ' \
< $srcdir/lexiconp_silprob.txt > $tmpdir/lexiconp_silprob.txt
else
perl -ane '@A=split(" ",$_); $w = shift @A; $p = shift @A; @A>0||die;
if(@A==1) { print "$w $p $A[0]_S\n"; } else { print "$w $p $A[0]_B ";
for($n=1;$n<@A-1;$n++) { print "$A[$n]_I "; } print "$A[$n]_E\n"; } ' \
< $srcdir/lexiconp.txt > $tmpdir/lexiconp.txt || exit 1;
fi
# create $tmpdir/phone_map.txt
# this has the format (on each line)
# <original phone> <version 1 of original phone> <version 2> ...
# where the versions depend on the position of the phone within a word.
# For instance, we'd have:
# AA AA_B AA_E AA_I AA_S
# for (B)egin, (E)nd, (I)nternal and (S)ingleton
# and in the case of silence
# SIL SIL SIL_B SIL_E SIL_I SIL_S
# [because SIL on its own is one of the variants; this is for when it doesn't
# occur inside a word but as an option in the lexicon.]
# This phone map expands the phone lists into all the word-position-dependent
# versions of the phone lists.
cat <(set -f; for x in `cat $srcdir/silence_phones.txt`; do for y in "" "" "_B" "_E" "_I" "_S"; do echo -n "$x$y "; done; echo; done) \
<(set -f; for x in `cat $srcdir/nonsilence_phones.txt`; do for y in "" "_B" "_E" "_I" "_S"; do echo -n "$x$y "; done; echo; done) \
> $tmpdir/phone_map.txt
else
if "$silprob"; then
cp $srcdir/lexiconp_silprob.txt $tmpdir/lexiconp_silprob.txt
else
cp $srcdir/lexiconp.txt $tmpdir/lexiconp.txt
fi
cat $srcdir/silence_phones.txt $srcdir/nonsilence_phones.txt | \
awk '{for(n=1;n<=NF;n++) print $n; }' > $tmpdir/phones
paste -d' ' $tmpdir/phones $tmpdir/phones > $tmpdir/phone_map.txt
fi
mkdir -p $dir/phones # various sets of phones...
# Sets of phones for use in clustering, and making monophone systems.
if $share_silence_phones; then
# build a roots file that will force all the silence phones to share the
# same pdf's. [three distinct states, only the transitions will differ.]
# 'shared'/'not-shared' means, do we share the 3 states of the HMM
# in the same tree-root?
# Sharing across models(phones) is achieved by writing several phones
# into one line of roots.txt (shared/not-shared doesn't affect this).
# 'not-shared not-split' means we have separate tree roots for the 3 states,
# but we never split the tree so they remain stumps,
# so all phones in the line correspond to the same model.
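# As an illustration (hypothetical phone set: one silence phone SIL, one
# non-silence phone AA, position-dependent phones enabled), roots.txt would
# then contain lines like:
# not-shared not-split SIL SIL_B SIL_E SIL_I SIL_S
# shared split AA_B AA_E AA_I AA_S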
cat $srcdir/silence_phones.txt | awk '{printf("%s ", $0); } END{printf("\n");}' | cat - $srcdir/nonsilence_phones.txt | \
utils/apply_map.pl $tmpdir/phone_map.txt > $dir/phones/sets.txt
cat $dir/phones/sets.txt | \
awk '{if(NR==1) print "not-shared", "not-split", $0; else print "shared", "split", $0;}' > $dir/phones/roots.txt
else
# different silence phones will have different GMMs. [note: here, all "shared split" means
# is that we may have one GMM for all the states, or we can split on states. because they're
# context-independent phones, they don't see the context.]
cat $srcdir/{,non}silence_phones.txt | utils/apply_map.pl $tmpdir/phone_map.txt > $dir/phones/sets.txt
cat $dir/phones/sets.txt | awk '{print "shared", "split", $0;}' > $dir/phones/roots.txt
fi
cat $srcdir/silence_phones.txt | utils/apply_map.pl $tmpdir/phone_map.txt | \
awk '{for(n=1;n<=NF;n++) print $n;}' > $dir/phones/silence.txt
cat $srcdir/nonsilence_phones.txt | utils/apply_map.pl $tmpdir/phone_map.txt | \
awk '{for(n=1;n<=NF;n++) print $n;}' > $dir/phones/nonsilence.txt
cp $srcdir/optional_silence.txt $dir/phones/optional_silence.txt
cp $dir/phones/silence.txt $dir/phones/context_indep.txt
# if extra_questions.txt is empty, it's OK.
cat $srcdir/extra_questions.txt 2>/dev/null | utils/apply_map.pl $tmpdir/phone_map.txt \
>$dir/phones/extra_questions.txt
# Want extra questions about the word-start/word-end stuff. Make it separate for
# silence and non-silence. Probably doesn't matter, as silence will rarely
# be inside a word.
if $position_dependent_phones; then
for suffix in _B _E _I _S; do
(set -f; for x in `cat $srcdir/nonsilence_phones.txt`; do echo -n "$x$suffix "; done; echo) >>$dir/phones/extra_questions.txt
done
for suffix in "" _B _E _I _S; do
(set -f; for x in `cat $srcdir/silence_phones.txt`; do echo -n "$x$suffix "; done; echo) >>$dir/phones/extra_questions.txt
done
fi
# add disambig symbols to the lexicon in $tmpdir/lexiconp.txt
# and produce $tmpdir/lexicon_*disambig.txt
if "$silprob"; then
ndisambig=`utils/add_lex_disambig.pl --pron-probs --sil-probs $tmpdir/lexiconp_silprob.txt $tmpdir/lexiconp_silprob_disambig.txt`
else
ndisambig=`utils/add_lex_disambig.pl --pron-probs $tmpdir/lexiconp.txt $tmpdir/lexiconp_disambig.txt`
fi
ndisambig=$[$ndisambig+1]; # add one disambig symbol for silence in lexicon FST.
echo $ndisambig > $tmpdir/lex_ndisambig
# Format of lexiconp_disambig.txt:
# !SIL 1.0 SIL_S
# <SPOKEN_NOISE> 1.0 SPN_S #1
# <UNK> 1.0 SPN_S #2
# <NOISE> 1.0 NSN_S
# !EXCLAMATION-POINT 1.0 EH2_B K_I S_I K_I L_I AH0_I M_I EY1_I SH_I AH0_I N_I P_I OY2_I N_I T_E
( for n in `seq 0 $ndisambig`; do echo '#'$n; done ) >$dir/phones/disambig.txt
# Create phone symbol table.
if [[ ! -z $phone_symbol_table ]]; then
start_symbol=`grep \#0 $phone_symbol_table | awk '{print $2}'`
echo "<eps>" | cat - $dir/phones/{silence,nonsilence}.txt | awk -v f=$phone_symbol_table '
BEGIN { while ((getline < f) > 0) { phones[$1] = $2; }} { print $1" "phones[$1]; }' | sort -k2 -g |\
cat - <(cat $dir/phones/disambig.txt | awk -v x=$start_symbol '{n=x+NR-1; print $1, n;}') > $dir/phones.txt
else
echo "<eps>" | cat - $dir/phones/{silence,nonsilence,disambig}.txt | \
awk '{n=NR-1; print $1, n;}' > $dir/phones.txt
fi
# Create a file that describes the word-boundary information for
# each phone. 5 categories.
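# For illustration (hypothetical phones), word_boundary.txt would contain lines
# such as "AA_B begin", "AA_I internal", "AA_E end", "AA_S singleton" and
# "SIL nonword".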
if $position_dependent_phones; then
cat $dir/phones/{silence,nonsilence}.txt | \
awk '/_I$/{print $1, "internal"; next;} /_B$/{print $1, "begin"; next; }
/_S$/{print $1, "singleton"; next;} /_E$/{print $1, "end"; next; }
{print $1, "nonword";} ' > $dir/phones/word_boundary.txt
else
# word_boundary.txt might have been generated by another source
[ -f $srcdir/word_boundary.txt ] && cp $srcdir/word_boundary.txt $dir/phones/word_boundary.txt
fi
# Create word symbol table.
# <s> and </s> are only needed due to the need to rescore lattices with
# ConstArpaLm format language model. They do not normally appear in G.fst or
# L.fst.
if "$silprob"; then
# remove the silprob
cat $tmpdir/lexiconp_silprob.txt |\
awk '{
for(i=1; i<=NF; i++) {
if(i!=3 && i!=4 && i!=5) printf("%s\t", $i); if(i==NF) print "";
}
}' > $tmpdir/lexiconp.txt
fi
cat $tmpdir/lexiconp.txt | awk '{print $1}' | sort | uniq | awk '
BEGIN {
print "<eps> 0";
}
{
if ($1 == "<s>") {
print "<s> is in the vocabulary!" | "cat 1>&2"
exit 1;
}
if ($1 == "</s>") {
print "</s> is in the vocabulary!" | "cat 1>&2"
exit 1;
}
printf("%s %d\n", $1, NR);
}
END {
printf("#0 %d\n", NR+1);
printf("<s> %d\n", NR+2);
printf("</s> %d\n", NR+3);
}' > $dir/words.txt || exit 1;
# format of $dir/words.txt:
#<eps> 0
#!EXCLAMATION-POINT 1
#!SIL 2
#"CLOSE-QUOTE 3
#...
silphone=`cat $srcdir/optional_silence.txt` || exit 1;
[ -z "$silphone" ] && \
( echo "You have no optional-silence phone; it is required in the current scripts"
echo "but you may use the option --sil-prob 0.0 to stop it being used." ) && \
exit 1;
# create $dir/phones/align_lexicon.{txt,int}.
# This is the new-new style of lexicon aligning.
# First remove pron-probs from the lexicon.
perl -ape 's/(\S+\s+)\S+\s+(.+)/$1$2/;' <$tmpdir/lexiconp.txt >$tmpdir/align_lexicon.txt
# Note: here, $silphone will have no suffix e.g. _S because it occurs as optional-silence,
# and is not part of a word.
[ ! -z "$silphone" ] && echo "<eps> $silphone" >> $tmpdir/align_lexicon.txt
cat $tmpdir/align_lexicon.txt | \
perl -ane '@A = split; print $A[0], " ", join(" ", @A), "\n";' | sort | uniq > $dir/phones/align_lexicon.txt
# create phones/align_lexicon.int
cat $dir/phones/align_lexicon.txt | utils/sym2int.pl -f 3- $dir/phones.txt | \
utils/sym2int.pl -f 1-2 $dir/words.txt > $dir/phones/align_lexicon.int
# Create the basic L.fst without disambiguation symbols, for use
# in training.
if $silprob; then
# Usually it's the same as having a fixed-prob L.fst
# it matters a little bit in discriminative trainings
utils/make_lexicon_fst_silprob.pl $tmpdir/lexiconp_silprob.txt $srcdir/silprob.txt $silphone "<eps>" | \
fstcompile --isymbols=$dir/phones.txt --osymbols=$dir/words.txt \
--keep_isymbols=false --keep_osymbols=false | \
fstarcsort --sort_type=olabel > $dir/L.fst || exit 1;
else
utils/make_lexicon_fst.pl --pron-probs $tmpdir/lexiconp.txt $sil_prob $silphone | \
fstcompile --isymbols=$dir/phones.txt --osymbols=$dir/words.txt \
--keep_isymbols=false --keep_osymbols=false | \
fstarcsort --sort_type=olabel > $dir/L.fst || exit 1;
fi
# The file oov.txt contains a word that we will map any OOVs to during
# training.
echo "$oov_word" > $dir/oov.txt || exit 1;
cat $dir/oov.txt | utils/sym2int.pl $dir/words.txt >$dir/oov.int || exit 1;
# integer version of oov symbol, used in some scripts.
# the file wdisambig.txt contains a (line-by-line) list of the text-form of the
# disambiguation symbols that are used in the grammar and passed through by the
# lexicon. At this stage it's hardcoded as '#0', but we're laying the groundwork
# for more generality (which probably would be added by another script).
# wdisambig_words.int contains the corresponding list interpreted by the
# symbol table words.txt, and wdisambig_phones.int contains the corresponding
# list interpreted by the symbol table phones.txt.
echo '#0' >$dir/phones/wdisambig.txt
utils/sym2int.pl $dir/phones.txt <$dir/phones/wdisambig.txt >$dir/phones/wdisambig_phones.int
utils/sym2int.pl $dir/words.txt <$dir/phones/wdisambig.txt >$dir/phones/wdisambig_words.int
# Create these lists of phones in colon-separated integer list form too,
# for purposes of being given to programs as command-line options.
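# For example (hypothetical integer ids), silence.csl would hold a single line
# such as "1:2:3", i.e. the integer ids of the silence phones joined by colons.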
for f in silence nonsilence optional_silence disambig context_indep; do
utils/sym2int.pl $dir/phones.txt <$dir/phones/$f.txt >$dir/phones/$f.int
utils/sym2int.pl $dir/phones.txt <$dir/phones/$f.txt | \
awk '{printf(":%d", $1);} END{printf "\n"}' | sed s/:// > $dir/phones/$f.csl || exit 1;
done
for x in sets extra_questions; do
utils/sym2int.pl $dir/phones.txt <$dir/phones/$x.txt > $dir/phones/$x.int || exit 1;
done
utils/sym2int.pl -f 3- $dir/phones.txt <$dir/phones/roots.txt \
> $dir/phones/roots.int || exit 1;
#if $position_dependent_phones; then
if [ -f $dir/phones/word_boundary.txt ]; then
utils/sym2int.pl -f 1 $dir/phones.txt <$dir/phones/word_boundary.txt \
> $dir/phones/word_boundary.int || exit 1;
fi
silphonelist=`cat $dir/phones/silence.csl`
nonsilphonelist=`cat $dir/phones/nonsilence.csl`
utils/gen_topo.pl $num_nonsil_states $num_sil_states $nonsilphonelist $silphonelist >$dir/topo
# Create the lexicon FST with disambiguation symbols, and put it in lang_test.
# There is an extra step where we create a loop to "pass through" the
# disambiguation symbols from G.fst.
if $silprob; then
utils/make_lexicon_fst_silprob.pl $tmpdir/lexiconp_silprob_disambig.txt $srcdir/silprob.txt $silphone '#'$ndisambig | \
fstcompile --isymbols=$dir/phones.txt --osymbols=$dir/words.txt \
--keep_isymbols=false --keep_osymbols=false | \
fstaddselfloops $dir/phones/wdisambig_phones.int $dir/phones/wdisambig_words.int | \
fstarcsort --sort_type=olabel > $dir/L_disambig.fst || exit 1;
else
utils/make_lexicon_fst.pl --pron-probs $tmpdir/lexiconp_disambig.txt $sil_prob $silphone '#'$ndisambig | \
fstcompile --isymbols=$dir/phones.txt --osymbols=$dir/words.txt \
--keep_isymbols=false --keep_osymbols=false | \
fstaddselfloops $dir/phones/wdisambig_phones.int $dir/phones/wdisambig_words.int | \
fstarcsort --sort_type=olabel > $dir/L_disambig.fst || exit 1;
fi
echo "$(basename $0): validating output directory"
! utils/validate_lang.pl $dir && echo "$(basename $0): error validating output" && exit 1;
exit 0;
| keighrim/kaldi-yesno-tutorial | utils/prepare_lang.sh | Shell | apache-2.0 | 19,525 |
#!/bin/sh
# IP::Country::DB_File ipcc.db
echo '2.3|arin|1537592415823|142286|19700101|20180922|-0400
arin|US|ipv4|8.0.0.0|8388608|19921201|allocated|e5e3b9c13678dfc483fb1f819d70883c
arin|US|ipv6|2001:4860::|32|20050314|allocated|9d99e3f7d38d1b8026f2ebbea4017c9f' >delegated-arin
true >delegated-ripencc
true >delegated-afrinic
true >delegated-apnic
true >delegated-lacnic
build_ipcc.pl -b -d .
| apache/spamassassin | t/data/geodb/create_ipcc.sh | Shell | apache-2.0 | 396 |
#!/bin/sh
set -e
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
install_framework()
{
if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
local source="${BUILT_PRODUCTS_DIR}/$1"
elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
elif [ -r "$1" ]; then
local source="$1"
fi
local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
if [ -L "${source}" ]; then
echo "Symlinked..."
source="$(readlink "${source}")"
fi
# use filter instead of exclude so missing patterns don't throw errors
echo "rsync -av --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
rsync -av --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
local basename
basename="$(basename -s .framework "$1")"
binary="${destination}/${basename}.framework/${basename}"
if ! [ -r "$binary" ]; then
binary="${destination}/${basename}"
fi
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
strip_invalid_archs "$binary"
fi
# Resign the code if required by the build settings to avoid unstable apps
code_sign_if_enabled "${destination}/$(basename "$1")"
# Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]})
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
code_sign_if_enabled "${destination}/${lib}"
done
fi
}
# Signs a framework with the provided identity
code_sign_if_enabled() {
if [ -n "${EXPANDED_CODE_SIGN_IDENTITY}" -a "${CODE_SIGNING_REQUIRED}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
# Use the current code_sign_identity
echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
echo "/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS} --preserve-metadata=identifier,entitlements \"$1\""
/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS} --preserve-metadata=identifier,entitlements "$1"
fi
}
# Strip invalid architectures
strip_invalid_archs() {
binary="$1"
# Get architectures for current file
archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | rev)"
stripped=""
for arch in $archs; do
if ! [[ "${VALID_ARCHS}" == *"$arch"* ]]; then
# Strip non-valid architectures in-place
lipo -remove "$arch" -output "$binary" "$binary" || exit 1
stripped="$stripped $arch"
fi
done
if [[ "$stripped" ]]; then
echo "Stripped $binary of architectures:$stripped"
fi
}
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_framework "$BUILT_PRODUCTS_DIR/DACircularProgress/DACircularProgress.framework"
install_framework "$BUILT_PRODUCTS_DIR/MBProgressHUD/MBProgressHUD.framework"
install_framework "$BUILT_PRODUCTS_DIR/MWPhotoBrowser/MWPhotoBrowser.framework"
install_framework "$BUILT_PRODUCTS_DIR/SDWebImage/SDWebImage.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_framework "$BUILT_PRODUCTS_DIR/DACircularProgress/DACircularProgress.framework"
install_framework "$BUILT_PRODUCTS_DIR/MBProgressHUD/MBProgressHUD.framework"
install_framework "$BUILT_PRODUCTS_DIR/MWPhotoBrowser/MWPhotoBrowser.framework"
install_framework "$BUILT_PRODUCTS_DIR/SDWebImage/SDWebImage.framework"
fi
| EasySwift/EasySwift | Carthage/Checkouts/MWPhotoBrowser/Example/Pods/Target Support Files/Pods-MWPhotoBrowser_Example/Pods-MWPhotoBrowser_Example-frameworks.sh | Shell | apache-2.0 | 4,121 |
#!/usr/bin/env bash
_=$(which rubocop >/dev/null 2>&1)
if [ $? -eq 1 ]; then echo "Missing rubocop."; exit 1; fi
# Run rubocop over all .rb files w/out "=begin", used to indicate Ragel.
rubocop $(grep -RL "=begin" --include="*.rb")
| OpenBEL/bel_parser | check_all.sh | Shell | apache-2.0 | 234 |
#!/bin/bash
# This file contains some utilities to test the elasticsearch scripts with
# the .deb/.rpm packages.
# WARNING: This testing file must be executed as root and can
# dramatically change your system. It should only be executed
# in a throw-away VM like those made by the Vagrantfile at
# the root of the Elasticsearch source code. This should
# cause the script to fail if it is executed any other way:
[ -f /etc/is_vagrant_vm ] || {
>&2 echo "must be run on a vagrant VM"
exit 1
}
# Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
env_file() {
if is_dpkg; then
echo "/etc/default/elasticsearch"
fi
if is_rpm; then
echo "/etc/sysconfig/elasticsearch"
fi
}
# Export some useful paths.
export_elasticsearch_paths() {
export ESHOME="/usr/share/elasticsearch"
export ESPLUGINS="$ESHOME/plugins"
export ESMODULES="$ESHOME/modules"
export ESCONFIG="/etc/elasticsearch"
export ESDATA="/var/lib/elasticsearch"
export ESLOG="/var/log/elasticsearch"
export ESENVFILE=$(env_file)
export PACKAGE_NAME
}
# Install the rpm or deb package.
# -u upgrade rather than install. This only matters for rpm.
# -v the version to upgrade to. Defaults to the version under test.
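# Example invocations (the version number below is hypothetical):
# install_package installs the version under test;
# install_package -u -v 6.5.4 upgrades to version 6.5.4.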
install_package() {
local version=$(cat version)
local rpmCommand='-i'
local dir='./'
while getopts ":fuv:" opt; do
case $opt in
u)
rpmCommand='-U'
dpkgCommand='--force-confnew'
;;
f)
rpmCommand='-U --force'
dpkgCommand='--force-conflicts'
;;
d)
dir=$OPTARG
;;
v)
version=$OPTARG
;;
\?)
echo "Invalid option: -$OPTARG" >&2
;;
esac
done
local rpm_classifier="-x86_64"
local deb_classifier="-amd64"
if [[ $version == 6* ]]; then
rpm_classifier=""
deb_classifier=""
fi
if is_rpm; then
rpm $rpmCommand $dir/$PACKAGE_NAME-$version$rpm_classifier.rpm
elif is_dpkg; then
run dpkg $dpkgCommand -i $dir/$PACKAGE_NAME-$version$deb_classifier.deb
[[ "$status" -eq 0 ]] || {
echo "dpkg failed:"
echo "$output"
run lsof /var/lib/dpkg/lock
echo "lsof /var/lib/dpkg/lock:"
echo "$output"
false
}
else
skip "Only rpm or deb supported"
fi
# pass through java home to package
echo "JAVA_HOME=\"$SYSTEM_JAVA_HOME\"" >> $(env_file)
}
# Checks that all directories & files are correctly installed after a deb or
# rpm install.
verify_package_installation() {
id elasticsearch
getent group elasticsearch
# homedir is set in /etc/passwd but to a non-existent directory
assert_file_not_exist $(getent passwd elasticsearch | cut -d: -f6)
assert_file "$ESHOME" d root root 755
assert_file "$ESHOME/bin" d root root 755
assert_file "$ESHOME/bin/elasticsearch" f root root 755
assert_file "$ESHOME/bin/elasticsearch-plugin" f root root 755
assert_file "$ESHOME/bin/elasticsearch-shard" f root root 755
assert_file "$ESHOME/bin/elasticsearch-node" f root root 755
assert_file "$ESHOME/lib" d root root 755
assert_file "$ESCONFIG" d root elasticsearch 2750
assert_file "$ESCONFIG/elasticsearch.keystore" f root elasticsearch 660
sudo -u elasticsearch "$ESHOME/bin/elasticsearch-keystore" list | grep "keystore.seed"
assert_file "$ESCONFIG/.elasticsearch.keystore.initial_md5sum" f root elasticsearch 644
assert_file "$ESCONFIG/elasticsearch.yml" f root elasticsearch 660
assert_file "$ESCONFIG/jvm.options" f root elasticsearch 660
assert_file "$ESCONFIG/log4j2.properties" f root elasticsearch 660
assert_file "$ESDATA" d elasticsearch elasticsearch 2750
assert_file "$ESLOG" d elasticsearch elasticsearch 2750
assert_file "$ESPLUGINS" d root root 755
assert_file "$ESMODULES" d root root 755
assert_file "$ESHOME/NOTICE.txt" f root root 644
assert_file "$ESHOME/README.textile" f root root 644
if is_dpkg; then
# Env file
assert_file "/etc/default/elasticsearch" f root elasticsearch 660
# Machine-readable debian/copyright file
local copyrightDir=$(readlink -f /usr/share/doc/$PACKAGE_NAME)
assert_file $copyrightDir d root root 755
assert_file "$copyrightDir/copyright" f root root 644
fi
if is_rpm; then
# Env file
assert_file "/etc/sysconfig/elasticsearch" f root elasticsearch 660
# License file
assert_file "/usr/share/elasticsearch/LICENSE.txt" f root root 644
fi
if is_systemd; then
assert_file "/usr/lib/systemd/system/elasticsearch.service" f root root 644
assert_file "/usr/lib/tmpfiles.d/elasticsearch.conf" f root root 644
assert_file "/usr/lib/sysctl.d/elasticsearch.conf" f root root 644
if is_rpm; then
[[ $(/usr/sbin/sysctl vm.max_map_count) =~ "vm.max_map_count = 262144" ]]
else
[[ $(/sbin/sysctl vm.max_map_count) =~ "vm.max_map_count = 262144" ]]
fi
fi
if is_sysvinit; then
assert_file "/etc/init.d/elasticsearch" f root root 750
fi
run sudo -E -u vagrant LANG="en_US.UTF-8" cat "$ESCONFIG/elasticsearch.yml"
[ $status = 1 ]
[[ "$output" == *"Permission denied"* ]] || {
echo "Expected permission denied but found $output:"
false
}
}
| coding0011/elasticsearch | qa/os/bats/utils/packages.bash | Shell | apache-2.0 | 6,266 |
#!/bin/sh
set -e
SIZES="
16,16x16
32,16x16@2x
32,32x32
64,32x32@2x
128,128x128
256,128x128@2x
256,256x256
512,256x256@2x
512,512x512
1024,512x512@2x
"
for SVG in "$@"; do
BASE=$(basename "$SVG" | sed 's/\.[^\.]*$//')
ICONSET="$BASE.iconset"
mkdir -p "$ICONSET"
for PARAMS in $SIZES; do
SIZE=$(echo $PARAMS | cut -d, -f1)
LABEL=$(echo $PARAMS | cut -d, -f2)
svg2png "$SVG" -w $SIZE -h $SIZE "$ICONSET"/icon_$LABEL.png
done
iconutil -c icns "$ICONSET"
rm -rf "$ICONSET"
done
| sauliusg/jabref | src/main/resources/icons/svg2icns.sh | Shell | mit | 533 |
#!/bin/bash
THINGPLUS_PREFIX=/opt/thingplus
GATEWAY_DIR=$THINGPLUS_PREFIX/gateway
SDK_DIR=$THINGPLUS_PREFIX/.
NODE_VERSION=0.10.16
NODE_INSTALL_PREFIX=/usr/local
MODEL="debian"
RSYNC_SERVER="rsync.thingplus.net"
RSYNC_USER="openhardware_pi"
RSYNC_PASSWORD="SdckrvL3hq9bKHBU"
RSYNC_SIZE_ONLY_OPTION="false"
thingplus_openhardware_pi_install () {
INSTALL_DIR=$1
PI_SOURCE_DIR=openhardware/raspberrypi/motion-detector
pushd .
cd $INSTALL_DIR/$PI_SOURCE_DIR;
npm install
popd
}
thingplus_openhardware_sdk_install () {
INSTALL_DIR=$1
git clone https://github.com/daliworks/openhardware.git $INSTALL_DIR/openhardware
}
thingplus_config_set() {
cat <<EOF >./config
MODEL=$MODEL
RSYNC_SERVER=$RSYNC_SERVER
RSYNC_USER=$RSYNC_USER
RSYNC_PASSWORD=$RSYNC_PASSWORD
RSYNC_SIZE_ONLY_OPTION=$RSYNC_SIZE_ONLY_OPTION
DEST_DIR=$GATEWAY_DIR
EOF
}
tube_install () {
thingplus_config_set
wget http://support.thingplus.net/download/install/tube_install.sh
chmod +x ./tube_install.sh
./tube_install.sh b2d
rm ./tube_install.sh
rm ./config
}
node_install() {
wget http://support.thingplus.net/download/install/thingplus_embedded_node_install.sh
chmod +x ./thingplus_embedded_node_install.sh
./thingplus_embedded_node_install.sh -nv $NODE_VERSION -p $NODE_INSTALL_PREFIX
rm ./thingplus_embedded_node_install.sh
}
root_permission_check() {
if [ "$(id -u)" != "0" ]; then
echo "This script must be run as root" 1>&2
echo "sudo ./thingplus_embedded_sdk_pi_install.sh"
exit 1
fi
}
npm_verify() {
npm help > /dev/null
return $?
}
########## START ##########
root_permission_check
INSTALLED_NODE_VERSION="$(node --version)"
if [ $INSTALLED_NODE_VERSION != v"$NODE_VERSION" ]; then
node_install
elif ! npm_verify; then
node_install
fi
if [ ! -d $SDK_DIR ]; then
mkdir -p $SDK_DIR
fi
tube_install
thingplus_openhardware_sdk_install $SDK_DIR
thingplus_openhardware_pi_install $SDK_DIR
echo 'Installation is finished'
| thingplus/thingplus.github.io | download/install_dev/thingplus_embedded_sdk_hbe_install.sh | Shell | mit | 1,966 |
#!/bin/bash
set -e
# Global variables:
# ${GV_LOG}: Prefix this variable in echo to log echoed string.
# ${GV_SETTINGS_DIR}: Hold settings data.
# ${GV_BINARY_DIR}: Hold settings binary data.
SCRIPT_NAME="$(basename "$(test -L "$0" && readlink "$0" || echo "$0")")"
echo "${GV_LOG}>>>>>>>>> Running ${SCRIPT_NAME} ..."
# Install php5-gd to run Drupal.
apt-get -y --force-yes install php5-gd
# Log
echo "${GV_LOG} * Install php5-gd to run Drupal."
| bankonme/cust-live-deb | scripts/repository/inst-std-xtra-opw-php5-gd.sh | Shell | gpl-2.0 | 456 |
#!/bin/bash
# Organic Photovoltaic Device Model - a drift diffusion base/Shockley-Read-Hall
# model for organic solar cells.
# Copyright (C) 2012 Roderick C. I. MacKenzie
#
# [email protected]
# www.roderickmackenzie.eu
# Room B86 Coats, University Park, Nottingham, NG7 2RD, UK
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
count=`cat build.inp`
let "count = $count+1"
echo $count >build.inp
| srikantvv/org-sol | opvdm-core-2.70/build.sh | Shell | gpl-2.0 | 1,114 |
#!/usr/bin/env bash
set -eux
platform="$(uname)"
function setup() {
if [[ "${platform}" == "FreeBSD" ]] || [[ "${platform}" == "Darwin" ]]; then
ifconfig lo0
existing=$(ifconfig lo0 | grep '^[[:blank:]]inet 127\.0\.0\. ' || true)
echo "${existing}"
for i in 3 4 254; do
ip="127.0.0.${i}"
if [[ "${existing}" != *"${ip}"* ]]; then
ifconfig lo0 alias "${ip}" up
fi
done
ifconfig lo0
fi
}
function teardown() {
if [[ "${platform}" == "FreeBSD" ]] || [[ "${platform}" == "Darwin" ]]; then
for i in 3 4 254; do
ip="127.0.0.${i}"
if [[ "${existing}" != *"${ip}"* ]]; then
ifconfig lo0 -alias "${ip}"
fi
done
ifconfig lo0
fi
}
setup
trap teardown EXIT
ANSIBLE_SSH_ARGS='-C -o ControlMaster=auto -o ControlPersist=60s -o UserKnownHostsFile=/dev/null' \
ANSIBLE_HOST_KEY_CHECKING=false ansible-playbook test_delegate_to.yml -i inventory -v "$@"
# this test is not doing what it says it does, also relies on var that should not be available
#ansible-playbook test_loop_control.yml -v "$@"
ansible-playbook test_delegate_to_loop_randomness.yml -v "$@"
ansible-playbook delegate_and_nolog.yml -i inventory -v "$@"
ansible-playbook delegate_facts_block.yml -i inventory -v "$@"
ansible-playbook test_delegate_to_loop_caching.yml -i inventory -v "$@"
# ensure we are using correct settings when delegating
ANSIBLE_TIMEOUT=3 ansible-playbook delegate_vars_hanldling.yml -i inventory -v "$@"
# test ansible_x_interpreter
# python
source virtualenv.sh
(
cd "${OUTPUT_DIR}"/venv/bin
ln -s python firstpython
ln -s python secondpython
)
ansible-playbook verify_interpreter.yml -i inventory_interpreters -v "$@"
ansible-playbook discovery_applied.yml -i inventory -v "$@"
| azaghal/ansible | test/integration/targets/delegate_to/runme.sh | Shell | gpl-3.0 | 1,870 |
#!/usr/bin/env bash
scp *.sql oracle:/opt/oracle/fasedos/insercion
| manuelalcocer/proyectobbdd | 2aFase/postgres/ficherosinsercion/copiar.sh | Shell | gpl-3.0 | 67 |
#!/usr/bin/env bash
THIS_DIR=$(cd $(dirname $0); pwd)
RAM=`grep MemTotal /proc/meminfo | awk '{print $2}'`
VBIN=virtualenv-3.4
PYBIN=python
if ! hash $VBIN 2>/dev/null; then
VBIN=virtualenv3
fi
if ! hash $VBIN 2>/dev/null; then
VBIN=virtualenv
fi
if ! hash $VBIN 2>/dev/null; then
echo "You have to install virtualenv"
exit 1
fi
cd $THIS_DIR
update() {
git pull
git submodule update --init --recursive
if [ ! -f ./env/bin/activate ]; then
echo "You need virtualenv in env directory"
echo "Run virtualenv -p python3 env"
exit 1
fi
source env/bin/activate
pip install -r requirements.txt
}
opt_install() {
if [ ! -f ./env/bin/activate ]; then
echo "You need virtualenv in env directory"
echo "Run ./launch.sh install first"
exit 1
fi
source env/bin/activate
pip install -r opt_requirements.txt
}
install_no_lua() {
if [ $RAM -lt 307200 ]; then
./configure --disable-extf --disable-liblua && make
else
./configure --disable-liblua && make
fi
RET=$?
if [ $RET -ne 0 ];then
echo "Error installing tg"; exit $RET;
fi
}
check_python3dev() {
local res=1
for python in python3.4 python3 python; do
local path=`$python -c "from distutils.sysconfig import *; print(get_config_var('CONFINCLUDEPY'))"`
if [[ $path == *"python3.4m"* ]]; then
PYBIN=$python
res=0
fi
done
if [ $res -ne 0 ]; then
echo "You need to install the python 3 libs, in ubuntu: 'sudo apt-get install python3-dev'"
exit 1
fi
}
install() {
check_python3dev
$VBIN -p python3 env
RET=$?
if [ $RET -ne 0 ]; then
echo "Error creating the virtualenv with python 3, check the install instructions"; exit $RET
fi
update
check_python3dev
if [ $RAM -lt 307200 ]; then
cd tg && ./configure --disable-extf && make
else
cd tg && ./configure && make
fi
if [ $? -ne 0 ]; then
install_no_lua
fi
cd ..
}
if [ "$1" = "install" ]; then
install
elif [ "$1" = "update" ]; then
update
elif [ "$1" = "optdeps" ]; then
opt_install
else
if [ ! -f ./tg/telegram.h ]; then
echo "tg not found"
echo "Run $0 install"
exit 1
fi
if [ ! -f ./tg/bin/telegram-cli ]; then
echo "tg binary not found"
echo "Run $0 install"
exit 1
fi
if [ ! -f ./env/bin/activate ]; then
echo "You need virtualenv in env directory"
echo "Run virtualenv -p python3 env"
exit 1
fi
source env/bin/activate
./tg/bin/telegram-cli -k ./tg/tg-server.pub -Z bot/bot.py -l 1 -E
fi
| MRAHS/phbot | launch.sh | Shell | agpl-3.0 | 2,722 |
#!/usr/bin/env bash
ci_dir="$(dirname "$0")"
. "${ci_dir}/ci-common.sh"
git_download Flocq
( cd "${CI_BUILD_DIR}/Flocq" && ./autogen.sh && ./configure && ./remake "-j${NJOBS}" )
| ppedrot/coq | dev/ci/ci-flocq.sh | Shell | lgpl-2.1 | 181 |
#!/bin/bash
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
set -e
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "$SCRIPT_DIR/../../.."
DOWNLOADS_DIR=tensorflow/contrib/lite/downloads
BZL_FILE_PATH=tensorflow/workspace.bzl
# Ensure it is being run from repo root
if [ ! -f $BZL_FILE_PATH ]; then
echo "Could not find ${BZL_FILE_PATH}":
echo "Likely you are not running this from the root directory of the repository.";
exit 1;
fi
EIGEN_URL="$(grep -o 'http.*bitbucket.org/eigen/eigen/get/.*tar\.gz' "${BZL_FILE_PATH}" | grep -v mirror.bazel | head -n1)"
GEMMLOWP_URL="$(grep -o 'https://mirror.bazel.build/github.com/google/gemmlowp/.*zip' "${BZL_FILE_PATH}" | head -n1)"
GOOGLETEST_URL="https://github.com/google/googletest/archive/release-1.8.0.tar.gz"
ABSL_URL="$(grep -o 'https://github.com/abseil/abseil-cpp/.*tar.gz' "${BZL_FILE_PATH}" | head -n1)"
NEON_2_SSE_URL="https://github.com/intel/ARM_NEON_2_x86_SSE/archive/master.zip"
FARMHASH_URL="https://mirror.bazel.build/github.com/google/farmhash/archive/816a4ae622e964763ca0862d9dbd19324a1eaf45.tar.gz"
FLATBUFFERS_URL="https://github.com/google/flatbuffers/archive/master.zip"
# TODO(petewarden): Some new code in Eigen triggers a clang bug with iOS arm64,
# so work around it by patching the source.
replace_by_sed() {
local regex="${1}"
shift
# Detect the version of sed by the return value of "--version" flag. GNU-sed
# supports "--version" while BSD-sed doesn't.
if ! sed --version >/dev/null 2>&1; then
# BSD-sed.
sed -i '' -e "${regex}" "$@"
else
# GNU-sed.
sed -i -e "${regex}" "$@"
fi
}
download_and_extract() {
local usage="Usage: download_and_extract URL DIR"
local url="${1:?${usage}}"
local dir="${2:?${usage}}"
echo "downloading ${url}" >&2
mkdir -p "${dir}"
if [[ "${url}" == *gz ]]; then
curl -Ls "${url}" | tar -C "${dir}" --strip-components=1 -xz
elif [[ "${url}" == *zip ]]; then
tempdir=$(mktemp -d)
tempdir2=$(mktemp -d)
curl -L ${url} > ${tempdir}/zipped.zip
unzip ${tempdir}/zipped.zip -d ${tempdir2}
# If the zip file contains nested directories, extract the files from the
# inner directory.
if ls ${tempdir2}/*/* 1> /dev/null 2>&1; then
# unzip has no strip components, so unzip to a temp dir, and move the
# files we want from the tempdir to destination.
cp -R ${tempdir2}/*/* ${dir}/
else
cp -R ${tempdir2}/* ${dir}/
fi
rm -rf ${tempdir2} ${tempdir}
fi
# Delete any potential BUILD files, which would interfere with Bazel builds.
find "${dir}" -type f -name '*BUILD' -delete
}
download_and_extract "${EIGEN_URL}" "${DOWNLOADS_DIR}/eigen"
download_and_extract "${GEMMLOWP_URL}" "${DOWNLOADS_DIR}/gemmlowp"
download_and_extract "${GOOGLETEST_URL}" "${DOWNLOADS_DIR}/googletest"
download_and_extract "${ABSL_URL}" "${DOWNLOADS_DIR}/absl"
download_and_extract "${NEON_2_SSE_URL}" "${DOWNLOADS_DIR}/neon_2_sse"
download_and_extract "${FARMHASH_URL}" "${DOWNLOADS_DIR}/farmhash"
download_and_extract "${FLATBUFFERS_URL}" "${DOWNLOADS_DIR}/flatbuffers"
replace_by_sed 's#static uint32x4_t p4ui_CONJ_XOR = vld1q_u32( conj_XOR_DATA );#static uint32x4_t p4ui_CONJ_XOR; // = vld1q_u32( conj_XOR_DATA ); - Removed by script#' \
"${DOWNLOADS_DIR}/eigen/Eigen/src/Core/arch/NEON/Complex.h"
replace_by_sed 's#static uint32x2_t p2ui_CONJ_XOR = vld1_u32( conj_XOR_DATA );#static uint32x2_t p2ui_CONJ_XOR;// = vld1_u32( conj_XOR_DATA ); - Removed by scripts#' \
"${DOWNLOADS_DIR}/eigen/Eigen/src/Core/arch/NEON/Complex.h"
replace_by_sed 's#static uint64x2_t p2ul_CONJ_XOR = vld1q_u64( p2ul_conj_XOR_DATA );#static uint64x2_t p2ul_CONJ_XOR;// = vld1q_u64( p2ul_conj_XOR_DATA ); - Removed by script#' \
"${DOWNLOADS_DIR}/eigen/Eigen/src/Core/arch/NEON/Complex.h"
echo "download_dependencies.sh completed successfully." >&2
| allenlavoie/tensorflow | tensorflow/contrib/lite/download_dependencies.sh | Shell | apache-2.0 | 4,507 |
#!/usr/bin/env bash
set -e
ORG_PATH="github.com/appc"
REPO_PATH="${ORG_PATH}/docker2aci"
VERSION=$(git describe --dirty)
GLDFLAGS="-X github.com/appc/docker2aci/lib.Version=${VERSION}"
if [ ! -h gopath/src/${REPO_PATH} ]; then
mkdir -p gopath/src/${ORG_PATH}
ln -s ../../../.. gopath/src/${REPO_PATH} || exit 255
fi
export GOBIN=${PWD}/bin
export GOPATH=${PWD}/gopath:${PWD}/Godeps/_workspace
eval $(go env)
echo "Building docker2aci..."
go build -o $GOBIN/docker2aci -ldflags "${GLDFLAGS}" ${REPO_PATH}/
| jonboulle/docker2aci | build.sh | Shell | apache-2.0 | 513 |
#!/bin/bash
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A library of helper functions and constants for the local config.
# Use the config file specified in $KUBE_CONFIG_FILE, or default to
# config-default.sh.
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/cluster/aws/${KUBE_CONFIG_FILE-"config-default.sh"}"
source "${KUBE_ROOT}/cluster/common.sh"
# Strip the trailing availability-zone letter to get the region (e.g. us-west-2a -> us-west-2); ${ZONE%?} drops the final character.
AWS_REGION=${ZONE%?}
export AWS_DEFAULT_REGION=${AWS_REGION}
AWS_CMD="aws --output json ec2"
AWS_ELB_CMD="aws --output json elb"
MASTER_INTERNAL_IP=172.20.0.9
function json_val {
python -c 'import json,sys;obj=json.load(sys.stdin);print obj'$1''
}
# TODO (ayurchuk) Refactor the get_* functions to use filters
# TODO (bburns) Parameterize this for multiple cluster per project
function get_vpc_id {
$AWS_CMD --output text describe-vpcs \
--filters Name=tag:Name,Values=kubernetes-vpc \
Name=tag:KubernetesCluster,Values=${CLUSTER_ID} \
--query Vpcs[].VpcId
}
function get_subnet_id {
python -c "import json,sys; lst = [str(subnet['SubnetId']) for subnet in json.load(sys.stdin)['Subnets'] if subnet['VpcId'] == '$1']; print ''.join(lst)"
}
function get_igw_id {
python -c "import json,sys; lst = [str(igw['InternetGatewayId']) for igw in json.load(sys.stdin)['InternetGateways'] for attachment in igw['Attachments'] if attachment['VpcId'] == '$1']; print ''.join(lst)"
}
function get_route_table_id {
python -c "import json,sys; lst = [str(route_table['RouteTableId']) for route_table in json.load(sys.stdin)['RouteTables'] if route_table['VpcId'] == '$1']; print ''.join(lst)"
}
function get_elbs_in_vpc {
# ELB doesn't seem to be on the same platform as the rest of AWS; doesn't support filtering
$AWS_ELB_CMD describe-load-balancers | \
python -c "import json,sys; lst = [str(lb['LoadBalancerName']) for lb in json.load(sys.stdin)['LoadBalancerDescriptions'] if lb['VPCId'] == '$1']; print '\n'.join(lst)"
}
function expect_instance_states {
python -c "import json,sys; lst = [str(instance['InstanceId']) for reservation in json.load(sys.stdin)['Reservations'] for instance in reservation['Instances'] if instance['State']['Name'] != '$1']; print ' '.join(lst)"
}
function get_instance_public_ip {
local tagName=$1
$AWS_CMD --output text describe-instances \
--filters Name=tag:Name,Values=${tagName} \
Name=instance-state-name,Values=running \
Name=tag:KubernetesCluster,Values=${CLUSTER_ID} \
--query Reservations[].Instances[].NetworkInterfaces[0].Association.PublicIp
}
function detect-master () {
KUBE_MASTER=${MASTER_NAME}
if [[ -z "${KUBE_MASTER_IP-}" ]]; then
KUBE_MASTER_IP=$(get_instance_public_ip $MASTER_NAME)
fi
if [[ -z "${KUBE_MASTER_IP-}" ]]; then
echo "Could not detect Kubernetes master node. Make sure you've launched a cluster with 'kube-up.sh'"
exit 1
fi
echo "Using master: $KUBE_MASTER (external IP: $KUBE_MASTER_IP)"
}
function detect-minions () {
KUBE_MINION_IP_ADDRESSES=()
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
local minion_ip=$(get_instance_public_ip ${MINION_NAMES[$i]})
echo "Found ${MINION_NAMES[$i]} at ${minion_ip}"
KUBE_MINION_IP_ADDRESSES+=("${minion_ip}")
done
if [[ -z "$KUBE_MINION_IP_ADDRESSES" ]]; then
echo "Could not detect Kubernetes minion nodes. Make sure you've launched a cluster with 'kube-up.sh'"
exit 1
fi
}
# Detects the AMI to use (considering the region)
#
# Vars set:
# AWS_IMAGE
function detect-image () {
# This is the ubuntu 14.04 image for <region>, amd64, hvm:ebs-ssd
# See here: http://cloud-images.ubuntu.com/locator/ec2/ for other images
# This will need to be updated from time to time as amis are deprecated
if [[ -z "${AWS_IMAGE-}" ]]; then
case "${AWS_REGION}" in
ap-northeast-1)
AWS_IMAGE=ami-93876e93
;;
ap-southeast-1)
AWS_IMAGE=ami-66546234
;;
eu-central-1)
AWS_IMAGE=ami-e2a694ff
;;
eu-west-1)
AWS_IMAGE=ami-d7fd6ea0
;;
sa-east-1)
AWS_IMAGE=ami-a357eebe
;;
us-east-1)
AWS_IMAGE=ami-6089d208
;;
us-west-1)
AWS_IMAGE=ami-cf7d998b
;;
cn-north-1)
AWS_IMAGE=ami-d436a4ed
;;
us-gov-west-1)
AWS_IMAGE=ami-01523322
;;
ap-southeast-2)
AWS_IMAGE=ami-cd4e3ff7
;;
us-west-2)
AWS_IMAGE=ami-3b14370b
;;
*)
echo "Please specify AWS_IMAGE directly (region not recognized)"
exit 1
esac
fi
}
# Verify prereqs
function verify-prereqs {
if [[ "$(which aws)" == "" ]]; then
echo "Can't find aws in PATH, please fix and retry."
exit 1
fi
}
# Create a temp dir that'll be deleted at the end of this bash session.
#
# Vars set:
# KUBE_TEMP
function ensure-temp-dir {
if [[ -z ${KUBE_TEMP-} ]]; then
KUBE_TEMP=$(mktemp -d -t kubernetes.XXXXXX)
trap 'rm -rf "${KUBE_TEMP}"' EXIT
fi
}
# Verify and find the various tar files that we are going to use on the server.
#
# Vars set:
# SERVER_BINARY_TAR
# SALT_TAR
function find-release-tars {
SERVER_BINARY_TAR="${KUBE_ROOT}/server/kubernetes-server-linux-amd64.tar.gz"
if [[ ! -f "$SERVER_BINARY_TAR" ]]; then
SERVER_BINARY_TAR="${KUBE_ROOT}/_output/release-tars/kubernetes-server-linux-amd64.tar.gz"
fi
if [[ ! -f "$SERVER_BINARY_TAR" ]]; then
echo "!!! Cannot find kubernetes-server-linux-amd64.tar.gz"
exit 1
fi
SALT_TAR="${KUBE_ROOT}/server/kubernetes-salt.tar.gz"
if [[ ! -f "$SALT_TAR" ]]; then
SALT_TAR="${KUBE_ROOT}/_output/release-tars/kubernetes-salt.tar.gz"
fi
if [[ ! -f "$SALT_TAR" ]]; then
echo "!!! Cannot find kubernetes-salt.tar.gz"
exit 1
fi
}
# Take the local tar files and upload them to S3. They will then be
# downloaded by the master as part of the start up script for the master.
#
# Assumed vars:
# SERVER_BINARY_TAR
# SALT_TAR
# Vars set:
# SERVER_BINARY_TAR_URL
# SALT_TAR_URL
function upload-server-tars() {
SERVER_BINARY_TAR_URL=
SALT_TAR_URL=
if [[ -z ${AWS_S3_BUCKET-} ]]; then
local project_hash=
local key=$(aws configure get aws_access_key_id)
if which md5 > /dev/null 2>&1; then
project_hash=$(md5 -q -s "${USER} ${key}")
else
project_hash=$(echo -n "${USER} ${key}" | md5sum | awk '{ print $1 }')
fi
AWS_S3_BUCKET="kubernetes-staging-${project_hash}"
fi
echo "Uploading to Amazon S3"
if ! aws s3 ls "s3://${AWS_S3_BUCKET}" > /dev/null 2>&1 ; then
echo "Creating ${AWS_S3_BUCKET}"
# Buckets must be globally uniquely named, so always create in a known region
# We default to us-east-1 because that's the canonical region for S3,
# and then the bucket is most-simply named (s3.amazonaws.com)
aws s3 mb "s3://${AWS_S3_BUCKET}" --region ${AWS_S3_REGION}
fi
local s3_bucket_location=$(aws --output text s3api get-bucket-location --bucket ${AWS_S3_BUCKET})
local s3_url_base=https://s3-${s3_bucket_location}.amazonaws.com
if [[ "${s3_bucket_location}" == "None" ]]; then
# "US Classic" does not follow the pattern
s3_url_base=https://s3.amazonaws.com
fi
local -r staging_path="devel"
echo "+++ Staging server tars to S3 Storage: ${AWS_S3_BUCKET}/${staging_path}"
local server_binary_path="${staging_path}/${SERVER_BINARY_TAR##*/}"
aws s3 cp "${SERVER_BINARY_TAR}" "s3://${AWS_S3_BUCKET}/${server_binary_path}"
aws s3api put-object-acl --bucket ${AWS_S3_BUCKET} --key "${server_binary_path}" --grant-read 'uri="http://acs.amazonaws.com/groups/global/AllUsers"'
SERVER_BINARY_TAR_URL="${s3_url_base}/${AWS_S3_BUCKET}/${server_binary_path}"
local salt_tar_path="${staging_path}/${SALT_TAR##*/}"
aws s3 cp "${SALT_TAR}" "s3://${AWS_S3_BUCKET}/${salt_tar_path}"
aws s3api put-object-acl --bucket ${AWS_S3_BUCKET} --key "${salt_tar_path}" --grant-read 'uri="http://acs.amazonaws.com/groups/global/AllUsers"'
SALT_TAR_URL="${s3_url_base}/${AWS_S3_BUCKET}/${salt_tar_path}"
}
# Ensure that we have a password created for validating to the master. Will
# read from kubeconfig for the current context if available.
#
# Assumed vars
# KUBE_ROOT
#
# Vars set:
# KUBE_USER
# KUBE_PASSWORD
function get-password {
get-kubeconfig-basicauth
if [[ -z "${KUBE_USER}" || -z "${KUBE_PASSWORD}" ]]; then
KUBE_USER=admin
KUBE_PASSWORD=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))')
fi
}
# Adds a tag to an AWS resource
# usage: add-tag <resource-id> <tag-name> <tag-value>
function add-tag {
echo "Adding tag to ${1}: ${2}=${3}"
# We need to retry in case the resource isn't yet fully created
sleep 3
n=0
until [ $n -ge 5 ]; do
$AWS_CMD create-tags --resources ${1} --tags Key=${2},Value=${3} > $LOG && return
n=$[$n+1]
sleep 15
done
echo "Unable to add tag to AWS resource"
exit 1
}
# Creates the IAM profile, based on configuration files in templates/iam
function create-iam-profile {
local key=$1
local conf_dir=file://${KUBE_ROOT}/cluster/aws/templates/iam
echo "Creating IAM role: ${key}"
aws iam create-role --role-name ${key} --assume-role-policy-document ${conf_dir}/${key}-role.json > $LOG
echo "Creating IAM role-policy: ${key}"
aws iam put-role-policy --role-name ${key} --policy-name ${key} --policy-document ${conf_dir}/${key}-policy.json > $LOG
echo "Creating IAM instance-policy: ${key}"
aws iam create-instance-profile --instance-profile-name ${key} > $LOG
echo "Adding IAM role to instance-policy: ${key}"
aws iam add-role-to-instance-profile --instance-profile-name ${key} --role-name ${key} > $LOG
}
# Creates the IAM roles (if they do not already exist)
function ensure-iam-profiles {
aws iam get-instance-profile --instance-profile-name ${IAM_PROFILE_MASTER} || {
echo "Creating master IAM profile: ${IAM_PROFILE_MASTER}"
create-iam-profile ${IAM_PROFILE_MASTER}
}
aws iam get-instance-profile --instance-profile-name ${IAM_PROFILE_MINION} || {
echo "Creating minion IAM profile: ${IAM_PROFILE_MINION}"
create-iam-profile ${IAM_PROFILE_MINION}
}
}
# Wait for instance to be in running state
function wait-for-instance-running {
instance_id=$1
while true; do
instance_state=$($AWS_CMD describe-instances --instance-ids $instance_id | expect_instance_states running)
if [[ "$instance_state" == "" ]]; then
break
else
echo "Waiting for instance ${instance_id} to spawn"
echo "Sleeping for 3 seconds..."
sleep 3
fi
done
}
function kube-up {
find-release-tars
upload-server-tars
ensure-temp-dir
ensure-iam-profiles
get-password
python "${KUBE_ROOT}/third_party/htpasswd/htpasswd.py" \
-b -c "${KUBE_TEMP}/htpasswd" "$KUBE_USER" "$KUBE_PASSWORD"
local htpasswd
htpasswd=$(cat "${KUBE_TEMP}/htpasswd")
if [[ ! -f "$AWS_SSH_KEY" ]]; then
ssh-keygen -f "$AWS_SSH_KEY" -N ''
fi
detect-image
$AWS_CMD import-key-pair --key-name kubernetes --public-key-material "file://$AWS_SSH_KEY.pub" > $LOG 2>&1 || true
VPC_ID=$(get_vpc_id)
if [[ -z "$VPC_ID" ]]; then
echo "Creating vpc."
VPC_ID=$($AWS_CMD create-vpc --cidr-block 172.20.0.0/16 | json_val '["Vpc"]["VpcId"]')
$AWS_CMD modify-vpc-attribute --vpc-id $VPC_ID --enable-dns-support '{"Value": true}' > $LOG
$AWS_CMD modify-vpc-attribute --vpc-id $VPC_ID --enable-dns-hostnames '{"Value": true}' > $LOG
add-tag $VPC_ID Name kubernetes-vpc
add-tag $VPC_ID KubernetesCluster ${CLUSTER_ID}
fi
echo "Using VPC $VPC_ID"
SUBNET_ID=$($AWS_CMD describe-subnets | get_subnet_id $VPC_ID)
if [[ -z "$SUBNET_ID" ]]; then
echo "Creating subnet."
SUBNET_ID=$($AWS_CMD create-subnet --cidr-block 172.20.0.0/24 --vpc-id $VPC_ID --availability-zone ${ZONE} | json_val '["Subnet"]["SubnetId"]')
fi
echo "Using subnet $SUBNET_ID"
IGW_ID=$($AWS_CMD describe-internet-gateways | get_igw_id $VPC_ID)
if [[ -z "$IGW_ID" ]]; then
echo "Creating Internet Gateway."
IGW_ID=$($AWS_CMD create-internet-gateway | json_val '["InternetGateway"]["InternetGatewayId"]')
$AWS_CMD attach-internet-gateway --internet-gateway-id $IGW_ID --vpc-id $VPC_ID > $LOG
fi
echo "Using Internet Gateway $IGW_ID"
echo "Associating route table."
ROUTE_TABLE_ID=$($AWS_CMD describe-route-tables --filters Name=vpc-id,Values=$VPC_ID | json_val '["RouteTables"][0]["RouteTableId"]')
$AWS_CMD associate-route-table --route-table-id $ROUTE_TABLE_ID --subnet-id $SUBNET_ID > $LOG || true
echo "Configuring route table."
$AWS_CMD describe-route-tables --filters Name=vpc-id,Values=$VPC_ID > $LOG || true
echo "Adding route to route table."
$AWS_CMD create-route --route-table-id $ROUTE_TABLE_ID --destination-cidr-block 0.0.0.0/0 --gateway-id $IGW_ID > $LOG || true
echo "Using Route Table $ROUTE_TABLE_ID"
SEC_GROUP_ID=$($AWS_CMD --output text describe-security-groups \
--filters Name=vpc-id,Values=$VPC_ID \
Name=group-name,Values=kubernetes-sec-group \
--query SecurityGroups[].GroupId \
| tr "\t" "\n")
if [[ -z "$SEC_GROUP_ID" ]]; then
echo "Creating security group."
SEC_GROUP_ID=$($AWS_CMD create-security-group --group-name kubernetes-sec-group --description kubernetes-sec-group --vpc-id $VPC_ID | json_val '["GroupId"]')
$AWS_CMD authorize-security-group-ingress --group-id $SEC_GROUP_ID --protocol -1 --port all --cidr 0.0.0.0/0 > $LOG
fi
(
# We pipe this to the ami as a startup script in the user-data field. Requires a compatible ami
echo "#! /bin/bash"
echo "mkdir -p /var/cache/kubernetes-install"
echo "cd /var/cache/kubernetes-install"
echo "readonly SALT_MASTER='${MASTER_INTERNAL_IP}'"
echo "readonly INSTANCE_PREFIX='${INSTANCE_PREFIX}'"
echo "readonly NODE_INSTANCE_PREFIX='${INSTANCE_PREFIX}-minion'"
echo "readonly SERVER_BINARY_TAR_URL='${SERVER_BINARY_TAR_URL}'"
echo "readonly SALT_TAR_URL='${SALT_TAR_URL}'"
echo "readonly ZONE='${ZONE}'"
echo "readonly MASTER_HTPASSWD='${htpasswd}'"
echo "readonly PORTAL_NET='${PORTAL_NET}'"
echo "readonly ENABLE_CLUSTER_MONITORING='${ENABLE_CLUSTER_MONITORING:-false}'"
echo "readonly ENABLE_NODE_MONITORING='${ENABLE_NODE_MONITORING:-false}'"
echo "readonly ENABLE_CLUSTER_LOGGING='${ENABLE_CLUSTER_LOGGING:-false}'"
echo "readonly ENABLE_NODE_LOGGING='${ENABLE_NODE_LOGGING:-false}'"
echo "readonly LOGGING_DESTINATION='${LOGGING_DESTINATION:-}'"
echo "readonly ELASTICSEARCH_LOGGING_REPLICAS='${ELASTICSEARCH_LOGGING_REPLICAS:-}'"
echo "readonly ENABLE_CLUSTER_DNS='${ENABLE_CLUSTER_DNS:-false}'"
echo "readonly DNS_REPLICAS='${DNS_REPLICAS:-}'"
echo "readonly DNS_SERVER_IP='${DNS_SERVER_IP:-}'"
echo "readonly DNS_DOMAIN='${DNS_DOMAIN:-}'"
echo "readonly ADMISSION_CONTROL='${ADMISSION_CONTROL:-}'"
echo "readonly MASTER_IP_RANGE='${MASTER_IP_RANGE:-}'"
grep -v "^#" "${KUBE_ROOT}/cluster/aws/templates/common.sh"
grep -v "^#" "${KUBE_ROOT}/cluster/aws/templates/format-disks.sh"
grep -v "^#" "${KUBE_ROOT}/cluster/aws/templates/create-dynamic-salt-files.sh"
grep -v "^#" "${KUBE_ROOT}/cluster/aws/templates/download-release.sh"
grep -v "^#" "${KUBE_ROOT}/cluster/aws/templates/salt-master.sh"
) > "${KUBE_TEMP}/master-start.sh"
echo "Starting Master"
master_id=$($AWS_CMD run-instances \
--image-id $AWS_IMAGE \
--iam-instance-profile Name=$IAM_PROFILE_MASTER \
--instance-type $MASTER_SIZE \
--subnet-id $SUBNET_ID \
--private-ip-address 172.20.0.9 \
--key-name kubernetes \
--security-group-ids $SEC_GROUP_ID \
--associate-public-ip-address \
--user-data file://${KUBE_TEMP}/master-start.sh | json_val '["Instances"][0]["InstanceId"]')
add-tag $master_id Name $MASTER_NAME
add-tag $master_id Role $MASTER_TAG
add-tag $master_id KubernetesCluster ${CLUSTER_ID}
echo "Waiting for master to be ready"
local attempt=0
while true; do
echo -n Attempt "$(($attempt+1))" to check for master node
local ip=$(get_instance_public_ip $MASTER_NAME)
if [[ -z "${ip}" ]]; then
if (( attempt > 30 )); then
echo
echo -e "${color_red}master failed to start. Your cluster is unlikely" >&2
echo "to work correctly. Please run ./cluster/kube-down.sh and re-create the" >&2
echo -e "cluster. (sorry!)${color_norm}" >&2
exit 1
fi
else
KUBE_MASTER=${MASTER_NAME}
KUBE_MASTER_IP=${ip}
echo -e " ${color_green}[master running @${KUBE_MASTER_IP}]${color_norm}"
# We are not able to add a route to the instance until that instance is in "running" state.
wait-for-instance-running $master_id
sleep 10
$AWS_CMD create-route --route-table-id $ROUTE_TABLE_ID --destination-cidr-block ${MASTER_IP_RANGE} --instance-id $master_id > $LOG
break
fi
echo -e " ${color_yellow}[master not working yet]${color_norm}"
attempt=$(($attempt+1))
sleep 10
done
# We need the salt-master to be up for the minions to work
attempt=0
while true; do
echo -n Attempt "$(($attempt+1))" to check for salt-master
local output
output=$(ssh -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" ubuntu@${KUBE_MASTER_IP} pgrep salt-master 2> $LOG) || output=""
if [[ -z "${output}" ]]; then
if (( attempt > 30 )); then
echo
echo -e "${color_red}salt-master failed to start on ${KUBE_MASTER_IP}. Your cluster is unlikely" >&2
echo "to work correctly. Please run ./cluster/kube-down.sh and re-create the" >&2
echo -e "cluster. (sorry!)${color_norm}" >&2
exit 1
fi
else
echo -e " ${color_green}[salt-master running]${color_norm}"
break
fi
echo -e " ${color_yellow}[salt-master not working yet]${color_norm}"
attempt=$(($attempt+1))
sleep 10
done
MINION_IDS=()
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
echo "Starting Minion (${MINION_NAMES[$i]})"
(
# We pipe this to the ami as a startup script in the user-data field. Requires a compatible ami
echo "#! /bin/bash"
echo "SALT_MASTER='${MASTER_INTERNAL_IP}'"
echo "MINION_IP_RANGE='${MINION_IP_RANGES[$i]}'"
echo "DOCKER_OPTS='${EXTRA_DOCKER_OPTS:-}'"
grep -v "^#" "${KUBE_ROOT}/cluster/aws/templates/format-disks.sh"
grep -v "^#" "${KUBE_ROOT}/cluster/aws/templates/salt-minion.sh"
) > "${KUBE_TEMP}/minion-start-${i}.sh"
minion_id=$($AWS_CMD run-instances \
--image-id $AWS_IMAGE \
--iam-instance-profile Name=$IAM_PROFILE_MINION \
--instance-type $MINION_SIZE \
--subnet-id $SUBNET_ID \
--private-ip-address 172.20.0.1${i} \
--key-name kubernetes \
--security-group-ids $SEC_GROUP_ID \
--associate-public-ip-address \
--user-data file://${KUBE_TEMP}/minion-start-${i}.sh | json_val '["Instances"][0]["InstanceId"]')
add-tag $minion_id Name ${MINION_NAMES[$i]}
add-tag $minion_id Role $MINION_TAG
add-tag $minion_id KubernetesCluster ${CLUSTER_ID}
MINION_IDS[$i]=$minion_id
done
# Add routes to minions
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
# We are not able to add a route to the instance until that instance is in "running" state.
# This is quite an ugly solution to this problem. In Bash 4 we could use assoc. arrays to do this for
# all instances at once, but we can't be sure we are running Bash 4 (see the commented sketch below).
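# A commented sketch of that Bash 4 approach (illustrative only, not enabled),
# using an associative array of pending minions and the existing
# expect_instance_states helper:
#   declare -A PENDING   # minion name -> instance id
#   for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
#     PENDING[${MINION_NAMES[$i]}]=${MINION_IDS[$i]}
#   done
#   while (( ${#PENDING[@]} > 0 )); do
#     for name in "${!PENDING[@]}"; do
#       state=$($AWS_CMD describe-instances --instance-ids ${PENDING[$name]} | expect_instance_states running)
#       [[ -z "$state" ]] && unset PENDING[$name]
#     done
#     (( ${#PENDING[@]} > 0 )) && sleep 3
#   done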
minion_id=${MINION_IDS[$i]}
wait-for-instance-running $minion_id
echo "Minion ${MINION_NAMES[$i]} running"
sleep 10
$AWS_CMD modify-instance-attribute --instance-id $minion_id --source-dest-check '{"Value": false}' > $LOG
$AWS_CMD create-route --route-table-id $ROUTE_TABLE_ID --destination-cidr-block ${MINION_IP_RANGES[$i]} --instance-id $minion_id > $LOG
done
FAIL=0
for job in `jobs -p`; do
wait $job || let "FAIL+=1"
done
if (( $FAIL != 0 )); then
echo "${FAIL} commands failed. Exiting."
exit 2
fi
detect-master > $LOG
detect-minions > $LOG
# Wait 3 minutes for cluster to come up. We hit it with a "highstate" after that to
# make sure that everything is well configured.
# TODO: Can we poll here?
echo "Waiting 3 minutes for cluster to settle"
local i
for (( i=0; i < 6*3; i++)); do
printf "."
sleep 10
done
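# A possible polling alternative to the fixed sleep above (illustrative sketch only,
# not enabled): wait until the apiserver answers on the same endpoint checked later,
# though that alone may not guarantee that salt has fully settled:
#   until curl --insecure --user ${KUBE_USER}:${KUBE_PASSWORD} --max-time 5 \
#       --fail --silent --output /dev/null https://${KUBE_MASTER_IP}/api/v1beta1/pods; do
#     sleep 10
#   done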
echo "Re-running salt highstate"
ssh -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" ubuntu@${KUBE_MASTER_IP} sudo salt '*' state.highstate > $LOG
echo "Waiting for cluster initialization."
echo
echo " This will continually check to see if the API for kubernetes is reachable."
echo " This might loop forever if there was some uncaught error during start"
echo " up."
echo
until curl --insecure --user ${KUBE_USER}:${KUBE_PASSWORD} --max-time 5 \
--fail --output $LOG --silent https://${KUBE_MASTER_IP}/api/v1beta1/pods; do
printf "."
sleep 2
done
echo "Kubernetes cluster created."
# TODO use token instead of kube_auth
export KUBE_CERT="/tmp/$RANDOM-kubecfg.crt"
export KUBE_KEY="/tmp/$RANDOM-kubecfg.key"
export CA_CERT="/tmp/$RANDOM-kubernetes.ca.crt"
export CONTEXT="aws_${INSTANCE_PREFIX}"
local kubectl="${KUBE_ROOT}/cluster/kubectl.sh"
# TODO: generate ADMIN (and KUBELET) tokens and put those in the master's
# config file. Distribute the same way the htpasswd is done.
(
umask 077
ssh -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" "ubuntu@${KUBE_MASTER_IP}" sudo cat /srv/kubernetes/kubecfg.crt >"${KUBE_CERT}" 2>"$LOG"
ssh -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" "ubuntu@${KUBE_MASTER_IP}" sudo cat /srv/kubernetes/kubecfg.key >"${KUBE_KEY}" 2>"$LOG"
ssh -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" "ubuntu@${KUBE_MASTER_IP}" sudo cat /srv/kubernetes/ca.crt >"${CA_CERT}" 2>"$LOG"
create-kubeconfig
)
echo "Sanity checking cluster..."
sleep 5
# Don't bail on errors, we want to be able to print some info.
set +e
# Basic sanity checking
local rc # Capture return code without exiting because of errexit bash option
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
# Make sure docker is installed and working.
local attempt=0
while true; do
local minion_name=${MINION_NAMES[$i]}
local minion_ip=${KUBE_MINION_IP_ADDRESSES[$i]}
echo -n Attempt "$(($attempt+1))" to check Docker on node "${minion_name} @ ${minion_ip}" ...
local output=$(ssh -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" ubuntu@$minion_ip sudo docker ps -a 2>/dev/null)
if [[ -z "${output}" ]]; then
if (( attempt > 9 )); then
echo
echo -e "${color_red}Docker failed to install on node ${minion_name}. Your cluster is unlikely" >&2
echo "to work correctly. Please run ./cluster/kube-down.sh and re-create the" >&2
echo -e "cluster. (sorry!)${color_norm}" >&2
exit 1
fi
# TODO: Reintroduce this (where does this container come from?)
# elif [[ "${output}" != *"kubernetes/pause"* ]]; then
# if (( attempt > 9 )); then
# echo
# echo -e "${color_red}Failed to observe kubernetes/pause on node ${minion_name}. Your cluster is unlikely" >&2
# echo "to work correctly. Please run ./cluster/kube-down.sh and re-create the" >&2
# echo -e "cluster. (sorry!)${color_norm}" >&2
# exit 1
# fi
else
echo -e " ${color_green}[working]${color_norm}"
break
fi
echo -e " ${color_yellow}[not working yet]${color_norm}"
# Start Docker, in case it failed to start.
ssh -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" ubuntu@$minion_ip sudo service docker start > $LOG 2>&1
attempt=$(($attempt+1))
sleep 30
done
done
echo
echo -e "${color_green}Kubernetes cluster is running. The master is running at:"
echo
echo -e "${color_yellow} https://${KUBE_MASTER_IP}"
echo
echo -e "${color_green}The user name and password to use is located in ${KUBECONFIG}.${color_norm}"
echo
}
function kube-down {
vpc_id=$(get_vpc_id)
if [[ -n "${vpc_id}" ]]; then
local elb_ids=$(get_elbs_in_vpc ${vpc_id})
if [[ -n ${elb_ids} ]]; then
echo "Deleting ELBs in: ${vpc_id}"
for elb_id in ${elb_ids}; do
$AWS_ELB_CMD delete-load-balancer --load-balancer-name=${elb_id}
done
echo "Waiting for ELBs to be deleted"
while true; do
elb_ids=$(get_elbs_in_vpc ${vpc_id})
if [[ -z "$elb_ids" ]]; then
echo "All ELBs deleted"
break
else
echo "ELBs not yet deleted: $elb_ids"
echo "Sleeping for 3 seconds..."
sleep 3
fi
done
fi
echo "Deleting instances in VPC: ${vpc_id}"
instance_ids=$($AWS_CMD --output text describe-instances \
--filters Name=vpc-id,Values=${vpc_id} \
Name=tag:KubernetesCluster,Values=${CLUSTER_ID} \
--query Reservations[].Instances[].InstanceId)
if [[ -n ${instance_ids} ]]; then
$AWS_CMD terminate-instances --instance-ids $instance_ids > $LOG
echo "Waiting for instances to be deleted"
while true; do
instance_states=$($AWS_CMD describe-instances --instance-ids $instance_ids | expect_instance_states terminated)
if [[ "$instance_states" == "" ]]; then
echo "All instances deleted"
break
else
echo "Instances not yet deleted: $instance_states"
echo "Sleeping for 3 seconds..."
sleep 3
fi
done
fi
echo "Deleting VPC: ${vpc_id}"
default_sg_id=$($AWS_CMD --output text describe-security-groups \
--filters Name=vpc-id,Values=$vpc_id Name=group-name,Values=default \
--query SecurityGroups[].GroupId \
| tr "\t" "\n")
sg_ids=$($AWS_CMD --output text describe-security-groups \
--filters Name=vpc-id,Values=$vpc_id \
--query SecurityGroups[].GroupId \
| tr "\t" "\n")
for sg_id in ${sg_ids}; do
# EC2 doesn't let us delete the default security group
if [[ "${sg_id}" != "${default_sg_id}" ]]; then
$AWS_CMD delete-security-group --group-id ${sg_id} > $LOG
fi
done
subnet_ids=$($AWS_CMD --output text describe-subnets \
--filters Name=vpc-id,Values=$vpc_id \
--query Subnets[].SubnetId \
| tr "\t" "\n")
for subnet_id in ${subnet_ids}; do
$AWS_CMD delete-subnet --subnet-id ${subnet_id} > $LOG
done
igw_ids=$($AWS_CMD --output text describe-internet-gateways \
--filters Name=attachment.vpc-id,Values=$vpc_id \
--query InternetGateways[].InternetGatewayId \
| tr "\t" "\n")
for igw_id in ${igw_ids}; do
$AWS_CMD detach-internet-gateway --internet-gateway-id $igw_id --vpc-id $vpc_id > $LOG
$AWS_CMD delete-internet-gateway --internet-gateway-id $igw_id > $LOG
done
route_table_ids=$($AWS_CMD --output text describe-route-tables \
--filters Name=vpc-id,Values=$vpc_id \
Name=route.destination-cidr-block,Values=0.0.0.0/0 \
--query RouteTables[].RouteTableId \
| tr "\t" "\n")
for route_table_id in ${route_table_ids}; do
$AWS_CMD delete-route --route-table-id $route_table_id --destination-cidr-block 0.0.0.0/0 > $LOG
done
$AWS_CMD delete-vpc --vpc-id $vpc_id > $LOG
fi
}
# Update a kubernetes cluster with latest source
function kube-push {
detect-master
# Make sure we have the tar files staged on Google Storage
find-release-tars
upload-server-tars
(
echo "#! /bin/bash"
echo "mkdir -p /var/cache/kubernetes-install"
echo "cd /var/cache/kubernetes-install"
echo "readonly SERVER_BINARY_TAR_URL='${SERVER_BINARY_TAR_URL}'"
echo "readonly SALT_TAR_URL='${SALT_TAR_URL}'"
grep -v "^#" "${KUBE_ROOT}/cluster/aws/templates/common.sh"
grep -v "^#" "${KUBE_ROOT}/cluster/aws/templates/download-release.sh"
echo "echo Executing configuration"
echo "sudo salt '*' mine.update"
echo "sudo salt --force-color '*' state.highstate"
) | ssh -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" ubuntu@${KUBE_MASTER_IP} sudo bash
get-password
echo
echo "Kubernetes cluster is running. The master is running at:"
echo
echo " https://${KUBE_MASTER_IP}"
echo
}
# -----------------------------------------------------------------------------
# Cluster specific test helpers used from hack/e2e-test.sh
# Execute prior to running tests to build a release if required for env.
#
# Assumed Vars:
# KUBE_ROOT
function test-build-release {
# Make a release
"${KUBE_ROOT}/build/release.sh"
}
# Execute prior to running tests to initialize required structure. This is
# called from hack/e2e.go only when running -up (it is run after kube-up).
#
# Assumed vars:
# Variables from config.sh
function test-setup {
echo "test-setup complete"
}
# Execute after running tests to perform any required clean-up. This is called
# from hack/e2e.go
function test-teardown {
echo "Shutting down test cluster."
"${KUBE_ROOT}/cluster/kube-down.sh"
}
# SSH to a node by name ($1) and run a command ($2).
function ssh-to-node {
local node="$1"
local cmd="$2"
local ip=$(get_instance_public_ip ${node})
if [[ -z "$ip" ]]; then
echo "Could not detect IP for ${node}."
exit 1
fi
for try in $(seq 1 5); do
if ssh -oLogLevel=quiet -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" ubuntu@${ip} "${cmd}"; then
break
fi
done
}
# Restart the kube-proxy on a node ($1)
function restart-kube-proxy {
ssh-to-node "$1" "sudo /etc/init.d/kube-proxy restart"
}
# Restart the kube-apiserver on a node ($1)
function restart-apiserver {
ssh-to-node "$1" "sudo /etc/init.d/kube-apiserver restart"
}
# Perform preparations required to run e2e tests
function prepare-e2e() {
# (AWS runs detect-project, I don't think we need to do anything)
# Note: we can't print anything here, or else the test tools will break with the extra output
return
}
|
wattsteve/kubernetes-1
|
cluster/aws/util.sh
|
Shell
|
apache-2.0
| 31,166 |
#!/usr/bin/env bash
#
# Prepare summary report per measure with confidence intervals
set -e
usage="Usage: $0 OUT_DIR MEASURE ..."
if [ "$#" -lt 2 ]; then
echo $usage
exit 1
fi
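# Example invocation (illustrative; MEASURE names are placeholders, not real measure names):
#   ./run_report_confidence.sh path/to/outdir MEASURE1 MEASURE2
# where path/to/outdir contains one *.confidence file per system.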
outdir=$1; shift # directory to which results are written
for measure in ${@}
do
echo "INFO preparing $measure report.."
# INITIALISE REPORT HEADER
report=$outdir/00report.$measure
echo -e "99%(\t95%(\t90%(\tscore\t)90%\t)95%\t)99%\tsystem" \
> $report
# ADD SYSTEM SCORES
(
for sys_eval in $outdir/*.confidence
do
cat $sys_eval \
| grep "^$measure" \
| grep "fscore" \
| awk 'BEGIN{OFS="\t"} {print $3,$4,$5,$6,$7,$8,$9}' \
| tr '\n' '\t'
basename $sys_eval \
| sed 's/\.confidence//'
done
) \
| sort -t$'\t' -k4 -nr \
>> $report
done
|
wikilinks/neleval
|
scripts/run_report_confidence.sh
|
Shell
|
apache-2.0
| 908 |
#!/bin/bash
set -e
cat <<-EOF
apiVersion: v1
kind: ConfigMap
metadata:
name: grafana-dashboards-0
data:
EOF
virtualenv -p python3 .env
source .env/bin/activate
pip install -Ur requirements.txt
for f in assets/grafana/*.dashboard.py
do
JSON_FILENAME="$(pwd)/${f%%.*}-dashboard.json"
generate-dashboard $f -o $JSON_FILENAME 2>&1 > /dev/null
done
for f in assets/grafana/*-dashboard.json
do
echo " $(basename $f): |+"
hack/scripts/wrap-dashboard.sh $f | sed "s/^/ /g"
done
for f in assets/grafana/*-datasource.json
do
echo " $(basename $f): |+"
cat $f | sed "s/^/ /g"
done
|
ngtuna/kubeless
|
vendor/github.com/coreos/prometheus-operator/contrib/kube-prometheus/hack/scripts/generate-dashboards-configmap.sh
|
Shell
|
apache-2.0
| 596 |
#!/bin/bash
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
set -e
# Error if we somehow forget to set the path to bazel_wrapper.py
set -u
BAZEL_WRAPPER_PATH=$1
set +u
# From this point on, logs can be publicly available
set -x
function setup_pip () {
python3.7 -m virtualenv tf_build_env --system-site-packages
source tf_build_env/bin/activate
install_macos_pip_deps
}
function run_build () {
# Run configure.
export TF_NEED_CUDA=0
export PYTHON_BIN_PATH=$(which python3.7)
yes "" | $PYTHON_BIN_PATH configure.py
tag_filters="-no_oss,-no_oss_py2,-gpu,-tpu,-benchmark-test,-nomac,-no_mac,-v1only"
# Get the default test targets for bazel.
source tensorflow/tools/ci_build/build_scripts/PRESUBMIT_BUILD_TARGETS.sh
"${BAZEL_WRAPPER_PATH}" \
test \
--build_tag_filters="${tag_filters}" \
--test_tag_filters="${tag_filters}" \
--action_env=PATH \
--remote_accept_cached=true \
--spawn_strategy=standalone \
--remote_local_fallback=false \
--remote_timeout=600 \
--strategy=Javac=standalone \
--strategy=Closure=standalone \
--genrule_strategy=standalone \
-- ${DEFAULT_BAZEL_TARGETS} -//tensorflow/lite/...
# Copy log to output to be available to GitHub
ls -la "$(bazel info output_base)/java.log"
cp "$(bazel info output_base)/java.log" "${KOKORO_ARTIFACTS_DIR}/"
}
source tensorflow/tools/ci_build/release/common.sh
install_bazelisk
setup_pip
run_build
|
gunan/tensorflow
|
tensorflow/tools/ci_build/presubmit/macos/py37_cc/build.sh
|
Shell
|
apache-2.0
| 2,078 |
set -ex
echo "pre-up sleep 2" >> /etc/network/interfaces
|
maier/packer-templates
|
debian9.1/scripts/01networking.sh
|
Shell
|
apache-2.0
| 58 |
#!/bin/bash
# Copyright Johns Hopkins University (Author: Daniel Povey) 2012. Apache 2.0.
# begin configuration section.
cmd=run.pl
stage=0
decode_mbr=true
beam=4 # Use a fairly narrow beam because lattice-align-words is slow-ish.
word_ins_penalty=0.5
min_lmwt=7
max_lmwt=17
cleanup=true
model=
#end configuration section.
#debugging stuff
echo $0 $@
[ -f ./path.sh ] && . ./path.sh
[ -f ./cmd.sh ] && . ./cmd.sh
. parse_options.sh || exit 1;
if [ $# -ne 4 ]; then
echo "Usage: $0 [options] <dataDir> <langDir|graphDir> <w2s-dir> <decodeDir>" && exit;
echo "This is as lattice_to_ctm.sh, but for syllable-based systems where we want to"
echo "obtain word-level ctms. Here, <w2s-dir> is a directory like data/local/w2s,"
echo "as created by run-6-syllables.sh. It contains:"
echo " G.fst, Ldet.fst, words.txt, word_align_lexicon.int"
echo " Options:"
echo " --cmd (run.pl|queue.pl...) # specify how to run the sub-processes."
echo " --stage (0|1) # (createCTM | filterCTM )."
exit 1;
fi
data=$1
lang=$2 # Note: may be graph directory not lang directory, but has the necessary stuff copied.
w2sdir=$3
dir=$4
if [ -z "$model" ] ; then
model=`dirname $dir`/final.mdl # Relative path does not work in some cases
#model=$dir/../final.mdl # assume model one level up from decoding dir.
#[ ! -f $model ] && model=`(set +P; cd $dir/../; pwd)`/final.mdl
fi
for f in $lang/words.txt $lang/phones/word_boundary.int \
$model $data/segments $data/reco2file_and_channel $dir/lat.1.gz \
$w2sdir/{G.fst,Ldet.fst,words.txt,word_align_lexicon.int}; do
[ ! -f $f ] && echo "$0: expecting file $f to exist" && exit 1;
done
name=`basename $data`; # e.g. eval2000
mkdir -p $dir/scoring/log
# we are counting the LM twice since we have both the original, syllable-level LM
# and the new, word-level one, so we scale by 0.5 to get a reasonably scaled
# LM cost.
if [ $stage -le 0 ]; then
nj=`cat $dir/num_jobs` || exit 1;
$cmd JOB=1:$nj $dir/scoring/log/get_word_lats.JOB.log \
lattice-compose "ark:gunzip -c $dir/lat.JOB.gz|" $w2sdir/Ldet.fst ark:- \| \
lattice-determinize ark:- ark:- \| \
lattice-compose ark:- $w2sdir/G.fst ark:- \| \
lattice-scale --lm-scale=0.5 ark:- "ark:|gzip -c >$dir/wlat.JOB.gz" || exit 1;
fi
if [ $stage -le 1 ]; then
$cmd LMWT=$min_lmwt:$max_lmwt $dir/scoring/log/get_ctm.LMWT.log \
mkdir -p $dir/score_LMWT/ '&&' \
lattice-scale --inv-acoustic-scale=LMWT "ark:gunzip -c $dir/wlat.*.gz|" ark:- \| \
lattice-add-penalty --word-ins-penalty=$word_ins_penalty ark:- ark:- \| \
lattice-prune --beam=$beam ark:- ark:- \| \
lattice-push ark:- ark:- \| \
lattice-align-words-lexicon --max-expand=10 --output-if-empty=true $w2sdir/word_align_lexicon.int $model ark:- ark:- \| \
lattice-to-ctm-conf --decode-mbr=$decode_mbr ark:- - \| \
utils/int2sym.pl -f 5 $w2sdir/words.txt \| \
utils/convert_ctm.pl $data/segments $data/reco2file_and_channel \
'>' $dir/score_LMWT/$name.ctm || exit 1;
fi
if [ $stage -le 2 ]; then
# Remove some stuff we don't want to score, from the ctm.
for x in $dir/score_*/$name.ctm; do
cp $x $x.bkup1;
cat $x.bkup1 | grep -v -E '\[NOISE|LAUGHTER|VOCALIZED-NOISE\]' | \
grep -v -E '<UNK>|%HESITATION|\(\(\)\)' | \
grep -v -E '<eps>' | \
grep -v -E '<noise>' | \
grep -v -E '<silence>' | \
grep -v -E '<unk>' | \
grep -v -E '<v-noise>' | \
perl -e '@list = (); %list = ();
while(<>) {
chomp;
@col = split(" ", $_);
push(@list, $_);
$key = "$col[0]" . " $col[1]";
$list{$key} = 1;
}
foreach(sort keys %list) {
$key = $_;
foreach(grep(/$key/, @list)) {
print "$_\n";
}
}' > $x;
done
fi
$cleanup && rm $dir/wlat.*.gz
echo "Lattice2CTM finished on " `date`
exit 0
|
irrawaddy28/babel
|
s5c/local/lattice_to_ctm_syllable.sh
|
Shell
|
apache-2.0
| 3,888 |
#!/bin/sh
function async_run()
{
{
$1 &> /dev/null
}&
}
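# Example (illustrative; this is how checkUpstream below uses it):
#   async_run "git fetch --quiet"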
function git_prompt_dir()
{
# assume the gitstatus.sh is in the same directory as this script
# code thanks to http://stackoverflow.com/questions/59895
if [ -z "$__GIT_PROMPT_DIR" ]; then
local SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do
local DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
SOURCE="$(readlink "$SOURCE")"
[[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE"
done
__GIT_PROMPT_DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
fi
}
function echoc() {
echo -e "${1}$2${ResetColor}" | sed 's/\\\]//g' | sed 's/\\\[//g'
}
function get_theme()
{
local CUSTOM_THEME_FILE="${HOME}/.git-prompt-colors.sh"
local DEFAULT_THEME_FILE="${__GIT_PROMPT_DIR}/themes/Default.bgptheme"
if [[ -z ${GIT_PROMPT_THEME} ]]; then
if [[ -r $CUSTOM_THEME_FILE ]]; then
GIT_PROMPT_THEME="Custom"
__GIT_PROMPT_THEME_FILE=$CUSTOM_THEME_FILE
else
GIT_PROMPT_THEME="Default"
__GIT_PROMPT_THEME_FILE=$DEFAULT_THEME_FILE
fi
else
if [[ "${GIT_PROMPT_THEME}" = "Custom" ]]; then
GIT_PROMPT_THEME="Custom"
__GIT_PROMPT_THEME_FILE=$CUSTOM_THEME_FILE
if [[ ! (-r $__GIT_PROMPT_THEME_FILE) ]]; then
GIT_PROMPT_THEME="Default"
__GIT_PROMPT_THEME_FILE=$DEFAULT_THEME_FILE
fi
else
local theme=""
# use default theme, if theme was not found
for themefile in `ls "$__GIT_PROMPT_DIR/themes"`; do
if [[ "${themefile}" = "${GIT_PROMPT_THEME}.bgptheme" ]]; then
theme=$GIT_PROMPT_THEME
fi
done
if [[ "${theme}" = "" ]]; then
GIT_PROMPT_THEME="Default"
fi
__GIT_PROMPT_THEME_FILE="${__GIT_PROMPT_DIR}/themes/${GIT_PROMPT_THEME}.bgptheme"
fi
fi
}
function git_prompt_load_theme()
{
get_theme
local DEFAULT_THEME_FILE="${__GIT_PROMPT_DIR}/themes/Default.bgptheme"
source "${DEFAULT_THEME_FILE}"
source "${__GIT_PROMPT_THEME_FILE}"
}
function git_prompt_list_themes()
{
local oldTheme
local oldThemeFile
git_prompt_dir
get_theme
for themefile in `ls "$__GIT_PROMPT_DIR/themes"`; do
local theme="$(basename $themefile .bgptheme)"
if [[ "${GIT_PROMPT_THEME}" = "${theme}" ]]; then
echoc ${Red} "*${theme}"
else
echo $theme
fi
done
if [[ "${GIT_PROMPT_THEME}" = "Custom" ]]; then
echoc ${Magenta} "*Custom"
else
echoc ${Blue} "Custom"
fi
}
function git_prompt_make_custom_theme() {
if [[ -r "${HOME}/.git-prompt-colors.sh" ]]; then
echoc ${Red} "You alread have created a custom theme!"
else
git_prompt_dir
local base="Default"
if [[ -n $1 && -r "${__GIT_PROMPT_DIR}/themes/${1}.bgptheme" ]]; then
base=$1
echoc ${Green} "Using theme ${Magenta}\"${base}\"${Green} as base theme!"
else
echoc ${Green} "Using theme ${Magenta}\"Default\"${Green} as base theme!"
fi
if [[ "${base}" = "Custom" ]]; then
echoc ${Red} "You cannot use the custom theme as base"
else
echoc ${Green} "Creating new cutom theme in \"${HOME}/.git-prompt-colors.sh\""
echoc ${DimYellow} "Please add ${Magenta}\"GIT_PROMPT_THEME=Custom\"${DimYellow} to your .bashrc to use this theme"
if [[ "${base}" == "Default" ]]; then
cp "${__GIT_PROMPT_DIR}/themes/Custom.bgptemplate" "${HOME}/.git-prompt-colors.sh"
else
cp "${__GIT_PROMPT_DIR}/themes/${base}.bgptheme" "${HOME}/.git-prompt-colors.sh"
fi
fi
fi
}
# gp_set_file_var ENVAR SOMEFILE
#
# If ENVAR is set, check that its value exists as a readable file. Otherwise,
# set ENVAR to the path to SOMEFILE, based on $HOME, $__GIT_PROMPT_DIR, and the
# directory of the current script. The SOMEFILE can be prefixed with '.', or
# not.
#
# Return 0 (success) if ENVAR not already defined, 1 (failure) otherwise.
function gp_set_file_var() {
local envar="$1"
local file="$2"
if eval "[[ -n \"\$$envar\" && -r \"\$$envar\" ]]" ; then # is envar set to a readable file?
local basefile
eval "basefile=\"\`basename \\\"\$$envar\\\"\`\"" # assign basefile
if [[ "$basefile" = "$file" || "$basefile" = ".$file" ]]; then
return 0
fi
else # envar is not set, or it's set to a different file than requested
eval "$envar=" # set empty envar
gp_maybe_set_envar_to_path "$envar" "$HOME/.$file" "$HOME/$file" "$HOME/lib/$file" && return 0
git_prompt_dir
gp_maybe_set_envar_to_path "$envar" "$__GIT_PROMPT_DIR/$file" "${0##*/}/$file" && return 0
fi
return 1
}
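# Illustrative use (mirrors the call in git_prompt_config below):
#   if gp_set_file_var __PROMPT_COLORS_FILE prompt-colors.sh ; then
#     source "$__PROMPT_COLORS_FILE"
#   fi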
# gp_maybe_set_envar_to_path ENVAR FILEPATH ...
#
# return 0 (true) if any FILEPATH is readable, set ENVAR to it
# return 1 (false) if not
function gp_maybe_set_envar_to_path(){
local envar="$1"
shift
local file
for file in "$@" ; do
if [[ -r "$file" ]]; then
eval "$envar=\"$file\""
return 0
fi
done
return 1
}
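# Illustrative use (mirrors the call in git_prompt_config below): pick the first
# readable candidate for the status helper script:
#   gp_maybe_set_envar_to_path __GIT_STATUS_CMD "$__GIT_PROMPT_DIR/gitstatus.sh"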
# git_prompt_reset
#
# unsets selected GIT_PROMPT variables, causing the next prompt callback to
# recalculate them from scratch.
git_prompt_reset() {
local var
for var in GIT_PROMPT_DIR __GIT_PROMPT_COLORS_FILE __PROMPT_COLORS_FILE __GIT_STATUS_CMD GIT_PROMPT_THEME_NAME; do
unset $var
done
}
# gp_format_exit_status RETVAL
#
# echos the symbolic signal name represented by RETVAL if the process was
# signalled, otherwise echos the original value of RETVAL
gp_format_exit_status() {
local RETVAL="$1"
local SIGNAL
# Suppress STDERR in case RETVAL is not an integer (in such cases, RETVAL
# is echoed verbatim)
if [ "${RETVAL}" -gt 128 ] 2>/dev/null; then
SIGNAL=$(( ${RETVAL} - 128 ))
kill -l "${SIGNAL}" 2>/dev/null || echo "${RETVAL}"
else
echo "${RETVAL}"
fi
}
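# Illustrative behaviour (assuming standard signal numbering):
#   gp_format_exit_status 130   -> "INT"  (130 - 128 = 2, i.e. SIGINT)
#   gp_format_exit_status 1     -> "1"    (not a signal, echoed verbatim)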
function git_prompt_config()
{
#Checking if root to change output
_isroot=false
[[ $UID -eq 0 ]] && _isroot=true
# There are two files related to colors:
#
# prompt-colors.sh -- sets generic color names suitable for bash `PS1` prompt
# git-prompt-colors.sh -- sets the GIT_PROMPT color scheme, using names from prompt-colors.sh
if gp_set_file_var __PROMPT_COLORS_FILE prompt-colors.sh ; then
source "$__PROMPT_COLORS_FILE" # outsource the color defs
else
echo 1>&2 "Cannot find prompt-colors.sh!"
fi
# source the user's ~/.git-prompt-colors.sh file, or the one that should be
# sitting in the same directory as this script
git_prompt_load_theme
if [ $GIT_PROMPT_LAST_COMMAND_STATE = 0 ]; then
LAST_COMMAND_INDICATOR="$GIT_PROMPT_COMMAND_OK";
else
LAST_COMMAND_INDICATOR="$GIT_PROMPT_COMMAND_FAIL";
fi
# replace _LAST_COMMAND_STATE_ token with the actual state
GIT_PROMPT_LAST_COMMAND_STATE=$(gp_format_exit_status ${GIT_PROMPT_LAST_COMMAND_STATE})
LAST_COMMAND_INDICATOR="${LAST_COMMAND_INDICATOR//_LAST_COMMAND_STATE_/${GIT_PROMPT_LAST_COMMAND_STATE}}"
# Do this only once to define PROMPT_START and PROMPT_END
if [[ -z "$PROMPT_START" || -z "$PROMPT_END" ]]; then
if [[ -z "$GIT_PROMPT_START" ]] ; then
if $_isroot; then
PROMPT_START="$GIT_PROMPT_START_ROOT"
else
PROMPT_START="$GIT_PROMPT_START_USER"
fi
else
PROMPT_START="$GIT_PROMPT_START"
fi
if [[ -z "$GIT_PROMPT_END" ]] ; then
if $_isroot; then
PROMPT_END="$GIT_PROMPT_END_ROOT"
else
PROMPT_END="$GIT_PROMPT_END_USER"
fi
else
PROMPT_END="$GIT_PROMPT_END"
fi
fi
# set GIT_PROMPT_LEADING_SPACE to 0 if you want to have no leading space in front of the GIT prompt
if [[ "$GIT_PROMPT_LEADING_SPACE" = 0 ]]; then
PROMPT_LEADING_SPACE=""
else
PROMPT_LEADING_SPACE=" "
fi
if [[ "$GIT_PROMPT_ONLY_IN_REPO" = 1 ]]; then
EMPTY_PROMPT="$OLD_GITPROMPT"
else
local ps=""
if [[ -n "$VIRTUAL_ENV" ]]; then
VENV=$(basename "${VIRTUAL_ENV}")
ps="${ps}${GIT_PROMPT_VIRTUALENV//_VIRTUALENV_/${VENV}}"
fi
if [[ -n "$CONDA_DEFAULT_ENV" ]]; then
VENV=$(basename "${CONDA_DEFAULT_ENV}")
ps="${ps}${GIT_PROMPT_VIRTUALENV//_VIRTUALENV_/${VENV}}"
fi
ps="$ps$PROMPT_START$($prompt_callback)$PROMPT_END"
EMPTY_PROMPT="${ps//_LAST_COMMAND_INDICATOR_/${LAST_COMMAND_INDICATOR}}"
fi
# fetch remote revisions every $GIT_PROMPT_FETCH_TIMEOUT (default 5) minutes
GIT_PROMPT_FETCH_TIMEOUT=${1-5}
if [[ -z "$__GIT_STATUS_CMD" ]] ; then # if GIT_STATUS_CMD not defined..
git_prompt_dir
if ! gp_maybe_set_envar_to_path __GIT_STATUS_CMD "$__GIT_PROMPT_DIR/gitstatus.sh" ; then
echo 1>&2 "Cannot find gitstatus.sh!"
fi
# __GIT_STATUS_CMD defined
fi
}
function setLastCommandState() {
GIT_PROMPT_LAST_COMMAND_STATE=$?
}
function we_are_on_repo() {
if [[ -e "$(git rev-parse --git-dir 2> /dev/null)" ]]; then
echo 1
fi
echo 0
}
function update_old_git_prompt() {
local in_repo=$(we_are_on_repo)
if [[ $GIT_PROMPT_OLD_DIR_WAS_GIT = 0 ]]; then
OLD_GITPROMPT=$PS1
fi
GIT_PROMPT_OLD_DIR_WAS_GIT=$in_repo
}
function setGitPrompt() {
update_old_git_prompt
local repo=`git rev-parse --show-toplevel 2> /dev/null`
if [[ ! -e "$repo" ]] && [[ "$GIT_PROMPT_ONLY_IN_REPO" = 1 ]]; then
# we do not permit bash-git-prompt outside git repos, so nothing to do
PS1="$OLD_GITPROMPT"
return
fi
local EMPTY_PROMPT
local __GIT_STATUS_CMD
git_prompt_config
if [[ ! -e "$repo" ]]; then
PS1="$EMPTY_PROMPT"
return
fi
local FETCH_REMOTE_STATUS=1
if [[ "$GIT_PROMPT_FETCH_REMOTE_STATUS" = 0 ]]; then
FETCH_REMOTE_STATUS=0
fi
unset GIT_PROMPT_IGNORE
if [[ -e "$repo/.bash-git-rc" ]]; then
source "$repo/.bash-git-rc"
fi
if [[ "$GIT_PROMPT_IGNORE" = 1 ]]; then
PS1="$EMPTY_PROMPT"
return
fi
if [[ "$FETCH_REMOTE_STATUS" = 1 ]]; then
checkUpstream
fi
updatePrompt
}
function checkUpstream() {
local GIT_PROMPT_FETCH_TIMEOUT
git_prompt_config
local FETCH_HEAD="$repo/.git/FETCH_HEAD"
# Fetch repo if local is stale for more than $GIT_PROMPT_FETCH_TIMEOUT minutes
if [[ ! -e "$FETCH_HEAD" || -e `find "$FETCH_HEAD" -mmin +$GIT_PROMPT_FETCH_TIMEOUT` ]]
then
if [[ -n $(git remote show) ]]; then
(
async_run "git fetch --quiet"
disown -h
)
fi
fi
}
function replaceSymbols()
{
if [[ -z ${GIT_PROMPT_SYMBOLS_NO_REMOTE_TRACKING} ]]; then
GIT_PROMPT_SYMBOLS_NO_REMOTE_TRACKING=L
fi
local VALUE=${1//_AHEAD_/${GIT_PROMPT_SYMBOLS_AHEAD}}
local VALUE1=${VALUE//_BEHIND_/${GIT_PROMPT_SYMBOLS_BEHIND}}
local VALUE2=${VALUE1//_NO_REMOTE_TRACKING_/${GIT_PROMPT_SYMBOLS_NO_REMOTE_TRACKING}}
echo ${VALUE2//_PREHASH_/${GIT_PROMPT_SYMBOLS_PREHASH}}
}
function updatePrompt() {
local LAST_COMMAND_INDICATOR
local PROMPT_LEADING_SPACE
local PROMPT_START
local PROMPT_END
local EMPTY_PROMPT
local Blue="\[\033[0;34m\]"
git_prompt_config
export __GIT_PROMPT_IGNORE_STASH=${GIT_PROMPT_IGNORE_STASH}
local -a GitStatus
GitStatus=($("$__GIT_STATUS_CMD" 2>/dev/null))
local GIT_BRANCH=$(replaceSymbols ${GitStatus[0]})
local GIT_REMOTE="$(replaceSymbols ${GitStatus[1]})"
if [[ "." == "$GIT_REMOTE" ]]; then
unset GIT_REMOTE
fi
local GIT_STAGED=${GitStatus[2]}
local GIT_CONFLICTS=${GitStatus[3]}
local GIT_CHANGED=${GitStatus[4]}
local GIT_UNTRACKED=${GitStatus[5]}
local GIT_STASHED=${GitStatus[6]}
local GIT_CLEAN=${GitStatus[7]}
local NEW_PROMPT="$EMPTY_PROMPT"
if [[ -n "$GitStatus" ]]; then
local STATUS="${PROMPT_LEADING_SPACE}${GIT_PROMPT_PREFIX}${GIT_PROMPT_BRANCH}${GIT_BRANCH}${ResetColor}"
# __chk_gitvar_status KIND VALEXPR INSERT
# eg: __chk_gitvar_status 'STAGED' '-ne 0'
__chk_gitvar_status() {
local v
if [[ "x$2" == "x-n" ]] ; then
v="$2 \"\$GIT_$1\""
else
v="\$GIT_$1 $2"
fi
if eval "test $v" ; then
if [[ $# -lt 2 || "$3" != '-' ]]; then
__add_status "\$GIT_PROMPT_$1\$GIT_$1\$ResetColor"
else
__add_status "\$GIT_PROMPT_$1\$ResetColor"
fi
fi
}
__add_gitvar_status() {
__add_status "\$GIT_PROMPT_$1\$GIT_$1\$ResetColor"
}
# __add_status SOMETEXT
__add_status() {
eval "STATUS=\"$STATUS$1\""
}
__chk_gitvar_status 'REMOTE' '-n'
__add_status "$GIT_PROMPT_SEPARATOR"
__chk_gitvar_status 'STAGED' '-ne 0'
__chk_gitvar_status 'CONFLICTS' '-ne 0'
__chk_gitvar_status 'CHANGED' '-ne 0'
__chk_gitvar_status 'UNTRACKED' '-ne 0'
__chk_gitvar_status 'STASHED' '-ne 0'
__chk_gitvar_status 'CLEAN' '-eq 1' -
__add_status "$ResetColor$GIT_PROMPT_SUFFIX"
NEW_PROMPT=""
if [[ -n "$VIRTUAL_ENV" ]]; then
VENV=$(basename "${VIRTUAL_ENV}")
NEW_PROMPT="$NEW_PROMPT${GIT_PROMPT_VIRTUALENV//_VIRTUALENV_/${VENV}}"
fi
if [[ -n "$CONDA_DEFAULT_ENV" ]]; then
VENV=$(basename "${CONDA_DEFAULT_ENV}")
NEW_PROMPT="$NEW_PROMPT${GIT_PROMPT_VIRTUALENV//_VIRTUALENV_/${VENV}}"
fi
NEW_PROMPT="$NEW_PROMPT$PROMPT_START$($prompt_callback)$STATUS$PROMPT_END"
else
NEW_PROMPT="$EMPTY_PROMPT"
fi
PS1="${NEW_PROMPT//_LAST_COMMAND_INDICATOR_/${LAST_COMMAND_INDICATOR}}"
}
function prompt_callback_default {
return
}
function gp_install_prompt {
if [ "`type -t prompt_callback`" = 'function' ]; then
prompt_callback="prompt_callback"
else
prompt_callback="prompt_callback_default"
fi
if [ -z "$OLD_GITPROMPT" ]; then
OLD_GITPROMPT=$PS1
fi
if [ -z "$GIT_PROMPT_OLD_DIR_WAS_GIT" ]; then
GIT_PROMPT_OLD_DIR_WAS_GIT=$(we_are_on_repo)
fi
if [ -z "$PROMPT_COMMAND" ]; then
PROMPT_COMMAND=setGitPrompt
else
PROMPT_COMMAND=${PROMPT_COMMAND%% }; # remove trailing spaces
PROMPT_COMMAND=${PROMPT_COMMAND%\;}; # remove trailing semi-colon
local new_entry="setGitPrompt"
case ";$PROMPT_COMMAND;" in
*";$new_entry;"*)
# echo "PROMPT_COMMAND already contains: $new_entry"
:;;
*)
PROMPT_COMMAND="$PROMPT_COMMAND;$new_entry"
# echo "PROMPT_COMMAND does not contain: $new_entry"
;;
esac
fi
local setLastCommandStateEntry="setLastCommandState"
case ";$PROMPT_COMMAND;" in
*";$setLastCommandStateEntry;"*)
# echo "PROMPT_COMMAND already contains: $setLastCommandStateEntry"
:;;
*)
PROMPT_COMMAND="$setLastCommandStateEntry;$PROMPT_COMMAND"
# echo "PROMPT_COMMAND does not contain: $setLastCommandStateEntry"
;;
esac
git_prompt_dir
source "$__GIT_PROMPT_DIR/git-prompt-help.sh"
}
gp_install_prompt
|
danfinn/bash-git-prompt
|
gitprompt.sh
|
Shell
|
bsd-2-clause
| 14,587 |
#!/bin/sh
unzip -o GfxBAT.zip
chmod +x runbat.sh
echo "#!/bin/sh
export JAVA_HOME=/usr
sh ./runbat.sh > \$THIS_RUN_TIME.result
if [ \$? -eq 0 ]; then
echo 'PASS' > \$LOG_FILE
else
echo 'FAIL' > \$LOG_FILE
fi" > jgfxbat
chmod +x jgfxbat
|
JanGe/intellicloudbench
|
src/main/resources/test-profiles/pts/jgfxbat-1.1.0/install.sh
|
Shell
|
bsd-3-clause
| 245 |
#!/bin/sh
sloccount Source
|
MovementAndMeaning/Utilities_ChannelManager
|
sloc.sh
|
Shell
|
bsd-3-clause
| 28 |
#!/bin/sh
TEST_PURPOSE=regress
TEST_PROB_REPORT=0
TEST_TYPE=umlplutotest
TESTNAME=ikev2-07-biddown
EASTHOST=east
WESTHOST=west
WEST_ARPREPLY=1
EAST_INPUT=../../klips/inputs/01-sunrise-sunset-ping.pcap
REF_WEST_OUTPUT=../../klips/west-icmp-01/spi1-cleartext.txt
WEST_ARPREPLY=true
#THREEEIGHT=true
REF_EAST_CONSOLE_OUTPUT=east-console.txt
REF26_EAST_CONSOLE_OUTPUT=east-console.txt
REF_WEST_CONSOLE_OUTPUT=west-console.txt
REF26_WEST_CONSOLE_OUTPUT=west-console.txt
REF_WEST_FILTER=../../klips/fixups/no-arp-pcap2.pl
REF_CONSOLE_FIXUPS="kern-list-fixups.sed nocr.sed"
REF_CONSOLE_FIXUPS="$REF_CONSOLE_FIXUPS east-prompt-splitline.pl"
REF_CONSOLE_FIXUPS="$REF_CONSOLE_FIXUPS script-only.sed"
REF_CONSOLE_FIXUPS="$REF_CONSOLE_FIXUPS cutout.sed"
REF_CONSOLE_FIXUPS="$REF_CONSOLE_FIXUPS wilog.sed"
REF_CONSOLE_FIXUPS="$REF_CONSOLE_FIXUPS klips-debug-sanitize.sed"
REF_CONSOLE_FIXUPS="$REF_CONSOLE_FIXUPS ipsec-setup-sanitize.sed"
REF_CONSOLE_FIXUPS="$REF_CONSOLE_FIXUPS pluto-whack-sanitize.sed"
REF_CONSOLE_FIXUPS="$REF_CONSOLE_FIXUPS host-ping-sanitize.sed"
REF_CONSOLE_FIXUPS="$REF_CONSOLE_FIXUPS ipsec-look-esp-sanitize.pl"
EAST_INIT_SCRIPT=eastinit.sh
WEST_INIT_SCRIPT=westinit.sh
WEST_RUN_SCRIPT=westrun.sh
EAST_FINAL_SCRIPT=final.sh
WEST_FINAL_SCRIPT=final.sh
|
y-trudeau/openswan-patch-meraki
|
testing/pluto/ikev2-07-biddown/testparams.sh
|
Shell
|
gpl-2.0
| 1,273 |
#!/bin/bash
#----------------------------------------------------------
#
# Copyright 2009 Pedro Pablo Gomez-Martin,
# Marco Antonio Gomez-Martin
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#----------------------------------------------------------
# Walks over every .jpg and .png file in the directory and
# converts them to eps using the sam2p program.
# Originally based on:
# http://www.tug.org/pipermail/texhax/2003-June/000396.html
# http://amath.colorado.edu/documentation/LaTeX/reference/figures.html
# although by now it no longer has much to do with them :-)
# Converts the .jpg or .png file received as the first
# parameter to .eps. A few checks are performed on the
# file (that its name is not duplicated).
# If there is any problem it returns 1; otherwise it returns 0.
function convert {
f=$1
# Make sure there is not both a .jpg and a .png with the
# same name (they would end up producing the same .eps).
jpg=$(echo $f | sed s/png$/jpg/)
png=$(echo $f | sed s/jpg$/png/)
if [ -e $jpg ] && [ -e $png ]; then
echo "Repetición de nombres en $jpg y $png no permitida." > /dev/stderr
exit 1
fi
# Build the file name with the .eps extension.
# Since we do not know whether we have a .jpg or a .png,
# we substitute both extensions and we are done.
eps=$(echo $f | sed s/png$/eps/)
eps=$(echo $eps | sed s/jpg$/eps/)
echo -n "$f --> $eps ... "
# If the $eps file exists and is newer than the original,
# we skip the conversion.
[ $eps -nt $f ] && echo "$eps is up to date." && return 0
# We check whether sam2p is installed this "deep" in the code
# so that it only fails if we are actually going to use it.
# This means we check it for every image... The other option
# would have been to check it directly in updateAll.sh, but in
# that case, if we do not have sam2p but also have no images
# to convert, we would end up with an "error" that is not
# really a problem.
# In fact we do this even after having checked that the .eps
# is not already up to date, so that we do not complain
# when the file really does exist.
if ! which sam2p > /dev/null; then
echo "El programa sam2p no está disponible." > /dev/stderr
echo "No pueden convertirse las imagenes de mapas de bits." > /dev/stderr
exit 1
fi
sam2p $f $eps >/dev/null 2>&1 && echo "Done." || (echo "ERROR." && rm -f $eps && exit 1)
}
for f in {*.jpg,*.png}; do
[ ! -e $f ] && continue
# Needed because the for loop puts a literal
# *.jpg or *.png into f if there is no jpg
# or png file, so we make sure the file
# actually exists.
# Convert every file that was found
if ! convert $f; then
exit 1
fi
done
|
yunxao/JN-Sim
|
Documentacion/Imagenes/Bitmap/update-eps.sh
|
Shell
|
apache-2.0
| 3,403 |
#!/bin/bash -e
#
# Mint (C) 2017 Minio, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Always install the latest.
python -m pip install s3cmd
|
aead/minio
|
mint/build/s3cmd/install.sh
|
Shell
|
apache-2.0
| 661 |
CTAGS=$1
chmod u+x ./foo.sh
${CTAGS} --quiet --options=NONE -o - \
--langdef=foo \
--xcmd-foo=./foo.sh \
--foo-kinds=+a \
--foo-kinds=-b \
--list-kinds=foo
|
SolaWing/ctags-1
|
Tmain/lxcmd-list-kinds.d/run.sh
|
Shell
|
gpl-2.0
| 173 |
#!/bin/sh
if [ -z "$1" ] ; then
echo "Using Barry default web doc config..."
for f in `cat content_list` ; do
echo "Generating $f.html"
cat prepend.php php_conf1.php $f.php append.php | php > $f.html
done
elif [ "$1" = "netdirect" ] ; then
echo "Using NetDirect web doc config..."
mkdir -p www.netdirect.ca/sites/www.netdirect.ca/files/barry/doxygen
mkdir -p www.netdirect.ca/sites/www.netdirect.ca/files/images/barry
cp *.png www.netdirect.ca/sites/www.netdirect.ca/files/images/barry
mkdir -p www.netdirect.ca/pastefiles
echo "For index files..."
for f in `cat content_list | grep index` ; do
echo "Generating $f.html"
cat php_conf2.php $f.php | php > www.netdirect.ca/pastefiles/$f.html
done
echo "For non-index files..."
for f in `cat content_list | grep -v index` ; do
echo "Generating $f.html"
cat php_conf3.php $f.php | php > www.netdirect.ca/pastefiles/$f.html
done
fi
|
RealVNC/barry
|
doc/www/static.sh
|
Shell
|
gpl-2.0
| 908 |
#!/bin/sh
# This script performs a major compaction on all Hbase tables.
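# Typical invocation (illustrative; the HBase install path is an assumption):
#   HBASE_HOME=/opt/hbase ./hbase-major-compaction.sh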
cd ${HBASE_HOME}
command="echo list"
echo "Running $command in hbase shell" >&2
listoutput=`$command | bin/hbase shell | sed -n '/TABLE/,/seconds/p' | tail -n+2 | head -n -1`
for table in ${listoutput}
do
command="echo major_compact '${table}'"
echo "Running $command in hbase shell" >&2
$command | bin/hbase shell
done
exit 0
|
LLNL/magpie
|
scripts/job-scripts/hbase-major-compaction.sh
|
Shell
|
gpl-2.0
| 416 |
#!/usr/bin/env bash
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# Usage:
# ci_parameterized_build.sh
#
# The script obeys the following required environment variables:
# TF_BUILD_CONTAINER_TYPE: (CPU | GPU | GPU_CLANG | ANDROID | ANDROID_FULL)
# TF_BUILD_PYTHON_VERSION: (PYTHON2 | PYTHON3 | PYTHON3.5)
# TF_BUILD_IS_PIP: (NO_PIP | PIP | BOTH)
#
# The environment variable below is required, but it will be deprecated together
# with TF_BUILD_MAVX; both will be replaced by TF_BUILD_OPTIONS.
# TF_BUILD_IS_OPT: (NO_OPT | OPT)
#
# Note:
# 1) Certain combinations of parameter values are regarded
# as invalid and will cause the script to exit with code 0. For example:
# NO_OPT & PIP (PIP builds should always use OPT)
# ANDROID & PIP (Android and PIP builds are mutually exclusive)
#
# 2) If TF_BUILD_PYTHON_VERSION is set to PYTHON3, the build will use the version
# pointed to by "which python3" on the system, which is typically python3.4. To
# build for python3.5, set the environment variable to PYTHON3.5
#
#
# Additionally, the script follows the directions of optional environment
# variables:
# TF_BUILD_DRY_RUN: If it is set to any non-empty value that is not "0",
# the script will just generate and print the final
# command, but not actually run it.
# TF_BUILD_APPEND_CI_DOCKER_EXTRA_PARAMS:
# String appended to the content of CI_DOCKER_EXTRA_PARAMS
# TF_BUILD_APPEND_ARGUMENTS:
# Additional command line arguments for the bazel,
# pip.sh or android.sh command
# TF_BUILD_MAVX: (Soon to be deprecated, use TF_BUILD_OPTIONS instead)
# (unset | MAVX | MAVX2)
# If set to MAVX or MAVX2, will cause bazel to use the
# additional flag --copt=-mavx or --copt=-mavx2, to
# perform AVX or AVX2 builds, respectively. This requires
# AVX- or AVX2-compatible CPUs.
# TF_BUILD_ENABLE_XLA:
# If it is set to any non-empty value that is not "0",
# will enable XLA and run XLA tests.
# TF_BUILD_BAZEL_TARGET:
# Used to override the default bazel build target:
# //tensorflow/... -//tensorflow/compiler
# TF_BUILD_BAZEL_CLEAN:
# Will perform "bazel clean", if and only if this variable
# is set to any non-empty and non-0 value
# TF_GPU_COUNT:
# Run this many parallel tests for serial builds.
# For now, this can only be edited for PIP builds.
# TODO(gunan): Find a way to pass this environment variable
# to the script bazel runs (using --run_under).
# TF_BUILD_TEST_TUTORIALS:
# If set to any non-empty and non-0 value, will perform
# tutorials tests (Applicable only if TF_BUILD_IS_PIP is
# PIP or BOTH).
# See builds/test_tutorials.sh
# TF_BUILD_INTEGRATION_TESTS:
# If set this will perform integration tests. See
# builds/integration_tests.sh.
# TF_BUILD_RUN_BENCHMARKS:
# If set to any non-empty and non-0 value, will perform
# the benchmark tests (see *_logged_benchmark targets in
# tools/test/BUILD)
# TF_BUILD_DISABLE_GCP:
# If set to any non-empty and non-0 value, will disable
# support for Google Cloud Platform (GCP), which is
# enabled by default.
# TF_BUILD_OPTIONS:
# (FASTBUILD | OPT | OPTDBG | MAVX | MAVX2_FMA | MAVX_DBG |
# MAVX2_FMA_DBG)
# Use the specified configurations when building.
# When set, overrides TF_BUILD_IS_OPT and TF_BUILD_MAVX
# options, as this will replace the two.
# TF_SKIP_CONTRIB_TESTS:
# If set to any non-empty or non-0 value, will skip running
# contrib tests.
#
# This script can be used by Jenkins parameterized / matrix builds.
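# Example invocation (illustrative values only, picked from the option lists above;
# not taken from the original documentation):
#   TF_BUILD_CONTAINER_TYPE=CPU TF_BUILD_PYTHON_VERSION=PYTHON2 \
#   TF_BUILD_IS_OPT=OPT TF_BUILD_IS_PIP=NO_PIP \
#   tensorflow/tools/ci_build/ci_parameterized_build.sh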
# TODO(jhseu): Temporary for the gRPC pull request due to the
# protobuf -> protobuf_archive rename. Remove later.
TF_BUILD_BAZEL_CLEAN=1
# Helper function: Convert to lower case
to_lower () {
echo "$1" | tr '[:upper:]' '[:lower:]'
}
# Helper function: Strip leading and trailing whitespaces
str_strip () {
echo -e "$1" | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//'
}
# Helper function: Exit on failure
die () {
echo $@
exit 1
}
##########################################################
# Default configuration
CI_BUILD_DIR="tensorflow/tools/ci_build"
# Command to call when Docker is available
DOCKER_MAIN_CMD="${CI_BUILD_DIR}/ci_build.sh"
# Command to call when Docker is unavailable
NO_DOCKER_MAIN_CMD="${CI_BUILD_DIR}/builds/configured"
# Additional option flags to apply when Docker is unavailable (e.g., on Mac)
NO_DOCKER_OPT_FLAG="--genrule_strategy=standalone"
DO_DOCKER=1
BAZEL_CMD="bazel test"
BAZEL_BUILD_ONLY_CMD="bazel build"
BAZEL_CLEAN_CMD="bazel clean"
PIP_CMD="${CI_BUILD_DIR}/builds/pip.sh"
PIP_TEST_TUTORIALS_FLAG="--test_tutorials"
PIP_INTEGRATION_TESTS_FLAG="--integration_tests"
ANDROID_CMD="${CI_BUILD_DIR}/builds/android.sh"
ANDROID_FULL_CMD="${CI_BUILD_DIR}/builds/android_full.sh"
TF_GPU_COUNT=${TF_GPU_COUNT:-8}
PARALLEL_GPU_TEST_CMD='//tensorflow/tools/ci_build/gpu_build:parallel_gpu_execute'
BENCHMARK_CMD="${CI_BUILD_DIR}/builds/benchmark.sh"
EXTRA_PARAMS=""
export TF_BUILD_ENABLE_XLA=${TF_BUILD_ENABLE_XLA:-0}
if [[ -z $TF_BUILD_ENABLE_XLA ]] || [ $TF_BUILD_ENABLE_XLA == 0 ]; then
BAZEL_TARGET="//tensorflow/... -//tensorflow/compiler/..."
else
BAZEL_TARGET="//tensorflow/compiler/..."
EXTRA_PARAMS="${EXTRA_PARAMS} -e TF_BUILD_ENABLE_XLA=1"
fi
if [[ -n "$TF_SKIP_CONTRIB_TESTS" ]]; then
BAZEL_TARGET="$BAZEL_TARGET -//tensorflow/contrib/..."
fi
TUT_TEST_DATA_DIR="/tmp/tf_tutorial_test_data"
##########################################################
echo "Parameterized build starts at: $(date)"
echo ""
START_TIME=$(date +'%s')
# Convert all the required environment variables to lower case
TF_BUILD_CONTAINER_TYPE=$(to_lower ${TF_BUILD_CONTAINER_TYPE})
TF_BUILD_PYTHON_VERSION=$(to_lower ${TF_BUILD_PYTHON_VERSION})
TF_BUILD_IS_OPT=$(to_lower ${TF_BUILD_IS_OPT})
TF_BUILD_IS_PIP=$(to_lower ${TF_BUILD_IS_PIP})
if [[ ! -z "${TF_BUILD_MAVX}" ]]; then
TF_BUILD_MAVX=$(to_lower ${TF_BUILD_MAVX})
fi
# Print parameter values
echo "Required build parameters:"
echo " TF_BUILD_CONTAINER_TYPE=${TF_BUILD_CONTAINER_TYPE}"
echo " TF_BUILD_PYTHON_VERSION=${TF_BUILD_PYTHON_VERSION}"
echo " TF_BUILD_IS_OPT=${TF_BUILD_IS_OPT}"
echo " TF_BUILD_IS_PIP=${TF_BUILD_IS_PIP}"
echo "Optional build parameters:"
echo " TF_BUILD_DRY_RUN=${TF_BUILD_DRY_RUN}"
echo " TF_BUILD_MAVX=${TF_BUILD_MAVX}"
echo " TF_BUILD_APPEND_CI_DOCKER_EXTRA_PARAMS="\
"${TF_BUILD_APPEND_CI_DOCKER_EXTRA_PARAMS}"
echo " TF_BUILD_APPEND_ARGUMENTS=${TF_BUILD_APPEND_ARGUMENTS}"
echo " TF_BUILD_BAZEL_TARGET=${TF_BUILD_BAZEL_TARGET}"
echo " TF_BUILD_BAZEL_CLEAN=${TF_BUILD_BAZEL_CLEAN}"
echo " TF_BUILD_TEST_TUTORIALS=${TF_BUILD_TEST_TUTORIALS}"
echo " TF_BUILD_INTEGRATION_TESTS=${TF_BUILD_INTEGRATION_TESTS}"
echo " TF_BUILD_RUN_BENCHMARKS=${TF_BUILD_RUN_BENCHMARKS}"
echo " TF_BUILD_DISABLE_GCP=${TF_BUILD_DISABLE_GCP}"
echo " TF_BUILD_OPTIONS=${TF_BUILD_OPTIONS}"
echo " TF_BUILD_ENABLE_XLA=${TF_BUILD_ENABLE_XLA}"
# Function that tries to determine CUDA capability, if deviceQuery binary
# is available on path
function get_cuda_capability_version() {
if [[ ! -z $(which deviceQuery) ]]; then
# The first listed device is used
deviceQuery | grep "CUDA Capability .* version" | \
head -1 | awk '{print $NF}'
fi
}
# Container type, e.g., CPU, GPU
CTYPE=${TF_BUILD_CONTAINER_TYPE}
# Determine if Docker is available
OPT_FLAG=""
if [[ -z "$(which docker)" ]]; then
DO_DOCKER=0
echo "It appears that Docker is not available on this system. "\
"Will perform build without Docker."
echo "Also, the additional option flags will be applied to the build:"
echo " ${NO_DOCKER_OPT_FLAG}"
MAIN_CMD="${NO_DOCKER_MAIN_CMD} ${CTYPE}"
OPT_FLAG="${OPT_FLAG} ${NO_DOCKER_OPT_FLAG}"
fi
# Process container type
if [[ ${CTYPE} == "cpu" ]] || [[ ${CTYPE} == "debian.jessie.cpu" ]]; then
:
elif [[ ${CTYPE} == "gpu" ]] || [[ ${CTYPE} == "gpu_clang" ]]; then
if [[ ${CTYPE} == "gpu" ]]; then
OPT_FLAG="${OPT_FLAG} --config=cuda"
else # ${CTYPE} == "gpu_clang"
OPT_FLAG="${OPT_FLAG} --config=cuda_clang"
fi
# Attempt to determine CUDA capability version automatically and use it if
# CUDA capability version is not specified by the environment variables.
CUDA_CAPA_VER=$(get_cuda_capability_version)
if [[ ! -z ${CUDA_CAPA_VER} ]]; then
AUTO_CUDA_CAPA_VER=0
if [[ ${DO_DOCKER} == "1" ]] && \
[[ "${TF_BUILD_APPEND_CI_DOCKER_EXTRA_PARAMS}" != \
*"TF_CUDA_COMPUTE_CAPABILITIES="* ]]; then
AUTO_CUDA_CAPA_VER=1
TF_BUILD_APPEND_CI_DOCKER_EXTRA_PARAMS=\
"${TF_BUILD_APPEND_CI_DOCKER_EXTRA_PARAMS} -e "\
"TF_CUDA_COMPUTE_CAPABILITIES=${CUDA_CAPA_VER}"
echo "Docker GPU build: TF_BUILD_APPEND_CI_DOCKER_EXTRA_PARAMS="\
"\"${TF_BUILD_APPEND_CI_DOCKER_EXTRA_PARAMS}\""
elif [[ ${DO_DOCKER} == "0" ]] && \
[[ -z "${TF_CUDA_COMPUTE_CAPABILITIES}" ]]; then
AUTO_CUDA_CAPA_VER=1
TF_CUDA_COMPUTE_CAPABILITIES="${CUDA_CAPA_VER}"
echo "Non-Docker GPU build: TF_CUDA_COMPUTE_CAPABILITIES="\
"\"${TF_CUDA_COMPUTE_CAPABILITIES}\""
fi
if [[ ${AUTO_CUDA_CAPA_VER} == "1" ]]; then
echo "TF_CUDA_COMPUTE_CAPABILITIES is not set:"
echo "Using CUDA capability version from deviceQuery: ${CUDA_CAPA_VER}"
echo ""
fi
fi
elif [[ ${CTYPE} == "android" ]] || [[ ${CTYPE} == "android_full" ]]; then
:
else
die "Unrecognized value in TF_BUILD_CONTAINER_TYPE: "\
"\"${TF_BUILD_CONTAINER_TYPE}\""
fi
# Determine if this is a benchmarks job
RUN_BENCHMARKS=0
if [[ ! -z "${TF_BUILD_RUN_BENCHMARKS}" ]] &&
[[ "${TF_BUILD_RUN_BENCHMARKS}" != "0" ]]; then
RUN_BENCHMARKS=1
fi
# Process Bazel "-c opt" flag
if [[ -z "${TF_BUILD_OPTIONS}" ]]; then
if [[ ${TF_BUILD_IS_OPT} == "no_opt" ]]; then
# PIP builds are done only with the -c opt flag
if [[ ${TF_BUILD_IS_PIP} == "pip" ]]; then
echo "Skipping parameter combination: ${TF_BUILD_IS_OPT} & "\
"${TF_BUILD_IS_PIP}"
exit 0
fi
elif [[ ${TF_BUILD_IS_OPT} == "opt" ]]; then
OPT_FLAG="${OPT_FLAG} -c opt"
else
die "Unrecognized value in TF_BUILD_IS_OPT: \"${TF_BUILD_IS_OPT}\""
fi
# Process MAVX option
if [[ ! -z "${TF_BUILD_MAVX}" ]]; then
if [[ "${TF_BUILD_MAVX}" == "mavx" ]]; then
OPT_FLAG="${OPT_FLAG} --copt=-mavx"
elif [[ "${TF_BUILD_MAVX}" == "mavx2" ]]; then
OPT_FLAG="${OPT_FLAG} --copt=-mavx2"
else
die "Unsupported value in TF_BUILD_MAVX: ${TF_BUILD_MAVX}"
fi
fi
else
case $TF_BUILD_OPTIONS in
FASTBUILD)
echo "Running FASTBUILD mode (noopt, nodbg)."
;;
OPT)
OPT_FLAG="${OPT_FLAG} -c opt"
;;
OPTDBG)
OPT_FLAG="${OPT_FLAG} -c opt --copt=-g"
;;
MAVX)
OPT_FLAG="${OPT_FLAG} -c opt --copt=-mavx"
;;
MAVX_DBG)
OPT_FLAG="${OPT_FLAG} -c opt --copt=-g --copt=-mavx"
;;
MAVX2_FMA)
OPT_FLAG="${OPT_FLAG} -c opt --copt=-mavx2 --copt=-mfma"
;;
MAVX2_FMA_DBG)
OPT_FLAG="${OPT_FLAG} -c opt --copt=-g --copt=-mavx2 --copt=-mfma"
;;
esac
fi
# Strip whitespaces from OPT_FLAG
OPT_FLAG=$(str_strip "${OPT_FLAG}")
# 1) Filter out benchmark tests if this is not a benchmarks job;
# 2) Filter out tests with the "nomac" tag if the build is on Mac OS X.
EXTRA_ARGS=""
IS_MAC=0
if [[ "$(uname)" == "Darwin" ]]; then
IS_MAC=1
fi
if [[ "${TF_BUILD_APPEND_ARGUMENTS}" == *"--test_tag_filters="* ]]; then
ITEMS=(${TF_BUILD_APPEND_ARGUMENTS})
for ITEM in "${ITEMS[@]}"; do
if [[ ${ITEM} == *"--test_tag_filters="* ]]; then
NEW_ITEM="${ITEM}"
if [[ ${NEW_ITEM} != *"benchmark-test"* ]]; then
NEW_ITEM="${NEW_ITEM},-benchmark-test"
fi
if [[ ${IS_MAC} == "1" ]] && [[ ${NEW_ITEM} != *"nomac"* ]]; then
NEW_ITEM="${NEW_ITEM},-nomac"
fi
EXTRA_ARGS="${EXTRA_ARGS} ${NEW_ITEM}"
else
EXTRA_ARGS="${EXTRA_ARGS} ${ITEM}"
fi
done
else
EXTRA_ARGS="${TF_BUILD_APPEND_ARGUMENTS} --test_tag_filters=-no_oss,-oss_serial,-benchmark-test"
if [[ ${IS_MAC} == "1" ]]; then
EXTRA_ARGS="${EXTRA_ARGS},-nomac"
fi
fi
# For any "tool" dependencies in genrules, Bazel will build them for host
# instead of the target configuration. We can save some build time by setting
# this flag, and it only affects a few tests.
EXTRA_ARGS="${EXTRA_ARGS} --distinct_host_configuration=false"
# Process PIP install-test option
if [[ ${TF_BUILD_IS_PIP} == "no_pip" ]] ||
[[ ${TF_BUILD_IS_PIP} == "both" ]]; then
# Process optional bazel target override
if [[ ! -z "${TF_BUILD_BAZEL_TARGET}" ]]; then
BAZEL_TARGET=${TF_BUILD_BAZEL_TARGET}
fi
if [[ ${CTYPE} == "cpu" ]] || \
[[ ${CTYPE} == "debian.jessie.cpu" ]]; then
# CPU only command, fully parallel.
NO_PIP_MAIN_CMD="${MAIN_CMD} ${BAZEL_CMD} ${OPT_FLAG} ${EXTRA_ARGS} -- "\
"${BAZEL_TARGET}"
elif [[ ${CTYPE} == "gpu" ]] || [[ ${CTYPE} == "gpu_clang" ]]; then
# GPU only command, run as many jobs as the GPU count only.
NO_PIP_MAIN_CMD="${BAZEL_CMD} ${OPT_FLAG} "\
"--local_test_jobs=${TF_GPU_COUNT} "\
"--run_under=${PARALLEL_GPU_TEST_CMD} ${EXTRA_ARGS} -- ${BAZEL_TARGET}"
elif [[ ${CTYPE} == "android" ]]; then
# Run android specific script for android build.
NO_PIP_MAIN_CMD="${ANDROID_CMD} ${OPT_FLAG} "
elif [[ ${CTYPE} == "android_full" ]]; then
# Run android specific script for full android build.
NO_PIP_MAIN_CMD="${ANDROID_FULL_CMD} ${OPT_FLAG} "
fi
fi
if [[ ${TF_BUILD_IS_PIP} == "pip" ]] ||
[[ ${TF_BUILD_IS_PIP} == "both" ]]; then
# Android builds conflict with PIP builds
if [[ ${CTYPE} == "android" ]]; then
echo "Skipping parameter combination: ${TF_BUILD_IS_PIP} & "\
"${TF_BUILD_CONTAINER_TYPE}"
exit 0
fi
PIP_MAIN_CMD="${MAIN_CMD} ${PIP_CMD} ${CTYPE} ${EXTRA_ARGS} ${OPT_FLAG}"
# Add flag for integration tests
if [[ ! -z "${TF_BUILD_INTEGRATION_TESTS}" ]] &&
[[ "${TF_BUILD_INTEGRATION_TESTS}" != "0" ]]; then
PIP_MAIN_CMD="${PIP_MAIN_CMD} ${PIP_INTEGRATION_TESTS_FLAG}"
fi
# Add command for tutorial test
if [[ ! -z "${TF_BUILD_TEST_TUTORIALS}" ]] &&
[[ "${TF_BUILD_TEST_TUTORIALS}" != "0" ]]; then
PIP_MAIN_CMD="${PIP_MAIN_CMD} ${PIP_TEST_TUTORIALS_FLAG}"
# Prepare data directory for tutorial tests
mkdir -p "${TUT_TEST_DATA_DIR}" ||
die "FAILED to create data directory for tutorial tests: "\
"${TUT_TEST_DATA_DIR}"
if [[ "${DO_DOCKER}" == "1" ]]; then
EXTRA_PARAMS="${EXTRA_PARAMS} -v ${TUT_TEST_DATA_DIR}:${TUT_TEST_DATA_DIR}"
fi
fi
fi
if [[ ${RUN_BENCHMARKS} == "1" ]]; then
MAIN_CMD="${BENCHMARK_CMD} ${OPT_FLAG}"
elif [[ ${TF_BUILD_IS_PIP} == "no_pip" ]]; then
MAIN_CMD="${NO_PIP_MAIN_CMD}"
elif [[ ${TF_BUILD_IS_PIP} == "pip" ]]; then
MAIN_CMD="${PIP_MAIN_CMD}"
elif [[ ${TF_BUILD_IS_PIP} == "both" ]]; then
MAIN_CMD="${NO_PIP_MAIN_CMD} && ${PIP_MAIN_CMD}"
else
die "Unrecognized value in TF_BUILD_IS_PIP: \"${TF_BUILD_IS_PIP}\""
fi
# Process Python version
if [[ ${TF_BUILD_PYTHON_VERSION} == "python2" ]]; then
:
elif [[ ${TF_BUILD_PYTHON_VERSION} == "python3" || \
${TF_BUILD_PYTHON_VERSION} == "python3.4" || \
${TF_BUILD_PYTHON_VERSION} == "python3.5" || \
${TF_BUILD_PYTHON_VERSION} == "python3.6" ]]; then
# Supply proper environment variable to select Python 3
if [[ "${DO_DOCKER}" == "1" ]]; then
EXTRA_PARAMS="${EXTRA_PARAMS} -e CI_BUILD_PYTHON=${TF_BUILD_PYTHON_VERSION}"
else
# Determine the path to python3
PYTHON3_PATH=$(which "${TF_BUILD_PYTHON_VERSION}" | head -1)
if [[ -z "${PYTHON3_PATH}" ]]; then
die "ERROR: Failed to locate ${TF_BUILD_PYTHON_VERSION} binary on path"
else
echo "Found ${TF_BUILD_PYTHON_VERSION} binary at: ${PYTHON3_PATH}"
fi
export PYTHON_BIN_PATH="${PYTHON3_PATH}"
fi
else
die "Unrecognized value in TF_BUILD_PYTHON_VERSION: "\
"\"${TF_BUILD_PYTHON_VERSION}\""
fi
# Append additional Docker extra parameters
EXTRA_PARAMS="${EXTRA_PARAMS} ${TF_BUILD_APPEND_CI_DOCKER_EXTRA_PARAMS}"
# Finally, do a dry run or call the command
# The command, which may consist of multiple parts (e.g., in the case of
# TF_BUILD_SERIAL_TESTS=1), are written to a bash script, which is
# then called. The name of the script is randomized to make concurrent
# builds on the node possible.
TMP_SCRIPT="$(mktemp)_ci_parameterized_build.sh"
if [[ "${DO_DOCKER}" == "1" ]]; then
# Map the tmp script into the Docker container
EXTRA_PARAMS="${EXTRA_PARAMS} -v ${TMP_SCRIPT}:/tmp/tf_build.sh"
if [[ ! -z "${TF_BUILD_BAZEL_CLEAN}" ]] &&
[[ "${TF_BUILD_BAZEL_CLEAN}" != "0" ]] &&
[[ "${TF_BUILD_IS_PIP}" != "both" ]]; then
# For TF_BUILD_IS_PIP == both, "bazel clean" will have already
# been performed before the "bazel test" step
EXTRA_PARAMS="${EXTRA_PARAMS} -e TF_BUILD_BAZEL_CLEAN=1"
fi
EXTRA_PARAMS=$(str_strip "${EXTRA_PARAMS}")
echo "Exporting CI_DOCKER_EXTRA_PARAMS: ${EXTRA_PARAMS}"
export CI_DOCKER_EXTRA_PARAMS="${EXTRA_PARAMS}"
fi
# Write to the tmp script
echo "#!/usr/bin/env bash" > ${TMP_SCRIPT}
if [[ ! -z "${TF_BUILD_BAZEL_CLEAN}" ]] &&
[[ "${TF_BUILD_BAZEL_CLEAN}" != "0" ]]; then
echo ${BAZEL_CLEAN_CMD} >> ${TMP_SCRIPT}
fi
echo ${MAIN_CMD} >> ${TMP_SCRIPT}
echo "Executing final command (${TMP_SCRIPT})..."
echo "=========================================="
cat ${TMP_SCRIPT}
echo "=========================================="
echo ""
TMP_DIR=""
DOCKERFILE_FLAG=""
if [[ "${TF_BUILD_PYTHON_VERSION}" == "python3.5" ]]; then
# Modify Dockerfile for Python3.5 build
TMP_DIR=$(mktemp -d)
echo "Docker build will occur in temporary directory: ${TMP_DIR}"
# Copy the files required for the docker build
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cp -r "${SCRIPT_DIR}/install" "${TMP_DIR}/install" || \
die "ERROR: Failed to copy directory ${SCRIPT_DIR}/install"
DOCKERFILE="${SCRIPT_DIR}/Dockerfile.${TF_BUILD_CONTAINER_TYPE}"
cp "${DOCKERFILE}" "${TMP_DIR}/" || \
die "ERROR: Failed to copy Dockerfile at ${DOCKERFILE}"
DOCKERFILE="${TMP_DIR}/Dockerfile.${TF_BUILD_CONTAINER_TYPE}"
# Replace a line in the Dockerfile
if sed -i \
's/RUN \/install\/install_pip_packages.sh/RUN \/install\/install_python3.5_pip_packages.sh/g' \
"${DOCKERFILE}"
then
echo "Copied and modified Dockerfile for Python 3.5 build: ${DOCKERFILE}"
else
die "ERROR: Faild to copy and modify Dockerfile: ${DOCKERFILE}"
fi
DOCKERFILE_FLAG="--dockerfile ${DOCKERFILE}"
fi
chmod +x ${TMP_SCRIPT}
# Map TF_BUILD container types to containers we actually have.
if [[ "${CTYPE}" == "android_full" ]]; then
CONTAINER="android"
else
CONTAINER=${CTYPE}
fi
FAILURE=0
if [[ ! -z "${TF_BUILD_DRY_RUN}" ]] && [[ ${TF_BUILD_DRY_RUN} != "0" ]]; then
# Do a dry run: just print the final command
echo "*** This is a DRY RUN ***"
else
# Actually run the command
if [[ "${DO_DOCKER}" == "1" ]]; then
${DOCKER_MAIN_CMD} ${CONTAINER} ${DOCKERFILE_FLAG} /tmp/tf_build.sh
else
${TMP_SCRIPT}
fi
if [[ $? != "0" ]]; then
FAILURE=1
fi
fi
[[ ${FAILURE} == "0" ]] && RESULT="SUCCESS" || RESULT="FAILURE"
rm -f ${TMP_SCRIPT}
END_TIME=$(date +'%s')
echo ""
echo "Parameterized build ends with ${RESULT} at: $(date) "\
"(Elapsed time: $((END_TIME - START_TIME)) s)"
# Clean up temporary directory if it exists
if [[ ! -z "${TMP_DIR}" ]]; then
echo "Cleaning up temporary directory: ${TMP_DIR}"
rm -rf "${TMP_DIR}"
fi
exit ${FAILURE}
|
ishay2b/tensorflow
|
tensorflow/tools/ci_build/ci_parameterized_build.sh
|
Shell
|
apache-2.0
| 20,785 |
#
# Copyright (C) 2015 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Chris Lumens <[email protected]>
TESTTYPE="network"
. ${KSTESTDIR}/functions.sh
|
marmarek/anaconda
|
tests/kickstart_tests/vlan.sh
|
Shell
|
gpl-2.0
| 1,061 |
#!/bin/bash
mvn archetype:generate -DarchetypeGroupId=com.vaadin -DarchetypeArtifactId=vaadin-archetype-application -DarchetypeVersion=7.3.5
|
unicesi/QD-SPL
|
ToolSupport/co.edu.icesi.shift.generator/files/vaadinProject.sh
|
Shell
|
lgpl-3.0
| 140 |
#!/bin/sh
# check in case a user was using this mechanism
if [ "x$ES_CLASSPATH" != "x" ]; then
cat >&2 << EOF
Error: Don't modify the classpath with ES_CLASSPATH. Best is to add
additional elements via the plugin mechanism, or if code must really be
added to the main classpath, add jars to lib/ (unsupported).
EOF
exit 1
fi
ES_CLASSPATH="$ES_HOME/lib/elasticsearch-2.2.0.jar:$ES_HOME/lib/*"
if [ "x$ES_MIN_MEM" = "x" ]; then
ES_MIN_MEM=8g
fi
if [ "x$ES_MAX_MEM" = "x" ]; then
ES_MAX_MEM=8g
fi
if [ "x$ES_HEAP_SIZE" != "x" ]; then
ES_MIN_MEM=$ES_HEAP_SIZE
ES_MAX_MEM=$ES_HEAP_SIZE
fi
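# Example (illustrative, not part of the original file): exporting a single heap
# size in the environment before this file is sourced sets both limits to the
# same value, e.g.:
#   export ES_HEAP_SIZE=4g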
# min and max heap sizes should be set to the same value to avoid
# stop-the-world GC pauses during resize, and so that we can lock the
# heap in memory on startup to prevent any of it from being swapped
# out.
JAVA_OPTS="$JAVA_OPTS -Xms${ES_MIN_MEM}"
JAVA_OPTS="$JAVA_OPTS -Xmx${ES_MAX_MEM}"
# new generation
if [ "x$ES_HEAP_NEWSIZE" != "x" ]; then
JAVA_OPTS="$JAVA_OPTS -Xmn${ES_HEAP_NEWSIZE}"
fi
# max direct memory
if [ "x$ES_DIRECT_SIZE" != "x" ]; then
JAVA_OPTS="$JAVA_OPTS -XX:MaxDirectMemorySize=${ES_DIRECT_SIZE}"
fi
# set to headless, just in case
JAVA_OPTS="$JAVA_OPTS -Djava.awt.headless=true"
# Force the JVM to use IPv4 stack
if [ "x$ES_USE_IPV4" != "x" ]; then
JAVA_OPTS="$JAVA_OPTS -Djava.net.preferIPv4Stack=true"
fi
# Add gc options. ES_GC_OPTS is unsupported, for internal testing
if [ "x$ES_GC_OPTS" = "x" ]; then
ES_GC_OPTS="$ES_GC_OPTS -XX:+UseParNewGC"
ES_GC_OPTS="$ES_GC_OPTS -XX:+UseConcMarkSweepGC"
ES_GC_OPTS="$ES_GC_OPTS -XX:CMSInitiatingOccupancyFraction=75"
ES_GC_OPTS="$ES_GC_OPTS -XX:+UseCMSInitiatingOccupancyOnly"
fi
JAVA_OPTS="$JAVA_OPTS $ES_GC_OPTS"
# GC logging options
if [ -n "$ES_GC_LOG_FILE" ]; then
JAVA_OPTS="$JAVA_OPTS -XX:+PrintGCDetails"
JAVA_OPTS="$JAVA_OPTS -XX:+PrintGCTimeStamps"
JAVA_OPTS="$JAVA_OPTS -XX:+PrintGCDateStamps"
JAVA_OPTS="$JAVA_OPTS -XX:+PrintClassHistogram"
JAVA_OPTS="$JAVA_OPTS -XX:+PrintTenuringDistribution"
JAVA_OPTS="$JAVA_OPTS -XX:+PrintGCApplicationStoppedTime"
JAVA_OPTS="$JAVA_OPTS -Xloggc:$ES_GC_LOG_FILE"
# Ensure that the directory for the log file exists: the JVM will not create it.
mkdir -p "`dirname \"$ES_GC_LOG_FILE\"`"
fi
# Causes the JVM to dump its heap on OutOfMemory.
JAVA_OPTS="$JAVA_OPTS -XX:+HeapDumpOnOutOfMemoryError"
# The path to the heap dump location, note directory must exists and have enough
# space for a full heap dump.
#JAVA_OPTS="$JAVA_OPTS -XX:HeapDumpPath=$ES_HOME/logs/heapdump.hprof"
# Disables explicit GC
JAVA_OPTS="$JAVA_OPTS -XX:+DisableExplicitGC"
# Ensure UTF-8 encoding by default (e.g. filenames)
JAVA_OPTS="$JAVA_OPTS -Dfile.encoding=UTF-8"
# Use our provided JNA always versus the system one
JAVA_OPTS="$JAVA_OPTS -Djna.nosys=true"
|
atgreen/GreenLab-Maintenance
|
roles/elasticsearch/files/elasticsearch.in.sh
|
Shell
|
apache-2.0
| 2,812 |
#!/bin/bash
usage()
{
cat << EOF
usage: $0 options
This script adds a header to all files matching the provided pattern in the given directory
OPTIONS:
-h Show this message
-l license file
-d directory to search
-p file pattern to match
EOF
}
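# Example invocation (illustrative; license file, directory and pattern are placeholders):
#   ./AddHeaders.sh -l license_header.txt -d ./src -p "*.cpp"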
license=
pattern=
directory=
while getopts "hl:p:d:" OPTION
do
case $OPTION in
h)
usage
exit 1
;;
l)
license=$OPTARG
;;
p)
pattern=$OPTARG
;;
d)
directory=$OPTARG
;;
?)
usage
exit
;;
esac
done
echo "Prepending ${license} to files with pattern ${pattern} in directory ${directory}"
for i in ${directory}/${pattern}
do
if ! grep -q Copyright $i
then
cat ${license} $i >$i.new && mv $i.new $i
fi
done
|
fw1121/Pandoras-Toolbox-for-Bioinformatics
|
src/sailfish/scripts/AddHeaders.sh
|
Shell
|
gpl-3.0
| 901 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/bin/sh -Eux
# Trap non-normal exit signals: 1/HUP, 2/INT, 3/QUIT, 15/TERM, ERR
trap founderror 1 2 3 15 ERR
founderror()
{
exit 1
}
exitscript()
{
#remove lock file
#rm $lockfile
exit 0
}
apt-get -y update
apt-get install -y wget git gcc g++ zlib1g-dev zlib1g make curl screen vim
mkdir -p /opt/github/edenhill
chmod a+rw -R /opt/github/edenhill
cd /opt/github/edenhill
git clone https://github.com/edenhill/kafkacat.git
cd /opt/github/edenhill/kafkacat
./bootstrap.sh
cp kafkacat /usr/local/bin/
exitscript
|
elodina/go-kafka
|
vagrant/kafkacat.sh
|
Shell
|
apache-2.0
| 1,338 |
#!/bin/bash
# -*- mode: shell-script; indent-tabs-mode: nil; sh-basic-offset: 4; -*-
# ex: ts=8 sw=4 sts=4 et filetype=sh
set -e
TEST_DESCRIPTION="https://github.com/systemd/systemd/issues/1981"
TEST_NO_QEMU=1
. $TEST_BASE_DIR/test-functions
NSPAWN_TIMEOUT=30s
test_setup() {
create_empty_image
mkdir -p $TESTDIR/root
mount ${LOOPDEV}p1 $TESTDIR/root
# Create what will eventually be our root filesystem onto an overlay
(
LOG_LEVEL=5
eval $(udevadm info --export --query=env --name=${LOOPDEV}p2)
setup_basic_environment
# setup the testsuite service
cat >$initdir/etc/systemd/system/testsuite.service <<EOF
[Unit]
Description=Testsuite service
After=multi-user.target
[Service]
ExecStart=/test-segfault.sh
Type=oneshot
EOF
cp test-segfault.sh $initdir/
setup_testsuite
) || return 1
setup_nspawn_root
ddebug "umount $TESTDIR/root"
umount $TESTDIR/root
}
do_test "$@"
|
utezduyar/systemd
|
test/TEST-07-ISSUE-1981/test.sh
|
Shell
|
gpl-2.0
| 970 |
#!/bin/sh
#
#.Distributed under the terms of the GNU General Public License (GPL) version 2.0
#
# script for sending updates to cloudflare.com
#.based on Ben Kulbertis cloudflare-update-record.sh found at http://gist.github.com/benkulbertis
#.and on George Johnson's cf-ddns.sh found at https://github.com/gstuartj/cf-ddns.sh
#.2016-2018 Christian Schoenebeck <christian dot schoenebeck at gmail dot com>
# CloudFlare API documentation at https://api.cloudflare.com/
#
# This script is parsed by dynamic_dns_functions.sh inside send_update() function
#
# using following options from /etc/config/ddns
# option username - your cloudflare e-mail
# option password - cloudflare api key, you can get it from cloudflare.com/my-account/
# option domain - "[email protected]" # syntax changed to remove split_FQDN() function and tld_names.dat.gz
#
# The proxy status would not be changed by this script. Please change it in Cloudflare dashboard manually.
#
# variable __IP already defined with the ip-address to use for update
#
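# illustrative /etc/config/ddns snippet matching the options listed above
# (section name and values are placeholders, not taken from this script):
#
#	config service 'myddns_cloudflare'
#		option username	'[email protected]'
#		option password	'your-cloudflare-api-key'
#		option domain	'[email protected]'
#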
# check parameters
[ -z "$CURL" ] && [ -z "$CURL_SSL" ] && write_log 14 "Cloudflare communication require cURL with SSL support. Please install"
[ -z "$username" ] && write_log 14 "Service section not configured correctly! Missing key as 'username'"
[ -z "$password" ] && write_log 14 "Service section not configured correctly! Missing secret as 'password'"
[ $use_https -eq 0 ] && use_https=1 # force HTTPS
# used variables
local __HOST __DOMAIN __TYPE __URLBASE __PRGBASE __RUNPROG __DATA __IPV6 __ZONEID __RECID __PROXIED
local __URLBASE="https://api.cloudflare.com/client/v4"
# split __HOST __DOMAIN from $domain
# given data:
# @example.com for "domain record"
# [email protected] for a "host record"
__HOST=$(printf %s "$domain" | cut -d@ -f1)
__DOMAIN=$(printf %s "$domain" | cut -d@ -f2)
# Cloudflare v4 needs:
# __DOMAIN = the base domain i.e. example.com
# __HOST = the FQDN of record to modify
# i.e. example.com for the "domain record" or host.sub.example.com for "host record"
# handling domain record then set __HOST = __DOMAIN
[ -z "$__HOST" ] && __HOST=$__DOMAIN
# handling host record then rebuild fqdn [email protected] => host.domain.tld
[ "$__HOST" != "$__DOMAIN" ] && __HOST="${__HOST}.${__DOMAIN}"
# set record type
[ $use_ipv6 -eq 0 ] && __TYPE="A" || __TYPE="AAAA"
# transfer function to use for cloudflare
# all needed variables are set global here
# so we can use them directly
cloudflare_transfer() {
local __CNT=0
local __ERR
while : ; do
write_log 7 "#> $__RUNPROG"
eval "$__RUNPROG"
__ERR=$? # save communication error
[ $__ERR -eq 0 ] && break # no error break while
write_log 3 "cURL Error: '$__ERR'"
write_log 7 "$(cat $ERRFILE)" # report error
[ $VERBOSE_MODE -gt 1 ] && {
# VERBOSE_MODE > 1 then NO retry
write_log 4 "Transfer failed - Verbose Mode: $VERBOSE_MODE - NO retry on error"
break
}
__CNT=$(( $__CNT + 1 )) # increment error counter
# if error count > retry_count leave here
[ $retry_count -gt 0 -a $__CNT -gt $retry_count ] && \
write_log 14 "Transfer failed after $retry_count retries"
write_log 4 "Transfer failed - retry $__CNT/$retry_count in $RETRY_SECONDS seconds"
sleep $RETRY_SECONDS &
PID_SLEEP=$!
wait $PID_SLEEP # enable trap-handler
PID_SLEEP=0
done
# check for error
grep -q '"success":true' $DATFILE || {
write_log 4 "CloudFlare reported an error:"
write_log 7 "$(cat $DATFILE)" # report error
		return 1	# HTTP error
}
}
# Build base command to use
__PRGBASE="$CURL -RsS -o $DATFILE --stderr $ERRFILE"
# force network/interface-device to use for communication
if [ -n "$bind_network" ]; then
local __DEVICE
network_get_physdev __DEVICE $bind_network || \
write_log 13 "Can not detect local device using 'network_get_physdev $bind_network' - Error: '$?'"
write_log 7 "Force communication via device '$__DEVICE'"
__PRGBASE="$__PRGBASE --interface $__DEVICE"
fi
# force ip version to use
if [ $force_ipversion -eq 1 ]; then
[ $use_ipv6 -eq 0 ] && __PRGBASE="$__PRGBASE -4" || __PRGBASE="$__PRGBASE -6" # force IPv4/IPv6
fi
# set certificate parameters
if [ "$cacert" = "IGNORE" ]; then # idea from Ticket #15327 to ignore server cert
__PRGBASE="$__PRGBASE --insecure" # but not empty better to use "IGNORE"
elif [ -f "$cacert" ]; then
__PRGBASE="$__PRGBASE --cacert $cacert"
elif [ -d "$cacert" ]; then
__PRGBASE="$__PRGBASE --capath $cacert"
elif [ -n "$cacert" ]; then # it's not a file and not a directory but given
write_log 14 "No valid certificate(s) found at '$cacert' for HTTPS communication"
fi
# disable proxy if not set (there might be .wgetrc or .curlrc or wrong environment set)
# or check if libcurl compiled with proxy support
if [ -z "$proxy" ]; then
__PRGBASE="$__PRGBASE --noproxy '*'"
elif [ -z "$CURL_PROXY" ]; then
# if libcurl has no proxy support and proxy should be used then force ERROR
write_log 13 "cURL: libcurl compiled without Proxy support"
fi
# set headers
__PRGBASE="$__PRGBASE --header 'X-Auth-Email: $username' "
__PRGBASE="$__PRGBASE --header 'X-Auth-Key: $password' "
__PRGBASE="$__PRGBASE --header 'Content-Type: application/json' "
# __PRGBASE="$__PRGBASE --header 'Accept: application/json' "
# read zone id for registered domain.TLD
__RUNPROG="$__PRGBASE --request GET '$__URLBASE/zones?name=$__DOMAIN'"
cloudflare_transfer || return 1
# extract zone id
__ZONEID=$(grep -o '"id":"[^"]*' $DATFILE | grep -o '[^"]*$' | head -1)
[ -z "$__ZONEID" ] && {
write_log 4 "Could not detect 'zone id' for domain.tld: '$__DOMAIN'"
return 127
}
# read record id for A or AAAA record of host.domain.TLD
__RUNPROG="$__PRGBASE --request GET '$__URLBASE/zones/$__ZONEID/dns_records?name=$__HOST&type=$__TYPE'"
cloudflare_transfer || return 1
# extract record id
__RECID=$(grep -o '"id":"[^"]*' $DATFILE | grep -o '[^"]*$' | head -1)
[ -z "$__RECID" ] && {
write_log 4 "Could not detect 'record id' for host.domain.tld: '$__HOST'"
return 127
}
# extract current stored IP
__DATA=$(grep -o '"content":"[^"]*' $DATFILE | grep -o '[^"]*$' | head -1)
# check data
[ $use_ipv6 -eq 0 ] \
&& __DATA=$(printf "%s" "$__DATA" | grep -m 1 -o "$IPV4_REGEX") \
|| __DATA=$(printf "%s" "$__DATA" | grep -m 1 -o "$IPV6_REGEX")
# we got data so verify
[ -n "$__DATA" ] && {
# expand IPv6 for compare
if [ $use_ipv6 -eq 1 ]; then
expand_ipv6 $__IP __IPV6
expand_ipv6 $__DATA __DATA
[ "$__DATA" = "$__IPV6" ] && { # IPv6 no update needed
write_log 7 "IPv6 at CloudFlare.com already up to date"
return 0
}
else
[ "$__DATA" = "$__IP" ] && { # IPv4 no update needed
write_log 7 "IPv4 at CloudFlare.com already up to date"
return 0
}
fi
}
# update is needed
# let's build data to send
# set proxied parameter
__PROXIED=$(grep -o '"proxied":[^",]*' $DATFILE | grep -o '[^:]*$')
# use file to work around " needed for json
cat > $DATFILE << EOF
{"id":"$__ZONEID","type":"$__TYPE","name":"$__HOST","content":"$__IP","proxied":$__PROXIED}
EOF
# let's complete transfer command
__RUNPROG="$__PRGBASE --request PUT --data @$DATFILE '$__URLBASE/zones/$__ZONEID/dns_records/$__RECID'"
cloudflare_transfer || return 1
return 0
|
InkblotAdmirer/packages
|
net/ddns-scripts/files/update_cloudflare_com_v4.sh
|
Shell
|
gpl-2.0
| 7,145 |
#!/bin/bash -euo
# Add workaround for SSH-based Git connections from Rust/cargo. See https://github.com/rust-lang/cargo/issues/2078 for details.
# We set CARGO_HOME because we don't pass on HOME to conda-build, thus rendering the default "${HOME}/.cargo" defunct.
export CARGO_NET_GIT_FETCH_WITH_CLI=true CARGO_HOME="$(pwd)/.cargo"
RUST_BACKTRACE=1 cargo install --verbose --root $PREFIX
|
phac-nml/bioconda-recipes
|
recipes/fpa/v0.4/build.sh
|
Shell
|
mit
| 391 |
#!/bin/bash
TIMEOUT=$1
DREAL=$2
MAX=$3
RESULTDIR=$4
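# Example invocation (illustrative; the dReal path, job count and result dir are placeholders):
#   ./loop.sh 3600 /path/to/dReal 8 /path/to/result_dir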
MAX_QUEUE=50000
TODO=${RESULTDIR}/TODO
SMT_QUEUE=${RESULTDIR}/SMT_QUEUE
CHECK_QUEUE=${RESULTDIR}/CHECK_QUEUE
NOT_PROVED_YET=${RESULTDIR}/NOT_YET
PROVED=${RESULTDIR}/PROVED
WRONG=${RESULTDIR}/WRONG
START_TIME=${RESULTDIR}/START_TIME
END_TIME=${RESULTDIR}/END_TIME
PROOFCHECK_PATH=`dirname $0`
RUN_DREAL=${PROOFCHECK_PATH}/run_dreal.sh
PCHECKER=${PROOFCHECK_PATH}/checker/main.native
SPLIT=${PROOFCHECK_PATH}/split.py
function log_msg {
echo -n "`date`: "
printf "[%-30s]: " `basename $1`
echo $2
}
touch $NOT_PROVED_YET
touch $TODO
date +%s%N | cut -b1-13 > $START_TIME
DEADLINE="`cat $START_TIME` + ( $TIMEOUT * 1000 )"
DEADLINE=`echo "$DEADLINE" | bc`
while [ -f $TODO ]
do
# CHECK THE TIME, TERMINATE IF TIMEOUT
CURTIME=`date +%s%N | cut -b1-13`
if [ "$CURTIME" -gt "$DEADLINE" ]
then
log_msg ${BASE}.smt2 "ProofChecking: Timeout"
touch ${RESULTDIR}/TIMEOUT
exit 1
fi
# INITIALIZE QUEUES
rm -rf $SMT_QUEUE $CHECK_QUEUE
touch $SMT_QUEUE $CHECK_QUEUE
# TODO will be re-created if we need more things TO DO
rm $TODO
# Find SMT2 files and add to SMT QUEUE
# 1) Do not have the result, yet
for SMT2 in `find $RESULTDIR -name "*.smt2"`
do
BASE=${SMT2/%.smt2/}
if [ ! -f $BASE.result ]
then
echo $BASE >> $SMT_QUEUE
log_msg ${BASE}.smt2 "ProofChecking: Adding to the SMT Queue"
fi
done
if [ "`wc $SMT_QUEUE -l | cut -d ' ' -f 1`" -gt $MAX_QUEUE ]
then
log_msg ${BASE}.smt2 "ProofChecking: Too many files"
touch ${RESULTDIR}/TOOMANY
exit 2
fi
# RUN in Parallel: dReal2 to generate results (.result, .time, .trace)
if [ -s $SMT_QUEUE ]
then
log_msg $SMT_QUEUE "ProofChecking: Run dReal"
cat $SMT_QUEUE | parallel --max-procs=$MAX "$RUN_DREAL {}.smt2 $RESULTDIR $DREAL $TIMEOUT"
fi
# RUN: split.py
if [ -s $SMT_QUEUE ]
then
log_msg $SMT_QUEUE "ProofChecking: Split Traces"
cat $SMT_QUEUE | parallel --max-procs=$MAX "$SPLIT {}.trace"
fi
# Find trace files and add to CHECK QUEUE
for TRACE in `find $RESULTDIR -name "*.trace"`
do
BASE=${TRACE/%.trace/}
if [ ! -f $BASE.checked ]
then
echo $BASE >> $CHECK_QUEUE
log_msg ${BASE}.trace "ProofChecking: Adding to the Check Queue"
fi
done
# RUN in Parallel: proof_checker to generate result (possibly sub_problems)
if [ -s $CHECK_QUEUE ]
then
log_msg $CHECK_QUEUE "ProofChecking: Check traces in the queue."
cat $CHECK_QUEUE | parallel --max-procs=$MAX "$PCHECKER {}.trace > {}.check_stat"
for ID in `cat $CHECK_QUEUE`
do
if grep -q "Failed Axioms" $ID.check_stat
then
touch $ID.checked
# Check was run.
if grep -q "Failed Axioms #: 0" $ID.check_stat
then
touch $ID.trace.PROVED
else
touch $ID.trace.not_proved
fi
else
# Check was not run properly. ABORT!
touch $ID.trace.WRONG
touch $WRONG
date +%s%N | cut -b1-13 > $END_TIME
exit 2
fi
done
touch $TODO # We may need to have more things TO DO
fi
done
rm $NOT_PROVED_YET
touch $PROVED
date +%s%N | cut -b1-13 > $END_TIME
exit 0
|
dreal/dreal-next
|
tools/proofcheck/loop.sh
|
Shell
|
gpl-3.0
| 3,525 |
#!/bin/bash -xe
. $(dirname $0)/common.sh
if [ ! -f ${project_tag}_ssh_config ]; then
python -m jiocloud.apply_resources ssh_config --project_tag=${project_tag} ${mappings_arg} environment/${layout:-full}.yaml > ${project_tag}_ssh_config
fi
ssh -F ${project_tag}_ssh_config -l ${ssh_user:-jenkins} gcp1_${project_tag} '~jenkins/tempest/run_tempest.sh -N -- --load-list ~jenkins/tempest_tests.txt'
exit 0
|
rohit-k/puppet-rjil
|
build_scripts/test.sh
|
Shell
|
apache-2.0
| 411 |
#!/bin/bash
# -*- mode: shell-script; indent-tabs-mode: nil; sh-basic-offset: 4; -*-
# ex: ts=8 sw=4 sts=4 et filetype=sh
TEST_DESCRIPTION="Resource limits-related tests"
. $TEST_BASE_DIR/test-functions
check_result_qemu() {
ret=1
mkdir -p $TESTDIR/root
mount ${LOOPDEV}p1 $TESTDIR/root
[[ -e $TESTDIR/root/testok ]] && ret=0
[[ -f $TESTDIR/root/failed ]] && cp -a $TESTDIR/root/failed $TESTDIR
cp -a $TESTDIR/root/var/log/journal $TESTDIR
umount $TESTDIR/root
[[ -f $TESTDIR/failed ]] && cat $TESTDIR/failed
ls -l $TESTDIR/journal/*/*.journal
test -s $TESTDIR/failed && ret=$(($ret+1))
return $ret
}
test_run() {
if run_qemu; then
check_result_qemu || return 1
else
dwarn "can't run QEMU, skipping"
fi
if run_nspawn; then
check_result_nspawn || return 1
else
dwarn "can't run systemd-nspawn, skipping"
fi
return 0
}
test_setup() {
create_empty_image
mkdir -p $TESTDIR/root
mount ${LOOPDEV}p1 $TESTDIR/root
# Create what will eventually be our root filesystem onto an overlay
(
LOG_LEVEL=5
eval $(udevadm info --export --query=env --name=${LOOPDEV}p2)
setup_basic_environment
cat >$initdir/etc/systemd/system.conf <<EOF
[Manager]
DefaultLimitNOFILE=10000:16384
EOF
# setup the testsuite service
cat >$initdir/etc/systemd/system/testsuite.service <<EOF
[Unit]
Description=Testsuite service
After=multi-user.target
[Service]
ExecStart=/test-rlimits.sh
Type=oneshot
EOF
cp test-rlimits.sh $initdir/
setup_testsuite
) || return 1
setup_nspawn_root
ddebug "umount $TESTDIR/root"
umount $TESTDIR/root
}
test_cleanup() {
umount $TESTDIR/root 2>/dev/null
[[ $LOOPDEV ]] && losetup -d $LOOPDEV
return 0
}
do_test "$@"
|
boucman/systemd
|
test/TEST-05-RLIMITS/test.sh
|
Shell
|
gpl-2.0
| 1,839 |
#!/usr/bin/env bash
# postfix-wrapper.sh, version 0.1.0
#
# You cannot start postfix in a foreground mode, and
# it's important that docker doesn't kill
# postfix and its children if you stop the container.
#
# Use this script with supervisord and it will take
# care about starting and stopping postfix correctly.
#
# supervisord config snippet for postfix-wrapper:
#
# [program:postfix]
# process_name = postfix
# command = /path/to/postfix-wrapper.sh
# startsecs = 0
# autorestart = false
#
# Init vars
if [[ -z "$SERVICE_POSTFIX_OPTS" ]]; then SERVICE_POSTFIX_OPTS=""; fi
source /opt/docker/bin/config.sh
trap "postfix stop" SIGINT
trap "postfix stop" SIGTERM
trap "postfix reload" SIGHUP
includeScriptDir "/opt/docker/bin/service.d/postfix.d/"
# start postfix
postfix start $SERVICE_POSTFIX_OPTS
# lets give postfix some time to start
sleep 3
# wait until postfix is dead (triggered by trap)
if [[ -f /var/spool/postfix/pid/master.pid ]]; then
while kill -0 "$(cat /var/spool/postfix/pid/master.pid 2>/dev/null)" &>/dev/null; do
sleep 5
done
fi
|
webdevops/Dockerfile
|
provisioning/base/general/bin/service.d/postfix.sh
|
Shell
|
mit
| 1,085 |
#!/bin/bash
: ${SSH_USERNAME:=user}
: ${SSH_USERPASS:=$(dd if=/dev/urandom bs=1 count=15 | base64)}
__create_rundir() {
mkdir -p /var/run/sshd
}
__create_user() {
# Create a user to SSH into as.
useradd $SSH_USERNAME
echo -e "$SSH_USERPASS\n$SSH_USERPASS" | (passwd --stdin $SSH_USERNAME)
echo ssh user password: $SSH_USERPASS
}
__create_hostkeys() {
ssh-keygen -t rsa -f /etc/ssh/ssh_host_rsa_key -N ''
}
# Call all functions
__create_rundir
__create_hostkeys
__create_user
exec "$@"
|
lkundrak/Fedora-Dockerfiles
|
ssh/entrypoint.sh
|
Shell
|
gpl-2.0
| 494 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
set -e # exit on the first error
cd $(dirname $(readlink -f $0))/../example
echo $PWD
export LD_LIBRARY_PATH=$(readlink -f ../../lib):$LD_LIBRARY_PATH
echo $LD_LIBRARY_PATH
ls -l ../../lib/
cp ../../build/cpp-package/example/test_score .
./get_mnist.sh
./test_score 0.93
|
Mega-DatA-Lab/mxnet
|
cpp-package/tests/ci_test.sh
|
Shell
|
apache-2.0
| 1,058 |
#!/usr/bin/env bash
if [[ $EUID -ne 0 ]]; then
echo "You must root" 2>&1
exit 1
fi
#update capstone
cd capstone
if [[ `git pull` != "Already up-to-date." ]]; then
git checkout b53a59af53ffbd5dbe8dbcefba41a00cf4fc7469
./make.sh
./make.sh install
uname -a | grep BSD &> /dev/null
if [ $? -eq 0 ]; then
echo 'Installing Capstone python bindings for *bsd'
rm -rf ./build
python setup.py build -b ./build install
else
make install
fi
#check if kali
uname -a | grep -i kali &> /dev/null
if [ $? -eq 0 ]; then
echo "Adding capstone path for Kali64 in /etc/ls.so.conf.d/capstone.conf"
echo "#capstone shared libs" >> /etc/ld.so.conf.d/capstone.conf
echo "/usr/lib64" >> /etc/ld.so.conf.d/capstone.conf
ldconfig
fi
else
echo "Capstone is up-to-date."
fi
# update pefile
pip install --upgrade pefile
|
sigma-random/the-backdoor-factory
|
update.sh
|
Shell
|
bsd-3-clause
| 835 |
#!/bin/sh
#
# dex2jar - Tools to work with android .dex and java .class files
# Copyright (c) 2009-2013 Panxiaobo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# copy from $Tomcat/bin/startup.sh
# resolve links - $0 may be a softlink
PRG="$0"
while [ -h "$PRG" ] ; do
ls=`ls -ld "$PRG"`
link=`expr "$ls" : '.*-> \(.*\)$'`
if expr "$link" : '/.*' > /dev/null; then
PRG="$link"
else
PRG=`dirname "$PRG"`/"$link"
fi
done
PRGDIR=`dirname "$PRG"`
#
# call d2j_invoke.sh to setup java environment
"$PRGDIR/d2j_invoke.sh" "com.googlecode.d2j.smali.SmaliCmd" "$@"
|
kiya-z/Android
|
tools/dex2jar/d2j-smali.sh
|
Shell
|
apache-2.0
| 1,083 |
#!/bin/sh
# Copyright (c) 2010, Jens Lehmann
test_description='Recursive "git fetch" for submodules'
. ./test-lib.sh
pwd=$(pwd)
add_upstream_commit() {
(
cd submodule &&
head1=$(git rev-parse --short HEAD) &&
echo new >> subfile &&
test_tick &&
git add subfile &&
git commit -m new subfile &&
head2=$(git rev-parse --short HEAD) &&
echo "From $pwd/submodule" > ../expect.err &&
echo " $head1..$head2 master -> origin/master" >> ../expect.err
) &&
(
cd deepsubmodule &&
head1=$(git rev-parse --short HEAD) &&
echo new >> deepsubfile &&
test_tick &&
git add deepsubfile &&
git commit -m new deepsubfile &&
head2=$(git rev-parse --short HEAD) &&
echo "From $pwd/deepsubmodule" >> ../expect.err &&
echo " $head1..$head2 master -> origin/master" >> ../expect.err
)
}
test_expect_success setup '
mkdir deepsubmodule &&
(
cd deepsubmodule &&
git init &&
echo deepsubcontent > deepsubfile &&
git add deepsubfile &&
git commit -m new deepsubfile
) &&
mkdir submodule &&
(
cd submodule &&
git init &&
echo subcontent > subfile &&
git add subfile &&
git submodule add "$pwd/deepsubmodule" subdir/deepsubmodule &&
git commit -a -m new
) &&
git submodule add "$pwd/submodule" submodule &&
git commit -am initial &&
git clone . downstream &&
(
cd downstream &&
git submodule update --init --recursive
) &&
echo "Fetching submodule submodule" > expect.out &&
echo "Fetching submodule submodule/subdir/deepsubmodule" >> expect.out
'
test_expect_success "fetch --recurse-submodules recurses into submodules" '
add_upstream_commit &&
(
cd downstream &&
git fetch --recurse-submodules >../actual.out 2>../actual.err
) &&
test_i18ncmp expect.out actual.out &&
test_i18ncmp expect.err actual.err
'
test_expect_success "fetch alone only fetches superproject" '
add_upstream_commit &&
(
cd downstream &&
git fetch >../actual.out 2>../actual.err
) &&
! test -s actual.out &&
! test -s actual.err
'
test_expect_success "fetch --no-recurse-submodules only fetches superproject" '
(
cd downstream &&
git fetch --no-recurse-submodules >../actual.out 2>../actual.err
) &&
! test -s actual.out &&
! test -s actual.err
'
test_expect_success "using fetchRecurseSubmodules=true in .gitmodules recurses into submodules" '
(
cd downstream &&
git config -f .gitmodules submodule.submodule.fetchRecurseSubmodules true &&
git fetch >../actual.out 2>../actual.err
) &&
test_i18ncmp expect.out actual.out &&
test_i18ncmp expect.err actual.err
'
test_expect_success "--no-recurse-submodules overrides .gitmodules config" '
add_upstream_commit &&
(
cd downstream &&
git fetch --no-recurse-submodules >../actual.out 2>../actual.err
) &&
! test -s actual.out &&
! test -s actual.err
'
test_expect_success "using fetchRecurseSubmodules=false in .git/config overrides setting in .gitmodules" '
(
cd downstream &&
git config submodule.submodule.fetchRecurseSubmodules false &&
git fetch >../actual.out 2>../actual.err
) &&
! test -s actual.out &&
! test -s actual.err
'
test_expect_success "--recurse-submodules overrides fetchRecurseSubmodules setting from .git/config" '
(
cd downstream &&
git fetch --recurse-submodules >../actual.out 2>../actual.err &&
git config --unset -f .gitmodules submodule.submodule.fetchRecurseSubmodules &&
git config --unset submodule.submodule.fetchRecurseSubmodules
) &&
test_i18ncmp expect.out actual.out &&
test_i18ncmp expect.err actual.err
'
test_expect_success "--quiet propagates to submodules" '
(
cd downstream &&
git fetch --recurse-submodules --quiet >../actual.out 2>../actual.err
) &&
! test -s actual.out &&
! test -s actual.err
'
test_expect_success "--dry-run propagates to submodules" '
add_upstream_commit &&
(
cd downstream &&
git fetch --recurse-submodules --dry-run >../actual.out 2>../actual.err
) &&
test_i18ncmp expect.out actual.out &&
test_i18ncmp expect.err actual.err
'
test_expect_success "Without --dry-run propagates to submodules" '
(
cd downstream &&
git fetch --recurse-submodules >../actual.out 2>../actual.err
) &&
test_i18ncmp expect.out actual.out &&
test_i18ncmp expect.err actual.err
'
test_expect_success "recurseSubmodules=true propagates into submodules" '
add_upstream_commit &&
(
cd downstream &&
git config fetch.recurseSubmodules true
git fetch >../actual.out 2>../actual.err
) &&
test_i18ncmp expect.out actual.out &&
test_i18ncmp expect.err actual.err
'
test_expect_success "--recurse-submodules overrides config in submodule" '
add_upstream_commit &&
(
cd downstream &&
(
cd submodule &&
git config fetch.recurseSubmodules false
) &&
git fetch --recurse-submodules >../actual.out 2>../actual.err
) &&
test_i18ncmp expect.out actual.out &&
test_i18ncmp expect.err actual.err
'
test_expect_success "--no-recurse-submodules overrides config setting" '
add_upstream_commit &&
(
cd downstream &&
git config fetch.recurseSubmodules true
git fetch --no-recurse-submodules >../actual.out 2>../actual.err
) &&
! test -s actual.out &&
! test -s actual.err
'
test_expect_success "Recursion doesn't happen when no new commits are fetched in the superproject" '
(
cd downstream &&
(
cd submodule &&
git config --unset fetch.recurseSubmodules
) &&
git config --unset fetch.recurseSubmodules
git fetch >../actual.out 2>../actual.err
) &&
! test -s actual.out &&
! test -s actual.err
'
test_expect_success "Recursion stops when no new submodule commits are fetched" '
head1=$(git rev-parse --short HEAD) &&
git add submodule &&
git commit -m "new submodule" &&
head2=$(git rev-parse --short HEAD) &&
echo "Fetching submodule submodule" > expect.out.sub &&
echo "From $pwd/." > expect.err.sub &&
echo " $head1..$head2 master -> origin/master" >> expect.err.sub
head -2 expect.err >> expect.err.sub &&
(
cd downstream &&
git fetch >../actual.out 2>../actual.err
) &&
test_i18ncmp expect.err.sub actual.err &&
test_i18ncmp expect.out.sub actual.out
'
test_expect_success "Recursion doesn't happen when new superproject commits don't change any submodules" '
add_upstream_commit &&
head1=$(git rev-parse --short HEAD) &&
echo a > file &&
git add file &&
git commit -m "new file" &&
head2=$(git rev-parse --short HEAD) &&
echo "From $pwd/." > expect.err.file &&
echo " $head1..$head2 master -> origin/master" >> expect.err.file &&
(
cd downstream &&
git fetch >../actual.out 2>../actual.err
) &&
! test -s actual.out &&
test_i18ncmp expect.err.file actual.err
'
test_expect_success "Recursion picks up config in submodule" '
(
cd downstream &&
git fetch --recurse-submodules &&
(
cd submodule &&
git config fetch.recurseSubmodules true
)
) &&
add_upstream_commit &&
head1=$(git rev-parse --short HEAD) &&
git add submodule &&
git commit -m "new submodule" &&
head2=$(git rev-parse --short HEAD) &&
echo "From $pwd/." > expect.err.sub &&
echo " $head1..$head2 master -> origin/master" >> expect.err.sub &&
cat expect.err >> expect.err.sub &&
(
cd downstream &&
git fetch >../actual.out 2>../actual.err &&
(
cd submodule &&
git config --unset fetch.recurseSubmodules
)
) &&
test_i18ncmp expect.err.sub actual.err &&
test_i18ncmp expect.out actual.out
'
test_expect_success "Recursion picks up all submodules when necessary" '
add_upstream_commit &&
(
cd submodule &&
(
cd subdir/deepsubmodule &&
git fetch &&
git checkout -q FETCH_HEAD
) &&
head1=$(git rev-parse --short HEAD^) &&
git add subdir/deepsubmodule &&
git commit -m "new deepsubmodule"
head2=$(git rev-parse --short HEAD) &&
echo "From $pwd/submodule" > ../expect.err.sub &&
echo " $head1..$head2 master -> origin/master" >> ../expect.err.sub
) &&
head1=$(git rev-parse --short HEAD) &&
git add submodule &&
git commit -m "new submodule" &&
head2=$(git rev-parse --short HEAD) &&
echo "From $pwd/." > expect.err.2 &&
echo " $head1..$head2 master -> origin/master" >> expect.err.2 &&
cat expect.err.sub >> expect.err.2 &&
tail -2 expect.err >> expect.err.2 &&
(
cd downstream &&
git fetch >../actual.out 2>../actual.err
) &&
test_i18ncmp expect.err.2 actual.err &&
test_i18ncmp expect.out actual.out
'
test_expect_success "'--recurse-submodules=on-demand' doesn't recurse when no new commits are fetched in the superproject (and ignores config)" '
add_upstream_commit &&
(
cd submodule &&
(
cd subdir/deepsubmodule &&
git fetch &&
git checkout -q FETCH_HEAD
) &&
head1=$(git rev-parse --short HEAD^) &&
git add subdir/deepsubmodule &&
git commit -m "new deepsubmodule"
head2=$(git rev-parse --short HEAD) &&
echo "From $pwd/submodule" > ../expect.err.sub &&
echo " $head1..$head2 master -> origin/master" >> ../expect.err.sub
) &&
(
cd downstream &&
git config fetch.recurseSubmodules true &&
git fetch --recurse-submodules=on-demand >../actual.out 2>../actual.err &&
git config --unset fetch.recurseSubmodules
) &&
! test -s actual.out &&
! test -s actual.err
'
test_expect_success "'--recurse-submodules=on-demand' recurses as deep as necessary (and ignores config)" '
head1=$(git rev-parse --short HEAD) &&
git add submodule &&
git commit -m "new submodule" &&
head2=$(git rev-parse --short HEAD) &&
tail -2 expect.err > expect.err.deepsub &&
echo "From $pwd/." > expect.err &&
echo " $head1..$head2 master -> origin/master" >> expect.err
cat expect.err.sub >> expect.err &&
cat expect.err.deepsub >> expect.err &&
(
cd downstream &&
git config fetch.recurseSubmodules false &&
(
cd submodule &&
git config -f .gitmodules submodule.subdir/deepsubmodule.fetchRecursive false
) &&
git fetch --recurse-submodules=on-demand >../actual.out 2>../actual.err &&
git config --unset fetch.recurseSubmodules
(
cd submodule &&
git config --unset -f .gitmodules submodule.subdir/deepsubmodule.fetchRecursive
)
) &&
test_i18ncmp expect.out actual.out &&
test_i18ncmp expect.err actual.err
'
test_expect_success "'--recurse-submodules=on-demand' stops when no new submodule commits are found in the superproject (and ignores config)" '
add_upstream_commit &&
head1=$(git rev-parse --short HEAD) &&
echo a >> file &&
git add file &&
git commit -m "new file" &&
head2=$(git rev-parse --short HEAD) &&
echo "From $pwd/." > expect.err.file &&
echo " $head1..$head2 master -> origin/master" >> expect.err.file &&
(
cd downstream &&
git fetch --recurse-submodules=on-demand >../actual.out 2>../actual.err
) &&
! test -s actual.out &&
test_i18ncmp expect.err.file actual.err
'
test_expect_success "'fetch.recurseSubmodules=on-demand' overrides global config" '
(
cd downstream &&
git fetch --recurse-submodules
) &&
add_upstream_commit &&
git config --global fetch.recurseSubmodules false &&
head1=$(git rev-parse --short HEAD) &&
git add submodule &&
git commit -m "new submodule" &&
head2=$(git rev-parse --short HEAD) &&
echo "From $pwd/." > expect.err.2 &&
echo " $head1..$head2 master -> origin/master" >> expect.err.2
head -2 expect.err >> expect.err.2 &&
(
cd downstream &&
git config fetch.recurseSubmodules on-demand &&
git fetch >../actual.out 2>../actual.err
) &&
git config --global --unset fetch.recurseSubmodules &&
(
cd downstream &&
git config --unset fetch.recurseSubmodules
) &&
test_i18ncmp expect.out.sub actual.out &&
test_i18ncmp expect.err.2 actual.err
'
test_expect_success "'submodule.<sub>.fetchRecurseSubmodules=on-demand' overrides fetch.recurseSubmodules" '
(
cd downstream &&
git fetch --recurse-submodules
) &&
add_upstream_commit &&
git config fetch.recurseSubmodules false &&
head1=$(git rev-parse --short HEAD) &&
git add submodule &&
git commit -m "new submodule" &&
head2=$(git rev-parse --short HEAD) &&
echo "From $pwd/." > expect.err.2 &&
echo " $head1..$head2 master -> origin/master" >> expect.err.2
head -2 expect.err >> expect.err.2 &&
(
cd downstream &&
git config submodule.submodule.fetchRecurseSubmodules on-demand &&
git fetch >../actual.out 2>../actual.err
) &&
git config --unset fetch.recurseSubmodules &&
(
cd downstream &&
git config --unset submodule.submodule.fetchRecurseSubmodules
) &&
test_i18ncmp expect.out.sub actual.out &&
test_i18ncmp expect.err.2 actual.err
'
test_expect_success "don't fetch submodule when newly recorded commits are already present" '
(
cd submodule &&
git checkout -q HEAD^^
) &&
head1=$(git rev-parse --short HEAD) &&
git add submodule &&
git commit -m "submodule rewound" &&
head2=$(git rev-parse --short HEAD) &&
echo "From $pwd/." > expect.err &&
echo " $head1..$head2 master -> origin/master" >> expect.err &&
(
cd downstream &&
git fetch >../actual.out 2>../actual.err
) &&
! test -s actual.out &&
test_i18ncmp expect.err actual.err
'
test_done
|
overtherain/scriptfile
|
tool-kit/git-2.1.2/t/t5526-fetch-submodules.sh
|
Shell
|
mit
| 12,944 |
echo "DataNode #4"
./hhdfs datanode 127.0.0.1 44452 127.0.0.1:44444:0
|
Ferdinand-vW/HHDFS
|
test/DataNode#4/new.sh
|
Shell
|
isc
| 70 |
#!/bin/bash
#
# UploadToOPUS.sh
#
# Copyright 2011 Francisco Hernandez <francisco@francisco-laptop>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
# This script uses RUBY to upload all of the OBS files in the current directory to OPUS (Static)
# Variables
email=$1 #E-mail where data will be sent
height=$2 #Height of the antenna
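# Example invocation (illustrative; the e-mail address and antenna height are placeholders):
#   ./UploadtoOPUS_RT.sh [email protected] 1.5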
counter=0
counter1=0
percent=0
cd $(dirname "${0}")
java -jar ./selenium-server.jar -singleWindow -browserSessionReuse &
sleep 20s
echo "This script uses RUBY to upload all of the OBS files in the current directory to OPUS (Static)
"
echo "Now changing directory to " $(dirname "${0}")"../updata"
echo ""
cd ../updata
fullpath=$(pwd)
for file in *.[1890][890]o
do
counter=$(expr $counter + 1)
done
for file in *.[1890][890]o
do
echo "Now uploading $file"
../scripts/UploadToOPUS.rb $email "$fullpath/$file" $height
counter1=$(expr $counter1 + 1)
percent=$(expr $counter1 \* 100 / $counter)
echo "Done uploading $file: $percent% done!
"
echo "Now downloading e-mail
"
../scripts/DownloadFromOPUS.rb $server $user $pass $port >> $DataMail
mv $file ./old/$file
done
killall java
exit 0
|
Fran89/OPUS-Upload
|
scripts/UploadtoOPUS_RT.sh
|
Shell
|
mit
| 1,903 |
clang -c -fpic -O2 ring_pgsql.c -I $PWD/../../language/include -I /usr/local/include -I /usr/local/opt/libpq/include
clang -dynamiclib -o $PWD/../../lib/libring_pgsql.dylib ring_pgsql.o -L $PWD/../../lib -lring -L /usr/local/opt/libpq/lib -lpq
|
ring-lang/ring
|
extensions/ringpostgresql/buildclang.sh
|
Shell
|
mit
| 252 |
#!/bin/bash
# Do not use `set -x` here as then it displays the PYPIPW in logs
set -e
# Get environment variables, readily decrypted by rultor
source ../rultor_secrets.sh
# Ship it!
echo "Uploading pycolorname to pypi"
pip3 install twine wheel
python3 setup.py sdist bdist_wheel
twine upload dist/* -u "$PYPIUSER" -p "$PYPIPW"
sudo apt-get -qq -y install python3-dev
echo "Installing pycolorname from pypi"
pip3 install --pre pycolorname==`cat pycolorname/VERSION` --upgrade
pypi_version=`cd .. && python3 -c "import pycolorname; print(pycolorname.__version__)"`
repo_version=`cat pycolorname/VERSION`
echo versions: pip=$pypi_version repo=$repo_version
[ $pypi_version = $repo_version ]
|
AbdealiJK/pycolorname
|
.ci/deploy.pypi.sh
|
Shell
|
mit
| 693 |
#!/usr/bin/env bash
set -e
if [ -f ${SSL_KEY_PATH:-/..} ] ; then
cp $SSL_KEY_PATH /certificate.key
fi
if [ -f ${SSL_CRT_PATH:-/..} ] ; then
cp $SSL_CRT_PATH /certificate.crt
fi
if [ -z "$SERVER_NAME" ] ; then
sed -i"" "s#XXXXXXX##g" /etc/nginx/nginx.conf
else
sed -i"" "s#XXXXXXX#server_name $SERVER_NAME;#g" /etc/nginx/nginx.conf
fi
sed -i"" "s#~~~~~~~#$UPSTREAM#g" /etc/nginx/nginx.conf
exec nginx -g "daemon off; error_log /dev/stderr info;"
|
PurpleBooth/nginx-ssl-terminator
|
run.sh
|
Shell
|
mit
| 469 |
#android-ndk location
export NDK_ROOT=~/Projects/android-ndk-r13b/
export NDK_CCACHE=/usr/local/opt/ccache/bin/ccache
#location of android utils
export ANDROID_HOME=~/Projects/android-sdk
#Add android home to path
export PATH=$PATH:$ANDROID_HOME
|
TheProphetOfRa/dotfiles
|
android/android.zsh
|
Shell
|
mit
| 248 |
#!/bin/sh
# bail on error
set -e
ROOTDIR=$(pwd -P)
PROJECT=docker-minimal-nodejs
TMPDIR=/tmp/${PROJECT}
if [ ! -f Dockerfile.step1 ]; then
echo "Script must be run from same directory as Dockerfile.step1"
exit 1
fi
echo ROOTDIR=$ROOTDIR
# The latest node.js distribution is held here
BASE_URL=http://nodejs.org/dist/latest/
# Extract the name of the 64-bit linux distro from the directory listing
PACKAGE=$(curl ${BASE_URL} | egrep -o 'node-[^"]+-linux-x64.tar.gz' | uniq)
URL=${BASE_URL}/${PACKAGE}
echo
echo $PACKAGE is the latest version
echo Retreiving $URL...
rm -rf ${TMPDIR}
mkdir ${TMPDIR}
cd ${TMPDIR}
curl -O $URL
# Get the basename for the package
PACKAGE_NAME=$(basename ${PACKAGE} .tar.gz)
echo PACKAGE_NAME=${PACKAGE_NAME}
# String replace the token in the dockerfile
sed "s/PACKAGE_NAME/${PACKAGE_NAME}/g" ${ROOTDIR}/Dockerfile.step1 > Dockerfile
docker build -t ${PROJECT}-builder .
mkdir rootfs
docker run --rm ${PROJECT}-builder | tar xz -C rootfs
cp -f ${ROOTDIR}/Dockerfile.step2 Dockerfile
docker build -t ${PROJECT} .
cd $ROOTDIR
rm -rf ${TMPDIR}
echo "Docker image ${PROJECT} created"
echo "To get a node REPL, run:"
echo "docker run -ti ${PROJECT}"
|
McKayDavis/docker-minimal-nodejs
|
build.sh
|
Shell
|
mit
| 1,204 |
#!/bin/bash
##
# Deploys the app to Heroku.
#
# This script is designed to be run either from a development
# machine, or from within a headless Continuous Integration (CI)
# environment.
#
# When run from a development machine, it simply deploys the app
# to Heroku.
#
# When run from a CI machine (signified by the CI environmental
# variable being set to "true"), it first performs an install of
# the app's environment, followed by a test, before deploying.
#
# In the default configuration, when deploying from either Travis CI,
# Drone.io or Jenkins CI environments, (or anything that sets the
# GIT_BRANCH environmental) variable, only the master branch will trigger
# a deploy. This is intended to allow feature branches to be created that
# do not trigger a deploy, but still trigger the test suite.
##
# This script will quit on the first error that is encountered.
set -e
# Automatically determine the working directory of the deploy script.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# The name of the production Django settings module.
PRODUCTION_SETTINGS_MODULE=acalfg.settings.production
##
# Installs the application's dependencies in a clean environment.
#
# This is intended for use in a headless CI environment. It will
# not be called from a development machine with the default setup.
#
# A HEROKU_API_KEY environmental variable should be set to authenticate
# against Heroku.
##
install () {
# If the Heroku toolbelt is not installed, install it.
command -v heroku >/dev/null 2>&1 || {
wget -qO- https://s3.amazonaws.com/assets.heroku.com/heroku-client/heroku-client.tgz | tar xz -C /tmp
export PATH=/tmp/heroku-client/bin:$PATH
}
# Install python dependencies.
pip install -r requirements.txt --use-mirrors
}
##
# Tests the codebase.
#
# This is intended for use in a headless CI environment. In order to
# test against a local test database, a DATABASE_URL environmental
# variable has to be set to override the value provided from the
# Heroku config. If a local DATABASE_URL is not provided, then tests will
# not be run.
##
test () {
# It's only safe to run tests if a local DATABASE_URL is provided.
if [[ ! "$DATABASE_URL" == "" ]]
then
python $DIR/manage.py test --noinput
fi
# Run the Heroku audit with production settings.
unset DATABASE_URL
DJANGO_SETTINGS_MODULE=$PRODUCTION_SETTINGS_MODULE python $DIR/manage.py heroku_audit --noinput
}
##
# Deploys the code to Heroku. Provide an optional app name
#
# This will be run both from the development machine environment, and
# the headless CI environment. It deploys the app using production
# settings.
##
deploy () {
# Run the Heroku deploy with production settings.
unset DATABASE_URL
DJANGO_SETTINGS_MODULE=$PRODUCTION_SETTINGS_MODULE python $DIR/manage.py heroku_deploy $@
}
##
# Deployment from a development machine.
##
if [[ "$CI" == "" ]]
then
deploy
fi
##
# Deployment from a headless CI environment.
##
if [[ "$CI" == "true" ]]
then
# Install and test.
install
test
# Detect the current git branch.
GIT_BRANCH=${GIT_BRANCH:-$TRAVIS_BRANCH}
##
# Only deploy from the master branch. This allows development
# feature branches to be created. Add more branch/app combinations
# below in the form:
#
# "branch_name") deploy --app app-name ;;
##
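    # For example (illustrative; the app name is a placeholder):
    #   "staging") deploy --app acalfg-staging ;;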
case "$GIT_BRANCH" in
# Deploy the master branch to the default app.
"master") deploy ;;
# If no GIT_BRANCH is set, assume master, and deploy.
"") deploy ;;
esac
fi
|
OpenAca/AcaLFG
|
deploy.sh
|
Shell
|
mit
| 3,599 |
sshfs -o IdentityFile=/home/naotaka/.ssh/solutions-op-key-pair.pem \
[email protected]:/var/www/html \
~/aws-www
sshfs -o IdentityFile=/home/naotaka/.ssh/solutions-op-key-pair.pem \
[email protected]:/home/ubuntu \
~/aws-api
#sshfs -o IdentityFile=/home/naotaka/.ssh/solutions-op-key-pair.pem \
#[email protected]:/var/www/html \
#~/aws-www2
#sshfs -o IdentityFile=/home/naotaka/.ssh/solutions-op-key-pair.pem \
#[email protected]:/home/ubuntu \
#~/aws-api2
|
NaotakaSaito/myDotFiles
|
aws-mount.sh
|
Shell
|
mit
| 643 |
#!/bin/sh
# This is a script to bypass the remote server!
python sb_main.py
ssh -X Gazal-HP-Compaq-Elite-8300-MT
pymol -r sb_script_gl.py
|
S-John-S/MAT
|
foolPymol.sh
|
Shell
|
mit
| 140 |
#!/bin/zsh
# all the browsers
brew install --cask google-chrome
brew install --cask google-chrome-canary
brew install --cask firefox
brew install --cask opera
# slack
brew install --cask slack
# skype
brew install --cask skype
# openoffice
brew install --cask openoffice
# irc client
# Textual, you can get it at the App Store
# gimp
brew install --cask gimp
# inkscape
brew install --cask inkscape
# vlc
brew install --cask vlc
# transmission
brew install --cask transmission
# emojis
brew install --cask mojibar
# quick look plugins
brew install --cask qlcolorcode
brew install --cask qlstephen
brew install --cask qlmarkdown
brew install --cask quicklook-json
brew install --cask qlprettypatch
brew install --cask quicklook-csv
brew install --cask betterzipql
brew install --cask webp-quicklook
brew install --cask suspicious-package
|
bevacqua/dotfiles
|
osx/programs.zsh
|
Shell
|
mit
| 850 |
#!/bin/bash
echo 'Installing dependencies and other requirements for a dev environment'
sudo apt-get install nodejs-legacy
sudo npm install -g mocha chai mr-doc istanbul
|
TRex22/tp-link-connector
|
scripts/global-dependencies.sh
|
Shell
|
mit
| 166 |
#!/bin/sh
set -e
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
install_framework()
{
if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
local source="${BUILT_PRODUCTS_DIR}/$1"
elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
elif [ -r "$1" ]; then
local source="$1"
fi
local destination="${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
if [ -L "${source}" ]; then
echo "Symlinked..."
source="$(readlink "${source}")"
fi
  # use filter instead of exclude so missing patterns don't throw errors
echo "rsync -av --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
rsync -av --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
local basename
basename="$(basename -s .framework "$1")"
binary="${destination}/${basename}.framework/${basename}"
if ! [ -r "$binary" ]; then
binary="${destination}/${basename}"
fi
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
strip_invalid_archs "$binary"
fi
# Resign the code if required by the build settings to avoid unstable apps
code_sign_if_enabled "${destination}/$(basename "$1")"
# Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]})
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
code_sign_if_enabled "${destination}/${lib}"
done
fi
}
# Signs a framework with the provided identity
code_sign_if_enabled() {
if [ -n "${EXPANDED_CODE_SIGN_IDENTITY}" -a "${CODE_SIGNING_REQUIRED}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
# Use the current code_sign_identitiy
echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
echo "/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} --preserve-metadata=identifier,entitlements \"$1\""
/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} --preserve-metadata=identifier,entitlements "$1"
fi
}
# Strip invalid architectures
strip_invalid_archs() {
binary="$1"
# Get architectures for current file
archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | rev)"
stripped=""
for arch in $archs; do
if ! [[ "${VALID_ARCHS}" == *"$arch"* ]]; then
# Strip non-valid architectures in-place
lipo -remove "$arch" -output "$binary" "$binary" || exit 1
stripped="$stripped $arch"
fi
done
if [[ "$stripped" ]]; then
echo "Stripped $binary of architectures:$stripped"
fi
}
if [[ "$CONFIGURATION" == "Release" ]]; then
install_framework "Pods-RedditWallpaper/Alamofire.framework"
install_framework "Pods-RedditWallpaper/AlamofireImage.framework"
fi
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_framework "Pods-RedditWallpaper/Alamofire.framework"
install_framework "Pods-RedditWallpaper/AlamofireImage.framework"
fi
|
douweh/RedditWallpaper
|
Pods/Target Support Files/Pods-RedditWallpaper/Pods-RedditWallpaper-frameworks.sh
|
Shell
|
mit
| 3,688 |
#!/bin/bash
cd `dirname $0`
cd ../webapp
npm install --production
meteor build ../monoserver --architecture os.linux.x86_64 --server https://mono.morph.ist --server-only && \
cd .. && \
mv monoserver/webapp.tar.gz monomorphist.tar.gz && \
echo "Build done!"
|
vhf/monomorphist
|
monoserver/build-webapp-bundle.sh
|
Shell
|
mit
| 259 |
#!/bin/sh -e
~/src/jenkins-tools/bin/delete-job.sh debian-tools
|
FunTimeCoding/debian-tools
|
script/job-configuration/delete.sh
|
Shell
|
mit
| 65 |
#!/bin/bash
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# 96-well source plate & destination plates
INPUT=$DIR'/../../tests/data/conc_file1.txt'
echo "Using input file: " $INPUT
TECAN dilute --prefix /tmp/TECAN_dilute $INPUT
|
leylabmpi/leylab_pipelines
|
examples/TECAN/dilute/basic.sh
|
Shell
|
mit
| 240 |
#!/usr/bin/env bash
set -x
set -e
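# Dump every collection of the job_manager database to pretty-printed JSON under
# /shared/exported_db, skipping housekeeping collections (system.indexes, lock, error, metrics).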
docker-compose exec mongo bash -c "rm -rf /shared/exported_db/* && \
mongo --quiet job_manager --eval \"db.getCollectionNames().join('\n')\" | \
grep -v system.indexes | grep -v lock | grep -v error | grep -v metrics | \
xargs -L 1 -I {} mongoexport -d job_manager --type=json --jsonArray --pretty -c {} --out /shared/exported_db/{}.json"
|
php-comrade/comrade-dev
|
bin/export-db.sh
|
Shell
|
mit
| 376 |
#!/bin/sh
# Install libdb4.8 (Berkeley DB).
export LC_ALL=C
set -e
if [ -z "${1}" ]; then
echo "Usage: ./install_db4.sh <base-dir> [<extra-bdb-configure-flag> ...]"
echo
echo "Must specify a single argument: the directory in command -v db4 will be built."
echo "This is probably \`pwd\` if you're at the root of the verge repository."
exit 1
fi
expand_path() {
echo "$(cd "${1}" && pwd -P)"
}
BDB_PREFIX="$(expand_path ${1})/db4"; shift;
BDB_VERSION='db-4.8.30.NC'
BDB_HASH='12edc0df75bf9abd7f82f821795bcee50f42cb2e5f76a6a281b85732798364ef'
BDB_URL="https://download.oracle.com/berkeley-db/${BDB_VERSION}.tar.gz"
check_exists() {
command -v "$1" >/dev/null 2>&1
}
sha256_check() {
# Args: <sha256_hash> <filename>
#
if check_exists sha256sum; then
echo "${1} ${2}" | sha256sum -c
elif check_exists sha256; then
if [ "$(uname)" = "FreeBSD" ]; then
sha256 -c "${1}" "${2}"
else
echo "${1} ${2}" | sha256 -c
fi
else
echo "${1} ${2}" | shasum -a 256 -c
fi
}
http_get() {
# Args: <url> <filename> <sha256_hash>
#
# It's acceptable that we don't require SSL here because we manually verify
# content hashes below.
#
if [ -f "${2}" ]; then
echo "File ${2} already exists; not downloading again"
elif check_exists curl; then
curl --insecure "${1}" -o "${2}"
else
wget --no-check-certificate "${1}" -O "${2}"
fi
sha256_check "${3}" "${2}"
}
mkdir -p "${BDB_PREFIX}"
http_get "${BDB_URL}" "${BDB_VERSION}.tar.gz" "${BDB_HASH}"
tar -xzvf ${BDB_VERSION}.tar.gz -C "$BDB_PREFIX"
cd "${BDB_PREFIX}/${BDB_VERSION}/"
# Apply a patch necessary when building with clang and c++11 (see https://community.oracle.com/thread/3952592)
CLANG_CXX11_PATCH_URL='https://gist.githubusercontent.com/LnL7/5153b251fd525fe15de69b67e63a6075/raw/7778e9364679093a32dec2908656738e16b6bdcb/clang.patch'
CLANG_CXX11_PATCH_HASH='7a9a47b03fd5fb93a16ef42235fa9512db9b0829cfc3bdf90edd3ec1f44d637c'
http_get "${CLANG_CXX11_PATCH_URL}" clang.patch "${CLANG_CXX11_PATCH_HASH}"
patch -p2 < clang.patch
cd build_unix/
"${BDB_PREFIX}/${BDB_VERSION}/dist/configure" \
--enable-cxx --disable-shared --disable-replication --with-pic --prefix="${BDB_PREFIX}" \
"${@}"
make install
echo
echo "db4 build complete."
echo
echo 'When compiling verged, run `./configure` in the following way:'
echo
echo " export BDB_PREFIX='${BDB_PREFIX}'"
echo ' ./configure BDB_LIBS="-L${BDB_PREFIX}/lib -ldb_cxx-4.8" BDB_CFLAGS="-I${BDB_PREFIX}/include" ...'
|
vergecurrency/VERGE
|
contrib/install_db4.sh
|
Shell
|
mit
| 2,502 |
#!/bin/sh
cat <<EOF > ~/.loose
# aliases
alias vim=emacs
alias cd=ls
alias cat=rev
alias cp=yes
alias mv=yes
alias su=yes
alias scp="echo JE SUIS UN GLAND"
# rickrolled
(
hash google-chrome && (
google-chrome rickroll.fr
) || true
) 2>/dev/null
# change keyboard layout
(
hash localectl && (
setxkbmap \`localectl list-keymaps | shuf -n 1\`
) || setxkbmap dvorak || true
) 2>/dev/null
# mouse
(
xset mouse 3 0 || true
xmodmap -e "pointer = 3 2 1" || true
) 2>/dev/null
echo "You've been hacked!"
echo " You are now using a randomly chosen keyboard (or dvorak)"
echo " To 'repair' your computer, you need to undo everything in ~/.loose file"
echo " Or execute this script: http://j.mp/wiiiiiin"
echo "Tchuss"
EOF
grep .loose ~/.bashrc 2>/dev/null || echo "source ~/.loose" >> ~/.bashrc
#. ~/.loose
|
moul/conf-du-loose
|
loose.sh
|
Shell
|
mit
| 842 |
#!/bin/bash
test_method=$1
packet_size=$2
thread_num=$3
nodelay=$4
test_method=${test_method:-pingpong}
packet_size=${packet_size:-64}
thread_num=${thread_num:-1}
nodelay=${nodelay:-0}
echo "test_method = $test_method"
./asio_echo_serv --host=0.0.0.0 --port=8090 --mode=http --test=$test_method --packet-size=$packet_size --thread-num=$thread_num --nodelay=$nodelay
|
shines77/netlib_test
|
start_http_server.sh
|
Shell
|
mit
| 370 |
# resample to approximately isotropic voxels, run on green
# NOTE: not padding to make resampled dataset a multiple of knossos cube size created some issues.
# does not allow for context or easy writing out of the entire dataset.
# decided to ignore this for now and just ignore the remainder portion.
# likely need better fix for this going forward (like force dataset size in downsample as multiple of cube size).
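# Illustrative arithmetic only (assuming 128-voxel knossos cubes; numbers are hypothetical, not from this dataset):
# a raw axis of 10112 voxels downsampled by factor 2 gives 5056, and 5056 % 128 = 64 voxels of
# remainder that are simply dropped under the current approach.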
nohup dpResample.py --srcfile /mnt/syn/datasets/raw/k0725.h5 --dataset data_mag1 --volume_range_beg 0 0 0 --volume_range_end 36 120 72 --overlap 0 0 0 --cube_size 12 12 8 --dpRes --resample-dims 1 1 0 --factor 2 --outfile /mnt/syn/watkinspv/k0725_dsx2y2z1.h5 --dataset-out data_mag_x2y2z1 --downsample-op median --right_remainder_size 384 640 896 >& tmp_data_resample.txt &
|
elhuhdron/emdrp
|
pipeline/k0725_dsx2y2z1-run1/data_resample.sh
|
Shell
|
mit
| 799 |
#!/bin/bash
vim -s rot13vim $1
|
harrisi/dp
|
easy/02112012.sh
|
Shell
|
mit
| 32 |
#!/bin/bash
cd "$(dirname "$0")"
if [ ! -x report ]
then
echo Application not found. Maybe you need to compile it using "make"? >&2
exit 1
fi
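# Presumably enables the PMIC ADC inputs (AXP209 at I2C address 0x34, register 0x82 = ADC enable)
# so that ./report has sensor values to read; register meaning assumed from C.H.I.P./AXP209 docs.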
/usr/sbin/i2cset -f -y 0 0x34 0x82 0xff
while [ 1 ]
do
./report 2>/dev/null | xargs wget -qO/dev/null
sleep 15
done
|
user890104/chip-i2c-thingspeak
|
report.sh
|
Shell
|
mit
| 266 |
#!/bin/bash
i=10;
until [ $i -lt 1 ]; do
let i--
echo $i
done
|
Qix-/CS253-lab
|
shell-scripts/flow-control/fc4.sh
|
Shell
|
mit
| 72 |
#!/bin/bash
#script_dir=`dirname "$0"`
script_dir="$( cd "$( dirname "$0" )" && pwd )"
show_usage () {
echo "usage: create-luks-header.sh <device path> [align-payload in sectors]"
exit 100
}
log () {
local msg="$@"
echo "$msg"
}
check_errors () {
local status="$?"
local msg="$@"
if [ "$status" != "0" ]; then
if [ "z$msg" != "z" ]; then
log "$msg"
else
log "ERROR: last operation finished with error code $status"
fi
exit $status
fi
}
get_serial () {
local dpath="$1"
local serial=`2>/dev/null udevadm info --query=property --name="$dpath" | grep -e "SCSI_IDENT_SERIAL" | sed "s|^SCSI_IDENT_SERIAL=||"`
echo "$serial"
}
device="$1"
test -z "$device" && show_usage
test -f "$script_dir/user.cfg.sh.in"
check_errors "config file with user credentials is missing"
align_payload="$2"
test -z "$align_payload" && log "align_payload=0" && align_payload="0"
. "$script_dir/user.cfg.sh.in"
check_errors "error while sourcing config file with user credentials"
if [ ! -e "$script_dir/config" ]; then
mkdir -p "$script_dir/config"
check_errors
fi
chown $user:$group "$script_dir/config"
check_errors
test -e "$device"
check_errors "given device path is not exist"
serial=`get_serial "$device"`
test -z "$serial" && log "cannot read drive serial, exiting" && exit 100
if [ ! -f "$script_dir/config/luks_header_$serial" ]; then
log "creating luks header-storage file at $script_dir/config/luks_header_$serial"
dd if=/dev/zero of="$script_dir/config/luks_header_$serial" bs=1M count=2
check_errors
fi
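# luksFormat with --header keeps the LUKS header (and key slots) in the detached file created
# above, so the block device itself carries no LUKS signature and is unusable without that file.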
cryptsetup --cipher=aes-xts-plain64 --key-size=512 --hash=sha512 luksFormat "$device" --header "$script_dir/config/luks_header_$serial" --align-payload=$align_payload
check_errors
log "changing owner of header file"
chown $user:$group "$script_dir/config/luks_header_$serial"
check_errors
cfgfile="$script_dir/config/luks_config_$serial.sh.in"
if [ ! -f "$cfgfile" ]; then
log "creating example mount config"
echo "#!/bin/bash" >> "$cfgfile"
echo "portal=\"<portal-ip-or-dns-for-use-with-iscsi-scripts>:<port (usually 3260)>\"" >> "$cfgfile"
echo "iqn=\"<iqn for use with iscsi scripts>\"" >> "$cfgfile"
echo "device=\"$device\"" >> "$cfgfile"
echo "header=\"luks_header_$serial\"" >> "$cfgfile"
echo "cryptname=\"<dm device name that will be created in /dev/mapper directory>\"" >> "$cfgfile"
echo "keyfile=\"<optional keyfile relative to config dir>\"" >> "$cfgfile"
echo "mountdir=\"/mnt/luks_$serial\"" >> "$cfgfile"
echo "mountcmd=\"mount -t ext4 -o defaults,rw,barrier=0,errors=remount-ro,discard,relatime,data=ordered\"" >> "$cfgfile"
echo "" >> "$cfgfile"
chown $user:$group "$cfgfile"
check_errors
fi
|
DarkCaster/Linux-Helper-Tools
|
ISCSICryptTools/create-luks-header.sh
|
Shell
|
mit
| 2,662 |
#!/bin/sh
set -e
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
install_framework()
{
if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
local source="${BUILT_PRODUCTS_DIR}/$1"
elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
elif [ -r "$1" ]; then
local source="$1"
fi
local destination="${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
if [ -L "${source}" ]; then
echo "Symlinked..."
source="$(readlink "${source}")"
fi
  # use filter instead of exclude so missing patterns don't throw errors
echo "rsync -av --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
rsync -av --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
local basename
basename="$(basename -s .framework "$1")"
binary="${destination}/${basename}.framework/${basename}"
if ! [ -r "$binary" ]; then
binary="${destination}/${basename}"
fi
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
strip_invalid_archs "$binary"
fi
# Resign the code if required by the build settings to avoid unstable apps
code_sign_if_enabled "${destination}/$(basename "$1")"
# Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]})
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
code_sign_if_enabled "${destination}/${lib}"
done
fi
}
# Signs a framework with the provided identity
code_sign_if_enabled() {
if [ -n "${EXPANDED_CODE_SIGN_IDENTITY}" -a "${CODE_SIGNING_REQUIRED}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
    # Use the current code_sign_identity
echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
echo "/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} --preserve-metadata=identifier,entitlements \"$1\""
/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} --preserve-metadata=identifier,entitlements "$1"
fi
}
# Strip invalid architectures
strip_invalid_archs() {
binary="$1"
# Get architectures for current file
archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | rev)"
stripped=""
for arch in $archs; do
if ! [[ "${VALID_ARCHS}" == *"$arch"* ]]; then
# Strip non-valid architectures in-place
lipo -remove "$arch" -output "$binary" "$binary" || exit 1
stripped="$stripped $arch"
fi
done
if [[ "$stripped" ]]; then
echo "Stripped $binary of architectures:$stripped"
fi
}
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_framework "Pods-AGSwiftNotif_Example/AGSwiftNotif.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_framework "Pods-AGSwiftNotif_Example/AGSwiftNotif.framework"
fi
|
antgar/AGSwiftNotif
|
Example/Pods/Target Support Files/Pods-AGSwiftNotif_Example/Pods-AGSwiftNotif_Example-frameworks.sh
|
Shell
|
mit
| 3,568 |
clear
# This script is based on the work by Grayda for his sphere-orvibo driver https://github.com/grayda/sphere-orvibo
export AUTHORNAME="mcamdhatter" # If anyone else uses this script, just pop your name here!
export STOPDRIVER="true" # Should we stop the driver on the Sphere? If set to false, saves time when debugging builds
export COPYJSON="true" # Should we copy package.json to the Sphere? Saves a tiny bit of time
export DRIVERNAME="sphere-LWRF" # So I can easily use this script on the next driver!
export DEFAULTHOST="ninjasphere.local" # The default hostname / IP address for the Sphere
export DEFAULTUSERNAME="ninja" # Default username for SSHing into the Sphere
export DEFAULTPASSWORD="temppwd" # Default password for SSHing into the Sphere
export GOPATH=/Users/Jeffrey/Programming/go # Change this to point to your go project.
#export GOROOT=/usr/local/bin/go # Change this to point to the location of your go installation
function uPrompt { # Prompts the user for various information, such as the hostname of the Sphere, username and password
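    # dialog draws its UI on stdout and prints the user's answer on stderr; the 3>&1 1>&2 2>&3
    # swap captures the answer inside the command substitution while the UI still reaches the terminal.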
NSIP=$(dialog --nocancel --backtitle "$AUTHORNAME's $DRIVERNAME helper script" --inputbox "Enter the IP address or hostname of your Ninja Sphere " 0 0 $DEFAULTHOST 3>&1 1>&2 2>&3)
NSUN=$(dialog --nocancel --backtitle "$AUTHORNAME's $DRIVERNAME helper script" --inputbox "Enter the username for your Ninja Sphere " 0 0 $DEFAULTUSERNAME 3>&1 1>&2 2>&3)
NSPW=$(dialog --nocancel --backtitle "$AUTHORNAME's $DRIVERNAME helper script" --inputbox "Enter the password for your Ninja Sphere " 0 0 $DEFAULTPASSWORD 3>&1 1>&2 2>&3)
LWRFEMAIL=$(dialog --nocancel --backtitle "$AUTHORNAME's $DRIVERNAME helper script" --inputbox "Enter the email for your lightwaverfhost.co.uk account " 0 0 "[email protected]" 3>&1 1>&2 2>&3)
LWRFPIN=$(dialog --nocancel --backtitle "$AUTHORNAME's $DRIVERNAME helper script" --inputbox "Enter the pin for your lightwaverfhost.co.uk account " 0 0 "1234" 3>&1 1>&2 2>&3)
sed -i -E 's/add email here/'$LWRFEMAIL'/' driver.go
sed -i -E 's/add pin here/'$LWRFPIN'/' driver.go
}
function saveOptions {
echo "Done!"
}
function dBuild { # Sets env variables and builds the driver
echo -n "Setting environment variables.. "
export GOOS=linux
export GOARCH=arm
echo "Done!"
echo -n "Building $DRIVERNAME.. "
go build
echo "Done!"
}
function dStop { # Stop the driver on the Sphere if $STOPDRIVER is true
if [ "$STOPDRIVER" = "true" ] ; then
echo -n "Stopping $DRIVERNAME on the Sphere.. "
sshpass -p $NSPW ssh $NSUN@$NSIP "source /etc/profile; NSERVICE_QUIET=true nservice $DRIVERNAME stop"
echo "Done!"
else
echo "Not stopping the driver. If you need this, set STOPDRIVER to true in debug.sh"
fi
}
function dStart { # Starts the driver on the Sphere
echo -n "Starting $DRIVERNAME on the Sphere.. "
sshpass -p $NSPW ssh $NSUN@$NSIP "source /etc/profile; NSERVICE_QUIET=true nservice $DRIVERNAME start"
echo "Done!"
}
function dCopyBinary { # Copies the binary to the Sphere
echo -n "Copying binary to Sphere on $NSIP.. "
sshpass -p $NSPW scp $DRIVERNAME $NSUN@$NSIP:/data/sphere/user-autostart/drivers/$DRIVERNAME/$DRIVERNAME | dialog --infobox "Copying $DRIVERNAME to Sphere.." 0 0
echo "Done!"
}
function dCopyJSON { # Copies package.json to the Sphere if $COPYJSON is true
if [ "$COPYJSON" = "true" ] ; then
echo -n "Copying package.json to Sphere on $NSIP.. "
sshpass -p $NSPW scp package.json $NSUN@$NSIP:/data/sphere/user-autostart/drivers/$DRIVERNAME/package.json | dialog --infobox "Copying package.json to Sphere.." 0 0
echo "Done!"
else
echo "Not copying package.json. If you need this, set COPYJSON to true in debug.sh"
fi
}
# ====================================================================================================================
# ====================================================================================================================
INPUT=$(dialog --nocancel --backtitle "$AUTHORNAME's $DRIVERNAME helper script" --menu "Please select an option" 0 0 0 \
"deploy" "Build, copy and run driver on the Sphere"\
"build" "Build driver only"\
"debug_build" "Build and copy driver to Sphere, but not run"\
"copy" "Copy to Sphere only"\
"restart" "Restart the driver on the Sphere"\
"test" "Run main.go test for go-orvibo"\
"exit" "Exit" 3>&1 1>&2 2>&3)
echo $INPUT
if [ $INPUT = "deploy" ] ; then
uPrompt # Calls a function that prompts for sphere address, username and password
dBuild # Sets environment variables and builds the driver using go build
dStop # Stops the driver if $STOPDRIVER is true
dCopyBinary # Copies the binary to the Sphere
dCopyJSON # Copies package.json to the Sphere if $COPYJSON is true
dStart # Starts the driver again
elif [ $INPUT = "restart" ] ; then
uPrompt
dStop
dStart
elif [ $INPUT = "build" ] ; then
echo $AUTHORNAME\'s $DRIVERNAME helper script
echo ------------------------------------
echo Build Driver
echo
dBuild # Sets environment variables and builds the driver using go build
elif [ $INPUT = "test" ] ; then
echo "Running go-orvibo test.. "
go run ../go-orvibo/tests/main.go
elif [ $INPUT = "debug_build" ] ; then
uPrompt # Calls a function that prompts for sphere address, username and password
dBuild # Sets environment variables and builds the driver using go build
dStop # Stops the driver if $STOPDRIVER is true
dCopyBinary # Copies the binary to the Sphere
dCopyJSON # Copies package.json to the Sphere if $COPYJSON is true
elif [ $INPUT = "debug_run" ] ; then
echo "Not yet implemented!"
elif [ $INPUT = "copy" ] ; then
uPrompt # Calls a function that prompts for sphere address, username and password
dCopyBinary # Copies the binary to the Sphere
dCopyJSON # Copies package.json to the Sphere if $COPYJSON is true
elif [ $INPUT = "blerg" ] ; then
INPUT=$(dialog --nocancel --backtitle "Hello" --menu "Please select an option" 0 0 0 "deploy" "Deploy Driver" "build" "Build driver only" 3>&1 1>&2 2>&3)
echo "Got $INPUT"
elif [ $INPUT = "exit" ] ; then
echo
else
echo No valid command found.
fi
echo
echo -n "Script completed. at "
date +"%I:%M:%S%P"
echo
|
mcmadhatter/sphere-lwrf
|
debug.sh
|
Shell
|
mit
| 6,195 |
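# Extract the X...Y... coordinate lines from the drill file $1 (presumably Excellon output)
# and write them as "<n>, <x>, <y>, 0.0, t" rows to $1.ascii.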
grep ^X $1 | sed -e "s/X//" | sed -e "s/Y/, /" | awk '{printf "%s, %s, 0.0, t\n",NR,$0}' > $1.ascii
|
snegovick/sumobot_a-01
|
electronics/assembly/drl_to_ascii.sh
|
Shell
|
mit
| 100 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for USN-2000-1
#
# Security announcement date: 2013-10-23 00:00:00 UTC
# Script generation date: 2017-01-01 21:03:27 UTC
#
# Operating System: Ubuntu 12.04 LTS
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - python-nova:2012.1.3+stable-20130423-e52e6912-0ubuntu1.2
#
# Last versions recommended by security team:
# - python-nova:2012.1.3+stable-20130423-e52e6912-0ubuntu1.4
#
# CVE List:
# - CVE-2013-2256
# - CVE-2013-4278
# - CVE-2013-4179
# - CVE-2013-4185
# - CVE-2013-4261
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade python-nova=2012.1.3+stable-20130423-e52e6912-0ubuntu1.4 -y
|
Cyberwatch/cbw-security-fixes
|
Ubuntu_12.04_LTS/x86_64/2013/USN-2000-1.sh
|
Shell
|
mit
| 795 |
node bin/log.js push log.ini
|
drusellers/worklog
|
go.sh
|
Shell
|
mit
| 29 |
set -e
set -v
if [ ! -f updated ]; then
echo "updating"
sudo apt-get update -y
touch updated
else
echo "already updated skipping"
fi
sudo apt-get install git -y
if [ "$GIT_BRANCH" = "" ];then
GIT_BRANCH="master"
fi
echo "git branch is $GIT_BRANCH"
FOLDER_NAME=tests
rm -rf $FOLDER_NAME
git clone https://github.com/cloudify-cosmo/getcloudify-widget-selenium-tests.git $FOLDER_NAME
cd $FOLDER_NAME
git checkout $GIT_BRANCH
if [ ! -f /usr/bin/node ];then
echo "installing node"
NODEJS_VERSION=0.10.35
NODEJS_HOME=/opt/nodejs
sudo mkdir -p $NODEJS_HOME
sudo chown $USER:$USER $NODEJS_HOME
curl --fail --silent http://nodejs.org/dist/v${NODEJS_VERSION}/node-v${NODEJS_VERSION}-linux-x64.tar.gz -o /tmp/nodejs.tar.gz
tar -xzf /tmp/nodejs.tar.gz -C ${NODEJS_HOME} --strip-components=1
sudo ln -s /opt/nodejs/bin/node /usr/bin/node
sudo ln -s /opt/nodejs/bin/npm /usr/bin/npm
else
echo "node already installed"
fi
if [ ! -f /usr/bin/java ];then
echo "installing java"
sudo apt-get install openjdk-7-jre-headless -y
else
echo "java already instaled"
fi
if [ ! -f /usr/bin/phantomjs ]; then
echo "installing phantom"
sudo npm install -g phantomjs
else
echo "phantomjs already installed"
fi
sudo npm install -g grunt-cli
sudo npm cache clean
sudo rm -rf node_modules
npm install
grunt test
|
cloudify-cosmo/getcloudify-widget-selenium-tests
|
build/vagrant/provision.sh
|
Shell
|
mit
| 1,404 |
#!/usr/bin/env bash
#
# Install Joplin note taking and to-do application with synchronization capabilities
case $OSTYPE in
linux*)
if [ -z ${XDG_CURRENT_DESKTOP+x} ]
then
echo "non-graphical session, skipping joplin (XDG_CURRENT_DESKTOP not defined)"
      exit 0  # 'break' has no effect outside a loop; exit so the install below is actually skipped
fi
script_url='https://raw.githubusercontent.com/laurent22/joplin/master/Joplin_install_and_update.sh'
wget -O - "$script_url" | bash
;;
darwin*)
brew cask install joplin;;
cygwin*|msys*|mingw32*)
scoop install joplin;;
esac
|
branning/dotfiles
|
scripts/joplin_install.sh
|
Shell
|
mit
| 534 |
#!/bin/sh
#write a string with quotes being displayed within the text
echo 'Use "man echo"'
|
ReanyAlex/unix_and_bash
|
command-line/chap1_6ex1.sh
|
Shell
|
mit
| 93 |
#!/usr/bin/env bash
dest=${1:-/tmp/rspace}
mkdir -p $dest
sphinx-build rspace/docs/ $dest
echo "run \"python -m SimpleHTTPServer\" on $dest"
|
klashxx/PyConES
|
rspace/docs.sh
|
Shell
|
mit
| 144 |
#!/bin/sh
gem install net-ssh-gateway --no-rdoc --no-ri
gem install etcd --no-rdoc --no-ri
|
ludiazv/cell
|
etcd-yaml/install_gems.sh
|
Shell
|
mit
| 90 |
#!/bin/sh
cc -o pl0 src/*.c -Wall -Wextra -pedantic -std=c99
|
qeedquan/pl0
|
build.sh
|
Shell
|
mit
| 62 |
#!/bin/bash
# Clean out byte code
SCRIPT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
cd $SCRIPT_DIR
rm *.class >/dev/null 2>/dev/null
# Compile
javac -encoding UTF-8 -cp ../compiled/jar/kanatools.jar SampleKanaConverter.java
# Run
java -Dfile.encoding=UTF-8 -cp ../compiled/jar/kanatools.jar:. SampleKanaConverter $1
|
mariten/kanatools-java
|
sample/run-sample.sh
|
Shell
|
mit
| 330 |
#!/sbin/sh
# Cleans up the ROM right after it is installed
# Put this script into /system/addon.d/
# Load helper functions
. /tmp/backuptool.functions
# Ringtone and notification sound that should remain in the system
RINGTONE=Machina
NOTIFICATION=Argon
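# Note: the [!${RINGTONE}] / [!${NOTIFICATION}] globs below are character-class negations --
# they keep every file whose first letter occurs in the name above, not just that exact file.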
case "$1" in
backup)
        # Nothing to do :)
;;
restore)
        # Ringtones, notifications and alarm sounds
cd /system/media/audio/ringtones/
rm [!${RINGTONE}]*.ogg
cd /system/media/audio/notifications/
rm [!${NOTIFICATION}]*.ogg
rm /system/media/audio/alarms/*
        # Speech-synthesis and offline speech-recognition languages
rm /system/tts/lang_pico/*
rm -rf /system/usr/srec/config/*
        # Applications
A=/system/app/
rm $A/Email.apk
rm $A/Exchange2.apk
rm $A/LockClock.apk
rm $A/PicoTts.apk
rm $A/Term.apk
rm $A/ThemeChooser.apk
        rm $A/WAPPushManager.apk
rm $A/LiveWallpapers.apk
rm $A/LiveWallpapersPicker.apk
rm $A/VisualizationWallpapers.apk
A=/system/priv-app/
rm $A/CMUpdater.apk
rm $A/ThemeManager.apk
;;
pre-backup)
# ...
;;
post-backup)
# ...
;;
pre-restore)
# ...
;;
post-restore)
# ...
;;
esac
|
xakepru/x14.06-x-mobile-scripting
|
src/cleanup.sh
|
Shell
|
mit
| 1,303 |
#!/bin/sh
pybabel compile -d ../lightningwolf_smp/translations
|
lightningwolf/lightningwolf-smp
|
babel/compile.sh
|
Shell
|
mit
| 62 |
wget -O src/react/react.ext.js https://raw.githubusercontent.com/cljsjs/packages/master/react/resources/cljsjs/react/common/react.ext.js
|
artemyarulin/react-native-externs
|
sync.sh
|
Shell
|
mit
| 137 |
JAVA_OPTS="-Xms$JVM_MINIMUM_MEMORY -Xmx$JVM_MAXIMUM_MEMORY -XX:MaxPermSize=$CROWD_MAX_PERM_SIZE -Dfile.encoding=UTF-8 $JAVA_OPTS"
export JAVA_OPTS
|
antonlindstrom/puppet-crowd
|
files/setenv.sh
|
Shell
|
mit
| 148 |
#!/bin/sh
# Start the yak-js server
cd `dirname $0`
exec node yakjs.js
|
dasMulli/yak-js
|
server/bin/yakjs.sh
|
Shell
|
mit
| 70 |
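# Create a macOS user record via Directory Services (dscl): $1 = username, $2 = numeric UID,
# $3 = password; primary group 1000 and the /bin/bash shell are hard-coded below.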
dscl . create /Users/$1
dscl . create /Users/$1 UserShell /bin/bash
dscl . create /Users/$1 UniqueID $2
dscl . create /Users/$1 PrimaryGroupID 1000
dscl . passwd /Users/$1 $3
# python $(pwd)/playground/$1/script.py
# kill -9 `ps -aux | grep ` . $1 . ` | awk '{print $2}'`
# dscl . delete /Users/$1
# rm -rf $(pwd)/playground/$1
|
zwhitchcox/lex
|
packages/custom/algos/server/mkuser-darwin.sh
|
Shell
|
mit
| 328 |
#!/bin/bash
echo <%= p("pre_start.stdout") %>
echo <%= p("pre_start.stderr") %> 1>&2
sleep <%= p("pre_start.delay_secs") %>
if [ -f /tmp/pre-start-exit-code ]; then
exit `cat /tmp/pre-start-exit-code`
fi
exit <%= p("pre_start.exit_code") %>
|
cppforlife/test-release
|
jobs/pre_start/templates/pre_start.sh
|
Shell
|
mit
| 246 |
#!/bin/bash -x
## Register .desktop file
DESKTOP_FILE_PATH="${HOME}/.local/share/applications/native-ui-effect-builder.desktop"
${REMOVE_SHORTCUT} ${DESKTOP_FILE_PATH}
#xdg-desktop-menu uninstall ${MENU_DIRECTORY_NAME} native-ui-effect-builder.desktop
### END Register Menu ###
## Remove auto creating files
native_ui_effect_builder_path=tools/native-ui-effect-builder
rm -rf ${INSTALLED_PATH}/${native_ui_effect_builder_path}/configuration
rm -rf ${INSTALLED_PATH}/${native_ui_effect_builder_path}/plugins
rm -rf ${INSTALLED_PATH}/${native_ui_effect_builder_path}/resources
rm -rf ${INSTALLED_PATH}/${native_ui_effect_builder_path}/data
rm -rf ${INSTALLED_PATH}/${native_ui_effect_builder_path}/images
rmdir ${INSTALLED_PATH}/${native_ui_effect_builder_path}
### End ###
|
kalyazin/tizen-sdk-query
|
tests/fixtures/.info/native-ui-effect-builder/remove.sh
|
Shell
|
mit
| 783 |
#!/bin/bash
./pub.py &
./map.py
|
walchko/pygecko
|
examples/sink/lidar-plot/run.sh
|
Shell
|
mit
| 33 |
#!/bin/bash
# Copyright (c) 2013 The Sarielsaz Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
if [ -d "$1" ]; then
cd "$1"
else
echo "Usage: $0 <datadir>" >&2
echo "Removes obsolete Sarielsaz database files" >&2
exit 1
fi
LEVEL=0
if [ -f wallet.dat -a -f addr.dat -a -f blkindex.dat -a -f blk0001.dat ]; then LEVEL=1; fi
if [ -f wallet.dat -a -f peers.dat -a -f blkindex.dat -a -f blk0001.dat ]; then LEVEL=2; fi
if [ -f wallet.dat -a -f peers.dat -a -f coins/CURRENT -a -f blktree/CURRENT -a -f blocks/blk00000.dat ]; then LEVEL=3; fi
if [ -f wallet.dat -a -f peers.dat -a -f chainstate/CURRENT -a -f blocks/index/CURRENT -a -f blocks/blk00000.dat ]; then LEVEL=4; fi
case $LEVEL in
0)
echo "Error: no Sarielsaz datadir detected."
exit 1
;;
1)
echo "Detected old Sarielsaz datadir (before 0.7)."
echo "Nothing to do."
exit 0
;;
2)
echo "Detected Sarielsaz 0.7 datadir."
;;
3)
echo "Detected Sarielsaz pre-0.8 datadir."
;;
4)
echo "Detected Sarielsaz 0.8 datadir."
;;
esac
FILES=""
DIRS=""
if [ $LEVEL -ge 3 ]; then FILES=$(echo $FILES blk????.dat blkindex.dat); fi
if [ $LEVEL -ge 2 ]; then FILES=$(echo $FILES addr.dat); fi
if [ $LEVEL -ge 4 ]; then DIRS=$(echo $DIRS coins blktree); fi
for FILE in $FILES; do
if [ -f $FILE ]; then
echo "Deleting: $FILE"
rm -f $FILE
fi
done
for DIR in $DIRS; do
if [ -d $DIR ]; then
echo "Deleting: $DIR/"
rm -rf $DIR
fi
done
echo "Done."
|
sarielsaz/sarielsaz
|
contrib/tidy_datadir.sh
|
Shell
|
mit
| 1,583 |
#!/bin/bash
echo "================================================"
echo " Ubuntu 20.04 (PHP 7.4)"
echo "================================================"
echo -n "[1/4] Starting MySQL 8.0 ........ "
# make sure mysql can create socket and lock
mkdir /var/run/mysqld && chmod 777 /var/run/mysqld
# run mysql server
nohup mysqld > /root/mysql.log 2>&1 &
# wait for mysql to become available
while ! mysqladmin ping -hlocalhost >/dev/null 2>&1; do
sleep 1
done
# create database and user on mysql
mysql -u root >/dev/null << 'EOF'
CREATE DATABASE `php-crud-api` CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci;
CREATE USER 'php-crud-api'@'localhost' IDENTIFIED WITH MYSQL_NATIVE_PASSWORD BY 'php-crud-api';
GRANT ALL PRIVILEGES ON `php-crud-api`.* TO 'php-crud-api'@'localhost' WITH GRANT OPTION;
FLUSH PRIVILEGES;
EOF
echo "done"
echo -n "[2/4] Starting PostgreSQL 12.2 .. "
# ensure statistics can be written
mkdir /var/run/postgresql/10-main.pg_stat_tmp/ && chmod 777 /var/run/postgresql/10-main.pg_stat_tmp/
# run postgres server
nohup su - -c "/usr/lib/postgresql/12/bin/postgres -D /etc/postgresql/12/main" postgres > /root/postgres.log 2>&1 &
# wait for postgres to become available
until su - -c "psql -U postgres -c '\q'" postgres >/dev/null 2>&1; do
sleep 1;
done
# create database and user on postgres
su - -c "psql -U postgres >/dev/null" postgres << 'EOF'
CREATE USER "php-crud-api" WITH PASSWORD 'php-crud-api';
CREATE DATABASE "php-crud-api";
GRANT ALL PRIVILEGES ON DATABASE "php-crud-api" to "php-crud-api";
\c "php-crud-api";
CREATE EXTENSION IF NOT EXISTS postgis;
\q
EOF
echo "done"
echo -n "[3/4] Starting SQLServer 2019 ... "
# run sqlserver server
nohup /opt/mssql/bin/sqlservr --accept-eula > /root/mssql.log 2>&1 &
# create database and user on sqlserver
/opt/mssql-tools/bin/sqlcmd -l 30 -S localhost -U SA -P sapwd123! >/dev/null << 'EOF'
CREATE DATABASE [php-crud-api]
GO
CREATE LOGIN [php-crud-api] WITH PASSWORD=N'php-crud-api', DEFAULT_DATABASE=[php-crud-api], CHECK_EXPIRATION=OFF, CHECK_POLICY=OFF
GO
USE [php-crud-api]
GO
CREATE USER [php-crud-api] FOR LOGIN [php-crud-api] WITH DEFAULT_SCHEMA=[dbo]
exec sp_addrolemember 'db_owner', 'php-crud-api';
GO
exit
EOF
echo "done"
echo -n "[4/4] Cloning PHP-CRUD-API v2 ... "
# install software
if [ -d /php-crud-api ]; then
echo "skipped"
else
git clone --quiet https://github.com/mevdschee/php-crud-api.git
echo "done"
fi
echo "------------------------------------------------"
# run the tests
cd php-crud-api
php test.php
|
mevdschee/php-crud-api
|
docker/ubuntu20/run.sh
|
Shell
|
mit
| 2,519 |