code | repo_name | path | language | license | size
---|---|---|---|---|---|
source ../testsupport.sh
bpipe run -p AMPLICON_BED=amplicons.bed test.groovy test.bed test.xml > test.out
[ -e amplicons.fasta ] || err "Incorrect output file: file name 'amplicons.fasta' should have been inferred from transform"
true
|
vivovip/bpipe
|
tests/from_transform_to/run.sh
|
Shell
|
bsd-3-clause
| 237 |
#!/bin/bash
FN="TxDb.Dmelanogaster.UCSC.dm6.ensGene_3.4.4.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.8/data/annotation/src/contrib/TxDb.Dmelanogaster.UCSC.dm6.ensGene_3.4.4.tar.gz"
"https://bioarchive.galaxyproject.org/TxDb.Dmelanogaster.UCSC.dm6.ensGene_3.4.4.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-txdb.dmelanogaster.ucsc.dm6.ensgene/bioconductor-txdb.dmelanogaster.ucsc.dm6.ensgene_3.4.4_src_all.tar.gz"
)
MD5="7cad715c42d1f960c0e54905ec6612d7"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
wget -O- -q $URL > $TARBALL
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks.
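# Note: Linux 'md5sum -c' verifies a "<hash>  <file>" line, while macOS has no
# md5sum; there the hash is parsed out of 'md5 <file>', whose output looks like
# "MD5 (<file>) = <hash>" (hence the 'cut -f4 -d " "' below).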
if [[ $(uname -s) == "Linux" ]]; then
if md5sum -c <<<"$MD5 $TARBALL"; then
SUCCESS=1
break
fi
elif [[ $(uname -s) == "Darwin" ]]; then
if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
SUCCESS=1
break
fi
fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
|
joachimwolff/bioconda-recipes
|
recipes/bioconductor-txdb.dmelanogaster.ucsc.dm6.ensgene/post-link.sh
|
Shell
|
mit
| 1,429 |
#!/bin/bash
if [ -d "node_modules" ]; then
rm -r node_modules
fi
if [ -d ".npm" ]; then
rm -r .npm
fi
if [ -d ".npmrc" ]; then
rm -r .npmrc
fi
|
BradBolander/Project4
|
node_modules/osc/clean-npm.sh
|
Shell
|
mit
| 157 |
#!/bin/bash
FN="PathNetData_1.18.0.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.8/data/experiment/src/contrib/PathNetData_1.18.0.tar.gz"
"https://bioarchive.galaxyproject.org/PathNetData_1.18.0.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-pathnetdata/bioconductor-pathnetdata_1.18.0_src_all.tar.gz"
)
MD5="baad9f77b8914d21f84963e3b19f67ed"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
wget -O- -q $URL > $TARBALL
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks.
if [[ $(uname -s) == "Linux" ]]; then
if md5sum -c <<<"$MD5 $TARBALL"; then
SUCCESS=1
break
fi
elif [[ $(uname -s) == "Darwin" ]]; then
if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
SUCCESS=1
break
fi
fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
|
ivirshup/bioconda-recipes
|
recipes/bioconductor-pathnetdata/post-link.sh
|
Shell
|
mit
| 1,313 |
wget --quiet \
--method POST \
--header 'cookie: foo=bar; bar=baz' \
--output-document \
- http://mockbin.com/har
|
postmanlabs/httpsnippet
|
test/fixtures/output/shell/wget/cookies.sh
|
Shell
|
mit
| 122 |
# UpdateChannels.sh
# the url for different channels varies with the channel number
# But not all channels are valid. This scans channels 1 to 2000, allowing 4 seconds each
# If a channel is valid, the download will start; if not, it will time out
for i in {1..2000}; do
timeout 4 wget --tries=1 http://fms.cntv.lxdns.com/live/flv/channel$i.flv
done
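# The pipeline below keeps only the list of valid channels: each downloaded
# .flv is removed and replaced with an empty placeholder of the same name.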
ls *flv|awk '{print "rm -f",$1,"; touch", $1}' | sh
|
ksun6868/xbmc-addons-chinese
|
plugin.video.asia-tv/UpdateChannels.sh
|
Shell
|
gpl-2.0
| 390 |
#! /bin/sh
libtoolize --copy --force
aclocal
autoheader
automake --gnu --add-missing --copy
autoconf
|
DMCsys/smartalkaudio
|
oss-survey/tcpmp/mpc/libmusepack/autogen.sh
|
Shell
|
gpl-3.0
| 102 |
#!/bin/sh
if [ "${1}" == "" ]; then
echo "release number not provided, pulling from the latest"
release_ver=v`npm view | grep "dist-tags" | sed "s/.*latest: '\([0-9]\{1,2\}\.[0-9]\{1,2\}\.[0-9]\{1,2\}\)'.*/\1/"`
else
release_ver=${1}
fi
echo "using release version: ${release_ver}"
for f in `git cherry -v "${release_ver}" | sed "s/+ \([0-9a-f]\{40\}\).*/\1/"` ; do
git show -s --format=%B ${f} | egrep "(fixes|refs|closes) [A-Za-z]{4}-[0-9]{5,6}"
done
|
djbender/canvas-lms
|
packages/canvas-rce/bin/jira_tickets.sh
|
Shell
|
agpl-3.0
| 464 |
#!/bin/bash
cd $(dirname $(readlink -f $0))
BASE=`pwd`
. ./common.sh
# -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=5005 \
cleanInstall $1 && copyDeps && {
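# Build a ':'-separated classpath from every jar under target/dependency.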
classpath=`echo target/dependency/*.jar | tr ' ' :`
runCmd java \
-Xmx128m -Xms128m \
-Xbootclasspath/a:target/$aid-$version.jar \
-javaagent:target/$aid-$version.jar \
-cp target/test-classes:$classpath \
-ea \
com.alibaba.mtc.threadpool.agent.AgentCheck
}
|
wangcan2014/multi-thread-context
|
run-agent-test.sh
|
Shell
|
apache-2.0
| 495 |
#!/bin/bash
./node_modules/.bin/mocha --reporter spec test/test.js
|
arielschiavoni/yadda
|
examples/mocha-express/bin/example.sh
|
Shell
|
apache-2.0
| 67 |
#!/bin/bash
#
# Copyright (c) 2016, Linaro Limited
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
PCAP_IN=`find . ${TEST_DIR} $(dirname $0) -name udp64.pcap -print -quit`
PCAP_OUT="pcapout.pcap"
PCAP_IN_SIZE=`stat -c %s ${PCAP_IN}`
echo "using PCAP_IN = ${PCAP_IN}, PCAP_OUT = ${PCAP_OUT}"
./odp_l3fwd -i pcap:in=${PCAP_IN},pcap:out=${PCAP_OUT} \
-r "10.0.0.0/24,pcap:out=${PCAP_OUT}" -d 30
STATUS=$?
PCAP_OUT_SIZE=`stat -c %s ${PCAP_OUT}`
rm -f ${PCAP_OUT}
if [ ${STATUS} -ne 0 ] || [ ${PCAP_IN_SIZE} -ne ${PCAP_OUT_SIZE} ]; then
echo "Error: status ${STATUS}, in:${PCAP_IN_SIZE} out:${PCAP_OUT_SIZE}"
exit 1
fi
echo "Pass: status ${STATUS}, in:${PCAP_IN_SIZE} out:${PCAP_OUT_SIZE}"
exit 0
|
erachmi/odp
|
example/l3fwd/odp_l3fwd_run.sh
|
Shell
|
bsd-3-clause
| 728 |
#!/bin/sh
test_description='fetching and pushing, with or without wildcard'
. ./test-lib.sh
D=`pwd`
mk_empty () {
rm -fr testrepo &&
mkdir testrepo &&
(
cd testrepo &&
git init &&
git config receive.denyCurrentBranch warn &&
mv .git/hooks .git/hooks-disabled
)
}
mk_test () {
mk_empty &&
(
for ref in "$@"
do
git push testrepo $the_first_commit:refs/$ref || {
echo "Oops, push refs/$ref failure"
exit 1
}
done &&
cd testrepo &&
for ref in "$@"
do
r=$(git show-ref -s --verify refs/$ref) &&
test "z$r" = "z$the_first_commit" || {
echo "Oops, refs/$ref is wrong"
exit 1
}
done &&
git fsck --full
)
}
mk_child() {
rm -rf "$1" &&
git clone testrepo "$1"
}
check_push_result () {
(
cd testrepo &&
it="$1" &&
shift
for ref in "$@"
do
r=$(git show-ref -s --verify refs/$ref) &&
test "z$r" = "z$it" || {
echo "Oops, refs/$ref is wrong"
exit 1
}
done &&
git fsck --full
)
}
test_expect_success setup '
>path1 &&
git add path1 &&
test_tick &&
git commit -a -m repo &&
the_first_commit=$(git show-ref -s --verify refs/heads/master) &&
>path2 &&
git add path2 &&
test_tick &&
git commit -a -m second &&
the_commit=$(git show-ref -s --verify refs/heads/master)
'
test_expect_success 'fetch without wildcard' '
mk_empty &&
(
cd testrepo &&
git fetch .. refs/heads/master:refs/remotes/origin/master &&
r=$(git show-ref -s --verify refs/remotes/origin/master) &&
test "z$r" = "z$the_commit" &&
test 1 = $(git for-each-ref refs/remotes/origin | wc -l)
)
'
test_expect_success 'fetch with wildcard' '
mk_empty &&
(
cd testrepo &&
git config remote.up.url .. &&
git config remote.up.fetch "refs/heads/*:refs/remotes/origin/*" &&
git fetch up &&
r=$(git show-ref -s --verify refs/remotes/origin/master) &&
test "z$r" = "z$the_commit" &&
test 1 = $(git for-each-ref refs/remotes/origin | wc -l)
)
'
test_expect_success 'fetch with insteadOf' '
mk_empty &&
(
TRASH=$(pwd)/ &&
cd testrepo &&
git config "url.$TRASH.insteadOf" trash/ &&
git config remote.up.url trash/. &&
git config remote.up.fetch "refs/heads/*:refs/remotes/origin/*" &&
git fetch up &&
r=$(git show-ref -s --verify refs/remotes/origin/master) &&
test "z$r" = "z$the_commit" &&
test 1 = $(git for-each-ref refs/remotes/origin | wc -l)
)
'
test_expect_success 'fetch with pushInsteadOf (should not rewrite)' '
mk_empty &&
(
TRASH=$(pwd)/ &&
cd testrepo &&
git config "url.trash/.pushInsteadOf" "$TRASH" &&
git config remote.up.url "$TRASH." &&
git config remote.up.fetch "refs/heads/*:refs/remotes/origin/*" &&
git fetch up &&
r=$(git show-ref -s --verify refs/remotes/origin/master) &&
test "z$r" = "z$the_commit" &&
test 1 = $(git for-each-ref refs/remotes/origin | wc -l)
)
'
test_expect_success 'push without wildcard' '
mk_empty &&
git push testrepo refs/heads/master:refs/remotes/origin/master &&
(
cd testrepo &&
r=$(git show-ref -s --verify refs/remotes/origin/master) &&
test "z$r" = "z$the_commit" &&
test 1 = $(git for-each-ref refs/remotes/origin | wc -l)
)
'
test_expect_success 'push with wildcard' '
mk_empty &&
git push testrepo "refs/heads/*:refs/remotes/origin/*" &&
(
cd testrepo &&
r=$(git show-ref -s --verify refs/remotes/origin/master) &&
test "z$r" = "z$the_commit" &&
test 1 = $(git for-each-ref refs/remotes/origin | wc -l)
)
'
test_expect_success 'push with insteadOf' '
mk_empty &&
TRASH="$(pwd)/" &&
git config "url.$TRASH.insteadOf" trash/ &&
git push trash/testrepo refs/heads/master:refs/remotes/origin/master &&
(
cd testrepo &&
r=$(git show-ref -s --verify refs/remotes/origin/master) &&
test "z$r" = "z$the_commit" &&
test 1 = $(git for-each-ref refs/remotes/origin | wc -l)
)
'
test_expect_success 'push with pushInsteadOf' '
mk_empty &&
TRASH="$(pwd)/" &&
git config "url.$TRASH.pushInsteadOf" trash/ &&
git push trash/testrepo refs/heads/master:refs/remotes/origin/master &&
(
cd testrepo &&
r=$(git show-ref -s --verify refs/remotes/origin/master) &&
test "z$r" = "z$the_commit" &&
test 1 = $(git for-each-ref refs/remotes/origin | wc -l)
)
'
test_expect_success 'push with pushInsteadOf and explicit pushurl (pushInsteadOf should not rewrite)' '
mk_empty &&
TRASH="$(pwd)/" &&
git config "url.trash2/.pushInsteadOf" trash/ &&
git config remote.r.url trash/wrong &&
git config remote.r.pushurl "$TRASH/testrepo" &&
git push r refs/heads/master:refs/remotes/origin/master &&
(
cd testrepo &&
r=$(git show-ref -s --verify refs/remotes/origin/master) &&
test "z$r" = "z$the_commit" &&
test 1 = $(git for-each-ref refs/remotes/origin | wc -l)
)
'
test_expect_success 'push with matching heads' '
mk_test heads/master &&
git push testrepo &&
check_push_result $the_commit heads/master
'
test_expect_success 'push with matching heads on the command line' '
mk_test heads/master &&
git push testrepo : &&
check_push_result $the_commit heads/master
'
test_expect_success 'failed (non-fast-forward) push with matching heads' '
mk_test heads/master &&
git push testrepo : &&
git commit --amend -massaged &&
test_must_fail git push testrepo &&
check_push_result $the_commit heads/master &&
git reset --hard $the_commit
'
test_expect_success 'push --force with matching heads' '
mk_test heads/master &&
git push testrepo : &&
git commit --amend -massaged &&
git push --force testrepo &&
! check_push_result $the_commit heads/master &&
git reset --hard $the_commit
'
test_expect_success 'push with matching heads and forced update' '
mk_test heads/master &&
git push testrepo : &&
git commit --amend -massaged &&
git push testrepo +: &&
! check_push_result $the_commit heads/master &&
git reset --hard $the_commit
'
test_expect_success 'push with no ambiguity (1)' '
mk_test heads/master &&
git push testrepo master:master &&
check_push_result $the_commit heads/master
'
test_expect_success 'push with no ambiguity (2)' '
mk_test remotes/origin/master &&
git push testrepo master:origin/master &&
check_push_result $the_commit remotes/origin/master
'
test_expect_success 'push with colon-less refspec, no ambiguity' '
mk_test heads/master heads/t/master &&
git branch -f t/master master &&
git push testrepo master &&
check_push_result $the_commit heads/master &&
check_push_result $the_first_commit heads/t/master
'
test_expect_success 'push with weak ambiguity (1)' '
mk_test heads/master remotes/origin/master &&
git push testrepo master:master &&
check_push_result $the_commit heads/master &&
check_push_result $the_first_commit remotes/origin/master
'
test_expect_success 'push with weak ambiguity (2)' '
mk_test heads/master remotes/origin/master remotes/another/master &&
git push testrepo master:master &&
check_push_result $the_commit heads/master &&
check_push_result $the_first_commit remotes/origin/master remotes/another/master
'
test_expect_success 'push with ambiguity' '
mk_test heads/frotz tags/frotz &&
if git push testrepo master:frotz
then
echo "Oops, should have failed"
false
else
check_push_result $the_first_commit heads/frotz tags/frotz
fi
'
test_expect_success 'push with colon-less refspec (1)' '
mk_test heads/frotz tags/frotz &&
git branch -f frotz master &&
git push testrepo frotz &&
check_push_result $the_commit heads/frotz &&
check_push_result $the_first_commit tags/frotz
'
test_expect_success 'push with colon-less refspec (2)' '
mk_test heads/frotz tags/frotz &&
if git show-ref --verify -q refs/heads/frotz
then
git branch -D frotz
fi &&
git tag -f frotz &&
git push testrepo frotz &&
check_push_result $the_commit tags/frotz &&
check_push_result $the_first_commit heads/frotz
'
test_expect_success 'push with colon-less refspec (3)' '
mk_test &&
if git show-ref --verify -q refs/tags/frotz
then
git tag -d frotz
fi &&
git branch -f frotz master &&
git push testrepo frotz &&
check_push_result $the_commit heads/frotz &&
test 1 = $( cd testrepo && git show-ref | wc -l )
'
test_expect_success 'push with colon-less refspec (4)' '
mk_test &&
if git show-ref --verify -q refs/heads/frotz
then
git branch -D frotz
fi &&
git tag -f frotz &&
git push testrepo frotz &&
check_push_result $the_commit tags/frotz &&
test 1 = $( cd testrepo && git show-ref | wc -l )
'
test_expect_success 'push head with non-existent, incomplete dest' '
mk_test &&
git push testrepo master:branch &&
check_push_result $the_commit heads/branch
'
test_expect_success 'push tag with non-existent, incomplete dest' '
mk_test &&
git tag -f v1.0 &&
git push testrepo v1.0:tag &&
check_push_result $the_commit tags/tag
'
test_expect_success 'push sha1 with non-existent, incomplete dest' '
mk_test &&
test_must_fail git push testrepo `git rev-parse master`:foo
'
test_expect_success 'push ref expression with non-existent, incomplete dest' '
mk_test &&
test_must_fail git push testrepo master^:branch
'
test_expect_success 'push with HEAD' '
mk_test heads/master &&
git checkout master &&
git push testrepo HEAD &&
check_push_result $the_commit heads/master
'
test_expect_success 'push with HEAD nonexisting at remote' '
mk_test heads/master &&
git checkout -b local master &&
git push testrepo HEAD &&
check_push_result $the_commit heads/local
'
test_expect_success 'push with +HEAD' '
mk_test heads/master &&
git checkout master &&
git branch -D local &&
git checkout -b local &&
git push testrepo master local &&
check_push_result $the_commit heads/master &&
check_push_result $the_commit heads/local &&
# Without force rewinding should fail
git reset --hard HEAD^ &&
test_must_fail git push testrepo HEAD &&
check_push_result $the_commit heads/local &&
# With force rewinding should succeed
git push testrepo +HEAD &&
check_push_result $the_first_commit heads/local
'
test_expect_success 'push HEAD with non-existent, incomplete dest' '
mk_test &&
git checkout master &&
git push testrepo HEAD:branch &&
check_push_result $the_commit heads/branch
'
test_expect_success 'push with config remote.*.push = HEAD' '
mk_test heads/local &&
git checkout master &&
git branch -f local $the_commit &&
(
cd testrepo &&
git checkout local &&
git reset --hard $the_first_commit
) &&
git config remote.there.url testrepo &&
git config remote.there.push HEAD &&
git config branch.master.remote there &&
git push &&
check_push_result $the_commit heads/master &&
check_push_result $the_first_commit heads/local
'
# clean up the cruft left with the previous one
git config --remove-section remote.there
git config --remove-section branch.master
test_expect_success 'push with config remote.*.pushurl' '
mk_test heads/master &&
git checkout master &&
git config remote.there.url test2repo &&
git config remote.there.pushurl testrepo &&
git push there &&
check_push_result $the_commit heads/master
'
# clean up the cruft left with the previous one
git config --remove-section remote.there
test_expect_success 'push with dry-run' '
mk_test heads/master &&
(
cd testrepo &&
old_commit=$(git show-ref -s --verify refs/heads/master)
) &&
git push --dry-run testrepo &&
check_push_result $old_commit heads/master
'
test_expect_success 'push updates local refs' '
mk_test heads/master &&
mk_child child &&
(
cd child &&
git pull .. master &&
git push &&
test $(git rev-parse master) = \
$(git rev-parse remotes/origin/master)
)
'
test_expect_success 'push updates up-to-date local refs' '
mk_test heads/master &&
mk_child child1 &&
mk_child child2 &&
(cd child1 && git pull .. master && git push) &&
(
cd child2 &&
git pull ../child1 master &&
git push &&
test $(git rev-parse master) = \
$(git rev-parse remotes/origin/master)
)
'
test_expect_success 'push preserves up-to-date packed refs' '
mk_test heads/master &&
mk_child child &&
(
cd child &&
git push &&
! test -f .git/refs/remotes/origin/master
)
'
test_expect_success 'push does not update local refs on failure' '
mk_test heads/master &&
mk_child child &&
mkdir testrepo/.git/hooks &&
echo "#!/no/frobnication/today" >testrepo/.git/hooks/pre-receive &&
chmod +x testrepo/.git/hooks/pre-receive &&
(
cd child &&
git pull .. master
test_must_fail git push &&
test $(git rev-parse master) != \
$(git rev-parse remotes/origin/master)
)
'
test_expect_success 'allow deleting an invalid remote ref' '
mk_test heads/master &&
rm -f testrepo/.git/objects/??/* &&
git push testrepo :refs/heads/master &&
(cd testrepo && test_must_fail git rev-parse --verify refs/heads/master)
'
test_expect_success 'allow deleting a ref using --delete' '
mk_test heads/master &&
(cd testrepo && git config receive.denyDeleteCurrent warn) &&
git push testrepo --delete master &&
(cd testrepo && test_must_fail git rev-parse --verify refs/heads/master)
'
test_expect_success 'allow deleting a tag using --delete' '
mk_test heads/master &&
git tag -a -m dummy_message deltag heads/master &&
git push testrepo --tags &&
(cd testrepo && git rev-parse --verify -q refs/tags/deltag) &&
git push testrepo --delete tag deltag &&
(cd testrepo && test_must_fail git rev-parse --verify refs/tags/deltag)
'
test_expect_success 'push --delete without args aborts' '
mk_test heads/master &&
test_must_fail git push testrepo --delete
'
test_expect_success 'push --delete refuses src:dest refspecs' '
mk_test heads/master &&
test_must_fail git push testrepo --delete master:foo
'
test_expect_success 'warn on push to HEAD of non-bare repository' '
mk_test heads/master
(
cd testrepo &&
git checkout master &&
git config receive.denyCurrentBranch warn
) &&
git push testrepo master 2>stderr &&
grep "warning: updating the current branch" stderr
'
test_expect_success 'deny push to HEAD of non-bare repository' '
mk_test heads/master
(
cd testrepo &&
git checkout master &&
git config receive.denyCurrentBranch true
) &&
test_must_fail git push testrepo master
'
test_expect_success 'allow push to HEAD of bare repository (bare)' '
mk_test heads/master
(
cd testrepo &&
git checkout master &&
git config receive.denyCurrentBranch true &&
git config core.bare true
) &&
git push testrepo master 2>stderr &&
! grep "warning: updating the current branch" stderr
'
test_expect_success 'allow push to HEAD of non-bare repository (config)' '
mk_test heads/master
(
cd testrepo &&
git checkout master &&
git config receive.denyCurrentBranch false
) &&
git push testrepo master 2>stderr &&
! grep "warning: updating the current branch" stderr
'
test_expect_success 'fetch with branches' '
mk_empty &&
git branch second $the_first_commit &&
git checkout second &&
echo ".." > testrepo/.git/branches/branch1 &&
(
cd testrepo &&
git fetch branch1 &&
r=$(git show-ref -s --verify refs/heads/branch1) &&
test "z$r" = "z$the_commit" &&
test 1 = $(git for-each-ref refs/heads | wc -l)
) &&
git checkout master
'
test_expect_success 'fetch with branches containing #' '
mk_empty &&
echo "..#second" > testrepo/.git/branches/branch2 &&
(
cd testrepo &&
git fetch branch2 &&
r=$(git show-ref -s --verify refs/heads/branch2) &&
test "z$r" = "z$the_first_commit" &&
test 1 = $(git for-each-ref refs/heads | wc -l)
) &&
git checkout master
'
test_expect_success 'push with branches' '
mk_empty &&
git checkout second &&
echo "testrepo" > .git/branches/branch1 &&
git push branch1 &&
(
cd testrepo &&
r=$(git show-ref -s --verify refs/heads/master) &&
test "z$r" = "z$the_first_commit" &&
test 1 = $(git for-each-ref refs/heads | wc -l)
)
'
test_expect_success 'push with branches containing #' '
mk_empty &&
echo "testrepo#branch3" > .git/branches/branch2 &&
git push branch2 &&
(
cd testrepo &&
r=$(git show-ref -s --verify refs/heads/branch3) &&
test "z$r" = "z$the_first_commit" &&
test 1 = $(git for-each-ref refs/heads | wc -l)
) &&
git checkout master
'
test_expect_success 'push into aliased refs (consistent)' '
mk_test heads/master &&
mk_child child1 &&
mk_child child2 &&
(
cd child1 &&
git branch foo &&
git symbolic-ref refs/heads/bar refs/heads/foo
git config receive.denyCurrentBranch false
) &&
(
cd child2 &&
>path2 &&
git add path2 &&
test_tick &&
git commit -a -m child2 &&
git branch foo &&
git branch bar &&
git push ../child1 foo bar
)
'
test_expect_success 'push into aliased refs (inconsistent)' '
mk_test heads/master &&
mk_child child1 &&
mk_child child2 &&
(
cd child1 &&
git branch foo &&
git symbolic-ref refs/heads/bar refs/heads/foo
git config receive.denyCurrentBranch false
) &&
(
cd child2 &&
>path2 &&
git add path2 &&
test_tick &&
git commit -a -m child2 &&
git branch foo &&
>path3 &&
git add path3 &&
test_tick &&
git commit -a -m child2 &&
git branch bar &&
test_must_fail git push ../child1 foo bar 2>stderr &&
grep "refusing inconsistent update" stderr
)
'
test_expect_success 'push --porcelain' '
mk_empty &&
echo >.git/foo "To testrepo" &&
echo >>.git/foo "* refs/heads/master:refs/remotes/origin/master [new branch]" &&
echo >>.git/foo "Done" &&
git push >.git/bar --porcelain testrepo refs/heads/master:refs/remotes/origin/master &&
(
cd testrepo &&
r=$(git show-ref -s --verify refs/remotes/origin/master) &&
test "z$r" = "z$the_commit" &&
test 1 = $(git for-each-ref refs/remotes/origin | wc -l)
) &&
test_cmp .git/foo .git/bar
'
test_expect_success 'push --porcelain bad url' '
mk_empty &&
test_must_fail git push >.git/bar --porcelain asdfasdfasd refs/heads/master:refs/remotes/origin/master &&
test_must_fail grep -q Done .git/bar
'
test_expect_success 'push --porcelain rejected' '
mk_empty &&
git push testrepo refs/heads/master:refs/remotes/origin/master &&
(cd testrepo &&
git reset --hard origin/master^
git config receive.denyCurrentBranch true) &&
echo >.git/foo "To testrepo" &&
echo >>.git/foo "! refs/heads/master:refs/heads/master [remote rejected] (branch is currently checked out)" &&
test_must_fail git push >.git/bar --porcelain testrepo refs/heads/master:refs/heads/master &&
test_cmp .git/foo .git/bar
'
test_expect_success 'push --porcelain --dry-run rejected' '
mk_empty &&
git push testrepo refs/heads/master:refs/remotes/origin/master &&
(cd testrepo &&
git reset --hard origin/master
git config receive.denyCurrentBranch true) &&
echo >.git/foo "To testrepo" &&
echo >>.git/foo "! refs/heads/master^:refs/heads/master [rejected] (non-fast-forward)" &&
echo >>.git/foo "Done" &&
test_must_fail git push >.git/bar --porcelain --dry-run testrepo refs/heads/master^:refs/heads/master &&
test_cmp .git/foo .git/bar
'
test_done
|
vidarh/Git
|
t/t5516-fetch-push.sh
|
Shell
|
gpl-2.0
| 18,805 |
#!/bin/bash
set -o nounset
set -o errexit
cd "$(dirname "$0")"
mkdir -p $PWD/../data/
cp $BIN_DIR/trikSharp/Trik.Core.dll $PWD/../data/
|
iakov/qreal
|
installer/packages/trik-studio/ru.qreal.root.trik.f_sharp/meta/prebuild-common.sh
|
Shell
|
apache-2.0
| 176 |
#!/bin/bash
set -e
# Bash substitution to remove everything before '='
# and only keep what is after
function extract_param {
echo "${1##*=}"
}
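# Illustrative example (URL is hypothetical):
#   extract_param "fstree=http://example.com/fs.tar"   # prints http://example.com/fs.tar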
for option in $(comma_to_space "${DEBUG}"); do
case $option in
verbose)
log "VERBOSE: activating bash debugging mode."
log "To run Ceph daemons in debugging mode, pass the CEPH_ARGS variable like this:"
log "-e CEPH_ARGS='--debug-ms 1 --debug-osd 10'"
export PS4='+${BASH_SOURCE}:${LINENO}: ${FUNCNAME[0]:+${FUNCNAME[0]}(): }'
set -x
;;
fstree*)
log "FSTREE: uncompressing content of $(extract_param "$option")"
# NOTE (leseb): the entrypoint should already be running from /
# This is just a safeguard
pushd / > /dev/null
# Downloading patched filesystem
curl --silent --output patch.tar -L "$(extract_param "$option")"
# If the file isn't present, let's stop here
[ -f patch.tar ]
# Let's find out if the tarball has the / in a sub-directory
strip_level=0
for sub_level in $(seq 2 -1 0); do
set +e
if tar -tf patch.tar | cut -d "/" -f $((sub_level+1)) | grep -sqwE "bin|etc|lib|lib64|opt|run|usr|sbin|var"; then
strip_level=$sub_level
fi
set -e
done
log "The main directory is at level $strip_level"
log ""
log "SHA1 of the archive is: $(sha1sum patch.tar)"
log ""
log "Now, we print the SHA1 of each file."
for f in $(tar xfpv patch.tar --show-transformed-names --strip="$strip_level"); do
if [[ ! -d $f ]]; then
sha1sum "$f"
fi
done
rm -f patch.tar
popd > /dev/null
;;
stayalive)
log "STAYALIVE: container will not die if a command fails."
source docker_exec.sh
;;
*)
log "$option is not a valid debug option."
log "Available options are: verbose, fstree and stayalive."
log "They can be used altogether like this: '-e DEBUG=verbose,fstree=http://myfstree,stayalive'"
log ""
log "To run Ceph daemons in debugging mode, pass the CEPH_ARGS variable like this:"
log "-e CEPH_ARGS='--debug-ms 1 --debug-osd 10'"
exit 1
;;
esac
done
|
rootfs/ceph-docker
|
ceph-releases/luminous/ubuntu/16.04/daemon/debug.sh
|
Shell
|
apache-2.0
| 2,205 |
#!/bin/bash
PP="problem.par"
PPRT=${PP}".restart_test"
OUTB="moving_pulse_ts1_"
OUT2=${OUTB}"0002.h5"
OUT1=${OUTB}"0001.h5"
OUT2_1=${OUTB}"1002.h5"
OUT1_1=${OUTB}"1001.h5"
if [ ! -e $PPRT ] ; then
echo "Cannot find $PPRT. Trying to find one up to ../../."
find ../../ -name $PPRT
echo "You must manually copy an appropriate $PPRT to ."
exit 1
fi
PRG="./piernik"
if [ ! -x $PRG ] ; then
echo "Cannot find Piernik executable"
exit 2
fi
rm *.res 2> /dev/null
cp $PPRT $PP
$PRG
cp $OUT1 $OUT1_1
sed '/tend/s/.*/ tend = 2.0/' $PPRT > $PP
$PRG
cp $OUT2 $OUT2_1
rm *.res
cp $PPRT $PP
mpirun -np 5 $PRG
echo "Comparing $OUT1 and $OUT1_1"
h5diff $OUT1 $OUT1_1
sed '/tend/s/.*/ tend = 2.0/' $PPRT > $PP
mpirun -np 9 $PRG
echo "Comparing $OUT2 and $OUT2_1"
h5diff $OUT2 $OUT2_1
#rm *.res moving_pulse_ts1_????.h5
|
askalbania/piernik
|
problems/advection_test/restart_test.sh
|
Shell
|
gpl-3.0
| 837 |
#!/usr/bin/env bash
set -e
set -o nounset
if [ $# -ne 2 ]; then
echo "Usage: $0 <hidden size> <num threads>"
exit 1
fi
basedir=$(dirname $0)/benchmarks
data=$basedir/simple-examples/data
hidden_size=$1
threads=$2
taskset_cmd="taskset -c $(seq -s, 0 $(( $threads - 1 )))"
mkdir -p $basedir
if ! type svn ; then
echo "svn is required"
exit 1
fi
function fat_echo() {
echo "############################################"
echo "########## $1"
}
function wget_or_curl() {
[ $# -eq 2 ] || { echo "Usage: wget_or_curl <url> <fpath>" && exit 1; }
if type wget &> /dev/null; then
local download_cmd="wget -T 10 -t 3 -O"
else
local download_cmd="curl -L -o"
fi
$download_cmd "$2" "$1"
}
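# Hypothetical usage (URL is illustrative): wget_or_curl http://example.com/data.tgz data.tgz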
function run_test() {
time $taskset_cmd $1 -rnnlm $basedir/models/$2 -train $data/ptb.train.txt -valid $data/ptb.valid.txt -hidden $hidden_size -threads $threads ${3:-}
$1 -rnnlm $basedir/models/$2 -test $data/ptb.test.txt -nce-accurate-test 1 2>&1 > /dev/null | grep "Test entropy" | cat
}
fat_echo "Downloading Penn Tree Bank corpora"
if [ ! -d "$basedir/simple-examples" ]; then
pushd $basedir > /dev/null
wget_or_curl http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz simple-examples.tgz
tar -xf simple-examples.tgz
rm simple-examples.tgz
popd > /dev/null
fi
fat_echo "Downloading and building C-RNNLM from rnnlm.org"
if [ ! -f "$basedir/crnnlm/rnnlm" ]; then
rm -rf $basedir/crnnlm
mkdir -p $basedir/crnnlm
(
cd $basedir/crnnlm
wget_or_curl https://f25ea9ccb7d3346ce6891573d543960492b92c30.googledrive.com/host/0ByxdPXuxLPS5RFM5dVNvWVhTd0U/rnnlm-0.4b.tgz rnnlm-0.4b.tgz
tar -xf rnnlm-0.4b.tgz
cd rnnlm-0.4b
sed -i -- 's/x86_64-linux-g++-4.6/g++/g' makefile
make
cd ..
ln -s rnnlm-0.4b/rnnlm
)
fi
fat_echo "Downloading and building RNNLM-HS from kaldi svn"
if [ ! -f "$basedir/rnnlm-hs-0.1b/rnnlm" ]; then
mkdir -p $basedir/rnnlm-hs-0.1b
(
cd $basedir/rnnlm-hs-0.1b
svn checkout https://svn.code.sf.net/p/kaldi/code/trunk/tools/rnnlm-hs-0.1b/ .
make
)
fi
fat_echo "Building Faster-RNNLM"
$(dirname $0)/build.sh
rm -rf $basedir/models
mkdir -p $basedir/models
fat_echo "Training Faster RNNLM on ptb"
run_test $(dirname $0)/faster-rnnlm/rnnlm fasterrnnlm
fat_echo "Training Faster RNNLM on ptb (NCE mode)"
run_test $(dirname $0)/faster-rnnlm/rnnlm fasterrnnlm-nce "-nce 15"
fat_echo "Training RNNLM-HS on ptb"
run_test $basedir/rnnlm-hs-0.1b/rnnlm rnnlm-hs
fat_echo "Training C-RNNLM on ptb"
run_test $basedir/crnnlm/rnnlm crnnlm
$basedir/crnnlm/rnnlm -rnnlm $basedir/models/crnnlm -test $data/ptb.test.txt 2>&1 | awk '$0 ~ /PPL/ {print "Test entropy", log($3) / log(2)}'
|
yandex/faster-rnnlm
|
run_benchmark.sh
|
Shell
|
apache-2.0
| 2,765 |
#!/bin/bash
# script to be used for Gradle performance test bisecting
# example usage:
# git bisect start HEAD REL_2.14 -- # HEAD=bad REL_2.14=good
# git bisect run check_rev.sh JavaConfigurationPerformanceTest lotDependencies
TESTNAME=${1:-IdeIntegrationPerformanceTest}
TESTPROJECT=${2:-multi}
./gradlew clean
[ -d ~/.gradle-bisect-override ] && cp -Rdvp ~/.gradle-bisect-override/* .
[ -x ~/.gradle-bisect-override-script ] && ~/.gradle-bisect-override-script $TESTNAME $TESTPROJECT
./gradlew -S -PtimestampedVersion -x :performance:prepareSamples :performance:$TESTPROJECT :performance:cleanPerformanceTest :performance:performanceTest -D:performance:performanceTest.single=$TESTNAME
result=$?
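# Archive the JUnit XML under a name encoding the exit status, the first 8
# characters of the commit hash (colrm 9), and a timestamp.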
hash=$(git rev-parse HEAD | colrm 9)
datets=$(date +%Y-%m-%d-%H:%M:%S)
[ -d ~/.gradle-bisect-results ] || mkdir ~/.gradle-bisect-results
cp subprojects/performance/build/test-results/performanceTest/TEST-org.gradle.performance.$TESTNAME.xml ~/.gradle-bisect-results/result_${result}_${hash}_${datets}.xml
git reset --hard
exit $result
|
gstevey/gradle
|
subprojects/performance/docs/check-rev.sh
|
Shell
|
apache-2.0
| 1,034 |
#!/bin/bash
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script bootstraps building a Bazel binary without Bazel, then
# uses this compiled Bazel to bootstrap Bazel itself. It can also
# be provided with a previous version of Bazel to bootstrap Bazel
# itself.
# The resulting binary can be found at output/bazel.
set -o errexit
cd "$(dirname "$0")"
source scripts/bootstrap/buildenv.sh
function usage() {
[ -n "${1:-build}" ] && echo "Invalid command(s): $1" >&2
echo "syntax: $0 [command[,command]* [BAZEL_BIN [BAZEL_SUM]]]" >&2
echo " General purpose commands:" >&2
echo " build = compile,tools,init (default)" >&2
echo " compile = compile a Bazel binary for usage" >&2
echo " tools = compile and install tooling for Bazel" >&2
echo " init = initialize the base workspace" >&2
echo " Commands for developers:" >&2
echo " all = build,determinism,test" >&2
echo " determinism = test for stability of Bazel builds" >&2
echo " test = run the full test suite of Bazel" >&2
exit 1
}
function parse_options() {
local keywords="(build|compile|tools|init|all|determinism|bootstrap|test)"
COMMANDS="${1:-build}"
[[ "${COMMANDS}" =~ ^$keywords(,$keywords)*$ ]] || usage "$@"
DO_COMPILE=
DO_TOOLS_COMPILATION=
DO_CHECKSUM=
DO_FULL_CHECKSUM=1
DO_TESTS=
DO_BASE_WORKSPACE_INIT=
[[ "${COMMANDS}" =~ (compile|build|all) ]] && DO_COMPILE=1
[[ "${COMMANDS}" =~ (tools|build|all) ]] && DO_TOOLS_COMPILATION=1
[[ "${COMMANDS}" =~ (init|build|all) ]] && DO_BASE_WORKSPACE_INIT=1
[[ "${COMMANDS}" =~ (bootstrap|determinism|all) ]] && DO_CHECKSUM=1
[[ "${COMMANDS}" =~ (bootstrap) ]] && DO_FULL_CHECKSUM=
[[ "${COMMANDS}" =~ (test|all) ]] && DO_TESTS=1
BAZEL_BIN=${2:-"bazel-bin/src/bazel"}
BAZEL_SUM=${3:-"x"}
}
parse_options "${@}"
mkdir -p output
: ${BAZEL:=${2-}}
#
# Create an initial binary so we can host ourself
#
if [ ! -x "${BAZEL}" ]; then
display "$INFO You can skip this first step by providing a path to the bazel binary as second argument:"
display "$INFO $0 ${COMMANDS} /path/to/bazel"
new_step 'Building Bazel from scratch'
source scripts/bootstrap/compile.sh
cp ${OUTPUT_DIR}/bazel output/bazel
BAZEL=$(pwd)/output/bazel
fi
#
# Bootstrap bazel using the previous bazel binary = release binary
#
if [ "${EMBED_LABEL-x}" = "x" ]; then
# Add a default label when unspecified
git_sha1=$(git_sha1)
EMBED_LABEL="head (@${git_sha1:-non-git})"
fi
source scripts/bootstrap/bootstrap.sh
if [ $DO_COMPILE ]; then
new_step 'Building Bazel with Bazel'
display "."
bazel_bootstrap //src:bazel output/bazel 0755 1
BAZEL=$(pwd)/output/bazel
fi
#
# Bootstrap tools using the release binary
#
if [ $DO_TOOLS_COMPILATION ]; then
new_step 'Building Bazel tools'
bazel_bootstrap //third_party/ijar:ijar tools/jdk/ijar 0755
bazel_bootstrap //src/java_tools/singlejar:SingleJar_deploy.jar \
tools/jdk/SingleJar_deploy.jar
bazel_bootstrap //src/java_tools/buildjar:JavaBuilder_deploy.jar \
tools/jdk/JavaBuilder_deploy.jar
bazel_bootstrap //src/java_tools/buildjar/java/com/google/devtools/build/buildjar/genclass:GenClass_deploy.jar \
tools/jdk/GenClass_deploy.jar
if [[ $PLATFORM == "darwin" ]]; then
bazel_bootstrap //src/tools/xcode-common/java/com/google/devtools/build/xcode/actoolzip:actoolzip_deploy.jar \
tools/objc/precomp_actoolzip_deploy.jar
bazel_bootstrap //src/tools/xcode/ibtoolwrapper:ibtoolwrapper tools/objc/ibtoolwrapper.sh 0755
bazel_bootstrap //src/tools/xcode-common/java/com/google/devtools/build/xcode/swiftstdlibtoolzip:swiftstdlibtoolzip_deploy.jar \
tools/objc/precomp_swiftstdlibtoolzip_deploy.jar
bazel_bootstrap //src/objc_tools/momczip:momczip_deploy.jar \
tools/objc/precomp_momczip_deploy.jar
bazel_bootstrap //src/objc_tools/bundlemerge:bundlemerge_deploy.jar \
tools/objc/precomp_bundlemerge_deploy.jar
bazel_bootstrap //src/objc_tools/plmerge:plmerge_deploy.jar \
tools/objc/precomp_plmerge_deploy.jar
bazel_bootstrap //src/objc_tools/xcodegen:xcodegen_deploy.jar \
tools/objc/precomp_xcodegen_deploy.jar
if xcodebuild -showsdks 2> /dev/null | grep -q '\-sdk iphonesimulator'; then
bazel_bootstrap //src/tools/xcode/stdredirect:StdRedirect.dylib \
tools/objc/StdRedirect.dylib 0755
fi
bazel_bootstrap //src/tools/xcode/realpath:realpath tools/objc/realpath 0755
fi
fi
#
# Output should be deterministic between two bootstrapped bazel binaries: one built with the actual tools and one built with the
# released binary.
#
if [ $DO_CHECKSUM ]; then
new_step "Determinism test"
if [ ! -f ${BAZEL_SUM:-x} ]; then
BAZEL_SUM=bazel-out/bazel_checksum
log "First build"
bootstrap_test ${BAZEL} ${BAZEL_SUM}
else
BOOTSTRAP=${BAZEL}
fi
if [ "${BAZEL_SUM}" != "${OUTPUT_DIR}/bazel_checksum" ]; then
cp ${BAZEL_SUM} ${OUTPUT_DIR}/bazel_checksum
fi
if [ $DO_FULL_CHECKSUM ]; then
log "Second build"
bootstrap_test ${BOOTSTRAP} bazel-out/bazel_checksum
log "Comparing output"
(diff -U 0 ${OUTPUT_DIR}/bazel_checksum bazel-out/bazel_checksum >&2) \
|| fail "Differences detected in outputs!"
fi
fi
#
# Tests
#
if [ $DO_TESTS ]; then
new_step "Running tests"
display "."
ndk_target="$(get_bind_target //external:android_ndk_for_testing)"
sdk_target="$(get_bind_target //external:android_sdk_for_testing)"
if [ "$ndk_target" = "//:dummy" -o "$sdk_target" = "//:dummy" ]; then
display "$WARNING Android SDK or NDK are not set in the WORKSPACE file. Android tests will not be run."
fi
[ -n "$JAVAC_VERSION" ] || get_java_version
if [[ ! "${BAZEL_TEST_FILTERS-}" =~ "-jdk8" ]] \
&& [ "8" -gt ${JAVAC_VERSION#*.} ]; then
display "$WARNING Your version of Java is lower than 1.8!"
display "$WARNING Deactivating Java 8 tests, please use a JDK 8 to fully"
display "$WARNING test Bazel."
if [ -n "${BAZEL_TEST_FILTERS-}" ]; then
BAZEL_TEST_FILTERS="${BAZEL_TEST_FILTERS},-jdk8"
else
BAZEL_TEST_FILTERS="-jdk8"
fi
fi
$BAZEL --bazelrc=${BAZELRC} --nomaster_bazelrc test \
--test_tag_filters="${BAZEL_TEST_FILTERS-}" \
--build_tests_only \
--javacopt="-source ${JAVA_VERSION} -target ${JAVA_VERSION}" \
-k --test_output=errors //src/... //third_party/ijar/... //scripts/... \
|| fail "Tests failed"
fi
#
# Setup the base workspace
#
if [ $DO_BASE_WORKSPACE_INIT ]; then
new_step 'Setting up base workspace'
display "."
source scripts/bootstrap/init_workspace.sh
fi
clear_log
display "Build successful! Binary is here: ${BAZEL}"
|
rhuss/bazel
|
compile.sh
|
Shell
|
apache-2.0
| 7,209 |
#!/bin/sh
################################################################################
##
## Licensed to the Apache Software Foundation (ASF) under one or more
## contributor license agreements. See the NOTICE file distributed with
## this work for additional information regarding copyright ownership.
## The ASF licenses this file to You under the Apache License, Version 2.0
## (the "License"); you may not use this file except in compliance with
## the License. You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
################################################################################
cd ../SWFs/assets
echo "Removing previously compiled files..."
rm -f 'find . -name "*.swf"'
echo "Compiling sub application SWFs..."
$SDK_DIR/bin/mxmlc -source-path=$MUSTELLA_DIR/as3/src/mustella -includes=UnitTester pua.mxml
chmod 777 pua.swf
|
SlavaRa/flex-sdk
|
mustella/tests/gumbo/components/PopUpAnchor/Properties/PopUpAnchor_Properties_MP.sh
|
Shell
|
apache-2.0
| 1,231 |
#!/bin/bash
# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script will source the default skeleton helper functions, then sources
# cluster/${KUBERNETES_PROVIDER}/util.sh where KUBERNETES_PROVIDER, if unset,
# will use its default value (gce).
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
source "${KUBE_ROOT}/cluster/skeleton/util.sh"
KUBERNETES_PROVIDER="${KUBERNETES_PROVIDER:-gce}"
PROVIDER_UTILS="${KUBE_ROOT}/cluster/${KUBERNETES_PROVIDER}/util.sh"
if [ -f ${PROVIDER_UTILS} ]; then
source "${PROVIDER_UTILS}"
fi
# Federation utils
# Should NOT be called within the global scope, unless setting the desired global zone vars
# This function is currently NOT USED in the global scope
function set-federation-zone-vars {
zone="$1"
export OVERRIDE_CONTEXT="federation-e2e-${KUBERNETES_PROVIDER}-$zone"
echo "Setting zone vars to: $OVERRIDE_CONTEXT"
if [[ "$KUBERNETES_PROVIDER" == "gce" ]];then
export KUBE_GCE_ZONE="$zone"
# gcloud has a 61-character limit, and for firewall rules this
# prefix gets appended to itself, with some extra information,
# so we need to keep it short
export KUBE_GCE_INSTANCE_PREFIX="${USER}-${zone}"
elif [[ "$KUBERNETES_PROVIDER" == "gke" ]];then
export CLUSTER_NAME="${USER}-${zone}"
elif [[ "$KUBERNETES_PROVIDER" == "aws" ]];then
export KUBE_AWS_ZONE="$zone"
export KUBE_AWS_INSTANCE_PREFIX="${USER}-${zone}"
else
echo "Provider \"${KUBERNETES_PROVIDER}\" is not supported"
exit 1
fi
}
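# Hypothetical usage (zone name is illustrative):
#   set-federation-zone-vars "us-central1-f"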
|
ajohnstone/kubernetes
|
cluster/kube-util.sh
|
Shell
|
apache-2.0
| 2,040 |
#!/usr/bin/env bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Due to the GCE custom metadata size limit, we split the entire script into two
# files configure.sh and configure-helper.sh. The functionality of downloading
# kubernetes configuration, manifests, docker images, and binary files is
# put in configure.sh, which is uploaded via GCE custom metadata.
set -o errexit
set -o nounset
set -o pipefail
### Hardcoded constants
DEFAULT_CNI_VERSION="v0.7.5"
DEFAULT_CNI_SHA1="52e9d2de8a5f927307d9397308735658ee44ab8d"
DEFAULT_NPD_VERSION="v0.6.0"
DEFAULT_NPD_SHA1="a28e960a21bb74bc0ae09c267b6a340f30e5b3a6"
DEFAULT_CRICTL_VERSION="v1.12.0"
DEFAULT_CRICTL_SHA1="82ef8b44849f9da0589c87e9865d4716573eec7f"
DEFAULT_MOUNTER_TAR_SHA="8003b798cf33c7f91320cd6ee5cec4fa22244571"
###
# Use --retry-connrefused opt only if it's supported by curl.
CURL_RETRY_CONNREFUSED=""
if curl --help | grep -q -- '--retry-connrefused'; then
CURL_RETRY_CONNREFUSED='--retry-connrefused'
fi
function set-broken-motd {
cat > /etc/motd <<EOF
Broken (or in progress) Kubernetes node setup! Check the cluster initialization status
using the following commands.
Master instance:
- sudo systemctl status kube-master-installation
- sudo systemctl status kube-master-configuration
Node instance:
- sudo systemctl status kube-node-installation
- sudo systemctl status kube-node-configuration
EOF
}
function download-kube-env {
# Fetch kube-env from GCE metadata server.
(
umask 077
local -r tmp_kube_env="/tmp/kube-env.yaml"
curl --fail --retry 5 --retry-delay 3 ${CURL_RETRY_CONNREFUSED} --silent --show-error \
-H "X-Google-Metadata-Request: True" \
-o "${tmp_kube_env}" \
http://metadata.google.internal/computeMetadata/v1/instance/attributes/kube-env
# Convert the yaml format file into a shell-style file.
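# For example (illustrative value), a kube-env entry such as
#   CLUSTER_NAME: my-cluster
# becomes the shell assignment
#   readonly CLUSTER_NAME=my-cluster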
eval $(python -c '''
import pipes,sys,yaml
for k,v in yaml.load(sys.stdin).iteritems():
print("readonly {var}={value}".format(var = k, value = pipes.quote(str(v))))
''' < "${tmp_kube_env}" > "${KUBE_HOME}/kube-env")
rm -f "${tmp_kube_env}"
)
}
function download-kubelet-config {
local -r dest="$1"
echo "Downloading Kubelet config file, if it exists"
# Fetch kubelet config file from GCE metadata server.
(
umask 077
local -r tmp_kubelet_config="/tmp/kubelet-config.yaml"
if curl --fail --retry 5 --retry-delay 3 ${CURL_RETRY_CONNREFUSED} --silent --show-error \
-H "X-Google-Metadata-Request: True" \
-o "${tmp_kubelet_config}" \
http://metadata.google.internal/computeMetadata/v1/instance/attributes/kubelet-config; then
# only write to the final location if curl succeeds
mv "${tmp_kubelet_config}" "${dest}"
elif [[ "${REQUIRE_METADATA_KUBELET_CONFIG_FILE:-false}" == "true" ]]; then
echo "== Failed to download required Kubelet config file from metadata server =="
exit 1
fi
)
}
function download-kube-master-certs {
# Fetch kube-env from GCE metadata server.
(
umask 077
local -r tmp_kube_master_certs="/tmp/kube-master-certs.yaml"
curl --fail --retry 5 --retry-delay 3 ${CURL_RETRY_CONNREFUSED} --silent --show-error \
-H "X-Google-Metadata-Request: True" \
-o "${tmp_kube_master_certs}" \
http://metadata.google.internal/computeMetadata/v1/instance/attributes/kube-master-certs
# Convert the yaml format file into a shell-style file.
eval $(python -c '''
import pipes,sys,yaml
for k,v in yaml.load(sys.stdin).iteritems():
print("readonly {var}={value}".format(var = k, value = pipes.quote(str(v))))
''' < "${tmp_kube_master_certs}" > "${KUBE_HOME}/kube-master-certs")
rm -f "${tmp_kube_master_certs}"
)
}
function validate-hash {
local -r file="$1"
local -r expected="$2"
actual=$(sha1sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, sha1 ${actual} doesn't match expected ${expected} =="
return 1
fi
}
# Get default service account credentials of the VM.
GCE_METADATA_INTERNAL="http://metadata.google.internal/computeMetadata/v1/instance"
function get-credentials {
curl "${GCE_METADATA_INTERNAL}/service-accounts/default/token" -H "Metadata-Flavor: Google" -s | python -c \
'import sys; import json; print(json.loads(sys.stdin.read())["access_token"])'
}
function valid-storage-scope {
curl "${GCE_METADATA_INTERNAL}/service-accounts/default/scopes" -H "Metadata-Flavor: Google" -s | grep -q "auth/devstorage"
}
# Retry a download until we get it. Takes a hash and a set of URLs.
#
# $1 is the sha1 of the URL. Can be "" if the sha1 is unknown.
# $2+ are the URLs to download.
function download-or-bust {
local -r hash="$1"
shift 1
local -r urls=( $* )
while true; do
for url in "${urls[@]}"; do
local file="${url##*/}"
rm -f "${file}"
# if the url belongs to GCS API we should use oauth2_token in the headers
local curl_headers=""
if [[ "$url" =~ ^https://storage.googleapis.com.* ]] && valid-storage-scope ; then
curl_headers="Authorization: Bearer $(get-credentials)"
fi
if ! curl ${curl_headers:+-H "${curl_headers}"} -f --ipv4 -Lo "${file}" --connect-timeout 20 --max-time 300 --retry 6 --retry-delay 10 ${CURL_RETRY_CONNREFUSED} "${url}"; then
echo "== Failed to download ${url}. Retrying. =="
elif [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
else
if [[ -n "${hash}" ]]; then
echo "== Downloaded ${url} (SHA1 = ${hash}) =="
else
echo "== Downloaded ${url} =="
fi
return
fi
done
done
}
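# Hypothetical usage (hash and URLs are illustrative; the hash may be "" when unknown):
#   download-or-bust "<sha1>" "https://example.com/foo.tar.gz" "https://mirror.example.com/foo.tar.gz"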
function is-preloaded {
local -r key=$1
local -r value=$2
grep -qs "${key},${value}" "${KUBE_HOME}/preload_info"
}
function split-commas {
echo $1 | tr "," "\n"
}
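# Illustrative example: split-commas "a,b,c" prints a, b and c on separate lines.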
function remount-flexvolume-directory {
local -r flexvolume_plugin_dir=$1
mkdir -p $flexvolume_plugin_dir
mount --bind $flexvolume_plugin_dir $flexvolume_plugin_dir
mount -o remount,exec $flexvolume_plugin_dir
}
function install-gci-mounter-tools {
CONTAINERIZED_MOUNTER_HOME="${KUBE_HOME}/containerized_mounter"
local -r mounter_tar_sha="${DEFAULT_MOUNTER_TAR_SHA}"
if is-preloaded "mounter" "${mounter_tar_sha}"; then
echo "mounter is preloaded."
return
fi
echo "Downloading gci mounter tools."
mkdir -p "${CONTAINERIZED_MOUNTER_HOME}"
chmod a+x "${CONTAINERIZED_MOUNTER_HOME}"
mkdir -p "${CONTAINERIZED_MOUNTER_HOME}/rootfs"
download-or-bust "${mounter_tar_sha}" "https://storage.googleapis.com/kubernetes-release/gci-mounter/mounter.tar"
cp "${KUBE_HOME}/kubernetes/server/bin/mounter" "${CONTAINERIZED_MOUNTER_HOME}/mounter"
chmod a+x "${CONTAINERIZED_MOUNTER_HOME}/mounter"
mv "${KUBE_HOME}/mounter.tar" /tmp/mounter.tar
tar xf /tmp/mounter.tar -C "${CONTAINERIZED_MOUNTER_HOME}/rootfs"
rm /tmp/mounter.tar
mkdir -p "${CONTAINERIZED_MOUNTER_HOME}/rootfs/var/lib/kubelet"
}
# Install node problem detector binary.
function install-node-problem-detector {
if [[ -n "${NODE_PROBLEM_DETECTOR_VERSION:-}" ]]; then
local -r npd_version="${NODE_PROBLEM_DETECTOR_VERSION}"
local -r npd_sha1="${NODE_PROBLEM_DETECTOR_TAR_HASH}"
else
local -r npd_version="${DEFAULT_NPD_VERSION}"
local -r npd_sha1="${DEFAULT_NPD_SHA1}"
fi
local -r npd_tar="node-problem-detector-${npd_version}.tar.gz"
if is-preloaded "${npd_tar}" "${npd_sha1}"; then
echo "${npd_tar} is preloaded."
return
fi
echo "Downloading ${npd_tar}."
local -r npd_release_path="${NODE_PROBLEM_DETECTOR_RELEASE_PATH:-https://storage.googleapis.com/kubernetes-release}"
download-or-bust "${npd_sha1}" "${npd_release_path}/node-problem-detector/${npd_tar}"
local -r npd_dir="${KUBE_HOME}/node-problem-detector"
mkdir -p "${npd_dir}"
tar xzf "${KUBE_HOME}/${npd_tar}" -C "${npd_dir}" --overwrite
mv "${npd_dir}/bin"/* "${KUBE_BIN}"
chmod a+x "${KUBE_BIN}/node-problem-detector"
rmdir "${npd_dir}/bin"
rm -f "${KUBE_HOME}/${npd_tar}"
}
function install-cni-binaries {
local -r cni_tar="cni-plugins-amd64-${DEFAULT_CNI_VERSION}.tgz"
local -r cni_sha1="${DEFAULT_CNI_SHA1}"
if is-preloaded "${cni_tar}" "${cni_sha1}"; then
echo "${cni_tar} is preloaded."
return
fi
echo "Downloading cni binaries"
download-or-bust "${cni_sha1}" "https://storage.googleapis.com/kubernetes-release/network-plugins/${cni_tar}"
local -r cni_dir="${KUBE_HOME}/cni"
mkdir -p "${cni_dir}/bin"
tar xzf "${KUBE_HOME}/${cni_tar}" -C "${cni_dir}/bin" --overwrite
mv "${cni_dir}/bin"/* "${KUBE_BIN}"
rmdir "${cni_dir}/bin"
rm -f "${KUBE_HOME}/${cni_tar}"
}
# Install crictl binary.
function install-crictl {
if [[ -n "${CRICTL_VERSION:-}" ]]; then
local -r crictl_version="${CRICTL_VERSION}"
local -r crictl_sha1="${CRICTL_TAR_HASH}"
else
local -r crictl_version="${DEFAULT_CRICTL_VERSION}"
local -r crictl_sha1="${DEFAULT_CRICTL_SHA1}"
fi
local -r crictl="crictl-${crictl_version}-linux-amd64"
# Create crictl config file.
cat > /etc/crictl.yaml <<EOF
runtime-endpoint: ${CONTAINER_RUNTIME_ENDPOINT:-unix:///var/run/dockershim.sock}
EOF
if is-preloaded "${crictl}" "${crictl_sha1}"; then
echo "crictl is preloaded"
return
fi
echo "Downloading crictl"
local -r crictl_path="https://storage.googleapis.com/kubernetes-release/crictl"
download-or-bust "${crictl_sha1}" "${crictl_path}/${crictl}"
mv "${KUBE_HOME}/${crictl}" "${KUBE_BIN}/crictl"
chmod a+x "${KUBE_BIN}/crictl"
}
function install-exec-auth-plugin {
if [[ ! "${EXEC_AUTH_PLUGIN_URL:-}" ]]; then
return
fi
local -r plugin_url="${EXEC_AUTH_PLUGIN_URL}"
local -r plugin_sha1="${EXEC_AUTH_PLUGIN_SHA1}"
echo "Downloading gke-exec-auth-plugin binary"
download-or-bust "${plugin_sha1}" "${plugin_url}"
mv "${KUBE_HOME}/gke-exec-auth-plugin" "${KUBE_BIN}/gke-exec-auth-plugin"
chmod a+x "${KUBE_BIN}/gke-exec-auth-plugin"
if [[ ! "${EXEC_AUTH_PLUGIN_LICENSE_URL:-}" ]]; then
return
fi
local -r license_url="${EXEC_AUTH_PLUGIN_LICENSE_URL}"
echo "Downloading gke-exec-auth-plugin license"
download-or-bust "" "${license_url}"
mv "${KUBE_HOME}/LICENSE" "${KUBE_BIN}/gke-exec-auth-plugin-license"
}
function install-kube-manifests {
# Put kube-system pods manifests in ${KUBE_HOME}/kube-manifests/.
local dst_dir="${KUBE_HOME}/kube-manifests"
mkdir -p "${dst_dir}"
local -r manifests_tar_urls=( $(split-commas "${KUBE_MANIFESTS_TAR_URL}") )
local -r manifests_tar="${manifests_tar_urls[0]##*/}"
if [ -n "${KUBE_MANIFESTS_TAR_HASH:-}" ]; then
local -r manifests_tar_hash="${KUBE_MANIFESTS_TAR_HASH}"
else
echo "Downloading k8s manifests sha1 (not found in env)"
download-or-bust "" "${manifests_tar_urls[@]/.tar.gz/.tar.gz.sha1}"
local -r manifests_tar_hash=$(cat "${manifests_tar}.sha1")
fi
if is-preloaded "${manifests_tar}" "${manifests_tar_hash}"; then
echo "${manifests_tar} is preloaded."
return
fi
echo "Downloading k8s manifests tar"
download-or-bust "${manifests_tar_hash}" "${manifests_tar_urls[@]}"
tar xzf "${KUBE_HOME}/${manifests_tar}" -C "${dst_dir}" --overwrite
local -r kube_addon_registry="${KUBE_ADDON_REGISTRY:-k8s.gcr.io}"
if [[ "${kube_addon_registry}" != "k8s.gcr.io" ]]; then
find "${dst_dir}" -name \*.yaml -or -name \*.yaml.in | \
xargs sed -ri "s@(image:\s.*)k8s.gcr.io@\1${kube_addon_registry}@"
find "${dst_dir}" -name \*.manifest -or -name \*.json | \
xargs sed -ri "s@(image\":\s+\")k8s.gcr.io@\1${kube_addon_registry}@"
fi
cp "${dst_dir}/kubernetes/gci-trusty/gci-configure-helper.sh" "${KUBE_BIN}/configure-helper.sh"
if [[ -e "${dst_dir}/kubernetes/gci-trusty/gke-internal-configure-helper.sh" ]]; then
cp "${dst_dir}/kubernetes/gci-trusty/gke-internal-configure-helper.sh" "${KUBE_BIN}/"
fi
cp "${dst_dir}/kubernetes/gci-trusty/health-monitor.sh" "${KUBE_BIN}/health-monitor.sh"
rm -f "${KUBE_HOME}/${manifests_tar}"
rm -f "${KUBE_HOME}/${manifests_tar}.sha1"
}
# A helper function for loading a docker image. It keeps trying up to 5 times.
#
# $1: Full path of the docker image
function try-load-docker-image {
local -r img=$1
echo "Try to load docker image file ${img}"
# Temporarily turn off errexit, because we don't want to exit on first failure.
set +e
local -r max_attempts=5
local -i attempt_num=1
until timeout 30 ${LOAD_IMAGE_COMMAND:-docker load -i} "${img}"; do
if [[ "${attempt_num}" == "${max_attempts}" ]]; then
echo "Fail to load docker image file ${img} after ${max_attempts} retries. Exit!!"
exit 1
else
attempt_num=$((attempt_num+1))
sleep 5
fi
done
# Re-enable errexit.
set -e
}
# Loads kube-system docker images. It is better to do it before starting kubelet,
# as kubelet will restart docker daemon, which may interfere with loading images.
function load-docker-images {
echo "Start loading kube-system docker images"
local -r img_dir="${KUBE_HOME}/kube-docker-files"
if [[ "${KUBERNETES_MASTER:-}" == "true" ]]; then
try-load-docker-image "${img_dir}/kube-apiserver.tar"
try-load-docker-image "${img_dir}/kube-controller-manager.tar"
try-load-docker-image "${img_dir}/kube-scheduler.tar"
else
try-load-docker-image "${img_dir}/kube-proxy.tar"
fi
}
# Downloads kubernetes binaries and kube-system manifest tarball, unpacks them,
# and places them into suitable directories. Files are placed in /home/kubernetes.
function install-kube-binary-config {
cd "${KUBE_HOME}"
local -r server_binary_tar_urls=( $(split-commas "${SERVER_BINARY_TAR_URL}") )
local -r server_binary_tar="${server_binary_tar_urls[0]##*/}"
if [[ -n "${SERVER_BINARY_TAR_HASH:-}" ]]; then
local -r server_binary_tar_hash="${SERVER_BINARY_TAR_HASH}"
else
echo "Downloading binary release sha1 (not found in env)"
download-or-bust "" "${server_binary_tar_urls[@]/.tar.gz/.tar.gz.sha1}"
local -r server_binary_tar_hash=$(cat "${server_binary_tar}.sha1")
fi
if is-preloaded "${server_binary_tar}" "${server_binary_tar_hash}"; then
echo "${server_binary_tar} is preloaded."
else
echo "Downloading binary release tar"
download-or-bust "${server_binary_tar_hash}" "${server_binary_tar_urls[@]}"
tar xzf "${KUBE_HOME}/${server_binary_tar}" -C "${KUBE_HOME}" --overwrite
# Copy docker_tag and image files to ${KUBE_HOME}/kube-docker-files.
local -r src_dir="${KUBE_HOME}/kubernetes/server/bin"
local dst_dir="${KUBE_HOME}/kube-docker-files"
mkdir -p "${dst_dir}"
cp "${src_dir}/"*.docker_tag "${dst_dir}"
if [[ "${KUBERNETES_MASTER:-}" == "false" ]]; then
cp "${src_dir}/kube-proxy.tar" "${dst_dir}"
else
cp "${src_dir}/kube-apiserver.tar" "${dst_dir}"
cp "${src_dir}/kube-controller-manager.tar" "${dst_dir}"
cp "${src_dir}/kube-scheduler.tar" "${dst_dir}"
cp -r "${KUBE_HOME}/kubernetes/addons" "${dst_dir}"
fi
load-docker-images
mv "${src_dir}/kubelet" "${KUBE_BIN}"
mv "${src_dir}/kubectl" "${KUBE_BIN}"
mv "${KUBE_HOME}/kubernetes/LICENSES" "${KUBE_HOME}"
mv "${KUBE_HOME}/kubernetes/kubernetes-src.tar.gz" "${KUBE_HOME}"
fi
if [[ "${KUBERNETES_MASTER:-}" == "false" ]] && \
[[ "${ENABLE_NODE_PROBLEM_DETECTOR:-}" == "standalone" ]]; then
install-node-problem-detector
fi
if [[ "${NETWORK_PROVIDER:-}" == "kubenet" ]] || \
[[ "${NETWORK_PROVIDER:-}" == "cni" ]]; then
install-cni-binaries
fi
# Put kube-system pods manifests in ${KUBE_HOME}/kube-manifests/.
install-kube-manifests
chmod -R 755 "${KUBE_BIN}"
# Install gci mounter related artifacts to allow mounting storage volumes in GCI
install-gci-mounter-tools
# Remount the Flexvolume directory with the "exec" option, if needed.
if [[ "${REMOUNT_VOLUME_PLUGIN_DIR:-}" == "true" && -n "${VOLUME_PLUGIN_DIR:-}" ]]; then
remount-flexvolume-directory "${VOLUME_PLUGIN_DIR}"
fi
# Install crictl on each node.
install-crictl
if [[ "${KUBERNETES_MASTER:-}" == "false" ]]; then
# TODO(awly): include the binary and license in the OS image.
install-exec-auth-plugin
fi
# Clean up.
rm -rf "${KUBE_HOME}/kubernetes"
rm -f "${KUBE_HOME}/${server_binary_tar}"
rm -f "${KUBE_HOME}/${server_binary_tar}.sha1"
}
######### Main Function ##########
echo "Start to install kubernetes files"
# if install fails, message-of-the-day (motd) will warn at login shell
set-broken-motd
KUBE_HOME="/home/kubernetes"
KUBE_BIN="${KUBE_HOME}/bin"
# download and source kube-env
download-kube-env
source "${KUBE_HOME}/kube-env"
download-kubelet-config "${KUBE_HOME}/kubelet-config.yaml"
# master certs
if [[ "${KUBERNETES_MASTER:-}" == "true" ]]; then
download-kube-master-certs
fi
# binaries and kube-system manifests
install-kube-binary-config
echo "Done for installing kubernetes files"
|
Stackdriver/heapster
|
vendor/k8s.io/kubernetes/cluster/gce/gci/configure.sh
|
Shell
|
apache-2.0
| 17,480 |
#!/bin/bash
#
# This test is for checking rtnetlink callpaths, and getting as much coverage as possible.
#
# set -e
devdummy="test-dummy0"
ret=0
# Kselftest framework requirement - SKIP code is 4.
ksft_skip=4
# set the global exit status, but never reset a nonzero one.
check_err()
{
if [ $ret -eq 0 ]; then
ret=$1
fi
}
# same but inverted -- used when command must fail for test to pass
check_fail()
{
if [ $1 -eq 0 ]; then
ret=1
fi
}
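# Illustrative usage of the two helpers above (commented out, not part of the
# test flow): check_err records a command that must succeed, check_fail records
# a command that must fail.
#   ip link add name "$devdummy" type dummy; check_err $?
#   ip link add name "$devdummy" type dummy 2>/dev/null; check_fail $?  # duplicate add must fail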
kci_add_dummy()
{
ip link add name "$devdummy" type dummy
check_err $?
ip link set "$devdummy" up
check_err $?
}
kci_del_dummy()
{
ip link del dev "$devdummy"
check_err $?
}
kci_test_netconf()
{
dev="$1"
r=$ret
ip netconf show dev "$dev" > /dev/null
check_err $?
for f in 4 6; do
ip -$f netconf show dev "$dev" > /dev/null
check_err $?
done
if [ $ret -ne 0 ] ;then
echo "FAIL: ip netconf show $dev"
test $r -eq 0 && ret=0
return 1
fi
}
# add a bridge with vlans on top
kci_test_bridge()
{
devbr="test-br0"
vlandev="testbr-vlan1"
ret=0
ip link add name "$devbr" type bridge
check_err $?
ip link set dev "$devdummy" master "$devbr"
check_err $?
ip link set "$devbr" up
check_err $?
ip link add link "$devbr" name "$vlandev" type vlan id 1
check_err $?
ip addr add dev "$vlandev" 10.200.7.23/30
check_err $?
ip -6 addr add dev "$vlandev" dead:42::1234/64
check_err $?
ip -d link > /dev/null
check_err $?
ip r s t all > /dev/null
check_err $?
for name in "$devbr" "$vlandev" "$devdummy" ; do
kci_test_netconf "$name"
done
ip -6 addr del dev "$vlandev" dead:42::1234/64
check_err $?
ip link del dev "$vlandev"
check_err $?
ip link del dev "$devbr"
check_err $?
if [ $ret -ne 0 ];then
echo "FAIL: bridge setup"
return 1
fi
echo "PASS: bridge setup"
}
kci_test_gre()
{
gredev=neta
rem=10.42.42.1
loc=10.0.0.1
ret=0
ip tunnel add $gredev mode gre remote $rem local $loc ttl 1
check_err $?
ip link set $gredev up
check_err $?
ip addr add 10.23.7.10 dev $gredev
check_err $?
ip route add 10.23.8.0/30 dev $gredev
check_err $?
ip addr add dev "$devdummy" 10.23.7.11/24
check_err $?
ip link > /dev/null
check_err $?
ip addr > /dev/null
check_err $?
kci_test_netconf "$gredev"
ip addr del dev "$devdummy" 10.23.7.11/24
check_err $?
ip link del $gredev
check_err $?
if [ $ret -ne 0 ];then
echo "FAIL: gre tunnel endpoint"
return 1
fi
echo "PASS: gre tunnel endpoint"
}
# tc uses rtnetlink too, for full tc testing
# please see tools/testing/selftests/tc-testing.
kci_test_tc()
{
dev=lo
ret=0
tc qdisc add dev "$dev" root handle 1: htb
check_err $?
tc class add dev "$dev" parent 1: classid 1:10 htb rate 1mbit
check_err $?
tc filter add dev "$dev" parent 1:0 prio 5 handle ffe: protocol ip u32 divisor 256
check_err $?
tc filter add dev "$dev" parent 1:0 prio 5 handle ffd: protocol ip u32 divisor 256
check_err $?
tc filter add dev "$dev" parent 1:0 prio 5 handle ffc: protocol ip u32 divisor 256
check_err $?
tc filter add dev "$dev" protocol ip parent 1: prio 5 handle ffe:2:3 u32 ht ffe:2: match ip src 10.0.0.3 flowid 1:10
check_err $?
tc filter add dev "$dev" protocol ip parent 1: prio 5 handle ffe:2:2 u32 ht ffe:2: match ip src 10.0.0.2 flowid 1:10
check_err $?
tc filter show dev "$dev" parent 1:0 > /dev/null
check_err $?
tc filter del dev "$dev" protocol ip parent 1: prio 5 handle ffe:2:3 u32
check_err $?
tc filter show dev "$dev" parent 1:0 > /dev/null
check_err $?
tc qdisc del dev "$dev" root handle 1: htb
check_err $?
if [ $ret -ne 0 ];then
echo "FAIL: tc htb hierarchy"
return 1
fi
echo "PASS: tc htb hierarchy"
}
kci_test_polrouting()
{
ret=0
ip rule add fwmark 1 lookup 100
check_err $?
ip route add local 0.0.0.0/0 dev lo table 100
check_err $?
ip r s t all > /dev/null
check_err $?
ip rule del fwmark 1 lookup 100
check_err $?
ip route del local 0.0.0.0/0 dev lo table 100
check_err $?
if [ $ret -ne 0 ];then
echo "FAIL: policy route test"
return 1
fi
echo "PASS: policy routing"
}
kci_test_route_get()
{
local hash_policy=$(sysctl -n net.ipv4.fib_multipath_hash_policy)
ret=0
ip route get 127.0.0.1 > /dev/null
check_err $?
ip route get 127.0.0.1 dev "$devdummy" > /dev/null
check_err $?
ip route get ::1 > /dev/null
check_err $?
ip route get fe80::1 dev "$devdummy" > /dev/null
check_err $?
ip route get 127.0.0.1 from 127.0.0.1 oif lo tos 0x1 mark 0x1 > /dev/null
check_err $?
ip route get ::1 from ::1 iif lo oif lo tos 0x1 mark 0x1 > /dev/null
check_err $?
ip addr add dev "$devdummy" 10.23.7.11/24
check_err $?
ip route get 10.23.7.11 from 10.23.7.12 iif "$devdummy" > /dev/null
check_err $?
ip route add 10.23.8.0/24 \
nexthop via 10.23.7.13 dev "$devdummy" \
nexthop via 10.23.7.14 dev "$devdummy"
check_err $?
sysctl -wq net.ipv4.fib_multipath_hash_policy=0
ip route get 10.23.8.11 > /dev/null
check_err $?
sysctl -wq net.ipv4.fib_multipath_hash_policy=1
ip route get 10.23.8.11 > /dev/null
check_err $?
sysctl -wq net.ipv4.fib_multipath_hash_policy="$hash_policy"
ip route del 10.23.8.0/24
check_err $?
ip addr del dev "$devdummy" 10.23.7.11/24
check_err $?
if [ $ret -ne 0 ];then
echo "FAIL: route get"
return 1
fi
echo "PASS: route get"
}
kci_test_addrlabel()
{
ret=0
ip addrlabel add prefix dead::/64 dev lo label 1
check_err $?
ip addrlabel list |grep -q "prefix dead::/64 dev lo label 1"
check_err $?
ip addrlabel del prefix dead::/64 dev lo label 1 2> /dev/null
check_err $?
ip addrlabel add prefix dead::/64 label 1 2> /dev/null
check_err $?
ip addrlabel del prefix dead::/64 label 1 2> /dev/null
check_err $?
# concurrent add/delete
for i in $(seq 1 1000); do
ip addrlabel add prefix 1c3::/64 label 12345 2>/dev/null
done &
for i in $(seq 1 1000); do
ip addrlabel del prefix 1c3::/64 label 12345 2>/dev/null
done
wait
ip addrlabel del prefix 1c3::/64 label 12345 2>/dev/null
if [ $ret -ne 0 ];then
echo "FAIL: ipv6 addrlabel"
return 1
fi
echo "PASS: ipv6 addrlabel"
}
kci_test_ifalias()
{
ret=0
namewant=$(uuidgen)
syspathname="/sys/class/net/$devdummy/ifalias"
ip link set dev "$devdummy" alias "$namewant"
check_err $?
if [ $ret -ne 0 ]; then
echo "FAIL: cannot set interface alias of $devdummy to $namewant"
return 1
fi
ip link show "$devdummy" | grep -q "alias $namewant"
check_err $?
if [ -r "$syspathname" ] ; then
read namehave < "$syspathname"
if [ "$namewant" != "$namehave" ]; then
echo "FAIL: did set ifalias $namewant but got $namehave"
return 1
fi
namewant=$(uuidgen)
echo "$namewant" > "$syspathname"
ip link show "$devdummy" | grep -q "alias $namewant"
check_err $?
		# the sysfs interface also allows deleting the alias again
echo "" > "$syspathname"
ip link show "$devdummy" | grep -q "alias $namewant"
check_fail $?
for i in $(seq 1 100); do
uuidgen > "$syspathname" &
done
wait
# re-add the alias -- kernel should free mem when dummy dev is removed
ip link set dev "$devdummy" alias "$namewant"
check_err $?
fi
if [ $ret -ne 0 ]; then
echo "FAIL: set interface alias $devdummy to $namewant"
return 1
fi
echo "PASS: set ifalias $namewant for $devdummy"
}
kci_test_vrf()
{
vrfname="test-vrf"
ret=0
ip link show type vrf 2>/dev/null
if [ $? -ne 0 ]; then
echo "SKIP: vrf: iproute2 too old"
return $ksft_skip
fi
ip link add "$vrfname" type vrf table 10
check_err $?
if [ $ret -ne 0 ];then
echo "FAIL: can't add vrf interface, skipping test"
return 0
fi
ip -br link show type vrf | grep -q "$vrfname"
check_err $?
if [ $ret -ne 0 ];then
echo "FAIL: created vrf device not found"
return 1
fi
ip link set dev "$vrfname" up
check_err $?
ip link set dev "$devdummy" master "$vrfname"
check_err $?
ip link del dev "$vrfname"
check_err $?
if [ $ret -ne 0 ];then
echo "FAIL: vrf"
return 1
fi
echo "PASS: vrf"
}
kci_test_encap_vxlan()
{
ret=0
vxlan="test-vxlan0"
vlan="test-vlan0"
testns="$1"
ip -netns "$testns" link add "$vxlan" type vxlan id 42 group 239.1.1.1 \
dev "$devdummy" dstport 4789 2>/dev/null
if [ $? -ne 0 ]; then
echo "FAIL: can't add vxlan interface, skipping test"
return 0
fi
check_err $?
ip -netns "$testns" addr add 10.2.11.49/24 dev "$vxlan"
check_err $?
ip -netns "$testns" link set up dev "$vxlan"
check_err $?
ip -netns "$testns" link add link "$vxlan" name "$vlan" type vlan id 1
check_err $?
# changelink testcases
ip -netns "$testns" link set dev "$vxlan" type vxlan vni 43 2>/dev/null
check_fail $?
ip -netns "$testns" link set dev "$vxlan" type vxlan group ffe5::5 dev "$devdummy" 2>/dev/null
check_fail $?
ip -netns "$testns" link set dev "$vxlan" type vxlan ttl inherit 2>/dev/null
check_fail $?
ip -netns "$testns" link set dev "$vxlan" type vxlan ttl 64
check_err $?
ip -netns "$testns" link set dev "$vxlan" type vxlan nolearning
check_err $?
ip -netns "$testns" link set dev "$vxlan" type vxlan proxy 2>/dev/null
check_fail $?
ip -netns "$testns" link set dev "$vxlan" type vxlan norsc 2>/dev/null
check_fail $?
ip -netns "$testns" link set dev "$vxlan" type vxlan l2miss 2>/dev/null
check_fail $?
ip -netns "$testns" link set dev "$vxlan" type vxlan l3miss 2>/dev/null
check_fail $?
ip -netns "$testns" link set dev "$vxlan" type vxlan external 2>/dev/null
check_fail $?
ip -netns "$testns" link set dev "$vxlan" type vxlan udpcsum 2>/dev/null
check_fail $?
ip -netns "$testns" link set dev "$vxlan" type vxlan udp6zerocsumtx 2>/dev/null
check_fail $?
ip -netns "$testns" link set dev "$vxlan" type vxlan udp6zerocsumrx 2>/dev/null
check_fail $?
ip -netns "$testns" link set dev "$vxlan" type vxlan remcsumtx 2>/dev/null
check_fail $?
ip -netns "$testns" link set dev "$vxlan" type vxlan remcsumrx 2>/dev/null
check_fail $?
ip -netns "$testns" link set dev "$vxlan" type vxlan gbp 2>/dev/null
check_fail $?
ip -netns "$testns" link set dev "$vxlan" type vxlan gpe 2>/dev/null
check_fail $?
ip -netns "$testns" link del "$vxlan"
check_err $?
if [ $ret -ne 0 ]; then
echo "FAIL: vxlan"
return 1
fi
echo "PASS: vxlan"
}
kci_test_encap_fou()
{
ret=0
name="test-fou"
testns="$1"
ip fou help 2>&1 |grep -q 'Usage: ip fou'
if [ $? -ne 0 ];then
echo "SKIP: fou: iproute2 too old"
return $ksft_skip
fi
ip -netns "$testns" fou add port 7777 ipproto 47 2>/dev/null
if [ $? -ne 0 ];then
echo "FAIL: can't add fou port 7777, skipping test"
return 1
fi
ip -netns "$testns" fou add port 8888 ipproto 4
check_err $?
ip -netns "$testns" fou del port 9999 2>/dev/null
check_fail $?
ip -netns "$testns" fou del port 7777
check_err $?
if [ $ret -ne 0 ]; then
echo "FAIL: fou"
return 1
fi
echo "PASS: fou"
}
# test various encap methods, use netns to avoid unwanted interference
kci_test_encap()
{
testns="testns"
ret=0
ip netns add "$testns"
if [ $? -ne 0 ]; then
echo "SKIP encap tests: cannot add net namespace $testns"
return $ksft_skip
fi
ip -netns "$testns" link set lo up
check_err $?
ip -netns "$testns" link add name "$devdummy" type dummy
check_err $?
ip -netns "$testns" link set "$devdummy" up
check_err $?
kci_test_encap_vxlan "$testns"
kci_test_encap_fou "$testns"
ip netns del "$testns"
}
kci_test_macsec()
{
msname="test_macsec0"
ret=0
ip macsec help 2>&1 | grep -q "^Usage: ip macsec"
if [ $? -ne 0 ]; then
echo "SKIP: macsec: iproute2 too old"
return $ksft_skip
fi
ip link add link "$devdummy" "$msname" type macsec port 42 encrypt on
check_err $?
if [ $ret -ne 0 ];then
echo "FAIL: can't add macsec interface, skipping test"
return 1
fi
ip macsec add "$msname" tx sa 0 pn 1024 on key 01 12345678901234567890123456789012
check_err $?
ip macsec add "$msname" rx port 1234 address "1c:ed:de:ad:be:ef"
check_err $?
ip macsec add "$msname" rx port 1234 address "1c:ed:de:ad:be:ef" sa 0 pn 1 on key 00 0123456789abcdef0123456789abcdef
check_err $?
ip macsec show > /dev/null
check_err $?
ip link del dev "$msname"
check_err $?
if [ $ret -ne 0 ];then
echo "FAIL: macsec"
return 1
fi
echo "PASS: macsec"
}
#-------------------------------------------------------------------
# Example commands
# ip x s add proto esp src 14.0.0.52 dst 14.0.0.70 \
# spi 0x07 mode transport reqid 0x07 replay-window 32 \
# aead 'rfc4106(gcm(aes))' 1234567890123456dcba 128 \
# sel src 14.0.0.52/24 dst 14.0.0.70/24
# ip x p add dir out src 14.0.0.52/24 dst 14.0.0.70/24 \
# tmpl proto esp src 14.0.0.52 dst 14.0.0.70 \
# spi 0x07 mode transport reqid 0x07
#
# Subcommands not tested
# ip x s update
# ip x s allocspi
# ip x s deleteall
# ip x p update
# ip x p deleteall
# ip x p set
#-------------------------------------------------------------------
kci_test_ipsec()
{
ret=0
algo="aead rfc4106(gcm(aes)) 0x3132333435363738393031323334353664636261 128"
srcip=192.168.123.1
dstip=192.168.123.2
spi=7
ip addr add $srcip dev $devdummy
# flush to be sure there's nothing configured
ip x s flush ; ip x p flush
check_err $?
# start the monitor in the background
tmpfile=`mktemp /var/run/ipsectestXXX`
mpid=`(ip x m > $tmpfile & echo $!) 2>/dev/null`
sleep 0.2
ipsecid="proto esp src $srcip dst $dstip spi 0x07"
ip x s add $ipsecid \
mode transport reqid 0x07 replay-window 32 \
$algo sel src $srcip/24 dst $dstip/24
check_err $?
lines=`ip x s list | grep $srcip | grep $dstip | wc -l`
test $lines -eq 2
check_err $?
ip x s count | grep -q "SAD count 1"
check_err $?
lines=`ip x s get $ipsecid | grep $srcip | grep $dstip | wc -l`
test $lines -eq 2
check_err $?
ip x s delete $ipsecid
check_err $?
lines=`ip x s list | wc -l`
test $lines -eq 0
check_err $?
ipsecsel="dir out src $srcip/24 dst $dstip/24"
ip x p add $ipsecsel \
tmpl proto esp src $srcip dst $dstip \
spi 0x07 mode transport reqid 0x07
check_err $?
lines=`ip x p list | grep $srcip | grep $dstip | wc -l`
test $lines -eq 2
check_err $?
ip x p count | grep -q "SPD IN 0 OUT 1 FWD 0"
check_err $?
lines=`ip x p get $ipsecsel | grep $srcip | grep $dstip | wc -l`
test $lines -eq 2
check_err $?
ip x p delete $ipsecsel
check_err $?
lines=`ip x p list | wc -l`
test $lines -eq 0
check_err $?
# check the monitor results
kill $mpid
lines=`wc -l $tmpfile | cut "-d " -f1`
test $lines -eq 20
check_err $?
rm -rf $tmpfile
# clean up any leftovers
ip x s flush
check_err $?
ip x p flush
check_err $?
ip addr del $srcip/32 dev $devdummy
if [ $ret -ne 0 ]; then
echo "FAIL: ipsec"
return 1
fi
echo "PASS: ipsec"
}
#-------------------------------------------------------------------
# Example commands
# ip x s add proto esp src 14.0.0.52 dst 14.0.0.70 \
# spi 0x07 mode transport reqid 0x07 replay-window 32 \
# aead 'rfc4106(gcm(aes))' 1234567890123456dcba 128 \
# sel src 14.0.0.52/24 dst 14.0.0.70/24
# offload dev sim1 dir out
# ip x p add dir out src 14.0.0.52/24 dst 14.0.0.70/24 \
# tmpl proto esp src 14.0.0.52 dst 14.0.0.70 \
# spi 0x07 mode transport reqid 0x07
#
#-------------------------------------------------------------------
kci_test_ipsec_offload()
{
ret=0
algo="aead rfc4106(gcm(aes)) 0x3132333435363738393031323334353664636261 128"
srcip=192.168.123.3
dstip=192.168.123.4
dev=simx1
sysfsd=/sys/kernel/debug/netdevsim/$dev
sysfsf=$sysfsd/ipsec
# setup netdevsim since dummydev doesn't have offload support
modprobe netdevsim
check_err $?
if [ $ret -ne 0 ]; then
echo "FAIL: ipsec_offload can't load netdevsim"
return 1
fi
ip link add $dev type netdevsim
ip addr add $srcip dev $dev
ip link set $dev up
if [ ! -d $sysfsd ] ; then
echo "FAIL: ipsec_offload can't create device $dev"
return 1
fi
if [ ! -f $sysfsf ] ; then
echo "FAIL: ipsec_offload netdevsim doesn't support IPsec offload"
return 1
fi
# flush to be sure there's nothing configured
ip x s flush ; ip x p flush
# create offloaded SAs, both in and out
ip x p add dir out src $srcip/24 dst $dstip/24 \
tmpl proto esp src $srcip dst $dstip spi 9 \
mode transport reqid 42
check_err $?
ip x p add dir out src $dstip/24 dst $srcip/24 \
tmpl proto esp src $dstip dst $srcip spi 9 \
mode transport reqid 42
check_err $?
ip x s add proto esp src $srcip dst $dstip spi 9 \
mode transport reqid 42 $algo sel src $srcip/24 dst $dstip/24 \
offload dev $dev dir out
check_err $?
ip x s add proto esp src $dstip dst $srcip spi 9 \
mode transport reqid 42 $algo sel src $dstip/24 dst $srcip/24 \
offload dev $dev dir in
check_err $?
if [ $ret -ne 0 ]; then
echo "FAIL: ipsec_offload can't create SA"
return 1
fi
# does offload show up in ip output
lines=`ip x s list | grep -c "crypto offload parameters: dev $dev dir"`
if [ $lines -ne 2 ] ; then
echo "FAIL: ipsec_offload SA offload missing from list output"
check_err 1
fi
# use ping to exercise the Tx path
ping -I $dev -c 3 -W 1 -i 0 $dstip >/dev/null
# does driver have correct offload info
diff $sysfsf - << EOF
SA count=2 tx=3
sa[0] tx ipaddr=0x00000000 00000000 00000000 00000000
sa[0] spi=0x00000009 proto=0x32 salt=0x61626364 crypt=1
sa[0] key=0x34333231 38373635 32313039 36353433
sa[1] rx ipaddr=0x00000000 00000000 00000000 037ba8c0
sa[1] spi=0x00000009 proto=0x32 salt=0x61626364 crypt=1
sa[1] key=0x34333231 38373635 32313039 36353433
EOF
if [ $? -ne 0 ] ; then
echo "FAIL: ipsec_offload incorrect driver data"
check_err 1
fi
# does offload get removed from driver
ip x s flush
ip x p flush
lines=`grep -c "SA count=0" $sysfsf`
if [ $lines -ne 1 ] ; then
echo "FAIL: ipsec_offload SA not removed from driver"
check_err 1
fi
# clean up any leftovers
ip link del $dev
rmmod netdevsim
if [ $ret -ne 0 ]; then
echo "FAIL: ipsec_offload"
return 1
fi
echo "PASS: ipsec_offload"
}
kci_test_gretap()
{
testns="testns"
DEV_NS=gretap00
ret=0
ip netns add "$testns"
if [ $? -ne 0 ]; then
echo "SKIP gretap tests: cannot add net namespace $testns"
return $ksft_skip
fi
ip link help gretap 2>&1 | grep -q "^Usage:"
if [ $? -ne 0 ];then
echo "SKIP: gretap: iproute2 too old"
ip netns del "$testns"
return $ksft_skip
fi
# test native tunnel
ip -netns "$testns" link add dev "$DEV_NS" type gretap seq \
key 102 local 172.16.1.100 remote 172.16.1.200
check_err $?
ip -netns "$testns" addr add dev "$DEV_NS" 10.1.1.100/24
check_err $?
ip -netns "$testns" link set dev $DEV_NS up
check_err $?
ip -netns "$testns" link del "$DEV_NS"
check_err $?
# test external mode
ip -netns "$testns" link add dev "$DEV_NS" type gretap external
check_err $?
ip -netns "$testns" link del "$DEV_NS"
check_err $?
if [ $ret -ne 0 ]; then
echo "FAIL: gretap"
ip netns del "$testns"
return 1
fi
echo "PASS: gretap"
ip netns del "$testns"
}
kci_test_ip6gretap()
{
testns="testns"
DEV_NS=ip6gretap00
ret=0
ip netns add "$testns"
if [ $? -ne 0 ]; then
echo "SKIP ip6gretap tests: cannot add net namespace $testns"
return $ksft_skip
fi
ip link help ip6gretap 2>&1 | grep -q "^Usage:"
if [ $? -ne 0 ];then
echo "SKIP: ip6gretap: iproute2 too old"
ip netns del "$testns"
return $ksft_skip
fi
# test native tunnel
ip -netns "$testns" link add dev "$DEV_NS" type ip6gretap seq \
key 102 local fc00:100::1 remote fc00:100::2
check_err $?
ip -netns "$testns" addr add dev "$DEV_NS" fc00:200::1/96
check_err $?
ip -netns "$testns" link set dev $DEV_NS up
check_err $?
ip -netns "$testns" link del "$DEV_NS"
check_err $?
# test external mode
ip -netns "$testns" link add dev "$DEV_NS" type ip6gretap external
check_err $?
ip -netns "$testns" link del "$DEV_NS"
check_err $?
if [ $ret -ne 0 ]; then
echo "FAIL: ip6gretap"
ip netns del "$testns"
return 1
fi
echo "PASS: ip6gretap"
ip netns del "$testns"
}
kci_test_erspan()
{
testns="testns"
DEV_NS=erspan00
ret=0
ip link help erspan 2>&1 | grep -q "^Usage:"
if [ $? -ne 0 ];then
echo "SKIP: erspan: iproute2 too old"
return $ksft_skip
fi
ip netns add "$testns"
if [ $? -ne 0 ]; then
echo "SKIP erspan tests: cannot add net namespace $testns"
return $ksft_skip
fi
# test native tunnel erspan v1
ip -netns "$testns" link add dev "$DEV_NS" type erspan seq \
key 102 local 172.16.1.100 remote 172.16.1.200 \
erspan_ver 1 erspan 488
check_err $?
ip -netns "$testns" addr add dev "$DEV_NS" 10.1.1.100/24
check_err $?
ip -netns "$testns" link set dev $DEV_NS up
check_err $?
ip -netns "$testns" link del "$DEV_NS"
check_err $?
# test native tunnel erspan v2
ip -netns "$testns" link add dev "$DEV_NS" type erspan seq \
key 102 local 172.16.1.100 remote 172.16.1.200 \
erspan_ver 2 erspan_dir ingress erspan_hwid 7
check_err $?
ip -netns "$testns" addr add dev "$DEV_NS" 10.1.1.100/24
check_err $?
ip -netns "$testns" link set dev $DEV_NS up
check_err $?
ip -netns "$testns" link del "$DEV_NS"
check_err $?
# test external mode
ip -netns "$testns" link add dev "$DEV_NS" type erspan external
check_err $?
ip -netns "$testns" link del "$DEV_NS"
check_err $?
if [ $ret -ne 0 ]; then
echo "FAIL: erspan"
ip netns del "$testns"
return 1
fi
echo "PASS: erspan"
ip netns del "$testns"
}
kci_test_ip6erspan()
{
testns="testns"
DEV_NS=ip6erspan00
ret=0
ip link help ip6erspan 2>&1 | grep -q "^Usage:"
if [ $? -ne 0 ];then
echo "SKIP: ip6erspan: iproute2 too old"
return $ksft_skip
fi
ip netns add "$testns"
if [ $? -ne 0 ]; then
echo "SKIP ip6erspan tests: cannot add net namespace $testns"
return $ksft_skip
fi
# test native tunnel ip6erspan v1
ip -netns "$testns" link add dev "$DEV_NS" type ip6erspan seq \
key 102 local fc00:100::1 remote fc00:100::2 \
erspan_ver 1 erspan 488
check_err $?
ip -netns "$testns" addr add dev "$DEV_NS" 10.1.1.100/24
check_err $?
ip -netns "$testns" link set dev $DEV_NS up
check_err $?
ip -netns "$testns" link del "$DEV_NS"
check_err $?
# test native tunnel ip6erspan v2
ip -netns "$testns" link add dev "$DEV_NS" type ip6erspan seq \
key 102 local fc00:100::1 remote fc00:100::2 \
erspan_ver 2 erspan_dir ingress erspan_hwid 7
check_err $?
ip -netns "$testns" addr add dev "$DEV_NS" 10.1.1.100/24
check_err $?
ip -netns "$testns" link set dev $DEV_NS up
check_err $?
ip -netns "$testns" link del "$DEV_NS"
check_err $?
# test external mode
ip -netns "$testns" link add dev "$DEV_NS" \
type ip6erspan external
check_err $?
ip -netns "$testns" link del "$DEV_NS"
check_err $?
if [ $ret -ne 0 ]; then
echo "FAIL: ip6erspan"
ip netns del "$testns"
return 1
fi
echo "PASS: ip6erspan"
ip netns del "$testns"
}
kci_test_fdb_get()
{
IP="ip -netns testns"
BRIDGE="bridge -netns testns"
brdev="test-br0"
vxlandev="vxlan10"
test_mac=de:ad:be:ef:13:37
localip="10.0.2.2"
dstip="10.0.2.3"
ret=0
bridge fdb help 2>&1 |grep -q 'bridge fdb get'
if [ $? -ne 0 ];then
echo "SKIP: fdb get tests: iproute2 too old"
return $ksft_skip
fi
ip netns add testns
if [ $? -ne 0 ]; then
echo "SKIP fdb get tests: cannot add net namespace $testns"
return $ksft_skip
fi
$IP link add "$vxlandev" type vxlan id 10 local $localip \
dstport 4789 2>/dev/null
check_err $?
$IP link add name "$brdev" type bridge &>/dev/null
check_err $?
$IP link set dev "$vxlandev" master "$brdev" &>/dev/null
check_err $?
$BRIDGE fdb add $test_mac dev "$vxlandev" master &>/dev/null
check_err $?
$BRIDGE fdb add $test_mac dev "$vxlandev" dst $dstip self &>/dev/null
check_err $?
$BRIDGE fdb get $test_mac brport "$vxlandev" 2>/dev/null | grep -q "dev $vxlandev master $brdev"
check_err $?
$BRIDGE fdb get $test_mac br "$brdev" 2>/dev/null | grep -q "dev $vxlandev master $brdev"
check_err $?
$BRIDGE fdb get $test_mac dev "$vxlandev" self 2>/dev/null | grep -q "dev $vxlandev dst $dstip"
check_err $?
ip netns del testns &>/dev/null
if [ $ret -ne 0 ]; then
echo "FAIL: bridge fdb get"
return 1
fi
echo "PASS: bridge fdb get"
}
kci_test_neigh_get()
{
dstmac=de:ad:be:ef:13:37
dstip=10.0.2.4
dstip6=dead::2
ret=0
ip neigh help 2>&1 |grep -q 'ip neigh get'
if [ $? -ne 0 ];then
echo "SKIP: fdb get tests: iproute2 too old"
return $ksft_skip
fi
# ipv4
ip neigh add $dstip lladdr $dstmac dev "$devdummy" > /dev/null
check_err $?
ip neigh get $dstip dev "$devdummy" 2> /dev/null | grep -q "$dstmac"
check_err $?
ip neigh del $dstip lladdr $dstmac dev "$devdummy" > /dev/null
check_err $?
# ipv4 proxy
ip neigh add proxy $dstip dev "$devdummy" > /dev/null
check_err $?
ip neigh get proxy $dstip dev "$devdummy" 2>/dev/null | grep -q "$dstip"
check_err $?
ip neigh del proxy $dstip dev "$devdummy" > /dev/null
check_err $?
# ipv6
ip neigh add $dstip6 lladdr $dstmac dev "$devdummy" > /dev/null
check_err $?
ip neigh get $dstip6 dev "$devdummy" 2> /dev/null | grep -q "$dstmac"
check_err $?
ip neigh del $dstip6 lladdr $dstmac dev "$devdummy" > /dev/null
check_err $?
# ipv6 proxy
ip neigh add proxy $dstip6 dev "$devdummy" > /dev/null
check_err $?
ip neigh get proxy $dstip6 dev "$devdummy" 2>/dev/null | grep -q "$dstip6"
check_err $?
ip neigh del proxy $dstip6 dev "$devdummy" > /dev/null
check_err $?
if [ $ret -ne 0 ];then
echo "FAIL: neigh get"
return 1
fi
echo "PASS: neigh get"
}
kci_test_rtnl()
{
kci_add_dummy
if [ $ret -ne 0 ];then
echo "FAIL: cannot add dummy interface"
return 1
fi
kci_test_polrouting
kci_test_route_get
kci_test_tc
kci_test_gre
kci_test_gretap
kci_test_ip6gretap
kci_test_erspan
kci_test_ip6erspan
kci_test_bridge
kci_test_addrlabel
kci_test_ifalias
kci_test_vrf
kci_test_encap
kci_test_macsec
kci_test_ipsec
kci_test_ipsec_offload
kci_test_fdb_get
kci_test_neigh_get
kci_del_dummy
}
#check for needed privileges
if [ "$(id -u)" -ne 0 ];then
echo "SKIP: Need root privileges"
exit $ksft_skip
fi
for x in ip tc;do
$x -Version 2>/dev/null >/dev/null
if [ $? -ne 0 ];then
echo "SKIP: Could not run test without the $x tool"
exit $ksft_skip
fi
done
kci_test_rtnl
exit $ret
|
Taeung/tip
|
tools/testing/selftests/net/rtnetlink.sh
|
Shell
|
gpl-2.0
| 26,142 |
#!/bin/sh
if getargbool 1 rd.splash -d -n rd_NO_SPLASH; then
info "Starting Gentoo Splash"
[ -x /lib/udev/console_init ] && /lib/udev/console_init /dev/tty0
CDROOT=0
. /lib/gensplash-lib.sh
splash init
[ -x /lib/udev/console_init ] && /lib/udev/console_init /dev/tty0
fi
|
yuwata/dracut
|
modules.d/50gensplash/gensplash-pretrigger.sh
|
Shell
|
gpl-2.0
| 297 |
#!/bin/bash
# useful script to test all the different build types that we support.
# This helps when doing large merges
# Andrew Tridgell, November 2011
. config.mk
set -e
set -x
echo "Testing ArduPlane build"
pushd ArduPlane
for b in all apm2 sitl linux; do
pwd
make clean
make $b -j4
done
popd
echo "Testing ArduCopter build"
pushd ArduCopter
for b in sitl linux; do
pwd
make clean
make $b -j4
done
popd
echo "Testing APMRover build"
pushd APMrover2
for b in all apm2 sitl linux; do
pwd
make clean
make $b -j4
done
popd
echo "Testing AntennaTracker build"
pushd AntennaTracker
for b in apm2 sitl; do
pwd
make clean
make $b -j4
done
popd
echo "Testing build of examples"
examples="Tools/VARTest Tools/CPUInfo"
for d in $examples; do
pushd $d
make clean
make apm2 -j4
make clean
make sitl -j4
popd
done
test -d ../libmaple && {
echo "Testing flymaple build"
for d in ArduPlane APMrover2; do
pushd $d
make clean
make flymaple -j4
popd
done
}
pushd Tools/Replay
make clean
make linux -j4
popd
test -n "$PX4_ROOT" && test -d "$PX4_ROOT" && {
./Tools/scripts/build_all_px4.sh
}
test -n "$VRBRAIN_ROOT" && test -d "$VRBRAIN_ROOT" && {
./Tools/scripts/build_all_vrbrain.sh
}
exit 0
|
Yndal/ArduPilot-SensorPlatform
|
ardupilot/Tools/scripts/build_all.sh
|
Shell
|
mit
| 1,285 |
#!/bin/bash
set -ex
source $(dirname $0)/provision-config.sh
NETWORK_PLUGIN=$(os::util::get-network-plugin ${4:-""})
NETWORK_CONF_PATH=/etc/sysconfig/network-scripts/
sed -i 's/^NM_CONTROLLED=no/#NM_CONTROLLED=no/' ${NETWORK_CONF_PATH}ifcfg-eth1
systemctl restart network
# Setup hosts file to ensure name resolution to each member of the cluster
minion_ip_array=(${MINION_IPS//,/ })
os::util::setup-hosts-file "${MASTER_NAME}" "${MASTER_IP}" MINION_NAMES \
minion_ip_array
# Install the required packages
yum install -y docker-io git golang e2fsprogs hg net-tools bridge-utils which
# Build openshift
echo "Building openshift"
pushd "${ORIGIN_ROOT}"
./hack/build-go.sh
os::util::install-cmds "${ORIGIN_ROOT}"
./hack/install-etcd.sh
popd
os::util::init-certs "${ORIGIN_ROOT}" "${NETWORK_PLUGIN}" "${MASTER_NAME}" \
"${MASTER_IP}" MINION_NAMES minion_ip_array
# Start docker
systemctl enable docker.service
systemctl start docker.service
# Create systemd service
node_list=$(os::util::join , ${MINION_NAMES[@]})
cat <<EOF > /usr/lib/systemd/system/openshift-master.service
[Unit]
Description=OpenShift Master
Requires=docker.service network.service
After=network.service
[Service]
ExecStart=/usr/bin/openshift start master --master=https://${MASTER_IP}:8443 --nodes=${node_list} --network-plugin=${NETWORK_PLUGIN}
WorkingDirectory=${ORIGIN_ROOT}/
[Install]
WantedBy=multi-user.target
EOF
# Start the service
systemctl daemon-reload
systemctl start openshift-master.service
# setup SDN
$(dirname $0)/provision-sdn.sh
# Set up the KUBECONFIG environment variable for use by oc
os::util::set-oc-env "${ORIGIN_ROOT}" "/root/.bash_profile"
os::util::set-oc-env "${ORIGIN_ROOT}" "/home/vagrant/.bash_profile"
|
xuant/origin
|
vagrant/provision-master.sh
|
Shell
|
apache-2.0
| 1,727 |
#!/bin/sh
TEST_SCRIPT=./VMake/executableTester.sh
until test -r ${TEST_SCRIPT} ; do
TEST_SCRIPT=../${TEST_SCRIPT}
done
. ${TEST_SCRIPT}
runAndHandleSystemTest "testHexaMD --allowUnusedCPUs=False --allowUnbalancing=True --meshSizeI=11 --meshSizeJ=2 --meshSizeK=2" "$0" "$@"
|
kelchuan/snac_thesis
|
StGermain/Discretisation/Mesh/tests/testHexaMD-unbalanced.3of4.sh
|
Shell
|
gpl-2.0
| 283 |
#!/bin/bash
trap 'exit 2' SIGTERM
usage() {
cat <<EOF
usage: $0 [COMMAND]
Does nothing.
EOF
}
doStuff() {
echo "Running doStuff with args: $@"
}
failStuff() {
echo "Running failStuff with args: $@"
    exit 1
}
sleepStuff() {
echo "Sleeping 10 seconds..."
sleep 10
}
interruptSleep() {
for i in {1..10}; do
echo -n "."
sleep 1
done
}
cmd="${1:-usage}"
shift
$cmd "$@"
|
joyent/containerpilot
|
watches/testdata/test.sh
|
Shell
|
mpl-2.0
| 410 |
#!/bin/bash
# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script should be sourced as a part of config-test or config-default.
# Specifically, the following environment variables are assumed:
# - CLUSTER_NAME (the name of the cluster)
ZONE="${ZONE:-us-central1-f}"
NUM_MINIONS="${NUM_MINIONS:-3}"
CLUSTER_API_VERSION="${CLUSTER_API_VERSION:-}"
NETWORK="${NETWORK:-default}"
NETWORK_RANGE="${NETWORK_RANGE:-10.240.0.0/16}"
FIREWALL_SSH="${FIREWALL_SSH:-${NETWORK}-allow-ssh}"
GCLOUD="${GCLOUD:-gcloud}"
CMD_GROUP="${CMD_GROUP:-}"
GCLOUD_CONFIG_DIR="${GCLOUD_CONFIG_DIR:-${HOME}/.config/gcloud/kubernetes}"
MINION_SCOPES="${MINION_SCOPES:-"compute-rw,storage-ro"}"
MACHINE_TYPE="${MACHINE_TYPE:-n1-standard-2}"
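# Illustrative usage (assumed entry point): because every value above uses the
# ${VAR:-default} pattern, any of them can be overridden from the environment
# before this file is sourced, e.g.
#   ZONE=europe-west1-b NUM_MINIONS=5 cluster/kube-up.sh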
# WARNING: any new vars added here must correspond to options that can be
# passed to `gcloud {CMD_GROUP} container clusters create`, or they will
# have no effect. If you change/add a var used to toggle a value in
# cluster/gce/configure-vm.sh, please ping someone on GKE.
# This is a hack, but I keep setting this when I run commands manually, and
# then things grossly fail during normal runs because cluster/kubecfg.sh and
# cluster/kubectl.sh both use this if it's set.
unset KUBERNETES_MASTER
|
combk8s/kubernetes
|
cluster/gke/config-common.sh
|
Shell
|
apache-2.0
| 1,782 |
#!/bin/bash
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Utility script, sourced by both ngram_hdfs_load.sh and hive_table_create.sh
# This script will set a series of constants, some based on the choice
# of the command line "N" value (defaults to 1). N indicates the ngram
# dataset to download and copy into HDFS.
readonly SOURCE_FORMAT="googlebooks-eng-all-%s-20120701-%s%s"
readonly SOURCE_LOCATION="gs://books/ngrams/books"
# The "hadoop" executable should be in the user path
readonly HDFS_CMD="hadoop fs"
# What to install: 1gram by default
N=1
# Now parse command line arguments
while [[ $# -ne 0 ]]; do
case "$1" in
--N=*)
N=${1#--N=}
shift
;;
--help)
N=
shift
;;
    *)
      # unrecognized argument: consume it so the while loop terminates
      shift
      ;;
esac
done
if [[ ! $N -ge 1 ]]; then
echo "usage $(basename $0): --N=<n>"
exit 1
fi
# Now set constants based on the selection of N
readonly NGRAMS="${N}gram"
readonly HDFS_DIR="ngrams/$NGRAMS"
readonly STAGE_DIR="/hadoop/tmp/$USER/ngrams/$NGRAMS"
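# Example (illustrative): sourcing this script with --N=2 yields
#   NGRAMS="2gram"
#   HDFS_DIR="ngrams/2gram"
#   STAGE_DIR="/hadoop/tmp/$USER/ngrams/2gram"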
|
gruter/bdutil
|
sampleapps/querytools/examples/ngrams/ngram_setup.sh
|
Shell
|
apache-2.0
| 1,542 |
#!/bin/sh /etc/rc.common
# Copyright (C) 2012-2014 OpenWrt.org
if [ "$( which vdsl_cpe_control )" ]; then
XDSL_CTRL=vdsl_cpe_control
else
XDSL_CTRL=dsl_cpe_control
fi
#
# Basic functions to send CLI commands to the vdsl_cpe_control daemon
#
dsl_cmd() {
killall -q -0 ${XDSL_CTRL} && (
lock /var/lock/dsl_pipe
echo "$@" > /tmp/pipe/dsl_cpe0_cmd
cat /tmp/pipe/dsl_cpe0_ack
lock -u /var/lock/dsl_pipe
)
}
dsl_val() {
echo $(expr "$1" : '.*'$2'=\([-\.[:alnum:]]*\).*')
}
dsl_string() {
echo $(expr "$1" : '.*'$2'=(\([A-Z0-9,]*\))')
}
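# Illustrative composition of the helpers above: issue a CLI command, then pull
# a single field out of the daemon's reply.
#   lsg=$(dsl_cmd lsg)               # query the line state
#   ls=$(dsl_val "$lsg" nLineState)  # extract one "name=value" field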
#
# Simple divide by 10 routine to cope with one decimal place
#
dbt() {
local a=$(expr $1 / 10)
local b=$(expr $1 % 10)
echo "${a}.${b#-}"
}
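# e.g. (illustrative) dbt 123 -> "12.3", dbt -45 -> "-4.5"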
#
# Take a number and convert to k or meg
#
scale() {
local val=$1
local a
local b
if [ "$val" -gt 1000000 ]; then
a=$(expr $val / 1000)
b=$(expr $a % 1000)
a=$(expr $a / 1000)
printf "%d.%03d Mb" ${a} ${b}
elif [ "$val" -gt 1000 ]; then
a=$(expr $val / 1000)
printf "%d Kb" ${a}
else
echo "${val} b"
fi
}
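# e.g. (illustrative) scale 999 -> "999 b", scale 1500 -> "1 Kb", scale 2500000 -> "2.500 Mb"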
scale_latency() {
local val=$1
local a
local b
a=$(expr $val / 100)
b=$(expr $val % 100)
printf "%d.%d ms" ${a} ${b}
}
#
# convert vendorid into human readable form
#
parse_vendorid() {
local val=$1
local name
local version
case "$val" in
B5,00,41,4C,43,42*)
name="Alcatel"
version=${val##*B5,00,41,4C,43,42,}
;;
B5,00,41,4E,44,56*)
name="Analog Devices"
version=${val##*B5,00,41,4E,44,56,}
;;
B5,00,42,44,43,4D*)
name="Broadcom"
version=${val##*B5,00,42,44,43,4D,}
;;
B5,00,43,45,4E,54*)
name="Centillium"
version=${val##*B5,00,43,45,4E,54,}
;;
B5,00,47,53,50,4E*)
name="Globespan"
version=${val##*B5,00,47,53,50,4E,}
;;
B5,00,49,4B,4E,53*)
name="Ikanos"
version=${val##*B5,00,49,4B,4E,53,}
;;
B5,00,49,46,54,4E*)
name="Infineon"
version=${val##*B5,00,49,46,54,4E,}
;;
B5,00,54,53,54,43*)
name="Texas Instruments"
version=${val##*B5,00,54,53,54,43,}
;;
B5,00,54,4D,4D,42*)
name="Thomson MultiMedia Broadband"
version=${val##*B5,00,54,4D,4D,42,}
;;
B5,00,54,43,54,4E*)
name="Trend Chip Technologies"
version=${val##*B5,00,54,43,54,4E,}
;;
B5,00,53,54,4D,49*)
name="ST Micro"
version=${val##*B5,00,53,54,4D,49,}
;;
esac
[ -n "$name" ] && {
val="$name"
[ "$version" != "00,00" ] && val="$(printf "%s %d.%d" "$val" 0x${version//,/ 0x})"
}
echo "$val"
}
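# Example (illustrative): a Broadcom G.994 vendor ID with version "00,00" maps
# to the bare vendor name.
#   parse_vendorid "B5,00,42,44,43,4D,00,00"   # -> "Broadcom"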
#
# Read the data rates for both directions
#
data_rates() {
local csg
local dru
local drd
local sdru
local sdrd
csg=$(dsl_cmd g997csg 0 1)
drd=$(dsl_val "$csg" ActualDataRate)
csg=$(dsl_cmd g997csg 0 0)
dru=$(dsl_val "$csg" ActualDataRate)
[ -z "$drd" ] && drd=0
[ -z "$dru" ] && dru=0
sdrd=$(scale $drd)
sdru=$(scale $dru)
if [ "$action" = "lucistat" ]; then
echo "dsl.data_rate_down=$drd"
echo "dsl.data_rate_up=$dru"
echo "dsl.data_rate_down_s=\"$sdrd\""
echo "dsl.data_rate_up_s=\"$sdru\""
else
echo "Data Rate: Down: ${sdrd}/s / Up: ${sdru}/s"
fi
}
#
# Chipset
#
chipset() {
local vig
local cs
vig=$(dsl_cmd vig)
cs=$(dsl_val "$vig" DSL_ChipSetType)
csfw=$(dsl_val "$vig" DSL_ChipSetFWVersion)
csapi=$(dsl_val "$vig" DSL_DriverVersionApi)
if [ "$action" = "lucistat" ]; then
echo "dsl.chipset=\"${cs}\""
echo "dsl.firmware_version=\"${csfw}\""
echo "dsl.api_version=\"${csapi}\""
else
echo "Chipset: ${cs}"
echo "Firmware Version: ${csfw}"
echo "API Version: ${csapi}"
fi
}
#
# Vendor information
#
vendor() {
local lig
local vid
local svid
lig=$(dsl_cmd g997lig 1)
vid=$(dsl_string "$lig" G994VendorID)
svid=$(dsl_string "$lig" SystemVendorID)
vid=$(parse_vendorid $vid)
svid=$(parse_vendorid $svid)
if [ "$action" = "lucistat" ]; then
echo "dsl.atuc_vendor_id=\"${vid}\""
echo "dsl.atuc_system_vendor_id=\"${svid}\""
else
echo "ATU-C Vendor ID: ${vid}"
echo "ATU-C System Vendor ID: ${svid}"
fi
}
#
# XTSE capabilities
#
xtse() {
local xtusesg
local xtse1
local xtse2
local xtse3
local xtse4
local xtse5
local xtse6
local xtse7
local xtse8
local xtse_s=""
local annex_s=""
local line_mode_s=""
local vector_s=""
local dsmsg=""
local cmd=""
xtusesg=$(dsl_cmd g997xtusesg)
xtse1=$(dsl_val "$xtusesg" XTSE1)
xtse2=$(dsl_val "$xtusesg" XTSE2)
xtse3=$(dsl_val "$xtusesg" XTSE3)
xtse4=$(dsl_val "$xtusesg" XTSE4)
xtse5=$(dsl_val "$xtusesg" XTSE5)
xtse6=$(dsl_val "$xtusesg" XTSE6)
xtse7=$(dsl_val "$xtusesg" XTSE7)
xtse8=$(dsl_val "$xtusesg" XTSE8)
# Evaluate Annex (according to G.997.1, 7.3.1.1.1)
if [ $((xtse1 & 13)) != 0 \
-o $((xtse2 & 1)) != 0 \
-o $((xtse3 & 12)) != 0 \
-o $((xtse4 & 3)) != 0 \
-o $((xtse6 & 3)) != 0 \
-o $((xtse8 & 1)) != 0 ]; then
annex_s=" A,"
fi
if [ $((xtse1 & 48)) != 0 \
-o $((xtse2 & 2)) != 0 \
-o $((xtse3 & 48)) != 0 \
-o $((xtse6 & 12)) != 0 \
-o $((xtse8 & 2)) != 0 ]; then
annex_s="$annex_s B,"
fi
if [ $((xtse1 & 194)) != 0 \
-o $((xtse2 & 12)) != 0 \
-o $((xtse8 & 4)) != 0 ]; then
annex_s="$annex_s C,"
fi
if [ $((xtse4 & 48)) != 0 \
-o $((xtse5 & 3)) != 0 \
-o $((xtse6 & 192)) != 0 ]; then
annex_s="$annex_s I,"
fi
if [ $((xtse4 & 192)) != 0 \
-o $((xtse7 & 3)) != 0 ]; then
annex_s="$annex_s J,"
fi
if [ $((xtse5 & 60)) != 0 ]; then
annex_s="$annex_s L,"
fi
if [ $((xtse5 & 192)) != 0 \
-o $((xtse7 & 12)) != 0 ]; then
annex_s="$annex_s M,"
fi
annex_s=`echo ${annex_s:1}`
annex_s=`echo ${annex_s%?}`
# Evaluate Line Mode (according to G.997.1, 7.3.1.1.1)
# Regional standard: ANSI T1.413
if [ $((xtse1 & 1)) != 0 ]; then
line_mode_s=" T1.413,"
fi
# Regional standard: TS 101 388
	if [ $((xtse1 & 2)) != 0 ]; then
line_mode_s="$line_mode_s TS 101 388,"
fi
if [ $((xtse1 & 252)) != 0 ]; then
line_mode_s="$line_mode_s G.992.1 (ADSL),"
fi
if [ $((xtse2 & 15)) != 0 ]; then
line_mode_s="$line_mode_s G.992.2 (ADSL lite),"
fi
if [ $((xtse3 & 60)) != 0 \
-o $((xtse4 & 240)) != 0 \
-o $((xtse5 & 252)) != 0 ]; then
line_mode_s="$line_mode_s G.992.3 (ADSL2),"
fi
if [ $((xtse4 & 3)) != 0 \
-o $((xtse5 & 3)) != 0 ]; then
line_mode_s="$line_mode_s G.992.4 (ADSL2 lite),"
fi
if [ $((xtse6 & 199)) != 0 \
-o $((xtse7 & 15)) != 0 ]; then
line_mode_s="$line_mode_s G.992.5 (ADSL2+),"
fi
if [ $((xtse8 & 7)) != 0 ]; then
dsmsg=$(dsl_cmd dsmsg)
vector_s=$(dsl_val "$dsmsg" eVectorStatus)
case "$vector_s" in
"0") line_mode_s="$line_mode_s G.993.2 (VDSL2)," ;;
"1") line_mode_s="$line_mode_s G.993.5 (VDSL2 with downstream vectoring)," ;;
"2") line_mode_s="$line_mode_s G.993.5 (VDSL2 with down- and upstream vectoring)," ;;
*) line_mode_s="$line_mode_s unknown," ;;
esac
fi
#!!! PROPRIETARY & INTERMEDIATE USE !!!
if [ $((xtse8 & 128)) != 0 ]; then
line_mode_s="$line_mode_s G.993.1 (VDSL),"
fi
line_mode_s=`echo ${line_mode_s:1}`
line_mode_s=`echo ${line_mode_s%?}`
xtse_s="${xtse1}, ${xtse2}, ${xtse3}, ${xtse4}, ${xtse5}, ${xtse6}, ${xtse7}, ${xtse8}"
if [ "$action" = "lucistat" ]; then
echo "dsl.xtse1=${xtse1:-nil}"
echo "dsl.xtse2=${xtse2:-nil}"
echo "dsl.xtse3=${xtse3:-nil}"
echo "dsl.xtse4=${xtse4:-nil}"
echo "dsl.xtse5=${xtse5:-nil}"
echo "dsl.xtse6=${xtse6:-nil}"
echo "dsl.xtse7=${xtse7:-nil}"
echo "dsl.xtse8=${xtse8:-nil}"
echo "dsl.xtse_s=\"$xtse_s\""
echo "dsl.annex_s=\"${annex_s}\""
echo "dsl.line_mode_s=\"${line_mode_s}\""
else
echo "XTSE Capabilities: ${xtse_s}"
echo "Annex: ${annex_s}"
echo "Line Mode: ${line_mode_s}"
fi
}
#
# Power Management Mode
#
power_mode() {
local pmsg=$(dsl_cmd g997pmsg)
local pm=$(dsl_val "$pmsg" nPowerManagementStatus);
local s;
case "$pm" in
"-1") s="Power management state is not available" ;;
"0") s="L0 - Synchronized" ;;
"1") s="L1 - Power Down Data transmission (G.992.2)" ;;
"2") s="L2 - Power Down Data transmission (G.992.3 and G.992.4)" ;;
"3") s="L3 - No power" ;;
*) s="unknown" ;;
esac
if [ "$action" = "lucistat" ]; then
echo "dsl.power_mode_num=${pm:-nil}"
echo "dsl.power_mode_s=\"$s\""
else
echo "Power Management Mode: $s"
fi
}
#
# Latency type (interleave delay)
#
latency_delay() {
local csg
local idu
local idu_s;
local sidu
local idd
local idd_s;
local sidd
csg=$(dsl_cmd g997csg 0 1)
idd=$(dsl_val "$csg" ActualInterleaveDelay)
csg=$(dsl_cmd g997csg 0 0)
idu=$(dsl_val "$csg" ActualInterleaveDelay)
[ -z "$idd" ] && idd=0
[ -z "$idu" ] && idu=0
if [ "$idd" -gt 100 ]; then
idd_s="Interleave"
else
idd_s="Fast"
fi
if [ "$idu" -gt 100 ]; then
idu_s="Interleave"
else
idu_s="Fast"
fi
sidu=$(scale_latency $idu)
sidd=$(scale_latency $idd)
if [ "$action" = "lucistat" ]; then
echo "dsl.latency_num_down=\"$sidd\""
echo "dsl.latency_num_up=\"$sidu\""
echo "dsl.latency_s_down=\"$idd_s\""
echo "dsl.latency_s_up=\"$idu_s\""
else
echo "Latency [Interleave Delay]: ${sidd} [${idd_s}] ${sidu} [${idu_s}]"
fi
}
#
# Errors
#
errors() {
local lsctg
local dpctg
local ccsg
local esf
local esn
local sesf
local sesn
local lossf
local lossn
local uasf
local uasn
local crc_pf
local crc_pn
local crcp_pf
local crcp_pn
local hecf
local hecn
local fecn
local fecf
lsctg=$(dsl_cmd pmlsctg 1)
esf=$(dsl_val "$lsctg" nES)
sesf=$(dsl_val "$lsctg" nSES)
lossf=$(dsl_val "$lsctg" nLOSS)
uasf=$(dsl_val "$lsctg" nUAS)
lsctg=$(dsl_cmd pmlsctg 0)
esn=$(dsl_val "$lsctg" nES)
sesn=$(dsl_val "$lsctg" nSES)
lossn=$(dsl_val "$lsctg" nLOSS)
uasn=$(dsl_val "$lsctg" nUAS)
dpctg=$(dsl_cmd pmdpctg 0 1)
hecf=$(dsl_val "$dpctg" nHEC)
crc_pf=$(dsl_val "$dpctg" nCRC_P)
crcp_pf=$(dsl_val "$dpctg" nCRCP_P)
dpctg=$(dsl_cmd pmdpctg 0 0)
hecn=$(dsl_val "$dpctg" nHEC)
crc_pn=$(dsl_val "$dpctg" nCRC_P)
crcp_pn=$(dsl_val "$dpctg" nCRCP_P)
ccsg=$(dsl_cmd pmccsg 0 1 0)
fecf=$(dsl_val "$ccsg" nFEC)
ccsg=$(dsl_cmd pmccsg 0 0 0)
fecn=$(dsl_val "$ccsg" nFEC)
if [ "$action" = "lucistat" ]; then
echo "dsl.errors_fec_near=${fecn:-nil}"
echo "dsl.errors_fec_far=${fecf:-nil}"
echo "dsl.errors_es_near=${esn:-nil}"
echo "dsl.errors_es_far=${esf:-nil}"
echo "dsl.errors_ses_near=${sesn:-nil}"
echo "dsl.errors_ses_far=${sesf:-nil}"
echo "dsl.errors_loss_near=${lossn:-nil}"
echo "dsl.errors_loss_far=${lossf:-nil}"
echo "dsl.errors_uas_near=${uasn:-nil}"
echo "dsl.errors_uas_far=${uasf:-nil}"
echo "dsl.errors_hec_near=${hecn:-nil}"
echo "dsl.errors_hec_far=${hecf:-nil}"
echo "dsl.errors_crc_p_near=${crc_pn:-nil}"
echo "dsl.errors_crc_p_far=${crc_pf:-nil}"
echo "dsl.errors_crcp_p_near=${crcp_pn:-nil}"
echo "dsl.errors_crcp_p_far=${crcp_pf:-nil}"
else
echo "Forward Error Correction Seconds (FECS): Near: ${fecn} / Far: ${fecf}"
echo "Errored seconds (ES): Near: ${esn} / Far: ${esf}"
echo "Severely Errored Seconds (SES): Near: ${sesn} / Far: ${sesf}"
echo "Loss of Signal Seconds (LOSS): Near: ${lossn} / Far: ${lossf}"
echo "Unavailable Seconds (UAS): Near: ${uasn} / Far: ${uasf}"
echo "Header Error Code Errors (HEC): Near: ${hecn} / Far: ${hecf}"
echo "Non Pre-emtive CRC errors (CRC_P): Near: ${crc_pn} / Far: ${crc_pf}"
echo "Pre-emtive CRC errors (CRCP_P): Near: ${crcp_pn} / Far: ${crcp_pf}"
fi
}
#
# Work out how long the line has been up
#
line_uptime() {
local ccsg
local et
local etr
local d
local h
local m
local s
local rc=""
ccsg=$(dsl_cmd pmccsg 0 0 0)
et=$(dsl_val "$ccsg" nElapsedTime)
[ -z "$et" ] && et=0
d=$(expr $et / 86400)
etr=$(expr $et % 86400)
h=$(expr $etr / 3600)
etr=$(expr $etr % 3600)
m=$(expr $etr / 60)
s=$(expr $etr % 60)
[ "${d}${h}${m}${s}" -ne 0 ] && rc="${s}s"
[ "${d}${h}${m}" -ne 0 ] && rc="${m}m ${rc}"
[ "${d}${h}" -ne 0 ] && rc="${h}h ${rc}"
[ "${d}" -ne 0 ] && rc="${d}d ${rc}"
[ -z "$rc" ] && rc="down"
if [ "$action" = "lucistat" ]; then
echo "dsl.line_uptime=${et}"
echo "dsl.line_uptime_s=\"${rc}\""
else
echo "Line Uptime Seconds: ${et}"
echo "Line Uptime: ${rc}"
fi
}
#
# Get noise and attenuation figures
#
line_data() {
local lsg
local latnu
local latnd
local satnu
local satnd
local snru
local snrd
local attndru
local attndrd
local sattndru
local sattndrd
local actatpu
local actatpd
lsg=$(dsl_cmd g997lsg 1 1)
latnd=$(dsl_val "$lsg" LATN)
satnd=$(dsl_val "$lsg" SATN)
snrd=$(dsl_val "$lsg" SNR)
attndrd=$(dsl_val "$lsg" ATTNDR)
actatpd=$(dsl_val "$lsg" ACTATP)
lsg=$(dsl_cmd g997lsg 0 1)
latnu=$(dsl_val "$lsg" LATN)
satnu=$(dsl_val "$lsg" SATN)
snru=$(dsl_val "$lsg" SNR)
attndru=$(dsl_val "$lsg" ATTNDR)
actatpu=$(dsl_val "$lsg" ACTATP)
[ -z "$latnd" ] && latnd=0
[ -z "$latnu" ] && latnu=0
[ -z "$satnd" ] && satnd=0
[ -z "$satnu" ] && satnu=0
[ -z "$snrd" ] && snrd=0
[ -z "$snru" ] && snru=0
[ -z "$actatpd" ] && actatpd=0
[ -z "$actatpu" ] && actatpu=0
latnd=$(dbt $latnd)
latnu=$(dbt $latnu)
satnd=$(dbt $satnd)
satnu=$(dbt $satnu)
snrd=$(dbt $snrd)
snru=$(dbt $snru)
actatpd=$(dbt $actatpd)
actatpu=$(dbt $actatpu)
[ -z "$attndrd" ] && attndrd=0
[ -z "$attndru" ] && attndru=0
sattndrd=$(scale $attndrd)
sattndru=$(scale $attndru)
if [ "$action" = "lucistat" ]; then
echo "dsl.line_attenuation_down=\"$latnd\""
echo "dsl.line_attenuation_up=\"$latnu\""
echo "dsl.noise_margin_down=\"$snrd\""
echo "dsl.noise_margin_up=\"$snru\""
echo "dsl.signal_attenuation_down=\"$satnd\""
echo "dsl.signal_attenuation_up=\"$satnu\""
echo "dsl.actatp_down=\"$actatpd\""
echo "dsl.actatp_up=\"$actatpu\""
echo "dsl.max_data_rate_down=$attndrd"
echo "dsl.max_data_rate_up=$attndru"
echo "dsl.max_data_rate_down_s=\"$sattndrd\""
echo "dsl.max_data_rate_up_s=\"$sattndru\""
else
echo "Line Attenuation (LATN): Down: ${latnd} dB / Up: ${latnu} dB"
echo "Signal Attenuation (SATN): Down: ${satnd} dB / Up: ${satnu} dB"
echo "Noise Margin (SNR): Down: ${snrd} dB / Up: ${snru} dB"
echo "Aggregate Transmit Power (ACTATP): Down: ${actatpd} dB / Up: ${actatpu} dB"
echo "Max. Attainable Data Rate (ATTNDR): Down: ${sattndrd}/s / Up: ${sattndru}/s"
fi
}
#
# Is the line up? Or what state is it in?
#
line_state() {
local lsg=$(dsl_cmd lsg)
local ls=$(dsl_val "$lsg" nLineState);
local s;
case "$ls" in
"0x0") s="not initialized" ;;
"0x1") s="exception" ;;
"0x10") s="not updated" ;;
"0xff") s="idle request" ;;
"0x100") s="idle" ;;
"0x1ff") s="silent request" ;;
"0x200") s="silent" ;;
"0x300") s="handshake" ;;
"0x380") s="full_init" ;;
"0x400") s="discovery" ;;
"0x500") s="training" ;;
"0x600") s="analysis" ;;
"0x700") s="exchange" ;;
"0x800") s="showtime_no_sync" ;;
"0x801") s="showtime_tc_sync" ;;
"0x900") s="fastretrain" ;;
"0xa00") s="lowpower_l2" ;;
"0xb00") s="loopdiagnostic active" ;;
"0xb10") s="loopdiagnostic data exchange" ;;
"0xb20") s="loopdiagnostic data request" ;;
"0xc00") s="loopdiagnostic complete" ;;
"0x1000000") s="test" ;;
"0xd00") s="resync" ;;
"0x3c0") s="short init entry" ;;
"") s="not running daemon"; ls="0xfff" ;;
*) s="unknown" ;;
esac
if [ "$action" = "lucistat" ]; then
echo "dsl.line_state_num=$ls"
echo "dsl.line_state_detail=\"$s\""
if [ "$ls" = "0x801" ]; then
echo "dsl.line_state=\"UP\""
else
echo "dsl.line_state=\"DOWN\""
fi
else
if [ "$ls" = "0x801" ]; then
echo "Line State: UP [$ls: $s]"
else
echo "Line State: DOWN [$ls: $s]"
fi
fi
}
#
# Which profile is used?
#
profile() {
local bpstg=$(dsl_cmd bpstg)
local profile=$(dsl_val "$bpstg" nProfile);
local s;
case "$profile" in
"0") s="8a" ;;
"1") s="8b" ;;
"2") s="8c" ;;
"3") s="8d" ;;
"4") s="12a" ;;
"5") s="12b" ;;
"6") s="17a" ;;
"7") s="30a" ;;
"8") s="17b" ;;
"") s="";;
*) s="unknown" ;;
esac
if [ "$action" = "lucistat" ]; then
echo "dsl.profile=${profile:-nil}"
echo "dsl.profile_s=\"${s}\""
else
echo "Profile: $s"
fi
}
status() {
vendor
chipset
xtse
profile
line_state
errors
power_mode
latency_delay
data_rates
line_data
line_uptime
}
lucistat() {
echo "local dsl={}"
status
echo "return dsl"
}
|
981213/openwrt
|
target/linux/lantiq/base-files/lib/functions/lantiq_dsl.sh
|
Shell
|
gpl-2.0
| 16,567 |
#!/bin/sh
if [ $# -lt 1 ]; then
cat <<EOF
Usage: wintest_2k3_dc.sh TESTGROUP
EOF
exit 1;
fi
TESTGROUP=$1
if [ -z $WINTEST_DIR ]; then
echo "Environment variable WINTEST_DIR not found."
exit 1;
fi
# This variable is defined in the per-hosts .fns file for build-farm hosts that run windows tests.
if [ -z $WINTESTCONF ]; then
echo "Please point environment variable WINTESTCONF to your test_win.conf file."
exit 1;
fi
. $WINTESTCONF
. $WINTEST_DIR/wintest_functions.sh
export WIN2K3_DC_REMOTE_HOST=`perl -I$WINTEST_DIR $WINTEST_DIR/vm_get_ip.pl WIN2K3_DC_VM_CFG_PATH`
if [ -z $WIN2K3_DC_REMOTE_HOST ]; then
# Restore snapshot to ensure VM is in a known state, then exit.
restore_snapshot "Test failed to get the IP address of the windows 2003 DC." "$WIN2K3_DC_VM_CFG_PATH"
exit 1;
fi
server=$WIN2K3_DC_REMOTE_HOST
username=$WIN2K3_DC_USERNAME
password=$WIN2K3_DC_PASSWORD
domain=$WIN2K3_DC_DOMAIN
realm=$WIN2K3_DC_REALM
OPTIONS="-U$username%$password -W $domain --option realm=$realm"
all_errs=0
on_error() {
name=$1
all_errs=`expr $all_errs + 1`
restore_snapshot "$name test failed." "$WIN2K3_DC_VM_CFG_PATH"
}
drsuapi_tests() {
name="RPC-DRSUAPI on ncacn_ip_tcp with seal"
bin/smbtorture \
ncacn_ip_tcp:$server[seal] $OPTIONS \
RPC-DRSUAPI || on_error "$name"
name="RPC-DRSUAPI on ncacn_ip_tcp with seal,bigendian"
bin/smbtorture \
ncacn_ip_tcp:$server[seal,bigendian] $OPTIONS \
RPC-DRSUAPI || on_error "$name"
}
spoolss_tests() {
name="RPC-SPOOLSS on ncacn_np"
bin/smbtorture \
ncacn_np:$server $OPTIONS \
RPC-SPOOLSS || on_error "$name"
}
ncacn_ip_tcp_tests() {
bindopt=$1
transport="ncacn_ip_tcp"
tests="RPC-SCHANNEL RPC-EPMAPPER RPC-SAMR RPC-NETLOGON RPC-LSA RPC-SAMLOGON RPC-SAMSYNC RPC-MULTIBIND"
for bindoptions in $bindopt; do
for t in $tests; do
name="$t on $transport with $bindoptions"
bin/smbtorture $TORTURE_OPTIONS \
$transport:$server[$bindoptions] \
$OPTIONS $t || on_error "$name"
done
done
}
ncacn_np_tests() {
bindopt=$1
transport="ncacn_np"
tests="RPC-SCHANNEL RPC-DSSETUP RPC-EPMAPPER RPC-SAMR RPC-WKSSVC RPC-SRVSVC RPC-EVENTLOG RPC-NETLOGON RPC-LSA RPC-SAMLOGON RPC-SAMSYNC RPC-MULTIBIND RPC-WINREG"
for bindoptions in $bindopt; do
for t in $tests; do
name="$t on $transport with $bindoptions"
bin/smbtorture $TORTURE_OPTIONS \
$transport:$server[$bindoptions] \
$OPTIONS $t || on_error "$name"
done
done
}
bindoptions="padcheck connect sign seal ntlm,sign ntml,seal $VALIDATE bigendian"
case $TESTGROUP in
RPC-DRSUAPI) drsuapi_tests ;;
RPC-SPOOLSS) spoolss_tests ;;
ncacn_ip_tcp) ncacn_ip_tcp_tests $bindoptions ;;
ncacn_np) ncacn_np_tests $bindoptions ;;
*) echo "$TESTGROUP is not a known set of tests."
exit 1;
;;
esac
exit $all_errs
|
zarboz/XBMC-PVR-mac
|
tools/darwin/depends/samba/samba-3.6.6/source4/selftest/win/wintest_2k3_dc.sh
|
Shell
|
gpl-2.0
| 2,771 |
#-{
#- name: 'install_docker',
#- os: 'ubuntu',
#- version: '*'
#-}
sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9
sudo sh -c "echo deb https://get.docker.io/ubuntu docker main > /etc/apt/sources.list.d/docker.list"
sudo apt-get update
sudo apt-get install -y lxc-docker
|
nherment/tornfish
|
lib/scripts/ubuntu/docker.sh
|
Shell
|
isc
| 347 |
#!/usr/bin/env bash
# the node modules are not located in the same places
# as the webpack config files
npm install
# the project's source
cd django-rest-messaging-js
npm install
./node_modules/.bin/webpack --config ../webpack.source.config.js
cd ..
# the project's example application
cd example
npm install
./node_modules/.bin/webpack --config ../webpack.example.config.js
cd ..
|
raphaelgyory/django-rest-messaging-js
|
npm_build_modules.sh
|
Shell
|
isc
| 382 |
nohup python code/DataIngest.py --config configFiles/facebook-mongo.cfg &
|
abessou/w251-FinalProject
|
code/start_facebook.sh
|
Shell
|
mit
| 75 |
#!/usr/bin/env bash
#
# Copyright (c) 2019-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
export LC_ALL=C.UTF-8
export HOST=x86_64-apple-darwin
export PIP_PACKAGES="zmq lief"
export GOAL="install"
export BITCOIN_CONFIG="--with-gui --enable-reduce-exports"
export CI_OS_NAME="macos"
export NO_DEPENDS=1
export OSX_SDK=""
export CCACHE_SIZE=300M
export RUN_SECURITY_TESTS="true"
|
achow101/bitcoin
|
ci/test/00_setup_env_mac_host.sh
|
Shell
|
mit
| 502 |
#!/bin/bash
ScriptDir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
SeedAssemblies="mscorlib.dll;System.dll;System.Core.dll;System.ComponentModel.Composition.dll;System.Data.dll;System.IO.Compression.dll;System.IO.Compression.FileSystem.dll;System.Net.Http.dll;System.Numerics.dll;System.Numerics.Vectors.dll;System.Runtime.Serialization.dll;System.Transactions.dll;System.Xml.dll;System.Xml.Linq.dll;System.Web.Services.dll"
FacadeSeedAssemblies="System.Globalization.Extensions.dll;System.Diagnostics.StackTrace.dll;System.Runtime.Serialization.Xml.dll;System.Runtime.Serialization.Primitives.dll;System.Security.Cryptography.Algorithms.dll;System.Security.SecureString.dll;System.Xml.XPath.XDocument.dll"
$ScriptDir/../../Tools/dotnetcli/dotnet $ScriptDir/../../Tools/GenAPI.exe -assembly $SeedAssemblies -libPath /Library/Frameworks/Xamarin.iOS.framework/Versions/Current/lib/mono/Xamarin.TVOS/ -out $ScriptDir -excludeAttributesList $ScriptDir/../attributeExcludeList.txt -headerFile $ScriptDir/../../netstandard/ref/license-header.txt
$ScriptDir/../../Tools/dotnetcli/dotnet $ScriptDir/../../Tools/GenAPI.exe -assembly "OpenTK-1.0.dll;Xamarin.TVOS.dll" -libPath /Library/Frameworks/Xamarin.iOS.framework/Versions/Current/lib/mono/Xamarin.TVOS/ -out $ScriptDir -excludeAttributesList $ScriptDir/../attributeExcludeList.txt -headerFile $ScriptDir/../../netstandard/ref/license-header.txt -excludeApiList $ScriptDir/apiExcludeList.txt
$ScriptDir/../../Tools/dotnetcli/dotnet $ScriptDir/../../Tools/GenAPI.exe -assembly $FacadeSeedAssemblies -libPath /Library/Frameworks/Xamarin.iOS.framework/Versions/Current/lib/mono/Xamarin.TVOS/Facades -out $ScriptDir/Facades -excludeAttributesList $ScriptDir/../attributeExcludeList.txt -headerFile $ScriptDir/../../netstandard/ref/license-header.txt
|
weshaggard/standard
|
platforms/xamarin.tvos/seed.sh
|
Shell
|
mit
| 1,802 |
#!/bin/sh
set -e
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
install_framework()
{
if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
local source="${BUILT_PRODUCTS_DIR}/$1"
elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
elif [ -r "$1" ]; then
local source="$1"
fi
local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
if [ -L "${source}" ]; then
echo "Symlinked..."
source="$(readlink "${source}")"
fi
  # use filter instead of exclude so missing patterns don't throw errors
echo "rsync -av --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
rsync -av --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
local basename
basename="$(basename -s .framework "$1")"
binary="${destination}/${basename}.framework/${basename}"
if ! [ -r "$binary" ]; then
binary="${destination}/${basename}"
fi
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
strip_invalid_archs "$binary"
fi
# Resign the code if required by the build settings to avoid unstable apps
code_sign_if_enabled "${destination}/$(basename "$1")"
# Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]})
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
code_sign_if_enabled "${destination}/${lib}"
done
fi
}
# Signs a framework with the provided identity
code_sign_if_enabled() {
if [ -n "${EXPANDED_CODE_SIGN_IDENTITY}" -a "${CODE_SIGNING_REQUIRED}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
# Use the current code_sign_identity
echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
local code_sign_cmd="/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS} --preserve-metadata=identifier,entitlements '$1'"
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
code_sign_cmd="$code_sign_cmd &"
fi
echo "$code_sign_cmd"
eval "$code_sign_cmd"
fi
}
# Strip invalid architectures
strip_invalid_archs() {
binary="$1"
# Get architectures for current file
archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | rev)"
stripped=""
for arch in $archs; do
if ! [[ "${VALID_ARCHS}" == *"$arch"* ]]; then
# Strip non-valid architectures in-place
lipo -remove "$arch" -output "$binary" "$binary" || exit 1
stripped="$stripped $arch"
fi
done
if [[ "$stripped" ]]; then
echo "Stripped $binary of architectures:$stripped"
fi
}
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_framework "$BUILT_PRODUCTS_DIR/Alamofire/Alamofire.framework"
install_framework "$BUILT_PRODUCTS_DIR/Appirater/Appirater.framework"
install_framework "$BUILT_PRODUCTS_DIR/Aspects/Aspects.framework"
install_framework "$BUILT_PRODUCTS_DIR/AsyncSwift/Async.framework"
install_framework "$BUILT_PRODUCTS_DIR/CallbackURLKit/CallbackURLKit.framework"
install_framework "$BUILT_PRODUCTS_DIR/Cartography/Cartography.framework"
install_framework "$BUILT_PRODUCTS_DIR/CocoaAsyncSocket/CocoaAsyncSocket.framework"
install_framework "$BUILT_PRODUCTS_DIR/CocoaLumberjack/CocoaLumberjack.framework"
install_framework "$BUILT_PRODUCTS_DIR/Eureka/Eureka.framework"
install_framework "$BUILT_PRODUCTS_DIR/ICDMaterialActivityIndicatorView/ICDMaterialActivityIndicatorView.framework"
install_framework "$BUILT_PRODUCTS_DIR/ICSMainFramework/ICSMainFramework.framework"
install_framework "$BUILT_PRODUCTS_DIR/ICSPullToRefresh/ICSPullToRefresh.framework"
install_framework "$BUILT_PRODUCTS_DIR/ISO8601DateFormatter/ISO8601DateFormatter.framework"
install_framework "$BUILT_PRODUCTS_DIR/KeychainAccess/KeychainAccess.framework"
install_framework "$BUILT_PRODUCTS_DIR/KissXML/KissXML.framework"
install_framework "$BUILT_PRODUCTS_DIR/LogglyLogger-CocoaLumberjack/LogglyLogger_CocoaLumberjack.framework"
install_framework "$BUILT_PRODUCTS_DIR/MBProgressHUD/MBProgressHUD.framework"
install_framework "$BUILT_PRODUCTS_DIR/MMWormhole/MMWormhole.framework"
install_framework "$BUILT_PRODUCTS_DIR/ObjectMapper/ObjectMapper.framework"
install_framework "$BUILT_PRODUCTS_DIR/PSOperations/PSOperations.framework"
install_framework "$BUILT_PRODUCTS_DIR/Realm/Realm.framework"
install_framework "$BUILT_PRODUCTS_DIR/RealmSwift/RealmSwift.framework"
install_framework "$BUILT_PRODUCTS_DIR/SwiftColor/SwiftColor.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_framework "$BUILT_PRODUCTS_DIR/Alamofire/Alamofire.framework"
install_framework "$BUILT_PRODUCTS_DIR/Appirater/Appirater.framework"
install_framework "$BUILT_PRODUCTS_DIR/Aspects/Aspects.framework"
install_framework "$BUILT_PRODUCTS_DIR/AsyncSwift/Async.framework"
install_framework "$BUILT_PRODUCTS_DIR/CallbackURLKit/CallbackURLKit.framework"
install_framework "$BUILT_PRODUCTS_DIR/Cartography/Cartography.framework"
install_framework "$BUILT_PRODUCTS_DIR/CocoaAsyncSocket/CocoaAsyncSocket.framework"
install_framework "$BUILT_PRODUCTS_DIR/CocoaLumberjack/CocoaLumberjack.framework"
install_framework "$BUILT_PRODUCTS_DIR/Eureka/Eureka.framework"
install_framework "$BUILT_PRODUCTS_DIR/ICDMaterialActivityIndicatorView/ICDMaterialActivityIndicatorView.framework"
install_framework "$BUILT_PRODUCTS_DIR/ICSMainFramework/ICSMainFramework.framework"
install_framework "$BUILT_PRODUCTS_DIR/ICSPullToRefresh/ICSPullToRefresh.framework"
install_framework "$BUILT_PRODUCTS_DIR/ISO8601DateFormatter/ISO8601DateFormatter.framework"
install_framework "$BUILT_PRODUCTS_DIR/KeychainAccess/KeychainAccess.framework"
install_framework "$BUILT_PRODUCTS_DIR/KissXML/KissXML.framework"
install_framework "$BUILT_PRODUCTS_DIR/LogglyLogger-CocoaLumberjack/LogglyLogger_CocoaLumberjack.framework"
install_framework "$BUILT_PRODUCTS_DIR/MBProgressHUD/MBProgressHUD.framework"
install_framework "$BUILT_PRODUCTS_DIR/MMWormhole/MMWormhole.framework"
install_framework "$BUILT_PRODUCTS_DIR/ObjectMapper/ObjectMapper.framework"
install_framework "$BUILT_PRODUCTS_DIR/PSOperations/PSOperations.framework"
install_framework "$BUILT_PRODUCTS_DIR/Realm/Realm.framework"
install_framework "$BUILT_PRODUCTS_DIR/RealmSwift/RealmSwift.framework"
install_framework "$BUILT_PRODUCTS_DIR/SwiftColor/SwiftColor.framework"
fi
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
wait
fi
|
iOS-mamu/SS
|
P/Pods/Target Support Files/Pods-Potatso/Pods-Potatso-frameworks.sh
|
Shell
|
mit
| 7,249 |
#!/bin/bash
function run
{
local MY_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
local GOOG_DIR="${MY_DIR}/../goog"
local GOOG_BASH_RC="${GOOG_DIR}/bashrc.sh"
includeFile "${GOOG_BASH_RC}"
local FUNCTIONS_FILE="${MY_DIR}/functions.sh"
includeFile "${FUNCTIONS_FILE}"
local ALIAS_FILE="${MY_DIR}/alias.sh"
includeFile "${ALIAS_FILE}"
local SOURCE_FILE="${MY_DIR}/source.sh"
includeFile "${SOURCE_FILE}"
}
run
|
aawc/dotfiles
|
rc/darwin/bashrc.sh
|
Shell
|
mit
| 445 |
#!/bin/bash
set -x
set -e
set -u
[[ $(type -P "git") ]] || sudo apt-get install git
git config --global user.name "Dan Nuffer"
git config --global push.default matching
if ! [ -e ~/.ssh/id_rsa.pub ]; then
ssh-keygen
echo "Add this key to github"
cat ~/.ssh/id_rsa.pub
echo "Press enter when finished"
read ans
fi
if ! [ -d ~/myvim ]; then
pushd ~
git clone ssh://[email protected]/dnuffer/myvim
pushd ~/myvim
./install.sh
popd
popd
fi
if ! [ -e /etc/sudoers.d/$USER ]; then
OLD_MODE=`umask`
umask 0227
echo "Defaults always_set_home" | sudo tee -a /etc/sudoers.d/$USER
echo "$USER ALL=(ALL) NOPASSWD: ALL" | sudo tee -a /etc/sudoers.d/$USER
umask $OLD_MODE
fi
if [ -e /usr/bin/gconftool ]; then
# setup gnome-terminal unlimited scrollback and white on black color theme
gconftool --set /apps/gnome-terminal/profiles/Default/alternate_screen_scroll true --type bool
gconftool --set /apps/gnome-terminal/profiles/Default/scrollback_lines 512000 --type int
gconftool --set /apps/gnome-terminal/profiles/Default/use_theme_colors false --type bool
gconftool --set /apps/gnome-terminal/profiles/Default/palette '#2E2E34343636:#CCCC00000000:#4E4E9A9A0606:#C4C4A0A00000:#34346565A4A4:#757550507B7B:#060698209A9A:#D3D3D7D7CFCF:#555557575353:#EFEF29292929:#8A8AE2E23434:#FCFCE9E94F4F:#72729F9FCFCF:#ADAD7F7FA8A8:#3434E2E2E2E2:#EEEEEEEEECEC' --type string
gconftool --set /apps/gnome-terminal/profiles/Default/background_color '#000000000000' --type string
gconftool --set /apps/gnome-terminal/profiles/Default/bold_color '#000000000000' --type string
gconftool --set /apps/gnome-terminal/profiles/Default/foreground_color '#FFFFFFFFFFFF' --type string
fi
if [ -e /usr/bin/gsettings ]; then
# Set keyboard shortcuts (remove workspace switching keys which conflict with ides)
gsettings set org.gnome.desktop.wm.keybindings switch-to-workspace-up "['']"
gsettings set org.gnome.desktop.wm.keybindings switch-to-workspace-down "['']"
gsettings set org.gnome.desktop.wm.keybindings switch-to-workspace-left "['']"
gsettings set org.gnome.desktop.wm.keybindings switch-to-workspace-right "['']"
fi
if ! grep -q ccache ~/.bashrc && [ -d /usr/lib/ccache ]; then
echo "export PATH=/usr/lib/ccache:\$PATH" >> ~/.bashrc
fi
if ! grep -q "Eternal bash history" ~/.bashrc; then
cat >> ~/.bashrc << EOS
# Eternal bash history.
# http://superuser.com/questions/137438/how-to-unlimited-bash-shell-history
# ---------------------
# Undocumented feature which sets the size to "unlimited".
# http://stackoverflow.com/questions/9457233/unlimited-bash-history
export HISTFILESIZE=
export HISTSIZE=
export HISTTIMEFORMAT="[%F %T] "
# Change the file location because certain bash sessions truncate .bash_history file upon close.
# http://superuser.com/questions/575479/bash-history-truncated-to-500-lines-on-each-login
export HISTFILE=~/.bash_eternal_history
# Force prompt to write history after every command.
# http://superuser.com/questions/20900/bash-history-loss
PROMPT_COMMAND="history -a; \$PROMPT_COMMAND"
EOS
fi
# Disable ctrl-s pause
if ! grep -q "stty -ixon" ~/.bashrc; then
cat >> ~/.bashrc << EOS
stty -ixon
EOS
fi
if ! grep -q StrictHostKeyChecking ~/.ssh/config; then
echo 'Host *' >> ~/.ssh/config
echo " StrictHostKeyChecking no" >> ~/.ssh/config
fi
if ! [ -e ~/.Renviron ]; then
echo 'R_LIBS_USER="~/.Rlibs"' > ~/.Renviron
fi
if ! [ -e ~/.Rlibs ]; then
mkdir ~/.Rlibs
fi
if ! [ -e ~/bin ]; then
mkdir ~/bin
fi
if ! grep -q \$HOME/bin ~/.bashrc; then
echo "export PATH=\$HOME/bin:\$PATH" >> ~/.bashrc
fi
if ! grep -q termcapinfo ~/.screenrc; then
echo "termcapinfo xterm* ti@:te@" >> ~/.screenrc
fi
if grep -q docker /etc/group; then
sudo usermod -aG docker $USER
fi
# shut up parallel's stupid citation message
mkdir -p ~/.parallel
touch ~/.parallel/will-cite
if ! [ -e ~/.inputrc ]; then
cat >> ~/.inputrc << EOS
\$include /etc/inputrc
EOS
fi
# for reference: https://stackoverflow.com/questions/5029118/bash-ctrl-to-move-cursor-between-words-strings
# Use bind to change the current shell. e.g.: $ bind '"\eOC":forward-word'
# To see the code for a key-combo: cat > /dev/null (^[ == \e)
# 'bind -p' will print all the possible mappings
if ! grep -q "mappings for Ctrl-left-arrow and Ctrl-right-arrow for word moving" ~/.inputrc; then
cat >> ~/.inputrc << EOS
# mappings for Ctrl-left-arrow and Ctrl-right-arrow for word moving
"\e[1;5C": forward-word
"\e[1;5D": backward-word
"\e[5C": forward-word
"\e[5D": backward-word
"\e\e[C": forward-word
"\e\e[D": backward-word
"\eOC": forward-word
"\eOD": backward-word
EOS
fi
if ! grep -q "force_color_prompt=yes" ~/.bashrc; then
sed -i -e 's/^#force_color_prompt=yes$/force_color_prompt=yes/g' ~/.bashrc
fi
|
dnuffer/setup
|
user_common.sh
|
Shell
|
mit
| 4,699 |
#!/usr/bin/env bash
# Current script location
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# Load common
source "${DIR}/load-common.sh"
# Create default file if not exist
function make_default {
local app_conf_file=${APP_CONF_FILE:-conf/application.conf}
touch "$app_conf_file" || exit
}
|
amannocci/streamy
|
core/src/universal/bin/make-default.sh
|
Shell
|
mit
| 305 |
#!/bin/sh
cd $HOME/hlserver
./steamcmd.sh +runscript csgo_ds.txt
|
tflori/basic-images
|
csgo/update.sh
|
Shell
|
mit
| 65 |
#!/bin/bash
DISK="/dev/$1"
echo DISK="$DISK"
# BIOS/MBR layout
# -s : slience
parted -s "$DISK" mklabel msdos
parted -s -a optimal "$DISK"
# /boot (100MiB), / (20GiB), swap (4GiB), the remainder goes to /home
parted -s "$DISK" mkpart primary ext4 1024K 100M
parted -s "$DISK" set 1 boot on
parted -s "$DISK" mkpart primary ext4 100M 20G
parted -s "$DISK" mkpart primary linux-swap 20G 24G
parted -s "$DISK" mkpart primary ext4 24G 100%
parted -l
PBOOT="${DISK}1"
PROOT="${DISK}2"
PSWAP="${DISK}3"
PHOME="${DISK}4"
mkfs.ext4 -F "$PBOOT"
mkfs.ext4 -F "$PROOT"
mkswap "$PSWAP"
swapon "$PSWAP"
mkfs.ext4 -F "$PHOME"
|
JxMeta/JxArchLinux
|
Installation/parted4p.sh
|
Shell
|
mit
| 605 |
#!/bin/bash
# Actifio Copy Data Storage Scripting Team
# Copyright (c) 2018 Actifio Inc. All Rights Reserved
# This script refreshes mounts
# Version 1.0 Initial Release
# Version 1.1 Specify Component Type 0
# Declare variables used in the script.
match=0
# Now check for inputs
while getopts :a:c:i:u:j:l:m:t:hd opt
do
case "$opt"
in
a) appnum="$OPTARG";;
d) deleteonly=y;;
i) ipaddress="$OPTARG";;
j) jobclass="$OPTARG";;
l) mountlabel="$OPTARG";;
m) mountpoint="$OPTARG";;
t) hostnum="$OPTARG";;
u) username="$OPTARG";;
h) help=y;;
esac
done
# test for attempts to get help
if [ "$1" == "-h" ] || [ "$1" == "-?" ] || [ "$1" == "-help" ] || [ "$1" == "--help" ]
then
echo "
***************************************************************************************************
This script is intended for the following scenario:
1) There is a source application (known as 'appnum') which is being protected by Actifio
2) There are hosts which need to access an image of this source app
When run this script will:
a) Unmount and delete any existing mount to the specified target host with the specified label
b) Present the latest image in the selected jobclass to the target host.
Labels are used to identify mounts.
To ensure the process works correctly these labels must be unique or the script will not work reliably.
The script will have several variables which must be set at script run time:
-a <number> To select the source application ID
-d To delete the image without running a new mount job
-i <ip addr> To specify the Appliance to execute commands against (this could also be a host name)
-j <value> To specify the jobclass you wish to mount from
-l <value> To specify the label to be used for mounted and to find the mount later
-m <value> To specify the mount point to be used when mounting (optional)
-t <number> To select the target host to mount to (either host ID or host name)
-u <username> To specify the user name to run commands against
An example of the complete syntax is as follows.
This will access an Actifio appliance with IP address 192.168.103.145.
The username on the Appliance is install.
The source application is 17172 and the desired jobclass is snapshot.
The image will be mounted to a host called hq-sql and the label used will be pegasus
mountimage.sh -i 192.168.103.145 -u install -a 17172 -t hq-sql -l pegasus -j snapshot
***************************************************************************************************"
exit 0
fi
# If we don't have an IP address we will complain. Don't test for numeric in case they use names
if [ -z "$ipaddress" ];then
echo "Please use a valid IP address with: -i <ipaddress>"
echo "For instance for ipaddress 1.2.3.4: -i 1.2.3.4"
exit 0
fi
# If we don't have a username we will complain
if [ -z "$username" ]; then
echo "Please specify a username with: -u <username>"
echo "For instance for username george: -u george"
exit 0
fi
# If we don't have an app ID or the app ID is not numeric we will complain
if [ -z "$appnum" ] || [ -n "`echo $appnum | sed 's/[0-9]//g'`" ]; then
echo "Please use a numeric appid with: -a <appid>"
echo "For instance for appid 1234: -a 1234"
exit 0
fi
# If we don't have a host ID we will complain - can be name or ID
if [ -z "$hostnum" ]; then
echo "Please use a valid host id with: -t <hostid>"
echo "For instance for host id 5678: -t 5678"
echo "Or for instance host name hq-sql: -t hq-sql"
exit 0
fi
# If we don't have a label we will complain
if [ -z "$mountlabel" ]; then
echo "Please specify a label with: -l <labelname>"
echo "For instance for label name pegasus: -l pegasus"
exit 0
fi
# If we didn't get a jobclass, then complain
if [ -z "$jobclass" ] ; then
echo "Please specify a jobclass with: -j <jobclass>"
echo "For instance for snapshots use: -j snapshot"
exit 0
fi
#First define valid jobclass list. You can just add to the list if one got missed!
jobclasslist="dedup
dedupasync
directdedup
liveclone
remote-dedup
snapshot"
# Lets just make sure we can search for that class
for jobtype in $jobclasslist; do
[[ "$jobclass" == "$jobtype" ]] && match=1
done
if [ "$match" -ne 1 ];then
echo "$jobclass is not a valid Job Class."
echo "Please use one of the following:"
echo "$jobclasslist"
exit 0
fi
# does the host exist
hostid=$(ssh $username@$ipaddress udsinfo lshost -delim , $hostnum 2>&1 | awk -F"," '$1=="id" { print $2 }')
if [ -z $hostid ]; then
echo "The host specified does not exist"
echo "Validate host name or ID using: udsinfo lshost"
exit 0
fi
# is there an image to mount?
newimageid=$(ssh $username@$ipaddress "udsinfo lsbackup -filtervalue jobclass=$jobclass\&appid=$appnum\&componenttype=0 -nohdr -delim } 2>&1 | tail -1 | cut -d} -f19")
if [ -z $newimageid ] && [ "$deleteonly" != "y" ]; then
echo "There are no images in that jobclass so there is nothing to mount"
echo "Try a different jobclass or validate you are using the correct application ID"
exit 0
fi
# Label check
echo "Checking for mounts to host $hostnum with a label of $mountlabel"
mountedname=$(ssh $username@$ipaddress "udsinfo lsbackup -filtervalue label=$mountlabel\&jobclass=mount\&mountedhost=$hostid -delim } -nohdr | cut -d} -f19")
# Check if we found more than one mount, exit if we did
mountcount=$(echo "$mountedname" | wc -l)
if [ $mountcount -gt 1 ]; then
echo "There are multiple mounts with the same label $mountlabel. Please use unique labels when mounting. Please use a different label or manually unmount the other mounts"
echo "The mounts are as follows:"
ssh $username@$ipaddress "udsinfo lsbackup -filtervalue label=$mountlabel\&jobclass=mount\&mountedhost=$hostid"
exit 0
fi
# If we found one mount then unmount it
if [ -n "$mountedname" ]; then
echo "Unmounting and deleting the existing mount $mountedname with label $mountlabel"
unmount=$(ssh $username@$ipaddress "udstask unmountimage -delete -image $mountedname -nowait 2>&1")
unmountjob=$(echo "$unmount" | cut -d" " -f1)
# Now monitor the running job
while true; do
jobcheck=$(ssh $username@$ipaddress udsinfo lsjob -delim } $unmountjob 2> /dev/null)
if [ -z "$jobcheck" ]; then
history=$(ssh $username@$ipaddress udsinfo lsjobhistory -delim } $unmountjob)
status=$(echo "$history" | awk -F"}" '$1=="status" { print $2 }')
duration=$(echo "$history" | awk -F"}" '$1=="duration" { print $2 }')
if [ "$status" == "succeeded" ]; then
echo "Unmount Job Results:"
echo "Status: $status"
echo "Duration: $duration"
echo ""
else
echo "An error occurred while unmounting the image with label $mountlabel, please investigate $unmountjob"
echo -n "The message for this failed job was: "
ssh $username@$ipaddress udsinfo lsjobhistory -delim } $unmountjob| awk -F"}" '$1=="message" { print $2 }'
exit 0
fi
break
else
data=$(echo "$jobcheck" | awk -F"}" '$1=="progress" { print $2 }')
echo "$unmountjob progress: $data%"
sleep 5
fi
done
else
echo "There were no mounted images with label $mountlabel"
fi
if [ "$deleteonly" == "y" ]; then
exit 0
fi
# Now mount the latest image in jobclass to the target host
echo "Mounting $jobclass image $newimageid to host $hostnum"
if [ -n "$mountpoint" ]; then
mount=$(ssh $username@$ipaddress "udstask mountimage -image $newimageid -host $hostnum -label $mountlabel -restoreoption "mountpointperimage=$mountpoint" -nowait 2>&1")
else
mount=$(ssh $username@$ipaddress "udstask mountimage -image $newimageid -host $hostnum -label $mountlabel -nowait 2>&1")
fi
mountjob=$(echo "$mount" | cut -d" " -f1)
# Now monitor the running job
while true; do
jobcheck=$(ssh $username@$ipaddress udsinfo lsjob -delim } $mountjob 2> /dev/null)
if [ -z "$jobcheck" ]; then
history=$(ssh $username@$ipaddress udsinfo lsjobhistory -delim } $mountjob)
status=$(echo "$history" | awk -F"}" '$1=="status" { print $2 }')
duration=$(echo "$history" | awk -F"}" '$1=="duration" { print $2 }')
if [ "$status" == "succeeded" ]; then
echo "Mount Job Results:"
echo "Status: $status"
echo "Duration: $duration"
else
echo "An error occurred while mounting the image to $hostnum, please investigate $mountjob"
echo -n "The message for this failed job was: "
ssh $username@$ipaddress udsinfo lsjobhistory -delim } $mountjob| awk -F"}" '$1=="message" { print $2 }'
exit 0
fi
break
else
data=$(echo "$jobcheck" | awk -F"}" '$1=="progress" { print $2 }')
echo "$mountjob progress: $data%"
sleep 5
fi
done
|
Actifio/ShellScripts
|
LinuxMount/mountimage.sh
|
Shell
|
mit
| 8,836 |
#!/usr/bin/env bash
# Install Caskroom
brew tap caskroom/cask
brew tap caskroom/versions
# Install packages
apps=(
1password
dash
docker
dotnet
dotnet-sdk
dropbox
evernote
firefoxdeveloperedition
franz
gitter
google-chrome
iterm2
skype
slack
spotify
sublime-text
sugarsync
virtualbox
virtualbox-extension-pack
visual-studio-code
)
# gyazo
# google-drive
# spectacle
# flux
# imagealpha
# imageoptim
# atom
# webstorm
# malwarebytes-anti-malware
# glimmerblocker
# hammerspoon
# kaleidoscope
# macdown
# opera
# screenflow
# tower
# transmit
# elmedia-player
# utorrent
brew cask install "${apps[@]}"
# Quick Look Plugins (https://github.com/sindresorhus/quick-look-plugins)
#brew cask install qlcolorcode qlstephen qlmarkdown quicklook-json qlprettypatch quicklook-csv betterzipql qlimagesize webpquicklook suspicious-package
|
diogodamiani/dotfiles
|
install/brew-cask.sh
|
Shell
|
mit
| 972 |
#!/bin/bash
# Exercise 1
if [ $# -lt 2 ]; then
echo "[ERROR] Mínimo dos argumentos";
else
case $1 in
'for');;
'while');;
'until');;
*)
echo "[ERROR] El primer argumento debe ser for, while o until"
exit -1
;;
esac
shift
while [ $# -gt 0 ];
do
echo $1
shift
done
fi
|
junquera/apuntes-admon-ssoo
|
S6/ejercicio1.sh
|
Shell
|
mit
| 296 |
#! /bin/bash
# exit if a command fails
set -eo pipefail
# install node & npm
apt-get update && apt-get install -y nodejs npm
# alias as node
ln -sf /usr/bin/nodejs /usr/bin/node
# install redis commander
npm install -g redis-commander
# cleanup package manager
apt-get autoclean && apt-get clean
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
|
schickling/dockerfiles
|
redis-commander/install.sh
|
Shell
|
mit
| 347 |
#!/bin/bash
apt-get install -qy oracle-java7-installer
apt-get install -qy oracle-java7-set-default
|
danilohiggs/vm1-camunda
|
install-java.sh
|
Shell
|
mit
| 101 |
BGO_OPT_DEL=":" # bash options delimiter
BGO_CASE_DEL="|" # switch case template separator
BGO_A_DEL=" " # array delimiter
|
hanovruslan/bash-get-options
|
src/env.sh
|
Shell
|
mit
| 123 |
# This file is sourced at the end of a first-time dotfiles install.
shopt -s expand_aliases
source ~/.bashrc
cat <<EOF
Remember!
* Sync your private dotfiles
* Manually load all your app store purchases
* Sign into all your apps and sync everything those can sync
* scp -r old_machine:~/* new_machine:~/
* be careful with ~/Library/Preferences/com.apple.*
** cp -r ~/Library/Preferences ~/Library/Preferences-Backup
** cp -ir ~/Desktoper/Prefs/* ~/Library/Preferences/
EOF
# docs.hardentheworld.org/OS/MacOS_10.12_Sierra/index.html
cat <<EOF
Check the following:
* Finder ⇒ Preferences ⇒ Advanced
* Finder ⇒ Preferences ⇒ Sidebar
* System Preferences ⇒ General -> Handoff
* System Preferences ⇒ General -> Recent Items
* System Preferences ⇒ Security & Privacy ⇒ Advanced -> Require Admin
* System Preferences ⇒ Security & Privacy ⇒ FileVault
* System Preferences ⇒ Security & Privacy ⇒ Firewall
* System Preferences ⇒ Security & Privacy ⇒ General -> Require Pass
* System Preferences ⇒ Security & Privacy ⇒ Privacy
* System Preferences ⇒ Security & Privacy ⇒ Privacy ⇒ Diagnostics & Usage
* System Preferences ⇒ Security & Privacy ⇒ Privacy ⇒ Location Services
* System Preferences ⇒ Spotlight
* System Preferences ⇒ Users & Groups ⇒ Guest User
* System Preferences ⇒ Users & Groups ⇒ Login Options -> Password Hints
EOF
|
lonnen/dotfiles
|
conf/firsttime_reminder.sh
|
Shell
|
mit
| 1,406 |
#!/bin/bash
# This intended to be called from a git / svn hook (like the supplied
# examples of such).
#
# Arguments are:
#
# $1: the full path to the kerouac executable
#
# $2: the directory of the kerouac root
#
# $3: the name of this project
#
# $4: the tag of this build
#
# $5: the log file containing the description of changes represented in this
# build
#
# This script is intended, in general, to:
#
# - call kerouac with the supplied information and actually kick off the build
#
# - do any notifications / other work based on the result of that build
#
# - clean up the log file
#
# You may copy this script as a starting point to your repo and check it in,
# where it can be customized per branch (if, for example, you want
# notifications on a particular branch to be sent to a different set of
# recipients).
#
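# A hypothetical invocation, for illustration only (the paths, project name,
# and tag below are made-up placeholders; a real git/svn hook supplies these
# arguments automatically):
#
#   ./ci_build.sh /usr/local/bin/kerouac /var/lib/kerouac myproject v1.2.3 /tmp/changes.log
#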
###########################################################################
# Configuration #
# #
# (note that as written all values will pick up and prefer ENV settings). #
###########################################################################
# If MAIL_TO is set, notifications will be sent to these addresses. If it is
# empty, no notifications will be sent.
MAIL_TO=${MAIL_TO:-""}
# MAIL_TO="[email protected] [email protected]"
NOTIFY_ON_FAILURE="YES"
#NOTIFY_ON_FAILURE="NO"
NOTIFY_ON_SUCCESS="YES"
#NOTIFY_ON_SUCCESS="NO"
# This command will be passed the subject and the list of MAIL_TO addresses.
MAIL_CMD=${MAIL_CMD:-"/usr/bin/env mail -s"}
# Where to find the kerouac configuration in the repo.
KEROUAC_CONFIG_NAME=${KEROUAC_CONFIG_NAME:-"kerouac.json"}
KEROUAC_BUILD_FLAGS="--remove-src"
#############
# Arguments #
#############
KEROUAC=$1
KEROUAC_ROOT=$2
PROJECT=$3
TAG=$4
LOG_FILE=$5
###############################
# Actually run the build. #
###############################
$KEROUAC build $KEROUAC_BUILD_FLAGS . $KEROUAC_CONFIG_NAME $KEROUAC_ROOT $PROJECT $TAG
if [ $? != "0" ]
then
STATUS=FAILED
else
STATUS=SUCCEEDED
fi
########################################################
# Get the output from the build logs into the log file #
########################################################
echo >> $LOG_FILE
echo 'Kerouac log output:' >> $LOG_FILE
cat $($KEROUAC print kerouaclogpath $KEROUAC_ROOT $PROJECT $TAG) >> $LOG_FILE
echo >> $LOG_FILE
echo 'Build stdout:' >> $LOG_FILE
cat $($KEROUAC print stdoutpath $KEROUAC_ROOT $PROJECT $TAG) >> $LOG_FILE
echo >> $LOG_FILE
echo 'Build stderr:' >> $LOG_FILE
cat $($KEROUAC print stderrpath $KEROUAC_ROOT $PROJECT $TAG) >> $LOG_FILE
#####################################
# Send notifications if appropriate #
#####################################
if [ "$MAIL_TO" != "" ]
then
if [ $STATUS == "FAILED" ] && [ $NOTIFY_ON_FAILURE == "YES" ]
then
cat $LOG_FILE | $MAIL_CMD "$PROJECT build $TAG failed" $MAIL_TO
elif [ $NOTIFY_ON_SUCCESS == "YES" ]
then
cat $LOG_FILE | $MAIL_CMD "$PROJECT build $TAG succeeded" $MAIL_TO
fi
fi
rm $LOG_FILE
|
hut8labs/kerouac
|
extra/ci_build.sh
|
Shell
|
mit
| 3,134 |
#!/bin/bash
declare -a on_exit_items
on_exit(){
for i in "${on_exit_items[@]}"
do
#echo "on_exit: $i"
eval $i
done
}
add_on_exit(){
local n=${#on_exit_items[*]}
on_exit_items[$n]="$*"
if [[ $n -eq 0 ]]; then
#echo "Setting trap"
trap on_exit EXIT
fi
}
fctcheck_apt_upd(){
apt-get update 2>&1 >/dev/null
if [ $? != 0 ] ; then
echo "An error occured in fctcheck_apt_upd"
else
fctdownload_apt
fi
}
fctdownload_apt(){
apt-get dist-upgrade -d -y 2>&1 >/dev/null
}
fctapt_install(){
apt-get upgrade -y
}
fctafetch(){
fctcheck_apt_upd
}
fctsrc_folder_gitlisting(){
cd $SRC_FOLDER
add_on_exit rm -f /tmp/gitlisting.$$
add_on_exit rm -f /tmp/tmplisting.$$
ls -d -1 -a */.git > /tmp/tmplisting.$$
sed 's/\.git//' </tmp/tmplisting.$$ >/tmp/gitlisting.$$
}
fctgpull(){
fctsrc_folder_gitlisting
while read line; do
#disable self update
if [ "$SRC_FOLDER/$line" != "$SCRIPTPATH/" ] ; then
cd $SRC_FOLDER/$line
pwd
git pull
fi
done </tmp/gitlisting.$$
}
fcthelp(){
echo "Usage: $0 [MODE]"
echo " possible MODE toggle :"
echo " * afetch [A]"
echo " rpiupd [A]"
echo " gpull [I]"
echo " ainstall [ÃI]"
echo " total [I]"
echo ""
echo " [I] = interactive mode"
echo " [A] = automatic mode"
}
main(){
if [ $# -eq 1 ] ; then
DEFAULT_ACTION=$1
echo "forcing DefAct = $DEFAULT_ACTION"
fi
case $DEFAULT_ACTION in
afetch)
fctafetch
;;
gpull)
fctgpull
;;
ainstall)
fctafetch
fctapt_install
;;
rpiupd)
rpi-update
;;
total)
fctafetch
fctapt_install
fctgpull
if [ "$SPECIFIC_HARD" = "raspberrypi" ] ; then
rpi-update
fi
;;
help)
fcthelp
;;
esac
}
SCRIPTFULLPATH=$(readlink -f $0)
SCRIPTPATH=`dirname $SCRIPTFULLPATH`
ORIGINAL_FOLDER=`pwd`
SRC_FOLDER="/usr/src"
DEFAULT_ACTION="afetch"
SPECIFIC_HARD="raspberrypi"
if [ -f $SCRIPTPATH/config ] ; then
# Override previous settings
. $SCRIPTPATH/config
fi
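# A hypothetical example of the optional ./config override file (the values
# below are placeholders; any of the variables defined above may be set):
#
# DEFAULT_ACTION="total"
# SRC_FOLDER="/home/pi/src"
# SPECIFIC_HARD="raspberrypi"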
main $@
on_exit
cd $ORIGINAL_FOLDER
|
nicoolaj/auto-rpi-updater
|
auto-rpi-updater.sh
|
Shell
|
mit
| 2,011 |
#!/bin/bash
# package
./package.sh
# launch
java -Dfile.encoding=utf-8 -jar ./target/quartz-0.0.1-SNAPSHOT.jar
|
rickding/Hello
|
HelloQuartz/launch.sh
|
Shell
|
mit
| 113 |
#!/bin/bash
cd tests/heavy
if [ -d "examples" ]; then
cd examples
git checkout .
git pull
else
git clone https://github.com/d4rkr00t/aik-examples.git examples
fi
|
d4rkr00t/aik
|
tests/update-examples.sh
|
Shell
|
mit
| 172 |
#!/usr/bin/env bash
echo "cleaning up log directory"
#function clean_up_2() {
# if [[ ! -d "$1" ]]; then
#
# echo "Argument 1 should be the path of an existing directory" 1>&2
# exit 1
# fi
# rm -r "$1"
#}
#
#!/usr/bin/env bash
function clean_up() {
declare -r directory="$1"
if [[ ! -d "$directory" ]]; then
echo "Argument 1 should be the path of an existing directory" 1>&2
exit 1
fi
rm -r "$directory"
}
#clean_up "build"
clean_up "$1"  # directory to remove is taken from the script's first argument
|
heylve/first
|
clean_up.sh
|
Shell
|
mit
| 509 |
#!/bin/bash
#######################
# configure Gateway 2 #
#######################
[ -f ipsec.conf ] || exit 0
[ -f ipsec.secrets ] || exit 0
[ -f strongswan.conf ] || exit 0
strongswan stop
echo ' Configuring...'
cp -f ipsec.conf /etc/strongswan/ipsec.conf
cp -f ipsec.secrets /etc/strongswan/ipsec.secrets
cp -f strongswan.conf /etc/strongswan/strongswan.conf
echo ' Strongswan configured!'
|
xXcoronaXx/strongswanConf
|
getaway2/strongswan/conf.sh
|
Shell
|
mit
| 401 |
#!/bin/bash
git checkout gh-pages
git pull
git add -A
git commit -m ${1}
git push origin gh-pages
|
margulies/pres
|
x.commit.sh
|
Shell
|
mit
| 99 |
#!/bin/bash
echo "Enabling I2C"
modprobe i2c-dev
case "$PAPERTRAIL_ON" in
true) bash /usr/src/app/config/papertrail.sh ;;
*) echo "Papertrail not enabled" ;;
esac
case "$LOCAL_SSH_ON" in
true) bash /usr/src/app/config/openssh.sh ;;
*) echo "Local SSH not enabled" ;;
esac
case "$PROMETHEUS_ON" in
true) bash /usr/src/app/config/prometheus.sh ;;
*) echo "Prometheus not enabled" ;;
esac
python cnavsense/main.py
|
konradko/cnav-sense
|
start.sh
|
Shell
|
mit
| 431 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for USN-3048-1
#
# Security announcement date: 2016-08-08 00:00:00 UTC
# Script generation date: 2017-01-01 21:05:34 UTC
#
# Operating System: Ubuntu 14.04 LTS
# Architecture: i686
#
# Vulnerable packages fix on version:
# - libcurl3-nss:7.35.0-1ubuntu2.8
# - libcurl3-gnutls:7.35.0-1ubuntu2.8
# - libcurl3:7.35.0-1ubuntu2.8
#
# Last versions recommended by security team:
# - libcurl3-nss:7.35.0-1ubuntu2.10
# - libcurl3-gnutls:7.35.0-1ubuntu2.10
# - libcurl3:7.35.0-1ubuntu2.10
#
# CVE List:
# - CVE-2016-5419
# - CVE-2016-5420
# - CVE-2016-5421
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade libcurl3-nss=7.35.0-1ubuntu2.10 -y
sudo apt-get install --only-upgrade libcurl3-gnutls=7.35.0-1ubuntu2.10 -y
sudo apt-get install --only-upgrade libcurl3=7.35.0-1ubuntu2.10 -y
|
Cyberwatch/cbw-security-fixes
|
Ubuntu_14.04_LTS/i686/2016/USN-3048-1.sh
|
Shell
|
mit
| 966 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for DSA-2814-1
#
# Security announcement date: 2013-12-09 00:00:00 UTC
# Script generation date: 2017-01-01 21:06:46 UTC
#
# Operating System: Debian 6 (Squeeze)
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - varnish:2.1.3-8+deb6u1
#
# Last versions recommended by security team:
# - varnish:2.1.3-8+deb6u1
#
# CVE List:
# - CVE-2013-4484
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade varnish=2.1.3-8+deb6u1 -y
|
Cyberwatch/cbw-security-fixes
|
Debian_6_(Squeeze)/x86_64/2013/DSA-2814-1.sh
|
Shell
|
mit
| 615 |
#!/usr/bin/env bash
set -xeu
IFS=$'\n\t'
if [ "$TRAVIS_PHP_VERSION" = '7.1' ] ; then
wget https://scrutinizer-ci.com/ocular.phar
php ocular.phar code-coverage:upload --format=php-clover ./clover.xml
fi
|
Roave/composer-gpg-verify
|
.travis.coverage.sh
|
Shell
|
mit
| 212 |
#!/bin/sh
# bump_cfbundleversion.sh
#
# Created by Tim Kelly, 2014-2015
#
# Summary: This file takes an input of a .plist file that contains a "Bundle Version"
# This script will get the number of git commits and make that the revision.
# It then takes the display version from the project and adds on the git rev count and updates the .plist file.
#
#
# Prerequisites: XCode 6.1 and valid project -Info.plist file and your project in a git repository.
#
# Usage: $sh bump_cfbundleversion.sh <path to <Project>-Info.plist with CFBundleIdentifier
#
#
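# Hypothetical example (substitute the path to your own project's Info.plist):
#
#   sh bump_cfbundleversion.sh ./MyApp/MyApp-Info.plist
#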
INFOPLIST_PATH=$1
if [ ! -f "$INFOPLIST_PATH" ]; then
echo "$INFOPLIST_PATH file not found!"
exit 1
fi
# The agvtool gets the version from the CURRENT_PROJECT_VERSION value
# Under your projects Build Settings, make sure you have "Current Project Version" value set.
CURVERSION=`agvtool vers -terse`
echo "current version $CURVERSION"
# get the number of git commits
REV=$(git rev-list HEAD --count)
echo REV=$REV
# set the current app version in the Info.plist
/usr/libexec/PlistBuddy -c "Set :CFBundleVersion $CURVERSION.$REV" "${INFOPLIST_PATH}"
CURVERSION=`agvtool vers -terse`
echo "current version $CURVERSION"
|
austimkelly/utils
|
bump_cfbundleversion.sh
|
Shell
|
mit
| 1,179 |
#! /bin/bash
set -e
source /build/buildconfig
set -x
readonly PACKAGE="0.5.0_linux_amd64.zip"
wget https://dl.bintray.com/mitchellh/serf/$PACKAGE
unzip $PACKAGE && rm $PACKAGE
mv serf /usr/local/bin/ && mkdir /etc/service/serf
|
hivetech/batcave
|
warehouse/base/build/services/serf.sh
|
Shell
|
mit
| 229 |
#!/usr/bin/env bash
#This script can quickly rename files within the dDocent naming convention. It needs to be passed a tab-delimited
#file with the old name in one column and the new name in the other
#Example#
#PopA_001 NewPop_001
#
# This will rename PopA_001.F.fq.gz and PopA_001.R.fq.gz to NewPop_001.F.fq.gz and NewPop_001.R.fq.gz
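# A hypothetical end-to-end example (the names file below is a placeholder):
#
# printf 'PopA_001\tNewPop_001\nPopA_002\tNewPop_002\n' > newnames.txt
# bash Rename_SequenceFiles.sh newnames.txt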
if [ -z "$1" ]
then
echo "No file with old names and new names specified."
echo "Correct usage: Rename_for_dDocent.sh namesfile"
exit 1
else
NAMES=( `cut -f2 $1 `)
BARCODES=( `cut -f1 $1 `)
LEN=( `wc -l $1 `)
LEN=$(($LEN - 1))
echo ${NAMES[0]}
echo ${BARCODES[0]}
for ((i = 0; i <= $LEN; i++));
do
mv ${BARCODES[$i]}.F.fq.gz ${NAMES[$i]}.F.fq.gz
mv ${BARCODES[$i]}.R.fq.gz ${NAMES[$i]}.R.fq.gz &>/dev/null
done
fi
|
chollenbeck/dDocent
|
scripts/Rename_SequenceFiles.sh
|
Shell
|
mit
| 758 |
#!/bin/bash
echo -------------------------------------------------------------------
echo Disabling the FURG proxy
echo -------------------------------------------------------------------
## Make a .bkp backup copy before moving the files
echo
echo
echo Making a backup copy of the configuration files ...
echo
echo
cp /etc/bash.bashrc /etc/bash.bashrc.bkp
cp /etc/environment /etc/environment.bkp
cp /etc/apt/apt.conf /etc/apt/apt.conf.bkp
cp ~/.wgetrc ~/.wgetrc.bkp
cp ~/.Renviron ~/.Renviron.bkp
echo
echo
echo Configuring /etc/bash.bashrc ...
cp ~/.proxy/bash.bashrc_no-proxy /etc/bash.bashrc
echo
echo
echo Configuring /etc/environment ...
cp ~/.proxy/environment_no-proxy /etc/environment
echo
echo
echo Configuring /etc/apt/apt.conf ...
cp ~/.proxy/apt.conf_no-proxy /etc/apt/apt.conf
echo
echo
echo Configuring ~/.wgetrc ...
cp ~/.proxy/wgetrc_no-proxy ~/.wgetrc
echo
echo
echo Configuring ~/.Renviron ...
cp ~/.proxy/Renviron_no-proxy ~/.Renviron
echo
echo
echo -------------------------------------------------------------------
echo Proxy disabled successfully.
echo -------------------------------------------------------------------
echo
echo
|
fernandomayer/linux-config
|
proxy/proxy-OFF.sh
|
Shell
|
mit
| 1,182 |
#!/bin/bash
db=${1-'catarse_api_test'}
user=`whoami`
port=8888
exit_code=0
postgrest_bin='unknown'
unamestr=`uname`
ver='0.3.0.3'
dir='postgrest'
schema_log='logs/schema_load.log'
data_log='logs/data_load.log'
if [[ "$unamestr" == 'Linux' ]]; then
postgrest_bin="postgrest-$ver-linux"
elif [[ "$unamestr" == 'Darwin' ]]; then
postgrest_bin="postgrest-$ver-osx"
fi
if [[ "$postgrest_bin" == "unknown" ]]; then
echo "Platform $unamestr is not supported by the postgrest binaries."
fi
echo "Initiating database users..."
createuser --no-login web_user > /dev/null 2>&1
createuser --no-login admin > /dev/null 2>&1
createuser --no-login anonymous > /dev/null 2>&1
createuser catarse -s > /dev/null 2>&1
createuser postgrest -g admin -g web_user -g anonymous > /dev/null 2>&1
echo "Initiating database schema..."
dropdb --if-exists $db
createdb $db
psql --set ON_ERROR_STOP=1 $db < ./database/schema.sql > $schema_log 2>&1
if [[ $? -ne 0 ]]; then
echo "Error restoring test schema. Take a look at ${schema_log}:"
tail -n 5 $schema_log
exit 1
fi
echo "Populating database..."
psql --set ON_ERROR_STOP=1 -v db=$db $db < ./database/data.sql > $data_log 2>&1
if [[ $? -ne 0 ]]; then
echo "Error restoring test data. Take a look at ${data_log}:"
tail -n 5 $data_log
exit 1
fi
echo "Initiating PostgREST server ./$dir/$postgrest_bin ..."
./$dir/$postgrest_bin "postgres://postgrest@localhost/$db" --schema "1" -a anonymous -p $port --jwt-secret gZH75aKtMN3Yj0iPS4hcgUuTwjAzZr9C > logs/postgrest.log 2>&1 &
echo "Running tests..."
sleep 2
for f in test/*.yml
do
echo ""
echo "$f..."
pyresttest http://localhost:$port $f
if [[ $? -ne 0 ]]; then
exit_code=1
fi
done
echo ""
echo "Terminating PostgREST server..."
killall $postgrest_bin
echo "Done."
exit $exit_code
|
catarse/catarse-api-specs
|
src/run_tests.sh
|
Shell
|
mit
| 1,820 |
#!/bin/sh -x
#
# mybashrc setting
#
# author: Atsushi Sakai
#
# echo "Source mybashrc"
source ~/dotfiles/src/esh/esh.sh
alias rm='rm -i'
alias cp='cp -i'
alias mv='mv -i'
# Better ls
if type "exa" > /dev/null 2>&1; then
# Install for mac: brew install exa
# Install for ubuntu: apt install exa
# Install for windows: not provided.
alias ls='exa --color=auto'
fi
# Better cat
if type "bat" > /dev/null 2>&1; then
# Install for mac: brew install bat
# Install for ubuntu: apt install bat
# Install for windows: choco install bat
alias cat='bat'
fi
# Better grep
if type "rg" > /dev/null 2>&1; then
# Install for mac: brew install ripgrep
# Install for ubuntu: apt install ripgrep
# Install for windows: choco install ripgrep
alias grep='rg'
fi
# Better find
if type "fd" > /dev/null 2>&1; then
# Install for mac: brew install fd-find
# Install for ubuntu: apt install fd
# Install for windows: choco install fd
alias find='fd'
fi
# open finder emulation
if type "xdg-open" > /dev/null 2>&1; then # for ubuntu
alias Open='xdg-open .'
fi
function add_upstream() {
url=$(git config --get remote.origin.url)
repo=$(basename ${url##*/} .git)
user=$(echo "$url" | awk -F/ '{print $4}')
remote=$(curl -s "https://api.github.com/repos/$user/$repo" | jq -r '.parent.clone_url')
echo "upstream is " $remote
if [ ! -z "$remote" ]; then
git remote add upstream "$remote"
else
echo "no upstream found"
fi
}
function parse_git_branch() {
git branch 2> /dev/null | sed -e '/^[^*]/d' -e 's/* \(.*\)/ (\1)/'
}
export PS1="\W\[\033[32m\]\$(parse_git_branch)\[\033[00m\] $ "
# ==== bash history setting ====
export HISTSIZE=10000
# save command history immediately setting
shopt -s histappend
PROMPT_COMMAND="history -a;$PROMPT_COMMAND"
# bash completion for mac
[ -f /usr/local/etc/bash_completion ] && . /usr/local/etc/bash_completion
# for percol setting
source ~/dotfiles/mypercol.bash
[ -f ~/.fzf.bash ] && source ~/.fzf.bash
# for enhancd setting
source ~/dotfiles/src/enhancd/init.sh
export ENHANCD_FILTER=fzf
# alias
#alias jupercol='find . -name "*.ipynb" -not -name '*checkpoint*'| percol | xargs jupyter notebook'
alias jupercol='find . -name "*.ipynb" -not -name '*checkpoint*'| percol | xargs jupyter lab'
alias plain_vim='vim -u NONE'
|
AtsushiSakai/dotfiles
|
mybashrc.bash
|
Shell
|
mit
| 2,363 |
URL="/"
TEMPLATE_IN="views/index.st"
TEMPLATE_OUT="views/index.html"
RENDER="./render.sh"
METHOD=$1
data_k=()
data_v=()
i=1;
for arg in "$@"
do
if [ $i -eq 1 ]
then
i=$((i+1))
continue;
fi
if [ $(($i % 2)) -eq 1 ]
then
data_v+=($arg);
else
data_k+=($arg);
fi
i=$((i+1))
done
#echo "URL: $URL"
#echo "METHOD: $METHOD"
#echo "KEYS: ${data_k[*]}"
#echo "VALUES: ${data_v[*]}"
###### DON'T EDIT ABOVE HERE ########
# unless you know what you're doing #
#####################################
## Basic usage of this file:
## database calls
## webpage generation
## calling the template processor
##
## Automagic variables:
## $URL is the page url
## $METHOD is the request method
## $TEMPLATE_IN is the template before it is rendered
## $TEMPLATE_OUT is the rendered html
## $RENDER is the rendering tool
## $data_k is an array of the get/post keys
## $data_v is an array of the get/post values
####################################
## WHEN FINISHED
# To output to the client, echo to standard output
# Note that you must echo an HTTP header first.
# For your convenience, some common header files are available
# in ../headers/ :
# HTTP200OK
# HTTP404NOTFOUND
#
# To use just run
# cat ../headers/HTTP200OK
##
####################################
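## A minimal sketch of a typical handler body (not part of the original
## template). It assumes render.sh takes the input template and the output
## file as its two arguments -- check the real render.sh interface before use:
#
# cat ../headers/HTTP200OK
# "$RENDER" "$TEMPLATE_IN" "$TEMPLATE_OUT"
# cat "$TEMPLATE_OUT"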
|
kjcjohnson/nasT
|
nTemplates/handler_temp.sh
|
Shell
|
mit
| 1,320 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for DSA-2369-1
#
# Security announcement date: 2011-12-21 00:00:00 UTC
# Script generation date: 2017-01-01 21:06:19 UTC
#
# Operating System: Debian 6 (Squeeze)
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - libsoup2.4:2.30.2-1+squeeze1
#
# Last versions recommended by security team:
# - libsoup2.4:2.30.2-1+squeeze1
#
# CVE List:
# - CVE-2011-2524
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade libsoup2.4=2.30.2-1+squeeze1 -y
|
Cyberwatch/cbw-security-fixes
|
Debian_6_(Squeeze)/x86_64/2011/DSA-2369-1.sh
|
Shell
|
mit
| 633 |
#!/bin/bash
. env.sh
# remove old installers
rm -f Disk\ Image/*.pkg
if [ "`uname`" == "Darwin" ]; then sed_regexp="-E"; else sed_regexp="-r"; fi
GIT_VERSION="${1:-`curl http://git-scm.com/ 2>&1 | grep "<span class='version'>" | sed $sed_regexp 's/^.+>([0-9.]+)<.+$/\1/'`}"
./build.sh $GIT_VERSION
PACKAGE_NAME="git-$GIT_VERSION-intel-universal-snow-leopard"
echo $PACKAGE_NAME | pbcopy
rm -f Disk\ Image/*.pkg
sudo bash -c "$PACKAGE_MAKER_APP/Contents/MacOS/PackageMaker --doc Git\ Installer.pmdoc/ -o Disk\ Image/$PACKAGE_NAME.pkg --title 'Git $GIT_VERSION'"
UNCOMPRESSED_IMAGE_FILENAME="$PACKAGE_NAME.uncompressed.dmg"
IMAGE_FILENAME="$PACKAGE_NAME.dmg"
rm -f $UNCOMPRESSED_IMAGE_FILENAME $IMAGE_FILENAME
hdiutil create $UNCOMPRESSED_IMAGE_FILENAME -srcfolder "Disk Image" -volname "Git $GIT_VERSION Snow Leopard Intel Universal" -ov
hdiutil convert -format UDZO -o $IMAGE_FILENAME $UNCOMPRESSED_IMAGE_FILENAME
rm $UNCOMPRESSED_IMAGE_FILENAME
echo "Testing the installer..."
. test_installer.sh
echo "Git Installer $GIT_VERSION - OS X - Snow Leopard - Intel Universal" | pbcopy
open "http://code.google.com/p/git-osx-installer/downloads/entry"
sleep 1
open "./"
echo "To upload build, run:"
echo
echo "./upload-to-github.rb timcharper timcharper/git_osx_installer $IMAGE_FILENAME 'Git Installer $GIT_VERSION - OS X - Snow Leopard - Intel Universal'"
|
juvenal/git_osx_installer
|
build_package_test_and_bundle.sh
|
Shell
|
mit
| 1,367 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for RHSA-2015:2086
#
# Security announcement date: 2015-11-18 17:27:06 UTC
# Script generation date: 2017-01-13 21:18:21 UTC
#
# Operating System: Red Hat 5
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - java-1.6.0-openjdk.x86_64:1.6.0.37-1.13.9.4.el5_11
# - java-1.6.0-openjdk-debuginfo.x86_64:1.6.0.37-1.13.9.4.el5_11
# - java-1.6.0-openjdk-demo.x86_64:1.6.0.37-1.13.9.4.el5_11
# - java-1.6.0-openjdk-devel.x86_64:1.6.0.37-1.13.9.4.el5_11
# - java-1.6.0-openjdk-javadoc.x86_64:1.6.0.37-1.13.9.4.el5_11
# - java-1.6.0-openjdk-src.x86_64:1.6.0.37-1.13.9.4.el5_11
#
# Last versions recommended by security team:
# - java-1.6.0-openjdk.x86_64:1.6.0.41-1.13.13.1.el5_11
# - java-1.6.0-openjdk-debuginfo.x86_64:1.6.0.41-1.13.13.1.el5_11
# - java-1.6.0-openjdk-demo.x86_64:1.6.0.41-1.13.13.1.el5_11
# - java-1.6.0-openjdk-devel.x86_64:1.6.0.41-1.13.13.1.el5_11
# - java-1.6.0-openjdk-javadoc.x86_64:1.6.0.41-1.13.13.1.el5_11
# - java-1.6.0-openjdk-src.x86_64:1.6.0.41-1.13.13.1.el5_11
#
# CVE List:
# - CVE-2015-4734
# - CVE-2015-4803
# - CVE-2015-4805
# - CVE-2015-4806
# - CVE-2015-4835
# - CVE-2015-4842
# - CVE-2015-4843
# - CVE-2015-4844
# - CVE-2015-4860
# - CVE-2015-4872
# - CVE-2015-4881
# - CVE-2015-4882
# - CVE-2015-4883
# - CVE-2015-4893
# - CVE-2015-4903
# - CVE-2015-4911
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo yum install java-1.6.0-openjdk.x86_64-1.6.0.41 -y
sudo yum install java-1.6.0-openjdk-debuginfo.x86_64-1.6.0.41 -y
sudo yum install java-1.6.0-openjdk-demo.x86_64-1.6.0.41 -y
sudo yum install java-1.6.0-openjdk-devel.x86_64-1.6.0.41 -y
sudo yum install java-1.6.0-openjdk-javadoc.x86_64-1.6.0.41 -y
sudo yum install java-1.6.0-openjdk-src.x86_64-1.6.0.41 -y
|
Cyberwatch/cbw-security-fixes
|
Red_Hat_5/x86_64/2015/RHSA-2015:2086.sh
|
Shell
|
mit
| 1,915 |
#!/sbin/sh
. /tmp/backuptool.sh
list_files() {
cat <<EOF
EOF
}
case "$1" in
backup)
list_files | while read FILE DUMMY; do
backup_file $S/$FILE
done
;;
restore)
list_files | while read FILE REPLACEMENT; do
R=""
[ -n "$REPLACEMENT" ] && R="$S/$REPLACEMENT"
[ -f "$C/$S/$FILE" ] && restore_file $S/$FILE $R
done
;;
pre-backup)
# Stub
;;
post-backup)
# Stub
;;
pre-restore)
# Stub
;;
post-restore)
# Stub
;;
esac
|
Oi-Android/gapps_kitchen
|
build-tools/extras/gapps.sh
|
Shell
|
mit
| 493 |
eval $(docker-machine env default)
docker exec sonrasc_db_1 mkdir /data/backup/
docker cp "$1" sonrasc_db_1:/data/backup/
docker exec sonrasc_db_1 mongorestore -d db /data/backup/db
#bash restore-db.sh ./backup/2016-04-24-15\:53\:09/db
|
brennj55/Sonrasc
|
db_backup/restore-db.sh
|
Shell
|
mit
| 236 |
#!/bin/bash
if [ $# -lt 1 ] || [ $1 = -l ] && [ $# -lt 2 ]
then
echo error: not enough arguments 1>&2
exit 1
fi
if [ $1 = -l ]
then
shift
min=$1
shift
find "$@" -type f -exec wc -c {} \; | cut -f1 -d' ' | sort -n | head -$min
else
min=$1
shift
find "$@" -type f -exec wc -c {} \; | sort -n | head -$min
fi
|
rgeorgiev583/os-2015-2016
|
midterm/solutions/min.alt.sh
|
Shell
|
mit
| 346 |
#!/bin/bash
set -e
if [ ! -e "$HOME/.egulden/coin.conf" ]; then
mkdir -p "$HOME/.egulden"
echo "Creating coin.conf"
cat <<EOF > $HOME/.egulden/coin.conf
rpcuser=${RPCUSER:-eguldenrpc}
rpcpassword=${RPCPASSWORD:-`dd if=/dev/urandom bs=33 count=1 2>/dev/null | base64`}
printtoconsole=1
EOF
fi
cat $HOME/.egulden/coin.conf
chown -R egulden:egulden $HOME
echo -n "Executing: "
su -c "${@:1}" - egulden
|
Electronic-Gulden-Foundation/eguldend-docker
|
docker-entrypoint.sh
|
Shell
|
mit
| 411 |
#!/bin/sh
set -e
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
install_framework()
{
if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
local source="${BUILT_PRODUCTS_DIR}/$1"
else
local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
fi
local destination="${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
if [ -L "${source}" ]; then
echo "Symlinked..."
source="$(readlink "${source}")"
fi
# use filter instead of exclude so missing patterns don't throw errors
echo "rsync -av --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
rsync -av --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
# Resign the code if required by the build settings to avoid unstable apps
code_sign_if_enabled "${destination}/$(basename "$1")"
# Embed linked Swift runtime libraries
local basename
basename="$(basename "$1" | sed -E s/\\..+// && exit ${PIPESTATUS[0]})"
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}/${basename}.framework/${basename}" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]})
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
code_sign_if_enabled "${destination}/${lib}"
done
}
# Signs a framework with the provided identity
code_sign_if_enabled() {
if [ -n "${EXPANDED_CODE_SIGN_IDENTITY}" -a "${CODE_SIGNING_REQUIRED}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
# Use the current code_sign_identity
echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
echo "/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} --preserve-metadata=identifier,entitlements \"$1\""
/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} --preserve-metadata=identifier,entitlements "$1"
fi
}
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_framework 'Pods-BMAccordion_Example/BMAccordion.framework'
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_framework 'Pods-BMAccordion_Example/BMAccordion.framework'
fi
|
blackmirror-media/BMAccordion
|
Example/Pods/Target Support Files/Pods-BMAccordion_Example/Pods-BMAccordion_Example-frameworks.sh
|
Shell
|
mit
| 2,620 |
#!/usr/bin/env sh
# Specify the preferences directory
defaults write com.googlecode.iterm2.plist PrefsCustomFolder -string "$ZSH/iterm"
# Tell iTerm2 to use the custom preferences in the directory
defaults write com.googlecode.iterm2.plist LoadPrefsFromCustomFolder -bool true
|
bendrucker/dotfiles
|
iterm/install.sh
|
Shell
|
mit
| 278 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for USN-2739-1
#
# Security announcement date: 2015-09-10 00:00:00 UTC
# Script generation date: 2017-01-01 21:04:46 UTC
#
# Operating System: Ubuntu 14.04 LTS
# Architecture: i686
#
# Vulnerable packages fix on version:
# - libfreetype6:2.5.2-1ubuntu2.5
#
# Last versions recommended by security team:
# - libfreetype6:2.5.2-1ubuntu2.5
#
# CVE List:
# - CVE-2014-9745
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade libfreetype6=2.5.2-1ubuntu2.5 -y
|
Cyberwatch/cbw-security-fixes
|
Ubuntu_14.04_LTS/i686/2015/USN-2739-1.sh
|
Shell
|
mit
| 632 |
#!/bin/sh
###############################################################################
# Script for automatically computing and plotting surface displacements
# generated by an earthquake in an elastic half-space.
###############################################################################
gmt set PS_MEDIA letter
if [ ! -f polar_mwh.cpt ]; then
cat > polar_mwh.cpt << EOF
# Simulates the POLAR colormap in Matlab
# Modified to make small values white
-1 blue -0.1 white
-0.1 white 0.1 white
0.1 white 1 red
EOF
fi
###############################################################################
# The user can specify the following variables:
# PSFILE Name of the output PostScript file
# NN Density of the computation grid (NN x NN)
# SAMP Sampling interval (from the NN x NN grid) for plotting vectors
# DISP_THR (Horizontal) displacement threshold for plotting as bold vectors
###############################################################################
# Name of output PostScript file
PSFILE="surf_disp.ps"
PSFILE="$OFILE.ps"
# Background vertical displacement grid is (NN x NN) points
NN="100"
#NN="50"
# Horizontal vectors sampled every SAMP points from (NN x NN) grid
SAMP="5"
NN_SAMP=`echo $NN $SAMP | awk '{print int($1/$2)}'`
# Horizontal displacements below DISP_THR will be faded
DISP_THR="0.05" # meters
###############################################################################
# PARSE COMMAND LINE TO GET SOURCE TYPE AND FILE NAME
###############################################################################
function USAGE() {
echo
echo "Usage: surf_disp.sh SRC_TYPE SRC_FILE [-Rw/e/s/n] [-seg] [-Tvmin/vmax/dv] [-getscript] [-novector] [-o FILENAME]"
echo " SRC_TYPE Either MT (moment tensor), FFM (finite fault model), or FSP (SRCMOD format finite fault model)"
echo " SRC_FILE Name of input file"
echo " MT: EVLO EVLA EVDP STR DIP RAK MAG"
echo " FFM: finite fault model in static subfault format"
echo " FSP: finite fault model in SRCMOD FSP format"
echo " -Rw/e/s/n Define map limits"
echo " -seg Plot segmented finite faults"
echo " -Tvmin/vmax/dv Define vertical color bar limits"
echo " -getscript Copy surf_disp.sh to working directory"
echo " -novector Do not plot horizontal vectors"
echo " -o FILENAME Define basename for output file (will produce FILENAME.ps and FILENAME.pdf)"
echo
exit
}
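# Hypothetical example runs (the input file names are placeholders):
#   ./surf_disp.sh MT my_event_mt.dat
#   ./surf_disp.sh FFM my_ffm_model.dat -R140/145/35/40 -o my_event_disp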
if [ $# -lt 2 ]
then
echo "!! Error: SRC_TYPE and SRC_FILE arguments required"
echo "!! Map limits (-Rw/e/s/n) optional"
USAGE
fi
SRC_TYPE="$1"
SRC_FILE="$2"
# Check that source type is correct
if [ $SRC_TYPE != "FFM" -a $SRC_TYPE != "MT" -a $SRC_TYPE != "FSP" ]
then
echo "!! Error: source type must be FFM, FSP, or MT"
USAGE
fi
# Check that input file exists
if [ ! -f $SRC_FILE ]
then
echo "!! Error: no source file $SRC_FILE found"
USAGE
fi
# Parse optional arguments
LIMS=""
SEG="0"
VERT_CPT_RANGE=""
GETSCRIPT="N"
PLOT_VECT="Y"
OFILE="surf_disp"
shift;shift
while [ "$1" != "" ]
do
case $1 in
-R*) LIMS="$1";;
-T*) VERT_CPT_RANGE="$1";;
-seg) SEG="1" ;;
-novect*) PLOT_VECT="N" ;;
-getscript) GETSCRIPT="Y" ;;
-o) shift;OFILE="$1" ;;
*) echo "!! Error: no option \"$1\""; USAGE;;
esac
shift
done
PSFILE="$OFILE.ps"
if [ $GETSCRIPT == "Y" ]
then
cp $0 .
fi
###############################################################################
# The appearance of displacements plotted on the map is controlled by awk
# commands created within this script. To adjust the coloring, scaling and
# labeling on the figure, adjust these awk commands as necessary.
###############################################################################
# Define the value at which the color bar for vertical displacements
# will saturate, based on maximum vertical displacements.
# IF (MAXIMUM VERTICAL DISPLACEMENT >= THRESHOLD) {USE THIS SATURATION VALUE}
cat > vert_scale_max.awk << EOF
{
if (\$1>=2) {print 2}
else if (\$1>=1) {print 1}
else if (\$1>=0.5) {print 0.5}
else if (\$1>=0.2) {print 0.2}
else {print 0.1}
}
EOF
# Define the annotation increment for the vertical displacement scale bar,
# based on the saturation value above.
# IF (MAXIMUM VERTICAL DISPLACEMENT >= THRESHOLD) {USE THIS ANNOTATION INCREMENT}
cat > vert_scale_lbl.awk << EOF
{
if (\$1>=2) {print 0.5}
else if (\$1>=1) {print 0.2}
else if (\$1>=0.5) {print 0.1}
else if (\$1>=0.2) {print 0.05}
else {print 0.02}
}
EOF
# Use the maximum horizontal displacement to define the length of the
# vector in the legend.
# IF (MAXIMUM HORIZONTAL DISPLACEMENT >= THRESHOLD) {USE THIS LENGTH IN METERS AS LEGEND VECTOR}
cat > vect_label.awk << EOF
{
if (\$1>10) {print 5}
else if (\$1>5) {print 2}
else if (\$1>1) {print 1}
else {print 0.5}
}
EOF
# Use the maximum horizontal displacement to define the vector scaling.
# Larger earthquakes should have a smaller scale factor for all of the
# vectors to fit on the map.
# IF (MAXIMUM HORIZONTAL DISPLACEMENT >= THRESHOLD) {USE THIS VECTOR SCALING}
cat > vect_scale.awk << EOF
{
if (\$1>10) {print 0.3}
else if (\$1>5) {print 0.8}
else if (\$1>1) {print 1.6}
else {print 4}
}
EOF
###############################################################################
###############################################################################
# Everything below this point should be automated. This script requires the
# tools O92UTIL, GRID, and FF2GMT from Matt's codes, and creates the figure
# using GMT 5 commands. All of the work is performed in the same directory
# that the script is run from.
###############################################################################
###############################################################################
#####
# INPUT FILES FOR DISPLACEMENT CALCULATION
#####
if [ $SRC_TYPE == "FFM" ]
then
# Copy FFM to new file name
cp $SRC_FILE ./ffm.dat
elif [ $SRC_TYPE == "FSP" ]
then
# Copy FSP to new file name
cp $SRC_FILE ./fsp.dat
else
# Copy MT to new file name
cp $SRC_FILE ./mt.dat
fi
# Elastic half-space properties
LAMDA="4e10" # Lame parameter
MU="4e10" # Shear modulus
echo "Lame $LAMDA $MU" > haf.dat
#####
# SET UP COMPUTATION GRID
#####
Z="0.0" # Depth is zero on the surface
if [ -z $LIMS ]
then
# Use "-auto" option in O92UTIL to get rough map limits
D="10" # Large initial increment, to get map limits without taking much time
if [ $SRC_TYPE == "FFM" ]
then
o92util -ffm ffm.dat -auto h $Z $D -haf haf.dat -disp disp.out > auto.dat
elif [ $SRC_TYPE == "FSP" ]
then
o92util -fsp fsp.dat -auto h $Z $D -haf haf.dat -disp disp.out > auto.dat
else
o92util -mag mt.dat -auto h $Z $D -haf haf.dat -disp disp.out -gmt rect.out > auto.dat
fi
rm autosta.dat
W=`grep " W: " auto.dat | awk '{print $2}'`
E=`grep " E: " auto.dat | awk '{print $2}'`
S=`grep " S: " auto.dat | awk '{print $2}'`
N=`grep " N: " auto.dat | awk '{print $2}'`
echo "Starting map limits: $W $E $S $N"
# Determine if map has decent aspect ratio and correct as necessary
# Mercator projection x and y lengths
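# (The y extent below uses the Mercator ordinate y(lat) = ln(tan(pi/4 + lat/2)),
# with latitudes converted to radians by the 0.01745 factor; dividing by 0.017
# converts the result back to roughly degree-equivalent units so it can be
# compared directly with the longitude span.)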
X=`echo $W $E | awk '{print $2-$1}'`
Y=`echo $S $N |\
awk '{
v2 = log(sin(3.14159/4+$2/2*0.01745)/cos(3.14159/4+$2/2*0.01745))
v1 = log(sin(3.14159/4+$1/2*0.01745)/cos(3.14159/4+$1/2*0.01745))
print v2-v1
}' |\
awk '{print $1/0.017}'`
# Check map aspect ratio (no skinnier than 1.4:1)
FIX=`echo $X $Y |\
awk '{
if ($1>1.4*$2) {print "fixx"}
else if ($2>1.4*$1) {print "fixy"}
else {print 1}
}'`
# Reduce map limits in long dimension
if [ $FIX == "fixx" ]
then
NEW=`echo $W $E $Y | awk '{print 0.5*($1+$2)-$3*0.70,0.5*($1+$2)+$3*0.70}'`
W=`echo $NEW | awk '{print $1}'`
E=`echo $NEW | awk '{print $2}'`
elif [ $FIX == "fixy" ]
then
NEW=`echo $S $N $X $Y |\
awk '{print 0.5*($1+$2)-0.7*$3/$4*($2-$1),0.5*($1+$2)+0.7*$3/$4*($2-$1)}'`
S=`echo $NEW | awk '{print $1}'`
N=`echo $NEW | awk '{print $2}'`
fi
# Round map limits to nearest 0.1
W=`echo "$W $E" | awk '{printf("%.1f"),$1}'`
E=`echo "$W $E" | awk '{printf("%.1f"),$2}'`
S=`echo "$S $N" | awk '{printf("%.1f"),$1}'`
N=`echo "$S $N" | awk '{printf("%.1f"),$2}'`
echo "Final map limits: $W $E $S $N"
else
W=`echo $LIMS | sed -e "s/\// /g" -e "s/-R//" | awk '{print $1}'`
E=`echo $LIMS | sed -e "s/\// /g" -e "s/-R//" | awk '{print $2}'`
S=`echo $LIMS | sed -e "s/\// /g" -e "s/-R//" | awk '{print $3}'`
N=`echo $LIMS | sed -e "s/\// /g" -e "s/-R//" | awk '{print $4}'`
echo "Using map limits from command line: $W $E $S $N"
fi
# Create (NN x NN) point horizontal grid
grid -x $W $E -nx $NN -y $S $N -ny $NN -z $Z -o sta.dat
grid -x $W $E -nx $NN_SAMP -y $S $N -ny $NN_SAMP -z $Z -o sta_samp.dat
#####
# COMPUTE SURFACE DISPLACEMENTS
#####
if [ $SRC_TYPE == "FFM" ]
then
o92util -ffm ffm.dat -sta sta.dat -haf haf.dat -disp disp.out -prog
o92util -ffm ffm.dat -sta sta_samp.dat -haf haf.dat -disp disp_samp.out -prog
elif [ $SRC_TYPE == "FSP" ]
then
o92util -fsp fsp.dat -sta sta.dat -haf haf.dat -disp disp.out -prog
o92util -fsp fsp.dat -sta sta_samp.dat -haf haf.dat -disp disp_samp.out -prog
else
o92util -mag mt.dat -sta sta.dat -haf haf.dat -disp disp.out -prog
o92util -mag mt.dat -sta sta_samp.dat -haf haf.dat -disp disp_samp.out -prog
fi
# Extract maximum vertical displacements and determine scale parameters for gridding
MINMAX=`awk '{print $6}' disp.out | awk 'BEGIN{mn=1e10;mx=-1e10}{if($1<mn){mn=$1};if($1>mx){mx=$1}}END{print mn,mx}'`
V1=`echo $MINMAX | awk '{if($1<0){print $1*(-1)}else{print $1}}'`
V2=`echo $MINMAX | awk '{if($2<0){print $2*(-1)}else{print $2}}'`
T=`echo $V1 $V2 | awk '{if($1>$2){print $1}else{print $2}}' | awk -f vert_scale_max.awk`
DT=`echo $T | awk -f vert_scale_lbl.awk`
#####
# PLOT RESULTS
#####
PORTRAIT=`echo $X $Y | awk '{if($1<$2){print "-P"}}'`
PROJ="-JM5i $PORTRAIT"
LIMS="-R$W/$E/$S/$N"
# Colored grid of vertical displacements plotted under horizontal displacement vectors
if [ -z "$VERT_CPT_RANGE" ]
then
gmt makecpt -T-$T/$T/0.01 -C./polar_mwh.cpt -D > vert.cpt
else
gmt makecpt $VERT_CPT_RANGE -C./polar_mwh.cpt -D > vert.cpt
fi
awk '{print $1,$2,$6}' disp.out | gmt xyz2grd -Gvert.grd $LIMS -I$NN+/$NN+
gmt grdimage vert.grd $PROJ $LIMS -Cvert.cpt -Y1.5i -K > $PSFILE
gmt psscale -D2.5i/-0.8i/5.0i/0.2ih -Cvert.cpt -Ba$DT -Bg$DT \
-B+l"Vertical Displacement (m)" -Al -K -O >> $PSFILE
# Map stuff
ANNOT=`echo $W $E | awk '{if($2-$1<=10){print 1}else{print 2}}'`
gmt psbasemap $PROJ $LIMS -Bxa${ANNOT} -Bya1 -BWeSn -K -O --MAP_FRAME_TYPE=plain >> $PSFILE
gmt pscoast $PROJ $LIMS -W1p,105/105/105 -G205/205/205 -N1/0.5p -Dh -K -O -t85 >> $PSFILE
# Plot FFM slip contours
if [ $SRC_TYPE == "FFM" -o $SRC_TYPE == "FSP" ]
then
case $SRC_TYPE in
FFM)OPT="-ffm ffm.dat";;
FSP)OPT="-fsp fsp.dat";;
esac
if [ $SEG -eq 0 ]
then
ff2gmt $OPT -slip slip.out -clip clip.out -epi epi.out
else
ff2gmt $OPT -slip slip.out -clipseg clip.out -epi epi.out
fi
MAXSLIP=`awk '{print $3}' slip.out | awk 'BEGIN{mx=0}{if($1>mx){mx=$1}}END{print mx}' | awk '{print $1}'`
CONT=`echo $MAXSLIP |\
awk '{
if ($1>=50) {print 10}
else if ($1>=20) {print 5}
else if ($1>=10) {print 2}
else if ($1>=2) {print 1}
else {print 0.5}
}'`
echo $CONT $MAXSLIP | awk '{for (i=$1;i<=$2;i=i+$1){print i,"C"}}' > junk
awk '{print $1,$2,$3}' slip.out |\
gmt surface -Gslip.grd -I0.10/0.10 -Tb1 -Ti0.25 $LIMS
gmt psclip clip.out $PROJ $LIMS -K -O >> $PSFILE
gmt grdcontour slip.grd $PROJ $LIMS -W1p,205/205/205 -Cjunk -K -O -t40 >> $PSFILE
gmt psclip -C -K -O >> $PSFILE
gmt psxy clip.out $PROJ $LIMS -W1p,205/205/205 -K -O -t40 >> $PSFILE
rm junk
else
awk '{print $1,$2,$4,$5,$6}' rect.out |\
gmt psxy $PROJ $LIMS -SJ -W1p,205/205/205 -K -O -t40 >> $PSFILE
fi
# Plot epicenter
if [ $SRC_TYPE == "FFM" -o $SRC_TYPE == "FSP" ]
then
LONX=`awk '{print $1}' epi.out`
LATX=`awk '{print $2}' epi.out`
#LONX=`sed -n -e "3p" ffm.dat | sed -e "s/.*Lon:/Lon:/" | awk '{print $2}'`
#LATX=`sed -n -e "3p" ffm.dat | sed -e "s/.*Lon:/Lon:/" | awk '{print $4}'`
echo $LONX $LATX |\
gmt psxy $PROJ $LIMS -Sa0.15i -W1p,55/55/55 -K -O -t50 >> $PSFILE
fi
if [ $PLOT_VECT == "Y" ]
then
# If max displacement is much larger than other displacements, don't use it
MAXLN=`awk '{print sqrt($4*$4+$5*$5)}' disp_samp.out |\
awk 'BEGIN{m1=0;m2=0}
{if($1>m1){m2=m1;m1=$1;ln=NR}}
END{if(m1>2*m2){print ln}else{print 0}}'`
# Scale vectors differently depending on maximum horizontal displacement
MAX=`awk '{if(NR!='"$MAXLN"'){print sqrt($4*$4+$5*$5)}}' disp_samp.out |\
awk 'BEGIN{mx=0}{if($1>mx){mx=$1}}END{print mx}' | awk '{print $1}'`
DISP_LBL=`echo $MAX | awk -f vect_label.awk`
VEC_SCALE=`echo $MAX | awk -f vect_scale.awk`
MAX=0.5
## Plot displacements smaller than DISP_THR faded
#awk '{
# if (sqrt($4*$4+$5*$5)<'"$DISP_THR"') {
# print $1,$2,atan2($4,$5)/0.01745,'"$VEC_SCALE"'*sqrt($4*$4+$5*$5)
# }
#}' disp.out |\
# gmt psxy $PROJ $LIMS -SV10p+e+a45 -W2p,175/175/175 -K -O >> $PSFILE
## Plot larger displacements in black
#awk '{
# if (sqrt($4*$4+$5*$5)>='"$DISP_THR"'&&NR!='"$MAXLN"') {
# print $1,$2,atan2($4,$5)/0.01745,'"$VEC_SCALE"'*sqrt($4*$4+$5*$5)
# }
#}' disp.out |\
# gmt psxy $PROJ $LIMS -SV10p+e+a45 -W2p,black -K -O >> $PSFILE
# Scale vector thickness by displacement magnitude
# Plot displacements smaller than DISP_THR faded
awk '{
if (sqrt($4*$4+$5*$5)<'"$DISP_THR"') {
print $1,$2,atan2($4,$5)/0.01745,'"$VEC_SCALE"'*sqrt($4*$4+$5*$5)
}
}' disp_samp.out |\
gmt psxy $PROJ $LIMS -SV10p+e+a45+n${MAX} -W2p,175/175/175 -K -O >> $PSFILE
# Plot larger displacements in black
awk '{
if (sqrt($4*$4+$5*$5)>='"$DISP_THR"'&&NR!='"$MAXLN"') {
print $1,$2,atan2($4,$5)/0.01745,'"$VEC_SCALE"'*sqrt($4*$4+$5*$5)
}
}' disp_samp.out |\
gmt psxy $PROJ $LIMS -SV10p+e+a45+n${MAX} -W2p,black -K -O >> $PSFILE
fi
if [ $PLOT_VECT == "Y" ]
then
# Legend (all coordinates are in cm from the bottom left)
gmt psxy -JX10c -R0/10/0/10 -W1p -Gwhite -K -O >> $PSFILE << EOF
0.2 0.2
0.2 1.5
`echo $VEC_SCALE $DISP_LBL | awk '{print $1*$2+0.6}'` 1.5
`echo $VEC_SCALE $DISP_LBL | awk '{print $1*$2+0.6}'` 0.2
0.2 0.2
EOF
echo $VEC_SCALE $DISP_LBL |\
awk '{print 0.4,0.5,0,$1*$2}' |\
gmt psxy -JX -R -Sv10p+e+a45 -W2p,black -N -K -O >> $PSFILE
echo $VEC_SCALE $DISP_LBL |\
awk '{if ($2!=1) {print $1*$2*0.5+0.4,1.0,12","0,"CM",$2,"meters"}
else{print $1*$2*0.5+0.4,1.0,12","0,"CM",$2,"meter"}}' |\
gmt pstext -JX -R -F+f+j -N -K -O >> $PSFILE
gmt pstext -JX -R -F+f+j -Gwhite -N -K -O >> $PSFILE << EOF
`echo $VEC_SCALE $DISP_LBL | awk '{print $1*$2+0.7}'` 0.2 10,2 LB \
Displacements less than $DISP_THR m are in light grey
EOF
else
VEC_SCALE=0
DISP_LBL=0
fi
if [ $SRC_TYPE == "FFM" -o $SRC_TYPE == "FSP" ]
then
echo $VEC_SCALE $DISP_LBL $CONT |\
awk '{
if($3==1) {print $1*$2+0.7,0.6,"10,2 LB FFM Slip Contours: "$3" meter"}
else {print $1*$2+0.7,0.6,"10,2 LB FFM Slip Contours: "$3" meters"}
}' |\
gmt pstext -JX10c -R0/10/0/10 -F+f+j -N -K -O >> $PSFILE
fi
echo 0 0 | gmt psxy $PROJ $LIMS -O >> $PSFILE
#####
# CLEAN UP
#####
ps2pdf $PSFILE
rm *.awk
rm polar_mwh.cpt
|
mherman09/src
|
scripts/surf_disp.sh
|
Shell
|
mit
| 15,967 |
#!/usr/bin/env bash
#=========================================================================
# Copyright (c) 2015, 2016 GemTalk Systems, LLC. All Rights Reserved <[email protected]>.
#
# MIT license: https://github.com/GsDevKit/GsDevKit_home/blob/master/license.txt
#=========================================================================
set -e # exit on error
startStone -b ${STONENAME1}
"$GS_HOME/bin/private/gsDevKitTodeCommandLine" todeIt ${STONENAME1} << EOF
# after test run, <self> will be a TestResult
test --batch image
eval \`[(self hasErrors or: [ self hasFailures ]) ifTrue: [ self error: 'Tests failed' ] ] on: Warning do: [:ex | ex resume: true ]\`
EOF
|
GsDevKit/GsDevKit_home
|
tests/todeUnitTests.sh
|
Shell
|
mit
| 689 |
#!/bin/sh
#
# Smart little documentation generator.
# GPL/LGPL
# (c) Del 2015 http://www.babel.com.au/
#
APPNAME='Omnipay eWay Rapid 3.1 Gateway Module'
CMDFILE=apigen.cmd.$$
DESTDIR=./documents
#
# Find apigen, either in the path or as a local phar file
#
if [ -f apigen.phar ]; then
APIGEN="php apigen.phar"
else
APIGEN=`which apigen`
if [ ! -f "$APIGEN" ]; then
# Search for phpdoc if apigen is not found.
if [ -f phpDocumentor.phar ]; then
PHPDOC="php phpDocumentor.phar"
else
PHPDOC=`which phpdoc`
if [ ! -f "$PHPDOC" ]; then
echo "Neither apigen nor phpdoc is installed in the path or locally, please install one of them"
echo "see http://www.apigen.org/ or http://www.phpdoc.org/"
exit 1
fi
fi
fi
fi
#
# As of version 4 of apigen need to use the generate subcommand
#
if [ ! -z "$APIGEN" ]; then
APIGEN="$APIGEN generate"
fi
#
# Without any arguments this builds the entire system documentation,
# making the cache file first if required.
#
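# Typical invocations (illustrative):
#   ./makedoc.sh            # build the full API documentation into ./documents/main
#   ./makedoc.sh makecache  # (re)build the dirlist.cache list of source directories
#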
if [ -z "$1" ]; then
#
# Check to see that the cache has been made.
#
if [ ! -f dirlist.cache ]; then
echo "Making dirlist.cache file"
$0 makecache
fi
#
# Build the apigen/phpdoc command in a file.
#
if [ ! -z "$APIGEN" ]; then
echo "$APIGEN --php --tree --title '$APPNAME API Documentation' --destination $DESTDIR/main \\" > $CMDFILE
cat dirlist.cache | while read dir; do
echo "--source $dir \\" >> $CMDFILE
done
echo "" >> $CMDFILE
elif [ ! -z "$PHPDOC" ]; then
echo "$PHPDOC --sourcecode --title '$APPNAME API Documentation' --target $DESTDIR/main --directory \\" > $CMDFILE
cat dirlist.cache | while read dir; do
echo "${dir},\\" >> $CMDFILE
done
echo "" >> $CMDFILE
else
"Neither apigen nor phpdoc are found, how did I get here?"
exit 1
fi
#
# Run the apigen command
#
rm -rf $DESTDIR/main
mkdir -p $DESTDIR/main
. ./$CMDFILE
/bin/rm -f ./$CMDFILE
#
# The "makecache" argument causes the script to just make the cache file
#
elif [ "$1" = "makecache" ]; then
echo "Find application source directories"
find src -name \*.php -print | \
(
while read file; do
grep -q 'class' $file && dirname $file
done
) | sort -u | \
grep -v -E 'config|docs|migrations|phpunit|test|Test|views|web' > dirlist.app
echo "Find vendor source directories"
find vendor -name \*.php -print | \
(
while read file; do
grep -q 'class' $file && dirname $file
done
) | sort -u | \
grep -v -E 'config|docs|migrations|phpunit|codesniffer|test|Test|views' > dirlist.vendor
#
# Filter out any vendor directories for which apigen fails
#
echo "Filter source directories"
mkdir -p $DESTDIR/tmp
cat dirlist.app dirlist.vendor | while read dir; do
if [ ! -z "$APIGEN" ]; then
$APIGEN --quiet --title "Test please ignore" \
--source $dir \
--destination $DESTDIR/tmp && (
echo "Including $dir"
echo $dir >> dirlist.cache
) || (
echo "Excluding $dir"
)
elif [ ! -z "$PHPDOC" ]; then
$PHPDOC --quiet --title "Test please ignore" \
--directory $dir \
--target $DESTDIR/tmp && (
echo "Including $dir"
echo $dir >> dirlist.cache
) || (
echo "Excluding $dir"
)
fi
done
echo "Documentation cache dirlist.cache built OK"
#
# Clean up
#
/bin/rm -rf $DESTDIR/tmp
fi
|
Mihai-P/tez-omnipay-eway
|
makedoc.sh
|
Shell
|
mit
| 3,965 |
#!/bin/bash
set -e && \
cd ./dist && \
echo stage 1 && \
remote_repo=${GITHUB_URL:-`git config remote.origin.url`} && \
remote_branch="release" && \
echo stage 2 $remote_repo master:$remote_branch && \
git init && \
echo stage 3 && \
git config user.name "Travis CI" && \
git config user.email "[email protected]" && \
git add . && \
echo stage 4 && \
git commit -m'build' && \
echo stage 5 && \
git push --force --quiet $remote_repo master:$remote_branch > /dev/null 2>&1 && \
echo stage 6 && \
rm -fr .git && \
echo stage 7 && \
cd ../
|
mikhail-angelov/bconf
|
release.sh
|
Shell
|
mit
| 544 |
cl src/c/OTE.c src/c/action.c src/c/base.c src/c/editor.c /WX /TC /Z7 /Febin/OTE.exe -link ../OTG/OTG.obj ../OSAL/gpu.obj ../OSAL/graphics.obj ../OSAL/input.obj ../OSAL/net.obj ../OSAL/sound.obj ../OSAL/system.obj ../OSAL/util.obj ../OSAL/release_windows/libfbxsdk.lib ../OSAL/release_windows/glew32.lib ../OSAL/release_windows/SDL2.lib ../OSAL/release_windows/SDL2_image.lib ../OSAL/release_windows/SDL2_mixer.lib ../OSAL/release_windows/SDL2_net.lib ../OSAL/release_windows/SDL2_ttf.lib ../OSAL/release_windows/SDL2main.lib ../OSAL/release_windows/SDL2test.lib ../OSAL/release_windows/libfbxsdk.lib opengl32.lib
|
oddthread/OTE
|
script/build/make_vc.bash
|
Shell
|
mit
| 616 |
#!/bin/bash
# Prints the most recent EBS snapshot for matching instances
TAG="$1"
VALUES="$2"
if [[ -z $TAG || -z $VALUES ]]; then
echo "Usage: $0 TAG VALUE[,VALUE,...]"
echo "List the most recent EBS snapshots for the matching instances"
exit 1
fi
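# Example (hypothetical tag and values):
#   ./ebs_snapshots.sh Environment production,staging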
VOLUME_IDS=$(aws ec2 describe-instances \
--filter "Name=tag:$TAG,Values=$VALUES" | \
jq -r '.Reservations[].Instances[].BlockDeviceMappings[].Ebs.VolumeId')
for VOL in $VOLUME_IDS; do
aws ec2 describe-snapshots --filters "Name=volume-id,Values=$VOL" | \
jq -r '.Snapshots | sort_by(.StartTime) | last |
[.SnapshotId, .StartTime, .State] | @tsv'
done
|
mivok/tools
|
aws/ebs_snapshots.sh
|
Shell
|
mit
| 647 |
#!/bin/bash
psql soccer-w -c "drop table if exists ncaa.results;"
psql soccer-w -f sos/standardized_results.sql
psql soccer-w -c "vacuum full verbose analyze ncaa.results;"
psql soccer-w -c "drop table if exists ncaa._basic_factors;"
psql soccer-w -c "drop table if exists ncaa._parameter_levels;"
R --vanilla -f sos/lmer.R
psql soccer-w -c "vacuum full verbose analyze ncaa._parameter_levels;"
psql soccer-w -c "vacuum full verbose analyze ncaa._basic_factors;"
psql soccer-w -f sos/normalize_factors.sql
psql soccer-w -c "vacuum full verbose analyze ncaa._factors;"
psql soccer-w -f sos/schedule_factors.sql
psql soccer-w -c "vacuum full verbose analyze ncaa._schedule_factors;"
psql soccer-w -f sos/current_ranking.sql > sos/current_ranking.txt
cp /tmp/current_ranking.csv sos/current_ranking.csv
psql soccer-w -f sos/division_ranking.sql > sos/division_ranking.txt
psql soccer-w -f sos/connectivity.sql > sos/connectivity.txt
psql soccer-w -f sos/test_predictions.sql > sos/test_predictions.txt
psql soccer-w -f sos/predict_daily.sql > sos/predict_daily.txt
cp /tmp/predict_daily.csv sos/predict_daily.csv
psql soccer-w -f sos/predict_weekly.sql > sos/predict_weekly.txt
cp /tmp/predict_weekly.csv sos/predict_weekly.csv
psql soccer-w -f sos/predict.sql > sos/predict.txt
cp /tmp/predict.csv sos/predict.csv
|
octonion/soccer-w
|
ncaa/sos.sh
|
Shell
|
mit
| 1,325 |
#!/bin/sh
set -eo pipefail -o nounset
wget --quiet -O - ftp://ftp.ebi.ac.uk/pub/databases/gencode/Gencode_human/release_34/GRCh37_mapping/gencode.v34lift37.transcripts.fa.gz\
| gzip -dc \
| bgzip -c > hg19_transcripts-gencode-v1.fa.gz
samtools faidx hg19_transcripts-gencode-v1.fa.gz
|
gogetdata/ggd-recipes
|
recipes/genomics/Homo_sapiens/hg19/hg19-transcript-sequences-chr-regions-gencode-v1/recipe.sh
|
Shell
|
mit
| 332 |
#!/bin/bash
#
# Script to set up & configure a source build environment for Android - Jelly Bean
#
# 1) Installs dependencies
# 2) Repo inits
#
# Written by iroro - 2012-04-02 - original ICS 4.0.3 script for Ubuntu 11.04
# 2013-01-25 - updated for Jelly Bean, 4.1.1 for Ubuntu 12.04
#####################################################################
echo -e "\n\nCreating ~/bin in home directory - if needed"
#####################################################################
if [ ! -d "$HOME/bin" ] ; then
mkdir ~/bin
fi
source ~/.profile # Ensure bin is in the path
#####################################################################
cd ~/
WORKING_DIR="android_source"
if [ $# -eq 1 ]; then
WORKING_DIR=$1
echo -e "\nCreating working directory - ~/$WORKING_DIR \n"
fi
# Create the working directory if it doesn't exist
if [ ! -d $WORKING_DIR ]; then
mkdir -p $WORKING_DIR
cd ~/$WORKING_DIR
fi
#####################################################################
echo -e "\n\nInstalling dependencies"
#####################################################################
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y git-core gnupg flex bison gperf build-essential \
zip curl libc6-dev libncurses5-dev:i386 x11proto-core-dev \
libx11-dev:i386 libreadline6-dev:i386 libgl1-mesa-glx:i386 \
libgl1-mesa-dev g++-multilib mingw32 openjdk-6-jdk tofrodos \
python-markdown libxml2-utils xsltproc zlib1g-dev:i386
sudo apt-get install gcc-4.4 g++-4.4 g++-4.4-multilib
sudo unlink /usr/bin/gcc
sudo ln -s /usr/bin/gcc-4.4 /usr/bin/gcc
sudo unlink /usr/bin/g++
sudo ln -s /usr/bin/g++-4.4 /usr/bin/g++
#####################################################################
#sudo add-apt-repository "deb http://archive.canonical.com/ lucid partner"
#sudo apt-get update
#sudo apt-get install sun-java6-jdk
#####################################################################
echo -e "\n\nInstalling JDK6 -- DO ONCE THEN COMMENT OUT"
#####################################################################
# io havoc note: the above stuff didn't work per source.android.com
# install JDK6 per http://blog.markloiseau.com/2012/07/how-to-compile-android-on-ubuntu-12-04/#more-1687
# run update-alternatives so your system uses the Sun JDK:
sudo update-alternatives --install /usr/bin/java java /usr/lib/jvm/jdk1.6.0_37/bin/java 1
sudo update-alternatives --install /usr/bin/javac javac /usr/lib/jvm/jdk1.6.0_37/bin/javac 1
sudo update-alternatives --install /usr/bin/javaws javaws /usr/lib/jvm/jdk1.6.0_37/bin/javaws 1
sudo update-alternatives --config java
sudo update-alternatives --config javac
sudo update-alternatives --config javaws
#####################################################################
# only do this ONCE
# echo -e "\n\nLinking libGL.so"
# sudo ln -s /usr/lib/i386-linux-gnu/mesa/libGL.so.1 /usr/lib/i386-linux-gnu/libGL.so
#####################################################################
echo -e "\nSetting up the repo \n"
#####################################################################
curl https://dl-ssl.google.com/dl/googlesource/git-repo/repo > ~/bin/repo
chmod a+x ~/bin/repo
#####################################################################
echo -e "\nEntering working directory $WORKING_DIR \n"
#####################################################################
cd ~/$WORKING_DIR
repo init -u https://android.googlesource.com/platform/manifest -b android-4.1.1_r1
#####################################################################
echo -e "\n\n Now ready to call $repo sync \n\n"
#####################################################################
|
ruohoruotsi/Utils
|
shell_crips/io_android_4.1.1_setup.sh
|
Shell
|
mit
| 3,652 |
#!/usr/bin/env sh
# Delete existing binaries to see that the lib correctly recreates them
clear;
rm -rf ~/.hubris_cache/*; rm -rf *.o; rm -rf *.hi; rm -rf *.c;
ruby haskell_math.rb
|
mwotton/Hubris-Haskell
|
examples/simple_inline/clean_and_run.sh
|
Shell
|
mit
| 188 |
#!/bin/sh
set -eo pipefail -o nounset
## Get the ggd genome file
genome=https://raw.githubusercontent.com/gogetdata/ggd-recipes/master/genomes/Homo_sapiens/GRCh38/GRCh38.genome
## Get the chromomsome mapping file
chrom_mapping=$(ggd get-files grch38-chrom-mapping-ucsc2ensembl-ncbi-v1 --pattern "*.txt")
# download, header, remap chroms, sort, bgzip
wget --quiet -O - http://hgdownload.cse.ucsc.edu/goldenpath/hg38/database/cytoBand.txt.gz \
| gzip -dc \
| awk -v OFS="\t" 'BEGIN {print "#Table Info: http://genome.ucsc.edu/cgi-bin/hgTables?db=hg38&hgta_group=map&hgta_track=cytoBand&hgta_table=cytoBand&hgta_doSchema=describe+table+schema\n#chrom\tstart\tend\tband\tstain"} {print $1,$2,$3,$4,$5}' \
| gsort --chromosomemappings $chrom_mapping /dev/stdin $genome \
| bgzip -c > grch38-cytoband-ucsc-v1.bed.gz
# index the bed file using tabix
tabix grch38-cytoband-ucsc-v1.bed.gz
|
gogetdata/ggd-recipes
|
recipes/genomics/Homo_sapiens/GRCh38/grch38-cytobands-ucsc-v1/recipe.sh
|
Shell
|
mit
| 901 |
#!/bin/bash
examples=$(ls -1 inc-repair-Lab*.sh)
for example in ${examples[@]}; do
./$example
cat result-inc-repair
echo ""
done
|
jyi/ITSP
|
experiment/examples/inc-repair/run.example.sh
|
Shell
|
mit
| 143 |
# pip should only run if there is a virtualenv currently activated
#export PIP_REQUIRE_VIRTUALENV=true
export VIRTUALENVWRAPPER_PYTHON=/usr/local/bin/python3
export WORKON_HOME=$HOME/.virtualenvs
source $(brew --prefix)/bin/virtualenvwrapper_lazy.sh
# Run pip without requiring a virtualenv
gpip(){
PIP_REQUIRE_VIRTUALENV="" pip "$@"
}
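# Example: gpip install --upgrade pip   # runs outside a virtualenv even when
# PIP_REQUIRE_VIRTUALENV is enabled above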
export PYENV_ROOT="$HOME/.pyenv"
export PATH="$PYENV_ROOT/bin:$PATH"
if command -v pyenv 1>/dev/null 2>&1; then
eval "$(pyenv init -)"
fi
|
yarinb/dotfiles-1
|
python/virtualenv.zsh
|
Shell
|
mit
| 485 |
export PLUSHU_APPS_DIR=${PLUSHU_APPS_DIR-$PLUSHU_ROOT/apps}
|
plushu/plushu-apps
|
profile.d/apps-default.sh
|
Shell
|
mit
| 60 |
#!/usr/bin/env bash
set -ex
CURRENT=`pwd`
GAP="$GAPROOT/bin/gap.sh --quitonbreak -q"
# generate library coverage reports
$GAP -a 500M -m 500M -q <<GAPInput
if LoadPackage("profiling") <> true then
Print("ERROR: could not load profiling package");
FORCE_QUIT_GAP(1);
fi;
d := Directory("$COVDIR");;
covs := [];;
for f in DirectoryContents(d) do
if f in [".", ".."] then continue; fi;
Add(covs, Filename(d, f));
od;
Print("Merging coverage results\n");
r := MergeLineByLineProfiles(covs);;
Print("Outputting JSON\n");
OutputJsonCoverage(r, "gap-coverage.json");;
QUIT_GAP(0);
GAPInput
# FIXME
# generate source coverage reports by running gcov
#gcov -o . $COVDIR/*.c*
bash <(curl -s https://codecov.io/bash)
|
mcmartins/francy
|
scripts/coverage.sh
|
Shell
|
mit
| 727 |
#!/usr/bin/env bash
usage()
{
echo "Usage: $0 [BuildArch] [BuildType] [clean] [verbose] [clangx.y]"
echo "BuildArch can be: x64, ARM"
echo "BuildType can be: Debug, Release"
echo "clean - optional argument to force a clean build."
echo "verbose - optional argument to enable verbose build output."
echo "clangx.y - optional argument to build using clang version x.y."
exit 1
}
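# Example invocations (illustrative only):
#   ./build.sh x64 Release clean
#   ./build.sh x64 Debug verbose clang3.6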
setup_dirs()
{
echo Setting up directories for build
mkdir -p "$__RootBinDir"
mkdir -p "$__BinDir"
mkdir -p "$__LogsDir"
mkdir -p "$__IntermediatesDir"
}
# Performs "clean build" type actions (deleting and remaking directories)
clean()
{
echo Cleaning previous output for the selected configuration
rm -rf "$__BinDir"
rm -rf "$__IntermediatesDir"
rm -rf "$__TestWorkingDir"
rm -rf "$__TestIntermediatesDir"
rm -rf "$__LogsDir/*_$__BuildOS__$__BuildArch__$__BuildType.*"
}
# Check the system to ensure the right pre-reqs are in place
check_prereqs()
{
echo "Checking pre-requisites..."
# Check presence of CMake on the path
hash cmake 2>/dev/null || { echo >&2 "Please install cmake before running this script"; exit 1; }
# Check for clang
hash clang-$__ClangMajorVersion.$__ClangMinorVersion 2>/dev/null || hash clang$__ClangMajorVersion$__ClangMinorVersion 2>/dev/null || hash clang 2>/dev/null || { echo >&2 "Please install clang before running this script"; exit 1; }
}
build_coreclr()
{
# All set to commence the build
echo "Commencing build of native components for $__BuildOS.$__BuildArch.$__BuildType"
cd "$__IntermediatesDir"
# Regenerate the CMake solution
echo "Invoking cmake with arguments: \"$__ProjectRoot\" $__CMakeArgs"
"$__ProjectRoot/src/pal/tools/gen-buildsys-clang.sh" "$__ProjectRoot" $__ClangMajorVersion $__ClangMinorVersion $__CMakeArgs
# Check that the makefiles were created.
if [ ! -f "$__IntermediatesDir/Makefile" ]; then
echo "Failed to generate native component build project!"
exit 1
fi
# Get the number of processors available to the scheduler
# Other techniques such as `nproc` only get the number of
# processors available to a single process.
if [ `uname` = "FreeBSD" ]; then
NumProc=`sysctl hw.ncpu | awk '{ print $2+1 }'`
else
NumProc=$(($(getconf _NPROCESSORS_ONLN)+1))
fi
# Build CoreCLR
echo "Executing make install -j $NumProc $__UnprocessedBuildArgs"
make install -j $NumProc $__UnprocessedBuildArgs
if [ $? != 0 ]; then
echo "Failed to build coreclr components."
exit 1
fi
}
echo "Commencing CoreCLR Repo build"
# Argument types supported by this script:
#
# Build architecture - valid values are: x64, ARM.
# Build Type - valid values are: Debug, Release
#
# Set the default arguments for build
# Obtain the location of the bash script to figure out where the root of the repo is.
__ProjectRoot="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
__BuildArch=x64
# Use uname to determine what the OS is.
OSName=$(uname -s)
case $OSName in
Linux)
__BuildOS=Linux
;;
Darwin)
__BuildOS=OSX
;;
FreeBSD)
__BuildOS=FreeBSD
;;
OpenBSD)
__BuildOS=OpenBSD
;;
NetBSD)
__BuildOS=NetBSD
;;
*)
echo "Unsupported OS $OSName detected, configuring as if for Linux"
__BuildOS=Linux
;;
esac
__MSBuildBuildArch=x64
__BuildType=Debug
__CMakeArgs=DEBUG
# Set the various build properties here so that CMake and MSBuild can pick them up
__ProjectDir="$__ProjectRoot"
__SourceDir="$__ProjectDir/src"
__PackagesDir="$__ProjectDir/packages"
__RootBinDir="$__ProjectDir/bin"
__LogsDir="$__RootBinDir/Logs"
__UnprocessedBuildArgs=
__MSBCleanBuildArgs=
__CleanBuild=false
__VerboseBuild=false
__ClangMajorVersion=3
__ClangMinorVersion=5
for i in "$@"
do
lowerI="$(echo $i | awk '{print tolower($0)}')"
case $lowerI in
-?|-h|--help)
usage
exit 1
;;
x64)
__BuildArch=x64
__MSBuildBuildArch=x64
;;
arm)
__BuildArch=arm
__MSBuildBuildArch=arm
;;
debug)
__BuildType=Debug
;;
release)
__BuildType=Release
__CMakeArgs=RELEASE
;;
clean)
__CleanBuild=1
;;
verbose)
__VerboseBuild=1
;;
clang3.5)
__ClangMajorVersion=3
__ClangMinorVersion=5
;;
clang3.6)
__ClangMajorVersion=3
__ClangMinorVersion=6
;;
clang3.7)
__ClangMajorVersion=3
__ClangMinorVersion=7
;;
*)
__UnprocessedBuildArgs="$__UnprocessedBuildArgs $i"
esac
done
# Set the remaining variables based upon the determined build configuration
__BinDir="$__RootBinDir/Product/$__BuildOS.$__BuildArch.$__BuildType"
__PackagesBinDir="$__BinDir/.nuget"
__ToolsDir="$__RootBinDir/tools"
__TestWorkingDir="$__RootBinDir/tests/$__BuildOS.$__BuildArch.$__BuildType"
__IntermediatesDir="$__RootBinDir/obj/$__BuildOS.$__BuildArch.$__BuildType"
__TestIntermediatesDir="$__RootBinDir/tests/obj/$__BuildOS.$__BuildArch.$__BuildType"
# Specify path to be set for CMAKE_INSTALL_PREFIX.
# This is where all built CoreClr libraries will be copied to.
export __CMakeBinDir="$__BinDir"
# Configure environment if we are doing a clean build.
if [ $__CleanBuild == 1 ]; then
clean
fi
# Configure environment if we are doing a verbose build
if [ $__VerboseBuild == 1 ]; then
export VERBOSE=1
fi
# Make the directories necessary for build if they don't exist
setup_dirs
# Check prereqs.
check_prereqs
# Build the coreclr (native) components.
build_coreclr
# Build complete
echo "Repo successfully built."
echo "Product binaries are available at $__BinDir"
exit 0
|
apanda/coreclr
|
build.sh
|
Shell
|
mit
| 5,948 |
#!/usr/bin/env bash
wget -np -k -e robots=off -r -l 1 https://github.com/Naereen/cuisine/{milestones,issues,labels}/
gh2md -t $(cat ~/.gh2md_token) Naereen/cuisine cuisine-issues.md
|
Naereen/cuisine
|
issues/update_local_cache.sh
|
Shell
|
mit
| 183 |
#!/bin/bash
cd output
texi2pdf -q ../tex/Aoum-main.tex
|
krizka/aoum
|
dopdf.sh
|
Shell
|
cc0-1.0
| 55 |
#!/usr/bin/env bash
# -----------------------------------------------------------------------------
# Copyright Siemens AG, 2017.
# Copyright Bosch Software Innovations GmbH, 2017-2019.
# Part of the SW360 Portal Project.
#
# Copying and distribution of this file, with or without modification,
# are permitted in any medium without royalty provided the copyright
# notice and this notice are preserved. This file is offered as-is,
# without any warranty.
# -----------------------------------------------------------------------------
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )/../" && pwd )"
################################################################################
# helper functions:
testUrlHttpCode () {
url="$1"
set -x
http_code=$(curl -k -s -o /dev/null -I -w "%{http_code}" "$url")
[ "$http_code" -lt "400" ]
set +x
}
testUrlWithSearchString () {
url="$1"
needle="$2"
testUrlHttpCode $url
set -x
curl -Ss -k "$url" | grep -iq "$needle"
set +x
}
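# Example (hypothetical endpoint):
#   testUrlWithSearchString "https://localhost:8443" "Liferay"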
################################################################################
# asserts:
assertLiferayViaNginx () {
echo "assert that Liferay answers over HTTPS via nginx"
testUrlWithSearchString "https://localhost:8443" "Welcome to Liferay Community Edition Portal"
}
assertLiferayViaHTTP () {
echo "assert that Liferay answers over HTTP"
testUrlWithSearchString "http://localhost:8080" "Welcome to Liferay Community Edition Portal"
}
assertNoTomcatDebugPort () {
echo "assert that the tomcat debug port is not open on the host"
set -x
! nc -vz localhost 5005
set +x
}
assertCouchdb () {
echo "assert that CouchDB running (by examining the log)"
set -x
$DIR/sw360chores.pl -- logs sw360couchdb 2>/dev/null | grep -iq "Apache CouchDB has started"
set +x
}
assertCouchdbViaHTTP () {
echo "assert that CouchDB answers over HTTP"
testUrlWithSearchString "http://localhost:5984" "\"couchdb\":\"Welcome\""
testUrlWithSearchString "http://localhost:5984/_utils/" "Fauxton"
}
assertNoCouchdbPort () {
echo "assert that the couchdb port is not open on the host"
set -x
! nc -vz localhost 5984
set +x
}
assertTomcat () {
echo "assert that tomcat running (by examining the log)"
set -x
$DIR/sw360chores.pl -- logs sw360 2>/dev/null | grep -iq "Loading file:/opt/sw360/portal-bundle.properties"
$DIR/sw360chores.pl -- logs sw360 2>/dev/null | grep -iq "Using dialect org.hibernate.dialect.PostgreSQLDialect for PostgreSQL 9.6"
$DIR/sw360chores.pl -- logs sw360 2>/dev/null | grep -iq "org.apache.catalina.startup.Catalina.start Server startup in"
set +x
}
assertIsSwarmMode() {
echo "assert that swarm mode is enable"
set -x
docker info | grep -iq "Swarm: active"
set +x
}
|
sw360/sw360chores
|
.travis/assertions.sh
|
Shell
|
epl-1.0
| 2,800 |
#!/bin/bash
source ./vars
URL="https://cdn.redhat.com/content/dist/rhel/rhui/server/6/6Server/x86_64/rhui/2/iso/RHEL-6-RHUI-2-LATEST-Server-x86_64-DVD.iso"
wget --certificate=${ENT_CERT} --ca-certificate=${REDHAT_CA_CERT} ${URL}
|
jwmatthews/rhui_scripts
|
older/install/fetch_iso.sh
|
Shell
|
gpl-2.0
| 232 |
#!/bin/sh
a=1
while test $a -lt 5
do echo $a;a=`expr $a + 1`;
done
|
nonemaw/MATRIX_01
|
COMP9041/ass1/autoTest/examples/5/while.sh
|
Shell
|
gpl-2.0
| 70 |
#! /bin/sh
#
# ========================================================================
#
# Copyright (C) 1991-2002 SciTech Software, Inc. All rights reserved.
#
# This file may be distributed and/or modified under the terms of the
# GNU General Public License version 2 as published by the Free
# Software Foundation and appearing in the file LICENSE.GPL included
# in the packaging of this file.
#
# Licensees holding a valid Commercial License for this product from
# SciTech Software, Inc. may use this file in accordance with the
# Commercial License Agreement provided with the Software.
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING
# THE WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE.
#
# See http://www.scitechsoft.com/license/ for information about
# the licensing options available and how to purchase a Commercial
# License Agreement.
#
# Contact [email protected] if any conditions of this licensing
# are not clear to you, or you have questions about licensing options.
#
# ========================================================================
#
# makeself 1.2
#
# Utility to create self-extracting tar.gz archives.
# The resulting archive is a file holding the tar.gz archive with
# a small shell script stub that uncompresses the archive to a temporary
# directory and then executes a given script from within that directory.
#
# Makeself home page: http://www.lokigames.com/~megastep/makeself/
#
# Version history :
# - 1.0 : Initial public release
# - 1.1 : The archive can be passed parameters that will be passed on to
# the embedded script, thanks to John C. Quillan
# - 1.2 : Package distribution, bzip2 compression, more command line options,
# support for non-temporary archives. Ideas thanks to Francois Petitjean
#
# (C) 1998-1999 by Stéphane Peter <[email protected]>
#
# This software is released under the terms of the GNU GPL
# Please read the license at http://www.gnu.org/copyleft/gpl.html
#
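# Example invocation (hypothetical paths and names):
#   ./makeself.sh /tmp/mydist mydist.run "My Package" ./setup.sh
#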
VERSION=1.2
GZIP="gzip -c"
KEEP=n
if [ "$1" = --version ]; then
echo Makeself version $VERSION
exit 0
fi
if [ "$1" = --bzip2 ]; then
if which bzip2 2>&1 > /dev/null; then
GZIP=bzip2
shift 1
else
echo Unable to locate the bzip2 program in your \$PATH.>&2
exit 1
fi
fi
if [ "$1" = --notemp ]; then
KEEP=y
shift 1
fi
if [ $# -lt 4 ]; then
echo $0: Not enough parameters.
echo "Usage: $0 [params] archive_dir file_name label startup_script [args]"
echo "params can be one of those :"
echo " --version : Print out Makeself version number and exit"
echo " --bzip2 : Compress using bzip2 instead of gzip"
echo " --notemp : The archive will uncompress to the current directory"
echo Do not forget to give a fully qualified startup script name
echo "(i.e. with a ./ prefix if inside the archive)."
exit 1
fi
archdir=$1
archname=$2
# The following is the shell script stub code
echo '#! /bin/sh' > $archname
echo \# This script was generated using Makeself $VERSION >> $archname
echo label=\"$3\" >> $archname
echo script=$4 >> $archname
shift 4
echo scriptargs=\"$*\" >> $archname
echo "keep=$KEEP" >> $archname
cat << EOF >> $archname
skip=27
if [ "\$1" = "-keep" ]; then keep=y; shift 1; fi
if [ "\$keep" = y ]; then echo "Creating directory $archdir"; tmpdir=$archdir;
else tmpdir="/tmp/selfgz\$\$"; fi
location=\`pwd\`
echo=echo; [ -x /usr/ucb/echo ] && echo=/usr/ucb/echo
mkdir \$tmpdir || {
echo 'Cannot create target directory' >&2
exit 1
}
\$echo -n Uncompressing \$label
cd \$tmpdir
[ "\$keep" = y ] || trap 'cd /tmp; /bin/rm -rf \$tmpdir; exit \$res'
if ( (cd \$location; tail +\$skip \$0; ) | $GZIP -d | tar xvof - | \
(while read a; do \$echo -n .; done; echo; )) 2> /dev/null; then
\$script \$scriptargs \$*; res=\$?
[ "\$keep" = y ] || ( cd /tmp; /bin/rm -rf \$tmpdir; )
else
echo Cannot decompress \$0; exit 1
fi;
exit \$res
EOF
# Append the tar.gz data after the stub
echo Adding files to archive named \"$archname\"...
(cd $archdir; tar cvf - *| $GZIP -9 ) >> $archname && chmod +x $archname && \
echo Self-extracting archive \"$archname\" successfully created.
|
OS2World/DEV-UTIL-SNAP
|
src/util/makeself.sh
|
Shell
|
gpl-2.0
| 4,256 |
#!/bin/bash
PID=`ps aux | grep "[d]tweb-headless"| awk '{print $2}'`
if [ -z "$PID" ]; then
echo "Starting device tree editor"
/opt/dtweb/dtweb-headless.sh &
fi
|
UDOOboard/udoo-web-conf
|
bin/dtweb.sh
|
Shell
|
gpl-2.0
| 171 |