code (stringlengths 2 to 1.05M) | repo_name (stringlengths 5 to 110) | path (stringlengths 3 to 922) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 2 to 1.05M)
---|---|---|---|---|---|
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
source /usr/local/bin/openshift-dind-lib.sh
source /data/dind-env
function is-northd-running() {
local northd_ip=$1
ovn-nbctl --timeout=2 "--db=tcp:${northd_ip}:6641" ls-list
}
function have-token() {
local master_dir=$1
[[ -s "${master_dir}/ovn.token" ]]
}
function ovn-kubernetes-node() {
local config_dir=$1
local master_dir=$2
local kube_config="${config_dir}/node.kubeconfig"
os::util::wait-for-condition "kubernetes token" "have-token ${master_dir}" "120"
token=$(cat ${master_dir}/ovn.token)
cat >"/etc/openvswitch/ovn_k8s.conf" <<EOF
[kubernetes]
cacert=${config_dir}/ca.crt
EOF
local host
host="$(hostname)"
if os::util::is-master; then
host="${host}-node"
fi
local node_config="${config_dir}/node-config.yaml"
local master_config="${master_dir}/master-config.yaml"
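# Read the first clusterNetworks CIDR out of master-config.yaml (Python 2 one-liner)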
cluster_cidr=$(python -c "import yaml; stream = file('${master_config}', 'r'); y = yaml.load(stream); print y['networkConfig']['clusterNetworks'][0]['cidr']")
apiserver=$(awk '/server:/ { print $2; exit }' ${kube_config})
ovn_master_ip=$(echo -n ${apiserver} | cut -d "/" -f 3 | cut -d ":" -f 1)
# Ensure GENEVE's UDP port isn't firewalled
/usr/share/openvswitch/scripts/ovs-ctl --protocol=udp --dport=6081 enable-protocol
os::util::wait-for-condition "ovn-northd" "is-northd-running ${ovn_master_ip}" "120"
echo "Enabling and start ovn-kubernetes node services"
/usr/local/bin/ovnkube \
--k8s-apiserver "${apiserver}" \
--k8s-cacert "${config_dir}/ca.crt" \
--k8s-token "${token}" \
--cluster-subnet "${cluster_cidr}" \
--nb-address "tcp://${ovn_master_ip}:6641" \
--sb-address "tcp://${ovn_master_ip}:6642" \
--init-node ${host} \
--init-gateways
}
if [[ -n "${OPENSHIFT_OVN_KUBERNETES}" ]]; then
ovn-kubernetes-node /var/lib/origin/openshift.local.config/node /data/openshift.local.config/master
fi
| miminar/origin | images/dind/node/ovn-kubernetes-node.sh | Shell | apache-2.0 | 1,913 |
#!/bin/bash
set -ex
echo "Tagging release branch"
TAG=$NEW_VERSION
echo "RELEASE_TAG=$TAG" >> $GITHUB_ENV
git tag $TAG
git push origin --tags
echo "Setting Release Notes"
cat CHANGELOG.md | awk '/^#/{f=1} f; /^#/ && ++c==3{exit}' | sed '$ d' > RELEASE_NOTES.md
| pinterest/PINCache | Scripts/tag-release-branch.sh | Shell | apache-2.0 | 264 |
#!/bin/bash
#
# Copyright (c) 2012-2019 Red Hat, Inc.
# This program and the accompanying materials are made
# available under the terms of the Eclipse Public License 2.0
# which is available at https://www.eclipse.org/legal/epl-2.0/
#
# SPDX-License-Identifier: EPL-2.0
#
# Contributors:
# Red Hat, Inc. - initial API and implementation
#
set -e
export USER_ID=$(id -u)
export GROUP_ID=$(id -g)
if ! grep -Fq "${USER_ID}" /etc/passwd; then
# current user is an arbitrary
# user (its uid is not in the
# container /etc/passwd). Let's fix that
cat ${HOME}/passwd.template | \
sed "s/\${USER_ID}/${USER_ID}/g" | \
sed "s/\${GROUP_ID}/${GROUP_ID}/g" | \
sed "s/\${HOME}/\/home\/user/g" > /etc/passwd
cat ${HOME}/group.template | \
sed "s/\${USER_ID}/${USER_ID}/g" | \
sed "s/\${GROUP_ID}/${GROUP_ID}/g" | \
sed "s/\${HOME}/\/home\/user/g" > /etc/group
fi
is_current_user_sudoer() {
sudo -n true > /dev/null 2>&1
}
if ! is_current_user_sudoer; then
sed -i "s/che-host/che-host.eclipse-che.svc/g" /home/user/traefik/traefik.toml
fi
exec "$@"
| akervern/che | dockerfiles/dev/entrypoint.sh | Shell | epl-1.0 | 1,107 |
#!/bin/sh
#
# Copyright (C) 2005 Rene Scharfe
#
test_description='git archive and git get-tar-commit-id test
This test covers the topics of file contents, commit date handling and
commit id embedding:
The contents of the repository is compared to the extracted tar
archive. The repository contains simple text files, symlinks and a
binary file (/bin/sh). Only paths shorter than 99 characters are
used.
git archive applies the commit date to every file in the archive it
creates. The test sets the commit date to a specific value and checks
if the tar archive contains that value.
When giving git archive a commit id (in contrast to a tree id) it
embeds this commit id into the tar archive as a comment. The test
checks the ability of git get-tar-commit-id to figure it out from the
tar file.
'
. ./test-lib.sh
SUBSTFORMAT=%H%n
test_lazy_prereq TAR_NEEDS_PAX_FALLBACK '
(
mkdir pax &&
cd pax &&
"$TAR" xf "$TEST_DIRECTORY"/t5000/pax.tar &&
test -f PaxHeaders.1791/file
)
'
test_lazy_prereq GZIP 'gzip --version'
get_pax_header() {
file=$1
header=$2=
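# pax extended headers are records of the form "<len> <keyword>=<value>"; only records whose stated length matches the actual record length are considered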
while read len rest
do
if test "$len" = $(echo "$len $rest" | wc -c)
then
case "$rest" in
$header*)
echo "${rest#$header}"
;;
esac
fi
done <"$file"
}
check_tar() {
tarfile=$1.tar
listfile=$1.lst
dir=$1
dir_with_prefix=$dir/$2
test_expect_success ' extract tar archive' '
(mkdir $dir && cd $dir && "$TAR" xf -) <$tarfile
'
test_expect_success TAR_NEEDS_PAX_FALLBACK ' interpret pax headers' '
(
cd $dir &&
for header in *.paxheader
do
data=${header%.paxheader}.data &&
if test -h $data || test -e $data
then
path=$(get_pax_header $header path) &&
if test -n "$path"
then
mv "$data" "$path"
fi
fi
done
)
'
test_expect_success ' validate filenames' '
(cd ${dir_with_prefix}a && find .) | sort >$listfile &&
test_cmp a.lst $listfile
'
test_expect_success ' validate file contents' '
diff -r a ${dir_with_prefix}a
'
}
check_added() {
dir=$1
path_in_fs=$2
path_in_archive=$3
test_expect_success " validate extra file $path_in_archive" '
diff -r $path_in_fs $dir/$path_in_archive
'
}
test_expect_success 'setup' '
test_oid_cache <<-EOF
obj sha1:19f9c8273ec45a8938e6999cb59b3ff66739902a
obj sha256:3c666f798798601571f5cec0adb57ce4aba8546875e7693177e0535f34d2c49b
EOF
'
test_expect_success 'populate workdir' '
mkdir a &&
echo simple textfile >a/a &&
ten=0123456789 &&
hundred="$ten$ten$ten$ten$ten$ten$ten$ten$ten$ten" &&
echo long filename >"a/four$hundred" &&
mkdir a/bin &&
test-tool genrandom "frotz" 500000 >a/bin/sh &&
printf "A\$Format:%s\$O" "$SUBSTFORMAT" >a/substfile1 &&
printf "A not substituted O" >a/substfile2 &&
if test_have_prereq SYMLINKS
then
ln -s a a/l1
else
printf %s a >a/l1
fi &&
(
p=long_path_to_a_file &&
cd a &&
for depth in 1 2 3 4 5
do
mkdir $p &&
cd $p
done &&
echo text >file_with_long_path
) &&
(cd a && find .) | sort >a.lst
'
test_expect_success \
'add ignored file' \
'echo ignore me >a/ignored &&
echo ignored export-ignore >.git/info/attributes'
test_expect_success 'add files to repository' '
git add a &&
GIT_COMMITTER_DATE="2005-05-27 22:00" git commit -m initial
'
test_expect_success 'setup export-subst' '
echo "substfile?" export-subst >>.git/info/attributes &&
git log --max-count=1 "--pretty=format:A${SUBSTFORMAT}O" HEAD \
>a/substfile1
'
test_expect_success 'create bare clone' '
git clone --bare . bare.git &&
cp .git/info/attributes bare.git/info/attributes
'
test_expect_success 'remove ignored file' '
rm a/ignored
'
test_expect_success 'git archive' '
git archive HEAD >b.tar
'
check_tar b
test_expect_success 'git archive --prefix=prefix/' '
git archive --prefix=prefix/ HEAD >with_prefix.tar
'
check_tar with_prefix prefix/
test_expect_success 'git-archive --prefix=olde-' '
git archive --prefix=olde- HEAD >with_olde-prefix.tar
'
check_tar with_olde-prefix olde-
test_expect_success 'git archive --add-file' '
echo untracked >untracked &&
git archive --add-file=untracked HEAD >with_untracked.tar
'
check_tar with_untracked
check_added with_untracked untracked untracked
test_expect_success 'git archive --add-file twice' '
echo untracked >untracked &&
git archive --prefix=one/ --add-file=untracked \
--prefix=two/ --add-file=untracked \
--prefix= HEAD >with_untracked2.tar
'
check_tar with_untracked2
check_added with_untracked2 untracked one/untracked
check_added with_untracked2 untracked two/untracked
test_expect_success 'git archive on large files' '
test_config core.bigfilethreshold 1 &&
git archive HEAD >b3.tar &&
test_cmp_bin b.tar b3.tar
'
test_expect_success 'git archive in a bare repo' '
git --git-dir bare.git archive HEAD >b3.tar
'
test_expect_success 'git archive vs. the same in a bare repo' '
test_cmp_bin b.tar b3.tar
'
test_expect_success 'git archive with --output' '
git archive --output=b4.tar HEAD &&
test_cmp_bin b.tar b4.tar
'
test_expect_success 'git archive --remote' '
git archive --remote=. HEAD >b5.tar &&
test_cmp_bin b.tar b5.tar
'
test_expect_success 'git archive --remote with configured remote' '
git config remote.foo.url . &&
(
cd a &&
git archive --remote=foo --output=../b5-nick.tar HEAD
) &&
test_cmp_bin b.tar b5-nick.tar
'
test_expect_success 'validate file modification time' '
mkdir extract &&
"$TAR" xf b.tar -C extract a/a &&
test-tool chmtime --get extract/a/a >b.mtime &&
echo "1117231200" >expected.mtime &&
test_cmp expected.mtime b.mtime
'
test_expect_success 'git get-tar-commit-id' '
git get-tar-commit-id <b.tar >actual &&
git rev-parse HEAD >expect &&
test_cmp expect actual
'
test_expect_success 'git archive with --output, override inferred format' '
git archive --format=tar --output=d4.zip HEAD &&
test_cmp_bin b.tar d4.zip
'
test_expect_success GZIP 'git archive with --output and --remote creates .tgz' '
git archive --output=d5.tgz --remote=. HEAD &&
gzip -d -c <d5.tgz >d5.tar &&
test_cmp_bin b.tar d5.tar
'
test_expect_success 'git archive --list outside of a git repo' '
nongit git archive --list
'
test_expect_success 'git archive --remote outside of a git repo' '
git archive HEAD >expect.tar &&
nongit git archive --remote="$PWD" HEAD >actual.tar &&
test_cmp_bin expect.tar actual.tar
'
test_expect_success 'clients cannot access unreachable commits' '
test_commit unreachable &&
sha1=$(git rev-parse HEAD) &&
git reset --hard HEAD^ &&
git archive $sha1 >remote.tar &&
test_must_fail git archive --remote=. $sha1 >remote.tar
'
test_expect_success 'upload-archive can allow unreachable commits' '
test_commit unreachable1 &&
sha1=$(git rev-parse HEAD) &&
git reset --hard HEAD^ &&
git archive $sha1 >remote.tar &&
test_config uploadarchive.allowUnreachable true &&
git archive --remote=. $sha1 >remote.tar
'
test_expect_success 'setup tar filters' '
git config tar.tar.foo.command "tr ab ba" &&
git config tar.bar.command "tr ab ba" &&
git config tar.bar.remote true &&
git config tar.invalid baz
'
test_expect_success 'archive --list mentions user filter' '
git archive --list >output &&
grep "^tar\.foo\$" output &&
grep "^bar\$" output
'
test_expect_success 'archive --list shows only enabled remote filters' '
git archive --list --remote=. >output &&
! grep "^tar\.foo\$" output &&
grep "^bar\$" output
'
test_expect_success 'invoke tar filter by format' '
git archive --format=tar.foo HEAD >config.tar.foo &&
tr ab ba <config.tar.foo >config.tar &&
test_cmp_bin b.tar config.tar &&
git archive --format=bar HEAD >config.bar &&
tr ab ba <config.bar >config.tar &&
test_cmp_bin b.tar config.tar
'
test_expect_success 'invoke tar filter by extension' '
git archive -o config-implicit.tar.foo HEAD &&
test_cmp_bin config.tar.foo config-implicit.tar.foo &&
git archive -o config-implicit.bar HEAD &&
test_cmp_bin config.tar.foo config-implicit.bar
'
test_expect_success 'default output format remains tar' '
git archive -o config-implicit.baz HEAD &&
test_cmp_bin b.tar config-implicit.baz
'
test_expect_success 'extension matching requires dot' '
git archive -o config-implicittar.foo HEAD &&
test_cmp_bin b.tar config-implicittar.foo
'
test_expect_success 'only enabled filters are available remotely' '
test_must_fail git archive --remote=. --format=tar.foo HEAD \
>remote.tar.foo &&
git archive --remote=. --format=bar >remote.bar HEAD &&
test_cmp_bin remote.bar config.bar
'
test_expect_success GZIP 'git archive --format=tgz' '
git archive --format=tgz HEAD >j.tgz
'
test_expect_success GZIP 'git archive --format=tar.gz' '
git archive --format=tar.gz HEAD >j1.tar.gz &&
test_cmp_bin j.tgz j1.tar.gz
'
test_expect_success GZIP 'infer tgz from .tgz filename' '
git archive --output=j2.tgz HEAD &&
test_cmp_bin j.tgz j2.tgz
'
test_expect_success GZIP 'infer tgz from .tar.gz filename' '
git archive --output=j3.tar.gz HEAD &&
test_cmp_bin j.tgz j3.tar.gz
'
test_expect_success GZIP 'extract tgz file' '
gzip -d -c <j.tgz >j.tar &&
test_cmp_bin b.tar j.tar
'
test_expect_success GZIP 'remote tar.gz is allowed by default' '
git archive --remote=. --format=tar.gz HEAD >remote.tar.gz &&
test_cmp_bin j.tgz remote.tar.gz
'
test_expect_success GZIP 'remote tar.gz can be disabled' '
git config tar.tar.gz.remote false &&
test_must_fail git archive --remote=. --format=tar.gz HEAD \
>remote.tar.gz
'
test_expect_success 'archive and :(glob)' '
git archive -v HEAD -- ":(glob)**/sh" >/dev/null 2>actual &&
cat >expect <<EOF &&
a/
a/bin/
a/bin/sh
EOF
test_cmp expect actual
'
test_expect_success 'catch non-matching pathspec' '
test_must_fail git archive -v HEAD -- "*.abc" >/dev/null
'
# Pull the size and date of each entry in a tarfile using the system tar.
#
# We'll pull out only the year from the date; that avoids any question of
# timezones impacting the result (as long as we keep our test times away from a
# year boundary; our reference times are all in August).
#
# The output of tar_info is expected to be "<size> <year>", both in decimal. It
# ignores the return value of tar. We have to do this, because some of our test
# input is only partial (the real data is 64GB in some cases).
tar_info () {
"$TAR" tvf "$1" |
awk '{
split($4, date, "-")
print $3 " " date[1]
}'
}
# See if our system tar can handle a tar file with huge sizes and dates far in
# the future, and that we can actually parse its output.
#
# The reference file was generated by GNU tar, and the magic time and size are
# both octal 01000000000001, which overflows normal ustar fields.
test_lazy_prereq TAR_HUGE '
echo "68719476737 4147" >expect &&
tar_info "$TEST_DIRECTORY"/t5000/huge-and-future.tar >actual &&
test_cmp expect actual
'
test_expect_success LONG_IS_64BIT 'set up repository with huge blob' '
obj=$(test_oid obj) &&
path=$(test_oid_to_path $obj) &&
mkdir -p .git/objects/$(dirname $path) &&
cp "$TEST_DIRECTORY"/t5000/huge-object .git/objects/$path &&
rm -f .git/index &&
git update-index --add --cacheinfo 100644,$obj,huge &&
git commit -m huge
'
# We expect git to die with SIGPIPE here (otherwise we
# would generate the whole 64GB).
test_expect_success LONG_IS_64BIT 'generate tar with huge size' '
{
git archive HEAD
echo $? >exit-code
} | test_copy_bytes 4096 >huge.tar &&
echo 141 >expect &&
test_cmp expect exit-code
'
test_expect_success TAR_HUGE,LONG_IS_64BIT 'system tar can read our huge size' '
echo 68719476737 >expect &&
tar_info huge.tar | cut -d" " -f1 >actual &&
test_cmp expect actual
'
test_expect_success TIME_IS_64BIT 'set up repository with far-future (2^34 - 1) commit' '
rm -f .git/index &&
echo foo >file &&
git add file &&
GIT_COMMITTER_DATE="@17179869183 +0000" \
git commit -m "tempori parendum"
'
test_expect_success TIME_IS_64BIT 'generate tar with far-future mtime' '
git archive HEAD >future.tar
'
test_expect_success TAR_HUGE,TIME_IS_64BIT,TIME_T_IS_64BIT 'system tar can read our future mtime' '
echo 2514 >expect &&
tar_info future.tar | cut -d" " -f2 >actual &&
test_cmp expect actual
'
test_expect_success TIME_IS_64BIT 'set up repository with far-far-future (2^36 + 1) commit' '
rm -f .git/index &&
echo content >file &&
git add file &&
GIT_TEST_COMMIT_GRAPH=0 GIT_COMMITTER_DATE="@68719476737 +0000" \
git commit -m "tempori parendum"
'
test_expect_success TIME_IS_64BIT 'generate tar with far-far-future mtime' '
git archive HEAD >future.tar
'
test_expect_success TAR_HUGE,TIME_IS_64BIT,TIME_T_IS_64BIT 'system tar can read our future mtime' '
echo 4147 >expect &&
tar_info future.tar | cut -d" " -f2 >actual &&
test_cmp expect actual
'
test_done
| abg1979/git | t/t5000-tar-tree.sh | Shell | gpl-2.0 | 12,696 |
#!/bin/sh -xe
[ "$#" -lt 2 ] && echo "Usage: sign_app.sh <app> <identity> <team_identifier>" && exit
src_app="$1"
identity="$2"
team_identifier="$3"
codesign -s "$identity" --force --preserve-metadata=entitlements --verbose=4 --deep "$src_app"
# Verify the signature
spctl -a -t exec -vv $src_app
codesign -dv $src_app
# Validate that the key used for signing the binary matches the expected TeamIdentifier
# needed to pass the SocketApi through the sandbox
codesign -dv $src_app 2>&1 | grep "TeamIdentifier=$team_identifier"
exit $?
| cketti/client | admin/osx/sign_app.sh | Shell | gpl-2.0 | 538 |
#!/bin/bash
lib=$(dirname $0)/lib
$lib/ucalls.py -l python "$@"
| romain-intel/bcc | tools/pythoncalls.sh | Shell | apache-2.0 | 64 |
#!/bin/bash
ant clean
ant
ant
| phalax4/CarnotKE | jyhton/build.sh | Shell | apache-2.0 | 30 |
#!/bin/bash
# Initial housekeeping
export DEBIAN_FRONTEND=noninteractive
# Add the PPA repository for LXD/LXC stable
if [[ ! -e /etc/apt/sources.list.d/ubuntu-lxc-lxd-stable-trusty.list ]]; then
sudo add-apt-repository -y ppa:ubuntu-lxc/lxd-stable
fi
# Update package list
sudo apt-get update
# Install LXC/LXD if not already installed
if [[ ! -e /usr/bin/lxd ]]; then
sudo apt-get -y install lxd
fi
| devendermishrajio/learning-tools | lxd/setup.sh | Shell | mit | 413 |
#!/bin/sh
exec /usr/bin/g++ "$@"
| justinmuller/buck | test/com/facebook/buck/cxx/testdata/step_test/cxx.sh | Shell | apache-2.0 | 33 |
#!/bin/bash -e
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Script to install everything needed to build chromium on android, including
# items requiring sudo privileges.
# See http://code.google.com/p/chromium/wiki/AndroidBuildInstructions
# This script installs the sun-java6 packages (bin, jre and jdk). Sun requires
# a license agreement, so upon installation it will prompt the user. To get
# past the curses-based dialog press TAB <ret> TAB <ret> to agree.
args="$@"
if ! uname -m | egrep -q "i686|x86_64"; then
echo "Only x86 architectures are currently supported" >&2
exit
fi
# Install first the default Linux build deps.
"$(dirname "${BASH_SOURCE[0]}")/install-build-deps.sh" \
--no-syms --lib32 --no-arm --no-chromeos-fonts --no-nacl --no-prompt "${args}"
lsb_release=$(lsb_release --codename --short)
# The temporary directory used to store output of update-java-alternatives
TEMPDIR=$(mktemp -d)
cleanup() {
local status=${?}
trap - EXIT
rm -rf "${TEMPDIR}"
exit ${status}
}
trap cleanup EXIT
# Fix deps
sudo apt-get -f install
# Install deps
# This step differs depending on what Ubuntu release we are running
# on since the package names are different, and Sun's Java must
# be installed manually on late-model versions.
# common
sudo apt-get -y install lighttpd python-pexpect xvfb x11-utils
# Some binaries in the Android SDK require 32-bit libraries on the host.
# See https://developer.android.com/sdk/installing/index.html?pkg=tools
if [[ $lsb_release == "precise" ]]; then
sudo apt-get -y install ia32-libs
else
sudo apt-get -y install libncurses5:i386 libstdc++6:i386 zlib1g:i386
fi
sudo apt-get -y install ant
# Install openjdk and openjre 7 stuff
sudo apt-get -y install openjdk-7-jre openjdk-7-jdk
# Switch version of Java to openjdk 7.
# Some Java plugins (e.g. for firefox, mozilla) are not required to build, and
# thus are treated only as warnings. Any errors in updating java alternatives
# which are not '*-javaplugin.so' will cause errors and stop the script from
# completing successfully.
if ! sudo update-java-alternatives -s java-1.7.0-openjdk-amd64 \
>& "${TEMPDIR}"/update-java-alternatives.out
then
# Check that there are the expected javaplugin.so errors for the update
if grep 'javaplugin.so' "${TEMPDIR}"/update-java-alternatives.out >& \
/dev/null
then
# Print as warnings all the javaplugin.so errors
echo 'WARNING: java-6-sun has no alternatives for the following plugins:'
grep 'javaplugin.so' "${TEMPDIR}"/update-java-alternatives.out
fi
# Check if there are any errors that are not javaplugin.so
if grep -v 'javaplugin.so' "${TEMPDIR}"/update-java-alternatives.out \
>& /dev/null
then
# If there are non-javaplugin.so errors, treat as errors and exit
echo 'ERRORS: Failed to update alternatives for java-6-sun:'
grep -v 'javaplugin.so' "${TEMPDIR}"/update-java-alternatives.out
exit 1
fi
fi
echo "install-build-deps-android.sh complete."
| hujiajie/chromium-crosswalk | build/install-build-deps-android.sh | Shell | bsd-3-clause | 3,109 |
#!/bin/sh
# script to determine git hash of current source tree
# try to use whatever git tells us if there is a .git folder
if [ -d .git -a -r .git ]
then
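# First line of "git log" is "commit <sha>"; strip everything before the last space to keep just the hash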
hash=$(git log 2>/dev/null | head -n1 2>/dev/null | sed "s/.* //" 2>/dev/null)
fi
if [ x"$hash" != x ]
then
echo $hash
else
echo "UNKNOWN"
fi
exit 0
| gawen947/wsn-tools | hash.sh | Shell | gpl-3.0 | 309 |
echo \[omod-if-array.sh\]: test omod-if-array via udp
$srcdir/killrsyslog.sh # kill rsyslogd if it runs for some reason
./nettester -tomod-if-array -iudp -p4711
if [ "$?" -ne "0" ]; then
exit 1
fi
echo test omod-if-array via tcp
./nettester -tomod-if-array -itcp
if [ "$?" -ne "0" ]; then
exit 1
fi
| fastly/rsyslog | tests/omod-if-array.sh | Shell | gpl-3.0 | 306 |
#!/bin/bash
function printEnv()
{
if [ $# -ne 1 ]; then
echo "[!] Invalid invocation; need a parameter"
return 1;
fi
eval VALUE=\$$1
printf "%-30s: " $1
if test x"$VALUE" == x; then
echo "default"
else
echo $VALUE;
fi
}
printEnv XRDTEST_MAINSERVERURL
printEnv XRDTEST_DISKSERVERURL
printEnv XRDTEST_DATAPATH
printEnv XRDTEST_LOCALFILE
printEnv XRDTEST_REMOTEFILE
printEnv XRDTEST_MULTIIPSERVERURL
| alja/xrootd | tests/XrdClTests/printenv.sh | Shell | gpl-3.0 | 430 |
#!/bin/bash
# \author Hans J. Johnson
#
# Script to process a directory to replace
# outdated macro names with their modern
# conformant names
function ReplaceCXXString()
{
oldstring="$1"
newstring="$2"
# NOTE: Skip processing this file
# NOTE: Skip processing the Migration directory in ITK
git grep -l "${oldstring}" | \
fgrep -v ReplaceOutdatedMacroNames.sh | \
fgrep -v Migration | \
fgrep -v ReplaceITK_NULLPTRMacroNames.sh | \
fgrep -v itk_compiler_detection.h | fgrep -v CMakeLists.txt |fgrep -v .cmake | \
xargs sed -i '' -e "s/${oldstring}/${newstring}/g"
git add -A
git commit -m"COMP: Use C++11 ${newstring} directly
git grep -l \"${oldstring}\" | \
fgrep -v itk_compiler_detection.h | fgrep -v CMakeLists.txt |fgrep -v .cmake | \
xargs sed -i '' -e \"s/${oldstring}/${newstring}/g\"
"
echo "WARNING: This script is not intended to be bullet-proof."
echo "WARNING: Please carefully review all changes made to ensure proper behavior."
}
ReplaceCXXString ITK_NULLPTR nullptr
| hjmjohnson/ITK | Utilities/ITKv5Preparation/ReplaceITK_NULLPTRMacroNames.sh | Shell | apache-2.0 | 1,007 |
# Put node-build on PATH
export PATH=<%= scope.lookupvar("::nodejs::build::prefix") %>/bin:$PATH
# Configure NODENV_ROOT and put NODENV_ROOT/bin on PATH
export NODENV_ROOT=<%= scope.lookupvar("::nodejs::nodenv::prefix") %>
export PATH=$NODENV_ROOT/bin:$PATH
# Load nodenv
eval "$(nodenv init -)"
# Helper for shell prompts and the like
current_node() {
echo "$(nodenv version-name)"
}
| hirocaster/puppet-nodejs | templates/nodejs.sh | Shell | mit | 390 |
#!/bin/sh
. /lib/functions.sh
. ../netifd-proto.sh
init_proto "$@"
proto_vpnc_init_config() {
proto_config_add_string "server"
proto_config_add_string "username"
proto_config_add_string "hexpasswd"
proto_config_add_string "authgroup"
proto_config_add_string "password"
proto_config_add_string "token_mode"
proto_config_add_string "token_secret"
proto_config_add_string "interface"
proto_config_add_string "passgroup"
proto_config_add_string "hexpassgroup"
proto_config_add_string "domain"
proto_config_add_string "vendor"
proto_config_add_string "natt_mode"
proto_config_add_string "dh_group"
proto_config_add_string "pfs"
proto_config_add_boolean "enable_single_des"
proto_config_add_boolean "enable_no_enc"
proto_config_add_int "mtu"
proto_config_add_string "local_addr"
proto_config_add_int "local_port"
proto_config_add_int "udp_port"
proto_config_add_int "dpd_idle"
proto_config_add_string "auth_mode"
proto_config_add_string "target_network"
proto_config_add_boolean "authfail"
no_device=1
available=1
}
proto_vpnc_setup() {
local config="$1"
json_get_vars server username hexpasswd authgroup password token_mode token_secret interface passgroup hexpassgroup domain vendor natt_mode dh_group pfs enable_single_des enable_no_enc mtu local_addr local_port udp_port dpd_idle auth_mode target_network authfail
grep -q tun /proc/modules || insmod tun
logger -t vpnc "initializing..."
serv_addr=
for ip in $(resolveip -t 10 "$server"); do
( proto_add_host_dependency "$config" "$ip" $interface )
serv_addr=1
done
[ -n "$serv_addr" ] || {
logger -t vpnc "Could not resolve server address: '$server'"
sleep 60
proto_setup_failed "$config"
exit 1
}
mkdir -p /var/etc
umask 077
pwfile="/var/etc/vpnc-$config.conf"
echo "IPSec gateway $server" > "$pwfile"
cmdline="--no-detach --pid-file /var/run/vpnc-$config.pid --ifname vpn-$config --non-inter --script /lib/netifd/vpnc-script $pwfile"
[ -f /etc/vpnc/ca-vpn-$config.pem ] && echo "CA-File /etc/vpnc/ca-vpn-$config.pem" >> "$pwfile"
[ -n "$hexpasswd" ] && echo "Xauth obfuscated password $hexpasswd" >> "$pwfile"
[ -n "$authgroup" ] && echo "IPSec ID $authgroup" >> "$pwfile"
[ -n "$username" ] && echo "Xauth username $username" >> "$pwfile"
[ -n "$password" ] && echo "Xauth password $password" >> "$pwfile"
[ -n "$passgroup" ] && echo "IPSec secret $passgroup" >> "$pwfile"
[ -n "$hexpassgroup" ] && echo "IPSec obfuscated secret $hexpassgroup" >> "$pwfile"
[ -n "$domain" ] && echo "Domain $domain" >> "$pwfile"
[ -n "$vendor" ] && echo "Vendor $vendor" >> "$pwfile"
[ -n "$natt_mode" ] && echo "NAT Traversal Mode $natt_mode" >> "$pwfile"
[ -n "$dh_group" ] && echo "IKE DH Group $dh_group" >> "$pwfile"
[ -n "$pfs" ] && echo "Perfect Forward Secrecy $pfs" >> "$pwfile"
[ "${enable_single_des:-0}" -gt 0 ] && echo "Enable Single DES" >> "$pwfile"
[ "${enable_no_enc:-0}" -gt 0 ] && echo "Enable no encryption" >> "$pwfile"
[ -n "$mtu" ] && echo "Interface MTU $mtu" >> "$pwfile"
[ -n "$local_addr" ] && echo "Local Addr $local_addr" >> "$pwfile"
[ -n "$local_port" ] && echo "Local Port $local_port" >> "$pwfile"
[ -n "$udp_port" ] && echo "Cisco UDP Encapsulation Port $udp_port" >> "$pwfile"
[ -n "$dpd_idle" ] && echo "DPD idle timeout (our side) $dpd_idle" >> "$pwfile"
[ -n "$auth_mode" ] && echo "IKE Authmode $auth_mode" >> "$pwfile"
[ -n "$target_network" ] && echo "IPSEC target network $target_network" >> "$pwfile"
proto_export INTERFACE="$config"
logger -t vpnc "executing 'vpnc $cmdline'"
proto_run_command "$config" /usr/sbin/vpnc $cmdline
}
proto_vpnc_teardown() {
local config="$1"
pwfile="/var/etc/vpnc-$config.conf"
json_get_var authfail authfail
# On error exit (vpnc only has success = 0 and error = 1, so
# we can't be fine-grained and say only auth error)
# and authfail setting true, then don't retry starting vpnc
# This is used for the case were the server blocks repeated
# failed authentication attempts (which will occur if the password
# is wrong, for example).
if [ ${ERROR:-0} -gt 0 ] && [ "${authfail:-0}" -gt 0 ]; then
proto_block_restart "$config"
fi
rm -f $pwfile
logger -t vpnc "bringing down vpnc"
proto_kill_command "$config" 2
}
add_protocol vpnc
| kuoruan/lede-packages | net/vpnc/files/vpnc.sh | Shell | gpl-2.0 | 4,244 |
#!/bin/bash
set -e
declare -a arr=("./consensus" "./core" "./events" "./examples" "./membersrvc" "./peer" "./protos")
for i in "${arr[@]}"
do
OUTPUT="$(goimports -l $i)"
if [[ $OUTPUT ]]; then
echo "The following files contain goimports errors"
echo $OUTPUT
echo "The goimports command must be run for these files"
exit 1
fi
done
| andresgaragiola/fabric | scripts/goimports.sh | Shell | apache-2.0 | 344 |
#!/bin/bash
mkdir -p $PREFIX/bin
mkdir -p $PREFIX/include
mkdir -p $PREFIX/lib
mkdir -p $PREFIX/share/man/man1/
mv bin/* $PREFIX/bin/
mv include/wkhtmltox/ $PREFIX/include/
mv lib/* $PREFIX/lib/
mv share/man/man1/* $PREFIX/share/man/man1/
| dmaticzka/bioconda-recipes | recipes/wkhtmltopdf/build.sh | Shell | mit | 242 |
#!/bin/bash
DIR=$(cd $(dirname "$0"); pwd)
BIN=$DIR"/../../bin"
ETC=$DIR"/../../etc/test_stack2"
echo "Stopping reactionner"
kill `cat $DIR/../../var/reactionnerd-2.pid`
| rledisez/shinken | test/bin/test_stack2/stop_reactionner2.sh | Shell | agpl-3.0 | 172 |
#!/bin/sh
#
# arch/arm26/boot/install.sh
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
# Copyright (C) 1995 by Linus Torvalds
#
# Adapted from code in arch/i386/boot/Makefile by H. Peter Anvin
# Adapted from code in arch/i386/boot/install.sh by Russell King
# Stolen from arm32 by Ian Molton
#
# "make install" script for arm architecture
#
# Arguments:
# $1 - kernel version
# $2 - kernel image file
# $3 - kernel map file
# $4 - default install path (blank if root directory)
#
# User may have a custom install script
if [ -x /sbin/installkernel ]; then
exec /sbin/installkernel "$@"
fi
if [ "$2" = "zImage" ]; then
# Compressed install
echo "Installing compressed kernel"
if [ -f $4/vmlinuz-$1 ]; then
mv $4/vmlinuz-$1 $4/vmlinuz.old
fi
if [ -f $4/System.map-$1 ]; then
mv $4/System.map-$1 $4/System.old
fi
cat $2 > $4/vmlinuz-$1
cp $3 $4/System.map-$1
else
# Normal install
echo "Installing normal kernel"
if [ -f $4/vmlinux-$1 ]; then
mv $4/vmlinux-$1 $4/vmlinux.old
fi
if [ -f $4/System.map ]; then
mv $4/System.map $4/System.old
fi
cat $2 > $4/vmlinux-$1
cp $3 $4/System.map
fi
if [ -x /sbin/loadmap ]; then
/sbin/loadmap --rdev /dev/ima
else
echo "You have to install it yourself"
fi
| foxsat-hdr/linux-kernel | arch/arm26/boot/install.sh | Shell | gpl-2.0 | 1,386 |
#!/bin/bash
#
# Copyright 2007 IBM
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# test_filecaps.sh - Run the file capabilities test suite.
# Must be root to run the containers testsuite
if [ $UID != 0 ]
then
echo "FAILED: Must be root to execute this script"
exit 1
fi
# set the LTPROOT directory
cd `dirname $0`
LTPROOT=${PWD}
echo $LTPROOT | grep testscripts > /dev/null 2>&1
if [ $? -eq 0 ]
then
cd ..
LTPROOT=${PWD}
fi
# set the PATH to include testcase/bin
export PATH=$PATH:/usr/sbin:$LTPROOT/testcases/bin
export LTPBIN=$LTPROOT/testcases/bin
# We will store the logfiles in $LTPROOT/results, so make sure
# it exists.
if [ ! -d $LTPROOT/results ]
then
mkdir $LTPROOT/results
fi
# Check the role and mode testsuite is being executed under.
echo "Running the file capabilities testsuite..."
$LTPROOT/bin/ltp-pan -S -a $LTPROOT/results/filecaps -n ltp-filecaps -l $LTPROOT/results/filecaps.logfile -o $LTPROOT/results/filecaps.outfile -p -f $LTPROOT/runtest/filecaps
echo "Done."
exit 0
| sunyuan3/ltp | testscripts/test_filecaps.sh | Shell | gpl-2.0 | 1,226 |
#!/bin/bash
STARTTIME=$(date +%s)
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
os::build::setup_env
EXAMPLES=examples
OUTPUT_PARENT=${OUTPUT_ROOT:-$OS_ROOT}
pushd vendor/github.com/jteeuwen/go-bindata > /dev/null
go install ./...
popd > /dev/null
os::util::ensure::gopath_binary_exists 'go-bindata'
pushd "${OS_ROOT}" > /dev/null
"$(os::util::find::gopath_binary go-bindata)" \
-nocompress \
-nometadata \
-prefix "bootstrap" \
-pkg "bootstrap" \
-o "${OUTPUT_PARENT}/pkg/bootstrap/bindata.go" \
-ignore "README.md" \
-ignore ".*\.go$" \
-ignore "\.DS_Store" \
-ignore application-template.json \
${EXAMPLES}/image-streams/... \
${EXAMPLES}/db-templates/... \
${EXAMPLES}/jenkins \
${EXAMPLES}/jenkins/pipeline \
${EXAMPLES}/quickstarts/... \
${EXAMPLES}/logging/... \
${EXAMPLES}/heapster/... \
${EXAMPLES}/prometheus/... \
pkg/image/admission/imagepolicy/api/v1/...
"$(os::util::find::gopath_binary go-bindata)" \
-nocompress \
-nometadata \
-prefix "testextended" \
-pkg "testdata" \
-o "${OUTPUT_PARENT}/test/extended/testdata/bindata.go" \
-ignore "\.DS_Store" \
-ignore ".*\.(go|md)$" \
test/extended/testdata/... \
test/integration/testdata \
examples/db-templates \
examples/image-streams \
examples/sample-app \
examples/hello-openshift \
examples/jenkins/...
popd > /dev/null
# If you hit this, please reduce other tests instead of importing more
if [[ "$( cat "${OUTPUT_PARENT}/test/extended/testdata/bindata.go" | wc -c )" -gt 650000 ]]; then
echo "error: extended bindata is $( cat "${OUTPUT_PARENT}/test/extended/testdata/bindata.go" | wc -c ) bytes, reduce the size of the import" 1>&2
exit 1
fi
ret=$?; ENDTIME=$(date +%s); echo "$0 took $(($ENDTIME - $STARTTIME)) seconds"; exit "$ret"
| tmckayus/oshinko-cli | vendor/github.com/openshift/origin/cmd/service-catalog/go/src/github.com/kubernetes-incubator/service-catalog/hack/update-generated-bindata.sh | Shell | apache-2.0 | 1,844 |
#!/bin/sh
cd ..
. ./setEnv.sh
RUN_CMD="$JAVA_HOME/bin/java $MEM_ARGS -cp $CLASSPATH twitter4j.examples.tweets.GetRetweets"
echo $RUN_CMD ${1+"$@"}
exec $RUN_CMD ${1+"$@"}
| jonathanmcelroy/DataCommunicationsProgram456 | twitter4j/bin/tweets/getRetweets.sh | Shell | gpl-2.0 | 171 |
#! /bin/bash
# Update git submodules
git submodule init
git submodule update
# Set up the android environment
source tools/android/setup.sh
function run_tests() {
./run-tests.sh \
-b Remote \
--remote-executor http://localhost:9515 \
--remote-caps="chromeOptions=androidPackage=$CHROME_APP" \
--load-list load-list.txt \
--verbose || exit 1
}
# We split the test runs into two groups to avoid running out of memory in Travis.
echo "^[a].*" > load-list.txt
run_tests
echo "^[^a].*" > load-list.txt
run_tests
echo "Run $ANDROID_DIR/stop.sh if finished."
| xasos/Cordova-Polymer-Seed | www/bower_components/web-animations-js/run-tests-android.sh | Shell | mit | 579 |
#!/bin/bash
cd /vagrant
composer.phar install
if [ -f database.sql.gz ]; then
zcat database.sql.gz | mysql -uroot -pvagrant readingcorner
else
app/console doctrine:schema:create
app/console doctrine:fixtures:load -n
fi
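# Use the current year, stepping back one year if it is even, so the calendar command always gets an odd year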
YEAR=`date +'%Y'`
MOD=$(( $YEAR % 2 ))
if [ $MOD -eq 0 ]; then
let YEAR=$YEAR-1
fi
app/console readingcorner:calendar ${YEAR}
| ReadingCorner/ReadingCorner | shell_provisioner/module/readingcorner.sh | Shell | isc | 366 |
#!/bin/sh
# arangodb connection string shell variables default used if not set
# server-name ar-server
# username ARUSR default aruser
# user passwd ARPWD postgres password if stop user password not in ~/.arpass file
# database name ARDBN default testdb
# database passwd ARPASSWORD default 'pleasechangeme'
#ARUSR=raven
# get passwords and set default values for shell variables
. ./ar-env.sh
USR=${1:-${ARUSR}}
if [ "x"${USR} = "x"${ARUSR} ]; then
echo "User is administrator ${ARUSR}"
exit 1
fi
# Stop unless ~/.aqlpass file permissions are 0600
if [ "x"$(stat -c %a ${HOME}/.aqlpass) != "x600" ]
then
echo "Set permission on ~/.aqlpass to 0600 or nosuch file"
exit 1
fi
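# Look up this user's entry in the JSON ~/.aqlpass file; empty output means no stored password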
PWD=$(jq -r '.'${USR}' | select(. != null)' ${HOME}/.aqlpass)
if [ "x"${PWD} = "x" ]
then
echo "No password set in the .aqlpass file for user ${USR}"
exit 1
fi
# drop user
node <<@@EOF1
request = require('request');
function deleteuser() {
request.delete('http://root:${ARPASSWORD}@${ARSVR}:8529/_api/user/${USR}')
}
deleteuser();
@@EOF1
| guidoeco/docker | arangodb/dropuser.sh | Shell | mit | 1,064 |
#!/bin/bash
#
# simple build all and test testclient against inmemory ES script
set -eu
set -o pipefail
cargo test --quiet --all
cd testclient
bash ./test_with_inmemory_es.bash
| koivunej/eventstore-tcp | smoke_test.bash | Shell | mit | 180 |
#!/bin/bash
if [ -z "$4" ]
then
echo "No folder supplied!"
echo "Usage: bash `basename "$0"` imagenet_folder imagenet_annotations_folder alov_videos_folder alov_annotations_folder"
exit
fi
GPU_ID=0
FOLDER=GOTURN1
RANDOM_SEED=800
echo FOLDER: $FOLDER
VIDEOS_FOLDER_IMAGENET=$1
ANNOTATIONS_FOLDER_IMAGENET=$2
VIDEOS_FOLDER=$3
ANNOTATIONS_FOLDER=$4
SOLVER=nets/solver.prototxt
TRAIN_PROTO=nets/tracker.prototxt
CAFFE_MODEL=nets/models/weights_init/tracker_init.caffemodel
BASEDIR=nets
RESULT_DIR=$BASEDIR/results/$FOLDER
SOLVERSTATE_DIR=$BASEDIR/solverstate/$FOLDER
#Make folders to store results and snapshots
mkdir -p $RESULT_DIR
mkdir -p $SOLVERSTATE_DIR
#Modify solver to save snapshot in SOLVERSTATE_DIR
mkdir -p nets/solver_temp
SOLVER_TEMP=nets/solver_temp/solver_temp_$FOLDER.prototxt
sed s#SOLVERSTATE_DIR#$SOLVERSTATE_DIR# <$SOLVER >$SOLVER_TEMP
sed -i s#TRAIN_FILE#$TRAIN_PROTO# $SOLVER_TEMP
sed -i s#DEVICE_ID#$GPU_ID# $SOLVER_TEMP
sed -i s#RANDOM_SEED#$RANDOM_SEED# $SOLVER_TEMP
LAMBDA_SHIFT=5
LAMBDA_SCALE=15
MIN_SCALE=-0.4
MAX_SCALE=0.4
echo LAMBDA_SCALE: $LAMBDA_SCALE
echo LAMBDA_SHIFT: $LAMBDA_SHIFT
build/train $VIDEOS_FOLDER_IMAGENET $ANNOTATIONS_FOLDER_IMAGENET $VIDEOS_FOLDER $ANNOTATIONS_FOLDER $CAFFE_MODEL $TRAIN_PROTO $SOLVER_TEMP $LAMBDA_SHIFT $LAMBDA_SCALE $MIN_SCALE $MAX_SCALE $GPU_ID $RANDOM_SEED 2> $RESULT_DIR/results.txt
| shuochen99/goturn | scripts/train.sh | Shell | mit | 1,376 |
export VOLTA_HOME="$HOME/.volta"
[ -s "$VOLTA_HOME/load.sh" ] && . "$VOLTA_HOME/load.sh"
export PATH="$VOLTA_HOME/bin:$PATH"
| sivakumar-kailasam/dotfiles | node/path.zsh | Shell | mit | 126 |
#!/bin/bash
exec /usr/sbin/squid3 -N -d 0 -f /etc/squid/squid.conf
| CosmicQ/docker-squid | start_squid.sh | Shell | mit | 68 |
#! /bin/bash
echo "This is a placeholder script"
| MarkEWaite/JENKINS-22457-included-region-ignored | build/build-repo.sh | Shell | mit | 50 |
#!/bin/bash
echo '<html>
<head>
<title>pouzivatelia</title>
</head>
<body>
<table border="1">
<tr>
<td><h2>login</h2></td><td><h2>passwd</h2></td><td><h2>uid</h2></td><td><h2>gid</h2></td><td><h2>meno</h2></td><td><h2>home</h2></td><td><h2>shell</h2></td>
</tr>'
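# Emit one table row per /etc/passwd entry, splitting records on newlines and fields on ":"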
USERS=$(cat /etc/passwd)
IFS=$'\n'
for r in ${USERS}
do
echo '<tr>'
IFS=':'
for i in ${r}
do
echo '<td>'${i}'</td>'
done
echo '</tr>'
done
echo '</table>
</body>
</html>'
exit
| spacive/frihttp | www/bash/users.bash | Shell | mit | 449 |
if [ "$(uname -s)" = "Linux" ]; then
alias pbcopy='xsel --clipboard --input'
alias pbpaste='xsel --clipboard --output'
fi
| jescholl/dotfiles | linux/aliases.zsh | Shell | mit | 126 |
#!/bin/sh
echo "Fail, stdout"
echo "Fail, stderr" 1>&2
exit 1
| github/octocatalog-diff | spec/octocatalog-diff/fixtures/repos/bootstrap/config/broken-bootstrap.sh | Shell | mit | 63 |
#!/bin/bash -e
export COMPONENT="db"
export API_IMAGE="$PRIVATE_IMAGE_REGISTRY/api:$RELEASE"
export LOGS_FILE="$RUNTIME_DIR/logs/$COMPONENT.log"
## Write logs of this script to component specific file
exec &> >(tee -a "$LOGS_FILE")
__validate_api_envs() {
__process_msg "Initializing api environment variables"
__process_msg "CONFIG_DIR: $CONFIG_DIR"
__process_msg "RELEASE: $RELEASE"
__process_msg "API_IMAGE: $API_IMAGE"
__process_msg "DBNAME: $DBNAME"
__process_msg "DBUSERNAME: $DBUSERNAME"
__process_msg "DBPASSWORD: $DBPASSWORD"
__process_msg "DBHOST: $DBHOST"
__process_msg "DBPORT: $DBPORT"
__process_msg "DBDIALECT: $DBDIALECT"
__process_msg "LOGS_FILE: $LOGS_FILE"
if [ "$ACCESS_KEY" == "" ]; then
__process_error "Access key not present, exiting"
exit 1
else
__process_msg "ACCESS_KEY: $ACCESS_KEY"
fi
if [ "$SECRET_KEY" == "" ]; then
__process_error "Secret key not present, exiting"
exit 1
else
__process_msg "SECRET_KEY: $SECRET_KEY"
fi
}
__docker_login() {
__process_msg "Updating docker credentials to pull Shippable images"
local credentials_template="$SCRIPTS_DIR/configs/credentials.template"
local credentials_file="/tmp/credentials"
sed "s#{{ACCESS_KEY}}#$ACCESS_KEY#g" $credentials_template > $credentials_file
sed -i "s#{{SECRET_KEY}}#$SECRET_KEY#g" $credentials_file
mkdir -p ~/.aws
mv -v $credentials_file ~/.aws
local docker_login_cmd=$(aws ecr --region us-east-1 get-login)
__process_msg "Docker login generated, logging into ecr"
eval "$docker_login_cmd"
}
__run_api() {
__process_msg "Running api container"
local run_cmd="sudo docker run \
-d \
-e DBNAME=$DBNAME \
-e DBUSERNAME=$DBUSERNAME \
-e DBPASSWORD=$DBPASSWORD \
-e DBHOST=$DBHOST \
-e DBPORT=$DBPORT \
-e DBDIALECT=$DBDIALECT \
--net=host \
--privileged=true \
--name=fakeapi \
$API_IMAGE
"
eval "$run_cmd"
__process_msg "API container started"
}
__check_api() {
__process_msg "Checking API container status"
local interval=3
local timeout=180
local counter=0
local is_booted=false
while [ $is_booted != true ] && [ $counter -lt $timeout ]; do
local running_api_container=$(sudo docker ps | \
grep fakeapi | awk '{print $1}')
if [ "$running_api_container" != "" ]; then
__process_msg "Waiting fifteen seconds before stopping API container"
is_booted=true
sleep 15
# Check if it's still running
local api_container=$(sudo docker ps | grep fakeapi | awk '{print $1}')
if [ "$api_container" != "" ]; then
__process_msg "Stopping API container"
sudo docker stop -t=0 $api_container
fi
sudo docker rm $running_api_container
else
local exited_api_container=$(sudo docker ps -a --filter status=exited | \
grep fakeapi | awk '{print $1}')
if [ "$exited_api_container" != "" ]; then
__process_msg "Removing API container"
sudo docker rm $exited_api_container
is_booted=true
else
let "counter = $counter + $interval"
sleep $interval
fi
fi
done
if [ $is_booted = false ]; then
__process_error "Failed to boot api container"
exit 1
fi
}
main() {
__process_marker "Booting fake api to generate models"
__validate_api_envs
__docker_login
__run_api
__check_api
__process_msg "Started API container successfully"
}
main
| stephanielingwood/admiral | common/scripts/docker/startFakeAPI.sh | Shell | mit | 3,445 |
#!/bin/sh
# Compile example.ui for PyQt.
pyuic5 --from-imports example.ui > example_pyqt5_ui.py
| mstuttgart/qdarkgray-stylesheet | example/ui/compile_ui.sh | Shell | mit | 97 |
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#network interface on which to limit traffic
IF="eth0"
#limit of the network interface in question
LINKCEIL="1gbit"
#limit outbound Ion protocol traffic to this rate
LIMIT="160kbit"
#defines the IPv4 address space for which you wish to disable rate limiting
LOCALNET_V4="192.168.0.0/16"
#defines the IPv6 address space for which you wish to disable rate limiting
LOCALNET_V6="fe80::/10"
#delete existing rules
tc qdisc del dev ${IF} root
#add root class
tc qdisc add dev ${IF} root handle 1: htb default 10
#add parent class
tc class add dev ${IF} parent 1: classid 1:1 htb rate ${LINKCEIL} ceil ${LINKCEIL}
#add our two classes. one unlimited, another limited
tc class add dev ${IF} parent 1:1 classid 1:10 htb rate ${LINKCEIL} ceil ${LINKCEIL} prio 0
tc class add dev ${IF} parent 1:1 classid 1:11 htb rate ${LIMIT} ceil ${LIMIT} prio 1
#add handles to our classes so packets marked with <x> go into the class with "... handle <x> fw ..."
tc filter add dev ${IF} parent 1: protocol ip prio 1 handle 1 fw classid 1:10
tc filter add dev ${IF} parent 1: protocol ip prio 2 handle 2 fw classid 1:11
if [ ! -z "${LOCALNET_V6}" ] ; then
# v6 cannot have the same priority value as v4
tc filter add dev ${IF} parent 1: protocol ipv6 prio 3 handle 1 fw classid 1:10
tc filter add dev ${IF} parent 1: protocol ipv6 prio 4 handle 2 fw classid 1:11
fi
#delete any existing rules
#disable for now
#ret=0
#while [ $ret -eq 0 ]; do
# iptables -t mangle -D OUTPUT 1
# ret=$?
#done
#limit outgoing traffic to and from port 12700. but not when dealing with a host on the local network
# (defined by $LOCALNET_V4 and $LOCALNET_V6)
# --set-mark marks packets matching these criteria with the number "2" (v4)
# --set-mark marks packets matching these criteria with the number "4" (v6)
# these packets are filtered by the tc filter with "handle 2"
# this filter sends the packets into the 1:11 class, and this class is limited to ${LIMIT}
iptables -t mangle -A OUTPUT -p tcp -m tcp --dport 12700 ! -d ${LOCALNET_V4} -j MARK --set-mark 0x2
iptables -t mangle -A OUTPUT -p tcp -m tcp --sport 12700 ! -d ${LOCALNET_V4} -j MARK --set-mark 0x2
if [ ! -z "${LOCALNET_V6}" ] ; then
ip6tables -t mangle -A OUTPUT -p tcp -m tcp --dport 12700 ! -d ${LOCALNET_V6} -j MARK --set-mark 0x4
ip6tables -t mangle -A OUTPUT -p tcp -m tcp --sport 12700 ! -d ${LOCALNET_V6} -j MARK --set-mark 0x4
fi
| aspaas/ion | contrib/qos/tc.sh | Shell | mit | 2,565 |
#!/bin/sh
# This script cross-compiles binaries for various platforms
# Download our release binary builder
go get -u github.com/mitchellh/gox
# Specify platforms and release version
PLATFORMS="linux/amd64 linux/386 darwin/386 windows/amd64 windows/386"
RELEASE=$(git describe --tags)
echo "Building release $RELEASE"
# Build Inertia Go binaries for specified platforms
gox -output="cumulus.$(git describe --tags).{{.OS}}.{{.Arch}}" \
-osarch="$PLATFORMS" \
| ubclaunchpad/cumulus | .scripts/release.sh | Shell | mit | 465 |
#!/bin/bash
info () {
printf "\r [ \033[00;34m>>\033[0m ] $1\n"
}
success () {
printf "\r\033[2K [ \033[00;32mOK\033[0m ] $1\n"
}
fail () {
printf "\r\033[2K [\033[0;31mFAIL\033[0m] $1\n"
echo ''
exit
}
| davidochoa/dotfiles | setup/_func-print.sh | Shell | mit | 219 |
#!/usr/bin/env bash
autoreconf -fi --warning=no-portability
| openvenues/jpostal | bootstrap.sh | Shell | mit | 59 |
./mak.sh && ./server
| jonaslu/thatswhatsup | c/wayland/compositor/run.sh | Shell | mit | 21 |
#!/bin/bash
# Automatically run Ruby scripts with "bundle exec" (but only when appropriate).
# http://effectif.com/ruby/automating-bundle-exec
# Github: https://github.com/gma/bundler-exec
## Functions
bundler-installed()
{
which bundle > /dev/null 2>&1
}
within-bundled-project()
{
local dir="$(pwd)"
while [ "$(dirname $dir)" != "/" ]; do
[ -f "$dir/Gemfile" ] && return
dir="$(dirname $dir)"
done
false
}
run-with-bundler()
{
if bundler-installed && within-bundled-project; then
if [ $1 == "ruby" ]; then
ruby -rbundler/setup "$@"
else
bundle exec "$@"
fi
else
"$@"
fi
}
## Main program
BUNDLED_COMMANDS="${BUNDLED_COMMANDS:-
cap
capify
cucumber
foreman
guard
haml
heroku
html2haml
jasmine
rackup
rails
rake
rake2thor
rspec
sass
sass-convert
serve
shotgun
spec
spork
thin
thor
tilt
tt
turn
unicorn
unicorn_rails
}"
for CMD in $BUNDLED_COMMANDS; do
if [[ $CMD != "bundle" && $CMD != "gem" ]]; then
alias $CMD="run-with-bundler $CMD"
fi
done
| tomichj/dotfiles | ruby/bundler-exec.bash | Shell | mit | 1,071 |
#!/usr/bin/env bash
# add 'x' for command tracing
set -eu
#-------------------------------------------------------------------------------
#
# Utilities
#
# For builds not triggered by a pull request TRAVIS_BRANCH is the name of the
# branch currently being built; whereas for builds triggered by a pull request
# it is the name of the branch targeted by the pull request (in many cases this
# will be master).
MAIN_BRANCH="0"
if [[ $TRAVIS_BRANCH == "master" || $TRAVIS_BRANCH == "develop" ]]; then
MAIN_BRANCH="1"
fi
if [[ "${BEAST_RETRY}" == "true" ]]; then
JOBS=1
elif [[ "${TRAVIS}" == "true" ]]; then
JOBS="2"
elif [[ $(uname -s) == "Linux" ]]; then
# Physical cores
JOBS=$(lscpu -p | grep -v '^#' | sort -u -t, -k 2,4 | wc -l)
elif [[ $(uname) == "Darwin" ]]; then
# Physical cores
JOBS=$(sysctl -n hw.physicalcpu)
else
JOBS=1
fi
# run with a debugger
function debug_run ()
{
if [[ $TRAVIS_OS_NAME == "osx" ]]; then
# -o runs after loading the binary
# -k runs after any crash
# We use a ghetto appromixation of --return-child-result, exiting with
# 1 on a crash
lldb \
--batch \
-o 'run' \
-k 'thread backtrace all' \
-k 'script import os; os._exit(1)' \
$@
else
gdb \
--silent \
--batch \
--return-child-result \
-ex="set print thread-events off" \
-ex=run \
-ex="thread apply all bt full" \
--args $@
fi
}
function valgrind_run ()
{
valgrind \
--track-origins=yes \
--max-stackframe=16000000 \
--suppressions=$BOOST_ROOT/libs/beast/tools/valgrind.supp \
--error-exitcode=1 \
$@
}
function run_tests_with_debugger ()
{
find "$1" -name "$2" -print0 | while read -d $'\0' f
do
debug_run "$f"
done
}
function run_tests_with_valgrind ()
{
find "$1" -name "$2" -print0 | while read -d $'\0' f
do
valgrind_run "$f"
done
}
function run_tests ()
{
find "$1" -name "$2" -print0 | while read -d $'\0' f
do
"$f"
done
}
#-------------------------------------------------------------------------------
BIN_DIR="$BOOST_ROOT/bin.v2/libs/beast/test"
LIB_DIR="$BOOST_ROOT/libs/beast"
INC_DIR="$BOOST_ROOT/boost/beast"
function build_bjam ()
{
if [[ $VARIANT == "beast_coverage" ]] || \
[[ $VARIANT == "beast_valgrind" ]] || \
[[ $VARIANT == "beast_ubasan" ]]; then
b2 \
define=BOOST_COROUTINES_NO_DEPRECATION_WARNING=1 \
cxxstd=$CXXSTD \
libs/beast/test/beast/core//fat-tests \
libs/beast/test/beast/http//fat-tests \
libs/beast/test/beast/websocket//fat-tests \
libs/beast/test/beast/zlib//fat-tests \
toolset=$TOOLSET \
variant=$VARIANT \
link=static \
-j${JOBS}
elif [[ $VARIANT == "debug" ]]; then
b2 \
define=BOOST_COROUTINES_NO_DEPRECATION_WARNING=1 \
cxxstd=$CXXSTD \
libs/beast/test//fat-tests \
libs/beast/example \
toolset=$TOOLSET \
variant=$VARIANT \
-j${JOBS}
else
b2 \
define=BOOST_COROUTINES_NO_DEPRECATION_WARNING=1 \
cxxstd=$CXXSTD \
libs/beast/test//fat-tests \
toolset=$TOOLSET \
variant=$VARIANT \
-j${JOBS}
fi
}
build_bjam
if [[ $VARIANT == "beast_coverage" ]]; then
# for lcov to work effectively, the paths and includes
# passed to the compiler should not contain "." or "..".
# (this runs in $BOOST_ROOT)
lcov --version
find "$BOOST_ROOT" -name "*.gcda" | xargs rm -f
rm -f "$BOOST_ROOT/*.info"
lcov --no-external -c -i -d "$BOOST_ROOT" -o baseline.info > /dev/null
run_tests "$BIN_DIR" fat-tests
# https://bugs.launchpad.net/ubuntu/+source/lcov/+bug/1163758
lcov --no-external -c -d "$BOOST_ROOT" -o testrun-all.info > /dev/null 2>&1
lcov -a baseline.info -a testrun-all.info -o lcov-diff.info > /dev/null
lcov -e "lcov-diff.info" "$INC_DIR/*" -o lcov.info > /dev/null
lcov --remove "lcov.info" "$INC_DIR/_experimental/*" -o lcov.info > /dev/null
echo "Change working directory for codecov:"
pwd
pushd .
cd libs/beast
~/.local/bin/codecov -X gcov -f ../../lcov.info
popd
find "$BOOST_ROOT" -name "*.gcda" | xargs rm -f
elif [[ $VARIANT == "beast_valgrind" ]]; then
run_tests_with_valgrind "$BIN_DIR" fat-tests
else
#run_tests_with_debugger "$BIN_DIR" fat-tests
run_tests "$BIN_DIR" fat-tests
fi
| davehorton/drachtio-server | deps/boost_1_77_0/libs/beast/tools/build-and-test.sh | Shell | mit | 4,316 |
#!/bin/bash
if [ ! -f 1.0.simplify_pindel_callset.sh -o ! -f 1.1.simplify_svseq_callset.sh ]; then
echo "need 1.0.simplify_pindel_callset.sh and 1.1.simplify_svseq_callset.sh"
exit 1
fi
# restore bsub out file
mkdir bsub_out
mv *.out bsub_out/
mkdir list used_sh feature
mkdir -p bam/bam_cfg
mv *.cfg bam/bam_cfg
mv *.bam *.bai *.bas bam/
mkdir -p raw_callset/pindel
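# Move raw pindel output aside, simplify it, archive and remove the raw files, then copy the simplified results back to the top-level directory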
mv out.pindel* raw_callset/pindel && cd raw_callset/pindel && . ../../1.0.simplify_pindel_callset.sh && tar czvf res.pindel.tar.gz out* && rm out* && cp res/* ../../ && cd -
mkdir -p raw_callset/svseq
mv out.svseq* raw_callset/svseq && cd raw_callset/svseq && . ../../1.1.simplify_svseq_callset.sh && tar czvf res.svseq.tar.gz out* && rm out* && cp res/* ../../ && cd -
mkdir -p raw_callset/breakdancer
cp out.breakdancer* raw_callset/breakdancer
mkdir -p raw_callset/delly
cp out.delly* raw_callset/delly
| zz-zigzag/bioinformatics | scripts/real/call_analyze/hpc/1.4.mkdir.sh | Shell | mit | 883 |
#!/usr/bin/bash
set -o errexit -o noclobber -o noglob -o nounset -o pipefail
IFS=$'\n'
if [[ "${#}" -gt 0 ]]; then
separator='--'
fi
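# Strip ANSI CSI escape sequences from stdin, then open the result read-only in nvim with the man filetype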
perl -p -e 's/(?:\x1B\x5B|\x9B)[\x30-\x3F]*[\x20-\x2F]*[\x40-\x7E]//g' | \
nvim -R -c 'set noswapfile filetype=man' ${separator:-} "${@:--}"
| nfnty/vimpager | man.sh | Shell | mit | 284 |
#!/bin/bash
{ [ -f .build/init.sh ] && . .build/init.sh; } || true;
{ [ -f ../.build/init.sh ] && . ../.build/init.sh; } || true;
buildreport || exit
builddocker_init_ver debian
vers=${1:-debian builder}
builddocker_vers $vers
| wenerme/dockerfiles | debian/build.sh | Shell | mit | 228 |
#!/bin/bash
# Based on https://dev.to/michael/compile-a-jekyll-project-without-installing-jekyll-or-ruby-by-using-docker-4184
docker run --rm -it --volume="$PWD:/srv/jekyll" --volume="$PWD/vendor/bundle:/usr/local/bundle" jekyll/jekyll:3.8 jekyll build
| LordRaydenMK/lordraydenmk.github.io | docker-run.sh | Shell | mit | 252 |
#!/bin/sh
./makeself.sh package dsf_uitzendinggemist.sh "Uitzending Gemist link created in Favorites" ./install.sh
gcc dsf_pack.c -o dsf_pack
gzip dsf_uitzendinggemist.sh
./dsf_pack -e dsf_uitzendinggemist.sh.gz
mv outfile.dsf "[email protected]"
rm dsf_uitzendinggemist.sh.gz
| Rodinia/php-UitzendingGemist4DuneHD | dsf/make_dsf.sh | Shell | mit | 298 |
git clone [email protected]:Burthorpe/runescape-api.git workbench/burthorpe/runescape-api
| Burthorpe/burthorpe | bin/setup-dependencies.sh | Shell | mit | 87 |
#!/bin/bash
sudo tee /etc/default/docker > /dev/null <<-EOS
export http_proxy=http://168.219.61.252:8080/
export ftp_proxy=ftp://168.219.61.252:8080/
export https_proxy=http://168.219.61.252:8080/
export no_proxy=qb.sec.samsung.net,165.213.180.100,127.0.0.1,localhost
export SSL_CERT_FILE=/usr/share/ca-certificates/infra.crt
EOS
| ermaker/inner_infra | docker_env.sh | Shell | mit | 331 |
echo "Backing up database from db container into /tmp/backup.gz on host machine..."
if [ ! -f /tmp/backup.gz ]; then
docker run --rm -t --link db:db -v /tmp:/tmp datahuborg/postgres \
/bin/bash -c "pg_dumpall --clean --if-exists --host db --username postgres | gzip > /tmp/backup.gz"
else
echo "/tmp/backup.gz already exists. Cancelling backup."
fi
echo "Done."
| dnsserver/datahub | provisions/docker/back-up-database.sh | Shell | mit | 378 |
#!/bin/sh
echo "START TEST..."
npm run test-compiled
| mpneuried/systemhealth | dockertests/test.sh | Shell | mit | 53 |
#!/usr/bin/env bash
set -e
if [ "$EUID" != 0 ]; then
echo "Please run as root on $0"
exit 1
fi
echo '======================='
echo 'Config for PC on Fedora'
echo '======================='
echo
#################### GRUB ####################
grub_update(){
# Update GRUB2 Bootloader
echo 'Updating GRUB2 Bootloader configuration...'
# Legacy GRUB2 for BIOS may no longer needed for booting
#sudo grub2-mkconfig -o /boot/grub2/grub.cfg
# Modern PCs use UEFI
grub2-mkconfig -o /boot/efi/EFI/fedora/grub.cfg
echo
}
#################### ASUS Notebook ####################
asus_nb_wmi(){
# ASUS Notebook Modules Patch
# Enable certain modules for special keys and functions to work
echo 'Enable kernel modules for Fn keys to work...'
# Maybe no longer needed since kernel knows it is Asus Notebook
#sudo tee /etc/modules-load.d/asus.conf <<< 'asus_nb_wmi' >/dev/null
tee /etc/modprobe.d/asus.conf <<< 'options asus_nb_wmi wapf=4' >/dev/null
echo
}
upower_config(){
# ASUS Battery may not be really good at time estimation
# Use this if default is bad
echo 'Switching to percentage based estimation for battery level...'
sed -i 's/UsePercentageForPolicy=.*/UsePercentageForPolicy=true/' /etc/UPower/UPower.conf
echo
# Hybrid sleep doesn't work well on ASUS Laptop
echo 'Switching to use classic suspend to disk...'
sed -i 's/CriticalPowerAction=.*/CriticalPowerAction=Hibernate/' /etc/UPower/UPower.conf
echo
systemctl restart upower
}
#################### General PC ####################
patch_grub(){
echo 'Patching GRUB2 source file to restore features...'
sed -e 's|^\(GRUB_CMDLINE_LINUX=.*\)\("\)$|\1 zswap.enabled=1 resume='$(swapon -s | awk '/dev/ {print $1}')'"|' -i /etc/default/grub
echo
# Some way of spitting out where is swap
#resume='$(awk '/swap/ {print $1}' /etc/fstab)'
#resume='$(echo "$(swapon -s)" | awk '/dev/ {print $1}')'
grub_update
}
disable_wayland(){
echo 'Disabling Wayland on next reboot...'
sed -i 's/#WaylandEnable=.*$/WaylandEnable=false/' /etc/gdm/custom.conf
echo
}
#################### START OF SCRIPT ####################
patch_grub
#asus_nb_wmi
upower_config
#disable_wayland
#################### END OF SCRIPT ####################
echo 'Done!'
exit
| truboxl/post-auto | fedora-config-0.sh | Shell | mit | 2,337 |
#!/bin/bash
# move to the mongo directory, no matter where the script is run from
BINDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd $BINDIR
cd ../
rm -rf ./var/mongodb-data/*
| jpitts/isomorphic-react-demo-ss2015 | vendor/mongodb/bin/reset_all_mongodb_data.sh | Shell | mit | 190 |
#!/bin/bash
function run() {
local version_new="$1"
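# Split the new MAJOR.MINOR.PATCH version to derive the floating MAJOR and MAJOR.MINOR tag names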
local minor_number=$(echo "$version_new" | cut --delimiter="." --fields=2)
local major_number=$(echo "$version_new" | cut --delimiter="." --fields=1)
minor_tag="$major_number"."$minor_number"
if git tag --list | grep -xq "$minor_tag"
then
echo "Recreating minor tag $minor_tag"
git tag --delete "$minor_tag"
else
echo "Creating minor tag $minor_tag"
fi
git tag "$minor_tag"
if git tag --list | grep -xq "$major_number"
then
echo "Recreating major tag $major_number"
git tag --delete "$major_number"
else
echo "Creating major tag ""$major_number"
fi
git tag "$major_number"
echo
return 0
}
case "${1}" in
--about)
echo -n "Create or recreate a minor and major tag on each version bump."
;;
*)
run "$@"
;;
esac
|
markchalloner/git-semver
|
plugins/90-major_and_minor_tag.sh
|
Shell
|
mit
| 922 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for DLA-31-1
#
# Security announcement date: 2014-08-07 00:00:00 UTC
# Script generation date: 2017-01-01 21:08:47 UTC
#
# Operating System: Debian 6 (Squeeze)
# Architecture: x86_64
#
# Vulnerable packages fixed in version:
# - reportbug:4.12.6+deb6u1
#
# Last versions recommended by security team:
# - reportbug:4.12.6+deb6u1
#
# CVE List:
# - CVE-2014-0479
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade reportbug=4.12.6+deb6u1 -y
|
Cyberwatch/cbw-security-fixes
|
Debian_6_(Squeeze)/x86_64/2014/DLA-31-1.sh
|
Shell
|
mit
| 616 |
#!/bin/bash
function get_upstream_branch {
    # $0 holds the submodule name here: the function is invoked via `bash -c`,
    # which assigns the first extra argument to $0.
    local SUBMODULE_NAME=$0
local MEDIA_SERVICE_NAME="openslides-media-service"
    # We have to treat the media-service differently from the other services
    # as long as its default branch is neither master nor main
if [ "$SUBMODULE_NAME" == "$MEDIA_SERVICE_NAME" ]; then
echo "openslides4-dev"
return
fi;
local BRANCH_NAME=master
local exists=`git show-ref refs/heads/$BRANCH_NAME`
if [[ -z $exists ]]; then
BRANCH_NAME=main
fi;
echo "$BRANCH_NAME"
}
function get_upstream_name {
git ls-remote --exit-code upstream &>/dev/null || {
echo "origin"
return
}
echo "upstream"
}
function pull_latest_commit {
local SUBMODULE_NAME=$0
echo ""
echo "$SUBMODULE_NAME"
local BRANCH_NAME=$(get_upstream_branch)
local REMOTE_NAME=$(get_upstream_name)
echo "git fetch $REMOTE_NAME && git checkout $REMOTE_NAME/$BRANCH_NAME ..."
git fetch $REMOTE_NAME;
git checkout $REMOTE_NAME/$BRANCH_NAME;
}
export -f pull_latest_commit
export -f get_upstream_branch
export -f get_upstream_name
git submodule foreach -q --recursive "bash -c pull_latest_commit \$name"
echo ""
echo "Successfully updated all submodules to latest commit."
# Old command, if we need to checkout another branch than master or main:
# git submodule foreach -q --recursive
# '
# git checkout $(git config -f $$toplevel/.gitmodules submodule.$$name.branch || echo master);
# git pull upstream $$(git config -f $$toplevel/.gitmodules submodule.$$name.branch || echo master)
# '
|
tsiegleauq/OpenSlides
|
services-to-master.sh
|
Shell
|
mit
| 1,564 |
#!/bin/sh
cd ..
cd results
case $9 in
0|4|1|5 )
for iter in `seq 1 $8`; do
for nodes in `seq 1 $3`; do
rm size=${nodes}_idle=$5_speeds=$4_expe=$9_iter=${iter}.dat
rm pbm_size=${nodes}_idle=$5_speeds=$4_expe=$9_iter=${iter}_general.lp
rm result_${nodes}_${5}_$4_${9}_${iter}.temp
done
done
rm *.log
;;
2|6 )
for iter in `seq 1 $8`; do
r=$(echo "$5/20" | bc)
for static in `seq 0 $r $5`; do
rm size=${3}_idle=${static}_expe=$9_iter=${iter}.dat
rm pbm_size=${3}_idle=${static}_expe=$9_iter=${iter}_general.lp
rm result_${3}_${static}_$4_${9}_${iter}.temp
done
done
rm *.log
;;
3|7 )
for iter in `seq 1 $8`; do
rm size=${3}_idle=$5_speeds=$4_expe=$9_iter=${iter}.dat
rm pbm_size=${3}_idle=$5_speeds=$4_expe=$9_iter=${iter}_general.lp
rm result_${3}_${5}_$4_${9}_${iter}.temp
done
rm *.log
;;
8 )
for iter in `seq 1 $8`; do
r=$(echo "$3/20" | bc)
for nodes in `seq $r $r $3`; do
rm size=${nodes}_idle=$5_speeds=$4_expe=$9_iter=${iter}.dat
rm pbm_size=${nodes}_idle=$5_speeds=$4_expe=$9_iter=${iter}_general.lp
rm result_${nodes}_${5}_${9}_${iter}.temp
done
done
;;
* )
echo "You have tried an expe_number that is not yet implemented."
;;
esac
|
Gaupy/replica
|
scripts/clean.sh
|
Shell
|
mit
| 1,210 |
#
# deta
#
# Copyright (c) 2011 David Persson
#
# Distributed under the terms of the MIT License.
# Redistributions of files must retain the above copyright notice.
#
msginfo "Module %s loaded." "transfer"
# @FUNCTION: download
# @USAGE: <URL> <target>
# @DESCRIPTION:
# Downloads from various sources. Implements "svn export"-like functionality
# for GIT. Automatically dearchives downloaded archives. The source URL may
# point to an archive, a repository or a single file.
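# @EXAMPLE:
# Illustrative only -- the URLs and target paths below are hypothetical:
#   download https://example.org/pkg.tar.gz vendor/pkg
#   download git://example.org/repo.git vendor/repo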
download() {
msg "Downloading %s." $1
case $1 in
# Partially GitHub specific
*".zip"* | *"/zipball/"*)
tmp=$(mktemp -d -t deta.XXX)
defer rm -rf $tmp
			curl -# -f -L $1 -o $tmp/download.zip
unzip $tmp/download -d $2
;;
# Partially GitHub specific
*".tar.gz"* | *"/tarball/"*)
curl -s -S -f -L $1 | tar vxz -C $2
;;
"git"* | *".git")
git clone --no-hardlinks --progress --depth 1 $1 $2
;;
"svn://"* | *"/svn/"* | *".svn."*)
svn export $1 $2
;;
# Must come after filetype-specific download strategies.
"http://"* | "https://"*)
			curl -# -f -L $1 -o $2
;;
*"://"*)
			curl -# -f -L $1 -o $2
;;
*)
if [[ -f $1 ]]; then
cp -v $1 $2
elif [[ -d $1/.git ]]; then
git clone --no-hardlinks --progress $1 $2
fi
;;
esac
}
# @FUNCTION: sync
# @USAGE: <source> <target> <ignore>
# @DESCRIPTION:
# Will rsync all directories and files from <source> to <target>. Thus files
# which have been removed in <source> will also be removed from <target>.
# Specify a whitespace separated list of patterns to ignore. Files matching the
# patterns won't be transferred from <source> to <target>. This function has
# DRYRUN support. Symlinks are copied as symlinks.
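# @EXAMPLE:
# Illustrative only -- paths and ignore patterns are hypothetical:
#   sync build/ /var/www/app/ ".git *.log tmp/*"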
sync() {
if [[ $DRYRUN != "n" ]]; then
msgdry "Pretending syncing from %s to target %s." $1 $2
else
msg "Syncing from %s to target %s." $1 $2
fi
rsync --stats -h -z -p -r --delete \
$(_rsync_ignore "$3") \
--links \
--times \
--verbose \
--itemize-changes \
$(_rsync_dryrun) \
$1 $2
}
# @FUNCTION: sync_sanity
# @USAGE: <source> <target> <ignore>
# @DESCRIPTION:
# Performs sanity checks on a sync from <source> to <target>. Will ask for
# confirmation and return 1 if declined, thus aborting the script when the
# errexit option is set. Best used right before the actual sync call. See the sync function
# for more information on behavior and arguments.
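# @EXAMPLE:
# Illustrative only -- typically paired with sync, using the same arguments:
#   sync_sanity build/ /var/www/app/ ".git *.log"
#   sync build/ /var/www/app/ ".git *.log"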
sync_sanity() {
local backup=$DRYRUN
DRYRUN="y"
msgdry "Running sync sanity check."
local out=$(sync $1 $2 "$3" 2>&1)
DRYRUN=$backup
set +o errexit # grep may not match anything at all.
echo "To be changed on target:"
echo "$out" | grep -E '^<[a-z]+.*[a-z\?].*'
echo
echo "To be deleted on target:"
echo "$out" | grep deleting
echo
echo "To be created on target:"
echo "$out" | grep '^c'
echo
set -o errexit
read -p "Looks good? (y/N) " continue
if [[ $continue != "y" ]]; then
return 1
fi
}
# @FUNCTION: _rsync_ignore
# @USAGE: <ignore>
# @DESCRIPTION:
# Takes a list of ignores and creates an argument to be passed to rsync.
_rsync_ignore() {
local tmp=$(mktemp -t deta.XXX)
for excluded in $1; do
echo $excluded >> $tmp
done
# defer rm $tmp
echo "--exclude-from=$tmp"
}
# @FUNCTION: _rsync_dryrun
# @DESCRIPTION:
# Creates the dryrun argument to be passed to rsync. This function
# has DRYRUN support.
_rsync_dryrun() {
if [[ $DRYRUN != "n" ]]; then
echo "--dry-run"
fi
}
|
davidpersson/deta
|
transfer.sh
|
Shell
|
mit
| 3,389 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for DSA-3473-1
#
# Security announcement date: 2016-02-11 00:00:00 UTC
# Script generation date: 2017-01-01 21:07:49 UTC
#
# Operating System: Debian 7 (Wheezy)
# Architecture: armv7l
#
# Vulnerable packages fixed in version:
# - nginx:1.2.1-2.2+wheezy4
#
# Last versions recommended by security team:
# - nginx:1.2.1-2.2+wheezy3
#
# CVE List:
# - CVE-2016-0742
# - CVE-2016-0746
# - CVE-2016-0747
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade nginx=1.2.1-2.2+wheezy3 -y
|
Cyberwatch/cbw-security-fixes
|
Debian_7_(Wheezy)/armv7l/2016/DSA-3473-1.sh
|
Shell
|
mit
| 657 |
#!/bin/bash
mkdir -p ./raw_data/bio/yeast/
wget http://vlado.fmf.uni-lj.si/pub/networks/data/bio/Yeast/yeast.zip && unzip -o yeast.zip -d raw_data/bio/yeast/ && rm yeast.zip
echo "yeast!"
|
bt3gl/NetAna-Complex-Network-Analysis
|
scripts_to_extract_data/17-getdata_yeast.sh
|
Shell
|
mit
| 186 |
#!/bin/bash -e
for extension in "$@"; do
pecl install "${extension}"
echo "extension=${extension}.so" > "${CONF_PHPMODS}"/"${extension}".ini
done
|
bryanlatten/docker-php
|
scripts/pecl-install.sh
|
Shell
|
mit
| 152 |
#!/bin/bash
echo "> Running server tests..."
go test -v ./server/pkg/...
|
darwinfroese/hacksite
|
scripts/run-server-tests.sh
|
Shell
|
mit
| 75 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for USN-3131-1
#
# Security announcement date: 2016-11-21 00:00:00 UTC
# Script generation date: 2017-01-01 21:05:44 UTC
#
# Operating System: Ubuntu 16.04 LTS
# Architecture: i386
#
# Vulnerable packages fixed in version:
# - libmagick++-6.q16-5v5:8:6.8.9.9-7ubuntu5.2
# - imagemagick:8:6.8.9.9-7ubuntu5.2
# - libmagickcore-6.q16-2-extra:8:6.8.9.9-7ubuntu5.2
# - imagemagick-6.q16:8:6.8.9.9-7ubuntu5.2
# - libmagickcore-6.q16-2:8:6.8.9.9-7ubuntu5.2
#
# Last versions recommended by security team:
# - libmagick++-6.q16-5v5:8:6.8.9.9-7ubuntu5.3
# - imagemagick:8:6.8.9.9-7ubuntu5.3
# - libmagickcore-6.q16-2-extra:8:6.8.9.9-7ubuntu5.3
# - imagemagick-6.q16:8:6.8.9.9-7ubuntu5.3
# - libmagickcore-6.q16-2:8:6.8.9.9-7ubuntu5.3
#
# CVE List:
# - CVE-2014-8354
# - CVE-2014-8355
# - CVE-2014-8562
# - CVE-2014-8716
# - CVE-2014-9805
# - CVE-2014-9806
# - CVE-2014-9807
# - CVE-2014-9808
# - CVE-2014-9809
# - CVE-2014-9810
# - CVE-2014-9811
# - CVE-2014-9812
# - CVE-2014-9813
# - CVE-2014-9814
# - CVE-2014-9815
# - CVE-2014-9816
# - CVE-2014-9817
# - CVE-2014-9818
# - CVE-2014-9819
# - CVE-2014-9820
# - CVE-2014-9821
# - CVE-2014-9822
# - CVE-2014-9823
# - CVE-2014-9826
# - CVE-2014-9828
# - CVE-2014-9829
# - CVE-2014-9830
# - CVE-2014-9831
# - CVE-2014-9833
# - CVE-2014-9834
# - CVE-2014-9835
# - CVE-2014-9836
# - CVE-2014-9837
# - CVE-2014-9838
# - CVE-2014-9839
# - CVE-2014-9840
# - CVE-2014-9841
# - CVE-2014-9843
# - CVE-2014-9844
# - CVE-2014-9845
# - CVE-2014-9846
# - CVE-2014-9847
# - CVE-2014-9848
# - CVE-2014-9849
# - CVE-2014-9850
# - CVE-2014-9851
# - CVE-2014-9853
# - CVE-2014-9854
# - CVE-2014-9907
# - CVE-2015-8894
# - CVE-2015-8895
# - CVE-2015-8896
# - CVE-2015-8897
# - CVE-2015-8898
# - CVE-2015-8900
# - CVE-2015-8901
# - CVE-2015-8902
# - CVE-2015-8903
# - CVE-2015-8957
# - CVE-2015-8958
# - CVE-2015-8959
# - CVE-2016-4562
# - CVE-2016-4563
# - CVE-2016-4564
# - CVE-2016-5010
# - CVE-2016-5687
# - CVE-2016-5688
# - CVE-2016-5689
# - CVE-2016-5690
# - CVE-2016-5691
# - CVE-2016-5841
# - CVE-2016-5842
# - CVE-2016-6491
# - CVE-2016-6823
# - CVE-2016-7101
# - CVE-2016-7513
# - CVE-2016-7514
# - CVE-2016-7515
# - CVE-2016-7516
# - CVE-2016-7517
# - CVE-2016-7518
# - CVE-2016-7519
# - CVE-2016-7520
# - CVE-2016-7521
# - CVE-2016-7522
# - CVE-2016-7523
# - CVE-2016-7524
# - CVE-2016-7525
# - CVE-2016-7526
# - CVE-2016-7527
# - CVE-2016-7528
# - CVE-2016-7529
# - CVE-2016-7530
# - CVE-2016-7531
# - CVE-2016-7532
# - CVE-2016-7533
# - CVE-2016-7534
# - CVE-2016-7535
# - CVE-2016-7536
# - CVE-2016-7537
# - CVE-2016-7538
# - CVE-2016-7539
# - CVE-2016-7540
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade libmagick++-6.q16-5v5=8:6.8.9.9-7ubuntu5.3 -y
sudo apt-get install --only-upgrade imagemagick=8:6.8.9.9-7ubuntu5.3 -y
sudo apt-get install --only-upgrade libmagickcore-6.q16-2-extra=8:6.8.9.9-7ubuntu5.3 -y
sudo apt-get install --only-upgrade imagemagick-6.q16=8:6.8.9.9-7ubuntu5.3 -y
sudo apt-get install --only-upgrade libmagickcore-6.q16-2=8:6.8.9.9-7ubuntu5.3 -y
|
Cyberwatch/cbw-security-fixes
|
Ubuntu_16.04_LTS/i386/2016/USN-3131-1.sh
|
Shell
|
mit
| 3,407 |
#!/bin/bash
set -xe
sudo add-apt-repository -y ppa:pipelight/stable
sudo apt-get update
sudo apt-get install --install-recommends wine-staging wine-staging-compat xvfb
wget https://github.com/fpco/minghc/releases/download/2015-12-04/minghc-7.10.2-i386.exe
wine minghc-7.10.2-i386.exe /S | grep -v Extracting
wine cabal --version
wine cabal update
wget http://sourceforge.net/projects/gnuwin32/files/pcre/7.0/pcre-7.0.exe/download -O pcre-7.0.exe
xvfb-run -a wine ./pcre-7.0.exe /VERYSILENT
test -d ~/".wine/drive_c/Program Files (x86)/GnuWin32/include"
wine cabal install --only-dependencies --enable-tests --enable-benchmarks --extra-include-dirs='C:\Program Files (x86)\GnuWin32\include' --extra-lib-dirs='C:\Program Files (x86)\GnuWin32\lib' --constraint 'pcre-light < 0.4.0.4'
# pcre-light 0.4.0.4 requires pkg-config
|
colinba/tip-toi-reveng
|
.travis-setup-windows.sh
|
Shell
|
mit
| 823 |
rm simplescan-meego
/usr/lib64/madde/linux-x86_64/targets/meego-core-ia32-1.2.0/bin/gcc -o simplescan-meego simplescan.c -I/usr/include/bluetooth -L/usr/lib64 -lbluetooth
|
bearlin/study_bluetooth_socket
|
bluetooth/02_cross_compiler/01_simplescan/build-meego.sh
|
Shell
|
mit
| 171 |
#!/bin/sh
# thanks to https://gist.github.com/domenic/ec8b0fc8ab45f39403dd
set -e
git config --global user.name "Travis CI"
git config --global user.email "[email protected]"
python scraper.py
# don't continue if no changes
if git diff-index --quiet HEAD; then
exit 0
fi
git commit -m '[Auto] updated json files [ci skip]' out/*.json || echo "no changes"
git push "https://${GH_TOKEN}@github.com/fossasia/open-event-scraper" HEAD:master
git clone --depth=1 "https://${GH_TOKEN}@github.com/fossasia/2016.fossasia.org" fa16-repo
node schedule/generator > fa16-repo/schedule/index.html
cd fa16-repo
git commit -m '[Auto] updated schedule' schedule/index.html || echo "no changes"
git push origin gh-pages
exit 0
|
fossasia/open-event-scraper
|
build.sh
|
Shell
|
mit
| 730 |
#!/bin/bash
#
#SBATCH --job-name=vectorSum
#SBATCH --output=res_mpi_vec_sum.out
#SBATCH --ntasks=3
#SBATCH --nodes=3
#SBATCH --time=20:00
#SBATCH --mem-per-cpu=100
#SBATCH --gres=gpu:1
mpirun mpi_vec_sum
|
leiverandres/HPC_assignments
|
openmpi/vec_sum/mpi_vec_sum.sh
|
Shell
|
mit
| 205 |
#!/usr/bin/env bash
npm start
|
frouyer193/GuitarEffects_dev
|
run.sh
|
Shell
|
mit
| 30 |
#!/bin/bash
# wait for mysql to be ready
nc -z db 3306
n=$?
while [ $n -ne 0 ]; do
sleep 1
nc -z db 3306
n=$?
done
python manage.py runserver 0.0.0.0:8000
|
mujinyun2009/shakespeare-census
|
runserver.sh
|
Shell
|
mit
| 168 |
#!/usr/bin/env bash
sudo apt-get update
sudo apt-get upgrade -y
sudo apt-get install -y build-essential
sudo apt-get install -y git tig htop ranger tmux
# Sub-systems
sh ./build_ds9.sh
sh ./build_shell.sh
sh ./build_vim.sh
sh ./build_python.sh
|
autocorr/transient
|
provision/main.sh
|
Shell
|
mit
| 248 |
#!/bin/sh
CURDIR=$(dirname $0)
SOURCE_DIR=$1; shift
DEST_DIR=$1; shift
add_to_sources() {
URL=$1; shift
NAME=$1; shift
HASH=$1; shift
(
git clone "$URL" "$DEST_DIR/$NAME"
cd "$DEST_DIR/$NAME"
git checkout -qf "$HASH"
rm -Rf .git
)
}
(
cd "$SOURCE_DIR"
mkdir -p "$DEST_DIR"
"$CURDIR/git-archive-all.sh" --format tar -- - | tar -x -C "$DEST_DIR"
add_to_sources https://github.com/Snaipe/libcsptr dependencies/libcsptr 0d52904
add_to_sources https://github.com/Snaipe/dyncall dependencies/dyncall 51e79a8
add_to_sources https://github.com/nanomsg/nanomsg dependencies/nanomsg 7e12a20
add_to_sources https://github.com/diacritic/BoxFort dependencies/boxfort 7ed0cf2
)
|
am11/Criterion
|
.cmake/copy-source.sh
|
Shell
|
mit
| 743 |
#!/bin/bash -eux
# Set empty password for mysql root
export DEBIAN_FRONTEND=noninteractive
echo mysql-server-5.5 mysql-server/root_password password '' | debconf-set-selections
echo mysql-server-5.5 mysql-server/root_password_again password '' | debconf-set-selections
# Install packages
apt-get install -qq -y \
mysql-server-5.5 \
mysql-client-5.5 \
libmysqlclient18 \
libmysqlclient-dev
|
chulkilee/jenkins-vagrant
|
scripts/mysql.sh
|
Shell
|
mit
| 407 |
#!/bin/bash
python RunSimulation.py --Geo 50.0 --sim_num 46
|
xji3/IGCCodonSimulation
|
ShFiles/YDR418W_YEL054C_IGCgeo_50.0_sim_46.sh
|
Shell
|
mit
| 60 |
#!/usr/bin/env bash
PY_GOMODULE="msched.go"
mpiexec --ompi-server file:$(pwd)/memory/urifile python -m "${PY_GOMODULE}" "$@"
|
ctogle/msched
|
mclient.sh
|
Shell
|
mit
| 127 |
#!/usr/bin/env bash
set -euo pipefail
# Switch to parent directory of location of script
cd "$(dirname "$BASH_SOURCE")/.."
# Load settings
. "bin/settings.sh"
# Clear the offline servers file and make the temporary directory
>"$offline_servers"
tmp_dir="tmp.$$"
mkdir -p "$tmp_dir"
# Query each server in background for concurrent execution
sed -n '/^[^#]/p' "$server_list" | while read -r server; do
# Retrieve and write output; record server as offline if ssh returns nonzero
ssh "${ssh_config[@]}" "$username@$server" \
"$remote_dir/$info_script $server" >"$tmp_dir/$server" 2>/dev/null \
|| echo "$server" >>"$offline_servers" &
done
# Wait for all background jobs to finish
wait
# Generate the new online servers file and remove the temporary directory
cat "$tmp_dir"/* >"$online_servers"
rm -r "$tmp_dir"
|
nkouevda/ucb-eecs-servers
|
bin/main.sh
|
Shell
|
mit
| 834 |
#!/bin/bash
set -ex
IMG=${IMG:-rdev02/docker-openvpn}
temp=$(mktemp -d)
pushd $temp
SERV_IP=$(ip -4 -o addr show scope global | awk '{print $4}' | sed -e 's:/.*::' | head -n1)
docker run --net=none --rm -t -i -v $PWD:/etc/openvpn $IMG ovpn_genconfig -u udp://$SERV_IP
docker run --net=none --rm -t -i -v $PWD:/etc/openvpn -e "EASYRSA_BATCH=1" -e "EASYRSA_REQ_CN=Travis-CI Test CA" $IMG ovpn_initpki nopass
docker run --net=none --rm -t -i -v $PWD:/etc/openvpn $IMG ovpn_copy_server_files
popd
# Can't delete the temp directory as docker creates some files as root.
# Just let it die with the test instance.
rm -rf $temp || true
|
rdev02/docker-openvpn
|
tests/paranoid.sh
|
Shell
|
mit
| 655 |
#!/bin/sh
if [ "$1" = "" ]; then
echo "usage: $0 <filename>"
exit 1
fi
FROM=$1
NAME=`basename $FROM`
BASE=`dirname $0`/..
JAR=$BASE/conv/target/symboliclua-conv-0.0.1-SNAPSHOT-jar-with-dependencies.jar
java -cp $JAR net.klazz.symboliclua.conv.Main < $FROM > $BASE/tmp/$NAME
cp $FROM $BASE/tmp/$NAME.bak
cd $BASE/src
lua run.lua ../tmp/$NAME ../tmp/$NAME.bak
|
kohyatoh/symboliclua
|
bin/symboliclua.sh
|
Shell
|
mit
| 365 |
rm -rf wrapper_nn_bn bn_layer_generator_tiramisu bn_layer_generator_tiramisu.o bn_layer_generator_tiramisu.o.h bn_layer_mkldnn_result tiramisu_result.txt mkldnn_result.txt
|
rbaghdadi/tiramisu
|
benchmarks/DNN/layers/bn/clean.sh
|
Shell
|
mit
| 172 |
# vi mode
bindkey -v
bindkey "^F" vi-cmd-mode
bindkey jj vi-cmd-mode
# handy keybindings
bindkey "^A" beginning-of-line
bindkey "^E" end-of-line
bindkey "^R" history-incremental-search-backward
bindkey "^P" history-search-backward
bindkey "^Y" accept-and-hold
bindkey "^N" insert-last-word
bindkey -s "^T" "^[Isudo ^[A" # "t" for "toughguy"
|
albertogg/dotfiles
|
zsh/.zsh/keybindings.zsh
|
Shell
|
mit
| 342 |
#!/bin/sh
#origin https://aur.archlinux.org/libvpx-git.git (fetch)
#origin https://aur.archlinux.org/libvpx-git.git (push)
#origin https://aur.archlinux.org/ffmpeg-git.git (fetch)
#origin https://aur.archlinux.org/ffmpeg-git.git (push)
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/home/gbencke/git/000.INFRA/ffmpeg-git/pkg/ffmpeg-git/usr/lib:/home/gbencke/git/000.INFRA/libvpx-git/pkg/libvpx-git/usr/lib
nohup postman 2> /dev/null 1> /dev/null &
|
gbencke/dotfiles
|
new.host/arch/i3/scripts/start_postman.sh
|
Shell
|
mit
| 447 |
#!/bin/bash
sudo su
cd /home
wget https://packages.zendframework.com/releases/ZendFramework-1.12.3/ZendFramework-1.12.3.tar.gz
tar -xvzf ZendFramework-1.12.3.tar.gz
echo 'include_path = ".:/usr/share/php:/home/ZendFramework-1.12.3/library"' > /etc/php5/conf.d/includepath.ini
echo 'LoadModule expires_module /usr/lib/apache2/modules/mod_expires.so' > /etc/apache2/mods-available/expires.load
ln -s /etc/apache2/mods-available/expires.load /etc/apache2/mods-enabled/expires.load
service apache2 restart
|
hugofcampos/Phalcon-Facebook-Events-Example
|
shell/install_zf1.sh
|
Shell
|
mit
| 501 |
# Exports
export LESSOPEN="| /usr/local/bin/src-hilite-lesspipe.sh %s"
export LESS='-qRn'
export SVN_EDITOR="vi"
export GOPATH=~/.go
# Variable
HISTSIZE=100000
SAVEHIST=100000
# Set Options
setopt correct
setopt list_packed
setopt hist_ignore_dups
setopt share_history
setopt EXTENDED_HISTORY
# KEY BIND
autoload history-search-end
zle -N history-beginning-search-backward-end history-search-end
zle -N history-beginning-search-forward-end history-search-end
bindkey "^p" history-beginning-search-backward-end
bindkey "^n" history-beginning-search-forward-end
bindkey "^[^H" run-help
bindkey "[^H" backward-kill-word
# Alias
alias svn=colorsvn
alias r=rmtrash
alias brew_cask_alfred_link="brew cask alfred link"
alias rm="rm -i"
alias cp="cp -i"
alias mv="mv -i"
alias pbcopytr="tr -d '\n' | pbcopy"
alias lt="l -tT"
alias diff="colordiff"
alias s3="aws s3"
alias adb_restart_server="adb kill-server && adb start-server"
alias f="open ."
alias ctags="/usr/local/bin/ctags"
alias sed="gsed"
alias git="hub"
alias vi="vim"
alias -g PP=" | peco"
alias -g G=" | grep"
alias -g L=" | less"
alias -g C=" | pbcopy"
alias -g T=" | tr -d '\n'"
# Functions
pass2clip(){
G_PASS=$(pwgen 12 1 -Bsync)
echo -n $G_PASS | pbcopy
echo "generated: $G_PASS"
return
}
dash(){
open "dash://$@"
}
have(){
if [ -e "`which $@`" ];then
return 0
else
return 1
fi
}
dic(){
open dict:///$1
}
# Disable duplicate function in prezto osx module.
#cdf(){
# target=`osascript -e 'tell application "Finder" to if (count of Finder windows) > 0 then get POSIX path of (target of front Finder window as text)'`
# if [ "$target" != "" ]
# then
# cd "$target"
# pwd
# else
# echo 'No Finder window found' >&2
# fi
#}
strlen(){
local tmp
tmp="$@"
echo "${#tmp}"
unset local
}
start_emacs_daemon(){
# Emacs Daemon
/usr/local/bin/emacs --daemon
}
# http://d.hatena.ne.jp/hiboma/20120315/1331821642
pbcopy-buffer(){
print -rn $BUFFER | pbcopy
zle -M "pbcopy: ${BUFFER}"
}
zle -N pbcopy-buffer
bindkey '^x^p' pbcopy-buffer
# Variable
fpath=(/usr/local/share/zsh/site-functions $fpath)
fpath=(/usr/local/share/zsh-completions $fpath)
#fpath=(/usr/share/zsh/${ZSH_VERSION}/functions $fpath)
# Setting
chpwd(){ ls }
# . `brew --prefix`/etc/profile.d/z.sh
#source /usr/local/share/zsh/site-functions/*
# cdr
autoload -Uz chpwd_recent_dirs cdr add-zsh-hook
add-zsh-hook chpwd chpwd_recent_dirs
zstyle ':chpwd:*' recent-dirs-max 5000
zstyle ':chpwd:*' recent-dirs-default yes
zstyle ':completion:*' recent-dirs-insert both
# zaw-src-cdr
# zstyle ':filter-select' case-insensitive yes # make filtering case-insensitive
# bindkey '^@' zaw-cdr
# zaw-src-history
# bindkey '^r' zaw-history
autoload -Uz compinit
compinit -u
|
kama-meshi/dotFiles
|
.prezto/custom/custom-zshrc.zsh
|
Shell
|
mit
| 2,769 |
#! /bin/bash
cd $SSP_CODE_HOME
for entry in $(git status | grep ".cpp\|.hpp" | grep "modified:\|new file:" | cut -d$'\t' -f2 | cut -d":" -f 2)
do
echo $entry
#./tools/bin/clang-format -i $entry
done
|
Meraz/doremi
|
tools/script/formatCached.sh
|
Shell
|
mit
| 204 |
#!/usr/bin/env bash
echo "Setting up database $SUBTICKET_DB for user $SUBTICKET_DB_USER"
createdb -h "$SUBTICKET_DB_HOSTNAME" -p "$SUBTICKET_DB_PORT" "$SUBTICKET_DB"
psql -h "$SUBTICKET_DB_HOSTNAME" -p "$SUBTICKET_DB_PORT" "$SUBTICKET_DB" <<EOF
create user "$SUBTICKET_DB_USER" password '$SUBTICKET_DB_PASS';
create schema "$SUBTICKET_DB_SCHEMA" authorization "$SUBTICKET_DB_USER";
grant all privileges on schema "$SUBTICKET_DB_SCHEMA" to "$SUBTICKET_DB_USER";
alter user "$SUBTICKET_DB_USER" set search_path to "$SUBTICKET_DB_SCHEMA";
EOF
|
mattbowen/subticket
|
initdb.sh
|
Shell
|
epl-1.0
| 541 |
#!/bin/bash
sudo apt-get install ruby ruby-dev -y
sudo gem install compass
|
uberhacker/pantheon-local-drupal-development
|
compass-install.sh
|
Shell
|
gpl-2.0
| 75 |
#! /bin/bash
# Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 nerdopolis (or n3rdopolis) <[email protected]>
#
# This file is part of LinuxRCD
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
function linuxrcdedit ()
{
export OriginalText=$1
export TargetText=$2
#Change all references to /$OriginalText to /$TargetText in the folder containing the LiveCD system
find ""$BUILDLOCATION"/build/"$BUILDARCH"/workdir" -type f -not -path ""$BUILDLOCATION"/build/"$BUILDARCH"/workdir/proc/*" -not -path ""$BUILDLOCATION"/build/"$BUILDARCH"/workdir/sys/*" -not -path ""$BUILDLOCATION"/build/"$BUILDARCH"/workdir/dev/*" -not -path ""$BUILDLOCATION"/build/"$BUILDARCH"/workdir/tmp/*" -not -path ""$BUILDLOCATION"/build/"$BUILDARCH"/workdir/usr/bin/recoverylauncher" -not -path ""$BUILDLOCATION"/build/"$BUILDARCH"/workdir/usr/RCDbin/recoverychrootscript" -not -path ""$BUILDLOCATION"/build/"$BUILDARCH"/workdir/usr/launchers/Apps" | sort -r | while read FILE
do
echo "editing file $FILE"
#replace all instances of $OriginalText with the new folder name only if it is not adjacent to a-z A-Z or 0-9. Thanks to @ofnuts on Ubuntu Forums for helping me with the sed expression
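#Illustrative only: with OriginalText=usr and TargetText=RCD the expression behaves like
#  "/usr/bin/foo" -> "/RCD/bin/foo"   (usr bounded by non-word characters)
#  "/usrlocal"    -> unchanged        (usr followed by a word character)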
sed -re "s/(\W|^)$OriginalText(\W|$)/\1$TargetText\2/g" "$FILE" > "$FILE.tmp"
cat "$FILE.tmp" > "$FILE"
rm "$FILE.tmp"
done
#change all symbolic links that point to $OriginalText to point to $TargetText
find ""$BUILDLOCATION"/build/"$BUILDARCH"/workdir" -type l -not -path ""$BUILDLOCATION"/build/"$BUILDARCH"/workdir/proc/*" -not -path ""$BUILDLOCATION"/build/"$BUILDARCH"/workdir/sys/*" -not -path ""$BUILDLOCATION"/build/"$BUILDARCH"/workdir/dev/*" -not -path ""$BUILDLOCATION"/build/"$BUILDARCH"/workdir/tmp/*" | sort -r |while read FILE
do
echo "relinking $FILE"
newlink=$(readlink $FILE | sed -re "s/(\W|^)$OriginalText(\W|$)/\1$TargetText\2/g")
ln -s -f "$newlink" "$FILE"
done
#find all items containing $OriginalText in the name
find ""$BUILDLOCATION"/build/"$BUILDARCH"/workdir" -type d -not -path ""$BUILDLOCATION"/build/"$BUILDARCH"/workdir/proc/*" -not -path ""$BUILDLOCATION"/build/"$BUILDARCH"/workdir/sys/*" -not -path ""$BUILDLOCATION"/build/"$BUILDARCH"/workdir/dev/*" -not -path ""$BUILDLOCATION"/build/"$BUILDARCH"/workdir/tmp/*" | sort -r | while read FILEPATH
do
cd "$FILEPATH"
rename -v "s/(\W|^)$OriginalText(\W|$)/\1$TargetText\2/g" * 2> /dev/null
done
}
function RenameFiles()
{
linuxrcdedit usr RCD
linuxrcdedit lib LYB
linuxrcdedit lib64 LYB64
#fix for Xorg, it uses wildcards.
find "$BUILDLOCATION"/build/"$BUILDARCH"/workdir/RCD/LYB/xorg -name "lib*" | while read FILEPATH
do
echo "Renaming $FILEPATH"
rename "s/lib/\1LYB\2/g" "$FILEPATH"
done
#fix for NetworkManager, it uses wildcards.
find "$BUILDLOCATION"/build/"$BUILDARCH"/workdir/RCD/LYB/NetworkManager -name "lib*" | while read FILEPATH
do
echo "Renaming $FILEPATH"
rename "s/lib/\1LYB\2/g" "$FILEPATH"
done
#Do this for X
ln -s -f /var/LYB "$BUILDLOCATION"/build/"$BUILDARCH"/workdir/var/lib
#delete the usr folder in the Live CD
rm -rf "$BUILDLOCATION"/build/"$BUILDARCH"/workdir/usr
#Do this for os-prober, as it expects a normal system layout with lib, not LYB
sed -i 's/LYB/lib/g' "$BUILDLOCATION"/build/"$BUILDARCH"/workdir/RCD/LYB/os-probes/mounted/90linux-distro
#Do this for the main library interpreter, so that it does not use the target system's ld.so.cache
sed -i 's@/ld.so.cache@/LD.SO.CACHE@g' "$BUILDLOCATION"/build/"$BUILDARCH"/workdir/LYB/ld-linux*
mv "$BUILDLOCATION"/etc/ld.so.cache "$BUILDLOCATION"/build/"$BUILDARCH"/workdir/etc/LD.SO.CACHE
}
echo "PHASE 3"
SCRIPTFILEPATH=$(readlink -f "$0")
SCRIPTFOLDERPATH=$(dirname "$SCRIPTFILEPATH")
unset HOME
if [[ -z "$BUILDARCH" || -z $BUILDLOCATION || $UID != 0 ]]
then
echo "BUILDARCH variable not set, or BUILDLOCATION not set, or not run as root. This external build script should be called by the main build script."
exit
fi
#Ensure that all the mountpoints in the namespace are private, and won't be shared to the main system
mount --make-rprivate /
#Union mount phase2 and phase3
if [[ -d "$BUILDLOCATION"/build/"$BUILDARCH"/ramdisk/phase_3 ]]
then
mount -t overlay overlay -o lowerdir="$BUILDLOCATION"/build/"$BUILDARCH"/$PHASE2_PATHNAME,upperdir="$BUILDLOCATION"/build/"$BUILDARCH"/ramdisk/phase_3,workdir="$BUILDLOCATION"/build/"$BUILDARCH"/ramdisk/unionwork "$BUILDLOCATION"/build/"$BUILDARCH"/workdir
else
mount -t overlay overlay -o lowerdir="$BUILDLOCATION"/build/"$BUILDARCH"/$PHASE2_PATHNAME,upperdir="$BUILDLOCATION"/build/"$BUILDARCH"/phase_3,workdir="$BUILDLOCATION"/build/"$BUILDARCH"/unionwork "$BUILDLOCATION"/build/"$BUILDARCH"/workdir
fi
#mounting critical fses on chrooted fs with bind
mount --rbind "$BUILDLOCATION"/build/"$BUILDARCH"/minidev/ "$BUILDLOCATION"/build/"$BUILDARCH"/workdir/dev
mount --rbind /proc "$BUILDLOCATION"/build/"$BUILDARCH"/workdir/proc
mkdir -p "$BUILDLOCATION"/build/"$BUILDARCH"/workdir/run/shm
mount --bind "$BUILDLOCATION"/build/"$BUILDARCH"/minidev/shm "$BUILDLOCATION"/build/"$BUILDARCH"/workdir/run/shm
#Bind mount shared directories
mkdir -p "$BUILDLOCATION"/build/"$BUILDARCH"/workdir/srcbuild/buildoutput
mkdir -p "$BUILDLOCATION"/build/"$BUILDARCH"/workdir/home/remastersys
mkdir -p "$BUILDLOCATION"/build/"$BUILDARCH"/workdir/var/tmp
mkdir -p "$BUILDLOCATION"/build/"$BUILDARCH"/workdir/buildlogs
mkdir -p "$BUILDLOCATION"/build/"$BUILDARCH"/workdir/tmp/srcbuild_overlay
#Hide /proc/modules as some debian packages call lsmod during install, which could lead to different results
mount --bind /dev/null "$BUILDLOCATION"/build/"$BUILDARCH"/workdir/proc/modules
#if there is enough ram, use the ramdisk as the upperdir, if not, use a path on the same filesystem as the upperdir
if [[ -d "$BUILDLOCATION"/build/"$BUILDARCH"/ramdisk/srcbuild_overlay ]]
then
mount -t overlay overlay -o lowerdir="$BUILDLOCATION"/build/"$BUILDARCH"/srcbuild,upperdir="$BUILDLOCATION"/build/"$BUILDARCH"/ramdisk/srcbuild_overlay,workdir="$BUILDLOCATION"/build/"$BUILDARCH"/ramdisk/unionwork_srcbuild "$BUILDLOCATION"/build/"$BUILDARCH"/workdir/srcbuild/
mount --bind "$BUILDLOCATION"/build/"$BUILDARCH"/ramdisk/srcbuild_overlay "$BUILDLOCATION"/build/"$BUILDARCH"/workdir/tmp/srcbuild_overlay
else
mount -t overlay overlay -o lowerdir="$BUILDLOCATION"/build/"$BUILDARCH"/srcbuild,upperdir="$BUILDLOCATION"/build/"$BUILDARCH"/srcbuild_overlay,workdir="$BUILDLOCATION"/build/"$BUILDARCH"/unionwork_srcbuild "$BUILDLOCATION"/build/"$BUILDARCH"/workdir/srcbuild/
mount --bind "$BUILDLOCATION"/build/"$BUILDARCH"/srcbuild_overlay "$BUILDLOCATION"/build/"$BUILDARCH"/workdir/tmp/srcbuild_overlay
fi
mount --bind "$BUILDLOCATION"/build/"$BUILDARCH"/buildoutput "$BUILDLOCATION"/build/"$BUILDARCH"/workdir/srcbuild/buildoutput
mount --bind "$BUILDLOCATION"/build/"$BUILDARCH"/remastersys "$BUILDLOCATION"/build/"$BUILDARCH"/workdir/home/remastersys
mount --bind "$BUILDLOCATION"/build/"$BUILDARCH"/vartmp "$BUILDLOCATION"/build/"$BUILDARCH"/workdir/var/tmp
mount --bind "$BUILDLOCATION"/build/"$BUILDARCH"/buildlogs "$BUILDLOCATION"/build/"$BUILDARCH"/workdir/buildlogs
#copy the files to where they belong
rsync "$BUILDLOCATION"/build/"$BUILDARCH"/importdata/* -CKr "$BUILDLOCATION"/build/"$BUILDARCH"/workdir/
#Handle /usr/import for the creation of the deb file that contains this systems files
mkdir -p "$BUILDLOCATION"/build/"$BUILDARCH"/workdir/usr/import
rsync "$BUILDLOCATION"/build/"$BUILDARCH"/importdata/* -CKr "$BUILDLOCATION"/build/"$BUILDARCH"/workdir/usr/import
rm -rf "$BUILDLOCATION"/build/"$BUILDARCH"/workdir/usr/import/usr/import
#delete the temp folder
rm -rf "$BUILDLOCATION"/build/"$BUILDARCH"/workdir/temp/
#Configure the Live system########################################
TARGETBITSIZE=$(chroot "$BUILDLOCATION"/build/"$BUILDARCH"/workdir /usr/bin/getconf LONG_BIT)
if [[ $TARGETBITSIZE == 32 ]]
then
linux32 chroot "$BUILDLOCATION"/build/"$BUILDARCH"/workdir /tmp/configure_phase3.sh
elif [[ $TARGETBITSIZE == 64 ]]
then
linux64 chroot "$BUILDLOCATION"/build/"$BUILDARCH"/workdir /tmp/configure_phase3.sh
else
echo "chroot execution failed. Please ensure your processor can handle the "$BUILDARCH" architecture, or that the target system isn't corrupt."
fi
|
n3rdopolis/linuxrcd
|
externalbuilders/linuxrcd_phase3.sh
|
Shell
|
gpl-2.0
| 8,970 |
#!/bin/bash
# SCRIPTNAME: recordcarverx.bash
# This script has two prerequisites; it expects pre-processed records produced by the scripts "NSRL2MD5.bash" and "prodcodecarver.bash"
# Following satisfaction of pre-requisites:
# Execution Option1: ./recordcarverx.bash $(< prodcodecarver.txt)
# Execution Option2: ./recordcarverx.bash 123 456 789 ...
# This script will GAWK pattern match "n" number of input codes versus National Institute of Standards and Technology (NIST) NSRL RDS format hash record fields.
# This script will output matched records as a custom hash file and its companion "idx" file.
#---------------------------------------------------------------------
date1=$(date +"%s")
date
# Note: If you change "n" number of codes to check per pass per record,
# you need to change the number of hard-coded gawk "${1:-0}"... statements that follow as well.
# No doubt there is a better way to handle the gawk, but it is what it is.
n=6
# "short-hand form" of the "test" command; if file named "custom*.*" pre-exists, delete it if true
[ -f customtemp0.txt ] && rm custom*.*
# While positional parameters count in $# not equal to 0, process "n" at a time,
# shift "n" records out of $@ for each loop, rinse and repeat until $# reports empty.
while (($#)); do
date2=$(date +"%s")
elapsed_seconds=$(($date2-$date1))
echo
echo "$(($elapsed_seconds / 60)) minutes and $(($elapsed_seconds % 60)) seconds elapsed."
echo "$# codes remaining to process"
echo "$@"
if [ "$#" -gt $n ]
then
# The construction "${1:-0}" means "if $1 is unset or empty, substitute the number 0".
# Why? Comparison to nothing or null is illegal and all the variables must be set to something or the code breaks.
# Since we cannot ensure each of the "n" parameters has a value, we must check each instance and handle the possibility that it is null.
#
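# Illustrative only (not part of the matching logic): with $1 unset or empty,
#   echo "${1:-0}" prints 0; with $1=123 it prints 123.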
gawk -F/ '$(NF-1)=='"${1:-0}"'||$(NF-1)=='"${2:-0}"'||$(NF-1)=='"${3:-0}"'|| \
$(NF-1)=='"${4:-0}"'||$(NF-1)=='"${5:-0}"'||$(NF-1)=='"${6:-0}"' { print $1,$2 }' NSRLFile2.txt >> customtemp0.txt
# "Shift" builtin bash command operates on the $@ environment variable values, $# keeps count of how many remain.
shift $n
# This section handles any remainder of parameters less than "n" to finish up search and zero-out $#
else
gawk -F/ '$(NF-1)=='"${1:-0}"'||$(NF-1)=='"${2:-0}"'||$(NF-1)=='"${3:-0}"'||\
$(NF-1)=='"${4:-0}"'||$(NF-1)=='"${5:-0}"'||$(NF-1)=='"${6:-0}"' { print $1,$2 }' NSRLFile2.txt >> customtemp0.txt
shift "$#"
fi
done
# Simply display no more values to process
echo "$# codes in process"
echo $@
# Remove copies of hashes and product names with sort and uniq
gawk '{print $0}' IGNORECASE=1 customtemp0.txt | sort | uniq > customhash.txt
# Build the Hash index
hfind -i md5sum customhash.txt
echo "Your new custom hash index file is:"
ls -al customhash.txt-md5.idx
echo
echo "Your new hash file is:"
ls -al customhash.txt
echo
date2=$(date +"%s")
elapsed_seconds=$(($date2-$date1))
echo "$(($elapsed_seconds / 60)) minutes and $(($elapsed_seconds % 60)) seconds elapsed."
|
JohnEbert/nsrl_carver
|
recordcarverx.bash
|
Shell
|
gpl-2.0
| 3,028 |
#! /bin/sh
# Copyright (C) 2001-2018 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# Test to make sure Automake supports implicit rules with dot-less
# extensions.  See also the related "grepping" test 'suffix6.sh'.
required=GNUmake # Other makes might not grok dot-less suffix rules.
. test-init.sh
cat >> configure.ac << 'END'
# $(LINK) is not defined automatically by Automake, since the *_SOURCES
# variables don't contain any known extension (.c, .cc, .f, ...).
# So we need this hack -- but since such an hack can also serve as a
# mild stress test, that's ok.
AC_SUBST([LINK], ['cat >$@'])
AC_SUBST([OBJEXT], [oOo])
AC_SUBST([EXEEXT], [.XxX])
AC_OUTPUT
END
cat > Makefile.am << 'END'
SUFFIXES = a b c .$(OBJEXT)
bin_PROGRAMS = foo
foo_SOURCES = fooa
ab:
{ echo '=ab=' && cat $<; } >$@
bc:
{ echo '=bc=' && cat $<; } >$@
c.$(OBJEXT):
{ echo '=b.obj=' && cat $<; } >$@
test:
: For debugging.
ls -l
: Implicit intermediate files should be removed by GNU make ...
test ! -r foob
test ! -r fooc
: ... but object files should not.
cat foo.$(OBJEXT)
: For debugging.
cat foo.XxX
: Now check that the chain of implicit rules has been executed
: completely and in the correct order.
(echo =b.obj= && echo =bc= && echo =ab= && echo =src=) > exp
diff exp foo.XxX
rm -f exp
.PHONY: test
check-local: test
END
$ACLOCAL
$AUTOCONF
$AUTOMAKE
./configure
echo =src= > fooa
$MAKE
$MAKE test
$MAKE distcheck
:
|
komh/automake-os2
|
t/suffix6b.sh
|
Shell
|
gpl-2.0
| 2,026 |
./bin/3DVisualizer -class RealMCNP ../data/ATR\ Modernization/MCNP/meshtal/meshtal155
|
VisualIdeation/3DVisualizer
|
scripts/meshtal155.sh
|
Shell
|
gpl-2.0
| 87 |
/home/quaker/bin/getip eth1 > /home/quaker/Dropbox/Office/IP
/home/quaker/bin/getip eth2 >> /home/quaker/Dropbox/Office/IP
chown quaker /home/quaker/Dropbox/Office/IP
chgrp quaker /home/quaker/Dropbox/Office/IP
chmod 744 /home/quaker/Dropbox/Office/IP
|
quakerntj/bashscript
|
daylyip.sh
|
Shell
|
gpl-2.0
| 252 |
#! /bin/bash
#
# Installation script for CK packages.
#
# See CK LICENSE.txt for licensing details.
# See CK Copyright.txt for copyright details.
#
# Developer(s): Grigori Fursin, 2015
#
# PACKAGE_DIR
# INSTALL_DIR
# Fix number of processes
NP=${CK_HOST_CPU_NUMBER_OF_PROCESSORS}
if [ "${PARALLEL_BUILDS}" != "" ] ; then
NP=${PARALLEL_BUILDS}
fi
# GCC version
GCC_VER=`gcc -dumpversion`
# MACHINE
MACHINE=`gcc -dumpmachine`
export PACKAGE_NAME=milepost-gcc-4.4.4
cd ${INSTALL_DIR}
# Set special vars
if [ "$LD_LIBRARY_PATH" == "" ] ; then
export LD_LIBRARY_PATH=/usr/lib/${MACHINE}:/usr/lib/gcc/${MACHINE}/${GCC_VER}
else
LD_LIBRARY_PATH1=${LD_LIBRARY_PATH}
if [ "${LD_LIBRARY_PATH: -1}" == ":" ] ; then
LD_LIBRARY_PATH1=${LD_LIBRARY_PATH: : -1}
fi
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH1:/usr/lib/${MACHINE}:/usr/lib/gcc/${MACHINE}/${GCC_VER}
fi
if [ "$LIBRARY_PATH" == "" ] ; then
export LIBRARY_PATH=/usr/lib/${MACHINE}:/usr/lib/gcc/${MACHINE}/${GCC_VER}
else
LIBRARY_PATH1=${LIBRARY_PATH}
if [ "${LIBRARY_PATH: -1}" == ":" ] ; then
LIBRARY_PATH1=${LIBRARY_PATH: : -1}
fi
export LIBRARY_PATH=$LIBRARY_PATH1:/usr/lib/${MACHINE}:/usr/lib/gcc/${MACHINE}/${GCC_VER}
fi
echo ""
echo "Copying package files ..."
cp ${PACKAGE_DIR}/${PACKAGE_NAME}.tar.bz2 .
bzip2 -d ${PACKAGE_NAME}.tar.bz2
tar xvf ${PACKAGE_NAME}.tar
rm ${PACKAGE_NAME}.tar
rm -rf obj
export INSTALL_OBJ_DIR=${INSTALL_DIR}/obj
mkdir $INSTALL_OBJ_DIR
#
echo ""
echo "Patching to support GCC v5+ ..."
cd ${INSTALL_DIR}/${PACKAGE_NAME}
patch -p2 < ${PACKAGE_DIR}/patch1
#
echo ""
echo "Configuring ..."
# Needed for host GCC 5+
export CFLAGS="-fgnu89-inline"
XMACHINE=$(uname -m)
EXTRA_CFG=""
#if [ "${RPI3}" == "YES" ] ; then
# EXTRA_CFG="--with-cpu=cortex-a53 --with-fpu=neon-fp-armv8 --with-float=hard --build=arm-linux-gnueabihf --host=arm-linux-gnueabihf --target=arm-linux-gnueabihf"
# EXTRA_CFG="--build=arm-linux-gnueabihf --host=arm-linux-gnueabihf --target=arm-linux-gnueabihf"
#fi
if [ "${XMACHINE}" == "armv7l" ] || [ "${XMACHINE}" == "aarch64" ] ; then
EXTRA_CFG="$EXTRA_CFG --enable-languages=c --disable-bootstrap \
--disable-libssp \
--with-newlib \
--disable-libgomp \
--disable-libmudflap \
--disable-threads"
else
EXTRA_CFG=" --enable-languages=c,fortran"
fi
echo ""
echo "* EXTRA_CFG = $EXTRA_CFG"
echo ""
cd ${INSTALL_OBJ_DIR}
../${PACKAGE_NAME}/configure --prefix=${INSTALL_DIR} ${EXTRA_CFG} \
--disable-multilib
# --with-gmp=${CK_ENV_LIB_GMP} \
# --with-mpfr=${CK_ENV_LIB_MPFR} \
# --with-mpc=${CK_ENV_LIB_MPC}
# CFLAGS="-fgnu89-inline"
# FGG had issues with 'cannot find crti.o: No such file or directory',
# hence FGG added export LIBRARY_PATH=/usr/lib/x86_64-linux-gnu:$LIBRARY_PATH
if [ "${?}" != "0" ] ; then
echo "Error: Configuration failed in $PWD!"
exit 1
fi
# Build
echo ""
echo "Building ..."
echo ""
cd ${INSTALL_OBJ_DIR}
make -j$NP
if [ "${?}" != "0" ] ; then
echo "Error: Compilation failed in $PWD!"
exit 1
fi
# Install
echo ""
echo "Installing ..."
echo ""
make install
if [ "${?}" != "0" ] ; then
echo "Error: Compilation failed in $PWD!"
exit 1
fi
|
ctuning/reproduce-milepost-project
|
package/compiler-gcc-4.4.4-milepost-src-no-deps/process.sh
|
Shell
|
gpl-2.0
| 3,263 |
#!/bin/sh
# Input:
# $1 - contacts base directory
# $2 - filename of the result list
# Output:
# (1) result list in the contacts base directory
BASISVERZ=$1
LISTEADRESSEZUVERZ=$2
SKRIPTVERZ=`dirname $0`
ls "$BASISVERZ" \
| xargs -n 1 \
| while read ARGS; do
if [ -d "$BASISVERZ/$ARGS" ]; then
sh $SKRIPTVERZ/CsvDatensatzAdresseZuVerz.sh "$BASISVERZ/$ARGS/$ARGS.vcf" \
>> $BASISVERZ/$LISTEADRESSEZUVERZ
fi
done
|
PhilippDedie/mail2wiki
|
lib/Kontakte/ErzeugeListeAdresseZuVerz.sh
|
Shell
|
gpl-2.0
| 436 |
#!/bin/bash
#
# Universal launch script for Natron
#
# Get real current dir
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do
SOURCEDIR=`dirname "$SOURCE"`
DIR=`cd -P "$SOURCEDIR" && pwd`
SOURCE="$(readlink "$SOURCE")"
[[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE"
done
SOURCEDIR=`dirname "$SOURCE"`
DIR=`cd -P "$SOURCEDIR" && pwd`
# Force numeric
export LC_NUMERIC=C
# Set fontconfig path
# Not needed (done in the app), but added to avoid a warning before the splashscreen
if [ -d "$DIR/Resources/etc/fonts" ]; then
export FONTCONFIG_PATH="$DIR/Resources/etc/fonts"
fi
# Check gcc compat
COMPAT_ARCH=`uname -m`
COMPAT_VERSION=3.4.15
if [ "$COMPAT_ARCH" = "x86_64" ]; then
COMPAT_SUFFIX=64
fi
if [ -f /usr/lib$COMPAT_SUFFIX/libstdc++.so.6 ]; then
STDC_LIB=/usr/lib$COMPAT_SUFFIX/libstdc++.so.6
elif [ -f /usr/lib/libstdc++.so.6 ]; then
STDC_LIB=/usr/lib/libstdc++.so.6
elif [ -f /usr/lib/$COMPAT_ARCH-linux-gnu/libstdc++.so.6 ]; then
STDC_LIB=/usr/lib/$COMPAT_ARCH-linux-gnu/libstdc++.so.6
elif [ -f /usr/lib/i386-linux-gnu/libstdc++.so.6 ]; then
STDC_LIB=/usr/lib/i386-linux-gnu/libstdc++.so.6
fi
if [ "$STDC_LIB" != "" ]; then
COMPAT_GCC=`$DIR/bin/strings $STDC_LIB | grep GLIBCXX_${COMPAT_VERSION}`
fi
if [ "$COMPAT_GCC" != "" ]; then
if [ -f "$DIR/lib/libstdc++.so.6" ]; then
rm -f $DIR/lib/libstdc++.so.6 || echo "Failed to remove symlink, please run as root to fix."
fi
if [ -f "$DIR/lib/libgcc_s.so.1" ]; then
rm -f $DIR/lib/libgcc_s.so.1 || echo "Failed to remove symlink, please run as root to fix."
fi
if [ -f "$DIR/lib/libgomp.so.1" ]; then
rm -f $DIR/lib/libgomp.so.1 || echo "Failed to remove symlink, please run as root to fix."
fi
else
if [ ! -f "$DIR/lib/libstdc++.so.6" ]; then
cd $DIR/lib ; ln -sf compat/libstdc++.so.6 . || echo "Failed to create symlink, please run as root to fix."
fi
if [ ! -f "$DIR/lib/libgcc_s.so.1" ]; then
cd $DIR/lib ; ln -sf compat/libgcc_s.so.1 . || echo "Failed to create symlink, please run as root to fix."
fi
if [ ! -f "$DIR/lib/libgomp.so.1" ]; then
cd $DIR/lib ; ln -sf compat/libgomp.so.1 . || echo "Failed to create symlink, please run as root to fix."
fi
fi
# Check for updates
if [ "$1" = "-update" -a -x "$DIR/NatronSetup" ]; then
"$DIR/NatronSetup" --updater
exit 0
fi
# Portable mode, save settings in current dir
if [ "$1" = "-portable" ]; then
#XDG_CACHE_HOME=/tmp
XDG_DATA_HOME="$DIR"
XDG_CONFIG_HOME="$DIR"
export XDG_DATA_HOME XDG_CONFIG_HOME
fi
# start app, with optional debug
if [ "$1" = "-debug" -a -x "$DIR/bin/Natron.debug" ]; then
SEGFAULT_SIGNALS="all"
export SEGFAULT_SIGNALS
catchsegv "$DIR/bin/Natron.debug" -style plastique "$@"
else
"$DIR/bin/Natron" -style plastique "$@"
fi
|
olear/Natron
|
tools/linux/include/scripts/Natron.sh
|
Shell
|
gpl-2.0
| 2,769 |
#!/bin/bash
VER="0.4.1"
wget "https://github.com/SUPERAndroidAnalyzer/super/releases/download/"$VER"/super-analyzer_"$VER"_debian_amd64.deb"
sign_only "super-analyzer_"$VER"_debian_amd64.deb"
echo "DEB Signed and ready for usage"
|
AndroidTamer/Packaging_Tools
|
Build/SUPER/build.sh
|
Shell
|
gpl-2.0
| 229 |
#!/bin/bash
MinR=1 # 70
MaxR=46 # 75
MinZ=11 # 166
MaxZ=90 # 261
MinT=1 # 90
MaxT=90 # 1
echo "Processing format between layer${MinZ}.txt and layer${MaxZ}.txt"
for i in $(seq -f %03g $((MinZ)) $((MaxZ))); do
paste Rindex.txt ORI_Layer${i}.txt | expand -t 1 >> layer${i}.txt
done
echo "Searching data between layer${MinZ}.txt and layer${MaxZ}.txt"
for i in $(seq -f %03g $((MinZ)) $((MaxZ)) ); do
echo " Searching R data between ${MinR} and ${MaxR} in layer${i}.txt"
sed -n "$((MinR+1)),$((MaxR+1))p" layer${i}.txt >> Ext_layer${i}.txt
done
echo " find Maximum and position for interest region"
python Search_Max.py ${MinZ} ${MaxZ}
rm Ext_layer* Rindex.txt
|
selwyndd21/read3dsyn
|
4_MaxSearching.sh
|
Shell
|
gpl-2.0
| 690 |
#!/bin/sh
cat $1 | grep -e "EDEN" | sed 's/^.*MIN:\s\([0-9]*\).*AVG:\s\([0-9]*\).*MAX:\s\([0-9]*\).*/\1 \2 \3/' | awk 'BEGIN{sum_1=0; sum_2=0; sum_3=0; n=0} {n=n+1; sum_1+=$1; sum_2+=$2; sum_3+=$3} END{print "EDEN => MIN:",sum_1/n,"K AVG:",sum_2/n,"K MAX:",sum_3/n,"K"}'
cat $1 | grep -e "FROM" | sed 's/^.*MIN:\s\([0-9]*\).*AVG:\s\([0-9]*\).*MAX:\s\([0-9]*\).*/\1 \2 \3/' | awk 'BEGIN{sum_1=0; sum_2=0; sum_3=0; n=0} {n=n+1; sum_1+=$1; sum_2+=$2; sum_3+=$3} END{print "FROM => MIN:",sum_1/n,"K AVG:",sum_2/n,"K MAX:",sum_3/n,"K"}'
cat $1 | grep -e "TO" | sed 's/^.*MIN:\s\([0-9]*\).*AVG:\s\([0-9]*\).*MAX:\s\([0-9]*\).*/\1 \2 \3/' | awk 'BEGIN{sum_1=0; sum_2=0; sum_3=0; n=0} {n=n+1; sum_1+=$1; sum_2+=$2; sum_3+=$3} END{print "TO => MIN:",sum_1/n,"K AVG:",sum_2/n,"K MAX:",sum_3/n,"K"}'
cat $1 | grep -e "OLD" | sed 's/^.*MIN:\s\([0-9]*\).*AVG:\s\([0-9]*\).*MAX:\s\([0-9]*\).*/\1 \2 \3/' | awk 'BEGIN{sum_1=0; sum_2=0; sum_3=0; n=0} {n=n+1; sum_1+=$1; sum_2+=$2; sum_3+=$3} END{print "OLD => MIN:",sum_1/n,"K AVG:",sum_2/n,"K MAX:",sum_3/n,"K"}'
cat $1 | grep -e "ops" | sed 's/\sops\/m//' | awk 'BEGIN{sum=0; n=0} {n+=1; sum+=$1} END{print "SPEED:",sum/n,"ops/m"}'
rm -f $1
|
yuhc/jdk8u-dev
|
java_demo/spec/specjvm/average.sh
|
Shell
|
gpl-2.0
| 1,191 |
#!/bin/bash
# Copyright 2014 Johns Hopkins University (Author: Daniel Povey)
# Apache 2.0
# Begin configuration.
stage=0 # This allows restarting after partway, when something when wrong.
feature_type=mfcc
add_pitch=false
mfcc_config=conf/mfcc.conf # you can override any of these you need to override.
plp_config=conf/plp.conf
fbank_config=conf/fbank.conf
# online_pitch_config is the config file for both pitch extraction and
# post-processing; we combine them into one because during training this
# is given to the program compute-and-process-kaldi-pitch-feats.
online_pitch_config=conf/online_pitch.conf
# Below are some options that affect the iVectors, and should probably
# match those used in extract_ivectors_online.sh.
num_gselect=5 # Gaussian-selection using diagonal model: number of Gaussians to select
posterior_scale=0.1 # Scale on the acoustic posteriors, intended to account for
# inter-frame correlations.
min_post=0.025 # Minimum posterior to use (posteriors below this are pruned out)
# caution: you should use the same value in the online-estimation
# code.
max_count=100 # This max-count of 100 can make iVectors more consistent for
# different lengths of utterance, by scaling up the prior term
# when the data-count exceeds this value. The data-count is
# after posterior-scaling, so assuming the posterior-scale is
# 0.1, --max-count 100 starts having effect after 1000 frames,
# or 10 seconds of data.
iter=final
# End configuration.
echo "$0 $@" # Print the command line for logging
[ -f path.sh ] && . ./path.sh;
. parse_options.sh || exit 1;
if [ $# -ne 4 ] && [ $# -ne 3 ]; then
echo "Usage: $0 [options] <lang-dir> [<ivector-extractor-dir>] <nnet-dir> <output-dir>"
echo "e.g.: $0 data/lang exp/nnet2_online/extractor exp/nnet2_online/nnet exp/nnet2_online/nnet_online"
echo "main options (for others, see top of script file)"
echo " --feature-type <mfcc|plp> # Type of the base features; "
echo " # important to generate the correct"
echo " # configs in <output-dir>/conf/"
echo " --add-pitch <true|false> # Append pitch features to cmvn"
echo " # (default: false)"
echo " --cmd (utils/run.pl|utils/queue.pl <queue opts>) # how to run jobs."
echo " --config <config-file> # config containing options"
echo " --iter <model-iteration|final> # iteration of model to take."
echo " --stage <stage> # stage to do partial re-run from."
exit 1;
fi
if [ $# -eq 4 ]; then
lang=$1
iedir=$2
srcdir=$3
dir=$4
else
[ $# -eq 3 ] || exit 1;
lang=$1
iedir=
srcdir=$2
dir=$3
fi
for f in $lang/phones.txt $srcdir/${iter}.mdl $srcdir/tree; do
[ ! -f $f ] && echo "$0: no such file $f" && exit 1;
done
if [ ! -z "$iedir" ]; then
for f in final.{mat,ie,dubm} splice_opts global_cmvn.stats online_cmvn.conf; do
[ ! -f $iedir/$f ] && echo "$0: no such file $iedir/$f" && exit 1;
done
fi
utils/lang/check_phones_compatible.sh $lang/phones.txt $srcdir/phones.txt || exit 1;
mkdir -p $dir
cp $lang/phones.txt $dir || exit 1;
dir=$(utils/make_absolute.sh $dir) # Convert $dir to an absolute pathname, so that the
# configuration files we write will contain absolute
# pathnames.
mkdir -p $dir/conf
cp $srcdir/${iter}.mdl $dir/final.mdl || exit 1;
cp $srcdir/tree $dir/ || exit 1;
if [ ! -z "$iedir" ]; then
mkdir -p $dir/ivector_extractor/
cp $iedir/final.{mat,ie,dubm} $iedir/global_cmvn.stats $dir/ivector_extractor/ || exit 1;
# The following things won't be needed directly by the online decoding, but
# will allow us to run prepare_online_decoding.sh again with
# $dir/ivector_extractor/ as the input directory (useful in certain
# cross-system training scenarios).
cp $iedir/splice_opts $iedir/online_cmvn.conf $dir/ivector_extractor/ || exit 1;
fi
mkdir -p $dir/conf
rm $dir/{plp,mfcc,fbank}.conf 2>/dev/null
echo "$0: preparing configuration files in $dir/conf"
if [ -f $dir/conf/online_nnet2_decoding.conf ]; then
echo "$0: moving $dir/conf/online_nnet2_decoding.conf to $dir/conf/online_nnet2_decoding.conf.bak"
mv $dir/conf/online_nnet2_decoding.conf $dir/conf/online_nnet2_decoding.conf.bak
fi
conf=$dir/conf/online_nnet2_decoding.conf
echo -n >$conf
echo "--feature-type=$feature_type" >>$conf
case "$feature_type" in
mfcc)
echo "--mfcc-config=$dir/conf/mfcc.conf" >>$conf
cp $mfcc_config $dir/conf/mfcc.conf || exit 1;;
plp)
echo "--plp-config=$dir/conf/plp.conf" >>$conf
cp $plp_config $dir/conf/plp.conf || exit 1;;
fbank)
echo "--fbank-config=$dir/conf/fbank.conf" >>$conf
cp $fbank_config $dir/conf/fbank.conf || exit 1;;
*)
echo "Unknown feature type $feature_type"
esac
if [ ! -z "$iedir" ]; then
ieconf=$dir/conf/ivector_extractor.conf
echo -n >$ieconf
echo "--ivector-extraction-config=$ieconf" >>$conf
cp $iedir/online_cmvn.conf $dir/conf/online_cmvn.conf || exit 1;
# the next line puts each option from splice_opts on its own line in the config.
for x in $(cat $iedir/splice_opts); do echo "$x"; done > $dir/conf/splice.conf
echo "--splice-config=$dir/conf/splice.conf" >>$ieconf
echo "--cmvn-config=$dir/conf/online_cmvn.conf" >>$ieconf
echo "--lda-matrix=$dir/ivector_extractor/final.mat" >>$ieconf
echo "--global-cmvn-stats=$dir/ivector_extractor/global_cmvn.stats" >>$ieconf
echo "--diag-ubm=$dir/ivector_extractor/final.dubm" >>$ieconf
echo "--ivector-extractor=$dir/ivector_extractor/final.ie" >>$ieconf
echo "--num-gselect=$num_gselect" >>$ieconf
echo "--min-post=$min_post" >>$ieconf
echo "--posterior-scale=$posterior_scale" >>$ieconf # this is currently the default in the scripts.
echo "--max-remembered-frames=1000" >>$ieconf # the default
echo "--max-count=$max_count" >>$ieconf
fi
if $add_pitch; then
echo "$0: enabling pitch features"
echo "--add-pitch=true" >>$conf
echo "$0: creating $dir/conf/online_pitch.conf"
if [ ! -f $online_pitch_config ]; then
echo "$0: expected file '$online_pitch_config' to exist.";
exit 1;
fi
cp $online_pitch_config $dir/conf/online_pitch.conf || exit 1;
echo "--online-pitch-config=$dir/conf/online_pitch.conf" >>$conf
fi
silphonelist=`cat $lang/phones/silence.csl` || exit 1;
echo "--endpoint.silence-phones=$silphonelist" >>$conf
echo "$0: created config file $conf"
|
michellemorales/OpenMM
|
kaldi/egs/wsj/s5/steps/online/nnet2/prepare_online_decoding.sh
|
Shell
|
gpl-2.0
| 6,714 |
#!/bin/bash
# Copyright 2015 Johns Hopkins University (Author: Daniel Povey).
# 2015 Vijayaditya Peddinti
# 2016 Yiming Wang
# 2017 Google Inc. ([email protected])
# Apache 2.0.
# 6o is same as 6k, but with two additional BLSTM layers
# and delay of -1 for the first blstm layer
# local/chain/compare_wer_general.sh blstm_6k_sp blstm_6o_sp
# System blstm_6k_sp blstm_6o_sp
# WER on train_dev(tg) 12.95 12.60
# WER on train_dev(fg) 11.98 11.75
# WER on eval2000(tg) 15.5 14.7
# WER on eval2000(fg) 14.1 13.4
# Final train prob -0.041 -0.041
# Final valid prob -0.072 -0.069
# Final train prob (xent) -0.629 -0.636
# Final valid prob (xent) -0.8091 -0.7854
set -e
# configs for 'chain'
stage=12
train_stage=-10
get_egs_stage=-10
speed_perturb=true
dir=exp/chain/blstm_6o # Note: _sp will get added to this if $speed_perturb == true.
decode_iter=
decode_dir_affix=
# training options
leftmost_questions_truncate=-1
chunk_width=150
chunk_left_context=40
chunk_right_context=40
xent_regularize=0.025
self_repair_scale=0.00001
label_delay=0
# decode options
extra_left_context=50
extra_right_context=50
frames_per_chunk=
remove_egs=false
common_egs_dir=
affix=
# End configuration section.
echo "$0 $@" # Print the command line for logging
. ./cmd.sh
. ./path.sh
. ./utils/parse_options.sh
if ! cuda-compiled; then
cat <<EOF && exit 1
This script is intended to be used with GPUs but you have not compiled Kaldi with CUDA
If you want to use GPUs (and have them), go to src/, and configure and make on a machine
where "nvcc" is installed.
EOF
fi
# The iVector-extraction and feature-dumping parts are the same as the standard
# nnet3 setup, and you can skip them by setting "--stage 8" if you have already
# run those things.
suffix=
if [ "$speed_perturb" == "true" ]; then
suffix=_sp
fi
dir=$dir${affix:+_$affix}
if [ $label_delay -gt 0 ]; then dir=${dir}_ld$label_delay; fi
dir=${dir}$suffix
train_set=train_nodup$suffix
ali_dir=exp/tri4_ali_nodup$suffix
treedir=exp/chain/tri5_7d_tree$suffix
lang=data/lang_chain_2y
# if we are using the speed-perturbed data we need to generate
# alignments for it.
local/nnet3/run_ivector_common.sh --stage $stage \
--speed-perturb $speed_perturb \
--generate-alignments $speed_perturb || exit 1;
if [ $stage -le 9 ]; then
# Get the alignments as lattices (gives the CTC training more freedom).
# use the same num-jobs as the alignments
nj=$(cat exp/tri4_ali_nodup$suffix/num_jobs) || exit 1;
steps/align_fmllr_lats.sh --nj $nj --cmd "$train_cmd" data/$train_set \
data/lang exp/tri4 exp/tri4_lats_nodup$suffix
rm exp/tri4_lats_nodup$suffix/fsts.*.gz # save space
fi
if [ $stage -le 10 ]; then
# Create a version of the lang/ directory that has one state per phone in the
# topo file. [note, it really has two states.. the first one is only repeated
# once, the second one has zero or more repeats.]
rm -rf $lang
cp -r data/lang $lang
silphonelist=$(cat $lang/phones/silence.csl) || exit 1;
nonsilphonelist=$(cat $lang/phones/nonsilence.csl) || exit 1;
# Use our special topology... note that later on may have to tune this
# topology.
steps/nnet3/chain/gen_topo.py $nonsilphonelist $silphonelist >$lang/topo
fi
if [ $stage -le 11 ]; then
# Build a tree using our new topology.
steps/nnet3/chain/build_tree.sh --frame-subsampling-factor 3 \
--leftmost-questions-truncate $leftmost_questions_truncate \
--context-opts "--context-width=2 --central-position=1" \
--cmd "$train_cmd" 7000 data/$train_set $lang $ali_dir $treedir
fi
if [ $stage -le 12 ]; then
echo "$0: creating neural net configs using the xconfig parser";
num_targets=$(tree-info $treedir/tree |grep num-pdfs|awk '{print $2}')
[ -z $num_targets ] && { echo "$0: error getting num-targets"; exit 1; }
learning_rate_factor=$(echo "print 0.5/$xent_regularize" | python)
lstm_opts="decay-time=20"
mkdir -p $dir/configs
cat <<EOF > $dir/configs/network.xconfig
input dim=100 name=ivector
input dim=40 name=input
# please note that it is important to have an input layer with name=input
# as the layer immediately preceding the fixed-affine-layer, to enable
# the use of the short notation for the descriptor
fixed-affine-layer name=lda input=Append(-2,-1,0,1,2,ReplaceIndex(ivector, t, 0)) affine-transform-file=$dir/configs/lda.mat
# the first splicing is moved before the lda layer, so no splicing here
# check steps/libs/nnet3/xconfig/lstm.py for the other options and defaults
fast-lstmp-layer name=blstm1-forward input=lda cell-dim=1024 recurrent-projection-dim=256 non-recurrent-projection-dim=256 delay=-1 $lstm_opts
fast-lstmp-layer name=blstm1-backward input=lda cell-dim=1024 recurrent-projection-dim=256 non-recurrent-projection-dim=256 delay=1 $lstm_opts
fast-lstmp-layer name=blstm2-forward input=Append(blstm1-forward, blstm1-backward) cell-dim=1024 recurrent-projection-dim=256 non-recurrent-projection-dim=256 delay=-3 $lstm_opts
fast-lstmp-layer name=blstm2-backward input=Append(blstm1-forward, blstm1-backward) cell-dim=1024 recurrent-projection-dim=256 non-recurrent-projection-dim=256 delay=3 $lstm_opts
fast-lstmp-layer name=blstm3-forward input=Append(blstm2-forward, blstm2-backward) cell-dim=1024 recurrent-projection-dim=256 non-recurrent-projection-dim=256 delay=-3 $lstm_opts
fast-lstmp-layer name=blstm3-backward input=Append(blstm2-forward, blstm2-backward) cell-dim=1024 recurrent-projection-dim=256 non-recurrent-projection-dim=256 delay=3 $lstm_opts
fast-lstmp-layer name=blstm4-forward input=Append(blstm3-forward, blstm3-backward) cell-dim=1024 recurrent-projection-dim=256 non-recurrent-projection-dim=256 delay=-3 $lstm_opts
fast-lstmp-layer name=blstm4-backward input=Append(blstm3-forward, blstm3-backward) cell-dim=1024 recurrent-projection-dim=256 non-recurrent-projection-dim=256 delay=3 $lstm_opts
fast-lstmp-layer name=blstm5-forward input=Append(blstm4-forward, blstm4-backward) cell-dim=1024 recurrent-projection-dim=256 non-recurrent-projection-dim=256 delay=-3 $lstm_opts
fast-lstmp-layer name=blstm5-backward input=Append(blstm4-forward, blstm4-backward) cell-dim=1024 recurrent-projection-dim=256 non-recurrent-projection-dim=256 delay=3 $lstm_opts
## adding the layers for chain branch
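# include-log-softmax=false: the chain (LF-MMI) objective consumes unnormalized
# log-likelihoods directly, so no softmax is applied at this output.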
output-layer name=output input=Append(blstm5-forward, blstm5-backward) output-delay=$label_delay include-log-softmax=false dim=$num_targets max-change=1.5
# adding the layers for xent branch
# This block prints the configs for a separate output that will be
# trained with a cross-entropy objective in the 'chain' models... this
# has the effect of regularizing the hidden parts of the model. We use
# 0.5 / args.xent_regularize as the learning rate factor; this factor is
# suitable because it means the xent final-layer learns at a rate
# independent of the regularization constant, and the 0.5 was tuned so as
# to make the relative progress similar in the xent and regular final layers.
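# For example, with the default xent_regularize=0.025 above, the factor is 0.5 / 0.025 = 20.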
output-layer name=output-xent input=Append(blstm5-forward, blstm5-backward) output-delay=$label_delay dim=$num_targets learning-rate-factor=$learning_rate_factor max-change=1.5
EOF
steps/nnet3/xconfig_to_configs.py --xconfig-file $dir/configs/network.xconfig --config-dir $dir/configs/
fi
if [ $stage -le 13 ]; then
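# On the JHU CLSP grid, spread the (large) egs archives over several /export disks;
# on other machines the hostname test fails and egs are simply written under $dir/egs.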
if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! -d $dir/egs/storage ]; then
utils/create_split_dir.pl \
/export/b0{5,6,7,8}/$USER/kaldi-data/egs/swbd-$(date +'%m_%d_%H_%M')/s5c/$dir/egs/storage $dir/egs/storage
fi
steps/nnet3/chain/train.py --stage $train_stage \
--cmd "$decode_cmd" \
--feat.online-ivector-dir exp/nnet3/ivectors_${train_set} \
--feat.cmvn-opts "--norm-means=false --norm-vars=false" \
--chain.xent-regularize $xent_regularize \
--chain.leaky-hmm-coefficient 0.1 \
--chain.l2-regularize 0.00005 \
--chain.apply-deriv-weights false \
--chain.lm-opts="--num-extra-lm-states=2000" \
--trainer.num-chunk-per-minibatch 32 \
--trainer.frames-per-iter 1200000 \
--trainer.max-param-change 2.0 \
--trainer.num-epochs 4 \
--trainer.optimization.shrink-value 0.99 \
--trainer.optimization.num-jobs-initial 3 \
--trainer.optimization.num-jobs-final 16 \
--trainer.optimization.initial-effective-lrate 0.001 \
--trainer.optimization.final-effective-lrate 0.0001 \
--trainer.optimization.momentum 0.0 \
--trainer.deriv-truncate-margin 8 \
--egs.stage $get_egs_stage \
--egs.opts "--frames-overlap-per-eg 0" \
--egs.chunk-width $chunk_width \
--egs.chunk-left-context $chunk_left_context \
--egs.chunk-right-context $chunk_right_context \
--egs.dir "$common_egs_dir" \
--cleanup.remove-egs $remove_egs \
--feat-dir data/${train_set}_hires \
--tree-dir $treedir \
--lat-dir exp/tri4_lats_nodup$suffix \
--dir $dir || exit 1;
fi
if [ $stage -le 14 ]; then
# Note: it might appear that this $lang directory is mismatched, and it is as
# far as the 'topo' is concerned, but this script doesn't read the 'topo' from
# the lang directory.
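# --self-loop-scale 1.0 (rather than the usual 0.1) is the scale chain decoding graphs expect.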
utils/mkgraph.sh --self-loop-scale 1.0 data/lang_sw1_tg $dir $dir/graph_sw1_tg
fi
decode_suff=sw1_tg
graph_dir=$dir/graph_sw1_tg
if [ $stage -le 15 ]; then
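# If the extra contexts or chunk size were left unset, fall back to the values
# used during training so the BLSTM sees comparable context at decode time.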
[ -z $extra_left_context ] && extra_left_context=$chunk_left_context;
[ -z $extra_right_context ] && extra_right_context=$chunk_right_context;
[ -z $frames_per_chunk ] && frames_per_chunk=$chunk_width;
iter_opts=
if [ ! -z $decode_iter ]; then
iter_opts=" --iter $decode_iter "
fi
for decode_set in train_dev eval2000; do
(
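# Chain models are decoded with acoustic scale 1.0; --post-decode-acwt 10.0 rescales
# the acoustic scores in the lattice so the usual LM-weight range still applies at scoring time.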
steps/nnet3/decode.sh --acwt 1.0 --post-decode-acwt 10.0 \
--nj 50 --cmd "$decode_cmd" $iter_opts \
--extra-left-context $extra_left_context \
--extra-right-context $extra_right_context \
--frames-per-chunk "$frames_per_chunk" \
--online-ivector-dir exp/nnet3/ivectors_${decode_set} \
$graph_dir data/${decode_set}_hires \
$dir/decode_${decode_set}${decode_dir_affix:+_$decode_dir_affix}_${decode_suff} || exit 1;
if $has_fisher; then
steps/lmrescore_const_arpa.sh --cmd "$decode_cmd" \
data/lang_sw1_{tg,fsh_fg} data/${decode_set}_hires \
$dir/decode_${decode_set}${decode_dir_affix:+_$decode_dir_affix}_sw1_{tg,fsh_fg} || exit 1;
fi
) &
done
fi
wait;
exit 0;
|
michellemorales/OpenMM
|
kaldi/egs/swbd/s5c/local/chain/tuning/run_blstm_6o.sh
|
Shell
|
gpl-2.0
| 10,550 |
#!/bin/bash
########### TASK metadata #############
# Task : makeAfg_016of016
# Module : P_PreAssembler
# TaskType : None
# URL : task://Anonymous/P_PreAssembler/makeAfg_016of016
# createdAt : 2013-12-20 15:55:43.303219
# ncmds : 1
# LogPath : /groups/csf-ngs/projects/20131203_Armin_PacbioEC/data/preassembly/result-nonsensitive/log/P_PreAssembler/makeAfg_016of016.log
# Script Path : /groups/csf-ngs/projects/20131203_Armin_PacbioEC/data/preassembly/result-nonsensitive/workflow/P_PreAssembler/makeAfg_016of016.sh
# Input : file://Anonymous/fastq.chunk016of016.fofn
# Input : file://Anonymous/data/filtered_regions.chunk016of016.fofn
# Output : file://Anonymous/shortreads.chunk016of016.afg
########### END TASK metadata #############
cat /groups/csf-ngs/projects/20131203_Armin_PacbioEC/data/preassembly/result-nonsensitive/workflow/P_PreAssembler/makeAfg_016of016.sh >> /groups/csf-ngs/projects/20131203_Armin_PacbioEC/data/preassembly/result-nonsensitive/log/P_PreAssembler/makeAfg_016of016.log;
echo 'Running task://Anonymous/P_PreAssembler/makeAfg_016of016 on' `uname -a`;
echo 'Started on' `date -u`;
echo 'Validating existence of Input Files'
if [ -f /groups/csf-ngs/projects/20131203_Armin_PacbioEC/data/preassembly/result-nonsensitive/fastq.chunk016of016.fofn ]
then
echo 'Successfully found /groups/csf-ngs/projects/20131203_Armin_PacbioEC/data/preassembly/result-nonsensitive/fastq.chunk016of016.fofn'
else
echo 'WARNING: Unable to find necessary input file /groups/csf-ngs/projects/20131203_Armin_PacbioEC/data/preassembly/result-nonsensitive/fastq.chunk016of016.fofn. Treating file as optional to run task.'
fi
if [ -f /groups/csf-ngs/projects/20131203_Armin_PacbioEC/data/preassembly/result-nonsensitive/data/filtered_regions.chunk016of016.fofn ]
then
echo 'Successfully found /groups/csf-ngs/projects/20131203_Armin_PacbioEC/data/preassembly/result-nonsensitive/data/filtered_regions.chunk016of016.fofn'
else
echo 'WARNING: Unable to find necessary input file /groups/csf-ngs/projects/20131203_Armin_PacbioEC/data/preassembly/result-nonsensitive/data/filtered_regions.chunk016of016.fofn. Treating file as optional to run task.'
fi
echo 'Successfully validated input files'
# Task makeAfg_016of016 commands:
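# toAfg (from the PacBio pre-assembler tooling) is presumed here to convert the chunked
# reads listed in the fastq fofn into an AMOS-style .afg file for the error-correction
# step; -noSplitSubreads presumably keeps each subread whole instead of splitting it.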
toAfg /groups/csf-ngs/projects/20131203_Armin_PacbioEC/data/preassembly/result-nonsensitive/fastq.chunk016of016.fofn /groups/csf-ngs/projects/20131203_Armin_PacbioEC/data/preassembly/result-nonsensitive/shortreads.chunk016of016.afg -noSplitSubreads || exit $?; echo "Task 0 completed at `date -u`" || exit $?;
echo 'Finished on' `date -u`;
# Success
exit 0
|
h3kker/assemblyTalk
|
preassembler/workflow/P_PreAssembler/makeAfg_016of016.sh
|
Shell
|
gpl-2.0
| 2,646 |