code (stringlengths 2 to 1.05M) | repo_name (stringlengths 5 to 110) | path (stringlengths 3 to 922) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 2 to 1.05M) |
---|---|---|---|---|---|
#!/bin/sh
(cd dist-dev; php -S localhost:8000 ../src/server-php/server.php)
| msimonin/OS.js-v2 | bin/start-php-dev.sh | Shell | bsd-2-clause | 76 |
PYTHON_DIR="/var/packages/python/target/bin"
GIT_DIR="/var/packages/git/target/bin"
GIT="${GIT_DIR}/git"
PATH="${SYNOPKG_PKGDEST}/bin:${SYNOPKG_PKGDEST}/env/bin:${PYTHON_DIR}:${GIT_DIR}:${PATH}"
PYTHON="${SYNOPKG_PKGDEST}/env/bin/python"
VIRTUALENV="${PYTHON_DIR}/virtualenv"
LAZYLIBRARIAN="${SYNOPKG_PKGDEST}/var/LazyLibrarian/LazyLibrarian.py"
CFG_FILE="${SYNOPKG_PKGDEST}/var/config.ini"
SERVICE_COMMAND="${PYTHON} ${LAZYLIBRARIAN} --daemon --pidfile ${PID_FILE} --config ${CFG_FILE} --datadir ${SYNOPKG_PKGDEST}/var/"
GROUP="sc-download"
LEGACY_GROUP="sc-media"
validate_preinst ()
{
# Check fork
if [ "${SYNOPKG_PKG_STATUS}" == "INSTALL" ] && ! ${GIT} ls-remote --heads --exit-code ${wizard_fork_url:=git://github.com/DobyTang/LazyLibrarian.git} ${wizard_fork_branch:=master} > /dev/null 2>&1; then
echo "Incorrect fork"
exit 1
fi
}
service_postinst ()
{
# Create a Python virtualenv
${VIRTUALENV} --system-site-packages ${SYNOPKG_PKGDEST}/env
# Clone the repository for new installs or upgrades
# Upgrades from the old package had the repo in /share/
if [ "${SYNOPKG_PKG_STATUS}" == "INSTALL" ] || [ ! -d "${TMP_DIR}/LazyLibrarian" ]; then
${GIT} clone -q -b ${wizard_fork_branch:=master} ${wizard_fork_url:=git://github.com/DobyTang/LazyLibrarian.git} ${SYNOPKG_PKGDEST}/var/LazyLibrarian
fi
# If necessary, also add the user to the old group
syno_user_add_to_legacy_group "${EFF_USER}" "${USER}" "${LEGACY_GROUP}"
# Remove legacy user
# Commands of busybox from spk/python
delgroup "${USER}" "users"
deluser "${USER}"
}
| Zetten/spksrc | spk/lazylibrarian/src/service-setup.sh | Shell | bsd-3-clause | 1,616 |
#!/bin/bash
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
set -ex
# change to grpc repo root
cd $(dirname $0)/../../..
##########################
# Portability operations #
##########################
PLATFORM=`uname -s`
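# Each is_*() helper below relies on ${PLATFORM/<substr>}: removing the substring
# changes the value only when it is present, so "true" is printed on a match and
# the command-substituted call simply yields an empty string otherwise.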
function is_msys() {
if [ "${PLATFORM/MSYS}" != "$PLATFORM" ]; then
echo true
else
exit 1
fi
}
function is_mingw() {
if [ "${PLATFORM/MINGW}" != "$PLATFORM" ]; then
echo true
else
exit 1
fi
}
function is_darwin() {
if [ "${PLATFORM/Darwin}" != "$PLATFORM" ]; then
echo true
else
exit 1
fi
}
function is_linux() {
if [ "${PLATFORM/Linux}" != "$PLATFORM" ]; then
echo true
else
exit 1
fi
}
# Associated virtual environment name for the given python command.
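# e.g. "venv python2.7" prints "py27"; "venv python3.6" prints "py36".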
function venv() {
$1 -c "import sys; print('py{}{}'.format(*sys.version_info[:2]))"
}
# Path to python executable within a virtual environment depending on the
# system.
function venv_relative_python() {
if [ $(is_mingw) ]; then
echo 'Scripts/python.exe'
else
echo 'bin/python'
fi
}
# Distutils toolchain to use depending on the system.
function toolchain() {
if [ $(is_mingw) ]; then
echo 'mingw32'
else
echo 'unix'
fi
}
# Command to invoke the linux command `realpath` or equivalent.
function script_realpath() {
# Find `realpath`
if [ -x "$(command -v realpath)" ]; then
realpath "$@"
elif [ -x "$(command -v grealpath)" ]; then
grealpath "$@"
else
exit 1
fi
}
####################
# Script Arguments #
####################
PYTHON=${1:-python2.7}
VENV=${2:-$(venv $PYTHON)}
VENV_RELATIVE_PYTHON=${3:-$(venv_relative_python)}
TOOLCHAIN=${4:-$(toolchain)}
if [ $(is_msys) ]; then
echo "MSYS doesn't directly provide the right compiler(s);"
echo "switch to a MinGW shell."
exit 1
fi
ROOT=`pwd`
export CFLAGS="-I$ROOT/include -std=gnu99 -fno-wrapv $CFLAGS"
export GRPC_PYTHON_BUILD_WITH_CYTHON=1
export LANG=en_US.UTF-8
# Default python on the host to fall back to when instantiating e.g. the
# virtualenv.
HOST_PYTHON=${HOST_PYTHON:-python}
# If ccache is available on Linux, use it.
if [ $(is_linux) ]; then
# We're not on Darwin (Mac OS X)
if [ -x "$(command -v ccache)" ]; then
if [ -x "$(command -v gcc)" ]; then
export CC='ccache gcc'
elif [ -x "$(command -v clang)" ]; then
export CC='ccache clang'
fi
fi
fi
############################
# Perform build operations #
############################
# Instantiate the virtualenv, preferring to do so from the relevant python
# version. Even if these commands fail (e.g. on Windows due to name conflicts)
# it's possible that the virtualenv is still usable and we trust the tester to
# be able to 'figure it out' instead of us e.g. doing potentially expensive and
# unnecessary error recovery by `rm -rf`ing the virtualenv.
($PYTHON -m virtualenv $VENV ||
$HOST_PYTHON -m virtualenv -p $PYTHON $VENV ||
true)
VENV_PYTHON=`script_realpath "$VENV/$VENV_RELATIVE_PYTHON"`
# pip-installs the directory specified. Used because on MSYS the vanilla Windows
# Python gets confused when parsing paths.
pip_install_dir() {
PWD=`pwd`
cd $1
($VENV_PYTHON setup.py build_ext -c $TOOLCHAIN || true)
$VENV_PYTHON -m pip install --no-deps .
cd $PWD
}
$VENV_PYTHON -m pip install --upgrade pip
$VENV_PYTHON -m pip install setuptools
$VENV_PYTHON -m pip install cython
$VENV_PYTHON -m pip install six enum34 protobuf futures
pip_install_dir $ROOT
$VENV_PYTHON $ROOT/tools/distrib/python/make_grpcio_tools.py
pip_install_dir $ROOT/tools/distrib/python/grpcio_tools
# Build/install health checking
$VENV_PYTHON $ROOT/src/python/grpcio_health_checking/setup.py preprocess
$VENV_PYTHON $ROOT/src/python/grpcio_health_checking/setup.py build_package_protos
pip_install_dir $ROOT/src/python/grpcio_health_checking
# Build/install reflection
$VENV_PYTHON $ROOT/src/python/grpcio_reflection/setup.py preprocess
$VENV_PYTHON $ROOT/src/python/grpcio_reflection/setup.py build_package_protos
pip_install_dir $ROOT/src/python/grpcio_reflection
# Build/install tests
$VENV_PYTHON -m pip install coverage==4.4 oauth2client==4.1.0 \
google-auth==1.0.0 requests==2.14.2
$VENV_PYTHON $ROOT/src/python/grpcio_tests/setup.py preprocess
$VENV_PYTHON $ROOT/src/python/grpcio_tests/setup.py build_package_protos
pip_install_dir $ROOT/src/python/grpcio_tests
| 7anner/grpc | tools/run_tests/helper_scripts/build_python.sh | Shell | bsd-3-clause | 5,836 |
#network interface on which to limit traffic
IF="eth0"
#limit of the network interface in question
LINKCEIL="1gbit"
#limit outbound Nautiluscoin protocol traffic to this rate
LIMIT="160kbit"
#defines the address space for which you wish to disable rate limiting
LOCALNET="192.168.0.0/16"
#delete existing rules
tc qdisc del dev ${IF} root
#add root class
tc qdisc add dev ${IF} root handle 1: htb default 10
#add parent class
tc class add dev ${IF} parent 1: classid 1:1 htb rate ${LINKCEIL} ceil ${LINKCEIL}
#add our two classes. one unlimited, another limited
tc class add dev ${IF} parent 1:1 classid 1:10 htb rate ${LINKCEIL} ceil ${LINKCEIL} prio 0
tc class add dev ${IF} parent 1:1 classid 1:11 htb rate ${LIMIT} ceil ${LIMIT} prio 1
#add handles to our classes so packets marked with <x> go into the class with "... handle <x> fw ..."
tc filter add dev ${IF} parent 1: protocol ip prio 1 handle 1 fw classid 1:10
tc filter add dev ${IF} parent 1: protocol ip prio 2 handle 2 fw classid 1:11
#delete any existing rules
#disable for now
#ret=0
#while [ $ret -eq 0 ]; do
# iptables -t mangle -D OUTPUT 1
# ret=$?
#done
#limit outgoing traffic to and from port 8333, but not when dealing with a host on the local network
# (defined by $LOCALNET)
# --set-mark marks packets matching these criteria with the number "2"
# these packets are filtered by the tc filter with "handle 2"
# this filter sends the packets into the 1:11 class, and this class is limited to ${LIMIT}
iptables -t mangle -A OUTPUT -p tcp -m tcp --dport 8333 ! -d ${LOCALNET} -j MARK --set-mark 0x2
iptables -t mangle -A OUTPUT -p tcp -m tcp --sport 8333 ! -d ${LOCALNET} -j MARK --set-mark 0x2
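# Optional sanity check (illustrative, assumes the rules above are in place):
#   tc -s class show dev ${IF}        # byte/packet counters for classes 1:10 and 1:11
#   iptables -t mangle -L OUTPUT -nv  # hit counts for the two MARK rules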
| coinkeeper/2015-04-19_21-21_nautiluscoin | contrib/qos/tc.sh | Shell | mit | 1,675 |
#!/bin/bash
FN="pd.clariom.s.rat.ht_3.14.1.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.12/data/annotation/src/contrib/pd.clariom.s.rat.ht_3.14.1.tar.gz"
"https://bioarchive.galaxyproject.org/pd.clariom.s.rat.ht_3.14.1.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-pd.clariom.s.rat.ht/bioconductor-pd.clariom.s.rat.ht_3.14.1_src_all.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-pd.clariom.s.rat.ht/bioconductor-pd.clariom.s.rat.ht_3.14.1_src_all.tar.gz"
)
MD5="be901eb9e8830f3e5bedf154189a3743"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
curl $URL > $TARBALL
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks.
if [[ $(uname -s) == "Linux" ]]; then
if md5sum -c <<<"$MD5 $TARBALL"; then
SUCCESS=1
break
fi
else if [[ $(uname -s) == "Darwin" ]]; then
if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
SUCCESS=1
break
fi
fi
fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
| bebatut/bioconda-recipes | recipes/bioconductor-pd.clariom.s.rat.ht/post-link.sh | Shell | mit | 1,480 |
#!/bin/bash
FN="MeSH.Dse.eg.db_1.13.0.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.10/data/annotation/src/contrib/MeSH.Dse.eg.db_1.13.0.tar.gz"
"https://bioarchive.galaxyproject.org/MeSH.Dse.eg.db_1.13.0.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-mesh.dse.eg.db/bioconductor-mesh.dse.eg.db_1.13.0_src_all.tar.gz"
)
MD5="0af4efa430595c8bff94b43ac01f66a7"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
curl $URL > $TARBALL
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks.
if [[ $(uname -s) == "Linux" ]]; then
if md5sum -c <<<"$MD5 $TARBALL"; then
SUCCESS=1
break
fi
else if [[ $(uname -s) == "Darwin" ]]; then
if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
SUCCESS=1
break
fi
fi
fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
| Luobiny/bioconda-recipes | recipes/bioconductor-mesh.dse.eg.db/post-link.sh | Shell | mit | 1,322 |
#!/bin/bash
FN="GSBenchMark_1.6.0.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.10/data/experiment/src/contrib/GSBenchMark_1.6.0.tar.gz"
"https://bioarchive.galaxyproject.org/GSBenchMark_1.6.0.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-gsbenchmark/bioconductor-gsbenchmark_1.6.0_src_all.tar.gz"
)
MD5="201561e111a96f6332c8b796c355b27c"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
curl $URL > $TARBALL
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks.
if [[ $(uname -s) == "Linux" ]]; then
if md5sum -c <<<"$MD5 $TARBALL"; then
SUCCESS=1
break
fi
else if [[ $(uname -s) == "Darwin" ]]; then
if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
SUCCESS=1
break
fi
fi
fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
| Luobiny/bioconda-recipes | recipes/bioconductor-gsbenchmark/post-link.sh | Shell | mit | 1,303 |
#!/bin/bash
nuget pack nuspec/Vernacular.Forms.nuspec -BasePath ./ -Prop Configuration=Release -Verbosity quiet
| rdio/vernacular | build-nuget.sh | Shell | mit | 112 |
#!/bin/sh
envtpl --allow-missing /nipap/nipap.conf.dist -o /etc/nipap/nipap.conf
/usr/sbin/nipap-passwd create-database
if [ -n "$NIPAP_USERNAME" -a -n "$NIPAP_PASSWORD" ]; then
echo "Creating user '$NIPAP_USERNAME'"
/usr/sbin/nipap-passwd add --username $NIPAP_USERNAME --name "NIPAP user" --password $NIPAP_PASSWORD
fi
exec /usr/sbin/nipapd --debug --foreground --auto-install-db --auto-upgrade-db --no-pid-file
| fredsod/NIPAP | nipap/entrypoint.sh | Shell | mit | 419 |
#!/bin/bash
set -e
SCRIPT_DIR=$(dirname $0)
MANATEE_ROOT=${1%/}
EXPORT_ROOT=${2-$LAMASSU_EXPORT}
if [ -z "$EXPORT_ROOT" -o -z "$MANATEE_ROOT" ]
then
echo "Builds a lamassu-machine Manatee barcode scanning package file for deploying to a device."
echo -e "\nUsage:"
echo -e "build <manatee directory> <target directory>\n"
echo "You may also set LAMASSU_EXPORT in lieu of <target directory>."
exit 1
fi
SUB_DIR=manatee
EXPORT_BASE=$EXPORT_ROOT/$SUB_DIR
EXPORT_DIR=$EXPORT_BASE/package
UPDATESCRIPT=$SCRIPT_DIR/updateinit.js
MACHINE_DIR=$SCRIPT_DIR/../..
rm -rf $EXPORT_DIR
mkdir -p $EXPORT_DIR
# Needed for updateinit script on target device
cp $MACHINE_DIR/node_modules/async/lib/async.js $EXPORT_DIR
cp $SCRIPT_DIR/../report.js $EXPORT_DIR
# Manatee
cp -a $MANATEE_ROOT $EXPORT_DIR
# Installation scripts
cp -a $SCRIPT_DIR/install $EXPORT_DIR
cp $UPDATESCRIPT $EXPORT_DIR/updatescript.js
node $SCRIPT_DIR/../build.js $EXPORT_BASE
| joshmh/lamassu-machine | deploy/manatee/build.sh | Shell | unlicense | 964 |
#!/bin/bash
TEMPFILE=`mktemp`
TEMPFILE2=`mktemp`
INCLUDEDIR=../include
SOURCEDIR=../src/syscalls
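# Rough flow (as implemented below): pull the [vleSyscalls] section out of the
# .ini passed as $1, read the header-name assignments, then for each remaining
# syscall line append its prototype to the matching header in $INCLUDEDIR and
# emit a one-line assembly stub SYSCALL(_<name>, 0x<equate>) in $SOURCEDIR.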
sed -n -e '/\[vleSyscalls\]/,/\[vleTools\]/p' $1 |sed -e '$d' -e '1d' -e 's/\r//' > $TEMPFILE
eval `grep -e 'header' $TEMPFILE |sed -e 's/;.*@//g' -e 's/@//g'`
grep -vG -e header -e LIBRARY $TEMPFILE > $TEMPFILE2
mkdir -p $INCLUDEDIR
mkdir -p $SOURCEDIR
old_IFS=$IFS # save the field separator
IFS=$'\n' # new field separator, the end of line
for line in $(cat $TEMPFILE2)
do
eval "HEADER=`echo \"${line}\" | awk -F\; '{ gsub(/\\(/, \"{\", \$2); gsub(/\\)/, \"}\", \$2); print \$2}'`"
EQUATE=`echo ${line}|head -c 4`
SYSCALL=`echo ${line} | awk -F\( '{ sub(/\(.+?/,"", \$1); sub(/.+?(\*|[[:space:]])/,"", \$1); print \$1 }'`
echo ${line} | awk -F\@ '{ gsub(/;.*/, "", $2); print $2";" }' >> $INCLUDEDIR/$HEADER
sort -u -o $INCLUDEDIR/$HEADER $INCLUDEDIR/$HEADER
echo "#include <asm.h>
SYSCALL(_${SYSCALL}, 0x${EQUATE})
">$SOURCEDIR/${SYSCALL}.S
done
IFS=$old_IFS
| Forty-Bot/libfxcg | resources/process-mini-sdk-ini.sh | Shell | bsd-3-clause | 1,023 |
#!/usr/bin/env bash
#
# Copyright (c) .NET Foundation and contributors. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project root for full license information.
#
set -e
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
SOURCE="$(readlink "$SOURCE")"
[[ "$SOURCE" != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located
done
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
OLDPATH="$PATH"
source "$DIR/common/_prettyprint.sh"
while [[ $# > 0 ]]; do
lowerI="$(echo $1 | awk '{print tolower($0)}')"
case $lowerI in
-c|--configuration)
export CONFIGURATION=$2
shift
;;
--targets)
IFS=',' read -r -a targets <<< $2
shift
;;
--nopackage)
export DOTNET_BUILD_SKIP_PACKAGING=1
;;
--skip-prereqs)
# Allow CI to disable prereqs check since the CI has the pre-reqs but not ldconfig it seems
export DOTNET_INSTALL_SKIP_PREREQS=1
;;
--help)
echo "Usage: $0 [--configuration <CONFIGURATION>] [--skip-prereqs] [--nopackage] [--docker <IMAGENAME>] [--help] [--targets <TARGETS...>]"
echo ""
echo "Options:"
echo " --configuration <CONFIGURATION> Build the specified Configuration (Debug or Release, default: Debug)"
echo " --targets <TARGETS...> Comma separated build targets to run (Init, Compile, Publish, etc.; Default is a full build and publish)"
echo " --nopackage Skip packaging targets"
echo " --skip-prereqs Skip checks for pre-reqs in dotnet_install"
echo " --docker <IMAGENAME> Build in Docker using the Dockerfile located in scripts/docker/IMAGENAME"
echo " --help Display this help message"
echo " <TARGETS...> The build targets to run (Init, Compile, Publish, etc.; Default is a full build and publish)"
exit 0
;;
*)
break
;;
esac
shift
done
# Set up the environment to be used for building with clang.
if which "clang-3.5" > /dev/null 2>&1; then
export CC="$(which clang-3.5)"
export CXX="$(which clang++-3.5)"
elif which "clang-3.6" > /dev/null 2>&1; then
export CC="$(which clang-3.6)"
export CXX="$(which clang++-3.6)"
elif which clang > /dev/null 2>&1; then
export CC="$(which clang)"
export CXX="$(which clang++)"
else
error "Unable to find Clang Compiler"
error "Install clang-3.5 or clang3.6"
exit 1
fi
# Load Branch Info
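# branchinfo.txt is assumed to contain simple KEY=VALUE lines (lines starting
# with '#' are skipped), e.g. RELEASE_SUFFIX=<suffix>; each pair is exported.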
while read line; do
if [[ $line != \#* ]]; then
IFS='=' read -ra splat <<< "$line"
export ${splat[0]}="${splat[1]}"
fi
done < "$DIR/../branchinfo.txt"
# Use a repo-local install directory (but not the artifacts directory, because that gets cleaned a lot)
[ -z "$DOTNET_INSTALL_DIR" ] && export DOTNET_INSTALL_DIR=$DIR/../.dotnet_stage0/$(uname)
[ -d $DOTNET_INSTALL_DIR ] || mkdir -p $DOTNET_INSTALL_DIR
# Ensure the latest stage0 is installed
export CHANNEL=$RELEASE_SUFFIX
$DIR/obtain/install.sh --channel preview --verbose
# Put stage 0 on the PATH (for this shell only)
PATH="$DOTNET_INSTALL_DIR:$PATH"
# Increase the file descriptor limit for this bash session; it prevents an issue we were hitting during restore
FILE_DESCRIPTOR_LIMIT=$( ulimit -n )
if [ $FILE_DESCRIPTOR_LIMIT -lt 1024 ]
then
echo "Increasing file descriptor limit to 1024"
ulimit -n 1024
fi
# Restore the build scripts
echo "Restoring Build Script projects..."
(
cd $DIR
dotnet restore --infer-runtimes
)
# Build the builder
echo "Compiling Build Scripts..."
dotnet publish "$DIR/dotnet-cli-build" -o "$DIR/dotnet-cli-build/bin" --framework netstandardapp1.5
export PATH="$OLDPATH"
# Run the builder
echo "Invoking Build Scripts..."
echo "Configuration: $CONFIGURATION"
if [ -f "$DIR/dotnet-cli-build/bin/dotnet-cli-build" ]; then
$DIR/dotnet-cli-build/bin/dotnet-cli-build ${targets[@]}
exit $?
else
# We're on an older CLI. This is temporary while Ubuntu and CentOS VSO builds are stalled.
$DIR/dotnet-cli-build/bin/Debug/dnxcore50/dotnet-cli-build "${targets[@]}"
exit $?
fi
| anurse/Cli | scripts/run-build.sh | Shell | apache-2.0 | 4,528 |
#!/bin/bash
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# There could be a flag here for the provider to create the instance on
# but for now this is specialized for Google.
SCRIPT_DIR=$(dirname $0)
PYTHONPATH=$SCRIPT_DIR/../pylib python $SCRIPT_DIR/create_google_dev_vm.py "$@"
| tgracchus/spinnaker | dev/create_google_dev_vm.sh | Shell | apache-2.0 | 834 |
#!/bin/bash
DOCKERID=`cat .dockerid`
echo "Running docker image $DOCKERID"
docker run -u $UID -v $PWD:/opt/website -p 4567:4567 -it $DOCKERID $@
| thatdocslady/ovirt-site | docker-run.sh | Shell | mit | 148 |
#!/usr/bin/env bash
#
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Warn in case of spelling errors.
# Note: Will exit successfully regardless of spelling errors.
export LC_ALL=C
if ! command -v codespell > /dev/null; then
echo "Skipping spell check linting since codespell is not installed."
exit 0
fi
IGNORE_WORDS_FILE=test/lint/lint-spelling.ignore-words.txt
if ! codespell --check-filenames --disable-colors --quiet-level=7 --ignore-words=${IGNORE_WORDS_FILE} $(git ls-files -- ":(exclude)build-aux/m4/" ":(exclude)contrib/seeds/*.txt" ":(exclude)depends/" ":(exclude)doc/release-notes/" ":(exclude)src/leveldb/" ":(exclude)src/qt/locale/" ":(exclude)src/secp256k1/" ":(exclude)src/univalue/"); then
echo "^ Warning: codespell identified likely spelling errors. Any false positives? Add them to the list of ignored words in ${IGNORE_WORDS_FILE}"
fi
| FeatherCoin/Feathercoin | test/lint/lint-spelling.sh | Shell | mit | 1,012 |
# Copyright: 2017 Masatake YAMATO
# License: GPL-2
CTAGS=$1
${CTAGS} --quiet --options=NONE \
--langdef=TEST'{base=C}' \
--kinddef-TEST=t,test,tests \
--regex-TEST=@./list.regex \
-o - \
input.c
| universal-ctags/ctags | Tmain/regex-patterns-from-file.d/run.sh | Shell | gpl-2.0 | 212 |
#!/bin/bash
# This script is a part of RPC & TI-RPC Test Suite
# (c) 2007 BULL S.A.S.
# Please refer to RPC & TI-RPC Test Suite documentation.
# More details at http://nfsv4.bullopensource.org/doc/rpc_testsuite.php
# This script launches everything needed to test RPC & TI-RPC
# Never try to launch it alone; run "script_run.sh" instead
# Note: this script may exist in more than one copy depending on which
# test series you want to run
# By C. LACABANNE - [email protected]
# creation : 2007-06-18 revision : 2007-06-19
# **********************
# *** INITIALISATION ***
# **********************
# simple tests suite identification
TESTSUITENAME="TIRPC bottomlevel mt domain"
TESTSUITEVERS="0.1"
TESTSUITEAUTH="Cyril LACABANNE"
TESTSUITEDATE="2007-06-18"
TESTSUITECOMM=""
TESTSERVER_1_PATH="tirpc_svc_6"
TESTSERVER_1_BIN="tirpc_svc_6.bin"
TESTSERVER_1=$SERVERTSTPACKDIR/$TESTSERVER_1_PATH/$TESTSERVER_1_BIN
export TESTSERVER_1_PATH
export TESTSERVER_1_BIN
export TESTSERVER_1
# check if tests run locally or not
# if not, logs directory will be change to remote directory
if [ "$LOCALIP" != "$CLIENTIP" ]
then
LOGDIR=/tmp/$LOGDIR
if [ $VERBOSE -eq 1 ]
then
echo " - log dir changes to client log dir : "$LOGDIR # debug !
fi
fi
# *****************
# *** PROCESSUS ***
# *****************
echo "*** Starting Tests Suite : "$TESTSUITENAME" (v "$TESTSUITEVERS") ***"
#-- start TIRPC MT Server for that following tests series
$REMOTESHELL $SERVERUSER@$SERVERIP "$TESTSERVER_1 $PROGNUMBASE $NBTHREADPROCESS"&
#-- start another instance of TIRPC MT server for simple API call type test
$REMOTESHELL $SERVERUSER@$SERVERIP "$TESTSERVER_1 $PROGNUMNOSVC $NBTHREADPROCESS"&
# wait for server creation and initialization
sleep $SERVERTIMEOUT
### SCRIPT LIST HERE !!! ###
./$SCRIPTSDIR/tirpc_bottomlevel_clnt_call.mt.sh
#-- Cleanup
$REMOTESHELL $SERVERUSER@$SERVERIP "killall -9 "$TESTSERVER_1_BIN
#-- Unreg all procedure
for ((a=PROGNUMNOSVC; a < `expr $PROGNUMNOSVC + $NBTHREADPROCESS` ; a++))
do
$REMOTESHELL $SERVERUSER@$SERVERIP "$SERVERTSTPACKDIR/cleaner.bin $a"
done
for ((a=PROGNUMBASE; a < `expr $PROGNUMBASE + $NBTHREADPROCESS` ; a++))
do
$REMOTESHELL $SERVERUSER@$SERVERIP "$SERVERTSTPACKDIR/cleaner.bin $a"
done
# ***************
# *** RESULTS ***
# ***************
| anthony-kolesov/arc_ltp | testcases/network/rpc/rpc-tirpc-full-test-suite/tirpc_bottomlevel_mt_lib.sh | Shell | gpl-2.0 | 2,300 |
#!/bin/sh
# Copyright (C) 2009-2013 OpenWrt.org
. /lib/functions/leds.sh
. /lib/ar71xx.sh
get_status_led() {
case $(ar71xx_board_name) in
alfa-nx)
status_led="alfa:green:led_8"
;;
all0305)
status_led="eap7660d:green:ds4"
;;
ap132)
status_led="ap132:green:status"
;;
ap136-010|\
ap136-020)
status_led="ap136:green:status"
;;
ap135-020)
status_led="ap135:green:status"
;;
ap81)
status_led="ap81:green:status"
;;
ap83)
status_led="ap83:green:power"
;;
ap96)
status_led="ap96:green:led2"
;;
aw-nr580)
status_led="aw-nr580:green:ready"
;;
bullet-m | rocket-m | nano-m | nanostation-m | nanostation-m-xw | loco-m-xw)
status_led="ubnt:green:link4"
;;
bxu2000n-2-a1)
status_led="bhu:green:status"
;;
cap4200ag)
status_led="senao:green:pwr"
;;
cpe510)
status_led="tp-link:green:link4"
;;
db120)
status_led="db120:green:status"
;;
dgl-5500-a1 |\
dhp-1565-a1|\
dir-505-a1 |\
dir-600-a1 |\
dir-615-e1 |\
dir-615-e4)
status_led="d-link:green:power"
;;
dir-615-c1)
status_led="d-link:green:status"
;;
dir-825-b1)
status_led="d-link:orange:power"
;;
dir-825-c1 |\
dir-835-a1)
status_led="d-link:amber:power"
;;
dragino2)
status_led="dragino2:red:system"
;;
eap300v2)
status_led="engenius:blue:power"
;;
eap7660d)
status_led="eap7660d:green:ds4"
;;
el-mini | \
el-m150)
status_led="easylink:green:system"
;;
f9k1115v2)
status_led="belkin:blue:status"
;;
gl-inet)
status_led="gl-connect:green:lan"
;;
esr1750)
status_led="esr1750:amber:power"
;;
esr900)
status_led="engenius:amber:power"
;;
hiwifi-hc6361)
status_led="hiwifi:blue:system"
;;
hornet-ub)
status_led="alfa:blue:wps"
;;
ja76pf | \
ja76pf2)
status_led="jjplus:green:led1"
;;
ls-sr71)
status_led="ubnt:green:d22"
;;
mc-mac1200r)
status_led="mercury:green:system"
;;
mr600)
status_led="mr600:orange:power"
;;
mr600v2)
status_led="mr600:blue:power"
;;
mr900 | \
mr900v2)
status_led="mr900:blue:power"
;;
mynet-n600 | \
mynet-n750)
status_led="wd:blue:power"
;;
mynet-rext)
status_led="wd:blue:power"
;;
mzk-w04nu | \
mzk-w300nh)
status_led="planex:green:status"
;;
nbg460n_550n_550nh)
status_led="nbg460n:green:power"
;;
nbg6716)
status_led="zyxel:white:power"
;;
om2p | \
om2pv2 | \
om2p-hs | \
om2p-hsv2 | \
om2p-lc)
status_led="om2p:blue:power"
;;
om5p | \
om5p-an)
status_led="om5p:blue:power"
;;
pb44)
status_led="pb44:amber:jump1"
;;
rb-2011l|\
rb-2011uas|\
rb-2011uas-2hnd)
status_led="rb:green:usr"
;;
rb-411 | rb-411u | rb-433 | rb-433u | rb-450 | rb-450g | rb-493)
status_led="rb4xx:yellow:user"
;;
rb-750)
status_led="rb750:green:act"
;;
rb-911g-2hpnd|\
rb-911g-5hpnd|\
rb-912uag-2hpnd|\
rb-912uag-5hpnd)
status_led="rb:green:user"
;;
rb-951ui-2hnd)
status_led="rb:green:act"
;;
rb-sxt2n|\
rb-sxt5n)
status_led="rb:green:power"
;;
routerstation | routerstation-pro)
status_led="ubnt:green:rf"
;;
rw2458n)
status_led="rw2458n:green:d3"
;;
smart-300)
status_led="nc-link:green:system"
;;
oolite)
status_led="oolite:red:system"
;;
qihoo-c301)
status_led="qihoo:green:status"
;;
tew-632brp)
status_led="tew-632brp:green:status"
;;
tew-673gru)
status_led="trendnet:blue:wps"
;;
tew-712br|\
tew-732br)
status_led="trendnet:green:power"
;;
tl-mr3020)
status_led="tp-link:green:wps"
;;
tl-wa750re)
status_led="tp-link:orange:re"
;;
tl-wa850re)
status_led="tp-link:blue:re"
;;
tl-wa860re)
status_led="tp-link:green:power"
;;
tl-mr3220 | \
tl-mr3220-v2 | \
tl-mr3420 | \
tl-mr3420-v2 | \
tl-wa701nd-v2 | \
tl-wa801nd-v2 | \
tl-wa901nd | \
tl-wa901nd-v2 | \
tl-wa901nd-v3 | \
tl-wdr3500 | \
tl-wr1041n-v2 | \
tl-wr1043nd | \
tl-wr1043nd-v2 | \
tl-wr741nd | \
tl-wr741nd-v4 | \
tl-wr841n-v1 | \
tl-wr841n-v7 | \
tl-wr841n-v8 | \
tl-wa830re-v2 | \
tl-wr842n-v2 | \
tl-wr941nd | \
tl-wr941nd-v5)
status_led="tp-link:green:system"
;;
archer-c5 | \
archer-c7 | \
tl-wdr4900-v2 | \
tl-mr10u | \
tl-mr12u | \
tl-mr13u | \
tl-wdr4300 | \
tl-wr703n | \
tl-wr710n | \
tl-wr720n-v3)
status_led="tp-link:blue:system"
;;
tl-wr841n-v9)
status_led="tp-link:green:qss"
;;
tl-wr2543n)
status_led="tp-link:green:wps"
;;
tube2h)
status_led="alfa:green:signal4"
;;
unifi)
status_led="ubnt:green:dome"
;;
uap-pro)
status_led="ubnt:white:dome"
;;
unifi-outdoor-plus)
status_led="ubnt:white:front"
;;
airgateway)
status_led="ubnt:white:status"
;;
whr-g301n | \
whr-hp-g300n | \
whr-hp-gn | \
wzr-hp-g300nh)
status_led="buffalo:green:router"
;;
wlae-ag300n)
status_led="buffalo:green:status"
;;
wzr-hp-ag300h | \
wzr-hp-g300nh2)
status_led="buffalo:red:diag"
;;
r6100 | \
wndap360 | \
wndr3700 | \
wndr3700v4 | \
wndr4300 | \
wnr2000 | \
wnr2200 |\
wnr612-v2 |\
wnr1000-v2)
status_led="netgear:green:power"
;;
wp543)
status_led="wp543:green:diag"
;;
wpj558)
status_led="wpj558:green:sig3"
;;
wrt400n)
status_led="wrt400n:blue:wps"
;;
wrt160nl)
status_led="wrt160nl:blue:wps"
;;
zcn-1523h-2 | zcn-1523h-5)
status_led="zcn-1523h:amber:init"
;;
wlr8100)
status_led="sitecom:amber:status"
;;
esac
}
set_state() {
get_status_led
case "$1" in
preinit)
status_led_blink_preinit
;;
failsafe)
status_led_blink_failsafe
;;
preinit_regular)
status_led_blink_preinit_regular
;;
done)
status_led_on
case $(ar71xx_board_name) in
qihoo-c301)
local n=$(fw_printenv activeregion | cut -d = -f 2)
fw_setenv "image${n}trynum" 0
;;
esac
;;
esac
}
| claudyus/openwrt | target/linux/ar71xx/base-files/etc/diag.sh | Shell | gpl-2.0 | 5,649 |
#!/usr/bin/env bash
# This file is part of RetroPie.
#
# (c) Copyright 2012-2015 Florian Müller ([email protected])
#
# See the LICENSE.md file at the top-level directory of this distribution and
# at https://raw.githubusercontent.com/RetroPie/RetroPie-Setup/master/LICENSE.md
#
rp_module_id="lr-nxengine"
rp_module_desc="Cave Story engine clone - NxEngine port for libretro"
rp_module_menus="2+"
function sources_lr-nxengine() {
gitPullOrClone "$md_build" https://github.com/libretro/nxengine-libretro.git
}
function build_lr-nxengine() {
make clean
make
md_ret_require="$md_build/nxengine_libretro.so"
}
function install_lr-nxengine() {
md_ret_files=(
'nxengine_libretro.so'
)
}
function configure_lr-nxengine() {
# remove old install folder
rm -rf "$rootdir/$md_type/cavestory"
mkRomDir "ports"
ensureSystemretroconfig "cavestory"
local msg="You need the original Cave Story game files to use $md_id. Please unpack the game to $romdir/ports/CaveStory so you have the file $romdir/ports/CaveStory/Doukutsu.exe present."
cat > "$romdir/ports/Cave Story.sh" << _EOF_
#!/bin/bash
if [[ -f "$romdir/ports/CaveStory/Doukutsu.exe" ]]; then
$rootdir/supplementary/runcommand/runcommand.sh 0 "$emudir/retroarch/bin/retroarch -L $md_inst/nxengine_libretro.so --config $configdir/cavestory/retroarch.cfg $romdir/ports/CaveStory/Doukutsu.exe" "$md_id"
else
dialog --msgbox "$msg" 22 76
fi
_EOF_
chmod +x "$romdir/ports/Cave Story.sh"
setESSystem 'Ports' 'ports' '~/RetroPie/roms/ports' '.sh .SH' '%ROM%' 'pc' 'ports'
__INFMSGS+=("$msg")
}
| joshheil/RetroPie-Setup | scriptmodules/libretrocores/lr-nxengine.sh | Shell | gpl-3.0 | 1,629 |
#!/usr/bin/env bash
set +exu
function get_base_path(){
cd $(dirname $0) && cd .. && pwd
}
BASE_DIR="$(get_base_path)"
BACKEND_DIR="${BASE_DIR}/backend"
BACKEND_REPO='https://github.com/OperationCode/operationcode_backend.git'
echo "Cloning backend repo into $BACKEND_DIR"
git clone $BACKEND_REPO $BACKEND_DIR
echo "Starting backend"
cd $BACKEND_DIR && make build && make db_create && make db_migrate && make run
| hollomancer/operationcode_frontend | bin/run_backend.sh | Shell | mit | 419 |
#!/bin/sh
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Set up the following environment variables to build a Lucy project with
# an uninstalled Lucy source tree. Useful for development.
#
# - LIBRARY_PATH
# - LD_LIBRARY_PATH
# - CLOWNFISH_INCLUDE
# - PERL5LIB
#
# Usage: source setup_env.sh [path to lucy]
contains() {
string="$1"
substring="$2"
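# If $substring occurs in $string, stripping the shortest "*$substring" prefix
# changes the value, so the test below succeeds (e.g. contains ":a:b:" ":b:").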
test "${string#*$substring}" != "$string"
}
add_to_path() {
path="$1"
dir="$2"
if [ -z "$path" ]; then
echo "$dir"
elif ! contains ":$path:" ":$dir:"; then
echo "$dir:$path"
else
echo "$path"
fi
}
if [ -n "$1" ]; then
base_dir="$1"
elif [ -n "$BASH_SOURCE" ]; then
# Only works with bash.
script_dir=`dirname "$BASH_SOURCE"`
base_dir=`cd "$script_dir/../.." && pwd`
else
echo "Usage: source setup_env.sh path_to_lucy_source"
return 1 2>/dev/null || exit 1
fi
if [ ! -d "$base_dir/c" ] || [ ! -d "$base_dir/perl" ]
then
echo "Doesn't look like a Lucy source directory: $base_dir"
return 1 2>/dev/null || exit 1
fi
export LIBRARY_PATH=`add_to_path "$LIBRARY_PATH" "$base_dir/c"`
export CLOWNFISH_INCLUDE=`add_to_path "$CLOWNFISH_INCLUDE" "$base_dir/core"`
export PERL5LIB=`add_to_path "$PERL5LIB" "$base_dir/perl/blib/arch"`
export PERL5LIB=`add_to_path "$PERL5LIB" "$base_dir/perl/blib/lib"`
case `uname` in
MINGW*|CYGWIN*)
export PATH=`add_to_path "$PATH" "$base_dir/c"`
;;
Darwin*)
;;
*)
export LD_LIBRARY_PATH=`add_to_path "$LD_LIBRARY_PATH" "$base_dir/c"`
esac
| rbevers/lucy | devel/bin/setup_env.sh | Shell | apache-2.0 | 2,286 |
#!/bin/sh
## https://github.com/auth0/react-native-lock/blob/master/bin/prepare.sh
echo "Preparing to link react-native-firestack for iOS"
echo "Checking CocoaPods..."
has_cocoapods=`which pod 2>/dev/null`
if [ -n "$has_cocoapods" ]
then
echo "CocoaPods already installed"
else
echo "Installing CocoaPods..."
gem install cocoapods
fi
| tegument/react-native-firestack | bin/prepare.sh | Shell | mit | 346 |
#!/bin/sh
php -d asp_tags=On /usr/local/bin/phpunit --verbose SmartyTests.php > test_results.txt
| FranTorletti/inmobiliaria | vendor/smarty/smarty/development/PHPunit/phpunit-tests.sh | Shell | apache-2.0 | 97 |
#!/bin/bash
# Written by Matthew Garcia
# Runs a groovy script that creates a text file containing the information
# that will be parsed, as well as a text file containing the hostnames of
# the machines used in the test. It uses awk to get the number of hosts
# in the script. Afterwards it creates a .pg file using the hosts file
# and the text file containing the information to be graphed. After
# creating the .pg file it is run and output is directed to a .png file.
# The .png file contains the data created from the groovy script in a graph.
DATE=`date +%C%y_%m_%d_%H_%M_%S`
PG_FILE=${DATE}graphMultiHostTest.pg
IMG_FILE=graphMultiHostTest.png
DATA_FILE=${DATE}graphMultiHostTest.dat
HOSTS_FILE=${DATE}graphMultiHostTest-hosts.dat
if [ "$1" = "" ]
then
echo "Usage: $0 Takes raw data files as arguments."
exit 1
fi
`dirname $0`/graphMultiHostTest.scala $@ > $DATA_FILE
cat $1 | cut -d' ' -f1 | sort | uniq > $HOSTS_FILE
intI=`awk 'END{print NR}' $HOSTS_FILE`
# Creates the .pg file
cat > $PG_FILE <<End-of-Message
#!`which gnuplot`
reset
set terminal png
set xlabel "Test Number"
set ylabel "Transactions/Second"
set yrange [0:8000]
set title "Average Transactions per second"
set key reverse Left outside
set grid
set style data linespoints
End-of-Message
# Loops through and appends to the .pg file for each host.
for ((i = 1; $i <= $intI; i++)) {
hostName=$(awk -v i=$i 'NR==i {print $1}' $HOSTS_FILE)
usingLine="using 1:$(($i + 1)) title \"$hostName\""
if [ $i == 1 ]
then
# If the current host is the first host, it appends a
# plot String to the .pg file.
echo -n "plot \"$DATA_FILE\" $usingLine" >> $PG_FILE
else
# If neither of the above cases are true the file will
# append to the file the "" string with the ending
# having a , and \ (meaning that the file has more to
# plot.
echo -n "\"\" $usingLine" >> $PG_FILE
fi
if [ $i != $intI ]
then
# If the current host is NOT the last host, it appends
# a string starting with "" (which means using the same
# data stated already) and ends without a \
echo ", \\" >> $PG_FILE
else
# This is just to be nice formatting since we don't
# put out any newlines above
echo "" >> $PG_FILE
fi
}
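# Illustrative (host names are made up): with two hosts host1 and host2 the
# loop above appends
#   plot "<date>graphMultiHostTest.dat" using 1:2 title "host1", \
#   "" using 1:3 title "host2"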
chmod +x $PG_FILE
./$PG_FILE > $IMG_FILE
rm -f $DATA_FILE $HOSTS_FILE $PG_FILE
| birendraa/voldemort | contrib/ec2-testing/examples/remotetest/graphMultiHostTest.sh | Shell | apache-2.0 | 2,312 |
#!/bin/bash -v
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
PREFIX="Mesos Provisioner: "
set -e
echo "${PREFIX} Installing pre-reqs..."
# For Mesos
apt-key adv --keyserver keyserver.ubuntu.com --recv E56151BF
DISTRO=$(lsb_release -is | tr '[:upper:]' '[:lower:]')
CODENAME=$(lsb_release -cs)
echo "deb http://repos.mesosphere.io/${DISTRO} ${CODENAME} main" | sudo tee /etc/apt/sources.list.d/mesosphere.list
apt-get -y update
apt-get -y install libcurl3
apt-get -y install zookeeperd
apt-get -y install aria2
apt-get -y install ssh
apt-get -y install rsync
apt-get -y install mesos=0.24.1-0.2.35.ubuntu1404
| apache/incubator-myriad | vagrant/virtualbox/mesos/provisioning/install_mesos.sh | Shell | apache-2.0 | 1,353 |
# Validating ip address
is_ip_valid() {
userip=${1-$ip}
check_nat=$(grep -H "^NAT='$userip'" $VESTA/data/ips/* 2>/dev/null)
if [ ! -e "$VESTA/data/ips/$userip" ] && [ -z "$check_nat" ] ; then
echo "Error: IP $userip does not exist"
log_event "$E_NOTEXIST" "$EVENT"
exit $E_NOTEXIST
fi
}
# Check if ip is available for user
is_ip_avalable() {
userip=${1-$ip}
if [ -e "$VESTA/data/ips/$userip" ]; then
ip_data=$(cat $VESTA/data/ips/$userip)
else
nated_ip=$(grep -H "^NAT='$userip'" $VESTA/data/ips/* 2>/dev/null)
nated_ip=$(echo "$nated_ip" | cut -f 1 -d : | cut -f 7 -d /)
ip_data=$(cat $VESTA/data/ips/$nated_ip)
fi
owner=$(echo "$ip_data"|grep OWNER= | cut -f 2 -d \')
status=$(echo "$ip_data"|grep STATUS= | cut -f 2 -d \')
shared=no
if [ 'admin' = "$owner" ] && [ "$status" = 'shared' ]; then
shared='yes'
fi
if [ "$owner" != "$user" ] && [ "$shared" != 'yes' ]; then
echo "Error: User $user doesn't have permission to use $userip"
log_event "$E_FORBIDEN" "$EVENT"
exit $E_FORBIDEN
fi
}
# Check ip ownership
is_ip_owner() {
# Parsing ip
owner=$(grep 'OWNER=' $VESTA/data/ips/$IP|cut -f 2 -d \')
if [ "$owner" != "$user" ]; then
echo "Error: IP $IP not owned"
log_event "$E_FORBIDEN" "$EVENT"
exit $E_FORBIDEN
fi
}
# Check if ip address is free
is_ip_free() {
if [ -e "$VESTA/data/ips/$ip" ]; then
echo "Error: IP exists"
log_event "$E_EXISTS" "$EVENT"
exit $E_EXISTS
fi
}
# Get full interface name
get_ip_iface() {
i=$(/sbin/ip addr | grep -w $interface |\
awk '{print $NF}' | tail -n 1 | cut -f 2 -d :)
if [ "$i" = "$interface" ]; then
n=0
else
n=$((i + 1))
fi
echo "$interface:$n"
}
# Check ip address specific value
is_ip_key_empty() {
key="$1"
string=$(cat $VESTA/data/ips/$ip)
eval $string
eval value="$key"
if [ ! -z "$value" ] && [ "$value" != '0' ]; then
echo "Error: $key is not empty = $value"
log_event "$E_EXISTS" "$EVENT"
exit $E_EXISTS
fi
}
# Update ip address value
update_ip_value() {
key="$1"
value="$2"
conf="$VESTA/data/ips/$ip"
str=$(cat $conf)
eval $str
c_key=$(echo "${key//$/}")
eval old="${key}"
old=$(echo "$old" | sed -e 's/\\/\\\\/g' -e 's/&/\\&/g' -e 's/\//\\\//g')
new=$(echo "$value" | sed -e 's/\\/\\\\/g' -e 's/&/\\&/g' -e 's/\//\\\//g')
sed -i "$str_number s/$c_key='${old//\*/\\*}'/$c_key='${new//\*/\\*}'/g"\
$conf
}
# Get ip name
get_ip_name() {
grep "NAME=" $VESTA/data/ips/$ip | cut -f 2 -d \'
}
# Increase ip value
increase_ip_value() {
sip=${1-ip}
USER=$user
web_key='U_WEB_DOMAINS'
usr_key='U_SYS_USERS'
current_web=$(grep "$web_key=" $VESTA/data/ips/$sip |cut -f 2 -d \')
current_usr=$(grep "$usr_key=" $VESTA/data/ips/$sip |cut -f 2 -d \')
if [ -z "$current_web" ]; then
echo "Error: Parsing error"
log_event "$E_PARSING" "$EVENT"
exit $E_PARSING
fi
new_web=$((current_web + 1))
if [ -z "$current_usr" ]; then
new_usr="$USER"
else
check_usr=$(echo -e "${current_usr//,/\n}" |grep -w $USER)
if [ -z "$check_usr" ]; then
new_usr="$current_usr,$USER"
else
new_usr="$current_usr"
fi
fi
sed -i "s/$web_key='$current_web'/$web_key='$new_web'/g" \
$VESTA/data/ips/$ip
sed -i "s/$usr_key='$current_usr'/$usr_key='$new_usr'/g" \
$VESTA/data/ips/$ip
}
# Decrease ip value
decrease_ip_value() {
sip=${1-ip}
USER=$user
web_key='U_WEB_DOMAINS'
usr_key='U_SYS_USERS'
current_web=$(grep "$web_key=" $VESTA/data/ips/$sip |cut -f 2 -d \')
current_usr=$(grep "$usr_key=" $VESTA/data/ips/$sip |cut -f 2 -d \')
if [ -z "$current_web" ]; then
echo "Error: Parsing error"
log_event "$E_PARSING" "$EVENT"
exit $E_PARSING
fi
new_web=$((current_web - 1))
check_ip=$(grep $sip $USER_DATA/web.conf |wc -l)
if [ "$check_ip" -lt 2 ]; then
new_usr=$(echo "$current_usr" |\
sed "s/,/\n/g"|\
sed "s/^$user$//g"|\
sed "/^$/d"|\
sed ':a;N;$!ba;s/\n/,/g')
else
new_usr="$current_usr"
fi
sed -i "s/$web_key='$current_web'/$web_key='$new_web'/g" \
$VESTA/data/ips/$sip
sed -i "s/$usr_key='$current_usr'/$usr_key='$new_usr'/g" \
$VESTA/data/ips/$sip
}
# Get ip address value
get_ip_value() {
key="$1"
string=$( cat $VESTA/data/ips/$ip )
eval $string
eval value="$key"
echo "$value"
}
# Get real ip address
get_real_ip() {
if [ -e "$VESTA/data/ips/$1" ]; then
echo $1
else
nated_ip=$(grep -H "^NAT='$1'" $VESTA/data/ips/*)
echo "$nated_ip" | cut -f 1 -d : | cut -f 7 -d /
fi
}
# Get user ip
get_user_ip(){
ip=$(grep -H "OWNER='$1'" $VESTA/data/ips/* 2>/dev/null | head -n1)
ip=$(echo "$ip" | cut -f 7 -d / | cut -f 1 -d :)
if [ -z "$ip" ]; then
admin_ips=$(grep -H "OWNER='admin'" $VESTA/data/ips/* 2>/dev/null)
admin_ips=$(echo "$admin_ips" | cut -f 7 -d / | cut -f 1 -d :)
for admin_ip in $admin_ips; do
if [ -z "$ip" ]; then
shared=$(grep "STATUS='shared'" $VESTA/data/ips/$admin_ip)
if [ ! -z "$shared" ]; then
ip=$admin_ip
fi
fi
done
fi
echo "$ip"
}
# Convert CIDR to netmask
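# e.g. convert_cidr 24 -> 255.255.255.0, convert_cidr 22 -> 255.255.252.0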
convert_cidr() {
set -- $(( 5 - ($1 / 8) )) 255 255 255 255 \
$(((255 << (8 - ($1 % 8))) & 255 )) 0 0 0
if [[ $1 -gt 1 ]]; then
shift $1
else
shift
fi
echo ${1-0}.${2-0}.${3-0}.${4-0}
}
# Convert netmask to CIDR
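# e.g. convert_netmask 255.255.252.0 -> 22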
convert_netmask() {
nbits=0
IFS=.
for dec in $1 ; do
case $dec in
255) let nbits+=8;;
254) let nbits+=7;;
252) let nbits+=6;;
248) let nbits+=5;;
240) let nbits+=4;;
224) let nbits+=3;;
192) let nbits+=2;;
128) let nbits+=1;;
0);;
esac
done
echo "$nbits"
}
# Calculate broadcast address
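# e.g. get_broadcast 192.168.1.10 255.255.255.0 -> 192.168.1.255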
get_broadcast() {
OLD_IFS=$IFS
IFS=.
typeset -a I=($1)
typeset -a N=($2)
IFS=$OLD_IFS
echo "$((${I[0]} |\
(255 ^ ${N[0]}))).$((${I[1]} |\
(255 ^ ${N[1]}))).$((${I[2]} |\
(255 ^ ${N[2]}))).$((${I[3]} |\
(255 ^ ${N[3]})))"
}
| rohdoor/vestacp | func/ip.sh | Shell | gpl-3.0 | 6,559 |
#!/bin/sh
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
set -e
if [ "x$TEST" = "x" ] ; then
TEST=false
fi
cd `dirname $0`/../..
mako_renderer=tools/buildgen/mako_renderer.py
if [ "x$TEST" != "x" ] ; then
tools/buildgen/build-cleaner.py build.json
fi
. tools/buildgen/generate_build_additions.sh
global_plugins=`find ./tools/buildgen/plugins -name '*.py' |
sort | grep -v __init__ | awk ' { printf "-p %s ", $0 } '`
for dir in . ; do
local_plugins=`find $dir/templates -name '*.py' |
sort | grep -v __init__ | awk ' { printf "-p %s ", $0 } '`
plugins="$global_plugins $local_plugins"
find -L $dir/templates -type f -and -name *.template | while read file ; do
out=${dir}/${file#$dir/templates/} # strip templates dir prefix
out=${out%.*} # strip template extension
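# e.g. ./templates/Makefile.template would map to ./Makefile (illustrative)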
echo "generating file: $out"
json_files="build.json $gen_build_files"
data=`for i in $json_files ; do echo $i ; done | awk ' { printf "-d %s ", $0 } '`
if [ "x$TEST" = "xtrue" ] ; then
actual_out=$out
out=`mktemp /tmp/gentXXXXXX`
fi
mkdir -p `dirname $out` # make sure dest directory exist
$mako_renderer $plugins $data -o $out $file
if [ "x$TEST" = "xtrue" ] ; then
diff -q $out $actual_out
rm $out
fi
done
done
rm $gen_build_files
| zeliard/grpc | tools/buildgen/generate_projects.sh | Shell | bsd-3-clause | 2,780 |
#!/bin/bash
# Copyright 2015-2016, EMC, Inc.
#
# Run this without options to create a virtual
# environment for a specific name, a default name, or git-branch
# based name.
#
# By default (no argument), the base environment name will be 'fit'
# If an argument is passed, the base name will be that value _except_
# If the argument is 'git', the base name will be the git-branch
#
# mkenv.sh <env_name>
#
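# Illustrative examples of the naming rules above:
#   ./mkenv.sh          -> environment named 'fit'
#   ./mkenv.sh myenv    -> environment named 'myenv'
#   ./mkenv.sh git      -> environment named after the current git branch ('/' becomes '_')
#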
PROG=${0}
# We need virtualenv from somewhere
virtualenv=`which virtualenv`
if [ ! -x "${virtualenv}" ]; then
echo "location of virtualenv is unknown"
exit 1
fi
# Set up env_name either from command line, else current git branch
if [ $# -eq 1 ]; then
if [ "$1" == "git" ]; then
env_name=`git rev-parse --abbrev-ref HEAD`
else
env_name=$1
fi
else
env_name='fit'
fi
# Normalize env_name, replace '/' with '_'
env_name=${env_name//\//_}
# Our virtual environments are found within <toplevelgit>/.venv
export WORKON_HOME=`pwd`/.venv
# mkvirtualenv, OK if it's already there
${virtualenv} --clear .venv/${env_name}
# activate the virtual environment
source .venv/${env_name}/bin/activate
# Use locally sourced pip configuration
export PIP_CONFIG_FILE=`pwd`/pip.conf
# Update local-pip to latest
pip install -U pip
# Install all required packages
pip install -r requirements.txt
# Create local requirements (for example, pylint)
# pip install -r requirements_local.txt
# Generate a script that assists in switching environments
cat > myenv_${env_name} <<End-of-message
# *** Autogenerated file, do not commit to remote repository
export WORKON_HOME=${WORKON_HOME}
source .venv/${env_name}/bin/activate
End-of-message
echo ""
echo "${PROG}: complete, run the following to use '${env_name}' environment:"
echo
echo "source myenv_${env_name}"
exit 0
| johren/RackHD | test/mkenv.sh | Shell | apache-2.0 | 1,787 |
# Test for automatic creation of dynafile directories
# note that we use the "test-spool" directory, because it is handled by diag.sh
# in any case, so we do not need to add any extra new test dir.
# added 2009-11-30 by Rgerhards
# This file is part of the rsyslog project, released under GPLv3
# uncomment for debugging support:
echo ===================================================================================
echo \[dircreate_dflt_dflt.sh\]: testing automatic directory creation for dynafiles - default
source $srcdir/diag.sh init
source $srcdir/diag.sh startup dircreate_dflt.conf
source $srcdir/diag.sh injectmsg 0 1 # a single message is sufficient
source $srcdir/diag.sh shutdown-when-empty # shut down rsyslogd when done processing messages
source $srcdir/diag.sh wait-shutdown
if [ ! -e test-logdir/rsyslog.out.log ]
then
echo "test-logdir or logfile not created!"
exit 1
fi
exit
source $srcdir/diag.sh exit
| rangochan/rsyslog | tests/dircreate_dflt.sh | Shell | gpl-3.0 | 928 |
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
#
# Check that route PMTU values match expectations, and that initial device MTU
# values are assigned correctly
#
# Tests currently implemented:
#
# - pmtu_vti4_exception
# Set up vti tunnel on top of veth, with xfrm states and policies, in two
# namespaces with matching endpoints. Check that route exception is not
# created if link layer MTU is not exceeded, then exceed it and check that
# exception is created with the expected PMTU. The approach described
# below for IPv6 doesn't apply here, because, on IPv4, administrative MTU
# changes alone won't affect PMTU
#
# - pmtu_vti6_exception
# Set up vti6 tunnel on top of veth, with xfrm states and policies, in two
# namespaces with matching endpoints. Check that route exception is
# created by exceeding link layer MTU with ping to other endpoint. Then
# decrease and increase MTU of tunnel, checking that route exception PMTU
# changes accordingly
#
# - pmtu_vti4_default_mtu
# Set up vti4 tunnel on top of veth, in two namespaces with matching
# endpoints. Check that MTU assigned to vti interface is the MTU of the
# lower layer (veth) minus additional lower layer headers (zero, for veth)
# minus IPv4 header length
#
# - pmtu_vti6_default_mtu
# Same as above, for IPv6
#
# - pmtu_vti4_link_add_mtu
# Set up vti4 interface passing MTU value at link creation, check MTU is
# configured, and that link is not created with invalid MTU values
#
# - pmtu_vti6_link_add_mtu
# Same as above, for IPv6
#
# - pmtu_vti6_link_change_mtu
# Set up two dummy interfaces with different MTUs, create a vti6 tunnel
# and check that configured MTU is used on link creation and changes, and
# that MTU is properly calculated instead when MTU is not configured from
# userspace
# Kselftest framework requirement - SKIP code is 4.
ksft_skip=4
tests="
pmtu_vti6_exception vti6: PMTU exceptions
pmtu_vti4_exception vti4: PMTU exceptions
pmtu_vti4_default_mtu vti4: default MTU assignment
pmtu_vti6_default_mtu vti6: default MTU assignment
pmtu_vti4_link_add_mtu vti4: MTU setting on link creation
pmtu_vti6_link_add_mtu vti6: MTU setting on link creation
pmtu_vti6_link_change_mtu vti6: MTU changes on link changes"
NS_A="ns-$(mktemp -u XXXXXX)"
NS_B="ns-$(mktemp -u XXXXXX)"
ns_a="ip netns exec ${NS_A}"
ns_b="ip netns exec ${NS_B}"
veth4_a_addr="192.168.1.1"
veth4_b_addr="192.168.1.2"
veth4_mask="24"
veth6_a_addr="fd00:1::a"
veth6_b_addr="fd00:1::b"
veth6_mask="64"
vti4_a_addr="192.168.2.1"
vti4_b_addr="192.168.2.2"
vti4_mask="24"
vti6_a_addr="fd00:2::a"
vti6_b_addr="fd00:2::b"
vti6_mask="64"
dummy6_0_addr="fc00:1000::0"
dummy6_1_addr="fc00:1001::0"
dummy6_mask="64"
cleanup_done=1
err_buf=
err() {
err_buf="${err_buf}${1}
"
}
err_flush() {
echo -n "${err_buf}"
err_buf=
}
setup_namespaces() {
ip netns add ${NS_A} || return 1
ip netns add ${NS_B}
}
setup_veth() {
${ns_a} ip link add veth_a type veth peer name veth_b || return 1
${ns_a} ip link set veth_b netns ${NS_B}
${ns_a} ip addr add ${veth4_a_addr}/${veth4_mask} dev veth_a
${ns_b} ip addr add ${veth4_b_addr}/${veth4_mask} dev veth_b
${ns_a} ip addr add ${veth6_a_addr}/${veth6_mask} dev veth_a
${ns_b} ip addr add ${veth6_b_addr}/${veth6_mask} dev veth_b
${ns_a} ip link set veth_a up
${ns_b} ip link set veth_b up
}
setup_vti() {
proto=${1}
veth_a_addr="${2}"
veth_b_addr="${3}"
vti_a_addr="${4}"
vti_b_addr="${5}"
vti_mask=${6}
[ ${proto} -eq 6 ] && vti_type="vti6" || vti_type="vti"
${ns_a} ip link add vti${proto}_a type ${vti_type} local ${veth_a_addr} remote ${veth_b_addr} key 10 || return 1
${ns_b} ip link add vti${proto}_b type ${vti_type} local ${veth_b_addr} remote ${veth_a_addr} key 10
${ns_a} ip addr add ${vti_a_addr}/${vti_mask} dev vti${proto}_a
${ns_b} ip addr add ${vti_b_addr}/${vti_mask} dev vti${proto}_b
${ns_a} ip link set vti${proto}_a up
${ns_b} ip link set vti${proto}_b up
sleep 1
}
setup_vti4() {
setup_vti 4 ${veth4_a_addr} ${veth4_b_addr} ${vti4_a_addr} ${vti4_b_addr} ${vti4_mask}
}
setup_vti6() {
setup_vti 6 ${veth6_a_addr} ${veth6_b_addr} ${vti6_a_addr} ${vti6_b_addr} ${vti6_mask}
}
setup_xfrm() {
proto=${1}
veth_a_addr="${2}"
veth_b_addr="${3}"
${ns_a} ip -${proto} xfrm state add src ${veth_a_addr} dst ${veth_b_addr} spi 0x1000 proto esp aead "rfc4106(gcm(aes))" 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f 128 mode tunnel || return 1
${ns_a} ip -${proto} xfrm state add src ${veth_b_addr} dst ${veth_a_addr} spi 0x1001 proto esp aead "rfc4106(gcm(aes))" 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f 128 mode tunnel
${ns_a} ip -${proto} xfrm policy add dir out mark 10 tmpl src ${veth_a_addr} dst ${veth_b_addr} proto esp mode tunnel
${ns_a} ip -${proto} xfrm policy add dir in mark 10 tmpl src ${veth_b_addr} dst ${veth_a_addr} proto esp mode tunnel
${ns_b} ip -${proto} xfrm state add src ${veth_a_addr} dst ${veth_b_addr} spi 0x1000 proto esp aead "rfc4106(gcm(aes))" 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f 128 mode tunnel
${ns_b} ip -${proto} xfrm state add src ${veth_b_addr} dst ${veth_a_addr} spi 0x1001 proto esp aead "rfc4106(gcm(aes))" 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f 128 mode tunnel
${ns_b} ip -${proto} xfrm policy add dir out mark 10 tmpl src ${veth_b_addr} dst ${veth_a_addr} proto esp mode tunnel
${ns_b} ip -${proto} xfrm policy add dir in mark 10 tmpl src ${veth_a_addr} dst ${veth_b_addr} proto esp mode tunnel
}
setup_xfrm4() {
setup_xfrm 4 ${veth4_a_addr} ${veth4_b_addr}
}
setup_xfrm6() {
setup_xfrm 6 ${veth6_a_addr} ${veth6_b_addr}
}
setup() {
[ "$(id -u)" -ne 0 ] && echo " need to run as root" && return $ksft_skip
cleanup_done=0
for arg do
eval setup_${arg} || { echo " ${arg} not supported"; return 1; }
done
}
cleanup() {
[ ${cleanup_done} -eq 1 ] && return
ip netns del ${NS_A} 2> /dev/null
ip netns del ${NS_B} 2> /dev/null
cleanup_done=1
}
mtu() {
ns_cmd="${1}"
dev="${2}"
mtu="${3}"
${ns_cmd} ip link set dev ${dev} mtu ${mtu}
}
mtu_parse() {
input="${1}"
next=0
for i in ${input}; do
[ ${next} -eq 1 ] && echo "${i}" && return
[ "${i}" = "mtu" ] && next=1
done
}
link_get() {
ns_cmd="${1}"
name="${2}"
${ns_cmd} ip link show dev "${name}"
}
link_get_mtu() {
ns_cmd="${1}"
name="${2}"
mtu_parse "$(link_get "${ns_cmd}" ${name})"
}
route_get_dst_exception() {
ns_cmd="${1}"
dst="${2}"
${ns_cmd} ip route get "${dst}"
}
route_get_dst_pmtu_from_exception() {
ns_cmd="${1}"
dst="${2}"
mtu_parse "$(route_get_dst_exception "${ns_cmd}" ${dst})"
}
test_pmtu_vti4_exception() {
setup namespaces veth vti4 xfrm4 || return 2
veth_mtu=1500
vti_mtu=$((veth_mtu - 20))
# SPI SN IV ICV pad length next header
esp_payload_rfc4106=$((vti_mtu - 4 - 4 - 8 - 16 - 1 - 1))
ping_payload=$((esp_payload_rfc4106 - 28))
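# Worked example with the values above: vti_mtu = 1500 - 20 = 1480,
# esp_payload_rfc4106 = 1480 - (4+4+8+16+1+1) = 1446 and
# ping_payload = 1446 - 28 = 1418 (28 = IPv4 + ICMP header inside the tunnel)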
mtu "${ns_a}" veth_a ${veth_mtu}
mtu "${ns_b}" veth_b ${veth_mtu}
mtu "${ns_a}" vti4_a ${vti_mtu}
mtu "${ns_b}" vti4_b ${vti_mtu}
# Send DF packet without exceeding link layer MTU, check that no
# exception is created
${ns_a} ping -q -M want -i 0.1 -w 2 -s ${ping_payload} ${vti4_b_addr} > /dev/null
pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${vti4_b_addr})"
if [ "${pmtu}" != "" ]; then
err " unexpected exception created with PMTU ${pmtu} for IP payload length ${esp_payload_rfc4106}"
return 1
fi
# Now exceed link layer MTU by one byte, check that exception is created
${ns_a} ping -q -M want -i 0.1 -w 2 -s $((ping_payload + 1)) ${vti4_b_addr} > /dev/null
pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${vti4_b_addr})"
if [ "${pmtu}" = "" ]; then
err " exception not created for IP payload length $((esp_payload_rfc4106 + 1))"
return 1
fi
# ...with the right PMTU value
if [ ${pmtu} -ne ${esp_payload_rfc4106} ]; then
err " wrong PMTU ${pmtu} in exception, expected: ${esp_payload_rfc4106}"
return 1
fi
}
test_pmtu_vti6_exception() {
setup namespaces veth vti6 xfrm6 || return 2
fail=0
# Create route exception by exceeding link layer MTU
mtu "${ns_a}" veth_a 4000
mtu "${ns_b}" veth_b 4000
mtu "${ns_a}" vti6_a 5000
mtu "${ns_b}" vti6_b 5000
${ns_a} ping6 -q -i 0.1 -w 2 -s 60000 ${vti6_b_addr} > /dev/null
# Check that exception was created
if [ "$(route_get_dst_pmtu_from_exception "${ns_a}" ${vti6_b_addr})" = "" ]; then
err " tunnel exceeding link layer MTU didn't create route exception"
return 1
fi
# Decrease tunnel MTU, check for PMTU decrease in route exception
mtu "${ns_a}" vti6_a 3000
if [ "$(route_get_dst_pmtu_from_exception "${ns_a}" ${vti6_b_addr})" -ne 3000 ]; then
err " decreasing tunnel MTU didn't decrease route exception PMTU"
fail=1
fi
# Increase tunnel MTU, check for PMTU increase in route exception
mtu "${ns_a}" vti6_a 9000
if [ "$(route_get_dst_pmtu_from_exception "${ns_a}" ${vti6_b_addr})" -ne 9000 ]; then
err " increasing tunnel MTU didn't increase route exception PMTU"
fail=1
fi
return ${fail}
}
test_pmtu_vti4_default_mtu() {
setup namespaces veth vti4 || return 2
# Check that MTU of vti device is MTU of veth minus IPv4 header length
veth_mtu="$(link_get_mtu "${ns_a}" veth_a)"
vti4_mtu="$(link_get_mtu "${ns_a}" vti4_a)"
if [ $((veth_mtu - vti4_mtu)) -ne 20 ]; then
err " vti MTU ${vti4_mtu} is not veth MTU ${veth_mtu} minus IPv4 header length"
return 1
fi
}
test_pmtu_vti6_default_mtu() {
setup namespaces veth vti6 || return 2
# Check that MTU of vti device is MTU of veth minus IPv6 header length
veth_mtu="$(link_get_mtu "${ns_a}" veth_a)"
vti6_mtu="$(link_get_mtu "${ns_a}" vti6_a)"
if [ $((veth_mtu - vti6_mtu)) -ne 40 ]; then
err " vti MTU ${vti6_mtu} is not veth MTU ${veth_mtu} minus IPv6 header length"
return 1
fi
}
test_pmtu_vti4_link_add_mtu() {
setup namespaces || return 2
${ns_a} ip link add vti4_a type vti local ${veth4_a_addr} remote ${veth4_b_addr} key 10
[ $? -ne 0 ] && err " vti not supported" && return 2
${ns_a} ip link del vti4_a
fail=0
min=68
max=$((65528 - 20))
# Check invalid values first
for v in $((min - 1)) $((max + 1)); do
${ns_a} ip link add vti4_a mtu ${v} type vti local ${veth4_a_addr} remote ${veth4_b_addr} key 10 2>/dev/null
# This can fail, or MTU can be adjusted to a proper value
[ $? -ne 0 ] && continue
mtu="$(link_get_mtu "${ns_a}" vti4_a)"
if [ ${mtu} -lt ${min} -o ${mtu} -gt ${max} ]; then
err " vti tunnel created with invalid MTU ${mtu}"
fail=1
fi
${ns_a} ip link del vti4_a
done
# Now check valid values
for v in ${min} 1300 ${max}; do
${ns_a} ip link add vti4_a mtu ${v} type vti local ${veth4_a_addr} remote ${veth4_b_addr} key 10
mtu="$(link_get_mtu "${ns_a}" vti4_a)"
${ns_a} ip link del vti4_a
if [ "${mtu}" != "${v}" ]; then
err " vti MTU ${mtu} doesn't match configured value ${v}"
fail=1
fi
done
return ${fail}
}
test_pmtu_vti6_link_add_mtu() {
setup namespaces || return 2
${ns_a} ip link add vti6_a type vti6 local ${veth6_a_addr} remote ${veth6_b_addr} key 10
[ $? -ne 0 ] && err " vti6 not supported" && return 2
${ns_a} ip link del vti6_a
fail=0
min=68 # vti6 can carry IPv4 packets too
max=$((65535 - 40))
# Check invalid values first
for v in $((min - 1)) $((max + 1)); do
${ns_a} ip link add vti6_a mtu ${v} type vti6 local ${veth6_a_addr} remote ${veth6_b_addr} key 10 2>/dev/null
# This can fail, or MTU can be adjusted to a proper value
[ $? -ne 0 ] && continue
mtu="$(link_get_mtu "${ns_a}" vti6_a)"
if [ ${mtu} -lt ${min} -o ${mtu} -gt ${max} ]; then
err " vti6 tunnel created with invalid MTU ${v}"
fail=1
fi
${ns_a} ip link del vti6_a
done
# Now check valid values
for v in 68 1280 1300 $((65535 - 40)); do
${ns_a} ip link add vti6_a mtu ${v} type vti6 local ${veth6_a_addr} remote ${veth6_b_addr} key 10
mtu="$(link_get_mtu "${ns_a}" vti6_a)"
${ns_a} ip link del vti6_a
if [ "${mtu}" != "${v}" ]; then
err " vti6 MTU ${mtu} doesn't match configured value ${v}"
fail=1
fi
done
return ${fail}
}
test_pmtu_vti6_link_change_mtu() {
setup namespaces || return 2
${ns_a} ip link add dummy0 mtu 1500 type dummy
[ $? -ne 0 ] && err " dummy not supported" && return 2
${ns_a} ip link add dummy1 mtu 3000 type dummy
${ns_a} ip link set dummy0 up
${ns_a} ip link set dummy1 up
${ns_a} ip addr add ${dummy6_0_addr}/${dummy6_mask} dev dummy0
${ns_a} ip addr add ${dummy6_1_addr}/${dummy6_mask} dev dummy1
fail=0
# Create vti6 interface bound to device, passing MTU, check it
${ns_a} ip link add vti6_a mtu 1300 type vti6 remote ${dummy6_0_addr} local ${dummy6_0_addr}
mtu="$(link_get_mtu "${ns_a}" vti6_a)"
if [ ${mtu} -ne 1300 ]; then
err " vti6 MTU ${mtu} doesn't match configured value 1300"
fail=1
fi
# Move to another device with different MTU, without passing MTU, check
# MTU is adjusted
${ns_a} ip link set vti6_a type vti6 remote ${dummy6_1_addr} local ${dummy6_1_addr}
mtu="$(link_get_mtu "${ns_a}" vti6_a)"
if [ ${mtu} -ne $((3000 - 40)) ]; then
err " vti MTU ${mtu} is not dummy MTU 3000 minus IPv6 header length"
fail=1
fi
# Move it back, passing MTU, check MTU is not overridden
${ns_a} ip link set vti6_a mtu 1280 type vti6 remote ${dummy6_0_addr} local ${dummy6_0_addr}
mtu="$(link_get_mtu "${ns_a}" vti6_a)"
if [ ${mtu} -ne 1280 ]; then
err " vti6 MTU ${mtu} doesn't match configured value 1280"
fail=1
fi
return ${fail}
}
trap cleanup EXIT
exitcode=0
desc=0
IFS="
"
for t in ${tests}; do
[ $desc -eq 0 ] && name="${t}" && desc=1 && continue || desc=0
(
unset IFS
eval test_${name}
ret=$?
cleanup
if [ $ret -eq 0 ]; then
printf "TEST: %-60s [ OK ]\n" "${t}"
elif [ $ret -eq 1 ]; then
printf "TEST: %-60s [FAIL]\n" "${t}"
err_flush
exit 1
elif [ $ret -eq 2 ]; then
printf "TEST: %-60s [SKIP]\n" "${t}"
err_flush
fi
)
[ $? -ne 0 ] && exitcode=1
done
exit ${exitcode}
|
kronat/linux
|
tools/testing/selftests/net/pmtu.sh
|
Shell
|
gpl-2.0
| 13,861 |
#!/bin/sh
npm owner add benirose $1
npm owner add kylestetz $1
npm owner add boutell $1
npm owner add colpanik $1
npm owner add jsumnersmith $1
npm owner add alexgilbert $1
npm owner add gsf $1
npm owner add stuartromanek $1
npm owner add livhaas $1
npm owner add mcoppola $1
# Have not created npm accounts
# npm owner add geoffdimasi $1
# npm owner add tquirino $1
# npm owner add ahoefinger $1
|
punkave/punkave-jobs
|
scripts/ownerone.sh
|
Shell
|
mit
| 398 |
#!/bin/sh
test_description='Test reflog display routines'
. ./test-lib.sh
test_expect_success 'setup' '
echo content >file &&
git add file &&
test_tick &&
git commit -m one
'
cat >expect <<'EOF'
Reflog: HEAD@{0} (C O Mitter <[email protected]>)
Reflog message: commit (initial): one
EOF
test_expect_success 'log -g shows reflog headers' '
git log -g -1 >tmp &&
grep ^Reflog <tmp >actual &&
test_cmp expect actual
'
cat >expect <<'EOF'
e46513e HEAD@{0}: commit (initial): one
EOF
test_expect_success 'oneline reflog format' '
git log -g -1 --oneline >actual &&
test_cmp expect actual
'
test_expect_success 'reflog default format' '
git reflog -1 >actual &&
test_cmp expect actual
'
cat >expect <<'EOF'
commit e46513e
Reflog: HEAD@{0} (C O Mitter <[email protected]>)
Reflog message: commit (initial): one
Author: A U Thor <[email protected]>
one
EOF
test_expect_success 'override reflog default format' '
git reflog --format=short -1 >actual &&
test_cmp expect actual
'
cat >expect <<'EOF'
Reflog: HEAD@{Thu Apr 7 15:13:13 2005 -0700} (C O Mitter <[email protected]>)
Reflog message: commit (initial): one
EOF
test_expect_success 'using @{now} syntax shows reflog date (multiline)' '
git log -g -1 HEAD@{now} >tmp &&
grep ^Reflog <tmp >actual &&
test_cmp expect actual
'
cat >expect <<'EOF'
e46513e HEAD@{Thu Apr 7 15:13:13 2005 -0700}: commit (initial): one
EOF
test_expect_success 'using @{now} syntax shows reflog date (oneline)' '
git log -g -1 --oneline HEAD@{now} >actual &&
test_cmp expect actual
'
cat >expect <<'EOF'
HEAD@{Thu Apr 7 15:13:13 2005 -0700}
EOF
test_expect_success 'using @{now} syntax shows reflog date (format=%gd)' '
git log -g -1 --format=%gd HEAD@{now} >actual &&
test_cmp expect actual
'
cat >expect <<'EOF'
Reflog: HEAD@{Thu Apr 7 15:13:13 2005 -0700} (C O Mitter <[email protected]>)
Reflog message: commit (initial): one
EOF
test_expect_success 'using --date= shows reflog date (multiline)' '
git log -g -1 --date=default >tmp &&
grep ^Reflog <tmp >actual &&
test_cmp expect actual
'
cat >expect <<'EOF'
e46513e HEAD@{Thu Apr 7 15:13:13 2005 -0700}: commit (initial): one
EOF
test_expect_success 'using --date= shows reflog date (oneline)' '
git log -g -1 --oneline --date=default >actual &&
test_cmp expect actual
'
cat >expect <<'EOF'
HEAD@{1112911993 -0700}
EOF
test_expect_success 'using --date= shows reflog date (format=%gd)' '
git log -g -1 --format=%gd --date=raw >actual &&
test_cmp expect actual
'
cat >expect <<'EOF'
Reflog: HEAD@{0} (C O Mitter <[email protected]>)
Reflog message: commit (initial): one
EOF
test_expect_success 'log.date does not invoke "--date" magic (multiline)' '
test_config log.date raw &&
git log -g -1 >tmp &&
grep ^Reflog <tmp >actual &&
test_cmp expect actual
'
cat >expect <<'EOF'
e46513e HEAD@{0}: commit (initial): one
EOF
test_expect_success 'log.date does not invoke "--date" magic (oneline)' '
test_config log.date raw &&
git log -g -1 --oneline >actual &&
test_cmp expect actual
'
cat >expect <<'EOF'
HEAD@{0}
EOF
test_expect_success 'log.date does not invoke "--date" magic (format=%gd)' '
test_config log.date raw &&
git log -g -1 --format=%gd >actual &&
test_cmp expect actual
'
cat >expect <<'EOF'
HEAD@{0}
EOF
test_expect_success '--date magic does not override explicit @{0} syntax' '
git log -g -1 --format=%gd --date=raw HEAD@{0} >actual &&
test_cmp expect actual
'
: >expect
test_expect_success 'empty reflog file' '
git branch empty &&
: >.git/logs/refs/heads/empty &&
git log -g empty >actual &&
test_cmp expect actual
'
test_done
|
TextusData/Mover
|
thirdparty/git-1.7.11.3/t/t1411-reflog-show.sh
|
Shell
|
gpl-3.0
| 3,605 |
#!/bin/sh
test_description='Tests multi-threaded lazy_init_name_hash'
. ./perf-lib.sh
test_perf_large_repo
test_checkout_worktree
test_expect_success 'verify both methods build the same hashmaps' '
test-tool lazy-init-name-hash --dump --single >out.single &&
if test-tool lazy-init-name-hash --dump --multi >out.multi
then
test_set_prereq REPO_BIG_ENOUGH_FOR_MULTI &&
sort <out.single >sorted.single &&
sort <out.multi >sorted.multi &&
test_cmp sorted.single sorted.multi
fi
'
test_expect_success 'calibrate' '
entries=$(wc -l <out.single) &&
case $entries in
?) count=1000000 ;;
??) count=100000 ;;
???) count=10000 ;;
????) count=1000 ;;
?????) count=100 ;;
??????) count=10 ;;
*) count=1 ;;
esac &&
export count &&
case $entries in
1) entries_desc="1 entry" ;;
*) entries_desc="$entries entries" ;;
esac &&
case $count in
1) count_desc="1 round" ;;
*) count_desc="$count rounds" ;;
esac &&
desc="$entries_desc, $count_desc" &&
export desc
'
test_perf "single-threaded, $desc" "
test-tool lazy-init-name-hash --single --count=$count
"
test_perf REPO_BIG_ENOUGH_FOR_MULTI "multi-threaded, $desc" "
test-tool lazy-init-name-hash --multi --count=$count
"
test_done
|
devzero2000/git-core
|
t/perf/p0004-lazy-init-name-hash.sh
|
Shell
|
gpl-2.0
| 1,210 |
#!/bin/bash
# This script extracts a valid release tar into _output/releases. It requires hack/build-release.sh
# to have been executed
set -o errexit
set -o nounset
set -o pipefail
S2I_ROOT=$(dirname "${BASH_SOURCE}")/..
source "${S2I_ROOT}/hack/common.sh"
# Go to the top of the tree.
cd "${S2I_ROOT}"
# Copy the linux release archives release back to the local _output/local/bin/linux/amd64 directory.
# TODO: support different OS's?
s2i::build::detect_local_release_tars "linux-amd64"
mkdir -p "${S2I_OUTPUT_BINPATH}/linux/amd64"
tar mxzf "${S2I_PRIMARY_RELEASE_TAR}" -C "${S2I_OUTPUT_BINPATH}/linux/amd64"
s2i::build::make_binary_symlinks
|
childsb/origin
|
vendor/github.com/openshift/source-to-image/hack/extract-release.sh
|
Shell
|
apache-2.0
| 651 |
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
$MANAGIX_HOME/bin/managix create -n asterix -c $MANAGIX_HOME/clusters/local/local.xml;
|
heriram/incubator-asterixdb
|
asterixdb/asterix-installer/src/test/resources/transactionts/scripts/recovery_ddl/temp_secondary_index_recovery/create_and_start.sh
|
Shell
|
apache-2.0
| 892 |
#!/bin/bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script performs disaster recovery of etcd from the backup data.
# Assumptions:
# - backup was done using etcdctl command:
# a) in case of etcd2
# $ etcdctl backup --data-dir=<dir>
# produced .snap and .wal files
# b) in case of etcd3
# $ etcdctl --endpoints=<address> snapshot save
# produced .db file
# - version.txt file is in the current directory (if it isn't, it will be
# defaulted to "2.2.1/etcd2"). Based on this file, the script will
# decide which version we are restoring to (procedures are different
# for etcd2 and etcd3).
# - in case of etcd2 - *.snap and *.wal files are in current directory
# - in case of etcd3 - *.db file is in the current directory
# - the script is run as root
# - for event etcd, we only support clearing it - to do it, you need to
# set RESET_EVENT_ETCD=true env var.
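# Illustrative examples of how such backups could have been produced
# (hypothetical paths/endpoints, shown for context only, not executed here):
# ETCDCTL_API=2 etcdctl backup --data-dir /var/etcd/data --backup-dir /var/tmp/backup-src
# ETCDCTL_API=3 etcdctl --endpoints=http://127.0.0.1:2379 snapshot save snapshot.db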
set -o errexit
set -o nounset
set -o pipefail
# Version file contains information about current version in the format:
# <etcd binary version>/<etcd api mode> (e.g. "3.0.12/etcd3").
#
# If the file doesn't exist we assume "2.2.1/etcd2" configuration is
# the current one and create a file with such configuration.
# The restore procedure is chosen based on this information.
VERSION_FILE="version.txt"
# Make it possible to overwrite version file (or default version)
# with VERSION_CONTENTS env var.
if [ -n "${VERSION_CONTENTS:-}" ]; then
echo "${VERSION_CONTENTS}" > "${VERSION_FILE}"
fi
if [ ! -f "${VERSION_FILE}" ]; then
echo "2.2.1/etcd2" > "${VERSION_FILE}"
fi
VERSION_CONTENTS="$(cat ${VERSION_FILE})"
ETCD_VERSION="$(echo $VERSION_CONTENTS | cut -d '/' -f 1)"
ETCD_API="$(echo $VERSION_CONTENTS | cut -d '/' -f 2)"
# Name is used only in case of etcd3 mode, to appropriate set the metadata
# for the etcd data.
# NOTE: NAME HAS TO BE EQUAL TO WHAT WE USE IN --name flag when starting etcd.
NAME="${NAME:-etcd-$(hostname)}"
# Port on which etcd is exposed.
etcd_port=2379
event_etcd_port=4002
# Wait until both etcd instances are up
wait_for_etcd_up() {
port=$1
# TODO: As of 3.0.x etcd versions, all 2.* and 3.* versions return
# {"health": "true"} on /health endpoint in healthy case.
# However, we should come up with a regex for it to avoid future breakage.
health_ok="{\"health\": \"true\"}"
for i in $(seq 120); do
# TODO: Is it enough to look into /health endpoint?
health=$(curl --silent http://127.0.0.1:${port}/health)
if [ "${health}" == "${health_ok}" ]; then
return 0
fi
sleep 1
done
return 1
}
# Wait until apiserver is up.
wait_for_cluster_healthy() {
for i in $(seq 120); do
cs_status=$(kubectl get componentstatuses -o template --template='{{range .items}}{{with index .conditions 0}}{{.type}}:{{.status}}{{end}}{{"\n"}}{{end}}') || true
componentstatuses=$(echo "${cs_status}" | grep -c 'Healthy:') || true
healthy=$(echo "${cs_status}" | grep -c 'Healthy:True') || true
if [ "${componentstatuses}" -eq "${healthy}" ]; then
return 0
fi
sleep 1
done
return 1
}
# Wait until etcd and apiserver pods are down.
wait_for_etcd_and_apiserver_down() {
for i in $(seq 120); do
etcd=$(docker ps | grep etcd | grep -v etcd-empty-dir | grep -v etcd-monitor | wc -l)
apiserver=$(docker ps | grep apiserver | wc -l)
# TODO: Theoretically it is possible, that apiserver and or etcd
# are currently down, but Kubelet is now restarting them and they
# will reappear again. We should avoid it.
if [ "${etcd}" -eq "0" -a "${apiserver}" -eq "0" ]; then
return 0
fi
sleep 1
done
return 1
}
# Move the manifest files to stop etcd and kube-apiserver
# while we swap the data out from under them.
MANIFEST_DIR="/etc/kubernetes/manifests"
MANIFEST_BACKUP_DIR="/etc/kubernetes/manifests-backups"
mkdir -p "${MANIFEST_BACKUP_DIR}"
echo "Moving etcd(s) & apiserver manifest files to ${MANIFEST_BACKUP_DIR}"
# If those files were already moved (e.g. during previous
# try of backup) don't fail on it.
mv "${MANIFEST_DIR}/kube-apiserver.manifest" "${MANIFEST_BACKUP_DIR}" || true
mv "${MANIFEST_DIR}/etcd.manifest" "${MANIFEST_BACKUP_DIR}" || true
mv "${MANIFEST_DIR}/etcd-events.manifest" "${MANIFEST_BACKUP_DIR}" || true
# Wait for the pods to be stopped
echo "Waiting for etcd and kube-apiserver to be down"
if ! wait_for_etcd_and_apiserver_down; then
# Couldn't kill etcd and apiserver.
echo "Downing etcd and apiserver failed"
exit 1
fi
# Create the sort of directory structure that etcd expects.
# If this directory already exists, remove it.
BACKUP_DIR="/var/tmp/backup"
rm -rf "${BACKUP_DIR}"
if [ "${ETCD_API}" == "etcd2" ]; then
echo "Preparing etcd backup data for restore"
# In v2 mode, we simply copy both snap and wal files to a newly created
# directory. After that, we start etcd with --force-new-cluster option
# that (according to the etcd documentation) is required to recover from
# a backup.
echo "Copying data to ${BACKUP_DIR} and restoring there"
mkdir -p "${BACKUP_DIR}/member/snap"
mkdir -p "${BACKUP_DIR}/member/wal"
# If the cluster is relatively new, there can be no .snap file.
mv *.snap "${BACKUP_DIR}/member/snap/" || true
mv *.wal "${BACKUP_DIR}/member/wal/"
# TODO(jsz): This won't work with HA setups (e.g. do we need to set --name flag)?
echo "Starting etcd ${ETCD_VERSION} to restore data"
image=$(docker run -d -v ${BACKUP_DIR}:/var/etcd/data \
--net=host -p ${etcd_port}:${etcd_port} \
"gcr.io/google_containers/etcd:${ETCD_VERSION}" /bin/sh -c \
"/usr/local/bin/etcd --data-dir /var/etcd/data --force-new-cluster")
if [ "$?" -ne "0" ]; then
echo "Docker container didn't start correctly"
exit 1
fi
echo "Container ${image} created, waiting for etcd to report as healthy"
if ! wait_for_etcd_up "${etcd_port}"; then
echo "Etcd didn't come back correctly"
exit 1
fi
# Kill that etcd instance.
echo "Etcd healthy - killing ${image} container"
docker kill "${image}"
elif [ "${ETCD_API}" == "etcd3" ]; then
echo "Preparing etcd snapshot for restore"
mkdir -p "${BACKUP_DIR}"
echo "Copying data to ${BACKUP_DIR} and restoring there"
number_files=$(find . -maxdepth 1 -type f -name "*.db" | wc -l)
if [ "${number_files}" -ne "1" ]; then
echo "Incorrect number of *.db files - expected 1"
exit 1
fi
mv *.db "${BACKUP_DIR}/"
snapshot="$(ls ${BACKUP_DIR})"
# Run etcdctl snapshot restore command and wait until it is finished.
# setting with --name in the etcd manifest file and then it seems to work.
# TODO(jsz): This command may not work in case of HA.
image=$(docker run -d -v ${BACKUP_DIR}:/var/tmp/backup --env ETCDCTL_API=3 \
"gcr.io/google_containers/etcd:${ETCD_VERSION}" /bin/sh -c \
"/usr/local/bin/etcdctl snapshot restore ${BACKUP_DIR}/${snapshot} --name ${NAME} --initial-cluster ${NAME}=http://localhost:2380; mv /${NAME}.etcd/member /var/tmp/backup/")
if [ "$?" -ne "0" ]; then
echo "Docker container didn't start correctly"
exit 1
fi
echo "Prepare container exit code: $(docker wait ${image})"
rm -f "${BACKUP_DIR}/${snapshot}"
fi
# Also copy version.txt file.
cp "${VERSION_FILE}" "${BACKUP_DIR}"
# Find out if we are running GCI vs CVM.
export CVM=$(curl "http://metadata/computeMetadata/v1/instance/attributes/" -H "Metadata-Flavor: Google" |& grep -q gci; echo $?)
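# Note: grep -q returns 0 when "gci" is found, so CVM=1 means the metadata did
# not mention GCI and the node is treated as ContainerVM, which changes where
# the master persistent disk is mounted below.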
if [[ "$CVM" == "1" ]]; then
export MNT_DISK="/mnt/master-pd"
else
export MNT_DISK="/mnt/disks/master-pd"
fi
# Save the corrupted data (clean directory if it is already non-empty).
rm -rf "${MNT_DISK}/var/etcd-corrupted"
mkdir -p "${MNT_DISK}/var/etcd-corrupted"
echo "Saving corrupted data to ${MNT_DISK}/var/etcd-corrupted"
mv /var/etcd/data "${MNT_DISK}/var/etcd-corrupted"
# Replace the corrupted data dir with the restored data.
echo "Copying restored data to /var/etcd/data"
mv "${BACKUP_DIR}" /var/etcd/data
if [ "${RESET_EVENT_ETCD:-}" == "true" ]; then
echo "Removing event-etcd corrupted data"
EVENTS_CORRUPTED_DIR="${MNT_DISK}/var/etcd-events-corrupted"
# Save the corrupted data (clean directory if it is already non-empty).
rm -rf "${EVENTS_CORRUPTED_DIR}"
mkdir -p "${EVENTS_CORRUPTED_DIR}"
mv /var/etcd/data-events "${EVENTS_CORRUPTED_DIR}"
fi
# Start etcd and kube-apiserver again.
echo "Restarting etcd and apiserver from restored snapshot"
mv "${MANIFEST_BACKUP_DIR}"/* "${MANIFEST_DIR}/"
rm -rf "${MANIFEST_BACKUP_DIR}"
# Verify that etcd is back.
echo "Waiting for etcd to come back"
if ! wait_for_etcd_up "${etcd_port}"; then
echo "Etcd didn't come back correctly"
exit 1
fi
# Verify that event etcd is back.
echo "Waiting for event etcd to come back"
if ! wait_for_etcd_up "${event_etcd_port}"; then
echo "Event etcd didn't come back correctly"
exit 1
fi
# Verify that kube-apiserver is back and cluster is healthy.
echo "Waiting for apiserver to come back"
if ! wait_for_cluster_healthy; then
echo "Apiserver didn't come back correctly"
exit 1
fi
echo "Cluster successfully restored!"
|
jackielii/client-nfs-provisioner
|
vendor/k8s.io/kubernetes/cluster/restore-from-backup.sh
|
Shell
|
apache-2.0
| 9,581 |
#!/bin/sh
#
# Copyright (c) 2008 Johannes E. Schindelin
#
test_description='prune'
. ./test-lib.sh
day=$((60*60*24))
week=$(($day*7))
add_blob() {
before=$(git count-objects | sed "s/ .*//") &&
BLOB=$(echo aleph_0 | git hash-object -w --stdin) &&
BLOB_FILE=.git/objects/$(echo $BLOB | sed "s/^../&\//") &&
verbose test $((1 + $before)) = $(git count-objects | sed "s/ .*//") &&
test_path_is_file $BLOB_FILE &&
test-chmtime =+0 $BLOB_FILE
}
test_expect_success setup '
: > file &&
git add file &&
test_tick &&
git commit -m initial &&
git gc
'
test_expect_success 'prune stale packs' '
orig_pack=$(echo .git/objects/pack/*.pack) &&
: > .git/objects/tmp_1.pack &&
: > .git/objects/tmp_2.pack &&
test-chmtime =-86501 .git/objects/tmp_1.pack &&
git prune --expire 1.day &&
test_path_is_file $orig_pack &&
test_path_is_file .git/objects/tmp_2.pack &&
test_path_is_missing .git/objects/tmp_1.pack
'
test_expect_success 'prune --expire' '
add_blob &&
git prune --expire=1.hour.ago &&
verbose test $((1 + $before)) = $(git count-objects | sed "s/ .*//") &&
test_path_is_file $BLOB_FILE &&
test-chmtime =-86500 $BLOB_FILE &&
git prune --expire 1.day &&
verbose test $before = $(git count-objects | sed "s/ .*//") &&
test_path_is_missing $BLOB_FILE
'
test_expect_success 'gc: implicit prune --expire' '
add_blob &&
test-chmtime =-$((2*$week-30)) $BLOB_FILE &&
git gc &&
verbose test $((1 + $before)) = $(git count-objects | sed "s/ .*//") &&
test_path_is_file $BLOB_FILE &&
test-chmtime =-$((2*$week+1)) $BLOB_FILE &&
git gc &&
verbose test $before = $(git count-objects | sed "s/ .*//") &&
test_path_is_missing $BLOB_FILE
'
test_expect_success 'gc: refuse to start with invalid gc.pruneExpire' '
git config gc.pruneExpire invalid &&
test_must_fail git gc
'
test_expect_success 'gc: start with ok gc.pruneExpire' '
git config gc.pruneExpire 2.days.ago &&
git gc
'
test_expect_success 'prune: prune nonsense parameters' '
test_must_fail git prune garbage &&
test_must_fail git prune --- &&
test_must_fail git prune --no-such-option
'
test_expect_success 'prune: prune unreachable heads' '
git config core.logAllRefUpdates false &&
mv .git/logs .git/logs.old &&
: > file2 &&
git add file2 &&
git commit -m temporary &&
tmp_head=$(git rev-list -1 HEAD) &&
git reset HEAD^ &&
git prune &&
test_must_fail git reset $tmp_head --
'
test_expect_success 'prune: do not prune detached HEAD with no reflog' '
git checkout --detach --quiet &&
git commit --allow-empty -m "detached commit" &&
# verify that there is no reflogs
# (should be removed and disabled by previous test)
test_path_is_missing .git/logs &&
git prune -n >prune_actual &&
: >prune_expected &&
test_cmp prune_actual prune_expected
'
test_expect_success 'prune: prune former HEAD after checking out branch' '
head_sha1=$(git rev-parse HEAD) &&
git checkout --quiet master &&
git prune -v >prune_actual &&
grep "$head_sha1" prune_actual
'
test_expect_success 'prune: do not prune heads listed as an argument' '
: > file2 &&
git add file2 &&
git commit -m temporary &&
tmp_head=$(git rev-list -1 HEAD) &&
git reset HEAD^ &&
git prune -- $tmp_head &&
git reset $tmp_head --
'
test_expect_success 'gc --no-prune' '
add_blob &&
test-chmtime =-$((5001*$day)) $BLOB_FILE &&
git config gc.pruneExpire 2.days.ago &&
git gc --no-prune &&
verbose test 1 = $(git count-objects | sed "s/ .*//") &&
test_path_is_file $BLOB_FILE
'
test_expect_success 'gc respects gc.pruneExpire' '
git config gc.pruneExpire 5002.days.ago &&
git gc &&
test_path_is_file $BLOB_FILE &&
git config gc.pruneExpire 5000.days.ago &&
git gc &&
test_path_is_missing $BLOB_FILE
'
test_expect_success 'gc --prune=<date>' '
add_blob &&
test-chmtime =-$((5001*$day)) $BLOB_FILE &&
git gc --prune=5002.days.ago &&
test_path_is_file $BLOB_FILE &&
git gc --prune=5000.days.ago &&
test_path_is_missing $BLOB_FILE
'
test_expect_success 'gc --prune=never' '
add_blob &&
git gc --prune=never &&
test_path_is_file $BLOB_FILE &&
git gc --prune=now &&
test_path_is_missing $BLOB_FILE
'
test_expect_success 'gc respects gc.pruneExpire=never' '
git config gc.pruneExpire never &&
add_blob &&
git gc &&
test_path_is_file $BLOB_FILE &&
git config gc.pruneExpire now &&
git gc &&
test_path_is_missing $BLOB_FILE
'
test_expect_success 'prune --expire=never' '
add_blob &&
git prune --expire=never &&
test_path_is_file $BLOB_FILE &&
git prune &&
test_path_is_missing $BLOB_FILE
'
test_expect_success 'gc: prune old objects after local clone' '
add_blob &&
test-chmtime =-$((2*$week+1)) $BLOB_FILE &&
git clone --no-hardlinks . aclone &&
(
cd aclone &&
verbose test 1 = $(git count-objects | sed "s/ .*//") &&
test_path_is_file $BLOB_FILE &&
git gc --prune &&
verbose test 0 = $(git count-objects | sed "s/ .*//") &&
test_path_is_missing $BLOB_FILE
)
'
test_expect_success 'garbage report in count-objects -v' '
test_when_finished "rm -f .git/objects/pack/fake*" &&
: >.git/objects/pack/foo &&
: >.git/objects/pack/foo.bar &&
: >.git/objects/pack/foo.keep &&
: >.git/objects/pack/foo.pack &&
: >.git/objects/pack/fake.bar &&
: >.git/objects/pack/fake.keep &&
: >.git/objects/pack/fake.pack &&
: >.git/objects/pack/fake.idx &&
: >.git/objects/pack/fake2.keep &&
: >.git/objects/pack/fake3.idx &&
git count-objects -v 2>stderr &&
grep "index file .git/objects/pack/fake.idx is too small" stderr &&
grep "^warning:" stderr | sort >actual &&
cat >expected <<\EOF &&
warning: garbage found: .git/objects/pack/fake.bar
warning: garbage found: .git/objects/pack/foo
warning: garbage found: .git/objects/pack/foo.bar
warning: no corresponding .idx or .pack: .git/objects/pack/fake2.keep
warning: no corresponding .idx: .git/objects/pack/foo.keep
warning: no corresponding .idx: .git/objects/pack/foo.pack
warning: no corresponding .pack: .git/objects/pack/fake3.idx
EOF
test_cmp expected actual
'
test_expect_success 'prune .git/shallow' '
SHA1=`echo hi|git commit-tree HEAD^{tree}` &&
echo $SHA1 >.git/shallow &&
git prune --dry-run >out &&
grep $SHA1 .git/shallow &&
grep $SHA1 out &&
git prune &&
test_path_is_missing .git/shallow
'
test_expect_success 'prune: handle alternate object database' '
test_create_repo A &&
git -C A commit --allow-empty -m "initial commit" &&
git clone --shared A B &&
git -C B commit --allow-empty -m "next commit" &&
git -C B prune
'
test_done
|
wparad/git
|
t/t5304-prune.sh
|
Shell
|
gpl-2.0
| 6,464 |
#! /bin/bash
set -e
if [ "$1" = "create" ]; then
ACTION=create-stack
elif [ "$1" = "update" ]; then
ACTION=update-stack
elif [ "$1" = "delete" ]; then
ACTION=delete-stack
else
echo "Usage: $0 [create|update|delete]"
exit 1
fi
TAG='lambda-comments'
DIR=`cd $(dirname $0); pwd`
BABEL_NODE=$DIR/../../node_modules/babel-cli/bin/babel-node.js
BIN_DIR=$DIR/../../bin
STACK_NAME=$($BABEL_NODE $BIN_DIR/dump-config.js CLOUDFORMATION)
ORIGIN=$($BABEL_NODE $BIN_DIR/dump-config.js ORIGIN)
REGION=$($BABEL_NODE $BIN_DIR/dump-config.js REGION)
if [ "$ACTION" = "delete-stack" ]; then
aws cloudformation delete-stack \
--region $REGION \
--stack-name $STACK_NAME
exit 0
fi
aws cloudformation $ACTION \
--region $REGION \
--stack-name $STACK_NAME \
--template-body file://$DIR/lambda-comments.json \
--capabilities CAPABILITY_IAM \
--parameters \
ParameterKey=TagName,ParameterValue=$TAG,UsePreviousValue=false \
ParameterKey=Origin,ParameterValue=$ORIGIN,UsePreviousValue=false \
|| true
# $BABEL_NODE $BIN_DIR/save-cloudformation-config.js
|
jimpick/lambda-comments
|
deploy/cloudformation/cloudformation.sh
|
Shell
|
isc
| 1,086 |
#!/bin/bash
if [ -d testbox ]
then
echo "Deleting vagrant test directory."
# Destroy possible running instance
cd testbox
vagrant destroy -f
cd ..
# Delete directory
rm -rf testbox
fi
# Remove possible existent box
existent_box=$(vagrant box list | grep testbox)
if [ ! -z "$existent_box" ]; then
echo "Removing test box."
vagrant box remove -f testboxcentos
fi
# Remove possible existent box
existent_box=$(vagrant box list | grep blacklabelops/latestcentos)
if [ ! -z "$existent_box" ]; then
echo "Removing centos test base box."
vagrant box remove -f blacklabelops/latestcentos
fi
|
blacklabelops/dockerdev
|
packer/cleanTest.sh
|
Shell
|
mit
| 623 |
#!/bin/bash
usage="$0 Takes 2 arguments : <browser: chrome or firefox> <url>"
while getopts ':hs:' option; do
case "$option" in
h) echo "$usage"
exit
;;
esac
done
if [ "$#" -ne 2 ]; then
echo "$usage"
exit
fi
echo "mitmdump -s har_dump.py -U http://proxy.rd.francetelecom.fr:8080 &"
mitmdump -s har_dump.py -U http://proxy.rd.francetelecom.fr:8080 &
export mitmpid=$!
sleep 5
echo "python3 ./browser.py $1 $2"
# python3 /home/evla/Documents/fire_at_will.py $1 $2 #with display
xvfb-run -a python3 ./browser.py $1 $2 #without display
sleep 10
kill $mitmpid
exit
|
gandalf-the-white/dockseldon
|
scripts/order66.sh
|
Shell
|
mit
| 619 |
#!/bin/bash --norc
kdiff3 $1 $2 $3 -o $4
|
samsaggace/linux-tools
|
mergekdiff3.sh
|
Shell
|
mit
| 41 |
#!/bin/bash
START_DIR="${START_DIR:-/home/coder/project}"
PREFIX="deploy-code-server"
mkdir -p $START_DIR
# function to clone the git repo or add a user's first file if no repo was specified.
project_init () {
[ -z "${GIT_REPO}" ] && echo "[$PREFIX] No GIT_REPO specified" && echo "Example file. Have questions? Join us at https://community.coder.com" > $START_DIR/coder.txt || git clone $GIT_REPO $START_DIR
}
# add rclone config and start rclone, if supplied
if [[ -z "${RCLONE_DATA}" ]]; then
echo "[$PREFIX] RCLONE_DATA is not specified. Files will not persist"
# start the project
project_init
else
echo "[$PREFIX] Copying rclone config..."
mkdir -p /home/coder/.config/rclone/
touch /home/coder/.config/rclone/rclone.conf
echo $RCLONE_DATA | base64 -d > /home/coder/.config/rclone/rclone.conf
# default to true
RCLONE_VSCODE_TASKS="${RCLONE_VSCODE_TASKS:-true}"
RCLONE_AUTO_PUSH="${RCLONE_AUTO_PUSH:-true}"
RCLONE_AUTO_PULL="${RCLONE_AUTO_PULL:-true}"
if [ $RCLONE_VSCODE_TASKS = "true" ]; then
# copy our tasks config to VS Code
echo "[$PREFIX] Applying VS Code tasks for rclone"
cp /tmp/rclone-tasks.json /home/coder/.local/share/code-server/User/tasks.json
# install the extension to add to menu bar
code-server --install-extension actboy168.tasks&
else
# user specified they don't want to apply the tasks
echo "[$PREFIX] Skipping VS Code tasks for rclone"
fi
# Full path to the remote filesystem
RCLONE_REMOTE_PATH=${RCLONE_REMOTE_NAME:-code-server-remote}:${RCLONE_DESTINATION:-code-server-files}
RCLONE_SOURCE_PATH=${RCLONE_SOURCE:-$START_DIR}
echo "rclone sync $RCLONE_SOURCE_PATH $RCLONE_REMOTE_PATH $RCLONE_FLAGS -vv" > /home/coder/push_remote.sh
echo "rclone sync $RCLONE_REMOTE_PATH $RCLONE_SOURCE_PATH $RCLONE_FLAGS -vv" > /home/coder/pull_remote.sh
chmod +x /home/coder/push_remote.sh /home/coder/pull_remote.sh
if rclone ls $RCLONE_REMOTE_PATH; then
if [ $RCLONE_AUTO_PULL = "true" ]; then
# grab the files from the remote instead of running project_init()
echo "[$PREFIX] Pulling existing files from remote..."
/home/coder/pull_remote.sh&
else
# user specified they don't want to auto-pull
echo "[$PREFIX] Auto-pull is disabled"
fi
else
if [ $RCLONE_AUTO_PUSH = "true" ]; then
# we need to clone the git repo and sync
echo "[$PREFIX] Pushing initial files to remote..."
project_init
/home/coder/push_remote.sh&
else
# user specified they don't want to auto-push
echo "[$PREFIX] Auto-push is disabled"
fi
fi
# Fix permission
chmod +x /home/coder/deploy-container/entrypoint.sh
fi
# Add dotfiles, if set
if [ -n "$DOTFILES_REPO" ]; then
# clone the user's dotfiles repository
echo "[$PREFIX] Cloning dotfiles..."
mkdir -p $HOME/dotfiles
git clone $DOTFILES_REPO $HOME/dotfiles
DOTFILES_SYMLINK="${DOTFILES_SYMLINK:-true}"
# symlink repo to $HOME
if [ $DOTFILES_SYMLINK = "true" ]; then
shopt -s dotglob
ln -sf $HOME/dotfiles/* $HOME
fi
# run install script, if it exists
[ -f "$HOME/dotfiles/install.sh" ] && $HOME/dotfiles/install.sh
fi
echo "[$PREFIX] Starting code-server..."
# Now we can run code-server with the default entrypoint
/usr/bin/entrypoint.sh --bind-addr 0.0.0.0:$PORT $START_DIR
|
mjenrungrot/competitive_programming
|
deploy-container/entrypoint.sh
|
Shell
|
mit
| 3,562 |
#!/bin/bash
user="ircd"
website="https://unrealircd.org/unrealircd4/"
file="unrealircd-4.0.11.tar.gz"
set_up_user() {
created=$(cat /etc/passwd | grep $user | wc -l);
if [[ $created -lt 1 ]]; then
echo "[+] Creating user 'ircd' ..."
adduser ircd
fi
apt-get install sudo -y > /dev/null 2>&1
}
install_deps() {
echo "[+] Installing deps ..."
sudo apt-get install build-essential openssl libcurl4-openssl-dev zlib1g zlib1g-dev zlibc libgcrypt20 libgcrypt11-dev libgcrypt20-dev wget -y > /dev/null 2>&1
}
download_daemon() {
cd ~
echo "[+] Downloading daemon ..."
wget --no-check-certificate $website$file > /dev/null 2>&1
if [[ $? == 0 ]]; then
echo "[*] File $file downloaded.";
else
echo "[!] File $file could not be downloaded.";
exit 1;
fi
}
install_daemon() {
tar -xvf $file
cd unr*/
./Config
}
set_up_user
install_deps
download_daemon
install_daemon
|
0x00-0x00/shell_scripts
|
irc_setup.sh
|
Shell
|
mit
| 883 |
dist=`grep DISTRIB_ID /etc/*-release | awk -F '=' '{print $2}'`
if [ "$dist" == "Ubuntu" ]; then
echo "ubuntu found"
else
echo "not ubuntu"
exit 1
fi
if [ -d "/etc/naemon/" ]; then
echo "Found naemon"
else
echo "This doesn't look like a Naemon server"
exit 1
fi
if [ -d "/usr/lib/naemon/plugins" ]; then
echo "found the plugins"
if [ -d "/usr/lib/naemon/plugins/scripts/" ]; then
echo "scripts exists"
else
echo "creating scripts directory"
sudo mkdir /usr/lib/naemon/plugins/scripts
fi
echo "creating/refreshing notifymqtt"
sudo cp ../monitor/notifymqtt.py /usr/lib/naemon/plugins/scripts/notifymqtt
echo "creating/refreshing notification.cfg"
sudo cp ../monitor/notification.cfg /etc/naemon/conf.d
echo "notifymqtt installed. You need to configure contacts.cfg to use it"
else
echo "No plugins directory found"
exit 1
fi
if [ -d "/etc/sitepipe/" ]; then
echo "config dir exists"
else
echo "creating config directory"
sudo mkdir /etc/sitepipe/
fi
echo "Creating/refreshing config.ini"
sudo cp ../etc/sitepipe/config.ini /etc/sitepipe/config.ini
|
greypanda/sitepipe
|
install/install_naemon_server.sh
|
Shell
|
mit
| 1,153 |
#!/usr/bin/env bash
pretty () {
echo "I am pretty"
}
|
BaxterStockman/bashlib
|
test/fixtures/bashlib/i/am/pretty.sh
|
Shell
|
mit
| 58 |
#!/bin/sh
BIN=/usr/bin/clazy
if [ -x $BIN ]; then
$BIN -fPIC -std=c++11 \
-I /usr/include \
-I /usr/include/qt \
-I /usr/include/qt/QtCore \
-I /usr/include/qt/QtGui \
-I /usr/include/qt/QtNetwork \
-I /usr/include/qt/QtXml \
-I /usr/include/qt/QtWidgets \
-I /usr/include/qt/QtX11Extras \
-I /usr/include/gstreamer-1.0 \
-I /usr/lib/x86_64-linux-gnu/gstreamer-1.0/include \
-I /usr/include/glib-2.0 \
-I /usr/lib/x86_64-linux-gnu/glib-2.0/include \
-I ../build-ExPlayer-Desktop-Debug \
-I . \
-c $*
else
echo "Please install $BIN [https://github.com/KDE/clazy]"
fi
|
kmikolaj/explayer
|
clazy.sh
|
Shell
|
mit
| 583 |
#!/bin/bash
/home/root/bin/cam init > /dev/null 2>&1
exec /home/root/bin/video-server 192.168.254.141 4000 > /dev/null 2>&1
|
DeLaGuardo/bonecam
|
systemd/autostart-video.sh
|
Shell
|
mit
| 125 |
#!/bin/bash
input=/intput
output=/output
for filename in $1*;
do
rm -rf $filename$input
rm -rf $filename$output
cp -r $2 $filename;
cp -r $3 $filename;
done
sleep 5
|
derekso1/Aultron
|
prep.sh
|
Shell
|
mit
| 184 |
#!/bin/bash
# vim: et sr sw=4 ts=4 smartindent:
# create_aws_instance_info.sh
#
# Gets IMMUTABLE instance info from AWS metadata
# i.e. we're not interested in putting metadata or tag values in to the file
# that might change over time.
#
INFO_FILE=/etc/eurostar/aws_instance_info
TIMESTAMP="$(date +'%Y-%m-%d %H:%M:%S')"
AWS_DOC_URI=http://169.254.169.254/latest/dynamic/instance-identity/document
AWS_DOC=$(curl -s $AWS_DOC_URI)
if [[ -z $AWS_DOC ]]; then
echo "$0 ERROR: couldn't fetch $AWS_DOC_URI with curl"
exit 1
fi
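# For reference, the identity document is JSON shaped roughly like this
# (illustrative values only), which is what the grep/awk extraction below relies on:
# {
#   "instanceId" : "i-0123456789abcdef0",
#   "instanceType" : "t2.micro",
#   "region" : "eu-west-1",
#   "availabilityZone" : "eu-west-1b",
#   ...
# }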
cat << EOF >$INFO_FILE
# [$TIMESTAMP] AWS INSTANCE INFO GENERATED BY $0
AWS_INSTANCE_ID=$( echo "$AWS_DOC" | grep '^[ ]\+"instanceId"' | awk -F\" '{print $4}' )
AWS_INSTANCE_TYPE=$( echo "$AWS_DOC" | grep '^[ ]\+"instanceType"' | awk -F\" '{print $4}' )
AWS_REGION=$( echo "$AWS_DOC" | grep '^[ ]\+"region"' | awk -F\" '{print $4}' )
AWS_AZ=$( echo "$AWS_DOC" | grep '^[ ]\+"availabilityZone"' | awk -F\" '{print $4}' |
sed -e 's/.*\(.\)$/\1/'
)
# END AWS INSTANCE INFO GENERATED BY $0
EOF
cat $INFO_FILE
|
jinal--shah/packer_base_centos
|
uploads/cloud-init/usr/local/bin/cloud-init/00010-create_aws_instance_info.sh
|
Shell
|
mit
| 1,059 |
#!/usr/bin/env bash
## =============================================================================
# File: setup-screen.sh
#
# Author: Cashiuus
# Created: 12-DEC-2016 - - - - - - (Revised: )
#
# MIT License ~ http://opensource.org/licenses/MIT
#-[ Notes ]---------------------------------------------------------------------
# Purpose:
#
#
## ========================================================================== ##
__version__="0.1"
__author__="Cashiuus"
## ========[ TEXT COLORS ]=============== ##
# [https://wiki.archlinux.org/index.php/Color_Bash_Prompt]
# [https://en.wikipedia.org/wiki/ANSI_escape_code]
GREEN="\033[01;32m" # Success
YELLOW="\033[01;33m" # Warnings/Information
RED="\033[01;31m" # Issues/Errors
BLUE="\033[01;34m" # Heading
PURPLE="\033[01;35m" # Other
ORANGE="\033[38;5;208m" # Debugging
BOLD="\033[01;01m" # Highlight
RESET="\033[00m" # Normal
## =========[ CONSTANTS ]================ ##
APP_PATH=$(readlink -f $0)
APP_BASE=$(dirname "${APP_PATH}")
APP_NAME=$(basename "${APP_PATH}")
APP_SETTINGS="${HOME}/.config/penbuilder/settings.conf"
APP_ARGS=$@
DEBUG=true
LOG_FILE="${APP_BASE}/debug.log"
# These can be used to know height (LINES) and width (COLS) of current terminal in script
LINES=$(tput lines)
COLS=$(tput cols)
#======[ ROOT PRE-CHECK ]=======#
function check_root() {
if [[ $EUID -ne 0 ]];then
if [[ $(dpkg-query -s sudo) ]];then
export SUDO="sudo"
# $SUDO - run commands with this prefix now to account for either scenario.
else
echo "Please install sudo or run this as root."
exit 1
fi
fi
}
check_root
## ========================================================================== ##
# ================================[ BEGIN ]================================ #
function install_screen() {
#apt -y -qq install screen \
# || echo -e ' '${RED}'[!] Issue with apt install'${RESET} 1>&2
#--- Configure screen
file="${HOME}/.screenrc"
if [[ -f "${file}" ]]; then
echo -e ' '${RED}'[!]'${RESET}" ${file} detected. Skipping..." 1>&2
else
cat <<EOF > "${file}"
## Don't display the copyright page
startup_message off
## tab-completion flash in heading bar
vbell off
## Keep scrollback n lines
defscrollback 1000
## Hardstatus is a bar of text that is visible in all screens
hardstatus on
hardstatus alwayslastline
hardstatus string '%{gk}%{G}%H %{g}[%{Y}%l%{g}] %= %{wk}%?%-w%?%{=b kR}(%{W}%n %t%?(%u)%?%{=b kR})%{= kw}%?%+w%?%?%= %{g} %{Y} %Y-%m-%d %C%a %{W}'
## Title bar
termcapinfo xterm ti@:te@
## Default windows (syntax: screen -t label order command)
screen -t bash1 0
screen -t bash2 1
## Select the default window
select 0
EOF
fi
}
install_screen
# NOTE: If you use screen, your configuration file is ~/.screenrc.
# If you want screen to record history, you just need to set it to use a
# login shell which will source your bash startup files (and record your history).
# use bash, make it a login shell
#defshell -bash
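# Minimal sketch (assuming bash is your shell) of the ~/.screenrc lines that
# would do this; the leading '-' marks the spawned shell as a login shell:
# shell -$SHELL
# (or, explicitly for bash: defshell -bash)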
pause() {
local dummy
read -s -r -p "Press any key to continue..." -n 1 dummy
}
asksure() {
### using it
#if asksure; then
#echo "Okay, performing rm -rf / then, master...."
#else
#echo "Pfff..."
#fi
echo -n "Are you sure (Y/N)? "
while read -r -n 1 -s answer; do
if [[ $answer = [YyNn] ]]; then
[[ $answer = [Yy] ]] && retval=0
[[ $answer = [Nn] ]] && retval=1
break
fi
done
echo # just a final linefeed, optics...
return $retval
}
make_tmp_dir() {
# <doc:make_tmp_dir> {{{
#
# This function taken from a vmware tool install helper to securely
# create temp files. (installer.sh)
#
# Usage: make_tmp_dir dirname prefix
#
# Required Variables:
#
# dirname
# prefix
#
# Return value: null
#
# </doc:make_tmp_dir> }}}
local dirname="$1" # OUT
local prefix="$2" # IN
local tmp
local serial
local loop
tmp="${TMPDIR:-/tmp}"
# Don't overwrite existing user data
# -> Create a directory with a name that didn't exist before
#
# This may never succeed (if we are racing with a malicious process), but at
# least it is secure
serial=0
loop='yes'
while [ "$loop" = 'yes' ]; do
# Check the validity of the temporary directory. We do this in the loop
# because it can change over time
if [ ! -d "$tmp" ]; then
echo 'Error: "'"$tmp"'" is not a directory.'
echo
exit 1
fi
if [ ! -w "$tmp" -o ! -x "$tmp" ]; then
echo 'Error: "'"$tmp"'" should be writable and executable.'
echo
exit 1
fi
# Be secure
# -> Don't give write access to other users (so that they can not use this
# directory to launch a symlink attack)
if mkdir -m 0755 "$tmp"'/'"$prefix$serial" >/dev/null 2>&1; then
loop='no'
else
serial=`expr $serial + 1`
serial_mod=`expr $serial % 200`
if [ "$serial_mod" = '0' ]; then
echo 'Warning: The "'"$tmp"'" directory may be under attack.'
echo
fi
fi
done
eval "$dirname"'="$tmp"'"'"'/'"'"'"$prefix$serial"'
}
is_process_alive() {
# Checks if the given pid represents a live process.
# Returns 0 if the pid is a live process, 1 otherwise
#
# Usage: is_process_alive 29833
# [[ $? -eq 0 ]] && echo -e "Process is alive"
local pid="$1" # IN
ps -p $pid | grep $pid > /dev/null 2>&1
}
function finish {
# Any script-termination routines go here, but function cannot be empty
clear
[[ "$DEBUG" = true ]] && echo -e "${ORANGE}[DEBUG] :: function finish :: Script complete${RESET}"
echo -e "${GREEN}[$(date +"%F %T")] ${RESET}App Shutting down, please wait..." | tee -a "${LOG_FILE}"
# Redirect app output to log, sending both stdout and stderr (*NOTE: this will not parse color codes)
# cmd_here 2>&1 | tee -a "${LOG_FILE}"
}
# End of script
trap finish EXIT
## ========================================================================== ##
## ======================[ Template File Code Help ]========================= ##
#
## ============[ BASH GUIDES ]============= #
# Google's Shell Styleguide: https://google.github.io/styleguide/shell.xml
# Using Exit Codes: http://bencane.com/2014/09/02/understanding-exit-codes-and-how-to-use-them-in-bash-scripts/
# Writing Robust BASH Scripts: http://www.davidpashley.com/articles/writing-robust-shell-scripts/
#
# Shell Script Development Helper Projects
# https://github.com/alebcay/awesome-shell#shell-script-development
# https://github.com/jmcantrell/bashful
# https://github.com/lingtalfi/bashmanager
#
#
# =============[ Styleguide Recommendations ]============ #
# line length = 80
# functions = lower-case with underscores, must use () after func, "function" optional, be consistent
# Place all functions at top below constants, don't hide exec code between functions
# A function called 'main' is required for scripts long enough to contain other functions
# constants = UPPERCASE with underscores
# read-only = Vars that are readonly - use 'readonly var' or 'declare -r var' to ensure
# local vars = delcare and assign on separate lines
#
# return vals = Always check return values and give informative return values
#
# =============[ ECHO/PRINTF Commands ]============ #
# echo -n Print without a newline
#
# Run echo and cat commands through sudo (notice the single quotes)
# sudo sh -c 'echo "strings here" >> /path/to/file'
#
# Pipe user input into a script to automate its input execution; this'll hit enter for all inputs
# echo -e '\n' | timeout 300 perl vmware-install.pl
#
#
#
# -==[ Output Suppression/Redirection ]==-
# >/dev/null 1>&2 Supress all output (1), including errors (2)
#
#
# =========[ Expression Cheat Sheet ]========= #
#
# -d file exists and is a directory
# -e file exists
# -f file exists and is a regular file
# -h file exists and is a symbolic link
# -s file exists and size greater than zero
# -r file exists and has read permission
# -w file exists and write permission granted
# -x file exists and execute permission granted
# -z file is size zero (empty)
#
# $# Number of arguments passed to script by user
# $@ A list of available parameters (*avoid using this)
# $? The exit value of the command run before requesting this
# $0 Name of the running script
# $1..5 Arguments given to script by user
# $$ Process ID of the currently-running shell the script is running in
#
# [[ $? -eq 0 ]] Previous command was successful
# [[ $? -ne 0 ]] Previous command NOT successful
#
# [[ $var_string ]] true if var contains a string, false if null or empty
#
# ===============[ READ / READLINE Commands ]=============== #
# Ref: http://wiki.bash-hackers.org/commands/builtin/read
#
# The read command reads a line of input and separates the line into individual words using the IFS
# inter field separator. Each word in the line is stored in a variable from left to right. If there
# are fewer variables than words, then all remaining words are stored in the last variable. If there
# are more variables than words, then all remaining variables are set to NULL. If no variable is
# specified, it uses the default variable REPLY.
#
# read [-ers] [-u <FD>] [-t <TIMEOUT>] [-p <PROMPT>] [-a <ARRAY>] [-n <NCHARS>] [-d <DELIM>] [-i <TEXT>] [<NAME...>]
#
# -p "" Instead of echoing text, provide it right in the "prompt" argument
# *NOTE: Typically, there is no newline, so you may need to follow
# this with an "echo" statement to output a newline.
# -n # read returns after reading # chars
# -t # read will timeout after TIMEOUT seconds. Only from a Terminal
# (or) you can use the shell timeout variable TMOUT.
# -s Silent mode. Characters are not echoed coming from a Terminal (useful for passwords)
# -r Raw input; Backslash does not act as an escape character
# *NOTE: According to wiki, you should ALWAYS use -r
# -a ANAME words are assigned sequentially to the array variable ANAME
# You can also set individual array elements: read 'MYARRAY[5]' - quotes important
# without them, path expansion can break the script!
# -d DELIM recognize DELIM as data-end, instead of the default <newline>
# -u FD read input from File Descriptor FD
#
# *NOTE: User must hit enter or what they type will not be stored, including if timeout
# triggers before user presses enter so be sure to include enough time for user.
#
# *NOTE: If you specify -e, the 'readline' package is used and the remaining below params are available.
# -e On interactive shells, tells it to use BASH's readline interface to read the data
# -i "" Specify a default value. If user hits ENTER, this value is saved
#
# -= RETURN STATUSES =-
# 0 no error
# 2 invalid options
# >128 timeout
# !=0 invalid fd supplied to -u
# !=0 end-of-file reached
#
# -= EXAMPLES =-
# Ask for a path with a default value
#read -r -e -p "Enter the path to the file: " -i "/usr/local/etc/" FILEPATH
#
# Ask for a path with a default value and 5-second timeout - TODO: this work?
# read -e -r -n 5 -p "Enter the path to the file: " -i "/usr/local/etc/" FILEPATH
#
# A "press any key to continue..." solution like pause in MSDOS
#pause() {
# local dummy
# read -s -r -p "Press any key to continue..." -n 1 dummy
#}
#
# Parsing a simple date/time string
#datetime="2008:07:04 00:34:45"
#IFS=": " read -r year month day hour minute second <<< "$datetime"
#
#
#
# -==[ TOUCH ]==-
#touch
#touch "$file" 2>/dev/null || { echo "Cannot write to $file" >&2; exit 1; }
# -==[ SED ]==-
# NOTE: When using '/' for paths in sed, use a different delimiter, such as # or |
#
#sed -i 's/^.*editor_font=.*/editor_font=Monospace\ 10/' "${file}"
#sed -i 's|^.*editor_font=.*|editor_font=Monospace\ 10|' "${file}"
#
#
#
# -==[ Parse/Read a config file using whitelisting ]==-
#
#CONFIG_FILE="/path/here"
# Declare a whitelist
#CONFIG_SYNTAX="^\s*#|^\s*$|^[a-zA-Z_]+='[^']*'$"
# Check if file contains something we don't want
#if egrep -q -v "${CONFIG_SYNTAX}" "$CONFIG_PATH"; then
# echo "Error parsing config file ${CONFIG_PATH}." >&2
# echo "The following lines in the configfile do not fit the syntax:" >&2
# egrep -vn "${CONFIG_SYNTAX}" "$CONFIG_PATH"
# exit 5
#fi
# otherwise go on and source it:
#source "${CONFIG_FILE}"
#
#
#
#
## ======================[ Template File Code Help ]========================= ##
#
# ============[ Variables ]===============
#
#
# var1="stuff"
# readonly var1 Make variable readonly
# declare -r var1 Another way to make it readonly
# unset var1 Delete var1
#
#
# =========[ Loops ]========
# For, While, Until, Select
#
# For x in ___; do done
#
#
#
# ===============[ ARRAYS (Index starts at [0]) ]==================
# Create arrays
# declare -a MYARRAY=(val1 val2 val3...)
# files=( "/etc/passwd" "/etc/group" "/etc/hosts" )
# limits=( 10, 20, 26, 39, 48)
#
# Print all items in an array; prints them space-separated, unles you do the \n method below
# printf "%s\n" "${array[@]}" (or) "${array[*]}"
# printf "%s\n" "${files[@]}"
# printf "%s\n" "${limits[@]}"
# echo -e "${array[@]}"
#
# Loop through an array
# array=( one two three )
# for i in "${array[@]}"
# do
# echo $i
# done
#
#
# ==============[ Booleans ]==================
# The below examples are all ways you can check booleans
#bool=true
#if [ "$bool" = true ]; then
#if [ "$bool" = "true" ]; then
#
#if [[ "$bool" = true ]]; then
#if [[ "$bool" = "true" ]]; then
#if [[ "$bool" == true ]]; then
#if [[ "$bool" == "true" ]]; then
#
#if test "$bool" = true; then
#if test "$bool" = "true"; then
|
Cashiuus/penprep
|
system-setup/linux/setup-screen.sh
|
Shell
|
mit
| 14,089 |
shopt -s cdspell
shopt -s nocaseglob
shopt -s checkwinsize
shopt -s dotglob
shopt -s extglob
shopt -s progcomp
shopt -s histappend
set -o ignoreeof
set bell-style none
unset MAILCHECK # disable "you have mail" warning
ulimit -S -c 0 # disable core dump
bind "set completion-ignore-case on" # ignore case on bash completion
bind "set mark-symlinked-directories on" # add trailing slash to symlinked directories
|
pedroreys/bash
|
scripts/options.sh
|
Shell
|
mit
| 470 |
###################
# LOCALE #
###################
export LC_ALL=en_US.UTF-8
export TZ="Europe/Berlin"
export LC_CTYPE="en_US.UTF-8"
export LANG="en_US.UTF-8"
|
salimane/dotfiles
|
zsh/.zsh/etc/locale.zsh
|
Shell
|
mit
| 169 |
#!/bin/bash
echo "== Data Collection and Parsing Begin =="
if [ "$#" -le 0 ]
then
echo "Invalid Input, Please Add the name of one or more log files"
exit 1;
fi
strategy=""
actions=0
success=0
total_milisecs=0
C=0
R=0
U=0
D=0
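# The grep/sed extraction below assumes per-client log lines roughly of this
# shape (hypothetical example; only the captured fields matter):
# => STRATEGY: ROUND_ROBIN
# TOTAL Milisecs Taken:123456
# Create RUN ... Requested:100/100 (and likewise for Read/Update/Delete)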
echo "=> Processing Log Files"
for log_name in "$@"
do
echo "Retrieving $log_name";
#TODO: Make hostname and path a parameter
scp ${hostname}:${log_path}"$log_name" .;
strategy=$(grep "=> STRATEGY:" $log_name | /usr/local/bin/sed -nr 's/.*STRATEGY: (.+).*/\1/p');
total_milisecs=$((total_milisecs + $(grep "TOTAL Milisecs Taken:" $log_name | /usr/local/bin/sed -nr 's/.*TOTAL Milisecs Taken:(.+).*/\1/p')));
C=$((C + $(grep "Create RUN" $log_name | /usr/local/bin/sed -nr 's/.*Requested:(.+)\/.*/\1/p')));
R=$((R + $(grep "Read RUN" $log_name | /usr/local/bin/sed -nr 's/.*Requested:(.+)\/.*/\1/p')));
U=$((U + $(grep "Update RUN" $log_name | /usr/local/bin/sed -nr 's/.*Requested:(.+)\/.*/\1/p')));
D=$((D + $(grep "Delete RUN" $log_name | /usr/local/bin/sed -nr 's/.*Requested:(.+)\/.*/\1/p')));
done
echo "=> Removing Log Files"
for log_name in "$@"
do
rm -rf "$log_name"
done
success=$((success/3))
total_milisecs=$((total_milisecs/3))
echo "== SUMMARY OF STATS =="
echo "======================"
echo "Number of clients parsed: 3"
echo "Strategy Used: ${strategy}"
echo "AVG Total Milisecs taken: ${total_milisecs}"
echo "Total Create Actions Run: ${C}"
echo "Total Read Actions Run: ${R}"
echo "Total Update Actions Run: ${U}"
echo "Total Delete Actions Run: ${D}"
echo "== Data Collection and Parsing Complete =="
|
fpiagent/dmachine
|
runningFiles/collect_data.sh
|
Shell
|
mit
| 1,587 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for DSA-3333-1
#
# Security announcement date: 2015-08-12 00:00:00 UTC
# Script generation date: 2017-01-01 21:07:31 UTC
#
# Operating System: Debian 7 (Wheezy)
# Architecture: armv7l
#
# Vulnerable packages fix on version:
# - iceweasel:38.2.0esr-1~deb7u1
#
# Last versions recommended by security team:
# - iceweasel:38.3.0esr-1~deb7u1
#
# CVE List:
# - CVE-2015-4473
# - CVE-2015-4478
# - CVE-2015-4479
# - CVE-2015-4480
# - CVE-2015-4484
# - CVE-2015-4487
# - CVE-2015-4488
# - CVE-2015-4489
# - CVE-2015-4492
# - CVE-2015-4493
# - CVE-2015-4475
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade iceweasel=38.3.0esr-1~deb7u1 -y
|
Cyberwatch/cbw-security-fixes
|
Debian_7_(Wheezy)/armv7l/2015/DSA-3333-1.sh
|
Shell
|
mit
| 832 |
# Sample bash script for Bot-Shutdown Event
# If you enable 'For all commands, use job details as arguments'
# some details about the just-finished batch will be appended to the
# command as arguments. On windows, there was no easy way to call
# the php script directly, so we call php via a batch file. Furthermore,
# we had to provide an absolute path to the php executable even though
# php.exe is in the environment path.
#
# The order or the arguments is as follows:
#
# $1 => The given name of The Bot as found in Templater's Preferences dialog
# $2 => Absolute path to the AE project file being processed at the time of disable
# $3 => Absolute path to the folder containing the AE project file being processed
#
# Provided for your personal or commercial use by Dataclay, LLC
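# Illustrative invocation with the job-detail arguments appended, matching the
# argument order described above (hypothetical values):
# ./on-bot-disable.sh "MyBot" "/Users/arie/Projects/promo.aep" "/Users/arie/Projects"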
log="$3/templater-bot.log"
echo "-------- [TEMPLATER BOT] --------" >> "$log"
echo "" >> "$log"
echo " The bot went down at $(date)" >> "$log"
echo " Sending email notifice" >> "$log"
/usr/local/opt/php55/bin/php -f "/Users/arie/Dev/Templater/Scripts/on-bot-disable.php" -- "$1" "$2" "$3"
echo " Done sending email notice" >> "$log"
echo "" >> "$log"
|
dataclay/event-scripts
|
OSX/on-bot-disable.sh
|
Shell
|
mit
| 1,169 |
#!/bin/sh
esdir="$(dirname $0)"
while true; do
rm -f /tmp/es-restart /tmp/es-sysrestart /tmp/es-shutdown
"$esdir/emulationstation" "$@"
[ -f /tmp/es-restart ] && continue
if [ -f /tmp/es-sysrestart ]; then
rm -f /tmp/es-sysrestart
sudo reboot
break
fi
if [ -f /tmp/es-shutdown ]; then
rm -f /tmp/es-shutdown
sudo poweroff
break
fi
break
done
|
tlanks/EmulationStation
|
emulationstation.sh
|
Shell
|
mit
| 423 |
#!/bin/bash
set -e
# folder structure
WORKDIR=$(jq '.working_directory' < credentials.json | tr -d '"')
parallel 'mkdir -p {1}/{2}/{3}' ::: "$WORKDIR" ::: 'raw' 'preprocessed' ::: 'ALDOT' 'CAPS'
# read database credentials
PGHOST=$(jq '.db.PGHOST' < credentials.json | tr -d '"')
PGUSER=$(jq '.db.PGUSER' < credentials.json | tr -d '"')
PGPASSWORD=$(jq '.db.PGPASSWORD' < credentials.json | tr -d '"')
PGDATABASE=$(jq '.db.PGDATABASE' < credentials.json | tr -d '"')
PGPORT=$(jq '.db.PGPORT' < credentials.json | tr -d '"')
# run db creation scripts
PGPASSWORD="$PGPASSWORD" psql -X -h "$PGHOST" -U "$PGUSER" -d "$PGDATABASE" -p "$PGPORT" -f infrastructure/db/create_matrices_schema.sql
PGPASSWORD="$PGPASSWORD" psql -X -h "$PGHOST" -U "$PGUSER" -d "$PGDATABASE" -p "$PGPORT" -f infrastructure/db/create_modeling_schema.sql
PGPASSWORD="$PGPASSWORD" psql -X -h "$PGHOST" -U "$PGUSER" -d "$PGDATABASE" -p "$PGPORT" -f infrastructure/db/create_results_schema.sql
|
jtwalsh0/traffic_safety
|
setup.sh
|
Shell
|
mit
| 963 |
# Runs a few commands after the sources have been fetched.
#
# The goal is to set the directory permissions and the site's base URL,
# and to do everything else required so that the installation
# reports no errors.
#
# You can either edit the section below or define the environment
# variables before running the script.
#
# Example:
# export BASE_URL="https://flub78.ddns.net/citemplate"
# export PROJECT_DIR="$HOME/git/citemplate"
# ===================================================================
# Variables to configure
if [ ! -n "${BASE_URL+1}" ]; then
export BASE_URL="http://localhost/citemplate_bb"
fi
# Directory where GVV has been fetched
if [ ! -n "${PROJECT_DIR+1}" ]; then
export PROJECT_DIR="/var/www/html/citemplate_bb"
fi
echo "CITEMPLATE installation"
echo "\$BASE_URL = $BASE_URL"
echo "\$PROJECT_DIR = $PROJECT_DIR"
export BASE_URL_PATTERN='http://localhost/citemplate/'
# User ID of the WEB server
export WEB_SERVER_ID=""
# No changes required below this line
# ===================================================================
# Configure BASE_URL
export CONFIG_FILE="$PROJECT_DIR/application/config/config.php"
echo "Configuration"
echo " \$BASE_URL=$BASE_URL"
echo " \$CONFIG_FILE=$CONFIG_FILE"
echo " \$BASE_URL_PATTERN=$BASE_URL_PATTERN"
# temporarily make a copy
mv -f $CONFIG_FILE "$CONFIG_FILE.svg"
cp /opt/citemplate/config.php $CONFIG_FILE
# Check the write permissions
#sed s|$BASE_URL_PATTERN|$BASE_URL| $CONFIG_FILE > $CONFIG_FILE
# sed -i s#http://localhost/citemplate#http://localhost/citemplate_bb# $CONFIG_FILE
# Clean up the directories
# Check the permissions
chmod -f a+w $PROJECT_DIR/application/config/program.php
chmod -f 777 $PROJECT_DIR/application/logs
chmod -f 777 $PROJECT_DIR/uploads
mkdir -p $PROJECT_DIR/uploads/restore
chmod -f 777 $PROJECT_DIR/uploads/restore
find $PROJECT_DIR -type d -exec chmod -f a+wx {} \;
chmod -f -R a+r $PROJECT_DIR
return 0
|
flub78/citemplate
|
install/install.sh
|
Shell
|
mit
| 1,999 |
#!/bin/bash
# EA to show Sophos Home version
avpath="/Applications/Sophos Home.app"
if [ -d "$avpath" ];
then
version=$( /usr/bin/defaults read "${avpath}/Contents/Info" CFBundleShortVersionString )
echo "<result>$version</result>"
else
echo "<result>Not Installed</result>"
fi
|
franton/Remove-AV
|
ea-sophoshome.sh
|
Shell
|
mit
| 287 |
git pull;
git commit -am 'auto-update balances';
git push;
|
chancecoin/chancecoinj
|
git_commit.sh
|
Shell
|
mit
| 59 |
set -ex
SOURCE="$1"
TARGET="$2"
URL="$3"
DIR="$4"
has() {
type "$1" > /dev/null 2>&1
return $?
}
if [ ! "$SOURCE" ] || [ ! "$URL" ] || [ ! "$TARGET" ]; then
echo "Error: missing source and/or target" >&2;
exit 10
fi
if [ "$DIR" ]; then
set +e
rm -Rf "$TARGET" 2>/dev/null
set -e
fi
mkdir -p "$TARGET"
cd "$TARGET"
# Download file if needed
if [ "$URL" ]; then
if has "wget"; then
DOWNLOAD="wget --no-check-certificate -nc"
elif has "curl"; then
DOWNLOAD="curl -sSOL"
else
echo "Error: you need curl or wget to proceed" >&2;
exit 20
fi
echo "Downloading... $URL"
printf "\e[01;30m"
$DOWNLOAD "$URL"
printf "\e[0m"
SOURCE="$TARGET/$(basename $URL)"
fi
# Make sure package is in the target folder
if [ "$(dirname "$SOURCE")" != "$(dirname "$TARGET"/x)" ]; then
cp -a "$SOURCE" "$TARGET"
SOURCE="$TARGET/$(basename "$SOURCE")"
fi
# Unpack source
echo "Unpacking... $SOURCE"
printf "\e[01;30m"
tar --overwrite -zxf "$SOURCE"
printf "\e[0m"
# Delete package
rm -Rf "$SOURCE"
# Move directory
if [ "$DIR" ]; then
echo "Merging... $TARGET/$DIR in $TARGET"
merge() {
mkdir -p "c9_tmp"
rm -rf "c9_tmp/$DIR"
mv "$DIR" "c9_tmp/$DIR"
mv "c9_tmp/$DIR/"* .
set +e
mv "c9_tmp/$DIR/."* . 2>/dev/null
set -e
rm -rf "c9_tmp"
}
printf "\e[01;30m"
merge
printf "\e[0m"
fi
|
GHackAnonymous/c9.ide.installer
|
commands/tar.gz.sh
|
Shell
|
mit
| 1,460 |
#!/bin/sh
# mogend.sh
#
# Created by Jean-Denis Muys on 24/02/11.
# Modified by Ryan Rounkles on 15/5/11 to use correct model version and to account for spaces in file paths
# Modified by Vyacheslav Artemev on 7/12/11 to use separate folders for machine and human
# Modified by Danis Ziganshin on 14.02.14 for ARC compatibility
# Check paths for generated files first! It can be different for every project
# If something wrong with paths on your machine - use absolute path for mogenerator script
# for enabling this script you should go to "Project target" -> "Build Rules" -> "Editor" -> "Add build Rule" -> select "Data model version files using Script" -> Process = "Data model version files" -> add custom script
#echo "Running mogend"
#"${SRCROOT}/ios-base/Scripts/mogend.sh"
# Set Output files = $(DERIVED_FILE_DIR)/${INPUT_FILE_BASE}.momd
curVer=`/usr/libexec/PlistBuddy "${INPUT_FILE_PATH}/.xccurrentversion" -c 'print _XCCurrentVersionName'`
mogenerator --model "${INPUT_FILE_PATH}/$curVer" --machine-dir "${PROJECT_DIR}/ios-base/Classes/CoreData/Private/" --human-dir "${PROJECT_DIR}/Classes/CoreData/" --template-var arc=true
${DEVELOPER_BIN_DIR}/momc -XD_MOMC_TARGET_VERSION=10.6 "${INPUT_FILE_PATH}" "${TARGET_BUILD_DIR}/${EXECUTABLE_FOLDER_PATH}/${INPUT_FILE_BASE}.momd"
echo "Mogend.sh is done"
|
fs/ios-base
|
ios-base/Scripts/mogend.sh
|
Shell
|
mit
| 1,338 |
#!/bin/bash
cd $(dirname $0)
diff=`git diff`
if [ ${#diff} != 0 ];
then
echo "还有东西没有提交"
exit 1
fi
echo "--------tag list--------"
git tag -l
echo "--------tag list--------"
echo "根据上面的tag输入新tag"
read thisTag
# Get the podspec file name
podSpecName=`ls|grep ".podspec$"|sed "s/\.podspec//g"`
echo $podSpecName
# Update the version number
sed -i "" "s/s.version *= *[\"\'][^\"]*[\"\']/s.version=\"$thisTag\"/g" $podSpecName.podspec
pod lib lint --allow-warnings
# Exit if lint validation fails
if [ $? != 0 ];then
exit 1
fi
git commit $podSpecName.podspec -m "update podspec"
git push
git tag -m "update podspec" $thisTag
git push --tags
pod trunk push $podSpecName.podspec --allow-warnings
|
baxiang/BXUIKit
|
cocoapod.sh
|
Shell
|
mit
| 726 |
#!/bin/sh
cp -v ../../build_gcw0/$1 ./opk-data/
cp -rv ../../data ./opk-data/data
mksquashfs opk-data/* ../../build_gcw0/$1.opk
rm ./opk-data/$1
|
PuKoren/gcw0-sdl2-cmake-sample
|
cmake/gcw-zero/opk_build.sh
|
Shell
|
mit
| 144 |
#!/bin/bash
if [ "$1" == "kill" ]
then
echo "Killing server..."
lsof -P | grep ':8080' | awk '{print $2}' | xargs kill -9
else
echo "Starting server..."
dev_appserver.py app.yaml --log_level=debug
fi
|
onejgordon/action-potential
|
server.sh
|
Shell
|
mit
| 205 |
#!/bin/bash
python RunSimulation.py --Geo 500.0 --sim_num 87
|
xji3/IGCCodonSimulation
|
ShFiles/YDR418W_YEL054C_IGCgeo_500.0_sim_87.sh
|
Shell
|
mit
| 61 |
#!/bin/sh
# chkdeps.sh
# Copyright (c) 2006 Jeffrey S. Pitblado
self=`basename $0`
date=`date '+%y-%m-%d-%H%M%S%Z'`
tmp=/tmp/$self-$$
trap "rm -f $tmp;exit" 1 2 3 15
if [ "$1" != "now" ]
then
cat 1>&2 <<EOF
Usage: $self now
Display a list of interdependencies of the script files within the current
directory.
EOF
exit 1
fi
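# Illustrative output (hypothetical file names): if scripts "backup" and "report"
# both invoke "rotate-logs", running "chkdeps.sh now" in that directory prints:
#   rotate-logs
#     backup
#     report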
for file in *
do
test -f $file || continue
grep -l "^[ ]*$file\>" * | grep -v "\<$file\>" > $tmp
grep -l "=.[ ]*$file\>" * | grep -v "\<$file\>" >> $tmp
grep -l "|[ ]*$file\>" * | grep -v "\<$file\>" >> $tmp
list=`cat $tmp | sort | uniq`
if [ ! -z "$list" ]
then
echo "$file"
for name in $list
do
echo " $name"
done
fi
done
rm -f $tmp
exit
# end
|
jpitblado/dotfiles
|
bin/chkdeps.sh
|
Shell
|
mit
| 705 |
#!/bin/bash
#
# uninstall-user.sh
BINDIR="/home/steam/bin"
DATADIR="/home/steam/.local/share/shadowbound"
for f in "${BINDIR}/shadowbound" \
"${DATADIR}/uninstall.sh" \
"${DATADIR}/sbrconclient.py"
do
if [ -f "$f" ]; then
rm "$f"
fi
done
|
thakyZ/shadowbound
|
tools/uninstall-user.sh
|
Shell
|
mit
| 274 |
#!/bin/bash
#
# Basic database backup and restore script.
mongodump --db bikeabout --out ~/dump
mongorestore --db bikeabout ~/dump/bikeabout
#only one collection:
mongorestore --db bikeabout --collection hits dump2012_06_21_14_32_36/analytics/hits.bson
|
svda/bikeabout.nl
|
bin/backup.sh
|
Shell
|
mit
| 245 |
#!/bin/bash
cmd="psql template1 --tuples-only --command \"select count(*) from pg_database where datname = 'boxing';\""
db_exists=`eval $cmd`
if [ $db_exists -eq 0 ] ; then
cmd="createdb boxing"
eval $cmd
fi
psql boxing -f schema/create_schema.sql
cat csv/boxers_*.csv >> /tmp/boxers.csv
rpl -q ',"",' ',,' /tmp/boxers.csv
psql boxing -f loaders/load_boxers.sql
rm /tmp/boxers.csv
cat csv/fights_*.csv >> /tmp/fights.csv
rpl -q ',"",' ',,' /tmp/fights.csv
rpl -q ',?,' ',,' /tmp/fights.csv
rpl -q ',debut,' ',0,' /tmp/fights.csv
psql boxing -f loaders/load_fights.sql
rm /tmp/fights.csv
|
octonion/boxing
|
load.sh
|
Shell
|
mit
| 600 |
#!/bin/bash
KEYWORDS_AIRBENDER="\bAang|Katara|\bToph(|s)\b|(air|fire|water|earth)(| )(bender|tribe|people)|avatar cycle|\bSokka|\bZuko|\bAppa(|(|')s)\b|\bIroh(|s)\b|\bMomo\b|\bAzula\b|\bOzal\b"
KEYWORDS_AIRBENDER_EXCLUDE="Momo (Sohma|Hinamori|Adachi)|Bleach"
if [ "$1" == "" ]; #Normal operation
then
debug_start "Avatar: The Last Airbender"
AVATAR=$(egrep -i "$KEYWORDS_AIRBENDER" "$NEWPAGES" | egrep -iv "$KEYWORDS_AIRBENDER_EXCLUDE")
categorize "AVATAR" "Avatar: The Last Airbender"
debug_end "Avatar: The Last Airbender"
fi
|
MW-autocat-script/MW-autocat-script
|
catscripts/Entertainment/Cartoons/Avatar_The_Last_Airbender/Avatar.sh
|
Shell
|
mit
| 542 |
#! /bin/bash
# mkdir3.sh -- This script checks to see if the user is root or not then asks the root user for a name of a directory and then checks if it already exists before trying to create it. If the directory exists, it prints out a message to tell the user it cannot be created. And also if the user is not root, it prints out a message that only root can run this program.
# Author : Prince Oppong Boamah<[email protected]>
# Date : 3rd July 2015
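#
# Example run (illustrative only):
#   $ sudo ./mkdir3.sh
#   You are like a God on this system
#   what's the name of the folder you wanted? reports
#   (creates ./reports if it does not already exist)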
if [[ $USER == 'root' ]]; then
echo You are like a God on this system
echo -n "what's the name of the folder you wanted? "
read name
if [[ -e $name ]]; then
echo $name exists. checking what it is
if [[ -d $name ]]; then
echo $name is already a directory and cannot be created
elif [[ -f $name ]]; then
echo $name is already a file and cannot be created
else
echo $name is neither a file nor a directory
fi
else
echo "$name does not exist. Creating it now."
mkdir "$name"
fi
else
echo You are just a regular user on this system and only root can run this program
fi
|
Prince-linux/prince-linux-learning
|
scripts/mkdir3.sh
|
Shell
|
mit
| 1,242 |
#!/bin/bash
# Get the Env Details. In this case as part of the App Profile we will define a custom variable "DEP_ENV" to indicate which list to pick the IP from
# The CliQr CCO will be passed the variable by prefixing it with eNV, as shown below.
dep_env=$eNV_DEP_ENV
# File to fetch the IP from. These files will be stored on the CCO and will maintain the IP Addr list.
fname="/usr/local/osmosix/callout/ipam-static/${dep_env}_ip_list"
# Fetch the first available IP Address from the list and mark the status as used
#ipAddr=`grep -m 1 "available" $fname | cut -d' ' -f1`
ipAddr=$nicIP_0
# Change the status of the IP Address from available to used
#sed -i -e /$ipAddr/s/available/used/ $fname
sed -i -e /$ipAddr/s/used/available/ $fname
|
clijockey/cloudcenter-services
|
callout/ipam2.sh
|
Shell
|
mit
| 744 |
# build.sh
# This build file started off as a simple, 2 line script that compiled all the classes and jar-ed them up.
# To build two separate jars and keep them tight (ie, with no extra files) required the script to grow.
# Since the intent of jNV as a project is to produce an app that works anywhere a JRE is available,
# it seemed disigenuous to have its source depend on bash. If the jar command had an option to exclude specific
# classes, I would have used it, but it doesnt. Instead of using sed or similar unix tools to exclude files, this
# script copies the classes over to working directories and uses standard file delete commands to arrive at
# the directory structure that can be fed to jar readily. So these commands could be easily replaced by their DOS
# batch command equivalents, for example. Alternatively, you can use MSYS-Git (like I do) on Windows and run the
# bash scripts as-is.
rm -rf classes/*
echo compiling...
if javac -Xlint:deprecation -Xlint:unchecked -d classes -cp classes src/org/stringtree/json/*.java src/org/vinodkd/jnv/*.java; then
echo building jnv_json.jar
echo "===================="
# clear work dir, taking care not to delete the manifest file.
rm -rf jar/jnv_json/*class
# copy new files
cp -R classes/* jar/jnv_json
# remove files not required for this jar
rm jar/jnv_json/org/vinodkd/jnv/JNV.class
rm jar/jnv_json/org/vinodkd/jnv/SerializedStore.class
# build the jar
jar cvfm bin/jnv_json.jar jar/jnv_json/MANIFEST.MF -C jar/jnv_json/ . res
echo building jnv.jar
echo "==============="
# clear work dir, taking care not to delete the manifest file.
rm -rf jar/jnv/*class
# copy new files
cp -R classes/* jar/jnv
# remove files not required for this jar
rm -rf jar/jnv/org/stringtree
rm jar/jnv/org/vinodkd/jnv/*Json*.class
# build the jar
jar cvfm bin/jnv.jar jar/jnv/MANIFEST.MF -C jar/jnv/ . res
fi
|
vinodkd/jNV
|
java/build.sh
|
Shell
|
mit
| 1,889 |
#!/bin/zsh
#command prompt aliases
alias ll='ls -lrt'
alias la='ls -lart'
alias hi=history
#homebrew aliases
alias bd='brew doctor'
alias bu='brew update'
alias bi='brew install'
alias bs='brew search'
#git aliases
alias gi='git init'
alias gc='git clone'
alias gb='git branch'
alias gs='git status -sb'
alias ga='git add -A'
alias gcm='git commit -m'
alias gpl='git pull'
alias gps='git push'
alias gf='git fetch'
alias gl='git log'
alias gplm='git pull origin master'
alias gplgm='git pull github master'
alias gphm='git push origin master'
alias gphgm='git push github master'
|
sundayoyeniyi/.dotfiles
|
zsh-config/aliases.zsh
|
Shell
|
mit
| 582 |
#!/bin/sh
# UI MODULE GENERATOR
# ---------------------
# This script allows you to generate new empty UI modules
# based on a sample or boilerplate module.
# By default, modules are stored in "./ui-modules/" (relative to projet directory).
# Sample module is kept under "./bin/ui-module-sample". You can change these files to reflect whatever
# the starting point of a new module should be (jade vs handlebars, default sass includes), etc.
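#
# Example (hypothetical module name):
#   ./bin/ui-module.sh carousel
# copies ./bin/ui-module-sample to ./ui-modules/carousel and renames the files inside it.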
#variables
MODULE_NAME="$1" #First argument in command line
SAMPLE_LOCATION="bin"
MODULE_SAMPLE_DIR="ui-module-sample" #path to sample directory, relative to project folder
PROJECT_MODULES_DIR="ui-modules"
PWD="`pwd`"
#get the list of current modules based on directory names in module folder.
CURRENT_MODULES=`ls -l $PROJECT_MODULES_DIR | egrep '^d' | awk '{print $9}'`
#check if module already exists. If so, exit script with error message
for MODULE in $CURRENT_MODULES
do
if [ "$MODULE" == "$MODULE_NAME" ]; then
echo "Error: module '${MODULE}'' already exists"
exit
fi
done
#copy the sample ui-module into your project modules and rename based on command line argument
# -r = recursive
# -n = do not overwrite existing files
cp -rn ${PWD}/$SAMPLE_LOCATION/$MODULE_SAMPLE_DIR $PROJECT_MODULES_DIR/$MODULE_NAME
#rename all files based on new module name
for file in $PROJECT_MODULES_DIR/$MODULE_NAME/*/*.*
do
filepath="${file%/*}"
filename="${file##*/}"
extension=""${file#*.}""
mv $file $filepath/$MODULE_NAME.$extension
done
|
ripestudios/stylestrap
|
bin/ui-module.sh
|
Shell
|
mit
| 1,502 |
#!/bin/sh
case "$(uname)" in
Darwin)
os="macos"
;;
Linux)
os="linux"
;;
*)
echo "error: platform not supported"
;;
esac
cd
mkdir -p .config
# in alphabetical order
[ "$os" != "macos" ] && ln -s .cfg/bash/bashrc .bashrc
ln -s .cfg/git/gitconfig .gitconfig
mkdir -p .config/mpv
if [ "$os" == "macos" ]; then
ln -s ../../.cfg/mpv/mpv.conf.macos .config/mpv/mpv.conf
else
ln -s ../../.cfg/mpv/mpv.conf .config/mpv/mpv.conf
fi
ln -s ../.cfg/nvim .config
ln -s ../.cfg/ranger .config
ln -s ../.cfg/rofi .config
ln -s ../.cfg/sway .config
ln -s ../.cfg/termite .config
ln -s .cfg/tmux/tmux.conf .tmux.conf
if [ ! -d .vim ]; then
ln -s .cfg/vim .vim
else
echo "ln: .vim: directory exists"
fi
ln -s .cfg/zsh/zshrc .zshrc
ln -s .cfg/zsh/zprofile .zprofile
|
aldn/dotfiles
|
deploy.sh
|
Shell
|
mit
| 827 |
#!/usr/bin/env bash
# create and partition disks per ooom.fstab file - second boot
# TODO(ross) add function comments
ooom::mkvol() {
devn="$1"
vol="$2"
if [[ ! -b "${devn}" ]]; then
echo "*** Error: Device not found: ${devn}"
return 1
fi
if [[ -z "${vol}" ]]; then
echo "*** Error: Invalid volume: ${vol}"
return 1
fi
if [[ "${vol}" = "none" ]]; then
echo Skipping volume: ${vol}
return 0
fi
if [[ -d "${vol}" ]]; then
echo Skipping volume ${vol} as it already exists
return 0
fi
if [[ "${vol}" = "/tmp" ]]; then
mode=1777
else
mode=0755
fi
echo Creating volume ${vol} with mode ${mode} ...
mkdir -pv --mode "${mode}" "${vol}"
if [[ ! -d "${vol}" ]]; then
echo "*** Error: Volume not found: ${vol}"
return 1
fi
echo Mounting ${vol} on ${devn} ...
mount -v "${vol}"
EL=$? ; test "$EL" -gt 0 && echo "*** Error: Command returned error $EL"
}
# TODO(ross) add function comments
ooom::rmbackup() {
devn="$1"
vol="$2"
if [[ "${vol}" = "none" ]]; then
return 0
fi
if [[ ! -b "${devn}" ]]; then
echo "*** Error: Device not found: ${devn}"
return 1
fi
if [[ ! -d "${vol}" ]]; then
echo "*** Error: Volume not found: ${vol}"
return 1
fi
if [[ ! -d "${vol}.ooomed" ]]; then
if ! egrep -v '^\s*#' /etc/fstab.pre-ooomed | tr -s "\t" " " | cut -d' ' -f 2 | egrep -q "^${vol}$"; then
echo Warning: Volume not found: ${vol}.ooomed
return 1
fi
fi
echo Removing "${vol}.ooomed" ...
rm -fr "${vol}.ooomed"
EL=$? ; test "$EL" -gt 0 && echo "*** Error: Command returned error $EL"
}
#set -o xtrace
echo $0 started at $(date)
OOOM_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd "${OOOM_DIR}"
# for debugging only:
#env | sort
. "${OOOM_DIR}/ooom-config.sh"
# for debugging only:
#env | sort | grep _ | egrep -v '^(BASH|UPSTART)_'
ls -l / >${OOOM_LOG_DIR}/ls-2.log
cat /proc/mounts | sort >${OOOM_LOG_DIR}/mounts-2.log
parted -s -l >${OOOM_LOG_DIR}/parted-2.log 2>&1
swapon -s >${OOOM_LOG_DIR}/swapon-2.log 2>&1
if [[ -f "${OOOM_DIR}/ooom-custom-boot-2-start.sh" ]]; then
echo Running ${OOOM_DIR}/ooom-custom-boot-2-start.sh ...
${OOOM_DIR}/ooom-custom-boot-2-start.sh
echo ${OOOM_DIR}/ooom-custom-boot-2-start.sh returned $?
fi
OOOM_FSTABS="${OOOM_DIR}/${OOOM_FSTAB}"
if [[ ! -f "${OOOM_FSTABS}" ]]; then
echo File not found: ${OOOM_FSTABS}
exit 1
fi
echo === Step 1: ooom::mkvol
cat "${OOOM_FSTABS}" | while IFS=$' \t' read -r -a var; do
dev="${var[[0]]}"
vol="${var[[1]]}"
if echo "${dev}" | egrep -q "^\s*#"; then
continue
fi
if [[ "${vol}" = "$OOOM_BOOT_VOL" ]]; then
continue
fi
ooom::mkvol "${dev}" "${vol}"
done
if [[ "$OOOM_REMOVE_BACKUPS" ]]; then
echo === Step 2: ooom::rmbackup
tac "${OOOM_FSTABS}" | while IFS=$' \t' read -r -a var; do
dev="${var[[0]]}"
vol="${var[[1]]}"
if echo "${dev}" | egrep -q "^\s*#"; then
continue
fi
ooom::rmbackup "${dev}" "${vol}"
done
fi
echo === Step 3: zero volumes
for vol in ${OOOM_SHRINK_DISKS}; do
if [[ ! -d "${vol}" ]]; then
echo "*** Error: Volume not found: ${vol}"
continue
fi
if [[ "${vol}" != "/" ]]; then
vol="${vol}/"
fi
zero="${vol}ZERO_FREE_SPACE"
echo Zeroing free space on ${vol} ...
dd if=/dev/zero of=${zero} bs=1M
rm -f "${zero}"
done
if [[ -d "${OOOM_MOUNT}" ]]; then
rmdir "${OOOM_MOUNT}"
fi
OOOM_RC_LOCAL=/etc/rc.local.ooomed
if [[ -f "${OOOM_RC_LOCAL}" ]]; then
cp -p "${OOOM_RC_LOCAL}" /etc/rc.local
else
rm -f /etc/rc.local
fi
if [[ -f "${OOOM_DIR}/ooom-custom-boot-1-end.sh" ]]; then
echo Running ${OOOM_DIR}/ooom-custom-boot-1-end.sh ...
${OOOM_DIR}/ooom-custom-boot-1-end.sh
echo ${OOOM_DIR}/ooom-custom-boot-1-end.sh returned $?
fi
ls -l / >${OOOM_LOG_DIR}/ls-2b.log
echo $0 finished at $(date)
# eof
|
rasa/out-of-one-many
|
ooom-boot-2.sh
|
Shell
|
mit
| 3,914 |
#!/bin/bash -
#===============================================================================
#
# FILE: build.sh
#
# USAGE: ./build.sh
#
# DESCRIPTION:
#
# OPTIONS: ---
# REQUIREMENTS: ---
# BUGS: ---
# NOTES: ---
# AUTHOR: Michael Fong (mcfongtw),
# ORGANIZATION:
# CREATED: 2016/10/13/ 20:14:28
# REVISION: 0.1
#===============================================================================
rm -rf ./build/
mkdir -p ./build/
cd ./build
#TODO: Improve w/ case insensitive equality check
if [ "$1" = "Release" ];
then
cmake -DCMAKE_BUILD_TYPE=Release ../
else
cmake ../
fi
make googletestsuite
make
|
mcfongtw/LostInCompilation
|
build.sh
|
Shell
|
mit
| 683 |
#!/bin/bash
set -e
cd "$(dirname "$0")"
project_root_dir="`pwd`/../.."
webtest_patches_dir="${project_root_dir}/ifs-data-layer/ifs-data-service/src/main/resources/db/webtest"
get_current_patch_level () {
# extract the current version of the webtest data
echo "`find ${webtest_patches_dir} -name '*__Base_webtest_data.sql' | sed 's/.*\(V.*\)_[0-9]*__.*/\1/g'`"
}
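# For example (hypothetical file name): a baseline patch called
#   V133_7__Base_webtest_data.sql
# in the webtest patches directory makes get_current_patch_level return "V133".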
new_version_or_current=`get_current_patch_level`
force=""
profile=""
while getopts ":f :v: :a" opt ; do
case ${opt} in
v)
new_version_or_current="$OPTARG"
;;
f)
force="true"
;;
a)
profile="-Pprofile=automated"
;;
esac
done
new_version="${new_version_or_current}_"
run_flyway_clean () {
cd ${project_root_dir}
./gradlew -PopenshiftEnv=unused $profile -Pcloud=automated -Pifs.companies.house.key=unused ifs-data-layer:ifs-data-service:flywayClean --stacktrace
cd -
}
run_flyway_migrate() {
cd ${project_root_dir}
./gradlew -PopenshiftEnv=unused $profile -Pcloud=automated -Pifs.companies.house.key=unused ifs-data-layer:ifs-data-service:flywayMigrate --stacktrace
cd -
}
do_baseline () {
: ${DATABASE_HOST:=ifs-database}
generate_test_class="ifs-data-layer/ifs-data-service/src/test/java/org/innovateuk/ifs/testdata/GenerateTestData.java"
# clean database
run_flyway_clean
# navigate to project root
cd ${project_root_dir}
./gradlew -PopenshiftEnv=unused $profile -Pcloud=automated -Pifs.companies.house.key=unused clean --stacktrace
# run generator test class
IFS_GENERATE_TEST_DATA_EXECUTION=SINGLE_THREADED IFS_GENERATE_TEST_DATA_COMPETITION_FILTER=ALL_COMPETITIONS ./gradlew -PopenshiftEnv=unused $profile -Pcloud=automated -Pifs.companies.house.key=unused -PtestGroups=generatetestdata :ifs-data-layer:ifs-data-service:cleanTest :ifs-data-layer:ifs-data-service:test --tests org.innovateuk.ifs.testdata.GenerateTestData --stacktrace
# extract the current version of the webtest data
current_version="`get_current_patch_level`_"
cd ${webtest_patches_dir}
for i in ${current_version}*; do mv $i ${i/${current_version}/tmp_${new_version}}; done
rm -f ${new_version}*.sql
for i in tmp_${new_version}*; do mv $i ${i/tmp_${new_version}/${new_version}}; done
cd ${project_root_dir}/setup-files/scripts
# create baseline dump
./create-baseline-dump.sh ${new_version} $DATABASE_HOST
cd ${project_root_dir}
# check that the new sequence of patches works
run_flyway_clean
run_flyway_migrate
cat << EOF
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* *
* You have successfully run a webtest baseline. *
* Please verify the changes by running a full acceptance suite *
* *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
EOF
}
cat << EOF
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* *
* You are about to run a webtest baseline. *
* This will take a while so make sure you are not in a rush *
* *
* Current version is `get_current_patch_level` *
* New version will be ${new_version_or_current} *
* *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
EOF
if [[ -z "${force}" ]]; then
while true; do
read -p "Do you want to start the baseline? (y/N)" yn
case $yn in
[Yy]* ) do_baseline; break;;
[Nn]* ) exit;;
* ) exit;;
esac
done
else
do_baseline
fi
|
InnovateUKGitHub/innovation-funding-service
|
setup-files/scripts/generate-test-data.sh
|
Shell
|
mit
| 4,042 |
#!/bin/bash
set -x
set -e
DIR='/aineisto/documents'
find "$DIR" -type f -name '*.xml' -print0 | xargs -0 sed -i 's/nbsp\;/#160\;/g'
find "$DIR" -type f -name '*.xml' -print0 | xargs -0 sed -i 's/middot\;/#183\;/g'
find "$DIR" -type f -name '*.xml' -print0 | xargs -0 sed -i 's/ndash\;/#8211\;/g'
find "$DIR" -type f -name '*.xml' -print0 | xargs -0 sed -i 's/mdash\;/#8212\;/g'
find "$DIR" -type f -name '*.xml' -print0 | xargs -0 sed -i 's/rsaquo\;/#8250\;/g'
find "$DIR" -type f -name '*.xml' -print0 | xargs -0 sed -i 's/lsaquo\;/#8249\;/g'
find "$DIR" -type f -name '*.xml' -print0 | xargs -0 sed -i 's/sup2\;/#178\;/g'
find "$DIR" -type f -name '*.xml' -print0 | xargs -0 sed -i 's/col http:/col /g'
find "$DIR" -type f -name '*.xml' -print0 | xargs -0 sed -i 's/row http:/row /g'
find "$DIR" -type f -name '*.xml' -print0 | xargs -0 sed -i 's/row localhost:18088/row /g'
find "$DIR" -type f -name '*.xml' -print0 | xargs -0 sed -i 's/image http:/image /g'
find "$DIR" -type f -name '*.xml' -print0 | xargs -0 sed -i 's/ x:str/ str/g'
find "$DIR" -type f -name '*.xml' -print0 | xargs -0 sed -i 's/ x:num/ num/g'
find "$DIR" -type f -name '*.xml' -print0 | xargs -0 sed -i 's/colstr/col str/g'
find "$DIR" -type f -name '*.xml' -print0 | xargs -0 sed -i 's/u1:fmla/u1-fmla/g'
find "$DIR" -type f -name '*.xml' -print0 | xargs -0 sed -i 's/row style:/row /g'
find "$DIR" -type f -name '*.xml' -print0 | xargs -0 sed -i 's/ Template:/ /g'
|
ottok/uta-infim-tools
|
wikipedia-data-xml-fixes/xmlify.sh
|
Shell
|
mit
| 1,377 |
#!/bin/bash
set -e # exit with nonzero exit code if anything fails
# clear and re-create the out directory
rm -rf out || exit 0;
mkdir out;
# run our compile script, discussed above
bash compile.sh
# go to the out directory and create a *new* Git repo
cd out
git init
# inside this git repo we'll pretend to be a new user
git config user.name "Travis CI"
git config user.email "[email protected]"
# The first and only commit to this new Git repo contains all the
# files present with the commit message "Deploy to GitHub Pages".
git add .
git commit -m "Deploy to GitHub Pages"
# Force push from the current repo's master branch to the remote
# repo's gh-pages branch. (All previous history on the gh-pages branch
# will be lost, since we are overwriting it.) We redirect any output to
# /dev/null to hide any sensitive credential data that might otherwise be exposed.
git push --force --quiet "https://${GH_PAGES}@${GH_REF}" master:gh-pages > /dev/null 2>&1
|
ChubbyPotato/ggame
|
deploy.sh
|
Shell
|
mit
| 969 |
#!/bin/bash
##
## Ensures that a fresh Phoenix project does not trigger any issue in "normal" mode
##
# common setup
set -e
DIRNAME=$( cd "$( dirname "$0" )" && pwd )
PROJECT_ROOT=$( cd "$DIRNAME/.." && pwd )
# script specific sources, variables and function definitions
PROJECT_NAME=phx_credo_tester
PROJECT_DIRNAME=tmp/$PROJECT_NAME
# setup
yes | mix archive.install hex phx_new 1.4.11
cd $PROJECT_ROOT
mkdir -p tmp
echo ""
echo "--> Creating $PROJECT_NAME ..."
echo ""
rm -fr $PROJECT_DIRNAME || true
cd tmp
yes n | mix phx.new $PROJECT_NAME
# execution
echo ""
echo "--> Running Credo ..."
echo ""
cd $PROJECT_ROOT
mix credo $PROJECT_DIRNAME
|
rrrene/credo
|
test/test_phoenix_compatibility.sh
|
Shell
|
mit
| 663 |
#!/bin/sh
#sass --watch css/foo.scss:css/foo.css --style compressed #for production
sass --watch css/omd.scss:css/omd.css
exit 0
|
nevvkid/OMD
|
watch.sh
|
Shell
|
mit
| 131 |
#!/bin/bash
wget https://repo.continuum.io/miniconda/Miniconda2-latest-Linux-x86_64.sh
bash Miniconda2-latest-Linux-x86_64.sh
bash setup_lc.sh
|
panoptes/lightcurve-demo
|
setup_linux.sh
|
Shell
|
mit
| 144 |
#!/bin/bash
export SHELL_SCRIPT_MODULE_PATH="/vagrant-modules"
export APACHE_VHOST_FILE="/vagrant/vagrant/files/vhost.conf"
source "${SHELL_SCRIPT_MODULE_PATH}/lib.sh"
runModules "base" "mysql" "apache2" "php5" "php5-curl" "php5-gd" "php5-mcrypt" "php5-mysql" "php5-xdebug"
service apache2 restart
|
omerucel/framework-sample-site
|
vagrant/bootstrap.sh
|
Shell
|
mit
| 300 |
#!/usr/bin/env bash
echo $(bash --version 2>&1 | head -n 1)
#CUSTOMPARAM=0
BUILD_ARGUMENTS=()
for i in "$@"; do
case $(echo $1 | awk '{print tolower($0)}') in
# -custom-param) CUSTOMPARAM=1;;
*) BUILD_ARGUMENTS+=("$1") ;;
esac
shift
done
set -eo pipefail
SCRIPT_DIR=$(cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd)
###########################################################################
# CONFIGURATION
###########################################################################
BUILD_PROJECT_FILE="$SCRIPT_DIR/./build/.build.csproj"
TEMP_DIRECTORY="$SCRIPT_DIR/./.tmp"
DOTNET_GLOBAL_FILE="$SCRIPT_DIR/./global.json"
DOTNET_INSTALL_URL="https://raw.githubusercontent.com/dotnet/cli/master/scripts/obtain/dotnet-install.sh"
DOTNET_RELEASES_URL="https://raw.githubusercontent.com/dotnet/core/master/release-notes/releases.json"
export DOTNET_CLI_TELEMETRY_OPTOUT=1
export DOTNET_SKIP_FIRST_TIME_EXPERIENCE=1
export NUGET_XMLDOC_MODE="skip"
###########################################################################
# EXECUTION
###########################################################################
function FirstJsonValue {
perl -nle 'print $1 if m{"'$1'": "([^"\-]+)",?}' <<< ${@:2}
}
# If global.json exists, load expected version
if [ -f "$DOTNET_GLOBAL_FILE" ]; then
DOTNET_VERSION=$(FirstJsonValue "version" $(cat "$DOTNET_GLOBAL_FILE"))
fi
# If dotnet is installed locally, and expected version is not set or installation matches the expected version
if [[ -x "$(command -v dotnet)" && (-z ${DOTNET_VERSION+x} || $(dotnet --version) == "$DOTNET_VERSION") ]]; then
export DOTNET_EXE="$(command -v dotnet)"
else
DOTNET_DIRECTORY="$TEMP_DIRECTORY/dotnet-unix"
export DOTNET_EXE="$DOTNET_DIRECTORY/dotnet"
# If expected version is not set, get latest version
if [ -z ${DOTNET_VERSION+x} ]; then
DOTNET_VERSION=$(FirstJsonValue "version-sdk" $(curl -s "$DOTNET_RELEASES_URL"))
fi
# Download and execute install script
DOTNET_INSTALL_FILE="$TEMP_DIRECTORY/dotnet-install.sh"
mkdir -p "$TEMP_DIRECTORY"
curl -Lsfo "$DOTNET_INSTALL_FILE" "$DOTNET_INSTALL_URL"
chmod +x "$DOTNET_INSTALL_FILE"
"$DOTNET_INSTALL_FILE" --install-dir "$DOTNET_DIRECTORY" --version "$DOTNET_VERSION" --no-path
fi
echo "Microsoft (R) .NET Core SDK version $("$DOTNET_EXE" --version)"
"$DOTNET_EXE" run --project "$BUILD_PROJECT_FILE" -- ${BUILD_ARGUMENTS[@]}
|
BigBabay/AsyncConverter
|
build.sh
|
Shell
|
mit
| 2,455 |
#!/bin/bash
set -e
remote=$(git config --get remote.origin.url)
build_dir="build"
if [ -d $build_dir/.git ]; then
echo "The .git directory already exists in the build directory."
echo "This probably means that you have already cloned a reposiory there."
echo "You can proabbaly start publishing but if you are not sure use"
echo " rm -rf $build_dir"
echo "to remove the whole build directory and start over."
exit 1
fi
rm -r $build_dir || true
git clone $remote $build_dir
cd $build_dir
git checkout gh-pages
|
ncsu-osgeorel/foss-for-geospatial-analysis
|
get-gh-pages-branch.sh
|
Shell
|
mit
| 544 |
#!/bin/bash
# allow `go get` for private repositories
git config --global url."[email protected]:".insteadOf "https://github.com/"
if [ "$1" = "ssh_setup" ]; then
# start our ssh agent
eval "$(ssh-agent -s)"
# run expect to handle entering password for my mounted SSH key
# /ssh.exp
ssh-add /.ssh/github_rsa
# automate trusting github as a remote host
ssh -o StrictHostKeyChecking=no [email protected] uptime
ssh -T [email protected]
fi
if [ ! -d "vendor" ]; then
echo "Fetching dependencies described within lock file..."
glide install
fi
echo "Start watching files..."
godo server --watch
|
Integralist/Go-Requester
|
bootstrap.sh
|
Shell
|
mit
| 608 |
#!/bin/bash
# Set up links from CHAPI plugins and tools into the AMSoil deployment
base=$PWD
AMSOIL=$base/AMsoil
CHAPI=$base
if [ -d $AMSOIL ]; then
echo "updating $AMSOIL"
cd $AMSOIL
git pull
cd $base
else
echo "cloning into $AMSOIL"
cd $base
git clone https://github.com/fp7-ofelia/AMsoil.git
fi
cd $base
rm -f $AMSOIL/src/plugins/.gitignore
# hold off on marm for now
for pl in chrm chapiv1rpc sarm marm
do
if [ ! -d $AMSOIL/src/plugins/$pl ]; then
ln -s $CHAPI/plugins/$pl $AMSOIL/src/plugins/$pl
fi
echo $pl >> $AMSOIL/src/plugins/.gitignore
done
if [ ! -d $AMSOIL/src/tools ]; then
ln -s $CHAPI/tools $AMSOIL/src/tools
fi
echo tools > $AMSOIL/src/.gitignore
# Remove unused AMsoil plugins
for pl in dhcprm dhcpgeni3 mailer worker geniv3rpc
do
sudo rm -rf $AMSOIL/src/plugins/$pl
done
|
ahelsing/geni-ch
|
linkamsoil.sh
|
Shell
|
mit
| 840 |
#
# .NET
#
# .NET Framework is a software framework developed by Microsoft.
# It includes a large class library and provides language interoperability
# across several programming languages.
# Link: https://www.microsoft.com/net
# ------------------------------------------------------------------------------
# Configuration
# ------------------------------------------------------------------------------
SPACESHIP_DOTNET_SHOW="${SPACESHIP_DOTNET_SHOW=true}"
SPACESHIP_DOTNET_PREFIX="${SPACESHIP_DOTNET_PREFIX="$SPACESHIP_PROMPT_DEFAULT_PREFIX"}"
SPACESHIP_DOTNET_SUFFIX="${SPACESHIP_DOTNET_SUFFIX="$SPACESHIP_PROMPT_DEFAULT_SUFFIX"}"
SPACESHIP_DOTNET_SYMBOL="${SPACESHIP_DOTNET_SYMBOL=".NET "}"
SPACESHIP_DOTNET_COLOR="${SPACESHIP_DOTNET_COLOR="128"}"
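# For example (illustrative values), a user could override any of these defaults
# in their .zshrc before the prompt loads:
#
# SPACESHIP_DOTNET_SYMBOL="dotnet "
# SPACESHIP_DOTNET_COLOR="blue"
# SPACESHIP_DOTNET_SHOW=false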
# ------------------------------------------------------------------------------
# Section
# ------------------------------------------------------------------------------
# Show current version of .NET SDK
spaceship_dotnet() {
[[ $SPACESHIP_DOTNET_SHOW == false ]] && return
# Show DOTNET status only for folders containing project.json, global.json, .csproj, .xproj or .sln files
[[ -f project.json || -f global.json || -n *.csproj(#qN^/) || -n *.xproj(#qN^/) || -n *.sln(#qN^/) ]] || return
spaceship::exists dotnet || return
# dotnet-cli automatically handles SDK pinning (specified in a global.json file)
# therefore, this already returns the expected version for the current directory
local dotnet_version=$(dotnet --version 2>/dev/null)
spaceship::section \
"$SPACESHIP_DOTNET_COLOR" \
"$SPACESHIP_DOTNET_PREFIX" \
"${SPACESHIP_DOTNET_SYMBOL}${dotnet_version}" \
"$SPACESHIP_DOTNET_SUFFIX"
}
|
nelsonghezzi/spaceship-zsh-theme
|
sections/dotnet.zsh
|
Shell
|
mit
| 1,694 |
#!/bin/bash
#
# Use this file to quickly change the app version.
# It will also tag, commit the change and push it.
#
# Usage: ./version.sh 1.2.0
# Check $1
if [ -z "$1" ]
then
echo "Version is required."
fi
# Replace version in package.json files
sed -i.bak "s/\"version\": \".*\"/\"version\": \"$1\"/g" ./package.json
sed -i.bak "s/\"version\": \".*\"/\"version\": \"$1\"/g" ./src/package.json
sed -i.bak "s/download\/v.*\/iCilegon/download\/v$1\/iCilegon/g" ./src/package.json
# Clean up
rm ./package.json.bak
rm ./src/package.json.bak
# Edit CHANGELOG
vim ./CHANGELOG
# Git commit
git add .
git commit -m "New version v$1"
git tag -a "v$1" -m "v$1"
# TODO Paste all commits since the last tag into CHANGELOG
|
eyeyunianto/aksara
|
iCilegon/version.sh
|
Shell
|
mit
| 724 |
#! /bin/bash
if [ $1 = "init" ]; then
mkdir .shoelace
cd .shoelace
#cp -r ../../vagrant/provisioned/basic-ubuntu /
fi
|
codeeverything/shoelace
|
shoelace.sh
|
Shell
|
mit
| 130 |
#!zsh -x
havemodif=0
git diff-index --quiet HEAD
if [ $? -ne 0 ]; then
havemodif=1
fi
if [ $havemodif -ne 0 ]; then
git stash
fi
git fetch origin
foreach this (cmake-utils genericLogger genericStack genericHash tconv marpaWrapper luaunpanic lua-template-engine) {
git fetch $this master
}
current_branch=`git branch --show-current`
git reset --hard origin/$current_branch
git clean -ffdx
foreach this (cmake-utils genericLogger genericStack genericHash tconv marpaWrapper luaunpanic lua-template-engine) {
git subtree pull --prefix 3rdparty/github/$this $this master --squash
}
if [ $havemodif -ne 0 ]; then
git stash apply
fi
exit 0
|
jddurand/c-marpaESLIF
|
git_subtree_pull.zsh
|
Shell
|
mit
| 656 |
#!/bin/sh
docker-compose down -v --rmi all --remove-orphans
|
HospitalRun/hospitalrun-frontend
|
couchdb/couchdb-cleanup.sh
|
Shell
|
mit
| 61 |
#!/bin/bash
_HOSTS=""
_USER=""
_PUBKEY=""
_UID=""
_GROUP=""
SHADOW=""
SSH_OPTIONS=""
usage() {
echo "
Usage: $0
--help Display this help message
-h, --hosts Login host(space separated values)
-u, --user Username
-k, --pubkey Public key(string or file path)
-i, --uid UID
[-g, --group] Group
[-p, --shadow] Hash in /etc/shadow (single quotation marks)
[-s, --ssh_dir] Path to .ssh
[-o, --ssh_options] Ssh options
"
}
get_options() {
for OPT in "$@"
do
case "$OPT" in
'--help' )
usage
exit 1
;;
'-h'|'--hosts' )
_HOSTS="$2"
shift 2
;;
'-u'|'--user' )
_USER="$2"
shift 2
;;
'-k'|'--pubkey' )
if [ -f "$2" ] ; then
_PUBKEY=`cat $2`
else
_PUBKEY="$2"
fi
shift 2
;;
'-i'|'--uid' )
_UID="$2"
shift 2
;;
'-g'|'--group' )
_GROUP="$2"
shift 2
;;
'-p'|'--shadow' )
SHADOW="$2"
shift 2
;;
'-s'|'--ssh_dir' )
SSH_DIR="$2"
shift 2
;;
'-o'|'--ssh_options' )
SSH_OPTIONS="$2"
shift 2
;;
esac
done
}
create_user() {
if [ "$SSH_DIR" == "" ] ; then
SSH_DIR=/home/$_USER/.ssh
fi
AUTHORIZED_KEYS=$SSH_DIR/authorized_keys
HASH=`cat /dev/urandom | LC_CTYPE=C tr -dc "[:alnum:]" | head -c 32`
TMP_SHELL=/tmp/${HASH}.sh
SSH_OPTIONS=`bash -c "echo $SSH_OPTIONS"`
cat << EOF | ssh $SSH_OPTIONS $_HOST "cat >> ${TMP_SHELL}"
#!/bin/bash
if getent passwd | awk -F':' '{ print \$1}' | grep -w $_USER > /dev/null 2>&1; then
echo "[${_HOST}] ${_USER} is already registered"
exit 1
fi
if ! awk -F':' '{ print \$1}' /etc/group | grep -w $_GROUP > /dev/null 2>&1 ; then
_GROUP=$_USER
fi
if [ "$SHADOW" == "" ] ; then
HASH='\$6\$'\`sha1sum <(date) | awk '{print \$1}'\`
SHADOW=\`python -c "import crypt; print crypt.crypt(\"${_USER}\", \"\${HASH}\")";\`
fi
/usr/sbin/useradd -u $_UID -g $_GROUP -p '$SHADOW' -m $_USER
sed -i -e "s/${_USER}:\!\!/${_USER}:\${SHADOW}/" /etc/shadow
echo '${_USER} ALL=(ALL) ALL' >> /etc/sudoers
mkdir -m 700 $SSH_DIR
echo '${_PUBKEY}' >> $AUTHORIZED_KEYS
chmod 600 $AUTHORIZED_KEYS
chown $_USER:$_GROUP $SSH_DIR $AUTHORIZED_KEYS
EOF
ssh $SSH_OPTIONS -t -t $_HOST "chmod +x ${TMP_SHELL}; sudo $TMP_SHELL; rm -f $TMP_SHELL"
}
BIN_PATH=$(cd $(dirname $0); pwd)
SSH_CONF=$BIN_PATH/../config/ssh_config
KNOWN_HOSTS=$BIN_PATH/../tmp/known_hosts
get_options "$@" --ssh_options "-F ${SSH_CONF} -o UserKnownHostsFile=${KNOWN_HOSTS} ${SSH_OPTIONS}"
SERVERS=${_HOSTS:-"server1 server2"}
for SERVER in $SERVERS; do
_HOST=$SERVER
create_user
done
rm -f $KNOWN_HOSTS
|
tkuchiki/bash-create-and-delete-users
|
bin/create_user.sh
|
Shell
|
mit
| 3,200 |
#!/bin/bash
#################################################
##
## This script performs the preprocess
## stage to prepare the data for
## crowding script
##
## AUTHOR: Antonio Dorta <[email protected]>
## DATE: 2017-03-02
##
## Operations:
## 1) Read filenames to get the number of chips
## 2) Split the input stars file so that every
## chip has a similar number of FULL lines
## 3) Create a directory per chip to store all
## needed files
## 4) Create a new MCH file with ".alf" files
## instead of ".als". A link to this file
## using underscores is made to avoid
## problems with daomaster
## 5) Link/copy/move all specified files to
## the right chip directory
##
#################################################
# New extension for MCH with .alf
ext=".alf.mch"
# Files to be copied
EXT_TO_COPY=(.fits .opt .alf .mch .psf .als .opt .als.opt .ap .raw .mag .log .weights .scale .zero _comb.psf _comb.opt _comb.als.opt _shift.mch .phot)
#EXT_TO_COPY=(alf)
FILES_TO_COPY=(apcor.lst extinction fields)
mkdir -p logs
INLIST=logs/ADDSTAR.inlist
#FILE_FIELDS=addstar_fields
#FILE_CHIPS=addstar_chips
starsDir=STARS
starsFn=input_stars
ARGS=( \
"1: whether link/copy/move data files from source to destination (or 'inlist' to only create input list)" \
"2: path to data files" \
"3: create (1) or not (anything else) a directory per field" \
"4: create (1) or not (anything else) a directory per chip" \
"5: path to input stars file" \
"6: split (1) or not (anything else) the input stars file among all chips" \
"7: file with info of chips" \
"8 (Optional): path to add as prefix if needed when creating links (only if arg6 is 'link')" \
)
# "8 (Optional): destination (current directory if none). It will be created if it does NOT exist" \
if [[ $# -ge 1 ]] && [[ $1 == 'inlist' ]]
then
# If there is only one argument and it is "inlist", only the list will be created
find `pwd` -regextype sed -regex ".*F[0-9]*-[0-9]*_[0-9]*\.mch" | sort > $INLIST
echo "Input list for ADDSTAR stage has been created in $INLIST. Skipping file transferring..."
echo ""
exit
elif [[ $# -eq 8 ]]
then
prefix=$8
elif [[ $# -eq 7 ]]
then
# If there is no destination directory, use the current one
prefix=""
else
echo -n "ERROR -> Syntax: $0 "
count=1
for args in "${ARGS[@]}"
do
echo -n "arg$count "
(( count++ ))
done
echo ""
echo "ARGUMENTS:"
for arg in "${ARGS[@]}"
do
echo " * arg$arg"
(( count++ ))
done
>&2 echo "Syntax error when calling $0"
exit 1
fi
# Check that all needed arguments were specified
transferMethod=$1
orig=$2
sepFields=$3
sepChips=$4
starsOrig=$5
starsSplit=$6
chipsFile=$7
dest="."
# Determine whether copy|move|link the data files
case $transferMethod in
"copy") cmd='cp -f'
;;
"move") cmd='mv -f'
;;
"link") cmd='ln -sf'
;;
*) cmd='cp -f'
;;
esac
# Stars will not be moved, just copied!
if [[ $transferMethod == "move" ]] ; then
starsCmd='cp -f'
else
starsCmd=$cmd
fi
# Check that stars file exists
if [ ! -f $starsOrig ]; then
>&2 echo "File $starsOrig NOT found"
exit 2
fi
# Create DESTINATION
mkdir -p $dest
# Get CHIPS number based on filenames (skip all those missing chips)
FIELDS=(`find $orig -name "F*-*_??.fits" -printf "%f\n" | sed "s/^F//" | sed "s/-.*//" | sort -u`)
TOTAL_FIELDS=${#FIELDS[@]}
CHIPS=(`find $orig -name "F*-*_??.fits" -printf "%f\n" | sed "s/.*_//" | sed "s/\..*//" | sort -u`)
TOTAL_CHIPS=${#CHIPS[@]}
echo "FIELDS: ${FIELDS[*]}"
echo "CHIPS: ${CHIPS[*]}"
if [[ $TOTAL_FIELDS -eq 0 ]]
then
>&2 echo "ERROR: No fields have been detected, check path to data files ($orig)"
exit 3
fi
echo "Detected $TOTAL_FIELDS field(s)"
if [[ $TOTAL_CHIPS -eq 0 ]]
then
>&2 echo "ERROR: No chips have been detected, check path to data files ($orig)"
exit 4
fi
echo "Detected $TOTAL_CHIPS chip(s)"
starsDest="$dest/$starsDir"
mkdir -p $starsDest
if [[ $starsSplit -eq 1 ]]
then
# SPLIT input file of stars into as many files as chips we have (with similar number of FULL lines!!)
echo "Splitting $starsOrig in $TOTAL_CHIPS with similar number of lines (${starsFn}_XX.txt)"
split -n l/${TOTAL_CHIPS} --additional-suffix=".stars" $starsOrig
i=0
for f in *.stars
do
chip=${CHIPS[$i]}
#mkdir -p $dest/chip_$chip
#mv $f $dest/chip_$chip/${starsFn}_$chip.txt
mv $f $starsDest/${starsFn}_${chip}.txt
i=$(( i+1 ))
done
else
cp $starsOrig $starsDest/${starsFn}.txt
starsFile=${starsFn}.txt
fi
# Check if origin directory is absolute or relative
if [[ "$orig" != /* && "$transferMethod" == "link" ]]
then
# Relative path, convert to absolute
orig=`readlink -fv $orig`
orig="$prefix$orig"
echo "NEW ORIG: $orig"
fi
# Store current directory and move to destination
pushd .
cd $dest
# Create file with fields and chips
#rm -f $FILE_FIELDS
#for fld in ${FIELDS[@]}
#do
# echo "F$fld" >> $FILE_FIELDS
#done
#rm -f $FILE_CHIPS
#for chp in ${CHIPS[@]}
#do
# echo $chp >> $FILE_CHIPS
#done
# Create MCH with .alf files instead of .als
# Create symbolic links FX_...mch to FX-...mch
#(daomaster does NOT work with '-')
echo "Creating MCH files"
for f in $orig/*\-*_??.mch
do
fn="`basename $f .mch`$ext"
sed "s/\.als/\.alf/g" $f > $fn
ln -s "$fn" "${fn//-/_}"
done
relDir="."
if [[ $sepFields -eq 1 ]]
then
relDir="${relDir}/.."
fi
if [[ $sepChips -eq 1 ]]
then
relDir="${relDir}/.."
fi
# Transfer common needed files (if they exist)
for file in ${FILES_TO_COPY[@]}
do
if [ -f "$orig/$file" ] ; then
$cmd $orig/$file .
else
echo "Warning! File $orig/$file does NOT exist!"
fi
done
# Create a directory per EACH cheap and transfer related files to it
for fld in ${FIELDS[@]}
do
for chp in ${CHIPS[@]}
do
destDir='.'
if [[ $sepFields -eq 1 ]]
then
destDir="${destDir}/F${fld}"
fi
if [[ $sepChips -eq 1 ]]
then
destDir="${destDir}/chip${chp}"
fi
if [[ "$destDir" != "." ]]
then
mkdir -p $destDir
fi
echo " *** Create directory $destdir for Field F${fld} and Chip ${chp}. Use $cmd to transfer files"
mv F${fld}*_${chp}.* $destDir
# Transfer group of files by extension
for ext in ${EXT_TO_COPY[@]}
do
$cmd $orig/F${fld}-*_${chp}${ext} $destDir
done
# Get FI and PS values from OPT files of each chip
grep "^FI" $orig/F${fld}*_${chp}.opt | awk '{print $NF}' > $destDir/F${fld}-chipFI_${chp}.dat
grep "^PS" $orig/F${fld}*_${chp}.opt | awk '{print $NF}' > $destDir/F${fld}-chipPS_${chp}.dat
# Transfer stars file
cd $destDir
if [[ $starsSplit -eq 1 ]]
then
starsFile="*${chp}.txt"
fi
# Transfer stars
$starsCmd $relDir/$starsDest/$starsFile F${fld}-stars_${chp}.txt
# Link to common files
for file in ${FILES_TO_COPY[@]}
do
if [ -f "$relDir/$file" ] ; then
ln -s $relDir/$file .
fi
done
# Go back to base directory
cd $relDir
done
done
# Transfer CHIPS info file
echo "$cmd $orig/${chipsFile} ."
$cmd $orig/${chipsFile} .
# Create the INLIST file with all FX-NNNNNNN_YY.mch files
find `pwd` -regextype sed -regex ".*F[0-9]*-[0-9]*_[0-9]*\.mch" | sort > $INLIST
echo "Input list for ADDSTAR stage has been created in $INLIST"
# Return to initial directory
popd
exit 0
#DONE!!!
|
dnidever/PHOTRED
|
scripts/fakered_transfer.sh
|
Shell
|
mit
| 7,287 |
# libbash gui tests for osascript (only on macOS)
[ "$lb_current_os" != macOS ] && return 0
gui=osascript
# load global tests
source "$(dirname "$(lb_realpath "$BASH_SOURCE")")"/00_test_gui.sh
|
pruje/libbash-tests
|
libbash-gui/test_osascript.sh
|
Shell
|
mit
| 196 |
mkdir libs bin
wget -P libs http://jsoup.org/packages/jsoup-1.8.3.jar
wget -P libs https://bitbucket.org/xerial/sqlite-jdbc/downloads/sqlite-jdbc-3.8.11.1.jar
javac -cp "libs/*" -d "bin" src/*.java
|
lacraig2/XKCDViewer
|
build.sh
|
Shell
|
mit
| 197 |
#!/bin/sh
set -e
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
install_framework()
{
if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
local source="${BUILT_PRODUCTS_DIR}/$1"
else
local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
fi
local destination="${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
if [ -L "${source}" ]; then
echo "Symlinked..."
source="$(readlink "${source}")"
fi
# use filter instead of exclude so missing patterns don't throw errors
echo "rsync -av --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
rsync -av --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
# Resign the code if required by the build settings to avoid unstable apps
code_sign_if_enabled "${destination}/$(basename "$1")"
# Embed linked Swift runtime libraries
local basename
basename="$(basename "$1" | sed -E s/\\..+// && exit ${PIPESTATUS[0]})"
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}/${basename}.framework/${basename}" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]})
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
code_sign_if_enabled "${destination}/${lib}"
done
}
# Signs a framework with the provided identity
code_sign_if_enabled() {
if [ -n "${EXPANDED_CODE_SIGN_IDENTITY}" -a "${CODE_SIGNING_REQUIRED}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
# Use the current code_sign_identitiy
echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
echo "/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} --preserve-metadata=identifier,entitlements \"$1\""
/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} --preserve-metadata=identifier,entitlements "$1"
fi
}
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_framework 'Pods-TraceJobs_Example/TraceJobs.framework'
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_framework 'Pods-TraceJobs_Example/TraceJobs.framework'
fi
|
alpinereplay/trace_jobs
|
Example/Pods/Target Support Files/Pods-TraceJobs_Example/Pods-TraceJobs_Example-frameworks.sh
|
Shell
|
mit
| 2,612 |