| code | repo_name | path | language | license | size |
---|---|---|---|---|---|
#!/bin/bash
export VMNAME=${VMNAME:-"$1"}
export VMNAME=${VMNAME:-"omerovm"}
export TARGET=${2:-"QA"}
export MEMORY=${MEMORY:-"1024"}
export SSH_PF=${SSH_PF:-"2222"}
export OMERO_PORT=${OMERO_PORT:-"4063"}
export OMERO_PF=${OMERO_PF:-"4063"}
export OMEROS_PORT=${OMEROS_PORT:-"4064"}
export OMEROS_PF=${OMEROS_PF:-"4064"}
export RELEASE_VERSION="4.3.4"
set -e
set -u
set -x
VBOX="VBoxManage --nologo"
OS=`uname -s`
ATTEMPTS=0
MAXATTEMPTS=5
DELAY=2
NATADDR="10.0.2.15"
##################
##################
# SCRIPT FUNCTIONS
##################
##################
function checknet ()
{
UP=$($VBOX guestproperty enumerate $VMNAME | grep "$NATADDR") || true
ATTEMPTS=$(($ATTEMPTS + 1))
}
function installvm ()
{
ssh-keygen -R "[localhost]:${SSH_PF}" -f ~/.ssh/known_hosts
chmod 600 ./omerovmkey
SCP="scp -2 -o NoHostAuthenticationForLocalhost=yes -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o CheckHostIP=no -o PasswordAuthentication=no -o ChallengeResponseAuthentication=no -o PreferredAuthentications=publickey -i omerovmkey -P $SSH_PF"
SSH="ssh -2 -o StrictHostKeyChecking=no -i omerovmkey -p $SSH_PF -t"
echo "Copying scripts to VM"
$SCP driver.sh omero@localhost:~/
$SCP setup_userspace.sh omero@localhost:~/
$SCP setup_postgres.sh omero@localhost:~/
$SCP setup_environment.sh omero@localhost:~/
$SCP setup_omero.sh omero@localhost:~/
$SCP setup_nginx.sh omero@localhost:~/
$SCP setup_omero_daemon.sh omero@localhost:~/
$SCP omero-init.d omero@localhost:~/
$SCP omero-web-init.d omero@localhost:~/
$SCP virtualbox-network-fix-init.d omero@localhost:~/
$SCP virtualbox_fix.sh omero@localhost:~/
$SCP nginx-control.sh omero@localhost:~/
echo "ssh : exec driver.sh"
$SSH omero@localhost "bash /home/omero/driver.sh ${TARGET}"
sleep 10
echo "ALL DONE!"
}
function failfast ()
{
exit 1
}
function poweroffvm ()
{
$VBOX list runningvms | grep "$VMNAME" && {
VBoxManage controlvm "$VMNAME" poweroff && sleep 10
} || true
}
function poweronvm ()
{
$VBOX list runningvms | grep "$VMNAME" || {
$VBOX startvm "$VMNAME" --type headless && sleep 45
}
}
function rebootvm ()
{
poweroffvm
poweronvm
}
function killallvbox ()
{
ps aux | grep [V]Box && {
if [ "$OS" == "Darwin" ]; then
killall -m [V]Box
elif [ "$OS" == "Linux" ]; then
killall -r [V]Box
fi
} || true
ps aux | grep [V]irtualBox && {
if [ "$OS" == "Darwin" ]; then
killall -m [V]irtualBox
elif [ "$OS" == "Linux" ]; then
killall -r [V]irtualBox
fi
} || true
}
function checkhddfolder ()
{
if test -e $HOME/Library/VirtualBox; then
export HARDDISKS=${HARDDISKS:-"$HOME/Library/VirtualBox/HardDisks/"}
elif test -e $HOME/.VirtualBox; then
export HARDDISKS=${HARDDISKS:-"$HOME/.VirtualBox/HardDisks/"}
else
echo "Cannot find harddisks! Trying setting HARDDISKS"
failfast
fi
}
function deletevm ()
{
poweroffvm
$VBOX list vms | grep "$VMNAME" && {
VBoxManage storageattach "$VMNAME" --storagectl "SATA CONTROLLER" --port 0 --device 0 --type hdd --medium none
VBoxManage unregistervm "$VMNAME" --delete
VBoxManage closemedium disk $HARDDISKS"$VMNAME".vdi --delete
} || true
}
function createvm ()
{
$VBOX list vms | grep "$VMNAME" || {
VBoxManage clonehd "${HARDDISKS}omero-base-img_2011-08-08.vdi" "${HARDDISKS}${VMNAME}.vdi"
VBoxManage createvm --name "$VMNAME" --register --ostype "Debian"
VBoxManage storagectl "$VMNAME" --name "SATA CONTROLLER" --add sata
VBoxManage storageattach "$VMNAME" --storagectl "SATA CONTROLLER" --port 0 --device 0 --type hdd --medium $HARDDISKS$VMNAME.vdi
VBoxManage modifyvm "$VMNAME" --nic1 nat --nictype1 "82545EM"
VBoxManage modifyvm "$VMNAME" --memory $MEMORY --acpi on
VBoxManage modifyvm "$VMNAME" --natpf1 "ssh,tcp,127.0.0.1,2222,10.0.2.15,22"
VBoxManage modifyvm "$VMNAME" --natpf1 "omero-unsec,tcp,127.0.0.1,4063,10.0.2.15,4063"
VBoxManage modifyvm "$VMNAME" --natpf1 "omero-ssl,tcp,127.0.0.1,4064,10.0.2.15,4064"
VBoxManage modifyvm "$VMNAME" --natpf1 "omero-web,tcp,127.0.0.1,8080,10.0.2.15,8080"
}
}
####################
####################
# SCRIPT ENTRY POINT
####################
####################
checkhddfolder
killallvbox
deletevm
createvm
poweronvm
checknet
if [[ -z "$UP" ]]
then
while [[ -z "$UP" && $ATTEMPTS -lt $MAXATTEMPTS ]]
do
rebootvm
checknet
sleep $DELAY
done
if [[ -z "$UP" ]]
then
echo "No connection to x. Failure after $ATTEMPTS tries"
failfast
fi
fi
echo "Network up after $ATTEMPTS tries"
installvm
if [ "$TARGET" == "QA" ]; then
EXPORTVMNAME="${VMNAME}-latest-build"
else
EXPORTVMNAME="${VMNAME}-${RELEASE_VERSION}"
fi
bash export_ova.sh ${VMNAME} ${EXPORTVMNAME}
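# A minimal invocation sketch implied by the argument handling at the top of
# this script (the VM name is illustrative):
#   ./omerovm.sh myvm QA        # build VM "myvm" against the QA target
#   MEMORY=2048 ./omerovm.sh    # defaults: VMNAME=omerovm, TARGET=QA, 2 GB RAM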
| rleigh-dundee/openmicroscopy | docs/install/VM/omerovm.sh | Shell | gpl-2.0 | 4,682 |
#!/bin/bash
docker rm -f sismics_reader
docker run \
-d --name=sismics_reader --restart=always \
--link sismics_reader_hsqldb:sismics_reader_hsqldb \
--volumes-from=sismics_reader_data \
-e 'VIRTUAL_HOST_SECURE=reader.sismics.com' -e 'VIRTUAL_PORT=80' \
sismics/reader:latest
| Feitianyuan/reader | run-service.sh | Shell | gpl-2.0 | 296 |
#!/usr/bin/env bash
# This file contains environment variables required to run Spark. Copy it as
# spark-env.sh and edit that to configure Spark for your site.
#
# The following variables can be set in this file:
# - SPARK_LOCAL_IP, to set the IP address Spark binds to on this node
# - MESOS_NATIVE_LIBRARY, to point to your libmesos.so if you use Mesos
# - SPARK_JAVA_OPTS, to set node-specific JVM options for Spark. Note that
# we recommend setting app-wide options in the application's driver program.
# Examples of node-specific options : -Dspark.local.dir, GC options
# Examples of app-wide options : -Dspark.serializer
#
# If using the standalone deploy mode, you can also set variables for it here:
# - SPARK_MASTER_HOST, to bind the master to a different IP address or hostname
# - SPARK_MASTER_PORT / SPARK_MASTER_WEBUI_PORT, to use non-default ports
# - SPARK_WORKER_CORES, to set the number of cores to use on this machine
# - SPARK_WORKER_MEMORY, to set how much memory to use (e.g. 1000m, 2g)
# - SPARK_WORKER_PORT / SPARK_WORKER_WEBUI_PORT
# - SPARK_WORKER_INSTANCES, to set the number of worker processes per node
# - SPARK_WORKER_DIR, to set the working directory of worker processes
# Additional variables:
# SPARK_DAEMON_MEMORY Memory to allocate to the Spark master and
# worker daemons themselves (default: 512m).
# SPARK_DAEMON_JAVA_OPTS JVM options for the Spark master and worker
# daemons themselves (default: none).
# PYSPARK_PYTHON, the Python binary to use for PySpark
# SPARK_LIBRARY_PATH, to add search directories for native libraries.
# SPARK_CLASSPATH, to add elements to the classpath that you want to be
# present for all applications. Note that applications can also add
# dependencies for themselves through SparkContext.addJar
# SPARK_JAVA_OPTS, to add JVM options. This includes Java options
# like garbage collector settings and any system properties that you'd
# like to pass with -D (e.g., -Dspark.local.dir=/disk1,/disk2).
export JAVA_HOME="SPARK_JAVA_HOME"
export SPARK_HOME="SPARKHOME"
export SPARK_DAEMON_MEMORY="SPARK_DAEMON_HEAP_MAXm"
export SPARK_MASTER_HOST="SPARKMASTERHOST"
export SPARK_MASTER_PORT="SPARKMASTERPORT"
export SPARK_MASTER_WEBUI_PORT="SPARKMASTERWEBUIPORT"
export SPARK_WORKER_WEBUI_PORT="SPARKWORKERWEBUIPORT"
export SPARK_WORKER_CORES="SPARKWORKERCORES"
export SPARK_WORKER_MEMORY="SPARKWORKERMEMORYm"
export SPARK_WORKER_DIR="SPARKWORKERDIR"
export SPARK_DRIVER_MEMORY="SPARKDRIVERMEMORYm"
export SPARK_PID_DIR="SPARKPIDDIR"
export SPARK_DAEMON_JAVA_OPTS="SPARKDAEMONJAVAOPTS"
export PYSPARK_PYTHON="SPARKPYSPARKPYTHON"
| LLNL/magpie | conf/spark/spark-env-2.X.sh | Shell | gpl-2.0 | 2,614 |
#!/bin/sh
###################################################################
# connect.sh #
# #
# Description: Connects a PEN to a specified CCN #
# #
# Synopsis: #
# connect.sh [ip] [port] [protocol] #
# #
# Date: 05/07/2005 #
# #
# Author: Jan Capek #
# #
# $Id: connect.sh,v 1.3 2007/09/02 21:49:42 stavam2 Exp $ #
# #
###################################################################
# connection string, there is a default value
#connstr=${1:-'tcp:192.168.0.5:54321'}
connip=${1:-'192.168.1.161'}
connport=${2:-'54321'}
connprot=${3:-'tcp'}
connstr=${connprot}:${connip}:${connport}
connfile=/clondike/pen/connect
# check if there is connection file present
[ ! -w $connfile ] && {
echo Can\'t connect, missing connection file:\'$connfile\'
exit 1
}
#echo 9p > /clondike/pen/fs-mount
insmod ../src/proxyfs/proxyfs.ko
echo -n Connecting to $connstr..
echo $connstr > $connfile
echo done.
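# Usage sketch matching the positional arguments above (addresses illustrative):
#   ./connect.sh 192.168.1.161 54321 tcp   # explicit ip, port and protocol
#   ./connect.sh                           # all three fall back to the defaults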
| novyzde3/Test_clondike | scripts/devel/connect.sh | Shell | gpl-2.0 | 1,599 |
#!/usr/bin/env bash
#
# Copyright 2014-2017, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------- Import ---------------------#
export BASE_DIR=${BASE_DIR:-"/opt/rpc-openstack"}
source ${BASE_DIR}/scripts/functions.sh
# ------------ End import -------------------#
export WORKSPACE=${WORKSPACE:-"$HOME"}
echo "Archiving logs and configs..."
d="${WORKSPACE}/logs"
mkdir -p $d
# logs and config from host
mkdir -p $d/$HOSTNAME/log
cp -rp /openstack/log/$HOSTNAME-* $d/$HOSTNAME/log ||:
cp -rp /etc/ $d/$HOSTNAME/etc
cp -rp /var/log/ $d/$HOSTNAME/var_log
# logs and config from the containers
while read c; do
mkdir -p $d/$c/log
cp -rp /openstack/log/$c/* $d/$c/log 2>/dev/null ||:
cp -rp /var/lib/lxc/$c/rootfs/etc $d/$c 2>/dev/null ||:
cp -rp /var/lib/lxc/$c/delta0/etc $d/$c 2>/dev/null ||:
done < <(lxc-ls)
# compress to reduce storage space requirements
ARTIFACT_SIZE=$(du -sh $d | cut -f1)
echo "Compressing $ARTIFACT_SIZE of artifact files..."
tar cjf "$d".tar.bz2 $d
echo "Compression complete."
rm -rf ${d}
| major/rpc-openstack | scripts/gather-artifacts.sh | Shell | apache-2.0 | 1,556 |
#!/usr/bin/env bash
set -e
# This script builds various binary artifacts from a checkout of the docker
# source code.
#
# Requirements:
# - The current directory should be a checkout of the docker source code
# (https://github.com/docker/docker). Whatever version is checked out
# will be built.
# - The VERSION file, at the root of the repository, should exist, and
# will be used as Docker binary version and package version.
# - The hash of the git commit will also be included in the Docker binary,
# with the suffix -unsupported if the repository isn't clean.
# - The script is intended to be run inside the docker container specified
# in the Dockerfile at the root of the source. In other words:
# DO NOT CALL THIS SCRIPT DIRECTLY.
# - The right way to call this script is to invoke "make" from
# your checkout of the Docker repository.
# the Makefile will do a "docker build -t docker ." and then
# "docker run hack/make.sh" in the resulting image.
#
set -o pipefail
export DOCKER_PKG='github.com/docker/docker'
export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
export MAKEDIR="$SCRIPTDIR/make"
# We're a nice, sexy, little shell script, and people might try to run us;
# but really, they shouldn't. We want to be in a container!
if [ "$PWD" != "/go/src/$DOCKER_PKG" ] || [ -z "$DOCKER_CROSSPLATFORMS" ]; then
{
echo "# WARNING! I don't seem to be running in the Docker container."
echo "# The result of this command might be an incorrect build, and will not be"
echo "# officially supported."
echo "#"
echo "# Try this instead: make all"
echo "#"
} >&2
fi
echo
# List of bundles to create when no argument is passed
DEFAULT_BUNDLES=(
validate-dco
validate-gofmt
validate-lint
validate-pkg
validate-test
validate-toml
validate-vet
binary
dynbinary
test-unit
test-integration-cli
test-docker-py
cover
cross
tgz
)
VERSION=$(< ./VERSION)
if command -v git &> /dev/null && git rev-parse &> /dev/null; then
GITCOMMIT=$(git rev-parse --short HEAD)
if [ -n "$(git status --porcelain --untracked-files=no)" ]; then
GITCOMMIT="$GITCOMMIT-unsupported"
fi
! BUILDTIME=$(date --rfc-3339 ns | sed -e 's/ /T/') &> /dev/null
if [ -z "$BUILDTIME" ]; then
# If using bash 3.1 which doesn't support --rfc-3339, eg Windows CI
BUILDTIME=$(date -u)
fi
elif [ "$DOCKER_GITCOMMIT" ]; then
GITCOMMIT="$DOCKER_GITCOMMIT"
else
echo >&2 'error: .git directory missing and DOCKER_GITCOMMIT not specified'
echo >&2 ' Please either build with the .git directory accessible, or specify the'
echo >&2 ' exact (--short) commit hash you are building using DOCKER_GITCOMMIT for'
echo >&2 ' future accountability in diagnosing build issues. Thanks!'
exit 1
fi
if [ "$AUTO_GOPATH" ]; then
rm -rf .gopath
mkdir -p .gopath/src/"$(dirname "${DOCKER_PKG}")"
ln -sf ../../../.. .gopath/src/"${DOCKER_PKG}"
export GOPATH="${PWD}/.gopath:${PWD}/vendor"
fi
if [ ! "$GOPATH" ]; then
echo >&2 'error: missing GOPATH; please see https://golang.org/doc/code.html#GOPATH'
echo >&2 ' alternatively, set AUTO_GOPATH=1'
exit 1
fi
if [ "$DOCKER_EXPERIMENTAL" ]; then
echo >&2 '# WARNING! DOCKER_EXPERIMENTAL is set: building experimental features'
echo >&2
DOCKER_BUILDTAGS+=" experimental pkcs11"
fi
if [ -z "$DOCKER_CLIENTONLY" ]; then
DOCKER_BUILDTAGS+=" daemon"
if pkg-config libsystemd-journal 2> /dev/null ; then
DOCKER_BUILDTAGS+=" journald"
fi
fi
# test whether "btrfs/version.h" exists and apply btrfs_noversion appropriately
if \
command -v gcc &> /dev/null \
&& ! gcc -E - -o /dev/null &> /dev/null <<<'#include <btrfs/version.h>' \
; then
DOCKER_BUILDTAGS+=' btrfs_noversion'
fi
# test whether "libdevmapper.h" is new enough to support deferred remove
# functionality.
if \
command -v gcc &> /dev/null \
&& ! ( echo -e '#include <libdevmapper.h>\nint main() { dm_task_deferred_remove(NULL); }'| gcc -ldevmapper -xc - -o /dev/null &> /dev/null ) \
; then
DOCKER_BUILDTAGS+=' libdm_no_deferred_remove'
fi
# Use these flags when compiling the tests and final binary
IAMSTATIC='true'
source "$SCRIPTDIR/make/.go-autogen"
if [ -z "$DOCKER_DEBUG" ]; then
LDFLAGS='-w'
fi
LDFLAGS_STATIC=''
EXTLDFLAGS_STATIC='-static'
# ORIG_BUILDFLAGS is necessary for the cross target which cannot always build
# with options like -race.
ORIG_BUILDFLAGS=( -a -tags "autogen netgo static_build sqlite_omit_load_extension $DOCKER_BUILDTAGS" -installsuffix netgo )
# see https://github.com/golang/go/issues/9369#issuecomment-69864440 for why -installsuffix is necessary here
BUILDFLAGS=( $BUILDFLAGS "${ORIG_BUILDFLAGS[@]}" )
# Test timeout.
: ${TIMEOUT:=180m}
TESTFLAGS+=" -test.timeout=${TIMEOUT}"
LDFLAGS_STATIC_DOCKER="
$LDFLAGS_STATIC
-extldflags \"$EXTLDFLAGS_STATIC\"
"
if [ "$(uname -s)" = 'FreeBSD' ]; then
# Tell cgo the compiler is Clang, not GCC
# https://code.google.com/p/go/source/browse/src/cmd/cgo/gcc.go?spec=svne77e74371f2340ee08622ce602e9f7b15f29d8d3&r=e6794866ebeba2bf8818b9261b54e2eef1c9e588#752
export CC=clang
# "-extld clang" is a workaround for
# https://code.google.com/p/go/issues/detail?id=6845
LDFLAGS="$LDFLAGS -extld clang"
fi
# If sqlite3.h doesn't exist under /usr/include,
# check /usr/local/include also just in case
# (e.g. FreeBSD Ports installs it under the directory)
if [ ! -e /usr/include/sqlite3.h ] && [ -e /usr/local/include/sqlite3.h ]; then
export CGO_CFLAGS='-I/usr/local/include'
export CGO_LDFLAGS='-L/usr/local/lib'
fi
HAVE_GO_TEST_COVER=
if \
go help testflag | grep -- -cover > /dev/null \
&& go tool -n cover > /dev/null 2>&1 \
; then
HAVE_GO_TEST_COVER=1
fi
# If $TESTFLAGS is set in the environment, it is passed as extra arguments to 'go test'.
# You can use this to select certain tests to run, eg.
#
# TESTFLAGS='-test.run ^TestBuild$' ./hack/make.sh test-unit
#
# For integration-cli test, we use [gocheck](https://labix.org/gocheck), if you want
# to run certain tests on your local host, you should run with command:
#
# TESTFLAGS='-check.f DockerSuite.TestBuild*' ./hack/make.sh binary test-integration-cli
#
go_test_dir() {
dir=$1
coverpkg=$2
testcover=()
if [ "$HAVE_GO_TEST_COVER" ]; then
# if our current go install has -cover, we want to use it :)
mkdir -p "$DEST/coverprofiles"
coverprofile="docker${dir#.}"
coverprofile="$ABS_DEST/coverprofiles/${coverprofile//\//-}"
testcover=( -cover -coverprofile "$coverprofile" $coverpkg )
fi
(
echo '+ go test' $TESTFLAGS "${DOCKER_PKG}${dir#.}"
cd "$dir"
export DEST="$ABS_DEST" # we're in a subshell, so this is safe -- our integration-cli tests need DEST, and "cd" screws it up
test_env go test ${testcover[@]} -ldflags "$LDFLAGS" "${BUILDFLAGS[@]}" $TESTFLAGS
)
}
test_env() {
# use "env -i" to tightly control the environment variables that bleed into the tests
env -i \
DEST="$DEST" \
DOCKER_ENGINE_GOARCH="$DOCKER_ENGINE_GOARCH" \
DOCKER_GRAPHDRIVER="$DOCKER_GRAPHDRIVER" \
DOCKER_USERLANDPROXY="$DOCKER_USERLANDPROXY" \
DOCKER_HOST="$DOCKER_HOST" \
DOCKER_REMAP_ROOT="$DOCKER_REMAP_ROOT" \
DOCKER_REMOTE_DAEMON="$DOCKER_REMOTE_DAEMON" \
GOPATH="$GOPATH" \
HOME="$ABS_DEST/fake-HOME" \
PATH="$PATH" \
TEMP="$TEMP" \
TEST_DOCKERINIT_PATH="$TEST_DOCKERINIT_PATH" \
"$@"
}
# a helper to provide ".exe" when it's appropriate
binary_extension() {
if [ "$(go env GOOS)" = 'windows' ]; then
echo -n '.exe'
fi
}
hash_files() {
while [ $# -gt 0 ]; do
f="$1"
shift
dir="$(dirname "$f")"
base="$(basename "$f")"
for hashAlgo in md5 sha256; do
if command -v "${hashAlgo}sum" &> /dev/null; then
(
# subshell and cd so that we get output files like:
# $HASH docker-$VERSION
# instead of:
# $HASH /go/src/github.com/.../$VERSION/binary/docker-$VERSION
cd "$dir"
"${hashAlgo}sum" "$base" > "$base.$hashAlgo"
)
fi
done
done
}
bundle() {
local bundle="$1"; shift
echo "---> Making bundle: $(basename "$bundle") (in $DEST)"
source "$SCRIPTDIR/make/$bundle" "$@"
}
main() {
# We want this to fail if the bundles already exist and cannot be removed.
# This is to avoid mixing bundles from different versions of the code.
mkdir -p bundles
if [ -e "bundles/$VERSION" ] && [ -z "$KEEPBUNDLE" ]; then
echo "bundles/$VERSION already exists. Removing."
rm -fr "bundles/$VERSION" && mkdir "bundles/$VERSION" || exit 1
echo
fi
if [ "$(go env GOHOSTOS)" != 'windows' ]; then
# Windows and symlinks don't get along well
rm -f bundles/latest
ln -s "$VERSION" bundles/latest
fi
if [ $# -lt 1 ]; then
bundles=(${DEFAULT_BUNDLES[@]})
else
bundles=($@)
fi
for bundle in ${bundles[@]}; do
export DEST="bundles/$VERSION/$(basename "$bundle")"
# Cygdrive paths don't play well with go build -o.
if [[ "$(uname -s)" == CYGWIN* ]]; then
export DEST="$(cygpath -mw "$DEST")"
fi
mkdir -p "$DEST"
ABS_DEST="$(cd "$DEST" && pwd -P)"
bundle "$bundle"
echo
done
}
main "$@"
| mssola/zypper-docker | vendor/github.com/docker/docker/hack/make.sh | Shell | apache-2.0 | 8,926 |
#!/bin/bash
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This script updates ghardy boxes to add the gcc 4.1.1 compatibility libs.
# Usage: update_ghardy.sh
# Require root.
if [ `whoami` != root ]
then
echo ---------------
echo No Root Permission
echo ---------------
echo
echo You must have root privileges to do this update.
echo Log in as the root user or use the "sudo" command to run this installer.
echo
exit 1
fi
# Unpack the compatibility libs and move them under
# /opt/google/lib64
tar xzf ghardy-compatibility-libs.tar.gz
mv compatibility /opt/google/lib64
cd /opt/google/lib64/compatibility
# Create the necessary symlinks.
ln -f -s ld-2.3.3.so ld-linux-x86-64.so.2
ln -f -s libICE.so.6.3 libICE.so.6
ln -f -s libSM.so.6.0 libSM.so.6
ln -f -s libX11.so.6.2 libX11.so.6
ln -f -s libXcursor.so.1.0.2 libXcursor.so.1
ln -f -s libXext.so.6.4 libXext.so.6
ln -f -s libXft.so.2.1.1 libXft.so.2
ln -f -s libXinerama.so.1.0 libXinerama.so.1
ln -f -s libXmu.so.6.2 libXmu.so.6
ln -f -s libXrandr.so.2.0 libXrandr.so.2
ln -f -s libXrender.so.1.2.2 libXrender.so.1
ln -f -s libXt.so.6.0 libXt.so.6
ln -f -s libfontconfig.so.1.0.4 libfontconfig.so.1
ln -f -s libfreetype.so.6.3.5 libfreetype.so.6
ln -f -s libjpeg.so.62.0.0 libjpeg.so.62
ln -f -s libjpeg.so.62.0.0 libjpeg.so
ln -f -s liblcms.so.1.0.12 liblcms.so.1
ln -f -s libpng12.so.0.1.2.5 libpng12.so.0
ln -f -s libpng12.so.0 libpng12.so
ln -f -s libsasl2.so.2.0.18 libsasl2.so.2
ln -f -s libz.so.1 libgz.so.1
ln -f -s libz.so.1 libz.so
| iparanza/earthenterprise | earth_enterprise/rpms/build_scripts/update_ghardy.sh | Shell | apache-2.0 | 2,068 |
#!/bin/bash
$BABELOMICS_HOME/babelomics.sh --tool network-miner $*
| kalyanreddyemani/opencga | opencga-app/build/tools/network-miner/network-miner.sh | Shell | apache-2.0 | 66 |
#!/bin/bash
set -e
echo "Getting lastest CSS and Script resources..."
curl https://developers.google.com/_static/css/devsite-google-blue.css > gae/styles/devsite-google-blue.css
curl https://developers.google.com/_static/js/framebox.js > gae/scripts/framebox.js
curl https://developers.google.com/_static/js/jquery_ui-bundle.js > gae/scripts/jquery_ui-bundle.js
curl https://ajax.googleapis.com/ajax/libs/jquery/2.1.1/jquery.min.js > gae/scripts/jquery-2.1.1.min.js
curl https://developers.google.com/_static/js/prettify-bundle.js > gae/scripts/prettify-bundle.js
curl https://developers.google.com/_static/js/script_foot_closure.js > gae/scripts/footer-closure.js
| abdshomad/WebFundamentals | tools/update-resources.sh | Shell | apache-2.0 | 667 |
#!/bin/bash
echo "mode: set" > acc.out
FAIL=0
# Standard go tooling behavior is to ignore dirs with leading underscores
for dir in $(find . -maxdepth 10 -not -path './.git*' -not -path '*/_*' -type d);
do
if ls $dir/*.go &> /dev/null; then
go test -coverprofile=profile.out $dir || FAIL=$?
if [ -f profile.out ]
then
cat profile.out | grep -v "mode: set" | grep -v "mocks.go" >> acc.out
rm profile.out
fi
fi
done
# Failures have incomplete results, so don't send
if [ "$FAIL" -eq 0 ]; then
goveralls -service=travis-ci -v -coverprofile=acc.out
fi
rm -f acc.out
exit $FAIL
| chinanjjohn2012/go-torch | .test-cover.sh | Shell | mit | 610 |
#!/bin/sh
#
# Copyright (c) 2007 Johannes E. Schindelin
#
test_description='git rebase interactive
This test runs git rebase "interactively", by faking an edit, and verifies
that the result still makes sense.
Initial setup:
one - two - three - four (conflict-branch)
/
A - B - C - D - E (master)
| \
| F - G - H (branch1)
| \
|\ I (branch2)
| \
| J - K - L - M (no-conflict-branch)
\
N - O - P (no-ff-branch)
where A, B, D and G all touch file1, and one, two, three, four all
touch file "conflict".
'
. ./test-lib.sh
. "$TEST_DIRECTORY"/lib-rebase.sh
test_cmp_rev () {
git rev-parse --verify "$1" >expect.rev &&
git rev-parse --verify "$2" >actual.rev &&
test_cmp expect.rev actual.rev
}
set_fake_editor
# WARNING: Modifications to the initial repository can change the SHA ID used
# in the expect2 file for the 'stop on conflicting pick' test.
test_expect_success 'setup' '
test_commit A file1 &&
test_commit B file1 &&
test_commit C file2 &&
test_commit D file1 &&
test_commit E file3 &&
git checkout -b branch1 A &&
test_commit F file4 &&
test_commit G file1 &&
test_commit H file5 &&
git checkout -b branch2 F &&
test_commit I file6 &&
git checkout -b conflict-branch A &&
test_commit one conflict &&
test_commit two conflict &&
test_commit three conflict &&
test_commit four conflict &&
git checkout -b no-conflict-branch A &&
test_commit J fileJ &&
test_commit K fileK &&
test_commit L fileL &&
test_commit M fileM &&
git checkout -b no-ff-branch A &&
test_commit N fileN &&
test_commit O fileO &&
test_commit P fileP
'
# "exec" commands are ran with the user shell by default, but this may
# be non-POSIX. For example, if SHELL=zsh then ">file" doesn't work
# to create a file. Unseting SHELL avoids such non-portable behavior
# in tests. It must be exported for it to take effect where needed.
SHELL=
export SHELL
test_expect_success 'rebase -i with the exec command' '
git checkout master &&
(
FAKE_LINES="1 exec_>touch-one
2 exec_>touch-two exec_false exec_>touch-three
3 4 exec_>\"touch-file__name_with_spaces\";_>touch-after-semicolon 5" &&
export FAKE_LINES &&
test_must_fail git rebase -i A
) &&
test_path_is_file touch-one &&
test_path_is_file touch-two &&
test_path_is_missing touch-three " (should have stopped before)" &&
test_cmp_rev C HEAD &&
git rebase --continue &&
test_path_is_file touch-three &&
test_path_is_file "touch-file name with spaces" &&
test_path_is_file touch-after-semicolon &&
test_cmp_rev master HEAD &&
rm -f touch-*
'
test_expect_success 'rebase -i with the exec command runs from tree root' '
git checkout master &&
mkdir subdir && (cd subdir &&
FAKE_LINES="1 exec_>touch-subdir" \
git rebase -i HEAD^
) &&
test_path_is_file touch-subdir &&
rm -fr subdir
'
test_expect_success 'rebase -i with the exec command checks tree cleanness' '
git checkout master &&
(
FAKE_LINES="exec_echo_foo_>file1 1" &&
export FAKE_LINES &&
test_must_fail git rebase -i HEAD^
) &&
test_cmp_rev master^ HEAD &&
git reset --hard &&
git rebase --continue
'
test_expect_success 'no changes are a nop' '
git checkout branch2 &&
git rebase -i F &&
test "$(git symbolic-ref -q HEAD)" = "refs/heads/branch2" &&
test $(git rev-parse I) = $(git rev-parse HEAD)
'
test_expect_success 'test the [branch] option' '
git checkout -b dead-end &&
git rm file6 &&
git commit -m "stop here" &&
git rebase -i F branch2 &&
test "$(git symbolic-ref -q HEAD)" = "refs/heads/branch2" &&
test $(git rev-parse I) = $(git rev-parse branch2) &&
test $(git rev-parse I) = $(git rev-parse HEAD)
'
test_expect_success 'test --onto <branch>' '
git checkout -b test-onto branch2 &&
git rebase -i --onto branch1 F &&
test "$(git symbolic-ref -q HEAD)" = "refs/heads/test-onto" &&
test $(git rev-parse HEAD^) = $(git rev-parse branch1) &&
test $(git rev-parse I) = $(git rev-parse branch2)
'
test_expect_success 'rebase on top of a non-conflicting commit' '
git checkout branch1 &&
git tag original-branch1 &&
git rebase -i branch2 &&
test file6 = $(git diff --name-only original-branch1) &&
test "$(git symbolic-ref -q HEAD)" = "refs/heads/branch1" &&
test $(git rev-parse I) = $(git rev-parse branch2) &&
test $(git rev-parse I) = $(git rev-parse HEAD~2)
'
test_expect_success 'reflog for the branch shows state before rebase' '
test $(git rev-parse branch1@{1}) = $(git rev-parse original-branch1)
'
test_expect_success 'exchange two commits' '
FAKE_LINES="2 1" git rebase -i HEAD~2 &&
test H = $(git cat-file commit HEAD^ | sed -ne \$p) &&
test G = $(git cat-file commit HEAD | sed -ne \$p)
'
cat > expect << EOF
diff --git a/file1 b/file1
index f70f10e..fd79235 100644
--- a/file1
+++ b/file1
@@ -1 +1 @@
-A
+G
EOF
cat > expect2 << EOF
<<<<<<< HEAD
D
=======
G
>>>>>>> 5d18e54... G
EOF
test_expect_success 'stop on conflicting pick' '
git tag new-branch1 &&
test_must_fail git rebase -i master &&
test "$(git rev-parse HEAD~3)" = "$(git rev-parse master)" &&
test_cmp expect .git/rebase-merge/patch &&
test_cmp expect2 file1 &&
test "$(git diff --name-status |
sed -n -e "/^U/s/^U[^a-z]*//p")" = file1 &&
test 4 = $(grep -v "^#" < .git/rebase-merge/done | wc -l) &&
test 0 = $(grep -c "^[^#]" < .git/rebase-merge/git-rebase-todo)
'
test_expect_success 'abort' '
git rebase --abort &&
test $(git rev-parse new-branch1) = $(git rev-parse HEAD) &&
test "$(git symbolic-ref -q HEAD)" = "refs/heads/branch1" &&
test_path_is_missing .git/rebase-merge
'
test_expect_success 'abort with error when new base cannot be checked out' '
git rm --cached file1 &&
git commit -m "remove file in base" &&
test_must_fail git rebase -i master > output 2>&1 &&
grep "The following untracked working tree files would be overwritten by checkout:" \
output &&
grep "file1" output &&
test_path_is_missing .git/rebase-merge &&
git reset --hard HEAD^
'
test_expect_success 'retain authorship' '
echo A > file7 &&
git add file7 &&
test_tick &&
GIT_AUTHOR_NAME="Twerp Snog" git commit -m "different author" &&
git tag twerp &&
git rebase -i --onto master HEAD^ &&
git show HEAD | grep "^Author: Twerp Snog"
'
test_expect_success 'squash' '
git reset --hard twerp &&
echo B > file7 &&
test_tick &&
GIT_AUTHOR_NAME="Nitfol" git commit -m "nitfol" file7 &&
echo "******************************" &&
FAKE_LINES="1 squash 2" EXPECT_HEADER_COUNT=2 \
git rebase -i --onto master HEAD~2 &&
test B = $(cat file7) &&
test $(git rev-parse HEAD^) = $(git rev-parse master)
'
test_expect_success 'retain authorship when squashing' '
git show HEAD | grep "^Author: Twerp Snog"
'
test_expect_success '-p handles "no changes" gracefully' '
HEAD=$(git rev-parse HEAD) &&
git rebase -i -p HEAD^ &&
git update-index --refresh &&
git diff-files --quiet &&
git diff-index --quiet --cached HEAD -- &&
test $HEAD = $(git rev-parse HEAD)
'
test_expect_failure 'exchange two commits with -p' '
FAKE_LINES="2 1" git rebase -i -p HEAD~2 &&
test H = $(git cat-file commit HEAD^ | sed -ne \$p) &&
test G = $(git cat-file commit HEAD | sed -ne \$p)
'
test_expect_success 'preserve merges with -p' '
git checkout -b to-be-preserved master^ &&
: > unrelated-file &&
git add unrelated-file &&
test_tick &&
git commit -m "unrelated" &&
git checkout -b another-branch master &&
echo B > file1 &&
test_tick &&
git commit -m J file1 &&
test_tick &&
git merge to-be-preserved &&
echo C > file1 &&
test_tick &&
git commit -m K file1 &&
echo D > file1 &&
test_tick &&
git commit -m L1 file1 &&
git checkout HEAD^ &&
echo 1 > unrelated-file &&
test_tick &&
git commit -m L2 unrelated-file &&
test_tick &&
git merge another-branch &&
echo E > file1 &&
test_tick &&
git commit -m M file1 &&
git checkout -b to-be-rebased &&
test_tick &&
git rebase -i -p --onto branch1 master &&
git update-index --refresh &&
git diff-files --quiet &&
git diff-index --quiet --cached HEAD -- &&
test $(git rev-parse HEAD~6) = $(git rev-parse branch1) &&
test $(git rev-parse HEAD~4^2) = $(git rev-parse to-be-preserved) &&
test $(git rev-parse HEAD^^2^) = $(git rev-parse HEAD^^^) &&
test $(git show HEAD~5:file1) = B &&
test $(git show HEAD~3:file1) = C &&
test $(git show HEAD:file1) = E &&
test $(git show HEAD:unrelated-file) = 1
'
test_expect_success 'edit ancestor with -p' '
FAKE_LINES="1 2 edit 3 4" git rebase -i -p HEAD~3 &&
echo 2 > unrelated-file &&
test_tick &&
git commit -m L2-modified --amend unrelated-file &&
git rebase --continue &&
git update-index --refresh &&
git diff-files --quiet &&
git diff-index --quiet --cached HEAD -- &&
test $(git show HEAD:unrelated-file) = 2
'
test_expect_success '--continue tries to commit' '
test_tick &&
test_must_fail git rebase -i --onto new-branch1 HEAD^ &&
echo resolved > file1 &&
git add file1 &&
FAKE_COMMIT_MESSAGE="chouette!" git rebase --continue &&
test $(git rev-parse HEAD^) = $(git rev-parse new-branch1) &&
git show HEAD | grep chouette
'
test_expect_success 'verbose flag is heeded, even after --continue' '
git reset --hard master@{1} &&
test_tick &&
test_must_fail git rebase -v -i --onto new-branch1 HEAD^ &&
echo resolved > file1 &&
git add file1 &&
git rebase --continue > output &&
grep "^ file1 | 2 +-$" output
'
test_expect_success 'multi-squash only fires up editor once' '
base=$(git rev-parse HEAD~4) &&
FAKE_COMMIT_AMEND="ONCE" FAKE_LINES="1 squash 2 squash 3 squash 4" \
EXPECT_HEADER_COUNT=4 \
git rebase -i $base &&
test $base = $(git rev-parse HEAD^) &&
test 1 = $(git show | grep ONCE | wc -l)
'
test_expect_success 'multi-fixup does not fire up editor' '
git checkout -b multi-fixup E &&
base=$(git rev-parse HEAD~4) &&
FAKE_COMMIT_AMEND="NEVER" FAKE_LINES="1 fixup 2 fixup 3 fixup 4" \
git rebase -i $base &&
test $base = $(git rev-parse HEAD^) &&
test 0 = $(git show | grep NEVER | wc -l) &&
git checkout to-be-rebased &&
git branch -D multi-fixup
'
test_expect_success 'commit message used after conflict' '
git checkout -b conflict-fixup conflict-branch &&
base=$(git rev-parse HEAD~4) &&
(
FAKE_LINES="1 fixup 3 fixup 4" &&
export FAKE_LINES &&
test_must_fail git rebase -i $base
) &&
echo three > conflict &&
git add conflict &&
FAKE_COMMIT_AMEND="ONCE" EXPECT_HEADER_COUNT=2 \
git rebase --continue &&
test $base = $(git rev-parse HEAD^) &&
test 1 = $(git show | grep ONCE | wc -l) &&
git checkout to-be-rebased &&
git branch -D conflict-fixup
'
test_expect_success 'commit message retained after conflict' '
git checkout -b conflict-squash conflict-branch &&
base=$(git rev-parse HEAD~4) &&
(
FAKE_LINES="1 fixup 3 squash 4" &&
export FAKE_LINES &&
test_must_fail git rebase -i $base
) &&
echo three > conflict &&
git add conflict &&
FAKE_COMMIT_AMEND="TWICE" EXPECT_HEADER_COUNT=2 \
git rebase --continue &&
test $base = $(git rev-parse HEAD^) &&
test 2 = $(git show | grep TWICE | wc -l) &&
git checkout to-be-rebased &&
git branch -D conflict-squash
'
cat > expect-squash-fixup << EOF
B
D
ONCE
EOF
test_expect_success 'squash and fixup generate correct log messages' '
git checkout -b squash-fixup E &&
base=$(git rev-parse HEAD~4) &&
FAKE_COMMIT_AMEND="ONCE" FAKE_LINES="1 fixup 2 squash 3 fixup 4" \
EXPECT_HEADER_COUNT=4 \
git rebase -i $base &&
git cat-file commit HEAD | sed -e 1,/^\$/d > actual-squash-fixup &&
test_cmp expect-squash-fixup actual-squash-fixup &&
git checkout to-be-rebased &&
git branch -D squash-fixup
'
test_expect_success 'squash ignores comments' '
git checkout -b skip-comments E &&
base=$(git rev-parse HEAD~4) &&
FAKE_COMMIT_AMEND="ONCE" FAKE_LINES="# 1 # squash 2 # squash 3 # squash 4 #" \
EXPECT_HEADER_COUNT=4 \
git rebase -i $base &&
test $base = $(git rev-parse HEAD^) &&
test 1 = $(git show | grep ONCE | wc -l) &&
git checkout to-be-rebased &&
git branch -D skip-comments
'
test_expect_success 'squash ignores blank lines' '
git checkout -b skip-blank-lines E &&
base=$(git rev-parse HEAD~4) &&
FAKE_COMMIT_AMEND="ONCE" FAKE_LINES="> 1 > squash 2 > squash 3 > squash 4 >" \
EXPECT_HEADER_COUNT=4 \
git rebase -i $base &&
test $base = $(git rev-parse HEAD^) &&
test 1 = $(git show | grep ONCE | wc -l) &&
git checkout to-be-rebased &&
git branch -D skip-blank-lines
'
test_expect_success 'squash works as expected' '
git checkout -b squash-works no-conflict-branch &&
one=$(git rev-parse HEAD~3) &&
FAKE_LINES="1 squash 3 2" EXPECT_HEADER_COUNT=2 \
git rebase -i HEAD~3 &&
test $one = $(git rev-parse HEAD~2)
'
test_expect_success 'interrupted squash works as expected' '
git checkout -b interrupted-squash conflict-branch &&
one=$(git rev-parse HEAD~3) &&
(
FAKE_LINES="1 squash 3 2" &&
export FAKE_LINES &&
test_must_fail git rebase -i HEAD~3
) &&
(echo one; echo two; echo four) > conflict &&
git add conflict &&
test_must_fail git rebase --continue &&
echo resolved > conflict &&
git add conflict &&
git rebase --continue &&
test $one = $(git rev-parse HEAD~2)
'
test_expect_success 'interrupted squash works as expected (case 2)' '
git checkout -b interrupted-squash2 conflict-branch &&
one=$(git rev-parse HEAD~3) &&
(
FAKE_LINES="3 squash 1 2" &&
export FAKE_LINES &&
test_must_fail git rebase -i HEAD~3
) &&
(echo one; echo four) > conflict &&
git add conflict &&
test_must_fail git rebase --continue &&
(echo one; echo two; echo four) > conflict &&
git add conflict &&
test_must_fail git rebase --continue &&
echo resolved > conflict &&
git add conflict &&
git rebase --continue &&
test $one = $(git rev-parse HEAD~2)
'
test_expect_success 'ignore patch if in upstream' '
HEAD=$(git rev-parse HEAD) &&
git checkout -b has-cherry-picked HEAD^ &&
echo unrelated > file7 &&
git add file7 &&
test_tick &&
git commit -m "unrelated change" &&
git cherry-pick $HEAD &&
EXPECT_COUNT=1 git rebase -i $HEAD &&
test $HEAD = $(git rev-parse HEAD^)
'
test_expect_success '--continue tries to commit, even for "edit"' '
parent=$(git rev-parse HEAD^) &&
test_tick &&
FAKE_LINES="edit 1" git rebase -i HEAD^ &&
echo edited > file7 &&
git add file7 &&
FAKE_COMMIT_MESSAGE="chouette!" git rebase --continue &&
test edited = $(git show HEAD:file7) &&
git show HEAD | grep chouette &&
test $parent = $(git rev-parse HEAD^)
'
test_expect_success 'aborted --continue does not squash commits after "edit"' '
old=$(git rev-parse HEAD) &&
test_tick &&
FAKE_LINES="edit 1" git rebase -i HEAD^ &&
echo "edited again" > file7 &&
git add file7 &&
(
FAKE_COMMIT_MESSAGE=" " &&
export FAKE_COMMIT_MESSAGE &&
test_must_fail git rebase --continue
) &&
test $old = $(git rev-parse HEAD) &&
git rebase --abort
'
test_expect_success 'auto-amend only edited commits after "edit"' '
test_tick &&
FAKE_LINES="edit 1" git rebase -i HEAD^ &&
echo "edited again" > file7 &&
git add file7 &&
FAKE_COMMIT_MESSAGE="edited file7 again" git commit &&
echo "and again" > file7 &&
git add file7 &&
test_tick &&
(
FAKE_COMMIT_MESSAGE="and again" &&
export FAKE_COMMIT_MESSAGE &&
test_must_fail git rebase --continue
) &&
git rebase --abort
'
test_expect_success 'rebase a detached HEAD' '
grandparent=$(git rev-parse HEAD~2) &&
git checkout $(git rev-parse HEAD) &&
test_tick &&
FAKE_LINES="2 1" git rebase -i HEAD~2 &&
test $grandparent = $(git rev-parse HEAD~2)
'
test_expect_success 'rebase a commit violating pre-commit' '
mkdir -p .git/hooks &&
PRE_COMMIT=.git/hooks/pre-commit &&
echo "#!/bin/sh" > $PRE_COMMIT &&
echo "test -z \"\$(git diff --cached --check)\"" >> $PRE_COMMIT &&
chmod a+x $PRE_COMMIT &&
echo "monde! " >> file1 &&
test_tick &&
test_must_fail git commit -m doesnt-verify file1 &&
git commit -m doesnt-verify --no-verify file1 &&
test_tick &&
FAKE_LINES=2 git rebase -i HEAD~2
'
test_expect_success 'rebase with a file named HEAD in worktree' '
rm -fr .git/hooks &&
git reset --hard &&
git checkout -b branch3 A &&
(
GIT_AUTHOR_NAME="Squashed Away" &&
export GIT_AUTHOR_NAME &&
>HEAD &&
git add HEAD &&
git commit -m "Add head" &&
>BODY &&
git add BODY &&
git commit -m "Add body"
) &&
FAKE_LINES="1 squash 2" git rebase -i to-be-rebased &&
test "$(git show -s --pretty=format:%an)" = "Squashed Away"
'
test_expect_success 'do "noop" when there is nothing to cherry-pick' '
git checkout -b branch4 HEAD &&
GIT_EDITOR=: git commit --amend \
--author="Somebody else <[email protected]>" &&
test $(git rev-parse branch3) != $(git rev-parse branch4) &&
git rebase -i branch3 &&
test $(git rev-parse branch3) = $(git rev-parse branch4)
'
test_expect_success 'submodule rebase setup' '
git checkout A &&
mkdir sub &&
(
cd sub && git init && >elif &&
git add elif && git commit -m "submodule initial"
) &&
echo 1 >file1 &&
git add file1 sub &&
test_tick &&
git commit -m "One" &&
echo 2 >file1 &&
test_tick &&
git commit -a -m "Two" &&
(
cd sub && echo 3 >elif &&
git commit -a -m "submodule second"
) &&
test_tick &&
git commit -a -m "Three changes submodule"
'
test_expect_success 'submodule rebase -i' '
FAKE_LINES="1 squash 2 3" git rebase -i A
'
test_expect_success 'avoid unnecessary reset' '
git checkout master &&
test-chmtime =123456789 file3 &&
git update-index --refresh &&
HEAD=$(git rev-parse HEAD) &&
git rebase -i HEAD~4 &&
test $HEAD = $(git rev-parse HEAD) &&
MTIME=$(test-chmtime -v +0 file3 | sed 's/[^0-9].*$//') &&
test 123456789 = $MTIME
'
test_expect_success 'reword' '
git checkout -b reword-branch master &&
FAKE_LINES="1 2 3 reword 4" FAKE_COMMIT_MESSAGE="E changed" git rebase -i A &&
git show HEAD | grep "E changed" &&
test $(git rev-parse master) != $(git rev-parse HEAD) &&
test $(git rev-parse master^) = $(git rev-parse HEAD^) &&
FAKE_LINES="1 2 reword 3 4" FAKE_COMMIT_MESSAGE="D changed" git rebase -i A &&
git show HEAD^ | grep "D changed" &&
FAKE_LINES="reword 1 2 3 4" FAKE_COMMIT_MESSAGE="B changed" git rebase -i A &&
git show HEAD~3 | grep "B changed" &&
FAKE_LINES="1 reword 2 3 4" FAKE_COMMIT_MESSAGE="C changed" git rebase -i A &&
git show HEAD~2 | grep "C changed"
'
test_expect_success 'rebase -i can copy notes' '
git config notes.rewrite.rebase true &&
git config notes.rewriteRef "refs/notes/*" &&
test_commit n1 &&
test_commit n2 &&
test_commit n3 &&
git notes add -m"a note" n3 &&
git rebase --onto n1 n2 &&
test "a note" = "$(git notes show HEAD)"
'
cat >expect <<EOF
an earlier note
a note
EOF
test_expect_success 'rebase -i can copy notes over a fixup' '
git reset --hard n3 &&
git notes add -m"an earlier note" n2 &&
GIT_NOTES_REWRITE_MODE=concatenate FAKE_LINES="1 fixup 2" git rebase -i n1 &&
git notes show > output &&
test_cmp expect output
'
test_expect_success 'rebase while detaching HEAD' '
git symbolic-ref HEAD &&
grandparent=$(git rev-parse HEAD~2) &&
test_tick &&
FAKE_LINES="2 1" git rebase -i HEAD~2 HEAD^0 &&
test $grandparent = $(git rev-parse HEAD~2) &&
test_must_fail git symbolic-ref HEAD
'
test_tick # Ensure that the rebased commits get a different timestamp.
test_expect_success 'always cherry-pick with --no-ff' '
git checkout no-ff-branch &&
git tag original-no-ff-branch &&
git rebase -i --no-ff A &&
touch empty &&
for p in 0 1 2
do
test ! $(git rev-parse HEAD~$p) = $(git rev-parse original-no-ff-branch~$p) &&
git diff HEAD~$p original-no-ff-branch~$p > out &&
test_cmp empty out
done &&
test $(git rev-parse HEAD~3) = $(git rev-parse original-no-ff-branch~3) &&
git diff HEAD~3 original-no-ff-branch~3 > out &&
test_cmp empty out
'
test_expect_success 'set up commits with funny messages' '
git checkout -b funny A &&
echo >>file1 &&
test_tick &&
git commit -a -m "end with slash\\" &&
echo >>file1 &&
test_tick &&
git commit -a -m "something (\000) that looks like octal" &&
echo >>file1 &&
test_tick &&
git commit -a -m "something (\n) that looks like a newline" &&
echo >>file1 &&
test_tick &&
git commit -a -m "another commit"
'
test_expect_success 'rebase-i history with funny messages' '
git rev-list A..funny >expect &&
test_tick &&
FAKE_LINES="1 2 3 4" git rebase -i A &&
git rev-list A.. >actual &&
test_cmp expect actual
'
test_done
| moy/git | t/t3404-rebase-interactive.sh | Shell | gpl-2.0 | 20,255 |
#!/bin/bash
# called by dracut
install() {
local _terminfodir
# terminfo bits make things work better if you fall into interactive mode
for _terminfodir in /lib/terminfo /etc/terminfo /usr/share/terminfo; do
[ -f ${_terminfodir}/l/linux ] && break
done
if [ -d ${_terminfodir} ]; then
for i in "l/linux" "v/vt100" "v/vt102" "v/vt220"; do
inst_dir "$_terminfodir/${i%/*}"
$DRACUT_CP -L -t "${initdir}/${_terminfodir}/${i%/*}" "$_terminfodir/$i"
done
fi
}
| Calrama/dracut | modules.d/95terminfo/module-setup.sh | Shell | gpl-2.0 | 528 |
#!/bin/bash
# For each argument passed to this script
for var in "$@"
do
echo "Whitelisting $var..."
# Use sed to search for the domain in /etc/pihole/gravity.list and remove it using an in-place edit
sed -i "/$var/d" /etc/pihole/gravity.list
# Also add the domain to the whitelist.txt in /etc/pihole
echo "$var" >> /etc/pihole/whitelist.txt
done
echo "** $# domain(s) whitelisted."
# Force dnsmasq to reload /etc/pihole/gravity.list
kill -HUP $(pidof dnsmasq)
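# Example invocation (hypothetical domains). Note the sed pattern above is
# unanchored, so any gravity.list line merely containing the string is removed:
#   sudo ./whitelist.sh example.com ads.example.net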
| chrisdeely/pi-hole | advanced/Scripts/whitelist.sh | Shell | gpl-2.0 | 500 |
#!/bin/bash
# This file is part of The RetroPie Project
#
# The RetroPie Project is the legal property of its developers, whose names are
# too numerous to list here. Please refer to the COPYRIGHT.md file distributed with this source.
#
# See the LICENSE.md file at the top-level directory of this distribution and
# at https://raw.githubusercontent.com/RetroPie/RetroPie-Setup/master/LICENSE.md
#
## @file inifuncs.sh
## @brief RetroPie inifuncs library
## @copyright GPLv3
# @fn fatalError()
# @param message string or array of messages to display
# @brief echos message, and exits immediately.
function fatalError() {
echo -e "$1"
exit 1
}
# arg 1: delimiter, arg 2: quote, arg 3: file
## @fn iniConfig()
## @param delim ini file delimiter eg. ' = '
## @param quote ini file quoting character eg. '"'
## @param config ini file to edit
## @brief Configure an ini file for getting/setting values with `iniGet` and `iniSet`
function iniConfig() {
__ini_cfg_delim="$1"
__ini_cfg_quote="$2"
__ini_cfg_file="$3"
}
# arg 1: command, arg 2: key, arg 3: value, arg 4: file (optional - uses file from iniConfig if not used)
# @fn iniProcess()
# @param command `set`, `unset` or `del`
# @param key ini key to operate on
# @param value to set
# @param file optional file to use another file than the one configured with iniConfig
# @brief The main function for setting and deleting from ini files - usually
# not called directly but via iniSet iniUnset and iniDel
function iniProcess() {
local cmd="$1"
local key="$2"
local value="$3"
local file="$4"
[[ -z "$file" ]] && file="$__ini_cfg_file"
local delim="$__ini_cfg_delim"
local quote="$__ini_cfg_quote"
[[ -z "$file" ]] && fatalError "No file provided for ini/config change"
[[ -z "$key" ]] && fatalError "No key provided for ini/config change on $file"
# we strip the delimiter of spaces, so we can "fussy" match existing entries that have the wrong spacing
local delim_strip=${delim// /}
# if the stripped delimiter is empty - such as in the case of a space, just use the delimiter instead
[[ -z "$delim_strip" ]] && delim_strip="$delim"
local match_re="^[[:space:]#]*$key[[:space:]]*$delim_strip.*$"
local match
if [[ -f "$file" ]]; then
match=$(egrep -i "$match_re" "$file" | tail -1)
else
touch "$file"
fi
if [[ "$cmd" == "del" ]]; then
[[ -n "$match" ]] && sed -i -e "\|$(sedQuote "$match")|d" "$file"
return 0
fi
[[ "$cmd" == "unset" ]] && key="# $key"
local replace="$key$delim$quote$value$quote"
if [[ -z "$match" ]]; then
# make sure there is a newline then add the key-value pair
sed -i '$a\' "$file"
echo "$replace" >> "$file"
else
# replace existing key-value pair
sed -i -e "s|$(sedQuote "$match")|$(sedQuote "$replace")|g" "$file"
fi
[[ "$file" =~ retroarch\.cfg$ ]] && retroarchIncludeToEnd "$file"
}
## @fn iniUnset()
## @param key ini key to operate on
## @param value to Unset (key will be commented out, but the value can be changed also)
## @param file optional file to use another file than the one configured with iniConfig
## @brief Unset (comment out) a key / value pair in an ini file.
## @details The key does not have to exist - if it doesn't exist a new line will
## be added - eg. `# key = "value"`
##
## This function is useful for creating example configuration entries for users
## to manually enable later or if a configuration is to be disabled but left
## as an example.
function iniUnset() {
iniProcess "unset" "$1" "$2" "$3"
}
## @fn iniSet()
## @param key ini key to operate on
## @param value to set
## @param file optional file to use another file than the one configured with iniConfig
## @brief Set a key / value pair in an ini file.
## @details If the key already exists the existing line will be changed. If not
## a new line will be created.
function iniSet() {
iniProcess "set" "$1" "$2" "$3"
}
## @fn iniDel()
## @param key ini key to operate on
## @param file optional file to use another file than the one configured with iniConfig
## @brief Delete a key / value pair in an ini file.
function iniDel() {
iniProcess "del" "$1" "" "$2"
}
## @fn iniGet()
## @param key ini key to get the value of
## @param file optional file to use another file than the one configured with iniConfig
## @brief Get the value of a key from an ini file.
## @details The value of the key will end up in the global ini_value variable.
function iniGet() {
local key="$1"
local file="$2"
[[ -z "$file" ]] && file="$__ini_cfg_file"
if [[ ! -f "$file" ]]; then
ini_value=""
return 1
fi
local delim="$__ini_cfg_delim"
local quote="$__ini_cfg_quote"
# we strip the delimiter of spaces, so we can "fussy" match existing entries that have the wrong spacing
local delim_strip=${delim// /}
# if the stripped delimiter is empty - such as in the case of a space, just use the delimiter instead
[[ -z "$delim_strip" ]] && delim_strip="$delim"
# create a regexp to match the value based on whether we are looking for quotes or not
local value_m
if [[ -n "$quote" ]]; then
value_m="$quote*\([^$quote|\r]*\)$quote*"
else
value_m="\([^\r]*\)"
fi
ini_value="$(sed -n "s/^[ |\t]*$key[ |\t]*$delim_strip[ |\t]*$value_m.*/\1/p" "$file" | tail -1)"
}
# @fn retroarchIncludeToEnd()
# @param file config file to process
# @brief Makes sure a `retroarch.cfg` file has the `#include` line at the end.
# @details Used in runcommand.sh and iniProcess to ensure the #include for the
# main retroarch.cfg is always at the end of a system `retroarch.cfg`. This
# is because when processing its config RetroArch will take the first value it
# finds, so any overrides need to be above the `#include` line where the global
# retroarch.cfg is included.
function retroarchIncludeToEnd() {
local config="$1"
[[ ! -f "$config" ]] && return
local re="^#include.*retroarch\.cfg"
# extract the include line (unless it is the last line in the file)
# (remove blank lines, the last line and search for an include line in remaining lines)
local include=$(sed '/^$/d;$d' "$config" | grep "$re")
# if matched remove it and re-add it at the end
if [[ -n "$include" ]]; then
sed -i "/$re/d" "$config"
# add newline if missing and the #include line
sed -i '$a\' "$config"
echo "$include" >>"$config"
fi
}
# arg 1: key, arg 2: default value (optional - is 1 if not used)
function addAutoConf() {
local key="$1"
local default="$2"
local file="$configdir/all/autoconf.cfg"
if [[ -z "$default" ]]; then
default="1"
fi
iniConfig " = " '"' "$file"
iniGet "$key"
ini_value="${ini_value// /}"
if [[ -z "$ini_value" ]]; then
iniSet "$key" "$default"
fi
}
# arg 1: key, arg 2: value
function setAutoConf() {
local key="$1"
local value="$2"
local file="$configdir/all/autoconf.cfg"
iniConfig " = " '"' "$file"
iniSet "$key" "$value"
}
# arg 1: key
function getAutoConf(){
local key="$1"
iniConfig " = " '"' "$configdir/all/autoconf.cfg"
iniGet "$key"
[[ "$ini_value" == "1" ]] && return 0
return 1
}
# escape backslashes and pipes for sed
function sedQuote() {
local string="$1"
string="${string//\\/\\\\}"
string="${string//|/\\|}"
echo "$string"
}
| j-r0dd/RetroPie-Setup | scriptmodules/inifuncs.sh | Shell | gpl-3.0 | 7,483 |
#!/bin/bash
set -eu
oc config view --flatten -o template --template '{{with index .users 0}}{{.user.token}}{{end}}'
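# An equivalent jsonpath form on newer oc/kubectl clients (an assumption,
# not taken from this repo):
#   oc config view --flatten -o jsonpath='{.users[0].user.token}'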
| detiber/demo-ansible | playbooks/files/get_token.sh | Shell | apache-2.0 | 117 |
#!/bin/sh -x
##############################
# ZSH installation
##############################
cd /tmp
wget https://github.com/zsh-users/zsh/archive/zsh-5.2.tar.gz
tar zxf zsh-5.2.tar.gz
cd zsh-5.2
./configure
sudo make
sudo make install
#Remove the tarball and tarball extract so they don't take up space in the final, packaged VM
rm -rf /tmp/zsh-5.2/ /tmp/zsh-5.2.tar.gz
##############################
# Vagrant ZSH setup stuff
##############################
#Add this variable so keyboard stuff isn't wonky
echo "DEBIAN_PREVENT_KEYBOARD_CHANGES=yes" > /home/vagrant/.zshenv
#install oh-my-zsh first
cd /home/vagrant
#su - vagrant -c 'curl -L https://github.com/robbyrussell/oh-my-zsh/raw/master/tools/install.sh | sh'
su - vagrant -c 'git clone git://github.com/robbyrussell/oh-my-zsh.git ~/.oh-my-zsh; cd ~/.oh-my-zsh; git checkout ca648ae7b1ca7cf2abbc63afde3c216994a71966'
#Make a folder for plugins like...
mkdir -p /home/vagrant/.oh-my-zsh/custom/plugins
cd /home/vagrant/.oh-my-zsh/custom/plugins
#smarter history search
/usr/local/bin/git clone git://github.com/zsh-users/zsh-history-substring-search.git
#syntax highlighting
/usr/local/bin/git clone git://github.com/zsh-users/zsh-syntax-highlighting.git
#cat the uploaded vagrant .zshrc into the /home/vagrant/.zshrc file
cat /tmp/vagrant-zshrc > /home/vagrant/.zshrc
chown -R vagrant:vagrant /home/vagrant/
#cat in the uploaded ZSH theme
cat /tmp/vagrant-zsh-theme.zsh-theme > /home/vagrant/.oh-my-zsh/themes/vagrant-zsh-theme.zsh-theme
#Install rbenv and some plugins
/usr/local/bin/git clone git://github.com/sstephenson/rbenv.git /home/vagrant/.rbenv
mkdir -p /home/vagrant/.rbenv/plugins
cd /home/vagrant/.rbenv/plugins;
/usr/local/bin/git clone git://github.com/sstephenson/ruby-build.git
#Set ownership of the homedir in case any commands above set parts of it to root
chown -R vagrant:vagrant /home/vagrant/.rbenv/
##############################
# root ZSH setup stuff
##############################
#Add this variable so keyboard stuff isn't wonky
echo "DEBIAN_PREVENT_KEYBOARD_CHANGES=yes" > /root/.zshenv
#install oh-my-zsh first
cd /root
#su - root -c 'curl -L https://github.com/robbyrussell/oh-my-zsh/raw/master/tools/install.sh | sh'
su - root -c 'git clone git://github.com/robbyrussell/oh-my-zsh.git ~/.oh-my-zsh; cd ~/.oh-my-zsh; git checkout ca648ae7b1ca7cf2abbc63afde3c216994a71966'
#Make a folder for plugins like...
mkdir -p /root/.oh-my-zsh/custom/plugins
cd /root/.oh-my-zsh/custom/plugins
#smarter history search
/usr/local/bin/git clone git://github.com/zsh-users/zsh-history-substring-search.git
#syntax highlighting
/usr/local/bin/git clone git://github.com/zsh-users/zsh-syntax-highlighting.git
#cat the uploaded vagrant .zshrc into the /home/vagrant/.zshrc file
cat /tmp/root-zshrc > /root/.zshrc
#cat in the uploaded ZSH theme
cat /tmp/root-zsh-theme.zsh-theme > /root/.oh-my-zsh/themes/root-zsh-theme.zsh-theme
#Install rbenv and some plugins
/usr/local/bin/git clone git://github.com/sstephenson/rbenv.git /root/.rbenv
mkdir -p /root/.rbenv/plugins
cd /root/.rbenv/plugins
/usr/local/bin/git clone git://github.com/sstephenson/ruby-build.git
#Change the login shell to zsh for the vagrant and root users
echo "/usr/local/bin/zsh" >> /etc/shells
chsh -s /usr/local/bin/zsh vagrant
chsh -s /usr/local/bin/zsh root
| nickchappell/packer-templates | works_in_progress/rhel-7-amd64/scripts/zsh.sh | Shell | apache-2.0 | 3,323 |
#!/bin/bash
RES=0
. /openrc
if ! keystone token-get > /dev/null; then
echo "ERROR: keystone token-get failed" >&2
RES=1
else
if ! glance image-list > /dev/null; then
echo "ERROR: glance image-list failed" >&2
RES=1
fi
fi
exit $RES
| rthallisey/atomic-osp-installer | docker/glance/glance-api/check.sh | Shell | apache-2.0 | 267 |
#!/bin/sh -e
#
# Copyright (C) 2010, 2012 Internet Systems Consortium, Inc. ("ISC")
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
# Id: setup.sh,v 1.2 2010/06/21 02:31:45 marka Exp
SYSTEMTESTTOP=..
. $SYSTEMTESTTOP/conf.sh
. ./clean.sh
../../../tools/genrandom 800 random.data
dd if=random.data of=random.data1 bs=1k count=400 2> /dev/null
dd if=random.data of=random.data2 bs=1k skip=400 2> /dev/null
cd ns1 && sh sign.sh
| execunix/vinos | external/bsd/bind/dist/bin/tests/virtual-time/autosign-zsk/setup.sh | Shell | apache-2.0 | 1,091 |
#!/bin/sh
#
# Copyright (c) 2005, 2006, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
# @test
# @bug 6265810 6705893
# @build CheckEngine
# @run shell jrunscript-argsTest.sh
# @summary Test passing of script arguments from command line
. ${TESTSRC-.}/common.sh
setup
${JAVA} -cp ${TESTCLASSES} CheckEngine
if [ $? -eq 2 ]; then
echo "No js engine found and engine not required; test vacuously passes."
exit 0
fi
# we check whether "excess" args are passed as script arguments
${JRUNSCRIPT} -f - hello world <<EOF
if (typeof(arguments) == 'undefined') { println("arguments expected"); exit(1); }
if (arguments.length != 2) { println("2 arguments are expected here"); exit(1); }
if (arguments[0] != 'hello') { println("First arg should be 'hello'"); exit(1); }
if (arguments[1] != 'world') { println("Second arg should be 'world'"); exit(1); }
println("Passed");
exit(0);
EOF
if [ $? -ne 0 ]; then
exit 1
fi
|
andreagenso/java2scala
|
test/J2s/java/openjdk-6-src-b27/jdk/test/sun/tools/jrunscript/jrunscript-argsTest.sh
|
Shell
|
apache-2.0
| 1,891 |
#!/bin/bash
#Aqueduct - Compliance Remediation Content
#Copyright (C) 2011,2012 Vincent C. Passaro ([email protected])
#
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; either version 2
#of the License, or (at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor,
#Boston, MA 02110-1301, USA.
######################################################################
#By Tummy a.k.a Vincent C. Passaro #
#Vincent[.]Passaro[@]gmail[.]com #
#www.vincentpassaro.com #
######################################################################
#_____________________________________________________________________
#| Version | Change Information | Author | Date |
#|__________|_______________________|____________________|____________|
#| 1.0 | Initial Script | Vincent C. Passaro | 20-oct-2011|
#| | Creation | | |
#|__________|_______________________|____________________|____________|
#######################DISA INFORMATION###############################
#Group ID (Vulid): V-22577
#Group Title: GEN008440
#Rule ID: SV-26963r1_rule
#Severity: CAT III
#Rule Version (STIG-ID): GEN008440
#Rule Title: Automated file system mounting tools must not be enabled unless needed.
#
#Vulnerability Discussion: Automated file system mounting tools may provide unprivileged users with the ability to access local media and network shares. If this access is not necessary for the system’s operation, it must be disabled to reduce the risk of unauthorized access to these resources.
#
#Responsibility: System Administrator
#IAControls: ECSC-1
#
#Check Content:
#If the autofs service is needed, this vulnerability is not applicable.
#Check if the autofs service is running.
# service autofs status
#If the service is running, this is a finding.
#
#Fix Text: Stop and disable the autofs service.
# service autofs stop
# chkconfig autofs off
#######################DISA INFORMATION###############################
#Global Variables#
PDI=GEN008440
AUTOSERVICE=$( service autofs status | grep -c "running" )
#Start-Lockdown
if [ $AUTOSERVICE -ne 0 ]
then
service autofs stop
chkconfig --level 2345 autofs off
fi
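# To verify the remediation, "service autofs status" should report the service
# as stopped and "chkconfig --list autofs" should show runlevels 2-5 off.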
|
quark-pat/CLIP
|
packages/aqueduct/aqueduct/compliance/Bash/STIG/rhel-5-beta/prod/GEN008440.sh
|
Shell
|
apache-2.0
| 2,756 |
# Prints the current weather in Celsius, Fahrenheit, or lord Kelvins. The forecast is cached and updated with a period of $update_period.
# The update period in seconds.
update_period=600
TMUX_POWERLINE_SEG_WEATHER_DATA_PROVIDER_DEFAULT="yahoo"
TMUX_POWERLINE_SEG_WEATHER_UNIT_DEFAULT="c"
TMUX_POWERLINE_SEG_WEATHER_UPDATE_PERIOD_DEFAULT="600"
if shell_is_bsd; then
TMUX_POWERLINE_SEG_WEATHER_GREP_DEFAULT="/usr/local/bin/grep"
else
TMUX_POWERLINE_SEG_WEATHER_GREP_DEFAULT="grep"
fi
generate_segmentrc() {
read -d '' rccontents << EORC
# The data provider to use. Currently only "yahoo" is supported.
export TMUX_POWERLINE_SEG_WEATHER_DATA_PROVIDER="${TMUX_POWERLINE_SEG_WEATHER_DATA_PROVIDER_DEFAULT}"
# What unit to use. Can be any of {c,f,k}.
export TMUX_POWERLINE_SEG_WEATHER_UNIT="${TMUX_POWERLINE_SEG_WEATHER_UNIT_DEFAULT}"
# How often to update the weather in seconds.
export TMUX_POWERLINE_SEG_WEATHER_UPDATE_PERIOD="${TMUX_POWERLINE_SEG_WEATHER_UPDATE_PERIOD_DEFAULT}"
# Name of GNU grep binary if in PATH, or path to it.
export TMUX_POWERLINE_SEG_WEATHER_GREP="${TMUX_POWERLINE_SEG_WEATHER_GREP_DEFAULT}"
# Your location. Find a code that works for you:
# 1. Go to Yahoo weather http://weather.yahoo.com/
# 2. Find the weather for you location
# 3. Copy the last numbers in that URL. e.g. "http://weather.yahoo.com/united-states/california/newport-beach-12796587/" has the numbers "12796587"
export TMUX_POWERLINE_SEG_WEATHER_LOCATION=""
EORC
echo "$rccontents"
}
run_segment() {
__process_settings
local tmp_file="${TMUX_POWERLINE_DIR_TEMPORARY}/weather_yahoo.txt"
local weather
case "$TMUX_POWERLINE_SEG_WEATHER_DATA_PROVIDER" in
"yahoo") weather=$(__yahoo_weather) ;;
*)
echo "Unknown weather provider [${$TMUX_POWERLINE_SEG_WEATHER_DATA_PROVIDER}]";
return 1
esac
if [ -n "$weather" ]; then
echo "$weather"
fi
}
__process_settings() {
if [ -z "$TMUX_POWERLINE_SEG_WEATHER_DATA_PROVIDER" ]; then
export TMUX_POWERLINE_SEG_WEATHER_DATA_PROVIDER="${TMUX_POWERLINE_SEG_WEATHER_DATA_PROVIDER_DEFAULT}"
fi
if [ -z "$TMUX_POWERLINE_SEG_WEATHER_UNIT" ]; then
export TMUX_POWERLINE_SEG_WEATHER_UNIT="${TMUX_POWERLINE_SEG_WEATHER_UNIT_DEFAULT}"
fi
if [ -z "$TMUX_POWERLINE_SEG_WEATHER_UPDATE_PERIOD" ]; then
export TMUX_POWERLINE_SEG_WEATHER_UPDATE_PERIOD="${TMUX_POWERLINE_SEG_WEATHER_UPDATE_PERIOD_DEFAULT}"
fi
if [ -z "$TMUX_POWERLINE_SEG_WEATHER_GREP" ]; then
export TMUX_POWERLINE_SEG_WEATHER_GREP="${TMUX_POWERLINE_SEG_WEATHER_GREP_DEFAULT}"
fi
if [ -z "$TMUX_POWERLINE_SEG_WEATHER_LOCATION" ]; then
echo "No weather location specified.";
exit 8
fi
}
__yahoo_weather() {
degree=""
if [ -f "$tmp_file" ]; then
if shell_is_osx || shell_is_bsd; then
last_update=$(stat -f "%m" ${tmp_file})
elif shell_is_linux; then
last_update=$(stat -c "%Y" ${tmp_file})
fi
time_now=$(date +%s)
up_to_date=$(echo "(${time_now}-${last_update}) < ${update_period}" | bc)
if [ "$up_to_date" -eq 1 ]; then
__read_tmp_file
fi
fi
if [ -z "$degree" ]; then
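# URL-decoded, the YQL query in the request below reads:
#   SELECT * FROM weather.forecast WHERE u='<unit>' AND woeid = '<location>'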
weather_data=$(curl --max-time 4 -s "https://query.yahooapis.com/v1/public/yql?format=xml&q=SELECT%20*%20FROM%20weather.forecast%20WHERE%20u=%27${TMUX_POWERLINE_SEG_WEATHER_UNIT}%27%20AND%20woeid%20=%20%27${TMUX_POWERLINE_SEG_WEATHER_LOCATION}%27")
if [ "$?" -eq "0" ]; then
error=$(echo "$weather_data" | grep "problem_cause\|DOCTYPE");
if [ -n "$error" ]; then
echo "error"
exit 1
fi
# Assume latest grep is in PATH
gnugrep="${TMUX_POWERLINE_SEG_WEATHER_GREP}"
# <yweather:units temperature="F" distance="mi" pressure="in" speed="mph"/>
unit=$(echo "$weather_data" | "$gnugrep" -Zo "<yweather:units [^<>]*/>" | sed 's/.*temperature="\([^"]*\)".*/\1/')
condition=$(echo "$weather_data" | "$gnugrep" -Zo "<yweather:condition [^<>]*/>")
# <yweather:condition text="Clear" code="31" temp="66" date="Mon, 01 Oct 2012 8:00 pm CST" />
degree=$(echo "$condition" | sed 's/.*temp="\([^"]*\)".*/\1/')
condition=$(echo "$condition" | sed 's/.*text="\([^"]*\)".*/\1/')
# Pull the times for sunrise and sunset so we know when to change the day/night indicator
# <yweather:astronomy sunrise="6:56 am" sunset="6:21 pm"/>
# Use an array so the BSD date format string survives word splitting;
# %I (12-hour clock) matches the "6:56 am" style strings being parsed.
if shell_is_osx || shell_is_bsd; then
date_args=(-j -f "%I:%M %p")
else
date_args=(-d)
fi
sunrise=$(date "${date_args[@]}" "$(echo "$weather_data" | "$gnugrep" "yweather:astronomy" | sed 's/^\(.*\)sunset.*/\1/' | sed 's/^.*sunrise="\(.*m\)".*/\1/')" +%H%M)
sunset=$(date "${date_args[@]}" "$(echo "$weather_data" | "$gnugrep" "yweather:astronomy" | sed 's/^.*sunset="\(.*m\)".*/\1/')" +%H%M)
elif [ -f "${tmp_file}" ]; then
__read_tmp_file
fi
fi
if [ -n "$degree" ]; then
if [ "$TMUX_POWERLINE_SEG_WEATHER_UNIT" == "k" ]; then
degree=$(echo "${degree} + 273.15" | bc)
fi
condition_symbol=$(__get_condition_symbol "$condition" "$sunrise" "$sunset")
echo "${condition_symbol} ${degree}°$(echo "$TMUX_POWERLINE_SEG_WEATHER_UNIT" | tr '[:lower:]' '[:upper:]')" | tee "${tmp_file}"
fi
}
# Get symbol for condition. Available conditions: http://developer.yahoo.com/weather/#codes
__get_condition_symbol() {
local condition=$(echo "$1" | tr '[:upper:]' '[:lower:]')
local sunrise="$2"
local sunset="$3"
case "$condition" in
"sunny" | "hot")
hourmin=$(date +%H%M)
if [ "$hourmin" -ge "$sunset" -o "$hourmin" -le "$sunrise" ]; then
#echo "☽"
echo "☾"
else
#echo "☀"
echo "☼"
fi
;;
"rain" | "mixed rain and snow" | "mixed rain and sleet" | "freezing drizzle" | "drizzle" | "light drizzle" | "freezing rain" | "showers" | "mixed rain and hail" | "scattered showers" | "isolated thundershowers" | "thundershowers" | "light rain with thunder" | "light rain" | "rain and snow")
#echo "☂"
echo "☔"
;;
"snow" | "mixed snow and sleet" | "snow flurries" | "light snow showers" | "blowing snow" | "sleet" | "hail" | "heavy snow" | "scattered snow showers" | "snow showers" | "light snow" | "snow/windy" | "snow grains" | "snow/fog")
#echo "☃"
echo "❅"
;;
"cloudy" | "mostly cloudy" | "partly cloudy" | "partly cloudy/windy")
echo "☁"
;;
"tornado" | "tropical storm" | "hurricane" | "severe thunderstorms" | "thunderstorms" | "isolated thunderstorms" | "scattered thunderstorms")
#echo "⚡"
echo "☈"
;;
"dust" | "foggy" | "fog" | "haze" | "smoky" | "blustery" | "mist")
#echo "♨"
#echo "﹌"
echo "〰"
;;
"breezy")
#echo "🌬"
echo "🍃"
;;
"windy" | "fair/windy")
#echo "⚐"
echo "⚑"
;;
"clear" | "fair" | "cold")
hourmin=$(date +%H%M)
if [ "$hourmin" -ge "$sunset" -o "$hourmin" -le "$sunrise" ]; then
echo "☾"
else
echo "〇"
fi
;;
*)
echo "?"
;;
esac
}
__read_tmp_file() {
if [ ! -f "$tmp_file" ]; then
return
fi
cat "${tmp_file}"
exit
}
|
jessonfoo/dotfiles
|
tmux/tmux-powerline/segments/weather.sh
|
Shell
|
bsd-2-clause
| 6,833 |
#!/bin/sh
# create global.h
echo "#ifndef PREFIX
#define PREFIX QString(\"${1}\")
#endif" > global.h
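 global.h">
# Example (hypothetical prefix): "sh make-global-h.sh /usr/local" writes:
#   #ifndef PREFIX
#   #define PREFIX QString("/usr/local")
#   #endif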
|
grahamperrin/lumina
|
libLumina/make-global-h.sh
|
Shell
|
bsd-3-clause
| 105 |
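# $+commands[kubectl] evaluates to 1 when kubectl is present in zsh's command
# table (i.e. found on $PATH), so the completion cache is only built if needed.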
if (( $+commands[kubectl] )); then
__KUBECTL_COMPLETION_FILE="${ZSH_CACHE_DIR}/kubectl_completion"
if [[ ! -f $__KUBECTL_COMPLETION_FILE ]]; then
kubectl completion zsh >! $__KUBECTL_COMPLETION_FILE
fi
[[ -f $__KUBECTL_COMPLETION_FILE ]] && source $__KUBECTL_COMPLETION_FILE
unset __KUBECTL_COMPLETION_FILE
fi
# This command is used a LOT both below and in daily life
alias k=kubectl
# Apply a YML file
alias kaf='k apply -f'
# Drop into an interactive terminal on a container
alias keti='k exec -ti'
# Manage configuration quickly to switch contexts between local, dev, and staging.
alias kcuc='k config use-context'
alias kcsc='k config set-context'
alias kcdc='k config delete-context'
alias kccc='k config current-context'
# Pod management.
alias kgp='k get pods'
alias kep='k edit pods'
alias kdp='k describe pods'
alias kdelp='k delete pods'
# Service management.
alias kgs='k get svc'
alias kes='k edit svc'
alias kds='k describe svc'
alias kdels='k delete svc'
# Ingress management
alias kgi='k get ingress'
alias kei='k edit ingress'
alias kdi='k describe ingress'
alias kdeli='k delete ingress'
# Secret management
alias kgsec='k get secret'
alias kdsec='k describe secret'
alias kdelsec='k delete secret'
# Deployment management.
alias kgd='k get deployment'
alias ked='k edit deployment'
alias kdd='k describe deployment'
alias kdeld='k delete deployment'
alias ksd='k scale deployment'
alias krsd='k rollout status deployment'
# Rollout management.
alias kgrs='k get rs'
alias krh='k rollout history'
alias kru='k rollout undo'
# Logs
alias kl='k logs'
alias klf='k logs -f'
|
kulesa/dotfiles
|
zsh/.oh-my-zsh/plugins/kubectl/kubectl.plugin.zsh
|
Shell
|
mit
| 1,628 |
#!/bin/bash
# Deploy to staging/production on master/release merges (not PRs)
set -e
# Don't deploy on PRs
if [ "$TRAVIS_PULL_REQUEST" != "false" ]; then
exit 0
fi
if [ "$TRAVIS_BRANCH" == "master" ]; then
# Deploy to staging on a merge to master
ember deploy staging --verbose --activate
elif [ -n "$TRAVIS_TAG" ]; then
# Deploy to production on a tag
ember deploy production --verbose --activate
fi
|
pangratz/ember-twiddle
|
scripts/travis-deploy.sh
|
Shell
|
mit
| 415 |
#! /bin/sh
# Copyright (C) 2011-2013 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Check that the user can force automake to use *_YFLAGS variables
# which have conditional content.
. test-init.sh
cat >> configure.ac <<'END'
AC_SUBST([CC], [false])
AC_PROG_YACC
AM_CONDITIONAL([COND], [test x"$cond" = x"yes"])
AC_OUTPUT
END
mkdir bin
cat > bin/fake-yacc <<'END'
#!/bin/sh
echo "/* $* */" > y.tab.c
echo 'extern int dummy;' >> y.tab.c
END
chmod a+x bin/fake-yacc
PATH=$(pwd)/bin$PATH_SEPARATOR$PATH; export PATH
YACC=fake-yacc; export YACC
cat > Makefile.am <<'END'
AUTOMAKE_OPTIONS = no-dependencies
bin_PROGRAMS = foo bar
foo_SOURCES = foo.y main.c
bar_SOURCES = $(foo_SOURCES)
bar_YFLAGS = $(bar_yflags2)
if COND
AM_YFLAGS = __am_cond_yes__
bar_YFLAGS += __bar_cond_yes__
else !COND
AM_YFLAGS = __am_cond_no__
bar_yflags2 = __bar_cond_no__
endif !COND
END
: > foo.y
$ACLOCAL
$AUTOCONF
$AUTOMAKE -a -Wno-unsupported
$EGREP '(YFLAGS|yflags|am__append)' Makefile.in # For debugging.
./configure cond=yes
$MAKE foo.c bar-foo.c
cat foo.c
cat bar-foo.c
$FGREP ' __am_cond_yes__ ' foo.c
$FGREP ' __bar_cond_yes__ ' bar-foo.c
$FGREP 'cond_no' foo.c bar-foo.c && exit 1
$MAKE maintainer-clean
ls -l
./configure cond=no
$MAKE foo.c bar-foo.c
cat foo.c
cat bar-foo.c
$FGREP ' __am_cond_no__ ' foo.c
$FGREP ' __bar_cond_no__ ' bar-foo.c
$FGREP 'cond_yes' foo.c bar-foo.c && exit 1
:
|
DDTChen/CookieVLC
|
vlc/extras/tools/automake/t/yflags-force-conditional.sh
|
Shell
|
gpl-2.0
| 1,998 |
#!/bin/bash
#PBS -l nodes=1:ppn=20
#PBS -l walltime=168:00:00
#PBS -N session1_default
#PBS -A course
#PBS -q GpuQ
export THEANO_FLAGS=device=gpu,floatX=float32
cd $PBS_O_WORKDIR
python ./train_nmt_all.py
|
kyunghyuncho/dl4mt-material
|
session1/train_all.sh
|
Shell
|
bsd-3-clause
| 210 |
#!/bin/bash
# Hello, World!
cd ~
git clone https://github.com/gae-init/gae-init.git hello-world
cd hello-world
yarn
gulp
|
gae-init/gae-init-docs
|
bin/test_hello_world.sh
|
Shell
|
mit
| 122 |
#!/bin/bash
# Change this if your clang-format executable is somewhere else
#CLANG_FORMAT="$HOME/Library/Application Support/Alcatraz/Plug-ins/ClangFormat/bin/clang-format"
CLANG_FORMAT=./clang-format
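# -print0 with xargs -0 keeps filenames containing spaces intact; -i edits the
# files in place.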
find . \( -name '*.h' -or -name '*.m' -or -name '*.mm' \) -print0 | xargs -0 "$CLANG_FORMAT" -i
|
longbai/iOS-netdiag
|
format.sh
|
Shell
|
mit
| 297 |
#!/usr/bin/env bash
set -ex -o pipefail
# These ones can be `npm link`ed for fast development
LINKABLE_PKGS=(
$(pwd)/dist/packages-dist/{common,forms,core,compiler,compiler-cli,platform-{browser,server},platform-browser-dynamic}
$(pwd)/dist/tools/@angular/tsc-wrapped
)
PKGS=(
[email protected]
[email protected]
[email protected]
[email protected]
@types/{[email protected],[email protected]}
[email protected]
[email protected]
@angular2-material/{core,button}@2.0.0-alpha.8-1
)
TMPDIR=${TMPDIR:-.}
readonly TMP=$TMPDIR/e2e_test.$(date +%s)
mkdir -p $TMP
cp -R -v modules/@angular/compiler-cli/integrationtest/* $TMP
cp -R -v modules/benchmarks $TMP
# Try to use the same versions as angular, in particular, this will
# cause us to install the same rxjs version.
cp -v package.json $TMP
# run in subshell to avoid polluting cwd
(
cd $TMP
set -ex -o pipefail
npm install ${PKGS[*]}
# TODO(alexeagle): allow this to be npm link instead
npm install ${LINKABLE_PKGS[*]}
./node_modules/.bin/tsc --version
# Compile the compiler-cli third_party simulation.
# Use ngc-wrapped directly so we don't produce *.ngfactory.ts files!
# Compile the compiler-cli integration tests
# TODO(vicb): restore the test for .xtb
#./node_modules/.bin/ngc -p tsconfig-build.json --i18nFile=src/messages.fi.xtb --locale=fi --i18nFormat=xtb
# Generate the metadata for the third-party modules
node ./node_modules/@angular/tsc-wrapped/src/main -p third_party_src/tsconfig-build.json
./node_modules/.bin/ngc -p tsconfig-build.json --i18nFile=src/messages.fi.xlf --locale=fi --i18nFormat=xlf
./node_modules/.bin/ng-xi18n -p tsconfig-build.json --i18nFormat=xlf
./node_modules/.bin/ng-xi18n -p tsconfig-build.json --i18nFormat=xmb
./node_modules/.bin/jasmine init
# Run compiler-cli integration tests in node
./node_modules/.bin/webpack ./webpack.config.js
./node_modules/.bin/jasmine ./all_spec.js
# Compile again with a differently named tsconfig file
mv tsconfig-build.json othername.json
./node_modules/.bin/ngc -p othername.json
)
|
domusofsail/angular
|
scripts/ci-lite/offline_compiler_test.sh
|
Shell
|
mit
| 2,075 |
#!/bin/bash
PATH=/home/deploy/realms-wiki/.venv/bin:/usr/local/bin:/usr/bin:/bin:$PATH
export PATH
LC_ALL=en_US.UTF-8
GEVENT_RESOLVER=ares
export LC_ALL
export GEVENT_RESOLVER
if [ "${REALMS_WIKI_CONFIG}" != "" ]; then
realms-wiki configure ${REALMS_WIKI_CONFIG}
fi
if [ "${REALMS_WIKI_WORKERS}" == "" ]; then
REALMS_WIKI_WORKERS=3
fi
if [ "${REALMS_WIKI_PORT}" == "" ]; then
REALMS_WIKI_PORT=5000
fi
exec gunicorn \
--name realms-wiki \
--access-logfile - \
--error-logfile - \
--worker-class gevent \
--workers ${REALMS_WIKI_WORKERS} \
--bind 0.0.0.0:${REALMS_WIKI_PORT} \
--user deploy \
--group deploy \
--chdir /home/deploy/realms-wiki \
'realms:create_app()' >>/var/log/realms-wiki/realms-wiki.log 2>&1
|
loleg/realms-wiki
|
docker/realms-wiki.sh
|
Shell
|
gpl-2.0
| 748 |
#!/bin/sh
# Make sure chmod gives the right diagnostic for a readable,
# but inaccessible directory.
# Copyright (C) 2003-2016 Free Software Foundation, Inc.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
. "${srcdir=.}/tests/init.sh"; path_prepend_ ./src
print_ver_ chmod
skip_if_root_
mkdir -p d/no-x/y a/b || framework_failure_
chmod u=rw d/no-x || framework_failure_
# This must exit nonzero.
chmod -R o=r d >/dev/null 2>out && fail=1
prog=chmod
# NOTE: this code is the same for all tests/*/no-x tests.
# Depending on whether fts is using native fdopendir, we see one
# of the following diagnostics (note also the /y suffix in one case):
# prog: 'd/no-x': Permission denied
# prog: cannot access 'd/no-x/y': Permission denied
# prog: cannot read directory 'd/no-x': Permission denied
# Convert either of the latter two to the first one.
sed "s/^$prog: cannot access /$prog: /" out > t && mv t out
sed "s/^$prog: cannot read directory /$prog: /" out > t && mv t out
sed 's,d/no-x/y,d/no-x,' out > t && mv t out
cat <<EOF > exp
$prog: 'd/no-x': Permission denied
EOF
compare exp out || fail=1
cd a
# This will fail with ''chmod: fts_read failed: Permission denied''
chmod a-x . b 2> /dev/null && fail=1
# chmod must exit with status 1.
# Due to a bug in coreutils-5.93's fts.c, chmod would provoke
# an abort (exit with status 134) on recent glibc-based systems.
test $? = 1 || fail=1
Exit $fail
|
yuxuanchen1997/coreutils
|
tests/chmod/no-x.sh
|
Shell
|
gpl-3.0
| 2,001 |
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -u
DIR=`dirname "$0"`
DIR=`cd "${DIR}/.."; pwd`
CURDIR=`pwd`
cd $DIR/src
mvn clean package && \
cd $DIR/src/sparkbench && \
mkdir -p jars
for mr in MR1 MR2; do
for spark_version in 1.2 1.3 1.4; do
cp target/*-jar-with-dependencies.jar jars
mvn clean package -D spark$spark_version -D$mr
if [ $? -ne 0 ]; then
echo "Build failed for spark$spark_version and $mr, please check!"
exit 1
fi
done
done
cp jars/*.jar target/ && \
rm -rf jars
result=$?
cd $CURDIR
if [ $result -ne 0 ]; then
echo "Build failed, please check!"
else
echo "Build all done!"
fi
|
GayathriMurali/HiBench
|
bin/build-all.sh
|
Shell
|
apache-2.0
| 1,521 |
#!/usr/bin/env bash
set -x
set -e
# Decide what kind of documentation build to run, and run it.
#
# If the last commit message has a "[doc skip]" marker, do not build
# the doc. On the contrary if a "[doc build]" marker is found, build the doc
# instead of relying on the subsequent rules.
#
# We always build the documentation for jobs that are not related to a specific
# PR (e.g. a merge to master or a maintenance branch).
#
# If this is a PR, do a full build if there are some files in this PR that are
# under the "doc/" or "examples/" folders, otherwise perform a quick build.
#
# If the inspection of the current commit fails for any reason, the default
# behavior is to quick build the documentation.
get_build_type() {
if [ -z "$CIRCLE_SHA1" ]
then
echo SKIP: undefined CIRCLE_SHA1
return
fi
commit_msg=$(git log --format=%B -n 1 $CIRCLE_SHA1)
if [ -z "$commit_msg" ]
then
echo QUICK BUILD: failed to inspect commit $CIRCLE_SHA1
return
fi
if [[ "$commit_msg" =~ \[doc\ skip\] ]]
then
echo SKIP: [doc skip] marker found
return
fi
if [[ "$commit_msg" =~ \[doc\ quick\] ]]
then
echo QUICK: [doc quick] marker found
return
fi
if [[ "$commit_msg" =~ \[doc\ build\] ]]
then
echo BUILD: [doc build] marker found
return
fi
if [ -z "$CI_PULL_REQUEST" ]
then
echo BUILD: not a pull request
return
fi
git_range="origin/master...$CIRCLE_SHA1"
git fetch origin master >&2 || { echo "QUICK BUILD: failed to get changed filenames for $git_range"; return; }
filenames=$(git diff --name-only $git_range)
if [ -z "$filenames" ]
then
echo QUICK BUILD: no changed filenames for $git_range
return
fi
if echo "$filenames" | grep -q -e ^examples/
then
echo BUILD: detected examples/ filename modified in $git_range: $(echo "$filenames" | grep -e ^examples/ | head -n1)
return
fi
echo QUICK BUILD: no examples/ filename modified in $git_range:
echo "$filenames"
}
build_type=$(get_build_type)
if [[ "$build_type" =~ ^SKIP ]]
then
exit 0
fi
if [[ "$CIRCLE_BRANCH" =~ ^master$|^[0-9]+\.[0-9]+\.X$ && -z "$CI_PULL_REQUEST" ]]
then
MAKE_TARGET=dist # PDF linked into HTML
elif [[ "$build_type" =~ ^QUICK ]]
then
MAKE_TARGET=html-noplot
else
MAKE_TARGET=html
fi
# Installing required system packages to support the rendering of math
# notation in the HTML documentation
sudo -E apt-get -yq update
sudo -E apt-get -yq remove texlive-binaries --purge
sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes \
    install dvipng texlive-latex-base texlive-latex-extra \
    texlive-latex-recommended texlive-fonts-recommended
# deactivate circleci virtualenv and setup a miniconda env instead
if [[ `type -t deactivate` ]]; then
deactivate
fi
# Install dependencies with miniconda
wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh \
-O miniconda.sh
chmod +x miniconda.sh && ./miniconda.sh -b -p $MINICONDA_PATH
export PATH="$MINICONDA_PATH/bin:$PATH"
conda update --yes --quiet conda
# Configure the conda environment and put it in the path using the
# provided versions
conda create -n $CONDA_ENV_NAME --yes --quiet python numpy scipy \
cython nose coverage matplotlib sphinx=1.5 pillow
source activate $CONDA_ENV_NAME
# Build and install scikit-learn in dev mode
python setup.py develop
# The pipefail is requested to propagate exit code
set -o pipefail && cd doc && make $MAKE_TARGET 2>&1 | tee ~/log.txt
cd -
set +o pipefail
affected_doc_paths() {
files=$(git diff --name-only origin/master...$CIRCLE_SHA1)
echo "$files" | grep ^doc/.*\.rst | sed 's/^doc\/\(.*\)\.rst$/\1.html/'
echo "$files" | grep ^examples/.*.py | sed 's/^\(.*\)\.py$/auto_\1.html/'
sklearn_files=$(echo "$files" | grep '^sklearn/')
if [ -n "$sklearn_files" ]
then
grep -hlR -f<(echo "$sklearn_files" | sed 's/^/scikit-learn\/blob\/[a-z0-9]*\//') doc/_build/html/stable/modules/generated | cut -d/ -f5-
fi
}
if [ -n "$CI_PULL_REQUEST" ]
then
echo "The following documentation files may have been changed by PR #$CI_PULL_REQUEST:"
affected=$(affected_doc_paths)
echo "$affected" | sed 's|^|* http://scikit-learn.org/circle?'$CIRCLE_BUILD_NUM'/|'
(
echo '<html><body><ul>'
echo "$affected" | sed 's|.*|<li><a href="&">&</a></li>|'
echo '</ul></body></html>'
) > 'doc/_build/html/stable/_changed.html'
fi
|
Titan-C/scikit-learn
|
build_tools/circle/build_doc.sh
|
Shell
|
bsd-3-clause
| 4,321 |
#!/bin/bash
FN="metaboliteIDmapping_1.0.0.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.14/data/annotation/src/contrib/metaboliteIDmapping_1.0.0.tar.gz"
"https://bioarchive.galaxyproject.org/metaboliteIDmapping_1.0.0.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-metaboliteidmapping/bioconductor-metaboliteidmapping_1.0.0_src_all.tar.gz"
)
MD5="bd78ec373ce90fac1a10d2c64c462e77"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
curl $URL > $TARBALL
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks.
if [[ $(uname -s) == "Linux" ]]; then
if md5sum -c <<<"$MD5 $TARBALL"; then
SUCCESS=1
break
fi
elif [[ $(uname -s) == "Darwin" ]]; then
if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
SUCCESS=1
break
fi
fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
|
cokelaer/bioconda-recipes
|
recipes/bioconductor-metaboliteidmapping/post-link.sh
|
Shell
|
mit
| 1,343 |
#!/bin/sh
if [ -e "$1" ]
then
rm -r "$1"
fi
mkdir "$1"
cp qm.x IN qm.f "$1"
cp en.py "$1"
cp derivs.f90 "$1"
#cp pople "$1"
cd "$1" || exit 1
#qsub pople
./qm.x &
|
binghongcha08/pyQMD
|
QTM/MixQC/1.0.7/run.sh
|
Shell
|
gpl-3.0
| 135 |
#!/bin/bash
# added 2014-01-17 by rgerhards
# This file is part of the rsyslog project, released under ASL 2.0
echo ===============================================================================
echo \[rscript_lt.sh\]: testing rainerscript LT statement for two JSON variables
. $srcdir/diag.sh init
. $srcdir/diag.sh startup rscript_lt_var.conf
. $srcdir/diag.sh injectmsg 0 1
echo doing shutdown
. $srcdir/diag.sh shutdown-when-empty
echo wait on shutdown
. $srcdir/diag.sh wait-shutdown
. $srcdir/diag.sh seq-check 0 0
. $srcdir/diag.sh exit
|
madedotcom/rsyslog
|
tests/rscript_lt_var.sh
|
Shell
|
gpl-3.0
| 548 |
#
# Automated Testing Framework (atf)
#
# Copyright (c) 2007 The NetBSD Foundation, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND
# CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
atf_test_case main
main_head()
{
atf_set "descr" "Verifies that variable names with symbols not" \
"allowed as part of shell variable names work"
}
main_body()
{
h="$(atf_get_srcdir)/misc_helpers -s $(atf_get_srcdir)"
atf_check -s eq:0 -o match:'a.b: test value 1' \
-o match:'c-d: test value 2' -e ignore ${h} normalize
}
atf_init_test_cases()
{
atf_add_test_case main
}
# vim: syntax=sh:expandtab:shiftwidth=4:softtabstop=4
|
krichter722/bind9
|
unit/atf-src/atf-sh/normalize_test.sh
|
Shell
|
gpl-3.0
| 1,875 |
# getopt-like parser
parseopts() {
local opt= optarg= i= shortopts=$1
local -a longopts=() unused_argv=()
shift
while [[ $1 && $1 != '--' ]]; do
longopts+=("$1")
shift
done
shift
longoptmatch() {
local o longmatch=()
for o in "${longopts[@]}"; do
if [[ ${o%:} = "$1" ]]; then
longmatch=("$o")
break
fi
[[ ${o%:} = "$1"* ]] && longmatch+=("$o")
done
case ${#longmatch[*]} in
1)
# success, override with opt and return arg req (0 == none, 1 == required)
opt=${longmatch%:}
if [[ $longmatch = *: ]]; then
return 1
else
return 0
fi ;;
0)
# fail, no match found
return 255 ;;
*)
# fail, ambiguous match
printf "@SCRIPTNAME@: $(gettext "option '%s' is ambiguous; possibilities:")" "--$1"
printf " '%s'" "${longmatch[@]%:}"
printf '\n'
return 254 ;;
esac >&2
}
while (( $# )); do
case $1 in
--) # explicit end of options
shift
break
;;
-[!-]*) # short option
for (( i = 1; i < ${#1}; i++ )); do
opt=${1:i:1}
# option doesn't exist
if [[ $shortopts != *$opt* ]]; then
printf "@SCRIPTNAME@: $(gettext "invalid option") -- '%s'\n" "$opt" >&2
OPTRET=(--)
return 1
fi
OPTRET+=("-$opt")
# option requires optarg
if [[ $shortopts = *$opt:* ]]; then
# if we're not at the end of the option chunk, the rest is the optarg
if (( i < ${#1} - 1 )); then
OPTRET+=("${1:i+1}")
break
# if we're at the end, grab the next positional, if it exists
elif (( i == ${#1} - 1 )) && [[ $2 ]]; then
OPTRET+=("$2")
shift
break
# parse failure
else
printf "@SCRIPTNAME@: $(gettext "option requires an argument") -- '%s'\n" "$opt" >&2
OPTRET=(--)
return 1
fi
fi
done
;;
--?*=*|--?*) # long option
IFS='=' read -r opt optarg <<< "${1#--}"
longoptmatch "$opt"
case $? in
0)
# parse failure
if [[ $optarg ]]; then
printf "@SCRIPTNAME@: $(gettext "option '%s' does not allow an argument")\n" "--$opt" >&2
OPTRET=(--)
return 1
# --longopt
else
OPTRET+=("--$opt")
fi
;;
1)
# --longopt=optarg
if [[ $optarg ]]; then
OPTRET+=("--$opt" "$optarg")
# --longopt optarg
elif [[ $2 ]]; then
OPTRET+=("--$opt" "$2" )
shift
# parse failure
else
printf "@SCRIPTNAME@: $(gettext "option '%s' requires an argument")\n" "--$opt" >&2
OPTRET=(--)
return 1
fi
;;
254)
# ambiguous option -- error was reported for us by longoptmatch()
OPTRET=(--)
return 1
;;
255)
# parse failure
printf "@SCRIPTNAME@: $(gettext "invalid option") '--%s'\n" "$opt" >&2
OPTRET=(--)
return 1
;;
esac
;;
*) # non-option arg encountered, add it as a parameter
unused_argv+=("$1")
;;
esac
shift
done
# add end-of-opt terminator and any leftover positional parameters
OPTRET+=('--' "${unused_argv[@]}" "$@")
unset longoptmatch
return 0
}
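# Example usage (hypothetical option set, not part of the original library):
#
#   OPT_SHORT='o:v'
#   OPT_LONG=('output:' 'verbose')
#   parseopts "$OPT_SHORT" "${OPT_LONG[@]}" -- "$@" || exit 1
#   set -- "${OPTRET[@]}"
#   while true; do
#       case $1 in
#           -o|--output)  shift; outfile=$1 ;;
#           -v|--verbose) verbose=1 ;;
#           --)           shift; break ;;
#       esac
#       shift
#   done
#   # "$@" now holds the non-option arguments.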
|
elieux/pacman
|
scripts/library/parseopts.sh
|
Shell
|
gpl-2.0
| 3,133 |
#!/bin/sh
java -classpath "../src/zmq.jar:zmq-perf.jar" remote_lat $@
|
trevorbernard/jzmq
|
src/main/perf/remote_lat.sh
|
Shell
|
gpl-3.0
| 69 |
#!/bin/bash
#
# This script packages a project as a tar.gz file and a ready-to-run apk file.
#
# If you *can*, you should prefer "mup" instead.
#
# However, if your public server has unusual characteristics, or if "mup" might
# alter it inappropriately, you might want to adapt this script to your special
# needs.
#
# In the last section the script uses ssh to remotely run three
# scripts that you must create yourself, or take from the published examples.
#
echo "Checking dependencies."
APT_INSTALLS=0;
if [[ "$(dpkg -s openjdk-7-jdk 2> /dev/null | grep -c 'ok installed')" < "1" ]]; then
APT_INSTALLS=1;
fi
if [[ "$(dpkg -s lib32z1 2> /dev/null | grep -c 'ok installed')" < "1" ]]; then
APT_INSTALLS=1;
fi
if [[ "$(dpkg -s lib32stdc++6 2> /dev/null | grep -c 'ok installed')" < "1" ]]; then
APT_INSTALLS=1;
fi
if [[ "$(dpkg -s jq 2> /dev/null | grep -c 'ok installed')" < "1" ]]; then
APT_INSTALLS=1;
fi
if [[ "${APT_INSTALLS}" > "0" ]]; then
if [ "$EUID" -ne 0 ]; then
echo "There are dependencies to install. Please re-run this script as root"
exit 1
fi
apt-get update;
apt-get install --yes openjdk-7-jdk
apt-get install --yes lib32z1 lib32stdc++6
apt-get install --yes jq
exit
fi
#
if [ "$EUID" -eq 0 ]; then
echo "It is better to *NOT* run this script as root"
exit 1
fi
#
source ./utilsJson.sh
#
echo "Instantiating variables from settings.json"
parseJSON_public APP_VERSION
parseJSON_public APP_ID
parseJSON_public APP_NAME
parseJSON_public USE_MUGEN_GENERATOR
parseJSON_public PRODUCTION_MAIN_SERVER
parseJSON PRODUCTION_MONGO_SERVER
parseJSON ZIPALIGN_PATH
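# ${VAR//>} strips any '>' characters from the parsed value, and "eval echo"
# expands a leading ~ or embedded variables (same pattern is used below).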
ZIPALIGN_PATH="$(eval echo ${ZIPALIGN_PATH//>})"
parseJSON ALIGNMENT
parseJSON KEYSTORE_PATH
KEYSTORE_PATH="$(eval echo ${KEYSTORE_PATH//>})"
parseJSON KEYSTORE_PWD
parseJSON BUILD_DIRECTORY
BUILD_DIRECTORY="$(eval echo ${BUILD_DIRECTORY//>})"
parseJSON BLOB_MONGO_SERVER
parseJSON DEBUG_MODE
export MONGO_URL=mongodb://${PRODUCTION_MONGO_SERVER}
export PROJ=${PWD##*/}
#
#
export TARGET_SERVER=$(echo ${PRODUCTION_MAIN_SERVER} | awk -F/ '{print $3}')
export TARGET_DIRECTORY=${BUILD_DIRECTORY}/${PROJ}
#
echo "### Configuration for your '"${PROJ}"' project ... "
echo " ~ Public name : " ${APP_NAME} v${APP_VERSION}
echo " ~ Meteor packaging unique ID : " ${APP_ID}
echo " ~ Target host is : " ${TARGET_SERVER}
echo " ~ Target web site is : " ${PRODUCTION_MAIN_SERVER}
echo " ~ Mongo main database is at : " ${MONGO_URL}
echo " ~ Mongo BLOB database is at : " ${BLOB_MONGO_SERVER}
echo " ~ APK signing keys are stored at : " ${KEYSTORE_PATH}
echo " ~ Expose javascript debug symbols to public : " ${DEBUG_MODE}
echo " ~ Align android bundle to "$ALIGNMENT"-byte boundary using : " ${ZIPALIGN_PATH}
echo " ~ Temporary builds directory : " ${TARGET_DIRECTORY}
echo "### ~ ~ ~ "
#
echo "Checking/installing Android capabilities : "
meteor install-sdk android || { echo 'Failed to install Android SDK.' ; exit 1; }
#
echo
echo "Checking key exists in key store :"
export KEY_WORKS=$(keytool -list -v -storepass ${KEYSTORE_PWD} -keystore ~/.keystore -alias ${APP_NAME} | grep -c "Alias name: ${APP_NAME}")
if [[ "${KEY_WORKS}" != "1" ]]; then
echo "Probably you need to run this command : "
echo
echo "keytool -genkey -v -keystore ~/.keystore -alias ${APP_NAME} -keyalg RSA -keysize 2048 -validity 10000"
echo
exit
else
echo "✓ Found the key"
fi
#
echo ""
echo "**NOT** Checking/installing iOS capabilities : "
#meteor install-sdk ios
#
#
echo ""
echo ""
echo "Building project : ${APP_NAME} in ${BUILD_DIRECTORY}"
mkdir -p ${TARGET_DIRECTORY}
#
cp settings.json ./public/
rm -fr ${TARGET_DIRECTORY}
echo "Building WITHOUT public debug symbols for Android version. "
meteor build ${TARGET_DIRECTORY} --server=${PRODUCTION_MAIN_SERVER}
mv ${TARGET_DIRECTORY}/android/unaligned.apk ${TARGET_DIRECTORY}/android/${APP_NAME}_unaligned.apk
if [[ "${DEBUG_MODE}" -eq "yes" ]]; then
echo "Rebuilding WITH public debug symbols for browser version "
meteor build ${TARGET_DIRECTORY} --debug --server=${PRODUCTION_MAIN_SERVER}
fi
rm -f ./public/settings.json
#
pushd ${TARGET_DIRECTORY} > /dev/null
pushd ./android > /dev/null
echo "Sign the unaligned APK"
MYVARIABLE="$(jarsigner -storepass ${KEYSTORE_PWD} -tsa http://timestamp.digicert.com -digestalg SHA1 ${APP_NAME}_unaligned.apk ${APP_NAME})"
if [[ "$?" > "0" ]]; then
echo "----------"
echo ${MYVARIABLE}
echo "----------"
exit
fi
#
echo "Align to byte boundaries and verify."
${ZIPALIGN_PATH}/zipalign -f ${ALIGNMENT} ${APP_NAME}_unaligned.apk ${APP_NAME}_aligned.apk
# ${ZIPALIGN_PATH}/zipalign -f -v ${ALIGNMENT} ${APP_NAME}_unaligned.apk ${APP_NAME}_aligned.apk
#
echo
echo "Rename and relocate for easy deployment"
mv ${APP_NAME}_aligned.apk ..
mv unaligned.apk ../${APP_NAME}.apk
popd > /dev/null
#
echo
echo "Uploading ${PROJ}.tar.gz & ${APP_NAME}.apk to . . . "
pwd
scp ${PROJ}.tar.gz ${TARGET_SERVER}:~/incoming
scp ${APP_NAME}.apk ${TARGET_SERVER}:~/incoming
#
popd > /dev/null
#
echo
echo "Reply from ${TARGET_SERVER} :: Installing . . . "
ssh -t ${TARGET_SERVER} "sudo -u meteor /home/meteor/installProj.sh ${PROJ} ${APP_NAME}"
#
echo
echo
echo "Reply from ${TARGET_SERVER} :: Fixing modules . . . "
ssh -t ${TARGET_SERVER} "sudo -u root /home/meteor/fixRunEnv.sh"
#
echo
echo
echo "Reply from ${TARGET_SERVER} :: Restarting . . . "
ssh -t ${TARGET_SERVER} "sudo -u root /home/meteor/restartMeteor.sh"
#
|
radiegtya/meteoris2
|
productionPackager.sh
|
Shell
|
mit
| 5,626 |
#!/bin/bash
export MACHTYPE=x86_64
export BINDIR=$(pwd)/bin
mkdir -p $BINDIR
(cd kent/src/lib && make)
(cd kent/src/jkOwnLib && make)
(cd kent/src/hg/lib && make)
(cd kent/src/utils/stringify && make)
(cd kent/src/hg/pslCDnaFilter && make)
mkdir -p $PREFIX/bin
cp bin/pslCDnaFilter $PREFIX/bin
chmod +x $PREFIX/bin/pslCDnaFilter
|
JenCabral/bioconda-recipes
|
recipes/ucsc-pslcdnafilter/build.sh
|
Shell
|
mit
| 330 |
#!/bin/bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO(jbeda): Provide a way to override project
# gcloud multiplexing for shared GCE/GKE tests.
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/cluster/gce/config-common.sh"
# Specifying KUBE_GCE_API_ENDPOINT will override the default GCE Compute API endpoint (https://www.googleapis.com/compute/v1/).
# This endpoint has to be pointing to v1 api. For example, https://www.googleapis.com/compute/staging_v1/
GCE_API_ENDPOINT=${KUBE_GCE_API_ENDPOINT:-}
GCLOUD=gcloud
ZONE=${KUBE_GCE_ZONE:-us-central1-b}
REGION=${ZONE%-*}
RELEASE_REGION_FALLBACK=${RELEASE_REGION_FALLBACK:-false}
REGIONAL_KUBE_ADDONS=${REGIONAL_KUBE_ADDONS:-true}
NODE_SIZE=${NODE_SIZE:-n1-standard-2}
NUM_NODES=${NUM_NODES:-3}
MASTER_SIZE=${MASTER_SIZE:-n1-standard-$(get-master-size)}
MASTER_MIN_CPU_ARCHITECTURE=${MASTER_MIN_CPU_ARCHITECTURE:-} # To allow choosing better architectures.
MASTER_DISK_TYPE=pd-ssd
MASTER_DISK_SIZE=${MASTER_DISK_SIZE:-$(get-master-disk-size)}
MASTER_ROOT_DISK_SIZE=${MASTER_ROOT_DISK_SIZE:-$(get-master-root-disk-size)}
NODE_DISK_TYPE=${NODE_DISK_TYPE:-pd-standard}
NODE_DISK_SIZE=${NODE_DISK_SIZE:-100GB}
NODE_LOCAL_SSDS=${NODE_LOCAL_SSDS:-0}
# An extension to local SSDs allowing users to specify block/fs and SCSI/NVMe devices
# Format of this variable will be "#,scsi/nvme,block/fs" you can specify multiple
# configurations by separating them with a semicolon, e.g. "2,scsi,fs;1,nvme,block"
# is a request for 2 SCSI formatted and mounted SSDs and 1 NVMe block device SSD.
NODE_LOCAL_SSDS_EXT=${NODE_LOCAL_SSDS_EXT:-}
# Accelerators to be attached to each node. Format "type=<accelerator-type>,count=<accelerator-count>"
# More information on available GPUs here - https://cloud.google.com/compute/docs/gpus/
NODE_ACCELERATORS=${NODE_ACCELERATORS:-""}
REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-true}
PREEMPTIBLE_NODE=${PREEMPTIBLE_NODE:-false}
PREEMPTIBLE_MASTER=${PREEMPTIBLE_MASTER:-false}
KUBE_DELETE_NODES=${KUBE_DELETE_NODES:-true}
KUBE_DELETE_NETWORK=${KUBE_DELETE_NETWORK:-} # default value calculated below
CREATE_CUSTOM_NETWORK=${CREATE_CUSTOM_NETWORK:-false}
MASTER_OS_DISTRIBUTION=${KUBE_MASTER_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-gci}}
NODE_OS_DISTRIBUTION=${KUBE_NODE_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-gci}}
if [[ "${MASTER_OS_DISTRIBUTION}" == "coreos" ]]; then
MASTER_OS_DISTRIBUTION="container-linux"
fi
if [[ "${NODE_OS_DISTRIBUTION}" == "coreos" ]]; then
NODE_OS_DISTRIBUTION="container-linux"
fi
if [[ "${MASTER_OS_DISTRIBUTION}" == "cos" ]]; then
MASTER_OS_DISTRIBUTION="gci"
fi
if [[ "${NODE_OS_DISTRIBUTION}" == "cos" ]]; then
NODE_OS_DISTRIBUTION="gci"
fi
# GPUs supported in GCE do not have compatible drivers in Debian 7.
if [[ "${NODE_OS_DISTRIBUTION}" == "debian" ]]; then
NODE_ACCELERATORS=""
fi
# By default a cluster will be started with the master and nodes
# on Container-optimized OS (cos, previously known as gci). If
# you are updating the os image versions, update this variable.
# Also please update corresponding image for node e2e at:
# https://github.com/kubernetes/kubernetes/blob/master/test/e2e_node/jenkins/image-config.yaml
CVM_VERSION=${CVM_VERSION:-container-vm-v20170627}
GCI_VERSION=${KUBE_GCI_VERSION:-cos-stable-60-9592-90-0}
MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-}
MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-cos-cloud}
NODE_IMAGE=${KUBE_GCE_NODE_IMAGE:-${GCI_VERSION}}
NODE_IMAGE_PROJECT=${KUBE_GCE_NODE_PROJECT:-cos-cloud}
NODE_SERVICE_ACCOUNT=${KUBE_GCE_NODE_SERVICE_ACCOUNT:-default}
CONTAINER_RUNTIME=${KUBE_CONTAINER_RUNTIME:-docker}
CONTAINER_RUNTIME_ENDPOINT=${KUBE_CONTAINER_RUNTIME_ENDPOINT:-}
LOAD_IMAGE_COMMAND=${KUBE_LOAD_IMAGE_COMMAND:-docker load -i}
RKT_VERSION=${KUBE_RKT_VERSION:-1.23.0}
RKT_STAGE1_IMAGE=${KUBE_RKT_STAGE1_IMAGE:-coreos.com/rkt/stage1-coreos}
# MASTER_EXTRA_METADATA is the extra instance metadata on master instance separated by commas.
MASTER_EXTRA_METADATA=${KUBE_MASTER_EXTRA_METADATA:-${KUBE_EXTRA_METADATA:-}}
# NODE_EXTRA_METADATA is the extra instance metadata on node instance separated by commas.
NODE_EXTRA_METADATA=${KUBE_NODE_EXTRA_METADATA:-${KUBE_EXTRA_METADATA:-}}
NETWORK=${KUBE_GCE_NETWORK:-default}
# Enable network deletion by default (for kube-down), unless we're using 'default' network.
if [[ "${NETWORK}" == "default" ]]; then
KUBE_DELETE_NETWORK=${KUBE_DELETE_NETWORK:-false}
else
KUBE_DELETE_NETWORK=${KUBE_DELETE_NETWORK:-true}
fi
if [[ "${CREATE_CUSTOM_NETWORK}" == true ]]; then
SUBNETWORK="${SUBNETWORK:-${NETWORK}-custom-subnet}"
fi
INSTANCE_PREFIX="${KUBE_GCE_INSTANCE_PREFIX:-kubernetes}"
CLUSTER_NAME="${CLUSTER_NAME:-${INSTANCE_PREFIX}}"
MASTER_NAME="${INSTANCE_PREFIX}-master"
AGGREGATOR_MASTER_NAME="${INSTANCE_PREFIX}-aggregator"
INITIAL_ETCD_CLUSTER="${MASTER_NAME}"
ETCD_QUORUM_READ="${ENABLE_ETCD_QUORUM_READ:-false}"
MASTER_TAG="${INSTANCE_PREFIX}-master"
NODE_TAG="${INSTANCE_PREFIX}-minion"
CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-$(get-cluster-ip-range)}"
MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}"
# NODE_IP_RANGE is used when ENABLE_IP_ALIASES=true or CREATE_CUSTOM_NETWORK=true.
# It is the primary range in the subnet and is the range used for node instance IPs.
NODE_IP_RANGE="$(get-node-ip-range)"
NODE_SCOPES="${NODE_SCOPES:-monitoring,logging-write,storage-ro}"
# Extra docker options for nodes.
EXTRA_DOCKER_OPTS="${EXTRA_DOCKER_OPTS:-}"
SERVICE_CLUSTER_IP_RANGE="${SERVICE_CLUSTER_IP_RANGE:-10.0.0.0/16}" # formerly PORTAL_NET
ALLOCATE_NODE_CIDRS=true
# When set to true, Docker Cache is enabled by default as part of the cluster bring up.
ENABLE_DOCKER_REGISTRY_CACHE=true
# Optional: Deploy a L7 loadbalancer controller to fulfill Ingress requests:
# glbc - GCE L7 Load Balancer Controller
ENABLE_L7_LOADBALANCING="${KUBE_ENABLE_L7_LOADBALANCING:-glbc}"
# Optional: Cluster monitoring to setup as part of the cluster bring up:
# none - No cluster monitoring setup
# influxdb - Heapster, InfluxDB, and Grafana
# google - Heapster, Google Cloud Monitoring, and Google Cloud Logging
# stackdriver - Heapster, Google Cloud Monitoring (schema container), and Google Cloud Logging
# googleinfluxdb - Enable influxdb and google (except GCM)
# standalone - Heapster only. Metrics available via Heapster REST API.
ENABLE_CLUSTER_MONITORING="${KUBE_ENABLE_CLUSTER_MONITORING:-influxdb}"
# Optional: Enable Metrics Server. Metrics Server should be enabled everywhere,
# since it's a critical component, but in the first release we need a way to disable
# this in case of stability issues.
# TODO(piosz) remove this option once Metrics Server becomes stable.
ENABLE_METRICS_SERVER="${KUBE_ENABLE_METRICS_SERVER:-true}"
# Optional: Metadata agent to setup as part of the cluster bring up:
# none - No metadata agent
# stackdriver - Stackdriver metadata agent
# Metadata agent is a daemon set that provides metadata of kubernetes objects
# running on the same node for exporting metrics and logs.
ENABLE_METADATA_AGENT="${KUBE_ENABLE_METADATA_AGENT:-none}"
# Version tag of metadata agent
METADATA_AGENT_VERSION="${KUBE_METADATA_AGENT_VERSION:-0.2-0.0.13-5-watch}"
# One special node out of NUM_NODES would be created of this type if specified.
# Useful for scheduling heapster in large clusters with nodes of small size.
HEAPSTER_MACHINE_TYPE="${HEAPSTER_MACHINE_TYPE:-}"
# Historically fluentd was a manifest pod and then was migrated to a DaemonSet.
# To avoid two instances of fluentd running on a node during a cluster
# upgrade, the kubelet needs to label nodes on which fluentd is not running
# as a manifest pod, so the DaemonSet targets only those nodes.
# TODO(piosz): remove this in 1.8
NODE_LABELS="${KUBE_NODE_LABELS:-beta.kubernetes.io/fluentd-ds-ready=true}"
# NON_MASTER_NODE_LABELS are labels that will only be applied to non-master nodes.
NON_MASTER_NODE_LABELS="${KUBE_NON_MASTER_NODE_LABELS:-}"
# To avoid running Calico on a node that is not configured appropriately,
# label each Node so that the DaemonSet can run the Pods only on ready Nodes.
if [[ ${NETWORK_POLICY_PROVIDER:-} == "calico" ]]; then
NON_MASTER_NODE_LABELS="${NON_MASTER_NODE_LABELS:+${NON_MASTER_NODE_LABELS},}projectcalico.org/ds-ready=true"
fi
# Enable metadata concealment by firewalling pod traffic to the metadata server
# and run a proxy daemonset on nodes.
#
# TODO(#8867) Enable by default.
ENABLE_METADATA_CONCEALMENT="${ENABLE_METADATA_CONCEALMENT:-false}" # true, false
if [[ ${ENABLE_METADATA_CONCEALMENT:-} == "true" ]]; then
# Put the necessary label on the node so the daemonset gets scheduled.
NODE_LABELS="${NODE_LABELS},beta.kubernetes.io/metadata-proxy-ready=true"
# Add to the provider custom variables.
PROVIDER_VARS="${PROVIDER_VARS:-} ENABLE_METADATA_CONCEALMENT"
fi
# Optional: Enable node logging.
ENABLE_NODE_LOGGING="${KUBE_ENABLE_NODE_LOGGING:-true}"
LOGGING_DESTINATION="${KUBE_LOGGING_DESTINATION:-gcp}" # options: elasticsearch, gcp
# Optional: When set to true, Elasticsearch and Kibana will be setup as part of the cluster bring up.
ENABLE_CLUSTER_LOGGING="${KUBE_ENABLE_CLUSTER_LOGGING:-true}"
ELASTICSEARCH_LOGGING_REPLICAS=1
# Optional: Don't require https for registries in our local RFC1918 network
if [[ ${KUBE_ENABLE_INSECURE_REGISTRY:-false} == "true" ]]; then
EXTRA_DOCKER_OPTS="${EXTRA_DOCKER_OPTS} --insecure-registry 10.0.0.0/8"
fi
# Optional: customize runtime config
RUNTIME_CONFIG="${KUBE_RUNTIME_CONFIG:-}"
# Optional: set feature gates
FEATURE_GATES="${KUBE_FEATURE_GATES:-ExperimentalCriticalPodAnnotation=true}"
if [[ ! -z "${NODE_ACCELERATORS}" ]]; then
FEATURE_GATES="${FEATURE_GATES},DevicePlugins=true"
if [[ "${NODE_ACCELERATORS}" =~ .*type=([a-zA-Z0-9-]+).* ]]; then
NODE_LABELS="${NODE_LABELS},cloud.google.com/gke-accelerator=${BASH_REMATCH[1]}"
fi
fi
# Optional: Install cluster DNS.
# Set CLUSTER_DNS_CORE_DNS to 'true' to install CoreDNS instead of kube-dns.
CLUSTER_DNS_CORE_DNS="${CLUSTER_DNS_CORE_DNS:-false}"
ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}"
DNS_SERVER_IP="${KUBE_DNS_SERVER_IP:-10.0.0.10}"
DNS_DOMAIN="${KUBE_DNS_DOMAIN:-cluster.local}"
# Optional: Enable DNS horizontal autoscaler
ENABLE_DNS_HORIZONTAL_AUTOSCALER="${KUBE_ENABLE_DNS_HORIZONTAL_AUTOSCALER:-true}"
# Optional: Install cluster docker registry.
ENABLE_CLUSTER_REGISTRY="${KUBE_ENABLE_CLUSTER_REGISTRY:-false}"
CLUSTER_REGISTRY_DISK="${CLUSTER_REGISTRY_PD:-${INSTANCE_PREFIX}-kube-system-kube-registry}"
CLUSTER_REGISTRY_DISK_SIZE="${CLUSTER_REGISTRY_DISK_SIZE:-200GB}"
CLUSTER_REGISTRY_DISK_TYPE_GCE="${CLUSTER_REGISTRY_DISK_TYPE_GCE:-pd-standard}"
# Optional: Install Kubernetes UI
ENABLE_CLUSTER_UI="${KUBE_ENABLE_CLUSTER_UI:-true}"
# Optional: Install node problem detector.
# none - Do not run node problem detector.
# daemonset - Run node problem detector as daemonset.
# standalone - Run node problem detector as standalone system daemon.
if [[ "${NODE_OS_DISTRIBUTION}" == "gci" ]]; then
# Enable standalone mode by default for gci.
ENABLE_NODE_PROBLEM_DETECTOR="${KUBE_ENABLE_NODE_PROBLEM_DETECTOR:-standalone}"
else
ENABLE_NODE_PROBLEM_DETECTOR="${KUBE_ENABLE_NODE_PROBLEM_DETECTOR:-daemonset}"
fi
NODE_PROBLEM_DETECTOR_VERSION="${NODE_PROBLEM_DETECTOR_VERSION:-}"
NODE_PROBLEM_DETECTOR_TAR_HASH="${NODE_PROBLEM_DETECTOR_TAR_HASH:-}"
# Optional: Create autoscaler for cluster's nodes.
ENABLE_CLUSTER_AUTOSCALER="${KUBE_ENABLE_CLUSTER_AUTOSCALER:-false}"
if [[ "${ENABLE_CLUSTER_AUTOSCALER}" == "true" ]]; then
AUTOSCALER_MIN_NODES="${KUBE_AUTOSCALER_MIN_NODES:-}"
AUTOSCALER_MAX_NODES="${KUBE_AUTOSCALER_MAX_NODES:-}"
AUTOSCALER_ENABLE_SCALE_DOWN="${KUBE_AUTOSCALER_ENABLE_SCALE_DOWN:-true}"
AUTOSCALER_EXPANDER_CONFIG="${KUBE_AUTOSCALER_EXPANDER_CONFIG:---expander=price}"
fi
# Optional: Enable Rescheduler
ENABLE_RESCHEDULER="${KUBE_ENABLE_RESCHEDULER:-true}"
# Optional: Enable allocation of pod IPs using IP aliases.
#
# BETA FEATURE.
#
# IP_ALIAS_SIZE is the size of the podCIDR allocated to a node.
# IP_ALIAS_SUBNETWORK is the subnetwork to allocate from. If empty, a
# new subnetwork will be created for the cluster.
ENABLE_IP_ALIASES=${KUBE_GCE_ENABLE_IP_ALIASES:-false}
if [ ${ENABLE_IP_ALIASES} = true ]; then
# Size of ranges allocated to each node. Currently supports only /32 and /24.
IP_ALIAS_SIZE=${KUBE_GCE_IP_ALIAS_SIZE:-/24}
IP_ALIAS_SUBNETWORK=${KUBE_GCE_IP_ALIAS_SUBNETWORK:-${INSTANCE_PREFIX}-subnet-default}
# Reserve the services IP space to avoid being allocated for other GCP resources.
SERVICE_CLUSTER_IP_SUBNETWORK=${KUBE_GCE_SERVICE_CLUSTER_IP_SUBNETWORK:-${INSTANCE_PREFIX}-subnet-services}
# Add to the provider custom variables.
PROVIDER_VARS="${PROVIDER_VARS:-} ENABLE_IP_ALIASES"
fi
# Enable GCE Alpha features.
if [[ -n "${GCE_ALPHA_FEATURES:-}" ]]; then
PROVIDER_VARS="${PROVIDER_VARS:-} GCE_ALPHA_FEATURES"
fi
# Disable Docker live-restore.
if [[ -n "${DISABLE_DOCKER_LIVE_RESTORE:-}" ]]; then
PROVIDER_VARS="${PROVIDER_VARS:-} DISABLE_DOCKER_LIVE_RESTORE"
fi
# Override default GLBC image
if [[ -n "${GCE_GLBC_IMAGE:-}" ]]; then
PROVIDER_VARS="${PROVIDER_VARS:-} GCE_GLBC_IMAGE"
fi
# Admission Controllers to invoke prior to persisting objects in cluster
ADMISSION_CONTROL=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,PersistentVolumeClaimResize,DefaultTolerationSeconds,NodeRestriction,Priority,PVCProtection
if [[ "${ENABLE_POD_SECURITY_POLICY:-}" == "true" ]]; then
ADMISSION_CONTROL="${ADMISSION_CONTROL},PodSecurityPolicy"
fi
# ResourceQuota must come last; otherwise a creation may be recorded even though the pod was forbidden.
ADMISSION_CONTROL="${ADMISSION_CONTROL},ResourceQuota"
# Optional: if set to true kube-up will automatically check for existing resources and clean them up.
KUBE_UP_AUTOMATIC_CLEANUP=${KUBE_UP_AUTOMATIC_CLEANUP:-false}
# Storage backend. 'etcd2' supported, 'etcd3' experimental.
STORAGE_BACKEND=${STORAGE_BACKEND:-}
# Networking plugin specific settings.
NETWORK_PROVIDER="${NETWORK_PROVIDER:-kubenet}" # none, opencontrail, kubenet
OPENCONTRAIL_TAG="${OPENCONTRAIL_TAG:-R2.20}"
OPENCONTRAIL_KUBERNETES_TAG="${OPENCONTRAIL_KUBERNETES_TAG:-master}"
OPENCONTRAIL_PUBLIC_SUBNET="${OPENCONTRAIL_PUBLIC_SUBNET:-10.1.0.0/16}"
# Network Policy plugin specific settings.
NETWORK_POLICY_PROVIDER="${NETWORK_POLICY_PROVIDER:-none}" # calico
NON_MASQUERADE_CIDR="0.0.0.0/0"
# How should the kubelet configure hairpin mode?
HAIRPIN_MODE="${HAIRPIN_MODE:-promiscuous-bridge}" # promiscuous-bridge, hairpin-veth, none
# Optional: if set to true, kube-up will configure the cluster to run e2e tests.
E2E_STORAGE_TEST_ENVIRONMENT="${KUBE_E2E_STORAGE_TEST_ENVIRONMENT:-false}"
# Evict pods whenever compute resource availability on the nodes gets below a threshold.
EVICTION_HARD="${EVICTION_HARD:-memory.available<250Mi,nodefs.available<10%,nodefs.inodesFree<5%}"
# Optional: custom scheduling algorithm
SCHEDULING_ALGORITHM_PROVIDER="${SCHEDULING_ALGORITHM_PROVIDER:-}"
# Optional: install a default StorageClass
ENABLE_DEFAULT_STORAGE_CLASS="${ENABLE_DEFAULT_STORAGE_CLASS:-true}"
# Optional: Enable legacy ABAC policy that makes all service accounts superusers.
ENABLE_LEGACY_ABAC="${ENABLE_LEGACY_ABAC:-false}" # true, false
# TODO(dawn1107): Remove this once the flag is built into CVM image.
# Kernel panic upon soft lockup issue
SOFTLOCKUP_PANIC="${SOFTLOCKUP_PANIC:-false}" # true, false
# Indicates if the values (i.e. KUBE_USER and KUBE_PASSWORD for basic
# authentication) in metadata should be treated as canonical, and therefore disk
# copies ought to be recreated/clobbered.
METADATA_CLOBBERS_CONFIG="${METADATA_CLOBBERS_CONFIG:-false}"
ENABLE_BIG_CLUSTER_SUBNETS="${ENABLE_BIG_CLUSTER_SUBNETS:-false}"
if [[ "${ENABLE_APISERVER_BASIC_AUDIT:-}" == "true" ]]; then
echo "Warning: Basic audit logging is deprecated and will be removed. Please use advanced auditing instead."
fi
if [[ -n "${LOGROTATE_FILES_MAX_COUNT:-}" ]]; then
PROVIDER_VARS="${PROVIDER_VARS:-} LOGROTATE_FILES_MAX_COUNT"
fi
if [[ -n "${LOGROTATE_MAX_SIZE:-}" ]]; then
PROVIDER_VARS="${PROVIDER_VARS:-} LOGROTATE_MAX_SIZE"
fi
# Fluentd requirements
FLUENTD_GCP_MEMORY_LIMIT="${FLUENTD_GCP_MEMORY_LIMIT:-300Mi}"
FLUENTD_GCP_CPU_REQUEST="${FLUENTD_GCP_CPU_REQUEST:-100m}"
FLUENTD_GCP_MEMORY_REQUEST="${FLUENTD_GCP_MEMORY_REQUEST:-200Mi}"
# Heapster requirements
HEAPSTER_GCP_BASE_MEMORY="${HEAPSTER_GCP_BASE_MEMORY:-140Mi}"
HEAPSTER_GCP_MEMORY_PER_NODE="${HEAPSTER_GCP_MEMORY_PER_NODE:-4}"
HEAPSTER_GCP_BASE_CPU="${HEAPSTER_GCP_BASE_CPU:-80m}"
HEAPSTER_GCP_CPU_PER_NODE="${HEAPSTER_GCP_CPU_PER_NODE:-0.5}"
# Adding to PROVIDER_VARS, since this is GCP-specific.
PROVIDER_VARS="${PROVIDER_VARS:-} FLUENTD_GCP_MEMORY_LIMIT FLUENTD_GCP_CPU_REQUEST FLUENTD_GCP_MEMORY_REQUEST HEAPSTER_GCP_BASE_MEMORY HEAPSTER_GCP_MEMORY_PER_NODE HEAPSTER_GCP_BASE_CPU HEAPSTER_GCP_CPU_PER_NODE"
# prometheus-to-sd configuration
PROMETHEUS_TO_SD_ENDPOINT="${PROMETHEUS_TO_SD_ENDPOINT:-https://monitoring.googleapis.com/}"
PROMETHEUS_TO_SD_PREFIX="${PROMETHEUS_TO_SD_PREFIX:-custom.googleapis.com}"
ENABLE_PROMETHEUS_TO_SD="${ENABLE_PROMETHEUS_TO_SD:-false}"
# TODO(#51292): Make kube-proxy Daemonset default and remove the configuration here.
# Optional: [Experiment Only] Run kube-proxy as a DaemonSet if set to true, run as static pods otherwise.
KUBE_PROXY_DAEMONSET="${KUBE_PROXY_DAEMONSET:-false}" # true, false
# Optional: duration of cluster signed certificates.
CLUSTER_SIGNING_DURATION="${CLUSTER_SIGNING_DURATION:-}"
# Optional: enable pod priority
ENABLE_POD_PRIORITY="${ENABLE_POD_PRIORITY:-}"
if [[ "${ENABLE_POD_PRIORITY}" == "true" ]]; then
FEATURE_GATES="${FEATURE_GATES},PodPriority=true"
fi
# Optional: enable certificate rotation of the kubelet certificates.
ROTATE_CERTIFICATES="${ROTATE_CERTIFICATES:-}"
# The number of services that are allowed to sync concurrently. Will be passed
# into kube-controller-manager via `--concurrent-service-syncs`
CONCURRENT_SERVICE_SYNCS="${CONCURRENT_SERVICE_SYNCS:-}"
|
tmckayus/oshinko-cli
|
vendor/k8s.io/kubernetes/cluster/gce/config-default.sh
|
Shell
|
apache-2.0
| 18,658 |
#!/bin/bash
# Wraps luciphor2.jar
set -o pipefail
# Find original directory of bash script, resolving symlinks
# http://stackoverflow.com/questions/59895/can-a-bash-script-tell-what-directory-its-stored-in/246128#246128
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
SOURCE="$(readlink "$SOURCE")"
[[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located
done
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
JAR_DIR=$DIR
java=java
if [ -e "$JAVA_HOME/bin/java" ]
then
java="$JAVA_HOME/bin/java"
fi
# extract memory and system property Java arguments from the list of provided arguments
# http://java.dzone.com/articles/better-java-shell-script
default_jvm_mem_opts="-Xms512m -Xmx1g"
jvm_mem_opts=""
jvm_prop_opts=""
pass_args=""
for arg in "$@"; do
case $arg in
'-D'*)
jvm_prop_opts="$jvm_prop_opts $arg"
;;
'-XX'*)
jvm_prop_opts="$jvm_prop_opts $arg"
;;
'-Xm'*)
jvm_mem_opts="$jvm_mem_opts $arg"
;;
*)
pass_args="$pass_args $arg"
;;
esac
done
if [ "$jvm_mem_opts" == "" ] && [ -z ${_JAVA_OPTIONS+x} ]; then
jvm_mem_opts="$default_jvm_mem_opts"
fi
pass_arr=($pass_args)
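# If the first pass-through argument looks like a fully qualified class name (org.*),
# run that class from the jar's classpath; otherwise launch the jar's default main class.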
if [[ ${pass_arr[0]} == org* ]]
then
eval "$java" $jvm_mem_opts $jvm_prop_opts -cp "$JAR_DIR/luciphor2.jar" $pass_args
else
eval "$java" $jvm_mem_opts $jvm_prop_opts -jar "$JAR_DIR/luciphor2.jar" $pass_args
fi
exit
|
phac-nml/bioconda-recipes
|
recipes/luciphor2/luciphor2.sh
|
Shell
|
mit
| 1,664 |
#!/bin/sh
# Run this to generate all the initial makefiles, etc.
srcdir=`dirname $0`
test -z "$srcdir" && srcdir=.
DIE=0
(test -f $srcdir/configure.ac) || {
echo "**Error**: Directory "\`$srcdir\'" does not look like the top-level package directory"
exit 1
}
(autoconf --version) < /dev/null > /dev/null 2>&1 || {
echo
echo "**Error**: You must have \`autoconf' installed."
echo "Download the appropriate package for your distribution,"
echo "or get the source tarball at ftp://ftp.gnu.org/pub/gnu/"
DIE=1
}
(intltoolize --version) < /dev/null > /dev/null 2>&1 || {
echo
echo "**Error**: You must have \`intltool' installed."
echo "You can get it from:"
echo " ftp://ftp.gnome.org/pub/GNOME/"
DIE=1
}
(glib-gettextize --version) < /dev/null > /dev/null 2>&1 || {
echo
echo "**Error**: You must have \`glib' installed."
echo "You can get it from: ftp://ftp.gtk.org/pub/gtk"
DIE=1
}
(libtoolize --version) < /dev/null > /dev/null 2>&1 || {
(glibtoolize --version) < /dev/null > /dev/null 2>&1 || {
echo
echo "**Error**: You must have \`libtool' installed."
echo "You can get it from:"
echo " http://www.gnu.org/software/libtool/"
DIE=1
}
}
(pkg-config --version) < /dev/null > /dev/null 2>&1 || {
echo
echo "You must have pkg-config installed to compile $package."
echo "Download the appropriate package for your distribution."
result="no"
DIE=1
}
(automake --version) < /dev/null > /dev/null 2>&1 || {
echo
echo "**Error**: You must have \`automake' (1.7 or later) installed."
echo "You can get it from: ftp://ftp.gnu.org/pub/gnu/"
DIE=1
NO_AUTOMAKE=yes
}
# if no automake, don't bother testing for aclocal
test -n "$NO_AUTOMAKE" || (aclocal --version) < /dev/null > /dev/null 2>&1 || {
echo
echo "**Error**: Missing \`aclocal'. The version of \`automake'"
echo "installed doesn't appear recent enough."
echo "You can get automake from ftp://ftp.gnu.org/pub/gnu/"
DIE=1
}
if test "$DIE" -eq 1; then
exit 1
fi
if test -z "$*" -a "$NOCONFIGURE" != 1; then
echo "**Warning**: I am going to run \`configure' with no arguments."
echo "If you wish to pass any to it, please specify them on the"
echo \`$0\'" command line."
echo
fi
echo "Processing configure.ac"
test -d build-aux || mkdir build-aux
echo "no" | glib-gettextize --force --copy
intltoolize --copy --force --automake
libtoolize --copy --force || glibtoolize --copy --force
aclocal -I m4
autoheader
automake --add-missing --copy --gnu
autoconf
if [ "$NOCONFIGURE" = 1 ]; then
echo "Done. configure skipped."
exit 0;
fi
echo "Running $srcdir/configure $@ ..."
$srcdir/configure "$@" && echo "Now type 'make' to compile." || exit 1
|
Akronix/geany
|
autogen.sh
|
Shell
|
gpl-2.0
| 2,746 |
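# Conda pre-unlink hook: remove the LRBase.Mmu.eg.db package from the bundled R library.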
R CMD REMOVE --library=$PREFIX/lib/R/library/ LRBase.Mmu.eg.db
|
joachimwolff/bioconda-recipes
|
recipes/bioconductor-lrbase.mmu.eg.db/pre-unlink.sh
|
Shell
|
mit
| 63 |
#!/bin/bash
set -eu -o pipefail
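# Install the smeg scripts and data under a versioned share directory and expose smeg on PATH.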
outdir=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $outdir
mkdir -p $PREFIX/bin
cd $SRC_DIR
cp -R dnaA_database $outdir
cp *.R $outdir/
cp smeg growth_est_denovo growth_est_ref build_sp pileupParser uniqueSNPmultithreading uniqueClusterSNP $outdir
chmod +x $outdir/smeg
chmod +x $outdir/uniqueSNPmultithreading
chmod +x $outdir/pileupParser
chmod +x $outdir/growth_est_denovo
chmod +x $outdir/growth_est_ref
chmod +x $outdir/build_sp
chmod +x $outdir/uniqueClusterSNP
ln -s $outdir/smeg $PREFIX/bin/smeg
|
roryk/recipes
|
recipes/smeg/build.sh
|
Shell
|
mit
| 556 |
#!/bin/sh
set -e
DEPLOY=/home/YOU/deployment
SOURCE=/home/YOU/projects/mongrel2
cd $SOURCE/examples/chat
# WARNING: on some systems the nohup doesn't work, like OSX
# try running it without
nohup python -u chat.py > chat.log 2>&1 &
echo $! > $DEPLOY/profiles/chat/chat.pid
|
krattai/noo-ebs
|
ref_code/mongrel2/docs/manual/inputs/procer_script_for_chat_demo.sh
|
Shell
|
bsd-2-clause
| 275 |
#!/usr/bin/env bash
function GetNativeInstallDirectory {
local install_dir
if [[ -z $NETCOREENG_INSTALL_DIRECTORY ]]; then
install_dir=$HOME/.netcoreeng/native/
else
install_dir=$NETCOREENG_INSTALL_DIRECTORY
fi
echo $install_dir
return 0
}
function GetTempDirectory {
echo $(GetNativeInstallDirectory)temp/
return 0
}
function ExpandZip {
local zip_path=$1
local output_directory=$2
local force=${3:-false}
echo "Extracting $zip_path to $output_directory"
if [[ -d $output_directory ]] && [[ $force = false ]]; then
echo "Directory '$output_directory' already exists, skipping extract"
return 0
fi
if [[ -d $output_directory ]]; then
echo "'Force flag enabled, but '$output_directory' exists. Removing directory"
rm -rf $output_directory
if [[ $? != 0 ]]; then
echo Unable to remove '$output_directory'>&2
return 1
fi
fi
echo "Creating directory: '$output_directory'"
mkdir -p $output_directory
echo "Extracting archive"
tar -xf $zip_path -C $output_directory
if [[ $? != 0 ]]; then
echo "Unable to extract '$zip_path'" >&2
return 1
fi
return 0
}
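# Map uname output to the OS names used by these scripts; prints nothing on unsupported platforms.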
function GetCurrentOS {
local unameOut="$(uname -s)"
case $unameOut in
Linux*) echo "Linux";;
Darwin*) echo "MacOS";;
esac
return 0
}
function GetFile {
local uri=$1
local path=$2
local force=${3:-false}
local download_retries=${4:-5}
local retry_wait_time_seconds=${5:-30}
if [[ -f $path ]]; then
if [[ $force = false ]]; then
echo "File '$path' already exists. Skipping download"
return 0
else
rm -rf $path
fi
fi
if [[ -f $uri ]]; then
echo "'$uri' is a file path, copying file to '$path'"
cp $uri $path
return $?
fi
echo "Downloading $uri"
# Use curl if available, otherwise use wget
if command -v curl > /dev/null; then
curl "$uri" -sSL --retry $download_retries --retry-delay $retry_wait_time_seconds --create-dirs -o "$path" --fail
else
wget -q -O "$path" "$uri" --tries="$download_retries"
fi
return $?
}
function GetTempPathFileName {
local path=$1
local temp_dir=$(GetTempDirectory)
local temp_file_name=$(basename $path)
echo $temp_dir$temp_file_name
return 0
}
function DownloadAndExtract {
local uri=$1
local installDir=$2
local force=${3:-false}
local download_retries=${4:-5}
local retry_wait_time_seconds=${5:-30}
local temp_tool_path=$(GetTempPathFileName $uri)
echo "downloading to: $temp_tool_path"
# Download file
GetFile "$uri" "$temp_tool_path" $force $download_retries $retry_wait_time_seconds
if [[ $? != 0 ]]; then
echo "Failed to download '$uri' to '$temp_tool_path'." >&2
return 1
fi
# Extract File
echo "extracting from $temp_tool_path to $installDir"
ExpandZip "$temp_tool_path" "$installDir" $force $download_retries $retry_wait_time_seconds
if [[ $? != 0 ]]; then
echo "Failed to extract '$temp_tool_path' to '$installDir'." >&2
return 1
fi
return 0
}
function NewScriptShim {
local shimpath=$1
local tool_file_path=$2
local force=${3:-false}
echo "Generating '$shimpath' shim"
if [[ -f $shimpath ]]; then
if [[ $force = false ]]; then
echo "File '$shimpath' already exists." >&2
return 1
else
rm -rf $shimpath
fi
fi
if [[ ! -f $tool_file_path ]]; then
echo "Specified tool file path:'$tool_file_path' does not exist" >&2
return 1
fi
local shim_contents=$'#!/usr/bin/env bash\n'
shim_contents+="SHIMARGS="$'$1\n'
shim_contents+="$tool_file_path"$' $SHIMARGS\n'
# Write shim file
echo "$shim_contents" > $shimpath
chmod +x $shimpath
echo "Finished generating shim '$shimpath'"
return $?
}
|
zenos-os/zenos
|
vendor/corert/eng/common/native/common-library.sh
|
Shell
|
mit
| 3,733 |
#!/bin/sh
set -e
while true; do
time go install
mog -w -dev
if [ $? != 0 ] ; then
exit
fi
echo restarting
done
|
shazow/mog
|
w.sh
|
Shell
|
isc
| 119 |
#!/usr/bin/env bash
# Author: Nico Trost
# Helper function
function display_help()
{
echo "rocSPARSE benchmark helper script"
echo " [-h|--help] prints this help message"
echo " [-d|--device] select device"
echo " [-p|--path] path to rocsparse-bench"
}
# Check if getopt command is installed
type getopt > /dev/null
if [[ $? -ne 0 ]]; then
echo "This script uses getopt to parse arguments; try installing the util-linux package";
exit 1;
fi
dev=0
path=../../build/release/clients/staging
# Parse command line parameters
getopt -T
if [[ $? -eq 4 ]]; then
GETOPT_PARSE=$(getopt --name "${0}" --longoptions help,device:,path: --options hd:p: -- "$@")
else
echo "Need a new version of getopt"
exit 1
fi
if [[ $? -ne 0 ]]; then
echo "getopt invocation failed; could not parse the command line";
exit 1
fi
eval set -- "${GETOPT_PARSE}"
while true; do
case "${1}" in
-h|--help)
display_help
exit 0
;;
-d|--device)
dev=${2}
shift 2 ;;
-p|--path)
path=${2}
shift 2 ;;
--) shift ; break ;;
*) echo "Unexpected command line parameter received; aborting";
exit 1
;;
esac
done
bench=$path/rocsparse-bench
# Check if binary is available
if [ ! -f $bench ]; then
echo $bench not found, exit...
exit 1
else
echo ">>" $(realpath $(ldd $bench | grep rocsparse | awk '{print $3;}'))
fi
# Generate logfile name
logname=dcsrsv_$(date +'%Y%m%d%H%M%S').log
truncate -s 0 $logname
# Run csrsv for all matrices available
for filename in ./matrices/*.csr; do
$bench -f csrsv --precision d --device $dev --alpha 1 --iters 1000 --rocalution $filename 2>&1 | tee -a $logname
done
|
ROCmSoftwarePlatform/rocSPARSE
|
scripts/performance/dcsrsv.sh
|
Shell
|
mit
| 1,788 |
# define the different directories
# the default config creates all directories in HOME
export BASE_DIR=$HOME
export CM_DIR=$BASE_DIR/android/system
export OUT_DIR=$CM_DIR/out
export CCACHE_DIR=$HOME/.ccache
export CM_ARCHIVE_PARTITION=/dev/xvdf1
export CM_ARCHIVE_DIR=$BASE_DIR/mnt
export CM_ARCHIVE_NAME=cmrepo.7z
# configure ramdisk usage
# needs a lot of RAM (!!!)
export CM_RAMDISK_ENABLED=1 # should we use a ramdisk for the cm repo?
export CM_RAMDISK_SIZE=30G # a CM11 tree needs at least 26 GB of memory
export OUT_RAMDISK_ENABLED=1 # should we use a ramdisk for out?
export OUT_RAMDISK_SIZE=25G # at least 22 GB
export CCACHE_RAMDISK_ENABLED=1 # should we use a ramdisk for the ccache?
export CCACHE_RAMDISK_SIZE=50G # the ccache constantly grows, 50 GB should be enough
export CM_ARCHIVE_ENABLED=1 # load the initial repo from an archive
export PACKAGES_TO_INSTALL="oracle-java7-installer oracle-java7-set-default \
git git-core gnupg flex bison gperf libsdl1.2-dev \
libesd0-dev libwxgtk2.8-dev squashfs-tools \
build-essential zip curl libncurses5-dev zlib1g-dev \
pngcrush schedtool libxml2 libxml2-utils xsltproc \
g++-multilib lib32z1-dev lib32ncurses5-dev \
lib32readline-gplv2-dev gcc-multilib p7zip-full"
|
mawatech/setup_cm_env
|
old/env.sh
|
Shell
|
mit
| 1,259 |
#!/bin/bash
python __init__.py
|
Fusxfaranto/Robcxjo
|
start.sh
|
Shell
|
mit
| 32 |
#!/bin/bash
dir="$(pwd)"
show_help() {
echo -e "Usage: $0 [-h] [-fuc]\n"
echo " -h Show this help message."
echo " -f Force install."
echo " -u Install *all* config files."
echo " -c Copy files instead of using symlinking."
exit 0
}
force=false
full=false
tool="ln"
# Grab arguments
while getopts "h?fuc" opt; do
case "$opt" in
h|\?) show_help ;;
f) force=true ;;
u) full=true ;;
c) tool="cp" ;;
esac
done
# Check dependencies
echo "# Checking dependencies..."
deps=("git" "ln" "cp")
for d in ${deps[@]}; do
if ! type "$d"; then
echo "error: $d is required to run $0."
exit 1
fi
done
# Update submodules
echo -e "\n# Updating submodules..."
git submodule init
git submodule update --recursive
# Build rbin
pushd .bin/rbin
make
popd
# Create basic $HOME structure
mkdir -p "$HOME/.bin"
mkdir -p "$HOME/.config"
# Basic Install
basic_install=(\
".bashrc"\
".dir_colors"\
".mplayer"\
".mpv"\
".profile"\
".tmux.conf"\
".toprc"\
".twmrc"
".vim"\
".vimrc"\
".npmrc"\
".Xresources"\
".gitconfig"\
".gnupg/gpg-agent.conf"
)
# Full Install
full_install=(\
${basic_install[@]}\
".abcde.conf"\
".gemrc"\
".irssi"\
".toprc"\
".vnc"\
".xinitrc"\
".config/dunst"\
".config/mpv"\
".config/termite"\
.bin/*\
)
# What array will we use?
if [ $full == "true" ]; then
mkdir -p ~/.gnupg
array=${full_install[@]}
else
array=${basic_install[@]}
fi
# Copy/Symlink the files
[ "$tool" == "cp" ] && echo -e "\n# Copying files..." || echo -e "\n# Symlinking files..."
for f in ${array[@]}; do
[ $force == "true" ] && rm -rf "$HOME/$f"
# Make sure the destination directory exists before copying/symlinking
mkdir -p "$HOME/$(dirname "$f")"
if [ "$tool" == "cp" ]; then
cp -rv "$(readlink -f "$f")" "$HOME/$f"
elif [ "$tool" == "ln" ]; then
ln -sv "$dir/$f" "$HOME/$(dirname "$f")"
fi
done
# nvim config (symlink to .vim)
mkdir -p ~/.config
ln -s ~/.vim ~/.config/nvim
# Update Vundle packages
if type vim &>/dev/null; then
vim -c BundleInstall -c qa
fi
|
ryanmjacobs/ryans_dotfiles
|
setup.sh
|
Shell
|
mit
| 2,132 |
source stage0n_variables
source stage01_variables
source stage02_variables
PKGNAME=bash
PKGVERSION=4.4.18
# Download:
[ -f ${SRCDIR}/${PKGNAME}-${PKGVERSION}.tar.gz ] || wget -O ${SRCDIR}/${PKGNAME}-${PKGVERSION}.tar.gz \
ftp://ftp.gnu.org/gnu/bash/${PKGNAME}-${PKGVERSION}.tar.gz
# Prepare build:
workdir=$(pwd)
mkdir -p ${CLFS}/build/${PKGNAME}-${PKGVERSION}
cd ${CLFS}/build/${PKGNAME}-${PKGVERSION}
tar xvzf ${SRCDIR}/${PKGNAME}-${PKGVERSION}.tar.gz
# Patch
cd ${CLFS}/build/${PKGNAME}-${PKGVERSION}/${PKGNAME}-${PKGVERSION}
# cat "${workdir}/patches/bash-4.3-upstream_fixes-1.patch" | patch -p1
# Build and install
./configure --prefix=/usr \
--sysconfdir=/etc \
--host=${CLFS_TARGET} \
--without-bash-malloc \
--with-curses=no \
--enable-static-link
make -j $( grep -c processor /proc/cpuinfo ) LDFLAGS=-all-static
make install DESTDIR=${CLFS}/targetfs
${CLFS}/cross-tools/bin/${CLFS_TARGET}-strip ${CLFS}/targetfs/usr/bin/bash
ln -sf /usr/bin/bash ${CLFS}/targetfs/bin/bash
# Clean up
cd ${CLFS}
rm -rf ${CLFS}/build/${PKGNAME}-${PKGVERSION}/${PKGNAME}-${PKGVERSION}
|
mschlenker/TinyCrossLinux
|
stage02/0045_bash.sh
|
Shell
|
mit
| 1,095 |
#!/bin/bash
# this script is used to initiate builds regularly
set -e
if [[ "${DONT_PUSH}" != yes ]]; then
mkdir -p ~/.docker && echo "${DOCKER_AUTH}" >~/.docker/config.json
fi
set -x
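# Count how many of the nightly/stable/clippy builds fail; the count becomes the exit code.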
BUILD_FAILURES=0
LAST_ACTION_PASSED=0
make ci-nightly && make ci-nightly-test || LAST_ACTION_PASSED=$?
if [[ $LAST_ACTION_PASSED == 0 ]] ; then
VERSION=$(make -s get-nightly-version)
# FIXME: move this to makefile
docker tag tomastomecek/rust:nightly tomastomecek/rust:$VERSION
if [[ "${DONT_PUSH}" != yes ]]; then
docker push tomastomecek/rust:nightly
docker push tomastomecek/rust:$VERSION
fi
else
BUILD_FAILURES=$((BUILD_FAILURES+1))
fi
LAST_ACTION_PASSED=0
make ci-stable && make ci-stable-test || LAST_ACTION_PASSED=$?
if [[ $LAST_ACTION_PASSED == 0 ]] ; then
VERSION=$(make -s get-stable-version)
docker tag tomastomecek/rust tomastomecek/rust:$VERSION
if [[ "${DONT_PUSH}" != yes ]]; then
docker push tomastomecek/rust
docker push tomastomecek/rust:$VERSION
fi
else
BUILD_FAILURES=$((BUILD_FAILURES+1))
fi
LAST_ACTION_PASSED=0
make ci-clippy && make ci-clippy-test || LAST_ACTION_PASSED=$?
if [[ $LAST_ACTION_PASSED == 0 ]] ; then
if [[ "${DONT_PUSH}" != yes ]]; then
docker push tomastomecek/rust:clippy
fi
else
BUILD_FAILURES=$((BUILD_FAILURES+1))
fi
exit $BUILD_FAILURES
|
TomasTomecek/rust-container
|
hack/ci.sh
|
Shell
|
mit
| 1,326 |
#!/usr/bin/env bash
# ide.sh
# open project dir in intellij
# note: must have .idea dir already
# https://gist.github.com/chrisdarroch/7018927
function idea() {
# check for where the latest version of IDEA is installed
IDEA=`ls -1d /Applications/IntelliJ\ * | tail -n1`
wd=`pwd`
# were we given a directory?
if [ -d "$1" ]; then
# echo "checking for things in the working dir given"
wd=`ls -1d "$1" | head -n1`
fi
# were we given a file?
if [ -f "$1" ]; then
# echo "opening '$1'"
open -a "$IDEA" "$1"
else
# let's check for stuff in our working directory.
pushd $wd > /dev/null
# does our working dir have an .idea directory?
if [ -d ".idea" ]; then
# echo "opening via the .idea dir"
open -a "$IDEA" .
# is there an IDEA project file?
elif [ -f *.ipr ]; then
# echo "opening via the project file"
open -a "$IDEA" `ls -1d *.ipr | head -n1`
# Is there a pom.xml?
elif [ -f pom.xml ]; then
# echo "importing from pom"
open -a "$IDEA" "pom.xml"
# can't do anything smart; just open IDEA
else
# echo 'cbf'
open "$IDEA"
fi
popd > /dev/null
fi
# for some reason we end up in $HOME after above is run
cd $wd
}
|
boggebe/dotfiles
|
sh/ide.sh
|
Shell
|
mit
| 1,266 |
alias gvm="sdk" # for legacy
|
Limess/dotfiles
|
jvm/aliases.zsh
|
Shell
|
mit
| 28 |
export GOOGLE_CLOUD_SDK=$HOME/code/github/google-cloud-sdk
if [ -d "$GOOGLE_CLOUD_SDK" ]; then
# Update PATH for the Google Cloud SDK.
source $GOOGLE_CLOUD_SDK/path.zsh.inc
# Enable zsh completion for gcloud.
source $GOOGLE_CLOUD_SDK/completion.zsh.inc
alias goapp=$GOOGLE_CLOUD_SDK/platform/google_appengine/goapp
fi
|
markscholtz/dotfiles
|
google-cloud-sdk/google-cloud-sdk.zsh
|
Shell
|
mit
| 331 |
#!/bin/sh
# remove the rule that drops all ports for 172.16.1.0/24
sudo /usr/sbin/iptables -D INPUT -s 172.16.1.0/24 -j DROP
# remove the rule that accepts only port 51000 for 172.16.1.0/24
sudo /usr/sbin/iptables -D INPUT -s 172.16.1.0/24 -p tcp --dport 51000 -j ACCEPT
|
TorbenHaug/RNP
|
Aufgabe4/3bRemove.sh
|
Shell
|
mit
| 234 |
#!/bin/sh
exec java -Dderby.system.home=/opt/derby/databases -jar ${DERBY_HOME}/lib/derbyrun.jar server start
|
fupelaqu/ansible-docker-db
|
files/derby/run.sh
|
Shell
|
mit
| 111 |
#!/bin/bash
# exit if a command fails
set -e
#
# Required parameters
if [ -z "${info_plist_file}" ] ; then
echo " [!] Missing required input: info_plist_file"
exit 1
fi
if [ ! -f "${info_plist_file}" ] ; then
echo " [!] File Info.plist doesn't exist at specified path: ${info_plist_file}"
exit 1
fi
if [ -z "${bundle_identifier}" ] ; then
echo " [!] No Bundle Identifier (bundle_identifier) specified!"
exit 1
fi
# ---------------------
# --- Configs:
echo " (i) Provided Info.plist file path: ${info_plist_file}"
echo " (i) Provided Bundle Identifier: ${bundle_identifier}"
# ---------------------
# --- Main:
# verbose / debug print commands
set -v
# ---- Set Info.plist Bundle Identifier:
echo ""
echo ""
echo " (i) Replacing Bundle Identifier..."
ORIGINAL_BUNDLE_IDENTIFIER="$(/usr/libexec/PlistBuddy -c "Print :CFBundleIdentifier" "${info_plist_file}")"
echo " (i) Original Bundle Identifier: $ORIGINAL_BUNDLE_IDENTIFIER"
/usr/libexec/PlistBuddy -c "Set :CFBundleIdentifier ${bundle_identifier}" "${info_plist_file}"
REPLACED_BUNDLE_IDENTIFIER="$(/usr/libexec/PlistBuddy -c "Print :CFBundleIdentifier" "${info_plist_file}")"
echo " (i) Replaced Bundle Identifier: $REPLACED_BUNDLE_IDENTIFIER"
# ==> Bundle Identifier patched in Info.plist file for iOS project
|
teference/steps-set-ios-bundle-identifier
|
step.sh
|
Shell
|
mit
| 1,290 |
alias ss='bin/spring stop && sleep 1; ps ax | ag "[s]pring"'
alias r='bin/spring rails'
alias be='bundle exec'
|
conf/dotfiles
|
ruby/aliases.zsh
|
Shell
|
mit
| 111 |
# To activate, use:
# source ~/bin/nvm_init.sh
export NVM_DIR="$HOME/.nvm"
. "/usr/local/opt/nvm/nvm.sh"
|
kaeff/.bin
|
nvm_init.sh
|
Shell
|
mit
| 108 |
#!/bin/bash
#
# The MIT License (MIT)
# Copyright © 2016 Michał Dobaczewski <[email protected]>
#
_UTILS_SH=true;
true=0
false=-1
# Check if current OS is supported by this script.
check_os_support ()
{
if ! is_mac && ! is_linux; then
echo_error "Unsupported operating system! Only the Linux and Mac OS are supported!"
exit -1;
fi;
return $true;
}
# Check if required software is already installed on current OS.
check_requirements ()
{
local IS_OK=$true;
if is_mac && ! command_exists docker-machine; then
echo_error "Fail: The Docker Machine is required for this script"
IS_OK=$false;
fi;
if ! command_exists docker; then
echo_error "Fail: The Docker is required for this script"
IS_OK=$false;
fi;
if [[ $IS_OK == $false ]]; then
exit -1;
fi
}
# Checks if script is being run by root.
check_root ()
{
if [[ $EUID -ne 0 ]]; then
echo_fatal "This script must be run as root!" 1>&2
exit 1
fi
}
# Check if script is being run using sudo command.
check_sudo ()
{
if [[ -z $SUDO_USER ]] || [[ $SUDO_USER == "root" ]]; then
echo_fatal "This script must be run using sudo as non root user!" 1>&2
exit 1
fi
}
# Prints an error messages.
#
# $1 - An error message.
echo_error ()
{
printf "$CRED$1$CRESET\n"
}
# Prints a warning messages.
#
# $1 - A warning message.
echo_warn ()
{
printf "$CORANGE$1$CRESET\n"
}
# Prints a success messages.
#
# $1 - A success message.
echo_success ()
{
printf "$CYELLOW$1$CRESET\n"
}
# Prints a step name with the "INFO" status.
#
# $1 - A message.
echo_log ()
{
if [[ $VERBOSE != 0 ]]; then
printf "$CBLUE$1\n"
fi
}
# Prints a fatal error message and interrupts the script execution.
#
# $1 - An error message.
# $2 - Exit immediately ($true or $false, by default $true)
echo_fatal ()
{
local EXIT_IMMEDIATLY=$2
echo
echo_error "--------------------------------------------------------------------------------"
echo_error " Script failed! "
echo_error " $1"
echo_error "--------------------------------------------------------------------------------"
echo
if [[ -z "$EXIT_IMMEDIATLY" ]] || [[ "$EXIT_IMMEDIATLY" == "$true" ]]; then
exit -1;
fi
}
# Prints current step message. After this function you must use one
# of these functions: echo_step_result_ok, echo_step_result_fail or
# echo_step_result_auto to print the result.
#
# $1 - Step message.
echo_step ()
{
_STEP=$true
if [[ $VERBOSE == 0 ]]; then
printf "$CRESET[ ] $CYELLOW$1$CRESET\r"
else
printf "$CYELLOW$1$CRESET\n"
fi
}
# Should be used to execute step commands after a echo_step.
# This function executes a step (using the exec_cmd function) and immediately
# after it shows the step status using the echo_step_result_auto.
#
# $@ - A command to execute.
exec_step ()
{
local STATUS=0
"$@"
STATUS=$?
if [[ $true == $_STEP ]]; then
echo_step_result_auto $STATUS
fi
_STEP=$false
return $STATUS
}
# Prints the "OK" result for echo_step function.
echo_step_result_ok ()
{
if [[ $VERBOSE == 0 ]]; then
printf "$CYELLOW[ OK ]\n"
fi
}
# Prints the "FAIL" result for echo_step function.
echo_step_result_fail ()
{
if [[ $VERBOSE == 0 ]]; then
printf "$CRED[FAIL]\n"
fi
}
# Check if the last command (or the status passed as $1) was executed successfully
# and if so it prints the "OK" status; in case of an error it prints the "FAIL"
# status and interrupts the script execution.
echo_step_result_auto ()
{
local STATUS=${1:-$?}
if [[ $STATUS == 0 ]]; then
echo_step_result_ok
else
echo_step_result_fail
echo_fatal "Execute this script again with -v flag to enable the verbose mode."
fi
}
# Prints a step name with the "SKIP" status.
#
# $1 - A message.
echo_step_skip ()
{
printf "$CLBLUE[SKIP] $CYELLOW$1$CRESET\n"
}
# Execute the command given after that function. If a $VERBOSE variables is set
# to 0 a result of this command will not be shown. When $VERBOSE is set to 1 all
# output will be printed.
#
# $@ - Command to execute.
verbose ()
{
local STATUS=0
if [[ $VERBOSE == 1 ]]; then
echo -e "$CBLUE\xE2\x94\x8F $CRESET$(caller)$CRESET"
echo -e "$CBLUE\xE2\x94\x97 $CRESET$@$CRESET"
"$@"
STATUS=$?
printf "\n"
else
"$@" &> /dev/null
STATUS=$?
fi
return $STATUS
}
run_as_user ()
{
sudo -u $SUDO_USER "$@"
return $?
}
# Copy permission of one file to another.
#
# $1 - Source.
# $2 - Target.
copy_permissions ()
{
chmod $( stat -f '%p' "$1" ) "${@:2}"
}
# Checks if a command exists.
#
# $1 - A command to check.
command_exists ()
{
type "$1" &> /dev/null;
}
# Checks if current OS is a Linux.
is_linux ()
{
if [[ "$(uname -s)" == "Linux" ]]; then
return $true;
else
return $false;
fi;
}
# Checks if current OS is a Mac OS.
is_mac ()
{
if [[ "$(uname -s)" == "Darwin" ]]; then
return $true;
else
return $false;
fi;
}
|
MDobak/docker-dev-env
|
src/util.sh
|
Shell
|
mit
| 4,901 |
#!/bin/bash
cd "$(dirname $0)"
if [ -z "$1" ]
then
bash ./roundup.sh tests/*-test.sh
else
bash ./roundup.sh "$@"
fi
|
chingjun/jumbo
|
run-test.sh
|
Shell
|
mit
| 124 |
#!/bin/bash
#
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
VERSION=$(cat "${DIR}/../VERSION")
CLUSTER_VERSION=${CLUSTER_VERSION:=$VERSION}
INSTANCE_BUILDER_NAME="verifier-instance-image-builder-${CLUSTER_VERSION//./-}"
INSTANCE_GROUP_NAME="verifier-cluster-${CLUSTER_VERSION//./-}"
TEMPLATE_NAME="verifier-template-${CLUSTER_VERSION//./-}"
HEALTHCHECK_NAME="basic-check"
TARGET_POOL_NAME="verifier-pool-${CLUSTER_VERSION//./-}"
AUTOSCALER_NAME="verifier-autoscaler-${CLUSTER_VERSION//./-}"
FORWARD_RULE_NAME="verifier-rule-${CLUSTER_VERSION//./-}"
REGION="us-central1"
ZONE="us-central1-a"
BASE_IMAGE="container-vm-v20150806"
BASE_IMAGE_PROJECT="google-containers"
VM_IMAGE="verifier-${CLUSTER_VERSION//\./-}"
VM_IMAGE_PROJECT="singpath-hd"
VM_MACHINE_TYPE="f1-micro"
CLUSTER_NODE_MIN_COUNT=1
CLUSTER_NODE_MAX_COUNT=2
STARTUP_SCRIPT="${DIR}/../server/bin/startup_run.sh"
STARTUP_SETUP_SCRIPT="${DIR}/../server/bin/startup_setup.sh"
### Test Startup script
if [[ -z "$STARTUP_SCRIPT" ]] || [[ ! -f "$STARTUP_SCRIPT" ]]; then
>&2 echo "The startup script could not be found."
exit 1
fi
function set_user() {
current_user=$(gcloud config list account | awk '$2 == "=" {print $3}')
if [[ -n "$current_user" ]]; then
echo "You are currently logged it on: $current_user"
read -p "Would you like to login on a different account (y/N)? " yN
case $yN in
[Yy]* ) gcloud auth login; ;;
* ) ;;
esac
else
gcloud auth login
fi
}
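# Helper: report whether the previous lookup returned anything; succeeds only on non-empty input.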
function found {
if [[ -z "$1" ]]; then
echo " not found."
return 1
else
echo " found."
return 0
fi
}
function forwardrule_exist() {
echo -n "Checking if forwarding rule named '$1' in region '$2' already exists..."
found $(gcloud compute forwarding-rules list "$1" --regions "$2" | cat -n | awk '$1>1 {print $2}')
return $?
}
function group_exist() {
echo -n "Checking if managed instance group named '$1' in zone '$2' already exists..."
found $(gcloud compute instance-groups managed list --zone "$2" | awk '$1=="'$1'" {print $1}')
return $?
}
function healthcheck_exist() {
echo -n "Checking if healthcheck named '$1' already exists..."
found $(gcloud compute http-health-checks list $1 | cat -n | awk '$1>1 {print $2}')
return $?
}
function image_exist() {
echo -n "Checking if image named '$1' already exists..."
found $(gcloud compute images list --no-standard-images $1 | cat -n | awk '$1>1 {print $2}')
return $?
}
function targetpool_exist() {
echo -n "Checking if target pool named '$1' in region $2 already exists..."
found $(gcloud compute target-pools list "$1" --regions "$2" | cat -n | awk '$1>1 {print $2}')
return $?
}
function template_exist() {
echo -n "Checking if template named '$1' already exists..."
found $(gcloud compute instance-templates list $1 | cat -n | awk '$1>1 {print $2}')
return $?
}
function create_autoscaler() {
gcloud compute instance-groups managed set-autoscaling \
$1 \
--zone "$2"\
--min-num-replicas "$3" \
--max-num-replicas "$4" \
--target-cpu-utilization 0.75 \
--cool-down-period 180
}
function create_forwardrule() {
forwardrule_exist "$1" "$2"
if [[ $? -ne 0 ]]; then
gcloud compute forwarding-rules create "$1" \
--region "$2" \
--port-range 80 \
--target-pool "$3"
else
>&2 echo "The forwarding rule should be removed (is it safe?)"
>&2 echo "or you could deploy to an other version"
exit 1
fi
}
function create_group() {
group_exist $1 $2
if [[ $? -ne 0 ]]; then
gcloud compute instance-groups managed create \
"$1" \
--zone "$2" \
--base-instance-name "$3" \
--size "$4"\
--template "$5" \
--target-pool "$6"
else
>&2 echo "The instance group exists already."
exit 1
fi
}
function create_healthcheck() {
healthcheck_exist $1
if [[ $? -ne 0 ]]; then
gcloud compute http-health-checks create $1
fi
}
function create_instance_template() {
template_exist $TEMPLATE_NAME
if [[ $? -eq 0 ]]; then
gcloud compute instance-templates delete $1
fi
echo -e "\nCreateing instance template..."
gcloud compute instance-templates create $@
}
function create_targetpool() {
targetpool_exist $1 $3
if [[ $? -ne 0 ]]; then
gcloud compute target-pools create $1 \
--region $3 --health-check $2
else
>&2 echo "The target pool should be removed (is it safe?)"
>&2 echo "or you could deploy to an other version"
exit 1
fi
}
function create_image() {
image_exist $1
if [[ $? -ne 0 ]]; then
start_image_builder_instance $1
check_image_builder_instance_status $1
save_image_builder_instance $1
else
echo "Image $INSTANCE_BUILDER_NAME already exists."
echo "You should delete it or prepare a new version."
exit 1
fi
}
function start_image_builder_instance() {
gcloud compute instances create "$1" \
--image "$BASE_IMAGE" \
--image-project "$BASE_IMAGE_PROJECT" \
--machine-type "$VM_MACHINE_TYPE" \
--zone "$ZONE" \
--metadata-from-file startup-script="$STARTUP_SETUP_SCRIPT" \
--metadata cluster-version="$CLUSTER_VERSION" \
--tags http-server
}
function check_image_builder_instance_status() {
details=$(gcloud compute instances list --regexp "$1" --zone "$ZONE")
ip=$(echo "$details" | cat -n | awk '$1>1 {print $6}')
if [[ -z "$ip" ]]; then
echo "instance ip not found"
exit 1
fi
echo "Image builder instance IP:" $ip
code=$(curl -s -o /dev/null -w "%{http_code}" http://${ip}/status.txt)
while [[ $code -ne 200 ]]; do
echo "Failed to fetch image builder instance status..."
echo "will try again in 30s"
sleep 30
code=$(curl -s -o /dev/null -w "%{http_code}" http://${ip}/status.txt)
done
status=$(curl http://${ip}/status.txt)
if [[ "$status" == "done" ]]; then
echo "Image builder instance is ready."
elif [[ "$status" == "failed" ]]; then
echo "Image builder instance startup failed."
exit 2
else
echo "Unknown image builder instance status:" $status
exit 3
fi
}
function save_image_builder_instance() {
echo "Stopping image builder instance..."
gcloud compute instances delete "$1" --zone "$ZONE" --keep-disks boot
echo "Creating a new image named $VM_IMAGE from the the boot disk..."
gcloud compute images create "$VM_IMAGE" --source-disk "$1" --source-disk-zone "$ZONE"
echo "Deleting the boot disk..."
gcloud compute disks delete "$1" --zone "$ZONE"
}
function setup_cluster() {
### Summary
echo -e "\n\nCluster version: $CLUSTER_VERSION"
echo "Template name: $TEMPLATE_NAME"
echo "Base Image container: $BASE_IMAGE"
echo "Base Image project: $BASE_IMAGE_PROJECT"
echo "Instance type: $VM_MACHINE_TYPE"
echo "Startup script: $STARTUP_SCRIPT"
### Authentication
echo -e "\n\nChecking your credentials...\n"
set_user
### Verifier image, step 1
echo -e "\n\nStarting creating the verifier image..."
image_exist "$INSTANCE_BUILDER_NAME"
if [[ $? -eq 0 ]]; then
echo "Image $INSTANCE_BUILDER_NAME already exists."
echo "You should delete it or prepare a new version."
exit 1
fi
start_image_builder_instance "$INSTANCE_BUILDER_NAME"
### healthcheck
echo -e "\n\nCreating health-check..."
create_healthcheck "$HEALTHCHECK_NAME"
### Targetpool
echo -e "\n\nCreating target-pool..."
create_targetpool "$TARGET_POOL_NAME" "$HEALTHCHECK_NAME" "$REGION"
### Forwarding rule
echo -e "\n\nCreating forwarding rule..."
create_forwardrule "$FORWARD_RULE_NAME" "$REGION" "$TARGET_POOL_NAME"
### Verifier image, step 2 and 3
echo -e "\n\nFinishing creating the verifier image..."
check_image_builder_instance_status "$INSTANCE_BUILDER_NAME"
save_image_builder_instance "$INSTANCE_BUILDER_NAME"
### Instance template
echo -e "\n\nCreating instance template..."
create_instance_template "$TEMPLATE_NAME" \
--machine-type "$VM_MACHINE_TYPE" \
--image "$VM_IMAGE" \
--image-project "$VM_IMAGE_PROJECT" \
--metadata-from-file startup-script="$STARTUP_SCRIPT" \
--metadata cluster-version="$CLUSTER_VERSION" \
--tags http-server
}
function start_cluster() {
### Instance group
echo -e "\nCreating instance group..."
create_group "$INSTANCE_GROUP_NAME" "$ZONE" "$INSTANCE_GROUP_NAME" "$CLUSTER_NODE_MIN_COUNT" "$TEMPLATE_NAME" "$TARGET_POOL_NAME"
### Autoscaler
echo -e "\n\nCreating autoscaler..."
create_autoscaler "$INSTANCE_GROUP_NAME" "$ZONE" "$CLUSTER_NODE_MIN_COUNT" "$CLUSTER_NODE_MAX_COUNT"
  cluster_ip=$(gcloud compute forwarding-rules list "$FORWARD_RULE_NAME" --regions "$REGION" | cat -n | awk '$1>1{print $4}')
echo "Test the cluster at http://${cluster_ip}/console/"
echo "(it may take a minute for the cluster to be available)"
}
function stop_cluster() {
gcloud compute instance-groups managed delete --zone "$ZONE" "$INSTANCE_GROUP_NAME"
}
function delete_cluster() {
gcloud compute forwarding-rules delete "$FORWARD_RULE_NAME" --region "$REGION"
gcloud compute target-pools delete "$TARGET_POOL_NAME" --region "$REGION"
gcloud compute instance-templates delete "$TEMPLATE_NAME"
gcloud compute images delete "$VM_IMAGE"
}
function show_help() {
echo -e "usage: setup|start|stop|delete \n"
echo "setup Create the an image, an instance template and a load balancer."
echo "start Manually start the cluster (an instance group and an autoscaler)."
echo " The cluster should already be setup."
echo "stop Stop the instance group."
echo "delete Delete the cluster (instance group, load balancer and image.)"
echo -e " The cluster shouldn't be running.\n"
}
case "$1" in
setup )
setup_cluster
;;
start )
start_cluster
;;
stop )
stop_cluster
;;
delete )
delete_cluster
;;
* )
show_help
;;
esac
|
dinoboff/docker-code-verifier
|
scripts/deploy.sh
|
Shell
|
mit
| 10,461 |
#!/bin/bash
echo "deb http://download.openvz.org/debian wheezy main" >> /etc/apt/sources.list.d/openvz.list
wget http://ftp.openvz.org/debian/archive.key
apt-key add archive.key
apt-get -qq update
apt-get install -qqy linux-image-openvz-amd64
echo '# On Hardware Node we generally need
# packet forwarding enabled and proxy arp disabled
net.ipv4.ip_forward = 1
# Enables source route verification
net.ipv4.conf.all.rp_filter = 1
# Enables the magic-sysrq key
kernel.sysrq = 1
# We do not want all our interfaces to send redirects
net.ipv4.conf.default.send_redirects = 1
net.ipv4.conf.all.send_redirects = 0' >> /etc/sysctl.conf
apt-get install -qqy vzctl vzquota ploop vzstats
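# Boot the OpenVZ kernel by default (assumes it is the third GRUB menu entry).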
sed -i 's/GRUB_DEFAULT=0/GRUB_DEFAULT=2/' /etc/default/grub
update-grub
|
h3rX9m/debian-first-install
|
files/openvz.bash
|
Shell
|
mit
| 757 |
#!/bin/bash
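# Tear down the schema by running delete_ddl.sql against the default PG* database.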
psql -f delete_ddl.sql
|
Bauer312/election-data
|
Model/destroy.sh
|
Shell
|
mit
| 35 |
#!/bin/bash
# Get provisionning directory
dir=/vagrant/vagrant
scripts=$dir/scripts
# Load configuration
. "$1"
# Server configuration
if [ "$2" == "server" ]; then
$scripts/server/user.sh
$scripts/server/repos.sh
$scripts/server/update.sh
$scripts/server/start-stop-daemon.sh
$scripts/server/ssh.sh "${CONFIG['server:ssh_key']}" "${CONFIG['server:known_hosts']}"
$scripts/server/ntp.sh "${CONFIG['server:timezone']}"
$scripts/server/fixes.sh
fi
# Installation
if [ "$2" == "install" ]; then
# Get script arguments
domain="$3"
domains="$4"
# Web server configuration
if [ "${CONFIG['web:ssl']}" == true ]; then
$scripts/web/ssl.sh "$domain"
fi
if [ "${CONFIG['web:httpd:enabled']}" == true ]; then
$scripts/web/httpd.sh "$domain" "$domains" "${CONFIG['web:webroots']}" "${CONFIG['web:ssl']}" "${CONFIG['web:httpd:conf']}" "${CONFIG['web:httpd:confssl']}"
fi
if [ "${CONFIG['web:nginx:enabled']}" == true ]; then
$scripts/web/nginx.sh "$domain" "$domains" "${CONFIG['web:ssl']}" "${CONFIG['web:nginx:conf']}" "${CONFIG['web:nginx:confssl']}"
fi
# PHP configuration
if [ "${CONFIG['php:enabled']}" == true ]; then
if [ -f "$scripts/php/php-${CONFIG['php:version']}.sh" ]; then
$scripts/php/php-${CONFIG['php:version']}.sh "${CONFIG['server:timezone']}" "${CONFIG['php:modules']}" "$scripts/php"
fi
if [ "${CONFIG['php:composer:enabled']}" == true ]; then
$scripts/php/composer.sh "${CONFIG['php:composer:github_token']}"
fi
if [ "${CONFIG['php:phpunit']}" == true ]; then
$scripts/php/phpunit.sh
fi
if [ "${CONFIG['php:blackfire:enabled']}" == true ]; then
$scripts/php/blackfire.sh "${CONFIG['php:blackfire:server_id']}" "${CONFIG['php:blackfire:server_token']}"
fi
if [ "${CONFIG['php:hhvm:enabled']}" == true ]; then
$scripts/php/hhvm.sh "${CONFIG['php:hhvm:composer']}"
fi
fi
# Database configuration
if [ "${CONFIG['database:sql:enabled']}" == true ] && [ -f "$scripts/database/sql/${CONFIG['database:sql:mode']}.sh" ]; then
$scripts/database/sql/${CONFIG['database:sql:mode']}.sh "${CONFIG['server:timezone']}" "${CONFIG['database:sql:fixtures']}" "$scripts/database/sql"
fi
if [ "${CONFIG['database:redis:enabled']}" == true ]; then
$scripts/database/redis.sh
fi
if [ "${CONFIG['database:mongodb:enabled']}" == true ]; then
$scripts/database/mongodb.sh "${CONFIG['database:mongodb:fixtures']}" "${CONFIG['php:enabled']}"
fi
# Tools configuration
if [ "${CONFIG['phpmyadmin:enabled']}" == true ]; then
$scripts/tools/phpmyadmin.sh "${CONFIG['phpmyadmin:version']}"
fi
if [ "${CONFIG['nodejs:enabled']}" == true ]; then
$scripts/tools/nodejs.sh "${CONFIG['nodejs:libraries']}"
fi
if [ "${CONFIG['ruby:enabled']}" == true ]; then
$scripts/tools/ruby.sh "${CONFIG['ruby:gems']}"
fi
if [ "${CONFIG['python:enabled']}" == true ]; then
$scripts/tools/python.sh "${CONFIG['python:version']}" "${CONFIG['python:pip']}"
fi
if [ "${CONFIG['zeromq:enabled']}" == true ]; then
$scripts/tools/zeromq.sh "${CONFIG['php:enabled']}"
fi
# Symfony configuration
if [ "${CONFIG['symfony:installer']}" == true ]; then
$scripts/symfony/installer.sh
fi
if [ "${CONFIG['symfony:completion']}" == true ]; then
$scripts/symfony/completion.sh
fi
if [ "${CONFIG['symfony:twig']}" == true ]; then
$scripts/symfony/twig.sh
fi
# Search engine configuration
if [ "${CONFIG['search:enabled']}" == true ] && [ -f "$scripts/search/${CONFIG['search:mode']}.sh" ]; then
$scripts/search/${CONFIG['search:mode']}.sh
fi
# Finalize
$scripts/finalize.sh
fi
|
fantoine/vagrant-provision-centos6.6
|
setup.sh
|
Shell
|
mit
| 3,891 |
#!/bin/bash
set -o pipefail
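# List the frameworks whose examples changed relative to upstream master,
# formatted as --framework= arguments for the test runners below.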
get_changes ()
{
git remote add current https://github.com/tastejs/todomvc.git && \
git fetch --quiet current && \
git diff HEAD origin/master --name-only | awk 'BEGIN {FS = "/"}; {print $1 "/" $2 "/" $3}' | grep -v \/\/ | grep examples | awk -F '[/]' '{print "--framework=" $2}'|uniq
}
npm i -g gulp
if [ "$TRAVIS_BRANCH" = "master" ] && [ "$TRAVIS_PULL_REQUEST" = "false" ]
then
gulp
git submodule add -b gh-pages https://${GH_OAUTH_TOKEN}@github.com/${GH_OWNER}/${GH_PROJECT_NAME} site > /dev/null 2>&1
cd site
  if ! git checkout gh-pages; then git checkout -b gh-pages; fi
git rm -r .
cp -R ../dist/* .
cp ../dist/.* .
git add -f .
git config user.email '[email protected]'
git config user.name 'TasteBot'
git commit -am 'update the build files for gh-pages [ci skip]'
# Any command that using GH_OAUTH_TOKEN must pipe the output to /dev/null to not expose your oauth token
git push https://${GH_OAUTH_TOKEN}@github.com/${GH_OWNER}/${GH_PROJECT_NAME} HEAD:gh-pages > /dev/null 2>&1
else
changes=$(get_changes)
if [ "${#changes}" = 0 ]
then
exit 0
else
cd tooling && \
echo $changes | xargs ./run.sh && \
cd ../tests && \
(gulp test-server &) && \
    sleep 2 && \
    echo $changes | xargs ./run.sh # the sleep gives the server time to boot in the background
fi
exit $?
fi
|
elacin/todomvc
|
test-runner.sh
|
Shell
|
mit
| 1,324 |
#!/bin/bash
if [ $# -le 0 ]; then
echo "Error: No command was specified for the test"
exit
fi
COMMAND="$1"
HOST="http://127.0.0.1:8000"
CONCURRENCIES=(10 100 500)
REQUESTLOADS=(100 1000 10000)
ENDPOINTS=("/status" "/echo/hello")
echo "//////////////////////////////////////////"
echo "/------------ Beginning Test ------------/"
echo "//////////////////////////////////////////"
echo "Testing command: $COMMAND"
echo "Testing host: $HOST"
echo
for e in "${ENDPOINTS[@]}"
do
echo "-------- Testing $e Endpoint ---------"
echo
for c in "${CONCURRENCIES[@]}"
do
echo " ---Concurrency Level $c---"
for n in "${REQUESTLOADS[@]}"
do
# If the concurrency is greater than the total then skip
if [ $c -gt $n ]; then
continue
fi
echo " - $n Requests - "
/usr/bin/time -l $COMMAND &> /tmp/results &
TIMEPID=$!
sleep 1
COMMANDPID=$(pgrep -P $TIMEPID)
wrk2 -t4 -c$c -d30s -R$n $HOST$e
kill $COMMANDPID
sleep 1
MEM=$(cat /tmp/results | grep maximum | awk '{print $1}')
echo "Max Memory Used: $(expr $MEM / 1048576) MB"
sleep 1
done
done
done
echo
echo "************ End of Test **************"
rm /tmp/results
|
bwinterton/spark_vs_go
|
test_2.sh
|
Shell
|
mit
| 1,237 |
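# Render index.html to a PNG at 60% zoom and preview it inline in the terminal with imgcat.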
webkit2png --fullsize --zoom=0.6 index.html -o index && imgcat index-full.png
|
jvranish/adventures_in_js
|
js_map_gen/render.sh
|
Shell
|
mit
| 78 |
#!/usr/bin/env zsh
# Continue on error
set +e
#
# Set path variables for all the preferences/settings
#
user_library_path="$HOME/Library"
dotf_library_path="$DOTFILES_DIR/Library"
rsync_backup_path="$DOTFILES_DIR/Library/_backup"
user_preferences_path="$user_library_path/Preferences"
dotf_preferences_path="$dotf_library_path/Preferences"
user_colors_path="$user_library_path/Colors"
dotf_colors_path="$dotf_library_path/Colors"
user_services_path="$user_library_path/Services"
dotf_services_path="$dotf_library_path/Services"
user_spelling_path="$user_library_path/Spelling"
dotf_spelling_path="$dotf_library_path/Spelling"
user_xcode_userdata_path="$user_library_path/Developer/Xcode/UserData"
dotf_xcode_userdata_path="$dotf_library_path/Developer/Xcode/UserData"
user_sublimetext_settings_path="$user_library_path/Application Support/Sublime Text 3"
dotf_sublimetext_settings_path="$dotf_library_path/Application Support/Sublime Text 3"
#
# Preferences
#
print_action "Copy back .plist to $dotf_preferences_path"
for file in $dotf_preferences_path/* ; do
f="$(basename -- $file)"
try_can_fail cp -rf "$user_preferences_path/$f" "$dotf_preferences_path"
done
#
# Colors
#
print_action "Copy back Colors to $dotf_colors_path"
for file in $user_colors_path/* ; do
f="$(basename -- $file)"
try_can_fail cp -rf "$user_colors_path/$f" "$dotf_colors_path"
done
#
# Services
#
print_action "Copy back Services to $dotf_services_path"
for file in $user_services_path/* ; do
f="$(basename -- $file)"
try_can_fail cp -rf "$user_services_path/$f" "$dotf_services_path"
done
#
# Spelling
#
print_action "Copy back Spelling to $dotf_spelling_path"
for file in $dotf_spelling_path/* ; do
f="$(basename -- $file)"
try_can_fail cp -rf "$user_spelling_path/$f" "$dotf_spelling_path"
done
#
# Xcode
#
print_action "Copy back Xcode settings to $dotf_xcode_userdata_path"
for file in $dotf_xcode_userdata_path/* ; do
f="$(basename -- $file)"
try_can_fail cp -rf "$user_xcode_userdata_path/$f" "$dotf_xcode_userdata_path"
done
#
# Sublime Text
#
print_action "Copy back Sublime Text settings to $dotf_sublimetext_settings_path"
for file in $dotf_sublimetext_settings_path/Packages/User/* ; do
f="$(basename -- $file)"
try_can_fail cp -rf "$user_sublimetext_settings_path/Packages/User/$f" "$dotf_sublimetext_settings_path"
done
|
ladislas/dotfiles
|
scripts/rsync_config.sh
|
Shell
|
mit
| 2,344 |
#!/bin/sh
MYNAME=`basename $0`
MYFULLPATH=$PWD/$MYNAME
if [ ! -f $MYFULLPATH ]; then
echo "The script $MYNAME is apparently not being run inside its own working directory. Please cd into its directory before running it."
exit 1
fi
echo "Ok, it was run inside its own directory."
|
mvendra/sandboxes
|
bash/denyrun_outsideworkingdir.sh
|
Shell
|
mit
| 285 |
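# libpq connection environment for the test database; source this file before running the tests.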
export PGHOST=172.17.0.2
export PGSSLMODE=disable
export PGDATABASE=test
export PGUSER=postgres
|
nad2000/gorilla-mux-pg-rest-api
|
testenv.sh
|
Shell
|
mit
| 97 |
#!/bin/sh
cd `dirname $0`
screen -S poifullchan -d -m java -jar ./target/scala-2.10/poifullchan.jar
|
yantene/poifullchan
|
run.sh
|
Shell
|
mit
| 101 |
#!/bin/bash
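# Run the Clang static analyzer over the unit-test build; --status-bugs makes the exit code non-zero if bugs are found.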
scan-build -o analysis --status-bugs bash -c 'test/unit/configure.py && ninja -f .build/unit/build.ninja'
|
ludocode/mpack
|
tools/scan-build.sh
|
Shell
|
mit
| 118 |
#!/bin/bash
# Make sure we have root access
if ! [[ `whoami` = "root" ]]; then
sudo echo -n ""
fi
# Starting lighty
echo -n "Starting lighty"
sudo ./sbin/lighttpd -f ./lighttpd.conf
# Show PID
echo " [$(cat ./lighttpd.pid)]"
|
C-Bouthoorn/didactic-octo-tribble
|
lighttpd/lighty-start.sh
|
Shell
|
mit
| 230 |
#!/usr/bin/env bash
echo '----------------------'
echo '------ SETUP.SH ------'
echo '----------------------'
#######################################
## DIST SETUP
#######################################
# Update packages
sudo apt-get update
# Install Packages
sudo apt-get install -y apache2 curl libfontconfig
# Add Sources
curl -sL https://deb.nodesource.com/setup_5.x | sudo -E bash -
# Install Node & NPM Packages
sudo apt-get install -y nodejs
sudo npm set progress=false
sudo npm install --global gulp-cli
#######################################
## SETUP
#######################################
# Apache
# Run each step with sudo; 'sudo su -' would open an interactive root shell and
# the commands after it (and the final 'exit') would not run as intended.
sudo cp /vagrant/vagrant/apache-site-config.conf /etc/apache2/sites-available/000-default.conf
sudo a2enmod rewrite
sudo service apache2 restart
echo '----------------------------'
echo '------ SETUP.SH (end) ------'
echo '----------------------------'
|
aaronheath/afl-2016
|
vagrant/setup.sh
|
Shell
|
mit
| 878 |
#!/usr/bin/bash
#
# Mostly taken from https://vivaldi.club, with targeted modifications and additions
# Don't abbreviate every thousand as "n 千"; abbreviate every ten thousand as "n 萬" instead
echo 'Abbreviating at every ten thousand ("n 萬") instead of every thousand ("n 千")'
sed -i -r 's#([=\/] 1000)\) #\10)#' \
vendor/flarum/core/js/forum/dist/app.js \
vendor/flarum/core/js/admin/dist/app.js \
vendor/flarum/core/js/lib/utils/abbreviateNumber.js
sed -i -r 's#(kilo_text: )千#\1万#' \
vendor/csineneo/flarum-ext-simplified-chinese/locale/core.yml
sed -i -r 's#(kilo_text: )千#\1萬#' \
vendor/csineneo/flarum-ext-traditional-chinese/locale/core.yml
# Remove the minimum length limits on titles and usernames
echo 'Removing the minimum length limits on titles and usernames'
sed -i 's#min:3#min:1#' \
vendor/flarum/core/src/Core/Validator/UserValidator.php \
vendor/flarum/core/src/Core/Validator/DiscussionValidator.php
# Keep the refresh button visible when Pusher is enabled
echo 'Keeping the refresh button visible when Pusher is enabled'
sed -i "/actionItems/,+2d" \
vendor/flarum/flarum-ext-pusher/js/forum/dist/extension.js
# Replace emoji assets with a local mirror (and switch Emojiarea to local assets too)
echo 'Replacing emoji assets with a local mirror'
sed -i 's#//cdn.jsdelivr.net/emojione/assets/png#/assets/emoji/png#' \
vendor/flarum/flarum-ext-emoji/js/forum/dist/extension.js
sed -i 's#//cdn.jsdelivr.net/emojione/assets#/assets/emoji#; s#abcdef#ABCDEF#; s#ABCDEF#abcdef#' \
vendor/s9e/text-formatter/src/Plugins/Emoji/Configurator.php
sed -i 's#//cdn.jsdelivr.net/emojione/assets/png#/assets/emoji/png#' \
vendor/flarum/flarum-ext-emoji/js/forum/src/addComposerAutocomplete.js
sed -i \
-e "s/window.emojioneVersion\s||\s'2.1.4'/'2.1.4'/" \
-e 's#defaultBase:\s"https://cdnjs.cloudflare.com/ajax/libs/emojione/"#defaultBase: "/assets/emojiarea/"#' \
vendor/clarkwinkelmann/flarum-ext-emojionearea/js/forum/dist/extension.js
# Upstream likes putting post titles into the URL in the name of SEO, which is not CJK-friendly at all.
# Remove the discussion slug to keep URLs short and clean
echo 'Removing the discussion slug to keep URLs short and clean'
sed -i "s# + '-' + discussion.slug()##" \
vendor/flarum/core/js/forum/dist/app.js \
vendor/flarum/core/js/forum/src/initializers/routes.js
sed -i "s# . '-'.*##" \
vendor/flarum/core/views/index.blade.php
# Improve the date display
echo 'Improving the date display'
sed -i "s#-30#-1#; s#D MMM#L#; s#MMM \\\'YY#LL#" \
vendor/flarum/core/js/forum/dist/app.js \
vendor/flarum/core/js/lib/utils/humanTime.js
# Allow searching for IDs shorter than three characters
echo 'Allowing searches for IDs shorter than three characters'
sed -i 's#query.length >= 3#query.length >= 1#' \
vendor/flarum/core/js/forum/src/components/Search.js \
vendor/flarum/core/js/forum/dist/app.js
sed -i 's#&& this.value().length >= 3 ##' \
	vendor/flagrow/byobu/js/forum/dist/extension.js
# vendor/flagrow/byobu/js/forum/src/components/RecipientSearch.js
# Localize the post editor buttons into Chinese
echo 'Localizing the post editor buttons into Chinese'
sed -i -e 's#Strong#\\u7c97\\u4f53#;'\
-e 's#strong text#\\u7c97\\u4f53\\u5b57#;'\
-e 's#"Emphasis#"\\u659c\\u4f53#;'\
-e 's#emphasized text#\\u659c\\u4f53\\u5b57#;'\
-e 's#"Hyperlink#"\\u8d85\\u94fe\\u63a5#;'\
-e 's#enter link description here#\\u5728\\u6b64\\u5904\\u8f93\\u5165\\u94fe\\u63a5\\u63cf\\u8ff0#;'\
-e 's#Insert Hyperlink#\\u63d2\\u5165\\u94fe\\u63a5#;'\
-e 's#Blockquote#\\u5f15\\u7528#g;'\
-e 's#Code Sample#\\u4ee3\\u7801#;'\
-e 's#enter code here#\\u5728\\u6b64\\u5904\\u8f93\\u5165\\u4ee3\\u7801#;'\
-e 's#"Image#"\\u56fe\\u7247#;'\
-e 's#enter image description here#\\u5728\\u6b64\\u5904\\u8f93\\u5165\\u56fe\\u7247\\u63cf\\u8ff0#;'\
-e 's#Insert Image#\\u63d2\\u5165\\u56fe\\u7247#;'\
-e 's#optional title#\\u53ef\\u9009\\u6807\\u9898#g;'\
-e 's#Numbered List#\\u6709\\u5e8f\\u5217\\u8868#;'\
-e 's#Bulleted List#\\u65e0\\u5e8f\\u5217\\u8868#;'\
-e 's#List item#\\u5217\\u8868\\u9879#;'\
-e 's#"Heading#"\\u6807\\u9898#g;'\
-e 's#Horizontal Rule#\\u6c34\\u5e73\\u5206\\u5206\\u5272#;'\
-e 's#"Undo#"\\u8fd8\\u539f#;'\
-e 's#"Redo#"\\u91cd\\u505a#g;'\
-e 's#Markdown Editing Help#Markdown \\u7f16\\u8f91\\u5e2e\\u52a9#;'\
-e 's#OK#\\u597d#;'\
-e 's#Cancel#\\u53d6\\u6d88#' \
vendor/xengine/flarum-ext-markdown-editor/js/forum/dist/extension.js
# Stop using Google Fonts
echo 'No longer using Google Fonts'
sed -i "/fonts.googleapis.com/d" \
vendor/flarum/core/views/install/app.php \
vendor/flarum/core/src/Http/WebApp/WebAppView.php
#vendor/flarum/core/src/Http/Controller/ClientView.php \
# Localize the emoji picker into Chinese
echo 'Localizing the emoji picker into Chinese'
sed -i "s#title: \"Diversity\"#title: \"种类\"#; \
s#title: \"Recent\"#title: \"最近\"#; \
s#title: \"Smileys & People\"#title: \"笑脸与人\"#; \
s#title: \"Animals & Nature\"#title: \"动物与自然\"#; \
s#title: \"Food & Drink\"#title: \"食物与饮品\"#; \
s#title: \"Activity\"#title: \"活动\"#; \
s#title: \"Travel & Places\"#title: \"旅游与景点\"#; \
s#title: \"Objects\"#title: \"物体\"#; \
s#title: \"Symbols\"#title: \"符号\"#; \
s#title: \"Flags\"#title: \"国旗\"#" \
vendor/clarkwinkelmann/flarum-ext-emojionearea/js/forum/dist/extension.js
# Fix registration failing for usernames with CJK characters
echo 'Fixing registration for usernames with CJK characters'
sed -i "s#a-z0-9_-#-_a-z0-9\\\x7f-\\\xff#" \
vendor/flarum/core/src/Core/Validator/UserValidator.php
# Fix terabin/flarum-ext-sitemap not supporting CJK characters
echo 'Fixing terabin/flarum-ext-sitemap CJK support'
sed -i '/>validateLocation/d' \
vendor/samdark/sitemap/Sitemap.php
# Fix @-mentions of Chinese usernames
echo 'Fixing @-mentions of Chinese usernames'
sed -i "s#a-z0-9_-#-_a-zA-Z0-9\\\x7f-\\\xff#" \
vendor/flarum/flarum-ext-mentions/src/Listener/FormatPostMentions.php \
vendor/flarum/flarum-ext-mentions/src/Listener/FormatUserMentions.php
sed -i "s#getIdForUsername(#getIdForUsername(rawurlencode(#; /getIdForUsername/s/'))/')))/" \
vendor/flarum/flarum-ext-mentions/src/Listener/FormatUserMentions.php
# 增加中文搜索支援
#echo '增加中文搜索支援'
#sed -i "/AGAINST/d; /discussion_id/i\\\t\\t->where('content', 'like', '%'.\$string.'%')" \
# vendor/flarum/core/src/Core/Search/Discussion/Fulltext/MySqlFulltextDriver.php
# Fix id lookup failing for Chinese usernames
echo 'Fixing id lookup for Chinese usernames'
sed -i "79i\\\t\\t\$username = rawurldecode(\$username);" \
vendor/flarum/core/src/Core/Repository/UserRepository.php
# Replace some descriptions
echo 'Replacing some descriptions'
sed -i "s/Search recipient by typing first three characters/Search recipient by typing username or groupname/" \
vendor/flagrow/byobu/locale/en.yml
sed -i "s/输入\sID\s前三个字符进行搜索/输入用户名或组名进行搜索/" \
vendor/csineneo/flarum-ext-simplified-chinese/locale/flagrow-byobu.yml
sed -i "s/輸入\sID\s前三個字符進行搜尋/輸入帳戶名或組名進行搜尋/" \
vendor/csineneo/flarum-ext-traditional-chinese/locale/flagrow-byobu.yml
wpwd=$(dirname $0)
# Use OpenCC for Simplified/Traditional conversion based on the current browsing language (mind the PHP extension config)
echo "Using OpenCC for Simplified/Traditional conversion based on the browsing language"
eval "/usr/bin/cp -f ${wpwd}/JsonApiResponse.php" vendor/flarum/core/src/Api/JsonApiResponse.php
eval "/usr/bin/cp -f ${wpwd}/MySqlFulltextDriver.php" vendor/flarum/core/src/Core/Search/Discussion/Fulltext/MySqlFulltextDriver.php
# Replace the error pages
echo "Replacing the error pages"
eval "/usr/bin/cp -f ${wpwd}/404.html vendor/flarum/core/error/"
eval "/usr/bin/cp -f ${wpwd}/403.html vendor/flarum/core/error/"
eval "/usr/bin/cp -f ${wpwd}/500.html vendor/flarum/core/error/"
eval "/usr/bin/cp -f ${wpwd}/503.html vendor/flarum/core/error/"
# Show the custom footer by default, and remove its fixed positioning and the show/hide toggle
echo "Showing the custom footer by default, without fixed positioning or the toggle button"
eval "/usr/bin/cp -f ${wpwd}/dav-is-customfooter-dist-extension.js.modified vendor/davis/flarum-ext-customfooter/js/forum/dist/extension.js"
eval "/usr/bin/cp -f ${wpwd}/dav-is-customfooter-src-main.js.modified vendor/davis/flarum-ext-customfooter/js/forum/src/main.js"
|
ProjectFishpond/pfp-docker-config
|
sc/hack/hackFlarum.sh
|
Shell
|
mit
| 8,294 |
#!/usr/bin/env bash
now=$(date +"%Y%m%d_%H%M%S")
#JOB_NAME="autoconverter_$now"
JOB_NAME="change_gan_$now"
TRAINER_PACKAGE_PATH=change-gan
MAIN_TRAINER_MODULE=change-gan.main
BUCKET_NAME=mlcampjeju2017-mlengine
#JOB_DIR="gs://$BUCKET_NAME/autoconverter-7"
JOB_DIR="gs://$BUCKET_NAME/job-dir/change-gan-bbox/change-gan-bbox-16"
PACKAGE_STAGING_LOCATION="gs://$BUCKET_NAME/stage"
TRAIN_DIR="gs://$BUCKET_NAME/data-bbox"
EVAL_DIR="gs://$BUCKET_NAME/data-bbox"
EVAL_OUTPUT_DIR="eval-output"
LOCAL_JOB_DIR="/Users/SHYBookPro/Desktop/local-job-dir"
LOCAL_TRAIN_DIR="$LOCAL_JOB_DIR/data"
LOCAL_EVAL_DIR="$LOCAL_JOB_DIR/data"
REGION="asia-east1"
RUNTIME_VERSION="1.2"
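# Modes: cloud submits a training job to ML Engine, local runs the trainer locally, eval submits an evaluation job.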
if [ $1 = "cloud" ]; then
gcloud ml-engine jobs submit training $JOB_NAME \
--job-dir $JOB_DIR \
--runtime-version $RUNTIME_VERSION \
--module-name $MAIN_TRAINER_MODULE \
--package-path $TRAINER_PACKAGE_PATH \
--region $REGION \
--config config.yaml \
-- \
--verbosity DEBUG \
--train-dir $TRAIN_DIR \
--eval-dir $EVAL_DIR \
--train-steps 200000 \
--eval-steps 1 \
--train-batch-size 1 \
--eval-batch-size 3 \
--learning-rate 0.0002
elif [ $1 = "local" ]; then
gcloud ml-engine local train \
--module-name $MAIN_TRAINER_MODULE \
--package-path $MAIN_TRAINER_MODULE \
-- \
--job-dir $LOCAL_JOB_DIR \
--verbosity DEBUG \
--train-dir $LOCAL_TRAIN_DIR\
--eval-dir $LOCAL_EVAL_DIR \
--train-steps 10 \
--eval-steps 1 \
--train-batch-size 1 \
--eval-batch-size 3 \
--learning-rate 0.0002
elif [ $1 = "eval" ]; then
gcloud ml-engine jobs submit training $JOB_NAME \
--job-dir $JOB_DIR \
--runtime-version $RUNTIME_VERSION \
--module-name $MAIN_TRAINER_MODULE \
--package-path $TRAINER_PACKAGE_PATH \
--region $REGION \
--scale-tier BASIC_GPU \
-- \
--verbosity DEBUG \
--eval true \
--eval-output-dir $EVAL_OUTPUT_DIR \
--eval-output-bucket $BUCKET_NAME \
--train-dir $TRAIN_DIR \
--eval-dir $EVAL_DIR \
--eval-steps 10 \
--eval-batch-size 50
else
echo "Usage: train.sh [cloud|local|eval]"
exit 1
fi
|
shygiants/ChangeGAN
|
change-gan/train.sh
|
Shell
|
mit
| 2,318 |
#!/bin/bash
if [ "$RAILS_ENV" == "production" ]; then
echo passenger start -p "$PORT" --min-instances 3 --max-pool-size 3 --no-friendly-error-pages --nginx-config-template ./config/nginx.conf.erb
exec bundle exec passenger start -p "$PORT" --min-instances 3 --max-pool-size 3 --no-friendly-error-pages --nginx-config-template ./config/nginx.conf.erb
else
exec bin/rails server
fi
|
mikechau/pharaoh
|
server.sh
|
Shell
|
mit
| 387 |
#!/bin/bash
# Kyle's script to toggle touchpad enable on XPS 15: 9560
# For some reason, there is no button to provide this functionality.
# And the touchpad is mighty close to the space bar.
# Try and get device ID first
DEVICENUMBER=$(xinput | awk -F"\t" ' /Touchpad/ { idNum = substr( $2, 4); print idNum }')
TOGGLESTATE=$(xinput --list-props ${DEVICENUMBER} | awk '/Device Enabled/ {print $4}')
#echo ${TOGGLESTATE}
if [[ ${TOGGLESTATE} -eq 1 ]]; then
# Touchpad is currently enabled
xinput disable $DEVICENUMBER > /dev/null 2>&1
echo "Touchpad Disabled"
exit 0
fi
if [[ ${TOGGLESTATE} -eq 0 ]]; then
# Touchpad is currently disabled
xinput enable $DEVICENUMBER > /dev/null 2>&1
echo "Touchpad Enabled"
exit 0
fi
>&2 echo "Touchpad state not recognized: ${TOGGLESTATE}" >stderr
exit 1
|
ksaxberg/MyScripts
|
touchpadToggle.sh
|
Shell
|
mit
| 821 |
#!/bin/bash
# This is a CodeRunner compile script. Compile scripts are used to compile
# code before being run using the run command specified in CodeRunner
# preferences. This script is invoked with the following properties:
#
# Current directory: The directory of the source file being run
#
# Arguments $1-$n: User-defined compile flags
#
# Environment: $CR_FILENAME Filename of the source file being run
# $CR_ENCODING Encoding code of the source file
# $CR_TMPDIR Path of CodeRunner's temporary directory
#
# This script should have the following return values:
#
# Exit status: 0 on success (CodeRunner will continue and execute run command)
#
# Output (stdout): On success, one line of text which can be accessed
# using the $compiler variable in the run command
#
# Output (stderr): Anything outputted here will be displayed in
# the CodeRunner console
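#
# For example (illustrative): on success this script prints a line such as
#   cd "/path/to/project"; JAVA_HOME=/Library/Java/Home java com.example.Main
# which CodeRunner then substitutes for $compiler in the run command.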
enc[4]="UTF8" # UTF-8
enc[10]="UTF16" # UTF-16
enc[5]="ISO8859-1" # ISO Latin 1
enc[9]="ISO8859-2" # ISO Latin 2
enc[30]="MacRoman" # Mac OS Roman
enc[12]="CP1252" # Windows Latin 1
enc[3]="EUCJIS" # Japanese (EUC)
enc[8]="SJIS" # Japanese (Shift JIS)
enc[1]="ASCII" # ASCII
# Set Java 8
JAVA_HOME=`/usr/libexec/java_home -v 1.8`
directory=`dirname "$0"`
# Check if file resides in a folder with package name, if so, change directory
package=`php "$directory/parsejava.php" --package "$PWD/$CR_FILENAME"`
if [ ${#package} -ne 0 ]; then
# Check if package name is in the form package.subpackage.subpackage
# If this structure matches directory structure, change directory...
packageDirectory=`echo "$package" | sed 's/\./\//g'`
if [[ $PWD == *"$packageDirectory" ]]; then
cd "${PWD:0:${#PWD}-${#packageDirectory}}"
CR_FILENAME="$packageDirectory"/"$CR_FILENAME"
else
if [[ $package == *.* ]]; then
errmessage="structure"
else
errmessage="named"
fi
>& 2 echo "CodeRunner warning: Java file \"$CR_FILENAME\" with package \"$package\" should reside in folder $errmessage \"$packageDirectory\"."
fi
fi
javac "$CR_FILENAME" -encoding ${enc[$CR_ENCODING]} "${@:1}"
status=$?
if [ $status -ne 0 ]; then
javac -version &>/dev/null
if [ $? -ne 0 ]; then
echo -e "\nTo run Java code, you need to install a JDK. You can download a JDK at http://oracle.com/technetwork/java/javase/downloads/\n\nIf you see a prompt asking you to install Java Runtime Environment, please ignore it and use the JDK download link above."
fi
exit $status
fi
# Use parsejava.php to get package and class name of main function.
out=`php "$directory/parsejava.php" "$PWD/$CR_FILENAME"`
status=$?
if [ $status -ne 0 ]; then
>& 2 echo "CodeRunner warning: Could not find a main method in the file \"$CR_FILENAME\". Please add a main method or run the file in your project containing your main method."
compname=`echo "$CR_FILENAME" | sed 's/\(.*\)\..*/\1/'`
if [ -z "$out" ]; then
out="$compname"
else
out="$out.$compname"
fi
fi
echo "cd \"$PWD\"; JAVA_HOME=$JAVA_HOME java $out"
exit 0
|
ascarter/CodeRunnerSupport
|
Languages/Java 8.crLanguage/Scripts/compile.sh
|
Shell
|
mit
| 2,999 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for CESA-2012:0710
#
# Security announcement date: 2012-06-06 14:01:28 UTC
# Script generation date: 2017-01-27 21:18:30 UTC
#
# Operating System: CentOS 6
# Architecture: x86_64
#
# Vulnerable packages fixed in version:
# - firefox.i686:10.0.5-1.el6.centos
# - xulrunner.i686:10.0.5-1.el6.centos
# - xulrunner-devel.i686:10.0.5-1.el6.centos
# - firefox.x86_64:10.0.5-1.el6.centos
# - xulrunner.x86_64:10.0.5-1.el6.centos
# - xulrunner-devel.x86_64:10.0.5-1.el6.centos
#
# Last versions recommended by the security team:
# - firefox.i686:45.7.0-1.el6.centos
# - xulrunner.i686:17.0.10-1.el6.centos
# - xulrunner-devel.i686:17.0.10-1.el6.centos
# - firefox.x86_64:45.7.0-1.el6.centos
# - xulrunner.x86_64:17.0.10-1.el6.centos
# - xulrunner-devel.x86_64:17.0.10-1.el6.centos
#
# CVE List:
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
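# To check the installed versions before and after applying this fix
# (illustrative):
#   rpm -q firefox xulrunner xulrunner-devel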
sudo yum install firefox.i686-45.7.0 -y
sudo yum install xulrunner.i686-17.0.10 -y
sudo yum install xulrunner-devel.i686-17.0.10 -y
sudo yum install firefox.x86_64-45.7.0 -y
sudo yum install xulrunner.x86_64-17.0.10 -y
sudo yum install xulrunner-devel.x86_64-17.0.10 -y
|
Cyberwatch/cbw-security-fixes
|
CentOS_6/x86_64/2012/CESA-2012:0710.sh
|
Shell
|
mit
| 1,269 |
#!/usr/bin/env bash
# requires: brew install entr
source ./cleanElm.sh
cd elm
ls `find . -name \*.elm -not -path \*elm-stuff* -print` | entr sh -c 'clear; rm ../src/main/resources/flakeless.js; elm-make `find . -name \*.elm -not -path \*elm-stuff* -print` --output ../src/main/resources/flakeless.js; cp ../src/main/resources/flakeless.js ../target/flakeless/flakeless.js'
|
alltonp/flakeless
|
autobuild.sh
|
Shell
|
mit
| 379 |
find "${SRCROOT}" \( -name "*.h" -or -name "*.m" \) -and \( -path "${SRCROOT}/Pods/*" -prune -o -print0 \) | xargs -0 wc -l | awk '$1 > 2000 && $2 != "total" {for(i=2;i<NF;i++){printf "%s%s", $i, " "} print $NF ":1: warning: File more than 2000 lines (" $1 "), consider refactoring." }'
# Emits an Xcode build warning for every source file (outside Pods) longer than 2000 lines, suggesting the class or function be refactored.
|
thankmelater23/MyFitZ
|
MyFitZ/CodeComplexity.sh
|
Shell
|
mit
| 422 |
#!/bin/bash
A=$1
B=$2
# ImageMagick params
BLUR=6x2
# SED matching stuff
NUM='\([0-9]\{1,\}\)'
FORMAT='{"x":\1,"y":\2,"v":[\3,\4,\5]},'
FORMAT='\1,\2,\3,\4,\5'
REGEX="s/^$NUM,$NUM: ($NUM,$NUM,$NUM).*/$FORMAT/g"
convert \
$A -gaussian-blur $BLUR \
$B -gaussian-blur $BLUR \
-compose difference -composite \
-negate -selective-blur 12x4+08% \
-shave 10x40 \
text:- | tail -n +2 | sed -e "$REGEX"
|
janjakubnanista/rain
|
src/js/server/api/bin/compare.sh
|
Shell
|
mit
| 422 |
source ~/dotfiles/cli_helper_functions.sh
if [[ "$OSTYPE" = "linux-gnu"* ]]; then
sudo apt-get install ruby ca-certificates
sh -c "$(curl -fsSL https://raw.githubusercontent.com/Linuxbrew/install/master/install.sh)"
test -d ~/.linuxbrew && eval $(~/.linuxbrew/bin/brew shellenv)
test -d /home/linuxbrew/.linuxbrew && eval $(/home/linuxbrew/.linuxbrew/bin/brew shellenv)
# test -r ~/.bash_profile && echo "eval \$($(brew --prefix)/bin/brew shellenv)" >> ~/.bash_profile
echo "eval \$($(brew --prefix)/bin/brew shellenv)" >> ~/.profile
brew doctor
fi
if hash xcode-select 2>/dev/null; then
info "Installing xcode cli tools"
xcode-select --install || echo "This is fine"
fi
if ! hash brew 2>/dev/null; then
info "Installing homebrew"
/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
brew doctor
fi
info "Installing my fav tools"
(cd ~/dotfiles; brew bundle --no-lock)
|
sivakumar-kailasam/dotfiles
|
brew/install.sh
|
Shell
|
mit
| 923 |
#!/bin/bash
path="letvs/le_services"
cd ./$path || exit 1
nohup node server.js &
echo $1$path
|
flystome/webfe
|
Letv/rofe/_tool/child/services.sh
|
Shell
|
mit
| 84 |
#!/bin/sh
# CYBERWATCH SAS - 2016
#
# Security fix for RHSA-2015:1043
#
# Security announcement date: 2015-06-03 10:42:14 UTC
# Script generation date: 2016-05-12 18:12:59 UTC
#
# Operating System: Red Hat 6
# Architecture: x86_64
#
# Vulnerable packages fixed in version:
# - virtio-win.noarch:1.7.4-1.el6_6
#
# Last versions recommended by the security team:
# - virtio-win.noarch:1.7.4-1.el6_6
#
# CVE List:
# - CVE-2015-3215
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo yum install virtio-win.noarch-1.7.4 -y
|
Cyberwatch/cbw-security-fixes
|
Red_Hat_6/x86_64/2015/RHSA-2015:1043.sh
|
Shell
|
mit
| 611 |
#!/usr/bin/zsh
# Processing files are given via command-line arguments.
# This parameter can be overridden by the .purebuild-puredoc.rc configuration file.
typeset -xT FILES files
files=("$@")
# Load config file (.purebuild-puredoc.rc on current directory.)
# If $load_conf is set "no", skip this process.
if [[ -f .purebuild-puredoc.rc && ${load_conf} != no ]]
then
. ./.purebuild-puredoc.rc
fi
# Processing.
if [[ $1 == "-s" ]] # Stdout option.
then
#Output to STDOUT.
shift
puredoc -f ${puredoc_format:-htmlcls} "$@"
else
# Processing each file.
typeset -xi index # Index of the file being processed (1-based.)
for (( index=1; index <= ${#files}; index++ ))
do
# Set current file path
export purebuilder_current="${files[index]}"
#print $purebuilder_current
# Information output.
print -l "[$index]" "FORMAT:${puredoc_format:-htmlcls}" "TEMPLATE:${puredoc_template:+"$puredoc_template"}" "SOURCE:${purebuilder_current}" "OUTPUT:${outdir:-.}/${prefix_subpath:+$prefix_subpath/}${${purebuilder_current:r}}.html" ""
# $puredoc_format => format option for puredoc -f optional argument (htmlclass as default.)
# $puredoc_template => template file for puredoc -t optional argument. If this is not given, don't use -t option.
# $outdir => Base directory for output. if processing path is "foo.pdoc" and $outdir is "~/something", output path is "~/something/foo.html".
# If $outdir is not given, use current directory instead.
# $prefix_subpath => Optional output sub directory.
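# An example .purebuild-puredoc.rc setting these variables (all values hypothetical):
#   puredoc_format=html
#   puredoc_template=template.tpl
#   outdir=~/site/doc
#   prefix_subpath=manual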
puredoc -f ${puredoc_format:-htmlcls} ${=puredoc_template:+"-t$puredoc_template"} "${purebuilder_current}" > "${outdir:-.}/${prefix_subpath:+$prefix_subpath/}${${purebuilder_current}:r}.html"
done
fi
|
reasonset/purebuilder
|
purebuild-puredoc.zsh
|
Shell
|
mit
| 1,746 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for RHSA-2015:1604
#
# Security announcement date: 2015-08-12 16:48:42 UTC
# Script generation date: 2017-01-01 21:16:34 UTC
#
# Operating System: Red Hat 6
# Architecture: x86_64
#
# Vulnerable packages fixed in version:
# - java-1.6.0-ibm.x86_64:1.6.0.16.7-1jpp.1.el6_7
# - java-1.6.0-ibm-devel.x86_64:1.6.0.16.7-1jpp.1.el6_7
#
# Last versions recommended by the security team:
# - java-1.6.0-ibm.x86_64:1.6.0.16.35-1jpp.1.el6_8
# - java-1.6.0-ibm-devel.x86_64:1.6.0.16.35-1jpp.1.el6_8
#
# CVE List:
# - CVE-2015-1931
# - CVE-2015-2590
# - CVE-2015-2601
# - CVE-2015-2621
# - CVE-2015-2625
# - CVE-2015-2632
# - CVE-2015-2637
# - CVE-2015-2638
# - CVE-2015-2664
# - CVE-2015-4000
# - CVE-2015-4731
# - CVE-2015-4732
# - CVE-2015-4733
# - CVE-2015-4748
# - CVE-2015-4749
# - CVE-2015-4760
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo yum install java-1.6.0-ibm.x86_64-1.6.0.16.35 -y
sudo yum install java-1.6.0-ibm-devel.x86_64-1.6.0.16.35 -y
|
Cyberwatch/cbw-security-fixes
|
Red_Hat_6/x86_64/2015/RHSA-2015:1604.sh
|
Shell
|
mit
| 1,128 |
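# Usage sketch (argument roles inferred from the commands below; illustrative):
#   addUser.sh <host> <login-password> <new-user-password> <new-username> <login-user> <group>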
pass=$(perl -e 'print crypt($ARGV[0], "password")' "$3")
expect -c "
spawn ssh $5@$1
expect -re \".*?assword.*?\"
send \"$2\n\"
expect -re \".*?root.*?\"
send \"useradd -M -p $pass $4 -G $6\n\"
send \"echo DONE!!!\n\"
expect -re \"DONE!!!\""
|
clark21/UAC
|
stack/addUser.sh
|
Shell
|
mit
| 242 |
#!/bin/sh
#
# Update messages_xx.po and messages_xx.class files,
# from both java and jsp sources.
# Requires installed programs xgettext, msgfmt, msgmerge, and find.
#
# usage:
# bundle-messages.sh (generates the resource bundle from the .po file)
# bundle-messages.sh -p (updates the .po file from the source tags, then generates the resource bundle)
#
# zzz - public domain
#
CLASS=org.klomp.snark.web.messages
TMPFILE=build/javafiles.txt
export TZ=UTC
RC=0
if ! which javac > /dev/null 2>&1; then
export JAVAC=${JAVA_HOME}/../bin/javac
fi
if [ "$1" = "-p" ]
then
POUPDATE=1
fi
# On Windows, the path of the find command must be specified explicitly,
# since Windows ships its own version of find.
if which find|grep -q -i windows ; then
export PATH=.:/bin:/usr/local/bin:$PATH
fi
# Fast mode - update on demand:
# set LG2 in the environment to the language you need to enable this
# add ../java/ so the refs will work in the po file
JPATHS="../java/src"
for i in ../locale/messages_*.po
do
# get language
LG=${i#../locale/messages_}
LG=${LG%.po}
# skip, if specified
if [ -n "$LG2" ]; then
[ "$LG" != "$LG2" ] && continue || echo "INFO: Language update is set to [$LG2] only."
fi
if [ "$POUPDATE" = "1" ]
then
# make list of java files newer than the .po file
find $JPATHS -name '*.java' -newer $i > $TMPFILE
fi
if [ -s build/obj/org/klomp/snark/web/messages_$LG.class -a \
build/obj/org/klomp/snark/web/messages_$LG.class -nt $i -a \
! -s $TMPFILE ]
then
continue
fi
if [ "$POUPDATE" = "1" ]
then
echo "Updating the $i file from the tags..."
# extract strings from java and jsp files, and update messages.po files
# translate calls must be one of the forms:
# _("foo")
# _x("foo")
# To start a new translation, copy the header from an old translation to the new .po file,
# then ant distclean poupdate.
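# An example of each form in Java source (illustrative):
#   String a = _("Download complete");
#   String b = _x("Tracker");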
find $JPATHS -name '*.java' > $TMPFILE
xgettext -f $TMPFILE -F -L java --from-code=UTF-8 --add-comments\
--keyword=_ --keyword=_x \
-o ${i}t
if [ $? -ne 0 ]
then
echo "ERROR - xgettext failed on ${i}, not updating translations"
rm -f ${i}t
RC=1
break
fi
msgmerge -U --backup=none $i ${i}t
if [ $? -ne 0 ]
then
echo "ERROR - msgmerge failed on ${i}, not updating translations"
rm -f ${i}t
RC=1
break
fi
rm -f ${i}t
# so we don't do this again
touch $i
fi
if [ "$LG" != "en" ]
then
# only generate for non-source language
echo "Generating ${CLASS}_$LG ResourceBundle..."
# convert to class files in build/obj
msgfmt --java --statistics -r $CLASS -l $LG -d build/obj $i
if [ $? -ne 0 ]
then
echo "ERROR - msgfmt failed on ${i}, not updating translations"
# msgfmt leaves the class file there so the build would work the next time
find build/obj -name messages_${LG}.class -exec rm -f {} \;
RC=1
break
fi
fi
done
rm -f $TMPFILE
exit $RC
|
NoYouShutup/CryptMeme
|
CryptMeme/apps/i2psnark/java/bundle-messages.sh
|
Shell
|
mit
| 2,991 |
#!/bin/bash
pushd secure/
for gpg_file in *.gpg
do
file="${gpg_file%.gpg}"   # strip the trailing ".gpg"
gpg -d "$gpg_file" > "$file"
done
chmod 0600 *.key
popd
|
beardandcode/infrastructure
|
tools/secure/decrypt.sh
|
Shell
|
mit
| 162 |
#!/usr/bin/env bash
#
# run experiment and push results to p2rank-results git repo
#
set -x
git-push() {
set -e
git pull
git add --all
git commit -m "experiment: $@"
git push
}
./prank.sh "$@"
if [ $? -eq 0 ]; then
echo EXPERIMENT WENT OK. Pushing results to git...
( cd ../p2rank-results && git-push )
else
echo FAILED
fi
|
rdk/p2rank
|
experiment.sh
|
Shell
|
mit
| 361 |
#!/usr/bin/env bash
read -rd '' BOOTSTRAP_SCRIPT <<EOF
#!/usr/bin/env bash
supervisord -n
EOF
read -rd '' UWSGI_INI <<EOF
[uwsgi]
socket = /pynab-run/socket
chmod-socket = 666
vacuum = true
master = true
chdir = /pynab
wsgi-file = api.py
processes = 4
threads = 2
EOF
read -rd '' SUP_CONFIG <<EOF
[program:scan]
command=/usr/bin/python3 /pynab/scan.py update
autostart=true
autorestart=true
stopsignal=QUIT
user=root
[program:postproc]
command=/usr/bin/python3 /pynab/postprocess.py
autostart=true
autorestart=true
stopsignal=QUIT
user=root
[program:prebot]
command=/usr/bin/python3 /pynab/prebot.py start
autostart=true
autorestart=true
stopsignal=QUIT
user=root
[program:stats]
command=/usr/bin/python3 /pynab/scripts/stats.py
autostart=true
autorestart=true
stopsignal=QUIT
user=root
[program:api]
command=/usr/bin/uwsgi --ini /etc/uwsgi/apps-enabled/pynab.ini
autostart=true
autorestart=true
stopsignal=QUIT
user=root
[program:backfill]
command=/usr/bin/python3 /pynab/scan.py backfill
autostart=false
autorestart=true
stopsignal=QUIT
user=root
[program:pubsub]
command=/usr/bin/python3 /pynab/pubsub.py start
autostart=false
autorestart=true
stopsignal=QUIT
user=root
[group:pynab]
programs=scan,postproc,prebot,api,stats,backfill,pubsub
EOF
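# These configs are presumably written to disk by a later build step, e.g.
# (illustrative):
#   echo "$SUP_CONFIG" > /etc/supervisor/conf.d/pynab.conf
#   echo "$UWSGI_INI" > /etc/uwsgi/apps-enabled/pynab.ini
#   echo "$BOOTSTRAP_SCRIPT" > /bootstrap.sh && chmod +x /bootstrap.sh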
|
NeilBetham/container-build-scripts
|
pynab/config.sh
|
Shell
|
mit
| 1,258 |
#!/bin/bash
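# Usage sketch (inferred from the branches below; illustrative):
#   run.sh <file> [jskparser args...]   # parse a single input file
#   run.sh                              # batch mode: parse everything in input/ and diff the results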
wd=`pwd`
cd ../..
if [ $# -ge 1 ]
then
in=$1
shift
python -m jskparser.jskparser $* $wd/$in
else
if [ ! -d $wd/out ]; then mkdir -p $wd/out
else rm $wd/out/*
fi
FILES=$wd/input/*
for f in $FILES
do
echo "python -m jskparser.jskparser $f.java > $wd/out/$(basename $f)"
python -m jskparser.jskparser $f > $wd/out/$(basename $f)
done
cd $wd
diff -rBw input out > results.out
if [ -s results.out ]
then echo "Tests failed.";
else echo "Tests passed!";
fi
fi
|
plum-umd/java-sketch
|
jskparser/tests/symtab/run.sh
|
Shell
|
mit
| 544 |
#!/bin/bash
source inc_vars.sh
# Script for convergence analysis of the transport model with different time steps
#------------------------------------------
echo " Basic Parameters "
echo "---------------------------------"
echo " "
cat par/trans.par
awk '{ if ( NR == 9 ) { print "8 0";} else {print $0;} }' par/trans.par > par/trans2.par
cp par/trans2.par par/trans.par
./runngrids7.sh
awk '{ if ( NR == 9 ) { print "16 0";} else {print $0;} }' par/trans.par > par/trans2.par
cp par/trans2.par par/trans.par
./runngrids7.sh
awk '{ if ( NR == 9 ) { print "32 0";} else {print $0;} }' par/trans.par > par/trans2.par
cp par/trans2.par par/trans.par
./runngrids7.sh
awk '{ if ( NR == 9 ) { print "64 0";} else {print $0;} }' par/trans.par > par/trans2.par
cp par/trans2.par par/trans.par
./runngrids7.sh
awk '{ if ( NR == 9 ) { print "128 0";} else {print $0;} }' par/trans.par > par/trans2.par
cp par/trans2.par par/trans.par
./runngrids7.sh
awk '{ if ( NR == 9 ) { print "256 0";} else {print $0;} }' par/trans.par > par/trans2.par
cp par/trans2.par par/trans.par
./runngrids7.sh
awk '{ if ( NR == 9 ) { print "512 0";} else {print $0;} }' par/trans.par > par/trans2.par
cp par/trans2.par par/trans.par
./runngrids7.sh
awk '{ if ( NR == 9 ) { print "1024 0";} else {print $0;} }' par/trans.par > par/trans2.par
cp par/trans2.par par/trans.par
./runngrids7.sh
|
pedrospeixoto/iModel
|
sh/runntimesgrids7.sh
|
Shell
|
mit
| 1,393 |