code (string, 2-1.05M chars) | repo_name (string, 5-110 chars) | path (string, 3-922 chars) | language (1 class) | license (15 classes) | size (int64, 2-1.05M)
---|---|---|---|---|---
#!/bin/bash
echo "[$(date)] Bootstrapping HAProxy..."
function clean_up {
# Perform program exit housekeeping
echo "[$(date)] Stopping the service..."
service rsyslog stop
service haproxy stop
exit
}
trap clean_up SIGTERM
service rsyslog start
service haproxy restart
echo "[$(date)] Bootstrap finished"
tail -f /dev/null &
child=$!
wait "$child"
| kaliop/ezdocker-stack | images/haproxy/bootstrap.sh | Shell | gpl-2.0 | 365 |
#!/bin/sh
TEST_SCRIPT=./VMake/executableTester.sh
until test -r ${TEST_SCRIPT} ; do
TEST_SCRIPT=../${TEST_SCRIPT}
done
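# The loop above walks up parent directories (./, ../, ../../, ...) until the
# shared test harness is readable, so the test runs from any nesting depth.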
. ${TEST_SCRIPT}
runAndHandleSystemTestStdLocations "glucifer testEncoderLibfame.xml" "$0" "$@"
| geodynamics/gale | gLucifer/OutputFormats/tests/testEncoderLibfame.0of1.sh | Shell | gpl-2.0 | 225 |
#!/bin/sh
INSTALLPERMS=$1
BASEDIR=`echo $2 | sed 's/\/\//\//g'`
LIBDIR=`echo $3 | sed 's/\/\//\//g'`
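# The sed above collapses doubled slashes, e.g. "/usr//lib" -> "/usr/lib".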
shift
shift
shift
for d in $BASEDIR $LIBDIR; do
if [ ! -d $d ]; then
mkdir $d
if [ ! -d $d ]; then
echo Failed to make directory $d
exit 1
fi
fi
done
for p in "$@"; do
p2=`basename $p`
echo Installing $p as $LIBDIR/$p2
cp -f $p $LIBDIR/
chmod $INSTALLPERMS $LIBDIR/$p2
done
exit 0
| ipwndev/DSLinux-Mirror | user/samba/source/script/installmodules.sh | Shell | gpl-2.0 | 392 |
#!/bin/sh
# This uses installed fonts, which vary between systems
# Segoe UI and Consolas are standard in Windows 10, DejaVu is more common on Linux
echo "Building $1.pdf"
MainFont="Segoe UI"
MonoFont="Consolas"
is_font_installed() {
fontname=$1
fc-list | grep -i "$fontname" >/dev/null
}
if ! is_font_installed "$MainFont"; then
MainFont="DejaVu Sans"
fi
if ! is_font_installed "$MonoFont"; then
MonoFont="DejaVu Sans Mono"
fi
# echo Using $MainFont / $MonoFont
pandoc "$1.md" -o "$1.pdf" -s --number-sections --toc \
--pdf-engine=xelatex \
--listings \
-f markdown \
-V mainfont="$MainFont" \
-V monofont="$MonoFont" \
-V geometry:a4paper \
-V geometry:margin=2.4cm \
-V subparagraph \
-H manual-style.tex
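# Example invocation (assumed): "./build-pdf.sh manual" renders manual.md to
# manual.pdf, falling back to the DejaVu fonts when Segoe UI/Consolas are absent.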
| danmar/cppcheck | man/build-pdf.sh | Shell | gpl-3.0 | 759 |
#!/bin/sh
##
## configure_linux.sh
## Login : <[email protected]>
## Started on Thu Feb 19 13:04:22 2009 Nicolas Burrus
## $Id$
##
## Copyright (C) 2009, 2010 Nicolas Burrus
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##
mkdir -p build || exit 1
cd build || exit 1
cmake .. \
-Wno-dev \
-DCMAKE_BUILD_TYPE=RelWithDebInfo \
-DCMAKE_VERBOSE_MAKEFILE=1 \
"$@"
echo "Program configured in directory build."
echo "Now go into build/ and run make."
| rgbdemo/rgbdemo | linux_configure.sh | Shell | lgpl-3.0 | 1,137 |
#!/bin/bash
#
# (C) 2016, Markus Wildi, [email protected]
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Or visit http://www.gnu.org/licenses/gpl.html.
#
# 2016-11-22, [email protected]
cd "$HOME/rts2/scripts/u_point" || exit 1
source ./rts2_script/u_acquire_settings.sh
#
./u_acquire.py --base-path $BASE_PATH $LATITUDE $LONGITUDE $SUN_SEPARATION --plot --ds9-display --animate --level DEBUG
| jstrobl/rts2 | scripts/u_point/rts2_script/u_acquire_plot.sh | Shell | lgpl-3.0 | 1,067 |
#!/usr/bin/env sh
for dir in *
do
if [ -d "$dir" ] && [ "$(basename "$0")" != "$dir" ]
then
cp -r ../lib/ "$dir/oauth2/"
echo "Copied OAuth 2.0 library to $dir"
fi
done
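# Intended to be run from inside samples/, where every sibling directory is a
# sample app; e.g. (assumed layout): "cd samples && ./cp-oauth2.sh".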
| borismus/oauth2-extensions | samples/cp-oauth2.sh | Shell | apache-2.0 | 162 |
#!/bin/bash
cd ../
#Copy web pages to data and compress
cp -u ./WebControls/public_html/TurtleMode.html ./data/T/TurtleMode.html
gzip -7 -f ./data/T/TurtleMode.html
#Copy JS to data, compress
cp -u ./WebControls/public_html/js/blocklyBall.js ./data/T/js/blocklyBall.js
gzip -7 -f ./data/T/js/blocklyBall.js
#msg folder
cp -u ./WebControls/public_html/js/msg/messages.js ./data/T/js/msg/messages.js
gzip -7 -f ./data/T/js/msg/messages.js
#msg/js
cp -u ./WebControls/public_html/js/msg/js/en.js ./data/T/js/msg/js/en.js
gzip -7 -f ./data/T/js/msg/js/en.js
cp -u ./WebControls/public_html/js/msg/js/en.js ./data/T/js/msg/js/en-gb.js
gzip -7 -f ./data/T/js/msg/js/en-gb.js
#msg/json
cp -u ./WebControls/public_html/js/msg/json/en.json ./data/T/js/msg/json/en.json
gzip -7 -f ./data/T/js/msg/json/en.json
cp -u ./WebControls/public_html/js/msg/json/en-gb.json ./data/T/js/msg/json/en-gb.json
gzip -7 -f ./data/T/js/msg/json/en-gb.json
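# A possible refactor (sketch, not in the original): drive the repeated
# copy+compress pairs from one list; the en -> en-gb duplication would still
# need its own cp, since the destination name differs:
#   for f in TurtleMode.html js/blocklyBall.js js/msg/messages.js \
#            js/msg/js/en.js js/msg/json/en.json; do
#     cp -u "./WebControls/public_html/$f" "./data/T/$f"
#     gzip -7 -f "./data/T/$f"
#   done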
| Rodinga/FH_Tbot | Tools/loadWebpages.sh | Shell | apache-2.0 | 945 |
#!/usr/bin/env bash
set -eu
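# Resolve the directory containing this script (works regardless of the
# caller's working directory), then load the shared helpers from it.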
ABSOLUTE_PATH=$(cd `dirname "${BASH_SOURCE[0]}"` && pwd)
source $ABSOLUTE_PATH/functions.sh
build_helper gridftp
| PerilousApricot/lstore | scripts/build-bindings.sh | Shell | apache-2.0 | 143 |
#!/bin/bash
FN="htratfocusprobe_2.18.0.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.10/data/annotation/src/contrib/htratfocusprobe_2.18.0.tar.gz"
"https://bioarchive.galaxyproject.org/htratfocusprobe_2.18.0.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-htratfocusprobe/bioconductor-htratfocusprobe_2.18.0_src_all.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-htratfocusprobe/bioconductor-htratfocusprobe_2.18.0_src_all.tar.gz"
)
MD5="26a0963d8aff314a4a1f2c47e9147a8a"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in "${URLS[@]}"; do
curl -fsSL "$URL" > "$TARBALL"
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks.
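# (Formats assumed:) GNU "md5sum -c" reads "<md5>  <file>" lines; macOS "md5"
# prints "MD5 (<file>) = <md5>", hence the cut on the 4th space-field below.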
if [[ $(uname -s) == "Linux" ]]; then
if md5sum -c <<<"$MD5 $TARBALL"; then
SUCCESS=1
break
fi
elif [[ $(uname -s) == "Darwin" ]]; then
if [[ $(md5 "$TARBALL" | cut -f4 -d " ") == "$MD5" ]]; then
SUCCESS=1
break
fi
fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
| Luobiny/bioconda-recipes | recipes/bioconductor-htratfocusprobe/post-link.sh | Shell | mit | 1,452 |
export QT_QPA_PLATFORMTHEME=appmenu-qt5
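# The export above is dropped into /etc/profile.d/, so login shells source it
# and Qt5 apps default to the appmenu platform theme (assumed intent).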
| Winael/snappy-playpen-1 | sublime-text-3/stage/etc/profile.d/appmenu-qt5.sh | Shell | mit | 40 |
#!/bin/bash
###########################################################################################
## Copyright 2003, 2015 IBM Corp ##
## ##
## Redistribution and use in source and binary forms, with or without modification, ##
## are permitted provided that the following conditions are met: ##
## 1.Redistributions of source code must retain the above copyright notice, ##
## this list of conditions and the following disclaimer. ##
## 2.Redistributions in binary form must reproduce the above copyright notice, this ##
## list of conditions and the following disclaimer in the documentation and/or ##
## other materials provided with the distribution. ##
## ##
## THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS ##
## OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF ##
## MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ##
## THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, ##
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ##
## SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) ##
## HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, ##
## OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ##
## SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ##
############################################################################################
## File : gnutls.sh
##
## Description: Tests for gnutls package.
##
## Author: Gowri Shankar <[email protected]>
###########################################################################################
## source the utility functions
#cd $(dirname $0)
#LTPBIN="${PWD%%/testcases/*}/testcases/bin"
source $LTPBIN/tc_utils.source
################################################################################
# test variables
################################################################################
installed="certtool gnutls-serv gnutls-cli-debug"
required="echo grep"
port=""
pid=""
################################################################################
# test functions
################################################################################
function tc_local_setup()
{
# check installation and environment
tc_root_or_break || return
tc_exec_or_break $installed || return
tc_exec_or_break $required || return
tc_find_port && port=$TC_PORT
tc_break_if_bad $? "no free port available" || return
}
function tc_local_cleanup()
{
:
}
#
# test A
# create self-signed certificate using certtool
#
function test_ca()
{
tc_register "creating CA"
local issuer=""
certtool --generate-privkey > $TCTMP/ca-key.pem 2>$stderr
tc_fail_if_bad_rc $? "unable to create CA private key" || return
# common name of certificate owner
echo 'cn = gnutls CA test' > $TCTMP/ca.tmpl
# this is a CA certificate
echo 'ca' >> $TCTMP/ca.tmpl
# this key will be used to sign other certificates
echo 'cert_signing_key' >> $TCTMP/ca.tmpl
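# ca.tmpl now contains:
#   cn = gnutls CA test
#   ca
#   cert_signing_key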
# create self-signed CA certificate
certtool --generate-self-signed \
--load-privkey $TCTMP/ca-key.pem \
--template $TCTMP/ca.tmpl --outfile $TCTMP/ca-cert.pem \
>$stdout 2>$stderr
tc_fail_if_bad_rc $? "unable to create CA certificate" || return
# verify certificate information
certtool --certificate-info \
--infile $TCTMP/ca-cert.pem \
1>$stdout 2>$stderr
issuer=$(grep Issuer $stdout | awk -F= '{print $2}')
[ "$issuer" = "gnutls CA test" ]
tc_pass_or_fail $? "mismatching issuer name found"
}
#
# test B
# 1. list supported algorithms using gnutls-serv
# 2. start gnutls test server instance
#
function test_server()
{
tc_register "checking if supported algorithms are listed"
gnutls-serv -l 1>$stdout 2>$stderr
tc_pass_or_fail $? "unable to list supported algorithms"
tc_register "setting up gnutls test server"
certtool --generate-privkey > $TCTMP/ca-server-key.pem
# organization of the subject
echo 'organization = GnuTLS test server' > $TCTMP/ca-server.tmpl
# common name of certificate owner
echo 'cn = test.example.com' >> $TCTMP/ca-server.tmpl
# this certificate will be used for a TLS server
echo 'tls_www_server' >> $TCTMP/ca-server.tmpl
# this certificate will be used to encrypt data
echo 'encryption_key' >> $TCTMP/ca-server.tmpl
# this certificate will be used to sign data
echo 'signing_key' >> $TCTMP/ca-server.tmpl
# dns name of WWW server
echo 'dns_name = test.example.com' >> $TCTMP/ca-server.tmpl
certtool --generate-certificate \
--load-privkey $TCTMP/ca-server-key.pem \
--load-ca-certificate $TCTMP/ca-cert.pem \
--load-ca-privkey $TCTMP/ca-key.pem \
--template $TCTMP/ca-server.tmpl \
--outfile $TCTMP/ca-server-cert.pem \
&>$stdout
tc_fail_if_bad $? "unable to generate server side certificate" || return
# start the server side instance
gnutls-serv --http \
--port $port \
--x509cafile $TCTMP/ca-cert.pem \
--x509keyfile $TCTMP/ca-server-key.pem \
--x509certfile $TCTMP/ca-server-cert.pem \
1>$stdout 2>$stderr &
tc_pass_or_fail $? "unable to start gnutls test server"
pid=$!
TC_SLEEPER_PIDS=$pid
}
#
# test C
# 1. list supported algorithms using gnutls-cli
# 2. start gnutls test server instance and check with gnutls debug client
# 3. connect gnutls client tool with server in various ways.
#
function test_client()
{
tc_register "list supported algorithms/protocols"
gnutls-cli --list 1>$stdout 2>$stderr
tc_pass_or_fail $? "unable to list algorithms supported"
tc_register "checking with gnutls test client (for debugging)"
tc_info "generating client side certificate"
certtool --generate-privkey > $TCTMP/ca-client-key.pem
# organization of the subject
echo 'organization = GnuTLS test client' > $TCTMP/ca-client.tmpl
# this certificate will be used for a TLS client
echo 'tls_www_client' >> $TCTMP/ca-client.tmpl
# this certificate will be used to encrypt data
echo 'encryption_key' >> $TCTMP/ca-client.tmpl
# this certificate will be used to sign data
echo 'signing_key' >> $TCTMP/ca-client.tmpl
certtool --generate-certificate \
--load-privkey $TCTMP/ca-client-key.pem \
--load-ca-certificate $TCTMP/ca-cert.pem \
--load-ca-privkey $TCTMP/ca-key.pem \
--template $TCTMP/ca-client.tmpl \
--outfile $TCTMP/ca-client-cert.pem \
&>$stdout
tc_fail_if_bad $? "unable to generate client side certificate"
# check if client debug tool can connect the server
gnutls-cli-debug -p $port localhost 1>$stdout 2>$stderr &
tc_wait_for_pid $! && tc_pass_or_fail $? "unable to connect gnutls server"
# The tests below verify the server/client connections.
# Bug 83535 is blocking further checks with gnutls-server.
# Tests covering the use cases of gnutls in vnc, lp/cups and libvirtd
# will be added later.
}
################################################################################
# main
################################################################################
TST_TOTAL=5
tc_setup
test_ca && test_server
test_ca && test_client
| PoornimaNayak/autotest-client-tests | linux-tools/gnutls/gnutls.sh | Shell | gpl-2.0 | 7,721 |
#!/bin/bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
# This command builds and runs a local kubernetes cluster.
# You may need to run this as root to allow kubelet to open docker's socket,
# and to write the test CA in /var/run/kubernetes.
DOCKER_OPTS=${DOCKER_OPTS:-""}
DOCKER=(docker ${DOCKER_OPTS})
DOCKERIZE_KUBELET=${DOCKERIZE_KUBELET:-""}
ALLOW_PRIVILEGED=${ALLOW_PRIVILEGED:-""}
ALLOW_SECURITY_CONTEXT=${ALLOW_SECURITY_CONTEXT:-""}
PSP_ADMISSION=${PSP_ADMISSION:-""}
NODE_ADMISSION=${NODE_ADMISSION:-""}
RUNTIME_CONFIG=${RUNTIME_CONFIG:-""}
KUBELET_AUTHORIZATION_WEBHOOK=${KUBELET_AUTHORIZATION_WEBHOOK:-""}
KUBELET_AUTHENTICATION_WEBHOOK=${KUBELET_AUTHENTICATION_WEBHOOK:-""}
POD_MANIFEST_PATH=${POD_MANIFEST_PATH:-"/var/run/kubernetes/static-pods"}
KUBELET_FLAGS=${KUBELET_FLAGS:-""}
# many dev environments run with swap on, so we don't fail in this env
FAIL_SWAP_ON=${FAIL_SWAP_ON:-"false"}
# Name of the network plugin, eg: "kubenet"
NET_PLUGIN=${NET_PLUGIN:-""}
# Place the config files and binaries required by NET_PLUGIN in these directory,
# eg: "/etc/cni/net.d" for config files, and "/opt/cni/bin" for binaries.
CNI_CONF_DIR=${CNI_CONF_DIR:-""}
CNI_BIN_DIR=${CNI_BIN_DIR:-""}
SERVICE_CLUSTER_IP_RANGE=${SERVICE_CLUSTER_IP_RANGE:-10.0.0.0/24}
FIRST_SERVICE_CLUSTER_IP=${FIRST_SERVICE_CLUSTER_IP:-10.0.0.1}
# if enabled, must set CGROUP_ROOT
CGROUPS_PER_QOS=${CGROUPS_PER_QOS:-true}
# name of the cgroup driver, i.e. cgroupfs or systemd
CGROUP_DRIVER=${CGROUP_DRIVER:-""}
# owner of client certs, default to current user if not specified
USER=${USER:-$(whoami)}
# enables testing eviction scenarios locally.
EVICTION_HARD=${EVICTION_HARD:-"memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%"}
EVICTION_SOFT=${EVICTION_SOFT:-""}
EVICTION_PRESSURE_TRANSITION_PERIOD=${EVICTION_PRESSURE_TRANSITION_PERIOD:-"1m"}
# This script uses docker0 (or whatever container bridge docker is currently using)
# and we don't know the IP of the DNS pod to pass in as --cluster-dns.
# To set this up by hand, set this flag and change DNS_SERVER_IP.
# Note also that you need API_HOST (defined above) for correct DNS.
KUBEPROXY_MODE=${KUBEPROXY_MODE:-""}
ENABLE_CLUSTER_DNS=${KUBE_ENABLE_CLUSTER_DNS:-true}
DNS_SERVER_IP=${KUBE_DNS_SERVER_IP:-10.0.0.10}
DNS_DOMAIN=${KUBE_DNS_NAME:-"cluster.local"}
KUBECTL=${KUBECTL:-cluster/kubectl.sh}
WAIT_FOR_URL_API_SERVER=${WAIT_FOR_URL_API_SERVER:-20}
ENABLE_DAEMON=${ENABLE_DAEMON:-false}
HOSTNAME_OVERRIDE=${HOSTNAME_OVERRIDE:-"127.0.0.1"}
CLOUD_PROVIDER=${CLOUD_PROVIDER:-""}
CLOUD_CONFIG=${CLOUD_CONFIG:-""}
FEATURE_GATES=${FEATURE_GATES:-"AllAlpha=false"}
STORAGE_BACKEND=${STORAGE_BACKEND:-"etcd3"}
# enable swagger ui
ENABLE_SWAGGER_UI=${ENABLE_SWAGGER_UI:-false}
# enable kubernetes dashboard
ENABLE_CLUSTER_DASHBOARD=${KUBE_ENABLE_CLUSTER_DASHBOARD:-false}
# enable audit log
ENABLE_APISERVER_BASIC_AUDIT=${ENABLE_APISERVER_BASIC_AUDIT:-false}
# RBAC Mode options
AUTHORIZATION_MODE=${AUTHORIZATION_MODE:-"Node,RBAC"}
KUBECONFIG_TOKEN=${KUBECONFIG_TOKEN:-""}
AUTH_ARGS=${AUTH_ARGS:-""}
# Install a default storage class (enabled by default)
DEFAULT_STORAGE_CLASS=${KUBE_DEFAULT_STORAGE_CLASS:-true}
# start the cache mutation detector by default so that cache mutators will be found
KUBE_CACHE_MUTATION_DETECTOR="${KUBE_CACHE_MUTATION_DETECTOR:-true}"
export KUBE_CACHE_MUTATION_DETECTOR
# panic the server on watch decode errors since they are considered coder mistakes
KUBE_PANIC_WATCH_DECODE_ERROR="${KUBE_PANIC_WATCH_DECODE_ERROR:-true}"
export KUBE_PANIC_WATCH_DECODE_ERROR
ADMISSION_CONTROL=${ADMISSION_CONTROL:-""}
ADMISSION_CONTROL_CONFIG_FILE=${ADMISSION_CONTROL_CONFIG_FILE:-""}
# START_MODE can be 'all', 'kubeletonly', or 'nokubelet'
START_MODE=${START_MODE:-"all"}
# A list of controllers to enable
KUBE_CONTROLLERS="${KUBE_CONTROLLERS:-"*"}"
# sanity check for OpenStack provider
if [ "${CLOUD_PROVIDER}" == "openstack" ]; then
if [ "${CLOUD_CONFIG}" == "" ]; then
echo "Missing CLOUD_CONFIG env for OpenStack provider!"
exit 1
fi
if [ ! -f "${CLOUD_CONFIG}" ]; then
echo "Cloud config ${CLOUD_CONFIG} doesn't exist"
exit 1
fi
fi
#set feature gates if using ipvs mode
if [ "${KUBEPROXY_MODE}" == "ipvs" ]; then
FEATURE_GATES="$FEATURE_GATES,SupportIPVSProxyMode=true"
fi
# warn if users are running with swap allowed
if [ "${FAIL_SWAP_ON}" == "false" ]; then
echo "WARNING : The kubelet is configured to not fail if swap is enabled; production deployments should disable swap."
fi
if [ "$(id -u)" != "0" ]; then
echo "WARNING : This script MAY be run as root for docker socket / iptables functionality; if failures occur, retry as root." 2>&1
fi
# Stop right away if the build fails
set -e
source "${KUBE_ROOT}/hack/lib/init.sh"
function usage {
echo "This script starts a local kube cluster. "
echo "Example 0: hack/local-up-cluster.sh -h (this 'help' usage description)"
echo "Example 1: hack/local-up-cluster.sh -o _output/dockerized/bin/linux/amd64/ (run from docker output)"
echo "Example 2: hack/local-up-cluster.sh -O (auto-guess the bin path for your platform)"
echo "Example 3: hack/local-up-cluster.sh (build a local copy of the source)"
}
# This function guesses where the existing cached binary build is for the `-O`
# flag
function guess_built_binary_path {
local hyperkube_path=$(kube::util::find-binary "hyperkube")
if [[ -z "${hyperkube_path}" ]]; then
return
fi
echo -n "$(dirname "${hyperkube_path}")"
}
### Allow user to supply the source directory.
GO_OUT=${GO_OUT:-}
while getopts "ho:O" OPTION
do
case $OPTION in
o)
echo "skipping build"
GO_OUT="$OPTARG"
echo "using source $GO_OUT"
;;
O)
GO_OUT=$(guess_built_binary_path)
if [ "$GO_OUT" == "" ]; then
echo "Could not guess the correct output directory to use."
exit 1
fi
;;
h)
usage
exit
;;
?)
usage
exit
;;
esac
done
if [ "x$GO_OUT" == "x" ]; then
make -C "${KUBE_ROOT}" WHAT="cmd/kubectl cmd/hyperkube"
else
echo "skipped the build."
fi
function test_rkt {
if [[ -n "${RKT_PATH}" ]]; then
${RKT_PATH} list 2> /dev/null 1> /dev/null
if [ "$?" != "0" ]; then
echo "Failed to successfully run 'rkt list', please verify that ${RKT_PATH} is the path of rkt binary."
exit 1
fi
else
rkt list 2> /dev/null 1> /dev/null
if [ "$?" != "0" ]; then
echo "Failed to successfully run 'rkt list', please verify that rkt is in \$PATH."
exit 1
fi
fi
}
# Shut down anyway if there's an error.
set +e
API_PORT=${API_PORT:-8080}
API_SECURE_PORT=${API_SECURE_PORT:-6443}
# WARNING: For DNS to work on most setups you should export API_HOST as the docker0 ip address,
API_HOST=${API_HOST:-localhost}
API_HOST_IP=${API_HOST_IP:-"127.0.0.1"}
ADVERTISE_ADDRESS=${ADVERTISE_ADDRESS:-""}
API_BIND_ADDR=${API_BIND_ADDR:-"0.0.0.0"}
EXTERNAL_HOSTNAME=${EXTERNAL_HOSTNAME:-localhost}
KUBELET_HOST=${KUBELET_HOST:-"127.0.0.1"}
# By default only allow CORS for requests on localhost
API_CORS_ALLOWED_ORIGINS=${API_CORS_ALLOWED_ORIGINS:-/127.0.0.1(:[0-9]+)?$,/localhost(:[0-9]+)?$}
KUBELET_PORT=${KUBELET_PORT:-10250}
LOG_LEVEL=${LOG_LEVEL:-3}
# Use to increase verbosity on particular files, e.g. LOG_SPEC=token_controller*=5,other_controller*=4
LOG_SPEC=${LOG_SPEC:-""}
LOG_DIR=${LOG_DIR:-"/tmp"}
CONTAINER_RUNTIME=${CONTAINER_RUNTIME:-"docker"}
CONTAINER_RUNTIME_ENDPOINT=${CONTAINER_RUNTIME_ENDPOINT:-""}
IMAGE_SERVICE_ENDPOINT=${IMAGE_SERVICE_ENDPOINT:-""}
RKT_PATH=${RKT_PATH:-""}
RKT_STAGE1_IMAGE=${RKT_STAGE1_IMAGE:-""}
CHAOS_CHANCE=${CHAOS_CHANCE:-0.0}
CPU_CFS_QUOTA=${CPU_CFS_QUOTA:-true}
ENABLE_HOSTPATH_PROVISIONER=${ENABLE_HOSTPATH_PROVISIONER:-"false"}
CLAIM_BINDER_SYNC_PERIOD=${CLAIM_BINDER_SYNC_PERIOD:-"15s"} # current k8s default
ENABLE_CONTROLLER_ATTACH_DETACH=${ENABLE_CONTROLLER_ATTACH_DETACH:-"true"} # current default
KEEP_TERMINATED_POD_VOLUMES=${KEEP_TERMINATED_POD_VOLUMES:-"true"}
# This is the default dir and filename where the apiserver will generate a self-signed cert
# which should be able to be used as the CA to verify itself
CERT_DIR=${CERT_DIR:-"/var/run/kubernetes"}
ROOT_CA_FILE=${CERT_DIR}/server-ca.crt
ROOT_CA_KEY=${CERT_DIR}/server-ca.key
CLUSTER_SIGNING_CERT_FILE=${CLUSTER_SIGNING_CERT_FILE:-"${ROOT_CA_FILE}"}
CLUSTER_SIGNING_KEY_FILE=${CLUSTER_SIGNING_KEY_FILE:-"${ROOT_CA_KEY}"}
# name of the cgroup driver, i.e. cgroupfs or systemd
if [[ ${CONTAINER_RUNTIME} == "docker" ]]; then
# default cgroup driver to match what is reported by docker to simplify local development
if [[ -z ${CGROUP_DRIVER} ]]; then
# match driver with docker runtime reported value (they must match)
CGROUP_DRIVER=$(docker info | grep "Cgroup Driver:" | cut -f3- -d' ')
echo "Kubelet cgroup driver defaulted to use: ${CGROUP_DRIVER}"
fi
fi
# Ensure CERT_DIR is created for auto-generated crt/key and kubeconfig
mkdir -p "${CERT_DIR}" &>/dev/null || sudo mkdir -p "${CERT_DIR}"
CONTROLPLANE_SUDO=$(test -w "${CERT_DIR}" || echo "sudo -E")
function test_apiserver_off {
# For the common local scenario, fail fast if server is already running.
# this can happen if you run local-up-cluster.sh twice and kill etcd in between.
if [[ "${API_PORT}" -gt "0" ]]; then
curl --silent -g $API_HOST:$API_PORT
if [ ! $? -eq 0 ]; then
echo "API SERVER insecure port is free, proceeding..."
else
echo "ERROR starting API SERVER, exiting. Some process on $API_HOST is serving already on $API_PORT"
exit 1
fi
fi
curl --silent -k -g $API_HOST:$API_SECURE_PORT
if [ ! $? -eq 0 ]; then
echo "API SERVER secure port is free, proceeding..."
else
echo "ERROR starting API SERVER, exiting. Some process on $API_HOST is serving already on $API_SECURE_PORT"
exit 1
fi
}
function detect_binary {
# Detect the OS name/arch so that we can find our binary
case "$(uname -s)" in
Darwin)
host_os=darwin
;;
Linux)
host_os=linux
;;
*)
echo "Unsupported host OS. Must be Linux or Mac OS X." >&2
exit 1
;;
esac
case "$(uname -m)" in
x86_64*)
host_arch=amd64
;;
i?86_64*)
host_arch=amd64
;;
amd64*)
host_arch=amd64
;;
aarch64*)
host_arch=arm64
;;
arm64*)
host_arch=arm64
;;
arm*)
host_arch=arm
;;
i?86*)
host_arch=x86
;;
s390x*)
host_arch=s390x
;;
ppc64le*)
host_arch=ppc64le
;;
*)
echo "Unsupported host arch. Must be x86_64, 386, arm, arm64, s390x or ppc64le." >&2
exit 1
;;
esac
GO_OUT="${KUBE_ROOT}/_output/local/bin/${host_os}/${host_arch}"
}
cleanup_dockerized_kubelet()
{
if [[ -e $KUBELET_CIDFILE ]]; then
docker kill $(<$KUBELET_CIDFILE) > /dev/null
rm -f $KUBELET_CIDFILE
fi
}
cleanup()
{
echo "Cleaning up..."
# delete running images
# if [[ "${ENABLE_CLUSTER_DNS}" == true ]]; then
# Still need to figure why this commands throw an error: Error from server: client: etcd cluster is unavailable or misconfigured
# ${KUBECTL} --namespace=kube-system delete service kube-dns
# And this one hang forever:
# ${KUBECTL} --namespace=kube-system delete rc kube-dns-v10
# fi
# Check if the API server is still running
[[ -n "${APISERVER_PID-}" ]] && APISERVER_PIDS=$(pgrep -P ${APISERVER_PID} ; ps -o pid= -p ${APISERVER_PID})
[[ -n "${APISERVER_PIDS-}" ]] && sudo kill ${APISERVER_PIDS}
# Check if the controller-manager is still running
[[ -n "${CTLRMGR_PID-}" ]] && CTLRMGR_PIDS=$(pgrep -P ${CTLRMGR_PID} ; ps -o pid= -p ${CTLRMGR_PID})
[[ -n "${CTLRMGR_PIDS-}" ]] && sudo kill ${CTLRMGR_PIDS}
if [[ -n "$DOCKERIZE_KUBELET" ]]; then
cleanup_dockerized_kubelet
else
# Check if the kubelet is still running
[[ -n "${KUBELET_PID-}" ]] && KUBELET_PIDS=$(pgrep -P ${KUBELET_PID} ; ps -o pid= -p ${KUBELET_PID})
[[ -n "${KUBELET_PIDS-}" ]] && sudo kill ${KUBELET_PIDS}
fi
# Check if the proxy is still running
[[ -n "${PROXY_PID-}" ]] && PROXY_PIDS=$(pgrep -P ${PROXY_PID} ; ps -o pid= -p ${PROXY_PID})
[[ -n "${PROXY_PIDS-}" ]] && sudo kill ${PROXY_PIDS}
# Check if the scheduler is still running
[[ -n "${SCHEDULER_PID-}" ]] && SCHEDULER_PIDS=$(pgrep -P ${SCHEDULER_PID} ; ps -o pid= -p ${SCHEDULER_PID})
[[ -n "${SCHEDULER_PIDS-}" ]] && sudo kill ${SCHEDULER_PIDS}
# Check if the etcd is still running
[[ -n "${ETCD_PID-}" ]] && kube::etcd::stop
[[ -n "${ETCD_DIR-}" ]] && kube::etcd::clean_etcd_dir
exit 0
}
function warning {
message=$1
echo $(tput bold)$(tput setaf 1)
echo "WARNING: ${message}"
echo $(tput sgr0)
}
function start_etcd {
echo "Starting etcd"
kube::etcd::start
}
function set_service_accounts {
SERVICE_ACCOUNT_LOOKUP=${SERVICE_ACCOUNT_LOOKUP:-true}
SERVICE_ACCOUNT_KEY=${SERVICE_ACCOUNT_KEY:-/tmp/kube-serviceaccount.key}
# Generate ServiceAccount key if needed
if [[ ! -f "${SERVICE_ACCOUNT_KEY}" ]]; then
mkdir -p "$(dirname ${SERVICE_ACCOUNT_KEY})"
openssl genrsa -out "${SERVICE_ACCOUNT_KEY}" 2048 2>/dev/null
fi
}
function start_apiserver {
security_admission=""
if [[ -z "${ALLOW_SECURITY_CONTEXT}" ]]; then
security_admission=",SecurityContextDeny"
fi
if [[ -n "${PSP_ADMISSION}" ]]; then
security_admission=",PodSecurityPolicy"
fi
if [[ -n "${NODE_ADMISSION}" ]]; then
security_admission=",NodeRestriction"
fi
# Admission Controllers to invoke prior to persisting objects in cluster
ADMISSION_CONTROL=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount${security_admission},DefaultStorageClass,DefaultTolerationSeconds,ResourceQuota
# This is the default dir and filename where the apiserver will generate a self-signed cert
# which should be able to be used as the CA to verify itself
audit_arg=""
APISERVER_BASIC_AUDIT_LOG=""
if [[ "${ENABLE_APISERVER_BASIC_AUDIT:-}" = true ]]; then
# We currently only support enabling with a fixed path and with built-in log
# rotation "disabled" (large value) so it behaves like kube-apiserver.log.
# External log rotation should be set up the same as for kube-apiserver.log.
APISERVER_BASIC_AUDIT_LOG=/tmp/kube-apiserver-audit.log
audit_arg=" --audit-log-path=${APISERVER_BASIC_AUDIT_LOG}"
audit_arg+=" --audit-log-maxage=0"
audit_arg+=" --audit-log-maxbackup=0"
# Lumberjack doesn't offer any way to disable size-based rotation. It also
# has an in-memory counter that doesn't notice if you truncate the file.
# 2000000000 (in MiB) is a large number that fits in 31 bits. If the log
# grows at 10MiB/s (~30K QPS), it will rotate after ~6 years if apiserver
# never restarts. Please manually restart apiserver before this time.
audit_arg+=" --audit-log-maxsize=2000000000"
fi
swagger_arg=""
if [[ "${ENABLE_SWAGGER_UI}" = true ]]; then
swagger_arg="--enable-swagger-ui=true "
fi
authorizer_arg=""
if [[ -n "${AUTHORIZATION_MODE}" ]]; then
authorizer_arg="--authorization-mode=${AUTHORIZATION_MODE} "
fi
priv_arg=""
if [[ -n "${ALLOW_PRIVILEGED}" ]]; then
priv_arg="--allow-privileged "
fi
if [[ ${ADMISSION_CONTROL} == *"Initializers"* ]]; then
if [[ -n "${RUNTIME_CONFIG}" ]]; then
RUNTIME_CONFIG+=","
fi
RUNTIME_CONFIG+="admissionregistration.k8s.io/v1alpha1"
fi
runtime_config=""
if [[ -n "${RUNTIME_CONFIG}" ]]; then
runtime_config="--runtime-config=${RUNTIME_CONFIG}"
fi
# Let the API server pick a default address when API_HOST_IP
# is set to 127.0.0.1
advertise_address=""
if [[ "${API_HOST_IP}" != "127.0.0.1" ]]; then
advertise_address="--advertise_address=${API_HOST_IP}"
fi
if [[ "${ADVERTISE_ADDRESS}" != "" ]] ; then
advertise_address="--advertise_address=${ADVERTISE_ADDRESS}"
fi
# Create CA signers
if [[ "${ENABLE_SINGLE_CA_SIGNER:-}" = true ]]; then
kube::util::create_signing_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" server '"client auth","server auth"'
sudo cp "${CERT_DIR}/server-ca.key" "${CERT_DIR}/client-ca.key"
sudo cp "${CERT_DIR}/server-ca.crt" "${CERT_DIR}/client-ca.crt"
sudo cp "${CERT_DIR}/server-ca-config.json" "${CERT_DIR}/client-ca-config.json"
else
kube::util::create_signing_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" server '"server auth"'
kube::util::create_signing_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" client '"client auth"'
fi
# Create auth proxy client ca
kube::util::create_signing_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" request-header '"client auth"'
# serving cert for kube-apiserver
kube::util::create_serving_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "server-ca" kube-apiserver kubernetes.default kubernetes.default.svc "localhost" ${API_HOST_IP} ${API_HOST} ${FIRST_SERVICE_CLUSTER_IP}
# Create client certs signed with client-ca, given id, given CN and a number of groups
kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' kubelet system:node:${HOSTNAME_OVERRIDE} system:nodes
kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' kube-proxy system:kube-proxy system:nodes
kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' controller system:kube-controller-manager
kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' scheduler system:kube-scheduler
kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' admin system:admin system:masters
# Create matching certificates for kube-aggregator
kube::util::create_serving_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "server-ca" kube-aggregator api.kube-public.svc "localhost" ${API_HOST_IP}
kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" request-header-ca auth-proxy system:auth-proxy
# TODO remove masters and add rolebinding
kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' kube-aggregator system:kube-aggregator system:masters
kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${API_HOST}" "${API_SECURE_PORT}" kube-aggregator
APISERVER_LOG=${LOG_DIR}/kube-apiserver.log
${CONTROLPLANE_SUDO} "${GO_OUT}/hyperkube" apiserver ${swagger_arg} ${audit_arg} ${authorizer_arg} ${priv_arg} ${runtime_config}\
${advertise_address} \
--v=${LOG_LEVEL} \
--vmodule="${LOG_SPEC}" \
--cert-dir="${CERT_DIR}" \
--client-ca-file="${CERT_DIR}/client-ca.crt" \
--service-account-key-file="${SERVICE_ACCOUNT_KEY}" \
--service-account-lookup="${SERVICE_ACCOUNT_LOOKUP}" \
--admission-control="${ADMISSION_CONTROL}" \
--admission-control-config-file="${ADMISSION_CONTROL_CONFIG_FILE}" \
--bind-address="${API_BIND_ADDR}" \
--secure-port="${API_SECURE_PORT}" \
--tls-cert-file="${CERT_DIR}/serving-kube-apiserver.crt" \
--tls-private-key-file="${CERT_DIR}/serving-kube-apiserver.key" \
--tls-ca-file="${CERT_DIR}/server-ca.crt" \
--insecure-bind-address="${API_HOST_IP}" \
--insecure-port="${API_PORT}" \
--storage-backend=${STORAGE_BACKEND} \
--etcd-servers="http://${ETCD_HOST}:${ETCD_PORT}" \
--service-cluster-ip-range="${SERVICE_CLUSTER_IP_RANGE}" \
--feature-gates="${FEATURE_GATES}" \
--external-hostname="${EXTERNAL_HOSTNAME}" \
--cloud-provider="${CLOUD_PROVIDER}" \
--cloud-config="${CLOUD_CONFIG}" \
--requestheader-username-headers=X-Remote-User \
--requestheader-group-headers=X-Remote-Group \
--requestheader-extra-headers-prefix=X-Remote-Extra- \
--requestheader-client-ca-file="${CERT_DIR}/request-header-ca.crt" \
--requestheader-allowed-names=system:auth-proxy \
--proxy-client-cert-file="${CERT_DIR}/client-auth-proxy.crt" \
--proxy-client-key-file="${CERT_DIR}/client-auth-proxy.key" \
--cors-allowed-origins="${API_CORS_ALLOWED_ORIGINS}" >"${APISERVER_LOG}" 2>&1 &
APISERVER_PID=$!
# Wait for kube-apiserver to come up before launching the rest of the components.
echo "Waiting for apiserver to come up"
# this uses the API port because if you don't have any authenticator, you can't seem to use the secure port at all.
# this matches what happened with the combination in 1.4.
# TODO change this conditionally based on whether API_PORT is on or off
kube::util::wait_for_url "https://${API_HOST_IP}:${API_SECURE_PORT}/healthz" "apiserver: " 1 ${WAIT_FOR_URL_API_SERVER} \
|| { echo "check apiserver logs: ${APISERVER_LOG}" ; exit 1 ; }
# Create kubeconfigs for all components, using client certs
kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${API_HOST}" "${API_SECURE_PORT}" admin
${CONTROLPLANE_SUDO} chown "${USER}" "${CERT_DIR}/client-admin.key" # make readable for kubectl
kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${API_HOST}" "${API_SECURE_PORT}" kubelet
kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${API_HOST}" "${API_SECURE_PORT}" kube-proxy
kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${API_HOST}" "${API_SECURE_PORT}" controller
kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${API_HOST}" "${API_SECURE_PORT}" scheduler
if [[ -z "${AUTH_ARGS}" ]]; then
AUTH_ARGS="--client-key=${CERT_DIR}/client-admin.key --client-certificate=${CERT_DIR}/client-admin.crt"
fi
${CONTROLPLANE_SUDO} cp "${CERT_DIR}/admin.kubeconfig" "${CERT_DIR}/admin-kube-aggregator.kubeconfig"
${CONTROLPLANE_SUDO} chown $(whoami) "${CERT_DIR}/admin-kube-aggregator.kubeconfig"
${KUBECTL} config set-cluster local-up-cluster --kubeconfig="${CERT_DIR}/admin-kube-aggregator.kubeconfig" --server="https://${API_HOST_IP}:31090"
echo "use 'kubectl --kubeconfig=${CERT_DIR}/admin-kube-aggregator.kubeconfig' to use the aggregated API server"
}
function start_controller_manager {
node_cidr_args=""
if [[ "${NET_PLUGIN}" == "kubenet" ]]; then
node_cidr_args="--allocate-node-cidrs=true --cluster-cidr=10.1.0.0/16 "
fi
CTLRMGR_LOG=${LOG_DIR}/kube-controller-manager.log
${CONTROLPLANE_SUDO} "${GO_OUT}/hyperkube" controller-manager \
--v=${LOG_LEVEL} \
--vmodule="${LOG_SPEC}" \
--service-account-private-key-file="${SERVICE_ACCOUNT_KEY}" \
--root-ca-file="${ROOT_CA_FILE}" \
--cluster-signing-cert-file="${CLUSTER_SIGNING_CERT_FILE}" \
--cluster-signing-key-file="${CLUSTER_SIGNING_KEY_FILE}" \
--enable-hostpath-provisioner="${ENABLE_HOSTPATH_PROVISIONER}" \
${node_cidr_args} \
--pvclaimbinder-sync-period="${CLAIM_BINDER_SYNC_PERIOD}" \
--feature-gates="${FEATURE_GATES}" \
--cloud-provider="${CLOUD_PROVIDER}" \
--cloud-config="${CLOUD_CONFIG}" \
--kubeconfig "$CERT_DIR"/controller.kubeconfig \
--use-service-account-credentials \
--controllers="${KUBE_CONTROLLERS}" \
--master="https://${API_HOST}:${API_SECURE_PORT}" >"${CTLRMGR_LOG}" 2>&1 &
CTLRMGR_PID=$!
}
function start_kubelet {
KUBELET_LOG=${LOG_DIR}/kubelet.log
mkdir -p "${POD_MANIFEST_PATH}" &>/dev/null || sudo mkdir -p "${POD_MANIFEST_PATH}"
priv_arg=""
if [[ -n "${ALLOW_PRIVILEGED}" ]]; then
priv_arg="--allow-privileged "
fi
mkdir -p "/var/lib/kubelet" &>/dev/null || sudo mkdir -p "/var/lib/kubelet"
if [[ -z "${DOCKERIZE_KUBELET}" ]]; then
# Enable dns
if [[ "${ENABLE_CLUSTER_DNS}" = true ]]; then
dns_args="--cluster-dns=${DNS_SERVER_IP} --cluster-domain=${DNS_DOMAIN}"
else
# To start a private DNS server set ENABLE_CLUSTER_DNS and
# DNS_SERVER_IP/DOMAIN. This will at least provide a working
# DNS server for real world hostnames.
dns_args="--cluster-dns=8.8.8.8"
fi
net_plugin_args=""
if [[ -n "${NET_PLUGIN}" ]]; then
net_plugin_args="--network-plugin=${NET_PLUGIN}"
fi
auth_args=""
if [[ -n "${KUBELET_AUTHORIZATION_WEBHOOK:-}" ]]; then
auth_args="${auth_args} --authorization-mode=Webhook"
fi
if [[ -n "${KUBELET_AUTHENTICATION_WEBHOOK:-}" ]]; then
auth_args="${auth_args} --authentication-token-webhook"
fi
if [[ -n "${CLIENT_CA_FILE:-}" ]]; then
auth_args="${auth_args} --client-ca-file=${CLIENT_CA_FILE}"
fi
cni_conf_dir_args=""
if [[ -n "${CNI_CONF_DIR}" ]]; then
cni_conf_dir_args="--cni-conf-dir=${CNI_CONF_DIR}"
fi
cni_bin_dir_args=""
if [[ -n "${CNI_BIN_DIR}" ]]; then
cni_bin_dir_args="--cni-bin-dir=${CNI_BIN_DIR}"
fi
container_runtime_endpoint_args=""
if [[ -n "${CONTAINER_RUNTIME_ENDPOINT}" ]]; then
container_runtime_endpoint_args="--container-runtime-endpoint=${CONTAINER_RUNTIME_ENDPOINT}"
fi
image_service_endpoint_args=""
if [[ -n "${IMAGE_SERVICE_ENDPOINT}" ]]; then
image_service_endpoint_args="--image-service-endpoint=${IMAGE_SERVICE_ENDPOINT}"
fi
sudo -E "${GO_OUT}/hyperkube" kubelet ${priv_arg}\
--v=${LOG_LEVEL} \
--vmodule="${LOG_SPEC}" \
--chaos-chance="${CHAOS_CHANCE}" \
--container-runtime="${CONTAINER_RUNTIME}" \
--rkt-path="${RKT_PATH}" \
--rkt-stage1-image="${RKT_STAGE1_IMAGE}" \
--hostname-override="${HOSTNAME_OVERRIDE}" \
--cloud-provider="${CLOUD_PROVIDER}" \
--cloud-config="${CLOUD_CONFIG}" \
--address="${KUBELET_HOST}" \
--kubeconfig "$CERT_DIR"/kubelet.kubeconfig \
--feature-gates="${FEATURE_GATES}" \
--cpu-cfs-quota=${CPU_CFS_QUOTA} \
--enable-controller-attach-detach="${ENABLE_CONTROLLER_ATTACH_DETACH}" \
--cgroups-per-qos=${CGROUPS_PER_QOS} \
--cgroup-driver=${CGROUP_DRIVER} \
--keep-terminated-pod-volumes=${KEEP_TERMINATED_POD_VOLUMES} \
--eviction-hard=${EVICTION_HARD} \
--eviction-soft=${EVICTION_SOFT} \
--eviction-pressure-transition-period=${EVICTION_PRESSURE_TRANSITION_PERIOD} \
--pod-manifest-path="${POD_MANIFEST_PATH}" \
--fail-swap-on="${FAIL_SWAP_ON}" \
${auth_args} \
${dns_args} \
${cni_conf_dir_args} \
${cni_bin_dir_args} \
${net_plugin_args} \
${container_runtime_endpoint_args} \
${image_service_endpoint_args} \
--port="$KUBELET_PORT" \
${KUBELET_FLAGS} >"${KUBELET_LOG}" 2>&1 &
KUBELET_PID=$!
# Quick check that kubelet is running.
if ps -p $KUBELET_PID > /dev/null ; then
echo "kubelet ( $KUBELET_PID ) is running."
else
cat ${KUBELET_LOG} ; exit 1
fi
else
# Docker won't run a container with a cidfile (container id file)
# unless that file does not already exist; clean up an existing
# dockerized kubelet that might be running.
cleanup_dockerized_kubelet
cred_bind=""
# path to cloud credentials.
cloud_cred=""
if [ "${CLOUD_PROVIDER}" == "aws" ]; then
cloud_cred="${HOME}/.aws/credentials"
fi
if [ "${CLOUD_PROVIDER}" == "gce" ]; then
cloud_cred="${HOME}/.config/gcloud"
fi
if [ "${CLOUD_PROVIDER}" == "openstack" ]; then
cloud_cred="${CLOUD_CONFIG}"
fi
if [[ -n "${cloud_cred}" ]]; then
cred_bind="--volume=${cloud_cred}:${cloud_cred}:ro"
fi
docker run \
--volume=/:/rootfs:ro \
--volume=/var/run:/var/run:rw \
--volume=/sys:/sys:ro \
--volume=/var/lib/docker/:/var/lib/docker:ro \
--volume=/var/lib/kubelet/:/var/lib/kubelet:rw \
--volume=/dev:/dev \
--volume=/run/xtables.lock:/run/xtables.lock:rw \
${cred_bind} \
--net=host \
--privileged=true \
-i \
--cidfile=$KUBELET_CIDFILE \
gcr.io/google_containers/kubelet \
/kubelet --v=${LOG_LEVEL} --containerized ${priv_arg}--chaos-chance="${CHAOS_CHANCE}" --pod-manifest-path="${POD_MANIFEST_PATH}" --hostname-override="${HOSTNAME_OVERRIDE}" --cloud-provider="${CLOUD_PROVIDER}" --cloud-config="${CLOUD_CONFIG}" --address="127.0.0.1" --kubeconfig "$CERT_DIR"/kubelet.kubeconfig --port="$KUBELET_PORT" --enable-controller-attach-detach="${ENABLE_CONTROLLER_ATTACH_DETACH}" &> "$KUBELET_LOG" &
fi
}
function start_kubeproxy {
PROXY_LOG=${LOG_DIR}/kube-proxy.log
cat <<EOF > /tmp/kube-proxy.yaml
apiVersion: componentconfig/v1alpha1
kind: KubeProxyConfiguration
clientConnection:
kubeconfig: ${CERT_DIR}/kube-proxy.kubeconfig
hostnameOverride: ${HOSTNAME_OVERRIDE}
featureGates: ${FEATURE_GATES}
mode: ${KUBEPROXY_MODE}
EOF
if [ "${KUBEPROXY_MODE}" == "ipvs" ]; then
# Load kernel modules required by IPVS proxier
sudo modprobe -a ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh nf_conntrack_ipv4
fi
sudo "${GO_OUT}/hyperkube" proxy \
--config=/tmp/kube-proxy.yaml \
--master="https://${API_HOST}:${API_SECURE_PORT}" >"${PROXY_LOG}" \
--v=${LOG_LEVEL} 2>&1 &
PROXY_PID=$!
SCHEDULER_LOG=${LOG_DIR}/kube-scheduler.log
${CONTROLPLANE_SUDO} "${GO_OUT}/hyperkube" scheduler \
--v=${LOG_LEVEL} \
--kubeconfig "$CERT_DIR"/scheduler.kubeconfig \
--master="https://${API_HOST}:${API_SECURE_PORT}" >"${SCHEDULER_LOG}" 2>&1 &
SCHEDULER_PID=$!
}
function start_kubedns {
if [[ "${ENABLE_CLUSTER_DNS}" = true ]]; then
cp "${KUBE_ROOT}/cluster/addons/dns/kube-dns.yaml.in" kube-dns.yaml
sed -i -e "s/{{ pillar\['dns_domain'\] }}/${DNS_DOMAIN}/g" kube-dns.yaml
sed -i -e "s/{{ pillar\['dns_server'\] }}/${DNS_SERVER_IP}/g" kube-dns.yaml
# TODO update to dns role once we have one.
# use kubectl to create kubedns addon
${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" --namespace=kube-system create -f kube-dns.yaml
echo "Kube-dns addon successfully deployed."
rm kube-dns.yaml
fi
}
function start_kubedashboard {
if [[ "${ENABLE_CLUSTER_DASHBOARD}" = true ]]; then
echo "Creating kubernetes-dashboard"
# use kubectl to create the dashboard
${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" create -f ${KUBE_ROOT}/cluster/addons/dashboard/dashboard-controller.yaml
${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" create -f ${KUBE_ROOT}/cluster/addons/dashboard/dashboard-service.yaml
echo "kubernetes-dashboard deployment and service successfully deployed."
fi
}
function create_psp_policy {
echo "Create podsecuritypolicy policies for RBAC."
${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" create -f ${KUBE_ROOT}/examples/podsecuritypolicy/rbac/policies.yaml
${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" create -f ${KUBE_ROOT}/examples/podsecuritypolicy/rbac/roles.yaml
${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" create -f ${KUBE_ROOT}/examples/podsecuritypolicy/rbac/bindings.yaml
}
function create_storage_class {
if [ -z "$CLOUD_PROVIDER" ]; then
CLASS_FILE=${KUBE_ROOT}/cluster/addons/storage-class/local/default.yaml
else
CLASS_FILE=${KUBE_ROOT}/cluster/addons/storage-class/${CLOUD_PROVIDER}/default.yaml
fi
if [ -e $CLASS_FILE ]; then
echo "Create default storage class for $CLOUD_PROVIDER"
${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" create -f $CLASS_FILE
else
echo "No storage class available for $CLOUD_PROVIDER."
fi
}
function print_success {
if [[ "${START_MODE}" != "kubeletonly" ]]; then
cat <<EOF
Local Kubernetes cluster is running. Press Ctrl-C to shut it down.
Logs:
${APISERVER_LOG:-}
${CTLRMGR_LOG:-}
${PROXY_LOG:-}
${SCHEDULER_LOG:-}
EOF
fi
if [[ "${ENABLE_APISERVER_BASIC_AUDIT:-}" = true ]]; then
echo " ${APISERVER_BASIC_AUDIT_LOG}"
fi
if [[ "${START_MODE}" == "all" ]]; then
echo " ${KUBELET_LOG}"
elif [[ "${START_MODE}" == "nokubelet" ]]; then
echo
echo "No kubelet was started because you set START_MODE=nokubelet"
echo "Run this script again with START_MODE=kubeletonly to run a kubelet"
fi
if [[ "${START_MODE}" != "kubeletonly" ]]; then
echo
cat <<EOF
To start using your cluster, you can open up another terminal/tab and run:
export KUBECONFIG=${CERT_DIR}/admin.kubeconfig
cluster/kubectl.sh
Alternatively, you can write to the default kubeconfig:
export KUBERNETES_PROVIDER=local
cluster/kubectl.sh config set-cluster local --server=https://${API_HOST}:${API_SECURE_PORT} --certificate-authority=${ROOT_CA_FILE}
cluster/kubectl.sh config set-credentials myself ${AUTH_ARGS}
cluster/kubectl.sh config set-context local --cluster=local --user=myself
cluster/kubectl.sh config use-context local
cluster/kubectl.sh
EOF
else
cat <<EOF
The kubelet was started.
Logs:
${KUBELET_LOG}
EOF
fi
}
# validate that etcd is: not running, in path, and has minimum required version.
if [[ "${START_MODE}" != "kubeletonly" ]]; then
kube::etcd::validate
fi
if [ "${CONTAINER_RUNTIME}" == "docker" ] && ! kube::util::ensure_docker_daemon_connectivity; then
exit 1
fi
if [[ "${CONTAINER_RUNTIME}" == "rkt" ]]; then
test_rkt
fi
if [[ "${START_MODE}" != "kubeletonly" ]]; then
test_apiserver_off
fi
kube::util::test_openssl_installed
kube::util::ensure-cfssl
### IF the user didn't supply an output/ for the build... Then we detect.
if [ "$GO_OUT" == "" ]; then
detect_binary
fi
echo "Detected host and ready to start services. Doing some housekeeping first..."
echo "Using GO_OUT $GO_OUT"
KUBELET_CIDFILE=/tmp/kubelet.cid
if [[ "${ENABLE_DAEMON}" = false ]]; then
trap cleanup EXIT
fi
echo "Starting services now!"
if [[ "${START_MODE}" != "kubeletonly" ]]; then
start_etcd
set_service_accounts
start_apiserver
start_controller_manager
start_kubeproxy
start_kubedns
start_kubedashboard
fi
if [[ "${START_MODE}" != "nokubelet" ]]; then
## TODO remove this check if/when kubelet is supported on darwin
# Detect the OS name/arch and display appropriate error.
case "$(uname -s)" in
Darwin)
warning "kubelet is not currently supported in darwin, kubelet aborted."
KUBELET_LOG=""
;;
Linux)
start_kubelet
;;
*)
warning "Unsupported host OS. Must be Linux or Mac OS X, kubelet aborted."
;;
esac
fi
if [[ -n "${PSP_ADMISSION}" && "${AUTHORIZATION_MODE}" = *RBAC* ]]; then
create_psp_policy
fi
if [[ "$DEFAULT_STORAGE_CLASS" = "true" ]]; then
create_storage_class
fi
print_success
if [[ "${ENABLE_DAEMON}" = false ]]; then
while true; do sleep 1; done
fi
| fsouza/kubernetes | hack/local-up-cluster.sh | Shell | apache-2.0 | 36,097 |
#!/bin/bash
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
set -u
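# Requires $JOSHUA to point at the Joshua installation root; with "set -u" the
# script aborts early if it is unset.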
"$JOSHUA"/bin/joshua-decoder -c parse.config < input > output 2> log
diff -u output output.gold > diff
if [ $? -eq 0 ]; then
rm -rf output diff log
exit 0
else
exit 1
fi
| thammegowda/incubator-joshua | src/test/resources/parser/test.sh | Shell | apache-2.0 | 985 |
#!/usr/bin/env bash
OS=$(uname -s | tr '[:upper:]' '[:lower:]')
case "$OS" in
cygwin*)
echo "debug-core-evergreen.sh is not supported on Windows"
exit 0
;;
esac
echo "Debugging core files"
shopt -s nullglob
for i in *.core; do
echo "$i"
echo "backtrace full" | gdb -q ./src/libmongoc/test-libmongoc "$i"
done
# If there is still a test-libmongoc process running (perhaps due to
# deadlock, or very slow test) attach a debugger and print stacks.
TEST_LIBMONGOC_PID="$(pgrep test-libmongoc)"
if [ -n "$TEST_LIBMONGOC_PID" ]; then
echo "test-libmongoc processes still running with PID=$TEST_LIBMONGOC_PID"
echo "backtrace full" | gdb -q -p $TEST_LIBMONGOC_PID
kill $TEST_LIBMONGOC_PID
fi
| jmikola/mongo-c-driver | .evergreen/debug-core-evergreen.sh | Shell | apache-2.0 | 719 |
#!/bin/bash
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
# Bundle React Native app's code and image assets.
# This script is supposed to be invoked as part of the Xcode build process
# and relies on environment variables (including PWD) set by Xcode.
case "$CONFIGURATION" in
Debug)
DEV=true
;;
"")
echo "$0 must be invoked by Xcode"
exit 1
;;
*)
DEV=false
;;
esac
# Xcode project file for React Native apps is located in ios/ subfolder
cd ..
set -x
DEST=$CONFIGURATION_BUILD_DIR/$UNLOCALIZED_RESOURCES_FOLDER_PATH
# Define NVM_DIR and source the nvm.sh setup script
[ -z "$NVM_DIR" ] && export NVM_DIR="$HOME/.nvm"
if [[ -s "$HOME/.nvm/nvm.sh" ]]; then
. "$HOME/.nvm/nvm.sh"
elif [[ -x "$(command -v brew)" && -s "$(brew --prefix nvm)/nvm.sh" ]]; then
. "$(brew --prefix nvm)/nvm.sh"
fi
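# Xcode build phases run with a minimal environment, so node and react-native
# installed via nvm are not on PATH unless nvm.sh is sourced above (assumed
# rationale for the sourcing logic).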
react-native bundle \
--entry-file index.ios.js \
--platform ios \
--dev $DEV \
--bundle-output "$DEST/main.jsbundle" \
--assets-dest "$DEST"
| andrewljohnson/react-native | packager/react-native-xcode.sh | Shell | bsd-3-clause | 1,230 |
#!/bin/bash
FN="hu35ksubaprobe_2.18.0.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.8/data/annotation/src/contrib/hu35ksubaprobe_2.18.0.tar.gz"
"https://bioarchive.galaxyproject.org/hu35ksubaprobe_2.18.0.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-hu35ksubaprobe/bioconductor-hu35ksubaprobe_2.18.0_src_all.tar.gz"
)
MD5="49bd19ec3b6404211f2e410e473fa644"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
wget -O- -q $URL > $TARBALL
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks.
if [[ $(uname -s) == "Linux" ]]; then
if md5sum -c <<<"$MD5 $TARBALL"; then
SUCCESS=1
break
fi
elif [[ $(uname -s) == "Darwin" ]]; then
if [[ $(md5 "$TARBALL" | cut -f4 -d " ") == "$MD5" ]]; then
SUCCESS=1
break
fi
fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
| joachimwolff/bioconda-recipes | recipes/bioconductor-hu35ksubaprobe/post-link.sh | Shell | mit | 1,328 |
#!/bin/sh
# Copyright (C) 2010 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
. lib/test
aux lvmconf 'devices/filter = [ "a/dev\/mirror/", "a/dev\/mapper\/.*$/", "a/dev\/LVMTEST/", "r/.*/" ]'
aux prepare_pvs 3
vgcreate $vg1 "$dev1" "$dev2"
lvcreate -n $lv1 -l 100%FREE $vg1
#top VG
pvcreate $DM_DEV_DIR/$vg1/$lv1
vgcreate $vg $DM_DEV_DIR/$vg1/$lv1 "$dev3"
vgchange -a n $vg $vg1
# this should fail but not segfault, RHBZ 481793.
not vgsplit $vg $vg1 "$dev3"
| Jajcus/lvm2 | test/shell/vgsplit-stacked.sh | Shell | gpl-2.0 | 846 |
#!/bin/sh
# find root
cd `dirname $PWD/$0`
mkdir -p _work
cd _work
ccache --help > /dev/null 2>&1
if [ $? = 0 ]; then
[ -z "${CC}" ] && CC=gcc
CC="ccache ${CC}"
export CC
fi
valac --help > /dev/null 2>&1
if [ ! $? = 0 ]; then
# must install from tarball
VV=0.13.4
SV=$(echo ${VV}|cut -d . -f 1,2)
if [ ! -d vala-${VV} ]; then
wget http://download.gnome.org/sources/vala/${SV}/vala-${VV}.tar.bz2
tar xjvf vala-${VV}.tar.bz2
fi
cd vala-${VV}
./configure --prefix=/usr && \
make && \
sudo make install
cd ..
fi
if [ -d vala ]; then
cd vala
#sudo make uninstall
git pull
else
git clone git://git.gnome.org/vala
cd vala
fi
sh autogen.sh --prefix=/usr && \
make -j 4 && \
sudo make install
cd ..
| glandium/radare2 | sys/vala.sh | Shell | lgpl-3.0 | 716 |
#!/usr/bin/env bash
service php5-fpm start
| addiscent/php-fpm-docker | start.sh | Shell | apache-2.0 | 43 |
#! /usr/bin/env bash
echo -e "\n-- create Dexter DB --\n"
#wget -O dexter-db.sql https://dexter.atlassian.net/wiki/download/attachments/6258746/dexter-db.sql?api=v2
mysql -u root -p1234 -e "create database if not exists my_dexter_db DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci; GRANT ALL PRIVILEGES ON my_dexter_db.* TO 'dexter-user'@'localhost' IDENTIFIED BY 'mypassword'"
# drop tables
#mysql -u dexter-user -pmypassword my_dexter_db < /vagrant/config/remove-database.sql
mysql -u dexter-user -pmypassword my_dexter_db < /vagrant/config/ddl.sql
| Minjung-Baek/Dexter | project/dexter-server/provision/create_dexter_db.sh | Shell | bsd-2-clause | 567 |
#!/usr/bin/env bash
cd "$( dirname "${BASH_SOURCE[0]}" )" && vim -Nu vimrc -c 'Vader! *' > /dev/null
| pathing/my_exvim | vimfiles/bundle/vim-markdown/test/run-tests.sh | Shell | mit | 102 |
#!/bin/sh
# Copyright (C) 2011, Kees Bos <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
if [ "$1" == '-n' ] ; then
ACTION="echo rm"
shift
else
ACTION=rm
fi
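# With -n, ACTION becomes "echo rm", so each clean_* helper only prints the
# files it would delete (a dry run).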
if [ $# -eq 0 ] ; then
echo "usage: $0 [-n] all|js|patch|complete" >&2
exit 1
fi
clean_js( ) {
git clean -n|awk '/Would remove .*[.]js$/ {print $3}'|xargs ${ACTION}
}
clean_pyc( ) {
git clean -n|awk '/Would remove .*[.]pyc$/ {print $3}'|xargs ${ACTION}
}
clean_patch( ) {
git clean -n|awk '/Would remove .*[.](rej|orig)$/ {print $3}'|xargs ${ACTION}
}
for WHAT in $* ; do
case ${WHAT} in
js)
clean_js
;;
pyc)
clean_pyc
;;
patch)
clean_patch
;;
all)
clean_js
clean_pyc
clean_patch
;;
complete)
if [ ! -d .git ] ; then
echo "Cannot find .git directory to be removed." >&2
echo "Switch to root directory of repository." >&2
exit 1
fi
if [ "${ACTION}" = 'rm' ] ; then
git clean -f -d -x
rm -rf .git
else
git clean -n -d -x
fi
esac
done
| spaceone/pyjs | pyjs/contrib/clean-repository.sh | Shell | apache-2.0 | 1,481 |
#!/bin/bash
# This script should be run before you commit to verify that the basic tests
# are working as they should
# Once you have run it you can inspect the log file via
#
# $ less before_i_commit.log
# To clean up everything left behind by running this tool, do the following:
#
# rm *.yamloo; rm before_i_commit.log
#
if [ -f before_i_commit.log ];
then
# this is technically the date it was moved, not the date it was created
mv before_i_commit.log before_i_commit-`date +%s`.log;
touch before_i_commit.log;
else
touch before_i_commit.log;
fi
find . -type f -name "*.py[co]" -delete
if [ -f env/bin/activate ];
then
source env/bin/activate;
else
echo "Assuming that your virtual environment is pre-configured...";
fi
./ooniprobe-dev -i decks/before_i_commit.testdeck
echo "Below you should not see anything"
echo "---------------------------------"
grep "Error: " before_i_commit.log
echo "---------------------------------"
echo "If you do, it means something is wrong."
echo "Read through the log file and fix it."
echo "If you are having some problems fixing some things that have to do with"
echo "the core of OONI, let's first discuss it on IRC, or open a ticket"
read
#cat *.yamloo | less
|
lordappsec/ooni-probe
|
scripts/before_i_commit.sh
|
Shell
|
bsd-2-clause
| 1,228 |
#!/bin/bash
usage() {
echo "Usage:"
echo " $(basename $0) <application-name> [-h|-v] ..."
echo ""
echo "application-name: valid options are: $(valid_app_options)"
echo "-h print this help"
echo "-v display PMD's version"
}
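# Example invocation (the flags after the application name are PMD's own CLI
# options, shown here as an illustration; exact options vary between versions):
#   ./run.sh pmd -d /path/to/src -f text -R rulesets/java/basic.xml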
valid_app_options () {
echo "pmd, cpd, cpdgui, designer, bgastviewer"
}
is_cygwin() {
case "$(uname)" in
CYGWIN*)
readonly cygwin=true
;;
esac
# OS specific support. $var _must_ be set to either true or false.
if [ -z ${cygwin} ] ; then
readonly cygwin=false
fi
}
cygwin_paths() {
# For Cygwin, switch paths to Windows format before running java
if ${cygwin} ; then
JAVA_HOME=$(cygpath --windows "${JAVA_HOME}")
classpath=$(cygpath --path --windows "${classpath}")
DIRECTORY=$(cygpath --windows "${DIRECTORY}")
fi
}
convert_cygwin_vars() {
# If cygwin, convert to Unix form before manipulating
if ${cygwin} ; then
[ -n "${JAVA_HOME}" ] &&
JAVA_HOME=$(cygpath --unix "${JAVA_HOME}")
[ -n "${CLASSPATH}" ] &&
CLASSPATH=$(cygpath --path --unix "${CLASSPATH}")
fi
}
java_heapsize_settings() {
local heapsize=${HEAPSIZE:-512m}
case "${heapsize}" in
[1-9]*[mgMG])
readonly HEAPSIZE="-Xmx${heapsize}"
;;
'')
;;
*)
echo "HEAPSIZE '${HEAPSIZE}' unknown (try: 512m)"
exit 1
esac
}
set_lib_dir() {
if [ -z ${LIB_DIR} ]; then
local script_dir=$(dirname ${0})
local cwd="${PWD}"
cd "${script_dir}/../lib"
readonly LIB_DIR=$(pwd -P)
cd "${cwd}"
fi
}
check_lib_dir() {
if [ ! -e "${LIB_DIR}" ]; then
echo "The jar directory [${LIB_DIR}] does not exist"
fi
}
readonly APPNAME="${1}"
if [ -z "${APPNAME}" ]; then
usage
exit 1
fi
shift
case "${APPNAME}" in
"pmd")
readonly CLASSNAME="net.sourceforge.pmd.PMD"
;;
"cpd")
readonly CLASSNAME="net.sourceforge.pmd.cpd.CPD"
;;
"designer")
readonly CLASSNAME="net.sourceforge.pmd.util.designer.Designer"
;;
"bgastviewer")
readonly CLASSNAME="net.sourceforge.pmd.util.viewer.Viewer"
;;
"cpdgui")
readonly CLASSNAME="net.sourceforge.pmd.cpd.GUI"
;;
    *)
        echo "${APPNAME} is NOT a valid application name, valid options are: $(valid_app_options)" >&2
        exit 1
        ;;
esac
is_cygwin
set_lib_dir
check_lib_dir
convert_cygwin_vars
classpath=$CLASSPATH
cd "${CWD}"
for jarfile in ${LIB_DIR}/*.jar; do
classpath=$classpath:$jarfile
done
cygwin_paths
java_heapsize_settings
java "${HEAPSIZE}" -cp "${classpath}" "${CLASSNAME}" ${@}
|
betterlife/Java-Build-System
|
tools/pmd/bin/run.sh
|
Shell
|
mit
| 2,648 |
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
python /mnt/terrapin/scripts/throttle_datanode.py
|
fangjian601/terrapin
|
server/src/main/scripts/run_throttle_datanode.sh
|
Shell
|
apache-2.0
| 852 |
#!/bin/bash
ARG_DEFS=(
"--repository=(.*)"
"--directory=(.*)"
"[--branch=(.*)]"
)
function run {
rm -rf $DIRECTORY
mkdir -p $DIRECTORY
echo "-- Cloning $REPOSITORY#$BRANCH to $DIRECTORY..."
ARGS="--branch=${BRANCH:-master} --depth=2"
git config --global user.email "[email protected]"
git config --global user.name "Ionitron"
git clone [email protected]:driftyco/$REPOSITORY.git $DIRECTORY $ARGS
cd $DIRECTORY
git fetch origin --tags
cd ../
}
source $(dirname $0)/../utils.sh.inc
|
AbraaoAlves/ionic
|
scripts/git/clone.sh
|
Shell
|
mit
| 514 |
#!/bin/bash
#
##################################################################################################################
# Written to be used on 64 bits computers
# Author : Erik Dubois
# Website : http://www.erikdubois.be
##################################################################################################################
##################################################################################################################
#
# DO NOT JUST RUN THIS. EXAMINE AND JUDGE. RUN AT YOUR OWN RISK.
#
##################################################################################################################
package="viber"
command="viber"
#----------------------------------------------------------------------------------
#checking if application is already installed or else install with aur helpers
if pacman -Qi $package &> /dev/null; then
echo "################################################################"
echo "################## "$package" is already installed"
echo "################################################################"
else
#checking which helper is installed
if pacman -Qi packer &> /dev/null; then
echo "Installing with packer"
packer -S --noconfirm --noedit $package
elif pacman -Qi pacaur &> /dev/null; then
echo "Installing with pacaur"
pacaur -S --noconfirm --noedit $package
elif pacman -Qi yaourt &> /dev/null; then
echo "Installing with yaourt"
yaourt -S --noconfirm $package
fi
# Just checking if installation was successful
if pacman -Qi $package &> /dev/null; then
echo "################################################################"
echo "######### "$package" has been installed"
echo "################################################################"
else
echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
echo "!!!!!!!!! "$package" has NOT been installed"
echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
fi
fi
|
erikdubois/AntergosXfce4
|
installation/install-viber-v1.sh
|
Shell
|
gpl-2.0
| 2,010 |
#!/bin/sh
test_description='Test the pick command'
. ./test-lib.sh
test_expect_success \
'Initialize the StGIT repository' \
'
stg init &&
stg new A -m "a" && echo A > a && stg add a && stg refresh &&
stg new B -m "b" && echo B > b && stg add b && stg refresh &&
stg branch --clone foo &&
stg new C -m "c" && echo C > c && stg add c && stg refresh &&
stg new D-foo -m "d" && echo D > d && stg add d && stg refresh &&
stg branch master
'
test_expect_success \
'Pick remote patch' \
'
stg pick foo:C &&
test "$(echo $(stg series --applied --noprefix))" = "A B C"
'
test_expect_success \
'Pick --unapplied remote patch' \
'
stg pick --unapplied --ref-branch foo --name D D-foo &&
test "$(echo $(stg series --applied --noprefix))" = "A B C" &&
test "$(echo $(stg series --unapplied --noprefix))" = "D"
'
test_expect_success \
'Pick local unapplied patch' \
'
stg pick D &&
test "$(echo $(stg series --applied --noprefix))" = "A B C D-0" &&
test "$(echo $(stg series --unapplied --noprefix))" = "D"
'
test_expect_success \
'Pick --fold --revert local patch' \
'
stg pick --fold --revert D &&
stg refresh && stg clean &&
test "$(echo $(stg series --applied --noprefix))" = "A B C" &&
test "$(echo $(stg series --unapplied --noprefix))" = "D"
'
test_expect_success \
'Pick --fold without applied patches' \
'
stg pop --all &&
stg pick --fold D &&
test "$(echo $(stg series --unapplied --noprefix))" = "A B C D" &&
test "$(echo $(stg status))" = "A d"
'
test_done
|
Naoya-Horiguchi/stgit
|
t/t3400-pick.sh
|
Shell
|
gpl-2.0
| 1,505 |
#!/bin/bash
CRIU=../../../criu/criu
set -e -m -x
cat < /dev/zero > /dev/null &
pid=$!
sleep 1
lsof -p $pid
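# An unknown syscall name must be rejected. Then open /dev/null inside the
# task and dup2() the new fd onto stdin so the backgrounded cat sees EOF.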
$CRIU exec -t $pid fake_syscall && exit 1 || true
fd=`$CRIU exec -t $pid open '&/dev/null' 0 | sed 's/.*(\(.*\))/\1/'`
$CRIU exec -t $pid dup2 $fd 0
wait $pid
echo PASS
|
eabatalov/criu
|
test/others/exec/run.sh
|
Shell
|
lgpl-2.1
| 280 |
# installs dokku via apt-get
DOKKU_VERSION=$1
wget https://raw.githubusercontent.com/progrium/dokku/v${DOKKU_VERSION}/bootstrap.sh
sudo DOKKU_TAG=v${DOKKU_VERSION} bash bootstrap.sh
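# Example: ./deploy_dokku.sh 0.4.4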
|
apachipa/Azure-JSON-Custom
|
dokku-vm/deploy_dokku.sh
|
Shell
|
mit
| 171 |
#! /bin/sh
#
# %CopyrightBegin%
#
# Copyright Ericsson AB 2007-2021. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# %CopyrightEnd%
#
# This little helper digs out the current version of microsoft CRT
# by compiling hello world and "parsing" the manifest file...
# To debug using a fake version:
# echo "8.0.50727.763"
# exit 0
if [ "$1" = "-n" ]; then
SWITCH=$1
shift
else
SWITCH=""
fi
cat > hello.c <<EOF
#include <windows.h>
#include <stdio.h>
int main(void)
{
printf("Hello world\n");
return 0;
}
EOF
cl.exe -MD hello.c > /dev/null 2>&1
if [ '!' -f hello.exe.manifest ]; then
# Gah - VC 2010 changes the way it handles DLL's and manifests... Again...
# need another way of getting the version
    DLLNAME=`dumpbin.exe -imports hello.exe | egrep 'MSVCR.*dll'`
DLLNAME=`echo $DLLNAME`
if [ -z "$DLLNAME" ]; then
	DLLNAME=`dumpbin.exe -imports hello.exe | egrep 'VCRUNTIME.*dll'`
DLLNAME=`echo $DLLNAME`
fi
if [ '!' -z "$1" ]; then
FILETOLOOKIN=$1
else
FILETOLOOKIN=$DLLNAME
fi
cat > helper.c <<EOF
#include <windows.h>
#include <stdio.h>
#define REQ_MODULE "$FILETOLOOKIN"
int main(void)
{
DWORD dummy;
DWORD versize;
int i,n;
unsigned char *versinfo;
char buff[100];
char *vs_verinfo;
unsigned int vs_ver_size;
WORD *translate;
unsigned int tr_size;
if (!(versize = GetFileVersionInfoSize(REQ_MODULE,&dummy))) {
fprintf(stderr,"No version info size in %s!\n",REQ_MODULE);
exit(1);
}
versinfo=malloc(versize);
if (!GetFileVersionInfo(REQ_MODULE,dummy,versize,versinfo)) {
fprintf(stderr,"No version info in %s!\n",REQ_MODULE);
exit(2);
}
if (!VerQueryValue(versinfo,"\\\\VarFileInfo\\\\Translation",&translate,&tr_size)) {
fprintf(stderr,"No translation info in %s!\n",REQ_MODULE);
exit(3);
}
n = tr_size/(2*sizeof(*translate));
for(i=0; i < n; ++i) {
sprintf(buff,"\\\\StringFileInfo\\\\%04x%04x\\\\FileVersion",
translate[i*2],translate[i*2+1]);
if (VerQueryValue(versinfo,buff,&vs_verinfo,&vs_ver_size) && vs_ver_size > 2) {
if(vs_verinfo[1] == 0) // Wide char (depends on compiler version!!)
printf("%S\n",(unsigned short *) vs_verinfo);
else
printf("%s\n",(char *) vs_verinfo);
return 0;
}
}
fprintf(stderr,"Failed to find file version of %s\n",REQ_MODULE);
return 0;
}
EOF
cl.exe -MD helper.c version.lib > /dev/null 2>&1
if [ '!' -f helper.exe ]; then
echo "Failed to build helper program." >&2
exit 1
fi
NAME=$DLLNAME
VERSION=`./helper.exe`
else
VERSION=`grep '<assemblyIdentity' hello.exe.manifest | sed 's,.*version=.\([0-9\.]*\).*,\1,g' | grep -v '<'`
NAME=`grep '<assemblyIdentity' hello.exe.manifest | sed 's,.*name=.[A-Za-z\.]*\([0-9]*\).*,msvcr\1.dll,g' | grep -v '<'`
fi
#rm -f hello.c hello.obj hello.exe hello.exe.manifest helper.c helper.obj helper.exe helper.exe.manifest
if [ "$SWITCH" = "-n" ]; then
ASKEDFOR=$NAME
else
ASKEDFOR=$VERSION
fi
if [ -z "$ASKEDFOR" ]; then
exit 1
fi
echo $ASKEDFOR
exit 0
|
emacsmirror/erlang
|
erts/etc/win32/nsis/dll_version_helper.sh
|
Shell
|
apache-2.0
| 3,606 |
#!/bin/bash
# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Script to increment kernel subkey and datakey for firmware updates.
# Used when revving versions for a firmware update.
# Load common constants and variables.
. "${0%/*}"/common.sh
# Abort on errors.
set -e
if [ $# -ne 1 ]; then
cat <<EOF
Usage: $0 <keyset directory>
Increments the kernel subkey, data key and firmware version in the
specified keyset.
EOF
exit 1
fi
KEY_DIR=$1
main() {
load_current_versions "${KEY_DIR}"
new_kernkey_ver=$(increment_version "${KEY_DIR}" "kernel_key_version")
new_firm_ver=$(increment_version "${KEY_DIR}" "firmware_version")
cd "${KEY_DIR}"
backup_existing_kernel_subkeys ${CURR_FIRM_VER} ${CURR_KERNKEY_VER}
backup_existing_kernel_data_keys ${CURR_FIRM_VER} ${CURR_KERNKEY_VER}
cat <<EOF
Generating new kernel subkey, data keys and new kernel keyblock.
New Firmware version (due to kernel subkey change): ${new_firm_ver}.
New Kernel key version (due to kernel datakey change): ${new_kernkey_ver}.
EOF
make_pair kernel_subkey ${KERNEL_SUBKEY_ALGOID} ${new_firm_ver}
make_pair kernel_data_key ${KERNEL_DATAKEY_ALGOID} ${new_kernkey_ver}
make_keyblock kernel ${KERNEL_KEYBLOCK_MODE} kernel_data_key kernel_subkey
write_updated_version_file ${CURR_FIRMKEY_VER} ${new_firm_ver} \
${new_kernkey_ver} ${CURR_KERN_VER}
}
main "$@"
|
ccaapton/vboot_reference
|
scripts/keygeneration/increment_kernel_subkey_and_key.sh
|
Shell
|
bsd-3-clause
| 1,484 |
#!/bin/bash
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Script to increment firmware version key for firmware updates.
# Used when revving versions for a firmware update.
# Load common constants and variables.
. "$(dirname "$0")/common.sh"
# Abort on errors.
set -e
if [ $# -ne 1 ]; then
cat <<EOF
Usage: $0 <keyset directory>
Increments the firmware version in the specified keyset.
EOF
exit 1
fi
KEY_DIR=$1
main() {
load_current_versions "${KEY_DIR}"
new_firmkey_ver=$(increment_version "${KEY_DIR}" "firmware_key_version")
cd "${KEY_DIR}"
backup_existing_firmware_keys ${CURR_FIRM_VER} ${CURR_FIRMKEY_VER}
cat <<EOF
Generating new firmware version key.
New Firmware key version (due to firmware key change): ${new_firmkey_ver}.
EOF
make_pair firmware_data_key ${FIRMWARE_DATAKEY_ALGOID} ${new_firmkey_ver}
make_keyblock firmware ${FIRMWARE_KEYBLOCK_MODE} firmware_data_key root_key
write_updated_version_file ${new_firmkey_ver} ${CURR_FIRM_VER} \
${CURR_KERNKEY_VER} ${CURR_KERN_VER}
}
main "$@"
|
coreboot/vboot
|
scripts/keygeneration/increment_firmware_data_key.sh
|
Shell
|
bsd-3-clause
| 1,166 |
#!/bin/bash
#
# Copyright 2016 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Test rule_test usage.
#
set -euo pipefail
# --- begin runfiles.bash initialization ---
if [[ ! -d "${RUNFILES_DIR:-/dev/null}" && ! -f "${RUNFILES_MANIFEST_FILE:-/dev/null}" ]]; then
if [[ -f "$0.runfiles_manifest" ]]; then
export RUNFILES_MANIFEST_FILE="$0.runfiles_manifest"
elif [[ -f "$0.runfiles/MANIFEST" ]]; then
export RUNFILES_MANIFEST_FILE="$0.runfiles/MANIFEST"
elif [[ -f "$0.runfiles/bazel_tools/tools/bash/runfiles/runfiles.bash" ]]; then
export RUNFILES_DIR="$0.runfiles"
fi
fi
if [[ -f "${RUNFILES_DIR:-/dev/null}/bazel_tools/tools/bash/runfiles/runfiles.bash" ]]; then
source "${RUNFILES_DIR}/bazel_tools/tools/bash/runfiles/runfiles.bash"
elif [[ -f "${RUNFILES_MANIFEST_FILE:-/dev/null}" ]]; then
source "$(grep -m1 "^bazel_tools/tools/bash/runfiles/runfiles.bash " \
"$RUNFILES_MANIFEST_FILE" | cut -d ' ' -f 2-)"
else
echo >&2 "ERROR: cannot find @bazel_tools//tools/bash/runfiles:runfiles.bash"
exit 1
fi
# --- end runfiles.bash initialization ---
source "$(rlocation "io_bazel/src/test/shell/integration_test_setup.sh")" \
|| { echo "integration_test_setup.sh not found!" >&2; exit 1; }
function set_up() {
export MSYS_NO_PATHCONV=1
export MSYS2_ARG_CONV_EXCL="*"
}
function test_local_rule_test_in_root() {
create_new_workspace
cat > BUILD <<EOF
genrule(
name = "turtle",
outs = ["tmnt"],
cmd = "echo 'Leonardo' > \$@",
visibility = ["//visibility:public"],
)
load(
"@bazel_tools//tools/build_rules:test_rules.bzl",
"rule_test",
)
rule_test(
name="turtle_rule_test",
rule="//:turtle",
generates=[
"tmnt",
],
)
EOF
bazel build //:turtle_rule_test &> $TEST_log || fail "turtle_rule_test failed"
}
function test_local_rule_test_in_subpackage() {
create_new_workspace
mkdir p
cat > p/BUILD <<EOF
genrule(
name = "turtle",
outs = ["tmnt"],
cmd = "echo 'Leonardo' > \$@",
visibility = ["//visibility:public"],
)
load(
"@bazel_tools//tools/build_rules:test_rules.bzl",
"rule_test",
)
rule_test(
name="turtle_rule_test",
rule="//p:turtle",
generates=[
"tmnt",
],
)
EOF
bazel build //p:turtle_rule_test &> $TEST_log || fail "turtle_rule_test failed"
}
function test_repository_rule_test_in_root() {
create_new_workspace
mkdir -p r
cat >> WORKSPACE <<EOF
local_repository(name = "r", path = "r")
EOF
cat > r/WORKSPACE <<EOF
workspace(name = "r")
EOF
cat > r/BUILD <<EOF
genrule(
name = "turtle",
outs = ["tmnt"],
cmd = "echo 'Leonardo' > \$@",
visibility = ["//visibility:public"],
)
EOF
cat > BUILD <<EOF
load(
"@bazel_tools//tools/build_rules:test_rules.bzl",
"rule_test",
)
rule_test(
name="turtle_rule_test",
rule="@r//:turtle",
generates=[
"tmnt",
],
)
EOF
bazel build //:turtle_rule_test &> $TEST_log || fail "turtle_rule_test failed"
}
function test_repository_rule_test_in_subpackage() {
create_new_workspace
mkdir -p r
cat >> WORKSPACE <<EOF
local_repository(name = "r", path = "r")
EOF
cat > r/WORKSPACE <<EOF
workspace(name = "r")
EOF
mkdir r/p
cat > r/p/BUILD <<EOF
genrule(
name = "turtle",
outs = ["tmnt"],
cmd = "echo 'Leonardo' > \$@",
visibility = ["//visibility:public"],
)
EOF
cat > BUILD <<EOF
load(
"@bazel_tools//tools/build_rules:test_rules.bzl",
"rule_test",
)
rule_test(
name="turtle_rule_test",
rule="@r//p:turtle",
generates=[
"tmnt",
],
)
EOF
bazel build //:turtle_rule_test &> $TEST_log || fail "turtle_rule_test failed"
}
# Regression test for https://github.com/bazelbuild/bazel/issues/8723
#
# rule_test() is a macro that expands to a sh_test and _rule_test_rule.
# Expect that:
# * test- and build-rule attributes (e.g. "tags") are applied to both rules,
# * test-only attributes are applied only to the sh_rule,
# * the build rule has its own visibility
function test_kwargs_with_macro_rules() {
create_new_workspace
cat > BUILD <<'EOF'
load("@bazel_tools//tools/build_rules:test_rules.bzl", "rule_test")
genrule(
name = "x",
srcs = ["@does_not_exist//:bad"],
outs = ["x.out"],
cmd = "touch $@",
tags = ["dont_build_me"],
)
rule_test(
name = "x_test",
rule = "//:x",
generates = ["x.out"],
visibility = ["//foo:__pkg__"],
tags = ["dont_build_me"],
args = ["x"],
flaky = False,
local = True,
shard_count = 2,
size = "small",
timeout = "short",
)
EOF
bazel build //:all >& "$TEST_log" && fail "should have failed" || true
bazel build --build_tag_filters=-dont_build_me //:all >& "$TEST_log" || fail "build failed"
bazel query --output=label 'attr(tags, dont_build_me, //:all)' >& "$TEST_log" || fail "query failed"
expect_log '//:x_test_impl'
expect_log '//:x_test\b'
expect_log '//:x\b'
bazel query --output=label 'attr(visibility, private, //:all)' >& "$TEST_log" || fail "query failed"
expect_log '//:x_test_impl'
expect_log '//:x\b'
expect_not_log '//:x_test\b'
bazel query --output=label 'attr(visibility, foo, //:all)' >& "$TEST_log" || fail "query failed"
expect_log '//:x_test\b'
expect_not_log '//:x_test_impl'
expect_not_log '//:x\b'
}
run_suite "rule_test tests"
|
werkt/bazel
|
src/test/shell/bazel/rule_test_test.sh
|
Shell
|
apache-2.0
| 5,846 |
#!/bin/bash
NUMPARAM=$#
if [ $NUMPARAM -lt 2 ] ; then
echo " Usage "
echo " $0 OUTNameAffine.txt *Affine.txt "
echo " assumes close to idetity affine transforms "
exit
fi
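# Example (file names illustrative):
#   ./ANTSAverage3DAffine.sh AvgAffine.txt subj*_Affine.txt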
OUTNM=$1
shift 1
FLIST=$*
# Average each of the 12 matrix parameters: field i+1 of the "Parameters:"
# line is summed across files and divided by the file count. Restricting to
# the first 4 lines of each file keeps the "FixedParameters:" line out.
NFILES=` head -n 4 $FLIST | grep Paramet | cut -d ' ' -f 2 | wc -l `
for i in `seq 1 12` ; do
FIELD=$((i + 1))
LL=` head -n 4 $FLIST | grep Paramet | cut -d ' ' -f $FIELD `
SUM=0
for x in $LL ; do SUM=` awk -v a=$SUM -v b=$x 'BEGIN{print (a + b)}' ` ; done
eval PARAM$i=` awk -v a=$SUM -v b=$NFILES 'BEGIN{print (a / b)}' `
done
# translation params below (fields 2-4 of the "FixedParameters:" line)
for i in 13 14 15 ; do
FIELD=$((i - 11))
LL=` cat $FLIST | grep FixedParamet | cut -d ' ' -f $FIELD `
SUM=0
for x in $LL ; do SUM=` awk -v a=$SUM -v b=$x 'BEGIN{print (a + b)}' ` ; done
eval PARAM$i=` awk -v a=$SUM -v b=$NFILES 'BEGIN{print (a / b)}' `
done
echo "#Insight Transform File V1.0 " > $OUTNM
echo "# Transform 0 " >> $OUTNM
echo "Transform: MatrixOffsetTransformBase_double_3_3 " >> $OUTNM
echo "Parameters: $PARAM1 $PARAM2 $PARAM3 $PARAM4 $PARAM5 $PARAM6 $PARAM7 $PARAM8 $PARAM9 $PARAM10 $PARAM11 $PARAM12 " >> $OUTNM
echo "FixedParameters: $PARAM13 $PARAM14 $PARAM15 " >> $OUTNM
exit
|
fbudin69500/ANTs
|
Scripts/ANTSAverage3DAffine.sh
|
Shell
|
bsd-3-clause
| 3,899 |
#!/bin/sh
test_description="recursive merge corner cases w/ renames but not criss-crosses"
# t6036 has corner cases that involve both criss-cross merges and renames
. ./test-lib.sh
test_expect_success 'setup rename/delete + untracked file' '
echo "A pretty inscription" >ring &&
git add ring &&
test_tick &&
git commit -m beginning &&
git branch people &&
git checkout -b rename-the-ring &&
git mv ring one-ring-to-rule-them-all &&
test_tick &&
git commit -m fullname &&
git checkout people &&
git rm ring &&
echo gollum >owner &&
git add owner &&
test_tick &&
git commit -m track-people-instead-of-objects &&
echo "Myyy PRECIOUSSS" >ring
'
test_expect_success "Does git preserve Gollum's precious artifact?" '
test_must_fail git merge -s recursive rename-the-ring &&
# Make sure git did not delete an untracked file
test -f ring
'
# Testcase setup for rename/modify/add-source:
# Commit A: new file: a
# Commit B: modify a slightly
# Commit C: rename a->b, add completely different a
#
# We should be able to merge B & C cleanly
test_expect_success 'setup rename/modify/add-source conflict' '
git rm -rf . &&
git clean -fdqx &&
rm -rf .git &&
git init &&
printf "1\n2\n3\n4\n5\n6\n7\n" >a &&
git add a &&
git commit -m A &&
git tag A &&
git checkout -b B A &&
echo 8 >>a &&
git add a &&
git commit -m B &&
git checkout -b C A &&
git mv a b &&
echo something completely different >a &&
git add a &&
git commit -m C
'
test_expect_failure 'rename/modify/add-source conflict resolvable' '
git checkout B^0 &&
git merge -s recursive C^0 &&
test $(git rev-parse B:a) = $(git rev-parse b) &&
test $(git rev-parse C:a) = $(git rev-parse a)
'
test_expect_success 'setup resolvable conflict missed if rename missed' '
git rm -rf . &&
git clean -fdqx &&
rm -rf .git &&
git init &&
printf "1\n2\n3\n4\n5\n" >a &&
echo foo >b &&
git add a b &&
git commit -m A &&
git tag A &&
git checkout -b B A &&
git mv a c &&
echo "Completely different content" >a &&
git add a &&
git commit -m B &&
git checkout -b C A &&
echo 6 >>a &&
git add a &&
git commit -m C
'
test_expect_failure 'conflict caused if rename not detected' '
git checkout -q C^0 &&
git merge -s recursive B^0 &&
test 3 -eq $(git ls-files -s | wc -l) &&
test 0 -eq $(git ls-files -u | wc -l) &&
test 0 -eq $(git ls-files -o | wc -l) &&
test 6 -eq $(wc -l < c) &&
test $(git rev-parse HEAD:a) = $(git rev-parse B:a) &&
test $(git rev-parse HEAD:b) = $(git rev-parse A:b)
'
test_expect_success 'setup conflict resolved wrong if rename missed' '
git reset --hard &&
git clean -f &&
git checkout -b D A &&
echo 7 >>a &&
git add a &&
git mv a c &&
echo "Completely different content" >a &&
git add a &&
git commit -m D &&
git checkout -b E A &&
git rm a &&
echo "Completely different content" >>a &&
git add a &&
git commit -m E
'
test_expect_failure 'missed conflict if rename not detected' '
git checkout -q E^0 &&
test_must_fail git merge -s recursive D^0
'
# Tests for undetected rename/add-source causing a file to erroneously be
# deleted (and for mishandled rename/rename(1to1) causing the same issue).
#
# This test uses a rename/rename(1to1)+add-source conflict (1to1 means the
# same file is renamed on both sides to the same thing; it should trigger
# the 1to2 logic, which it would do if the add-source didn't cause issues
# for git's rename detection):
# Commit A: new file: a
# Commit B: rename a->b
# Commit C: rename a->b, add unrelated a
test_expect_success 'setup undetected rename/add-source causes data loss' '
git rm -rf . &&
git clean -fdqx &&
rm -rf .git &&
git init &&
printf "1\n2\n3\n4\n5\n" >a &&
git add a &&
git commit -m A &&
git tag A &&
git checkout -b B A &&
git mv a b &&
git commit -m B &&
git checkout -b C A &&
git mv a b &&
echo foobar >a &&
git add a &&
git commit -m C
'
test_expect_failure 'detect rename/add-source and preserve all data' '
git checkout B^0 &&
git merge -s recursive C^0 &&
test 2 -eq $(git ls-files -s | wc -l) &&
test 2 -eq $(git ls-files -u | wc -l) &&
test 0 -eq $(git ls-files -o | wc -l) &&
test -f a &&
test -f b &&
test $(git rev-parse HEAD:b) = $(git rev-parse A:a) &&
test $(git rev-parse HEAD:a) = $(git rev-parse C:a)
'
test_expect_failure 'detect rename/add-source and preserve all data, merge other way' '
git checkout C^0 &&
git merge -s recursive B^0 &&
test 2 -eq $(git ls-files -s | wc -l) &&
test 2 -eq $(git ls-files -u | wc -l) &&
test 0 -eq $(git ls-files -o | wc -l) &&
test -f a &&
test -f b &&
test $(git rev-parse HEAD:b) = $(git rev-parse A:a) &&
test $(git rev-parse HEAD:a) = $(git rev-parse C:a)
'
test_expect_success 'setup content merge + rename/directory conflict' '
git rm -rf . &&
git clean -fdqx &&
rm -rf .git &&
git init &&
printf "1\n2\n3\n4\n5\n6\n" >file &&
git add file &&
test_tick &&
git commit -m base &&
git tag base &&
git checkout -b right &&
echo 7 >>file &&
mkdir newfile &&
echo junk >newfile/realfile &&
git add file newfile/realfile &&
test_tick &&
git commit -m right &&
git checkout -b left-conflict base &&
echo 8 >>file &&
git add file &&
git mv file newfile &&
test_tick &&
git commit -m left &&
git checkout -b left-clean base &&
echo 0 >newfile &&
cat file >>newfile &&
git add newfile &&
git rm file &&
test_tick &&
git commit -m left
'
test_expect_success 'rename/directory conflict + clean content merge' '
git reset --hard &&
git reset --hard &&
git clean -fdqx &&
git checkout left-clean^0 &&
test_must_fail git merge -s recursive right^0 &&
test 2 -eq $(git ls-files -s | wc -l) &&
test 1 -eq $(git ls-files -u | wc -l) &&
test 1 -eq $(git ls-files -o | wc -l) &&
echo 0 >expect &&
git cat-file -p base:file >>expect &&
echo 7 >>expect &&
test_cmp expect newfile~HEAD &&
test $(git rev-parse :2:newfile) = $(git hash-object expect) &&
test -f newfile/realfile &&
test -f newfile~HEAD
'
test_expect_success 'rename/directory conflict + content merge conflict' '
git reset --hard &&
git reset --hard &&
git clean -fdqx &&
git checkout left-conflict^0 &&
test_must_fail git merge -s recursive right^0 &&
test 4 -eq $(git ls-files -s | wc -l) &&
test 3 -eq $(git ls-files -u | wc -l) &&
test 1 -eq $(git ls-files -o | wc -l) &&
git cat-file -p left-conflict:newfile >left &&
git cat-file -p base:file >base &&
git cat-file -p right:file >right &&
test_must_fail git merge-file \
-L "HEAD:newfile" \
-L "" \
-L "right^0:file" \
left base right &&
test_cmp left newfile~HEAD &&
test $(git rev-parse :1:newfile) = $(git rev-parse base:file) &&
test $(git rev-parse :2:newfile) = $(git rev-parse left-conflict:newfile) &&
test $(git rev-parse :3:newfile) = $(git rev-parse right:file) &&
test -f newfile/realfile &&
test -f newfile~HEAD
'
test_expect_success 'setup content merge + rename/directory conflict w/ disappearing dir' '
git reset --hard &&
git rm -rf . &&
git clean -fdqx &&
rm -rf .git &&
git init &&
mkdir sub &&
printf "1\n2\n3\n4\n5\n6\n" >sub/file &&
git add sub/file &&
test_tick &&
git commit -m base &&
git tag base &&
git checkout -b right &&
echo 7 >>sub/file &&
git add sub/file &&
test_tick &&
git commit -m right &&
git checkout -b left base &&
echo 0 >newfile &&
cat sub/file >>newfile &&
git rm sub/file &&
mv newfile sub &&
git add sub &&
test_tick &&
git commit -m left
'
test_expect_success 'disappearing dir in rename/directory conflict handled' '
git reset --hard &&
git clean -fdqx &&
git checkout left^0 &&
git merge -s recursive right^0 &&
test 1 -eq $(git ls-files -s | wc -l) &&
test 0 -eq $(git ls-files -u | wc -l) &&
test 0 -eq $(git ls-files -o | wc -l) &&
echo 0 >expect &&
git cat-file -p base:sub/file >>expect &&
echo 7 >>expect &&
test_cmp expect sub &&
test -f sub
'
# Test for all kinds of things that can go wrong with rename/rename (2to1):
# Commit A: new files: a & b
# Commit B: rename a->c, modify b
# Commit C: rename b->c, modify a
#
# Merging of B & C should NOT be clean. Questions:
# * Both a & b should be removed by the merge; are they?
# * The two c's should contain modifications to a & b; do they?
# * The index should contain two files, both for c; does it?
# * The working copy should have two files, both of form c~<unique>; does it?
# * Nothing else should be present. Is anything?
test_expect_success 'setup rename/rename (2to1) + modify/modify' '
git rm -rf . &&
git clean -fdqx &&
rm -rf .git &&
git init &&
printf "1\n2\n3\n4\n5\n" >a &&
printf "5\n4\n3\n2\n1\n" >b &&
git add a b &&
git commit -m A &&
git tag A &&
git checkout -b B A &&
git mv a c &&
echo 0 >>b &&
git add b &&
git commit -m B &&
git checkout -b C A &&
git mv b c &&
echo 6 >>a &&
git add a &&
git commit -m C
'
test_expect_success 'handle rename/rename (2to1) conflict correctly' '
git checkout B^0 &&
test_must_fail git merge -s recursive C^0 >out &&
grep "CONFLICT (rename/rename)" out &&
test 2 -eq $(git ls-files -s | wc -l) &&
test 2 -eq $(git ls-files -u | wc -l) &&
test 2 -eq $(git ls-files -u c | wc -l) &&
test 3 -eq $(git ls-files -o | wc -l) &&
test ! -f a &&
test ! -f b &&
test -f c~HEAD &&
test -f c~C^0 &&
test $(git hash-object c~HEAD) = $(git rev-parse C:a) &&
test $(git hash-object c~C^0) = $(git rev-parse B:b)
'
# Testcase setup for simple rename/rename (1to2) conflict:
# Commit A: new file: a
# Commit B: rename a->b
# Commit C: rename a->c
test_expect_success 'setup simple rename/rename (1to2) conflict' '
git rm -rf . &&
git clean -fdqx &&
rm -rf .git &&
git init &&
echo stuff >a &&
git add a &&
test_tick &&
git commit -m A &&
git tag A &&
git checkout -b B A &&
git mv a b &&
test_tick &&
git commit -m B &&
git checkout -b C A &&
git mv a c &&
test_tick &&
git commit -m C
'
test_expect_success 'merge has correct working tree contents' '
git checkout C^0 &&
test_must_fail git merge -s recursive B^0 &&
test 3 -eq $(git ls-files -s | wc -l) &&
test 3 -eq $(git ls-files -u | wc -l) &&
test 0 -eq $(git ls-files -o | wc -l) &&
test $(git rev-parse :1:a) = $(git rev-parse A:a) &&
test $(git rev-parse :3:b) = $(git rev-parse A:a) &&
test $(git rev-parse :2:c) = $(git rev-parse A:a) &&
test ! -f a &&
test $(git hash-object b) = $(git rev-parse A:a) &&
test $(git hash-object c) = $(git rev-parse A:a)
'
# Testcase setup for rename/rename(1to2)/add-source conflict:
# Commit A: new file: a
# Commit B: rename a->b
# Commit C: rename a->c, add completely different a
#
# Merging of B & C should NOT be clean; there's a rename/rename conflict
test_expect_success 'setup rename/rename(1to2)/add-source conflict' '
git rm -rf . &&
git clean -fdqx &&
rm -rf .git &&
git init &&
printf "1\n2\n3\n4\n5\n6\n7\n" >a &&
git add a &&
git commit -m A &&
git tag A &&
git checkout -b B A &&
git mv a b &&
git commit -m B &&
git checkout -b C A &&
git mv a c &&
echo something completely different >a &&
git add a &&
git commit -m C
'
test_expect_failure 'detect conflict with rename/rename(1to2)/add-source merge' '
git checkout B^0 &&
test_must_fail git merge -s recursive C^0 &&
test 4 -eq $(git ls-files -s | wc -l) &&
test 0 -eq $(git ls-files -o | wc -l) &&
test $(git rev-parse 3:a) = $(git rev-parse C:a) &&
test $(git rev-parse 1:a) = $(git rev-parse A:a) &&
test $(git rev-parse 2:b) = $(git rev-parse B:b) &&
test $(git rev-parse 3:c) = $(git rev-parse C:c) &&
test -f a &&
test -f b &&
test -f c
'
test_expect_success 'setup rename/rename(1to2)/add-source resolvable conflict' '
git rm -rf . &&
git clean -fdqx &&
rm -rf .git &&
git init &&
>a &&
git add a &&
test_tick &&
git commit -m base &&
git tag A &&
git checkout -b B A &&
git mv a b &&
test_tick &&
git commit -m one &&
git checkout -b C A &&
git mv a b &&
echo important-info >a &&
git add a &&
test_tick &&
git commit -m two
'
test_expect_failure 'rename/rename/add-source still tracks new a file' '
git checkout C^0 &&
git merge -s recursive B^0 &&
test 2 -eq $(git ls-files -s | wc -l) &&
test 0 -eq $(git ls-files -o | wc -l) &&
test $(git rev-parse HEAD:a) = $(git rev-parse C:a) &&
test $(git rev-parse HEAD:b) = $(git rev-parse A:a)
'
test_expect_success 'setup rename/rename(1to2)/add-dest conflict' '
git rm -rf . &&
git clean -fdqx &&
rm -rf .git &&
git init &&
echo stuff >a &&
git add a &&
test_tick &&
git commit -m base &&
git tag A &&
git checkout -b B A &&
git mv a b &&
echo precious-data >c &&
git add c &&
test_tick &&
git commit -m one &&
git checkout -b C A &&
git mv a c &&
echo important-info >b &&
git add b &&
test_tick &&
git commit -m two
'
test_expect_success 'rename/rename/add-dest merge still knows about conflicting file versions' '
git checkout C^0 &&
test_must_fail git merge -s recursive B^0 &&
test 5 -eq $(git ls-files -s | wc -l) &&
test 2 -eq $(git ls-files -u b | wc -l) &&
test 2 -eq $(git ls-files -u c | wc -l) &&
test 4 -eq $(git ls-files -o | wc -l) &&
test $(git rev-parse :1:a) = $(git rev-parse A:a) &&
test $(git rev-parse :2:b) = $(git rev-parse C:b) &&
test $(git rev-parse :3:b) = $(git rev-parse B:b) &&
test $(git rev-parse :2:c) = $(git rev-parse C:c) &&
test $(git rev-parse :3:c) = $(git rev-parse B:c) &&
test $(git hash-object c~HEAD) = $(git rev-parse C:c) &&
test $(git hash-object c~B\^0) = $(git rev-parse B:c) &&
test $(git hash-object b~HEAD) = $(git rev-parse C:b) &&
test $(git hash-object b~B\^0) = $(git rev-parse B:b) &&
test ! -f b &&
test ! -f c
'
test_done
|
wsp/git
|
t/t6042-merge-rename-corner-cases.sh
|
Shell
|
gpl-2.0
| 13,651 |
#!/bin/sh
#GPL
#TODO
#add pixelformat/sampleformat into the path of the codecs
FFP=../ffprobe
TMP=$(mktemp) || exit 1
TARGET=$1
shift
for v do
BASE=$(basename $v)
echo $v | egrep -i '(public|private)' >/dev/null && echo Warning $v may be private
$FFP $v 2> $TMP
FORM=$((grep 'Input #0, ' -m1 $TMP || echo 'Input #0, unknown') | sed 's/Input #0, \([a-zA-Z0-9_]*\).*/\1/' )
mkdir -p $TARGET/container/$FORM
ln -s $v $TARGET/container/$FORM/$BASE
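  # Turn each "Stream #0.N: type: codec" line into generated mkdir/ln commands
  # so every sample is also linked under $TARGET/<type>/<codec>/.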
eval $(grep 'Stream #0\.[^:]*: [a-zA-Z0-9][^:]*: [a-zA-Z0-9]' $TMP | sed 's#[^:]*: \([a-zA-Z0-9]*\)[^:]*: \([a-zA-Z0-9]*\).*#mkdir -p '$TARGET'/\1/\2 ; ln -s '$v' '$TARGET'/\1/\2/'$BASE' ; #')
done
rm $TMP
|
hp-sam/voip-client-ios
|
submodules/externals/ffmpeg/tools/jauche_sortierer.sh
|
Shell
|
gpl-2.0
| 681 |
#!/bin/bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This command checks that the built commands can function together for
# simple scenarios. It does not require Docker.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/hack/make-rules/test-cmd-util.sh"
function run_federation_apiserver() {
kube::log::status "Building federation-apiserver"
make -C "${KUBE_ROOT}" WHAT="federation/cmd/federation-apiserver"
# Start federation-apiserver
kube::log::status "Starting federation-apiserver"
# Admission Controllers to invoke prior to persisting objects in cluster
ADMISSION_CONTROL="NamespaceLifecycle"
"${KUBE_OUTPUT_HOSTBIN}/federation-apiserver" \
--insecure-port="${API_PORT}" \
--admission-control="${ADMISSION_CONTROL}" \
--etcd-servers="http://${ETCD_HOST}:${ETCD_PORT}" \
--storage-media-type="${KUBE_TEST_API_STORAGE_TYPE-}" \
--cert-dir="${TMPDIR:-/tmp/}" 1>&2 &
APISERVER_PID=$!
kube::util::wait_for_url "http://127.0.0.1:${API_PORT}/healthz" "apiserver"
}
function run_federation_controller_manager() {
kube::log::status "Building federation-controller-manager"
make -C "${KUBE_ROOT}" WHAT="federation/cmd/federation-controller-manager"
# Create a kubeconfig for federation apiserver.
local kubeconfig="${KUBE_TEMP}/kubeconfig"
touch "${kubeconfig}"
kubectl config set-cluster "apiserver" --server="http://127.0.0.1:${API_PORT}" --insecure-skip-tls-verify=true --kubeconfig="${kubeconfig}"
kubectl config set-context "context" --cluster="apiserver" --kubeconfig="${kubeconfig}"
kubectl config use-context "context" --kubeconfig="${kubeconfig}"
# Start controller manager
kube::log::status "Starting federation-controller-manager"
"${KUBE_OUTPUT_HOSTBIN}/federation-controller-manager" \
--port="${CTLRMGR_PORT}" \
--kubeconfig="${kubeconfig}" \
--kube-api-content-type="${KUBE_TEST_API_TYPE-}" \
--master="127.0.0.1:${API_PORT}" 1>&2 &
CTLRMGR_PID=$!
kube::util::wait_for_url "http://127.0.0.1:${CTLRMGR_PORT}/healthz" "controller-manager"
}
kube::log::status "Running kubectl tests for federation-apiserver"
setup
run_federation_apiserver
run_federation_controller_manager
# TODO: Fix for replicasets and deployments.
SUPPORTED_RESOURCES=("configmaps" "daemonsets" "events" "ingress" "namespaces" "secrets" "services")
output_message=$(runTests "SUPPORTED_RESOURCES=${SUPPORTED_RESOURCES[@]}")
# Ensure that tests were run. We cannot check all resources here. We check a few
# to catch bugs due to which no tests run.
kube::test::if_has_string "${output_message}" "Testing kubectl(v1:namespaces)"
kube::test::if_has_string "${output_message}" "Testing kubectl(v1:services)"
kube::log::status "TESTS PASSED"
|
jawnsy/cri-o
|
vendor/k8s.io/kubernetes/hack/make-rules/test-federation-cmd.sh
|
Shell
|
apache-2.0
| 3,323 |
#!/bin/bash
#
# Script to delete all data from the mongo database.
#
# This stops all services to release any caches then deletes the database.
#
source "$(dirname $0)/shell_helpers.sh"
stop_services $GRR_SERVICES
echo "Dropping database"
echo "db.dropDatabase()" | mongo grr
start_services $GRR_SERVICES
|
ForensicTools/GRREAT-475_2141-Chaigon-Failey-Siebert
|
scripts/database_reset.sh
|
Shell
|
apache-2.0
| 308 |
#!/bin/bash
# home location lat, lon, alt, heading
LOCATION="CMAC"
TRACKER_LOCATION="CMAC_PILOTSBOX"
VEHICLE=""
BUILD_TARGET="sitl"
FRAME=""
NUM_PROCS=1
SPEEDUP="1"
# check the instance number to allow for multiple copies of the sim running at once
INSTANCE=0
USE_VALGRIND=0
USE_GDB=0
USE_GDB_STOPPED=0
DEBUG_BUILD=0
USE_MAVLINK_GIMBAL=0
CLEAN_BUILD=0
START_ANTENNA_TRACKER=0
WIPE_EEPROM=0
REVERSE_THROTTLE=0
NO_REBUILD=0
START_HIL=0
TRACKER_ARGS=""
EXTERNAL_SIM=0
MODEL=""
BREAKPOINT=""
OVERRIDE_BUILD_TARGET=""
usage()
{
cat <<EOF
Usage: sim_vehicle.sh [options] [mavproxy_options]
Options:
-v VEHICLE vehicle type (ArduPlane, ArduCopter or APMrover2)
vehicle type defaults to working directory
-I INSTANCE instance of simulator (default 0)
-V enable valgrind for memory access checking (very slow!)
-G use gdb for debugging ardupilot
-g use gdb for debugging ardupilot, but don't auto-start
-D build with debugging
-B add a breakpoint at given location in debugger
-T start an antenna tracker instance
-A pass arguments to antenna tracker
-t set antenna tracker start location
-L select start location from Tools/autotest/locations.txt
    -l             set a custom start location (lat,lon,alt,heading), overriding -L
-c do a make clean before building
-N don't rebuild before starting ardupilot
-w wipe EEPROM and reload parameters
-R reverse throttle in plane
-M enable MAVLink gimbal
-f FRAME set aircraft frame type
for copters can choose +, X, quad or octa
for planes can choose elevon or vtail
-b BUILD_TARGET override SITL build target
-j NUM_PROC number of processors to use during build (default 1)
-H start HIL
-e use external simulator
-S SPEEDUP set simulation speedup (1 for wall clock time)
mavproxy_options:
--map start with a map
--console start with a status console
--out DEST start MAVLink output to DEST
Note:
eeprom.bin in the starting directory contains the parameters for your
simulated vehicle. Always start from the same directory. It is recommended that
you start in the main vehicle directory for the vehicle you are simulating,
for example, start in the ArduPlane directory to simulate ArduPlane
EOF
}
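# Typical invocation (illustrative values):
#   sim_vehicle.sh -v ArduCopter -f quad -L CMAC --console --map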
# parse options. Thanks to http://wiki.bash-hackers.org/howto/getopts_tutorial
while getopts ":I:VgGcj:TA:t:L:l:v:hwf:RNHeMS:DB:b:" opt; do
case $opt in
v)
VEHICLE=$OPTARG
;;
I)
INSTANCE=$OPTARG
;;
V)
USE_VALGRIND=1
;;
N)
NO_REBUILD=1
;;
H)
START_HIL=1
NO_REBUILD=1
;;
T)
START_ANTENNA_TRACKER=1
;;
A)
TRACKER_ARGS="$OPTARG"
;;
R)
REVERSE_THROTTLE=1
;;
G)
USE_GDB=1
;;
D)
DEBUG_BUILD=1
;;
B)
BREAKPOINT="$OPTARG"
;;
M)
USE_MAVLINK_GIMBAL=1
;;
g)
USE_GDB=1
USE_GDB_STOPPED=1
;;
L)
LOCATION="$OPTARG"
;;
l)
CUSTOM_LOCATION="$OPTARG"
;;
f)
FRAME="$OPTARG"
;;
S)
SPEEDUP="$OPTARG"
;;
t)
TRACKER_LOCATION="$OPTARG"
;;
c)
CLEAN_BUILD=1
;;
j)
NUM_PROCS=$OPTARG
;;
w)
WIPE_EEPROM=1
;;
e)
EXTERNAL_SIM=1
;;
b)
OVERRIDE_BUILD_TARGET="$OPTARG"
;;
h)
usage
exit 0
;;
\?)
# allow other args to pass on to mavproxy
break
;;
:)
echo "Option -$OPTARG requires an argument." >&2
usage
exit 1
esac
done
shift $((OPTIND-1))
# kill existing copy if this is the '0' instance only
kill_tasks()
{
[ "$INSTANCE" -eq "0" ] && {
for pname in JSBSim lt-JSBSim ArduPlane.elf ArduCopter.elf APMrover2.elf AntennaTracker.elf JSBSIm.exe MAVProxy.exe; do
pkill "$pname"
done
pkill -f runsim.py
}
}
if [ $START_HIL == 0 ]; then
kill_tasks
fi
trap kill_tasks SIGINT
# setup ports for this instance
MAVLINK_PORT="tcp:127.0.0.1:"$((5760+10*$INSTANCE))
SIMIN_PORT="127.0.0.1:"$((5502+10*$INSTANCE))
SIMOUT_PORT="127.0.0.1:"$((5501+10*$INSTANCE))
FG_PORT="127.0.0.1:"$((5503+10*$INSTANCE))
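# e.g. INSTANCE=1 gives MAVLink on tcp:127.0.0.1:5770 and sim I/O on 5512/5511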
[ -z "$VEHICLE" ] && {
CDIR="$PWD"
rpath=$(which realpath)
[ -n "$rpath" ] && {
CDIR=$(realpath $CDIR)
}
VEHICLE=$(basename $CDIR)
}
[ -z "$FRAME" -a "$VEHICLE" = "APMrover2" ] && {
FRAME="rover"
}
[ -z "$FRAME" -a "$VEHICLE" = "ArduPlane" ] && {
FRAME="jsbsim"
}
[ -z "$FRAME" -a "$VEHICLE" = "ArduCopter" ] && {
FRAME="quad"
}
[ -z "$FRAME" -a "$VEHICLE" = "AntennaTracker" ] && {
FRAME="tracker"
}
EXTRA_PARM=""
check_jsbsim_version()
{
jsbsim_version=$(JSBSim --version)
if [[ $jsbsim_version != *"ArduPilot"* ]]
then
cat <<EOF
=========================================================
You need the latest ArduPilot version of JSBSim installed
and in your \$PATH
Please get it from git://github.com/tridge/jsbsim.git
See
http://dev.ardupilot.com/wiki/simulation-2/sitl-simulator-software-in-the-loop/setting-up-sitl-on-linux/
for more details
=========================================================
EOF
exit 1
fi
}
# modify build target based on copter frame type
case $FRAME in
+|quad)
BUILD_TARGET="sitl"
MODEL="+"
;;
X)
BUILD_TARGET="sitl"
EXTRA_PARM="param set FRAME 1;"
MODEL="X"
;;
octa*)
BUILD_TARGET="sitl-octa"
MODEL="$FRAME"
;;
heli)
BUILD_TARGET="sitl-heli"
MODEL="heli"
;;
heli-dual)
BUILD_TARGET="sitl-heli-dual"
EXTRA_SIM="$EXTRA_SIM --frame=heli-dual"
MODEL="heli-dual"
;;
heli-compound)
BUILD_TARGET="sitl-heli-compound"
EXTRA_SIM="$EXTRA_SIM --frame=heli-compound"
MODEL="heli-compound"
;;
IrisRos)
BUILD_TARGET="sitl"
;;
Gazebo)
BUILD_TARGET="sitl"
EXTRA_SIM="$EXTRA_SIM --frame=Gazebo"
MODEL="$FRAME"
;;
CRRCSim-heli)
BUILD_TARGET="sitl-heli"
MODEL="$FRAME"
;;
CRRCSim|last_letter*)
BUILD_TARGET="sitl"
MODEL="$FRAME"
;;
jsbsim*)
BUILD_TARGET="sitl"
MODEL="$FRAME"
check_jsbsim_version
;;
*)
MODEL="$FRAME"
;;
esac
if [ $DEBUG_BUILD == 1 ]; then
BUILD_TARGET="$BUILD_TARGET-debug"
fi
if [ -n "$OVERRIDE_BUILD_TARGET" ]; then
BUILD_TARGET="$OVERRIDE_BUILD_TARGET"
fi
autotest="../Tools/autotest"
[ -d "$autotest" ] || {
# we are not running from one of the standard vehicle directories. Use
# the location of the sim_vehicle.sh script to find the path
autotest=$(dirname $(readlink -e $0))
}
VEHICLEDIR="$autotest/../../$VEHICLE"
[ -d "$VEHICLEDIR" ] || {
VEHICLEDIR=$(dirname $(readlink -e $VEHICLEDIR))
}
pushd $VEHICLEDIR || {
echo "Failed to change to vehicle directory for $VEHICLEDIR"
usage
exit 1
}
AUTOTEST=$autotest
export AUTOTEST
VEHICLEDIR=$(pwd)
if [ $NO_REBUILD == 0 ]; then
if [ $CLEAN_BUILD == 1 ]; then
echo "Building clean"
make clean
fi
echo "Building $BUILD_TARGET"
make $BUILD_TARGET -j$NUM_PROCS || {
make clean
make $BUILD_TARGET -j$NUM_PROCS || {
echo >&2 "$0: Build failed"
exit 1
}
}
fi
popd
# get the location information
if [ -z $CUSTOM_LOCATION ]; then
SIMHOME=$(cat $autotest/locations.txt | grep -i "^$LOCATION=" | cut -d= -f2)
else
SIMHOME=$CUSTOM_LOCATION
LOCATION="Custom_Location"
fi
[ -z "$SIMHOME" ] && {
echo "Unknown location $LOCATION"
usage
exit 1
}
echo "Starting up at $LOCATION : $SIMHOME"
TRACKER_HOME=$(cat $autotest/locations.txt | grep -i "^$TRACKER_LOCATION=" | cut -d= -f2)
[ -z "$TRACKER_HOME" ] && {
echo "Unknown tracker location $TRACKER_LOCATION"
usage
exit 1
}
if [ $START_ANTENNA_TRACKER == 1 ]; then
pushd $autotest/../../AntennaTracker
if [ $CLEAN_BUILD == 1 ]; then
make clean
fi
make sitl-debug -j$NUM_PROCS || {
make clean
make sitl-debug -j$NUM_PROCS
}
TRACKER_INSTANCE=1
TRACKER_UARTA="tcp:127.0.0.1:"$((5760+10*$TRACKER_INSTANCE))
cmd="nice /tmp/AntennaTracker.build/AntennaTracker.elf -I1 --model=tracker --home=$TRACKER_HOME"
$autotest/run_in_terminal_window.sh "AntennaTracker" $cmd || exit 1
popd
fi
cmd="$VEHICLEDIR/$VEHICLE.elf -S -I$INSTANCE --home $SIMHOME"
if [ $WIPE_EEPROM == 1 ]; then
cmd="$cmd -w"
fi
cmd="$cmd --model $MODEL --speedup=$SPEEDUP"
case $VEHICLE in
ArduPlane)
PARMS="ArduPlane.parm"
;;
ArduCopter)
PARMS="copter_params.parm"
;;
APMrover2)
PARMS="Rover.parm"
;;
*)
PARMS=""
;;
esac
if [ $USE_MAVLINK_GIMBAL == 1 ]; then
echo "Using MAVLink gimbal"
cmd="$cmd --gimbal"
fi
if [ $START_HIL == 0 ]; then
if [ $USE_VALGRIND == 1 ]; then
echo "Using valgrind"
$autotest/run_in_terminal_window.sh "ardupilot (valgrind)" valgrind $cmd || exit 1
elif [ $USE_GDB == 1 ]; then
echo "Using gdb"
tfile=$(mktemp)
[ $USE_GDB_STOPPED == 0 ] && {
if [ -n "$BREAKPOINT" ]; then
echo "b $BREAKPOINT" >> $tfile
fi
echo r >> $tfile
}
$autotest/run_in_terminal_window.sh "ardupilot (gdb)" gdb -x $tfile --args $cmd || exit 1
else
$autotest/run_in_terminal_window.sh "ardupilot" $cmd || exit 1
fi
fi
trap kill_tasks SIGINT
# mavproxy.py --master tcp:127.0.0.1:5760 --sitl 127.0.0.1:5501 --out 127.0.0.1:14550 --out 127.0.0.1:14551
options=""
if [ $START_HIL == 0 ]; then
options="--master $MAVLINK_PORT --sitl $SIMOUT_PORT"
fi
# If running inside of a vagrant guest, then we probably want to forward our mavlink out to the containing host OS
if [ $USER == "vagrant" ]; then
options="$options --out 10.0.2.2:14550"
fi
options="$options --out 127.0.0.1:14550 --out 127.0.0.1:14551"
extra_cmd1=""
if [ $WIPE_EEPROM == 1 ]; then
extra_cmd="param forceload $autotest/$PARMS; $EXTRA_PARM; param fetch"
fi
if [ $START_ANTENNA_TRACKER == 1 ]; then
options="$options --load-module=tracker"
extra_cmd="$extra_cmd module load map; tracker set port $TRACKER_UARTA; tracker start;"
fi
if [ $START_HIL == 1 ]; then
options="$options --load-module=HIL"
fi
if [ $USE_MAVLINK_GIMBAL == 1 ]; then
options="$options --load-module=gimbal"
fi
if [ -f /usr/bin/cygstart ]; then
cygstart -w "/cygdrive/c/Program Files (x86)/MAVProxy/mavproxy.exe" $options --cmd="$extra_cmd" $*
else
mavproxy.py $options --cmd="$extra_cmd" $*
fi
if [ $START_HIL == 0 ]; then
kill_tasks
fi
|
Toreny/UAV
|
Tools/autotest/sim_vehicle.sh
|
Shell
|
gpl-3.0
| 10,807 |
#!/bin/bash
make -j
mkdir -p $PREFIX/bin
cp $SRC_DIR/mapDIA $PREFIX/bin/mapDIA
chmod +x $PREFIX/bin/mapDIA
|
joachimwolff/bioconda-recipes
|
recipes/mapdia/build.sh
|
Shell
|
mit
| 106 |
#!/bin/sh
for dir in /var/run/hostapd-*; do
[ -d "$dir" ] || continue
hostapd_cli -p "$dir" wps_pbc
done
|
Victek/wrt1900ac-aa
|
package/hostapd/files/wps-hotplug.sh
|
Shell
|
gpl-2.0
| 108 |
#!/bin/bash
echo -e "Removing previous SAML metadata directory"
rm -Rf "${PWD}/ci/tests/puppeteer/scenarios/${SCENARIO}/saml-md"
|
apereo/cas
|
ci/tests/puppeteer/scenarios/saml2-idp-with-cas-request/init.sh
|
Shell
|
apache-2.0
| 129 |
#!/bin/sh
OPTIONS_KEEPDASHDASH=t
OPTIONS_SPEC="\
git-checkout [options] [<branch>] [<paths>...]
--
b= create a new branch started at <branch>
l create the new branch's reflog
track arrange that the new branch tracks the remote branch
f proceed even if the index or working tree is not HEAD
m merge local modifications into the new branch
q,quiet be quiet
"
SUBDIRECTORY_OK=Sometimes
. git-sh-setup
require_work_tree
old_name=HEAD
old=$(git rev-parse --verify $old_name 2>/dev/null)
oldbranch=$(git symbolic-ref $old_name 2>/dev/null)
new=
new_name=
force=
branch=
track=
newbranch=
newbranch_log=
merge=
quiet=
v=-v
LF='
'
while test $# != 0; do
case "$1" in
-b)
shift
newbranch="$1"
[ -z "$newbranch" ] &&
die "git checkout: -b needs a branch name"
git show-ref --verify --quiet -- "refs/heads/$newbranch" &&
die "git checkout: branch $newbranch already exists"
git check-ref-format "heads/$newbranch" ||
die "git checkout: we do not like '$newbranch' as a branch name."
;;
-l)
newbranch_log=-l
;;
--track|--no-track)
track="$1"
;;
-f)
force=1
;;
-m)
merge=1
;;
-q|--quiet)
quiet=1
v=
;;
--)
shift
break
;;
*)
usage
;;
esac
shift
done
arg="$1"
rev=$(git rev-parse --verify "$arg" 2>/dev/null)
if rev=$(git rev-parse --verify "$rev^0" 2>/dev/null)
then
[ -z "$rev" ] && die "unknown flag $arg"
new_name="$arg"
if git show-ref --verify --quiet -- "refs/heads/$arg"
then
rev=$(git rev-parse --verify "refs/heads/$arg^0")
branch="$arg"
fi
new="$rev"
shift
elif rev=$(git rev-parse --verify "$rev^{tree}" 2>/dev/null)
then
# checking out selected paths from a tree-ish.
new="$rev"
new_name="$rev^{tree}"
shift
fi
[ "$1" = "--" ] && shift
case "$newbranch,$track" in
,--*)
die "git checkout: --track and --no-track require -b"
esac
case "$force$merge" in
11)
die "git checkout: -f and -m are incompatible"
esac
# The behaviour of the command with and without explicit path
# parameters is quite different.
#
# Without paths, we are checking out everything in the work tree,
# possibly switching branches. This is the traditional behaviour.
#
# With paths, we are _never_ switching branch, but checking out
# the named paths from either index (when no rev is given),
# or the named tree-ish (when rev is given).
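# For example, "git checkout topic" switches to the topic branch, while
# "git checkout topic -- Makefile" only restores Makefile from topic
# without switching branches.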
if test "$#" -ge 1
then
hint=
if test "$#" -eq 1
then
hint="
Did you intend to checkout '$@' which can not be resolved as commit?"
fi
if test '' != "$newbranch$force$merge"
then
die "git checkout: updating paths is incompatible with switching branches/forcing$hint"
fi
if test '' != "$new"
then
# from a specific tree-ish; note that this is for
# rescuing paths and is never meant to remove what
# is not in the named tree-ish.
git ls-tree --full-name -r "$new" "$@" |
git update-index --index-info || exit $?
fi
# Make sure the request is about existing paths.
git ls-files --full-name --error-unmatch -- "$@" >/dev/null || exit
git ls-files --full-name -- "$@" |
(cd_to_toplevel && git checkout-index -f -u --stdin)
# Run a post-checkout hook -- the HEAD does not change so the
# current HEAD is passed in for both args
if test -x "$GIT_DIR"/hooks/post-checkout; then
"$GIT_DIR"/hooks/post-checkout $old $old 0
fi
exit $?
else
# Make sure we did not fall back on $arg^{tree} codepath
# since we are not checking out from an arbitrary tree-ish,
# but switching branches.
if test '' != "$new"
then
git rev-parse --verify "$new^{commit}" >/dev/null 2>&1 ||
die "Cannot switch branch to a non-commit."
fi
fi
# We are switching branches and checking out trees, so
# we *NEED* to be at the toplevel.
cd_to_toplevel
[ -z "$new" ] && new=$old && new_name="$old_name"
# If we don't have an existing branch that we're switching to,
# and we don't have a new branch name for the target we
# are switching to, then we are detaching our HEAD from any
# branch. However, if "git checkout HEAD" detaches the HEAD
# from the current branch, even though that may be logically
# correct, it feels somewhat funny. More importantly, we do not
# want "git checkout" nor "git checkout -f" to detach HEAD.
detached=
detach_warn=
describe_detached_head () {
test -n "$quiet" || {
printf >&2 "$1 "
GIT_PAGER= git log >&2 -1 --pretty=oneline --abbrev-commit "$2" --
}
}
if test -z "$branch$newbranch" && test "$new_name" != "$old_name"
then
detached="$new"
if test -n "$oldbranch" && test -z "$quiet"
then
detach_warn="Note: moving to \"$new_name\" which isn't a local branch
If you want to create a new branch from this checkout, you may do so
(now or later) by using -b with the checkout command again. Example:
git checkout -b <new_branch_name>"
fi
elif test -z "$oldbranch" && test "$new" != "$old"
then
describe_detached_head 'Previous HEAD position was' "$old"
fi
if [ "X$old" = X ]
then
if test -z "$quiet"
then
echo >&2 "warning: You appear to be on a branch yet to be born."
echo >&2 "warning: Forcing checkout of $new_name."
fi
force=1
fi
if [ "$force" ]
then
git read-tree $v --reset -u $new
else
git update-index --refresh >/dev/null
git read-tree $v -m -u --exclude-per-directory=.gitignore $old $new || (
case "$merge,$v" in
,*)
exit 1 ;;
1,)
;; # quiet
*)
echo >&2 "Falling back to 3-way merge..." ;;
esac
# Match the index to the working tree, and do a three-way.
git diff-files --name-only | git update-index --remove --stdin &&
work=`git write-tree` &&
git read-tree $v --reset -u $new || exit
eval GITHEAD_$new='${new_name:-${branch:-$new}}' &&
eval GITHEAD_$work=local &&
export GITHEAD_$new GITHEAD_$work &&
git merge-recursive $old -- $new $work
# Do not register the cleanly merged paths in the index yet.
# this is not a real merge before committing, but just carrying
# the working tree changes along.
unmerged=`git ls-files -u`
git read-tree $v --reset $new
case "$unmerged" in
'') ;;
*)
(
z40=0000000000000000000000000000000000000000
echo "$unmerged" |
sed -e 's/^[0-7]* [0-9a-f]* /'"0 $z40 /"
echo "$unmerged"
) | git update-index --index-info
;;
esac
exit 0
)
saved_err=$?
if test "$saved_err" = 0 && test -z "$quiet"
then
git diff-index --name-status "$new"
fi
(exit $saved_err)
fi
#
# Switch the HEAD pointer to the new branch if we
# checked out a branch head, and remove any potential
# old MERGE_HEAD's (subsequent commits will clearly not
# be based on them, since we re-set the index)
#
if [ "$?" -eq 0 ]; then
if [ "$newbranch" ]; then
git branch $track $newbranch_log "$newbranch" "$new_name" || exit
branch="$newbranch"
fi
if test -n "$branch"
then
old_branch_name=`expr "z$oldbranch" : 'zrefs/heads/\(.*\)'`
GIT_DIR="$GIT_DIR" git symbolic-ref -m "checkout: moving from ${old_branch_name:-$old} to $branch" HEAD "refs/heads/$branch"
if test -n "$quiet"
then
true # nothing
elif test "refs/heads/$branch" = "$oldbranch"
then
echo >&2 "Already on branch \"$branch\""
else
echo >&2 "Switched to${newbranch:+ a new} branch \"$branch\""
fi
elif test -n "$detached"
then
old_branch_name=`expr "z$oldbranch" : 'zrefs/heads/\(.*\)'`
git update-ref --no-deref -m "checkout: moving from ${old_branch_name:-$old} to $arg" HEAD "$detached" ||
die "Cannot detach HEAD"
if test -n "$detach_warn"
then
echo >&2 "$detach_warn"
fi
describe_detached_head 'HEAD is now at' HEAD
fi
rm -f "$GIT_DIR/MERGE_HEAD"
else
exit 1
fi
# Run a post-checkout hook
if test -x "$GIT_DIR"/hooks/post-checkout; then
"$GIT_DIR"/hooks/post-checkout $old $new 1
fi
|
2ndy/RaspIM
|
usr/share/doc/git/contrib/examples/git-checkout.sh
|
Shell
|
gpl-2.0
| 7,618 |
#!/bin/sh
# -- check for root --
if [ "`id -u`" != "0" ]; then
echo "add_pkgs.sh: sorry, this must be done as root." 1>&2
exit 1
fi
# -- add pkgs --
pkg install \
pkg \
nano \
zsh \
tmux \
dhcpcd \
iperf \
|| exit 1
|
guyyur/configs
|
orange-pi-one-tf-slot-dead/FreeBSD/add_pkgs.sh
|
Shell
|
isc
| 237 |
#!/bin/bash
OBJ_DIR=$PWD/obj_lib
QMAKE=qmake
DIR_OPTIONS="--base-directory $PWD/lib --directory $OBJ_DIR" #--directory obj_tests --directory tests
function error_exit
{
echo "$1" 1>&2
exit 1
}
$QMAKE "COVERAGE=1" && make VERBOSE=1
[ $? -ne 0 ] && error_exit "Compile step failed."
lcov --zerocounters $DIR_OPTIONS #> lcov.log 2> lcov.err
lcov --capture --initial $DIR_OPTIONS --output-file app.baseline #>> lcov.log 2>> lcov.err
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$PWD/bin/
bin/tests
echo "analyzing coverage data...: lcov --no-checksum $DIR_OPTIONS --capture --output-file app"
lcov --no-checksum $DIR_OPTIONS --capture --output-file app #>> lcov.log 2>> lcov.err
#lcov --no-checksum $DIR_OPTIONS --capture --output-file app.info
echo "merging datas"
lcov --add-tracefile app.baseline --add-tracefile app --output-file app.info
echo "removing useless data"
lcov --remove app.info "/usr*" --output-file app.info
lcov --remove app.info "external/*" --output-file app.info
lcov --remove app.info "*/ETL/etl/*" --output-file app.info
echo "generation html doc"
genhtml -o cov-html app.info #>> lcov.log 2>> lcov.err
echo "done!"
|
julienlopez/QRocketLaunchSimulator
|
tests.sh
|
Shell
|
mit
| 1,146 |
#! /bin/sh
export REDMINE_LANG=en
export RAILS_ENV=test
PLUGIN_NAME="redmine_bx"
# Initialize redmine
bundle exec rake generate_secret_token
bundle exec rake db:migrate
#bundle exec rake redmine:load_default_data
# Copy assets & execute plugin's migration
bundle exec rake redmine:plugins NAME=${PLUGIN_NAME}
if [ $TARGET = "redmine" ]; then
export SCMS=bazaar,cvs,subversion,git,mercurial,filesystem
# Execute redmine tests
bundle exec rake ci
else
# Initialize RSpec
bundle exec rails g rspec:install
# Execute plugin test by RSpec
bundle exec rspec plugins/${PLUGIN_NAME}/spec -c
fi
|
pinzolo/redmine_bx
|
travis/exec_test.sh
|
Shell
|
mit
| 607 |
#!/bin/sh
cd ..
make
cd tools
|
PanCzerwonySer/DogOS-WP
|
tools/build.sh
|
Shell
|
mit
| 30 |
ps -ef | grep -v grep | grep DiceRoller
if [ $? -eq 1 ]
then
cd ~/chancecoinj
nohup sh start_diceroller.sh &
else
echo "Already running"
fi
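# A roughly equivalent check, assuming pgrep(1) is available:
#   pgrep -f DiceRoller >/dev/null || (cd ~/chancecoinj && nohup sh start_diceroller.sh &)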
|
chancecoin/chancecoinj
|
keep_alive_diceroller.sh
|
Shell
|
mit
| 140 |
#! /bin/bash
# USAGE:
# Usage: eval `PROGRAM [-c] [-s] setup_version`
# -OR-
# Usage: eval `PROGRAM [-c] [-s] TOS_SYSTEM COMPILER_VERSION`
# END USAGE
# HELP:
# where SETUP_VERSION is one of the version strings returned if PROGRAM
# is invoked without arguments.
# This program writes shell command to standard out such that if the
# result is evaluated by the shell, the critical environmental variables
# and shell functions needed by both the BUILD script and the build
# environment in general get set properly. For csh-like shells, use an
# alias like so:
# alias setup 'eval `setup_internal -c \!*`'
# For sh-like shells, use a shell function like so:
# setup(){ eval `setup_internal "$@"`; }
# When -c is specified, the commands are suitable for csh and
# derivatives.
# When -s is specified, the commands are suitable for sh and
# derivatives.
# When neither -c nor -s is specified, the environment variable SHELL is
# checked. If is contains the string "csh", then csh syntax is used,
# otherwise sh syntax is used.
# END HELP
prog=${0##*/}
prog=${prog%%_internal}
USAGE=$(sed -e '1,/^# USAGE:$/d' -e '/^# END USAGE$/,$d' -e 's/^# //' \
-e "s PROGRAM $prog " $0)
HELP="$USAGE
$(sed -e '1,/^# HELP:$/d' -e '/^# END HELP/,$d' -e 's/^# //' \
-e "s PROGRAM $prog " $0)"
USAGE="$USAGE
Use any unused flag for more help"
# Variables set via command line options
SUFFIX=
# Internal use variables
SYSTEM=
VERSION=
# First, try to determine SHELL type. Use heuristic, if csh appears in
# $SHELL, set csh, else set sh
if [ "${SHELL##*csh}" = "$SHELL" ]
then SUFFIX=sh
else SUFFIX=csh
fi
Usage() {
echo "$USAGE" 1>&2
exit ${1-1}
}
Help() {
echo "$HELP" 1>&2
exit ${1-1}
}
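# NOTE: list_versions is called below (no-argument invocation) but was never
# defined in this template. The placeholder here is an assumption: what it
# should actually print -- the available SYSTEM/VERSION strings -- is
# site-specific, so fill it in for your installation.
list_versions() {
	echo "Available setup versions (placeholder; populate for your site):" 1>&2
	# e.g.: ls -1 /opt/toolchains 1>&2   # hypothetical location
}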
# process options
while getopts cs opt
do case $opt in
c) SUFFIX=csh ;;
s) SUFFIX=sh ;;
\?) Help;;
esac
done
shift $((OPTIND - 1))
# process arguments
case $# in
0) list_versions
exit 0
;;
1) VERSION=${1##*/}
SYSTEM=${1%%/$VERSION}
;;
2) VERSION=$2
SYSTEM=${1%%/}
;;
*) Usage 2;;
esac
doOp() {
if "$@"
then echo "Successfully completed '$*'"
else echo "FAILED: '$*'"
fi
}
donedir=$HOME/.$prog.done
mkdir -p "$donedir" # ensure the stamp directory exists before 'touch' below
doDependentOp() {
	# First arg is a list of files that must be older than the
	# done file
dependencies=$1
shift
donefile=$donedir/.$(echo "$*"|tr " /" "_%")
for dependency in $dependencies
do if [[ $donefile -nt $dependency ]]
then : # no need to execute
else # execute and cut the loop short
if "$@"
then echo "Successfully completed '$*'"
touch $donefile
return 0
else echo "FAILED: '$*'"
return 1
fi
fi
done
echo "Not Needed, not executed: '$*'"
}
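# Illustrative calls (file names are hypothetical, for demonstration only):
#   doOp make install
#   doDependentOp "src/main.c src/util.c" make all
# The second form re-runs 'make all' only when a listed file is newer than
# the stamp file kept under $donedir for that exact command line.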
countlines() {
# because 'echo "$var"|wc -l' counts zero lines as one
if [[ -z "$*" ]]
then echo 0
else echo "$*" | wc -l
fi
}
|
shindochan/bashenv
|
scripts/template.bash
|
Shell
|
mit
| 2,930 |
#!/bin/bash
# Copyright (c) 2017 MAZA Network Developers, Robert Nelson (guruvan)
test -f EasyGitian.env && source EasyGitian.env
if [ "$EASYGITIAN_DEBUG}" = "true" ] ; then
DEBUG=true
set -xeo pipefail
fi
## This script runs on the host machine
# First install Virtualbox and Vagrant
# Vagrant
get_vagrant () {
# Get files
vagrant_version=2.1.4
  curl https://releases.hashicorp.com/vagrant/${vagrant_version}/vagrant_${vagrant_version}_x86_64.dmg -o vagrant_${vagrant_version}_x86_64.dmg
  curl https://releases.hashicorp.com/vagrant/${vagrant_version}/vagrant_${vagrant_version}_SHA256SUMS -o vagrant_${vagrant_version}_SHA256SUMS
  curl https://releases.hashicorp.com/vagrant/${vagrant_version}/vagrant_${vagrant_version}_SHA256SUMS.sig -o vagrant_${vagrant_version}_SHA256SUMS.sig
# Verify shasums signature via gpg
#gpg --recv-keys 51852D87348FFC4C || exit 9
gpg --import hashicorp.asc \
|| gpg --recv-keys --keyserver pool.sks-keyservers.net 51852D87348FFC4C \
|| exit 9
gpg --verify vagrant_${vagrant_version}_SHA256SUMS.sig vagrant_${vagrant_version}_SHA256SUMS || exit 8
# Verify shasum for download
grep dmg vagrant_${vagrant_version}_SHA256SUMS | shasum -c || exit 7
# Mount the dmg and open it
hdiutil attach vagrant_${vagrant_version}_x86_64.dmg -autoopen
# User must install the app
echo "Now double click the Vagrant pkg file"
read -n 1 -s -r -p "Press any key to continue";echo
which vagrant || not_installed vagrant
touch .vagrant_installed
}
# Virtualbox
get_vbox () {
# Get files
vbox_version=5.2.18-124319
vbox_shortver=5.2.18
curl -O http://download.virtualbox.org/virtualbox/${vbox_shortver}/VirtualBox-${vbox_version}-OSX.dmg
curl -O http://download.virtualbox.org/virtualbox/${vbox_shortver}/Oracle_VM_VirtualBox_Extension_Pack-${vbox_version}.vbox-extpack
curl -O https://www.virtualbox.org/download/hashes/${vbox_shortver}/SHA256SUMS
mv SHA256SUMS vbox_${vbox_shortver}.SHA256SUMS
# Verify shasum for download
grep dmg vbox_${vbox_shortver}.SHA256SUMS | shasum -c || exit 6
grep "${vbox_version}.vbox-extpack" vbox_${vbox_shortver}.SHA256SUMS | shasum -c || exit 5
# Mount the dmg and open it
hdiutil attach VirtualBox-${vbox_version}-OSX.dmg -autoopen
# User must install the app
echo "Now double-click the VirtualBox.pkg icon to install VirtualBox"
echo "If the VirtualBox installation fails at this point, you can"
echo "reboot to finish the installation."
echo " "
echo "macOS security prevents VirtualBox from loading drivers without"
echo "your permission. An "Allow" button will appear in your Security Preferences"
echo "pane. Click the Lock Icon to unlock, and then click Allow"
echo "Once you do this run EasyGitian again with:"
echo " "
echo "source ~/EasyGitian.env ; ./EasyGitian "
read -n 1 -s -r -p "Press any key to continue";echo
which VBoxManage || not_installed VBoxManage
echo "Installing VirtualBox Extension Pack (required)"
sleep 5
extpack_installed=$(VBoxManage list extpacks |grep "Usable" | awk '{print $2}')
if [ "$extpack_installed" != "true" ] ; then
VBoxManage extpack install --replace Oracle_VM_VirtualBox_Extension_Pack-${vbox_version}.vbox-extpack
fi
# TODO - if mojave user doesn't allow the kernel module to load
# Vbox will fail later, requiring a reboot
# determine if we can reload the vbox driver if the user didn't click allow
# Remind user to unlock before clicking allow
touch .Vbox_installed
}
not_installed () {
(( attempts++ ))
if [ ${attempts} -le 3 ]; then
echo "Attempting to install ${1} - ${attempts} tries"
which "$1" || get_"${1}"
else
echo "Installation of ${1} failed"
test -f ./.Vbox_installed && echo "VirtualBox seems installed"
test -f ./.vagrant_installed && echo "Vagrant seems installed"
echo " "
echo "If both Virtualbox and Vagrant seem installed, and you still see this message"
echo "Please report an issue on https://github.com/mazaclub/EasyGitianBuilder"
echo " "
echo "You may attempt to install ${1} on your own and run EasyGitian later"
exit 99
fi
}
attempts=1
if [ -z "$1" ]; then
which vagrant || get_vagrant
which VBoxManage || get_vbox
which vagrant && which VBoxManage && touch .prereq_install_complete
echo "Prerequisites should now be installed"
else
get_vagrant
get_vbox
fi
|
mazaclub/EasyGitianBuilder
|
darwin-Base-System.sh
|
Shell
|
mit
| 4,256 |
#!/bin/bash
# bash is required for the {1..1000} brace expansion below
for i in {1..1000}
do
../bin/avl >> dump
done
|
abhassaroha/GeneralLib
|
DataStructures/tests/test.sh
|
Shell
|
mit
| 57 |
#!/bin/bash
grunt Linux64_v0.12.0 && cp nwjs/nwjs-v0.12.0-linux-x64/libffmpegsumo.so dist/Linux64_v0.12.0/ && ./dist/Linux64_v0.12.0/nw ./dist/Linux64_v0.12.0/app.nw/
|
cortezcristian/trainer
|
build.sh
|
Shell
|
mit
| 168 |
#!/bin/bash
numkeys=100
if [ $# -gt 0 ]; then
numkeys=$1
fi
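# Usage: ./run-test.sh [numkeys]   (numkeys defaults to 100 when omitted)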
die(){
kill "$general"
wait >/dev/null 2>&1
exit "$status"
}
trap die 2
./general -c certs/localhost.pem &
general=$!
sleep 1
./test_https -f5 -l10 -n$numkeys -ph https://localhost:5443
status=$?
die
|
aki5/general
|
run-test.sh
|
Shell
|
mit
| 270 |
PROMPT_GOOD_BG=45
PROMPT_GOOD_FG=16
PROMPT_BAD_BG=178
PROMPT_BAD_FG=16
ENABLE_STANDARD_BAR_STATUS=0
promptBar()
{
local FG=${PROMPT_GOOD_FG}
local BG=${PROMPT_GOOD_BG}
if [[ ${LAST_RETURN} != 0 && ${ENABLE_STANDARD_BAR_STATUS} != 0 ]]; then
FG=${PROMPT_BAD_FG}
BG=${PROMPT_BAD_BG}
fi
bartlet_segment ${FG} ${BG} "$(bartlet_color_wrap bold)\u@\h $(bartlet_color_wrap regular)\W " ""
}
bartlet_define "Prompt" "promptBar"
|
jwkblades/Bartlet
|
src/StandardSuite/Prompt.sh
|
Shell
|
mit
| 458 |
#!/bin/sh
# See cat << HEREDOC for file description.
# Set this to the complete path of the tidy for which you want to generate
# documentation. Relative path is okay. You shouldn't have to change this
# too often if your compiler always puts tidy in the same place.
TIDY_PATH="../build/cmake/tidy" # Build directory.
TIDY_VERSION=`head -n 1 ../version.txt` # In project root directory.
cat << HEREDOC
Build 'tidy.1', which is a man page suitable for installation
in all Unix-like operating systems. This script will build
it, but not install it.
Build 'quickref.html'. This is distributed with the
the Tidy source code and also used on Tidy's website.
Be sure to distribute it with 'quickref.css' as well.
Build the 'tidylib_api' directory, which contains a website
with the documentation for TidyLib’s headers.
These files will be built into '{current_dir}/temp'.
HEREDOC
# Output and flags' declarations.
DOXY_CFG="./doxygen.cfg"
OUTP_DIR="./temp"
BUILD_XSLT=1
BUILD_API=1
##
# Ensure the output dir exists.
##
if [ ! -d "$OUTP_DIR" ]; then
mkdir $OUTP_DIR
fi
##
# Preflight
##
# Check for a valid tidy.
if [ ! -x "$TIDY_PATH" ]; then
BUILD_XSLT=0
echo "- '$TIDY_PATH' not found. You should set TIDY_PATH in this script."
fi
# Check for xsltproc dependency.
hash xsltproc 2>/dev/null || { echo "- xsltproc not found. You require an XSLT processor."; BUILD_XSLT=0; }
##
# Build 'quickref.html' and 'tidy.1'.
##
if [ "$BUILD_XSLT" -eq 1 ]; then
# Use the designated tidy to get its config and help.
# These temporary files will be cleaned up later.
$TIDY_PATH -xml-config > "$OUTP_DIR/tidy-config.xml"
$TIDY_PATH -xml-help > "$OUTP_DIR/tidy-help.xml"
# 'quickref.html' and 'quickref_include.html' for the Doxygen build.
xsltproc "./quickref.xsl" "$OUTP_DIR/tidy-config.xml" > "$OUTP_DIR/quickref.html"
xsltproc "./quickref.include.xsl" "$OUTP_DIR/tidy-config.xml" > ./examples/quickref_include.html
# Well, duh, we should tidy quickref.html
$TIDY_PATH -config "./tidy.cfg" -modify "$OUTP_DIR/quickref.html"
# 'tidy.1'; create a valid tidy1.xsl first by subbing CMAKE's variable.
sed "s|@TIDYCONFIG@|./tidy-config.xml|g" < ./tidy1.xsl.in > "$OUTP_DIR/tidy1.xsl"
xsltproc "$OUTP_DIR/tidy1.xsl" "$OUTP_DIR/tidy-help.xml" > "$OUTP_DIR/tidy.1"
# Cleanup
rm "$OUTP_DIR/tidy-config.xml"
rm "$OUTP_DIR/tidy-help.xml"
rm "$OUTP_DIR/tidy1.xsl"
echo "'quickref.html' and 'tidy.1' have been built.\n"
else
echo "* tidy.1 was skipped because not all dependencies were satisfied."
fi
##
# Preflight
##
# Check for the doxygen.cfg file.
if [ ! -f "$DOXY_CFG" ]; then
BUILD_API=0
echo "- 'DOXY_CFG' not found. It is required to configure doxygen."
fi
# Check for doxygen dependency.
hash doxygen 2>/dev/null || { echo "- doxygen not found. This script requires doxygen."; BUILD_API=0; }
##
# Build the doxygen project.
##
if [ "$BUILD_API" -eq 1 ]; then
echo "The following is doxygen's stderr output. It doesn't indicate errors with this script:\n"
# echo the output of tidy5 --help so we can include
$TIDY_PATH -h > "./examples/tidy5.help.txt"
$TIDY_PATH -help-config > "./examples/tidy5.config.txt"
  ## copy license file to examples for including
cp ../README/LICENSE.md ./examples/
## this lot
# - echoes and catches output of the doxygen config
# - overwrites some vars but appending some to config at end
# - which are then passed to doxygen as stdin (instead of the path to a config.file)
( cat "$DOXY_CFG"; \
echo "PROJECT_NUMBER=$TIDY_VERSION"; \
echo "GENERATE_TAGFILE=$OUTP_DIR/tidylib_api/tidy.tags"; \
echo "HTML_EXTRA_FILES= ./examples/tidy5.help.txt ./examples/tidy5.config.txt"; ) \
| doxygen - > /dev/null
# cleanup
rm "./examples/tidy5.help.txt"
rm "./examples/tidy5.config.txt"
rm "./examples/LICENSE.md"
echo "\nTidyLib API documentation has been built."
else
echo "* $OUTP_DIR/tidylib_api/ was skipped because not all dependencies were satisfied."
fi
##
# Done
##
echo "\nDone.\n"
|
cremame/tidy-html5
|
ext/tidy-html5/documentation/build_docs.sh
|
Shell
|
mit
| 4,073 |
#
git clone https://github.com/orian/psz_utils && cd psz_utils && mkdir build && cd build && cmake -DCMAKE_INSTALL_PREFIX:PATH=/libs /src && make all install
# fails with :ro
cmake -DCMAKE_BUILD_TYPE=Release -DJSONCPP_LIB_BUILD_SHARED=ON -DCMAKE_INSTALL_PREFIX:PATH=/libs -G "Unix Makefiles" /src && mv /libs/include/json /libs/include/jsoncpp/json
|
orian/cppenv
|
install.sh
|
Shell
|
mit
| 352 |
#!/usr/bin/env bash
sudo npm install --global yo gulp-cli bower
sudo npm install --global generator-webapp
|
maxg0/dotfiles
|
node/generator.sh
|
Shell
|
mit
| 108 |
#!/bin/bash
set -eo pipefail -o nounset
if [[ -z $(conda info --envs | grep "*" | grep -o "\/.*") ]]; then
export CONDA_ROOT=$(conda info --root)
env_dir=$CONDA_ROOT
export RECIPE_DIR=$CONDA_ROOT/share/ggd/Homo_sapiens/GRCh37/grch37-amino-acid-sequences-chr-regions-gencode-v1/1
elif [[ $(conda info --envs | grep "*" | grep -o "\/.*") == "base" ]]; then
export CONDA_ROOT=$(conda info --root)
env_dir=$CONDA_ROOT
export RECIPE_DIR=$CONDA_ROOT/share/ggd/Homo_sapiens/GRCh37/grch37-amino-acid-sequences-chr-regions-gencode-v1/1
else
env_dir=$(conda info --envs | grep "*" | grep -o "\/.*")
export CONDA_ROOT=$env_dir
export RECIPE_DIR=$env_dir/share/ggd/Homo_sapiens/GRCh37/grch37-amino-acid-sequences-chr-regions-gencode-v1/1
fi
PKG_DIR=`find "$CONDA_SOURCE_PREFIX/pkgs/" -name "$PKG_NAME-$PKG_VERSION*" | grep -v ".tar.bz2" | grep "$PKG_VERSION.*$PKG_BUILDNUM$"`
if [ -d $RECIPE_DIR ]; then
rm -r $RECIPE_DIR
fi
mkdir -p $RECIPE_DIR
(cd $RECIPE_DIR && bash $PKG_DIR/info/recipe/recipe.sh)
cd $RECIPE_DIR
## Iterate over new files and replace file name with data package name and data version
for f in *; do
ext="${f#*.}"
filename="{f%%.*}"
if [[ ! -f "grch37-amino-acid-sequences-chr-regions-gencode-v1.$ext" ]]
then
(mv $f "grch37-amino-acid-sequences-chr-regions-gencode-v1.$ext")
fi
done
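## Illustration (hypothetical file name): a recipe output "sequences.fa.gz"
## would be renamed to "grch37-amino-acid-sequences-chr-regions-gencode-v1.fa.gz",
## since ext picks up everything after the first dot ("fa.gz").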
## Add environment variables
#### File
if [[ `find $RECIPE_DIR -maxdepth 1 -type f | wc -l | sed 's/ //g'` == 1 ]] ## If only one file
then
recipe_env_file_name="ggd_grch37-amino-acid-sequences-chr-regions-gencode-v1_file"
recipe_env_file_name="$(echo "$recipe_env_file_name" | sed 's/-/_/g' | sed 's/\./_/g')"
file_path="$(find $RECIPE_DIR -type f -maxdepth 1)"
elif [[ `find $RECIPE_DIR -maxdepth 1 -type f | wc -l | sed 's/ //g'` == 2 ]] ## If two files
then
    indexed_file=`find $RECIPE_DIR -maxdepth 1 -type f \( -name "*.tbi" -or -name "*.fai" -or -name "*.bai" -or -name "*.crai" -or -name "*.gzi" \)`
if [[ ! -z "$indexed_file" ]] ## If index file exists
then
recipe_env_file_name="ggd_grch37-amino-acid-sequences-chr-regions-gencode-v1_file"
recipe_env_file_name="$(echo "$recipe_env_file_name" | sed 's/-/_/g' | sed 's/\./_/g')"
file_path="$(echo $indexed_file | sed 's/\.[^.]*$//')" ## remove index extension
fi
fi
#### Dir
recipe_env_dir_name="ggd_grch37-amino-acid-sequences-chr-regions-gencode-v1_dir"
recipe_env_dir_name="$(echo "$recipe_env_dir_name" | sed 's/-/_/g' | sed 's/\./_/g')"
activate_dir="$env_dir/etc/conda/activate.d"
deactivate_dir="$env_dir/etc/conda/deactivate.d"
mkdir -p $activate_dir
mkdir -p $deactivate_dir
echo "export $recipe_env_dir_name=$RECIPE_DIR" >> $activate_dir/env_vars.sh
echo "unset $recipe_env_dir_name">> $deactivate_dir/env_vars.sh
#### File
## If the file env variable exists, set the env file var
if [[ ! -z "${recipe_env_file_name:-}" ]]
then
echo "export $recipe_env_file_name=$file_path" >> $activate_dir/env_vars.sh
echo "unset $recipe_env_file_name">> $deactivate_dir/env_vars.sh
fi
echo 'Recipe successfully built!'
|
gogetdata/ggd-recipes
|
recipes/genomics/Homo_sapiens/GRCh37/grch37-amino-acid-sequences-chr-regions-gencode-v1/post-link.sh
|
Shell
|
mit
| 3,130 |
#!/bin/bash
#tests and builds project
function error_exit
{
echo "$1" 1>&2
exit 1
}
set -x
if [ $TRAVIS_BRANCH == "master" ]; then
if npm test; then
if npm run build:production; then
echo "******TESTS PASSED******"
exit 0
else
error_exit "******BUILD FAILED! Aborting.*********"
fi
else
error_exit "******TESTS FAILED! Aborting build.*********"
fi
elif [ $TRAVIS_BRANCH == "develop" ]; then
if npm test; then
if npm run build:staging; then
echo "******TESTS PASSED******"
exit 0
else
error_exit "******BUILD FAILED! Aborting.*********"
fi
else
error_exit "******TESTS FAILED! Aborting build.*********"
fi
else
if npm run test; then
echo "*****TESTS PASSED****"
else
error_exit "******TESTS FAILED! Aborting build.*********"
fi
fi
|
DataSF/open-data-explorer
|
.travis/build.sh
|
Shell
|
mit
| 831 |
#!/usr/bin/env bash
#####################################################################
##
## title: String Extension
##
## description:
## String extension of shell (bash, ...)
## with well-known function for string manipulation
## Function list based on:
## https://docs.oracle.com/javase/7/docs/api/java/lang/String.html
## https://docs.python.org/2/library/string.html#string-functions
##
## author: Mihaly Csokas
##
## date: 07. Dec. 2017
##
## license: MIT
##
#####################################################################
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
source "$DIR/console.sh"
string() {
local operation="$1"
shift
case "$operation" in
capitalize)
# pre-conditions:
[[ "$#" -lt 1 ]] && log_failure "[must be one param]" && return 1
local str="$1"
local retval
retval="${str^}"
echo "$retval"
;;
## pre-condition:
## - number of params cannot be less than two
##
## params:
## - str: string : the string we investigate
## - pos: integer : the position in which the character sits
## return:
## - retval: character : the character on the passed position
char_at)
# pre-conditions:
[[ "$#" -lt 2 ]] && log_failure "[must be two params]" && return 1
local str="$1"
local pos="$2" # pos must be >= 0
if [[ "$pos" -lt 0 ]]; then
pos=${#str}
fi
local retval
retval=${str:pos:1}
echo "$retval"
;;
# like sort method
# Apple pear < APple peAr
compare_to)
# pre-conditions:
[[ "$#" -lt 2 ]] && log_failure "[must be two params]" && return 1
local left right
left="$1"
right="$2"
local retval
if [[ "$left" < "$right" ]]; then
retval=-1
elif [[ "$left" == "$right" ]]; then
retval=0
elif [[ "$left" > "$right" ]]; then
retval=1
else
log_failure "[Unhandled state]"
fi
echo "$retval"
;;
compare_to_ignore_case)
# pre-conditions:
[[ "$#" -lt 2 ]] && log_failure "[must be two params]" && return 1
local left right
left="${1,,}"
right="${2,,}"
local retval
if [[ "$left" < "$right" ]]; then
retval=-1
elif [[ "$left" == "$right" ]]; then
retval=0
else
retval=1
fi
echo "$retval"
;;
concat)
# pre-conditions:
[[ "$#" -lt 2 ]] && log_failure "[must be two params]" && return 1
local left right
left="$1"
right="$2"
local retval
retval+="$left$right"
echo "$retval"
;;
contains)
# pre-conditions:
[[ "$#" -lt 2 ]] && log_failure "[must be two params]" && return 1
local left right
left="$1"
right="$2"
local retval
retval=false
if [[ "$left" == *"$right"* ]]; then
retval=true
fi
echo "$retval"
;;
count)
# pre-conditions:
[[ "$#" -lt 2 ]] && log_failure "[must be two params]" && return 1
local str pattern
str="$1"
pattern="$2"
local retval
local char_str char_pat
retval=0
if [[ "${#pattern}" -le "${#str}" ]]; then
for (( i=0; i<"${#str}"; i+=1 )); do
for (( j=0; j<"${#pattern}"; j+=1 )); do
char_str="${str:$i+$j:1}"
char_pat="${pattern:$j:1}"
if [[ ! "$char_str" == "$char_pat" ]]; then
continue 2 # ugly mish-mashing! TODO: fix it
fi
done
(( retval+=1 ))
done
else
retval=0
fi
echo "$retval"
;;
ends_with)
# pre-conditions:
[[ "$#" -lt 2 ]] && log_failure "[must be two params]" && return 1
local str pattern
str="$1"
pattern="$2"
local retval
if [[ "$str" == *"$pattern" ]]; then
retval=true
else
retval=false
fi
echo "$retval"
;;
equals)
# pre-conditions:
[[ "$#" -lt 2 ]] && log_failure "[must be two params]" && return 1
local str pattern
str="$1"
pattern="$2"
local retval
if [[ "$str" == "$pattern" ]]; then
retval=true
else
retval=false
fi
echo "$retval"
;;
equals_ignore_case)
# pre-conditions:
[[ "$#" -lt 2 ]] && log_failure "[must be two params]" && return 1
local str pattern
str="${1,,}"
pattern="${2,,}"
local retval
if [[ "$str" == "$pattern" ]]; then
retval=true
else
retval=false
fi
echo "$retval"
;;
## Example:
## index_of "apple" "p" -> 1
## index_of "apple" "pl" -> 2
## index_of "apple" "p" 2 -> 2
index_of)
# pre-conditions:
[[ "$#" -lt 2 ]] && log_failure "[must be two params]" && return 1
local str="$1"
local sub_str="$2"
local retval
local temp
      temp=${str#*"$sub_str"}
retval=$(( ${#str} - ${#sub_str} - ${#temp}))
echo $retval
;;
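      # Worked example: index_of "apple" "pl"
      #   temp=${str#*"pl"} -> "e"; lengths: 5 (str) - 2 (sub_str) - 1 (temp) = 2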
is_empty)
# pre-conditions:
[[ "$#" -lt 1 ]] && log_failure "[must be one param]" && return 1
local str
str="$1"
local retval
retval=false
if [[ -z "$str" ]]; then
retval=true
fi
echo "$retval"
;;
join_fields)
[[ "$#" -lt 2 ]] && log_failure "[must be two or more params, where the separator between elements is the first param]" && return 1
local separator retval
separator="$1"
retval="$2"
shift
shift
for i; do retval="$retval$separator$i"; done
echo "$retval"
;;
## Example:
## last_index_of "apple" "p" -> 2
## last_index_of "apple" "pl" -> 2
## last_index_of "apple" "p" 2 -> 0
last_index_of)
# pre-conditions:
[[ "$#" -lt 2 ]] && log_failure "[must be two params]" && return 1
local str="$1"
local sub_str="$2"
local retval
local temp
      temp=${str##*"$sub_str"}
retval=$(( ${#str} - ${#sub_str} - ${#temp}))
echo $retval
;;
length)
# pre-conditions:
[[ "$#" -lt 1 ]] && log_failure "[must be one param]" && return 1
local str="$1"
local retval
retval="${#str}"
echo "$retval"
;;
## Example:
## replace "apple" "p" "c" -> accle
## replace "apple" "pp" "c" -> acle
replace)
# pre-conditions:
[[ "$#" -lt 3 ]] && log_failure "[must be three params]" && return 1
local original_string string_to_replace string_to_replace_with retval
original_string="$1"
string_to_replace="$2"
string_to_replace_with="$3"
retval="${original_string/$string_to_replace/$string_to_replace_with}"
echo "$retval"
;;
replace_all)
# pre-conditions:
[[ "$#" -lt 3 ]] && log_failure "[must be three params]" && return 1
      local original_string string_to_replace string_to_replace_with retval
original_string="$1"
string_to_replace="$2"
string_to_replace_with="$3"
retval="${original_string//$string_to_replace/$string_to_replace_with}"
echo "$retval"
;;
    ## Example:
    ##   starts_with "apple" "app" -> true
    ##   starts_with "apple" "ppl" -> false
starts_with)
# pre-conditions:
[[ "$#" -lt 2 ]] && log_failure "[must be two params]" && return 1
local str pattern retval
str="$1"
pattern="$2"
retval=false
[[ "$str" == "$pattern"* ]] && retval=true
echo "$retval"
;;
strip)
# pre-conditions:
[[ "$#" -lt 2 ]] && log_failure "[must be two params]" && return 1
local str strip_char
str="$1"
strip_char="$2"
(
shopt -s extglob
str="${str##*($strip_char)}"
str="${str%%*($strip_char)}"
echo "${str}"
)
;;
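      # e.g. string strip "xxhixx" "x" -> "hi"; extglob's *($strip_char)
      # matches runs of the strip character at either end.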
## Example:
## begindex
## begindex, endindex
substring)
# pre-conditions:
[[ "$#" -lt 2 ]] && log_failure "[must be two or three params]" && return 1
local str=$1
local retval
if [[ "$#" -eq 2 ]]; then
if [[ "$2" -le ${#str} ]]; then
retval=${str:$2}
else
log_failure "[begindex must be less than string length]" && return 1
fi
fi
if [[ "$#" -eq 3 ]]; then
if [[ "$2" -le $3 ]]; then
local substring_length
substring_length=$(( $3-$2 ))
retval=${str:$2:$substring_length}
else
log_failure "[begindex must be less than or equal to endindex]" && return 1
fi
fi
echo "$retval"
;;
swapcase)
# pre-conditions:
[[ "$#" -lt 1 ]] && log_failure "[must be one param]" && return 1
local retval
retval="${1~~}"
echo "$retval"
;;
title)
# pre-conditions:
[[ "$#" -lt 1 ]] && log_failure "[must be one param]" && return 1
local str=$1
local alpha="[[:alpha:]]"
local retval=$1
for (( i=0; i<${#str}; i++ )); do
if [[ ${str:$i:1} != $alpha && ${str:$(($i+1)):1} == $alpha ]]; then
local char_to_upper_case
char_to_upper_case=${str:$(($i+1)):1}
retval="${retval:0:$(($i+1))}${char_to_upper_case^^}${str:$(($i+2))}"
fi
done
retval="${retval[@]^}"
echo "$retval"
;;
to_lower_case)
# pre-conditions:
[[ "$#" -lt 1 ]] && log_failure "[must be one param]" && return 1
local retval
retval="${1,,}"
echo "$retval"
;;
to_upper_case)
# pre-conditions:
[[ "$#" -lt 1 ]] && log_failure "[must be one param]" && return 1
local retval
retval="${1^^}"
echo "$retval"
;;
trim)
# pre-conditions:
[[ "$#" -lt 1 ]] && log_failure "[must be one param]" && return 1
(
shopt -s extglob
local str="$1"
str="${str##*([[:space:]])}"
str="${str%%*([[:space:]])}"
echo "${str}"
)
;;
*)
echo $"Usage: $0 { "\
" capitalize | char_at | compare_to "\
"| compare_to_ignore_case | concat "\
"| contains | count | ends_with | equals "\
"| equals_ignore_case | index_of "\
"| is_empty | join_fields | last_index_of "\
"| length | replace | replace_all "\
"| replace_first | starts_with | strip | substring "\
"| swapcase | title | to_lower_case "\
"| to_upper_case | trim }"
exit 1
esac
}
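# Illustrative usage (hedged sketch, not part of the original library; the
# expected outputs follow from the implementations above):
#   string capitalize "apple"            # -> Apple
#   string contains "apple pie" "pie"    # -> true
#   string substring "apple" 1 3         # -> pp
#   string trim "   hi   "               # -> hi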
|
torokmark/shell_utils
|
lib/string.sh
|
Shell
|
mit
| 12,538 |
#!/bin/bash
APPS=( "foo|http://ci.example.com:8080/job/JOBNAME/lastSuccessfulBuild/artifact/foo/target/foo.war"
"bar|http://ci.example.com:8080/job/JOBNAME/lastSuccessfulBuild/artifact/bar/target/bar.war"
"baz|http://ci.example.com:8080/job/JOBNAME/lastSuccessfulBuild/artifact/baz/target/baz.war" )
BASEDIR=$(cd "$(dirname "$0")"; pwd)
source "$BASEDIR/deploy_functions.sh"
set -eu
BROKEN_ARTIFACTS=0
for app in "${APPS[@]}" ; do
artifact=${app%%|*}
url=${app##*|}
wget $url -O "$BASEDIR/$artifact.war"
if (( ! $(testWarIntegrity "$BASEDIR/$artifact.war") )); then
BROKEN_ARTIFACTS=1
echo "Broken artifact received for $artifact.war"
break
fi
done
if (( $BROKEN_ARTIFACTS )); then
  for app in "${APPS[@]}" ; do
artifact=${app%%|*}
echo "Removing $BASEDIR/$artifact.war"
rm -f "$BASEDIR/$artifact.war"
done
echo "Error in getting all artifacts. All artifacts deleted."
exit 1
fi
|
sirkkalap/tomcat_deploy_sh
|
wget_wars.sh
|
Shell
|
mit
| 935 |
#!/bin/sh
#
# Copyright (c) 2009-2015 Robert Nelson <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Split out, so build_kernel.sh and build_deb.sh can share..
. ${DIR}/version.sh
if [ -f ${DIR}/system.sh ] ; then
. ${DIR}/system.sh
fi
#Debian 7 (Wheezy): git version 1.7.10.4 and later needs "--no-edit"
unset git_opts
git_no_edit=$(LC_ALL=C git help pull | grep -m 1 -e "--no-edit" || true)
if [ ! "x${git_no_edit}" = "x" ] ; then
git_opts="--no-edit"
fi
git="git am"
#git_patchset=""
#git_opts
if [ "${RUN_BISECT}" ] ; then
git="git apply"
fi
echo "Starting patch.sh"
git_add () {
git add .
git commit -a -m 'testing patchset'
}
start_cleanup () {
git="git am --whitespace=fix"
}
cleanup () {
if [ "${number}" ] ; then
git format-patch -${number} -o ${DIR}/patches/
fi
exit 2
}
external_git () {
git_tag=""
echo "pulling: ${git_tag}"
git pull ${git_opts} ${git_patchset} ${git_tag}
}
local_patch () {
echo "dir: dir"
${git} "${DIR}/patches/dir/0001-patch.patch"
}
#external_git
#local_patch
reverts () {
echo "dir: reverts"
#regenerate="enable"
if [ "x${regenerate}" = "xenable" ] ; then
start_cleanup
fi
#my major screw up...
${git} "${DIR}/patches/reverts/0001-Revert-ARM-dts-am335x-boneblack-disable-RTC-only-sle.patch"
${git} "${DIR}/patches/reverts/0002-Revert-spi-spidev-Warn-loudly-if-instantiated-from-D.patch"
if [ "x${regenerate}" = "xenable" ] ; then
number=2
cleanup
fi
}
dts () {
echo "dir: dts"
#regenerate="enable"
if [ "x${regenerate}" = "xenable" ] ; then
start_cleanup
fi
${git} "${DIR}/patches/dts/0001-ARM-dts-omap3-beagle-add-i2c2.patch"
${git} "${DIR}/patches/dts/0002-ARM-dts-omap3-beagle-xm-spidev.patch"
${git} "${DIR}/patches/dts/0003-ARM-dts-beagle-xm-make-sure-dvi-is-enabled.patch"
${git} "${DIR}/patches/dts/0004-ARM-DTS-omap3-beagle-xm-disable-powerdown-gpios.patch"
${git} "${DIR}/patches/dts/0005-ARM-DTS-omap3-beagle.dts-enable-twl4030-power-reset.patch"
${git} "${DIR}/patches/dts/0006-arm-dts-omap4-move-emif-so-panda-es-b3-now-boots.patch"
${git} "${DIR}/patches/dts/0007-omap3-beagle-xm-ehci-works-again.patch"
${git} "${DIR}/patches/dts/0008-ARM-dts-omap3-beagle-ddc-i2c-bus-is-not-responding-d.patch"
${git} "${DIR}/patches/dts/0009-first-pass-imx6q-ccimx6sbc.patch"
${git} "${DIR}/patches/dts/0010-imx6-wl1835-base-boards.patch"
${git} "${DIR}/patches/dts/0011-imx6q-sabresd-add-support-for-wilink8-wlan-and-bluet.patch"
${git} "${DIR}/patches/dts/0012-imx6sl-evk-add-support-for-wilink8-wlan-and-bluetoot.patch"
if [ "x${regenerate}" = "xenable" ] ; then
number=12
cleanup
fi
}
wand () {
echo "dir: wand"
${git} "${DIR}/patches/wand/0001-ARM-i.MX6-Wandboard-add-wifi-bt-rfkill-driver.patch"
${git} "${DIR}/patches/wand/0002-ARM-dts-wandboard-add-binding-for-wand-rfkill-driver.patch"
}
errata () {
#regenerate="enable"
if [ "x${regenerate}" = "xenable" ] ; then
start_cleanup
fi
echo "dir: errata"
${git} "${DIR}/patches/errata/0001-hack-omap-clockk-dpll5-apply-sprz319e-2.1-erratum.patch"
if [ "x${regenerate}" = "xenable" ] ; then
number=1
cleanup
fi
}
fixes () {
echo "dir: fixes"
#regenerate="enable"
if [ "x${regenerate}" = "xenable" ] ; then
start_cleanup
fi
${git} "${DIR}/patches/fixes/0001-trusty-gcc-4.8-4.8.2-19ubuntu1-has-fix.patch"
if [ "x${regenerate}" = "xenable" ] ; then
number=1
cleanup
fi
}
pru () {
echo "dir: pru"
#regenerate="enable"
if [ "x${regenerate}" = "xenable" ] ; then
start_cleanup
fi
${git} "${DIR}/patches/pru/0001-Making-the-uio-pruss-driver-work.patch"
${git} "${DIR}/patches/pru/0002-Cleaned-up-error-reporting.patch"
if [ "x${regenerate}" = "xenable" ] ; then
number=2
cleanup
fi
}
bbb_overlays () {
echo "dir: bbb_overlays"
#regenerate="enable"
if [ "x${regenerate}" = "xenable" ] ; then
start_cleanup
fi
${git} "${DIR}/patches/bbb_overlays/0001-regmap-Introduce-regmap_get_max_register.patch"
${git} "${DIR}/patches/bbb_overlays/0002-regmap-Introduce-regmap_get_reg_stride.patch"
${git} "${DIR}/patches/bbb_overlays/0003-nvmem-Add-a-simple-NVMEM-framework-for-nvmem-provide.patch"
${git} "${DIR}/patches/bbb_overlays/0004-nvmem-Add-a-simple-NVMEM-framework-for-consumers.patch"
${git} "${DIR}/patches/bbb_overlays/0005-nvmem-Add-nvmem_device-based-consumer-apis.patch"
${git} "${DIR}/patches/bbb_overlays/0006-nvmem-Add-bindings-for-simple-nvmem-framework.patch"
${git} "${DIR}/patches/bbb_overlays/0007-nvmem-Add-simple-nvmem-mmio-consumer-helper-function.patch"
${git} "${DIR}/patches/bbb_overlays/0008-nvmem-qfprom-Add-Qualcomm-QFPROM-support.patch"
${git} "${DIR}/patches/bbb_overlays/0009-nvmem-qfprom-Add-bindings-for-qfprom.patch"
${git} "${DIR}/patches/bbb_overlays/0010-nvmem-sunxi-Move-the-SID-driver-to-the-nvmem-framewo.patch"
${git} "${DIR}/patches/bbb_overlays/0011-configfs-Implement-binary-attributes-v4.patch"
${git} "${DIR}/patches/bbb_overlays/0012-OF-DT-Overlay-configfs-interface-v5.patch"
${git} "${DIR}/patches/bbb_overlays/0013-gitignore-Ignore-DTB-files.patch"
if [ "x${regenerate}" = "xenable" ] ; then
${git} "${DIR}/patches/bbb_overlays/0014-add-PM-firmware.patch"
${git} "${DIR}/patches/bbb_overlays/0015-ARM-CUSTOM-Build-a-uImage-with-dtb-already-appended.patch"
fi
${git} "${DIR}/patches/bbb_overlays/0016-arm-omap-Proper-cleanups-for-omap_device.patch"
${git} "${DIR}/patches/bbb_overlays/0017-serial-omap-Fix-port-line-number-without-aliases.patch"
${git} "${DIR}/patches/bbb_overlays/0018-tty-omap-serial-Fix-up-platform-data-alloc.patch"
${git} "${DIR}/patches/bbb_overlays/0019-scripts-dtc-Update-to-upstream-version-with-overlay-.patch"
${git} "${DIR}/patches/bbb_overlays/0020-ARM-DT-Enable-symbols-when-CONFIG_OF_OVERLAY-is-used.patch"
${git} "${DIR}/patches/bbb_overlays/0021-of-Custom-printk-format-specifier-for-device-node.patch"
${git} "${DIR}/patches/bbb_overlays/0022-i2c-Mark-instantiated-device-nodes-with-OF_POPULATE.patch"
${git} "${DIR}/patches/bbb_overlays/0023-of-overlay-kobjectify-overlay-objects.patch"
${git} "${DIR}/patches/bbb_overlays/0024-of-overlay-global-sysfs-enable-attribute.patch"
${git} "${DIR}/patches/bbb_overlays/0025-of-overlay-add-per-overlay-sysfs-attributes.patch"
${git} "${DIR}/patches/bbb_overlays/0026-Documentation-ABI-sys-firmware-devicetree-overlays.patch"
${git} "${DIR}/patches/bbb_overlays/0027-of-Move-OF-flags-to-be-visible-even-when-CONFIG_OF.patch"
${git} "${DIR}/patches/bbb_overlays/0028-i2c-nvmem-at24-Provide-an-EEPROM-framework-interface.patch"
${git} "${DIR}/patches/bbb_overlays/0029-misc-Beaglebone-capemanager.patch"
${git} "${DIR}/patches/bbb_overlays/0030-doc-misc-Beaglebone-capemanager-documentation.patch"
${git} "${DIR}/patches/bbb_overlays/0031-doc-dt-beaglebone-cape-manager-bindings.patch"
${git} "${DIR}/patches/bbb_overlays/0032-doc-ABI-bone_capemgr-sysfs-API.patch"
${git} "${DIR}/patches/bbb_overlays/0033-MAINTAINERS-Beaglebone-capemanager-maintainer.patch"
${git} "${DIR}/patches/bbb_overlays/0034-arm-dts-Beaglebone-i2c-definitions.patch"
${git} "${DIR}/patches/bbb_overlays/0035-arm-dts-Enable-beaglebone-cape-manager.patch"
if [ "x${regenerate}" = "xenable" ] ; then
${git} "${DIR}/patches/bbb_overlays/0036-boneblack-defconfig.patch"
fi
${git} "${DIR}/patches/bbb_overlays/0037-gcl-Fix-resource-linking.patch"
${git} "${DIR}/patches/bbb_overlays/0038-of-overlay-Implement-indirect-target-support.patch"
${git} "${DIR}/patches/bbb_overlays/0039-of-unittest-Add-indirect-overlay-target-test.patch"
${git} "${DIR}/patches/bbb_overlays/0040-doc-dt-Document-the-indirect-overlay-method.patch"
${git} "${DIR}/patches/bbb_overlays/0041-of-overlay-Introduce-target-root-capability.patch"
${git} "${DIR}/patches/bbb_overlays/0042-of-unittest-Unit-tests-for-target-root-overlays.patch"
${git} "${DIR}/patches/bbb_overlays/0043-doc-dt-Document-the-target-root-overlay-method.patch"
${git} "${DIR}/patches/bbb_overlays/0044-of-dynamic-Add-__of_node_dupv.patch"
${git} "${DIR}/patches/bbb_overlays/0045-of-changesets-Introduce-changeset-helper-methods.patch"
${git} "${DIR}/patches/bbb_overlays/0046-RFC-Device-overlay-manager-PCI-USB-DT.patch"
if [ "x${regenerate}" = "xenable" ] ; then
number=46
cleanup
fi
}
dtb_makefile_append () {
sed -i -e 's:am335x-boneblack.dtb \\:am335x-boneblack.dtb \\\n\t'$device' \\:g' arch/arm/boot/dts/Makefile
}
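# Illustration (hypothetical device value): with device="am335x-bonegreen.dtb",
# the sed above turns the Makefile entry "am335x-boneblack.dtb \" into two
# lines, appending a tab-indented "am335x-bonegreen.dtb \" right after it.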
beaglebone () {
echo "dir: beaglebone/dts"
#regenerate="enable"
if [ "x${regenerate}" = "xenable" ] ; then
start_cleanup
fi
${git} "${DIR}/patches/beaglebone/dts/0001-am335x-boneblack-add-cpu0-opp-points.patch"
${git} "${DIR}/patches/beaglebone/dts/0002-dts-am335x-bone-common-fixup-leds-to-match-3.8.patch"
${git} "${DIR}/patches/beaglebone/dts/0003-arm-dts-am335x-bone-common-add-collision-and-carrier.patch"
${git} "${DIR}/patches/beaglebone/dts/0004-add-am335x-bonegreen.patch"
${git} "${DIR}/patches/beaglebone/dts/0005-add-overlay-dtb.patch"
if [ "x${regenerate}" = "xenable" ] ; then
number=5
cleanup
fi
echo "dir: beaglebone/capes"
#regenerate="enable"
if [ "x${regenerate}" = "xenable" ] ; then
start_cleanup
fi
${git} "${DIR}/patches/beaglebone/capes/0001-cape-Argus-UPS-cape-support.patch"
${git} "${DIR}/patches/beaglebone/capes/0002-Added-support-for-Replicape.patch"
${git} "${DIR}/patches/beaglebone/capes/0003-ARM-dts-am335x-boneblack-enable-wl1835mod-cape-suppo.patch"
if [ "x${regenerate}" = "xenable" ] ; then
number=3
cleanup
fi
echo "dir: beaglebone/pinmux-helper"
#regenerate="enable"
if [ "x${regenerate}" = "xenable" ] ; then
start_cleanup
fi
${git} "${DIR}/patches/beaglebone/pinmux-helper/0001-BeagleBone-pinmux-helper.patch"
${git} "${DIR}/patches/beaglebone/pinmux-helper/0002-pinmux-helper-Add-runtime-configuration-capability.patch"
${git} "${DIR}/patches/beaglebone/pinmux-helper/0003-pinmux-helper-Switch-to-using-kmalloc.patch"
${git} "${DIR}/patches/beaglebone/pinmux-helper/0004-gpio-Introduce-GPIO-OF-helper.patch"
${git} "${DIR}/patches/beaglebone/pinmux-helper/0005-Add-dir-changeable-property-to-gpio-of-helper.patch"
${git} "${DIR}/patches/beaglebone/pinmux-helper/0006-am33xx.dtsi-add-ocp-label.patch"
${git} "${DIR}/patches/beaglebone/pinmux-helper/0007-beaglebone-added-expansion-header-to-dtb.patch"
${git} "${DIR}/patches/beaglebone/pinmux-helper/0008-bone-pinmux-helper-Add-support-for-mode-device-tree-.patch"
${git} "${DIR}/patches/beaglebone/pinmux-helper/0009-pinmux-helper-add-P8_37_pinmux-P8_38_pinmux.patch"
${git} "${DIR}/patches/beaglebone/pinmux-helper/0010-pinmux-helper-hdmi.patch"
${git} "${DIR}/patches/beaglebone/pinmux-helper/0011-pinmux-helper-can1.patch"
${git} "${DIR}/patches/beaglebone/pinmux-helper/0012-Remove-CONFIG_EXPERIMENTAL-dependency-on-CONFIG_GPIO.patch"
${git} "${DIR}/patches/beaglebone/pinmux-helper/0013-pinmux-helper-add-P9_19_pinmux-P9_20_pinmux.patch"
${git} "${DIR}/patches/beaglebone/pinmux-helper/0014-gpio-of-helper-idr_alloc.patch"
if [ "x${regenerate}" = "xenable" ] ; then
number=14
cleanup
fi
echo "dir: beaglebone/eqep"
#regenerate="enable"
if [ "x${regenerate}" = "xenable" ] ; then
start_cleanup
fi
${git} "${DIR}/patches/beaglebone/eqep/0001-Provides-a-sysfs-interface-to-the-eQEP-hardware-on-t.patch"
${git} "${DIR}/patches/beaglebone/eqep/0002-tieqep.c-devres-remove-devm_request_and_ioremap.patch"
if [ "x${regenerate}" = "xenable" ] ; then
number=2
cleanup
fi
# echo "dir: beaglebone/hdmi-audio"
# #regenerate="enable"
# if [ "x${regenerate}" = "xenable" ] ; then
# start_cleanup
# fi
# ${git} "${DIR}/patches/beaglebone/hdmi-audio/0001-ASoC-davinci-mcasp-Calculate-BCLK-using-TDM-slots-an.patch"
# ${git} "${DIR}/patches/beaglebone/hdmi-audio/0002-ASoC-davinci-mcasp-Channel-count-constraints-for-mul.patch"
# ${git} "${DIR}/patches/beaglebone/hdmi-audio/0003-ASoC-davinci-macsp-Optimize-implicit-BLCK-sample-rat.patch"
# ${git} "${DIR}/patches/beaglebone/hdmi-audio/0004-drm-tilcdc-Fix-module-unloading.patch"
# ${git} "${DIR}/patches/beaglebone/hdmi-audio/0005-drm-tilcdc-Remove-tilcdc-slave-support-for-tda998x-d.patch"
# ${git} "${DIR}/patches/beaglebone/hdmi-audio/0006-drm-tilcdc-Add-support-for-external-tda998x-encoder.patch"
# ${git} "${DIR}/patches/beaglebone/hdmi-audio/0007-drm-tilcdc-Add-DRM_TILCDC_SLAVE_COMPAT-for-ti-tilcdc.patch"
# ${git} "${DIR}/patches/beaglebone/hdmi-audio/0008-drm-tilcdc-Force-building-of-DRM_TILCDC_SLAVE_COMPAT.patch"
# ${git} "${DIR}/patches/beaglebone/hdmi-audio/0009-ARM-dts-am335x-boneblack-Use-new-binding-for-HDMI.patch"
# ${git} "${DIR}/patches/beaglebone/hdmi-audio/0010-ARM-dts-am335x-boneblack-Add-HDMI-audio-support-HACK.patch"
# ${git} "${DIR}/patches/beaglebone/hdmi-audio/0011-ASoC-hdmi-codec-lib-Add-hdmi-codec-lib-for-external-.patch"
# ${git} "${DIR}/patches/beaglebone/hdmi-audio/0012-drm-i2c-tda998x-HACK-Implement-primitive-HDMI-audio-.patch"
# if [ "x${regenerate}" = "xenable" ] ; then
# number=12
# cleanup
# fi
#This has to be last...
echo "dir: beaglebone/dtbs"
#regenerate="enable"
if [ "x${regenerate}" = "xenable" ] ; then
patch -p1 < "${DIR}/patches/beaglebone/dtbs/0001-sync-am335x-peripheral-pinmux.patch"
exit 2
fi
${git} "${DIR}/patches/beaglebone/dtbs/0001-sync-am335x-peripheral-pinmux.patch"
####
#dtb makefile
#regenerate="enable"
if [ "x${regenerate}" = "xenable" ] ; then
device="am335x-arduino-tre.dtb" ; dtb_makefile_append
device="am335x-bone-can0.dtb" ; dtb_makefile_append
device="am335x-bone-cape-bone-argus.dtb" ; dtb_makefile_append
device="am335x-boneblack-bbb-exp-c.dtb" ; dtb_makefile_append
device="am335x-boneblack-bbb-exp-r.dtb" ; dtb_makefile_append
device="am335x-boneblack-can0.dtb" ; dtb_makefile_append
device="am335x-boneblack-cape-bone-argus.dtb" ; dtb_makefile_append
device="am335x-boneblack-replicape.dtb" ; dtb_makefile_append
device="am335x-boneblack-wl1835mod.dtb" ; dtb_makefile_append
device="am335x-boneblack-universal.dtb" ; dtb_makefile_append
git commit -a -m 'auto generated: capes: add dtbs to makefile' -s
git format-patch -1 -o ../patches/beaglebone/generated/
exit 2
else
${git} "${DIR}/patches/beaglebone/generated/0001-auto-generated-capes-add-dtbs-to-makefile.patch"
fi
echo "dir: beaglebone/phy"
#regenerate="enable"
if [ "x${regenerate}" = "xenable" ] ; then
start_cleanup
fi
${git} "${DIR}/patches/beaglebone/phy/0001-cpsw-Add-support-for-byte-queue-limits.patch"
${git} "${DIR}/patches/beaglebone/phy/0002-cpsw-napi-polling-of-64-is-good-for-gigE-less-good-f.patch"
${git} "${DIR}/patches/beaglebone/phy/0003-cpsw-search-for-phy.patch"
${git} "${DIR}/patches/beaglebone/capes/0004-Revert-Replicape-dtb-to-work-with-cape-manager.patch"
if [ "x${regenerate}" = "xenable" ] ; then
number=3
cleanup
fi
}
etnaviv () {
echo "dir: etnaviv"
#regenerate="enable"
if [ "x${regenerate}" = "xenable" ] ; then
start_cleanup
patch -p1 < "${DIR}/patches/etnaviv/0001-staging-etnaviv-add-drm-driver.patch"
exit 2
cd ~/linux-src
git checkout v4.0-rc6 -b tmp
git pull --no-edit git://git.pengutronix.de/git/lst/linux.git etnaviv-for-upstream
meld KERNEL/Documentation/devicetree/bindings/drm/etnaviv/etnaviv-drm.txt ~/linux-src/Documentation/devicetree/bindings/drm/etnaviv/etnaviv-drm.txt
meld KERNEL/Documentation/devicetree/bindings/vendor-prefixes.txt ~/linux-src/Documentation/devicetree/bindings/vendor-prefixes.txt
meld KERNEL/arch/arm/boot/dts/imx6dl.dtsi ~/linux-src/arch/arm/boot/dts/imx6dl.dtsi
meld KERNEL/arch/arm/boot/dts/imx6q.dtsi ~/linux-src/arch/arm/boot/dts/imx6q.dtsi
meld KERNEL/arch/arm/boot/dts/imx6qdl.dtsi ~/linux-src/arch/arm/boot/dts/imx6qdl.dtsi
meld KERNEL/drivers/staging/Kconfig ~/linux-src/drivers/staging/Kconfig
meld KERNEL/drivers/staging/Makefile ~/linux-src/drivers/staging/Makefile
meld KERNEL/drivers/staging/etnaviv/ ~/linux-src/drivers/staging/etnaviv/
meld KERNEL/include/uapi/drm/etnaviv_drm.h ~/linux-src/include/uapi/drm/etnaviv_drm.h
fi
#regenerate="enable"
if [ "x${regenerate}" = "xenable" ] ; then
start_cleanup
fi
${git} "${DIR}/patches/etnaviv/0001-staging-etnaviv-add-drm-driver.patch"
${git} "${DIR}/patches/etnaviv/0002-etnaviv-wheezy-build-fix.patch"
${git} "${DIR}/patches/etnaviv/0003-Revert-iommu-Remove-domain_init-and-domain_free-iomm.patch"
if [ "x${regenerate}" = "xenable" ] ; then
number=3
cleanup
fi
# echo "dir: etnaviv/fixes"
}
reverts
dts
wand
errata
fixes
pru
bbb_overlays
beaglebone
etnaviv
packaging_setup () {
cp -v "${DIR}/3rdparty/packaging/builddeb" "${DIR}/KERNEL/scripts/package"
git commit -a -m 'packaging: sync with mainline' -s
git format-patch -1 -o "${DIR}/patches/packaging"
exit 2
}
packaging () {
echo "dir: packaging"
#regenerate="enable"
if [ "x${regenerate}" = "xenable" ] ; then
start_cleanup
fi
#${git} "${DIR}/patches/packaging/0001-packaging-sync-with-mainline.patch"
${git} "${DIR}/patches/packaging/0002-deb-pkg-install-dtbs-in-linux-image-package.patch"
#${git} "${DIR}/patches/packaging/0003-deb-pkg-no-dtbs_install.patch"
if [ "x${regenerate}" = "xenable" ] ; then
number=3
cleanup
fi
}
#packaging_setup
packaging
echo "patch.sh ran successfully"
|
eliasbakken/linux-dev
|
patch.sh
|
Shell
|
mit
| 18,144 |
function toggl::authenticate () {
echo "Get your personal API token from Toggl profile settings page and enter it below."
while [[ ${#token} -lt 20 ]]; do
echo "Personal Toggl API token:"
read -r -s token
done
mkdir -p ~/.toggl
echo "${token}" > ~/.toggl/api-token
  # TODO: ~/.toggl not mounted by default!
}
function toggl::expose_api_token () {
toggl_api_token=$(cat ~/.toggl/api-token 2> /dev/null || :)
if [[ ! "${toggl_api_token}" ]]; then
echo "Toggl API token is missing. You need to authenticate first."
toggl::authenticate
toggl_api_token=$(cat ~/.toggl/api-token 2> /dev/null || :)
fi
}
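# Hedged usage sketch (not part of the plugin): after exposing the token you
# could call the Toggl API with basic auth, e.g.
#   toggl::expose_api_token
#   curl -s -u "${toggl_api_token}:api_token" https://api.track.toggl.com/api/v9/me
# The endpoint above is illustrative; "token:api_token" is Toggl's documented
# basic-auth convention.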
|
TaitoUnited/taito-cli
|
plugins/toggl/lib/all.bash
|
Shell
|
mit
| 637 |
sudo update-rc.d runPelmetcam.init remove
|
martinohanlon/pelmetcam
|
dontRunAtStartUp.sh
|
Shell
|
mit
| 42 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for CESA-2013:0506
#
# Security announcement date: 2013-03-09 00:42:54 UTC
# Script generation date: 2017-01-01 21:10:41 UTC
#
# Operating System: CentOS 6
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - samba4.x86_64:4.0.0-55.el6.rc4
# - samba4-client.x86_64:4.0.0-55.el6.rc4
# - samba4-common.x86_64:4.0.0-55.el6.rc4
# - samba4-dc.x86_64:4.0.0-55.el6.rc4
# - samba4-dc-libs.x86_64:4.0.0-55.el6.rc4
# - samba4-devel.x86_64:4.0.0-55.el6.rc4
# - samba4-libs.x86_64:4.0.0-55.el6.rc4
# - samba4-pidl.x86_64:4.0.0-55.el6.rc4
# - samba4-python.x86_64:4.0.0-55.el6.rc4
# - samba4-swat.x86_64:4.0.0-55.el6.rc4
# - samba4-test.x86_64:4.0.0-55.el6.rc4
# - samba4-winbind.x86_64:4.0.0-55.el6.rc4
# - samba4-winbind-clients.x86_64:4.0.0-55.el6.rc4
# - samba4-winbind-krb5-locator.x86_64:4.0.0-55.el6.rc4
#
# Last versions recommended by security team:
# - samba4.x86_64:4.2.10-7.el6_8
# - samba4-client.x86_64:4.2.10-7.el6_8
# - samba4-common.x86_64:4.2.10-7.el6_8
# - samba4-dc.x86_64:4.2.10-7.el6_8
# - samba4-dc-libs.x86_64:4.2.10-7.el6_8
# - samba4-devel.x86_64:4.2.10-7.el6_8
# - samba4-libs.x86_64:4.2.10-7.el6_8
# - samba4-pidl.x86_64:4.2.10-7.el6_8
# - samba4-python.x86_64:4.2.10-7.el6_8
# - samba4-swat.x86_64:4.0.0-68.el6_7.rc4
# - samba4-test.x86_64:4.2.10-7.el6_8
# - samba4-winbind.x86_64:4.2.10-7.el6_8
# - samba4-winbind-clients.x86_64:4.2.10-7.el6_8
# - samba4-winbind-krb5-locator.x86_64:4.2.10-7.el6_8
#
# CVE List:
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo yum install samba4.x86_64-4.2.10 -y
sudo yum install samba4-client.x86_64-4.2.10 -y
sudo yum install samba4-common.x86_64-4.2.10 -y
sudo yum install samba4-dc.x86_64-4.2.10 -y
sudo yum install samba4-dc-libs.x86_64-4.2.10 -y
sudo yum install samba4-devel.x86_64-4.2.10 -y
sudo yum install samba4-libs.x86_64-4.2.10 -y
sudo yum install samba4-pidl.x86_64-4.2.10 -y
sudo yum install samba4-python.x86_64-4.2.10 -y
sudo yum install samba4-swat.x86_64-4.0.0 -y
sudo yum install samba4-test.x86_64-4.2.10 -y
sudo yum install samba4-winbind.x86_64-4.2.10 -y
sudo yum install samba4-winbind-clients.x86_64-4.2.10 -y
sudo yum install samba4-winbind-krb5-locator.x86_64-4.2.10 -y
|
Cyberwatch/cbw-security-fixes
|
CentOS_6/x86_64/2013/CESA-2013:0506.sh
|
Shell
|
mit
| 2,377 |
#!/usr/bin/env bash
mkdir -p out
javac -d out \
-cp ./src:$LOG4J2_HOME/log4j-core-2.11.0.jar:$LOG4J2_HOME/log4j-api-2.11.0.jar \
src/HelloLog4J2ConfigCLI.java
cp ./src/log4j2alt.xml ./out
|
jbannick/hellokata-java
|
src/log4j2/hellolog4j2configCLI/build.sh
|
Shell
|
mit
| 198 |
#!/usr/bin/env sh
# This example script uses "nTunes" to iterate through the track list matching
# CRITERIA, and uses the result as the playlist for the Icecast server.
# The hostname and port of our Node Icecast server.
ICECAST=localhost:5555
# The filter command to send to nTunes. In this case, get all my "Pink Floyd" songs.
CRITERIA="/source/1/playlist/1/track/artist=Pink%20Floyd"
# The hostname and port of the 'nTunes' server.
NTUNES=localhost:8888
# The concatenated 'curl' command to use when communicating with the nTunes server.
CURL="curl --silent $NTUNES$CRITERIA"
while (true);
do
# First, get the total count of the selected criteria.
  N=$(( $($CURL/count) + 1 ));
# Check if 'currentSong' exists. If it does, then load the number from that
# as a lazy 'saved-state' on server reboots. Otherwise, just set 'i' to 1.
if [ -e "$PWD/currentSong" ]
then
i=`cat "$PWD/currentSong"`;
echo "Loaded '$i' from 'currentSong'" >&2;
else
echo "'currentSong' does not exist, setting index to 1..." >&2;
i="1";
fi;
while [ $i -lt $N ]
do
# Save the current state, in case we need to reboot the server.
echo $i > "$PWD/currentSong";
# Get the location, name, artist and album of the track.
LOCATION=`$CURL/$i/location?format=txt`
NAME=`$CURL/$i/name?format=txt`
ARTIST=`$CURL/$i/artist?format=txt`
ALBUM=`$CURL/$i/album?format=txt`
DURATION=`$CURL/$i/duration?format=txt`
# Set a 'metadata' event to update the current track
curl --silent -X POST -u "node:rules" \
-H "X-Title: $NAME" \
-H "X-Artist: $ARTIST" \
-H "X-Album: $ALBUM" \
-H "X-Duration: $DURATION" \
"$ICECAST/metadata" > /dev/null;
# Use 'ffmpeg' to decode the input file to raw 16-bit PCM, 44100
ffmpeg -i "$LOCATION" -f s16le -acodec pcm_s16le -ar 44100 -ac 2 - 2>/dev/null;
    i=$(( i + 1 ));
if [ $i -eq $N ]
then
rm "$PWD/currentSong"
fi
done;
# Pipe stdout of the neverending 'while' statement (i.e. the continous
# ffmpeg instances) to stdin of our Node server.
done | node server.js;
|
TooTallNate/NodeFloyd
|
decodeFromNTunes.sh
|
Shell
|
mit
| 2,102 |
#!/bin/bash
set -e
#####
# Unit Tests
dart test/test.dart
#####
# Type Analysis
echo
echo "dartanalyzer lib/curry.dart"
dartanalyzer lib/curry.dart
|
eee-c/dart-curry
|
test/run.sh
|
Shell
|
mit
| 152 |
#!/bin/bash
# NOTE: Adding apps to the Dock should happen in the scripts where said apps are installed.
## Disable animations when opening an application from the Dock.
defaults write com.apple.dock launchanim -bool FALSE
## Automatically hide and show the Dock.
defaults write com.apple.dock autohide -bool TRUE
# We use dockutil to add and remove icons in the Mac OS X dock.
## Install dockutil
brew install --quiet dockutil
## Remove rarely-used Dock items.
for dock_item in Siri Launchpad Contacts Notes Reminders Maps Messages FaceTime iBooks Podcasts TV ; do
dockutil --remove "$dock_item" 1>/dev/null
done
## Add Activity Monitor to the Dock.
dockutil --find "Activity Monitor" 1>/dev/null \
|| dockutil --add "/System/Applications/Utilities/Activity Monitor.app" --position end
## Restart the Dock.
killall Dock
|
boochtek/mac_config
|
os/dock.sh
|
Shell
|
mit
| 835 |
#!/bin/bash
source inc_vars.sh
# Script for multiple vector reconstruction tests
#------------------------------------------
echo " Basic Parameters "
echo "---------------------------------"
echo " "
#cat par/simul.par
# Reconstruction methods to test (written to line 3 of par/simul.par);
# line 5 is set to "HC" for every run.
recon_methods=(
    "nonehx"
    "nonetr"
    "+1perhx+2lintrv+"
    "+1lsqhxe+2lintrv+"
    "+1perhx+2lintrv+3lsqhxe+"
    "+1wht+2wach+"
    "+1lsqtrc+2wach+"
    "+1rbftr+2wach+"
    "+1rbfetr+2wach+"
    "+1pertr+2wach+"
    "+1rbfhx+2lintrv+"
    "+1kls+2lintrv+"
)
for method in "${recon_methods[@]}"; do
    #Recon method
    awk -v m="$method" '{ if ( NR == 3 ) { print m;} else {print $0;} }' par/simul.par > par/simul2.par
    awk '{ if ( NR == 5 ) { print "HC";} else {print $0;} }' par/simul2.par > par/simul.par
    # Repeat each configuration six times, as in the original unrolled script
    for run in 1 2 3 4 5 6; do
        ./runngrids.sh
    done
done
|
pedrospeixoto/iModel
|
sh/run_nvecrecon.sh
|
Shell
|
mit
| 3,792 |
#!/usr/bin/env bash
# Enable bash completion
if [[ -f /usr/local/etc/bash_completion ]]
then
# shellcheck disable=SC1091
source /usr/local/etc/bash_completion
fi
|
fgimian/macbuild
|
files/dotfiles/.bash_profile.d/bash_completion.sh
|
Shell
|
mit
| 167 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for RHSA-2014:1087
#
# Security announcement date: 2014-08-21 16:02:14 UTC
# Script generation date: 2017-01-01 21:15:28 UTC
#
# Operating System: Red Hat 6
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - antlr-eap6.noarch:2.7.7-17.redhat_4.1.ep6.el6
# - apache-commons-collections-eap6.noarch:3.2.1-15.redhat_3.1.ep6.el6
# - apache-commons-collections-tomcat-eap6.noarch:3.2.1-15.redhat_3.1.ep6.el6
# - apache-commons-daemon-eap6.noarch:1.0.15-5.redhat_1.ep6.el6
# - apache-commons-logging-eap6.noarch:1.1.1-7.9_redhat_1.ep6.el6
# - apache-commons-logging-tomcat-eap6.noarch:1.1.1-7.9_redhat_1.ep6.el6
# - apache-commons-pool-eap6.noarch:1.6-7.redhat_6.1.ep6.el6
# - apache-commons-pool-tomcat-eap6.noarch:1.6-7.redhat_6.1.ep6.el6
# - dom4j-eap6.noarch:1.6.1-20.redhat_6.1.ep6.el6
# - ecj3.noarch:3.7.2-9.redhat_3.1.ep6.el6
# - hibernate4-c3p0-eap6.noarch:4.2.14-3.SP1_redhat_1.1.ep6.el6
# - hibernate4-core-eap6.noarch:4.2.14-3.SP1_redhat_1.1.ep6.el6
# - hibernate4-eap6.noarch:4.2.14-3.SP1_redhat_1.1.ep6.el6
# - hibernate4-entitymanager-eap6.noarch:4.2.14-3.SP1_redhat_1.1.ep6.el6
# - hibernate4-envers-eap6.noarch:4.2.14-3.SP1_redhat_1.1.ep6.el6
# - hibernate4-infinispan-eap6.noarch:4.2.14-3.SP1_redhat_1.1.ep6.el6
# - javassist-eap6.noarch:3.18.1-1.GA_redhat_1.1.ep6.el6
# - jboss-logging.noarch:3.1.4-1.GA_redhat_1.1.ep6.el6
# - jboss-transaction-api_1.1_spec.noarch:1.0.1-12.Final_redhat_2.2.ep6.el6
# - mod_cluster.noarch:1.2.9-1.Final_redhat_1.1.ep6.el6
# - mod_cluster-tomcat6.noarch:1.2.9-1.Final_redhat_1.1.ep6.el6
# - mod_cluster-tomcat7.noarch:1.2.9-1.Final_redhat_1.1.ep6.el6
# - storeconfig-tc6.noarch:0.0.1-7.Alpha3_redhat_12.3.ep6.el6
# - storeconfig-tc7.noarch:0.0.1-7.Alpha3_redhat_12.5.ep6.el6
# - tomcat6.noarch:6.0.41-5_patch_02.ep6.el6
# - tomcat6-admin-webapps.noarch:6.0.41-5_patch_02.ep6.el6
# - tomcat6-docs-webapp.noarch:6.0.41-5_patch_02.ep6.el6
# - tomcat6-el-2.1-api.noarch:6.0.41-5_patch_02.ep6.el6
# - tomcat6-javadoc.noarch:6.0.41-5_patch_02.ep6.el6
# - tomcat6-jsp-2.1-api.noarch:6.0.41-5_patch_02.ep6.el6
# - tomcat6-lib.noarch:6.0.41-5_patch_02.ep6.el6
# - tomcat6-log4j.noarch:6.0.41-5_patch_02.ep6.el6
# - tomcat6-servlet-2.5-api.noarch:6.0.41-5_patch_02.ep6.el6
# - tomcat6-webapps.noarch:6.0.41-5_patch_02.ep6.el6
# - tomcat7.noarch:7.0.54-6_patch_02.ep6.el6
# - tomcat7-admin-webapps.noarch:7.0.54-6_patch_02.ep6.el6
# - tomcat7-docs-webapp.noarch:7.0.54-6_patch_02.ep6.el6
# - tomcat7-el-2.2-api.noarch:7.0.54-6_patch_02.ep6.el6
# - tomcat7-javadoc.noarch:7.0.54-6_patch_02.ep6.el6
# - tomcat7-jsp-2.2-api.noarch:7.0.54-6_patch_02.ep6.el6
# - tomcat7-lib.noarch:7.0.54-6_patch_02.ep6.el6
# - tomcat7-log4j.noarch:7.0.54-6_patch_02.ep6.el6
# - tomcat7-servlet-3.0-api.noarch:7.0.54-6_patch_02.ep6.el6
# - tomcat7-webapps.noarch:7.0.54-6_patch_02.ep6.el6
# - apache-commons-daemon-jsvc-eap6.x86_64:1.0.15-6.redhat_2.ep6.el6
# - apache-commons-daemon-jsvc-eap6-debuginfo.x86_64:1.0.15-6.redhat_2.ep6.el6
# - httpd.x86_64:2.2.26-35.ep6.el6
# - httpd-debuginfo.x86_64:2.2.26-35.ep6.el6
# - httpd-devel.x86_64:2.2.26-35.ep6.el6
# - httpd-manual.x86_64:2.2.26-35.ep6.el6
# - httpd-tools.x86_64:2.2.26-35.ep6.el6
# - mod_cluster-native.x86_64:1.2.9-3.Final_redhat_2.ep6.el6
# - mod_cluster-native-debuginfo.x86_64:1.2.9-3.Final_redhat_2.ep6.el6
# - mod_jk-ap22.x86_64:1.2.40-2.redhat_1.ep6.el6
# - mod_jk-debuginfo.x86_64:1.2.40-2.redhat_1.ep6.el6
# - mod_jk-manual.x86_64:1.2.40-2.redhat_1.ep6.el6
# - mod_rt.x86_64:2.4.1-6.GA.ep6.el6
# - mod_rt-debuginfo.x86_64:2.4.1-6.GA.ep6.el6
# - mod_snmp.x86_64:2.4.1-13.GA.ep6.el6
# - mod_snmp-debuginfo.x86_64:2.4.1-13.GA.ep6.el6
# - mod_ssl.x86_64:2.2.26-35.ep6.el6
# - tomcat-native.x86_64:1.1.30-2.redhat_1.ep6.el6
# - tomcat-native-debuginfo.x86_64:1.1.30-2.redhat_1.ep6.el6
#
# Last versions recommended by security team:
# - antlr-eap6.noarch:2.7.7-17.redhat_4.1.ep6.el6
# - apache-commons-collections-eap6.noarch:3.2.1-15.redhat_3.1.ep6.el6
# - apache-commons-collections-tomcat-eap6.noarch:3.2.1-15.redhat_3.1.ep6.el6
# - apache-commons-daemon-eap6.noarch:1.0.15-5.redhat_1.ep6.el6
# - apache-commons-logging-eap6.noarch:1.1.1-7.9_redhat_1.ep6.el6
# - apache-commons-logging-tomcat-eap6.noarch:1.1.1-7.9_redhat_1.ep6.el6
# - apache-commons-pool-eap6.noarch:1.6-7.redhat_6.1.ep6.el6
# - apache-commons-pool-tomcat-eap6.noarch:1.6-7.redhat_6.1.ep6.el6
# - dom4j-eap6.noarch:1.6.1-20.redhat_6.1.ep6.el6
# - ecj3.noarch:3.7.2-9.redhat_3.1.ep6.el6
# - hibernate4-c3p0-eap6.noarch:4.2.14-3.SP1_redhat_1.1.ep6.el6
# - hibernate4-core-eap6.noarch:4.2.14-3.SP1_redhat_1.1.ep6.el6
# - hibernate4-eap6.noarch:4.2.14-3.SP1_redhat_1.1.ep6.el6
# - hibernate4-entitymanager-eap6.noarch:4.2.14-3.SP1_redhat_1.1.ep6.el6
# - hibernate4-envers-eap6.noarch:4.2.14-3.SP1_redhat_1.1.ep6.el6
# - hibernate4-infinispan-eap6.noarch:4.2.14-3.SP1_redhat_1.1.ep6.el6
# - javassist-eap6.noarch:3.18.1-1.GA_redhat_1.1.ep6.el6
# - jboss-logging.noarch:3.1.2-3.GA_redhat_1.ep6.el6
# - jboss-transaction-api_1.1_spec.noarch:1.0.1-6.Final_redhat_2.ep6.el6
# - mod_cluster.noarch:1.2.9-1.Final_redhat_1.1.ep6.el6
# - mod_cluster-tomcat6.noarch:1.2.9-1.Final_redhat_1.1.ep6.el6
# - mod_cluster-tomcat7.noarch:1.2.9-1.Final_redhat_1.1.ep6.el6
# - storeconfig-tc6.noarch:0.0.1-7.Alpha3_redhat_12.3.ep6.el6
# - storeconfig-tc7.noarch:0.0.1-7.Alpha3_redhat_12.5.ep6.el6
# - tomcat6.noarch:6.0.41-5_patch_02.ep6.el6
# - tomcat6-admin-webapps.noarch:6.0.41-5_patch_02.ep6.el6
# - tomcat6-docs-webapp.noarch:6.0.41-5_patch_02.ep6.el6
# - tomcat6-el-2.1-api.noarch:6.0.41-5_patch_02.ep6.el6
# - tomcat6-javadoc.noarch:6.0.41-5_patch_02.ep6.el6
# - tomcat6-jsp-2.1-api.noarch:6.0.41-5_patch_02.ep6.el6
# - tomcat6-lib.noarch:6.0.41-5_patch_02.ep6.el6
# - tomcat6-log4j.noarch:6.0.41-5_patch_02.ep6.el6
# - tomcat6-servlet-2.5-api.noarch:6.0.41-5_patch_02.ep6.el6
# - tomcat6-webapps.noarch:6.0.41-5_patch_02.ep6.el6
# - tomcat7.noarch:7.0.54-6_patch_02.ep6.el6
# - tomcat7-admin-webapps.noarch:7.0.54-6_patch_02.ep6.el6
# - tomcat7-docs-webapp.noarch:7.0.54-6_patch_02.ep6.el6
# - tomcat7-el-2.2-api.noarch:7.0.54-6_patch_02.ep6.el6
# - tomcat7-javadoc.noarch:7.0.54-6_patch_02.ep6.el6
# - tomcat7-jsp-2.2-api.noarch:7.0.54-6_patch_02.ep6.el6
# - tomcat7-lib.noarch:7.0.54-6_patch_02.ep6.el6
# - tomcat7-log4j.noarch:7.0.54-6_patch_02.ep6.el6
# - tomcat7-servlet-3.0-api.noarch:7.0.54-6_patch_02.ep6.el6
# - tomcat7-webapps.noarch:7.0.54-6_patch_02.ep6.el6
# - apache-commons-daemon-jsvc-eap6.x86_64:1.0.15-6.redhat_2.ep6.el6
# - apache-commons-daemon-jsvc-eap6-debuginfo.x86_64:1.0.15-6.redhat_2.ep6.el6
# - httpd.x86_64:2.2.26-54.ep6.el6
# - httpd-debuginfo.x86_64:2.2.26-54.ep6.el6
# - httpd-devel.x86_64:2.2.26-54.ep6.el6
# - httpd-manual.x86_64:2.2.26-54.ep6.el6
# - httpd-tools.x86_64:2.2.26-54.ep6.el6
# - mod_cluster-native.x86_64:1.2.13-3.Final_redhat_2.ep6.el6
# - mod_cluster-native-debuginfo.x86_64:1.2.13-3.Final_redhat_2.ep6.el6
# - mod_jk-ap22.x86_64:1.2.41-2.redhat_4.ep6.el6
# - mod_jk-debuginfo.x86_64:1.2.41-2.redhat_4.ep6.el6
# - mod_jk-manual.x86_64:1.2.41-2.redhat_3.ep6.el6
# - mod_rt.x86_64:2.4.1-6.GA.ep6.el6
# - mod_rt-debuginfo.x86_64:2.4.1-6.GA.ep6.el6
# - mod_snmp.x86_64:2.4.1-13.GA.ep6.el6
# - mod_snmp-debuginfo.x86_64:2.4.1-13.GA.ep6.el6
# - mod_ssl.x86_64:2.2.26-54.ep6.el6
# - tomcat-native.x86_64:1.1.34-5.redhat_1.ep6.el6
# - tomcat-native-debuginfo.x86_64:1.1.34-5.redhat_1.ep6.el6
#
# CVE List:
# - CVE-2013-4590
# - CVE-2014-0118
# - CVE-2014-0119
# - CVE-2014-0226
# - CVE-2014-0231
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo yum install antlr-eap6.noarch-2.7.7 -y
sudo yum install apache-commons-collections-eap6.noarch-3.2.1 -y
sudo yum install apache-commons-collections-tomcat-eap6.noarch-3.2.1 -y
sudo yum install apache-commons-daemon-eap6.noarch-1.0.15 -y
sudo yum install apache-commons-logging-eap6.noarch-1.1.1 -y
sudo yum install apache-commons-logging-tomcat-eap6.noarch-1.1.1 -y
sudo yum install apache-commons-pool-eap6.noarch-1.6 -y
sudo yum install apache-commons-pool-tomcat-eap6.noarch-1.6 -y
sudo yum install dom4j-eap6.noarch-1.6.1 -y
sudo yum install ecj3.noarch-3.7.2 -y
sudo yum install hibernate4-c3p0-eap6.noarch-4.2.14 -y
sudo yum install hibernate4-core-eap6.noarch-4.2.14 -y
sudo yum install hibernate4-eap6.noarch-4.2.14 -y
sudo yum install hibernate4-entitymanager-eap6.noarch-4.2.14 -y
sudo yum install hibernate4-envers-eap6.noarch-4.2.14 -y
sudo yum install hibernate4-infinispan-eap6.noarch-4.2.14 -y
sudo yum install javassist-eap6.noarch-3.18.1 -y
sudo yum install jboss-logging.noarch-3.1.2 -y
sudo yum install jboss-transaction-api_1.1_spec.noarch-1.0.1 -y
sudo yum install mod_cluster.noarch-1.2.9 -y
sudo yum install mod_cluster-tomcat6.noarch-1.2.9 -y
sudo yum install mod_cluster-tomcat7.noarch-1.2.9 -y
sudo yum install storeconfig-tc6.noarch-0.0.1 -y
sudo yum install storeconfig-tc7.noarch-0.0.1 -y
sudo yum install tomcat6.noarch-6.0.41 -y
sudo yum install tomcat6-admin-webapps.noarch-6.0.41 -y
sudo yum install tomcat6-docs-webapp.noarch-6.0.41 -y
sudo yum install tomcat6-el-2.1-api.noarch-6.0.41 -y
sudo yum install tomcat6-javadoc.noarch-6.0.41 -y
sudo yum install tomcat6-jsp-2.1-api.noarch-6.0.41 -y
sudo yum install tomcat6-lib.noarch-6.0.41 -y
sudo yum install tomcat6-log4j.noarch-6.0.41 -y
sudo yum install tomcat6-servlet-2.5-api.noarch-6.0.41 -y
sudo yum install tomcat6-webapps.noarch-6.0.41 -y
sudo yum install tomcat7.noarch-7.0.54 -y
sudo yum install tomcat7-admin-webapps.noarch-7.0.54 -y
sudo yum install tomcat7-docs-webapp.noarch-7.0.54 -y
sudo yum install tomcat7-el-2.2-api.noarch-7.0.54 -y
sudo yum install tomcat7-javadoc.noarch-7.0.54 -y
sudo yum install tomcat7-jsp-2.2-api.noarch-7.0.54 -y
sudo yum install tomcat7-lib.noarch-7.0.54 -y
sudo yum install tomcat7-log4j.noarch-7.0.54 -y
sudo yum install tomcat7-servlet-3.0-api.noarch-7.0.54 -y
sudo yum install tomcat7-webapps.noarch-7.0.54 -y
sudo yum install apache-commons-daemon-jsvc-eap6.x86_64-1.0.15 -y
sudo yum install apache-commons-daemon-jsvc-eap6-debuginfo.x86_64-1.0.15 -y
sudo yum install httpd.x86_64-2.2.26 -y
sudo yum install httpd-debuginfo.x86_64-2.2.26 -y
sudo yum install httpd-devel.x86_64-2.2.26 -y
sudo yum install httpd-manual.x86_64-2.2.26 -y
sudo yum install httpd-tools.x86_64-2.2.26 -y
sudo yum install mod_cluster-native.x86_64-1.2.13 -y
sudo yum install mod_cluster-native-debuginfo.x86_64-1.2.13 -y
sudo yum install mod_jk-ap22.x86_64-1.2.41 -y
sudo yum install mod_jk-debuginfo.x86_64-1.2.41 -y
sudo yum install mod_jk-manual.x86_64-1.2.41 -y
sudo yum install mod_rt.x86_64-2.4.1 -y
sudo yum install mod_rt-debuginfo.x86_64-2.4.1 -y
sudo yum install mod_snmp.x86_64-2.4.1 -y
sudo yum install mod_snmp-debuginfo.x86_64-2.4.1 -y
sudo yum install mod_ssl.x86_64-2.2.26 -y
sudo yum install tomcat-native.x86_64-1.1.34 -y
sudo yum install tomcat-native-debuginfo.x86_64-1.1.34 -y
|
Cyberwatch/cbw-security-fixes
|
Red_Hat_6/x86_64/2014/RHSA-2014:1087.sh
|
Shell
|
mit
| 11,309 |
#!/bin/bash
. /kb/deployment/user-env.sh
if [ $# -eq 0 ] ; then
sh ./scripts/start_server.sh
elif [ "${1}" = "test" ] ; then
echo "Run Tests"
make test
elif [ "${1}" = "async" ] ; then
sh ./scripts/run_async.sh
elif [ "${1}" = "init" ] ; then
echo "Initialize module"
else
  echo "Unknown command: ${1}"
fi
|
kbaseIncubator/variation
|
scripts/entrypoint.sh
|
Shell
|
mit
| 302 |
echo "----------------------------------------------"
../Debug/typestest A | grep "already taken"
echo "----------------------------------------------"
|
rbdannenberg/o2
|
test/ports.sh
|
Shell
|
mit
| 152 |
#!/usr/bin/env bash
set -ex -o pipefail
# These ones can be `npm link`ed for fast development
LINKABLE_PKGS=(
$(pwd)/dist/packages-dist/{common,core,compiler,compiler_cli,platform-{browser,server}}
$(pwd)/dist/tools/@angular/tsc-wrapped
)
PKGS=(
reflect-metadata
typescript@next
zone.js
rxjs
@types/{node,jasmine}
jasmine
)
TMPDIR=${TMPDIR:-.}
readonly TMP=$TMPDIR/e2e_test.$(date +%s)
mkdir -p $TMP
cp -R -v modules/@angular/compiler_cli/integrationtest/* $TMP
# Try to use the same versions as angular, in particular, this will
# cause us to install the same rxjs version.
cp -v package.json $TMP
# run in subshell to avoid polluting cwd
(
cd $TMP
npm install ${PKGS[*]}
# TODO(alexeagle): allow this to be npm link instead
npm install ${LINKABLE_PKGS[*]}
# Compile the compiler_cli integration tests
./node_modules/.bin/ngc
./node_modules/.bin/jasmine init
# Run compiler_cli integration tests in node
./node_modules/.bin/jasmine test/*_spec.js
)
|
hannahhoward/angular
|
scripts/ci-lite/offline_compiler_test.sh
|
Shell
|
mit
| 989 |
#! /bin/sh
#
# Copyright (c) 2011 Mans Rullgard <[email protected]>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHORS DISCLAIM ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
set -e
export LC_ALL=C
die(){
echo "$@"
exit 1
}
test -n "$FATEDIR" || die "FATEDIR not set"
test -n "$FATE_USER" || die "FATE_USER not set"
reptmp=$(mktemp -d)
trap 'rm -r $reptmp' EXIT
cd $reptmp
tar xzk
header=$(head -n1 report)
# Can't use expr on this one because $version might be 0
version=$(echo "$header" | sed "s/^fate:\([0-9]*\):.*/\1/")
date=$(expr "$header" : "fate:$version:\([0-9]*\):")
slot=$(expr "$header" : "fate:$version:$date*:\([A-Za-z0-9_.-]*\):")
rev=$(expr "$header" : "fate:$version:$date:$slot:\([A-Za-z0-9_.-]*\):")
branch=master
if [ $version -eq 1 ]; then
branch=$(expr "$header" : "fate:$version:$date:$slot:$rev:[0-9]*:[ A-Za-z0-9_.-]*:\([A-Za-z0-9_.-\/]*\):")
branch=$(echo "$branch" | sed 's,^release/,v,')
fi
test -e "$FATEDIR/branches" || touch "$FATEDIR/branches"
grep -q "^$branch$" "$FATEDIR/branches" || \
[ "$branch" = 'master' ] || \
(echo "$branch" >>"$FATEDIR/branches" && \
echo "Setting up new branch $branch" >&2)
test -n "$date" && test -n "$slot" || die "Invalid report header"
slotdir=$FATEDIR/$slot
if [ -d "$slotdir" ]; then
echo "$FATE_USER" >"$slotdir/owner"
owner=$(cat "$slotdir/owner")
test "$owner" = "$FATE_USER" || die "Slot $slot owned by somebody else"
else
mkdir "$slotdir"
echo "$FATE_USER" >"$slotdir/owner"
fi
exec <report
head -n2 >summary
ntest=0
npass=0
IFS=:
exec >pass
while read name status rest; do
if [ "$status" -eq 0 ]; then
echo "$name:$date:$rev"
npass=$(($npass+1))
fi
ntest=$(($ntest+1))
done
exec <&- >&-
upass(){
    read pname pdate prev || return 0
    # After `sort`, entries for the same test sit on adjacent lines,
    # oldest first (the epoch-seconds date field sorts ascending).
    while read lname ldate lrev; do
        # Emit the held entry only when the next line belongs to a
        # different test; when it is the same test, drop the held
        # (older) entry in favour of the newer one.
        test "$lname" != "$pname" && echo "$pname:$pdate:$prev"
        pname=$lname
        pdate=$ldate
        prev=$lrev
    done
    echo "$pname:$pdate:$prev"
}
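# Example (hypothetical data): after sorting, the input
#   fate-aac:1700000000:abc123     <- older pass
#   fate-aac:1710000000:def456     <- newer pass of the same test
#   fate-mp3:1700000000:abc123
# yields
#   fate-aac:1710000000:def456
#   fate-mp3:1700000000:abc123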
lastpass=$slotdir/lastpass
if [ -r $lastpass ]; then
sort pass $lastpass | upass >lastpass
else
sort -o lastpass pass
fi
unset IFS
nwarn=$(grep -Eci '\<warning\>' compile.log) || nwarn=0
echo "stats:$ntest:$npass:$nwarn" >>summary
repdir=$slotdir/$date
mkdir $repdir
gzip -9 *.log
xz -0 report
cp -p summary report.xz *.log.gz $repdir
chmod 644 $repdir/*
rm -f $slotdir/previous
test -e $slotdir/latest && mv $slotdir/latest $slotdir/previous
ln -s $date $slotdir/latest
cp lastpass ${lastpass}.new && mv ${lastpass}.new $lastpass
|
TimothyGu/fateserver-node
|
fate-recv.sh
|
Shell
|
mit
| 3,387 |
#!/bin/bash
# Echoes the full path of the first font whose filename contains a given string.
# Bash (not plain sh) is required: the script relies on arrays below.
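#
# Example invocation (hypothetical paths):
#   ./find-font.sh Menlo ./dist/Menlo.ttf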
find_font(){
# List of directories used on Mac OS to store fonts
# Source: https://support.apple.com/en-au/HT201722
#
# We're searching in *reverse* order to ensure we don't pick up a modified copy
# of $font_name from a user directory. Unlikely, but could happen.
font_paths=(
/System\ Folder/Fonts/
/System/Library/Fonts/
/Network/Library/Fonts/
/Library/Fonts/
"$HOME/Library/Fonts/"
);
# Search through each system path in search of $font_name
for i in "${font_paths[@]}"; do
matches=$(ls -1 "$i" 2>/dev/null | grep $font_name)
# Files containing the font's name exist in this directory
[ $? -eq 0 ] && {
matches=($matches)
# Cycle through each font and return the first one that's of a valid format
for m in "${matches[@]}"; do
file="${i}${m}";
# Verify the file's readable and that it's actually of a proper font format
[ ! -r "$file" ] && { >&2 echo "Skipping unreadable file: $file"; } || {
# Open/close the file with FontForge
fontforge -nosplash -lang=ff -c 'Open($1); Close();' "$file" 2>/dev/null;
# If it opened okay, it's obviously legit font material. Go for it.
[ $? -eq 0 ] && { echo "$file"; exit 0; }
>&2 echo "Not a font file: $file";
}
done;
};
done;
>&2 printf 'Failed to locate font "%s"\n' $font_name
exit 1;
}
# Label our arguments with something more readable
font_name=$1
copy_to=$2
# Find where the queried font lives
path=$(find_font "$font_name" 2>/dev/null)
# Make sure we found something before continuing
[ $? -eq 0 ] && {
	mkdir -p "$(dirname "$copy_to")"
	file "$path" | grep -iE '(Open|True)Type' | grep -qvi 'font collection' && {
		cp "$path" "$copy_to";
	} || {
		fontforge -nosplash -lang=ff -c 'Open($1); Generate($2);' 2>/dev/null "$path" "$copy_to"
	};
};
|
Alhadis/Menloco
|
utils/find-font.sh
|
Shell
|
mit
| 1,904 |
#!/bin/bash
# Function definitions
function userExit()
{
echo "Received SIGINT. Exiting."
  rosnode kill -a
./cleanup.sh
exit
}
startGazeboServer()
{
local world_file_path=$1
local random_seed=$2
rosparam set /use_sim_time true
setsid rosrun gazebo_ros gzserver $world_file_path --seed $random_seed &
echo "Attempted to start Gazebo server with world file: $world_file_path and random seed $random_seed"
}
stopGazebo()
{
pkill gzserver
echo "Attempted to stop Gazebo server"
}
startGazeboClient()
{
setsid rosrun gazebo_ros gzclient __name:=gzclient &
echo "Attempted to start Gazebo client"
}
stopGazeboClient()
{
pkill gzclient
echo "Attempted to stop Gazebo client"
}
addCollectionZone()
{
setsid rosrun gazebo_ros spawn_model -sdf -file $PWD/simulation/models/collection_disk/model.sdf \
-model collection_disk \
-x 0 \
-y 0 \
-z 0 \
-R 0 \
-P 0 \
-Y 0
echo "Attempted to add collection_zone: name=collection_disk, x=0, y=0, z=0, roll=0, pitch=0, yaw=0"
}
addGroundPlane()
{
setsid rosrun gazebo_ros spawn_model -sdf -file $PWD/simulation/models/concrete_ground_plane/model.sdf \
-model concrete_ground_plane \
-x 0 \
-y 0 \
-z 0 \
-R 0 \
-P 0 \
-Y 0
echo "Attempted to add concrete ground plane: name=concrete_ground_plane, x=0, y=0, z=0, roll=0, pitch=0, yaw=0"
}
# Stops the ROS nodes associated with rovers
startRoverNodes()
{
local rover_name=$1
setsid roslaunch $PWD/launch/swarmie.launch name:=$rover_name > logs/$rover_name.log &
echo "Attempted to start rover ROS nodes"
}
# Stops the ROS nodes associated with rovers
stopRoverNodes()
{
local rover_name=$1
  rosnode kill "${rover_name}_APRILTAG"
  rosnode kill "${rover_name}_BASE2CAM"
  rosnode kill "${rover_name}_DIAGNOSTICS"
  rosnode kill "${rover_name}_MAP"
  rosnode kill "${rover_name}_BEHAVIOUR"
  rosnode kill "${rover_name}_SBRIDGE"
  rosnode kill "${rover_name}_NAVSAT"
  rosnode kill "${rover_name}_ODOM"
rosnode cleanup
echo "Attempted to kill rover ROS nodes: name=$rover_name"
}
addRover()
{
local rover_name=$1
local x=$2
local y=$3
local z=$4
local roll=$5
local pitch=$6
local yaw=$7
setsid rosrun gazebo_ros spawn_model -sdf -file $PWD/simulation/models/$rover_name/model.sdf \
-model $rover_name \
-x $x \
-y $y \
-z $z \
-R $roll \
-P $pitch \
-Y $yaw
echo "Attempted to add rover: name=$rover_name, x=$x, y=$y, z=$z, roll=$roll, pitch=$pitch, yaw=$yaw"
}
#---------------------------------------------------------#
#
# The top level script
#
#
#---------------------------------------------------------#
# Exit script if user enters ctl-c or sends interrupt
trap userExit SIGINT
# If not given 4 or 5 arguments then show the usage text
if [ $# -ne 6 -a $# -ne 5 ]
then
echo "Usage: $0 world_file_path num_rovers(1-8) scoring_output_path experiment_duration_in_minutes random_seed [visualize]"
echo "Example: $0 simulation/worlds/powerlaw_targets_example.world 6 ~/swarmathon_data/experiment1.txt 30 random_seed visualize"
echo "Example: $0 simulation/worlds/powerlaw_targets_example.world 6 ~/swarmathon_data/experiment1.txt random_seed 30"
exit 1
fi
EXPERIMENT_REAL_SETUP_START_TIME_IN_SECONDS=$(date +%s)
echo "Running in $PWD"
previous_gazebo_model_path=${GAZEBO_MODEL_PATH}
previous_gazebo_plugin_path=${GAZEBO_PLUGIN_PATH}
export SWARMATHON_APP_ROOT="$PWD"
export GAZEBO_MODEL_PATH="$PWD/simulation/models"
export GAZEBO_PLUGIN_PATH="$PWD/build/gazebo_plugins"
source "$PWD/devel/setup.bash"
echo Cleaning up ROS and Gazebo Processes
./cleanup.sh
echo Killing rosmaster
pkill rosmaster
echo Killing roscore
pkill roscore
roscore &
sleep 2
echo "Experiment started at $(date +%d-%m-%Y" "%H:%M:%S)."
#---------------------------------------------------------#
# Set the interval at which to check whether the experiment duration has elapsed
END_EXPERIMENT_CHECK_INTERVAL=1s
# Delay between adding rovers
# The following line sets the interval to 2 seconds
MODEL_ADD_INTERVAL=2s
#---------------------------------------------------------#
# The maximum number of rovers a user can request is currently 8
MAX_ROVERS=8
#---------------------------------------------------------#
# Read the world file path from command line
WORLD_FILE_PATH=$1
echo "World file path: $WORLD_FILE_PATH"
#---------------------------------------------------------#
# Read the random seed to give gazebo
RANDOM_SEED=$5
echo "Random seed: $RANDOM_SEED"
# Start the gazebo simulation
startGazeboServer $WORLD_FILE_PATH $RANDOM_SEED
# Optionally start the Gazebo client for visualization
if [ $# -eq 6 -a "$6" = "visualize" ]
then
echo "User requested that the Gazebo client be started"
startGazeboClient
fi
# Read the number of rovers to create from command line
NUM_ROVERS=$2
echo "The user requested $NUM_ROVERS rovers."
if [[ $NUM_ROVERS -gt $MAX_ROVERS ]]; then
echo "User requested too many rovers. Maximum rovers is $MAX_ROVERS. Exiting."
exit 2
fi
#---------------------------------------------------------#
addCollectionZone
addGroundPlane
#---------------------------------------------------------#
# Add the rovers to the simulation
# The distances that determine the X, Y coords of the rovers is determined as follows:
# The distance to the rover from a corner position is calculated differently
# than the distance to a cardinal position.
#
# The cardinal direction rovers are a straightforward calculation where:
# a = the distance to the edge of the collection zone
# i.e., 1/2 of the collection zone square side length
# b = the 50cm distance required by the rules for placing the rover
# c = offset for the simulation for the center of the rover (30cm)
# i.e., the rover position is at the center of its body
#
# The corner rovers use trigonometry to calculate the distance where each
# value of d, e, and f, are the legs to an isosceles right triangle. In
# other words, we are calculating and summing X and Y offsets to position
# the rover.
# d = a
# e = xy offset to move the rover 50cm from the corner of the collection zone
# f = xy offset to move the rover 30cm to account for its position being
# calculated at the center of its body
#
# * * d = 0.508m
# * * e = 0.354m
# * * + f = 0.212m
# * /* * ------------
# * / | f * 1.072m
# * /--| *
# /* *
# / | e
# /--|
# *************
# * /|
# * / |
# * / | d a = 0.508m
# * / | ********* b = 0.500m
# * / | * * + c = 0.300m
# * *-----|-----*---* * ------------
# * a * b * c * 1.308m
# * * *********
# * *
# * *
# * *
# *************
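# Worked example with the constants above:
#   cardinal-axis offset: a + b + c = 0.508 + 0.500 + 0.300 = 1.308 m
#   corner per-axis offset: d + e + f = 0.508 + 0.500/sqrt(2) + 0.300/sqrt(2)
#                                     ~= 1.07 m (the 1.072 used below)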
# Specify rover names
ROVER_NAMES=( "achilles" "aeneas" "ajax" "diomedes" "hector" "paris" "thor" "zeus" )
# Specify rover start coordinates
ROVER_POSITIONS_X=( -1.308 0.000 1.308 0.000 1.072 -1.072 -1.072 1.072 )
ROVER_POSITIONS_Y=( 0.000 -1.308 0.000 1.308 1.072 -1.072 1.072 -1.072 )
# In this case, the yaw is the value that turns rover "left" and "right" */
ROVER_YAWS=( 0.000 1.571 -3.142 -1.571 -2.356 0.785 -0.785 2.356 )
echo "Adding rovers to Gazebo and starting their ROS nodes..."
# Add rovers to the simulation and start the associated ROS nodes
for (( i=0;i<$NUM_ROVERS;i++ ));
do
sleep $MODEL_ADD_INTERVAL
addRover ${ROVER_NAMES[i]} ${ROVER_POSITIONS_X[i]} ${ROVER_POSITIONS_Y[i]} 0 0 0 ${ROVER_YAWS[i]}
sleep $MODEL_ADD_INTERVAL
startRoverNodes ${ROVER_NAMES[i]}
done
echo "Finished adding rovers."
#---------------------------------------------------------#
sleep $MODEL_ADD_INTERVAL
echo "Setting rovers to autonomous mode..."
# Send the autonomous command to all rovers
for (( i=0;i<$NUM_ROVERS;i++ ));
do
# Publish the autonomous mode command ("2") to each rover. Latch the message ("-l").
rostopic pub -l /${ROVER_NAMES[i]}/mode std_msgs/UInt8 2 &
echo "Publishing 2 on /${ROVER_NAMES[i]}/mode"
done
echo "Finished setting rovers to autonomous mode."
# Read output file path from command line
SCORE_OUTPUT_PATH=$3
mkdir -p $(dirname $SCORE_OUTPUT_PATH)
echo "User specified $SCORE_OUTPUT_PATH as the file to which score information should be appended."
#---------------------------------------------------------#
# Read the experiment time from command line
EXPERIMENT_DURATION_IN_MINUTES=$4
EXPERIMENT_DURATION_IN_SECONDS=$(( $EXPERIMENT_DURATION_IN_MINUTES*60 ))
echo "Experiment duration will be $EXPERIMENT_DURATION_IN_SECONDS seconds."
# Read the current sim time (in seconds) from the ros topic /clock
# Uses awk to match on first occurence of "secs: number".
# {print $2} prints the number to the shell variable
# exit the awk after the first match because we don't care about "nsecs: number"
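# Illustrative /clock message (shape only):
#   clock:
#     secs: 1024
#     nsecs: 500000000
# The awk program prints "1024" and exits before reaching the nsecs line.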
CURRENT_TIME=$(rostopic echo -n 1 /clock | awk '/secs: [0-9]+$/{print $2; exit}')
START_TIME=$(rostopic echo -n 1 /clock | awk '/secs: [0-9]+$/{print $2; exit}')
echo "Initialised current gazebo time to $CURRENT_TIME"
echo "Initialised start gazebo time to $START_TIME"
# Let the simulation run until the experiment duration is reached
YELLOW='\033[0;33m'
PURPLE='\033[0;35m'
NC='\033[0m' # No Color
EXPERIMENT_REAL_SETUP_END_TIME_IN_SECONDS=$(date +%s)
EXPERIMENT_REAL_START_TIME_IN_SECONDS=$(date +%s)
# Collect the score over time and write to screen and file
echo "Time (s), Score" >> $SCORE_OUTPUT_PATH
until (( $CURRENT_TIME-$START_TIME>=$EXPERIMENT_DURATION_IN_SECONDS )); do
# Update the current sim time
CURRENT_TIME=$(rostopic echo -n 1 /clock | awk '/secs: [0-9]+$/{print $2; exit}')
echo -e "Experiment time remaining ${YELLOW}$(( $EXPERIMENT_DURATION_IN_SECONDS-($CURRENT_TIME-$START_TIME) ))${NC} seconds."
# Pipe /score ros topic to output file
echo -e "${PURPLE}Time: $(($CURRENT_TIME-$START_TIME)), Score: $(rostopic echo -n 1 /collectionZone/score | sed 's/[^0-9]*//g')${NC}"
echo "$(($CURRENT_TIME-$START_TIME)), $(rostopic echo -n 1 /collectionZone/score | sed 's/[^0-9]*//g')\n" >> $SCORE_OUTPUT_PATH
sleep $END_EXPERIMENT_CHECK_INTERVAL
done
echo "The specified experiment duration ($EXPERIMENT_DURATION_IN_MINUTES) has elapsed. End autonomous mode for all rovers."
# Send the manual command to all rovers
for (( i=0;i<$NUM_ROVERS;i++ ));
do
# Publish the manual mode command ("1") to each rover. Latch the message ("-l").
rostopic pub -l /${ROVER_NAMES[i]}/mode std_msgs/UInt8 1 &
echo "Publishing 1 on /${ROVER_NAMES[i]}/mode"
done
EXPERIMENT_REAL_END_TIME_IN_SECONDS=$(date +%s)
ELAPSED_REAL_TIME_IN_SECONDS=$(( $EXPERIMENT_REAL_END_TIME_IN_SECONDS-$EXPERIMENT_REAL_START_TIME_IN_SECONDS ))
ELAPSED_REAL_TIME_IN_MINUTES=$(( $ELAPSED_REAL_TIME_IN_SECONDS/60 ))
ELAPSED_SETUP_REAL_TIME_IN_SECONDS=$(( $EXPERIMENT_REAL_SETUP_END_TIME_IN_SECONDS-$EXPERIMENT_REAL_SETUP_START_TIME_IN_SECONDS ))
ELAPSED_SETUP_REAL_TIME_IN_MINUTES=$(( $ELAPSED_SETUP_REAL_TIME_IN_SECONDS/60 ))
ELAPSED_TOTAL_TIME_IN_SECONDS=$(( $ELAPSED_SETUP_REAL_TIME_IN_SECONDS+$ELAPSED_REAL_TIME_IN_SECONDS ))
ELAPSED_TOTAL_TIME_IN_MINUTES=$(( $ELAPSED_TOTAL_TIME_IN_SECONDS/60 ))
echo "Experiment setup took $ELAPSED_SETUP_REAL_TIME_IN_SECONDS seconds."
echo "Experiment setup took $ELAPSED_SETUP_REAL_TIME_IN_MINUTES minutes."
echo "Experiment run took $ELAPSED_REAL_TIME_IN_SECONDS seconds."
echo "Experiment run took $ELAPSED_REAL_TIME_IN_MINUTES minutes."
echo "Experiment run took $ELAPSED_TOTAL_TIME_IN_SECONDS seconds."
echo "Experiment run took $ELAPSED_TOTAL_TIME_IN_MINUTES minutes."
echo "Finished placing all rovers into manual mode. Ending simulation..."
# Report some simulation efficiency information
echo "The $EXPERIMENT_DURATION_IN_MINUTES minute long experiment took $ELAPSED_REAL_TIME_IN_MINUTES minutes of real time to simulate with $ELAPSED_SETUP_REAL_TIME_IN_MINUTES minutes of setup time."
# The rover program cleans up after itself but if there is a crash this helps to make sure there are no leftovers
echo Cleaning up ROS and Gazebo Processes
rosnode kill -a
echo Killing rosmaster
pkill rosmaster
echo Killing roscore
pkill roscore
./cleanup.sh
# Restore previous environment
export GAZEBO_MODEL_PATH=$previous_gazebo_model_path
export GAZEBO_PLUGIN_PATH=$previous_gazebo_plugin_path
echo "Experiment finished at $(date +%d-%m-%Y" "%H:%M:%S)."
|
BCLab-UNM/SwarmBaseCode-ROS
|
run_headless_sim.sh
|
Shell
|
mit
| 12,910 |
#!/bin/bash
cd /nodeapp
sh ./shell/start.sh
while true
do
echo "nodejs env running..."
sleep 10
done
|
zmoon111/docker-web-env
|
nodejs/start.sh
|
Shell
|
mit
| 116 |
echo "[COMPILING]"
cat core/loader.js core/cookies.js core/watch.js core/tangular.js core/template.js core/model.js core/miracles.js core/core.js core/language.js core/dom.js core/domanipulator.js | uglifyjs -c -o mj.min.js
read -n 1 -p "Press any key to continue"
|
chrysls/miraclejs
|
minify(windows).sh
|
Shell
|
mit
| 264 |
#!/bin/sh
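# Expected inputs (assumed from the greps below):
#   lang.txt - `file` output for the binaries/scripts being surveyed
#   elf.txt  - list of ELF executable paths, one per line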
:>lang.stat
echo "| ELF executable (all) | $(grep "ELF ..-bit LSB executable" lang.txt |wc -l)" >>lang.stat
echo "| ELF executable (set*id) | $(grep "set.id.*ELF ..-bit LSB executable" lang.txt |wc -l)" >>lang.stat
echo "| ELF executable (linked to libc) | $(cat elf.txt | xargs -n1 ldd | grep "=>" | grep "libc\.so" |wc -l)" >>lang.stat
echo "| ELF executable (linked to libstdc\++ = C++)| $(cat elf.txt | xargs -n1 ldd | grep "=>" | grep "libstdc++\.so" |wc -l)" >>lang.stat
echo "| ELF executable (linked to libX11 = X) | $(cat elf.txt | xargs -n1 ldd | grep "=>" |grep "libX11\.so" |wc -l)" >>lang.stat
echo "| ELF executable (linked to gobject = GNOME) | $(cat elf.txt | xargs -n1 ldd | grep "=>" |grep "libgobject-.\..\.so" |wc -l)" >>lang.stat
echo "| ELF executable (linked to libQtCore = KDE) | $(cat elf.txt | xargs -n1 ldd | grep "=>" |grep "libQtCore\.so" |wc -l)" >>lang.stat
echo "| ELF executable (linked to libncurses) | $(cat elf.txt | xargs -n1 ldd | grep "=>" |grep "libncurses\.so" |wc -l)" >>lang.stat
echo "| POSIX shell script | $(grep "^POSIX shell script" lang.txt |wc -l )" >>lang.stat
echo "| Perl script | $(grep "^Perl script" lang.txt |wc -l )" >>lang.stat
echo "| Python script | $(grep "^Python script" lang.txt |wc -l )" >>lang.stat
echo "| Bash shell script | $(grep "^Bourne-Again shell script" lang.txt |wc -l)" >>lang.stat
echo "| Ruby script | $(grep "^Ruby script" lang.txt |wc -l )" >>lang.stat
echo "| Lua script | $(grep "^Lua script" lang.txt |wc -l )" >>lang.stat
#
|
osamuaoki/fun2prog
|
stat/lang/lang.stat.sh
|
Shell
|
mit
| 1,670 |
#!/bin/bash
spiders=(existential xkcd dilbert commitstrip cynadine)
for spider in "${spiders[@]}"
do
echo "Start spider $spider"
/usr/local/bin/scrapy crawl -L ERROR --logfile=/var/log/scrapping.log -a createdAt=1 $spider
done
|
kozko2001/existentialcomics
|
scrape/all_spiders.sh
|
Shell
|
mit
| 233 |
#!/bin/sh
cd lib
# Get the PSR 2 sniffer
echo "Getting PSR 2 code validator"
curl -sOL https://squizlabs.github.io/PHP_CodeSniffer/phpcs.phar
# Get PHPUnit
echo "Getting PHP Unit"
curl -sOL https://phar.phpunit.de/phpunit.phar
echo "Getting Composer"
curl -sOL https://getcomposer.org/composer.phar
chmod +x *.phar
exit 0
|
ronaldbradford/PivotTable
|
scripts/get_required_libraries.sh
|
Shell
|
mit
| 329 |
#!/bin/bash
# Constant
Logger_error_color='\033[1;31m'
Logger_success_color='\033[0;32m'
Logger_no_color='\033[1;0m'
Logger_alert_color='\033[1;34m'
# Configurable
Logger__prefix='Originator'
Logger__has_prefix=1
#################################################
# Logs a message
#
# @param $1: The message to log
# @param $2: The color to log
#################################################
Logger_log() {
# Setting color (if param is passed)
if [ ! -z "$2" ]; then
echo -ne "$2"
fi
# Logging
if [ $Logger__has_prefix -eq 1 ]; then
echo "<< $Logger__prefix >>: $1"
else
echo "$1"
fi
# Disabling color (if param is passed)
if [ ! -z "$2" ]; then
echo -ne "$Logger_no_color"
fi
}
Logger__log() {
Logger_log "$1" "$Logger_no_color"
}
Logger__error() {
Logger_log "$1" "$Logger_error_color"
}
Logger__success() {
Logger_log "$1" "$Logger_success_color"
}
Logger__alert() {
Logger_log "$1" "$Logger_alert_color"
}
Logger__prompt() {
if [ $Logger__has_prefix -eq 1 ]; then
echo -n "<< $Logger__prefix >>: $1"
else
echo -n "$1"
fi
}
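# Example usage (assuming this file has been sourced):
#   Logger__success "Build finished"   # green, prefixed with << Originator >>
#   Logger__error "Build failed"       # red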
|
DigitalCitadel/originator
|
lib/logger.bash
|
Shell
|
mit
| 1,156 |
## for TransDecoder > github:190730
#=== conf ===
GENOME=../../Om2.assembly.fasta
TDDIR=~/bio/applications/TransDecoder-v5.0.2
TRANSCRIPT=stringtie_merged.transcripts.fasta
TRANSCRIPT_GFF=stringtie_merged.gff3
NCPU=8
PFAM=longest_orfs.pep.pfam.domtblout
BLAST=longest_orfs.pep.vs.uniprot_ref_proteomes.diamond.blastp.dmnd.out
#===
#$TDDIR/TransDecoder.Predict --cpu $NCPU -t $TRANSCRIPT \
# --retain_pfam_hits $PFAM \
# --retain_blastp_hits $BLAST
TD_GFF=stringtie_merged.transcripts.fasta.transdecoder.gff3
OUTF=`basename $TD_GFF .gff3`.genome.gff3
$TDDIR/util/cdna_alignment_orf_to_genome_orf.pl \
$TD_GFF \
$TRANSCRIPT_GFF \
$TRANSCRIPT \
> $OUTF
|
shujishigenobu/genome_annot
|
tasks/HISAT2_StringTie_TransDecoder/run_TransDecoder_GenomeBased_step5.sh
|
Shell
|
mit
| 666 |
# -----------------------------------------------------------------------------
# Print red text to stderr.
#
# https://linuxtidbits.wordpress.com/2008/08/11/output-color-on-bash-scripts/
# -----------------------------------------------------------------------------
red() {
echo >&2 "$(tput setaf 1)${1}$(tput sgr0)"
}
# -----------------------------------------------------------------------------
# Print yellow text to stderr.
# -----------------------------------------------------------------------------
yellow() {
echo >&2 "$(tput setaf 3)${1}$(tput sgr0)"
}
# -----------------------------------------------------------------------------
# Print green text to stderr.
# -----------------------------------------------------------------------------
green() {
echo >&2 "$(tput setaf 2)${1}$(tput sgr0)"
}
# die EXIT_CODE MESSAGE -- print MESSAGE in red and exit with EXIT_CODE.
die() { red "$2"; exit "$1"; }
|
eHarmony/aloha
|
travis-bash-fns.sh
|
Shell
|
mit
| 857 |
#!/usr/bin/env bash
# Gabriele Girelli - 20170619
# Aim: split fasta file based on gene name
#
# Usage: ./split_fa_by_gene.sh all_gene.fa outdir
#
# Note:
# - works only if no space is present in the fasta headers
# - as sequences are appended, remove previous run outputs when re-running
#
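# Example (assuming headers of the form "> GENE1_probe12 ..."): the record
# is appended to outdir/GENE1.fa
#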
outdir=$2
mkdir -p "$outdir"
while IFS='' read -r line || [[ -n "$line" ]]; do
	if [ "${line:0:1}" = '>' ]; then
name=$(echo "$line" | sed 's/^>\ //' | tr '_' ' ' | cut -d ' ' -f 1)
echo -e "$line" | tr '\t' '\n' >> $outdir"/"$name".fa"
fi
done < <(cat "$1" | paste - -)
|
ggirelli/potpourri
|
680-genes-fish-oligos/split_fa_by_gene.sh
|
Shell
|
mit
| 581 |
# This is my default zsh file, set the main .zshrc file to open this one
# Set name of the theme to load. Optionally, if you set this to "random"
# it'll load a random theme each time that oh-my-zsh is loaded.
# See https://github.com/robbyrussell/oh-my-zsh/wiki/Themes
ZSH_THEME="bzvestey"
# Uncomment the following line to use case-sensitive completion.
CASE_SENSITIVE="true"
# Uncomment the following line to use hyphen-insensitive completion. Case
# sensitive completion must be off. _ and - will be interchangeable.
# HYPHEN_INSENSITIVE="true"
# Uncomment the following line to disable bi-weekly auto-update checks.
# DISABLE_AUTO_UPDATE="true"
# Uncomment the following line to change how often to auto-update (in days).
# export UPDATE_ZSH_DAYS=13
# Uncomment the following line to disable colors in ls.
# DISABLE_LS_COLORS="true"
# Uncomment the following line to disable auto-setting terminal title.
# DISABLE_AUTO_TITLE="true"
# Uncomment the following line to enable command auto-correction.
# ENABLE_CORRECTION="true"
# Uncomment the following line to display red dots whilst waiting for completion.
COMPLETION_WAITING_DOTS="true"
# Uncomment the following line if you want to disable marking untracked files
# under VCS as dirty. This makes repository status check for large repositories
# much, much faster.
# DISABLE_UNTRACKED_FILES_DIRTY="true"
# Uncomment the following line if you want to change the command execution time
# stamp shown in the history command output.
# The optional three formats: "mm/dd/yyyy"|"dd.mm.yyyy"|"yyyy-mm-dd"
HIST_STAMPS="yyyy-mm-dd"
# Would you like to use another custom folder than $ZSH/custom?
ZSH_CUSTOM=~/.mydotfiles/zsh/custom
# Which plugins would you like to load? (plugins can be found in ~/.oh-my-zsh/plugins/*)
# Custom plugins may be added to ~/.oh-my-zsh/custom/plugins/
# Example format: plugins=(rails git textmate ruby lighthouse)
# Add wisely, as too many plugins slow down shell startup.
plugins=(git golang docker kubectl node npm)
source $ZSH/oh-my-zsh.sh
# User configuration
# export MANPATH="/usr/local/man:$MANPATH"
# You may need to manually set your language environment
# export LANG=en_US.UTF-8
# Preferred editor for local and remote sessions
# if [[ -n $SSH_CONNECTION ]]; then
# export EDITOR='vim'
# else
# export EDITOR='mvim'
# fi
# Compilation flags
# export ARCHFLAGS="-arch x86_64"
# ssh
# export SSH_KEY_PATH="~/.ssh/rsa_id"
# Set personal aliases, overriding those provided by oh-my-zsh libs,
# plugins, and themes. Aliases can be placed here, though oh-my-zsh
# users are encouraged to define aliases within the ZSH_CUSTOM folder.
# For a full list of active aliases, run `alias`.
#
# Example aliases
# alias zshconfig="mate ~/.zshrc"
# alias ohmyzsh="mate ~/.oh-my-zsh"
###############################################################################
## Load our extra files ##
###############################################################################
# exports
source ~/.mydotfiles/zsh/exports.zsh
|
bzvestey/dotfiles
|
zsh/zshrc-linux.zsh
|
Shell
|
mit
| 3,074 |
#!/usr/bin/env bash
pokebotpath=$(cd "$(dirname "$0")"; pwd)
cd $pokebotpath
source bin/activate 2> /dev/null
if [[ $? -ne 0 ]];
then
echo "Virtualenv does not exits"
echo "Run: ./setup.sh -i"
exit 1
fi
git fetch -a
installed=(`pip list 2>/dev/null |sed -e 's/ //g' -e 's/(/:/' -e 's/)//' -e 's/[-_]//g' | awk '{print tolower($0)}'`)
required=(`cat requirements.txt | sed -e 's/.*pgoapi$/pgoapi==2.13.0/' -e 's/[-_]//g' -e 's/==\(.*\)/:\1/' | awk '{print tolower($0)}'`)
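# Both lists are normalised to lower-case "name:version" tokens, e.g.
# "PGoApi (2.13.0)" becomes "pgoapi:2.13.0", so membership can be tested below.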
for package in ${required[@]}
do
if [[ ! (${installed[*]} =~ $package) ]];
then
echo "Some of the required packages are not found / have different version."
echo "Run: ./setup.sh -u"
exit 1
fi
done
if [ "1" == $(git branch -vv |grep -c "* dev") ] && [ $(git log --pretty=format:"%h" -1) != $(git log --pretty=format:"%h" -1 origin/dev) ] ||
[ "1" == $(git branch -vv |grep -c "* master") ] && [ $(git log --pretty=format:"%h" -1) != $(git log --pretty=format:"%h" -1 origin/master) ]
then
read -p "Branch has an update. Run ./setup.sh -u to update? y/n
" do_setup
if [[ $do_setup = "y" || $do_setup = "Y" ]];
then
./setup.sh -u
fi
fi
python MultiBot.py
exit 0
|
goedzo/PokemonGo-Bot
|
runMultiBot.sh
|
Shell
|
mit
| 1,171 |
#!/usr/bin/env bash
ACTION_NAME="rechmod"
ACTION_VERSION="2015-01-11"
CHMOD_DEFAULT_WRITE="777"
rechmod() {
if [ -z "${CHMOD_PATHS_WRITE+x}" ] || [ -z "${CHMOD_PATHS_WRITE}" ]
then
loge "Paths for CHMOD not defined or is empty array"
fi
log "Setting defaults (755 / 644) to all project directories/files"
# folders
find ${PROJECT_LOCAL_ROOT} -type d -print0 | xargs -0 chmod 755
# files
find ${PROJECT_LOCAL_ROOT} -type f -print0 | xargs -0 chmod 644
for LOCAL_PATH in "${CHMOD_PATHS_WRITE[@]}"
do
log "Chmoding ${CHMOD_DEFAULT_WRITE} to \"${LOCAL_PATH}\""
chmod -R ${CHMOD_DEFAULT_WRITE} "${PROJECT_LOCAL_ROOT}${LOCAL_PATH}"
done
logs "Chmoding finished"
}
|
hansek/cotls
|
actions/rechmod.sh
|
Shell
|
mit
| 738 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for DSA-3333-1
#
# Security announcement date: 2015-08-12 00:00:00 UTC
# Script generation date: 2017-01-01 21:07:31 UTC
#
# Operating System: Debian 8 (Jessie)
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - iceweasel:38.2.0esr-1~deb8u1
#
# Last versions recommended by security team:
# - iceweasel:38.8.0esr-1~deb8u1
#
# CVE List:
# - CVE-2015-4473
# - CVE-2015-4478
# - CVE-2015-4479
# - CVE-2015-4480
# - CVE-2015-4484
# - CVE-2015-4487
# - CVE-2015-4488
# - CVE-2015-4489
# - CVE-2015-4492
# - CVE-2015-4493
# - CVE-2015-4475
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade iceweasel=38.8.0esr-1~deb8u1 -y
|
Cyberwatch/cbw-security-fixes
|
Debian_8_(Jessie)/x86_64/2015/DSA-3333-1.sh
|
Shell
|
mit
| 832 |
#!/bin/bash
source /usr/local/bin/virtualenvwrapper.sh
workon drftest
cd /home/drftest/drftest/
./manage.py migrate
./manage.py initshopdb
./manage.py runserver 0.0.0.0:8000
|
andreagrandi/drf3-test
|
docker/run.sh
|
Shell
|
mit
| 174 |
#! /bin/bash -
#
# No-PPT automated production deployment script.
#
# Grammar:
# jetty_deploy.sh -g {groupId} -a {artifactId} [-v {version}]
#
# @author ISME
# @version 1.0.1
# @since 1.0.0
#
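# Example (hypothetical coordinates):
#   ./jetty_deploy.sh -g cn.noppt -a noppt-web -v 1.0.0
# Omitting -v deploys the latest <release> found in maven-metadata.xml.
#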
# Reset IFS
IFS=$'\040\t\n'
# Reset PATH
OLDPATH="${PATH}"
PATH="/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin"
export PATH
# Reset JAVA_HOME
JAVA_HOME="/usr/local/java"
CLASSPATH="${JAVA_HOME}/lib/dt.jar:${JAVA_HOME}/lib/tools.jar"
PATH="${JAVA_HOME}/bin:${PATH}"
export JAVA_HOME
export CLASSPATH
export PATH
################################ Global functions ################################
function info()
{
echo -e "\e[32m[INFO]: $1\e[0m"
}
function warn()
{
echo -e "\e[33m[WARN]: $1\e[0m"
}
function error()
{
echo -e "\e[31m[ERROR]: $1\e[0m"
}
##################################################################################
EXITCODE=0
PROGRAM=`basename $0`
VERSION="1.0.0"
# Nexus configurations.
NEXUS_SERVER="http://dev.noppt.cn:8081/nexus/content/repositories/releases"
MAVEN_METADATA_FILE="maven-metadata.xml"
WORKING_DIR="./" # TODO: Modify this before used in production environment.
DOWNLOAD_TMP_DIR="./.lifter_download_tmp_" # TODO: Modify this before used in production environment.
# Define parameters.
groupId=""
artifactId=""
version=""
function usage()
{
    echo "Usage: ${PROGRAM} [OPTIONS]..."
    echo "  -a    Project artifactId."
    echo "  -g    Project groupId."
    echo "  -v    Project version."
    echo "  -c    Clean up temporary lifter files and exit."
    echo "  -h    This help."
}
function download_file()
{
local url="$1"
local filename="$2"
# Download file.
wget -t 3 -T 60 --quiet "${url}" -O "${filename}"
wget -t 3 -T 60 --quiet "${url}.md5" -O "${filename}.md5"
# Checksum
check_md5 "${filename}"
}
function check_md5()
{
local file="$1"
which md5sum > /dev/null 2>&1
if [ $? -eq 0 ]
then
if [ "$(md5sum "${file}" | awk '{print $1}')" != "$(cat "${file}.md5")" ]
then
error "${file} MD5 checksum failed."
exit 1
fi
else
# For Mac OSX.
which md5 > /dev/null 2>&1
if [ $? -eq 0 ]
then
if [ "$(md5 -q "${file}")" != "$(cat "${file}.md5")" ]
then
error "${file} MD5 checksum failed."
exit 1
fi
else
error "Your system not support md5sum."
exit 1
fi
fi
}
function prepare_options()
{
# Check groupId and artifactId.
if [ "${groupId}" = "" ] || [ "${artifactId}" = "" ]
then
error "Please specify groupId and artifactId."
exit 1
fi
# Check version.
if [ "${version}" = "" ]
then
# Maven metadata download url.
local url="${NEXUS_SERVER}/$(echo ${groupId} | sed 's/\./\//g')/${artifactId}/${MAVEN_METADATA_FILE}"
# Download metadata and md5 file.
mkdir -p "${DOWNLOAD_TMP_DIR}/${groupId}/${artifactId}"
cd "${DOWNLOAD_TMP_DIR}/${groupId}/${artifactId}/"
download_file "${url}" "${MAVEN_METADATA_FILE}"
# Read latest version.
version="$(cat "${MAVEN_METADATA_FILE}" | grep "<release>.*</release>" | sed -E 's/( )*<(\/)*release>//g')"
if [ "${version}" = "" ]
then
error "Get version failed. Please input version and retry."
exit 1
fi
# Back to the main directory.
cd - > /dev/null 2>&1
fi
# Enter project workspace.
cd ${artifactId}
}
function download_packages()
{
# WAR file URL.
local filename="${artifactId}-${version}.war"
local url="${NEXUS_SERVER}/$(echo ${groupId} | sed 's/\./\//g')/${artifactId}/${version}/${filename}"
# Download WAR file and MD5 file.
mkdir -p "${DOWNLOAD_TMP_DIR}/${groupId}/${artifactId}/${version}"
cd "${DOWNLOAD_TMP_DIR}/${groupId}/${artifactId}/${version}"
download_file "${url}" "${filename}"
cd - > /dev/null 2>&1
}
function backup_old_version()
{
local backup_dir=".lifter_${groupId}_${artifactId}_${version}_rollback_"
mkdir "${backup_dir}"
cp -r "static" "${backup_dir}"
cp -r "webapps" "${backup_dir}"
}
function deploy_new_version()
{
rm -rf "webapps" "static"
mkdir "webapps"
unzip "${DOWNLOAD_TMP_DIR}/${groupId}/${artifactId}/${version}/${artifactId}-${version}.war" -d "webapps"
mv "webapps/static" .
}
function main()
{
while getopts a:g:v:ch opt;
do
case ${opt} in
a)
artifactId=${OPTARG}
;;
g)
groupId=${OPTARG}
;;
v)
version=${OPTARG}
;;
h)
usage
exit 0
;;
c)
rm -rf .lifter*
exit 0
;;
*)
;;
esac
done
# Prepare options.
prepare_options
# Download packages.
download_packages
# Backup old version.
backup_old_version
# Stop service.
bin/jetty.sh stop
# Deploy new version.
deploy_new_version
# Restart service.
bin/jetty.sh start
}
main "$@"
|
no-ppt/noppt-tools
|
operator/jetty_deploy/jetty_deploy.sh
|
Shell
|
mit
| 5,289 |
#!/bin/bash
cd "$(dirname "$BASH_SOURCE")"
###Some convenience functions
prnt(){
printf "$*\n" >>/dev/stdout
}
err() {
printf "$*\n" >>/dev/stderr
}
Exit(){
prnt '\nCleaning'
if [ "$2" != '-p' ]; then
kill $pid >/dev/null 2>&1 && prnt "\tKilled pid: $pid"
fi
sudo ./lampi -rmd "$site1" >/dev/null 2>&1 && prnt "\tRemoved site: $site1"
sudo ./lampi -rmd "$site2" >/dev/null 2>&1 && prnt "\tRemoved site: $site2"
exit $1
}
trap 'Exit 1 2>/dev/null' SIGINT
#trap Exit INT TERM EXIT
doc_root1="$(mktemp -d)"
doc_root2="$(mktemp -d)"
site1=letsacme-host1.local
site2=letsacme-host2.local
acme_dir=$doc_root1/acme-challenge
prnt "\nCreating test sites..."
sudo ./lampi -n "$site1" -dr "$doc_root1" >/dev/null && prnt "\tCreated site: $site1"
sudo ./lampi -n "$site2" -dr "$doc_root2" >/dev/null && prnt "\tCreated site: $site2"
#Test the sites
prnt '\tTesting sites..'
mkdir -p "$doc_root1"/.well-known/acme-challenge
mkdir -p "$doc_root2"/.well-known/acme-challenge
site_test(){
# $1: burl, $2:docroot
somefile="$(tr -cd 0-9 </dev/urandom | head -c 65)"
echo working > "$2/.well-known/acme-challenge/$somefile"
if curl "$1/.well-known/acme-challenge/$somefile" >/dev/null 2>&1;then
prnt "\t\tPassed: $1"
else
prnt "\t\tFailed: $1"
Exit 1
fi
}
site_test "http://$site1" "$doc_root1"
site_test "http://$site2" "$doc_root2"
prnt "\nngrok ..."
nweb=127.0.0.1:4040
nconf="tunnels:
$site1:
proto: http
host_header: rewrite
addr: $site1:80
web_addr: $nweb
$site2:
proto: http
host_header: rewrite
addr: $site2:80
web_addr: $nweb"
nconf_f=ngrok.yml
echo "$nconf" > "$nconf_f" && prnt '\tCreated ngrok config file'
nohup ./ngrok start -config "$nconf_f" $site1 $site2 >/dev/null 2>&1 &
pid=$!
prnt "\tRunning ngrok in the background (pid: $pid)"
t1=$(date +%s)
max_t=7 #limit max try in seconds
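# Shape of the ngrok API response consumed below (abridged, illustrative):
#   {"tunnels":[{"public_url":"http://abc123.ngrok.io", ...}, ...]}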
while true; do
tunnel_info_json="$(curl -s http://$nweb/api/tunnels)"
#echo $tunnel_info_json
public_url1="$(echo "$tunnel_info_json" | jq -r '.tunnels[0].public_url' | grep 'http://')"
dom1="$(echo "$public_url1" |sed -n -e 's#.*//\([^/]*\)/*.*#\1#p')"
public_url2="$(echo "$tunnel_info_json" | jq -r '.tunnels[2].public_url' | grep 'http://')"
dom2="$(echo "$public_url2" |sed -n -e 's#.*//\([^/]*\)/*.*#\1#p')"
if [ -n "$dom1" ] && [ -n "$dom2" ]; then
break
fi
t2=$(date +%s)
time="$(expr $t2 - $t1)"
if [ $time -ge $max_t ]; then
t1=$(date +%s)
prnt "\tngork froze. Restarting ..."
kill $pid >/dev/null 2>&1 && prnt "\tKilled pid: $pid"
nohup ./ngrok start -config "$nconf_f" $site1 $site2 >/dev/null 2>&1 &
pid=$!
prnt "\tngrok restarted (pid: $pid)"
fi
done
if [ "$dom1" = "$dom2" ]; then
err '\tE: Both domain can not be same. abort'
Exit 1 2>/dev/null
fi
prnt "\tSite1: $public_url1"
prnt "\tSite2: $public_url2"
prnt "\tTesting sites ..."
#tphp='<?php phpinfo(); ?>'
#ls -la "$doc_root1" "$doc_root2"
site_test "$public_url1" "$doc_root1"
site_test "$public_url2" "$doc_root2"
#ls -la "$doc_root1" "$doc_root2"
#sleep 30
prnt '\nPreparing ...'
if [ -f account.key ]; then
prnt '\tUsing existing account.key'
else
openssl genrsa 4096 > account.key && prnt '\tCreated account.key'
fi
printf "$dom1\n$dom2\n" > dom.list && prnt '\tCreated dom.list file'
./gencsr >/dev/null && prnt '\tCreated CSR'
prnt '
*********************************************************
*** Test 1: With --config-json and using document root
*********************************************************
'
conf_json='{
"'$dom1'":
{
"DocumentRoot": "'$doc_root1'"
},
"'$dom2'":
{
"DocumentRoot": "'$doc_root2'"
},
"DocumentRoot": "'$doc_root2'",
"AccountKey":"account.key",
"CSR": "dom.csr",
"CertFile":"dom.crt",
"ChainFile":"chain.crt",
"Test":"True"
}' && prnt '\tConfiguration JSON prepared'
#echo "$conf_json" > config.json && prnt '\tCreated config.json file'
prnt '\tRunning letsacme: python ../letsacme.py --config-json $conf_json'
t="$(mktemp)"
{ python ../letsacme.py --config-json "$conf_json" 2>&1; echo $? >"$t"; } | sed -e 's/.*/\t\t&/'
es=$(cat $t)
rm "$t"
if [ $es -eq 0 ]; then
prnt '\n\t*** success on test 1 ***'
else
err '\tE: Failed to get the certs'
#sleep 30
Exit 1 2>/dev/null
fi
prnt "
*******************************************************
*** Test 2: With --acme-dir and without --config-json
*******************************************************
"
red_code="
RewriteEngine On
RewriteBase /
RewriteRule ^.well-known/acme-challenge/(.*)$ http://$dom1/acme-challenge/\$1 [L,R=302]
"
echo "$red_code" > "$doc_root1"/.htaccess && prnt "\tRedirect for $site1 is set"
echo "$red_code" > "$doc_root2"/.htaccess && prnt "\tRedirect for $site2 is set"
prnt "\tRunning letsacme:
\tpython ../letsacme.py --test\\\\\n\t --account-key account.key\\\\\n\t --csr dom.csr\\\\\n\t --acme-dir $acme_dir\\\\\n\t --chain-file chain.crt\\\\\n\t --cert-file dom.crt"
t="$(mktemp)"
{ python ../letsacme.py --test --account-key account.key --csr dom.csr --acme-dir "$acme_dir" --chain-file chain.crt --cert-file dom.crt 2>&1; echo $? >"$t"; } | sed -e 's/.*/\t\t&/'
es=$(cat "$t")
rm $t
if [ $es -eq 0 ]; then
prnt '\n\t*** success on test 2 ***'
else
err '\tE: Failed to get the certs'
Exit 1 2>/dev/null
fi
############## Final cleaning ###############
Exit 0 2>/dev/null
#############################################
|
neurobin/letsacme
|
test/check.sh
|
Shell
|
mit
| 5,528 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for USN-2319-3
#
# Security announcement date: 2014-09-16 00:00:00 UTC
# Script generation date: 2017-01-01 21:03:59 UTC
#
# Operating System: Ubuntu 14.04 LTS
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - openjdk-7-jre-lib:7u65-2.5.2-3~14.04
# - openjdk-7-jre-zero:7u65-2.5.2-3~14.04
# - icedtea-7-jre-jamvm:7u65-2.5.2-3~14.04
# - openjdk-7-jre-headless:7u65-2.5.2-3~14.04
# - openjdk-7-jre:7u65-2.5.2-3~14.04
# - openjdk-7-jre-headless:7u65-2.5.2-3~14.04
# - openjdk-7-jre:7u65-2.5.2-3~14.04
#
# Last versions recommended by security team:
# - openjdk-7-jre-lib:7u65-2.5.2-3~14.04
# - openjdk-7-jre-zero:7u65-2.5.2-3~14.04
# - icedtea-7-jre-jamvm:7u121-2.6.8-1ubuntu0.14.04.1
# - openjdk-7-jre-headless:7u121-2.6.8-1ubuntu0.14.04.1
# - openjdk-7-jre:7u121-2.6.8-1ubuntu0.14.04.1
# - openjdk-7-jre-headless:7u121-2.6.8-1ubuntu0.14.04.1
# - openjdk-7-jre:7u121-2.6.8-1ubuntu0.14.04.1
#
# CVE List:
# - CVE-2014-2483
# - CVE-2014-2490
# - CVE-2014-4216
# - CVE-2014-4219
# - CVE-2014-4223
# - CVE-2014-4262
# - CVE-2014-4209
# - CVE-2014-4244
# - CVE-2014-4263
# - CVE-2014-4218
# - CVE-2014-4266
# - CVE-2014-4264
# - CVE-2014-4221
# - CVE-2014-4252
# - CVE-2014-4268
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade openjdk-7-jre-lib=7u65-2.5.2-3~14.04 -y
sudo apt-get install --only-upgrade openjdk-7-jre-zero=7u65-2.5.2-3~14.04 -y
sudo apt-get install --only-upgrade icedtea-7-jre-jamvm=7u121-2.6.8-1ubuntu0.14.04.1 -y
sudo apt-get install --only-upgrade openjdk-7-jre-headless=7u121-2.6.8-1ubuntu0.14.04.1 -y
sudo apt-get install --only-upgrade openjdk-7-jre=7u121-2.6.8-1ubuntu0.14.04.1 -y
sudo apt-get install --only-upgrade openjdk-7-jre-headless=7u121-2.6.8-1ubuntu0.14.04.1 -y
sudo apt-get install --only-upgrade openjdk-7-jre=7u121-2.6.8-1ubuntu0.14.04.1 -y
|
Cyberwatch/cbw-security-fixes
|
Ubuntu_14.04_LTS/x86_64/2014/USN-2319-3.sh
|
Shell
|
mit
| 2,022 |
#!/bin/sh
#
# install ODA-OS ingestion engine
#
#======================================================================
. `dirname $0`/../lib_logging.sh
NAME="Coast-Line Dataset"
info "Installing Ingestion Engine $NAME ... "
#======================================================================
[ -z "$ODAOS_IE_HOME" ] && error "Missing the required ODAOS_IE_HOME variable!"
[ -z "$CONTRIB" ] && error "Missing the required CONTRIB variable!"
[ -z "$ODAOSUSER" ] && error "Missing the required ODAOSUSER variable!"
[ -z "$ODAOSGROUP" ] && error "Missing the required ODAOSGROUP variable!"
URL="http://www.naturalearthdata.com/http//www.naturalearthdata.com/download/10m/physical/ne_10m_land.zip"
TARGET="$ODAOS_IE_HOME/ingestion/media/etc/coastline_data"
#======================================================================
# trying to locate the data-file
#
# public domain dataset
# source: Natural Earth (http://www.naturalearthdata.com/)
#
FILE="`find "$CONTRIB" -name 'ne_10m_land.zip' | sort -r | head -n 1`"
if [ -z "$FILE" ]
then
info "Downloading from: $URL"
FILE="$CONTRIB/`basename "$URL"`"
info "Saving to : $FILE"
curl -L "$URL" -o "$FILE"
[ -f "$FILE" ] || error "Failed to download the $NAME!" \
&& info "$NAME downloaded."
else # found - using local copy
info "Using the existing local copy of the $NAME."
fi
info "$FILE"
#======================================================================
# remove previous data
[ -d "$TARGET" -o -f "$TARGET" ] && rm -fR "$TARGET"
# unpack the data
sudo -u "$ODAOSUSER" mkdir -p "$TARGET"
unzip "$FILE" -d "$TARGET"
chown -R "$ODAOSUSER:$ODAOSGROUP" "$TARGET"
info "$NAME installed to: $TARGET"
|
DREAM-ODA-OS/ODA-OS_subsystem
|
scripts/install.d/31_ie_coastline.sh
|
Shell
|
mit
| 1,711 |
#!/usr/bin/env bash
# Build package in place.
source "$(dirname "${BASH_SOURCE[0]}")/../../../../../scripts/common.sh"
[[ "${#}" -ge 1 ]] || abort "usage: ${0} /path/to/v8 [{x64.release|...} [/path/to/boost]]"
readonly PROJECT="$(realpath "${HERE}/..")"
readonly V8="$(realpath "${1}")"
ensure_directory "${V8}"
readonly CONFIG="${2:-x64.release}"
ensure_directory "${V8}/out.gn/${CONFIG}"
readonly BOOST="${3:-}"
INCLUDE_DIRS="${V8}:${V8}/include"
LIBRARY_DIRS="${V8}/out.gn/${CONFIG}/obj"
if [[ -n "${BOOST}" ]]; then
INCLUDE_DIRS+=":${BOOST}/include"
LIBRARY_DIRS+=":${BOOST}/lib"
fi
set -o xtrace
cd "${PROJECT}"
pip3 install \
--global-option=copy_files \
--global-option="--src-dir=${V8}/out.gn/${CONFIG}" \
--global-option=build_ext \
--global-option=--inplace \
--global-option="--include-dirs=${INCLUDE_DIRS}" \
--global-option="--library-dirs=${LIBRARY_DIRS}" \
--editable .
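# Example (hypothetical layout):
#   ./develop.sh ~/src/v8 x64.release ~/opt/boost
# builds the extension in place against ~/src/v8/out.gn/x64.release.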
|
clchiou/garage
|
py/g1/third-party/v8/scripts/develop.sh
|
Shell
|
mit
| 912 |
# google-chrome install
echo 'Installing Google Chrome'
# Write the repo definition via `sudo tee`: a plain `sudo echo ... >> file`
# would fail, since the redirection is performed by the unprivileged shell.
sudo tee /etc/yum.repos.d/google-chrome.repo >/dev/null <<'EOF'
[google-chrome]
name=google-chrome - 64-bit
baseurl=http://dl.google.com/linux/chrome/rpm/stable/x86_64
enabled=1
gpgcheck=1
gpgkey=https://dl-ssl.google.com/linux/linux_signing_key.pub
EOF
sudo yum -y install google-chrome-stable
google-chrome
echo 'Google Chrome install complete'
|
dcorns/bash_scripts
|
Ichrome.sh
|
Shell
|
mit
| 672 |