code stringlengths 2-1.05M | repo_name stringlengths 5-110 | path stringlengths 3-922 | language stringclasses 1 value | license stringclasses 15 values | size int64 2-1.05M |
---|---|---|---|---|---|
#!/bin/bash
set -eu
INTERACTIVE=1
for i in "$@";
do
echo "=== Processing ${i} ===";
# check existence
if ! test -f "${i}"
then
echo "${i} does not exist or is not a regular file."
exit 1
fi
# extract
EXTRACTED=0
if which atool &>/dev/null
then
atool -x "${i}" && EXTRACTED=1
else
DST=$(splitext.py "${i}")
unzip -d "${DST}" "${i}" && EXTRACTED=1
echo "Extracted to ${DST}"
fi
# exit on failure
if [ ${EXTRACTED} -ne 1 ]
then
echo "Error processing ${i}"
exit 1
fi
# remove zip file
if [ ${INTERACTIVE} -eq 0 ]
then
rm --verbose "${i}";
else
echo "Remove ${i}? (y/n/a/q)"
read ans
case $ans in
y) rm --verbose "${i}";;
n) echo "Leaving ${i}";;
a) INTERACTIVE=0 && rm --verbose "${i}";;
q) exit 0;;
*) rm -i --verbose "${i}";;
esac
fi
done
| KIAaze/bin_and_dotfiles_public | bins/public_bin/zip2dir.sh | Shell | gpl-3.0 | 874 |
#!/bin/bash
#
# Dutch to German - stations table
#
# Copyright (C) 2017-2018 [email protected]
# Distributed under the terms of the GNU General Public License v3
#
# Thanks to Claude, Alexandre and PPL (from ICFB) for translation.
#
# To be sure that sed will behave as expected :
export LC_COLLATE="C"
STATIONS_CSV="$(dirname $0)/../db/stations.csv"
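# Each expression uses sed's "2" occurrence flag, so only the second match on a line
# is replaced (presumably so that the original Dutch column is left untouched).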
sed -i \
-e 's/verwijst de code FCV misschien naar/abkürzung FCV verweist wahrscheinlich nach/2' \
-e "s/werkbasis en noodperron bij HSL/arbeitstelle und hilfbahnsteig von NBS/2" \
-e 's/Franse benaming/Französische bezeichnung/2' \
-e 's|in december 2002 omgedoopt in Spa-Géronstère en van een code voorzien / zie aldaar|in Dez.2002 in Spa-Géronstère umbenennt und mit einer abkürzung versehen : siehe dort (FSSG)|2' \
-e "s/verbindingswissels en uitwijkspoor op HSL/verbindungs und abstellgleisweiche auf NBS/2" \
-e "s|tot dec. 2002 / had geen code|bis Dez.2002; hat kein abkürzung|2" \
-e 's/-Strand/-Strand/2' \
-e 's/Kaai/Kai/2' \
-e "s/uitwijkspoor en noodperron op HSL/abstellgleis und behelfsbahnsteig auf NBS/2" \
-e 's/Vorming/Rangierbahnhof/2' \
-e 's/goederenstation/güterbahnhof/2' \
-e "s/nieuwe stopplaats vanaf 10 juni 2001/neue haltestelle ab dem 10. Juni 2001/2" \
-e "s/tot 11 juni 2017/bis zum 11 Juni 2017/2" \
-e 's/zie/siehe/2' \
"$STATIONS_CSV"
# Note:
#
# Equivalence of HSL/LGV ?
#
# NBS for "Neubaustrecke" is usually used which corresponds literally to
# "new line" : https://de.wikipedia.org/wiki/Neubaustrecke
#
# NBS is not strictly synonymous with LGV but will be easier to read
# than "Hochgeschwindigkeitslinie" fully spelled out.
| linuxunderground/be-rail | scripts/nl2de_stations.sh | Shell | gpl-3.0 | 1,670 |
#!/bin/bash
BUTTERFLY_BUILD_ROOT=$1
BUTTERFLY_SRC_ROOT=$(cd "$(dirname $0)/../.." && pwd)
source $BUTTERFLY_SRC_ROOT/tests/functions.sh
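# Scenario sketch (assumed semantics of the sourced helpers): three VMs on VLAN 42,
# NICs 1 and 2 in security group sg-1, NIC 3 in sg-2; UDP reachability on port 8000
# is checked as rules are added and a member/NIC is later removed.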
network_connect 0 1
server_start 0
nic_add 0 1 42 sg-1
nic_add 0 2 42 sg-1
nic_add 0 3 42 sg-2
qemus_start 1 2 3
sg_member_add 0 sg-1 42.0.0.1
sg_member_add 0 sg-1 42.0.0.2
sg_member_add 0 sg-2 42.0.0.3
sg_rule_add_with_sg_member udp sg-1 0 8000 sg-2
ssh_connection_test udp 3 1 8000
ssh_connection_test udp 3 2 8000
ssh_no_connection_test udp 1 3 8000
ssh_no_connection_test udp 1 3 8000
ssh_no_connection_test udp 1 2 8000
ssh_no_connection_test udp 2 1 8000
sg_rule_add_with_sg_member udp sg-1 0 8000 sg-1
ssh_connection_test udp 3 1 8000
ssh_connection_test udp 3 2 8000
ssh_no_connection_test udp 1 3 8000
ssh_no_connection_test udp 2 3 8000
ssh_connection_test udp 1 2 8000
ssh_connection_test udp 2 1 8000
remove_sg_from_nic 0 2
sg_member_del 0 sg-1 42.0.0.2
ssh_connection_test udp 3 1 8000
ssh_no_connection_test udp 3 2 8000
ssh_no_connection_test udp 1 3 8000
ssh_no_connection_test udp 2 3 8000
ssh_no_connection_test udp 1 2 8000
ssh_no_connection_test udp 2 1 8000
qemus_stop 1 2 3
server_stop 0
network_disconnect 0 1
return_result
| outscale-jju/butterfly | tests/scenario_29/test.sh | Shell | gpl-3.0 | 1,189 |
#!/bin/bash
if [ $# -lt 1 ]; then
echo "Error, usage: $0 <your url>"
exit 1
fi
url=$1
# curl
short=`curl -s http://t34.me/api/?u=${url}`
echo $short
# wget
short=`wget -qO- http://t34.me/api/?u=${url}`
echo $short
exit 0
| z0rr0/t34.me | configs/api_sh.sh | Shell | agpl-3.0 | 231 |
#!/bin/bash
getargbool 1 rd.brltty && getargbool 1 rd.brltty.bluetooth && {
systemctl -q is-active bluetooth || {
systemctl --no-block start bluetooth
}
}
| brltty/brltty | Initramfs/Dracut/bluetooth-start.sh | Shell | lgpl-2.1 | 168 |
#!/bin/bash
#1 : number of companies
#2 : number of control relations
#3 : number of products
if [[ $# -lt 3 ]]; then
echo "Error: Script expects 3 parameter"
exit 1;
fi
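# Emit random "controlled_by", "produced_by" and "company" facts; company indices
# are drawn uniformly from 1..$1 (the number of companies).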
for (( i=1; i <= $2; i++ ))
do
j1=$(( ($RANDOM%$1)+1 ))
j2=$(( ($RANDOM%$1)+1 ))
j3=$(( ($RANDOM%$1)+1 ))
j4=$(( ($RANDOM%$1)+1 ))
j5=$(( ($RANDOM%$1)+1 ))
echo "controlled_by(c$j1,c$j2,c$j3,c$j4,c$j5)."
done
for (( i=1; i <= $3; i++ ))
do
j1=$(( ($RANDOM%$1)+1 ))
j2=$(( ($RANDOM%$1)+1 ))
j3=$(( ($RANDOM%$1)+1 ))
j4=$(( ($RANDOM%$1)+1 ))
echo "produced_by(p$i,c$j1,c$j2,c$j3,c$j4)."
done
for (( i=1; i <= $1; i++ ))
do
echo "company(c$i)."
done
| hexhex/core | benchmarks/strategiccompanies-extcontrl/generate_instance.sh | Shell | lgpl-2.1 | 636 |
#!/bin/bash
cd cmn
echo "cmn: npm install "
npm install
cd ..
echo "-----------"
cd app
echo "app: npm install "
npm install
cd ..
echo "-----------"
cd web
echo "web: npm install "
npm install
cd ..
| wzx-xle/PictureSpider | init.sh | Shell | lgpl-2.1 | 202 |
#! /usr/bin/env bash
$XGETTEXT `find -name \*.cpp -o -name \*.h` -o $podir/plasma_applet_gluonplayer.pot
| KDE/gluon | apps/plasmoid/Messages.sh | Shell | lgpl-2.1 | 105 |
CFLAGS=""
LDFLAGS="-static-libgcc -static"
#EXTRA_CFLAGS="-march=armv7-a -mfpu=neon -mfloat-abi=softfp -mvectorize-with-neon-quad"
EXTRA_CFLAGS="-O2"
EXTRA_LDFLAGS="-static-libgcc -static"
FFMPEG_FLAGS="--prefix=/home/captain/ffmpeg/ffmpeg_win64 \
--target-os=mingw32 \
--arch=x86_64 \
--enable-cross-compile \
--cross-prefix=x86_64-w64-mingw32- \
--pkg-config=pkg-config \
--enable-memalign-hack \
--enable-shared \
--disable-symver \
--disable-doc \
--disable-ffplay \
--disable-ffmpeg \
--disable-ffprobe \
--disable-ffserver \
--disable-avdevice \
--disable-avfilter \
--disable-encoders \
--disable-muxers \
--disable-filters \
--disable-devices \
--disable-everything \
--enable-protocols \
--enable-parsers \
--enable-demuxers \
--disable-demuxer=sbg \
--enable-decoders \
--enable-bsfs \
--enable-network \
--enable-swscale \
--enable-asm \
--enable-version3"
../ffmpeg/configure $FFMPEG_FLAGS --extra-cflags="$CFLAGS $EXTRA_CFLAGS" --extra-ldflags="$EXTRA_LDFLAGS"
| captain-mayhem/captainsengine | Adventure/AdvEngine/Engine/ffmpeg/build/build_win32/build_win64.sh | Shell | lgpl-2.1 | 1,043 |
luarocks install luasec OPENSSL_LIBDIR=/usr/lib/x86_64-linux-gnu --local
luarocks install busted --local
luarocks install luaposix --local
luarocks install luacov --local
luarocks install luacov-coveralls --local
| ld-test/lua-rote | .travis/install_rocks.sh | Shell | lgpl-2.1 | 213 |
#!/bin/sh
OPENSSL_VERSION=1.0.2h
WORKSPACE=$(pwd)
INSTALL_DIR="${WORKSPACE}/MacOSX/x86_64"
mkdir -p $INSTALL_DIR
if [ ! -e "openssl-${OPENSSL_VERSION}-x86_64" ]; then
tar -zxvf openssl-${OPENSSL_VERSION}.tar.gz
mv openssl-${OPENSSL_VERSION} openssl-${OPENSSL_VERSION}-x86_64
fi
cd openssl-${OPENSSL_VERSION}-x86_64
./Configure \
--prefix=${INSTALL_DIR} \
no-shared \
no-zlib \
no-hw \
no-asm \
no-dso \
no-krb5 \
darwin64-x86_64-cc \
| tee ${INSTALL_DIR}/configuration.txt || exit 1
make clean
make -j4 || exit 1
make install || exit 1
| WadeHsiao/B | 3rd/openssl/build-openssl-for-osx-x86_64.sh | Shell | lgpl-3.0 | 555 |
#!/bin/bash
helm package continuous-delivery/jenkins-agent/
helm package continuous-delivery/nexus3/
helm package continuous-delivery/sonar/
helm package continuous-delivery/sonardb/
helm package continuous-delivery/jenkins/
mv *.tgz docs/
helm repo index docs
git add -u
git add docs
git commit --amend
git push -f
helm repo update
sleep 2
helm repo update
sleep 2
helm repo update
sleep 2
helm repo update
#helm dependency build continuous-delivery/jenkins
# helm package continuous-delivery/jenkins
# mv *.tgz docs/
# helm repo index docs
# git add -u
# git commit --amend
# git push -f
# helm repo update
| hellgate75/continuous-delivery-charts | build-all-charts.sh | Shell | lgpl-3.0 | 609 |
#!/usr/bin/env bash
# builds the cmake files into the build directory
set -e
if [ -d "./build/debug/" ]; then
echo "Debug build found, cleaning up..."
cd ./build/
rm -rf -- debug/
mkdir debug
cd debug
cmake -DCMAKE_BUILD_TYPE=DEBUG ../../
else
echo "Debug build not found, making directory..."
mkdir -p ./build/debug/
cd ./build/debug/
cmake -DCMAKE_BUILD_TYPE=DEBUG ../../
fi
| maldworth/gphoto2pp | cmake_debug.sh | Shell | lgpl-3.0 | 390 |
#!/usr/bin/env bash
if ! command -v svgo >/dev/null
then
echo "Please install svgo: npm install svgo"
exit 1
fi
if ! command -v compare >/dev/null
then
echo "Please install compare"
exit 1
fi
# regarding convertStyleToAttrs, see: https://github.com/svg/svgo/issues/489
# regarding convertPathData, see: https://github.com/svg/svgo/issues/490
ARGS="--pretty --disable=convertStyleToAttrs --disable=convertPathData"
function generatePng {
inkscape -z -D $1 --export-png=$2 --export-width=200 --export-background=transparent > /dev/null
}
# args: pngA pngB final.svg temp.svg
function evaluateOptimization {
# that regex is to just take A from "A (B)"
res=`compare -metric MAE $1 $2 /dev/null 2>&1 | sed "s/^\\([0-9]*\\).*/\\1/"` #-fuzz 5
if [ ! -z "$res" ]; then
if [ $res -gt 100 ]; then
echo "huuuuge difference of $res in $3"
else
mv $4 $3
fi
fi
rm -f $4
}
find Hawaii -name "*.svg" -size 4k -print0 | while IFS= read -r -d '' file
do
echo "doing... $file"
generatePng "$file" /tmp/A.png
svgo -i "$file" -o "$file".tmp.svg $ARGS
generatePng "$file".tmp.svg /tmp/B.png
evaluateOptimization /tmp/A.png /tmp/B.png "$file" "$file".tmp.svg
done
find Hawaii -name "*.svgz" -print0 | while IFS= read -r -d '' file
do
echo "z-doing... $file"
generatePng "$file" /tmp/A.png
gunzip "$file" -S .svgz -c | svgo -i - $ARGS | gzip -c > "$file".tmp.svgz
generatePng "$file".tmp.svgz /tmp/B.png
evaluateOptimization /tmp/A.png /tmp/B.png "$file" "$file".tmp.svgz
done
| hawaii-desktop/hawaii-icon-theme | optimize-svg.sh | Shell | lgpl-3.0 | 1,588 |
#!/bin/bash
set -ex -o pipefail
whereami="$(cd "$(dirname $(readlink -f "$0"))" && pwd -P)"
BUILD_ENV_PATH=${1:?"ERROR: env file is not given."}
if [[ -n "${BUILD_ENV_PATH}" && ! -f "${BUILD_ENV_PATH}" ]]; then
echo "ERROR: Can't find the file: ${BUILD_ENV_PATH}" >&2
exit 1
fi
set -a
. ${BUILD_ENV_PATH}
set +a
DATA_DIR="${DATA_DIR:-/data}"
CACHE_DIR="/data/openvnet-ci/el7/branches"
repo_and_tag="openvnet/integration-test/el7:${BRANCH}.${RELEASE_SUFFIX}"
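# On exit: remove the test container (unless LEAVE_CONTAINER is set) and give the
# cache directory back to the invoking user.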
function cleanup () {
[[ -z "${CID}" ]] && return 0
[[ -z "${LEAVE_CONTAINER}" || "${LEAVE_CONTAINER}" == "0" ]] && {
sudo docker rm -f "${CID}"
} || { echo "Skip container cleanup for: ${CID}" ; }
local user=$(/usr/bin/id -run)
sudo chown -R $user:$user "${CACHE_DIR}"/"${BRANCH}"
}
trap "cleanup" EXIT
echo "COMMIT_ID=$(git rev-parse HEAD)" >> ${BUILD_ENV_PATH}
sudo docker build -t "${repo_and_tag}" \
--build-arg BRANCH="${BRANCH}" \
--build-arg RELEASE_SUFFIX="${RELEASE_SUFFIX}" \
--build-arg BUILD_OS="${BUILD_OS}" \
--build-arg REBUILD="${REBUILD}" -f "./ci/ci.el7/integration_test/Dockerfile" .
CID=$(sudo docker run --privileged -v "${DATA_DIR}":/data ${BUILD_ENV_PATH:+--env-file $BUILD_ENV_PATH} -d "${repo_and_tag}")
sudo docker attach $CID
| axsh/openvnet | ci/ci.el7/integration_test/build_and_run_in_docker.sh | Shell | lgpl-3.0 | 1,276 |
#!/bin/bash
# NTSC 720x486 in columns 88x27
DIRNAME=$(date | tr " " "_")
mkdir -p $DIRNAME
create_video() {
echo "Putting together images ... "
echo "Please wait ..."
convert -antialias -resize 720x486! -delay 60 $DIRNAME/* CAPTURE_$DIRNAME.gif
echo "Done!"
echo "Cleaning up images ..."
du -h $DIRNAME
rm -rf $DIRNAME
exit 0
}
trap create_video SIGINT SIGTERM
echo "Recording ..."
i=0
while [ 1 ]
do
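# Zero-pad the frame counter: add 10000 and strip the leading "1" so the capture
# files sort correctly (cap_0000.png, cap_0001.png, ...).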
x=$((i+10000));
import -frame -window root "$DIRNAME/cap_${x/1/}.png"
i=$(( $i + 1 ))
# sleep 2s
done
| alejandrogallo/dotfiles | bin/record_screen.sh | Shell | unlicense | 531 |
#Need to find a better way of doing this...
#https://forums.mozilla.org/viewtopic.php?p=33582&sid=f87500f0267f160fb0389b26636bf131
firefox\
https://addons.mozilla.org/firefox/downloads/latest/tab-for-a-cause/\
https://addons.mozilla.org/firefox/downloads/latest/ecosia-the-green-search/\
https://addons.mozilla.org/firefox/downloads/latest/ecolink/\
https://addons.mozilla.org/firefox/downloads/latest/calomel-ssl-validation/\
https://addons.mozilla.org/firefox/downloads/latest/wot-safe-browsing-tool/\
https://addons.mozilla.org/firefox/downloads/latest/flagfox/\
https://addons.mozilla.org/firefox/downloads/latest/greasemonkey/\
https://addons.mozilla.org/firefox/downloads/latest/stylish/\
https://addons.mozilla.org/firefox/downloads/latest/ublock/\
https://addons.mozilla.org/firefox/downloads/latest/cleantube/\
https://addons.mozilla.org/firefox/downloads/latest/bloody-vikings/\
https://addons.mozilla.org/firefox/downloads/latest/imacros-for-firefox/\
;
| Sharminator/Tux | FEZ.sh | Shell | unlicense | 1,013 |
#!/bin/bash
#
# $Id: userid-patch-controller.sh 3.1 2017-10-21 17:04:25 rob.navarro $
#
# patch the controller.sh script to:
# 1. reduce failures around unexpected/root file ownership
#
# Copyright 2017 AppDynamics, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
export PATH=/bin:/usr/bin:/sbin:/usr/sbin
cd $(dirname $0)
CONTR_SH=../bin/controller.sh
CONTR_TMP=../bin/controller.sh.userid-patch-tmp
CONTR_SAVE=../bin/controller.sh.pre-userid-patch
###########################################################
# Main body
###########################################################
# first, make a copy
cp $CONTR_SH $CONTR_TMP
#
# check if controller.sh already includes userID patches
#
if ! grep -q "added by userid-patch-controller.sh" $CONTR_TMP ; then
ex -s $CONTR_TMP <<- 'ENDOFTEXT'
/^#######################/
a
#### added by userid-patch-controller.sh ####
embed check_for_root_files.sh
#### end addition ####
.
/^_stopControllerAppServer/
+
a
#### edited by userid-patch-controller.sh ####
# stop early if insufficient permissions to stop Glassfish
checkIfWrongUser || exit 1
#### end edit ####
.
/^_startControllerAppServer/
+
a
#### edited by userid-patch-controller.sh ####
warnIfBadFileOwnership
warnIfDifferentEUID
#### end edit ####
.
w
q
ENDOFTEXT
err=$?
fi
if cmp -s $CONTR_SH $CONTR_TMP ; then
echo controller.sh already patched for userid issues
rm $CONTR_TMP
else
echo controller.sh patched for userid issues
mv $CONTR_SH $CONTR_SAVE
mv $CONTR_TMP $CONTR_SH
fi
| Appdynamics/HA-toolkit | userid-patch-controller.sh | Shell | apache-2.0 | 2,022 |
#!/usr/bin/env bash
echo 'Loading instrumentation javaagent '
# add required jars to the classpath based on the folder location
# wso2 product : $CARBON_HOME/lib/javaagent/*.jar
# other product : path/to/javaagent/lib/*.jar
# Eg: export CARBON_CLASSPATH="$CARBON_CLASSPATH":"$(echo $CARBON_HOME/lib/javaagent/*.jar | tr ' ' ':')"
export CARBON_CLASSPATH="$CARBON_CLASSPATH":"$(echo $CARBON_HOME/lib/javaagent/*.jar | tr ' ' ':')"
# pass product type and path of configuration files to the agent as arguments
# (arguments in [carbon_product,config_file_path] order separated by ',')
# wso2 product : true
# other product : false,path/to/config/file/folder/
# Eg: export JAVA_OPTS="$JAVA_OPTS -javaagent:/path/to/instrumentation-agent-1.0-SNAPSHOT.jar=false,path/to/config/folder/"
# content of configuration folder
# [data-agent-config.xml, inst-agent-config.xml, log4j.properties, client-trustore.jks]
export JAVA_OPTS="$JAVA_OPTS -javaagent:/path/to/agent/instrumentation-agent-1.0-SNAPSHOT.jar=true"
| wso2/analytics-data-agents | java-instrumentation-agent/conf/java-agent.sh | Shell | apache-2.0 | 1,011 |
#!/bin/bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Script that creates a Kubemark cluster for any given cloud provider.
TMP_ROOT="$(dirname "${BASH_SOURCE}")/../.."
KUBE_ROOT=$(readlink -e ${TMP_ROOT} 2> /dev/null || perl -MCwd -e 'print Cwd::abs_path shift' ${TMP_ROOT})
source "${KUBE_ROOT}/test/kubemark/skeleton/util.sh"
source "${KUBE_ROOT}/test/kubemark/cloud-provider-config.sh"
source "${KUBE_ROOT}/test/kubemark/${CLOUD_PROVIDER}/util.sh"
source "${KUBE_ROOT}/cluster/kubemark/${CLOUD_PROVIDER}/config-default.sh"
source "${KUBE_ROOT}/cluster/kubemark/util.sh"
# hack/lib/init.sh will overwrite ETCD_VERSION if it is unset
# with what is the default in hack/lib/etcd.sh.
# To avoid it, if it is empty, we set it to 'avoid-overwrite' and
# clean it after that.
if [ -z "${ETCD_VERSION:-}" ]; then
ETCD_VERSION="avoid-overwrite"
fi
source "${KUBE_ROOT}/hack/lib/init.sh"
if [ "${ETCD_VERSION:-}" == "avoid-overwrite" ]; then
ETCD_VERSION=""
fi
KUBECTL="${KUBE_ROOT}/cluster/kubectl.sh"
KUBEMARK_DIRECTORY="${KUBE_ROOT}/test/kubemark"
RESOURCE_DIRECTORY="${KUBEMARK_DIRECTORY}/resources"
# Write all environment variables that we need to pass to the kubemark master,
# locally to the file ${RESOURCE_DIRECTORY}/kubemark-master-env.sh.
function create-master-environment-file {
cat > "${RESOURCE_DIRECTORY}/kubemark-master-env.sh" <<EOF
# Generic variables.
INSTANCE_PREFIX="${INSTANCE_PREFIX:-}"
SERVICE_CLUSTER_IP_RANGE="${SERVICE_CLUSTER_IP_RANGE:-}"
# Etcd related variables.
ETCD_IMAGE="${ETCD_IMAGE:-3.0.14-alpha.1}"
ETCD_VERSION="${ETCD_VERSION:-}"
# Controller-manager related variables.
CONTROLLER_MANAGER_TEST_ARGS="${CONTROLLER_MANAGER_TEST_ARGS:-}"
ALLOCATE_NODE_CIDRS="${ALLOCATE_NODE_CIDRS:-}"
CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-}"
TERMINATED_POD_GC_THRESHOLD="${TERMINATED_POD_GC_THRESHOLD:-}"
# Scheduler related variables.
SCHEDULER_TEST_ARGS="${SCHEDULER_TEST_ARGS:-}"
# Apiserver related variables.
APISERVER_TEST_ARGS="${APISERVER_TEST_ARGS:-}"
STORAGE_BACKEND="${STORAGE_BACKEND:-}"
NUM_NODES="${NUM_NODES:-}"
CUSTOM_ADMISSION_PLUGINS="${CUSTOM_ADMISSION_PLUGINS:-NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota}"
EOF
echo "Created the environment file for master."
}
# Generate certs/keys for CA, master, kubelet and kubecfg, and tokens for kubelet
# and kubeproxy.
function generate-pki-config {
ensure-temp-dir
gen-kube-bearertoken
gen-kube-basicauth
create-certs ${MASTER_IP}
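# Each token below is 32 random base64 characters read from /dev/urandom, with the
# padding characters (=+/) stripped.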
KUBELET_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
KUBE_PROXY_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
NODE_PROBLEM_DETECTOR_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
HEAPSTER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
echo "Generated PKI authentication data for kubemark."
}
# Wait for the master to be reachable for executing commands on it. We do this by
# trying to run the bash noop(:) on the master, with 10 retries.
function wait-for-master-reachability {
execute-cmd-on-master-with-retries ":" 10
echo "Checked master reachability for remote command execution."
}
# Write all the relevant certs/keys/tokens to the master.
function write-pki-config-to-master {
PKI_SETUP_CMD="sudo mkdir /home/kubernetes -p && sudo mkdir /etc/srv/kubernetes -p && \
sudo bash -c \"echo ${CA_CERT_BASE64} | base64 --decode > /etc/srv/kubernetes/ca.crt\" && \
sudo bash -c \"echo ${MASTER_CERT_BASE64} | base64 --decode > /etc/srv/kubernetes/server.cert\" && \
sudo bash -c \"echo ${MASTER_KEY_BASE64} | base64 --decode > /etc/srv/kubernetes/server.key\" && \
sudo bash -c \"echo ${KUBECFG_CERT_BASE64} | base64 --decode > /etc/srv/kubernetes/kubecfg.crt\" && \
sudo bash -c \"echo ${KUBECFG_KEY_BASE64} | base64 --decode > /etc/srv/kubernetes/kubecfg.key\" && \
sudo bash -c \"echo \"${KUBE_BEARER_TOKEN},admin,admin\" > /etc/srv/kubernetes/known_tokens.csv\" && \
sudo bash -c \"echo \"${KUBELET_TOKEN},system:node:node-name,uid:kubelet,system:nodes\" >> /etc/srv/kubernetes/known_tokens.csv\" && \
sudo bash -c \"echo \"${KUBE_PROXY_TOKEN},system:kube-proxy,uid:kube_proxy\" >> /etc/srv/kubernetes/known_tokens.csv\" && \
sudo bash -c \"echo \"${HEAPSTER_TOKEN},system:heapster,uid:heapster\" >> /etc/srv/kubernetes/known_tokens.csv\" && \
sudo bash -c \"echo \"${NODE_PROBLEM_DETECTOR_TOKEN},system:node-problem-detector,uid:system:node-problem-detector\" >> /etc/srv/kubernetes/known_tokens.csv\" && \
sudo bash -c \"echo ${KUBE_PASSWORD},admin,admin > /etc/srv/kubernetes/basic_auth.csv\""
execute-cmd-on-master-with-retries "${PKI_SETUP_CMD}" 3
echo "Wrote PKI certs, keys, tokens and admin password to master."
}
# Copy all the necessary resource files (scripts/configs/manifests) to the master.
function copy-resource-files-to-master {
copy-files \
"${SERVER_BINARY_TAR}" \
"${RESOURCE_DIRECTORY}/kubemark-master-env.sh" \
"${RESOURCE_DIRECTORY}/start-kubemark-master.sh" \
"${KUBEMARK_DIRECTORY}/configure-kubectl.sh" \
"${RESOURCE_DIRECTORY}/manifests/etcd.yaml" \
"${RESOURCE_DIRECTORY}/manifests/etcd-events.yaml" \
"${RESOURCE_DIRECTORY}/manifests/kube-apiserver.yaml" \
"${RESOURCE_DIRECTORY}/manifests/kube-scheduler.yaml" \
"${RESOURCE_DIRECTORY}/manifests/kube-controller-manager.yaml" \
"${RESOURCE_DIRECTORY}/manifests/kube-addon-manager.yaml" \
"${RESOURCE_DIRECTORY}/manifests/addons/kubemark-rbac-bindings" \
"kubernetes@${MASTER_NAME}":/home/kubernetes/
echo "Copied server binary, master startup scripts, configs and resource manifests to master."
}
# Make startup scripts executable and run start-kubemark-master.sh.
function start-master-components {
echo ""
MASTER_STARTUP_CMD="sudo chmod a+x /home/kubernetes/configure-kubectl.sh && \
sudo chmod a+x /home/kubernetes/start-kubemark-master.sh && \
sudo bash /home/kubernetes/start-kubemark-master.sh"
execute-cmd-on-master-with-retries "${MASTER_STARTUP_CMD}"
echo "The master has started and is now live."
}
# Write kubeconfig to ${RESOURCE_DIRECTORY}/kubeconfig.kubemark in order to
# use kubectl locally.
function write-local-kubeconfig {
LOCAL_KUBECONFIG="${RESOURCE_DIRECTORY}/kubeconfig.kubemark"
cat > "${LOCAL_KUBECONFIG}" << EOF
apiVersion: v1
kind: Config
users:
- name: kubecfg
user:
client-certificate-data: "${KUBECFG_CERT_BASE64}"
client-key-data: "${KUBECFG_KEY_BASE64}"
username: admin
password: admin
clusters:
- name: kubemark
cluster:
certificate-authority-data: "${CA_CERT_BASE64}"
server: https://${MASTER_IP}
contexts:
- context:
cluster: kubemark
user: kubecfg
name: kubemark-context
current-context: kubemark-context
EOF
echo "Kubeconfig file for kubemark master written to ${LOCAL_KUBECONFIG}."
}
# Finds the right kubemark binary for 'linux/amd64' platform and uses it to
# create a docker image for hollow-node and upload it to the appropriate
# docker container registry for the cloud provider.
function create-and-upload-hollow-node-image {
MAKE_DIR="${KUBE_ROOT}/cluster/images/kubemark"
KUBEMARK_BIN="$(kube::util::find-binary-for-platform kubemark linux/amd64)"
if [[ -z "${KUBEMARK_BIN}" ]]; then
echo 'Cannot find cmd/kubemark binary'
exit 1
fi
echo "Copying kubemark binary to ${MAKE_DIR}"
cp "${KUBEMARK_BIN}" "${MAKE_DIR}"
CURR_DIR=`pwd`
cd "${MAKE_DIR}"
RETRIES=3
for attempt in $(seq 1 ${RETRIES}); do
if ! REGISTRY="${CONTAINER_REGISTRY}" PROJECT="${PROJECT}" make "${KUBEMARK_IMAGE_MAKE_TARGET}"; then
if [[ $((attempt)) -eq "${RETRIES}" ]]; then
echo "${color_red}Make failed. Exiting.${color_norm}"
exit 1
fi
echo -e "${color_yellow}Make attempt $(($attempt)) failed. Retrying.${color_norm}" >& 2
sleep $(($attempt * 5))
else
break
fi
done
rm kubemark
cd $CURR_DIR
echo "Created and uploaded the kubemark hollow-node image to docker registry."
}
# Generate secret and configMap for the hollow-node pods to work, prepare
# manifests of the hollow-node and heapster replication controllers from
# templates, and finally create these resources through kubectl.
function create-kube-hollow-node-resources {
# Create kubeconfig for Kubelet.
KUBELET_KUBECONFIG_CONTENTS=$(echo "apiVersion: v1
kind: Config
users:
- name: kubelet
user:
client-certificate-data: "${KUBELET_CERT_BASE64}"
client-key-data: "${KUBELET_KEY_BASE64}"
clusters:
- name: kubemark
cluster:
certificate-authority-data: "${CA_CERT_BASE64}"
server: https://${MASTER_IP}
contexts:
- context:
cluster: kubemark
user: kubelet
name: kubemark-context
current-context: kubemark-context")
# Create kubeconfig for Kubeproxy.
KUBEPROXY_KUBECONFIG_CONTENTS=$(echo "apiVersion: v1
kind: Config
users:
- name: kube-proxy
user:
token: ${KUBE_PROXY_TOKEN}
clusters:
- name: kubemark
cluster:
insecure-skip-tls-verify: true
server: https://${MASTER_IP}
contexts:
- context:
cluster: kubemark
user: kube-proxy
name: kubemark-context
current-context: kubemark-context")
# Create kubeconfig for Heapster.
HEAPSTER_KUBECONFIG_CONTENTS=$(echo "apiVersion: v1
kind: Config
users:
- name: heapster
user:
token: ${HEAPSTER_TOKEN}
clusters:
- name: kubemark
cluster:
insecure-skip-tls-verify: true
server: https://${MASTER_IP}
contexts:
- context:
cluster: kubemark
user: heapster
name: kubemark-context
current-context: kubemark-context")
# Create kubeconfig for NodeProblemDetector.
NPD_KUBECONFIG_CONTENTS=$(echo "apiVersion: v1
kind: Config
users:
- name: node-problem-detector
user:
token: ${NODE_PROBLEM_DETECTOR_TOKEN}
clusters:
- name: kubemark
cluster:
insecure-skip-tls-verify: true
server: https://${MASTER_IP}
contexts:
- context:
cluster: kubemark
user: node-problem-detector
name: kubemark-context
current-context: kubemark-context")
# Create kubemark namespace.
"${KUBECTL}" create -f "${RESOURCE_DIRECTORY}/kubemark-ns.json"
# Create configmap for configuring hollow- kubelet, proxy and npd.
"${KUBECTL}" create configmap "node-configmap" --namespace="kubemark" \
--from-literal=content.type="${TEST_CLUSTER_API_CONTENT_TYPE}" \
--from-file=kernel.monitor="${RESOURCE_DIRECTORY}/kernel-monitor.json"
# Create secret for passing kubeconfigs to kubelet, kubeproxy and npd.
"${KUBECTL}" create secret generic "kubeconfig" --type=Opaque --namespace="kubemark" \
--from-literal=kubelet.kubeconfig="${KUBELET_KUBECONFIG_CONTENTS}" \
--from-literal=kubeproxy.kubeconfig="${KUBEPROXY_KUBECONFIG_CONTENTS}" \
--from-literal=heapster.kubeconfig="${HEAPSTER_KUBECONFIG_CONTENTS}" \
--from-literal=npd.kubeconfig="${NPD_KUBECONFIG_CONTENTS}"
# Create addon pods.
mkdir -p "${RESOURCE_DIRECTORY}/addons"
sed "s/{{MASTER_IP}}/${MASTER_IP}/g" "${RESOURCE_DIRECTORY}/heapster_template.json" > "${RESOURCE_DIRECTORY}/addons/heapster.json"
metrics_mem_per_node=4
metrics_mem=$((200 + ${metrics_mem_per_node}*${NUM_NODES:-10}))
sed -i'' -e "s/{{METRICS_MEM}}/${metrics_mem}/g" "${RESOURCE_DIRECTORY}/addons/heapster.json"
eventer_mem_per_node=500
eventer_mem=$((200 * 1024 + ${eventer_mem_per_node}*${NUM_NODES:-10}))
sed -i'' -e "s/{{EVENTER_MEM}}/${eventer_mem}/g" "${RESOURCE_DIRECTORY}/addons/heapster.json"
"${KUBECTL}" create -f "${RESOURCE_DIRECTORY}/addons" --namespace="kubemark"
# Create the replication controller for hollow-nodes.
sed "s/{{numreplicas}}/${NUM_NODES:-10}/g" "${RESOURCE_DIRECTORY}/hollow-node_template.json" > "${RESOURCE_DIRECTORY}/hollow-node.json"
proxy_cpu=20
if [ "${NUM_NODES:-10}" -gt 1000 ]; then
proxy_cpu=40
fi
proxy_mem_per_node=100
proxy_mem=$((100 * 1024 + ${proxy_mem_per_node}*${NUM_NODES:-10}))
sed -i'' -e "s/{{HOLLOW_PROXY_CPU}}/${proxy_cpu}/g" "${RESOURCE_DIRECTORY}/hollow-node.json"
sed -i'' -e "s/{{HOLLOW_PROXY_MEM}}/${proxy_mem}/g" "${RESOURCE_DIRECTORY}/hollow-node.json"
sed -i'' -e "s/{{registry}}/${CONTAINER_REGISTRY}/g" "${RESOURCE_DIRECTORY}/hollow-node.json"
sed -i'' -e "s/{{project}}/${PROJECT}/g" "${RESOURCE_DIRECTORY}/hollow-node.json"
sed -i'' -e "s/{{master_ip}}/${MASTER_IP}/g" "${RESOURCE_DIRECTORY}/hollow-node.json"
"${KUBECTL}" create -f "${RESOURCE_DIRECTORY}/hollow-node.json" --namespace="kubemark"
echo "Created secrets, configMaps, replication-controllers required for hollow-nodes."
}
# Wait until all hollow-nodes are running or there is a timeout.
function wait-for-hollow-nodes-to-run-or-timeout {
echo -n "Waiting for all hollow-nodes to become Running"
start=$(date +%s)
nodes=$("${KUBECTL}" --kubeconfig="${LOCAL_KUBECONFIG}" get node 2> /dev/null) || true
ready=$(($(echo "${nodes}" | grep -v "NotReady" | wc -l) - 1))
until [[ "${ready}" -ge "${NUM_NODES}" ]]; do
echo -n "."
sleep 1
now=$(date +%s)
# Fail it if it already took more than 30 minutes.
if [ $((now - start)) -gt 1800 ]; then
echo ""
echo -e "${color_red} Timeout waiting for all hollow-nodes to become Running. ${color_norm}"
# Try listing nodes again - if it fails it means that API server is not responding
if "${KUBECTL}" --kubeconfig="${LOCAL_KUBECONFIG}" get node &> /dev/null; then
echo "Found only ${ready} ready hollow-nodes while waiting for ${NUM_NODES}."
else
echo "Got error while trying to list hollow-nodes. Probably API server is down."
fi
pods=$("${KUBECTL}" get pods --namespace=kubemark) || true
running=$(($(echo "${pods}" | grep "Running" | wc -l)))
echo "${running} hollow-nodes are reported as 'Running'"
not_running=$(($(echo "${pods}" | grep -v "Running" | wc -l) - 1))
echo "${not_running} hollow-nodes are reported as NOT 'Running'"
echo $(echo "${pods}" | grep -v "Running")
exit 1
fi
nodes=$("${KUBECTL}" --kubeconfig="${LOCAL_KUBECONFIG}" get node 2> /dev/null) || true
ready=$(($(echo "${nodes}" | grep -v "NotReady" | wc -l) - 1))
done
echo -e "${color_green} Done!${color_norm}"
}
############################### Main Function ########################################
detect-project &> /dev/null
# Setup for master.
echo -e "${color_yellow}STARTING SETUP FOR MASTER${color_norm}"
find-release-tars
create-master-environment-file
create-master-instance-with-resources
generate-pki-config
wait-for-master-reachability
write-pki-config-to-master
copy-resource-files-to-master
start-master-components
# Setup for hollow-nodes.
echo ""
echo -e "${color_yellow}STARTING SETUP FOR HOLLOW-NODES${color_norm}"
write-local-kubeconfig
create-and-upload-hollow-node-image
create-kube-hollow-node-resources
wait-for-hollow-nodes-to-run-or-timeout
echo ""
echo "Master IP: ${MASTER_IP}"
echo "Password to kubemark master: ${KUBE_PASSWORD}"
echo "Kubeconfig for kubemark master is written in ${LOCAL_KUBECONFIG}"
| hpcloud/kubernetes | test/kubemark/start-kubemark.sh | Shell | apache-2.0 | 15,637 |
#!/bin/bash
# Entrypoint script for fakes3
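# FAKES3_BUCKETS is expected to hold a comma-separated list of bucket names,
# e.g. FAKES3_BUCKETS=uploads,assets,logs (names here are only illustrative).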
if [[ -n $FAKES3_BUCKETS ]]; then
IFS=','; for bucket in $FAKES3_BUCKETS; do
mkdir -p /var/data/fakes3/$bucket
done
fi
exec "$@"
| jusbrasil/docker-fakes3 | entrypoint.sh | Shell | apache-2.0 | 181 |
rm -rf bin/*
fsc -d bin src/main/scala/cs220/*.scala
| umass-cs-220/week-03-programming-paradigms | code/scala-checksum-app/build.sh | Shell | apache-2.0 | 53 |
#!/bin/bash
DAEMONIZED=false
WORKERS=6
for i in "$@"
do
case $i in
-d|--daemonized)
DAEMONIZED=true
shift # past argument=value
;;
-w=*|--workers=*)
WORKERS="${i#*=}"
shift # past argument=value
;;
-?|--help)
echo "USAGE: ./run_canary.sh -d -w=10"
echo "-d | --daemonized : run in daemonized mode"
echo "-w | --workers : the number of canary-worker processes to spawn (defaults to 6)"
exit
shift
;;
*)
echo "Invalid Options"
echo "Run ./run_canary.sh --help for valid parameters."
exit
# unknown option
;;
esac
done
# kill existing canaries
./kill_canary.sh
# remove existing containers
docker kill docker_cassandra_1
docker rm docker_cassandra_1
# run mongo
pip install docker-compose
docker-compose -f docker/dependencies.yml up -d
# start the canary workers
COUNTER=0
while [ $COUNTER -lt $WORKERS ]; do
exec canary-worker > /dev/null 2>&1 &
echo "canary-worker spawned."
let COUNTER=COUNTER+1
done
# start the canary producer
exec canary-producer > /dev/null 2>&1 &
# start the canary-server
exec canary-server > /dev/null 2>&1 &
| rackerlabs/canary | run_canary.sh | Shell | apache-2.0 | 1,194 |
#!/bin/bash
# Yet Another UserAgent Analyzer
# Copyright (C) 2013-2017 Niels Basjes
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
INPUT=OperatingSystemNames.csv
OUTPUT=../OperatingSystemNames.yaml
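# Expected input format, one entry per line: <tag>|<OS name>|<OS version>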
if [ "Generate.sh" -ot "${OUTPUT}" ]; then
if [ "${INPUT}" -ot "${OUTPUT}" ]; then
echo "${OUTPUT} is up to date";
exit;
fi
fi
echo "Generating ${OUTPUT}";
(
echo "# ============================================="
echo "# THIS FILE WAS GENERATED; DO NOT EDIT MANUALLY"
echo "# ============================================="
echo "#"
echo "# Yet Another UserAgent Analyzer"
echo "# Copyright (C) 2013-2017 Niels Basjes"
echo "#"
echo "# Licensed under the Apache License, Version 2.0 (the \"License\");"
echo "# you may not use this file except in compliance with the License."
echo "# You may obtain a copy of the License at"
echo "#"
echo "# http://www.apache.org/licenses/LICENSE-2.0"
echo "#"
echo "# Unless required by applicable law or agreed to in writing, software"
echo "# distributed under the License is distributed on an \"AS IS\" BASIS,"
echo "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied."
echo "# See the License for the specific language governing permissions and"
echo "# limitations under the License."
echo "#"
echo "config:"
echo "- lookup:"
echo " name: 'OperatingSystemName'"
echo " map:"
cat "OperatingSystemNames.csv" | grep . | fgrep -v '#' | while read line ; \
do
tag=$( echo ${line} | cut -d'|' -f1)
osname=$( echo ${line} | cut -d'|' -f2)
osversion=$( echo ${line} | cut -d'|' -f3)
echo " \"${tag}\" : \"${osname}\""
done
echo "- lookup:"
echo " name: 'OperatingSystemVersion'"
echo " map:"
cat "OperatingSystemNames.csv" | grep . | fgrep -v '#' | while read line ; \
do
tag=$( echo ${line} | cut -d'|' -f1)
osname=$( echo ${line} | cut -d'|' -f2)
osversion=$( echo ${line} | cut -d'|' -f3)
echo " \"${tag}\" : \"${osversion}\""
done
) > ${OUTPUT}
| Innometrics/yauaa | analyzer/src/main/resources/UserAgents/OSNames/Generate.sh | Shell | apache-2.0 | 2,488 |
#!/bin/bash
#
# Builds the project on MacOS
# (tested on Sierra 10.12.2)
#
rm -rf build bin lib
mkdir build
cd build
cmake -DCMAKE_BUILD_TYPE=Release ..
make
| bitbouncer/csi-avro-utils | rebuild_macos.sh | Shell | apache-2.0 | 159 |
#!/bin/bash
set -eux
if [ -z "$(which etcd)" ]; then
ETCD_VERSION=3.1.10
case `uname -s` in
Darwin)
OS=darwin
SUFFIX=zip
;;
Linux)
OS=linux
SUFFIX=tar.gz
;;
*)
echo "Unsupported OS"
exit 1
esac
case `uname -m` in
x86_64)
MACHINE=amd64
;;
*)
echo "Unsupported machine"
exit 1
esac
TARBALL_NAME=etcd-v${ETCD_VERSION}-$OS-$MACHINE
test ! -d "$TARBALL_NAME" && curl -L https://github.com/coreos/etcd/releases/download/v${ETCD_VERSION}/${TARBALL_NAME}.${SUFFIX} | tar xz
export PATH=$PATH:$TARBALL_NAME
fi
$*
| dims/etcd3-gateway | setup-etcd-env.sh | Shell | apache-2.0 | 725 |
#!/usr/bin/env bash
. .env/bin/activate
./envir_collector.py
| clinstid/energydash | start_envir_collector.sh | Shell | apache-2.0 | 61 |
#!/bin/bash
set -e
set -x
# ==========
# FOREMAN SMART PROXY
# ==========
cat <<EOF >> /etc/foreman-proxy/settings.yml
:tftp: true
:tftproot: /var/tftpboot
:tftp_servername: 192.168.0.1
:dns: true
:dns_provider: virsh
:dhcp: true
:dhcp_vendor: virsh
:virsh_network: default
EOF
| Vultour/homelab | foreman/proxy.sh | Shell | apache-2.0 | 280 |
perl $HOME/bin/ensembl-vep/vep \
--cache \
--cache_version 93 \
--dir /hps/nobackup2/production/ensembl/anja/vep/ \
--input_file /hps/nobackup2/production/ensembl/anja/vep_data/input/grch37/rachel_43_no_id_sorted.vcf.gz \
--output_file /hps/nobackup2/production/ensembl/anja/vep_data/output/coding_only.txt \
--force_overwrite \
--assembly GRCh37 \
--port 3337 \
--cache \
--symbol \
--coding_only \
#--no_intergenic \
#--dir_plugins $HOME/bin/VEP_plugins \
#--plugin G2P,file='/homes/anja/bin/work/vep/DDG2P_12_6_2018.csv.gz',af_from_vcf=1 \
#--transcript_filter "gene_symbol in /homes/anja/bin/work/vep/MED12.txt" \
| at7/work | vep/vep_cache_consequences_filter.sh | Shell | apache-2.0 | 619 |
#!/bin/bash -x
#
# Generated - do not edit!
#
# Macros
TOP=`pwd`
CND_PLATFORM=GNU-Linux
CND_CONF=Debug
CND_DISTDIR=dist
CND_BUILDDIR=build
CND_DLIB_EXT=so
NBTMPDIR=${CND_BUILDDIR}/${CND_CONF}/${CND_PLATFORM}/tmp-packaging
TMPDIRNAME=tmp-packaging
OUTPUT_PATH=${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/sokobanzai
OUTPUT_BASENAME=sokobanzai
PACKAGE_TOP_DIR=sokobanzai/
# Functions
function checkReturnCode
{
rc=$?
if [ $rc != 0 ]
then
exit $rc
fi
}
function makeDirectory
# $1 directory path
# $2 permission (optional)
{
mkdir -p "$1"
checkReturnCode
if [ "$2" != "" ]
then
chmod $2 "$1"
checkReturnCode
fi
}
function copyFileToTmpDir
# $1 from-file path
# $2 to-file path
# $3 permission
{
cp "$1" "$2"
checkReturnCode
if [ "$3" != "" ]
then
chmod $3 "$2"
checkReturnCode
fi
}
# Setup
cd "${TOP}"
mkdir -p ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package
rm -rf ${NBTMPDIR}
mkdir -p ${NBTMPDIR}
# Copy files and create directories and links
cd "${TOP}"
makeDirectory "${NBTMPDIR}/sokobanzai/bin"
copyFileToTmpDir "${OUTPUT_PATH}" "${NBTMPDIR}/${PACKAGE_TOP_DIR}bin/${OUTPUT_BASENAME}" 0755
# Generate tar file
cd "${TOP}"
rm -f ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package/sokobanzai.tar
cd ${NBTMPDIR}
tar -vcf ../../../../${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package/sokobanzai.tar *
checkReturnCode
# Cleanup
cd "${TOP}"
rm -rf ${NBTMPDIR}
| guiguyz/Sokobanzai | sokobanzai/nbproject/Package-Debug.bash | Shell | apache-2.0 | 1,457 |
#!/bin/bash
#
# This file is part of the KubeVirt project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2017 Red Hat, Inc.
#
# https://fedoraproject.org/wiki/Scsi-target-utils_Quickstart_Guide
PORT=${PORT:-3260}
WWN=${WWN:-iqn.2017-01.io.kubevirt:wrapper}
LUNID=1
IMAGE_NAME=$(ls -1 /disk/ | tail -n 1)
IMAGE_PATH=/disk/$IMAGE_NAME
if [ -n "$PASSWORD_BASE64" ]; then
PASSWORD=$(echo $PASSWORD_BASE64 | base64 -d)
fi
if [ -n "$USERNAME_BASE64" ]; then
USERNAME=$(echo $USERNAME_BASE64 | base64 -d)
fi
# If PASSWORD is provided, enable authentication features
authenticate=0
if [ -n "$PASSWORD" ]; then
authenticate=1
fi
if [ -z "$IMAGE_NAME" ] || ! [ -f "$IMAGE_PATH" ]; then
echo "vm image not found in /disk directory"
exit 1
fi
echo $IMAGE_NAME | grep -q "\.raw$"
if [ $? -ne 0 ]; then
/usr/bin/qemu-img convert $IMAGE_PATH /disk/image.raw
if [ $? -ne 0 ]; then
echo "Failed to convert image $IMAGE_PATH to .raw file"
exit 1
fi
IMAGE_PATH=/disk/image.raw
fi
# USING 'set -e' error detection for everything below this point.
set -e
echo "Starting tgtd at port $PORT"
tgtd -f --iscsi portal="0.0.0.0:${PORT}" &
sleep 5
echo "Adding target and exposing it"
tgtadm --lld iscsi --mode target --op new --tid=1 --targetname $WWN
tgtadm --lld iscsi --mode target --op bind --tid=1 -I ALL
if [ $authenticate -eq 1 ]; then
echo "Adding authentication for user $USERNAME"
tgtadm --lld iscsi --op new --mode account --user $USERNAME --password $PASSWORD
tgtadm --lld iscsi --op bind --mode account --tid=1 --user $USERNAME
fi
echo "Adding volume file as LUN"
tgtadm --lld iscsi --mode logicalunit --op new --tid=1 --lun=$LUNID -b $IMAGE_PATH
tgtadm --lld iscsi --mode logicalunit --op update --tid=1 --lun=$LUNID --params thin_provisioning=1
echo "Start monitoring"
touch /tmp/healthy
touch previous_state
while true ; do
tgtadm --lld iscsi --mode target --op show > current_state
diff -q previous_state current_state || ( date ; cat current_state ; )
mv -f current_state previous_state
sleep 5
done
| admiyo/kubevirt | cmd/registry-disk-v1alpha/entry-point.sh | Shell | apache-2.0 | 2,536 |
#!/bin/bash
alias demo_li_test="echo 'done'"
| banadiga/li-aliases | test/demo-li-test.sh | Shell | apache-2.0 | 45 |
#!/bin/sh
# subdirs="${HOME}/Development/${base}/ ${HOME}/.virtualenvs/${base}/src/${base}/"
for project in $(find -H -x ${HOME}/Development/ -type d -depth 1); do
base=$(basename ${project});
if git -C ${HOME}/Development/${base}/ rev-parse --git-dir > /dev/null 2>&1; then
echo '//////' ${base};
git -C ${HOME}/Development/${base}/ checkout -b himself -q > /dev/null 2>&1;
git -C ${HOME}/Development/${base}/ fetch;
git -C ${HOME}/Development/${base}/ rebase origin/master;
echo ${base} '//////';
echo;
fi
done;
for project in $(find -H -x ${HOME}/.virtualenvs/ -type d -depth 1); do
base=$(basename ${project});
if git -C ${HOME}/.virtualenvs/${base}/src/${base}/ rev-parse --git-dir > /dev/null 2>&1; then
echo '//////' ${base};
git -C ${HOME}/.virtualenvs/${base}/src/${base}/ checkout -b himself -q > /dev/null 2>&1;
git -C ${HOME}/.virtualenvs/${base}/src/${base}/ fetch;
git -C ${HOME}/.virtualenvs/${base}/src/${base}/ rebase origin/master;
echo ${base} '//////';
echo;
fi
done;
| sebastianseitz/dotfiles | bin/grb_all.sh | Shell | apache-2.0 | 1,111 |
# ----------------------------------------------------------------------------
#
# Package : gobuffalo/attrs
# Version : v0.0.0-20190224210810-a9411de4debd
# Source repo : https://github.com/gobuffalo/attrs.git
# Tested on : UBI 8.4
# Language : GO
# Travis-Check : True
# Script License : Apache License, Version 2 or later
# Maintainer : Vikas . <[email protected]>
#
# Disclaimer : This script has been tested in root mode on given
# ========== platform using the mentioned version of the package.
# It may not work as expected with newer versions of the
# package and/or distribution. In such case, please
# contact "Maintainer" of this script.
#
# ----------------------------------------------------------------------------
#!/bin/bash
set -e
if [ -z "$1" ]; then
export VERSION=a9411de4debd
else
export VERSION=$1
fi
if [ -d "attrs" ] ; then
rm -rf attrs
fi
# Dependency installation
dnf install -y git gcc
if ! command -v go &> /dev/null
then
GO_VERSION=go1.16.12
curl -O https://dl.google.com/go/${GO_VERSION}.linux-ppc64le.tar.gz
tar -C /usr/local -xzf ${GO_VERSION}.linux-ppc64le.tar.gz
export GOROOT=/usr/local/go
export GOPATH=$HOME/go
export PATH=$GOROOT/bin:$GOPATH/bin:$PATH
export GO111MODULE=auto
fi
# Download the repos
git clone https://github.com/gobuffalo/attrs.git
# Build and Test attrs
cd attrs
git checkout $VERSION
ret=$?
if [ $ret -eq 0 ] ; then
echo "$VERSION found to checkout "
else
echo "$VERSION not found "
exit 1
fi
go get -v -t ./...
ret=$?
if [ $ret -ne 0 ] ; then
echo "Build failed "
else
go test -v ./...
ret=$?
if [ $ret -ne 0 ] ; then
echo "Tests failed "
else
echo "Build & unit tests Successful "
fi
fi
| ppc64le/build-scripts | a/attrs/attrs_ubi_8.4.sh | Shell | apache-2.0 | 1,874 |
#!/usr/bin/env bash
# Copyright (C) Schweizerische Bundesbahnen SBB, 2020
# Make sure the screen does not go into sleep mode
SETUPDIR=$(dirname $(readlink -f $0))
DMCONF=/etc/lightdm/lightdm.conf
# avoid sleep...
sudo mv $DMCONF $DMCONF.orig
sudo cp -f $SETUPDIR/lightdm/lightdm.conf $DMCONF
sudo chown root $DMCONF
sudo chmod u=rw,g=r,o=r $DMCONF
| SchweizerischeBundesbahnen/cimon_setup | web/install_disable_screensleep.sh | Shell | apache-2.0 | 369 |
#!/bin/sh
# Delete BT torrent files older than 7 days.
# Requires support for find's -mtime option.
dir=/mnt/sda1/tmp
config_dir=/mnt/sda1/.aria2
if [ ! -f "/etc/crontabs/root" ]; then
echo "1 3 * * 1 /mnt/sda1/.aria2/rmoldtorrent.sh" > /etc/crontabs/root
echo " " >> /etc/crontabs/root
echo 'root' >> /etc/crontabs/cron.update
else
cun=`grep 'rmoldtorrent.sh' /etc/crontabs/root`
if [ "$cun" = "" ];then
sed -i 1i\\'1 3 * * 1 /mnt/sda1/.aria2/rmoldtorrent.sh' /etc/crontabs/root
fi
fi
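# Delete torrent files older than 7 days that are no longer referenced in the aria2
# session, then prune directories (rmdir only succeeds on empty ones).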
grep -i '.torrent' $config_dir/aria2.session > /tmp/list1
find $dir -type f -iname '*.torrent' -mtime +7 > /tmp/list2
grep -vf /tmp/list1 /tmp/list2 | xargs rm -rf
find $dir -type d -exec rmdir {} \;
| ghostry/openwrt-gmod | scripts/rmoldtorrent.sh | Shell | apache-2.0 | 676 |
#!/bin/bash
# Author: Eason Yi
# Date: 2017-05-20
if [[ $1 == "6" ]]; then
echo "[INFO] IPv6 for this machine:"
ifconfig|awk '/inet6 / {printf "%s/%s/%s\n",$2,$4,$6}'
else
echo "[INFO] IPv4 for this machine:"
#ifconfig|awk '/inet / {printf "%s/%s\n",$2,$4}'
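# Convert the hex netmask reported by ifconfig (e.g. 0xffffff00) into a CIDR prefix
# length: dc turns the hex value into binary, the zeros are stripped, and the count
# of the remaining "1" bits is the prefix length.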
ifconfig|awk '/inet / {cmd = "dc -e 16i2o"toupper(substr($4,3))"p";cmd | getline subnetmask;close(cmd);gsub("0","",subnetmask);printf "%s/%s\n",$2,length(subnetmask)}'
fi
curl -sSL https://httpbin.org/ip|jq .origin
| EasonYi/shell | ip.sh | Shell | apache-2.0 | 484 |
#!/bin/sh
RAW_FILES=`cat inputFilesRaw.txt`
INFILE=fidoinputTest.txt
rm -f ${INFILE}
for n in ${RAW_FILES};do
ID=`basename ${n} |sed s/.raw//`
DIR=`dirname ${n} |sed s/testdata/migratedTestData/`
echo fido fido --input=\"hdfs://${DIR}/${ID}.nxs\" \> \"hdfs://${DIR}/${ID}_fido.log\" >> ${INFILE}
done
| openpreserve/stfc-nexus | scripts/createInputFileFidoTest.sh | Shell | apache-2.0 | 310 |
#!/bin/sh
echo " "
echo "---------------------------------------------------------------------------------------------------------------"
echo "----- install Extra Packages for Enterprise Linux (EPEL)"
echo "---------------------------------------------------------------------------------------------------------------"
echo " "
yum --enablerepo=extras install -y epel-release
echo " "
echo "---------------------------------------------------------------------------------------------------------------"
echo "----- install ansible"
echo "---------------------------------------------------------------------------------------------------------------"
echo " "
yum install -y ansible
echo " "
echo "---------------------------------------------------------------------------------------------------------------"
echo "----- run ansible"
echo "---------------------------------------------------------------------------------------------------------------"
echo " "
cd /vagrant/ansible
ansible-playbook main.yml --limit "mgmt,admin,repo,dn1"
| manfredpaul/ambari-ansible | install.sh | Shell | apache-2.0 | 1,053 |
#!/usr/bin/env bash
gfsh << ENDGFSH
connect --locator=${locatorArray[0]}[10334]
shutdown --include-locators=true --time-out=30
Y
ENDGFSH
| charliemblack/gemfire-copy | scripts/shutdown_gemfire.sh | Shell | apache-2.0 | 140 |
#!/usr/bin/env bash
function checkFileExists {
if [ ! -f $1 ]; then
echo "$1 not exists !!!"
exit
fi
}
function deleteDir {
rm -rf $1
}
echo "input source apk path:"
read sourceApk
echo "input dest save dir:"
read saveDir
echo "input channel file path:"
read channelsFile
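# The channel file is expected to list one channel name per line; for each channel an
# empty META-INF/channel_<name> marker is added and the APK is repacked as <name>.apk.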
checkFileExists $sourceApk
checkFileExists $channelsFile
if [ ! -d $saveDir ]; then
mkdir -p $saveDir
fi
tmpdir="./tmp"
if [ -d $tmpdir ]; then
deleteDir $tmpdir # delete old files.
fi
mkdir -p $tmpdir
unzip ${sourceApk} -d ${tmpdir}
if [ $? -ne 0 ]; then
echo "unzip fail !!!!!!!!!!!!!!!"
deleteDir $tmpdir # delete old files.
exit
fi
for channelName in `cat ${channelsFile}`
do
cd $tmpdir
channel="./META-INF/channel_${channelName}"
touch ${channel}
zip -r ${channelName}.apk .
rm -f ${channel}
cd ..
mv ${tmpdir}/${channelName}.apk ${saveDir}
done
deleteDir $tmpdir # delete old files.
echo "---------------------------success------------------------------"
| lchli/MvpGithub | AnGithub/buildSystem/apkFlavor.sh | Shell | apache-2.0 | 941 |
# Script to start "supermonkey" on the device, which has a very rudimentary
# shell.
# Fix permissions before running the script:
# chmod 755 supermonkey.sh
# chown root.shell supermonkey.sh
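# "pm path" prints "package:<path-to-apk>"; strip the prefix so CLASSPATH points at
# the installed APK.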
ret=`pm path hk.hku.cs.srli.supermonkey`
package=${ret#package:}
base=/system/bin
export CLASSPATH=$package
trap "" HUP
exec app_process $base com.android.commands.monkey.Monkey $*
| leethree/attentive-ui | scripts/supermonkey.sh | Shell | apache-2.0 | 372 |
#!/bin/bash
if [ $# -eq 4 ]
then
PREFIX=$1 # network prefix
MASK=$2 # netmask
DEV=$3 # device
VLAN=$4 # VLAN id
ip route del table main ${PREFIX}/${MASK} dev $DEV
RTN=$?
if [ $RTN -ne 0 ]
then
echo "Error:ip route del failed $RTN"
exit $RTN
fi
ip route flush cache
elif [ $# -eq 5 ]
then
PREFIX=$1 # network prefix
MASK=$2 # netmask
DEV=$3 # device
VLAN=$4 # VLAN id
GW=$5 # gateway
ip route del table main ${PREFIX}/${MASK} dev $DEV via $GW
RTN=$?
if [ $RTN -ne 0 ]
then
echo "Error:ip route del failed $RTN"
exit $RTN
fi
ip route flush cache
else
echo "Usage: $0 <prefix> <mask> <dev> <vlan> [<gw>]"
echo "Example: $0 192.168.81.0 255.255.255.0 data0 241"
echo " OR"
echo "Example: $0 192.168.81.0 255.255.255.0 data0 241 192.168.81.249"
exit 0
fi
| WU-ARL/ONLdaemons | swrouter/scripts/novirtual/swrd_del_route_main.sh | Shell | apache-2.0 | 784 |
#!/bin/bash
touch vars
rm vars
# MariaDB Node
MARIADB=`sshpass -p ${ssh_pass} ssh -o StrictHostKeyChecking=no [email protected] -p 50181 "hostname --ip-address"`
echo "export MARIADB=${MARIADB}" >> vars
# Redis Node
REDIS=`sshpass -p ${ssh_pass} ssh -o StrictHostKeyChecking=no [email protected] -p 59733 "hostname --ip-address"`
echo "export REDIS=${REDIS}" >> vars
# Mongo Node
MONGO=`sshpass -p ${ssh_pass} ssh -o StrictHostKeyChecking=no [email protected] -p 50888 "hostname --ip-address"`
echo "export MONGO=${MONGO}" >> vars
# Webservice1 Node
WEBSERVICE1=`sshpass -p ${ssh_pass} ssh -o StrictHostKeyChecking=no [email protected] -p 5677 "hostname --ip-address"`
echo "export WEBSERVICE1=${WEBSERVICE1}" >> vars
# Webservice2 Node
WEBSERVICE2=`sshpass -p ${ssh_pass} ssh -o StrictHostKeyChecking=no [email protected] -p 49483 "hostname --ip-address"`
echo "export WEBSERVICE2=${WEBSERVICE2}" >> vars
# HAProxy Node
HAPROXY=`sshpass -p ${ssh_pass} ssh -o StrictHostKeyChecking=no [email protected] -p 60724 "hostname --ip-address"`
echo "export HAPROXY=${HAPROXY}" >> vars
# Iservice1 Node
ISERVICE1=`sshpass -p ${ssh_pass} ssh -o StrictHostKeyChecking=no [email protected] -p 50188 "hostname --ip-address"`
echo "export ISERVICE1=${ISERVICE1}" >> vars
# Iservice2 Node
ISERVICE2=`sshpass -p ${ssh_pass} ssh -o StrictHostKeyChecking=no [email protected] -p 5999 "hostname --ip-address"`
echo "export ISERVICE2=${ISERVICE2}" >> vars
| mtenrero/vetManager | updateNodes.sh | Shell | apache-2.0 | 1,581 |
#!/usr/bin/env bash
VERSION=1.8.1
# parent pom
mvn gpg:sign-and-deploy-file \
-Durl=https://oss.sonatype.org/service/local/staging/deploy/maven2/ \
-DrepositoryId=ossrh \
-DpomFile=pom.xml \
-Dfile=pom.xml
# core
mvn gpg:sign-and-deploy-file \
-Durl=https://oss.sonatype.org/service/local/staging/deploy/maven2/ \
-DrepositoryId=ossrh \
-DpomFile=core/pom.xml \
-Dfile=core/target/carnotzet-core-$VERSION.jar
mvn gpg:sign-and-deploy-file \
-Durl=https://oss.sonatype.org/service/local/staging/deploy/maven2/ \
-DrepositoryId=ossrh \
-DpomFile=core/pom.xml \
-Dfile=core/target/carnotzet-core-$VERSION-javadoc.jar \
-Dclassifier=javadoc
mvn gpg:sign-and-deploy-file \
-Durl=https://oss.sonatype.org/service/local/staging/deploy/maven2/ \
-DrepositoryId=ossrh \
-DpomFile=core/pom.xml \
-Dfile=core/target/carnotzet-core-$VERSION-sources.jar \
-Dclassifier=sources
# file-merger-json
mvn gpg:sign-and-deploy-file \
-Durl=https://oss.sonatype.org/service/local/staging/deploy/maven2/ \
-DrepositoryId=ossrh \
-DpomFile=file-merger-json/pom.xml \
-Dfile=file-merger-json/target/carnotzet-file-merger-json-$VERSION.jar
mvn gpg:sign-and-deploy-file \
-Durl=https://oss.sonatype.org/service/local/staging/deploy/maven2/ \
-DrepositoryId=ossrh \
-DpomFile=file-merger-json/pom.xml \
-Dfile=file-merger-json/target/carnotzet-file-merger-json-$VERSION-javadoc.jar \
-Dclassifier=javadoc
mvn gpg:sign-and-deploy-file \
-Durl=https://oss.sonatype.org/service/local/staging/deploy/maven2/ \
-DrepositoryId=ossrh \
-DpomFile=file-merger-json/pom.xml \
-Dfile=file-merger-json/target/carnotzet-file-merger-json-$VERSION-sources.jar \
-Dclassifier=sources
# docker-compose
mvn gpg:sign-and-deploy-file \
-Durl=https://oss.sonatype.org/service/local/staging/deploy/maven2/ \
-DrepositoryId=ossrh \
-DpomFile=docker-compose/pom.xml \
-Dfile=docker-compose/target/carnotzet-orchestrator-docker-compose-$VERSION.jar
mvn gpg:sign-and-deploy-file \
-Durl=https://oss.sonatype.org/service/local/staging/deploy/maven2/ \
-DrepositoryId=ossrh \
-DpomFile=docker-compose/pom.xml \
-Dfile=docker-compose/target/carnotzet-orchestrator-docker-compose-$VERSION-javadoc.jar \
-Dclassifier=javadoc
mvn gpg:sign-and-deploy-file \
-Durl=https://oss.sonatype.org/service/local/staging/deploy/maven2/ \
-DrepositoryId=ossrh \
-DpomFile=docker-compose/pom.xml \
-Dfile=docker-compose/target/carnotzet-orchestrator-docker-compose-$VERSION-sources.jar \
-Dclassifier=sources
# maven plugin
mvn gpg:sign-and-deploy-file \
-Durl=https://oss.sonatype.org/service/local/staging/deploy/maven2/ \
-DrepositoryId=ossrh \
-DpomFile=maven-plugin/pom.xml \
-Dfile=maven-plugin/target/zet-maven-plugin-$VERSION.jar
mvn gpg:sign-and-deploy-file \
-Durl=https://oss.sonatype.org/service/local/staging/deploy/maven2/ \
-DrepositoryId=ossrh \
-DpomFile=maven-plugin/pom.xml \
-Dfile=maven-plugin/target/zet-maven-plugin-$VERSION-javadoc.jar \
-Dclassifier=javadoc
mvn gpg:sign-and-deploy-file \
-Durl=https://oss.sonatype.org/service/local/staging/deploy/maven2/ \
-DrepositoryId=ossrh \
-DpomFile=maven-plugin/pom.xml \
-Dfile=maven-plugin/target/zet-maven-plugin-$VERSION-sources.jar \
-Dclassifier=sources
| matthyx/carnotzet | deploy-central.sh | Shell | apache-2.0 | 3,168 |
#!/bin/bash
# This is the fifth of five scripts that install CLARA on a
# Raspberry Pi, assumed to be running Raspbian Stretch.
#
# They _should_ work in other Raspbian environments, but have not
# been tested. Proceed at your own risk.
#
# For more information, read the guide below:
# https://www.pyimagesearch.com/2017/09/04/raspbian-stretch-install-opencv-3-python-on-your-raspberry-pi/
#
# Note that this installation is left deliberately incomplete. After running this script,
# follow the procedure given in the above guide for 'Step 5: Compile and Install OpenCV'
# using the instructions for Python 3.
source ~/.profile
workon cv
# Prepare OpenCV for compilation.
# MAKE SURE to verify that output from this step
# is correct before proceeding. See guide.
cd ~/opencv-3.3.1/build
sudo cmake -D CMAKE_BUILD_TYPE=RELEASE \
-D CMAKE_INSTALL_PREFIX=/usr/local \
-D INSTALL_PYTHON_EXAMPLES=ON \
-D OPENCV_EXTRA_MODULES_PATH=~/opencv_contrib-3.3.1/modules \
-D BUILD_EXAMPLES=ON ..
| N03048839/spine-reader | install/install5.sh | Shell | apache-2.0 | 1,000 |
#!/bin/bash
set -e
set -x
cd libtrellis
cmake \
-DCMAKE_INSTALL_PREFIX=/ \
-DCMAKE_INSTALL_BINDIR='/bin' \
-DCMAKE_INSTALL_DATADIR='/share' \
-DCMAKE_INSTALL_DATAROOTDIR='/share' \
-DCMAKE_INSTALL_DOCDIR='/share/doc' \
-DCMAKE_INSTALL_INCLUDEDIR='/include' \
-DCMAKE_INSTALL_INFODIR='/share/info' \
-DCMAKE_INSTALL_LIBDIR='/lib' \
-DCMAKE_INSTALL_LIBEXECDIR='/libexec' \
-DCMAKE_INSTALL_LOCALEDIR='/share/locale' \
\
-DPYTHON_EXECUTABLE=$(which python) \
-DPYTHON_INCLUDE_DIR=$(python -c "from distutils.sysconfig import get_python_inc; print(get_python_inc())") \
-DPYTHON_LIBRARY=$(python -c "import distutils.sysconfig as sysconfig; print(sysconfig.get_config_var('LIBDIR'))") \
\
-DBOOST_ROOT="${BUILD_PREFIX}" \
-DBoost_NO_SYSTEM_PATHS:BOOL=ON \
-DBOOST_INCLUDEDIR="${BUILD_PREFIX}/include" \
-DBOOST_LIBRARYDIR="${BUILD_PREFIX}/lib" \
-DBUILD_PYTHON=ON \
-DBUILD_SHARED=ON \
-DSTATIC_BUILD=OFF \
-DBoost_USE_STATIC_LIBS=ON \
.
make -j$CPU_COUNT
make DESTDIR=${PREFIX} install
| litex-hub/litex-conda-eda | bit/prjtrellis/build.sh | Shell | apache-2.0 | 1,023 |
#!/usr/bin/env bash
node ../../packages/teraslice/service.js -c ./teraslice-master-local.yaml > ./master.log 2>&1 &
pid=$!
echo ${pid} > master.pid
echo "Server PID: ${pid}"
echo "Follow logs with:"
echo " tail -f master.log | bunyan"
echo "Kill server with:"
echo ' kill $(cat master.pid)'
|
terascope/teraslice
|
examples/k8s/run-ts-master.sh
|
Shell
|
apache-2.0
| 295 |
#!/bin/bash
xfce4-panel --quit
pkill xfconfd
rm -rf ~/.config/xfce4/panel
rm -rf ~/.config/xfce4/xfconf/xfce-perchannel-xml/xfce4-panel.xml
xfce4-panel
|
dvdvideo1234/UbuntuBatches
|
Olimex-A20/Scripts/refresh-xfce4.sh
|
Shell
|
apache-2.0
| 153 |
pushd $(dirname ${BASH_SOURCE[0]}) > /dev/null
export BOUNDARY_SDK_HOME=$PWD
popd > /dev/null
export BOUNDARY_SDK_VERSION=$(egrep "<version>(.*)</version>" "$BOUNDARY_SDK_HOME/pom.xml" | head -1 | sed '-e s/<version>//' -e 's/<\/version>//' -e 's/[[:blank:]]//' | tr -d ' ')
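# The pipeline above pulls the first <version> element out of pom.xml and
# strips the tags/whitespace, e.g. "<version>3.0.0</version>" -> "3.0.0"
# (the value shown is illustrative, not taken from this repository).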
export BOUNDARY_SDK_SCRIPT_DIR="$BOUNDARY_SDK_HOME/src/main/resources/META-INF/js"
export BOUNDARY_MIB_REPOSITORY="$BOUNDARY_SDK_HOME/target/classes/mibrepository"
export PATH=$PATH:"$BOUNDARY_SDK_HOME/src/main/scripts"
alias bsdk='cd $BOUNDARY_SDK_HOME'
alias benv='env | grep BOUNDARY'
|
boundary/boundary-event-sdk
|
env.sh
|
Shell
|
apache-2.0
| 557 |
#!/bin/bash
cf d -f hello-service-client
cf ds -f hello-service
cf disable-service-access hello-service
cf delete-service-broker -f Hello
cf d -f hello-service-broker
cf d -f hello-service
|
Sbodiu-pivotal/hello-service-broker
|
undeploy_broker.sh
|
Shell
|
apache-2.0
| 188 |
#!/bin/bash
set -e
if [[ -z $1 ]]; then
"I need a command!"
exit 1
fi
case "$1" in
install_docker)
if [[ -z $DOCKER_VERSION ]]; then
echo "DOCKER_VERSION needs to be set as an environment variable"
exit 1
fi
# TODO detect which docker version is already installed and skip
# uninstall/reinstall if it matches $DOCKER_VERSION
# stop docker service if running
sudo stop docker || :
# Remove old docker files that might prevent the installation and starting of other versions
sudo rm -fr /var/lib/docker || :
# As instructed on http://docs.master.dockerproject.org/engine/installation/linux/ubuntulinux/
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
sudo sh -c "echo deb https://apt.dockerproject.org/repo ubuntu-trusty main > /etc/apt/sources.list.d/docker.list"
sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
if [[ "$RC" == "true" ]]; then
dist_version="$(lsb_release --codename | cut -f2)"
sudo sh -c "echo deb [arch=$(dpkg --print-architecture)] https://apt.dockerproject.org/repo ubuntu-${dist_version} testing >> /etc/apt/sources.list.d/docker.list"
fi
sudo apt-get -qq update
sudo apt-get -q -y purge docker-engine docker-ce
apt-cache policy docker-engine
if [[ "$DOCKER_CE" == "1" ]]; then
sudo apt-get -q -y install docker-ce=$DOCKER_VERSION* "linux-image-extra-$(uname -r)"
else
sudo apt-get -q -y install docker-engine=$DOCKER_VERSION* "linux-image-extra-$(uname -r)"
fi
# set DOCKER_OPTS to make sure docker listens on the ports we intend
echo 'DOCKER_OPTS="-D=true -H=unix:///var/run/docker.sock -H=tcp://127.0.0.1:2375"' | sudo tee -a /etc/default/docker
if [[ "$DOCKER_VERSION" =~ ^1\.9\..* && ! $(mount | grep /dev/mqueue) ]]; then
# docker-engine 1.9.x doesn't mount /dev/mqueue which is necessary to test `--ipc=host`
sudo mkdir -p /dev/mqueue
sudo mount -t mqueue none /dev/mqueue
fi
# restart the service for the /etc/default/docker change we made after
# installing the package
sudo restart docker
# Give it time to be ready
sleep 10
# initialize docker swarm to be able to run docker tests
sudo docker swarm init --advertise-addr 127.0.0.1
# Wait a minute so we can see more docker logs in case something goes wrong
sleep 60
;;
dump_docker_config)
# output the upstart config and default config in case they are needed for
# troubleshooting
echo "Contents of /etc/init/docker.conf:"
sudo cat /etc/init/docker.conf
echo "Contents of /etc/default/docker"
sudo cat /etc/default/docker || :
echo "Contents of /var/log/upstart/docker.log"
sudo cat /var/log/upstart/docker.log
;;
*)
echo "Unknown command $1"
exit 2
esac
|
rgrunber/docker-client
|
.travis.sh
|
Shell
|
apache-2.0
| 2,927 |
#!/bin/bash
#
# Copyright 2019 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Test related to exec groups.
#
# --- begin runfiles.bash initialization ---
# Copy-pasted from Bazel's Bash runfiles library (tools/bash/runfiles/runfiles.bash).
if [[ ! -d "${RUNFILES_DIR:-/dev/null}" && ! -f "${RUNFILES_MANIFEST_FILE:-/dev/null}" ]]; then
if [[ -f "$0.runfiles_manifest" ]]; then
export RUNFILES_MANIFEST_FILE="$0.runfiles_manifest"
elif [[ -f "$0.runfiles/MANIFEST" ]]; then
export RUNFILES_MANIFEST_FILE="$0.runfiles/MANIFEST"
elif [[ -f "$0.runfiles/bazel_tools/tools/bash/runfiles/runfiles.bash" ]]; then
export RUNFILES_DIR="$0.runfiles"
fi
fi
if [[ -f "${RUNFILES_DIR:-/dev/null}/bazel_tools/tools/bash/runfiles/runfiles.bash" ]]; then
source "${RUNFILES_DIR}/bazel_tools/tools/bash/runfiles/runfiles.bash"
elif [[ -f "${RUNFILES_MANIFEST_FILE:-/dev/null}" ]]; then
source "$(grep -m1 "^bazel_tools/tools/bash/runfiles/runfiles.bash " \
"$RUNFILES_MANIFEST_FILE" | cut -d ' ' -f 2-)"
else
echo >&2 "ERROR: cannot find @bazel_tools//tools/bash/runfiles:runfiles.bash"
exit 1
fi
# --- end runfiles.bash initialization ---
source "$(rlocation "io_bazel/src/test/shell/integration_test_setup.sh")" \
|| { echo "integration_test_setup.sh not found!" >&2; exit 1; }
# `uname` returns the current platform, e.g "MSYS_NT-10.0" or "Linux".
# `tr` converts all upper case letters to lower case.
# `case` matches the result of the `uname | tr` expression to string prefixes
# that use the same wildcards as names do in Bash, i.e. "msys*" matches strings
# starting with "msys", and "*" matches everything (it's the default case).
case "$(uname -s | tr [:upper:] [:lower:])" in
msys*)
# As of 2019-01-15, Bazel on Windows only supports MSYS Bash.
declare -r is_windows=true
;;
*)
declare -r is_windows=false
;;
esac
# NOTE: All tests need to declare targets in a custom package, which is why they
# all use the pkg=${FUNCNAME[0]} variable.
function test_target_exec_properties_starlark() {
local -r pkg=${FUNCNAME[0]}
mkdir $pkg || fail "mkdir $pkg"
cat > ${pkg}/rules.bzl << EOF
def _impl(ctx):
out_file = ctx.outputs.output
ctx.actions.run_shell(inputs = [], outputs = [out_file], arguments=[out_file.path], progress_message = "Saying hello", command = "echo hello > \"\$1\"")
my_rule = rule(
implementation = _impl,
attrs = {
"output": attr.output(),
}
)
EOF
cat > ${pkg}/BUILD << EOF
load("//${pkg}:rules.bzl", "my_rule")
constraint_setting(name = "setting")
constraint_value(name = "local", constraint_setting = ":setting")
my_rule(
name = "a",
output = "out.txt",
exec_properties = {"key3": "value3", "overridden": "child_value"},
exec_compatible_with = [":local"],
)
platform(
name = "my_platform",
parents = ["${default_host_platform}"],
exec_properties = {
"key2": "value2",
"overridden": "parent_value",
},
constraint_values = [":local"],
)
EOF
bazel build --extra_execution_platforms="${pkg}:my_platform" ${pkg}:a --execution_log_json_file out.txt &> $TEST_log || fail "Build failed"
grep "key2" out.txt || fail "Did not find the platform key"
grep "key3" out.txt || fail "Did not find the target attribute key"
grep "child_value" out.txt || fail "Did not find the overriding value"
}
function test_target_exec_properties_starlark_test() {
local -r pkg=${FUNCNAME[0]}
mkdir $pkg || fail "mkdir $pkg"
if "$is_windows"; then
script_name="test_script.bat"
script_content="@echo off\necho hello\n"
else
script_name="test_script.sh"
script_content="#!/bin/bash\necho hello\n"
fi
cat > ${pkg}/rules.bzl <<EOF
def _impl(ctx):
out_file = ctx.actions.declare_file("$script_name")
ctx.actions.write(out_file, "$script_content", is_executable=True)
return [DefaultInfo(executable = out_file)]
my_rule_test = rule(
implementation = _impl,
test = True,
)
EOF
cat > ${pkg}/BUILD << EOF
load("//${pkg}:rules.bzl", "my_rule_test")
constraint_setting(name = "setting")
constraint_value(name = "local", constraint_setting = ":setting")
my_rule_test(
name = "a",
exec_properties = {"key3": "value3", "overridden": "child_value"},
exec_compatible_with = [":local"],
)
platform(
name = "my_platform",
parents = ["${default_host_platform}"],
exec_properties = {
"key2": "value2",
"overridden": "parent_value",
},
constraint_values = [":local"],
)
EOF
bazel test --extra_execution_platforms="${pkg}:my_platform" ${pkg}:a --execution_log_json_file out.txt &> $TEST_log || fail "Build failed"
grep "key2" out.txt || fail "Did not find the platform key"
grep "key3" out.txt || fail "Did not find the target attribute key"
grep "child_value" out.txt || fail "Did not find the overriding value"
}
function test_target_exec_properties_cc() {
local -r pkg=${FUNCNAME[0]}
mkdir $pkg || fail "mkdir $pkg"
cat > ${pkg}/a.cc <<EOF
#include <stdio.h>
int main() {
printf("Hello\n");
}
EOF
cat > ${pkg}/BUILD <<EOF
constraint_setting(name = "setting")
constraint_value(name = "local", constraint_setting = ":setting")
cc_binary(
name = "a",
srcs = ["a.cc"],
exec_properties = {"key3": "value3", "overridden": "child_value"},
exec_compatible_with = [":local"],
)
platform(
name = "my_platform",
parents = ["${default_host_platform}"],
exec_properties = {
"key2": "value2",
"overridden": "parent_value",
},
constraint_values = [":local"],
)
EOF
bazel build \
--extra_execution_platforms="${pkg}:my_platform" \
--toolchain_resolution_debug=.* \
--execution_log_json_file out.txt \
${pkg}:a &> $TEST_log || fail "Build failed"
grep "key3" out.txt || fail "Did not find the target attribute key"
grep "child_value" out.txt || fail "Did not find the overriding value"
grep "key2" out.txt || fail "Did not find the platform key"
}
function test_target_exec_properties_cc_test() {
local -r pkg=${FUNCNAME[0]}
mkdir $pkg || fail "mkdir $pkg"
cat > ${pkg}/a.cc <<EOF
#include <stdio.h>
int main() {
printf("Hello\n");
}
EOF
cat > ${pkg}/BUILD <<EOF
constraint_setting(name = "setting")
constraint_value(name = "local", constraint_setting = ":setting")
cc_test(
name = "a",
srcs = ["a.cc"],
exec_properties = {"key3": "value3", "overridden": "child_value"},
exec_compatible_with = [":local"],
)
platform(
name = "my_platform",
parents = ["${default_host_platform}"],
exec_properties = {
"key2": "value2",
"overridden": "parent_value",
},
constraint_values = [":local"],
)
EOF
bazel test --extra_execution_platforms="${pkg}:my_platform" ${pkg}:a --execution_log_json_file out.txt &> $TEST_log || fail "Build failed"
grep "key2" out.txt || fail "Did not find the platform key"
grep "key3" out.txt || fail "Did not find the target attribute key"
grep "child_value" out.txt || fail "Did not find the overriding value"
}
function test_target_test_properties_sh_test() {
local -r pkg=${FUNCNAME[0]}
mkdir $pkg || fail "mkdir $pkg"
cat > ${pkg}/a.sh <<EOF
#!/bin/bash
echo hello
EOF
chmod u+x ${pkg}/a.sh
cat > ${pkg}/BUILD <<EOF
constraint_setting(name = "setting")
constraint_value(name = "local", constraint_setting = ":setting")
sh_test(
name = "a",
srcs = ["a.sh"],
exec_properties = {"key3": "value3", "overridden": "child_value"},
exec_compatible_with = [":local"],
)
platform(
name = "my_platform",
parents = ["${default_host_platform}"],
exec_properties = {
"key2": "value2",
"overridden": "parent_value",
},
constraint_values = [":local"],
)
EOF
bazel test --extra_execution_platforms="${pkg}:my_platform" ${pkg}:a --execution_log_json_file out.txt &> $TEST_log || fail "Build failed"
grep "key2" out.txt || fail "Did not find the platform key"
grep "key3" out.txt || fail "Did not find the target attribute key"
grep "child_value" out.txt || fail "Did not find the overriding value"
}
function test_platform_execgroup_properties_cc_test() {
local -r pkg=${FUNCNAME[0]}
mkdir $pkg || fail "mkdir $pkg"
cat > ${pkg}/a.cc <<EOF
int main() {}
EOF
cat > ${pkg}/BUILD <<EOF
constraint_setting(name = "setting")
constraint_value(name = "local", constraint_setting = ":setting")
cc_test(
name = "a",
srcs = ["a.cc"],
exec_compatible_with = [":local"],
)
platform(
name = "my_platform",
parents = ["${default_host_platform}"],
exec_properties = {
"platform_key": "default_value",
"test.platform_key": "test_value",
},
constraint_values = [":local"],
)
EOF
bazel build --extra_execution_platforms="${pkg}:my_platform" ${pkg}:a --execution_log_json_file out.txt || fail "Build failed"
grep "platform_key" out.txt || fail "Did not find the platform key"
grep "default_value" out.txt || fail "Did not find the default value"
grep "test_value" out.txt && fail "Used the test-action value when not testing"
bazel test --extra_execution_platforms="${pkg}:my_platform" ${pkg}:a --execution_log_json_file out.txt || fail "Test failed"
grep "platform_key" out.txt || fail "Did not find the platform key"
grep "test_value" out.txt || fail "Did not find the test-action value"
}
function test_platform_execgroup_properties_nongroup_override_cc_test() {
local -r pkg=${FUNCNAME[0]}
mkdir $pkg || fail "mkdir $pkg"
cat > ${pkg}/a.cc <<EOF
int main() {}
EOF
cat > ${pkg}/BUILD <<EOF
constraint_setting(name = "setting")
constraint_value(name = "local", constraint_setting = ":setting")
cc_test(
name = "a",
srcs = ["a.cc"],
exec_properties = {
"platform_key": "override_value",
},
exec_compatible_with = [":local"],
)
platform(
name = "my_platform",
parents = ["${default_host_platform}"],
exec_properties = {
"platform_key": "default_value",
"test.platform_key": "test_value",
},
constraint_values = [":local"],
)
EOF
bazel build --extra_execution_platforms="${pkg}:my_platform" ${pkg}:a --execution_log_json_file out.txt || fail "Build failed"
grep "platform_key" out.txt || fail "Did not find the platform key"
grep "override_value" out.txt || fail "Did not find the overriding value"
grep "default_value" out.txt && fail "Used the default value"
bazel test --extra_execution_platforms="${pkg}:my_platform" ${pkg}:a --execution_log_json_file out.txt || fail "Test failed"
grep "platform_key" out.txt || fail "Did not find the platform key"
grep "override_value" out.txt || fail "Did not find the overriding value"
}
function test_platform_execgroup_properties_group_override_cc_test() {
local -r pkg=${FUNCNAME[0]}
mkdir $pkg || fail "mkdir $pkg"
cat > ${pkg}/a.cc <<EOF
int main() {}
EOF
cat > ${pkg}/BUILD <<EOF
constraint_setting(name = "setting")
constraint_value(name = "local", constraint_setting = ":setting")
cc_test(
name = "a",
srcs = ["a.cc"],
exec_properties = {
"test.platform_key": "test_override",
},
exec_compatible_with = [":local"],
)
platform(
name = "my_platform",
parents = ["${default_host_platform}"],
exec_properties = {
"platform_key": "default_value",
"test.platform_key": "test_value",
},
constraint_values = [":local"],
)
EOF
bazel build --extra_execution_platforms="${pkg}:my_platform" ${pkg}:a --execution_log_json_file out.txt || fail "Build failed"
grep "platform_key" out.txt || fail "Did not find the platform key"
grep "default_value" out.txt || fail "Used the default value"
bazel test --extra_execution_platforms="${pkg}:my_platform" ${pkg}:a --execution_log_json_file out.txt || fail "Test failed"
grep "platform_key" out.txt || fail "Did not find the platform key"
grep "test_override" out.txt || fail "Did not find the overriding test-action value"
}
function test_platform_execgroup_properties_override_group_and_default_cc_test() {
local -r pkg=${FUNCNAME[0]}
mkdir $pkg || fail "mkdir $pkg"
cat > ${pkg}/a.cc <<EOF
int main() {}
EOF
cat > ${pkg}/BUILD <<EOF
constraint_setting(name = "setting")
constraint_value(name = "local", constraint_setting = ":setting")
cc_test(
name = "a",
srcs = ["a.cc"],
exec_properties = {
"platform_key": "override_value",
"test.platform_key": "test_override",
},
exec_compatible_with = [":local"],
)
platform(
name = "my_platform",
parents = ["${default_host_platform}"],
exec_properties = {
"platform_key": "default_value",
"test.platform_key": "test_value",
},
constraint_values = [":local"],
)
EOF
bazel build --extra_execution_platforms="${pkg}:my_platform" ${pkg}:a --execution_log_json_file out.txt || fail "Build failed"
grep "platform_key" out.txt || fail "Did not find the platform key"
grep "override_value" out.txt || fail "Did not find the overriding value"
grep "default_value" out.txt && fail "Used the default value"
bazel test --extra_execution_platforms="${pkg}:my_platform" ${pkg}:a --execution_log_json_file out.txt || fail "Test failed"
grep "platform_key" out.txt || fail "Did not find the platform key"
grep "test_override" out.txt || fail "Did not find the overriding test-action value"
}
function test_platform_execgroup_properties_test_inherits_default() {
local -r pkg=${FUNCNAME[0]}
mkdir $pkg || fail "mkdir $pkg"
cat > ${pkg}/a.cc <<EOF
int main() {}
EOF
cat > ${pkg}/BUILD <<EOF
constraint_setting(name = "setting")
constraint_value(name = "local", constraint_setting = ":setting")
cc_test(
name = "a",
srcs = ["a.cc"],
exec_compatible_with = [":local"],
)
# This platform should be first in --extra_execution_platforms.
# It has no constraints and only exists to detect if the correct platform is not
# used.
platform(
name = "platform_no_constraint",
parents = ["${default_host_platform}"],
exec_properties = {
"exec_property": "no_constraint",
},
)
# This platform should be second. The constraint means it will be used for
# the cc_test.
# The exec_property should be used for the actual test execution.
platform(
name = "platform_with_constraint",
parents = ["${default_host_platform}"],
exec_properties = {
"exec_property": "requires_test_constraint",
},
constraint_values = [":local"],
)
EOF
bazel test --extra_execution_platforms="${pkg}:platform_no_constraint,${pkg}:platform_with_constraint" ${pkg}:a --execution_log_json_file out.txt || fail "Test failed"
grep --after=4 "platform" out.txt | grep "exec_property" || fail "Did not find the property key"
grep --after=4 "platform" out.txt | grep "no_constraint" && fail "Found the wrong property."
grep --after=4 "platform" out.txt | grep "requires_test_constraint" || fail "Did not find the property value"
}
function test_platform_properties_only_applied_for_relevant_execgroups_cc_test() {
local -r pkg=${FUNCNAME[0]}
mkdir $pkg || fail "mkdir $pkg"
cat > ${pkg}/a.cc <<EOF
int main() {}
EOF
cat > ${pkg}/BUILD <<EOF
constraint_setting(name = "setting")
constraint_value(name = "local", constraint_setting = ":setting")
cc_test(
name = "a",
srcs = ["a.cc"],
exec_compatible_with = [":local"],
)
platform(
name = "my_platform",
parents = ["${default_host_platform}"],
exec_properties = {
"platform_key": "default_value",
"unknown.platform_key": "unknown_value",
},
constraint_values = [":local"],
)
EOF
bazel test --extra_execution_platforms="${pkg}:my_platform" ${pkg}:a --execution_log_json_file out.txt || fail "Build failed"
grep "platform_key" out.txt || fail "Did not find the platform key"
grep "default_value" out.txt || fail "Did not find the default value"
}
function test_cannot_set_properties_for_irrelevant_execgroup_on_target_cc_test() {
local -r pkg=${FUNCNAME[0]}
mkdir $pkg || fail "mkdir $pkg"
cat > ${pkg}/a.cc <<EOF
int main() {}
EOF
cat > ${pkg}/BUILD <<EOF
cc_test(
name = "a",
srcs = ["a.cc"],
exec_properties = {
"platform_key": "default_value",
"unknown.platform_key": "unknown_value",
},
)
EOF
bazel test ${pkg}:a &> $TEST_log && fail "Build passed when we expected an error"
grep "Tried to set properties for non-existent exec group" $TEST_log || fail "Did not complain about unknown exec group"
}
function write_toolchains_for_exec_group_tests() {
mkdir -p ${pkg}/platform
cat >> ${pkg}/platform/toolchain.bzl <<EOF
def _impl(ctx):
toolchain = platform_common.ToolchainInfo(
message = ctx.attr.message)
return [toolchain]
test_toolchain = rule(
implementation = _impl,
attrs = {
'message': attr.string(),
}
)
EOF
cat >> ${pkg}/platform/BUILD <<EOF
package(default_visibility = ['//visibility:public'])
toolchain_type(name = 'toolchain_type')
constraint_setting(name = 'setting')
constraint_value(name = 'value_foo', constraint_setting = ':setting')
constraint_value(name = 'value_bar', constraint_setting = ':setting')
load(':toolchain.bzl', 'test_toolchain')
# Define the toolchains.
test_toolchain(
name = 'test_toolchain_impl_foo',
message = 'foo',
)
test_toolchain(
name = 'test_toolchain_impl_bar',
message = 'bar',
)
# Declare the toolchains.
toolchain(
name = 'test_toolchain_foo',
toolchain_type = ':toolchain_type',
exec_compatible_with = [
':value_foo',
],
target_compatible_with = [],
toolchain = ':test_toolchain_impl_foo',
)
toolchain(
name = 'test_toolchain_bar',
toolchain_type = ':toolchain_type',
exec_compatible_with = [
':value_bar',
],
target_compatible_with = [],
toolchain = ':test_toolchain_impl_bar',
)
# Define the platforms.
platform(
name = 'platform_foo',
constraint_values = [':value_foo'],
)
platform(
name = 'platform_bar',
constraint_values = [':value_bar'],
)
EOF
cat >> WORKSPACE <<EOF
register_toolchains('//${pkg}/platform:all')
register_execution_platforms('//${pkg}/platform:all')
EOF
}
# Test basic inheritance of constraints and toolchains on a single rule.
function test_exec_group_rule_constraint_inheritance() {
local -r pkg=${FUNCNAME[0]}
mkdir $pkg || fail "mkdir $pkg"
write_toolchains_for_exec_group_tests
# Add a rule with default execution constraints.
mkdir -p ${pkg}/demo
cat >> ${pkg}/demo/rule.bzl <<EOF
def _impl(ctx):
toolchain = ctx.toolchains['//${pkg}/platform:toolchain_type']
out_file_main = ctx.actions.declare_file("%s.log" % ctx.attr.name)
ctx.actions.run_shell(
outputs = [out_file_main],
command = "echo 'hi from %s, toolchain says %s' > '%s'" %
(ctx.attr.name, toolchain.message, out_file_main.path),
)
out_file_extra = ctx.actions.declare_file("%s_extra.log" % ctx.attr.name)
extra_toolchain = ctx.exec_groups['extra'].toolchains['//${pkg}/platform:toolchain_type']
ctx.actions.run_shell(
outputs = [out_file_extra],
command = "echo 'extra from %s, toolchain says %s' > '%s'" %
(ctx.attr.name, extra_toolchain.message, out_file_extra.path),
)
return [DefaultInfo(files = depset([out_file_main, out_file_extra]))]
sample_rule = rule(
implementation = _impl,
exec_groups = {
# extra should inherit both the exec constraint and the toolchain.
'extra': exec_group(copy_from_rule = True),
},
exec_compatible_with = ['//${pkg}/platform:value_foo'],
toolchains = ['//${pkg}/platform:toolchain_type'],
)
EOF
# Use the new rule.
cat >> ${pkg}/demo/BUILD <<EOF
load(':rule.bzl', 'sample_rule')
sample_rule(
name = 'use',
)
EOF
# Build the target, using debug messages to verify the correct platform was selected.
bazel build \
--experimental_exec_groups \
//${pkg}/demo:use &> $TEST_log || fail "Build failed"
cat bazel-bin/${pkg}/demo/use.log >> $TEST_log
cat bazel-bin/${pkg}/demo/use_extra.log >> $TEST_log
expect_log "hi from use, toolchain says foo"
expect_log "extra from use, toolchain says foo"
}
# Test basic inheritance of constraints and toolchains with a target.
function test_exec_group_target_constraint_inheritance() {
local -r pkg=${FUNCNAME[0]}
mkdir $pkg || fail "mkdir $pkg"
write_toolchains_for_exec_group_tests
# Add a rule with default execution constraints.
mkdir -p ${pkg}/demo
cat >> ${pkg}/demo/rule.bzl <<EOF
def _impl(ctx):
toolchain = ctx.toolchains['//${pkg}/platform:toolchain_type']
out_file_main = ctx.actions.declare_file("%s.log" % ctx.attr.name)
ctx.actions.run_shell(
outputs = [out_file_main],
command = "echo 'hi from %s, toolchain says %s' > '%s'" %
(ctx.attr.name, toolchain.message, out_file_main.path),
)
out_file_extra = ctx.actions.declare_file("%s_extra.log" % ctx.attr.name)
extra_toolchain = ctx.exec_groups['extra'].toolchains['//${pkg}/platform:toolchain_type']
ctx.actions.run_shell(
outputs = [out_file_extra],
command = "echo 'extra from %s, toolchain says %s' > '%s'" %
(ctx.attr.name, extra_toolchain.message, out_file_extra.path),
)
return [DefaultInfo(files = depset([out_file_main, out_file_extra]))]
sample_rule = rule(
implementation = _impl,
exec_groups = {
# extra should inherit the toolchain, and the exec constraint from the target.
'extra': exec_group(copy_from_rule = True),
},
toolchains = ['//${pkg}/platform:toolchain_type'],
)
EOF
# Use the new rule.
cat >> ${pkg}/demo/BUILD <<EOF
load(':rule.bzl', 'sample_rule')
sample_rule(
name = 'use',
exec_compatible_with = ['//${pkg}/platform:value_bar'],
)
EOF
# Build the target, using debug messages to verify the correct platform was selected.
bazel build \
--experimental_exec_groups \
//${pkg}/demo:use &> $TEST_log || fail "Build failed"
cat bazel-bin/${pkg}/demo/use.log >> $TEST_log
cat bazel-bin/${pkg}/demo/use_extra.log >> $TEST_log
expect_log "hi from use, toolchain says bar"
expect_log "extra from use, toolchain says bar"
}
run_suite "exec group test"
|
meteorcloudy/bazel
|
src/test/shell/integration/exec_group_test.sh
|
Shell
|
apache-2.0
| 22,530 |
#!/bin/bash
# Author:Tyson
# E-mail:admin#svipc.com
# Website:http://www.svipc.com
# Version:1.0.0 Aug-16-2015-12:28:58
# Notes:Autoscripts for CentOS/RadHat 5+ Debian 6+ and Ubuntu 12+
sed -i 's@^exclude@#exclude@' /etc/yum.conf
yum clean all
yum makecache
if [ "$CentOS_RHEL_version" == '7' ];then
yum -y install iptables-services
systemctl mask firewalld.service
systemctl enable iptables.service
elif [ "$CentOS_RHEL_version" == '6' ];then
yum -y groupremove "FTP Server" "PostgreSQL Database client" "PostgreSQL Database server" "MySQL Database server" "MySQL Database client" "Web Server" "Office Suite and Productivity" "E-mail server" "Ruby Support" "Printing client"
elif [ "$CentOS_RHEL_version" == '5' ];then
yum -y groupremove "FTP Server" "Windows File Server" "PostgreSQL Database" "News Server" "MySQL Database" "DNS Name Server" "Web Server" "Dialup Networking Support" "Mail Server" "Ruby" "Office/Productivity" "Sound and Video" "Printing Support" "OpenFabrics Enterprise Distribution"
fi
yum check-update
# Upgrade the OS if requested
[ "$upgrade_yn" == 'y' ] && yum -y upgrade
# Install needed packages
for Package in deltarpm gcc gcc-c++ make cmake autoconf libjpeg libjpeg-devel libpng libpng-devel freetype freetype-devel libxml2 libxml2-devel zlib zlib-devel glibc glibc-devel glib2 glib2-devel bzip2 bzip2-devel ncurses ncurses-devel libaio readline-devel curl curl-devel e2fsprogs e2fsprogs-devel krb5-devel libidn libidn-devel openssl openssl-devel libxslt-devel libevent-devel libtool libtool-ltdl bison gd-devel vim-enhanced pcre-devel zip unzip ntpdate sysstat patch bc expect rsync git
do
yum -y install $Package
done
yum -y update bash openssl glibc
# use gcc-4.4
if [ -n "`gcc --version | head -n1 | grep '4\.1\.'`" ];then
yum -y install gcc44 gcc44-c++ libstdc++44-devel
export CC="gcc44" CXX="g++44"
fi
# check sendmail
#[ "$sendmail_yn" == 'y' ] && yum -y install sendmail && service sendmail restart
# Disable unnecessary services and remove obsolete rpm packages
for Service in `chkconfig --list | grep 3:on | awk '{print $1}'`;do chkconfig --level 3 $Service off;done
for Service in sshd network crond iptables messagebus irqbalance syslog rsyslog sendmail;do chkconfig --level 3 $Service on;done
# Disable SELinux
setenforce 0
sed -i 's/^SELINUX=.*$/SELINUX=disabled/' /etc/selinux/config
# initdefault
sed -i 's/^id:.*$/id:3:initdefault:/' /etc/inittab
init q
# PS1
[ -z "`cat ~/.bashrc | grep ^PS1`" ] && echo 'PS1="\[\e[37;40m\][\[\e[32;40m\]\u\[\e[37;40m\]@\h \[\e[35;40m\]\W\[\e[0m\]]\\$ "' >> ~/.bashrc
# history size
sed -i 's/^HISTSIZE=.*$/HISTSIZE=100/' /etc/profile
[ -z "`cat ~/.bashrc | grep history-timestamp`" ] && echo "export PROMPT_COMMAND='{ msg=\$(history 1 | { read x y; echo \$y; });user=\$(whoami); echo \$(date \"+%Y-%m-%d %H:%M:%S\"):\$user:\`pwd\`/:\$msg ---- \$(who am i); } >> /tmp/\`hostname\`.\`whoami\`.history-timestamp'" >> ~/.bashrc
# /etc/security/limits.conf
[ -z "`cat /etc/security/limits.conf | grep 'nproc 65535'`" ] && cat >> /etc/security/limits.conf <<EOF
* soft nproc 65535
* hard nproc 65535
* soft nofile 65535
* hard nofile 65535
EOF
[ -z "`cat /etc/rc.local | grep 'ulimit -SH 65535'`" ] && echo "ulimit -SH 65535" >> /etc/rc.local
# /etc/hosts
[ "$(hostname -i | awk '{print $1}')" != "127.0.0.1" ] && sed -i "s@^127.0.0.1\(.*\)@127.0.0.1 `hostname` \1@" /etc/hosts
# Set timezone
rm -rf /etc/localtime
ln -s /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
# Set DNS
#cat > /etc/resolv.conf << EOF
#nameserver 114.114.114.114
#nameserver 8.8.8.8
#EOF
# Lock the account for 180 seconds after five failed password attempts
[ -z "`cat /etc/pam.d/system-auth | grep 'pam_tally2.so'`" ] && sed -i '4a auth required pam_tally2.so deny=5 unlock_time=180' /etc/pam.d/system-auth
# alias vi
[ -z "`cat ~/.bashrc | grep 'alias vi='`" ] && sed -i "s@alias mv=\(.*\)@alias mv=\1\nalias vi=vim@" ~/.bashrc && echo 'syntax on' >> /etc/vimrc
# /etc/sysctl.conf
sed -i 's/net.ipv4.tcp_syncookies.*$/net.ipv4.tcp_syncookies = 1/g' /etc/sysctl.conf
[ -z "`cat /etc/sysctl.conf | grep 'fs.file-max'`" ] && cat >> /etc/sysctl.conf << EOF
fs.file-max=65535
net.ipv4.tcp_fin_timeout = 30
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_tw_recycle = 1
net.ipv4.ip_local_port_range = 1024 65000
net.ipv4.tcp_max_syn_backlog = 65536
net.ipv4.tcp_max_tw_buckets = 20000
net.ipv4.route.gc_timeout = 100
net.ipv4.tcp_syn_retries = 1
net.ipv4.tcp_synack_retries = 1
net.core.somaxconn = 65535
net.core.netdev_max_backlog = 262144
net.ipv4.tcp_timestamps = 0
net.ipv4.tcp_max_orphans = 262144
EOF
sysctl -p
if [ "$CentOS_RHEL_version" == '5' ];then
sed -i 's@^[3-6]:2345:respawn@#&@g' /etc/inittab
sed -i 's@^ca::ctrlaltdel@#&@' /etc/inittab
sed -i 's@LANG=.*$@LANG="en_US.UTF-8"@g' /etc/sysconfig/i18n
elif [ "$CentOS_RHEL_version" == '6' ];then
sed -i 's@^ACTIVE_CONSOLES.*@ACTIVE_CONSOLES=/dev/tty[1-2]@' /etc/sysconfig/init
sed -i 's@^start@#start@' /etc/init/control-alt-delete.conf
fi
init q
# Update time
ntpdate pool.ntp.org
[ -z "`grep 'pool.ntp.org' /var/spool/cron/root`" ] && { echo "*/20 * * * * `which ntpdate` pool.ntp.org > /dev/null 2>&1" >> /var/spool/cron/root;chmod 600 /var/spool/cron/root; }
service crond restart
# iptables
if [ -e '/etc/sysconfig/iptables' ] && [ -n "`grep ':INPUT DROP' /etc/sysconfig/iptables`" -a -n "`grep 'NEW -m tcp --dport 22 -j ACCEPT' /etc/sysconfig/iptables`" -a -n "`grep 'NEW -m tcp --dport 80 -j ACCEPT' /etc/sysconfig/iptables`" ];then
IPTABLES_STATUS=yes
else
IPTABLES_STATUS=no
fi
if [ "$IPTABLES_STATUS" == 'no' ];then
cat > /etc/sysconfig/iptables << EOF
# Firewall configuration written by system-config-securitylevel
# Manual customization of this file is not recommended.
*filter
:INPUT DROP [0:0]
:FORWARD ACCEPT [0:0]
:OUTPUT ACCEPT [0:0]
:syn-flood - [0:0]
-A INPUT -i lo -j ACCEPT
-A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport 22 -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport 80 -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport 443 -j ACCEPT
-A INPUT -p icmp -m limit --limit 100/sec --limit-burst 100 -j ACCEPT
-A INPUT -p icmp -m limit --limit 1/s --limit-burst 10 -j ACCEPT
-A INPUT -p tcp -m tcp --tcp-flags FIN,SYN,RST,ACK SYN -j syn-flood
-A INPUT -j REJECT --reject-with icmp-host-prohibited
-A syn-flood -p tcp -m limit --limit 3/sec --limit-burst 6 -j RETURN
-A syn-flood -j REJECT --reject-with icmp-port-unreachable
COMMIT
EOF
fi
FW_PORT_FLAG=`grep -ow "dport $SSH_PORT" /etc/sysconfig/iptables`
[ -z "$FW_PORT_FLAG" -a "$SSH_PORT" != '22' ] && sed -i "s@dport 22 -j ACCEPT@&\n-A INPUT -p tcp -m state --state NEW -m tcp --dport $SSH_PORT -j ACCEPT@" /etc/sysconfig/iptables
service iptables restart
service sshd restart
# install tmux
if [ ! -e "`which tmux`" ];then
cd src
src_url=http://downloads.sourceforge.net/project/levent/libevent/libevent-2.0/libevent-2.0.22-stable.tar.gz && Download_src
src_url=http://downloads.sourceforge.net/project/tmux/tmux/tmux-2.0/tmux-2.0.tar.gz && Download_src
tar xzf libevent-2.0.22-stable.tar.gz
cd libevent-2.0.22-stable
./configure
make && make install
cd ..
tar xzf tmux-2.0.tar.gz
cd tmux-2.0
CFLAGS="-I/usr/local/include" LDFLAGS="-L//usr/local/lib" ./configure
make && make install
cd ../../
if [ `getconf WORD_BIT` == 32 ] && [ `getconf LONG_BIT` == 64 ];then
ln -s /usr/local/lib/libevent-2.0.so.5 /usr/lib64/libevent-2.0.so.5
else
ln -s /usr/local/lib/libevent-2.0.so.5 /usr/lib/libevent-2.0.so.5
fi
fi
# install htop
if [ ! -e "`which htop`" ];then
cd src
src_url=http://hisham.hm/htop/releases/1.0.3/htop-1.0.3.tar.gz && Download_src
tar xzf htop-1.0.3.tar.gz
cd htop-1.0.3
./configure
make && make install
cd ../../
fi
. /etc/profile
. ~/.bashrc
|
LongTaiJun/Autoscripts
|
include/init_CentOS.sh
|
Shell
|
apache-2.0
| 7,905 |
#!/bin/bash
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
SCRIPT_ROOT=$(dirname ${BASH_SOURCE})/..
CODEGEN_PKG=${CODEGEN_PKG:-$(cd ${SCRIPT_ROOT}; ls -d -1 ./vendor/k8s.io/code-generator 2>/dev/null || echo ../../../k8s.io/code-generator)}
verify="${VERIFY:-}"
${CODEGEN_PKG}/generate-groups.sh "deepcopy" \
github.com/openshift/api/generated \
github.com/openshift/api \
"apps:v1 authorization:v1 build:v1 config:v1 image:v1,docker10,dockerpre012 kubecontrolplane:v1 legacyconfig:v1 network:v1 oauth:v1 operator:v1alpha1 osin:v1 project:v1 quota:v1 route:v1 security:v1 servicecertsigner:v1alpha1 template:v1 user:v1 webconsole:v1" \
--go-header-file ${SCRIPT_ROOT}/hack/empty.txt \
${verify}
|
PI-Victor/origin
|
vendor/github.com/openshift/api/hack/update-deepcopy.sh
|
Shell
|
apache-2.0
| 711 |
#!/usr/bin/env bash
jvmargs="-Djava.net.preferIPv4Stack=true -Djgroups.tcp.address=${IP} -Djgroups.use.jdk_logger=true"
CONFIG="default-jgroups-tcp.xml"
if [[ "${IP}" != "127.0.0.1" ]]
then
CONFIG=default-jgroups-google.xml
cat ${CONFIG} \
| sed s,%BUCKET%,${BUCKET},g \
| sed s,%BUCKET_KEY%,${BUCKET_KEY},g \
| sed s,%BUCKET_SECRET%,${BUCKET_SECRET},g \
| sed s,%IP%,${IP},g > tmp
mv tmp ${CONFIG}
fi
mv ${CONFIG} jgroups.xml
java -cp ${JAR}:lib/* ${jvmargs} eu.tsp.transactions.Server
|
otrack/cloud-computing-hands-on
|
transactions/src/main/bin/run.sh
|
Shell
|
apache-2.0
| 520 |
#!/bin/bash
# shellcheck disable=SC1091
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright Clairvoyant 2018
PATH=/usr/bin:/usr/sbin:/bin:/sbin:/usr/local/bin
echo "********************************************************************************"
echo "*** $(basename "$0")"
echo "********************************************************************************"
echo "Configuring JDK to disable all except TLS v1.2..."
if [ -f /etc/profile.d/jdk.sh ]; then
. /etc/profile.d/jdk.sh
elif [ -f /etc/profile.d/java.sh ]; then
. /etc/profile.d/java.sh
elif [ -d /usr/java/default ]; then
JAVA_HOME=/usr/java/default
fi
if [ -z "${JAVA_HOME}" ]; then echo "ERROR: \$JAVA_HOME is not set."; exit 10; fi
if [ ! -f ${JAVA_HOME}/jre/lib/security/java.security-orig ]; then
/bin/cp -p ${JAVA_HOME}/jre/lib/security/java.security ${JAVA_HOME}/jre/lib/security/java.security-orig
fi
if ! grep ^jdk.tls.disabledAlgorithms= ${JAVA_HOME}/jre/lib/security/java.security | grep -q "TLSv1.1, TLSv1,"; then
sed -e '/^jdk.tls.disabledAlgorithms=/s|SSLv3|TLSv1.1, TLSv1, SSLv3|' -i ${JAVA_HOME}/jre/lib/security/java.security
fi
|
teamclairvoyant/hadoop-deployment-bash
|
tls/configure_jdk_tlsv1.2.sh
|
Shell
|
apache-2.0
| 1,629 |
#!/bin/bash
directory=`dirname $0`
dt=$(date +'%Y-%m-%d %H:%M:%S')
echo "Reboot at '$dt'"
echo "Kill old serial proxy"
$directory/KillSerialProxy.sh
sleep 1
echo "Start update"
$directory/Service/Update.sh
sleep 1
echo "Restart serial proxy"
$directory/RunSerialProxy.sh
|
MaxMorgenstern/EmeraldAI
|
EmeraldAI/Application/ExecutionScripts/Cron_UpdateAndRebootSerialProxy.sh
|
Shell
|
apache-2.0
| 277 |
#!/usr/bin/env bash
if [ "$TRAVIS_PULL_REQUEST" == 'false' ]; then
mvn deploy -DskipTests=true -P sign --settings settings.xml
fi
|
jwtodd/Autumn
|
bin/deploy.sh
|
Shell
|
apache-2.0
| 134 |
#!/bin/bash
# Unpack Tools
# - Used to unpack packages and install base file system
set -e
set -u
. settings/config
. scripts/utils/utils.sh
if [ -n "${1-}" ]; then
FAKEROOT="${1}"
else
if [ -e "${FAKEROOT}" ]; then
echo "Fakeroot exists! Cleaning..."
cd "${BUILD_ROOT}/"
rm -rf "${FAKEROOT}/"
fi
echo "Creating Fakeroot Directory..."
mkdir -pv "${FAKEROOT}"
check_status
fi
# now unpack everything from ${BUILD_ROOT}/packages directory
PACKAGES=$(find "${FAKEROOT_PKGDIR}/" -type f -name "*.tar.gz")
for package in ${PACKAGES[@]}; do
echo "Extracting package: ${package}"
tar -zxvf "${package}" -C "${FAKEROOT}/"
check_status
done
# we should sync now to ensure all buffers are flushed to disk
echo ""
echo -n "Syncing disk"
sync
check_status
echo ""
echo "Packages Unpacked:"
echo ""
for package in ${PACKAGES[@]}; do
echo -n "${package}"
show_status "${OK}"
done
echo ""
echo "All packages unpacked!"
exit 0
|
SnakeDoc/LiLi
|
scripts/utils/unpack.sh
|
Shell
|
apache-2.0
| 990 |
#!/bin/bash
PROGRAM=./partition
PROGRAM_WEIGHT=./partition_weight
PROGRAM_SOLUTION=./partition_solutions
# MPI_PATH=/usr/local/openmpi-1.6.5_gcc
# MPI_EXEC=$MPI_PATH/bin/mpiexec
MPI_EXEC=mpiexec
MODEL_SIZE=32
set -x
# export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$MPI_PATH/lib
rm -rf input_$MODEL_SIZE
rm -rf output
rm -rf output_$MODEL_SIZE*
rm -rf output_weight
rm -rf output_noneweight
rm -rf output_solution
# create model
../cgns/create_cgns --name=input_$MODEL_SIZE/cgns_hexa.cgns \
--size=$MODEL_SIZE,$MODEL_SIZE,$MODEL_SIZE \
--coords=1.0,1.0,1.0
# partition from 1 to 4
$MPI_EXEC -np 4 $PROGRAM input_$MODEL_SIZE/index.dfi
if [ $? -ne 0 ]; then exit;fi
mv output output_$MODEL_SIZE-m1-p4
# partition from 4 to 8
$MPI_EXEC -np 8 $PROGRAM output_$MODEL_SIZE-m1-p4/index.dfi
if [ $? -ne 0 ]; then exit;fi
mv output output_$MODEL_SIZE-m4-p8
# partition from 8 to 16
$MPI_EXEC -np 16 $PROGRAM output_$MODEL_SIZE-m4-p8/index.dfi
if [ $? -ne 0 ]; then exit;fi
mv output output_$MODEL_SIZE-m8-p16
# partition from 16 to 8
$MPI_EXEC -np 8 $PROGRAM output_$MODEL_SIZE-m8-p16/index.dfi
if [ $? -ne 0 ]; then exit;fi
mv output output_$MODEL_SIZE-m16-p8
# partition from 8 to 4
$MPI_EXEC -np 4 $PROGRAM output_$MODEL_SIZE-m16-p8/index.dfi
if [ $? -ne 0 ]; then exit;fi
mv output output_$MODEL_SIZE-m8-p4
# partition from 4 to 1
$MPI_EXEC -np 1 $PROGRAM output_$MODEL_SIZE-m8-p4/index.dfi
if [ $? -ne 0 ]; then exit;fi
mv output output_$MODEL_SIZE-m4-p1
# partition with weight
$MPI_EXEC -np 8 $PROGRAM_WEIGHT input_$MODEL_SIZE/index.dfi
# partition with solution
$MPI_EXEC -np 8 $PROGRAM_SOLUTION input_$MODEL_SIZE/index.dfi
|
avr-aics-riken/UDMlib
|
examples/cc/partition/partition_run.sh
|
Shell
|
bsd-2-clause
| 1,675 |
#!/usr/bin/env bash
MASON_NAME=geometry
MASON_VERSION=0.5.0
MASON_HEADER_ONLY=true
. ${MASON_DIR}/mason.sh
function mason_load_source {
mason_download \
https://github.com/mapbox/geometry.hpp/archive/v${MASON_VERSION}.tar.gz \
c6e53f0a74c8016365d414451beaa71032112a48
mason_extract_tar_gz
export MASON_BUILD_PATH=${MASON_ROOT}/.build/geometry.hpp-${MASON_VERSION}
}
function mason_compile {
mkdir -p ${MASON_PREFIX}/include/
cp -r include/mapbox ${MASON_PREFIX}/include/mapbox
}
function mason_cflags {
echo "-I${MASON_PREFIX}/include"
}
function mason_ldflags {
:
}
mason_run "$@"
|
hydrays/osrm-backend
|
third_party/mason/scripts/geometry/0.5.0/script.sh
|
Shell
|
bsd-2-clause
| 636 |
# Author : Jeonghoonkang, github.com/jeonghoonkang
# should be run with sudo
docker run --detach \
--name nginx-proxy \
--publish 80:80 \
--publish 443:443 \
--volume /home/tinyos/devel/docker/nginx_proxy/nginx_certs:/etc/nginx/certs \
--volume /home/tinyos/devel/docker/nginx_proxy/nginx_vhost.d:/etc/nginx/vhost.d \
--volume /home/tinyos/devel/docker/nginx_proxy/nginx_html:/usr/share/nginx/html \
--volume /home/tinyos/devel/docker/nginx_proxy/nginx_conf:/etc/nginx/conf.d \
--volume /var/run/docker.sock:/tmp/docker.sock:ro \
jwilder/nginx-proxy
docker run --detach \
--name nginx-proxy-letsencrypt \
--volumes-from nginx-proxy \
--volume /var/run/docker.sock:/var/run/docker.sock:ro \
--env "[email protected]" \
jrcs/letsencrypt-nginx-proxy-companion
docker run --detach \
--name your-proxyed-app \
--env "VIRTUAL_HOST=www.win.tld" \
--env "LETSENCRYPT_HOST=tinyos.win.tld" \
nginxdemos/hello
nginx
docker run --detach \
--name grafana \
--env "VIRTUAL_HOST=othersubdomain.yourdomain.tld" \
--env "VIRTUAL_PORT=3000" \
--env "LETSENCRYPT_HOST=othersubdomain.yourdomain.tld" \
--env "[email protected]" \
grafana/grafana
docker run -d --name wordpress -e VIRTUAL_HOST=imbang.net,www.imbang.net -e \
[email protected] -v wordpress:/var/www/html wordpress:latest
docker run -P -d nginxdemos/hello
|
jeonghoonkang/BerePi
|
apps/docker/docker_compose/docker_proxcy_le.sh
|
Shell
|
bsd-2-clause
| 1,484 |
#!/usr/bin/env bash
sudo apt-get update
sudo apt-get install -y git nodejs
# Prevent some error messages, make /tmp writable
# see http://stackoverflow.com/questions/23822491/ruby-on-rails-permission-denied-when-using-rails-generate-controller-welcome
sudo chmod -R 1777 /tmp
# Fix a permission issue with leopard/rwtrusty vagrant box:
sudo -u postgres dropdb vagrant
sudo -u postgres dropuser vagrant
sudo -u postgres createuser --superuser vagrant
# Now cd into the synced project folder (/vagrant) and install all dependencies:
cd /vagrant
bundle install
|
asm-products/invoicerio
|
vagrant/bootstrap.sh
|
Shell
|
bsd-2-clause
| 544 |
#!/bin/bash
# Set up test binaries from test_relocs.c
# TODO(jvoung): Have a CHECK syntax like the LLVM-lit tests?
set -e
set -u
set -x
readonly arches="i686 x86_64 arm mips"
readonly TC_ROOT=${NACL_SDK_ROOT}/toolchain/linux_pnacl/bin
pushd test_binaries
for arch in ${arches}; do
OUT=${arch}
${TC_ROOT}/pnacl-clang -c -arch ${arch} test_func_secs1.c -O1 \
-o ${OUT}/test_func_secs1.o --pnacl-allow-translate -Wt,-ffunction-sections
${TC_ROOT}/pnacl-clang -c -arch ${arch} test_func_secs2.c -O1 \
-o ${OUT}/test_func_secs2.o --pnacl-allow-translate -Wt,-ffunction-sections
# TODO(jvoung): generate a --section-ordering-file?
${TC_ROOT}/pnacl-clang -arch ${arch} \
${OUT}/test_func_secs1.o \
${OUT}/test_func_secs2.o \
-o ${OUT}/test_func_secs.nexe \
-Wn,--section-ordering-file=test_func_secs_order.txt \
--pnacl-allow-native \
-ffunction-sections -save-temps --pnacl-driver-verbose
done
popd
|
jvoung/go-ld
|
test_binaries/test_func_sections.sh
|
Shell
|
bsd-3-clause
| 939 |
#!/usr/bin/env bash
### bashrc.venv.sh
# note: most of these aliases and functions are overwritten by `we`
## Variables
function _setup_venv {
# _setup_venv() -- configure __PROJECTSRC, PATH, __VENV, _setup_venv_SRC()
# __PROJECTSRC (str): path to local project settings script to source
export __PROJECTSRC="${__WRK}/.projectsrc.sh"
# shellcheck disable=1090
[ -f "$__PROJECTSRC" ] && source "$__PROJECTSRC"
# PATH="~/.local/bin:$PATH" (if not already there)
PATH_prepend "${HOME}/.local/bin"
# __VENV -- path to local venv config script (executable)
export __VENV="${__DOTFILES}/scripts/venv.py"
# CdAlias functions and completions
# shellcheck source=../venv/scripts/venv.sh
source "${__DOTFILES}/etc/venv/scripts/venv.sh"
if [ "${VENVPREFIX}" == "/" ]; then
# shellcheck source=../venv/scripts/venv_root_prefix.sh
source "${__DOTFILES}/etc/venv/scripts/venv_root_prefix.sh"
fi
# You must run this manually if you want a default src venv
# _setup_venv_SRC
}
function _setup_venv_SRC {
# _setup_venv_SRC() -- configure __SRCVENV and __SRC global virtualenv
# __SRCVENV (str): global 'src' venv symlink (~/-wrk/src)
# (e.g. ln -s ~/-wrk/-ve27/src ~/-wrk/src)
export __SRCVENV="${__WRK}/src"
# __SRC (str): global 'src' venv ./src directory path (~/-wrk/src/src)
export __SRC="${__SRCVENV}/src"
if [ ! -e "${__SRCVENV}" ]; then
if [ ! -d "${WORKON_HOME}/src" ]; then
mkvirtualenv -p "$(command -v python)" \
-i pyrpo -i pyline -i pgs src
fi
ln -s "${WORKON_HOME}/src" "${__SRCVENV}"
fi
# ($__SRC/git $__SRC/git)
if [ ! -d "$__SRC" ]; then
mkdir -p \
"${__SRC}/git/github.com" \
"${__SRC}/git/gitlab.com" \
"${__SRC}/git/bitbucket.org" \
"${__SRC}/hg/bitbucket.org"
fi
}
_setup_venv
## Functions
function venv {
# venv $@ -- call $_VENV $@
# venv -h -- print venv --help
# venv --print-bash -- print bash configuration
# venv --print-json -- print IPython configuration as JSON
(set -x; $__VENV "${@}")
}
function venvw {
# venvw $@ -- venv -E $@ (for the current environment)
(set -x; $__VENV -e "${@}")
}
function workon_venv {
# workon_venv() -- workon a virtualenv and load venv (TAB-completion)
# param $1: $VIRTUAL_ENV_NAME ("dotfiles")
# param $2: $_APP ("dotfiles") [default: $1)
# ${WORKON_HOME}/${VIRTUAL_ENV_NAME} # == $VIRTUAL_ENV
# ${VIRTUAL_ENV}/src # == $_SRC
# ${_SRC}/${VIRTUAL_ENV_NAME} # == $_WRD
# examples:
# we dotfiles
# we dotfiles dotfiles
if [ -n "${1}" ]; then
local _venvstr
local _workon_home
if [ -d "${WORKON_HOME}/${1}" ]; then
_venvstr="${1}"
_workon_home="${WORKON_HOME}"
shift
elif [ -d "${1}" ]; then
_venvstr="$(basename "${1}")"
_workon_home="$(dirname "${1}")"
shift
else
echo "err: venv not found: ${1}"
return 1
fi
#append to shell history
history -a
# shellcheck disable=1090
workon "${_venvstr}" && \
source <($__VENV \
--wrk="$__WRK" \
--wh="${_workon_home}" \
--print-bash \
"${_venvstr}" "${@}" ) && \
dotfiles_status && \
declare -f '_setup_venv_prompt' > /dev/null 2>&1 \
&& _setup_venv_prompt "${_TERM_ID:-${_venvstr}}"
else
#if no arguments are specified, list virtual environments
lsvirtualenvs
return 1
fi
}
function we {
# we() -- workon_venv
workon_venv "${@}"
}
complete -o default -o nospace -F _virtualenvs workon_venv
complete -o default -o nospace -F _virtualenvs we
function _setup_venv_aliases {
# _setup_venv_aliases() -- load venv aliases
# note: these are overwritten by `we` [`source <(venv -b)`]
# shellcheck source=../../scripts/_ewrd.sh
source "${__DOTFILES}/scripts/_ewrd.sh"
# shellcheck source=../../scripts/_grinwrd.sh
source "${__DOTFILES}/scripts/_grinwrd.sh"
# makew -- make -C "${WRD}" ${@} [scripts/makew <TAB>]
# shellcheck source=../../scripts/makew
source "${__DOTFILES}/scripts/makew"
# shellcheck source=../../scripts/ssv
source "${__DOTFILES}/scripts/ssv"
# shellcheck disable=2119
_setup_supervisord
# hgw -- hg -R ${_WRD} [scripts/hgw <TAB>]
# shellcheck source=../../scripts/hgw
source "${__DOTFILES}/scripts/hgw"
# gitw -- git -C ${_WRD} [scripts/gitw <TAB>]
# shellcheck source=../../scripts/gitw
source "${__DOTFILES}/scripts/gitw"
# serve-() -- ${_SERVE_}
# alias serve-='${_SERVE_}'
# shell-() -- ${_SHELL_}
# alias shell-='${_SHELL_}'
# test-() -- cd ${_WRD} && python setup.py test
alias testpyw='(cd ${_WRD} && python "${_WRD_SETUPY}" test)'
# testr-() -- reset; cd ${_WRD} && python setup.py test
alias testpywr='(reset; cd ${_WRD} && python "${_WRD_SETUPY}" test)'
}
_setup_venv_aliases
function _setup_venv_prompt {
# _setup_venv_prompt() -- set PS1 with $WINDOW_TITLE, $VIRTUAL_ENV_NAME,
# and ${debian_chroot}
# "WINDOW_TITLE (venvprompt) [debian_chroot]"
# try: _APP, VIRTUAL_ENV_NAME, $(basename VIRTUAL_ENV)
local venvprompt=""
venvprompt=${_APP:-${VIRTUAL_ENV_NAME:-${VIRTUAL_ENV:+"$(basename $VIRTUAL_ENV)"}}}
# TODO: CONDA
# shellcheck disable=2154
export VENVPROMPT="${venvprompt:+"($venvprompt) "}${debian_chroot:+"[$debian_chroot] "}${WINDOW_TITLE:+"$WINDOW_TITLE "}"
if [ -n "$BASH_VERSION" ]; then
# shellcheck disable=2154
if [ "$color_prompt" == yes ]; then
PS1='${VENVPROMPT}\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\n\$ '
else
PS1='${VENVPROMPT}\u@\h:\w\n\$ '
unset color_prompt
fi
fi
}
_setup_venv_prompt
function venv_ls {
# venv_ls() -- list virtualenv directories
prefix=${1:-${VIRTUAL_ENV}}
lsargs=${2:-${lsargs:-"-ld"}}
if [ -z "${prefix}" ]; then
return
fi
#ls -ld ${prefix}/**
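    # The find/xargs pipeline below long-lists the venv root plus up to two
    # directory levels under it and under lib/, e.g. bin/, src/, lib/python*/
    # (the exact entries depend on the virtualenv; names shown are typical).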
find "${prefix}" "${prefix}/lib" -maxdepth 2 -type d -print0 \
| xargs -0 ls --color=auto ${lsargs}
}
function lsvenv {
# lsvenv() -- venv_ls()
venv_ls "${@}"
}
function venv_mkdirs {
# venv_mkdirs() -- create FSH paths in ${1} or ${VIRTUAL_ENV}
prefix=${1:-${VIRTUAL_ENV}}
if [ -z "${prefix}" ]; then
return
fi
ensure_mkdir "${prefix}"
ensure_mkdir "${prefix}/bin"
ensure_mkdir "${prefix}/etc"
#ensure_mkdir "${prefix}/home"
ensure_mkdir "${prefix}/lib"
#ensure_mkdir "${prefix}/opt"
#ensure_mkdir "${prefix}/sbin"
#ensure_mkdir "${prefix}/share/doc"
ensure_mkdir "${prefix}/src"
#ensure_mkdir "${prefix}/srv"
ensure_mkdir "${prefix}/tmp"
ensure_mkdir "${prefix}/usr/share/doc"
ensure_mkdir "${prefix}/var/cache"
ensure_mkdir "${prefix}/var/log"
ensure_mkdir "${prefix}/var/run"
ensure_mkdir "${prefix}/var/www"
venv_ls "${prefix}"
}
|
westurner/dotfiles
|
etc/bash/10-bashrc.venv.sh
|
Shell
|
bsd-3-clause
| 7,361 |
#!/bin/bash
# --- using: GCC 4 ---
# set constants ----------------------------------------------------------------
COMPILE_OPTIONS="-c -x c++ -ansi -std=c++98 -pedantic -fno-gnu-keywords -fno-enforce-eh-specs -fno-rtti -O3 -ffast-math -mfpmath=sse -msse -Wall -Wextra -D TESTING"
# compile ----------------------------------------------------------------------
echo
g++ --version
echo "--- compile ---"
g++ $COMPILE_OPTIONS PowFast.cpp
g++ $COMPILE_OPTIONS PowFast-tester.cpp
# link -------------------------------------------------------------------------
echo
echo "--- link ---"
g++ -o PowFast-tester_cpp *.o
rm *.o
echo
echo "--- done ---"
exit
|
hxa7241/powfast
|
powfast-cpp/build-gcc-cpp.sh
|
Shell
|
bsd-3-clause
| 666 |
#!/bin/bash
# Doxygen directory
doxygenDir="/usr/local/bin"
# Set dirs
cmdDir=$(dirname $0)
baseDir="${cmdDir}/.."
tempDir="${baseDir}/build/doxygen"
distDir="${baseDir}/dist/ios"
# Copy proxy files to temp directory
rm -rf ${tempDir}
mkdir -p ${tempDir}
cp -r ${baseDir}/generated/ios-objc/proxies/* ${tempDir}
cp -r ${baseDir}/ios/objc/ui/MapView.h ${tempDir}/NTMapView.h
cp -r ${baseDir}/ios/objc/ui/MapView.mm ${tempDir}/NTMapView.mm
find ${tempDir} -name "*NTBaseMapView.*" -exec rm {} \;
find ${tempDir} -name "*NTRedrawRequestListener.*" -exec rm {} \;
find ${tempDir} -name "*NTIOSUtils.*" -exec rm {} \;
# Remove attributes unsupported by doxygen from source files
find ${tempDir} -name "*.h" -exec sed -i '' 's/__attribute__ ((visibility("default")))//g' {} \;
# Execute doxygen
rm -rf ${distDir}/docObjC
${doxygenDir}/doxygen "doxygen/doxygen-objc.conf"
# Finished
echo "Done!"
|
CartoDB/mobile-sdk
|
scripts/generate-doxygen.sh
|
Shell
|
bsd-3-clause
| 896 |
#!/bin/bash
# Copyright: 2017, Loic Esteve
# License: BSD 3 clause
# This script is used in Travis to check that PRs do not add obvious
# flake8 violations. It relies on two things:
# - computing a similar diff to what github is showing in a PR. The
# diff is done between:
# 1. the common ancestor of the local branch and the
# joblib/joblib remote
# 2. the local branch
# - run flake8 --diff on the computed diff
#
# Additional features:
# - the line numbers in Travis match the local branch on the PR
# author machine.
# - bash continuous_integration/azure/flake8_diff.sh can be run
# locally for quick turn-around
set -e
# pipefail is necessary to propagate exit codes
set -o pipefail
PROJECT=joblib/joblib
PROJECT_URL=https://github.com/$PROJECT.git
# Find the remote with the project name (upstream in most cases)
REMOTE=$(git remote -v | grep $PROJECT | cut -f1 | head -1 || echo '')
# Add a temporary remote if needed. For example this is necessary when
# Travis is configured to run in a fork. In this case 'origin' is the
# fork and not the reference repo we want to diff against.
if [[ -z "$REMOTE" ]]; then
TMP_REMOTE=tmp_reference_upstream
REMOTE=$TMP_REMOTE
git remote add $REMOTE $PROJECT_URL
fi
echo "Remotes:"
git remote --verbose
# Travis does the git clone with a limited depth (50 at the time of
# writing). This may not be enough to find the common ancestor with
# $REMOTE/master so we unshallow the git checkout
if [[ -a .git/shallow ]]; then
echo 'Unshallowing the repo.'
git fetch --unshallow
fi
# Try to find the common ancestor between $LOCAL_BRANCH_REF and
# $REMOTE/master
if [[ -z "$LOCAL_BRANCH_REF" ]]; then
LOCAL_BRANCH_REF=$(git rev-parse --abbrev-ref HEAD)
fi
REMOTE_MASTER_REF="$REMOTE/master"
# Make sure that $REMOTE_MASTER_REF is a valid reference
echo -e "Fetching $REMOTE_MASTER_REF"
git fetch $REMOTE master:refs/remotes/$REMOTE_MASTER_REF
LOCAL_BRANCH_SHORT_HASH=$(git rev-parse --short $LOCAL_BRANCH_REF)
REMOTE_MASTER_SHORT_HASH=$(git rev-parse --short $REMOTE_MASTER_REF)
# Very confusing: need to use '..' i.e. two dots for 'git
# rev-list' but '...' i.e. three dots for 'git diff'
DIFF_RANGE="$REMOTE_MASTER_SHORT_HASH...$LOCAL_BRANCH_SHORT_HASH"
REV_RANGE="$REMOTE_MASTER_SHORT_HASH..$LOCAL_BRANCH_SHORT_HASH"
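# Illustration with hypothetical short hashes: with master at "def5678" and the
# local branch at "abc1234", DIFF_RANGE is "def5678...abc1234" (for git diff)
# and REV_RANGE is "def5678..abc1234" (for git rev-list).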
echo -e "Running flake8 on the diff in the range" \
"from $LOCAL_BRANCH_REF to $REMOTE_MASTER_REF ($DIFF_RANGE)\n" \
"$(git rev-list $REV_RANGE | wc -l) commit(s)"
# Remove temporary remote only if it was previously added.
if [[ -n "$TMP_REMOTE" ]]; then
git remote remove $TMP_REMOTE
fi
# We ignore files from doc/sphinxext. Unfortunately there is no
# way to do it with flake8 directly (the --exclude does not seem to
# work with --diff). We could use the exclude magic in the git pathspec
# ':!doc/sphinxext' but it is only available on git 1.9 and Travis
# uses git 1.8.
# We need the following command to exit with 0 hence the echo in case
# there is no match
MODIFIED_FILES=$(git diff --name-only $DIFF_RANGE | \
grep -v 'doc/sphinxext' || echo "no_match")
if [[ "$MODIFIED_FILES" == "no_match" ]]; then
echo "No file outside doc/sphinxext has been modified"
else
# Conservative approach: diff without context so that code that
# was not changed does not create failures
git diff --unified=0 $DIFF_RANGE -- $MODIFIED_FILES | flake8 --diff --show-source
fi
echo -e "No problem detected by flake8\n"
|
joblib/joblib
|
continuous_integration/flake8_diff.sh
|
Shell
|
bsd-3-clause
| 3,497 |
#!/bin/bash
#SBATCH --account=nstaff
#SBATCH --constraint=knl
#SBATCH --core-spec=4
#SBATCH --image=docker:rcthomas/nersc-python-bench:0.3.2
#SBATCH --job-name=pynamic-cori-knl-shifter-012
#SBATCH --mail-type=FAIL
#SBATCH [email protected]
#SBATCH --nodes=12
#SBATCH --ntasks-per-node=8
#SBATCH --output=logs/pynamic-cori-knl-shifter-012-%j.out
#SBATCH --partition=regular
#SBATCH --qos=normal
#SBATCH --time=10
# Configuration.
commit=false
# Initialize benchmark result.
if [ $commit = true ]; then
shifter python /usr/local/bin/report-benchmark.py initialize
fi
# Run benchmark.
export OMP_NUM_THREADS=1
unset PYTHONSTARTUP
pynamic_dir=/opt/pynamic-master/pynamic-pyMPI-2.6a1
output=tmp/latest-$SLURM_JOB_NAME.txt
srun -c 32 --cpu_bind=cores shifter $pynamic_dir/pynamic-pyMPI $pynamic_dir/pynamic_driver.py $(date +%s) | tee $output
# Extract result.
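# The greps below assume pynamic prints lines such as
# "Pynamic: startup time = <seconds> s" and take the next-to-last field;
# the exact wording is inferred from the patterns, not from captured output.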
startup_time=$( grep '^Pynamic: startup time' $output | awk '{ print $(NF-1) }' )
import_time=$( grep '^Pynamic: module import time' $output | awk '{ print $(NF-1) }' )
visit_time=$( grep '^Pynamic: module visit time' $output | awk '{ print $(NF-1) }' )
total_time=$( echo $startup_time + $import_time + $visit_time | bc )
echo total_time $total_time s
# Finalize benchmark result.
if [ $commit = true ]; then
shifter python /usr/local/bin/report-benchmark.py finalize $total_time
fi
|
NERSC/nersc-python-bench
|
scripts/pynamic-cori-knl-shifter-012.sh
|
Shell
|
bsd-3-clause
| 1,369 |
#!/bin/bash
which readlink 2>&1 > /dev/null
if [[ $? == 0 ]]; then
premakeDir=$(readlink -f $(dirname $0) )
else
premakeDir=$(dirname $0)
fi
premake="$premakeDir/bin/debug/premake4 --scripts=$premakeDir/src"
systemScript="--systemScript=$premakeDir/premake-system.lua"
hashFile=$premakeDir/hash.tmp
cd $premakeDir
forceBuild=0
threads="-j16"
verbose=
debug=""
while getopts ":vdfj:-" OPTION
do
case "$OPTION" in
v) verbose=1 ;;
f) forceBuild=1 ;;
j) threads="-j$OPTARG" ;;
d) debug=" --debug " ;;
-) break ;;
\?) ;;
esac
done
shift $(($OPTIND-1))
if [[ $verbose ]]; then
echo "Building Premake"
fi
if [[ $forceBuild == 1 ]]; then
(rm -rf $premakeDir/bin
rm -rf $premakeDir/obj
rm *.ninja
rm .ninja_log) 2> /dev/null
fi
if [[ ! -f "$premakeDir/build.ninja" ]]; then
cp $premakeDir/build.ninja.default $premakeDir/build.ninja
fi
if [[ ! -f "$premakeDir/buildedges.ninja" ]]; then
cp $premakeDir/buildedges.ninja.default $premakeDir/buildedges.ninja
fi
# Test if premake exists
if [[ ! -f "$premakeDir/bin/release/premake4" || ! -f "$premakeDir/bin/debug/premake4" ]]; then
# Assume that ninja files in the depot are valid
ninja $threads
result=$?
if [[ $result != 0 ]]; then
echo "Error building Premake : ninja bootstrap of premake failed"
exit $result
fi
fi
# Now rebuild to make sure it's the latest
$premake --file=$premakeDir/premake4.lua embed nobuild --quiet --reporoot=$premakeDir "$@"
result=$?
if [[ $result == 0 ]]; then
$premake --file=$premakeDir/premake4.lua $systemScript --reporoot=$premakeDir ninja $debug $threads "$@"
result=$?
fi
if [[ $result != 0 ]]; then
echo "Error : Failed to build Premake"
rm $hashFile 2> /dev/null
fi
if [[ $verbose ]]; then
echo "---------------------"
fi
exit $result
|
annulen/premake-dev-rgeary
|
buildPremake.sh
|
Shell
|
bsd-3-clause
| 1,768 |
#!/bin/sh
set -e
# do not use debug.c and debuglog.c, they #include <syslog.h>
# and splint does not like this include
# do not use configfile.c since it is a lex file from configfile.l
if [ $# -lt 1 ]
then
files=$(ls -1 src/*.c | grep -v debug | grep -v configfile)
else
files="$@"
fi
inc="-I. -Isrc -Isrc/PCSC -I/usr/include/hal -I/usr/include/dbus-1.0 -I/usr/lib/dbus-1.0/include "
opt="-warnposix -unrecog -type -predboolint -likelybool"
splint $inc $opt $files
|
vicamo/pcsc-lite-android
|
splint.sh
|
Shell
|
bsd-3-clause
| 471 |
#!/bin/bash
# Installation script for MyTAP
SQLHOST='localhost';
SQLPORT=3306;
SQLSOCK=''
NOTESTS=0
NOINSTALL=0
FILTER=0
while [[ "${#}" > 0 ]]; do
case ${1} in
-u|--user)
SQLUSER="${2}";
shift
;;
-p|--password)
SQLPASS="${2}"
shift
;;
-h|--host)
SQLHOST="${2}"
shift
;;
-P|--port)
SQLPORT="${2}"
shift
;;
-S|--socket)
SQLSOCK="${2}"
shift
;;
-f|--filter)
NOFILTER=0
FILTER="${2}"
shift
;;
-t|--no-tests)
NOTESTS=1
;;
-i|--no-install)
NOINSTALL=1
;;
-?|--help)
cat << EOF
Usage:
install.sh [options]
Options:
-u, --user string MySQL username
-p, --password string MySQL password
-h, --host name or IP MySQL host
  -P, --port number          MySQL port
  -S, --socket filename      MySQL socket file
-t, --no-tests Don't run the test suite when the install is completed
-i, --no-install Don't perform the installation, i.e. just run the test suite
-f, --filter string Perform the action on one class of objects <matching|eq|moretap|todo|utils|charset|collation|column|constraint|engine|event|index|partition|privilege|role|routines|table|trigger|schemata|user|view>
EOF
exit 1
;;
*)
exit 1
;;
esac;
shift;
done
MYSQLOPTS="--disable-pager --batch --raw --skip-column-names --unbuffered"
if [[ ${SQLUSER} != '' ]] && [[ ${SQLPASS} != '' ]]; then
MYSQLOPTS="${MYSQLOPTS} -u${SQLUSER} -p${SQLPASS}";
fi
if [[ ${SQLSOCK} != '' ]]; then
MYSQLOPTS="${MYSQLOPTS} --socket=${SQLSOCK}";
fi
if [[ ${SQLHOST} != 'localhost' ]]; then
MYSQLOPTS="${MYSQLOPTS} --host=${SQLHOST}";
fi
if [[ ${SQLPORT} != '3306' ]]; then
MYSQLOPTS="${MYSQLOPTS} --port=${SQLPORT}"
fi
MYVER=$(mysql ${MYSQLOPTS} --execute "
SELECT (SUBSTRING_INDEX(VERSION(), '.', 1) * 100000)
+ (SUBSTRING_INDEX(SUBSTRING_INDEX(VERSION(), '.', 2), '.', -1) * 1000)
+ CAST(SUBSTRING_INDEX(SUBSTRING_INDEX(SUBSTRING_INDEX(VERSION(), '-', 1),'.', 3), '.', -1) AS UNSIGNED);
")
MYVARIANT=$(mysql ${MYSQLOPTS} --execute "
SELECT
CASE
WHEN version() REGEXP 'MariaDB' = 1 THEN 'MariaDB'
WHEN version() REGEXP 'Percona' = 1 THEN 'Percona'
ELSE 'MySQL'
END;
")
# checking thread_stack settings. See #44 for reference.
thread_stack=$(mysql ${MYSQLOPTS} --execute "SELECT @@thread_stack" --skip_column_names)
if [[ ${thread_stack} -lt 262144 ]]; then
echo "Your thread_stack variable is set to ${thread_stack} bytes and will"
echo "be too low to use myTAP. You should change the thread_stack variable to"
echo "at least 262144 bytes (add thread_stack=256k to your mysql conf file)."
exit 1
fi
# import the full package before running the tests
# you can't use a wildcard with the source command so all version specific files need
# to be separately listed
if [[ ${NOINSTALL} -eq 0 ]]; then
echo "============= installing myTAP ============="
echo "Importing myTAP base"
mysql ${MYSQLOPTS} --execute 'source ./mytap.sql';
mysql ${MYSQLOPTS} --execute 'source ./mytap-schemata.sql';
mysql ${MYSQLOPTS} --execute 'source ./mytap-engine.sql';
mysql ${MYSQLOPTS} --execute 'source ./mytap-collation.sql';
mysql ${MYSQLOPTS} --execute 'source ./mytap-charset.sql';
mysql ${MYSQLOPTS} --execute 'source ./mytap-timezone.sql';
mysql ${MYSQLOPTS} --execute 'source ./mytap-user.sql';
mysql ${MYSQLOPTS} --execute 'source ./mytap-event.sql';
mysql ${MYSQLOPTS} --execute 'source ./mytap-table.sql';
mysql ${MYSQLOPTS} --execute 'source ./mytap-view.sql';
mysql ${MYSQLOPTS} --execute 'source ./mytap-column.sql';
mysql ${MYSQLOPTS} --execute 'source ./mytap-trigger.sql';
mysql ${MYSQLOPTS} --execute 'source ./mytap-role.sql';
mysql ${MYSQLOPTS} --execute 'source ./mytap-routines.sql';
mysql ${MYSQLOPTS} --execute 'source ./mytap-constraint.sql';
mysql ${MYSQLOPTS} --execute 'source ./mytap-index.sql';
mysql ${MYSQLOPTS} --execute 'source ./mytap-partition.sql';
mysql ${MYSQLOPTS} --execute 'source ./mytap-privilege.sql';
if [[ ${MYVER} -ge 506004 ]]; then
echo "Importing Version 5.6.4 patches";
mysql ${MYSQLOPTS} --execute 'source ./mytap-table-564.sql';
fi
if [[ ${MYVER} -ge 507006 ]]; then
echo "Importing Version 5.7.6 patches";
mysql ${MYSQLOPTS} --execute 'source ./mytap-table-576.sql';
mysql ${MYSQLOPTS} --execute 'source ./mytap-global-576.sql';
mysql ${MYSQLOPTS} --execute 'source ./mytap-user-576.sql';
fi
if [[ ${MYVER} -ge 800011 ]]; then
echo "Importing Version 8.0.11 patches";
mysql ${MYSQLOPTS} --execute 'source ./mytap-role-8011.sql';
mysql ${MYSQLOPTS} --execute 'source ./mytap-table-8011.sql';
fi
echo "Importing cross-variant compatibility layer";
if [ "${MYVARIANT}" == "MariaDB" ]; then
mysql ${MYSQLOPTS} --execute 'source ./mytap-compat-mariadb.sql';
else
mysql ${MYSQLOPTS} --execute 'source ./mytap-compat-mysql.sql';
fi
fi
if [[ ${NOTESTS} -eq 0 ]]; then
if [[ ${FILTER} != 0 ]]; then
echo "Running test suite with filter: ${FILTER}";
else
echo "Running Full test suite, this will take a couple of minutes to complete."
fi
sleep 2;
if [[ ${FILTER} == 0 ]] || [[ ${FILTER} =~ "matching" ]]; then
echo "============= matching ============="
mysql ${MYSQLOPTS} --database tap --execute 'source tests/matching.my'
fi
if [[ ${FILTER} == 0 ]] || [[ ${FILTER} =~ "eq" ]]; then
echo "============= eq ============="
mysql ${MYSQLOPTS} --database tap --execute 'source tests/eq.my'
fi
if [[ ${FILTER} == 0 ]] || [[ ${FILTER} =~ "moretap" ]]; then
echo "============= moretap ============="
mysql ${MYSQLOPTS} --database tap --execute 'source tests/moretap.my'
fi
if [[ ${FILTER} == 0 ]] || [[ ${FILTER} =~ "todotap" ]]; then
echo "============= todotap ============="
mysql ${MYSQLOPTS} --database tap --execute 'source tests/todotap.my'
fi
if [[ ${FILTER} == 0 ]] || [[ ${FILTER} =~ "utils" ]]; then
echo "============= utils ============="
mysql ${MYSQLOPTS} --database tap --execute 'source tests/utils.my'
fi
if [[ ${FILTER} == 0 ]] || [[ ${FILTER} =~ "charset" ]]; then
echo "============= character sets ============"
mysql ${MYSQLOPTS} --database tap --execute 'source tests/test-mytap-charset.my'
fi
if [[ ${FILTER} == 0 ]] || [[ ${FILTER} =~ "collation" ]]; then
echo "============= collations ============"
mysql ${MYSQLOPTS} --database tap --execute 'source tests/test-mytap-collation.my'
fi
if [[ ${FILTER} == 0 ]] || [[ ${FILTER} =~ "column" ]]; then
echo "============= columns ============"
mysql ${MYSQLOPTS} --database tap --execute 'source tests/test-mytap-column.my'
fi
if [[ ${FILTER} == 0 ]] || [[ ${FILTER} =~ "constraint" ]]; then
echo "============= constraints ============"
mysql ${MYSQLOPTS} --database tap --execute 'source tests/test-mytap-constraint.my'
fi
if [[ ${FILTER} == 0 ]] || [[ ${FILTER} =~ "engine" ]]; then
echo "============= engines ============"
mysql ${MYSQLOPTS} --database tap --execute 'source tests/test-mytap-engine.my'
fi
if [[ ${FILTER} == 0 ]] || [[ ${FILTER} =~ "event" ]]; then
echo "============= events ============"
mysql ${MYSQLOPTS} --database tap --execute 'source tests/test-mytap-event.my'
fi
if [[ ${FILTER} == 0 ]] || [[ ${FILTER} =~ "index" ]]; then
echo "============= indexes ============"
mysql ${MYSQLOPTS} --database tap --execute 'source tests/test-mytap-index.my'
fi
if [[ ${FILTER} == 0 ]] || [[ ${FILTER} =~ "partition" ]]; then
echo "============= partitions ============"
mysql ${MYSQLOPTS} --database tap --execute 'source tests/test-mytap-partition.my'
fi
if [[ ${FILTER} == 0 ]] || [[ ${FILTER} =~ "privilege" ]]; then
echo "============= privileges ============"
mysql ${MYSQLOPTS} --database tap --execute 'source tests/test-mytap-privilege.my'
fi
if [[ ${FILTER} == 0 ]] || [[ ${FILTER} =~ "role" ]]; then
echo "============= role ============"
mysql ${MYSQLOPTS} --database tap --execute 'source tests/test-mytap-role.my'
fi
if [[ ${FILTER} == 0 ]] || [[ ${FILTER} =~ "routines" ]]; then
echo "============= routines ============"
mysql ${MYSQLOPTS} --database tap --execute 'source tests/test-mytap-routines.my'
fi
if [[ ${FILTER} == 0 ]] || [[ ${FILTER} =~ "schemata" ]]; then
echo "============= schemas ============"
mysql ${MYSQLOPTS} --database tap --execute 'source tests/test-mytap-schemata.my'
fi
if [[ ${FILTER} == 0 ]] || [[ ${FILTER} =~ "table" ]]; then
echo "============= tables ============"
mysql ${MYSQLOPTS} --database tap --execute 'source tests/test-mytap-table.my'
fi
if [[ ${FILTER} == 0 ]] || [[ ${FILTER} =~ "trigger" ]]; then
echo "============= triggers ============"
mysql ${MYSQLOPTS} --database tap --execute 'source tests/test-mytap-trigger.my'
fi
if [[ ${FILTER} == 0 ]] || [[ ${FILTER} =~ "user" ]]; then
echo "============= users ============"
mysql ${MYSQLOPTS} --database tap --execute 'source tests/test-mytap-user.my'
fi
if [[ ${FILTER} == 0 ]] || [[ ${FILTER} =~ "view" ]]; then
echo "============= views ============"
mysql ${MYSQLOPTS} --database tap --execute 'source tests/test-mytap-view.my'
fi
fi
echo "Finished"
|
hepabolu/mytap
|
install.sh
|
Shell
|
bsd-3-clause
| 9,893 |
#!/bin/bash
rm $PREFIX/bin/activate $PREFIX/bin/deactivate
$PYTHON setup.py install
|
pelson/raspberrypi-conda-recipes
|
recipes/conda-env/build.sh
|
Shell
|
bsd-3-clause
| 86 |
#!/bin/bash
###############################################################################
# #
# Author: cyosp #
# Version: 1.0.1 #
# #
# --------------------------------------------------------------------------- #
# #
# Generate C++ files using MPAPO.xml and litesql-gen #
# #
# --------------------------------------------------------------------------- #
# #
# 1.0.1 - 2017-01-15 #
# - Update class folder path #
# 1.0.0 - 2015-12-03 #
# - First version #
# #
###############################################################################
SCRIPT_HOME=$(readlink -f "$0")
SCRIPT_DIR_NAME=$(dirname "$SCRIPT_HOME")
CLASS_NAME="MPAPO"
CLASS_FOLDER="com/cyosp/mpa/po"
cd "$SCRIPT_DIR_NAME"
# Generate C++ files
litesql-gen -t c++ ${CLASS_NAME}.xml
# Update header file
sed -i "s|${CLASS_NAME,,}\.hpp|${CLASS_FOLDER}/${CLASS_NAME}\.hpp|" ${CLASS_NAME,,}.cpp
# Move file to the right place
mv ${CLASS_NAME,,}.hpp ${CLASS_FOLDER}/${CLASS_NAME}.hpp
mv ${CLASS_NAME,,}.cpp ${CLASS_FOLDER}/${CLASS_NAME}.cpp
exit 0
|
cyosp/MPA
|
src/MPAPO.bash
|
Shell
|
bsd-3-clause
| 1,844 |
#!/bin/bash
function 0neGal/set-up-status-line {
# Hide the normal mode name
bleopt keymap_vi_mode_show=
function ble/prompt/backslash:0neGal/currentmode {
bleopt keymap_vi_mode_update_prompt=1
local mode; ble/keymap:vi/script/get-mode
    # NOTE: the block/virtual modes are matched on literal control characters;
    # they are written here with $'\xNN' quoting (CTRL-V, CTRL-S, CTRL-R).
    # The exact mode characters are an assumption based on the mode labels.
    case $mode in
    (*n) ble/prompt/print $'\e[1m-- NORMAL --\e[m' ;;
    (*v) ble/prompt/print $'\e[1m-- VISUAL --\e[m' ;;
    (*V) ble/prompt/print $'\e[1m-- V-LINE --\e[m' ;;
    (*$'\x16') ble/prompt/print $'\e[1m-- V-BLOQ --\e[m' ;;
    (*s) ble/prompt/print $'\e[1m-- SELECT --\e[m' ;;
    (*S) ble/prompt/print $'\e[1m-- S-LINE --\e[m' ;;
    (*$'\x13') ble/prompt/print $'\e[1m-- S-BLOQ --\e[m' ;;
    (i) ble/prompt/print $'\e[1m-- INSERT --\e[m' ;;
    (R) ble/prompt/print $'\e[1m-- RPLACE --\e[m' ;;
    ($'\x12') ble/prompt/print $'\e[1m-- VPLACE --\e[m' ;;
    (*) ble/prompt/print $'\e[1m-- ?????? --\e[m' ;;
    esac
# Change the default color of status line
case $mode in
(*n) ble-face prompt_status_line=bg=gray,fg=white ;;
(*[vVsS]) ble-face prompt_status_line=bg=teal,fg=white ;;
(*[iR]) ble-face prompt_status_line=bg=navy,fg=white ;;
(*) ble-face prompt_status_line=bg=240,fg=231 ;;
esac
}
# In this example, we put the mode string, date and time, and the
# current working directory in the status line.
bleopt prompt_status_line='\q{0neGal/currentmode}\r\e[96m\w\e[m\r\D{%F %H:%M}'
}
blehook/eval-after-load keymap_vi 0neGal/set-up-status-line
|
akinomyoga/ble.sh
|
memo/D1488.vim-mode-in-status.bash
|
Shell
|
bsd-3-clause
| 1,482 |
#!/bin/sh
#
# Copyright (C) 2012, 2013 Internet Systems Consortium, Inc. ("ISC")
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
# $Id: clean.sh,v 1.1.2.1 2010/06/01 03:55:01 marka Exp $
rm -f random.data
rm -f ns*/named.run
rm -f ns1/K*
rm -f ns1/*.db
rm -f ns1/*.signed
rm -f ns1/dsset-*
rm -f ns1/keyset-*
rm -f ns1/trusted.conf
rm -f ns1/private.nsec.conf
rm -f ns1/private.nsec3.conf
rm -f ns1/signer.err
rm -f */named.memstats
rm -f dig.out.ns*.test*
|
WigWagCo/node-isc-dhclient
|
deps/isc-dhcp/bind/bind-expanded-tar/bin/tests/system/wildcard/clean.sh
|
Shell
|
isc
| 1,119 |
#!/bin/sh
#######################################################################################################
#
#
# Script for downloading, upacking and launching the DIRAC4Grid suite on some grid CE of given VO.
#
#
#######################################################################################################
# check the parameter - VO name
if [[ $1 != "voce" && $1 != "compchem" && $1 != "isragrid" && $1 != "osg" && $1 != "sivvp.slovakgrid.sk" && $1 != "enmr.eu" ]]; then
echo -e "\n wrong parameter - VO name : $1 "
exit 12
else
VO=$1
echo -e "\n OK, you specified properly the VO=$VO, continuing \n"
fi
# include all external functions from file copied onto current CE
if [ -e "UtilsCE.sh" ]
then
source ./UtilsCE.sh
else
echo -e "\n Source file UtilsCE not found! Error exit 13 ! \n"
exit 13
fi
# name of Dirac package distributed over grid clusters
package="DIRAC4Grid_suite.tgz"
print_CE_info
querry_CE_attributes $VO
check_file_on_SE $VO $package
# download & unpack tar-file onto CE - MUST be successfull or exit
download_from_SE $VO $package
# get number of procs #
unset nprocs
get_nprocs_CE nprocs
#RETVAL=$?; [ $RETVAL -ne 0 ] && exit 5
echo -e "\n Number of #CPU obtained from the function: $nprocs \n"
#
# Unpack the downloaded DIRAC tar-ball
#
unpack_DIRAC $package
#RETVAL=$?; [ $RETVAL -ne 0 ] && exit 6
#-----------------------------------------------
# specify the scratch space for DIRAC runs #
#-----------------------------------------------
#echo "--scratch=\$PWD/DIRAC_scratch" > ~/.diracrc
#echo -e "\n\n The ~/.diracrc file was created, containing: "; cat ~/.diracrc
##########################################
# set build dirs and paths #
##########################################
# directories with all static executables - dirac.x and OpenMPI
export PATH_SAVED=$PATH
export LD_LIBRARY_PATH_SAVED=$LD_LIBRARY_PATH
# set the Dirac basis set library path for pam
export BASDIR_PATH=$PWD/basis:$PWD/basis_dalton:$PWD/basis_ecp
export BUILD_MPI1=$PWD/build_intelmkl_openmpi-1.10.1_i8_static
export BUILD_MPI2=$PWD/build_openmpi_gnu_i8_openblas_static
export BUILD1=$PWD/build_intelmkl_i8_static
export BUILD2=$PWD/build_gnu_i8_openblas_static
export PAM_MPI1=$BUILD_MPI1/pam
export PAM_MPI2=$BUILD_MPI2/pam
export PAM1=$BUILD1/pam
export PAM2=$BUILD2/pam
# take care of unique nodes ...
#UNIQUE_NODES="`cat $PBS_NODEFILE | sort | uniq`"
#UNIQUE_NODES="`echo $UNIQUE_NODES | sed s/\ /,/g `"
#echo -e "\n Unique nodes for parallel run (from PBS_NODEFILE): $UNIQUE_NODES"
#echo "PBS_NODEFILE=$PBS_NODEFILE"
#echo "PBS_O_QUEUE=$PBS_O_QUEUE"
#echo "PBS_O_WORKDIR=$PBS_O_WORKDIR"
#####################################################################
# Run few control tests
#####################################################################
export DIRTIMEOUT="25m"
echo -e "\n Time limit for running DIRAC tests, DIRTIMEOUT=$DIRTIMEOUT "
echo -e "When you finish running tests, set it to other value, according to size of your jobs !"
echo -e "\n\n --- Going to launch parallel Dirac - OpenMPI+Intel+MKL+i8 - with few tests --- \n "; date
#----------------------------------------------------------
# Main cycle over OpenMPI-OpenMP number of tasks/threads
#----------------------------------------------------------
#for ij in 1-1 1-6 1-12 2-1 2-6 6-1 6-2 12-1; do
for ij in 1-1 1-6 1-12 1-24 1-64 2-1 2-6 2-12 2-32 6-1 6-2 6-4 12-1 12-2 16-1 16-2 16-4 24-1 32-1 32-2 64-1; do
set -- ${ij//-/ }
npn=$1
nmkl=$2
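  # e.g. (illustrative) ij=2-6 splits into npn=2 MPI processes and
  # nmkl=6 MKL/OpenBLAS threads per process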
echo -e "\n \n ========== Hybrid OpenMPI-OpenMP run on 1 node ======== #OpenMPI=$npn #OpenMP=$nmkl "
# set MKL envirovariables
unset MKL_NUM_THREADS
export MKL_NUM_THREADS=$nmkl
echo -e "\n Updated MKL_NUM_THREADS=$MKL_NUM_THREADS"
echo -e "MKL_DYNAMIC=$MKL_DYNAMIC"
echo -e "OMP_NUM_THREADS=$OMP_NUM_THREADS"
echo -e "OMP_DYNAMIC=$OMP_DYNAMIC"
# set OpenMPI variables
unset PATH
export PATH=$BUILD_MPI1/bin:$PATH_SAVED
export LD_LIBRARY_PATH=$BUILD_MPI1/lib:$LD_LIBRARY_PATH_SAVED
unset OPAL_PREFIX
export OPAL_PREFIX=$BUILD_MPI1
echo -e "\n The modified PATH=$PATH"
echo -e "The LD_LIBRARY_PATH=$LD_LIBRARY_PATH"
echo -e "The variable OPAL_PREFIX=$OPAL_PREFIX"
echo -e "\n The mpirun in PATH ... \c"; which mpirun; mpirun --version
#export DIRAC_MPI_COMMAND="mpirun -H ${UNIQUE_NODES} -npernode $npn --prefix $BUILD_MPI1"
export DIRAC_MPI_COMMAND="mpirun -np $npn --prefix $BUILD_MPI1"
echo -e "\n The DIRAC_MPI_COMMAND=${DIRAC_MPI_COMMAND} \n"
#time test/cosci_energy/test -b $BUILD_MPI1 -d -v
time test/cc_energy_and_mp2_dipole/test -b $BUILD_MPI1 -d -v
time test/cc_linear/test -b $BUILD_MPI1 -d -v
time test/fscc/test -b $BUILD_MPI1 -d -v
#time test/fscc_highspin/test -b $BUILD_MPI1 -d -v
# set OpenBLAS enviro-variables
unset OPENBLAS_NUM_THREADS
export OPENBLAS_NUM_THREADS=$nmkl
echo -e "\n Updated OPENBLAS_NUM_THREADS=${OPENBLAS_NUM_THREADS}"
# set OpenMPI variables
unset PATH
export PATH=$BUILD_MPI2/bin:$PATH_SAVED
export LD_LIBRARY_PATH=$BUILD_MPI2/lib:$LD_LIBRARY_PATH_SAVED
unset OPAL_PREFIX
export OPAL_PREFIX=$BUILD_MPI2
echo -e "\n The modified PATH=$PATH"
echo -e "The LD_LIBRARY_PATH=$LD_LIBRARY_PATH"
echo -e "The variable OPAL_PREFIX=$OPAL_PREFIX"
echo -e "\n The mpirun in PATH ... \c"; which mpirun; mpirun --version
#export DIRAC_MPI_COMMAND="mpirun -H ${UNIQUE_NODES} -npernode $npn --prefix $BUILD_MPI2"
export DIRAC_MPI_COMMAND="mpirun -np $npn --prefix $BUILD_MPI2"
echo -e "\n The DIRAC_MPI_COMMAND=${DIRAC_MPI_COMMAND} \n"
#time test/cosci_energy/test -b $BUILD_MPI2 -d -v
time test/cc_energy_and_mp2_dipole/test -b $BUILD_MPI2 -d -v
time test/cc_linear/test -b $BUILD_MPI2 -d -v
time test/fscc/test -b $BUILD_MPI2 -d -v
#time test/fscc_highspin/test -b $BUILD_MPI2 -d -v
done
#
# Individual runs
#
#echo -e "\n --- Launching simple parallel pam test --- \n ";
#python ./pam --inp=test/fscc/fsccsd_IH.inp --mol=test/fscc/Mg.mol --mw=92 --outcmo --mpi=$nprocs --dirac=$BUILD/dirac.x
##############################################################
# #
# pack selected files to get them back from CE #
# #
##############################################################
echo -e "\n --------------------------------- \n ";
# delete old tar-ball first
#ls -lt DIRAC_grid_suite.tgz
#echo -e "\n deleting the old DIRAC_grid_suite.tgz..."
#rm dirac_grid_suite.tgz
#echo "check files..."
#ls -lt
#echo -e "\n --- Packing all wanted stuff back from the grid CE --- ";
#tar --version
#echo -e "\n we have to pack (ls -lt) :"
#ls -lt
#echo " "
#tar czf DIRAC_grid_suite_back.tgz test *.out
#echo -e "\n selected directories/files of the DIRAC suite packed back, ls -lt DIRAC_grid_suite_back.tgz:";ls -lt DIRAC_grid_suite_back.tgz
# upload final tarball onto SE so that you can dowload it later
# upload_to_SE $VO
#############################################
#### flush out some good-bye message ... ####
#############################################
final_message
exit 0
|
miroi/DIRAC_scripts
|
grid_runs/virtual_organizations/enmr_eu/1node/max_cpus/run_on_CE_1n.sh
|
Shell
|
mit
| 7,187 |
#!/bin/bash -ex
# coupler-server.sh
# something for vocto clients to connect to;
# relays the stream received on one TCP port out on a second TCP port
gst-launch-1.0 \
tcpserversrc host=0.0.0.0 port=4953 ! \
tcpserversink host=0.0.0.0 port=4954
|
xfxf/voctomix-outcasts
|
tests/coupler-server.sh
|
Shell
|
mit
| 220 |
# Fail the build if this step fails
set -e
# Update the webdriver-screenshots folder of the current branch, as long as it's a push and not a savage- branch.
if [[ "$TRAVIS_PULL_REQUEST" == "false" && ! $TRAVIS_BRANCH =~ $SAVAGE_BRANCH ]]; then
echo -e "Starting to update skyux2.\n"
git config --global user.email "[email protected]"
git config --global user.name "Blackbaud Sky Build User"
git clone --quiet --branch=$TRAVIS_BRANCH https://${GH_TOKEN}@github.com/blackbaud/skyux2.git skyux2 > /dev/null
ls
echo -e "Changing directory to skyux-spa-visual-tests"
cd skyux-spa-visual-tests
ls
cp -rf screenshots-baseline/ ../skyux2/skyux-spa-visual-tests/
echo -e "Changing directory to skyux 2"
cd ../skyux2
if [ -z "$(git ls-files --others --exclude-standard)" ]; then
echo -e "No changes to commit to skyux2."
else
git add skyux-spa-visual-tests/screenshots-baseline/
git commit -m "Travis build $TRAVIS_BUILD_NUMBER pushed to skyux2 [ci skip]"
git push -fq origin $TRAVIS_BRANCH > /dev/null
echo -e "skyux2 successfully updated.\n"
fi
fi
|
blackbaud/skyux2
|
scripts/visual-baseline.sh
|
Shell
|
mit
| 1,110 |
#!/bin/bash
# Helper routines for building source libraries
# source this script to set up library building functions and vars
#
# You'll later need any relevant libraries stored at $ARCHIVE_PATH (see below)
# Get needed utilities
TERRYFY_DIR=$(dirname "${BASH_SOURCE[0]}")
source $TERRYFY_DIR/travis_tools.sh
# Get absolute path to script directory
TERRYFY_DIR=$(abspath "$TERRYFY_DIR")
# Compiler defaults
SYS_CC=clang
SYS_CXX=clang++
MACOSX_DEPLOYMENT_TARGET='10.6'
# Default location for source archives
SRC_ARCHIVES=archives
# Default location for unpacking sources
export SRC_PREFIX=$PWD/working
# PATH when we start
START_PATH=$PATH
# BUILD_PREFIXES
BUILD_PREFIX_32=$PWD/build32
BUILD_PREFIX_64=$PWD/build64
BUILD_PREFIX_DUAL=$PWD/build
function set_dual_prefix {
export ARCH_FLAGS="-arch i386 -arch x86_64"
export BUILD_PREFIX=$BUILD_PREFIX_DUAL
set_from_prefix
}
function set_32_prefix {
export ARCH_FLAGS="-arch i386"
export BUILD_PREFIX=$BUILD_PREFIX_32
set_from_prefix
}
function set_64_prefix {
export ARCH_FLAGS="-arch x86_64"
export BUILD_PREFIX=$BUILD_PREFIX_64
set_from_prefix
}
function set_from_prefix {
check_var $BUILD_PREFIX
mkdir -p $BUILD_PREFIX/bin
export PATH=$BUILD_PREFIX/bin:$START_PATH
mkdir -p $BUILD_PREFIX/include
export CPATH=$BUILD_PREFIX/include
mkdir -p $BUILD_PREFIX/lib
export LIBRARY_PATH=$BUILD_PREFIX/lib
export DYLD_LIBRARY_PATH=$LIBRARY_PATH
export PKG_CONFIG_PATH=$BUILD_PREFIX/lib/pkgconfig
}
# Set dual-arch prefix by default
set_dual_prefix
function clean_builds {
check_var $SRC_PREFIX
check_var $BUILD_PREFIX
rm -rf $SRC_PREFIX
mkdir $SRC_PREFIX
rm -rf $BUILD_PREFIX_32
rm -rf $BUILD_PREFIX_64
rm -rf $BUILD_PREFIX_DUAL
}
function clean_submodule {
local submodule=$1
check_var $submodule
cd $submodule
git clean -fxd
git reset --hard
cd ..
}
function standard_install {
# Required arguments
# pkg_name (e.g. libpng)
# pkg_version (e.g. 1.6.12)
#
# Optional arguments
# archive_suffix (default .tar.gz)
# archive_prefix (default "$pkg_name-")
# extra_configures (default empty)
# This last can either be extra flags to pass to configure step, or the
# string "cmake" in which case use cmake for configure step
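    #
    # Example calls (illustrative only; package names/versions are hypothetical):
    #   standard_install libpng 1.6.12
    #   standard_install somepkg 2.0 .tar.xz "" cmake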
local pkg_name=$1
check_var $pkg_name
local pkg_version=$2
check_var $pkg_version
local archive_suffix=$3
if [ -z "$archive_suffix" ]; then
archive_suffix=.tar.gz
fi
local archive_prefix=$4
if [ -z "$archive_prefix" ]; then
archive_prefix="${pkg_name}-"
fi
# Put the rest of the positional parameters into new positional params
set -- "${@:5}"
check_var $SRC_PREFIX
check_var $BUILD_PREFIX
local archive_path="$SRC_ARCHIVES/${archive_prefix}${pkg_version}${archive_suffix}"
tar xvf $archive_path -C $SRC_PREFIX
cd $SRC_PREFIX/$pkg_name-$pkg_version
require_success "Failed to cd to $pkg_name directory"
if [ "$1" == "cmake" ]; then # cmake configure
CC=${SYS_CC} CXX=${SYS_CXX} CFLAGS=$ARCH_FLAGS \
CMAKE_INCLUDE_PATH=$CPATH \
CMAKE_LIBRARY_PATH=$LIBRARY_PATH \
cmake -DCMAKE_INSTALL_PREFIX:PATH=$BUILD_PREFIX .
else # standard configure
CC=${SYS_CC} CXX=${SYS_CXX} CFLAGS=$ARCH_FLAGS ./configure \
--prefix=$BUILD_PREFIX "$@"
fi
make
make install
require_success "Failed to install $pkg_name $pkg_version"
cd ../..
}
|
eevans/squash-deb
|
plugins/brotli/brotli/terryfy/library_installers.sh
|
Shell
|
mit
| 3,555 |
#!/bin/bash
set -eu
declare -A aliases=(
[4]='latest'
)
defaultVariant='debian'
self="$(basename "$BASH_SOURCE")"
cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
versions=( */ )
versions=( "${versions[@]%/}" )
# sort version numbers with highest first
IFS=$'\n'; versions=( $(echo "${versions[*]}" | sort -rV) ); unset IFS
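# e.g. (illustrative) version directories 3/ 4/ 4.1/ end up ordered: 4.1 4 3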
# get the most recent commit which modified any of "$@"
fileCommit() {
git log -1 --format='format:%H' HEAD -- "$@"
}
# get the most recent commit which modified "$1/Dockerfile" or any file COPY'd from "$1/Dockerfile"
dirCommit() {
local dir="$1"; shift
(
cd "$dir"
fileCommit \
Dockerfile \
$(git show HEAD:./Dockerfile | awk '
toupper($1) == "COPY" {
for (i = 2; i < NF; i++) {
print $i
}
}
')
)
}
getArches() {
local repo="$1"; shift
local officialImagesUrl='https://github.com/docker-library/official-images/raw/master/library/'
eval "declare -g -A parentRepoToArches=( $(
find -name 'Dockerfile' -exec awk '
toupper($1) == "FROM" && $2 !~ /^('"$repo"'|scratch|.*\/.*)(:|$)/ {
print "'"$officialImagesUrl"'" $2
}
' '{}' + \
| sort -u \
| xargs bashbrew cat --format '[{{ .RepoName }}:{{ .TagName }}]="{{ join " " .TagEntry.Architectures }}"'
) )"
}
getArches 'ghost'
cat <<-EOH
# this file is generated via https://github.com/docker-library/ghost/blob/$(fileCommit "$self")/$self
Maintainers: Tianon Gravi <[email protected]> (@tianon),
Joseph Ferguson <[email protected]> (@yosifkit)
GitRepo: https://github.com/docker-library/ghost.git
EOH
# prints "$2$1$3$1...$N"
join() {
local sep="$1"; shift
local out; printf -v out "${sep//%/%%}%s" "$@"
echo "${out#$sep}"
}
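# e.g. (illustrative) join ', ' 4 latest  ->  "4, latest"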
for version in "${versions[@]}"; do
for variant in debian alpine; do
commit="$(dirCommit "$version/$variant")"
fullVersion="$(git show "$commit":"$version/$variant/Dockerfile" | awk '$1 == "ENV" && $2 == "GHOST_VERSION" { print $3; exit }')"
versionAliases=()
while [ "$fullVersion" != "$version" -a "${fullVersion%[.-]*}" != "$fullVersion" ]; do
versionAliases+=( $fullVersion )
fullVersion="${fullVersion%[.-]*}"
done
versionAliases+=(
$version
${aliases[$version]:-}
)
if [ "$variant" = "$defaultVariant" ]; then
variantAliases=( "${versionAliases[@]}" )
else
variantAliases=( "${versionAliases[@]/%/-$variant}" )
variantAliases=( "${variantAliases[@]//latest-/}" )
fi
variantParent="$(awk 'toupper($1) == "FROM" { print $2 }' "$version/$variant/Dockerfile")"
variantArches="${parentRepoToArches[$variantParent]}"
if [ "$variant" = 'alpine' ]; then
# ERROR: unsatisfiable constraints:
# vips-dev (missing):
variantArches="$(sed -e 's/ ppc64le / /g' -e 's/ s390x / /g' <<<" $variantArches ")"
fi
echo
cat <<-EOE
Tags: $(join ', ' "${variantAliases[@]}")
Architectures: $(join ', ' $variantArches)
GitCommit: $commit
Directory: $version/$variant
EOE
done
done
|
infosiftr/ghost
|
generate-stackbrew-library.sh
|
Shell
|
mit
| 2,941 |
#!/bin/sh
sudo cp -v "${DIR}/octavo/wallpapers/OSD3358-SM-RED-Background-Circuits.png" "${tempdir}/opt/scripts/images/wallpaper.png"
sudo chown root:root "${tempdir}/opt/scripts/images/wallpaper.png"
sudo cat "${tempdir}/home/debian/.config/pcmanfm-qt/lxqt/settings.conf" | sed s/beaglebg.jpg/wallpaper.png/ > ${DIR}/tmp.settings
sudo mv "${DIR}/tmp.settings" "${tempdir}/home/debian/.config/pcmanfm-qt/lxqt/settings.conf"
|
beagleboard/image-builder
|
octavo/octavo-lxqt.sh
|
Shell
|
mit
| 425 |
#!/bin/sh
# This script sets up Adobe AIR environment.
. "${CB_SDK_SCRIPT:-$(dirname "$0")}/common.sh"
# -----------------------------------------------------------------------------
# Call out to setup Adobe AIR.
#
. "${CB_SDK_SETUP_SCRIPT}/setup_air.sh"
common_success
|
ChartBoost/air
|
scripts/setup/setup_all.sh
|
Shell
|
mit
| 274 |
#!/bin/sh
#
# Control script grant/revoke access to X for the ATI External Events Daemon
#
# Distro maintainers may modify this reference script as necessary to conform
# to their distribution policies.
#
# Copyright (c) 2006, ATI Technologies Inc. All rights reserved.
#
#
# Parameters:
# $1 is a keyword, either "grant" or "revoke"
# $2 is the display name
# $3 is the X authorization file to be authorized
#
# Returns:
# 0 if authorization was successfully granted/revoked
# nonzero on failure
#
# Note:
# The third parameter only makes sense if xauth is being used. If another
# mechanism such as xhost is being used it can be ignored. For setups that
# do not do any form of authentication(!) this script can be trimmed down
# to just "exit 0" and the daemon will assume that it is always authorized.
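#
# Example invocation by the daemon (illustrative; the actual display name and
# authorization file path are supplied by atieventsd at runtime):
#   authatieventsd.sh grant :0 /var/run/atieventsd.Xauth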
#
GetServerAuthFile()
{
# Determine where the authorization key may be hiding. The location will
# vary depending upon whether X was started via xdm/kdm, gdm or startx, so
# check each one in turn.
# Check xdm/kdm
XDM_AUTH_FILE=/var/lib/xdm/authdir/authfiles/A$1*
if [ -e $XDM_AUTH_FILE ]; then
SERVER_AUTH_FILE=`ls -t $XDM_AUTH_FILE | head -n 1`
DISP_SEARCH_STRING="#ffff#"
return 0
fi
# Check gdm
GDM_AUTH_FILE=/var/gdm/$1.Xauth
if [ -e $GDM_AUTH_FILE ]; then
SERVER_AUTH_FILE=$GDM_AUTH_FILE
DISP_SEARCH_STRING="$1"
return 0
fi
# Finally, check for startx
for XPID in `pidof X`; do
TRIAL_XAUTH_FILE=`tr '\0' '\n' < /proc/$XPID/environ | grep -e "^XAUTHORITY=" | cut -d= -f2`
TRIAL_XAUTH_KEY=`xauth -f $TRIAL_XAUTH_FILE list | grep "unix$1"`
if [ -n "$TRIAL_XAUTH_KEY" ]; then
SERVER_AUTH_FILE=$TRIAL_XAUTH_FILE
DISP_SEARCH_STRING="unix$1"
return 0
fi
done
# Couldn't find the key
return -1
}
# Main part of script
#
# Since the daemon is usually started during init time before X comes up,
# $PATH may not yet contain the paths to the X binaries, particularly xauth.
# Add the usual location for where xauth may live and fail out if we still
# can't find it.
#
PATH=$PATH:/usr/bin:/usr/X11R6/bin
which xauth > /dev/null || exit -1
case "$1" in
grant)
GetServerAuthFile $2 || exit -1
DISP_AUTH_KEY=`xauth -f $SERVER_AUTH_FILE list | grep $DISP_SEARCH_STRING | awk '{ print $3 }'`
if [ -n "$DISP_AUTH_KEY" ]; then
xauth -f $3 add $2 . $DISP_AUTH_KEY || exit -1
else
exit -1
fi
;;
revoke)
xauth -f $3 remove $2 || exit -1
;;
*)
exit -1
;;
esac
exit 0
|
Scorpio92/linux_kernel_3.18.5
|
drv/packages/RedFlag/authatieventsd.sh
|
Shell
|
gpl-2.0
| 2,691 |
#!/bin/sh
#
# Author: Ilya Storozhilov
# Description: Main CI-cycle script
# Copyright (c) 2013-2014 EPAM Systems
#
# This file is part of Nfstrace.
#
# Nfstrace is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 2 of the License.
#
# Nfstrace is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Nfstrace. If not, see <http://www.gnu.org/licenses/>.
# Platform identification
PLATFORM=$(uname)
if [ $? -ne 0 ] ; then
echo ">>> Platform identification error"
exit 1
fi
if [ "$PLATFORM" = "Linux" ] ; then
OS_RELEASE_FILE="/etc/os-release"
if [ ! -r "$OS_RELEASE_FILE" ] ; then
echo ">>> Linux distro identification error: file '$OS_RELEASE_FILE' not found" >&2
exit 1
fi
LINUX_DISTRO=$(grep "^NAME=" "$OS_RELEASE_FILE" | sed -e 's/NAME=//g' | sed -e 's/"//g')
echo ">>> Running CI-cycle on '$LINUX_DISTRO' platform"
else
echo ">>> Running CI-cycle on '$PLATFORM' platform"
fi
# Pulling environment variables using default values
: ${WORKSPACE:="$(pwd)/$(dirname $0)/.."}
# Processing CLI arguments
SKIP_CPPCHECK=false
SKIP_SCAN_BUILD=false
SKIP_MEMCHECK=false
SKIP_PACKAGING=false
SKIP_COVERAGE=false
for CLI_OPT in "$@" ; do
case $CLI_OPT in
--skip-cppcheck) SKIP_CPPCHECK=true ;;
--skip-scan-build) SKIP_SCAN_BUILD=true ;;
--skip-memcheck) SKIP_MEMCHECK=true ;;
--skip-packaging) SKIP_PACKAGING=true ;;
--skip-coverage) SKIP_COVERAGE=true ;;
esac
done
# Generating cppcheck report
if [ "$SKIP_CPPCHECK" = true ] ; then
echo ">>> Skipping cppcheck report generation"
else
cd $WORKSPACE
echo ">>> Generating cppcheck report"
cppcheck --enable=all --std=c++11 --inconclusive --xml --xml-version=2 src analyzers/src 2> cppcheck.xml
if [ $? -ne 0 ] ; then
echo ">>> Cppcheck report generation error"
exit 1
fi
fi
# Generating scan-build report
if [ "$SKIP_SCAN_BUILD" = true ] ; then
echo ">>> Skipping scan-build report generation"
elif [ "$LINUX_DISTRO" = "openSUSE" ] ; then
echo ">>> Will not generate scan-build report - OpenSUSE is not supported at the moment"
else
SCAN_BUILD_TMPDIR=$(mktemp -d /tmp/scan-build.XXXXXX)
SCAN_BUILD_ARCHIVE=$WORKSPACE/scan-build-archive
SCAN_BUILD_DIR=$WORKSPACE/scan-build
if [ "$PLATFORM" = "FreeBSD" ] ; then
CCC_ANALYZER=ccc-analyzer35
CXX_ANALYZER=c++-analyzer35
SCAN_BUILD=/usr/local/llvm35/bin/scan-build
elif [ "$LINUX_DISTRO" = "Ubuntu" ] ; then
# Different Ubuntu versions have different locations for CLang analyser binaries
CCC_ANALYZER=$(find /usr/share/clang/ -name ccc-analyzer)
if [ $? -ne 0 ] ; then
echo ">>> Scan-build C language analyzer executable lookup error"
exit 1
fi
CXX_ANALYZER=$(find /usr/share/clang/ -name c++-analyzer)
if [ $? -ne 0 ] ; then
echo ">>> Scan-build C++ language analyzer executable lookup error"
exit 1
fi
SCAN_BUILD=scan-build
elif [ "$LINUX_DISTRO" = "ALT Linux" ] ; then
CCC_ANALYZER=/usr/lib64/clang-analyzer/scan-build/ccc-analyzer
CXX_ANALYZER=/usr/lib64/clang-analyzer/scan-build/c++-analyzer
SCAN_BUILD=scan-build
elif [ "$LINUX_DISTRO" = "CentOS Linux" ] ; then
CCC_ANALYZER=/usr/libexec/clang-analyzer/scan-build/ccc-analyzer
CXX_ANALYZER=/usr/libexec/clang-analyzer/scan-build/c++-analyzer
SCAN_BUILD=scan-build
else
echo ">>> WARNING: Scan-build binaries supposed to be in PATH environment variable due to unknown platform"
CCC_ANALYZER=ccc-analyzer
CXX_ANALYZER=c++-analyzer
SCAN_BUILD=scan-build
fi
echo ">>> Generating scan-build report"
rm -rf $SCAN_BUILD_DIR
if [ $? -ne 0 ] ; then
echo ">>> Scan-build directory removal error"
exit 1
fi
mkdir $SCAN_BUILD_DIR
if [ $? -ne 0 ] ; then
echo ">>> Scan-build directory creation error"
exit 1
fi
cd $SCAN_BUILD_DIR
cmake -DCMAKE_BUILD_TYPE=Debug \
-DCMAKE_C_COMPILER=$CCC_ANALYZER \
-DCMAKE_CXX_COMPILER=$CXX_ANALYZER ../
if [ $? -ne 0 ] ; then
echo ">>> Scan-build configuration error"
exit 1
fi
$SCAN_BUILD --use-analyzer=/usr/bin/clang++ \
-analyze-headers \
-o ${SCAN_BUILD_TMPDIR} \
-enable-checker alpha.core \
-enable-checker alpha.cplusplus \
-enable-checker alpha.deadcode \
-enable-checker alpha.security \
-enable-checker alpha.unix \
-enable-checker security \
make
if [ $? -ne 0 ] ; then
echo ">>> Scan-build report generation error"
exit 1
fi
# Get the directory name of the report created by scan-build
SCAN_BUILD_REPORT=$(find $SCAN_BUILD_TMPDIR -maxdepth 1 -not -empty -not -name `basename $SCAN_BUILD_TMPDIR`)
if [ $? -ne 0 ] ; then
echo ">>> Scan-build report output directory identification error"
exit 1
fi
if [ -z "$SCAN_BUILD_REPORT" ]; then
echo ">>> No scan-build report has been generated"
else
echo ">>> Scan-build report has been generated in '$SCAN_BUILD_REPORT' directory"
if [ ! -d "$SCAN_BUILD_ARCHIVE" ]; then
mkdir "$SCAN_BUILD_ARCHIVE"
if [ $? -ne 0 ] ; then
echo ">>> Scan-build report archive directory creation error"
exit 1
fi
else
rm -rf $SCAN_BUILD_ARCHIVE/*
if [ $? -ne 0 ] ; then
echo ">>> Scan-build report archive directory cleanup error"
exit 1
fi
fi
echo ">>> Archiving scan-build report to '$SCAN_BUILD_ARCHIVE' directory"
mv $SCAN_BUILD_REPORT/* $SCAN_BUILD_ARCHIVE/
if [ $? -ne 0 ] ; then
echo ">>> Scan-build report archiving error"
exit 1
fi
rm -rf "$SCAN_BUILD_TMPDIR"
fi
fi
INCLUDE_COVERAGE=true
if [ "$SKIP_COVERAGE" = true ] ; then
INCLUDE_COVERAGE=false
echo ">>> Skipping coverage info generation (gcc --coverage)"
fi
# Doing Debug build
DEBUG_BUILD_DIR=$WORKSPACE/debug
echo ">>> Doing Debug build in '$DEBUG_BUILD_DIR' directory"
rm -rf $DEBUG_BUILD_DIR
if [ $? -ne 0 ] ; then
echo ">>> Debug build directory removal error"
exit 1
fi
mkdir $DEBUG_BUILD_DIR
if [ $? -ne 0 ] ; then
echo ">>> Debug build directory creation error"
exit 1
fi
cd $DEBUG_BUILD_DIR
cmake -DINCLUDE_COVERAGE_INFO="${INCLUDE_COVERAGE}" -DCMAKE_BUILD_TYPE=Debug -DGMOCK_SOURCE_DIR="$HOME/gmock-1.7.0" ../
if [ $? -ne 0 ] ; then
echo ">>> Debug build configuration error"
exit 1
fi
make
if [ $? -ne 0 ] ; then
echo ">>> Debug build compilation error"
exit 1
fi
CTEST_OUTPUT_ON_FAILURE=TRUE make test
if [ $? -ne 0 ] ; then
echo ">>> Running tests on Debug build error"
exit 1
fi
if [ "$PLATFORM" = "FreeBSD" ] ; then
# TODO: Support for code coverage on FreeBSD
echo ">>> Coverage report generation is not supported on FreeBSD at the moment"
else
if [ "$SKIP_COVERAGE" = false ] ; then
make coverage
if [ $? -ne 0 ] ; then
echo ">>> Code coverage report creation error"
exit 1
fi
fi
fi
# Running valgrind/memcheck
if [ "$SKIP_MEMCHECK" = true ] ; then
echo ">>> Skipping valgrind/memcheck report generation"
elif [ "$PLATFORM" = "FreeBSD" ] ; then
# TODO: Valgrind causes error on FreeBSD, see https://bugs.kde.org/show_bug.cgi?id=306235
echo ">>> Valgrind/memcheck report generation is not supported on FreeBSD, see https://bugs.kde.org/show_bug.cgi?id=306235"
elif [ "$LINUX_DISTRO" = "ALT Linux" ] ; then
# TODO: Jenkins causes error on ALT Linux on publish valgrind report phase
echo ">>> Valgrind/memcheck report generation is not supported on ALT Linux"
else
echo ">>> Generating valgrind/memcheck report"
make memcheck-report-xml
fi
# Doing Release build
RELEASE_BUILD_DIR=$WORKSPACE/release
echo ">>> Doing Release build in '$RELEASE_BUILD_DIR' directory"
cd $WORKSPACE
rm -rf $RELEASE_BUILD_DIR
if [ $? -ne 0 ] ; then
echo ">>> Release build directory removal error"
exit 1
fi
mkdir $RELEASE_BUILD_DIR
if [ $? -ne 0 ] ; then
echo ">>> Release build directory creation error"
exit 1
fi
cd $RELEASE_BUILD_DIR
cmake -DCMAKE_BUILD_TYPE=Release -DGMOCK_SOURCE_DIR="$HOME/gmock-1.7.0" ../
if [ $? -ne 0 ] ; then
echo ">>> Release build configuration error"
exit 1
fi
make
if [ $? -ne 0 ] ; then
echo ">>> Release build compilation error"
exit 1
fi
CTEST_OUTPUT_ON_FAILURE=TRUE make test
if [ $? -ne 0 ] ; then
echo ">>> Running tests on Release build error"
exit 1
fi
# Packaging
if [ "$SKIP_PACKAGING" = true ] ; then
echo ">>> Skipping packaging"
elif [ "$LINUX_DISTRO" = "ALT Linux" ] ; then
# TODO: Packaging support on ALT Linux
echo ">>> Packaging is not supported on ALT Linux at the moment"
elif [ "$LINUX_DISTRO" = "Ubuntu" -o "$LINUX_DISTRO" = "Debian" ] ; then
echo ">>> Making DEB-package"
cpack -G DEB
if [ $? -ne 0 ] ; then
echo ">>> Making DEB-package error"
exit 1
fi
echo ">>> Installing DEB-package"
sudo dpkg -i *.deb
if [ $? -ne 0 ] ; then
echo ">>> Installing DEB-package error"
exit 1
fi
echo ">>> Uninstalling DEB-package"
sudo dpkg -r nfstrace
if [ $? -ne 0 ] ; then
echo ">>> Uninstalling DEB-package error"
exit 1
fi
#elif [ "$LINUX_DISTRO" = "CentOS Linux" -o "$LINUX_DISTRO" = "openSUSE" -o "$LINUX_DISTRO" = "ALT Linux" ] ; then
elif [ "$LINUX_DISTRO" = "CentOS Linux" -o "$LINUX_DISTRO" = "openSUSE" ] ; then
echo ">>> Making RPM-package"
cpack -G RPM
echo ">>> Installing RPM-package"
sudo rpm -i nfstrace*.rpm
echo ">>> Uninstalling RPM-package"
sudo rpm -e nfstrace
else
echo ">>> Making archived package"
make package
if [ $? -ne 0 ] ; then
echo ">>> Making archived package error"
exit 1
fi
echo ">>> Installing NFSTrace"
sudo make install
if [ $? -ne 0 ] ; then
echo ">>> NFSTrace installation error"
exit 1
fi
echo ">>> Uninstalling NFSTrace"
sudo xargs rm < install_manifest.txt
if [ $? -ne 0 ] ; then
echo ">>> NFSTrace uninstallation error"
exit 1
fi
fi
|
ailyasov/nfstrace_test
|
ci/ci_build.sh
|
Shell
|
gpl-2.0
| 10,793 |
#!/bin/sh
start_time=`date +'%d/%m/%y %H:%M:%S'`
export KERNELDIR=`readlink -f .`
export RAMFS_SOURCE=`readlink -f $KERNELDIR/ramfs`
#export USE_SEC_FIPS_MODE=true
if [ "${1}" != "" ];then
export KERNELDIR=`readlink -f ${1}`
fi
RAMFS_TMP="/home/yank555-lu/temp/tmp/ramfs-source-sgn3"
. $KERNELDIR/.config
echo ".............................................................Building new ramdisk............................................................."
#remove previous ramfs files
rm -rf $RAMFS_TMP
rm -rf $RAMFS_TMP.cpio
rm -rf $RAMFS_TMP.cpio.gz
#copy ramfs files to tmp directory
cp -ax $RAMFS_SOURCE $RAMFS_TMP
#clear git repositories in ramfs
find $RAMFS_TMP -name .git -exec rm -rf {} \;
#remove empty directory placeholders
find $RAMFS_TMP -name EMPTY_DIRECTORY -exec rm -rf {} \;
rm -rf $RAMFS_TMP/tmp/*
#remove mercurial repository
rm -rf $RAMFS_TMP/.hg
cd $RAMFS_TMP
find | fakeroot cpio -H newc -o > $RAMFS_TMP.cpio 2>/dev/null
ls -lh $RAMFS_TMP.cpio
gzip -9 $RAMFS_TMP.cpio
echo "...............................................................Compiling kernel..............................................................."
#remove previous out files
rm $KERNELDIR/dt.img
rm $KERNELDIR/boot.img
rm $KERNELDIR/*.ko
#compile kernel
cd $KERNELDIR
make -j `getconf _NPROCESSORS_ONLN` || exit 1
echo "..............................................................Making new dt image............................................................."
./buildtools/dtbtool -o $KERNELDIR/dt.img -s 2048 -p $KERNELDIR/scripts/dtc/ $KERNELDIR/arch/arm/boot/
echo ".............................................................Making new boot image............................................................"
./buildtools/dt-mkbootimg --base 0x0 --kernel $KERNELDIR/arch/arm/boot/zImage --ramdisk_offset 0x2000000 --tags_offset 0x1e00000 --pagesize 2048 --cmdline 'console=null androidboot.hardware=qcom user_debug=31 msm_rtb.filter=0x3F androidboot.selinux=permissive' --ramdisk $RAMFS_TMP.cpio.gz --dt $KERNELDIR/dt.img -o $KERNELDIR/boot.img
echo "...............................................................Stripping Modules.............................................................."
find . -name "*.ko" -exec mv {} . \;
${CROSS_COMPILE}strip --strip-unneeded ./*.ko
echo "Started : $start_time"
echo "Finished : `date +'%d/%m/%y %H:%M:%S'`"
echo ".....................................................................done....................................................................."
find . -name "boot.img"
find . -name "*.ko"
|
yank555-lu/N3-CM-Unified
|
build_kernel.sh
|
Shell
|
gpl-2.0
| 2,557 |
#!/bin/bash
#
# todo:
# - thresholds
# - couch response time
# - make CURL/URL/DBLIST_EXCLUDE vars configurable
# - move load_nagios_utils() to helper library so we can use it from multiple scripts
start_time=$(date +%s.%N)
CURL='curl -s --netrc-file /etc/couchdb/couchdb.netrc'
URL='http://127.0.0.1:5984'
TMPFILE=$(mktemp)
DBLIST_EXCLUDE='(user-|sessions_|tokens_)'
PREFIX='Couchdb_'
load_nagios_utils () {
# load the nagios utils
# in debian, the package nagios-plugins-common installs utils.sh to /usr/lib/nagios/plugins/utils.sh
utilsfn=
for d in $PROGPATH /usr/lib/nagios/plugins /usr/lib64/nagios/plugins /usr/local/nagios/libexec /opt/nagios-plugins/libexec . ; do
if [ -f "$d/utils.sh" ]; then
utilsfn=$d/utils.sh;
fi
done
if [ "$utilsfn" = "" ]; then
echo "UNKNOWN - cannot find utils.sh (part of nagios plugins)";
exit 3;
fi
. "$utilsfn";
STATE[$STATE_OK]='OK'
STATE[$STATE_WARNING]='Warning'
STATE[$STATE_CRITICAL]='Critical'
STATE[$STATE_UNKNOWN]='Unknown'
  STATE[$STATE_DEPENDENT]='Dependent'
}
get_global_stats_perf () {
trap "localexit=3" ERR
local localexit db_count
localexit=0
# get a list of all dbs
$CURL -X GET $URL/_all_dbs | json_pp | egrep -v '(\[|\])' > $TMPFILE
db_count=$( wc -l < $TMPFILE)
excluded_db_count=$( egrep -c "$DBLIST_EXCLUDE" $TMPFILE )
echo "db_count=$db_count|excluded_db_count=$excluded_db_count"
return ${localexit}
}
db_stats () {
trap "localexit=3" ERR
local db db_stats doc_count del_doc_count localexit
localexit=0
db="$1"
name="$2"
if [ -z "$name" ]
then
name="$db"
fi
perf="$perf|${db}_docs=$( $CURL -s -X GET ${URL}/$db | json_pp |grep 'doc_count' | sed 's/[^0-9]//g' )"
db_stats=$( $CURL -s -X GET ${URL}/$db | json_pp )
doc_count=$( echo "$db_stats" | grep 'doc_count' | grep -v 'deleted_doc_count' | sed 's/[^0-9]//g' )
del_doc_count=$( echo "$db_stats" | grep 'doc_del_count' | sed 's/[^0-9]//g' )
  # don't divide by zero: doc_count is the divisor and can be 0
  # (e.g. when every document in the db has been deleted)
  if [ $doc_count -eq 0 ]
  then
    del_doc_perc=0
  else
    del_doc_perc=$(( del_doc_count * 100 / doc_count ))
  fi
bytes=$( echo "$db_stats" | grep disk_size | sed 's/[^0-9]//g' )
disk_size=$( echo "scale = 2; $bytes / 1024 / 1024" | bc -l )
echo -n "${localexit} ${PREFIX}${name}_database ${name}_docs=$doc_count|${name}_deleted_docs=$del_doc_count|${name}_deleted_docs_percentage=${del_doc_perc}%"
printf "|${name}_disksize_mb=%02.2fmb ${STATE[localexit]}: database $name\n" "$disk_size"
return ${localexit}
}
# main
load_nagios_utils
# per-db stats
# get a list of all dbs
$CURL -X GET $URL/_all_dbs | json_pp | egrep -v '(\[|\])' > $TMPFILE
# get list of dbs to check
dbs=$( egrep -v "${DBLIST_EXCLUDE}" $TMPFILE | tr -d '\n"' | sed 's/,/ /g' )
for db in $dbs
do
db_stats "$db"
done
# special handling for rotated dbs
suffix=$(($(date +'%s') / (60*60*24*30) + 1))
db_stats "sessions_${suffix}" "sessions"
db_stats "tokens_${suffix}" "tokens"
# show global couchdb stats
global_stats_perf=$(get_global_stats_perf)
exitcode=$?
end_time=$(date +%s.%N)
duration=$( echo "scale = 2; $end_time - $start_time" | bc -l )
printf "${exitcode} ${PREFIX}global_stats ${global_stats_perf}|script_duration=%02.2fs ${STATE[exitcode]}: global couchdb status\n" "$duration"
rm "$TMPFILE"
|
Alster-Hamburgers/leap_platform
|
puppet/modules/site_check_mk/files/agent/local_checks/couchdb/leap_couch_stats.sh
|
Shell
|
gpl-3.0
| 3,301 |
# Put lvm-related utilities here.
# This file is sourced from test-lib.sh.
# Copyright (C) 2007, 2008 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
export LVM_SUPPRESS_FD_WARNINGS=1
ME=$(basename "$0")
warn() { echo >&2 "$ME: $@"; }
unsafe_losetup_()
{
f=$1
test -n "$G_dev_" \
|| error "Internal error: unsafe_losetup_ called before init_root_dir_"
# Iterate through $G_dev_/loop{,/}{0,1,2,3,4,5,6,7,8,9}
for slash in '' /; do
for i in 0 1 2 3 4 5 6 7 8 9; do
dev=$G_dev_/loop$slash$i
losetup $dev > /dev/null 2>&1 && continue;
losetup "$dev" "$f" > /dev/null && { echo "$dev"; return 0; }
break
done
done
return 1
}
loop_setup_()
{
file=$1
dd if=/dev/zero of="$file" bs=1M count=1 seek=1000 > /dev/null 2>&1 \
|| { warn "loop_setup_ failed: Unable to create tmp file $file"; return 1; }
# NOTE: this requires a new enough version of losetup
dev=$(unsafe_losetup_ "$file" 2>/dev/null) \
|| { warn "loop_setup_ failed: Unable to create loopback device"; return 1; }
echo "$dev"
return 0;
}
# set up private /dev and /etc
lvm_init_root_dir_()
{
test -n "$t_" \
|| skip_test_ "Internal error: called lvm_init_root_dir_ before" \
"defining \$t_"
test_dir_rand_=$t_
# Define these two globals.
G_root_=$test_dir_rand_/root
G_dev_=$G_root_/dev
export LVM_SYSTEM_DIR=$G_root_/etc
export DM_DEV_DIR=$G_dev_
# Only the first caller does anything.
mkdir -p $G_root_/etc $G_dev_ $G_dev_/mapper $G_root_/lib
for i in 0 1 2 3 4 5 6 7; do
mknod $G_root_/dev/loop$i b 7 $i
done
for i in $abs_top_builddir/dmeventd/mirror/*.so \
$abs_top_builddir/dmeventd/snapshot/*.so
do
# NOTE: This check is necessary because the loop above will give us the
# value "$abs_top_builddir/dmeventd/mirror/*.so" if no files ending in
# 'so' exist. This is the best way I could quickly determine to skip
# over this bogus value.
if [ -f $i ]; then
echo Setting up symlink from $i to $G_root_/lib
ln -s $i $G_root_/lib
fi
done
cat > $G_root_/etc/lvm.conf <<-EOF
devices {
dir = "$G_dev_"
scan = "$G_dev_"
filter = [ "a/loop/", "a/mirror/", "a/mapper/", "r/.*/" ]
cache_dir = "$G_root_/etc"
sysfs_scan = 0
}
log {
verbose = $verboselevel
syslog = 0
indent = 1
}
backup {
backup = 0
archive = 0
}
global {
library_dir = "$G_root_/lib"
}
EOF
}
|
unsound/parted-freebsd
|
tests/t-lvm.sh
|
Shell
|
gpl-3.0
| 2,823 |
#!/bin/sh
# Ensure that cp -a and cp --preserve=context work properly.
# In particular, test on a writable NFS partition.
# Check also locally if --preserve=context, -a and --preserve=all
# does work
# Copyright (C) 2007-2012 Free Software Foundation, Inc.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
. "${srcdir=.}/tests/init.sh"; path_prepend_ ./src
print_ver_ cp
require_root_
require_selinux_
cwd=$(pwd)
cleanup_() { cd /; umount "$cwd/mnt"; }
# This context is special: it works even when mcstransd isn't running.
ctx=root:object_r:tmp_t:s0
# Check basic functionality - before check on fixed context mount
touch c || framework_failure_
chcon $ctx c || framework_failure_
cp -a c d 2>err || framework_failure_
cp --preserve=context c e || framework_failure_
cp --preserve=all c f || framework_failure_
ls -Z d | grep $ctx || fail=1
test -s err && fail=1 #there must be no stderr output for -a
ls -Z e | grep $ctx || fail=1
ls -Z f | grep $ctx || fail=1
skip=0
# Create a file system, then mount it with the context=... option.
dd if=/dev/zero of=blob bs=8192 count=200 || skip=1
mkdir mnt || skip=1
mkfs -t ext2 -F blob ||
skip_ "failed to create an ext2 file system"
mount -oloop,context=$ctx blob mnt || skip=1
test $skip = 1 \
&& skip_ "insufficient mount/ext2 support"
cd mnt || framework_failure_
echo > f || framework_failure_
echo > g || framework_failure_
# /bin/cp from coreutils-6.7-3.fc7 would fail this test by letting cp
# succeed (giving no diagnostics), yet leaving the destination file empty.
cp -a f g 2>err || fail=1
test -s g || fail=1 # The destination file must not be empty.
test -s err && fail=1 # There must be no stderr output.
# =====================================================
# Here, we expect cp to succeed and not warn with "Operation not supported"
rm -f g
echo > g
cp --preserve=all f g 2>err || fail=1
test -s g || fail=1
grep "Operation not supported" err && fail=1
# =====================================================
# The same as above except destination does not exist
rm -f g
cp --preserve=all f g 2>err || fail=1
test -s g || fail=1
grep "Operation not supported" err && fail=1
# An alternative to the following approach would be to run in a confined
# domain (maybe creating/loading it) that lacks the required permissions
# to the file type.
# Note: this test could also be run by a regular (non-root) user in an
# NFS mounted directory. When doing that, I get this diagnostic:
# cp: failed to set the security context of 'g' to 'system_u:object_r:nfs_t': \
# Operation not supported
cat <<\EOF > exp || framework_failure_
cp: failed to set the security context of
EOF
rm -f g
echo > g
# =====================================================
# Here, we expect cp to fail, because it cannot set the SELinux
# security context through NFS or a mount with fixed context.
cp --preserve=context f g 2> out && fail=1
# Here, we *do* expect the destination to be empty.
test -s g && fail=1
sed "s/ .g' to .*//" out > k
mv k out
compare exp out || fail=1
rm -f g
echo > g
# Check if -a option doesn't silence --preserve=context option diagnostics
cp -a --preserve=context f g 2> out2 && fail=1
# Here, we *do* expect the destination to be empty.
test -s g && fail=1
sed "s/ .g' to .*//" out2 > k
mv k out2
compare exp out2 || fail=1
Exit $fail
|
homer6/gnu_coreutils
|
tests/cp/cp-a-selinux.sh
|
Shell
|
gpl-3.0
| 4,081 |
#! /bin/bash
#
# terminal_test.sh
#
# VT100 terminal validation script
# Use this script when evaluating a new terminal for use with the vt100.sh
# library to ensure that it will be 100% compatible.
#
# This file is part of Sosaria Rebourne. See authors.txt for copyright
# details.
#
# Sosaria Rebourne is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sosaria Rebourne is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sosaria Rebourne. If not, see <http://www.gnu.org/licenses/>.
. ./src/vt100.sh
# HIGH and CLEAR
vt100_high
echo "Welcome to the vt100.sh test suite. As this is a test of the visual "
echo "output of your terminal, this is an interactive process."
echo
echo "This text should be bright white. If not, vt100_high does not work."
echo
echo "Press ENTER. The screen should clear. If it does not, vt100_clear"
echo "does not work."
read
vt100_clear
# FOREGROUND
echo "The text below should be black but readable."
vt100_fg $COLOR_BLACK
echo "This should be BLACK"
vt100_fg $COLOR_RED
echo "This should be RED"
vt100_fg $COLOR_GREEN
echo "This should be GREEN"
vt100_fg $COLOR_BLUE
echo "This should be BLUE"
vt100_fg $COLOR_YELLOW
echo "This should be YELLOW"
vt100_fg $COLOR_TEAL
echo "This should be TEAL"
vt100_fg $COLOR_PURPLE
echo "This should be PURPLE"
vt100_fg $COLOR_WHITE
echo "This should be WHITE"
echo
echo "If any of the above are not correct, vt100_fg does not work."
echo "Press ENTER to continue"
read
vt100_clear
# BACKGROUND
echo "The text below should be white but readable."
vt100_bg $COLOR_WHITE
echo "This should be WHITE"
vt100_bg $COLOR_RED
echo "This should be RED"
vt100_bg $COLOR_GREEN
echo "This should be GREEN"
vt100_bg $COLOR_BLUE
echo "This should be BLUE"
vt100_bg $COLOR_YELLOW
echo "This should be YELLOW"
vt100_bg $COLOR_TEAL
echo "This should be TEAL"
vt100_bg $COLOR_PURPLE
echo "This should be PURPLE"
vt100_bg $COLOR_BLACK
echo "This should be BLACK"
echo
echo "If any of the above are not correct, vt100_bg does not work."
echo "Press ENTER to continue"
read
vt100_clear
# GOTO
vt100_goto 4 9
echo "This text should be at 10, 5. If it is not, vt100_goto does not work."
echo
echo "Press ENTER to continue"
read
vt100_clear
# UP / DOWN / LEFT / RIGHT
vt100_home
vt100_down 10
vt100_right 20
vt100_up 5
vt100_left 10
echo "This text should be at 5, 10. If it is not, vt100_up/down does not work."
echo
echo "Press ENTER to continue"
read
vt100_clear
# SAVE / RESTORE
echo "The below line should read \"ABCdefg\". If it does not, vt100_save or"
echo "vt100_restore does not work."
echo
vt100_save_cursor
echo -n "abcdefg"
vt100_restore_cursor
echo -n "ABC"
echo
echo
echo "Press ENTER to continue"
read
vt100_clear
# DEFAULT
vt100_defaults
echo "Finally, this text should look like the default text of your terminal."
echo "If it does not, then vt100_default does not work."
|
qbradq/sosaria-rebourne
|
tools/terminal_test.sh
|
Shell
|
gpl-3.0
| 3,286 |
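# Remove all autotools/libtool-generated files and build artifacts from the source tree.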
rm -rf \
  Makefile.in aclocal.m4 autom4te.cache config.guess config.h.in config.sub \
  configure depcomp install-sh ltmain.sh m4 missing \
  groonga-token-filter-yatof.pc \
  packages/rpm/centos/groonga-token-filter-yatof.spec \
  packages/rpm/fedora/groonga-token-filter-yatof.spec \
  Makefile config.log config.h libtool stamp-h1 \
  packages/Makefile packages/Makefile.in \
  packages/apt/Makefile packages/apt/Makefile.in \
  packages/rpm/Makefile packages/rpm/Makefile.in \
  packages/rpm/centos/Makefile packages/rpm/centos/Makefile.in \
  packages/rpm/fedora/Makefile packages/rpm/fedora/Makefile.in \
  packages/source/Makefile packages/source/Makefile.in \
  packages/yum/Makefile packages/yum/Makefile.in \
  test/Makefile.in test/Makefile \
  token_filters/Makefile token_filters/Makefile.in \
  token_filters/.deps/ token_filters/.libs/ \
  token_filters/yatof.la token_filters/yatof.lo token_filters/yatof.o \
  config.status tmp
|
naoa/groonga-token-filter-yatof
|
clean.sh
|
Shell
|
lgpl-2.1
| 877 |
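# Note: FFMPEG_VERSION is assumed to be set in the environment before this script runs.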
LIBS=ffmpeg-$FFMPEG_VERSION-win32-shared/bin/*.dll
7z x ffmpeg-$FFMPEG_VERSION-win32-shared.7z $LIBS
mkdir -p com/googlecode/javacv/cpp/windows-x86/
cp $LIBS com/googlecode/javacv/cpp/windows-x86/
jar cvf ffmpeg-$FFMPEG_VERSION-windows-x86.jar com/
rm -Rf com/
|
nragot/super-lama-video-editor
|
librairies/javacv-cppjars/build_ffmpeg-windows-x86.sh
|
Shell
|
apache-2.0
| 261 |
#!/bin/bash
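# Create a Python 2.7 virtualenv next to the project root and install requirements.txt into it, if present.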
function buildEnvironment(){
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
SOURCE="$(readlink "$SOURCE")"
[[ ${SOURCE} != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located
done
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
cd ${DIR}/../
ENV=app-env
REQUIREMENTS=requirements.txt
virtualenv -p python2.7 $ENV
PYTHONENV=${ENV}/bin
SRCROOT=src
#activate env and add project src to PYTHONPATH
source ${PYTHONENV}/activate
export PYTHONPATH=${PYTHONPATH}:${SRCROOT}
if [ -f ${REQUIREMENTS} ]; then
${PYTHONENV}/pip install -r ${REQUIREMENTS}
fi
}
buildEnvironment
|
guidj/irc
|
scripts/build-env.sh
|
Shell
|
apache-2.0
| 907 |
#!/bin/bash
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# name: setup-bootstrap.sh
# Author: George Li
#
# Description
# - This script is use to initialise the ubuntu environment and download
# required script and configure files from repository.
# - This script will be execute when system booting up at first time.
#
## Shell Opts ---------------------------------
set -e -u -x
## Vars ---------------------------------------
REPO_URL="https://raw.githubusercontent.com/GeorgeL1/openstack-deployment/master"
# check network connectivity, stop script if no internet access
if ! (curl --silent --head http://www.google.com/ | egrep "HTTP/[0-9.]+ (20[0-9]|30[0-9])" >/dev/null); then
echo "* Internet connection is required to run this script (setup-offline.sh)! "
exit 0
fi
# Update the package cache.
apt-get update
# Upgrade all package but keep old configure file
apt-get -o Dpkg::Options::="--force-confold" -y dist-upgrade
# Install required packages
apt-get install -y tcptrack
# Remove unused Kernels
dpkg -l 'linux-*' | sed '/^ii/!d;/'"$(uname -r | sed "s/\(.*\)-\([^0-9]\+\)/\1/")"'/d;s/^[^ ]* [^ ]* \([^ ]*\).*/\1/;/[0-9]/!d' | xargs apt-get -y purge
# Download the required script file into place
echo -n "Download the required configurate file into place ..."
wget -O /etc/issue $REPO_URL/ubuntu/etc/issue
wget -O /etc/issue.net $REPO_URL/ubuntu/etc/issue.net
wget -O /etc/motd.footer $REPO_URL/ubuntu/etc/motd.footer
wget -O /etc/motd.tail $REPO_URL/ubuntu/etc/motd.tail
wget -O /etc/ramdisk.sh $REPO_URL/ubuntu/etc/ramdisk.sh
echo .
# Enable banner in sshd_config
if grep "^#Banner" /etc/ssh/sshd_config > /dev/null; then
sed -i 's/^#Banner.*/Banner \/etc\/issue.net/' /etc/ssh/sshd_config
else
echo 'Banner /etc/issue.net' >> /etc/ssh/sshd_config
fi
# Ensure that sshd permits root login.
if grep "^PermitRootLogin" /etc/ssh/sshd_config > /dev/null; then
sed -i 's/^PermitRootLogin.*/PermitRootLogin yes/' /etc/ssh/sshd_config
else
echo 'PermitRootLogin yes' >> /etc/ssh/sshd_config
fi
# Enable Welcome info for remote console - 00-header
if [ -f /etc/update-motd.d/00-header ]; then
if ! grep "motd.tail" /etc/update-motd.d/00-header > /dev/null; then
sed -i 's/printf "Welcome/if [ -f \/etc\/motd.tail ]; then\n cat \/etc\/motd.tail\nfi\n\n&/g' /etc/update-motd.d/00-header
fi
else
(
cat << 'EOF'
#!/bin/sh
#
# 00-header - create the header of the MOTD
# Copyright (C) 2009-2010 Canonical Ltd.
#
# To add dynamic information, add a numbered
# script to /etc/update-motd.d/
[ -r /etc/lsb-release ] && . /etc/lsb-release
[ -f /etc/motd.tail ] && cat /etc/motd.tail || true
printf "Welcome to %s (%s %s %s)\n" "$DISTRIB_DESCRIPTION" "$(uname -o)" "$(uname -r)" "$(uname -m)"
EOF
) > /etc/update-motd.d/00-header
fi
# Enable Welcome info for remote console - 99-info-text
if [ -f /etc/update-motd.d/99-info-text ]; then
if ! grep "motd.footer" /etc/update-motd.d/99-info-text > /dev/null; then
echo '[ -f /etc/motd.footer ] && cat /etc/motd.footer || true' >> /etc/update-motd.d/99-info-text
fi
else
(
cat << 'EOF'
#!/bin/sh
#
# 99-footer - write the admin's footer to the MOTD
# Copyright (C) 2009-2010 Canonical Ltd.
#
# To add dynamic information, add a numbered
# script to /etc/update-motd.d/
[ -f /etc/motd.footer ] && cat /etc/motd.footer || true
EOF
) > /etc/update-motd.d/99-info-text
fi
# Enable Login Welcome messages
chmod +x /etc/update-motd.d/00-header
chmod +x /etc/update-motd.d/99-info-text
# Disable update and upgrade messages
chmod -x /etc/update-motd.d/10-help-text
chmod -x /etc/update-motd.d/90-updates-available
chmod -x /etc/update-motd.d/91-release-upgrade
# image clean up .
apt-get autoremove -y
apt-get clean
rm -f /var/cache/apt/archives/*.deb
rm -f /var/cache/apt/*cache.bin
rm -f /var/lib/apt/lists/*_Packages
rm -rf /usr/src/*
# Setup completed, so disable the setup-bootstrap.sh
chmod -x /etc/setup-bootstrap.sh
sed -i '/setup-bootstrap.sh/d' /etc/rc.local
reboot
|
GeorgeL1/openstack-deploy
|
ubuntu/etc/setup-bootstrap.sh
|
Shell
|
apache-2.0
| 4,465 |
#! /bin/bash
# Database Configuration
export DBNAME=deepdive_smoke
export PGUSER=${PGUSER:-`whoami`}
export PGPASSWORD=${PGPASSWORD:-}
export PGPORT=${PGPORT:-5432}
export PGHOST=${PGHOST:-localhost}
|
feiranwang/deepdive
|
examples/smoke/env.sh
|
Shell
|
apache-2.0
| 202 |
#!/bin/sh
if [ -z "$2" ]
then
echo "Usage: script/setup <application name> <main module package name>"
echo "example: ./cli/personalize_proj_working.sh CountdownApp com.cmuse13.countdownapp.countdownmodule"
echo "<application name> is the display name of your application"
echo "<main module package name> is the fully qualified package path to the main module of your application"
exit 0
fi
echo "Creating project to $1 using package $2"
ORIGINAL_APP_DIRECTORY="MyStarterApp"
ORIGINAL_PACKAGE_NAME="co.kaush.mystarterapp.myappmodule"
ORIGINAL_PACKAGE_PATH=`echo $ORIGINAL_PACKAGE_NAME | sed 's/\./\//g'`
NEW_APP_DIRECTORY=$1 # CountdownApp
NEW_PACKAGE_NAME=$2 # com.cmuse13.countdownapp.countdownmodule
NEW_APP_MAIN_MODULE="${NEW_APP_DIRECTORY}Module"
NEW_PACKAGE_PATH=`echo $NEW_PACKAGE_NAME | sed 's/\./\//g'`
echo "Copying files..."
mkdir -p ../$NEW_APP_DIRECTORY
cp -r * ../$NEW_APP_DIRECTORY
cp -r .idea ../$NEW_APP_DIRECTORY
cp .gitignore ../$NEW_APP_DIRECTORY
cd ../$NEW_APP_DIRECTORY
rm -f .idea/workspace.xml
rm -f .idea/tasks.xml
echo "Renaming files and directories..."
mv MyAppModule ${NEW_APP_MAIN_MODULE}
mv ${ORIGINAL_APP_DIRECTORY}.iml ${NEW_APP_DIRECTORY}.iml
rm -Rf ./${NEW_APP_MAIN_MODULE}/build ./${NEW_APP_MAIN_MODULE}/target
echo "Renaming files and directories within subModules"
mv subModules/MyAppPojos subModules/${NEW_APP_DIRECTORY}Pojos
cd subModules/${NEW_APP_DIRECTORY}Pojos
mv MyAppPojos.iml ${NEW_APP_DIRECTORY}Pojos.iml
mkdir -p src/main/java/$NEW_PACKAGE_PATH
mkdir -p src/test/java/$NEW_PACKAGE_PATH
mv src/main/java/$ORIGINAL_PACKAGE_PATH/* src/main/java/$NEW_PACKAGE_PATH
mv src/test/java/$ORIGINAL_PACKAGE_PATH/* src/test/java/$NEW_PACKAGE_PATH
# Delete the old package directories if they are now empty, i.e. if the new package does not reuse the co.kaush.mystarterapp prefix
rmdir src/main/java/co/kaush/mystarterapp/myappmodule > /dev/null 2>&1
rmdir src/main/java/co/kaush/mystarterapp > /dev/null 2>&1
rmdir src/main/java/co/kaush > /dev/null 2>&1
rmdir src/main/java/co > /dev/null 2>&1
rmdir src/test/java/co/kaush/mystarterapp/myappmodule > /dev/null 2>&1
rmdir src/test/java/co/kaush/mystarterapp > /dev/null 2>&1
rmdir src/test/java/co/kaush > /dev/null 2>&1
rmdir src/test/java/co > /dev/null 2>&1
cd ../..
echo "Renaming files and directories within the Main App Module"
cd $NEW_APP_MAIN_MODULE
mv MyAppModule.iml ${NEW_APP_MAIN_MODULE}.iml
mkdir -p src/main/java/$NEW_PACKAGE_PATH
mkdir -p src/androidTest/java/$NEW_PACKAGE_PATH
mv src/main/java/$ORIGINAL_PACKAGE_PATH/* src/main/java/$NEW_PACKAGE_PATH
mv src/androidTest/java/$ORIGINAL_PACKAGE_PATH/* src/androidTest/java/$NEW_PACKAGE_PATH
rm -rf src/main/java/$ORIGINAL_PACKAGE_PATH
# Delete the old package directories if they are now empty, i.e. if the new package does not reuse the co.kaush.mystarterapp prefix
rmdir src/main/java/co/kaush/mystarterapp/myappmodule > /dev/null 2>&1
rmdir src/main/java/co/kaush/mystarterapp > /dev/null 2>&1
rmdir src/main/java/co/kaush > /dev/null 2>&1
rmdir src/main/java/co > /dev/null 2>&1
rmdir src/androidTest/java/co/kaush/mystarterapp/myappmodule > /dev/null 2>&1
rmdir src/androidTest/java/co/kaush/mystarterapp > /dev/null 2>&1
rmdir src/androidTest/java/co/kaush > /dev/null 2>&1
rmdir src/androidTest/java/co > /dev/null 2>&1
cd ..
echo "Renaming in files..."
find ${NEW_APP_MAIN_MODULE} -name "*.xml" -print | xargs sed -i "" -e "s/MyStarterApp/$NEW_APP_DIRECTORY/g"
find . -name "*.iml" -print | xargs sed -i "" -e "s/MyStarterApp/$NEW_APP_DIRECTORY/g"
find . -name "*.iml" -print | xargs sed -i "" -e "s/MyAppModule/${NEW_APP_MAIN_MODULE}/g"
find . -name "*.iml" -print | xargs sed -i "" -e "s/MyAppPojos/${NEW_APP_DIRECTORY}Pojos/g"
find .idea -name .name -print | xargs sed -i "" -e "s/MyStarterApp/$NEW_APP_DIRECTORY/g"
find .idea -name "*.xml" -print | xargs sed -i "" -e "s/MyStarterApp/$NEW_APP_DIRECTORY/g"
find .idea -name "*.xml" -print | xargs sed -i "" -e "s/MyAppModule/${NEW_APP_MAIN_MODULE}/g"
find .idea -name "*.xml" -print | xargs sed -i "" -e "s/MyAppPojos/${NEW_APP_DIRECTORY}Pojos/g"
find . -name "*.gradle" -print | xargs sed -i "" -e "s/MyAppModule/${NEW_APP_MAIN_MODULE}/g"
find . -name "*.gradle" -print | xargs sed -i "" -e "s/MyAppPojos/${NEW_APP_DIRECTORY}Pojos/g"
find ${NEW_APP_MAIN_MODULE}/src -name "*.java" -print | xargs sed -i "" -e "s/$ORIGINAL_PACKAGE_NAME/$NEW_PACKAGE_NAME/g"
find subModules/${NEW_APP_DIRECTORY}Pojos/src -name "*.java" -print | xargs sed -i "" -e "s/$ORIGINAL_PACKAGE_NAME/$NEW_PACKAGE_NAME/g"
sed -i "" -e "s/$ORIGINAL_PACKAGE_NAME/$NEW_PACKAGE_NAME/g" ${NEW_APP_MAIN_MODULE}/src/main/AndroidManifest.xml
sed -i "" -e "s/$ORIGINAL_APP_DIRECTORY/$NEW_APP_DIRECTORY/g" ${NEW_APP_DIRECTORY}.iml .idea/*.xml .idea/runConfigurations/*.xml
|
aristiden7o/MyStarterApp
|
cli/personalize_proj.sh
|
Shell
|
apache-2.0
| 4,712 |
#!/bin/bash
# Copyright 2012 Johns Hopkins University (Author: Daniel Povey). Apache 2.0.
# 2014 Guoguo Chen
# Create denominator lattices for MMI/MPE training, with SGMM models. If the
# features have fMLLR transforms you have to supply the --transform-dir option.
# It gets any speaker vectors from the "alignment dir" ($alidir). Note: this is
# possibly a slight mismatch because the speaker vectors come from supervised
# adaptation.
# Begin configuration section.
nj=4
cmd=run.pl
sub_split=1
beam=13.0
lattice_beam=7.0
acwt=0.1
max_active=5000
transform_dir=
max_mem=20000000 # This will stop the processes getting too large.
# End configuration section.
echo "$0 $@" # Print the command line for logging
[ -f ./path.sh ] && . ./path.sh; # source the path.
. parse_options.sh || exit 1;
if [ $# != 4 ]; then
echo "Usage: steps/make_denlats_sgmm.sh [options] <data-dir> <lang-dir> <src-dir|alidir> <exp-dir>"
echo " e.g.: steps/make_denlats_sgmm.sh data/train data/lang exp/sgmm4a_ali exp/sgmm4a_denlats"
echo "Works for (delta|lda) features, and (with --transform-dir option) such features"
echo " plus transforms."
echo ""
echo "Main options (for others, see top of script file)"
echo " --config <config-file> # config containing options"
echo " --nj <nj> # number of parallel jobs"
echo " --cmd (utils/run.pl|utils/queue.pl <queue opts>) # how to run jobs."
echo " --sub-split <n-split> # e.g. 40; use this for "
echo " # large databases so your jobs will be smaller and"
echo " # will (individually) finish reasonably soon."
echo " --transform-dir <transform-dir> # directory to find fMLLR transforms."
exit 1;
fi
data=$1
lang=$2
alidir=$3 # could also be $srcdir, but only if no vectors supplied.
dir=$4
sdata=$data/split$nj
splice_opts=`cat $alidir/splice_opts 2>/dev/null`
cmvn_opts=`cat $alidir/cmvn_opts 2>/dev/null`
mkdir -p $dir/log
[[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1;
echo $nj > $dir/num_jobs
utils/lang/check_phones_compatible.sh $lang/phones.txt $alidir/phones.txt || exit 1;
cp $alidir/phones.txt $dir || exit 1;
oov=`cat $lang/oov.int` || exit 1;
mkdir -p $dir
cp -RH $lang $dir/
# Compute grammar FST which corresponds to unigram decoding graph.
new_lang="$dir/"$(basename "$lang")
echo "$0: Making unigram grammar FST in $new_lang"
cat $data/text | utils/sym2int.pl --map-oov $oov -f 2- $lang/words.txt | \
awk '{for(n=2;n<=NF;n++){ printf("%s ", $n); } printf("\n"); }' | \
utils/make_unigram_grammar.pl | fstcompile | fstarcsort --sort_type=ilabel > $new_lang/G.fst \
|| exit 1;
# mkgraph.sh expects a whole directory "lang", so put everything in one directory...
# it gets L_disambig.fst and G.fst (among other things) from $dir/lang, and
# final.mdl from $alidir; the output HCLG.fst goes in $dir/graph.
echo "$0: Compiling decoding graph in $dir/dengraph"
if [ -s $dir/dengraph/HCLG.fst ] && [ $dir/dengraph/HCLG.fst -nt $alidir/final.mdl ]; then
echo "$0: Graph $dir/dengraph/HCLG.fst already exists: skipping graph creation."
else
utils/mkgraph.sh $new_lang $alidir $dir/dengraph || exit 1;
fi
if [ -f $alidir/final.mat ]; then feat_type=lda; else feat_type=delta; fi
echo "$0: feature type is $feat_type"
case $feat_type in
delta) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | add-deltas ark:- ark:- |";;
lda) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $alidir/final.mat ark:- ark:- |"
cp $alidir/final.mat $dir
;;
*) echo "$0: Invalid feature type $feat_type" && exit 1;
esac
if [ ! -z "$transform_dir" ]; then # add transforms to features...
echo "$0: using fMLLR transforms from $transform_dir"
[ ! -f $transform_dir/trans.1 ] && echo "Expected $transform_dir/trans.1 to exist."
[ "`cat $transform_dir/num_jobs`" -ne "$nj" ] \
&& echo "$0: mismatch in number of jobs with $transform_dir" && exit 1;
[ -f $alidir/final.mat ] && ! cmp $transform_dir/final.mat $alidir/final.mat && \
echo "$0: LDA transforms differ between $alidir and $transform_dir"
feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark:$transform_dir/trans.JOB ark:- ark:- |"
else
echo "$0: Assuming you don't have a SAT system, since no --transform-dir option supplied "
fi
if [ -f $alidir/gselect.1.gz ]; then
gselect_opt="--gselect=ark,s,cs:gunzip -c $alidir/gselect.JOB.gz|"
else
echo "$0: no such file $alidir/gselect.1.gz" && exit 1;
fi
if [ -f $alidir/vecs.1 ]; then
spkvecs_opt="--spk-vecs=ark:$alidir/vecs.JOB --utt2spk=ark:$sdata/JOB/utt2spk"
else
if [ -f $alidir/final.alimdl ]; then
echo "$0: You seem to have an SGMM system with speaker vectors,"
echo "yet we can't find speaker vectors. Perhaps you supplied"
echo "the model director instead of the alignment directory?"
exit 1;
fi
fi
# if this job is interrupted by the user, we want any background jobs to be
# killed too.
cleanup() {
local pids=$(jobs -pr)
[ -n "$pids" ] && kill $pids
}
trap "cleanup" INT QUIT TERM EXIT
if [ $sub_split -eq 1 ]; then
$cmd JOB=1:$nj $dir/log/decode_den.JOB.log \
sgmm-latgen-faster $spkvecs_opt "$gselect_opt" --beam=$beam \
--lattice-beam=$lattice_beam --acoustic-scale=$acwt \
--max-mem=$max_mem --max-active=$max_active \
--word-symbol-table=$lang/words.txt $alidir/final.mdl \
$dir/dengraph/HCLG.fst "$feats" "ark:|gzip -c >$dir/lat.JOB.gz" || exit 1;
else
# each job from 1 to $nj is split into multiple pieces (sub-split), and we aim
# to have at most two jobs running at each time. The idea is that if we have
# stragglers from one job, we can be processing another one at the same time.
rm $dir/.error 2>/dev/null
prev_pid=
for n in `seq $[nj+1]`; do
if [ $n -gt $nj ]; then
this_pid=
elif [ -f $dir/.done.$n ] && [ $dir/.done.$n -nt $alidir/final.mdl ]; then
echo "$0: Not processing subset $n as already done (delete $dir/.done.$n if not)";
this_pid=
else
sdata2=$data/split$nj/$n/split${sub_split}utt;
split_data.sh --per-utt $sdata/$n $sub_split || exit 1;
mkdir -p $dir/log/$n
mkdir -p $dir/part
feats_subset=`echo $feats | sed "s/trans.JOB/trans.$n/g" | sed s:JOB/:$n/split${sub_split}utt/JOB/:g`
spkvecs_opt_subset=`echo $spkvecs_opt | sed "s/JOB/$n/g"`
gselect_opt_subset=`echo $gselect_opt | sed "s/JOB/$n/g"`
$cmd JOB=1:$sub_split $dir/log/$n/decode_den.JOB.log \
sgmm-latgen-faster $spkvecs_opt_subset "$gselect_opt_subset" \
--beam=$beam --lattice-beam=$lattice_beam \
--acoustic-scale=$acwt --max-mem=$max_mem --max-active=$max_active \
--word-symbol-table=$lang/words.txt $alidir/final.mdl \
$dir/dengraph/HCLG.fst "$feats_subset" \
"ark:|gzip -c >$dir/lat.$n.JOB.gz" || touch $dir/.error &
this_pid=$!
fi
if [ ! -z "$prev_pid" ]; then # Wait for the previous job to merge lattices.
wait $prev_pid
[ -f $dir/.error ] && \
echo "$0: error generating denominator lattices" && exit 1;
rm $dir/.merge_error 2>/dev/null
echo "$0: Merging archives for data subset $prev_n"
for k in `seq $sub_split`; do
gunzip -c $dir/lat.$prev_n.$k.gz || touch $dir/.merge_error;
done | gzip -c > $dir/lat.$prev_n.gz || touch $dir/.merge_error;
[ -f $dir/.merge_error ] && \
echo "$0: Merging lattices for subset $prev_n failed" && exit 1;
rm $dir/lat.$prev_n.*.gz
touch $dir/.done.$prev_n
fi
prev_n=$n
prev_pid=$this_pid
done
fi
echo "$0: done generating denominator lattices with SGMMs."
|
jmolina116/kaldi-yesno-tutorial
|
steps/make_denlats_sgmm.sh
|
Shell
|
apache-2.0
| 7,955 |
#! /usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Start: Resolve Script Directory
SOURCE="${BASH_SOURCE[0]}"
while [[ -h "$SOURCE" ]]; do # resolve $SOURCE until the file is no longer a symlink
bin=$( cd -P "$( dirname "$SOURCE" )" && pwd )
SOURCE="$(readlink "$SOURCE")"
[[ $SOURCE != /* ]] && SOURCE="$bin/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located
done
bin=$( cd -P "$( dirname "$SOURCE" )" && pwd )
script=$( basename "$SOURCE" )
# Stop: Resolve Script Directory
lib=${bin}/../lib
native_tarball=${lib}/accumulo-native.tar.gz
final_native_target="${lib}/native"
if [[ ! -f $native_tarball ]]; then
echo "Could not find native code artifact: ${native_tarball}";
exit 1
fi
# Make the destination for the native library
mkdir -p "${final_native_target}" || exit 1
# Make a directory for us to unpack the native source into
TMP_DIR=$(mktemp -d /tmp/accumulo-native.XXXX) || exit 1
# Unpack the tarball to our temp directory
tar xf "${native_tarball}" -C "${TMP_DIR}"
if [[ $? != 0 ]]; then
echo "Failed to unpack native tarball to ${TMP_DIR}"
exit 1
fi
# Move to the first (only) directory in our unpacked tarball
native_dir=$(find "${TMP_DIR}" -maxdepth 1 -mindepth 1 -type d)
cd "${native_dir}" || exit 1
# Make the native library
export USERFLAGS="$@"
make
# Make sure it didn't fail
if [[ $? != 0 ]]; then
echo "Make failed!"
exit 1
fi
# "install" the artifact
cp libaccumulo.* "${final_native_target}" || exit 1
# Clean up our temp directory
rm -rf "${TMP_DIR}"
echo "Successfully installed native library"
|
adamjshook/accumulo
|
assemble/bin/build_native_library.sh
|
Shell
|
apache-2.0
| 2,396 |
#!/bin/bash
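# fw_depends is a helper provided by the FrameworkBenchmarks toolset; it installs the listed dependencies.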
fw_depends java scala sbt
|
kellabyte/FrameworkBenchmarks
|
frameworks/Scala/unfiltered/install.sh
|
Shell
|
bsd-3-clause
| 38 |