code (string, 2–1.05M chars) | repo_name (string, 5–110 chars) | path (string, 3–922 chars) | language (1 class) | license (15 classes) | size (int64, 2–1.05M)
---|---|---|---|---|---
# Copyright © 2018 Feature.su. All rights reserved.
# Licensed under the Apache License, Version 2.0
if [ -z "$1" ]
then
echo "Usage for create certificate: sh self-signed-ssl.sh domain.name"
else
openssl req -x509 -nodes -newkey rsa:2048 -days 365 -keyout /tmp/$1.key -out /tmp/$1.crt -subj /C=/ST=/L=/O=/CN=$1
cat /tmp/$1.key /tmp/$1.crt > /tmp/$1.pem
openssl x509 -outform der -in /tmp/$1.crt -out /tmp/$1.cer
cat /tmp/$1.pem
fi
|
faew/linux.feature
|
script/centos7/self-signed-ssl.sh
|
Shell
|
apache-2.0
| 434 |
#!/bin/bash
# Author: yeho <lj2007331 AT gmail.com>
# BLOG: https://linuxeye.com
#
# Notes: OneinStack for CentOS/RedHat 6+ Debian 8+ and Ubuntu 14+
#
# Project home page:
# https://oneinstack.com
# https://github.com/oneinstack/oneinstack
Install_JDK18() {
pushd ${oneinstack_dir}/src > /dev/null
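# Derive the Oracle tarball name from ${jdk18_ver}, e.g. 1.8.0_201 -> jdk-8u201-linux-${SYS_BIT_j}.tar.gz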
JDK_FILE="jdk-`echo ${jdk18_ver} | awk -F. '{print $2}'`u`echo ${jdk18_ver} | awk -F_ '{print $NF}'`-linux-${SYS_BIT_j}.tar.gz"
JAVA_dir=/usr/java
JDK_NAME="jdk${jdk18_ver}"
JDK_PATH=${JAVA_dir}/${JDK_NAME}
[ "${PM}" == 'yum' ] && [ -n "`rpm -qa | grep jdk`" ] && rpm -e `rpm -qa | grep jdk`
[ ! -e ${JAVA_dir} ] && mkdir -p ${JAVA_dir}
tar xzf ${JDK_FILE} -C ${JAVA_dir}
if [ -d "${JDK_PATH}" ]; then
chown -R ${run_user}:${run_group} ${JDK_PATH}
/bin/cp ${JDK_PATH}/jre/lib/security/cacerts /etc/ssl/certs/java
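# Idempotently register JAVA_HOME, CLASSPATH and $JAVA_HOME/bin in /etc/profile:
# update existing export lines in place, otherwise insert new ones around the export PATH line.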
[ -z "`grep ^'export JAVA_HOME=' /etc/profile`" ] && { [ -z "`grep ^'export PATH=' /etc/profile`" ] && echo "export JAVA_HOME=${JDK_PATH}" >> /etc/profile || sed -i "s@^export PATH=@export JAVA_HOME=${JDK_PATH}\nexport PATH=@" /etc/profile; } || sed -i "s@^export JAVA_HOME=.*@export JAVA_HOME=${JDK_PATH}@" /etc/profile
[ -z "`grep ^'export CLASSPATH=' /etc/profile`" ] && sed -i "s@export JAVA_HOME=\(.*\)@export JAVA_HOME=\1\nexport CLASSPATH=\$JAVA_HOME/lib/tools.jar:\$JAVA_HOME/lib/dt.jar:\$JAVA_HOME/lib@" /etc/profile
[ -n "`grep ^'export PATH=' /etc/profile`" -a -z "`grep '$JAVA_HOME/bin' /etc/profile`" ] && sed -i "s@^export PATH=\(.*\)@export PATH=\$JAVA_HOME/bin:\1@" /etc/profile
[ -z "`grep ^'export PATH=' /etc/profile | grep '$JAVA_HOME/bin'`" ] && echo 'export PATH=$JAVA_HOME/bin:$PATH' >> /etc/profile
. /etc/profile
echo "${CSUCCESS}$JDK_NAME installed successfully! ${CEND}"
else
echo "${CFAILURE}JDK install failed, Please contact the author! ${CEND}" && lsb_release -a
kill -9 $$
fi
popd
}
|
lj2007331/oneinstack
|
include/jdk-1.8.sh
|
Shell
|
apache-2.0
| 1,906 |
wget https://s3-us-west-2.amazonaws.com/ray-wheels/releases/$RAY_VERSION/$RAY_HASH/ray-$RAY_VERSION-cp35-cp35m-manylinux1_x86_64.whl
wget https://s3-us-west-2.amazonaws.com/ray-wheels/releases/$RAY_VERSION/$RAY_HASH/ray-$RAY_VERSION-cp36-cp36m-manylinux1_x86_64.whl
wget https://s3-us-west-2.amazonaws.com/ray-wheels/releases/$RAY_VERSION/$RAY_HASH/ray-$RAY_VERSION-cp37-cp37m-manylinux1_x86_64.whl
wget https://s3-us-west-2.amazonaws.com/ray-wheels/releases/$RAY_VERSION/$RAY_HASH/ray-$RAY_VERSION-cp35-cp35m-macosx_10_6_intel.whl
wget https://s3-us-west-2.amazonaws.com/ray-wheels/releases/$RAY_VERSION/$RAY_HASH/ray-$RAY_VERSION-cp36-cp36m-macosx_10_6_intel.whl
wget https://s3-us-west-2.amazonaws.com/ray-wheels/releases/$RAY_VERSION/$RAY_HASH/ray-$RAY_VERSION-cp37-cp37m-macosx_10_6_intel.whl
|
stephanie-wang/ray
|
doc/dev/download_wheels.sh
|
Shell
|
apache-2.0
| 798 |
$YARN_HOME/sbin/yarn-daemon.sh stop nodemanager
|
yongkianon/test
|
hadoop/bin/stopNM.sh
|
Shell
|
apache-2.0
| 48 |
#!/bin/bash
is_controller=%(is_controller)s
is_ceph=%(is_ceph)s
is_cinder=%(is_cinder)s
is_mongo=%(is_mongo)s
install_pkg() {
pkg=$1
cd %(dst_dir)s/upgrade
tar -xzf $pkg
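# ${pkg::-7} strips the trailing '.tar.gz' to get the extracted directory name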
dir=${pkg::-7}
cd $dir
python setup.py build
python setup.py install
}
controller() {
PKGS=%(dst_dir)s/upgrade/*
for pkg in $PKGS
do
if [[ $pkg == *"bsnstacklib"* ]]; then
install_pkg $pkg
neutron-db-manage upgrade heads
service neutron-server restart
service neutron-bsn-lldp restart
fi
if [[ $pkg == *"horizon-bsn"* ]]; then
install_pkg $pkg
service apache2 restart
fi
done
}
compute() {
PKGS=%(dst_dir)s/upgrade/*
for pkg in $PKGS
do
if [[ $pkg == *"bsnstacklib"* ]]; then
install_pkg $pkg
service neutron-bsn-lldp restart
fi
done
}
ceph() {
: # no ceph-specific upgrade steps yet
}
cinder() {
: # no cinder-specific upgrade steps yet
}
mongo() {
: # no mongo-specific upgrade steps yet
}
set +e
# Make sure only root can run this script
if [[ "$(id -u)" != "0" ]]; then
echo -e "Please run as root"
exit 1
fi
if [[ $is_controller == true ]]; then
controller
elif [[ $is_ceph == true ]]; then
ceph
elif [[ $is_cinder == true ]]; then
cinder
elif [[ $is_mongo == true ]]; then
mongo
else
compute
fi
set -e
exit 0
|
xinwu/bosi-1
|
etc/t5/bash_template/ubuntu_14_upgrade.sh
|
Shell
|
apache-2.0
| 1,309 |
#!/bin/bash
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Setup kubeconfig
function set_up_credentials {
gcloud container clusters get-credentials ${WORKLOAD_CLUSTER} --region $WORKLOAD_CLUSTER_REGION --project ${PROJECT_ID}
kubectl config rename-context gke_${PROJECT_ID}_${WORKLOAD_CLUSTER_REGION}_${WORKLOAD_CLUSTER} ${WORKLOAD_CLUSTER}
}
# Annotate "default" namespace in the WORKLOAD_CLUSTER to enforce sidecar injection
function enforce_sidecar_injection {
kubectl --context=${WORKLOAD_CLUSTER} label namespace default istio-injection=enabled --overwrite
echo "Sidecar injection enabled"
}
# Enforce mTLS for services running the "default" namespace in the WORKLOAD_CLUSTER
function enforce_mtls_in_namespace {
kubectl --context=${WORKLOAD_CLUSTER} apply -n default -f manifests/enforce-mtls.yaml
echo "mTLS enforced in namespace"
}
# Deploy sample application
function deploy_sample_app {
kubectl --context=${WORKLOAD_CLUSTER} apply -f https://raw.githubusercontent.com/istio/istio/master/samples/bookinfo/platform/kube/bookinfo.yaml
kubectl --context=${WORKLOAD_CLUSTER} apply -f https://raw.githubusercontent.com/istio/istio/master/samples/bookinfo/networking/destination-rule-all.yaml
# Run kubectl wait to check if the reviews app is deployed and available
kubectl wait deployments.apps -n default reviews-v1 --for condition=available --timeout=600s
echo "Sample application deployment complete"
}
# Deploy a virtual service for reviews
function deploy_reviews_vs {
kubectl --context=${WORKLOAD_CLUSTER} apply -f manifests/virtualservice-reviews.yaml
}
# Annotate the istio-ingressgateway service to use an internal load balancer
function annotate_to_use_ilb {
kubectl --context=${WORKLOAD_CLUSTER} annotate svc istio-ingressgateway -n istio-system cloud.google.com/load-balancer-type="Internal" --overwrite
echo "Continuing in 30 seconds. Ctrl+C to cancel"
sleep 30
# Get the IP of the internal load balancer created
NETWORKLB=$(kubectl --context=${WORKLOAD_CLUSTER} get services/istio-ingressgateway -n istio-system \
--output=jsonpath='{.status.loadBalancer.ingress[0].ip }')
echo "The IP of the internal LoadBalancer is ${NETWORKLB}"
}
function install_certmanager {
echo "👩🏽💼 Installing Cert Manager"
kubectl apply --validate=false -f https://github.com/jetstack/cert-manager/releases/download/v0.16.1/cert-manager.yaml
}
function deploy_reviews_proxy() {
echo "🦄 Deploy Sample Proxy"
NETWORKLB=$(kubectl --context=${CLUSTER_NAME} get services/istio-ingressgateway -n istio-system \
--output=jsonpath='{.status.loadBalancer.ingress[0].ip }')
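# Temporarily substitute the internal LB IP for the @TargetURL@ placeholder, zip the proxy bundle, then restore the placeholder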
sed -i "" "s/@TargetURL@/$NETWORKLB/" apigee-hybrid/reviews-v1/apiproxy/targets/default.xml
(cd apigee-hybrid/reviews-v1 && zip -r apiproxy.zip apiproxy/*)
sed -i "" "s/$NETWORKLB/@TargetURL@/" apigee-hybrid/reviews-v1/apiproxy/targets/default.xml
curl -X POST \
"https://apigee.googleapis.com/v1/organizations/${PROJECT_ID}/apis?action=import&name=reviews-v1&validate=true" \
-H "Authorization: Bearer $(token)" \
-H "Content-Type: multipart/form-data" \
-F "zipFile=@apigee-hybrid/reviews-v1/apiproxy.zip"
PROXY_REV=$(curl -X POST \
"https://apigee.googleapis.com/v1/organizations/${PROJECT_ID}/apis?action=import&name=reviews-v1&validate=true" \
-H "Authorization: Bearer $(token)" \
-H "Content-Type: multipart/form-data" \
-F "zipFile=@apigee-hybrid/reviews-v1/apiproxy.zip" | grep '"revision": "[^"]*' | cut -d'"' -f4)
rm apigee-hybrid/reviews-v1/apiproxy.zip
curl -v -X POST \
"https://apigee.googleapis.com/v1/organizations/${PROJECT_ID}/environments/$ENV_NAME/apis/reviews-v1/revisions/${PROXY_REV}/deployments?override=true" \
-H "Authorization: Bearer $(token)"
echo "✅ Sample Proxy Deployed"
echo "🤓 Try without DNS (first deployment takes a few seconds. Relax and breathe!):"
echo "curl --cacert $QUICKSTART_ROOT/hybrid-files/certs/$DNS_NAME.crt --resolve api.$DNS_NAME:443:$INGRESS_IP https://api.$DNS_NAME/reviews/1"
echo "👋 To reach it via the FQDN: Make sure you add this as an NS record for $DNS_NAME: $NAME_SERVER"
}
function run_apigee {
curl https://raw.githubusercontent.com/apigee/devrel/main/tools/hybrid-quickstart/hybrid13/steps.sh -o /tmp/apigee-hybrid-quickstart-steps.sh
source /tmp/apigee-hybrid-quickstart-steps.sh
export CLUSTER_NAME=${APIGEE_RUNTIME_CLUSTER:='apigee-hybrid'}
export REGION=${APIGEE_RUNTIME_REGION:='europe-west1'}
export ZONE=${APIGEE_RUNTIME_ZONE:='europe-west1-b'}
enable_all_apis
set_config_params
gcloud container clusters get-credentials ${APIGEE_RUNTIME_CLUSTER} --zone $ZONE --project ${PROJECT_ID}
kubectl config rename-context gke_${PROJECT_ID}_${ZONE}_${APIGEE_RUNTIME_CLUSTER} ${APIGEE_RUNTIME_CLUSTER}
kubectl config use-context ${APIGEE_RUNTIME_CLUSTER}
kubectl create clusterrolebinding cluster-admin-binding \
--clusterrole cluster-admin --user "$(gcloud config get-value account)"
install_certmanager # defined in this file, not the remote steps.sh file
create_apigee_org
create_apigee_env "test"
create_apigee_envgroup "default"
add_env_to_envgroup "test" "default"
configure_network
download_apigee_ctl
prepare_resources
create_sa
install_runtime
deploy_reviews_proxy # defined in this file, not the remote steps.sh file
}
function run_all {
set_up_credentials
enforce_sidecar_injection
enforce_mtls_in_namespace
deploy_sample_app
deploy_reviews_vs
annotate_to_use_ilb
run_apigee
echo "Done running"
}
run_all
|
GoogleCloudPlatform/anthos-security-blueprints
|
protecting-api-endpoints/run.sh
|
Shell
|
apache-2.0
| 6,102 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# included in all the hadoop scripts with source command
# should not be executable directly
# also should not be passed any arguments, since we need original $*
# resolve links - $0 may be a softlink
this="$0"
while [ -h "$this" ]; do
ls=`ls -ld "$this"`
link=`expr "$ls" : '.*-> \(.*\)$'`
if expr "$link" : '.*/.*' > /dev/null; then
this="$link"
else
this=`dirname "$this"`/"$link"
fi
done
# convert relative path to absolute path
bin=`dirname "$this"`
script=`basename "$this"`
bin=`cd "$bin"; pwd`
this="$bin/$script"
# the root of the Hadoop installation
if [ -z "$HADOOP_HOME" ]; then
export HADOOP_HOME=`dirname "$this"`/..
fi
# double check that our HADOOP_HOME looks reasonable.
# cding to / here verifies that we have an absolute path, which is
# necessary for the daemons to function properly
if [ -z "$(cd / && ls $HADOOP_HOME/hadoop-core-*.jar $HADOOP_HOME/build 2>/dev/null)" ]; then
cat 1>&2 <<EOF
+================================================================+
| Error: HADOOP_HOME is not set correctly |
+----------------------------------------------------------------+
| Please set your HADOOP_HOME variable to the absolute path of |
| the directory that contains hadoop-core-VERSION.jar |
+================================================================+
EOF
exit 1
fi
#check to see if the conf dir is given as an optional argument
if [ $# -gt 1 ]
then
if [ "--config" = "$1" ]
then
shift
confdir=$1
shift
HADOOP_CONF_DIR=$confdir
fi
fi
# Allow alternate conf dir location.
HADOOP_CONF_DIR="${HADOOP_CONF_DIR:-$HADOOP_HOME/conf}"
if [ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]; then
. "${HADOOP_CONF_DIR}/hadoop-env.sh"
fi
# attempt to find java
if [ -z "$JAVA_HOME" ]; then
for candidate in \
/usr/lib/jvm/java-6-sun \
/usr/lib/j2sdk1.6-sun \
/usr/java/jdk1.6* \
/usr/java/jre1.6* \
/Library/Java/Home ; do
if [ -e $candidate/bin/java ]; then
export JAVA_HOME=$candidate
break
fi
done
# if we didn't set it
if [ -z "$JAVA_HOME" ]; then
cat 1>&2 <<EOF
+======================================================================+
| Error: JAVA_HOME is not set and Java could not be found |
+----------------------------------------------------------------------+
| Please download the latest Sun JDK from the Sun Java web site |
| > http://java.sun.com/javase/downloads/ < |
| |
| Hadoop requires Java 1.6 or later. |
| NOTE: This script will find Sun Java whether you install using the |
| binary or the RPM based installer. |
+======================================================================+
EOF
exit 1
fi
fi
if [ -d $HADOOP_HOME/pids ]; then
HADOOP_PID_DIR="${HADOOP_PID_DIR:-$HADOOP_HOME/pids}"
fi
#check to see if it is specified whether to use the slaves or the
# masters file
if [ $# -gt 1 ]
then
if [ "--hosts" = "$1" ]
then
shift
slavesfile=$1
shift
export HADOOP_SLAVES="${HADOOP_CONF_DIR}/$slavesfile"
fi
fi
|
ryanobjc/hadoop-cloudera
|
bin/hadoop-config.sh
|
Shell
|
apache-2.0
| 4,023 |
#!/bin/bash
if [ $# -lt 2 ] ; then
echo -e "\e[1;33m!!!USEAGE: check_status.sh [IP_ADDR] [PASSWORD]\e[0m"
exit 1
fi
./sync_cmd.sh $1 $2 "grep -r 'application_name' /var/vcap/data/dea_next/db/instances.json"
|
tesir/extend_dea
|
list_apps.sh
|
Shell
|
artistic-2.0
| 217 |
#!/bin/bash
# Checks whether dropbox or owncloud are running and kills them if they are.
# I created this script because I was online a lot via smartphone AP and wanted to minimize background traffic.
if [ -z "$(pgrep dropbox)" ]; then
echo "No running dropbox instance found"
else
echo "Dropbox is running"
killall dropbox
echo "Dropbox killed"
fi
if [ -z "$(pgrep owncloud)" ]; then
echo "No running owncloud instance found"
else
echo "Owncloud is running"
killall owncloud
echo "Owncloud killed"
fi
|
saschalalala/someScripts
|
mobile.sh
|
Shell
|
bsd-2-clause
| 488 |
[[ ! -d prod~ ]] && mkdir prod~
ncftpget -R -u voilokov -p `pass gifts` ftp.voilokov.com prod~/ '/public_html/gifts/*.*'
diff -q src/ prod~/
|
serge-v/gifts
|
bin/compare.sh
|
Shell
|
bsd-2-clause
| 141 |
#!/bin/bash -ex
export DJANGO_SETTINGS_MODULE=standup.settings
export DATABASE_URL=sqlite://
export SECRET_KEY=itsasekrit
export OIDC_RP_CLIENT_ID=ou812
export OIDC_RP_CLIENT_SECRET=secret_ou812
export OIDC_OP_DOMAIN=example.com
flake8
py.test "$@"
|
mozilla/standup
|
bin/run-tests.sh
|
Shell
|
bsd-3-clause
| 250 |
#!/bin/bash
mkdir build && cd build
if [ -z "$COVERALLS" ]
then
cmake -DCOVERALLS=On -DCMAKE_BUILD_TYPE=Debug ..
else
cmake -DCMAKE_BUILD_TYPE=Debug ..
fi
cmake --build .
cd ../
./build/test
|
pierreguillot/zpd
|
config/script.sh
|
Shell
|
bsd-3-clause
| 192 |
#!/bin/sh
rm output/*
./optical_flow_software ../datasets/current/frame1.png ../datasets/current/frame2.png ../datasets/current/frame3.png ../datasets/current/frame4.png ../datasets/current/frame5.png output/out.flo ../datasets/current/ref.flo
../dependencies/sintel_flow_code/C/color_flow output/out.flo output/out.png
|
cornell-zhang/datuner
|
flows/opticalflow_hls/run.sh
|
Shell
|
bsd-3-clause
| 321 |
#!/bin/sh
java -cp ".:libs/*" BatchUploader
|
jcline/Batch-Queuer
|
run.sh
|
Shell
|
bsd-3-clause
| 44 |
#!/bin/bash
# TODO double check this, it's logically different from original php code.
# Two path checks would both always run in php. In this code the check
# for apc.so only happens if the check for php fails. Is that ok?
RETCODE=$(fw_exists ${IROOT}/php.installed)
[ ! "$RETCODE" == 0 ] || { \
echo "Moving PHP config files into place";
sudo cp $FWROOT/config/php.ini /usr/local/lib/php.ini
sudo cp $FWROOT/config/php-fpm.conf /usr/local/lib/php-fpm.conf
return 0; }
VERSION="5.5.17"
fw_get http://php.net/distributions/php-${VERSION}.tar.gz -O php-${VERSION}.tar.gz
fw_untar php-${VERSION}.tar.gz
mv php-${VERSION} php
cd php
echo "Configuring PHP quietly..."
./configure --prefix=$IROOT/php-${VERSION} --with-pdo-mysql \
--with-mysql --with-mcrypt --enable-intl --enable-mbstring \
--enable-fpm --with-fpm-user=testrunner --with-fpm-group=testrunner \
--with-openssl --enable-opcache --quiet
echo "Making PHP quietly..."
make --quiet
echo "Installing PHP quietly"
make --quiet install
cd ..
cp $FWROOT/config/php.ini $IROOT/php-${VERSION}/lib/php.ini
cp $FWROOT/config/php-fpm.conf $IROOT/php-${VERSION}/lib/php-fpm.conf
# =======================
#
# Install the PHP extensions that our tests need
# Install all of them here because our config file references
# all of these *.so
# ========================
echo PHP compilation finished, installing extensions
# Apc.so
$IROOT/php-${VERSION}/bin/pecl config-set php_ini $IROOT/php-${VERSION}/lib/php.ini
#printf "\n" | $IROOT/php-5.5.17/bin/pecl install -f apc-beta
printf "\n" | $IROOT/php-${VERSION}/bin/pecl -q install -f redis
# yaf.so
printf "\n" | $IROOT/php-${VERSION}/bin/pecl -q install -f yaf
# phalcon.so
# The configure seems broken, does not respect prefix. If you
# update the value of PATH then it finds the prefix from `which php`
git clone --depth=1 --branch=phalcon-v1.3.2 --single-branch \
--quiet git://github.com/phalcon/cphalcon.git
cd cphalcon/build/64bits
$IROOT/php-${VERSION}/bin/phpize
# For some reason we have to point to php-config
# explicitly, it's not found by the prefix settings
./configure --prefix=$IROOT/php-${VERSION} --exec-prefix=$IROOT/php-${VERSION} \
--with-php-config=$IROOT/php-${VERSION}/bin/php-config \
--enable-phalcon --quiet
make --quiet
make install
# Clean up a bit
rm -rf $IROOT/php
touch $IROOT/php.installed
|
kellabyte/FrameworkBenchmarks
|
toolset/setup/linux/languages/php.sh
|
Shell
|
bsd-3-clause
| 2,364 |
#!/bin/bash
# run preprocessing of the data
R --vanilla < ../src/fat.rcim.preproc.R
# run rcim in all step sizes
# each script loops through the three window sizes
R --vanilla < ../src/fat.rcim_s0.R
R --vanilla < ../src/fat.rcim_s1.R
R --vanilla < ../src/fat.rcim_s05.R
# summarizing the individual runs
R --vanilla < ../src/fat.rcim_smry_s0,1,05.R
# to run plots successfully, you require the 3x3 rCIM factorial design
R --vanilla < ../src/fat.plots.R
|
RodrigoGM/Mmu2QTL
|
analysis/fat.rcim.analysis.sh
|
Shell
|
bsd-3-clause
| 456 |
#!/bin/bash
# update your current image from the default repo
# This will ensure that the remaining package management installs will work!
sudo apt-get -y update
# install to allow x11 forwarding of gui interfaces
sudo apt-get -y install xauth x11-apps libxt-dev
# install nano xemacs emacs editors
sudo apt-get -y install nano xemacs21 xemacs21-bin xemacs21-supportel xemacs21-basesupport-el
sudo apt-get -y install emacs emacs24 emacs24-common emacs-goodies-el vim
# install tools that are needed for maali to work
sudo apt-get -y install bc wget make environment-modules libtool
# install the default gnu compilers
# to find the version number, run "gcc -v"
sudo apt-get -y install gcc gfortran g++ libgomp1
if [ ! -d "/cloud" ]; then
sudo mkdir /cloud
sudo chown -R ubuntu /cloud
fi
# install additional packages needed to install R/3.3.+ with maali
sudo apt -y install libreadline6 libreadline6-dev
sudo apt -y install libssl-dev
sudo apt -y install screen
|
chrisbpawsey/maali
|
install_scripts/Install_maali_bio_ubuntu.sh
|
Shell
|
bsd-3-clause
| 983 |
#!/bin/sh
PACKAGE=lighttpd
VERSION=1.4.37
NAME=lighttpd-1.4.37
DISTDIR="/home/jan/wwwroot/servers/www.lighttpd.net/pages/download/"
RPMS="/home/jan/rpmbuild/RPMS/i386/${NAME}-1.i386.rpm \
/home/jan/rpmbuild/SRPMS/${NAME}-1.src.rpm"
FILES="${RPMS} ${NAME}.tar.gz \
NEWS.html \
ChangeLog \
release-news.${VERSION}.txt \
${NAME}.tar.gz.sig"
DLURL="http://www.lighttpd.net/download"
pack=0
echo $1
case "$1" in
--pack) pack=1;;
esac
echo ${pack}
if test x${pack} = x1; then
make distcheck && rpmbuild -ta --nodeps ${NAME}.tar.gz
gpg --detach-sign ${NAME}.tar.gz
rpm --addsign ${RPMS}
fi
MD5RPM=`md5sum /home/jan/rpmbuild/RPMS/i386/${NAME}-1.i386.rpm| cut -b 1-32`
MD5SRPM=`md5sum /home/jan/rpmbuild/SRPMS/${NAME}-1.src.rpm| cut -b 1-32`
MD5TGZ=`md5sum ${NAME}.tar.gz| cut -b 1-32`
DATE=`date +'%Y-%m-%d %H:%M'`
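# Extract this release's changelog from NEWS: keep the block between '- ${VERSION}' and the next '- ' heading, then drop headings and blank lines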
NEWS=`cat NEWS | sed "/^- ${VERSION}/,/^-/p;d" | sed "/^- /d;/^$/d"`
DLNAME="${DLURL}/${NAME}"
cat > release-news.${VERSION}-mail.txt <<EOF
${PACKAGE} ${VERSION} - ${DATE}
Changes
-------
${NEWS}
Download
- ${NAME}-1.i386.rpm [built on Fedora Core 4]
${DLNAME}-1.i386.rpm
MD5: ${MD5RPM}
- ${NAME}-1.src.rpm
${DLNAME}-1.src.rpm
MD5: ${MD5SRPM}
- ${NAME}.tar.gz
${DLNAME}.tar.gz
MD5: ${MD5TGZ}
Signature: ${DLNAME}.tar.gz.sig
EOF
cat > release-news.${VERSION}.txt <<EOF
${PACKAGE} ${VERSION} - ${DATE}
Changes
-------
${NEWS}
Checksums
- ${NAME}-1.i386.rpm [built on Fedora Core 4]
MD5: ${MD5RPM}
- ${NAME}-1.src.rpm
MD5: ${MD5SRPM}
- ${NAME}.tar.gz
MD5: ${MD5TGZ}
EOF
rst2html NEWS > NEWS.html
for i in ${DISTDIR}; do
cp -u ${FILES} $i
done
curdir=`pwd`
cd ~/wwwroot/servers/www.lighttpd.net/
make put
cd ${curdir}
|
vadimsu/lighttpd_ported
|
distribute.sh
|
Shell
|
bsd-3-clause
| 1,670 |
#!/bin/bash
BASE_ENDPOINT="$1"
SWAGGER_ENDPOINT="$2"
BASELINE_REP_NAME="$3"
API_REP_NAME="$4"
REPORT_DEST_DIR="$5"
docker pull owasp/zap2docker-weekly
## run the baseline scan
if [[ $6 == '--baseline=true' ]]; then
echo 'Running the baseline test'
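# The -z options configure ZAP's replacer add-on to inject an 'Authorization: ROOTid' header into every request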
docker run -v $(pwd):/zap/wrk/:rw -t owasp/zap2docker-weekly zap-baseline.py -c udaruBaseline.config -t $BASE_ENDPOINT -r $BASELINE_REP_NAME \
-z "-config replacer.full_list\(0\).description=auth1 -config replacer.full_list\(0\).enabled=true -config replacer.full_list\(0\).matchtype=REQ_HEADER -config replacer.full_list\(0\).matchstr=Authorization -config replacer.full_list\(0\).regex=false -config replacer.full_list\(0\).replacement=ROOTid"
if [ -f $BASELINE_REP_NAME ]; then
echo 'Moving report file'
mv $BASELINE_REP_NAME $REPORT_DEST_DIR
fi
fi
if [[ $7 == '--api=true' ]]; then
## run the api attack scan
echo 'Running the API attack test'
docker run -v $(pwd):/zap/wrk/:rw -t owasp/zap2docker-weekly zap-api-scan.py -t $SWAGGER_ENDPOINT -f openapi -d -c udaruApi.config -r $API_REP_NAME \
-z "-config replacer.full_list\(0\).description=auth1 -config replacer.full_list\(0\).enabled=true -config replacer.full_list\(0\).matchtype=REQ_HEADER -config replacer.full_list\(0\).matchstr=Authorization -config replacer.full_list\(0\).regex=false -config replacer.full_list\(0\).replacement=ROOTid"
if [ -f $API_REP_NAME ]; then
echo 'Moving report file'
mv $API_REP_NAME $REPORT_DEST_DIR
fi
fi
|
nearform/labs-authorization
|
packages/udaru-hapi-server/security/penetration/runner.sh
|
Shell
|
mit
| 1,532 |
#!/bin/bash
# -*- coding: utf-8 -*-
# Configuration overloads for the vps module. Basically it offers a more
# sophisticated way to configure $vps_ip, $vps_intranet and $vps_host_ip, which
# you might want to set yourself in your script that calls `conf() vps`.
# Prompts the admin for the host ip to use.
# Example config:
## # eth0
## ROUTER_INTERNET_MAP+=("1.1.1.0")
## ROUTER_INTRANET_MAP+=("192.168.1.")
## ROUTER_ZONE_MAP+=("net")
## ROUTER_LABEL_MAP+=("Non-free (eth0)")
## # eth1
## ROUTER_INTERNET_MAP+=("1.1.1.1")
## ROUTER_INTRANET_MAP+=("192.168.2.")
## ROUTER_ZONE_MAP+=("net2")
## ROUTER_LABEL_MAP+=("Free (eth1)")
# Note that the maps really required by vps_conf_interactive_network()
# are the INTERNET and INTRANET ones. The ZONE one is used by the swall module
# tie-in. The LABEL one is just to give your tubes a name; for example we use
# one for clients and one for us and whatever we want to host, so when it says
# "Free" to the admin friend who helped, it really means "YAY ENJOY YOUR FREE GBPS
# BRO!".
# @variable $ROUTER_INTERNET_MAP: list of internet IPs.
# @variable $ROUTER_INTRANET_MAP: list of *corresponding* intranet ips.
# @variable $ROUTER_LABEL_MAP: optional.
function vps_conf_interactive_network() {
if [[ -z $ROUTER_INTERNET_MAP ]] || [[ -z $ROUTER_INTRANET_MAP ]]; then
mlog warning "ROUTER_INTERNET_MAP and ROUTER_INTRANET_MAP are not set, cannot configure network"
return 1
fi
local choice=""
local line=""
mlog info "Please select the network for this ROUTER"
for index in ${!ROUTER_INTERNET_MAP[@]}; do
line="${index}) "
if [[ -n $ROUTER_LABEL_MAP ]]; then
line+="${ROUTER_LABEL_MAP[$index]} "
fi
line+="${ROUTER_INTERNET_MAP[$index]} "
line+="vps_ip: ${ROUTER_INTRANET_MAP[$index]}${vps_id}"
echo $line
done
read -p "Choice number> " choice
vps_intranet=${ROUTER_INTRANET_MAP[$choice]}
vps_ip=${vps_intranet}${vps_id}
vps_host_ip=${ROUTER_INTERNET_MAP[$choice]}
}
# If $ROUTER_INTERNET_MAP and friends are set then use the sophisticated
# multi-interface network routing configurator. Runs normally otherwise.
function vps_conf_interactive() {
if [[ -n $ROUTER_INTERNET_MAP ]] && [[ -n $ROUTER_INTRANET_MAP ]]; then
unset vps_ip
unset vps_intranet
unset vps_host_ip
fi
conf_interactive vps
if [[ -n $ROUTER_INTERNET_MAP ]] && [[ -n $ROUTER_INTRANET_MAP ]]; then
vps_conf_interactive_network
fi
}
# Setter for the "master" variable.
# Polite caller:
## conf_set master somevalue
function vps_master_set() {
vps_master="$1"
vps_packages_dir="$VPS_DIR/$vps_master/pkgdir"
}
# Setter for the "name" variable.
# Polite caller:
## conf_set name somevalue
function vps_name_set() {
vps_name="$1"
vps_root="${VPS_DIR}/${vps_name}"
vps_conf_path="$(vps_conf_get_path)"
}
# Setter for the "stage_name" variable.
# Polite caller:
## conf_set stage_name somevalue
function vps_stage_name_set() {
vps_stage_name="$1"
vps_stage_url="http://bb.xnull.de/projects/gentoo/stages/i686/gentoo-i686-20090611/vserver/${vps_stage_name}";
vps_stage_path="/tmp/${vps_stage_name}"
}
# Logs configuration inconsistencies.
# @credit MetaPhaze
function vps_conf_forensic() {
if [[ ! -d $vps_root ]]; then
mlog alert "root of vps ($vps_name) is not a directory: $vps_root"
fi
if ! echo "$vps_packages_dir" | grep -q "$(vps_get_property $vps_master root)"; then
mlog alert "master root ($(vps_get_property $vps_master root)) not in $vps_packages_dir"
fi
source "$vps_root/etc/make.globals"
source "$vps_root/etc/make.conf"
local guest_pkgdir="${PKGDIR}"
source "$(vps_get_property $vps_master root)/etc/make.globals"
source "$(vps_get_property $vps_master root)/etc/make.conf"
local master_pkgdir="$(vps_get_property $vps_master root)${PKGDIR}"
# test guest fstab pkgdir
local expected="$master_pkgdir $guest_pkgdir"
local fstab="$VPS_ETC_DIR/$vps_name/fstab"
if ! grep -q "$expected" $fstab; then
mlog alert "fstab ($fstab) does not mount master pkgdir ($master_pkgdir) on guest pkgdir ($guest_pkgdir)"
fi
source $(vps_get_property $vps_master root)/etc/make.globals
source $(vps_get_property $vps_master root)/etc/make.conf
# test master buildpkg feature
if ! echo $FEATURES | grep -q buildpkg; then
mlog alert "'buildpkg' not in master portage FEATURES"
fi
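# Derive the installed baselayout version from its package database directory, e.g. .../baselayout-2.2 -> 2.2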
local baselayout=$(find $vps_root/var/db/pkg/sys-apps/ -name "baselayout-*" -type d)
baselayout=${baselayout##*/}
baselayout=${baselayout/baselayout-/}
if [[ ! $baselayout =~ ^2\. ]]; then
mlog alert "Installed baselayout version should be 2.x, current: $baselayout and don't forget to patch it with vps_configure_baselayout()"
fi
}
# Outputs a list of conf names useable with the vps() conf loading function.
function vps_conf_all() {
local name
for name in $VPS_ETC_DIR/*.config; do
name="${name/$VPS_ETC_DIR\//}"
name="${name/.config/}"
echo $name
done
}
|
jpic/bashworks
|
vps/conf.sh
|
Shell
|
mit
| 5,149 |
###############
## functions ##
###############
man() {
env \
LESS_TERMCAP_mb=$(printf "\e[1;31m") \
LESS_TERMCAP_md=$(printf "\e[1;31m") \
LESS_TERMCAP_me=$(printf "\e[0m") \
LESS_TERMCAP_se=$(printf "\e[0m") \
LESS_TERMCAP_so=$(printf "\e[1;44;33m") \
LESS_TERMCAP_ue=$(printf "\e[0m") \
LESS_TERMCAP_us=$(printf "\e[1;32m") \
man "$@"
}
weather() {
if [[ -n "$1" ]]; then
curl wttr.in/"$1"
else
# tac | tac buffers the whole response so head does not cut curl off mid-transfer
curl wttr.in | tac | tac | head -n 7
fi
}
if which cower &> /dev/null ; then
coweri() {
if [[ -n "$1" ]]; then
currentdir=$(pwd)
if [[ ! ( -d ~/.cache/cower ) ]]; then
mkdir -p -v ~/.cache/cower
fi
cd ~/.cache/cower
if [[ ( -d ~/.cache/cower/$1 ) ]]; then
whattodo="r"
vared -p "~/.cache/cower/$1 already exists, (o)verwrite/(r)emove? : " -c whattodo
[[ "$whattodo" == "o" ]] && cower -df $1 && cd $1
[[ "$whattodo" == "r" ]] && rm -rf ~/.cache/cower/$1 && cower -d $1 && cd $1
else
cower -d $1 && cd $1
fi
$EDITOR PKGBUILD
ans="y"
vared -p "Install $1?: " -c ans
[[ "$ans" == "y" ]] && makepkg -is && cd $currentdir
fi
}
fi
# -----------------------------------------------------------------------------------------
# fzf
# -----------------------------------------------------------------------------------------
if which fzf &> /dev/null ; then
if which rg &> /dev/null ; then
export FZF_DEFAULT_COMMAND='rg --files --no-ignore --hidden --follow --glob "!.git/*"'
elif which ag &> /dev/null ; then
export FZF_DEFAULT_COMMAND='ag --hidden --ignore .git -g ""'
fi
# -----------------------------------------------------------------------------------------
# Usage: fkill | kill process
# -----------------------------------------------------------------------------------------
fkill() {
local pid
pid=$(ps -ef | sed 1d | fzf -m | awk '{print $2}')
if [ "x$pid" != "x" ]
then
echo $pid | xargs kill -${1:-9}
fi
}
# -----------------------------------------------------------------------------------------
# tm - create new tmux session, or switch to existing one. Works from within tmux too. (@bag-man)
# `tm` will allow you to select your tmux session via fzf.
# `tm irc` will attach to the irc session (if it exists), else it will create it.
# -----------------------------------------------------------------------------------------
tm() {
[[ -n "$TMUX" ]] && change="switch-client" || change="attach-session"
if [ $1 ]; then
tmux $change -t "$1" 2>/dev/null || (tmux new-session -d -s $1 && tmux $change -t "$1"); return
fi
session=$(tmux list-sessions -F "#{session_name}" 2>/dev/null | fzf --exit-0) && tmux $change -t "$session" || echo "No sessions found."
}
# -----------------------------------------------------------------------------------------
# Usage: sf <keyword>
# -----------------------------------------------------------------------------------------
sf() {
if [[ "$#" -lt 1 ]]; then echo "Supply string to search for!"; return 1; fi
printf -v search "%q" "$*"
include="yml,js,json,php,md,styl,pug,jade,html,config,py,cpp,c,go,hs,rb,conf,fa,lst"
exclude=".config,.git,node_modules,vendor,build,yarn.lock,*.sty,*.bst,*.coffee,dist"
rg_command='rg --column --line-number --no-heading --fixed-strings --ignore-case --no-ignore --hidden --follow --color "always" -g "*.{'$include'}" -g "!{'$exclude'}/*"'
result=$(eval $rg_command $search | fzf --ansi --multi --reverse | awk -F ':' '{print $1":"$2":"$3}')
files=$(echo $result | awk -F ':' '{print $1}')
lines=$(echo $result | awk -F ':' '{print $2}')
[[ -n "$files" ]] && ${EDITOR:-vim} +$lines $files
}
# -----------------------------------------------------------------------------------------
# Usage: vimf | list subdirectories recursively with preview
# -----------------------------------------------------------------------------------------
vimf() {
previous_file="$1"
file_to_edit=$(select_file $previous_file)
if [[ -n "$file_to_edit" ]]; then
$EDITOR "$file_to_edit"
vimf "$file_to_edit"
fi
}
select_file() {
given_file="$1"
fzf --preview-window right:70%:wrap --query "$given_file" --preview '[[ $(file --mime {}) =~ binary ]] &&
echo {} is a binary file ||
(rougify {} ||
highlight -O ansi -l {} ||
coderay {} ||
cat {}) 2> /dev/null | head -500'
}
fi
# -----------------------------------------------------------------------------------------
# Usage: brightness <level> | adjust brightness
# -----------------------------------------------------------------------------------------
brightness() {
if [[ -n "$1" ]]; then
xrandr --output LVDS1 --brightness $1
else
echo "brightness <0-1>"
fi
}
2display() {
# display screen information
#xrandr
# LVDS1 as primary monitor, HDMI1 right of LVDS1
#xrandr --output LVDS1 --auto --primary --output HDMI1 --auto --right-of LVDS1
connected_displays=$(xrandr | grep " connected" | awk '{print $1}')
echo $connected_displays
vared -p "main display : " -c main
vared -p "second display : " -c second
[[ $connected_displays =~ "$main" ]] &&
[[ $connected_displays =~ "$second" ]] &&
[[ "$main" != "$second" ]] &&
xrandr --output $main --auto --primary --output $second --auto --right-of $main
}
mirrordisplay() {
connected_displays=$(xrandr | grep " connected" | awk '{print $1}')
echo $connected_displays
vared -p "main display : " -c main
vared -p "second display : " -c second
[[ $connected_displays =~ "$main" ]] &&
[[ $connected_displays =~ "$second" ]] &&
[[ "$main" != "$second" ]] &&
xrandr --output $main --auto --primary --output $second --auto --same-as $main
#[[ $connected_displays =~ "$main" ]] && echo "1ok"
#[[ $connected_displays =~ "$second" ]] && echo "2ok"
#[[ "$main" != "$second" ]] && echo "3ok"
}
# -----------------------------------------------------------------------------------------
# Usage: ipv4_in <filename> | grep ipv4 in file
# -----------------------------------------------------------------------------------------
ipv4_in() {
if [[ -n "$1" ]]; then
regex='([0-9]{1,3}\.){3}[0-9]{1,3}'
grep -oE "$regex" $1
else
echo "'$1' is not a valid file"
fi
}
# -----------------------------------------------------------------------------------------
# Usage: ipv6_in <filename> | grep ipv6 in file
# -----------------------------------------------------------------------------------------
ipv6_in() {
if [[ -n "$1" ]]; then
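# Matches full, ::-compressed, link-local (with zone index) and IPv4-mapped IPv6 addresses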
regex='(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))'
grep -oE "$regex" $1
else
echo "'$1' is not a valid file"
fi
}
# -----------------------------------------------------------------------------------------
# Usage: url_in <filename> | grep url in file
# -----------------------------------------------------------------------------------------
url_in() {
if [[ -n "$1" ]]; then
regex="(http[s]?|ftp|file)://[a-zA-Z0-9][a-zA-Z0-9_-]*(\.[a-zA-Z0-9][a-zA-Z0-9_-]*)*(:\d\+)?(\/[a-zA-Z0-9_/.\-+%?&=;@$,!''*~-]*)?(#[a-zA-Z0-9_/.\-+%#?&=;@$,!''*~]*)?"
grep -oE "$regex" $1
else
echo "'$1' is not a valid file"
fi
}
# -------------------------------------------------------------------
# Show how much RAM application uses.
# $ ram safari
# # => safari uses 154.69 MBs of RAM.
# from https://github.com/paulmillr/dotfiles
# -------------------------------------------------------------------
ram() {
local sum
local items
local app="$1"
if [[ -z "$app" ]]; then
echo "First argument - pattern to grep from processes"
else
sum=0
for i in $(ps aux | grep -i "$app" | grep -v "grep" | awk '{print $6}'); do
sum=$(($i + $sum))
done
sum=$(echo "scale=2; $sum / 1024.0" | bc)
if [[ $sum != "0" ]]; then
echo "${fg[blue]}${app}${reset_color} uses ${fg[green]}${sum}${reset_color} MBs of RAM."
else
echo "There are no processes with pattern '${fg[blue]}${app}${reset_color}' are running."
fi
fi
}
# -----------------------------------------------------------------------------------------
# Usage: extract_frame <filename>
# -----------------------------------------------------------------------------------------
extract_frame() {
echo "Extracting frame from $1 ..."
if [[ -f $1 ]]; then
mkdir -p frame
time ffmpeg -i $1 frame/frame%09d.bmp
cd frame
else
echo "'$1' is not a valid file"
fi
}
# -----------------------------------------------------------------------------------------
# Usage: gz <filename> | get gzipped size
# -----------------------------------------------------------------------------------------
gz() {
echo -n "\noriginal size (bytes): "
cat "$1" | wc -c
echo -n "\ngzipped size (bytes): "
gzip -c "$1" | wc -c
echo -n "\ngzipped -9 size (bytes): "
gzip -c -9 "$1" | wc -c
}
# -----------------------------------------------------------------------------------------
# Usage: extract <filename>
# -----------------------------------------------------------------------------------------
extract() {
echo Extracting $1 ...
if [[ -f $1 ]]; then
case $1 in
*.7z) 7z x $1 ;;
*.Z) uncompress $1 ;;
*.bz2) bunzip2 $1 ;;
*.gz) gunzip $1 ;;
*.rar) unrar x $1 ;;
*.tar) tar xvf $1 ;;
*.tar.bz2) tar xjvf $1 ;;
*.tar.gz) tar xzvf $1 ;;
*.tar.xz) tar xvf $1 ;;
*.tbz2) tar xjvf $1 ;;
*.tgz) tar xzvf $1 ;;
*.zip) unzip $1 ;;
*) echo "'$1' cannot be extracted via extract" ;;
esac
else
echo "'$1' is not a valid file"
fi
}
# -----------------------------------------------------------------------------------------
# Usage: compress <file> (<type>)
# -----------------------------------------------------------------------------------------
compress() {
if [[ -e $1 ]]; then
if [[ $2 ]]; then
case $2 in
bz2 | bzip2) bzip2 $1 ;;
gpg) gpg -e --default-recipient-self $1 ;;
gz | gzip) gzip $1 ;;
tar) tar -cvf $1.$2 $1 ;;
tar.Z) tar -Zcvf $1.$2 $1 ;;
tbz2 | tar.bz2) tar -jcvf $1.$2 $1 ;;
tgz | tar.gz) tar -zcvf $1.$2 $1 ;;
zip) zip -r $1.$2 $1 ;;
*)
echo "Error: $2 is not a valid compression type"
;;
esac
else
compress $1 tar.gz
fi
else
echo "File ('$1') does not exist!"
fi
}
sshlf() {
localport=$1
targethost=$2
targetport=$3
remoteaccount=$4
remotehost=$5
remotesshport=$6
keyfile=$7
if [[ "$#" -eq 5 ]]; then
echo "127.0.0.1:$localport --> $remotehost --> $targethost:$targetport"
ssh -gNfL $localport:$targethost:$targetport $remoteaccount@$remotehost
elif [[ "$#" -eq 6 ]]; then
echo "127.0.0.1:$localport --> $remotehost --> $targethost:$targetport"
ssh -p $remotesshport -gNfL $localport:$targethost:$targetport $remoteaccount@$remotehost
elif [[ "$#" -eq 7 ]]; then
echo "127.0.0.1:$localport --> $remotehost --> $targethost:$targetport"
ssh -i $keyfile -p $remotesshport -gNfL $localport:$targethost:$targetport $remoteaccount@$remotehost
else
echo "Usage: sshlf <localport> <targethost> <targetport> <remoteaccount> <remotehost> <remotesshport> <keyfile>"
echo "127.0.0.1:localport --> remotehost --> targethost:targetport"
fi
}
sshrf() {
localport=$1
remoteaccount=$2
remotehost=$3
remoteport=$4
remotesshport=$5
keyfile=$6
if [[ "$#" -eq 4 ]]; then
echo "$remotehost:$remoteport --> 127.0.0.1:$localport"
ssh -NfR $remoteport:127.0.0.1:$localport $remoteaccount@$remotehost
elif [[ "$#" -eq 5 ]]; then
echo "$remotehost:$remoteport --> 127.0.0.1:$localport"
ssh -p $remotesshport -NfR $remoteport:127.0.0.1:$localport $remoteaccount@$remotehost
elif [[ "$#" -eq 6 ]]; then
echo "$remotehost:$remoteport --> 127.0.0.1:$localport"
ssh -i $keyfile -p $remotesshport -NfR $remoteport:127.0.0.1:$localport $remoteaccount@$remotehost
else
echo "Usage: sshrf <localport> <remoteaccount> <remotehost> <remoteport> <remotesshport> <keyfile>"
echo "remotehost:remoteport --> 127.0.0.1:localport"
fi
}
# -----------------------------------------------------------------------------------------
# Usage: base64key <keyname> <keysize>
# -----------------------------------------------------------------------------------------
base64key() {
if [[ ( -n $1 && -n $2 ) ]]; then
keyname=$1
size=$2
time openssl rand -base64 -out $keyname $size
else
echo "Usage: base64key <keyname> <keysize>"
fi
}
rsa() {
if [[ $1 == "keygen" ]]; then
if [[ ( -n $2 && -n $3 ) ]]; then
pri=$2
size=$3
pub="$pri.pub"
time openssl genrsa -out $pri $size
time openssl rsa -in $pri -out $pub -outform PEM -pubout
else
echo "Usage: rsa keygen <keyname> <keysize>"
fi
elif [[ ( $1 == "encrypt" || $1 == "e" )]]; then
if [[ ( -n $2 && -n $3 && -n $4) ]]; then
pub=$2
infile=$3
outfile=$4
time openssl rsautl -encrypt -inkey $pub -pubin -in $infile -out $outfile
else
echo "Usage: rsa encrypt <pubkey> <infile> <outfile>"
fi
elif [[ ( $1 == "decrypt" || $1 == "d" ) ]]; then
if [[ ( -n $2 && -n $3 && -n $4) ]]; then
pri=$2
infile=$3
outfile=$4
time openssl rsautl -decrypt -inkey $pri -in $infile -out $outfile
else
echo "Usage: rsa decrypt <prikey> <infile> <outfile>"
fi
else
echo "Usage:"
echo "rsa keygen <keyname> <keysize>"
echo "rsa encrypt <pubkey> <infile> <outfile>"
echo "rsa decrypt <prikey> <infile> <outfile>"
fi
}
aes() {
if [[ ( $1 == "encrypt" || $1 == "e" ) ]]; then
if [[ ( -n $2 && -n $3 && -n $4 ) ]]; then
infile=$2
outfile=$3
keyfile=$4
#time openssl aes-256-cbc -a -salt -in $infile -out $outfile -kfile $keyfile
time openssl enc -aes-256-cbc -a -salt -in $infile -out $outfile -pass file:$keyfile
elif [[ ( -n $2 && -n $3 ) ]]; then
infile=$2
outfile=$3
#time openssl aes-256-cbc -a -salt -in $infile -out $outfile
time openssl enc -aes-256-cbc -a -salt -in $infile -out $outfile
else
echo "Usage:"
echo "aes encrypt <infile> <outfile>"
echo "aes encrypt <infile> <outfile> <keyfile>"
fi
elif [[ $1 == "decrypt" || $1 == "d" ]]; then
if [[ ( -n $2 && -n $3 && -n $4 ) ]]; then
infile=$2
outfile=$3
keyfile=$4
#time openssl aes-256-cbc -d -a -in $infile -out $outfile -kfile $keyfile
time openssl enc -aes-256-cbc -d -a -in $infile -out $outfile -pass file:$keyfile
elif [[ ( -n $2 && -n $3 ) ]]; then
infile=$2
outfile=$3
#time openssl aes-256-cbc -d -a -in $infile -out $outfile
time openssl enc -aes-256-cbc -d -a -in $infile -out $outfile
else
echo "Usage:"
echo "aes decrypt <infile> <outfile>"
echo "aes decrypt <infile> <outfile> <keyfile>"
fi
else
echo "Usage:"
echo "aes encrypt <infile> <outfile>"
echo "aes encrypt <infile> <outfile> <keyfile>"
echo "aes decrypt <infile> <outfile>"
echo "aes decrypt <infile> <outfile> <keyfile>"
fi
}
nerd-fonts-install() {
time mkdir -p ~/git
time rm -rf ~/git/nerd-fonts
time git clone https://github.com/ryanoasis/nerd-fonts ~/git/nerd-fonts
time ~/git/nerd-fonts/install.sh
}
nerd-fonts-update() {
if [[ -d ~/git/nerd-fonts ]]; then
time cd ~/git/nerd-fonts && time git pull
time ./install.sh
else
nerd-fonts-install
fi
}
edb-install() {
echo "=============================================="
echo "== edb - cross platform x86/x86-64 debugger =="
echo "=============================================="
currentdir=$(pwd)
if which apt-get &> /dev/null ; then
# install dependencies For Ubuntu >= 15.10
sudo apt-get install -y \
cmake \
build-essential \
libboost-dev \
libqt5xmlpatterns5-dev \
qtbase5-dev \
qt5-default \
libqt5svg5-dev \
libgraphviz-dev \
libcapstone-dev
elif which pacman &> /dev/null ; then
sudo pacman -S --needed qt4 boost boost-libs capstone graphviz
sudo pacman -S --needed $(pacman -Ssq qt | sort -u | grep -E "^qt5-")
fi
if [[ -d ~/git/edb-debugger ]]; then
time rm -rf ~/git/edb-debugger && cd ~/git
else
mkdir -p ~/git && cd ~/git
fi
time git clone --recursive https://github.com/eteran/edb-debugger.git
cd edb-debugger
mkdir build && cd build
time cmake -DCMAKE_INSTALL_PREFIX=/usr/local/ ..
time make && time sudo make install && time edb --version && cd $currentdir
}
plasma-install() {
echo "========================================================"
echo "== plasma - interactive disassembler for x86/ARM/MIPS =="
echo "========================================================"
if which apt-get &> /dev/null ; then
currentdir=$(pwd)
if [[ -d ~/git/plasma ]]; then
time rm -rf ~/git/plasma && cd ~/git
else
mkdir -p ~/git && cd ~/git
fi
time git clone https://github.com/plasma-disassembler/plasma
cd plasma && time ./install.sh && cd $currentdir
elif which pacman &> /dev/null ; then
if which pacaur &> /dev/null ; then
pacaur -S plasma-git
else
sudo pacman -S pacaur
pacaur -S plasma-git
fi
fi
}
yuzu-install() {
echo "======================================"
echo "== yuzu - Nintendo Switch Emulator =="
echo "======================================"
if which apt-get &> /dev/null ; then
sudo apt-get install build-essential clang cmake libc++-dev libcurl4-openssl-dev libqt5opengl5-dev libsdl2-2.0-0 libsdl2-dev qtbase5-dev sdl2
elif which pacman &> /dev/null ; then
sudo pacman -S --needed base-devel clang cmake libcurl-compat qt5 sdl2
fi
currentdir=$(pwd)
if [[ -d ~/git/yuzu ]]; then
time rm -rf ~/git/yuzu && cd ~/git
else
mkdir -p ~/git && cd ~/git
fi
time git clone --recursive https://github.com/yuzu-emu/yuzu
cd yuzu
mkdir build && cd build
cmake ../
make
sudo make install
}
# -----------------------------------------------------------------------------------------
# Usage: viewimg <filename> | display image in terminal
# -----------------------------------------------------------------------------------------
if which w3m &> /dev/null && [[ -f /usr/lib/w3m/w3mimgdisplay ]]; then
viewimg() {
w3m -o imgdisplay=/usr/lib/w3m/w3mimgdisplay -o ext_image_viewer=N $1
}
fi
# -----------------------------------------------------------------------------------------
# Usage: $ (tor-)transfer hello.txt | Function for upload file to https://transfer.sh/
# -----------------------------------------------------------------------------------------
tor-transfer() {
torIp=127.0.0.1
torPort=9050
if [ $# -eq 0 ]; then
echo -e "No arguments specified. Usage:\necho tor-transfer /tmp/test.md\ncat /tmp/test.md | tor-transfer test.md";
return 1;
fi
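# Only route the upload through the SOCKS proxy when the tor service is active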
torstatus=$(systemctl status tor | grep Active | cut -d":" -f2 | cut -d" " -f2)
if [[ -n "$torstatus" ]] && [[ "$torstatus" == "active" ]];then
tmpfile=$( mktemp -t transferXXX );
if tty -s; then
basefile=$(basename "$1" | sed -e 's/[^a-zA-Z0-9._-]/-/g');
curl --socks5-hostname ${torIp}:${torPort} --retry 3 --connect-timeout 60 --progress-bar --upload-file "$1" "https://transfer.sh/$basefile" >> $tmpfile;
else
curl --socks5-hostname ${torIp}:${torPort} --retry 3 --connect-timeout 60 --progress-bar --upload-file "-" "https://transfer.sh/$1" >> $tmpfile ;
fi;
echo ""
cat $tmpfile;
echo ""
rm -f $tmpfile;
else
echo "tor is inactive"
return 1;
fi
}
transfer() {
if [ $# -eq 0 ]; then
echo -e "No arguments specified. Usage:\necho transfer /tmp/test.md\ncat /tmp/test.md | transfer test.md";
return 1;
fi
tmpfile=$( mktemp -t transferXXX );
if tty -s; then
basefile=$(basename "$1" | sed -e 's/[^a-zA-Z0-9._-]/-/g');
curl --progress-bar --upload-file "$1" "https://transfer.sh/$basefile" >> $tmpfile;
else
curl --progress-bar --upload-file "-" "https://transfer.sh/$1" >> $tmpfile ;
fi;
echo ""
cat $tmpfile;
echo ""
rm -f $tmpfile;
}
# -----------------------------------------------------------------------------------------
# Docker functions
# -----------------------------------------------------------------------------------------
if which docker &> /dev/null ; then
docker_alias_stop_all_containers() { docker stop $(docker ps -a -q); }
docker_alias_remove_all_containers() { docker rm $(docker ps -a -q); }
docker_alias_remove_all_empty_images() { docker images | awk '{print $2 " " $3}' | grep '^<none>' | awk '{print $2}' | xargs -I{} docker rmi {}; }
docker_alias_docker_file_build() { docker build -t=$1 .; }
docker_alias_show_all_docker_related_alias() { alias | grep 'docker' | sed "s/^\([^=]*\)=\(.*\)/\1 => \2/"| sed "s/['|\']//g" | sort; }
docker_alias_bash_into_running_container() { docker exec -it $(docker ps -aqf "name=$1") bash; }
fi
# -----------------------------------------------------------------------------------------
# nethack NAO
# -----------------------------------------------------------------------------------------
nethack-nao() {
if [[ -z "$DGLAUTH" ]]; then
echo "DGLAUTH is empty"
vared -p "value : " -c value
export DGLAUTH="$value"
ssh -Y -o SendEnv=DGLAUTH [email protected]
else
ssh -Y -o SendEnv=DGLAUTH [email protected]
fi
}
nethack-nao-game-status() {
if [[ -z "$1" ]]; then
url="https://alt.org/nethack/mostrecent.php"
if which firefox &> /dev/null ; then
firefox $url
else
echo "$url"
fi
else
url="https://alt.org/nethack/player-all.php?player=$1"
if which firefox &> /dev/null ; then
firefox $url
else
echo "$url"
fi
fi
}
# -----------------------------------------------------------------------------------------
# Dungeon Crawl Stone Soup (crawl.jorgrun.rocks [Montreal, Canada])
# -----------------------------------------------------------------------------------------
crawl-cjr() {
if [[ -f ~/.ssh/jorgrun_key ]]; then
ssh -Y -i ~/.ssh/jorgrun_key [email protected]
else
curl -fLo ~/.ssh/jorgrun_key --create-dirs https://crawl.jorgrun.rocks/ssh/jorgrun_key
chmod 600 ~/.ssh/jorgrun_key
crawl-cjr
fi
}
# -----------------------------------------------------------------------------------------
# Dungeon Crawl Stone Soup (crawl.akrasiac.org [Arizona, United States of America])
# -----------------------------------------------------------------------------------------
crawl-cao() {
if [[ -f ~/.ssh/cao_key ]]; then
ssh -Y -i ~/.ssh/cao_key [email protected]
else
curl -fLo ~/.ssh/cao_key --create-dirs http://crawl.develz.org/cao_key
chmod 600 ~/.ssh/cao_key
crawl-cao
fi
}
|
T6705/dotfiles
|
.zsh/functions.zsh
|
Shell
|
mit
| 26,166 |
#!/bin/bash
# Script Name: AtoMiC Organizr Menu
SUBCHOICE=$(whiptail --title "AtoMiC Toolkit - Manage Organizr" \
--menu "What would you like to do?" --backtitle "$BACKTITLE" \
--fb --cancel-button "Exit" $LINES $COLUMNS "$NETLINES" \
"Install" "Install Organizr" \
"Uninstall" "Uninstall Organizr" \
"Manual Update" "Manually update Organizr" \
"Access Details" "View Organizr access details" \
"Go Back" "Back to Main Menu" 3>&1 1>&2 2>&3)
exitstatus=$?
if [[ $exitstatus = 0 ]]; then
source "$SCRIPTPATH/organizr/organizr-constants.sh"
case "$SUBCHOICE" in
"Install" ) source "$SCRIPTPATH/$APPNAME/$APPNAME-installer.sh" ;;
"Uninstall" ) source "$SCRIPTPATH/$APPNAME/$APPNAME-uninstaller.sh" ;;
"Manual Update" ) source "$SCRIPTPATH/$APPNAME/$APPNAME-update.sh" ;;
"Access Details" ) source "$SCRIPTPATH/inc/app-access-details.sh" ;;
"Go Back" ) source "$SCRIPTPATH/menus/menu-administration-tools.sh" ;;
*) source "$SCRIPTPATH/inc/invalid-option.sh" ;;
esac
else
source "$SCRIPTPATH/inc/thankyou.sh"
echo
sleep 1
exit 0
fi
|
htpcBeginner/AtoMiC-ToolKit
|
organizr/organizr-menu.sh
|
Shell
|
mit
| 1,105 |
#!/bin/sh
# base16-shell (https://github.com/chriskempson/base16-shell)
# Base16 Shell template by Chris Kempson (http://chriskempson.com)
# Summerfruit Dark scheme by Christopher Corley (http://christop.club/)
color00="15/15/15" # Base 00 - Black
color01="FF/00/86" # Base 08 - Red
color02="00/C9/18" # Base 0B - Green
color03="AB/A8/00" # Base 0A - Yellow
color04="37/77/E6" # Base 0D - Blue
color05="AD/00/A1" # Base 0E - Magenta
color06="1F/AA/AA" # Base 0C - Cyan
color07="D0/D0/D0" # Base 05 - White
color08="50/50/50" # Base 03 - Bright Black
color09=$color01 # Base 08 - Bright Red
color10=$color02 # Base 0B - Bright Green
color11=$color03 # Base 0A - Bright Yellow
color12=$color04 # Base 0D - Bright Blue
color13=$color05 # Base 0E - Bright Magenta
color14=$color06 # Base 0C - Bright Cyan
color15="FF/FF/FF" # Base 07 - Bright White
color16="FD/89/00" # Base 09
color17="CC/66/33" # Base 0F
color18="20/20/20" # Base 01
color19="30/30/30" # Base 02
color20="B0/B0/B0" # Base 04
color21="E0/E0/E0" # Base 06
color_foreground="D0/D0/D0" # Base 05
color_background="15/15/15" # Base 00
if [ -n "$TMUX" ]; then
# Tell tmux to pass the escape sequences through
# (Source: http://permalink.gmane.org/gmane.comp.terminal-emulators.tmux.user/1324)
put_template() { printf '\033Ptmux;\033\033]4;%d;rgb:%s\033\033\\\033\\' $@; }
put_template_var() { printf '\033Ptmux;\033\033]%d;rgb:%s\033\033\\\033\\' $@; }
put_template_custom() { printf '\033Ptmux;\033\033]%s%s\033\033\\\033\\' $@; }
elif [ "${TERM%%[-.]*}" = "screen" ]; then
# GNU screen (screen, screen-256color, screen-256color-bce)
put_template() { printf '\033P\033]4;%d;rgb:%s\007\033\\' $@; }
put_template_var() { printf '\033P\033]%d;rgb:%s\007\033\\' $@; }
put_template_custom() { printf '\033P\033]%s%s\007\033\\' $@; }
elif [ "${TERM%%-*}" = "linux" ]; then
put_template() { [ $1 -lt 16 ] && printf "\e]P%x%s" $1 $(echo $2 | sed 's/\///g'); }
put_template_var() { true; }
put_template_custom() { true; }
else
put_template() { printf '\033]4;%d;rgb:%s\033\\' $@; }
put_template_var() { printf '\033]%d;rgb:%s\033\\' $@; }
put_template_custom() { printf '\033]%s%s\033\\' $@; }
fi
# 16 color space
put_template 0 $color00
put_template 1 $color01
put_template 2 $color02
put_template 3 $color03
put_template 4 $color04
put_template 5 $color05
put_template 6 $color06
put_template 7 $color07
put_template 8 $color08
put_template 9 $color09
put_template 10 $color10
put_template 11 $color11
put_template 12 $color12
put_template 13 $color13
put_template 14 $color14
put_template 15 $color15
# 256 color space
put_template 16 $color16
put_template 17 $color17
put_template 18 $color18
put_template 19 $color19
put_template 20 $color20
put_template 21 $color21
# foreground / background / cursor color
if [ -n "$ITERM_SESSION_ID" ]; then
# iTerm2 proprietary escape codes
put_template_custom Pg D0D0D0 # foreground
put_template_custom Ph 151515 # background
put_template_custom Pi D0D0D0 # bold color
put_template_custom Pj 303030 # selection color
put_template_custom Pk D0D0D0 # selected text color
put_template_custom Pl D0D0D0 # cursor
put_template_custom Pm 151515 # cursor text
else
put_template_var 10 $color_foreground
if [ "$BASE16_SHELL_SET_BACKGROUND" != false ]; then
put_template_var 11 $color_background
if [ "${TERM%%-*}" = "rxvt" ]; then
put_template_var 708 $color_background # internal border (rxvt)
fi
fi
put_template_custom 12 ";7" # cursor (reverse video)
fi
# clean up
unset -f put_template
unset -f put_template_var
unset -f put_template_custom
unset color00
unset color01
unset color02
unset color03
unset color04
unset color05
unset color06
unset color07
unset color08
unset color09
unset color10
unset color11
unset color12
unset color13
unset color14
unset color15
unset color16
unset color17
unset color18
unset color19
unset color20
unset color21
unset color_foreground
unset color_background
|
tulhan/.files
|
config/base16-shell/scripts/base16-summerfruit-dark.sh
|
Shell
|
cc0-1.0
| 3,972 |
#!/bin/bash -l
model=$1
outpath=$2
name=$3
ml R/3.5.0
# Get script path
if [ -n "$SLURM_JOB_ID" ]; then
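# Under SLURM, recover the submitted script's path from 'scontrol show job' (Command=/path/to/script args)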
toolpath=$(scontrol show job $SLURM_JOB_ID | awk -F= '/Command=/{print $2}' | cut -f1 -d' ')
toolpath=`dirname $toolpath`
else
toolpath=$(dirname "$0")
fi
# Run R
R -e "source('${toolpath}/plot_csvs_simulation.R'); csvs_test('${model}', '${outpath}/${name}.test.csv', output_outliers='${outpath}/${name}.test.outliers.txt')"
|
babelomics/pvs
|
csvs-utils/outlier_detection/launch_csvs_get_outliers.sh
|
Shell
|
gpl-2.0
| 440 |
#!/bin/sh
#desc: Extract or compress initrd
# Variables
SCRIPTSDIR="scripts"
. $SCRIPTSDIR/sdk_variables.sh
# Check
if ! test -f $INITRDIMG; then
echo "ERROR: initrd image don't exist"
exit 1
fi
case $1 in
extract )
if test -d $INITRDDIR; then
echo "WARN: initrd is already extracted"
else
# Extracting initrd
cd $TMPDIR
tar xfJp ../$INITRDIMG
cd ..
fi
# Installing qemu for RaspberryPi
if [ "$ARCH" != "i386" ]; then
if
cp /usr/bin/qemu-arm-static $INITRDDIR/usr/bin
then
echo "INFO: Installing RaspberryPi emulator ..."
else
echo "ERROR: You need qemu-arm-static to create RaspberryPi distro"
exit 1
fi
fi
;;
make )
echo "INFO: Creating images ..."
# Creating image basic structure
rm $TARGETDIR/initrd.gz 2>/dev/null
mkdir -p $IMAGESDIR 2>/dev/null
# Creating opendomo configurations
echo 'LABEL="opendomodistro"' > $TARGETDIR/opendomo.cfg
echo 'CONFDEVICE="1"' >> $TARGETDIR/opendomo.cfg
echo 'SYSDEVICE="1"' >> $TARGETDIR/opendomo.cfg
echo 'HOMEDEVICE="1"' >> $TARGETDIR/opendomo.cfg
# Adding opendomo version
echo "PRETTY_NAME=\"Opendomo OS version $OD_VERSION\"" > $INITRDDIR/etc/os-release
echo "NAME=\"Opendomo OS\"" >> $INITRDDIR/etc/os-release
echo "VERSION_ID=$OD_VERSION" >> $INITRDDIR/etc/os-release
echo "VERSION=$OD_VERSION" >> $INITRDDIR/etc/os-release
echo "ID=opendomo" >> $INITRDDIR/etc/os-release
echo "ANSI_COLOR=\"1;31\"" >> $INITRDDIR/etc/os-release
echo "HOME_URL=http://es.opendomo.org/" >> $INITRDDIR/etc/os-release
echo "SUPPORT_URL=http://www.opendomo.com/wiki/index.php?title=P%C3%A1gina_Principal" >> $INITRDDIR/etc/os-release
echo "BUG_REPORT_URL=https://github.com/opalenzuela/opendomo/issues" >> $INITRDDIR/etc/os-release
echo "$OD_VERSION" >> $INITRDDIR/etc/VERSION
# Creating raw image files
if ! test -f $IMAGESDIR/$DEFCHANGESIMG.gz; then
if dd if=/dev/zero of=$IMAGESDIR/$DEFCHANGESIMG bs=1024 count=1500000 >/dev/null 2>/dev/null; then
# Creating fs and copy files
mkfs.ext2 -F $IMAGESDIR/$DEFCHANGESIMG >/dev/null 2>/dev/null
mount -o loop $IMAGESDIR/$DEFCHANGESIMG $MOUNTDIR 2>/dev/null
mkdir -p $MOUNTDIR/usr && mv $INITRDDIR/usr/share $MOUNTDIR/usr/ 2>/dev/null
# Unmount
while ! umount $MOUNTDIR 2>/dev/null; do
sleep 1
done
# Creating default changes and custom changes image
cp $IMAGESDIR/$DEFCHANGESIMG $IMAGESDIR/$CSTCHANGESIMG
gzip $IMAGESDIR/$DEFCHANGESIMG
# Creating home image
if ! test -f $IMAGESDIR/$HOMEFSIMG; then
dd if=/dev/zero of=$IMAGESDIR/$HOMEFSIMG bs=1024 count=10000 2>/dev/null
mkfs.ext2 -F $IMAGESDIR/$HOMEFSIMG >/dev/null 2>/dev/null
fi
fi
fi
# Checking initrd size
INITRDSIZE=`du $INITRDDIR | tail -n1 | sed 's/\t.*//'`
SIZE=`expr $INITRDSIZE + $FREESIZE`
if [ "$ARCH" != "i386" ]; then
# Clean emulator for RaspberryPi
rm $INITRDDIR/usr/bin/qemu-arm-static 2>/dev/null
# Copy RasberryPi firmware to boot
cp $RPIFILESDIR/bootcode.bin $INITRDDIR/boot/
cp $RPIFILESDIR/start* $INITRDDIR/boot/
cp $RPIFILESDIR/fixup* $INITRDDIR/boot/
# Creating RaspberryPi boot config file
echo "rw root=/dev/ram0 ramdisk_size=$SIZE quiet rootwait" >$RPIFILESDIR/cmdline.txt
else
# Creating syslinux boot configuration files
echo "DEFAULT linux initrd=initrd.gz ramdisk_size=$SIZE rw root=/dev/ram0 quiet" >$ISOFILESDIR/syslinux.cfg
fi
# Creating initrd
if dd if=/dev/zero of=$TARGETDIR/initrd bs=1024 count=$SIZE >/dev/null 2>/dev/null; then
mkfs.ext2 -F $TARGETDIR/initrd >/dev/null 2>/dev/null
mount -o loop $TARGETDIR/initrd $MOUNTDIR
cp -rp $INITRDDIR/* $MOUNTDIR
# Force home directories permissions
chmod 700 $MOUNTDIR/home/admin
chown -R 1000:1000 $MOUNTDIR/home/admin
# Unmount initrd and compress
while ! umount $MOUNTDIR 2>/dev/null; do
sleep 1
done
gzip $TARGETDIR/initrd
fi
;;
esac
exit 0
|
opalenzuela/opendomo
|
scripts/build_initrd.sh
|
Shell
|
gpl-3.0
| 3,947 |
#!/bin/bash
set -e
pegasus_lite_version_major="4"
pegasus_lite_version_minor="7"
pegasus_lite_version_patch="0"
pegasus_lite_enforce_strict_wp_check="true"
pegasus_lite_version_allow_wp_auto_download="true"
. pegasus-lite-common.sh
pegasus_lite_init
# cleanup in case of failures
trap pegasus_lite_signal_int INT
trap pegasus_lite_signal_term TERM
trap pegasus_lite_exit EXIT
echo -e "\n################################ Setting up workdir ################################" 1>&2
# work dir
export pegasus_lite_work_dir=$PWD
pegasus_lite_setup_work_dir
echo -e "\n###################### figuring out the worker package to use ######################" 1>&2
# figure out the worker package to use
pegasus_lite_worker_package
echo -e "\n##################### setting the xbit for executables staged #####################" 1>&2
# set the xbit for any executables staged
/bin/chmod +x example_workflow-mediancpu_0-1.0
echo -e "\n############################# executing the user tasks #############################" 1>&2
# execute the tasks
set +e
pegasus-kickstart -n example_workflow::mediancpu_0:1.0 -N ID0000004 -R condorpool -L example_workflow -T 2016-10-25T01:22:05+00:00 ./example_workflow-mediancpu_0-1.0
job_ec=$?
set -e
|
elainenaomi/sciwonc-dataflow-examples
|
dissertation2017/Experiment 1A/logs/w-05/20161025T012206+0000/00/00/mediancpu_0_ID0000004.sh
|
Shell
|
gpl-3.0
| 1,237 |
#!/bin/bash
PROCNAME='nginx'
DAEMON='/usr/sbin/nginx'
DAEMON_ARGS=( -g 'daemon off;' )
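# Decide what to exec:
#  - no arguments              -> run the daemon with its default arguments
#  - first argument is a flag  -> pass the given flags straight to the daemon
#  - first argument is "nginx" -> drop it and use any remaining flags
#    (falling back to the default arguments when none are left)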
if [ -z "$1" ]; then
set -- "${DAEMON}" "${DAEMON_ARGS[@]}"
elif [ "${1:0:1}" = '-' ]; then
set -- "${DAEMON}" "$@"
elif [ "${1}" = "${PROCNAME}" ]; then
shift
if [ -n "${1}" ]; then
set -- "${DAEMON}" "$@"
else
set -- "${DAEMON}" "${DAEMON_ARGS[@]}"
fi
fi
if [ "$1" = "${DAEMON}" ]; then
if [ ! -f /etc/ssl/certs/ssl-cert-snakeoil.pem -o ! -f /etc/ssl/private/ssl-cert-snakeoil.key ]; then
dpkg-reconfigure ssl-cert
fi
fi
exec "$@"
|
spatialy/docker-images
|
nginx/docker-entrypoint.sh
|
Shell
|
gpl-3.0
| 552 |
#!/bin/bash
#
# install.sh -- install script for the binary distribution
#
# Copyright 2013,2014 James Fidell ([email protected])
#
# License:
#
# This file is part of the Open Astro Project.
#
# The Open Astro Project is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# The Open Astro Project is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the Open Astro Project. If not, see
# <http://www.gnu.org/licenses/>.
#
uid=`id -u`
if [ $uid -ne 0 ]; then
echo "This script must be run as root"
exit 1
fi
mkdir -p /usr/local/openastro/bin
mkdir -p /usr/local/openastro/lib
rsync -a lib/ /usr/local/openastro/lib
ln -s /usr/local/openastro/lib/*.so /usr/local/lib
cp bin/oacapture /usr/local/openastro/bin
chmod 755 /usr/local/openastro/bin/oacapture
mkdir -p /usr/local/bin
ln -sf /usr/local/openastro/bin/oacapture /usr/local/bin
for rules in udev/*.rules
do
r=`basename $rules`
target=/etc/udev/rules.d/$r
replace=1
if [ -f $target ]
then
cmp -s $rules $target
if [ $? -eq 0 ]
then
# file exists and is the same
replace=0
else
# file exists and is different
n=1
while [ -f $target.$n ]
do
n=$(( $n + 1 ))
done
cp $target $target.$n
fi
fi
if [ $replace -ne 0 ]
then
cp $rules $target
fi
done
echo "install complete"
echo "For QHY cameras, remember to add yourself to the 'users' group and log in again"
|
openastroproject/openastro
|
bin/install-binaries.sh
|
Shell
|
gpl-3.0
| 1,863 |
#!/bin/sh
# build-swig-wrapper-classes.sh
#
# For each scripting language liblldb supports, we need to create the
# appropriate Script Bridge wrapper classes for that language so that
# users can call Script Bridge functions from within the script interpreter.
#
# We use SWIG to help create the appropriate wrapper classes/functions for
# the scripting language. In some cases the file generated by SWIG may
# need some tweaking before it is completely ready to use.
# Below are the arguments/parameters that this script takes (and passes along
# to all the language-specific build scripts that it calls):
#
# SRC_ROOT is the root of the lldb source tree.
# TARGET_DIR is where the lldb framework/shared library gets put.
# CONFIG_BUILD_DIR is where the build-swig-Python-LLDB.sh shell script
# put the lldb.py file it was generated from running SWIG.
# PREFIX is where non-Darwin systems want to put the .py and .so
# files so that Python can find them automatically.
# debug_flag (optional) determines whether or not this script outputs
# additional information when running.
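#
# A hypothetical invocation (paths are illustrative only, not taken from the
# build system):
#   ./build-swig-wrapper-classes.sh ~/src/lldb ~/build/lldb ~/build/lldb/config /usr/local -debug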
SRC_ROOT=$1
TARGET_DIR=$2
CONFIG_BUILD_DIR=$3
PREFIX=$4
shift 4
#
# Check to see if we are in debug-mode or not.
#
if [ -n "$1" -a "$1" = "-debug" ]
then
debug_flag="$1"
Debug=1
shift
else
debug_flag=""
Debug=0
fi
#
# Check to see if we were called from the Makefile system. If we were, check
# if the caller wants swig to generate a dependency file.
#
if [ -n "$1" -a "$1" = "-m" ]
then
makefile_flag="$1"
shift
if [ -n "$1" -a "$1" = "-M" ]
then
dependency_flag="$1"
shift
else
dependency_flag=""
fi
else
makefile_flag=""
dependency_flag=""
fi
#
# Verify that 'lldb.swig' exists.
#
if [ ! -f ${SRC_ROOT}/scripts/lldb.swig ]
then
echo Error: unable to find file 'lldb.swig' >&2
exit 1
fi
if [ $Debug -eq 1 ]
then
echo "Found lldb.swig file"
fi
#
# Next look for swig
#
SWIG=`which swig`
if [ ! -x "$SWIG" -a -f /usr/bin/swig ]
then
SWIG=/usr/bin/swig
else
if [ -f /usr/local/bin/swig ]
then
SWIG=/usr/local/bin/swig
fi
fi
if [ ${SWIG}a = a ]
then
echo Error: could not find the swig binary
exit 1
fi
#
# For each scripting language, make sure the build script for that language
# exists, and if so, call it.
#
# For now the only language we support is Python, but we expect this to
# change.
languages="Python"
cwd=${SRC_ROOT}/scripts
for curlang in $languages
do
if [ $Debug -eq 1 ]
then
echo "Current language is $curlang"
fi
if [ ! -d "$cwd/$curlang" ]
then
echo "Error: unable to find $curlang script sub-dirctory" >&2
continue
else
if [ $Debug -eq 1 ]
then
echo "Found $curlang sub-directory"
fi
cd $cwd/$curlang
filename="./build-swig-${curlang}.sh"
if [ ! -f $filename ]
then
echo "Error: unable to find swig build script for $curlang: $filename" >&2
continue
else
if [ $Debug -eq 1 ]
then
echo "Found $curlang build script."
echo "Executing $curlang build script..."
fi
./build-swig-${curlang}.sh "$SRC_ROOT" "$TARGET_DIR" "$CONFIG_BUILD_DIR" "${PREFIX}" "${debug_flag}" "${SWIG}" "${makefile_flag}" "${dependency_flag}" || exit $?
fi
fi
done
|
s20121035/rk3288_android5.1_repo
|
external/lldb/scripts/build-swig-wrapper-classes.sh
|
Shell
|
gpl-3.0
| 3,454 |
#!/bin/bash
# sample script for testing the basic features of nwpconf package
# setup common to user scripts
# basic variables
export NWPCONFDIR=$PWD/conf
export NWPCONFBINDIR=$PWD/../bin
export NWPCONF=production/dailymodelrun/forecast
# source the main library module
set -e
. $NWPCONFBINDIR/nwpconf.sh
# end of setup
# generate file from template
conf_template modelrun.conf
|
ARPA-SIMC/nwpconf
|
examples/testenv.sh
|
Shell
|
gpl-3.0
| 380 |
util_dir="$(dirname $(readlink -f $BASH_SOURCE))"
hookit_dir="$(readlink -f ${util_dir}/../../src)"
payloads_dir=$(readlink -f ${util_dir}/../payloads)
payload() {
cat ${payloads_dir}/${1}.json
}
run_hook() {
container=$1
hook=$2
payload=$3
docker exec \
$container \
/opt/nanobox/hooks/$hook "$payload"
}
start_container() {
name=$1
docker run \
--name=$name \
-d \
-e "PATH=$(path)" \
--privileged \
nanobox/redis:$VERSION
}
stop_container() {
docker stop $1
docker rm $1
}
path() {
paths=(
"/opt/gonano/sbin"
"/opt/gonano/bin"
"/opt/gonano/bin"
"/usr/local/sbin"
"/usr/local/bin"
"/usr/sbin"
"/usr/bin"
"/sbin"
"/bin"
)
path=""
for dir in ${paths[@]}; do
if [[ "$path" != "" ]]; then
path="${path}:"
fi
path="${path}${dir}"
done
echo $path
}
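# Example of composing these helpers in a test (illustrative; the container
# name, hook name and payload file are assumptions, not taken from this repo):
#   start_container redis-test
#   run_hook redis-test configure "$(payload default-config)"
#   stop_container redis-test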
|
pagodabox/nanobox-docker-redis
|
test/util/docker.sh
|
Shell
|
mpl-2.0
| 869 |
#!/bin/bash
CHANGED_FILES=$(git diff-tree --no-commit-id --name-only -r HEAD)
CHANGES_IN_STORYBOOK="client/.storybook/"
CHANGES_IN_APP_COMPONENTS="client/app/components/"
CHANGES_IN_APP_STORIES="client/app/stories/"
CHANGES_IN_APP_STYLES="client/app/styles/"
CHANGES_IN_PACKAGE_JSON="client/package.json"
CHANGES_IN_RAILS_ASSETS="app/assets/"
CHANGES_IN_RAILS_ASSETS="config/locales/"
if [[ "$CHANGED_FILES" =~ "$CHANGES_IN_STORYBOOK" || "$CHANGED_FILES" =~ "$CHANGES_IN_APP_COMPONENTS" || "$CHANGED_FILES" =~ "$CHANGES_IN_APP_STORIES" || "$CHANGED_FILES" =~ "$CHANGES_IN_APP_STYLES" || "$CHANGED_FILES" =~ "$CHANGES_IN_PACKAGE_JSON" || "$CHANGED_FILES" =~ "$CHANGES_IN_RAILS_ASSETS" || "$CHANGED_FILES" =~ "$CHANGES_IN_RAILS_LOCALES" ]]; then
if [[ "$PWD" != */client ]]; then
cd client || exit
fi
echo "Deploy changes to design.if-me.org"
yarn build:storybook
mv .out .public
yarn run surge --project .public --domain design.if-me.org
else
echo "No changes to deploy to design.if-me.org"
exit
fi
|
julianguyen/ifme
|
client/.storybook/deploy.sh
|
Shell
|
agpl-3.0
| 1,021 |
#!/usr/bin/env bash
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
set -e
set -x
N_JOBS=$(grep -c ^processor /proc/cpuinfo)
echo ""
echo "Bazel will use ${N_JOBS} concurrent job(s)."
echo ""
# Run configure.
export TF_NEED_GCP=0
export TF_NEED_HDFS=0
export TF_NEED_CUDA=0
export PYTHON_BIN_PATH=`which python3`
yes "" | ./configure
# Run bazel test command. Double test timeouts to avoid flakes.
bazel test --test_tag_filters=-gpu,-benchmark-test --test_lang_filters=py -k \
--jobs=${N_JOBS} --test_timeout 300,450,1200,3600 --build_tests_only \
--test_output=errors -- \
//tensorflow/contrib/...
|
taknevski/tensorflow-xsmm
|
tensorflow/tools/ci_build/linux/cpu/run_py3_contrib.sh
|
Shell
|
apache-2.0
| 1,259 |
#!/bin/bash
vips_site=https://github.com/jcupitt/libvips/releases/download
version=$VIPS_VERSION_MAJOR.$VIPS_VERSION_MINOR.$VIPS_VERSION_MICRO
set -e
# do we already have the correct vips built? early exit if yes
# we could check the configure params as well I guess
if [ -d "$HOME/vips/bin" ]; then
installed_version=$($HOME/vips/bin/vips --version)
escaped_version="$VIPS_VERSION_MAJOR\.$VIPS_VERSION_MINOR\.$VIPS_VERSION_MICRO"
echo "Need vips-$version"
echo "Found $installed_version"
if [[ "$installed_version" =~ ^vips-$escaped_version ]]; then
echo "Using cached directory"
exit 0
fi
fi
rm -rf $HOME/vips
wget $vips_site/v$version/vips-$version.tar.gz
tar xf vips-$version.tar.gz
cd vips-$version
CXXFLAGS=-D_GLIBCXX_USE_CXX11_ABI=0 ./configure --prefix=$HOME/vips $*
make && make install
|
drthomas21/WordPress_Tutorial
|
wordpress_htdocs/services/image-service/vendor/jcupitt/vips/install-vips.sh
|
Shell
|
apache-2.0
| 811 |
#!/bin/bash
set -euo pipefail
# Ensure the state and log directories exist
mkdir -p /var/lib/mysql
mkdir -p /var/lib/nginx
mkdir -p /var/log
mkdir -p /var/log/mysql
mkdir -p /var/log/nginx
# Wipe /var/run, since pidfiles and socket files from previous launches should go away
# TODO someday: I'd prefer a tmpfs for these.
rm -rf /var/run
mkdir -p /var/run
rm -rf /var/tmp
mkdir -p /var/tmp
mkdir -p /var/run/mysqld
UWSGI_SOCKET_FILE=/var/run/uwsgi.sock
# Ensure mysql tables created
HOME=/etc/mysql /usr/bin/mysql_install_db --force
# Spawn mysqld
HOME=/etc/mysql /usr/sbin/mysqld &
MYSQL_SOCKET_FILE=/var/run/mysqld/mysqld.sock
# Wait for mysql to bind its socket
while [ ! -e $MYSQL_SOCKET_FILE ] ; do
echo "waiting for mysql to be available at $MYSQL_SOCKET_FILE"
sleep .2
done
# Spawn uwsgi
HOME=/var uwsgi \
--socket $UWSGI_SOCKET_FILE \
--plugin python \
--virtualenv /opt/app/env \
--wsgi-file /opt/app/main.py &
# Wait for uwsgi to bind its socket
while [ ! -e $UWSGI_SOCKET_FILE ] ; do
echo "waiting for uwsgi to be available at $UWSGI_SOCKET_FILE"
sleep .2
done
# Start nginx.
/usr/sbin/nginx -c /opt/app/.sandstorm/service-config/nginx.conf -g "daemon off;"
|
ndarilek/vagrant-spk
|
stacks/uwsgi/launcher.sh
|
Shell
|
apache-2.0
| 1,210 |
mpicc -o application-MPI main.c
mpirun -hostfile my_hostfile -np 8 application-MPI
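# Note: my_hostfile (referenced above) is assumed to list the cluster hosts,
# one per line, e.g. (hypothetical):
#   node01 slots=4
#   node02 slots=4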
|
javierip/parallel-code-examples
|
05-clusters/01-mpi/05-hello-MPI-cluster/run.sh
|
Shell
|
apache-2.0
| 85 |
#!/bin/bash
set -e
usage() {
cat << EOF
Usage: ./run-job.sh -m MainClass [-c Twitter consumer key] [-k Twitter secret key] [-t Twitter access token] [-s Twitter access token secret] [-d duration(s)] [-v connector Scala version]
-m Main class of the job
-c Twitter consumer key
-k Twitter secret key
-t Twitter access token
-s Twitter acess token secret
-d Maximum duration in seconds to run the job (default: -1, run forever)
-v Connector Scala version (2.10 or 2.11, default 2.10)
-h help
EOF
}
while getopts ":k:v:s:d:t:c:m:h" o; do
case "${o}" in
h) usage; exit 0;;
k)
k=${OPTARG}
;;
v)
v=${OPTARG}
;;
s)
s=${OPTARG}
;;
d)
d=${OPTARG}
;;
t)
t=${OPTARG}
;;
c)
c=${OPTARG}
;;
m)
m=${OPTARG}
;;
*)
usage; exit 0;;
esac
done
shift $((OPTIND-1))
if [ ! "$m" ]
then
echo "ERROR: Main class not specified"
usage
exit 1
fi
CLASSNAME=${m}
DURATION=${d:--1}
SCALA_VERSION=${v:-2.10}
echo "Obtaining Spark master"
SPARK_MASTER_NAME="sparkMaster"
INFINISPAN_NAME="ispn-1"
STATE=$(docker inspect --format="{{ .State.Running }}" $SPARK_MASTER_NAME || exit 1;)
if [ "$STATE" == "false" ]
then
echo "Docker containers not started, exiting..."
exit 1
fi
SPARK_MASTER="$(docker inspect --format '{{ .NetworkSettings.IPAddress }}' $SPARK_MASTER_NAME)"
INFINISPAN_MASTER="$(docker inspect --format '{{ .NetworkSettings.IPAddress }}' $INFINISPAN_NAME)"
echo "Submitting the job"
docker exec -it $SPARK_MASTER_NAME /usr/local/spark/bin/spark-submit --driver-java-options "-Dtwitter4j.oauth.consumerKey=$c -Dtwitter4j.oauth.consumerSecret=$k -Dtwitter4j.oauth.accessToken=$t -Dtwitter4j.oauth.accessTokenSecret=$s" --master spark://$SPARK_MASTER:7077 --class $CLASSNAME /usr/local/code/scala-$SCALA_VERSION/infinispan-spark-twitter.jar ${INFINISPAN_MASTER} ${DURATION}
|
gustavonalle/infinispan-spark
|
examples/twitter/run-job.sh
|
Shell
|
apache-2.0
| 2,060 |
#!/usr/bin/env bash
usage()
{
echo "Runs our integration suite on Linux"
echo "usage: cibuild.sh [options]"
echo ""
echo "Options"
echo " --debug Build Debug (default)"
echo " --release Build Release"
echo " --nocache Force download of toolsets"
}
BUILD_CONFIGURATION=Debug
USE_CACHE=true
# LTTNG is the logging infrastructure used by coreclr. Need this variable set
# so it doesn't output warnings to the console.
export LTTNG_HOME=$HOME
export MONO_THREADS_PER_CPU=50
# There are some stability issues that are causing Jenkins builds to fail at an
# unacceptable rate. To temporarily work around that we are going to retry the
# unstable tasks a number of times.
RETRY_COUNT=5
while [[ $# > 0 ]]
do
opt="$1"
case $opt in
-h|--help)
usage
exit 1
;;
--debug)
BUILD_CONFIGURATION=Debug
shift 1
;;
--release)
BUILD_CONFIGURATION=Release
shift 1
;;
--nocache)
USE_CACHE=false
shift 1
;;
*)
usage
exit 1
;;
esac
done
run_make()
{
local is_good=false
MAKE="make"
if [[ $OSTYPE == *bsd* ]]; then
MAKE="gmake"
fi
for i in `seq 1 $RETRY_COUNT`
do
$MAKE "$@" BUILD_CONFIGURATION=$BUILD_CONFIGURATION
if [ $? -eq 0 ]; then
is_good=true
break
fi
echo Build retry $i
done
if [ "$is_good" != "true" ]; then
echo Build failed
exit 1
fi
}
if [ "$CLEAN_RUN" == "true" ]; then
echo Clean out the enlistment
git clean -dxf .
fi
if [ "$USE_CACHE" == "false" ]; then
echo Clean out the toolsets
make clean_toolset
fi
echo Building Bootstrap
run_make bootstrap
echo Building CrossPlatform.sln
run_make all BOOTSTRAP=true BUILD_LOG_PATH=Binaries/Build.log
make test
|
antonssonj/roslyn
|
cibuild.sh
|
Shell
|
apache-2.0
| 1,914 |
cat Resizable.js MainMenu.js LevelSelect.js GameControl.js Main.js > game.js
|
kaydensigh/bib
|
ivank_0_8/game/join.sh
|
Shell
|
apache-2.0
| 77 |
TENANT=$1
HOSTNAME="dba-postgres-prod-32.ist.berkeley.edu port=5307 sslmode=prefer"
#HOSTNAME="dba-postgres-dev-32.ist.berkeley.edu port=5107 sslmode=prefer"
USERNAME="nuxeo_${TENANT}"
DATABASE="${TENANT}_domain_${TENANT}"
CONNECTSTRING="host=$HOSTNAME dbname=$DATABASE password=xxxx"
time psql -U $USERNAME -d "$CONNECTSTRING" -c "select utils.refreshculturehierarchytable();"
time psql -U $USERNAME -d "$CONNECTSTRING" -c "select utils.refreshmaterialhierarchytable();"
time psql -U $USERNAME -d "$CONNECTSTRING" -c "select utils.refreshtaxonhierarchytable();"
time psql -U $USERNAME -d "$CONNECTSTRING" -c "select utils.refreshobjectplacelocationtable();"
|
itsdavidbaxter/Tools
|
scripts/pahma/hierarchies/refresh.sh
|
Shell
|
apache-2.0
| 659 |
#!/bin/sh
# genymotion.sh
# AnimatedTableView
#
# Created by indianic on 20/11/13.
#
rm -r build/android
ti build -p android -b
echo 'Installing Project to Device'
/Volumes/DATA/ANDROID_SN/android-sdk-mac_86/platform-tools/adb install -r build/android/bin/app.apk
exit
|
jayeshIT/AlloyTwitterLogin_old
|
genymotion.sh
|
Shell
|
apache-2.0
| 276 |
#!/bin/bash
# Copyright 2016 Crunchy Data Solutions, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
echo "starting pgpool container...."
sudo docker stop pgpool
sudo docker rm pgpool
sudo docker run \
-p 12003:5432 \
--link master:master \
--link replica:replica \
-e PG_MASTER_SERVICE_NAME=master \
-e PG_SLAVE_SERVICE_NAME=replica \
-e PG_USERNAME=testuser \
-e PG_PASSWORD=password \
-e PG_DATABASE=postgres \
--name=pgpool \
--hostname=pgpool \
-d crunchydata/crunchy-pgpool:$CCP_IMAGE_TAG
|
CrunchyData/crunchy-proxy
|
scripts/docker/run-pgpool.sh
|
Shell
|
apache-2.0
| 1,009 |
#!/bin/bash
. "$( dirname "${BASH_SOURCE[0]}" )/setenv.sh"
if [ -f "$JSVC_PID_FILE" ]; then
echo "Daemon is already running. ($( cat "$JSVC_PID_FILE" ))" >&2
exit 1
fi
echo 'Starting Spring Boot app in Foreground.'
$JSVC_EXECUTABLE -server -cp "$JAVA_CLASSPATH" -user "$JSVC_USER" \
-nodetach -outfile '&1' -errfile '&2' -pidfile $JSVC_PID_FILE \
$JAVA_OPTS $JAVA_MAIN_CLASS $JAVA_ARGUMENTS
|
jokoframework/porandu_backend
|
src/main/dist/bin/run.sh
|
Shell
|
apache-2.0
| 398 |
#!/bin/bash
# setup operations tenant and user
source ~vagrant/admin-openrc.sh labstack
openstack user create --password labstack operator --or-show
operator_user_id=$(openstack user list | awk '/operator/ {print $2}')
admin_role_id=$(openstack role list | awk '/admin/ {print $2}')
admin_project_id=$(openstack project list | awk '/admin/ {print $2}')
demo_project_id=$(openstack project list | awk '/demo/ {print $2}')
openstack role add --user operator --project admin admin
openstack role add --user operator --project demo admin
cp /vagrant/scripts/operations/operator-openrc.sh ~vagrant/operator-openrc.sh
source ~vagrant/operator-openrc.sh labstack
export OS_TENANT_NAME="admin"
openstack project create --description 'Operations tenant.' operations --or-show
operations_project_id=$(openstack project list | awk '/operations/ {print $2}')
openstack role create operator --or-show
operator_role_id=$(openstack role list | awk '/operator/ {print $2}')
admin_role_id=$(openstack role list | awk '/admin/ {print $2}')
member_role_id=$(openstack role list | awk '/Member/ {print $2}')
openstack role add --user operator --project operations operator
openstack role add --user operator --project operations admin
openstack role add --user operator --project operations Member
# set operator user's default tenant to the operations project we just created
openstack user set operator --project operations --email "[email protected]"
# now that we have a 'operations' tenant let's source that
source ~vagrant/operator-openrc.sh labstack
# create a new network in our operations tenant with its own route to the public network
network_id=$(neutron net-create private | awk '/ id / {print $4}')
echo "network_id = $network_id"
subnet_id=$(neutron subnet-create --name private-subnet $network_id 10.0.4.0/24 \
--gateway 10.0.4.1 \
--host-route destination=0.0.0.0/0,nexthop=192.168.27.100 \
--dns_nameservers list=true 8.8.8.8 8.8.4.4 \
| awk '/ id / {print $4}')
echo "subnet_id = $subnet_id"
#neutron subnet-update $subnet_id --dns_nameservers list=true 8.8.8.8 8.8.4.4
#neutron router-create router2
neutron router-interface-add demorouter $subnet_id
#neutron router-gateway-set router2 public
# setup vagrant user with ssh keys
mkdir -p ~vagrant/.ssh
ssh-keygen -t rsa -N "" -f ~vagrant/.ssh/id_rsa -C "[email protected]"
# create new keypair on openstack for the 'operator' user using vagrants ssh pub/priv keys
nova keypair-add --pub-key ~vagrant/.ssh/id_rsa.pub --key-type ssh operator
public_key=`cat ~vagrant/.ssh/id_rsa.pub`
private_key=`cat ~vagrant/.ssh/id_rsa`
# set up root and vagrant users for password-less login via ssh keys
echo | sudo /bin/sh <<EOF
mkdir -p /root/.ssh
echo "$private_key" > /root/.ssh/id_rsa
chmod 600 /root/.ssh/id_rsa
echo "$public_key" > /root/.ssh/authorized_keys
chmod 600 /root/.ssh/authorized_keys
echo "$public_key" > /root/.ssh/id_rsa.pub
chmod 644 /root/.ssh/id_rsa.pub
EOF
# turn off strict host key checking for the vagrant user
echo 'Host *' > ~vagrant/.ssh/config
echo StrictHostKeyChecking no >> ~vagrant/.ssh/config
chown -R vagrant: ~vagrant/.ssh
echo 'Host *' > /root/.ssh/config
echo StrictHostKeyChecking no >> /root/.ssh/config
chown -R root: /root/.ssh
cat > ~vagrant/.operations <<EOF
#!/bin/bash
export ops_user_id=$operator_user_id
export ops_ssh_keypair=operator
export ops_project_id=$operations_project_id
export ops_network_id=$network_id
export ops_subnet_id=$subnet_id
EOF
|
tpouyer/stackinabox
|
scripts/operations/init.sh
|
Shell
|
apache-2.0
| 3,493 |
#!/bin/bash
MAJOR_VERSION=0.20
SRC_PKG=hadoop-$MAJOR_VERSION-mapreduce
namenode_user=hdfs
secondarynamenode_user=hdfs
datanode_user=hdfs
jobtracker_user=mapred
tasktracker_user=mapred
for node in namenode secondarynamenode jobtracker tasktracker datanode ; do
service_pkgdir=debian/$SRC_PKG-$node
debdir=$service_pkgdir/DEBIAN
template="debian/service-init.d.tpl"
user=$(eval "echo \$${node}_user")
mkdir -p $service_pkgdir/etc/init.d/ $debdir
sed -e "s|@HADOOP_DAEMON@|$node|" -e "s|@HADOOP_MAJOR_VERSION@|$MAJOR_VERSION|" \
-e "s|@DAEMON_USER@|$user|" \
$template > $service_pkgdir/etc/init.d/$SRC_PKG-$node
sed -e "s|@HADOOP_DAEMON@|$node|" -e "s|@HADOOP_MAJOR_VERSION@|$MAJOR_VERSION|" \
-e "s|@DAEMON_USER@|$user|" \
debian/service-postinst.tpl > $debdir/postinst
sed -e "s|@HADOOP_DAEMON@|$node|" -e "s|@HADOOP_MAJOR_VERSION@|$MAJOR_VERSION|" \
-e "s|@DAEMON_USER@|$user|" \
debian/service-postrm.tpl > $debdir/postrm
chmod 755 $service_pkgdir/etc/init.d/* $debdir/postinst $debdir/postrm
# We aren't making packages for debian itself, so override ITP lintian warnings
mkdir -p $service_pkgdir/usr/share/lintian/overrides
echo "$SRC_PKG-$node: new-package-should-close-itp-bug" > $service_pkgdir/usr/share/lintian/overrides/$SRC_PKG-$node
done
|
cloudera/cdh-package
|
bigtop-packages/src/deb/mr1/install_init_scripts.sh
|
Shell
|
apache-2.0
| 1,331 |
#!/bin/sh
set -eux
if ! command -v brew >/dev/null; then
echo "Homebrew missing, attempting to install"
/usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)" < /dev/null
fi
brew update
brew install \
libarchive \
libsodium \
node \
openssl \
pkg-config \
zeromq
|
georgemarshall/habitat
|
support/mac/install_dev_0_mac_latest.sh
|
Shell
|
apache-2.0
| 326 |
java -Djava.library.path=lib/sqlite4java-282 -cp conf:$1 edu.washington.escience.myria.parallel.Worker --workingDir $2
|
jamesmarva/myria
|
startWorker.sh
|
Shell
|
bsd-3-clause
| 119 |
# Create the docs and push them to github pages
# ---------------------------------------------
conda install --yes sphinx numpydoc openmm networkx matplotlib ipython-notebook
python setup.py develop
cd docs/tutorials
ipython nbconvert --to html *.ipynb
cd ..
make html
source update_gh_pages.sh
|
iModels/simgen
|
simgen/devtools/travis-ci/create_docs.sh
|
Shell
|
mit
| 299 |
#!/bin/sh
set -e
# Perform all actions as user 'postgres'
export PGUSER=postgres
# Add pgRouting Functions to the database
psql --dbname="$POSTGRES_DB" <<EOSQL
CREATE EXTENSION postgis;
CREATE EXTENSION pgrouting;
EOSQL
|
Starefossen/docker-pgrouting
|
9.6-2.3-2.3/initdb-pgrouting.sh
|
Shell
|
mit
| 222 |
#!/bin/sh
# script_test_15a.sh -- test for .bss placement.
# Copyright (C) 2016-2020 Free Software Foundation, Inc.
# Written by Cary Coutant <[email protected]>.
# This file is part of gold.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
# MA 02110-1301, USA.
# Check that the .bss section is not allocated in the file image.
check()
{
if ! grep -q "$2" "$1"
then
echo "Did not find expected section in $1:"
echo " $2"
echo ""
echo "Actual output below:"
cat "$1"
exit 1
fi
}
check script_test_15a.stdout "LOAD.*0x0\+0... 0x0\+1... RW"
check script_test_15a.stdout "01.*\.data .*\.data.extra .*\.bss"
|
mattstock/binutils-bexkat1
|
gold/testsuite/script_test_15a.sh
|
Shell
|
gpl-2.0
| 1,267 |
#!/bin/sh
#set -x
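# Launch the XDG autostart .desktop entries from /etc/xdg/autostart and
# $HOME/.config/autostart, skipping entries whose program is already running.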
verify_not_running() {
while read j
do
case $j in "Name="*|"Exec="*)
name="${j#Name=}"
name="${name#Exec=}"
name="${name#sh -c }"
name="${name//\'/}"
name="${name//\"/}"
name="${name%% *}"
case $name in echo|sleep) #add more
return 0
break
esac
if pidof "$name" >/dev/null 2>&1 ; then
return 1
break
fi
;;
esac
done < "$1"
return 0
}
run_desktop() {
while read j
do
case $j in "Exec="*)
sh -c "${j#Exec=}" &
break
;;
esac
done < "$1"
}
#=================================================
for i in /etc/xdg/autostart/*.desktop
do
if ! [ -f $i ] ; then
continue
fi
if ! verify_not_running $i ; then
continue
fi
run_desktop $i
done
#=================================================
for i in $HOME/.config/autostart/*.desktop
do
if ! [ -f $i ] ; then
continue
fi
	# skip user entries that duplicate a system-wide entry launched above
	if [ -f /etc/xdg/autostart/${i##*/} ] ; then
continue
fi
if ! verify_not_running $i ; then
continue
fi
run_desktop $i
done
### END ###
|
rserwin1/woof-CE
|
woof-code/rootfs-skeleton/bin/xdg_autostart.sh
|
Shell
|
gpl-2.0
| 1,010 |
#! /usr/bin/env bash
# _________________________________________________________________________
# /\ \
# \_| ___ __ _ _ |
# | |_ _|_ __ / _| ___ _ __ _ __ ___ __ _| |_(_) ___ _ __ |
# | | || '_ \| |_ / _ \| '__| '_ ` _ \ / _` | __| |/ _ \| '_ \ |
# | | || | | | _| (_) | | | | | | | | (_| | |_| | (_) | | | | |
# | |___|_| |_|_| \___/|_| |_| |_| |_|\__,_|\__|_|\___/|_| |_| |
# | |
# | imgs (Images): Images Wizard, fetch and manipulate images |
# | Copyright (C) 2013 - 2014 Juan Manuel Borges Caño |
# | The thought of creating something usable from google images |
# | encouraged a research that found |
# | http://sam.nipl.net/code/nipl-tools/bin/google-images, |
# | tweaked to be smart, elegant, efficient, fast and durable. |
# | Combined with ImageMagick powerful manipulation tools. |
# | _ _ |
# | | | (_) ___ ___ _ __ ___ ___ |
# | | | | |/ __/ _ \ '_ \/ __|/ _ \ |
# | | |___| | (_| __/ | | \__ \ __/ |
# | |_____|_|\___\___|_| |_|___/\___| |
# | |
# | This program is free software: you can redistribute it and/or modify |
# | it under the terms of the GNU General Public License as published by |
# | the Free Software Foundation, either version 3 of the License, or |
# | (at your option) any later version. |
# | |
# | This program is distributed in the hope that it will be useful, |
# | but WITHOUT ANY WARRANTY; without even the implied warranty of |
# | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
# | GNU General Public License for more details. |
# | |
# | You should have received a copy of the GNU General Public License |
# | along with this program. If not, see <http://www.gnu.org/licenses/>. |
# | ____________________________________________________________________|_
# \_/______________________________________________________________________/
shopt -s extglob
# The modes
imgs_modes=(fetch unify slideshow fortune custom)
# The --list option
function imgs_option_list()
{
for imgs_mode in "${imgs_modes[@]}"
do
printf "%s\n" "$imgs_mode"
done
exit 0
}
# The fetch mode
function imgs_fetch
{
# # Tweaked http://sam.nipl.net/code/nipl-tools/bin/google-images to be silent, and elegant
# [ $# = 0 ] && cmd_error "Usage: query count parallel safe opts timeout tries agent1 agent2"
#
# query=$1 count=${2:-100} parallel=${3:-10} safe=$4 opts=$5 timeout=${6:-10} tries=${7:-2} agent1=${8:-Mozilla/5.0 (X11; Linux i686; rv:25.0) Gecko/20100101 Firefox/25.0} agent2=${9:-Googlebot-Image/1.0}
#
# query_esc=`perl -e 'use URI::Escape; print uri_escape($ARGV[0]);' "$query"`
# dir=`echo "$query_esc" | sed 's/%20/-/g'`-`date +%s`; mkdir "$dir" || exit 2; cd "$dir"
# url="http://www.google.com/search?tbm=isch&safe=$safe&tbs=$opts&q=$query_esc" procs=0
# echo >.URL "$url" ; for A; do echo >>.args "$A"; done
#
# htmlsplit() { tr '\n\r \t' ' ' | sed 's/</\n</g; s/>/>\n/g; s/\n *\n/\n/g; s/^ *\n//; s/ $//;'; }
#
# for start in `seq 0 100 $[$count-1]`; do wget --quiet -U"$agent1" -T"$timeout" --tries="$tries" -O- "$url&start=$start" | htmlsplit; done | perl -ne 'use HTML::Entities; /^<a .*?href="(.*?)"/ and print decode_entities($1), "\n";' | grep '/imgres?' | perl -ne 'use URI::Escape; ($ref, $img) = map { uri_unescape($_) } /imgrefurl=(.*?)&imgurl=(.*?)&/; $ext = $img; for ($ext) { s,.*[/.],,; s/[^a-z0-9].*//i; $_ ||= "img"; } $save = sprintf("%04d.$ext", ++$i); print join("\t", $save, $img, $ref), "\n";' | tee -a .images.tsv | while IFS=$'\t' read -r save img ref; do wget --quiet -U"$agent2" -T"$timeout" --tries="$tries" --referer="$ref" -O "$save" "$img" || rm "$save" & procs=$[$procs + 1]; [ $procs = $parallel ] && { wait; procs=0; }; done
#
# wait
#
# echo "$dir"
# exit 0
# Thanks To: https://developers.google.com/image-search/v1/jsondevguide :-)
# 3 Seconds Between Resuts, 1 Second Between Fetch
# Don't Fetch Useless Stuff, Be Clever
# Don't Abuse Services, Be Nice
# Development Getting Out Of Google Services
[ $# = 0 ] && cmd_error "Usage: query count safe rights size chromacity filetype"
query="$1"
count="${2:-50}"
safe="$3" # "moderate"
results="8"
rights="$4" # "(cc_publicdomain|cc_attribute|cc_sharealike).-(cc_noncommercial|cc_nonderived)"
size="$5" #icon small|medium|large|xlarge
chromacity="$6" # "color"
filetype="$7" # "png" "jpg"
# Keep The sam.nipl.net Dir
query_esc=`perl -e 'use URI::Escape; print uri_escape($ARGV[0]);' "$query"`
dir="$(echo "$query_esc" | sed 's/%20/-/g')"-"$(date +%s)"
mkdir "$dir" || exit 2
cd "$dir"
# Don't Fetch Useless Stuff, Be Clever
for ((start=1; start<=count; start+=results))
do
search="$(
printf "$query\n" | curl \
-s --connect-timeout 300 --max-time 300 \
'https://ajax.googleapis.com/ajax/services/search/images' \
--get --data-urlencode "v=1.0" \
--data-urlencode "safe=$safe" \
--data-urlencode "rsz=$results" \
--data-urlencode "start=$start" \
--data-urlencode "as_rights=$rights" \
--data-urlencode "imgsz=$size" \
--data-urlencode "imgc=$chromacity" \
--data-urlencode "as_filetype=$filetype" \
--data-urlencode "q@-" \
-A "Mozilla/5.0"
)"
# Cache/Reference/Credit The Search
printf "%s\n" "$search" | python -mjson.tool > ".search-$start-$((start+results-1))"
urls="$(
printf "%s\n" "$search" | python -c \
'
import json, sys
obj = json.load(sys.stdin)
urls = [result["url"] for result in obj["responseData"]["results"]]
for url in urls: print url
'
)"
# Index/Reference/Credit The URLs
printf "%s\n" "$urls" >> ".urls"
# Don't Abuse Services, Be Nice
sleep 3
done
wget \
--quiet -T300 --tries 3 \
-i .urls \
--wait 1 \
-U "Mozilla/5.0"
echo "$dir"
exit 0
}
# The unify mode
function imgs_unify
{
	# Relies on ImageMagick's *convert*
(( $# == 0 )) && cmd_error "Usage: WidthxHeight Images ..."
images=(); for file in "${@:2:$#}"; do [[ "$(file -bi "$file" | cut -d'/' -f1)" == "image" ]] && images=( "${images[@]}" "$file" ); done
size="$1"
unification=$(mktemp -d "Unification-$size-XXXXXXXXXX")
for i in ${!images[@]}; do convert -quiet -scale "$size"\! "${images[i]}" "$unification/$(printf "%.10i" "$i").png"; done
echo "$unification"
exit 0
}
# The slideshow mode
function imgs_slideshow
{
(( $# == 0 )) && cmd_error "Usage: WidthxHeight Images ... SlideshowProduct.gif"
size="$1"
slideshow="${@:$#}"
unification="$(imgs_unify "${@:1:$#-1}")"
echo "$unification"
convert -quiet -alpha off -scale "$size"\! -delay 50 -loop 0 "$unification"/* "$slideshow"
echo "$slideshow"
exit 0
}
# The fortune mode
function imgs_fortune
{
size="${1:-320x240}"
word="${2:-$(shuf /usr/share/dict/words | head -1)}"
echo "$word"
source=$(imgs_fetch "$word" 25)
echo "$source"
imgs_slideshow "$size" "$source"/* "$word.gif"
exit 0
}
# The custom mode
function imgs_custom
{
word="${1:-$(shuf /usr/share/dict/words | head -1)}"
size="${2:-320x240}"
count="${3:-50}"
echo "$word"
source=$(imgs_fetch "$word" "$count")
echo "$source"
imgs_slideshow "$size" "$source"/* "$word.gif"
exit 0
}
# ... and imgs, the program itself
# The cmd init function
function imgs_init
{
#shopt -s extglob
#shopt -s nullglob
imgs_mode="custom"
}
# The cmd main function
function imgs_main
{
imgs_modesel="${1:-custom}"
for imgs_mode in "${imgs_modes[@]}"
do
if [[ "$imgs_mode" = "$imgs_modesel" ]]
then
shift
"imgs_$imgs_mode" "$@"
fi
done
cmd_error "unknown mode"
}
# The cmd fields
cmd_package="[@]pkg[@]"
cmd="imgs"
cmd_name="images"
cmd_version="[@]pkgversion[@]"
cmd_description="Images Wizard"
cmd_explanation="images is a program that fetches and/or manipulates groups of images. imgs fetches google images, unifies groups of images to a given size and builds slideshows."
cmd_license="[@]pkglicense[@]"
cmd_homepage="[@]pkghomepage[@]"
cmd_author="[@]pkgauthor[@]"
cmd_email="[@]pkgemail[@]"
cmd_social="[@]pkgsocial[@]"
cmd_blog="[@]pkgblog[@]"
cmd_usage="$cmd [OPTIONS] [MODE] [-- MODEOPTIONS]"
cmd_options=("/l/list/list modes/imgs_option_list/")
cmd_examples=("$cmd custom -- linux 50 320x240")
cmd_extrahelp="By default works in custom mode. Respect the terms of use of online services."
cmd_extranotes="Don't Fetch Useless Stuff, Be Clever. Don't Abuse Services, It's not funny, Be Nice. Development Getting Out Of Google Services. For more information, check documentation."
cmd_init="imgs_init"
cmd_main="imgs_main"
cmd_datadir="[@]pkgdatadir[@]"
# The cmd environment
source "$cmd_datadir/cmd.sh"
|
sholaoyedeji/codemiscs
|
cmds/imgs/imgs.in.sh
|
Shell
|
gpl-3.0
| 9,344 |
#!/bin/sh
# stop on failure
set -e
BACKEND="${1}"
if [ -z "${WEBOOB_WORKDIR}" ]; then
# use the old workdir by default
WEBOOB_WORKDIR="${HOME}/.weboob"
# but if we can find a valid xdg workdir, switch to it
[ "${XDG_CONFIG_HOME}" != "" ] || XDG_CONFIG_HOME="${HOME}/.config"
[ -d "${XDG_CONFIG_HOME}/weboob" ] && WEBOOB_WORKDIR="${XDG_CONFIG_HOME}/weboob"
fi
[ -z "${TMPDIR}" ] && TMPDIR="/tmp"
[ -z "${WEBOOB_BACKENDS}" ] && WEBOOB_BACKENDS="${WEBOOB_WORKDIR}/backends"
# allow private environment setup
[ -f "${WEBOOB_WORKDIR}/pre-test.sh" ] && source "${WEBOOB_WORKDIR}/pre-test.sh"
# setup xunit reporting (buildbot slaves only)
if [ -n "${RSYNC_TARGET}" ]; then
# by default, builder name is containing directory name
[ -z "${BUILDER_NAME}" ] && BUILDER_NAME=$(basename $(readlink -e $(dirname $0)/../..))
else
RSYNC_TARGET=""
fi
# find executables
if [ -z "${PYTHON}" ]; then
which python >/dev/null 2>&1 && PYTHON=$(which python)
which python2 >/dev/null 2>&1 && PYTHON=$(which python2)
which python2.7 >/dev/null 2>&1 && PYTHON=$(which python2.7)
fi
if [ -z "${NOSE}" ]; then
which nosetests >/dev/null 2>&1 && NOSE=$(which nosetests)
which nosetests2 >/dev/null 2>&1 && NOSE=$(which nosetests2)
which nosetests-python2.7 >/dev/null 2>&1 && NOSE=$(which nosetests-python2.7)
fi
if [ -z "${PYTHON}" ]; then
echo "Python required"
exit 1
fi
if [ -z "${NOSE}" ]; then
echo "python-nose required"
exit 1
fi
# do not allow undefined variables anymore
set -u
WEBOOB_TMPDIR=$(mktemp -d "${TMPDIR}/weboob_test.XXXXX")
cp "${WEBOOB_BACKENDS}" "${WEBOOB_TMPDIR}/backends"
# xunit nose setup
if [ -n "${RSYNC_TARGET}" ]; then
XUNIT_ARGS="--with-xunit --xunit-file=${WEBOOB_TMPDIR}/xunit.xml"
else
XUNIT_ARGS=""
fi
# path to sources
WEBOOB_DIR=$(cd $(dirname $0)/.. && pwd -P)
${PYTHON} "$(dirname $0)/stale_pyc.py"
echo "file://$WEBOOB_DIR/modules" > "${WEBOOB_TMPDIR}/sources.list"
export WEBOOB_WORKDIR="${WEBOOB_TMPDIR}"
export PYTHONPATH="${WEBOOB_DIR}"
export NOSE_NOPATH="1"
${PYTHON} "${WEBOOB_DIR}/scripts/weboob-config" update
# allow failing commands past this point
set +e
if [ -n "${BACKEND}" ]; then
${PYTHON} ${NOSE} -c /dev/null -sv "${WEBOOB_DIR}/modules/${BACKEND}" ${XUNIT_ARGS}
STATUS_CORE=0
else
echo "=== Weboob ==="
${PYTHON} ${NOSE} -c ${WEBOOB_DIR}/setup.cfg -sv
STATUS_CORE=$?
echo "=== Modules ==="
find "${WEBOOB_DIR}/modules" -name "test.py" | sort | xargs ${PYTHON} ${NOSE} -c /dev/null -sv ${XUNIT_ARGS}
fi
STATUS=$?
# xunit transfer
if [ -n "${RSYNC_TARGET}" ]; then
rsync -iz "${WEBOOB_TMPDIR}/xunit.xml" "${RSYNC_TARGET}/${BUILDER_NAME}-$(date +%s).xml"
rm "${WEBOOB_TMPDIR}/xunit.xml"
fi
# safe removal
rm -r "${WEBOOB_TMPDIR}/icons" "${WEBOOB_TMPDIR}/repositories" "${WEBOOB_TMPDIR}/modules" "${WEBOOB_TMPDIR}/keyrings"
rm "${WEBOOB_TMPDIR}/backends" "${WEBOOB_TMPDIR}/sources.list"
rmdir "${WEBOOB_TMPDIR}"
[ $STATUS_CORE -gt 0 ] && exit $STATUS_CORE
exit $STATUS
|
blckshrk/Weboob
|
tools/run_tests.sh
|
Shell
|
agpl-3.0
| 3,029 |
#!/bin/bash
docker run -i \
--net=son-sp \
--network-alias=son-gtklic \
-e DATABASE_HOST=son-postgres \
-e DATABASE_PORT=5432 \
-e POSTGRES_PASSWORD=sonata \
-e POSTGRES_USER=sonatatest \
-e POSTGRES_DB=gatekeeper \
--rm=true \
-v "$(pwd)/spec/reports/son-gtklic:/code/log" \
registry.sonata-nfv.eu:5000/son-gtklic python tests.py
|
sonata-nfv/son-gkeeper
|
tests/unit/gtklic.sh
|
Shell
|
apache-2.0
| 330 |
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Just a simple script to generate users on the local archiva instance for interactive UI testing
BASE_URL="http://localhost:8080/archiva"
USER_NAME="admin"
PASSWD="admin456"
USERS=50
#Authenticate
TOKEN=$(curl -s -X POST "${BASE_URL}/api/v2/redback/auth/authenticate" -H "accept: application/json" -H "Content-Type: application/json" \
-d "{\"grant_type\":\"authorization_code\",\"client_id\":\"test-bash\",\"client_secret\":\"string\",\"code\":\"string\",\"scope\":\"string\",\"state\":\"string\",\"user_id\":\"${USER_NAME}\",\
\"password\":\"${PASSWD}\",\"redirect_uri\":\"string\"}"|sed -n -e '/access_token/s/.*"access_token":"\([^"]\+\)".*/\1/gp')
if [ "${TOKEN}" == "" ]; then
echo "Authentication failed!"
exit 1
fi
NUM=$USERS
while [ $NUM -ge 0 ]; do
SUFFIX=$(printf "%03d" $NUM)
echo "User: test${SUFFIX}"
curl -s -w ' - %{http_code}' -X POST "${BASE_URL}/api/v2/redback/users" -H "accept: application/json" \
-H "Authorization: Bearer ${TOKEN}" \
-H "Content-Type: application/json" \
-d "{\"user_id\":\"test${SUFFIX}\",\"full_name\":\"Test User ${SUFFIX}\",\"email\":\"test${SUFFIX}@test.org\",\"validated\":true,\"locked\":false,\"password_change_required\":false,\"password\":\"test123\"}"
NUM=$((NUM-1))
echo " "
sleep 0.2 # Sleeping to get different creation timestamps
done
|
sadlil/archiva
|
archiva-modules/archiva-web/archiva-webapp/src/test/resources/generate-users.sh
|
Shell
|
apache-2.0
| 2,126 |
#!/bin/bash
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script contains functions for configuring instances to run kubernetes
# master and nodes. It is uploaded as GCE instance metadata. The upstart jobs
# in cluster/gce/trusty/<node.yaml, master.yaml> download it and make use
# of needed functions. The script itself is not supposed to be executed in
# other manners.
download_kube_env() {
# Fetch kube-env from GCE metadata server.
readonly tmp_install_dir="/var/cache/kubernetes-install"
mkdir -p "${tmp_install_dir}"
curl --fail --retry 5 --retry-delay 3 --silent --show-error \
-H "X-Google-Metadata-Request: True" \
-o "${tmp_install_dir}/kube_env.yaml" \
http://metadata.google.internal/computeMetadata/v1/instance/attributes/kube-env
# Convert the yaml format file into a shell-style file.
eval $(python -c '''
import pipes,sys,yaml
for k,v in yaml.load(sys.stdin).iteritems():
print("readonly {var}={value}".format(var = k, value = pipes.quote(str(v))))
''' < "${tmp_install_dir}/kube_env.yaml" > /etc/kube-env)
}
validate_hash() {
file="$1"
expected="$2"
actual=$(sha1sum ${file} | awk '{ print $1 }') || true
if [ "${actual}" != "${expected}" ]; then
echo "== ${file} corrupted, sha1 ${actual} doesn't match expected ${expected} =="
return 1
fi
}
# Retry a download until we get it. Takes a hash and a set of URLs.
#
# $1: The sha1 of the URL. Can be "" if the sha1 is unknown, which means
# we are downloading a hash file.
# $2: The temp file containing a list of urls to download.
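#
# Example (illustrative only):
#   download_or_bust "<sha1-of-tarball>" /tmp/kube-urls.txt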
download_or_bust() {
file_hash="$1"
tmpfile_urls="$2"
while true; do
# Read urls from the file one-by-one.
while read -r url; do
if [ ! -n "${file_hash}" ]; then
url="${url/.tar.gz/.tar.gz.sha1}"
fi
file="${url##*/}"
rm -f "${file}"
if ! curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10 "${url}"; then
echo "== Failed to download ${url}. Retrying. =="
elif [ -n "${file_hash}" ] && ! validate_hash "${file}" "${file_hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
else
if [ -n "${file_hash}" ]; then
echo "== Downloaded ${url} (SHA1 = ${file_hash}) =="
else
echo "== Downloaded ${url} =="
fi
return
fi
done < "${tmpfile_urls}"
done
}
# Downloads kubernetes binaries and kube-system manifest tarball, unpacks them,
# and places them into suitable directories. Files are placed in /home/kubernetes.
install_kube_binary_config() {
# Upstart does not support shell array well. Put urls in a temp file with one
# url at a line, and we will use 'read' command to get them one-by-one.
tmp_binary_urls=$(mktemp /tmp/kube-temp.XXXXXX)
echo "${SERVER_BINARY_TAR_URL}" | tr "," "\n" > "${tmp_binary_urls}"
tmp_manifests_urls=$(mktemp /tmp/kube-temp.XXXXXX)
echo "${KUBE_MANIFESTS_TAR_URL}" | tr "," "\n" > "${tmp_manifests_urls}"
kube_home="/home/kubernetes"
mkdir -p "${kube_home}"
cd "${kube_home}"
read -r server_binary_tar_url < "${tmp_binary_urls}"
readonly server_binary_tar="${server_binary_tar_url##*/}"
if [ -n "${SERVER_BINARY_TAR_HASH:-}" ]; then
readonly server_binary_tar_hash="${SERVER_BINARY_TAR_HASH}"
else
echo "Downloading binary release sha1 (not found in env)"
download_or_bust "" "${tmp_binary_urls}"
readonly server_binary_tar_hash=$(cat "${server_binary_tar}.sha1")
fi
echo "Downloading binary release tar"
download_or_bust "${server_binary_tar_hash}" "${tmp_binary_urls}"
tar xzf "${kube_home}/${server_binary_tar}" -C "${kube_home}" --overwrite
# Copy docker_tag and image files to /home/kubernetes/kube-docker-files.
src_dir="${kube_home}/kubernetes/server/bin"
dst_dir="${kube_home}/kube-docker-files"
mkdir -p "${dst_dir}"
cp "${src_dir}/"*.docker_tag "${dst_dir}"
if [ "${KUBERNETES_MASTER:-}" = "false" ]; then
cp "${src_dir}/kube-proxy.tar" "${dst_dir}"
else
cp "${src_dir}/kube-apiserver.tar" "${dst_dir}"
cp "${src_dir}/kube-controller-manager.tar" "${dst_dir}"
cp "${src_dir}/kube-scheduler.tar" "${dst_dir}"
cp -r "${kube_home}/kubernetes/addons" "${dst_dir}"
fi
# Use the binary from the release tarball if they are not preinstalled, or if this is
# a test cluster.
readonly BIN_PATH="/usr/bin"
if ! which kubelet > /dev/null || ! which kubectl > /dev/null; then
cp "${src_dir}/kubelet" "${BIN_PATH}"
cp "${src_dir}/kubectl" "${BIN_PATH}"
elif [ "${TEST_CLUSTER:-}" = "true" ]; then
kube_bin="${kube_home}/bin"
mkdir -p "${kube_bin}"
cp "${src_dir}/kubelet" "${kube_bin}"
cp "${src_dir}/kubectl" "${kube_bin}"
mount --bind "${kube_bin}/kubelet" "${BIN_PATH}/kubelet"
mount --bind -o remount,ro,^noexec "${BIN_PATH}/kubelet" "${BIN_PATH}/kubelet"
mount --bind "${kube_bin}/kubectl" "${BIN_PATH}/kubectl"
mount --bind -o remount,ro,^noexec "${BIN_PATH}/kubectl" "${BIN_PATH}/kubectl"
fi
# Put kube-system pods manifests in /home/kubernetes/kube-manifests/.
dst_dir="${kube_home}/kube-manifests"
mkdir -p "${dst_dir}"
read -r manifests_tar_url < "${tmp_manifests_urls}"
readonly manifests_tar="${manifests_tar_url##*/}"
if [ -n "${KUBE_MANIFESTS_TAR_HASH:-}" ]; then
readonly manifests_tar_hash="${KUBE_MANIFESTS_TAR_HASH}"
else
echo "Downloading k8s manifests sha1 (not found in env)"
download_or_bust "" "${tmp_manifests_urls}"
readonly manifests_tar_hash=$(cat "${manifests_tar}.sha1")
fi
echo "Downloading k8s manifests tar"
download_or_bust "${manifests_tar_hash}" "${tmp_manifests_urls}"
tar xzf "${kube_home}/${manifests_tar}" -C "${dst_dir}" --overwrite
readonly kube_addon_registry="${KUBE_ADDON_REGISTRY:-gcr.io/google_containers}"
if [ "${kube_addon_registry}" != "gcr.io/google_containers" ]; then
find "${dst_dir}" -name \*.yaml -or -name \*.yaml.in | \
xargs sed -ri "s@(image:\s.*)gcr.io/google_containers@\1${kube_addon_registry}@"
find "${dst_dir}" -name \*.manifest -or -name \*.json | \
xargs sed -ri "s@(image\":\s+\")gcr.io/google_containers@\1${kube_addon_registry}@"
fi
cp "${dst_dir}/kubernetes/trusty/configure-helper.sh" /etc/kube-configure-helper.sh
# Clean up.
rm -rf "${kube_home}/kubernetes"
rm -f "${kube_home}/${server_binary_tar}"
rm -f "${kube_home}/${server_binary_tar}.sha1"
rm -f "${kube_home}/${manifests_tar}"
rm -f "${kube_home}/${manifests_tar}.sha1"
rm -f "${tmp_binary_urls}"
rm -f "${tmp_manifests_urls}"
}
|
ojarjur/kubernetes
|
cluster/gce/trusty/configure.sh
|
Shell
|
apache-2.0
| 7,112 |
#!/bin/bash
FN="HsAgilentDesign026652.db_3.2.3.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.10/data/annotation/src/contrib/HsAgilentDesign026652.db_3.2.3.tar.gz"
"https://bioarchive.galaxyproject.org/HsAgilentDesign026652.db_3.2.3.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-hsagilentdesign026652.db/bioconductor-hsagilentdesign026652.db_3.2.3_src_all.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-hsagilentdesign026652.db/bioconductor-hsagilentdesign026652.db_3.2.3_src_all.tar.gz"
)
MD5="dcd2c748bf9d7c002611cd5cf2ff38c0"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
curl $URL > $TARBALL
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks.
if [[ $(uname -s) == "Linux" ]]; then
if md5sum -c <<<"$MD5 $TARBALL"; then
SUCCESS=1
break
fi
else if [[ $(uname -s) == "Darwin" ]]; then
if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
SUCCESS=1
break
fi
fi
fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
|
Luobiny/bioconda-recipes
|
recipes/bioconductor-hsagilentdesign026652.db/post-link.sh
|
Shell
|
mit
| 1,510 |
start_dir=$(pwd)
if [ $# -gt 0 ]; then
cd $1
else
echo "[ERROR] Usage: $0 <Working directory>"
return
fi
wget http://apache.mirrors.pair.com/storm/apache-storm-0.9.4/apache-storm-0.9.4.tar.gz
tar -xzf apache-storm-0.9.4.tar.gz
rm apache-storm-0.9.4.tar.gz
#cd storm-0.9.4
cd $start_dir
|
preems/realtime-event-processing
|
deploy/strategy_1/storm_check_install.sh
|
Shell
|
mit
| 300 |
#!/bin/bash
FN="pd.ht.hg.u133.plus.pm_3.12.0.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.10/data/annotation/src/contrib/pd.ht.hg.u133.plus.pm_3.12.0.tar.gz"
"https://bioarchive.galaxyproject.org/pd.ht.hg.u133.plus.pm_3.12.0.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-pd.ht.hg.u133.plus.pm/bioconductor-pd.ht.hg.u133.plus.pm_3.12.0_src_all.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-pd.ht.hg.u133.plus.pm/bioconductor-pd.ht.hg.u133.plus.pm_3.12.0_src_all.tar.gz"
)
MD5="5b1c3dd0ab3f8b21154982c197512a7c"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
curl $URL > $TARBALL
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks.
if [[ $(uname -s) == "Linux" ]]; then
if md5sum -c <<<"$MD5 $TARBALL"; then
SUCCESS=1
break
fi
else if [[ $(uname -s) == "Darwin" ]]; then
if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
SUCCESS=1
break
fi
fi
fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
|
Luobiny/bioconda-recipes
|
recipes/bioconductor-pd.ht.hg.u133.plus.pm/post-link.sh
|
Shell
|
mit
| 1,494 |
#!/bin/sh -e
#
# Copyright (C) 2010, 2012 Internet Systems Consortium, Inc. ("ISC")
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
# $Id: sign.sh,v 1.2 2010/06/21 02:31:46 marka Exp $
SYSTEMTESTTOP=../..
. $SYSTEMTESTTOP/conf.sh
RANDFILE=../random.data1
RANDFILE2=../random.data2
zone=example.
infile=example.db.in
zonefile=example.db
zskname=`$KEYGEN -q -r $RANDFILE -a RSASHA1 -b 768 -n zone $zone`
kskname=`$KEYGEN -q -r $RANDFILE -a RSASHA1 -b 1024 -f KSK -n zone $zone`
cat $infile $zskname.key $kskname.key > $zonefile
$SIGNER -P -e +1000d -r $RANDFILE -o $zone $zonefile > /dev/null
# zsk, no -R
keyname=`$KEYGEN -q -r $RANDFILE2 -a RSASHA1 -b 768 -n zone \
-P +20 -A +1h -I +1d -D +1mo $zone`
echo $keyname > keyname
|
phra/802_21
|
myODTONE/app/dhcp_usr/libs/bind/bind-9.8.4-P1/bin/tests/virtual-time/autosign-zsk/ns1/sign.sh
|
Shell
|
gpl-2.0
| 1,396 |
#!/bin/sh
if test -f "$1" ; then
cat $1 | tr -d '\n'
else
(git describe --tags --always 2>/dev/null || git describe --tags 2>/dev/null) | tr -d '\n'
fi
|
garlik/4store
|
version.sh
|
Shell
|
gpl-3.0
| 155 |
test_number=02
description="middle field"
expected=$test_dir/test_$test_number.expected
input=$test_dir/test_$test_number.in
cat > $input << "END_INPUT"
f0 f1 f2
00 01 02
10 11 12
20 21 22
END_INPUT
cat > $expected << "END_EXPECT"
f0 f2
00 02
10 12
20 22
END_EXPECT
subtest=1
output=$test_dir/test_$test_number.$subtest.out
$bin -f 2 $input > $output
if [ $? -ne 0 ] || [ "`diff -q $expected $output`" ]; then
test_status $test_number $subtest "$description (indexes)" FAIL
has_error=1
else
test_status $test_number $subtest "$description (indexes)" PASS
rm $output
fi
subtest=2
output=$test_dir/test_$test_number.$subtest.out
$bin -F f1 $input > $output
if [ $? -ne 0 ] || [ "`diff -q $expected $output`" ]; then
test_status $test_number $subtest "$description (labels)" FAIL
has_error=1
else
test_status $test_number $subtest "$description (labels)" PASS
rm $output
fi
test $has_error || rm $input $expected
|
johan/crush-tools
|
src/cutfield/test/test_02.sh
|
Shell
|
apache-2.0
| 934 |
test_number=00
description="formula using indexes"
infile="$test_dir/test.txt"
outfile="$test_dir/test_$test_number.actual"
expected="$test_dir/test_$test_number.expected"
$bin -d ';' -e '[3] / [2]' -b '0.0' $infile > "$outfile"
if [ $? -ne 0 ] ||
[ "`diff -q $outfile $expected`" ]; then
test_status $test_number 1 "$description" FAIL
else
test_status $test_number 1 "$description" PASS
rm "$outfile"
fi
|
dbushong/crush-tools
|
src/calcfield/test/test_00.sh
|
Shell
|
apache-2.0
| 417 |
#!/bin/sh
echo Outputting Raw HTML
mkdir -p out/raw
cp -R images out/raw
sed -e 's/\$images\$/images\/tutorial\//g' tutorial.md > out/raw/tutorial.md
pandoc -f markdown -t html -s -S --toc out/raw/tutorial.md > out/raw/tutorial.html
|
njbartlett/bndtools
|
bndtools.manual/raw.sh
|
Shell
|
epl-1.0
| 235 |
#!/bin/sh
type getarg >/dev/null 2>&1 || . /lib/dracut-lib.sh
dev="$1"
devenc=$(str_replace "$1" '/' '\2f')
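# Handle each device only once: a marker file under /tmp records that this device has already been processed.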
[ -e /tmp/dmraid.$devenc ] && exit 0
>/tmp/dmraid.$devenc
DM_RAIDS=$(getargs rd.dm.uuid -d rd_DM_UUID=)
if [ -n "$DM_RAIDS" ] || getargbool 0 rd.auto; then
DM_CLEANUP="no"
# run dmraid if udev has settled
info "Scanning for dmraid devices $DM_RAIDS"
SETS=$(dmraid -c -s)
if [ "$SETS" = "no raid disks" -o "$SETS" = "no raid sets" ]; then
return
fi
info "Found dmraid sets:"
echo $SETS|vinfo
if [ -n "$DM_RAIDS" ]; then
# only activate specified DM RAIDS
for r in $DM_RAIDS; do
for s in $SETS; do
if [ "${s##$r}" != "$s" ]; then
info "Activating $s"
dmraid -ay -i -p --rm_partitions "$s" 2>&1 | vinfo
[ -e "/dev/mapper/$s" ] && kpartx -a "/dev/mapper/$s" 2>&1 | vinfo
udevsettle
fi
done
done
else
# scan and activate all DM RAIDS
for s in $SETS; do
info "Activating $s"
dmraid -ay -i -p --rm_partitions "$s" 2>&1 | vinfo
[ -e "/dev/mapper/$s" ] && kpartx -a "/dev/mapper/$s" 2>&1 | vinfo
udevsettle
done
fi
need_shutdown
fi
|
Calrama/dracut
|
modules.d/90dmraid/dmraid.sh
|
Shell
|
gpl-2.0
| 1,324 |
#!/usr/bin/env bash
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# Usage: ci_build.sh <CONTAINER_TYPE> [--dockerfile <DOCKERFILE_PATH>]
# <COMMAND>
#
# CONTAINER_TYPE: Type of the docker container used the run the build:
# e.g., (cpu | gpu | android | tensorboard)
#
# DOCKERFILE_PATH: (Optional) Path to the Dockerfile used for docker build.
# If this optional value is not supplied (via the
# --dockerfile flag), default Dockerfiles in the same
# directory as this script will be used.
#
# COMMAND: Command to be executed in the docker container, e.g.,
# tensorflow/tools/ci_build/builds/pip.sh gpu -c opt --config=cuda
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "${SCRIPT_DIR}/builds/builds_common.sh"
# Get the command line arguments.
CONTAINER_TYPE=$( echo "$1" | tr '[:upper:]' '[:lower:]' )
shift 1
# Dockerfile to be used in docker build
DOCKERFILE_PATH="${SCRIPT_DIR}/Dockerfile.${CONTAINER_TYPE}"
DOCKER_CONTEXT_PATH="${SCRIPT_DIR}"
if [[ "$1" == "--dockerfile" ]]; then
DOCKERFILE_PATH="$2"
DOCKER_CONTEXT_PATH=$(dirname "${DOCKERFILE_PATH}")
echo "Using custom Dockerfile path: ${DOCKERFILE_PATH}"
echo "Using custom docker build context path: ${DOCKER_CONTEXT_PATH}"
shift 2
fi
if [[ ! -f "${DOCKERFILE_PATH}" ]]; then
die "Invalid Dockerfile path: \"${DOCKERFILE_PATH}\""
fi
COMMAND=("$@")
# Validate command line arguments.
if [ "$#" -lt 1 ] || [ ! -e "${SCRIPT_DIR}/Dockerfile.${CONTAINER_TYPE}" ]; then
supported_container_types=$( ls -1 ${SCRIPT_DIR}/Dockerfile.* | \
sed -n 's/.*Dockerfile\.\([^\/]*\)/\1/p' | tr '\n' ' ' )
>&2 echo "Usage: $(basename $0) CONTAINER_TYPE COMMAND"
>&2 echo " CONTAINER_TYPE can be one of [ ${supported_container_types}]"
>&2 echo " COMMAND is a command (with arguments) to run inside"
>&2 echo " the container."
>&2 echo ""
>&2 echo "Example (run all tests on CPU):"
>&2 echo "$0 CPU bazel test //tensorflow/..."
exit 1
fi
# Optional arguments - environment variables. For example:
# CI_DOCKER_EXTRA_PARAMS='-it --rm' CI_COMMAND_PREFIX='' tensorflow/tools/ci_build/ci_build.sh CPU /bin/bash
CI_TENSORFLOW_SUBMODULE_PATH="${CI_TENSORFLOW_SUBMODULE_PATH:-.}"
CI_COMMAND_PREFIX=("${CI_COMMAND_PREFIX[@]:-${CI_TENSORFLOW_SUBMODULE_PATH}/tensorflow/tools/ci_build/builds/with_the_same_user "\
"${CI_TENSORFLOW_SUBMODULE_PATH}/tensorflow/tools/ci_build/builds/configured ${CONTAINER_TYPE}}")
# cmake (CPU) builds do not require configuration.
if [[ "${CONTAINER_TYPE}" == "cmake" ]]; then
CI_COMMAND_PREFIX=("")
fi
# Use nvidia-docker if the container is GPU.
if [[ "${CONTAINER_TYPE}" == gpu* ]]; then
DOCKER_BINARY="nvidia-docker"
else
DOCKER_BINARY="docker"
fi
# Helper function to traverse directories up until given file is found.
function upsearch () {
test / == "$PWD" && return || \
test -e "$1" && echo "$PWD" && return || \
cd .. && upsearch "$1"
}
# Set up WORKSPACE and BUILD_TAG. Jenkins will set them for you or we pick
# reasonable defaults if you run it outside of Jenkins.
WORKSPACE="${WORKSPACE:-$(upsearch WORKSPACE)}"
BUILD_TAG="${BUILD_TAG:-tf_ci}"
# Add extra params for cuda devices and libraries for GPU container.
# And clear them if we are not building for GPU.
if [[ "${CONTAINER_TYPE}" != gpu* ]]; then
GPU_EXTRA_PARAMS=""
fi
# Determine the docker image name
DOCKER_IMG_NAME="${BUILD_TAG}.${CONTAINER_TYPE}"
# Under Jenkins matrix build, the build tag may contain characters such as
# commas (,) and equal signs (=), which are not valid inside docker image names.
DOCKER_IMG_NAME=$(echo "${DOCKER_IMG_NAME}" | sed -e 's/=/_/g' -e 's/,/-/g')
# Convert to all lower-case, as per requirement of Docker image names
DOCKER_IMG_NAME=$(echo "${DOCKER_IMG_NAME}" | tr '[:upper:]' '[:lower:]')
# Print arguments.
echo "WORKSPACE: ${WORKSPACE}"
echo "CI_DOCKER_BUILD_EXTRA_PARAMS: ${CI_DOCKER_BUILD_EXTRA_PARAMS[*]}"
echo "CI_DOCKER_EXTRA_PARAMS: ${CI_DOCKER_EXTRA_PARAMS[*]}"
echo "COMMAND: ${COMMAND[*]}"
echo "CI_COMMAND_PREFIX: ${CI_COMMAND_PREFIX[*]}"
echo "CONTAINER_TYPE: ${CONTAINER_TYPE}"
echo "BUILD_TAG: ${BUILD_TAG}"
echo " (docker container name will be ${DOCKER_IMG_NAME})"
echo ""
# Build the docker container.
echo "Building container (${DOCKER_IMG_NAME})..."
docker build -t ${DOCKER_IMG_NAME} ${CI_DOCKER_BUILD_EXTRA_PARAMS[@]} \
-f "${DOCKERFILE_PATH}" "${DOCKER_CONTEXT_PATH}"
# Check docker build status
if [[ $? != "0" ]]; then
die "ERROR: docker build failed. Dockerfile is at ${DOCKERFILE_PATH}"
fi
# If caller wants the with_the_same_user script to allow bad usernames,
# pass the var to the docker environment
if [ -n "${CI_BUILD_USER_FORCE_BADNAME}" ]; then
CI_BUILD_USER_FORCE_BADNAME_ENV="-e CI_BUILD_USER_FORCE_BADNAME=yes"
fi
# Run the command inside the container.
echo "Running '${COMMAND[*]}' inside ${DOCKER_IMG_NAME}..."
mkdir -p ${WORKSPACE}/bazel-ci_build-cache
# By default we cleanup - remove the container once it finish running (--rm)
# and share the PID namespace (--pid=host) so the process inside does not have
# pid 1 and SIGKILL is propagated to the process inside (jenkins can kill it).
${DOCKER_BINARY} run --rm --pid=host \
-v ${WORKSPACE}/bazel-ci_build-cache:${WORKSPACE}/bazel-ci_build-cache \
-e "CI_BUILD_HOME=${WORKSPACE}/bazel-ci_build-cache" \
-e "CI_BUILD_USER=$(id -u -n)" \
-e "CI_BUILD_UID=$(id -u)" \
-e "CI_BUILD_GROUP=$(id -g -n)" \
-e "CI_BUILD_GID=$(id -g)" \
-e "CI_TENSORFLOW_SUBMODULE_PATH=${CI_TENSORFLOW_SUBMODULE_PATH}" \
${CI_BUILD_USER_FORCE_BADNAME_ENV} \
-v ${WORKSPACE}:/workspace \
-w /workspace \
${GPU_EXTRA_PARAMS} \
${CI_DOCKER_EXTRA_PARAMS[@]} \
"${DOCKER_IMG_NAME}" \
${CI_COMMAND_PREFIX[@]} \
${COMMAND[@]}
|
ZhangXinNan/tensorflow
|
tensorflow/tools/ci_build/ci_build.sh
|
Shell
|
apache-2.0
| 6,503 |
#!/bin/sh
# Copyright (c) 2012-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
export LC_ALL=C
if [ $# -gt 1 ]; then
cd "$2" || exit 1
fi
if [ $# -gt 0 ]; then
FILE="$1"
shift
if [ -f "$FILE" ]; then
INFO="$(head -n 1 "$FILE")"
fi
else
echo "Usage: $0 <filename> <srcroot>"
exit 1
fi
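# git_check_in_repo: succeed only if git is usable here and the given paths are not untracked files.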
git_check_in_repo() {
! { git status --porcelain -uall --ignored "$@" 2>/dev/null || echo '??'; } | grep -q '?'
}
DESC=""
SUFFIX=""
if [ "${BITCOIN_GENBUILD_NO_GIT}" != "1" ] && [ -e "$(command -v git)" ] && [ "$(git rev-parse --is-inside-work-tree 2>/dev/null)" = "true" ] && git_check_in_repo share/genbuild.sh; then
# clean 'dirty' status of touched files that haven't been modified
git diff >/dev/null 2>/dev/null
# if latest commit is tagged and not dirty, then override using the tag name
RAWDESC=$(git describe --abbrev=0 2>/dev/null)
if [ "$(git rev-parse HEAD)" = "$(git rev-list -1 $RAWDESC 2>/dev/null)" ]; then
git diff-index --quiet HEAD -- && DESC=$RAWDESC
fi
# otherwise generate suffix from git, i.e. string like "59887e8-dirty"
SUFFIX=$(git rev-parse --short HEAD)
git diff-index --quiet HEAD -- || SUFFIX="$SUFFIX-dirty"
fi
if [ -n "$DESC" ]; then
NEWINFO="#define BUILD_DESC \"$DESC\""
elif [ -n "$SUFFIX" ]; then
NEWINFO="#define BUILD_SUFFIX $SUFFIX"
else
NEWINFO="// No build information available"
fi
# only update build.h if necessary
if [ "$INFO" != "$NEWINFO" ]; then
echo "$NEWINFO" >"$FILE"
fi
|
nikkitan/bitcoin
|
share/genbuild.sh
|
Shell
|
mit
| 1,635 |
#!/bin/sh
# Confirm that copying a directory into itself gets a proper diagnostic.
# Copyright (C) 2001-2013 Free Software Foundation, Inc.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# In 4.0.35 and earlier, 'mkdir dir && cp -R dir dir' would produce this:
# cp: won't create hard link 'dir/dir/dir' to directory ''
# Now it gives this:
# cp: can't copy a directory 'dir' into itself 'dir/dir'
. "${srcdir=.}/tests/init.sh"; path_prepend_ ./src
print_ver_ cp
mkdir a dir || framework_failure_
# This command should exit nonzero.
cp -R dir dir 2> out && fail=1
echo 1 >> out
# This should, too. However, with coreutils-7.1 it would infloop.
cp -rl dir dir 2>> out && fail=1
echo 2 >> out
cp -rl a dir dir 2>> out && fail=1
echo 3 >> out
cp -rl a dir dir 2>> out && fail=1
echo 4 >> out
cat > exp <<\EOF
cp: cannot copy a directory, 'dir', into itself, 'dir/dir'
1
cp: cannot copy a directory, 'dir', into itself, 'dir/dir'
2
cp: cannot copy a directory, 'dir', into itself, 'dir/dir'
3
cp: cannot copy a directory, 'dir', into itself, 'dir/dir'
4
EOF
#'
compare exp out || fail=1
Exit $fail
|
mmayer/coreutils
|
tests/cp/into-self.sh
|
Shell
|
gpl-3.0
| 1,696 |
#!/bin/sh
#
# This script is used to pull down updates from github
#+ additionally call the deployment scripts if operating on the generic cluster
#
# For legacy reasons this script is symlinked to the project root during install
#+ This can (should) change once we have migrated off of the generic cluster
#+ Ideally this should all take place through Captain-Shove or CI automation
#
# To run this script from the project root directory:
#+ bash update.sh
#
# Grab any changes
git pull
# Pull in any updates to submodules
git submodule update --init --recursive
# install any extensions managed by Composer
# to update, run php tools/composer.phar update prior to deployment
php tools/composer.phar install
# Run the maintenance script for any database migrations
cd core
php maintenance/update.php --quick
cd ../
# Call the deploy script if necessary and reload Apache
if [ $(hostname) == 'genericadm.private.phx1.mozilla.com' ]; then
if [ $(pwd | grep -c "wiki-dev.allizom.org") == 1 ]; then
/data/genericrhel6-dev/deploy wiki-dev.allizom.org
issue-multi-command genericrhel6-dev service httpd graceful
elif [ $(pwd | grep -c "wiki.allizom.org") == 1 ]; then
/data/genericrhel6-stage/deploy wiki.allizom.org
issue-multi-command genericrhel6-stage service httpd graceful
elif [ $(pwd | grep -c "wiki.mozilla.org") == 1 ]; then
/data/genericrhel6/deploy wiki.mozilla.org
issue-multi-command genericrhel6 service httpd graceful
else
echo "ERROR: Could not match deployment environment"
exit 1
fi
else
service apache2 graceful
fi
# eof
|
wagnerand/wiki.mozilla.org
|
tools/update.sh
|
Shell
|
mpl-2.0
| 1,632 |
#!/usr/bin/env bash
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
set -x
BUILDER=${BUILDER:-false} # Whether this is running a PR builder job.
export GO_FLAGS="-race"
export GORACE="halt_on_error=1"
# Check whether assets need to be rebuilt.
FORCE=true build/assets.sh
if [[ ! -z "$(git diff --name-only pages)" ]]; then
echo "Found changes to UI assets:"
git diff --name-only pages
  echo "Run: 'make assets FORCE=true'"
exit 1
fi
# Build & test with go 1.8
docker run --rm \
-w "/go/src/github.com/google/cadvisor" \
-v "${GOPATH}/src/github.com/google/cadvisor:/go/src/github.com/google/cadvisor" \
golang:1.8 make all test-runner
# Nodes that are currently stable. When tests fail on a specific node, and the failure is not remedied within a week, that node will be removed from this list.
golden_nodes=(
e2e-cadvisor-ubuntu-trusty
e2e-cadvisor-container-vm-v20151215
e2e-cadvisor-container-vm-v20160127
e2e-cadvisor-rhel-7
)
# TODO: Add test on GCI
# TODO: Add test for kubernetes default image
# e2e-cadvisor-container-vm-v20160321
# TODO: Temporarily disabled for #1344
# e2e-cadvisor-coreos-beta
# TODO: enable when docker 1.10 is working
# e2e-cadvisor-ubuntu-trusty-docker110
# TODO: Always fails with "Network tx and rx bytes should not be equal"
# e2e-cadvisor-centos-v7
max_retries=8
./runner --logtostderr --test-retry-count=$max_retries \
--test-retry-whitelist=integration/runner/retrywhitelist.txt \
--ssh-options "-i /var/lib/jenkins/gce_keys/google_compute_engine -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o CheckHostIP=no -o StrictHostKeyChecking=no" \
${golden_nodes[*]}
|
sg00dwin/origin
|
vendor/github.com/google/cadvisor/build/jenkins_e2e.sh
|
Shell
|
apache-2.0
| 2,218 |
#!/bin/sh
# this assumes webserver is running on port 8080
echo "Deploy everything first"
java org.apache.axis.client.AdminClient deploy.wsdd $*
echo "These next 3 should work..."
java samples.stock.GetQuote -uuser1 -wpass1 XXX $*
java samples.stock.GetQuote -uuser2 XXX $*
java samples.stock.GetInfo -uuser3 -wpass3 IBM address
echo "The rest of these should fail... (nicely of course)"
java samples.stock.GetQuote XXX $*
java samples.stock.GetQuote -uuser1 -wpass2 XXX $*
java samples.stock.GetQuote -uuser3 -wpass3 XXX $*
echo "This should work but print debug info on the client and server"
java samples.stock.GetQuote -d -uuser1 -wpass1 XXX $*
# Now undeploy everything
java org.apache.axis.client.AdminClient undeploy.wsdd $*
|
hugosato/apache-axis
|
samples/stock/testit.sh
|
Shell
|
apache-2.0
| 737 |
#!/usr/bin/env bash
set -euxo pipefail
export PATH=/opt/ghc/$GHCVER/bin:/opt/cabal/$CABALVER/bin:$HOME/.cabal/bin:$PATH
ghc --version
cabal --version
if [ "$CABALVER" = "1.18" ]
then
TEST=--enable-tests
else
TEST=--run-tests
fi
if [ "$BACKEND" = "none" ]
then
cabal install --force-reinstalls $TEST $(cat sources.txt)
else
if [ "$BACKEND" = "postgresql" ]
then
psql -c 'create database persistent;' -U postgres
elif [ "$BACKEND" = "mysql" ]
then
mysql -e 'create database persistent;'
elif [ "$BACKEND" = "zookeeper" ]
then
#sudo add-apt-repository -y ppa:yandex-sysmon/zookeeper-3.4
#sudo apt-get update
#sudo apt-get install -y libzookeeper-mt-dev zookeeperd
#sudo mkdir -p /var/log/zookeeper
#sudo chmod -R 777 /var/log/zookeeper
#sudo chmod 666 /etc/zookeeper/conf/zoo.cfg
#echo maxClientCnxns=128 >> /etc/zookeeper/conf/zoo.cfg
#sudo service zookeeper restart
#sleep 10
/usr/share/zookeeper/bin/zkCli.sh create /persistent null
fi
cd persistent-test
cabal install --force-reinstalls --only-dependencies --enable-tests -f$BACKEND
# Make sure we get regular output sent to Travis to avoid it canceling our
# builds
cabal configure --enable-tests -f$BACKEND
cabal build
cabal test
fi
|
jasonzoladz/persistent
|
travis/run.sh
|
Shell
|
mit
| 1,356 |
#!/bin/bash
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This script is invoked by run_interop_tests.py to build the docker image
# for interop testing. You should never need to call this script on your own.
set -x
# Params:
# INTEROP_IMAGE - name of tag of the final interop image
# BASE_NAME - base name used to locate the base Dockerfile and build script
# TTY_FLAG - optional -t flag to make docker allocate tty
# BUILD_INTEROP_DOCKER_EXTRA_ARGS - optional args to be passed to the
# docker run command
# GRPC_ROOT - grpc base directory, default to top of this tree.
# GRPC_GO_ROOT - grpc-go base directory, default to '$GRPC_ROOT/../grpc-go'
# GRPC_JAVA_ROOT - grpc-java base directory, default to '$GRPC_ROOT/../grpc-java'
cd "$(dirname "$0")/../../.."
echo "GRPC_ROOT: ${GRPC_ROOT:=$(pwd)}"
MOUNT_ARGS="-v $GRPC_ROOT:/var/local/jenkins/grpc:ro"
echo "GRPC_JAVA_ROOT: ${GRPC_JAVA_ROOT:=$(cd ../grpc-java && pwd)}"
if [ -n "$GRPC_JAVA_ROOT" ]
then
MOUNT_ARGS+=" -v $GRPC_JAVA_ROOT:/var/local/jenkins/grpc-java:ro"
else
echo "WARNING: grpc-java not found, it won't be mounted to the docker container."
fi
echo "GRPC_GO_ROOT: ${GRPC_GO_ROOT:=$(cd ../grpc-go && pwd)}"
if [ -n "$GRPC_GO_ROOT" ]
then
MOUNT_ARGS+=" -v $GRPC_GO_ROOT:/var/local/jenkins/grpc-go:ro"
else
echo "WARNING: grpc-go not found, it won't be mounted to the docker container."
fi
echo "GRPC_DART_ROOT: ${GRPC_DART_ROOT:=$(cd ../grpc-dart && pwd)}"
if [ -n "$GRPC_DART_ROOT" ]
then
MOUNT_ARGS+=" -v $GRPC_DART_ROOT:/var/local/jenkins/grpc-dart:ro"
else
echo "WARNING: grpc-dart not found, it won't be mounted to the docker container."
fi
echo "GRPC_NODE_ROOT: ${GRPC_NODE_ROOT:=$(cd ../grpc-node && pwd)}"
if [ -n "$GRPC_NODE_ROOT" ]
then
MOUNT_ARGS+=" -v $GRPC_NODE_ROOT:/var/local/jenkins/grpc-node:ro"
else
echo "WARNING: grpc-node not found, it won't be mounted to the docker container."
fi
mkdir -p /tmp/ccache
# Mount service account dir if available.
# If service_directory does not contain the service account JSON file,
# some of the tests will fail.
if [ -e "$HOME/service_account" ]
then
MOUNT_ARGS+=" -v $HOME/service_account:/var/local/jenkins/service_account:ro"
fi
# Use image name based on Dockerfile checksum
# on OSX use md5 instead of sha1sum
if which sha1sum > /dev/null;
then
BASE_IMAGE=${BASE_NAME}_$(sha1sum "tools/dockerfile/interoptest/$BASE_NAME/Dockerfile" | cut -f1 -d\ )
else
BASE_IMAGE=${BASE_NAME}_$(md5 -r "tools/dockerfile/interoptest/$BASE_NAME/Dockerfile" | cut -f1 -d\ )
fi
if [ "$DOCKERHUB_ORGANIZATION" != "" ]
then
BASE_IMAGE=$DOCKERHUB_ORGANIZATION/$BASE_IMAGE
time docker pull "$BASE_IMAGE"
else
# Make sure docker image has been built. Should be instantaneous if so.
docker build -t "$BASE_IMAGE" --force-rm=true "tools/dockerfile/interoptest/$BASE_NAME" || exit $?
fi
# Create a local branch so the child Docker script won't complain
git branch -f jenkins-docker
CONTAINER_NAME="build_${BASE_NAME}_$(uuidgen)"
# Prepare image for interop tests, commit it on success.
# TODO: Figure out if is safe to eliminate the suppression. It's currently here
# because $MOUNT_ARGS and $BUILD_INTEROP_DOCKER_EXTRA_ARGS can have legitimate
# spaces, but the "correct" way to do this is to utilize proper arrays.
# Same for $TTY_FLAG
# shellcheck disable=SC2086
(docker run \
--cap-add SYS_PTRACE \
-e CCACHE_DIR=/tmp/ccache \
-e THIS_IS_REALLY_NEEDED='see https://github.com/docker/docker/issues/14203 for why docker is awful' \
-e THIS_IS_REALLY_NEEDED_ONCE_AGAIN='For issue 4835. See https://github.com/docker/docker/issues/14203 for why docker is awful' \
-i \
$TTY_FLAG \
$MOUNT_ARGS \
$BUILD_INTEROP_DOCKER_EXTRA_ARGS \
-v /tmp/ccache:/tmp/ccache \
--name="$CONTAINER_NAME" \
"$BASE_IMAGE" \
bash -l "/var/local/jenkins/grpc/tools/dockerfile/interoptest/$BASE_NAME/build_interop.sh" \
&& docker commit "$CONTAINER_NAME" "$INTEROP_IMAGE" \
&& echo "Successfully built image $INTEROP_IMAGE")
EXITCODE=$?
# remove intermediate container, possibly killing it first
docker rm -f "$CONTAINER_NAME"
exit $EXITCODE
|
thinkerou/grpc
|
tools/run_tests/dockerize/build_interop_image.sh
|
Shell
|
apache-2.0
| 4,632 |
#!/bin/bash
set -x -u
SRC=../../../data
DST=$1
# Remove old links
rm -rf $DST
mkdir $DST
files=(copyright.html resources-mdpi_clear resources-hdpi_clear resources-xhdpi_clear resources-xxhdpi_clear categories.txt classificator.txt
types.txt fonts_blacklist.txt fonts_whitelist.txt languages.txt unicode_blocks.txt \
drules_proto_clear.bin packed_polygons.bin countries.txt World.mwm WorldCoasts.mwm 00_roboto_regular.ttf 01_dejavusans.ttf 02_droidsans-fallback.ttf
03_jomolhari-id-a3d.ttf 04_padauk.ttf 05_khmeros.ttf 06_code2000.ttf)
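# Symlink each shared data file into the freshly created assets directory.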
for item in ${files[*]}
do
ln -s $SRC/$item $DST/$item
done
|
Zverik/omim
|
tizen/scripts/update_assets_for_version.sh
|
Shell
|
apache-2.0
| 622 |
#!/bin/sh
DFSAN_DIR=$(dirname "$0")/../
DFSAN_CUSTOM_TESTS=${DFSAN_DIR}/../../test/dfsan/custom.cpp
DFSAN_CUSTOM_WRAPPERS=${DFSAN_DIR}/dfsan_custom.cpp
DFSAN_ABI_LIST=${DFSAN_DIR}/done_abilist.txt
DIFFOUT=$(mktemp -q /tmp/tmp.XXXXXXXXXX)
ERRORLOG=$(mktemp -q /tmp/tmp.XXXXXXXXXX)
DIFF_A=$(mktemp -q /tmp/tmp.XXXXXXXXXX)
DIFF_B=$(mktemp -q /tmp/tmp.XXXXXXXXXX)
on_exit() {
rm -f ${DIFFOUT} 2> /dev/null
rm -f ${ERRORLOG} 2> /dev/null
rm -f ${DIFF_A} 2> /dev/null
rm -f ${DIFF_B} 2> /dev/null
}
trap on_exit EXIT
# Ignore __sanitizer_cov_trace* because they are implemented elsewhere.
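# Compare the custom functions declared in the ABI list against the wrappers actually implemented.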
grep -E "^fun:.*=custom" ${DFSAN_ABI_LIST} \
| grep -v "dfsan_get_label\|__sanitizer_cov_trace" \
| sed "s/^fun:\(.*\)=custom.*/\1/" | sort > $DIFF_A
grep -E "__dfsw.*\(" ${DFSAN_CUSTOM_WRAPPERS} \
| grep -v "__sanitizer_cov_trace" \
| sed "s/.*__dfsw_\(.*\)(.*/\1/" | sort > $DIFF_B
diff -u $DIFF_A $DIFF_B > ${DIFFOUT}
if [ $? -ne 0 ]
then
echo -n "The following differences between the ABI list and ">> ${ERRORLOG}
echo "the implemented custom wrappers have been found:" >> ${ERRORLOG}
cat ${DIFFOUT} >> ${ERRORLOG}
fi
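# Likewise compare the implemented wrappers against the functions exercised by the test suite.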
grep -E __dfsw_ ${DFSAN_CUSTOM_WRAPPERS} \
| grep -v "__sanitizer_cov_trace" \
| sed "s/.*__dfsw_\([^(]*\).*/\1/" | sort > $DIFF_A
grep -E "^[[:space:]]*test_.*\(\);" ${DFSAN_CUSTOM_TESTS} \
| sed "s/.*test_\(.*\)();/\1/" | sort > $DIFF_B
diff -u $DIFF_A $DIFF_B > ${DIFFOUT}
if [ $? -ne 0 ]
then
echo -n "The following differences between the implemented " >> ${ERRORLOG}
echo "custom wrappers and the tests have been found:" >> ${ERRORLOG}
cat ${DIFFOUT} >> ${ERRORLOG}
fi
if [ -s ${ERRORLOG} ]
then
cat ${ERRORLOG}
exit 1
fi
|
endlessm/chromium-browser
|
third_party/llvm/compiler-rt/lib/dfsan/scripts/check_custom_wrappers.sh
|
Shell
|
bsd-3-clause
| 1,678 |
# Check for updates on initial load...
if [ "$DISABLE_AUTO_UPDATE" != "true" ]; then
/usr/bin/env ZSH=$ZSH DISABLE_UPDATE_PROMPT=$DISABLE_UPDATE_PROMPT zsh $ZSH/tools/check_for_upgrade.sh
fi
# Initializes Oh My Zsh
# add a function path
fpath=($ZSH/functions $ZSH/completions $fpath)
# Load all of the config files in ~/oh-my-zsh that end in .zsh
# TIP: Add files you don't want in git to .gitignore
for config_file ($ZSH/lib/*.zsh); do
source $config_file
done
# Set ZSH_CUSTOM to the path where your custom config files
# and plugins exists, or else we will use the default custom/
if [[ -z "$ZSH_CUSTOM" ]]; then
ZSH_CUSTOM="$ZSH/custom"
fi
is_plugin() {
local base_dir=$1
local name=$2
test -f $base_dir/plugins/$name/$name.plugin.zsh \
|| test -f $base_dir/plugins/$name/_$name
}
# Add all defined plugins to fpath. This must be done
# before running compinit.
for plugin ($plugins); do
if is_plugin $ZSH_CUSTOM $plugin; then
fpath=($ZSH_CUSTOM/plugins/$plugin $fpath)
elif is_plugin $ZSH $plugin; then
fpath=($ZSH/plugins/$plugin $fpath)
fi
done
# Figure out the SHORT hostname
if [ -n "$commands[scutil]" ]; then
# OS X
SHORT_HOST=$(scutil --get ComputerName)
else
SHORT_HOST=${HOST/.*/}
fi
# Save the location of the current completion dump file.
ZSH_COMPDUMP="${ZDOTDIR:-${HOME}}/.zcompdump-${SHORT_HOST}-${ZSH_VERSION}"
# Load and run compinit
autoload -U compinit
compinit -i -d "${ZSH_COMPDUMP}"
# Load all of the plugins that were defined in ~/.zshrc
for plugin ($plugins); do
if [ -f $ZSH_CUSTOM/plugins/$plugin/$plugin.plugin.zsh ]; then
source $ZSH_CUSTOM/plugins/$plugin/$plugin.plugin.zsh
elif [ -f $ZSH/plugins/$plugin/$plugin.plugin.zsh ]; then
source $ZSH/plugins/$plugin/$plugin.plugin.zsh
fi
done
# Load all of your custom configurations from custom/
for config_file ($ZSH_CUSTOM/*.zsh(N)); do
source $config_file
done
unset config_file
# Load the theme
if [ "$ZSH_THEME" = "random" ]; then
themes=($ZSH/themes/*zsh-theme)
N=${#themes[@]}
((N=(RANDOM%N)+1))
RANDOM_THEME=${themes[$N]}
source "$RANDOM_THEME"
echo "[oh-my-zsh] Random theme '$RANDOM_THEME' loaded..."
else
if [ ! "$ZSH_THEME" = "" ]; then
if [ -f "$ZSH_CUSTOM/$ZSH_THEME.zsh-theme" ]; then
source "$ZSH_CUSTOM/$ZSH_THEME.zsh-theme"
elif [ -f "$ZSH_CUSTOM/themes/$ZSH_THEME.zsh-theme" ]; then
source "$ZSH_CUSTOM/themes/$ZSH_THEME.zsh-theme"
else
source "$ZSH/themes/$ZSH_THEME.zsh-theme"
fi
fi
fi
|
tsprlng/oh-my-zsh
|
oh-my-zsh.sh
|
Shell
|
mit
| 2,502 |
!Entering privileged EXEC mode
enable
!Setting the date/time
clock set 19:00:00 24 Mar 2017
!Entering global configuration mode
configure terminal
!Setting the device hostname
hostname SW-XXX
!Enabling the password encryption service
service password-encryption
!Disabling domain name resolution
no ip domain-lookup
!Setting the message of the day
banner motd #Warning - Aviso: Acesso autorizado somente a funcionarios#
!Enabling a secret-type password for enable mode
enable secret cisco2017
!Creating a user with a password
username cisco privilege 15 secret cisco2017
!Setting the domain name
ip domain-name cisco.com
!Generating the key and enabling the SSH service
!On a real Router/Switch: crypto key generate rsa usage-keys modulus 1024
crypto key generate rsa
!Key size: 1024 bits
!Enabling SSH version 2
ip ssh version 2
!Connection timeout (seconds)
ip ssh time-out 60
!Number of SSH connection attempts
ip ssh authentication-retries 3
!Entering the console line
line console 0
!Setting a password-type password
password cisco2017
!Requiring login with a local username and password
login local
!Synchronizing log output with the terminal
logging synchronous
!Setting the inactivity timeout
exec-timeout 3 30
!Leaving the console line
exit
!Entering the virtual terminal lines
line vty 0 4
!Setting a password-type password
password cisco2017
!Requiring login with a local username and password
login local
!Synchronizing log output with the terminal
logging synchronous
!Setting the inactivity timeout
exec-timeout 3 30
!Setting the allowed inbound transport protocol
transport input ssh
!Exiting all configuration levels
end
!Saving the configuration
copy running-config startup-config
!Commands for viewing information:
!Information about the running (RAM) configuration
show running-config
!Information about date and time
show clock
!Information about the SSH service
show ssh
!Information about the SSH key service
show ip ssh
!Information about the SSH public and private keys
show crypto key mypubkey rsa
!Information about line configurations (real equipment or IOS 15.x only)
show line
!Information about login settings (real equipment or IOS 15.x only)
show login
!Information about line and console access
show sessions
!Information about logged-in users
show users
!Information about the IOS version and hardware details
show version
|
vaamonde/netacad
|
modulo-03/capitulo-02/Cenario-C/01-Script-switch-basic.sh
|
Shell
|
gpl-3.0
| 2,715 |
#!/bin/bash
export QGIS_SERVER_LOG_FILE=/var/log/qgisserver.log
export QGIS_SERVER_LOG_LEVEL=0
export QGIS_PREFIX_PATH=/usr/src/qgis/build/output
exec /usr/bin/spawn-fcgi -n -p 5555 /usr/src/qgis/build/output/bin/qgis_mapserv.fcgi
|
rduivenvoorde/QGIS
|
.ci/ogc/qgis_mapserv.sh
|
Shell
|
gpl-2.0
| 233 |
#!/bin/bash
number=$1
method=$2
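# Throughput test: tunnel local port 8387 through ss-server (port 8389) to an iperf server listening on port 8388.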
ss-tunnel -k test -m $method -l 8387 -L 127.0.0.1:8388 -s 127.0.0.1 -p 8389 &
ss_tunnel_pid=$!
ss-server -k test -m $method -s 127.0.0.1 -p 8389 &
ss_server_pid=$!
iperf -s -p 8388 &
iperf_pid=$!
sleep 1
iperf -c 127.0.0.1 -p 8387 -n $number
# Wait for iperf server to receive all data.
# One second should be enough in most cases.
sleep 1
kill $ss_tunnel_pid
kill $ss_server_pid
kill $iperf_pid
sleep 1
echo "Test Finished"
|
Jigsaw-Code/outline-client
|
third_party/shadowsocks-libev/scripts/iperf.sh
|
Shell
|
apache-2.0
| 465 |
#!/bin/bash
# This script tests the high level end-to-end functionality demonstrated
# as part of the examples/sample-app
STARTTIME=$(date +%s)
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
os::log::info "Starting containerized end-to-end test"
unset KUBECONFIG
os::util::environment::use_sudo
os::cleanup::tmpdir
os::util::environment::setup_all_server_vars
export HOME="${FAKE_HOME_DIR}"
# Allow setting $JUNIT_REPORT to toggle output behavior
if [[ -n "${JUNIT_REPORT:-}" ]]; then
export JUNIT_REPORT_OUTPUT="${LOG_DIR}/raw_test_output.log"
fi
function cleanup()
{
out=$?
echo
if [ $out -ne 0 ]; then
echo "[FAIL] !!!!! Test Failed !!!!"
else
os::log::info "Test Succeeded"
fi
echo
set +e
os::cleanup::dump_container_logs
# pull information out of the server log so that we can get failure management in jenkins to highlight it and
# really have it smack people in their logs. This is a severe correctness problem
grep -ra5 "CACHE.*ALTERED" ${LOG_DIR}/containers
os::cleanup::dump_etcd
os::cleanup::dump_events
if [[ -z "${SKIP_TEARDOWN-}" ]]; then
os::cleanup::containers
fi
truncate_large_logs
os::test::junit::generate_oscmd_report
set -e
os::log::info "Exiting"
ENDTIME=$(date +%s); echo "$0 took $(($ENDTIME - $STARTTIME)) seconds"
exit $out
}
trap "cleanup" EXIT INT TERM
os::log::system::start
# Setup
os::log::info "openshift version: `openshift version`"
os::log::info "oc version: `oc version`"
os::log::info "Using images: ${USE_IMAGES}"
os::log::info "Starting OpenShift containerized server"
oc cluster up --server-loglevel=4 --version="${TAG}" \
--host-data-dir="${VOLUME_DIR}/etcd" \
--host-volumes-dir="${VOLUME_DIR}"
oc cluster status
IMAGE_WORKING_DIR=/var/lib/origin
docker cp origin:${IMAGE_WORKING_DIR}/openshift.local.config ${BASETMPDIR}
export ADMIN_KUBECONFIG="${MASTER_CONFIG_DIR}/admin.kubeconfig"
export CLUSTER_ADMIN_CONTEXT=$(oc config view --config=${ADMIN_KUBECONFIG} --flatten -o template --template='{{index . "current-context"}}')
sudo chmod -R a+rwX "${ADMIN_KUBECONFIG}"
export KUBECONFIG="${ADMIN_KUBECONFIG}"
os::log::info "To debug: export KUBECONFIG=$ADMIN_KUBECONFIG"
${OS_ROOT}/test/end-to-end/core.sh
|
tmckayus/oshinko-cli
|
vendor/github.com/openshift/origin/cmd/service-catalog/go/src/github.com/kubernetes-incubator/service-catalog/hack/test-end-to-end-docker.sh
|
Shell
|
apache-2.0
| 2,234 |
#!/bin/bash
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Discover all the ephemeral disks
block_devices=()
ephemeral_devices=$(curl --silent http://169.254.169.254/2014-11-05/meta-data/block-device-mapping/ | grep ephemeral)
for ephemeral_device in $ephemeral_devices; do
echo "Checking ephemeral device: ${ephemeral_device}"
aws_device=$(curl --silent http://169.254.169.254/2014-11-05/meta-data/block-device-mapping/${ephemeral_device})
device_path=""
if [ -b /dev/$aws_device ]; then
device_path="/dev/$aws_device"
else
# Check for the xvd-style name
xvd_style=$(echo $aws_device | sed "s/sd/xvd/")
if [ -b /dev/$xvd_style ]; then
device_path="/dev/$xvd_style"
fi
fi
if [[ -z ${device_path} ]]; then
echo " Could not find disk: ${ephemeral_device}@${aws_device}"
else
echo " Detected ephemeral disk: ${ephemeral_device}@${device_path}"
block_devices+=(${device_path})
fi
done
# These are set if we should move where docker/kubelet store data
# Note this gets set to the parent directory
move_docker=""
move_kubelet=""
apt-get update
docker_storage=${DOCKER_STORAGE:-aufs}
# Format the ephemeral disks
if [[ ${#block_devices[@]} == 0 ]]; then
echo "No ephemeral block devices found; will use aufs on root"
docker_storage="aufs"
else
echo "Block devices: ${block_devices[@]}"
# Remove any existing mounts
  for block_device in "${block_devices[@]}"; do
echo "Unmounting ${block_device}"
/bin/umount ${block_device}
sed -i -e "\|^${block_device}|d" /etc/fstab
done
if [[ ${docker_storage} == "btrfs" ]]; then
apt-get install --yes btrfs-tools
if [[ ${#block_devices[@]} == 1 ]]; then
echo "One ephemeral block device found; formatting with btrfs"
mkfs.btrfs -f ${block_devices[0]}
else
echo "Found multiple ephemeral block devices, formatting with btrfs as RAID-0"
mkfs.btrfs -f --data raid0 ${block_devices[@]}
fi
echo "${block_devices[0]} /mnt/ephemeral btrfs noatime 0 0" >> /etc/fstab
mkdir -p /mnt/ephemeral
mount /mnt/ephemeral
mkdir -p /mnt/ephemeral/kubernetes
move_docker="/mnt/ephemeral"
move_kubelet="/mnt/ephemeral/kubernetes"
elif [[ ${docker_storage} == "aufs-nolvm" ]]; then
if [[ ${#block_devices[@]} != 1 ]]; then
echo "aufs-nolvm selected, but multiple ephemeral devices were found; only the first will be available"
fi
mkfs -t ext4 ${block_devices[0]}
echo "${block_devices[0]} /mnt/ephemeral ext4 noatime 0 0" >> /etc/fstab
mkdir -p /mnt/ephemeral
mount /mnt/ephemeral
mkdir -p /mnt/ephemeral/kubernetes
move_docker="/mnt/ephemeral"
move_kubelet="/mnt/ephemeral/kubernetes"
elif [[ ${docker_storage} == "devicemapper" || ${docker_storage} == "aufs" ]]; then
# We always use LVM, even with one device
# In devicemapper mode, Docker can use LVM directly
# Also, fewer code paths are good
echo "Using LVM2 and ext4"
apt-get install --yes lvm2
# Don't output spurious "File descriptor X leaked on vgcreate invocation."
# Known bug: e.g. Ubuntu #591823
export LVM_SUPPRESS_FD_WARNINGS=1
    for block_device in "${block_devices[@]}"; do
pvcreate ${block_device}
done
vgcreate vg-ephemeral ${block_devices[@]}
if [[ ${docker_storage} == "devicemapper" ]]; then
# devicemapper thin provisioning, managed by docker
# This is the best option, but it is sadly broken on most distros
# Bug: https://github.com/docker/docker/issues/4036
# 80% goes to the docker thin-pool; we want to leave some space for host-volumes
lvcreate -l 80%VG --thinpool docker-thinpool vg-ephemeral
DOCKER_OPTS="${DOCKER_OPTS} --storage-opt dm.thinpooldev=/dev/mapper/vg--ephemeral-docker--thinpool"
# Note that we don't move docker; docker goes direct to the thinpool
# Remaining space (20%) is for kubernetes data
# TODO: Should this be a thin pool? e.g. would we ever want to snapshot this data?
lvcreate -l 100%FREE -n kubernetes vg-ephemeral
mkfs -t ext4 /dev/vg-ephemeral/kubernetes
mkdir -p /mnt/ephemeral/kubernetes
echo "/dev/vg-ephemeral/kubernetes /mnt/ephemeral/kubernetes ext4 noatime 0 0" >> /etc/fstab
mount /mnt/ephemeral/kubernetes
move_kubelet="/mnt/ephemeral/kubernetes"
else
# aufs
# We used to split docker & kubernetes, but we no longer do that, because
# host volumes go into the kubernetes area, and it is otherwise very easy
# to fill up small volumes.
#
# No need for thin pool since we are not over-provisioning or doing snapshots
# (probably shouldn't be doing snapshots on ephemeral disk? Should be stateless-ish.)
# Tried to do it, but it cause problems (#16188)
lvcreate -l 100%VG -n ephemeral vg-ephemeral
mkfs -t ext4 /dev/vg-ephemeral/ephemeral
mkdir -p /mnt/ephemeral
echo "/dev/vg-ephemeral/ephemeral /mnt/ephemeral ext4 noatime 0 0" >> /etc/fstab
mount /mnt/ephemeral
mkdir -p /mnt/ephemeral/kubernetes
move_docker="/mnt/ephemeral"
move_kubelet="/mnt/ephemeral/kubernetes"
fi
else
echo "Ignoring unknown DOCKER_STORAGE: ${docker_storage}"
fi
fi
if [[ ${docker_storage} == "btrfs" ]]; then
DOCKER_OPTS="${DOCKER_OPTS} -s btrfs"
elif [[ ${docker_storage} == "aufs-nolvm" || ${docker_storage} == "aufs" ]]; then
# Install aufs kernel module
# Fix issue #14162 with extra-virtual
apt-get install --yes linux-image-extra-$(uname -r) linux-image-extra-virtual
# Install aufs tools
apt-get install --yes aufs-tools
DOCKER_OPTS="${DOCKER_OPTS} -s aufs"
elif [[ ${docker_storage} == "devicemapper" ]]; then
DOCKER_OPTS="${DOCKER_OPTS} -s devicemapper"
else
echo "Ignoring unknown DOCKER_STORAGE: ${docker_storage}"
fi
if [[ -n "${move_docker}" ]]; then
# Move docker to e.g. /mnt
if [[ -d /var/lib/docker ]]; then
mv /var/lib/docker ${move_docker}/
fi
mkdir -p ${move_docker}/docker
ln -s ${move_docker}/docker /var/lib/docker
DOCKER_ROOT="${move_docker}/docker"
DOCKER_OPTS="${DOCKER_OPTS} -g ${DOCKER_ROOT}"
fi
if [[ -n "${move_kubelet}" ]]; then
# Move /var/lib/kubelet to e.g. /mnt
# (the backing for empty-dir volumes can use a lot of space!)
if [[ -d /var/lib/kubelet ]]; then
mv /var/lib/kubelet ${move_kubelet}/
fi
mkdir -p ${move_kubelet}/kubelet
ln -s ${move_kubelet}/kubelet /var/lib/kubelet
KUBELET_ROOT="${move_kubelet}/kubelet"
fi
|
combk8s/kubernetes
|
cluster/aws/templates/format-disks.sh
|
Shell
|
apache-2.0
| 7,066 |
#!/bin/bash
pause() {
while true; do
read -p "$1 " yn
case $yn in
[Yy]* ) break;;
[Nn]* ) exit;;
* ) echo "Please answer yes or no.";;
esac
done
}
cd `dirname $0`/..
SOURCE=`pwd`
CUR_VERSION=`node -e 'console.log(require("./package.json").version)'`
git --no-pager log --first-parent --oneline v$CUR_VERSION..master
echo "current version is $CUR_VERSION"
read -p "enter version number for the build " VERSION_NUM
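# Bump the version string in the package manifests and the settings menu, and prepend a dated entry to ChangeLog.txt.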
node -e "
var fs = require('fs');
var version = '$VERSION_NUM';
function replaceVersion(str) {
return str.replace(/(['\"]?version['\"]?\s*[:=]\s*['\"])[\\d.\\w\\-]+(['\"])/, function(_, m1, m2) {
return m1 + version + m2;
});
}
function update(path, replace) {
var pkg = fs.readFileSync(path, 'utf8');
pkg = (replace || replaceVersion)(pkg);
fs.writeFileSync(path, pkg, 'utf8');
}
update('package.json');
update('build/package.json');
update('./lib/ace/ext/menu_tools/generate_settings_menu.js');
update('ChangeLog.txt', function(str) {
var date='"`date +%Y.%m.%d`"';
return date + ' Version ' + version + '\n' + str.replace(/^\d+.*/, '').replace(/^\n/, '');
});
"
pause "versions updated. do you want to start build script? [y/n]"
node Makefile.dryice.js full
cd build
git add .
git commit --all -m "package `date +%d.%m.%y`"
echo "build task completed."
pause "continue creating the tag for v$VERSION_NUM [y/n]"
if [[ ${VERSION_NUM} != *"-"* ]]; then
git tag "v"$VERSION_NUM
fi
pause "continue pushing to github? [y/n]"
git push --progress --tags "origin" HEAD:gh-pages HEAD:master
echo "build repository updated"
pause "continue update ace repo? [y/n]"
cd ..
echo "new commit added"
pause "continue creating the tag for v$VERSION_NUM [y/n]"
if [[ ${VERSION_NUM} != *"-"* ]]; then
git tag "v"$VERSION_NUM
fi
pause "continue pushing to github? [y/n]"
git push --progress --tags "origin" HEAD:gh-pages HEAD:master
echo "All done!"
pause "May I go now? [y/n]"
|
yoer/hue
|
tools/ace-editor/tool/release.sh
|
Shell
|
apache-2.0
| 2,079 |
# Authors:
# https://github.com/tristola
#
# Docker-compose related zsh aliases
# Aliases ###################################################################
# Use dco as alias for docker-compose, since dc on *nix is 'dc - an arbitrary precision calculator'
# https://www.gnu.org/software/bc/manual/dc-1.05/html_mono/dc.html
alias dco='docker-compose'
alias dcb='docker-compose build'
alias dce='docker-compose exec'
alias dcps='docker-compose ps'
alias dcrestart='docker-compose restart'
alias dcrm='docker-compose rm'
alias dcr='docker-compose run'
alias dcstop='docker-compose stop'
alias dcup='docker-compose up'
alias dcdn='docker-compose down'
alias dcl='docker-compose logs'
alias dclf='docker-compose logs -f'
|
kristiankubik/dotfiles
|
zsh_plugins/docker-compose/docker-compose.plugin.zsh
|
Shell
|
mit
| 722 |
bash -ex .travis-opam.sh
## Add more stuff here
|
Drup/empty
|
.travis-ci.sh
|
Shell
|
isc
| 48 |
export HOMEBREW_NO_ENV_HINTS=true
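# Add Homebrew's zsh completion functions to fpath when brew is available.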
if type brew &>/dev/null
then
fpath+="$(brew --prefix)/share/zsh/site-functions"
fi
|
lburgazzoli/lb-dotfiles
|
.zshrc.d/199-core-brew.zsh
|
Shell
|
mit
| 121 |
storm jar target/yu-storm-hack-0.0.1-SNAPSHOT-jar-with-dependencies.jar yu.storm.LocalTestTopology twitter web 3 1 10 1 1
|
ZhuangER/social-network-analysi-with-lambda-architecture
|
storm-wordcount/run.sh
|
Shell
|
mit
| 121 |
#!/bin/bash
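# try [--detach|--disown] <label> <command...>: print the label, run the command silently, and report ok/error.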
try() {
local DETACH=false
local DISOWN=false
while true; do
case "$1" in
--detach) DETACH=true ;;
--disown) DISOWN=true ;;
*) break ;;
esac
shift
done
printf "$1"
shift
if $DISOWN; then
    "$@" >/dev/null 2>&1 & disown
  elif $DETACH; then
    "$@" >/dev/null 2>&1 &
  else
    "$@" >/dev/null 2>&1
fi
if [[ $? -eq 0 ]]; then
printf " [\e[32m\e[1mok\e[0m]\n"
else
printf " [\e[31m\e[1merror\e[0m]\n"
fi
}
|
ammongit/scripts
|
wip/try.sh
|
Shell
|
mit
| 484 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for USN-2553-2
#
# Security announcement date: 2015-04-01 00:00:00 UTC
# Script generation date: 2017-01-01 21:04:25 UTC
#
# Operating System: Ubuntu 14.10
# Architecture: i686
#
# Vulnerable packages fix on version:
# - libtiff5:4.0.3-10ubuntu0.2
#
# Last versions recommended by security team:
# - libtiff5:4.0.3-10ubuntu0.2
#
# CVE List:
# - CVE-2014-8127
# - CVE-2014-8128
# - CVE-2014-8129
# - CVE-2014-8130
# - CVE-2014-9330
# - CVE-2014-9655
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade libtiff5=4.0.3-10ubuntu0.2 -y
|
Cyberwatch/cbw-security-fixes
|
Ubuntu_14.10/i686/2015/USN-2553-2.sh
|
Shell
|
mit
| 719 |
#!/bin/bash
set -x
: ${ZK_VERSION="3.4.6"}
case $1 in
-travis)
echo "install zk for travis"
wget "http://apache.cs.utah.edu/zookeeper/zookeeper-${ZK_VERSION}/zookeeper-${ZK_VERSION}.tar.gz"
tar -xf "zookeeper-${ZK_VERSION}.tar.gz"
mv zookeeper-${ZK_VERSION} zk
mv ./zk/conf/zoo_sample.cfg ./zk/conf/zoo.cfg
;;
esac
|
talbright/go-curator
|
script/install_zk.sh
|
Shell
|
mit
| 344 |
# Use `hub` as our git wrapper:
# http://defunkt.github.com/hub/
hub_path=$(which hub)
if (( $+commands[hub] ))
then
alias git=$hub_path
fi
# The rest of my fun git aliases
alias gl='git log'
alias glog="git log --graph --pretty=format:'%Cred%h%Creset %an: %s - %Creset %C(yellow)%d%Creset %Cgreen(%cr)%Creset' --abbrev-commit --date=relative"
alias gd='git diff'
alias gp='git pull --rebase'
alias gc='git commit'
alias gca='git commit -a'
alias gco='git checkout'
alias gcb='git copy-branch-name'
alias gb='git branch'
alias gs='git status -sb' # upgrade your git if -sb breaks for you. it's fun.
alias gpu='git push'
|
antico5/dotfiles
|
git/aliases.zsh
|
Shell
|
mit
| 625 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for USN-1752-1
#
# Security announcement date: 2013-02-27 00:00:00 UTC
# Script generation date: 2017-02-01 21:01:44 UTC
#
# Operating System: Ubuntu 12.04 LTS
# Architecture: i386
#
# Vulnerable packages fix on version:
# - libgnutls26:2.12.14-5ubuntu3.2
#
# Last versions recommended by security team:
# - libgnutls26:2.12.14-5ubuntu3.13
#
# CVE List:
# - CVE-2013-1619
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade libgnutls26=2.12.14-5ubuntu3.13 -y
|
Cyberwatch/cbw-security-fixes
|
Ubuntu_12.04_LTS/i386/2013/USN-1752-1.sh
|
Shell
|
mit
| 637 |
#!/bin/bash
echo "Compiling..."
echo " "
cd src/
g++ -c main.cpp Particle.cpp -I/Documents/SFML/SFML_SRC/include
echo "Linking..."
echo " "
if [ -e main.cpp~ ] ;
then
rm main.cpp~
fi
mv *.o ../linkers
cd ../linkers
g++ main.o Particle.o -o particle -L/Documents/SFML/SFML_SRC/lib -lsfml-graphics -lsfml-window -lsfml-system
mv particle ../
cd ../
if [ -e compile.sh~ ] ;
then
rm compile.sh~
fi
cd include/
if [ -e *.hh~ ];
then
rm *.hh~
fi
cd ..
echo "Executing..."
echo " "
./particle
|
freddyox/particle-dev
|
compile.sh
|
Shell
|
mit
| 510 |
rabbitmq-server -detached -setcookie 'SeansSecretRabbitMqCookie'
sleep 10
rabbitmqctl start_app
sleep 10
/var/MyApp/rabbitmqadmin import /var/MyApp/rabbit_queue_defs.json
/var/MyApp/WorldServer &
/var/MyApp/ElixirMessagingServer
|
spoconnor/ElixirMessagingServer
|
Scripts/start_all.sh
|
Shell
|
mit
| 233 |
#!/bin/bash
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# 96-well source plate
pyTecanFluent LITE --prefix /tmp/LITE_basic_96well \
$DIR/../../tests/data/basic_96well.txt
# # 384-well source plate
# pyTecanFluent map2robot --prefix /tmp/Map2Robot_basic_384well \
# $DIR/../../tests/data/basic_384well.txt
# # 96-well source plate, 384-well destination plate
# pyTecanFluent map2robot --prefix /tmp/Map2Robot_basic_96-384well \
# --dest-type '384 Well Biorad PCR' \
# $DIR/../../tests/data/basic_96well.txt
# # 96-well source plate, 96-well designation, no primers
# pyTecanFluent map2robot --prefix /tmp/Map2Robot_basic_96well_noPrimer \
# --prm-volume 0 \
# $DIR/../../tests/data/basic_96well.txt
|
leylabmpi/pyTecanFluent
|
examples/LITE/basic.sh
|
Shell
|
mit
| 728 |
#!/bin/sh
mypath="/usr/local/bin"
snaplist=""
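# getOlderSnap <dataset> <count>: fill $snaplist with the <count> oldest snapshots of <dataset>, ordered by the zbackup:time property.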
getOlderSnap() {
snaplist=`zfs list -Hr -t snapshot -o name,zbackup:time $1 | grep -v $1/ | grep -v '-' | sort -k 2 | head -n $2`
}
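# getAllDataset: collect every dataset that has at least one snapshot carrying the zbackup:time property.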
getAllDataset() {
datasets=`zfs list -Hr -t snapshot -o name,zbackup:time | grep -v '-' | awk '{print $1}' | cut -d '@' -f 1 | uniq`
}
# Args : dataset, rotation_count
createSnap() {
countCursnap $dataset
count=$?
echo "Get amount of current snapshot : $count "
time=`date +%s`
if [ $2 -gt $count ]; then
echo "Adding new snapshot $dataset@$time"
# Simply add new snapshot
zfs snapshot $dataset@$time
# Add our own attribute
# zfs set zbackup:mark 1 $dataset@$time
zfs set zbackup:time=$time $dataset@$time
else
# Delete the snapshot older than rotation count $3
echo "Need to delete" $((count-$2+1)) "snapshot"
getOlderSnap $dataset $((count-$2+1))
if test -z "$snaplist"; then
echo "Cannot find snapshot list"
else
# Delete the entry in $snaplist
IFS=$'\n'
for i in ${snaplist}
do
unset IFS
local name=`echo $i | cut -d ' ' -f 1`
echo "Deleting snapshot : $name"
zfs destroy $name
done
fi
echo "Adding new snapshot : $dataset@$time"
# Add new snapshot
zfs snapshot $dataset@$time
# Add our own attribute
# zfs set zbackup:mark 1 $dataset@$time
zfs set zbackup:time=$time $dataset@$time
fi
}
# Args : dataset
countCursnap() {
# zfs list -r -t snapshot $1
count=`zfs list -Hr -t snapshot -o zbackup:time $1 | grep -v $1/ | grep -v '-' | wc -l`
count=`expr $count`
return $count
}
if [ "$#" = "0" ]; then
echo "Usage: ./zbackup [[--list | --delete] target dataset [ID] | target dataset [rotation count]]"
fi
arg=0
if [ "$1" = "--list" ]; then
# Format : ./zbackup --list [target_dataset [ID]]
counter=0
tmp=""
timestamp=""
# Get dataset name
if test -z $2; then
datasets=""
getAllDataset
IFS=$'\n'
for i in ${datasets}
do
unset IFS
${mypath}/zbackup --list $i
done
exit
else
dataset=$2
fi
if ! test -z $3; then
ID=$3
fi
echo "====List snapshot===="
echo "Dataset : $dataset"
echo "Target ID : $ID"
getOlderSnap $dataset 100
printf "ID\tDataset\t\tTime\n"
IFS=$'\n'
if test -z $3; then
# List all snapshot
for i in ${snaplist}
do
unset IFS
counter=$((counter+1))
tmp=`echo $i | cut -d ' ' -f 2`
timestamp=`date -j -f %s $tmp +"%Y-%m-%d %H:%M:%S"`
printf "$counter\t$dataset\t$timestamp\n"
done
else
# List the target snapshot
for i in ${snaplist}
do
unset IFS
counter=$((counter+1))
if [ $counter -eq $3 ]; then
tmp=`echo $i | cut -d ' ' -f 2`
timestamp=`date -j -f %s $tmp +"%Y-%m-%d %H:%M:%S"`
printf "$counter\t$dataset\t$timestamp\n"
break
fi
done
fi
elif [ "$1" = "--delete" ]; then
# Format : ./zbackup --delete [target_dataset [ID]]
# Get dataset name
if test -z $2; then
exit
else
dataset=$2
fi
# Get optional sequence id
if ! test -z $3; then
ID=$3
fi
echo "====Deleting snapshot===="
echo "Dataset : $dataset"
echo "Target ID : $ID"
getOlderSnap $dataset 100
IFS=$'\n'
if test -z $3; then
# Delete all snapshot
for i in ${snaplist}
do
unset IFS
name=`echo $i | cut -d ' ' -f 1`
echo "Deleting snapshot : $name "
zfs destroy $name
done
else
# Delete the target snapshot
for i in ${snaplist}
do
unset IFS
counter=$((counter+1))
name=`echo $i | cut -d ' ' -f 1`
if [ $counter -eq $3 ]; then
zfs destroy $name
break
fi
done
fi
else
# Format : ./zbackup target_dataset [rotation count]
# Get dataset name
if test -z $1; then
exit
else
dataset=$1
fi
# Get rotate count
if ! test -z $2; then
rotate=$2
else
rotate=20
fi
# cur_time=`date +"%Y-%m-%d %H:%M:%S"`
echo "====Creating snapshot===="
echo "Dataset : $dataset"
echo "Rotation: $rotate"
createSnap $dataset $rotate
fi
|
iverJisty/zfs-snapshot-manage
|
zbackup.sh
|
Shell
|
mit
| 4,730 |
#!/bin/sh
autoreconf -v --force --install || exit $?
[ "$NOCONFIGURE" ] || exec ./configure "$@"
|
kitsunyan/appindicator-collection
|
autogen.sh
|
Shell
|
mit
| 97 |
#!/bin/bash
# Script for loading sql into pdns database
# sql dump file to load (first argument)
DOM=$1
# postgres container name
CON=consup_postgres_common
# database name
. .config
cat $DOM | docker exec -i $CON gosu postgres psql $DB_NAME
|
LeKovr/consup
|
Dockerfiles/pdns/skel/load.sh
|
Shell
|
mit
| 224 |
#!/bin/sh
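# Add the Vivaldi apt repository and its signing key, then install the stable package.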
sudo add-apt-repository 'deb http://repo.vivaldi.com/archive/deb/ stable main'
wget -qO- http://repo.vivaldi.com/archive/linux_signing_key.pub | sudo apt-key add -
sudo apt update
sudo apt install vivaldi-stable
|
locoesso/install
|
vivaldi-stable.sh
|
Shell
|
mit
| 222 |
#!/bin/bash
#Give message when starting the container
printf "\n \n \n ------------------------Starting container ------------------------ \n \n \n"
# Configure user nobody to match unRAID's settings
#export DEBIAN_FRONTEND="noninteractive"
usermod -u 99 nobody
usermod -g 100 nobody
usermod -d /home nobody
chown -R nobody:users /home
#chsh -s /bin/bash nobody
cp /converteac3.sh /config/converteac3.sh
chown -R nobody:users /config
echo "[Info] Starting script"
bash /config/converteac3.sh
#su - nobody -c /config/converteac3.sh
echo "Stopping Container, script finished.."
|
mezz64/ConvertEAC3
|
start.sh
|
Shell
|
mit
| 582 |
#!/usr/bin/env bash
CURRENT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "$CURRENT_DIR/helpers.sh"
print_git_shortsha() {
local working_dir=$(tmux display-message -p "#{pane_current_path}")
local shortsha=$(cd "$working_dir" && git log -1 --pretty=format:%h)
echo $shortsha
}
main() {
print_git_shortsha
}
main
|
shuber/tmux-git
|
scripts/shortsha.sh
|
Shell
|
mit
| 339 |