code (string, 2–1.05M chars) | repo_name (string, 5–110) | path (string, 3–922) | language (1 class) | license (15 classes) | size (int64, 2–1.05M) |
---|---|---|---|---|---|
DBHOST=localhost
DBNAME=xxxx
DBUSER=yyyy
|
TreeBASE/treebase
|
treebase-core/db/tb1load/scripts/dbinfo.sh
|
Shell
|
bsd-3-clause
| 41 |
#! /bin/sh
. ../../dttools/test/test_runner_common.sh
. ./parrot-test.sh
tmp_dir_main=${PWD}/parrot_temp_dir
tmp_dir_hitcher=${PWD}/parrot_temp_dir_hitcher
test_file=/cvmfs/atlas.cern.ch/repo/conditions/logDir/lastUpdate
prepare()
{
$0 clean
}
run()
{
if parrot --check-driver cvmfs
then
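# The first parrot instance warms the cvmfs cache and stays alive while a
# second "hitcher" instance reuses that cache via --cvmfs-alien-cache.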
parrot -t${tmp_dir_main} -- sh -c "head $test_file > /dev/null; sleep 10" &
pid_main=$!
parrot -t${tmp_dir_hitcher} --cvmfs-alien-cache=${tmp_dir_main}/cvmfs -- sh -c "stat $test_file"
status=$?
kill $pid_main
return $status
else
return 0
fi
}
clean()
{
if [ -n "${tmp_dir_main}" -a -d "${tmp_dir_main}" ]
then
rm -rf ${tmp_dir_main}
fi
if [ -n "${tmp_dir_hitcher}" -a -d ${tmp_dir_hitcher} ]
then
rm -rf ${tmp_dir_hitcher}
fi
return 0
}
dispatch "$@"
# vim: set noexpandtab tabstop=4:
|
btovar/cctools
|
parrot/test/TR_parrot_cvmfs_alien_cache.sh
|
Shell
|
gpl-2.0
| 817 |
#
# rhsm-icon bash completion script
# based on rhn-migrate-classic-to-rhsm bash completion script
#
# main completion function
_rhsm-icon()
{
local first cur prev opts base
COMPREPLY=()
first="${COMP_WORDS[1]}"
cur="${COMP_WORDS[COMP_CWORD]}"
prev="${COMP_WORDS[COMP_CWORD-1]}"
opts="-h --help --help-all --help-gtk -c --check-period -d
--debug -f --force-icon -i --check-immediately --display"
case "${cur}" in
-*)
COMPREPLY=($(compgen -W "${opts}" -- ${cur}))
return 0
;;
esac
COMPREPLY=($(compgen -W "${opts}" -- ${cur}))
return 0
}
complete -F _rhsm-icon rhsm-icon
|
vritant/subscription-manager
|
etc-conf/rhsm-icon.completion.sh
|
Shell
|
gpl-2.0
| 601 |
#
# Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
#
# @test
# @ignore until 6543856 is fixed
# @bug 4990825
# @summary attach to external but local JVM processes
# @library ../../testlibrary
# @build Sleeper
# @build JavaProcess
# @build MonitorVmStartTerminate
# @run shell MonitorVmStartTerminate.sh
#
. ${TESTSRC-.}/../../testlibrary/utils.sh
setup
verify_os
JAVA="${TESTJAVA}/bin/java"
CP=${TESTJAVA}${FS}lib${FS}tools.jar${PS}${TESTCLASSES}
${JAVA} ${TESTVMOPTS} -classpath ${CP} MonitorVmStartTerminate
|
openjdk/jdk7u
|
jdk/test/sun/jvmstat/monitor/MonitoredVm/MonitorVmStartTerminate.sh
|
Shell
|
gpl-2.0
| 1,498 |
#!/bin/bash
export VMNAME=${VMNAME:-"$1"}
export VMNAME=${VMNAME:-"omerovm"}
export MEMORY=${MEMORY:-"1024"}
export SSH_PF=${SSH_PF:-"2222"}
export OMERO_PORT=${OMERO_PORT:-"4063"}
export OMERO_PF=${OMERO_PF:-"4063"}
export OMEROS_PORT=${OMEROS_PORT:-"4064"}
export OMEROS_PF=${OMEROS_PF:-"4064"}
export OMERO_JOB=${OMERO_JOB:-"OMERO-stable"}
set -e
set -u
set -x
VBOX="VBoxManage --nologo"
OS=`uname -s`
ATTEMPTS=0
MAXATTEMPTS=5
DELAY=2
NATADDR="10.0.2.15"
##################
##################
# SCRIPT FUNCTIONS
##################
##################
function checknet ()
{
UP=$($VBOX guestproperty enumerate $VMNAME | grep "$NATADDR") || true
ATTEMPTS=$(($ATTEMPTS + 1))
}
function installvm ()
{
ssh-keygen -R "[localhost]:2222" -f ~/.ssh/known_hosts
chmod 600 ./omerovmkey
SCP="scp -2 -o NoHostAuthenticationForLocalhost=yes -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o CheckHostIP=no -o PasswordAuthentication=no -o ChallengeResponseAuthentication=no -o PreferredAuthentications=publickey -i omerovmkey -P $SSH_PF"
SSH="ssh -2 -o StrictHostKeyChecking=no -i omerovmkey -p $SSH_PF -t"
echo "Copying scripts to VM"
$SCP ../../../target/OMERO.server*zip omero@localhost:~/
$SCP driver.sh omero@localhost:~/
$SCP setup_userspace.sh omero@localhost:~/
$SCP setup_postgres.sh omero@localhost:~/
$SCP setup_environment.sh omero@localhost:~/
$SCP setup_omero.sh omero@localhost:~/
$SCP setup_nginx.sh omero@localhost:~/
$SCP setup_omero_daemon.sh omero@localhost:~/
$SCP omero-init.d omero@localhost:~/
$SCP omero-web-init.d omero@localhost:~/
$SCP virtualbox-network-fix-init.d omero@localhost:~/
$SCP virtualbox_fix.sh omero@localhost:~/
$SCP nginx-control.sh omero@localhost:~/
echo "ssh : exec driver.sh"
$SSH omero@localhost "export OMERO_JOB=$OMERO_JOB; bash /home/omero/driver.sh"
sleep 10
echo "ALL DONE!"
}
function failfast ()
{
exit 1
}
function poweroffvm ()
{
$VBOX list runningvms | grep "$VMNAME" && {
VBoxManage controlvm "$VMNAME" poweroff && sleep 10
} || true
}
function poweronvm ()
{
$VBOX list runningvms | grep "$VMNAME" || {
$VBOX startvm "$VMNAME" --type headless && sleep 45
}
}
function rebootvm ()
{
poweroffvm
poweronvm
}
function killallvbox ()
{
ps aux | grep [V]Box && {
if [ "$OS" == "Darwin" ]; then
killall -m [V]Box
else [ "$OS" == "Linux" ];
killall -r [V]Box
fi
} || true
ps aux | grep [V]irtualBox && {
if [ "$OS" == "Darwin" ]; then
killall -m [V]irtualBox
else [ "$OS" == "Linux" ];
killall -r [V]irtualBox
fi
} || true
}
function checkhddfolder ()
{
if test -e $HOME/Library/VirtualBox; then
export HARDDISKS=${HARDDISKS:-"$HOME/Library/VirtualBox/HardDisks/"}
elif test -e $HOME/.VirtualBox; then
export HARDDISKS=${HARDDISKS:-"$HOME/.VirtualBox/HardDisks/"}
else
echo "Cannot find harddisks! Trying setting HARDDISKS"
failfast
fi
}
function deletevm ()
{
poweroffvm
$VBOX list vms | grep "$VMNAME" && {
VBoxManage storageattach "$VMNAME" --storagectl "SATA CONTROLLER" --port 0 --device 0 --type hdd --medium none
VBoxManage unregistervm "$VMNAME" --delete
VBoxManage closemedium disk $HARDDISKS"$VMNAME".vdi --delete
} || true
}
function createvm ()
{
$VBOX list vms | grep "$VMNAME" || {
VBoxManage clonehd "$OMERO_BASE_IMAGE" "$HARDDISKS$VMNAME.vdi"
VBoxManage createvm --name "$VMNAME" --register --ostype "Debian"
VBoxManage storagectl "$VMNAME" --name "SATA CONTROLLER" --add sata
VBoxManage storageattach "$VMNAME" --storagectl "SATA CONTROLLER" --port 0 --device 0 --type hdd --medium $HARDDISKS$VMNAME.vdi
VBoxManage modifyvm "$VMNAME" --nic1 nat --nictype1 "82545EM"
VBoxManage modifyvm "$VMNAME" --memory $MEMORY --acpi on
VBoxManage modifyvm "$VMNAME" --natpf1 "ssh,tcp,127.0.0.1,2222,10.0.2.15,22"
VBoxManage modifyvm "$VMNAME" --natpf1 "omero-unsec,tcp,127.0.0.1,4063,10.0.2.15,4063"
VBoxManage modifyvm "$VMNAME" --natpf1 "omero-ssl,tcp,127.0.0.1,4064,10.0.2.15,4064"
VBoxManage modifyvm "$VMNAME" --natpf1 "omero-web,tcp,127.0.0.1,8080,10.0.2.15,8080"
}
}
####################
####################
# SCRIPT ENTRY POINT
####################
####################
checkhddfolder
killallvbox
deletevm
createvm
poweronvm
checknet
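# Reboot-and-recheck loop: keep rebooting until the guest publishes its NAT
# address ($NATADDR) as a guest property or MAXATTEMPTS is exhausted.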
if [[ -z "$UP" ]]
then
while [[ -z "$UP" && $ATTEMPTS -lt $MAXATTEMPTS ]]
do
rebootvm
checknet
sleep $DELAY
done
if [[ -z "$UP" ]]
then
echo "No connection to x. Failure after $ATTEMPTS tries"
failfast
fi
fi
echo "Network up after $ATTEMPTS tries"
installvm
bash export_ova.sh ${VMNAME}
|
jballanc/openmicroscopy
|
docs/install/VM/omerovm.sh
|
Shell
|
gpl-2.0
| 4,584 |
# Version number and release date.
VERSION_NUMBER=0.18.3
RELEASE_DATE=2013-07-07 # in "date +%Y-%m-%d" format
|
OS2World/DEV-MISC-gettext
|
version.sh
|
Shell
|
gpl-3.0
| 115 |
#! /bin/sh
# Download and build glib 2.x statically with all dependencies and then
# compile GNU Midnight Commander against it.
# Copyright (C) 2003 Pavel Roskin
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This script is incomplete! It doesn't download libiconv. This is OK
# for glibc-based systems, but probably not for others. This limitation
# is known. Please don't report it.
: ${MC_TOPDIR=`pwd`}
: ${WORK_TOPDIR=$MC_TOPDIR/build_glib2}
: ${TMP_INSTDIR=$WORK_TOPDIR/tmp-inst}
: ${GLIB_VERSION=2.21.0}
: ${PKGC_VERSION=0.23}
: ${GETTEXT_VERSION=0.17}
GLIB_DIR="glib-$GLIB_VERSION"
GLIB_TARBALL="glib-$GLIB_VERSION.tar.gz"
GLIB_URL="ftp://ftp.gtk.org/pub/glib/2.21/$GLIB_TARBALL"
PKGC_DIR="pkg-config-$PKGC_VERSION"
PKGC_TARBALL="pkg-config-$PKGC_VERSION.tar.gz"
PKGC_URL="http://pkgconfig.freedesktop.org/releases/$PKGC_TARBALL"
GETTEXT_DIR="gettext-$GETTEXT_VERSION/gettext-runtime"
GETTEXT_TARBALL="gettext-$GETTEXT_VERSION.tar.gz"
GETTEXT_URL="ftp://ftp.gnu.org/gnu/gettext/$GETTEXT_TARBALL"
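# Fetch a URL with the first downloader that works: curl, then wget with
# passive FTP, then plain wget, then ftp; abort the script if all fail.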
get_file() {
curl --remote-name "$1" || \
wget --passive-ftp "$1" || \
wget "$1" || \
ftp "$1" </dev/null || \
exit 1
}
if test ! -d $WORK_TOPDIR; then
mkdir -p $WORK_TOPDIR
fi
if test -f $MC_TOPDIR/src/dir.c; then : ; else
echo "Not in the top-level directory of GNU Midnight Commander." 2>&1
exit 1
fi
if test -f $MC_TOPDIR/configure; then : ; else
$MC_TOPDIR/autogen.sh --help >/dev/null || exit 1
fi
rm -rf "$TMP_INSTDIR"
PATH="$TMP_INSTDIR/bin:$PATH"
export PATH
# Compile gettext
cd "$WORK_TOPDIR"
if gzip -vt "$GETTEXT_TARBALL"; then : ; else
get_file "$GETTEXT_URL"
fi
rm -rf "$GETTEXT_DIR"
gzip -cd "$GETTEXT_TARBALL" | tar xf -
cd "$GETTEXT_DIR"
if test -f src/gettext.c; then : ; else
echo "gettext source is incomplete" 2>&1
exit 1
fi
./configure --disable-shared --disable-nls --prefix="$TMP_INSTDIR" || exit 1
make all || exit 1
make install || exit 1
# Compile pkgconfig
cd "$WORK_TOPDIR"
if gzip -vt "$PKGC_TARBALL"; then : ; else
get_file "$PKGC_URL"
fi
rm -rf "$PKGC_DIR"
gzip -cd "$PKGC_TARBALL" | tar xf -
cd "$PKGC_DIR"
if test -f pkg.c; then : ; else
echo "pkgconfig source is incomplete" 2>&1
exit 1
fi
./configure --disable-shared --prefix="$TMP_INSTDIR" || exit 1
make all || exit 1
make install || exit 1
# Compile glib
cd "$WORK_TOPDIR"
if gzip -vt "$GLIB_TARBALL"; then : ; else
get_file "$GLIB_URL" || exit 1
fi
rm -rf "$GLIB_DIR"
gzip -cd "$GLIB_TARBALL" | tar xf -
cd "$GLIB_DIR"
if test -f glib/glist.c; then : ; else
echo "glib source is incomplete" 2>&1
exit 1
fi
./configure --disable-shared --prefix="$TMP_INSTDIR" \
PKG_CONFIG="$TMP_INSTDIR/bin/pkg-config" \
CPPFLAGS="-I$TMP_INSTDIR/include" \
LDFLAGS="-L$TMP_INSTDIR/lib" || exit 1
make all || exit 1
make install || exit 1
cd "$MC_TOPDIR"
./configure PKG_CONFIG="$TMP_INSTDIR/bin/pkg-config" "$@" || exit 1
make clean || exit 1
make || exit 1
echo "GNU Midnight Commander has been successfully compiled"
|
NoSeungHwan/mc_kor_dev
|
build-glib2.sh
|
Shell
|
gpl-3.0
| 3,575 |
#!/bin/bash
#
# Bootstraps everything installing Django and all required eggs, configuring
# the database and making sure a consistent Mono development environment is
# installed.
# Here we customize src/epiweb/settings.py by setting the user preferred
# database, language and country. Note that the database and the user used
# to connect should already exist.
DB_ENGINE=""
DB_NAME=""
DB_HOST=""
DB_PORT=""
DB_USERNAME=""
DB_USERNAME=""
DJANGO_ENGINE="unconfigured"
TIMEZONE=""
LANGUAGE=""
COUNTRY=""
echo ""
echo -n "Checking for pre-requisites: python ... "
exe_python="$(which python)"
if [ -n "$exe_python" ] ; then
echo "$exe_python"
else
echo "no found; place make sure Python 2.6 is installed"
echo ""
exit 1
fi
echo ""
echo -n "Checking for pre-requisites: easy_install ... "
exe_easy_install="$(which easy_install)"
if [ -n "$exe_easy_install" ] ; then
echo "$exe_easy_install"
else
echo "no found; place make sure setuptools are installed"
echo ""
exit 1
fi
echo ""
echo -n "Checking for pre-requisites: pip ... "
exe_pip="$(which pip)"
if [ -n "$exe_pip" ] ; then
echo "$exe_pip"
else
echo "no found; place make sure pip is installed (sudo easy_install pip)"
echo ""
exit 1
fi
echo ""
echo -n "Checking for pre-requisites: virtualenv ... "
exe_virtualenv="$(which virtualenv)"
if [ -n "$exe_virtualenv" ] ; then
echo "$exe_virtualenv"
else
echo "no found; place make sure virtualenv is installed (sudo pip install --upgrade virtualenv)"
echo ""
exit 1
fi
echo -n "Checking for pre-requisites: mysql ... "
exe_mysql="$(which mysql)"
if [ -n "$exe_mysql" ] ; then
echo "$exe_mysql"
else
echo "not found; automatic MySQL configuration disabled"
fi
echo -n "Checking for pre-requisites: mysql_config ... "
exe_mysql_config="$(which mysql_config)"
if [ -n "$exe_mysql_config" ] ; then
echo "$exe_mysql_config"
else
unset exe_mysql
echo "not found; automatic MySQL configuration disabled (please install the libmysqlclient-dev package)"
fi
echo -n "Checking for pre-requisites: psql ... "
exe_psql="$(which psql)"
if [ -n "$exe_psql" ] ; then
echo "$exe_psql"
else
echo "not found; automatic PostgreSQL configuration disabled"
fi
# Can we keep access to mapnik module and still use --no-site-packages?
# virtualenv --no-site-packages .
virtualenv .
source ./bin/activate
pip install -r requirements.txt
if [ -n "$exe_mysql" ] ; then
pip install MySQL-python
fi
if [ -n "$exe_psql" ] ; then
pip install psycopg2
fi
echo ""
while [ -z "$LANGUAGE" ] ; do
echo -n "Please, choose your country and language (be, it, nl, uk, pt, se): "
read line && [ -n "$line" ] && LANGUAGE="$line";
COUNTRY="$LANGUAGE"
done
while [ -z "$TIMEZONE" ] ; do
test -f /etc/timezone && TIMEZONE="$(cat /etc/timezone)"
echo -n "Please, enter your time zone (default is $TIMEZONE): "
read line && [ -n "$line" ] && TIMEZONE="$line";
done
while [ -z "$DB_ENGINE" ] ; do
echo -n "Please, choose a database engine (sqlite3, the default, postgresql or mysql): "
read line
DB_ENGINE="${line:-sqlite3}"
if [ "$DB_ENGINE" != "sqlite3" -a "$DB_ENGINE" != "mysql" -a "$DB_ENGINE" != "postgresql" ] ; then
DB_ENGINE=""
fi
done
if [ "$DB_ENGINE" = "sqlite3" ] ; then
DB_NAME="ggm.db"
DJANGO_ENGINE="sqlite3"
fi
if [ "$DB_ENGINE" = "mysql" ] ; then
echo ""
echo -n "Database host (just hit enter if on localhost/same host): "
read line && [ -n "$line"] && DB_HOST="$line";
echo -n "Database port (just hit enter if using default port): "
read line && [ -n "$line"] && DB_PORT="$line";
while [ -z "$DB_NAME" ] ; do
echo -n "Database name (database will be created if necessary; default is epiwork): "
read line && DB_NAME="${line:-epiwork}";
done
while [ -z "$DB_USERNAME" ] ; do
echo -n "Database username (user will be created if necessary; default is epiwork): "
read line && DB_USERNAME="${line:-epiwork}";
done
while [ -z "$DB_PASSWORD" ] ; do
echo -n "Database password: "
read line && [ -n "$line" ] && DB_PASSWORD="$line";
done
DJANGO_ENGINE="mysql"
if [ -n "$exe_mysql" ] ; then
echo ""
echo "Note: the following data will NOT be saved, but it is necessary to create"
echo "the database '$DB_NAME' and the user '$DB_USERNAME' that will be used to"
echo "connect to database for normal operation."
echo ""
echo -n "Please, insert MySQL administrator's username (default is root): "
read line
root_username="${line:-root}"
root_password=""
while [ -z "$root_password" ] ; do
echo -n "Please, insert MySQL administrator's passsword: "
read line && [ -n "$line" ] && root_password="$line";
done
fi
fi
if [ "$DB_ENGINE" = "postgresql" ] ; then
echo ""
echo -n "Database host (just hit enter if on localhost/same host): "
read line && [ -n "$line" ] && DB_HOST="$line";
echo -n "Database port (just hit enter if using default port): "
read line && [ -n "$line" ] && DB_PORT="$line";
while [ -z "$DB_NAME" ] ; do
echo -n "Database name (database will be created if necessary; default is epiwork): "
read line && DB_NAME="${line:-epiwork}";
done
while [ -z "$DB_USERNAME" ] ; do
echo -n "Database username (user will be created if necessary; default is epiwork): "
read line && DB_USERNAME="${line:-epiwork}";
done
while [ -z "$DB_PASSWORD" ] ; do
echo -n "Database password: "
read line && [ -n "$line" ] && DB_PASSWORD="$line";
done
DJANGO_ENGINE="postgresql_psycopg2"
if [ -n "$exe_psql" ] ; then
echo ""
echo "Note: the following data will NOT be saved, but it is necessary to create"
echo "the database '$DB_NAME' and the user '$DB_USERNAME' that will be used to"
echo "connect to database for normal operation."
echo ""
echo -n "Please, insert PostgrSQL administrator's username (default is postgres): "
read line
root_username="${line:-postgres}"
fi
fi
echo ""
echo "Configuration parameters:"
echo ""
echo " country and language: $LANGUAGE"
echo " time zone: $TIMEZONE"
echo " database engine: $DB_ENGINE ($DJANGO_ENGINE)"
echo " database name: $DB_NAME"
echo " database host: ${DB_HOST:-localhost}"
echo " database port: ${DB_PORT:-(default)}"
echo " database username: $DB_USERNAME"
echo " database password: $DB_PASSWORD"
echo ""
echo "We are about to generate a new Django configuration and to create a new"
echo "database. This will destroy your previous configuration and make you lose"
echo "all you data. Make sure all parameters are correct before proceeding."
echo ""
echo -n "Please, type YES if you want to preceed or ABORT to exit now: "
line=""
while [ -z "$line" ] ; do
read line
if [ "$line" = "ABORT" ] ; then exit 0 ; fi
if [ "$line" != "YES" ] ; then line="" ; fi
done
echo ""
if [ "$DB_ENGINE" = "sqlite3" ] ; then
echo -n "Creating database $DB_NAME ... "
rm -f $DB_NAME
echo "done"
fi
if [ "$DB_ENGINE" = "mysql" -a -n "$exe_mysql" ] ; then
echo -n "Creating database $DB_NAME ... "
mysql --batch --host=${DB_HOST:-localhost} --port=${DB_PORT:-0} --user=$root_username --password=$root_password mysql <<EOF
CREATE DATABASE IF NOT EXISTS $DB_NAME ;
INSERT INTO user VALUES ('%', '$DB_USERNAME', PASSWORD('$DB_PASSWORD'),
'Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y',
'Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y',
'','','','',0,0,0,0)
ON DUPLICATE KEY UPDATE User = '$DB_USERNAME' ;
FLUSH PRIVILEGES ;
GRANT ALL PRIVILEGES ON $DB_NAME.* TO $DB_USERNAME ;
echo "done"
EOF
fi
if [ "$DB_ENGINE" = "postgresql" -a -n "$exe_psql" ] ; then
echo "Creating database $DB_NAME ... "
args="--username=$root_username template1"
if [ -n "$DB_PORT" ] ; then
args="--port=$DB_PORT $args"
fi
if [ -n "$DB_HOST" ] ; then
args="--host=$DB_HOST $args"
fi
psql -q $args <<EOF
DROP DATABASE IF EXISTS $DB_NAME ;
DROP USER IF EXISTS $DB_USERNAME ;
CREATE USER $DB_USERNAME WITH ENCRYPTED PASSWORD '$DB_PASSWORD' ;
CREATE DATABASE $DB_NAME WITH OWNER = $DB_USERNAME ;
EOF
echo "PostgreSQL setup complete"
fi
echo ""
echo -n "Generating settings.py ... "
cat local_settings.py.in \
| sed -e "s/@DB_ENGINE@/django.db.backends.$DJANGO_ENGINE/g" \
| sed -e "s/@DB_NAME@/$DB_NAME/g" \
| sed -e "s/@DB_HOST@/$DB_HOST/g" \
| sed -e "s/@DB_PORT@/$DB_PORT/g" \
| sed -e "s/@DB_USERNAME@/$DB_USERNAME/g" \
| sed -e "s/@DB_PASSWORD@/$DB_PASSWORD/g" \
| sed -e "s/@LANGUAGE@/$LANGUAGE/g" \
| sed -e "s/@COUNTRY@/$COUNTRY/g" \
| sed -e "s%@TIMEZONE@%$TIMEZONE%g" \
> local_settings.py
echo "done"
echo ""
echo "Initializing Django database and loading default surveys:"
echo ""
python manage.py syncdb
# On PostgreSQL override the order of migrated tables, creating the
# referenced tables first (the journal migration fails if CMS isn't available).
python manage.py migrate cms
python manage.py migrate
python manage.py rule_type_register --title 'Show Question' --jsclass 'wok.pollster.rules.ShowQuestion'
python manage.py rule_type_register --title 'Hide Question' --jsclass 'wok.pollster.rules.HideQuestion'
python manage.py rule_type_register --title 'Show Options' --jsclass 'wok.pollster.rules.ShowOptions'
python manage.py rule_type_register --title 'Hide Options' --jsclass 'wok.pollster.rules.HideOptions'
python manage.py rule_type_register --title 'Check Options' --jsclass 'wok.pollster.rules.CheckOptions'
python manage.py rule_type_register --title 'Uncheck Options' --jsclass 'wok.pollster.rules.UncheckOptions'
python manage.py rule_type_register --title 'Exclusive' --jsclass 'wok.pollster.rules.Exclusive'
python manage.py rule_type_register --title 'Future Fill' --jsclass 'wok.pollster.rules.FutureFill'
python manage.py rule_type_register --title 'Future Show Question' --jsclass 'wok.pollster.rules.FutureShowQuestion'
python manage.py rule_type_register --title 'Future Hide Question' --jsclass 'wok.pollster.rules.FutureHideQuestion'
python manage.py rule_type_register --title 'Future Show Options' --jsclass 'wok.pollster.rules.FutureShowOptions'
python manage.py rule_type_register --title 'Future Hide Options' --jsclass 'wok.pollster.rules.FutureHideOptions'
python manage.py rule_type_register --title 'Fill' --jsclass 'wok.pollster.rules.Fill'
python manage.py question_data_type_register --title 'Text' --dbtype 'django.db.models.TextField(null=True, blank=True)' --cssclass 'text-type' --jsclass 'wok.pollster.datatypes.Text'
python manage.py question_data_type_register --title 'Numeric' --dbtype 'django.db.models.PositiveIntegerField(null=True, blank=True)' --cssclass 'numeric-type' --jsclass 'wok.pollster.datatypes.Numeric'
python manage.py question_data_type_register --title 'Date' --dbtype 'django.db.models.DateField(null=True, blank=True)' --cssclass 'date-type' --jsclass 'wok.pollster.datatypes.Date'
python manage.py question_data_type_register --title 'YearMonth' --dbtype 'db.models.YearMonthField(null=True, blank=True)' --cssclass 'monthyear-type' --jsclass 'wok.pollster.datatypes.YearMonth'
python manage.py question_data_type_register --title 'Timestamp' --dbtype 'django.db.models.DateTimeField(null=True, blank=True)' --cssclass 'timestamp-type' --jsclass 'wok.pollster.datatypes.Timestamp'
# PostalCode is added by the pollster migration 0005_postalcodefield.py
# python manage.py question_data_type_register --title 'PostalCode' --dbtype 'django.db.models.PostalCodeField(null=True, blank=True)' --cssclass 'postalcode-type' --jsclass 'wok.pollster.datatypes.PostalCode'
python manage.py virtual_option_type_register --title 'Range' --question-data-type-title 'Text' --jsclass 'wok.pollster.virtualoptions.TextRange'
python manage.py virtual_option_type_register --title 'Range' --question-data-type-title 'Numeric' --jsclass 'wok.pollster.virtualoptions.NumericRange'
python manage.py virtual_option_type_register --title 'Range' --question-data-type-title 'Date' --jsclass 'wok.pollster.virtualoptions.DateRange'
python manage.py virtual_option_type_register --title 'Years ago' --question-data-type-title 'Date' --jsclass 'wok.pollster.virtualoptions.DateYearsAgo'
python manage.py virtual_option_type_register --title 'Years ago' --question-data-type-title 'YearMonth' --jsclass 'wok.pollster.virtualoptions.YearMonthYearsAgo'
python manage.py virtual_option_type_register --title 'Weeks ago' --question-data-type-title 'Timestamp' --jsclass 'wok.pollster.virtualoptions.TimestampWeeksAgo'
python manage.py virtual_option_type_register --title 'Regular expression' --question-data-type-title 'Text' --jsclass 'wok.pollster.virtualoptions.RegularExpression'
python manage.py createcachetable django_cache 2>/dev/null || echo 'Cache table errors ignored'
if [ "$DB_ENGINE" = "postgresql" -a -n "$exe_psql" ] ; then
postgis=$(ls /usr/share/postgresql/*/contrib/postgis-*/postgis.sql)
srefsys=$(ls /usr/share/postgresql/*/contrib/postgis-*/spatial_ref_sys.sql)
if [ -n "$postgis" -a -n "$srefsys" ] ; then
echo "Setting up PostGIS"
args="--username=$root_username $DB_NAME"
if [ -n "$DB_PORT" ] ; then
args="--port=$DB_PORT $args"
fi
if [ -n "$DB_HOST" ] ; then
args="--host=$DB_HOST $args"
fi
psql -q $args <<EOF
\i $postgis
\i $srefsys
CREATE TABLE pollster_zip_codes (id serial, country TEXT, zip_code_key TEXT);
SELECT AddGeometryColumn('pollster_zip_codes', 'geometry', 4326, 'MULTIPOLYGON', 2);
ALTER TABLE pollster_zip_codes OWNER TO $DB_USERNAME;
ALTER TABLE spatial_ref_sys OWNER TO $DB_USERNAME;
ALTER TABLE geometry_columns OWNER TO $DB_USERNAME;
ALTER VIEW geography_columns OWNER TO $DB_USERNAME;
EOF
echo "PostGIS setup complete"
fi
fi
echo ""
echo "** All done. You can start the system by issuing: 'source ./bin/activate && python manage.py runserver'"
echo ""
|
sbfnk/epiwork-website
|
bootstrap.sh
|
Shell
|
agpl-3.0
| 14,295 |
#!/bin/bash
source $(dirname $0)/bootstrap.sh
MAINCLASS=org.apache.zookeeper.server.quorum.QuorumPeerMain
$SCRIPTDIR/shutdown.sh $MAINCLASS pid/zk
mkdir -p etc pid
rm -f pid/zk
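# Write a three-node quorum config for each local instance (client ports
# 2181-2183, quorum/election ports 2888-2890 and 3888-3890), then start it.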
for ZK in {1,2,3}; do
ZKCONF=etc/zk$ZK.cfg
echo tickTime=2000 > $ZKCONF
echo initLimit=10 >>$ZKCONF
echo syncLimit=5 >>$ZKCONF
echo dataDir=data/zk$ZK >>$ZKCONF
echo clientPort=218$ZK >>$ZKCONF
echo server.1=localhost:2888:3888 >>$ZKCONF
echo server.2=localhost:2889:3889 >>$ZKCONF
echo server.3=localhost:2890:3890 >>$ZKCONF
rm -rf data/zk$ZK
mkdir -p data/zk$ZK
echo $ZK > data/zk$ZK/myid
log4j=`echo $LIBDIR/log4j-*.jar`
zookeeper=`echo $LIBDIR/zookeeper-*.jar`
java -cp $log4j:$zookeeper $MAINCLASS $ZKCONF &
echo $! >> pid/zk
done
|
dmontag/neo4j-enterprise
|
ha/src/main/script/zkdevcluster.sh
|
Shell
|
agpl-3.0
| 790 |
#!/bin/bash -l
#SBATCH -p micro
#SBATCH --ntasks=1
#SBATCH -t 00:20:00
#SBATCH -J my_job
#SBATCH --cpus-per-task=1
# #SBATCH --ntasks-per-node=1
# #SBATCH --gres=gpu:4
#SBATCH --workdir $HOME/NO_BACKUP/
python worker.py test --sheep testcase-worker-allegro
|
markovmodel/adaptivemd
|
examples/rp/submit_worker.sh
|
Shell
|
lgpl-2.1
| 259 |
# Shim to run a modular input written in Java. The modular input
# is assumed to be in the form of an executable jar. This shim
# is in ${PLATFORM}/bin/${INPUTNAME}.sh of the app the modular
# input is contained in, and the jar is assumed to be in
# jars/${INPUTNAME}.jar in the app.
#
# Extra arguments to the JVM (i.e., -Xms512M) can be put in
# a file jars/${INPUTNAME}.vmopts and will be interpolated
# into the command to run the JVM.
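# For example (hypothetical input name): for an input "myinput", a file
# jars/myinput.vmopts containing "-Xms512M" would make the exec line below
# run: java -Xms512M -jar <app>/jars/myinput.jar "$@"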
SCRIPT=$(readlink -f "$0")
BASENAME=$(basename "$SCRIPT" .sh)
JAR_DIR=$(dirname "$SCRIPT")/../../jars
if [ -f "$JAR_DIR/$BASENAME.vmopts" ]; then
VMOPTS=$(cat "$JAR_DIR/$BASENAME.vmopts")
else
VMOPTS=""
fi
exec java $VMOPTS -jar "$JAR_DIR/$BASENAME.jar" "$@"
|
dkelmer/splunk-sdk-java
|
launchers/shim-linux.sh
|
Shell
|
apache-2.0
| 700 |
#!/bin/bash
if [ "x$1" == "x--help" ]; then
echo "Release and version bump script for Maven based projects"
echo " with X.Y[-SNAPSHOT] versioning scheme."
echo "Usage: $0 [--major]"
echo
echo " --major This is a major release. Bump the X version"
echo " and reset Y to 0 prior to release."
echo " example: 1.5-SNAPSHOT -> 2.0 -> 2.1-SNAPSHOT"
exit 0
fi
function relhook_failed() {
echo "Release hook $1 failed."
exit 2
}
BRANCH=$(git rev-parse --abbrev-ref HEAD)
echo "Current branch is $BRANCH"
echo "Verifying git status"
GIT_STATUS=$(git status --porcelain | grep -v '^?? ')
if [[ $GIT_STATUS ]]; then
git status
echo
echo "Your git tree is not clean, aborting."
exit 1
fi
CURRENT=$(mvn help:evaluate -Dexpression=project.version | grep -v '\[')
if [ "x$1" == "x--major" ]; then
echo "Performing MAJOR release version bump!"
BUMP_MAJOR="+1"
BUMP_MINOR="-\$2"
elif [[ "$CURRENT" != *SNAPSHOT ]]; then
echo "Performing full minor release version bump"
BUMP_MAJOR="+0"
BUMP_MINOR="+1"
else
echo "Performing minor release version bump"
BUMP_MAJOR="+0"
BUMP_MINOR="+0"
fi
CURRENT_RELEASE=$(echo $CURRENT | awk -F'[.-]' "{print (\$1$BUMP_MAJOR)\".\"(\$2$BUMP_MINOR)}")
NEXT_VERSION=$(echo $CURRENT | awk -F'[.-]' "{print (\$1$BUMP_MAJOR)\".\"(\$2$BUMP_MINOR+1)\"-SNAPSHOT\"}")
TAG="v$CURRENT_RELEASE"
echo
echo "Verify the following:"
echo "Current version: $CURRENT"
echo
echo "Release: $CURRENT_RELEASE"
echo "Tag: $TAG"
echo
echo "Next development version: $NEXT_VERSION"
echo
echo -n "Is everything correct? [yes/no]: "
read ok
if [ "x$ok" != "xyes" ]; then
echo "Negative answer. Aborting."
exit 1
fi
echo "Preparing release"
mvn versions:set -DnewVersion=$CURRENT_RELEASE | grep -v '\['
echo "Executing custom release scripts for $TAG"
if [ -d release.d ]; then
for SCRIPT in release.d/*
do
if [ -f $SCRIPT -a -x $SCRIPT ]; then
$SCRIPT $CURRENT_RELEASE $TAG || relhook_failed $SCRIPT
fi
done
fi
NAME=$(git config user.name)
EMAIL=$(git config user.email)
git commit -a -m "Releasing new version $CURRENT_RELEASE
Signed-off-by: $NAME <$EMAIL>"
git tag "$TAG"
echo "Preparing development version"
mvn versions:set -DnewVersion=$NEXT_VERSION | grep -v '\['
echo "Executing custom release scripts for $NEXT_VERSION"
if [ -d release.d ]; then
for SCRIPT in release.d/*
do
if [ -f $SCRIPT -a -x $SCRIPT ]; then
$SCRIPT $NEXT_VERSION latest || relhook_failed $SCRIPT
fi
done
fi
git commit -a -m "Preparing for next development iteration
Signed-off-by: $NAME <$EMAIL>"
echo "Perform the following command to push everything to the server:"
echo "git push origin $BRANCH $TAG"
|
oVirt/ovirt-optimizer
|
release.sh
|
Shell
|
apache-2.0
| 2,704 |
#!/bin/bash
# Copyright (C) 2017 Intel Corporation
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# Description:
# This metrics test measures the TCP latency using qperf acting between two
# containers.
set -e
SCRIPT_PATH=$(dirname "$(readlink -f "$0")")
source "${SCRIPT_PATH}/lib/network-common.bash"
source "${SCRIPT_PATH}/../lib/common.bash"
TEST_NAME="network qperf TCP latency"
function latency() {
# Image name (qperf installed by default)
local image="gabyct/network"
# Arguments to run the client/server
local client_extra_args="--rm"
local server_extra_args=""
# Initialize/clean environment
init_env
check_images "$image"
local server_command="qperf"
local server_address=$(start_server "$image" "$server_command" "$server_extra_args")
# Verify server IP address
if [ -z "$server_address" ];then
clean_env
die "server: ip address no found"
fi
local client_command="qperf ${server_address} tcp_lat conf"
result=$(start_client "$image" "$client_command" "$client_extra_args")
local total_latency=$(echo "$result" | grep latency | cut -f2 -d '=' | awk '{print $1}')
local units=$(echo "$result" | grep latency | cut -f2 -d '=' | awk '{print $2}' | tr -d '\r')
echo "Ping total latency: $total_latency $units"
# Note, for now we save with the units we get from the results - but at some
# point we might wish to unify the results into a stated fixed unit (ns or so)
# so we have uniformity in the historical data records.
save_results "$TEST_NAME" "" "${total_latency}" "${units}"
clean_env
echo "Finish"
}
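# A minimal sketch (not wired in) of the unit unification mentioned in
# latency() above; assumes bc is available and that qperf only reports
# ns, us, ms or sec latency scales:
normalize_to_ns() {
local value="$1" units="$2"
case "$units" in
ns) echo "$value" ;;
us) echo "$value * 1000" | bc ;;
ms) echo "$value * 1000000" | bc ;;
sec) echo "$value * 1000000000" | bc ;;
*) echo "$value" ;;
esac
}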
latency
|
devimc/tests
|
metrics/network/network-latency-qperf.sh
|
Shell
|
apache-2.0
| 2,238 |
#!/usr/bin/env bash
set -xe
# Desktop and other tutorial tools.
# VirtualBox doesn't like Gnome, use Unity:
# https://askubuntu.com/questions/1035410/ubuntu-18-04-gnome-hangs-on-virtualbox-with-3d-acceleration-enabled
echo "gdm3 shared/default-x-display-manager select lightdm" | debconf-set-selections
echo "lightdm shared/default-x-display-manager select lightdm" | debconf-set-selections
# Install ubuntu desktop from tasksel
apt-get install -y --no-install-recommends tasksel
DEBIAN_FRONTEND=noninteractive tasksel install ubuntu-desktop
# Remove gnome, install unity
apt-get remove -y gdm3 ubuntu-desktop
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
ubuntu-unity-desktop lightdm \
gnome-panel \
gnome-settings-daemon \
metacity \
nautilus
# FIXME: app menu is empty in unity
snap install intellij-idea-community --classic
# TODO: install plugins, P4 plugin and Python CE
DEBIAN_FRONTEND=noninteractive apt-get -y install wireshark
echo "wireshark-common wireshark-common/install-setuid boolean true" | debconf-set-selections
DEBIAN_FRONTEND=noninteractive dpkg-reconfigure wireshark-common
|
opennetworkinglab/onos
|
tools/dev/p4vm/tutorial-bootstrap.sh
|
Shell
|
apache-2.0
| 1,152 |
#!/bin/bash
DRIVER=
UWSGI=
NGINX=
PROCESS_OR_THREAD=-p
source ${TROOT}/config.sh
|
sanjoydesk/FrameworkBenchmarks
|
frameworks/C++/cutelyst/setup_pf.sh
|
Shell
|
bsd-3-clause
| 83 |
#!/bin/bash
if [ "$INSTALL_BOWER" = "true" ]; then
node ./node_modules/bower/bin/bower install
fi
|
simonfork/fuelux
|
postinstall.sh
|
Shell
|
bsd-3-clause
| 100 |
#!/bin/bash
# Provision the db vm
ALL_PROVISIONERS="check setup install postinstall netcadbca import service"
if [[ $# -gt 0 ]]; then
PROVISIONERS=$@
else
PROVISIONERS=$ALL_PROVISIONERS
fi
echo "running provisioners $PROVISIONERS"
for p in $PROVISIONERS
do
case "$p" in
check)
# check for required software
source /vagrant/scripts/db11g/provision_check_software.sh
;;
setup)
# provisioning script converts to oracle linux and installs db prereqs
/vagrant/scripts/db11g/provision_setup.sh
;;
install)
# installs oracle 11.2.0.4
/vagrant/scripts/db11g/provision_install.sh
;;
postinstall)
# runs oracle postinstallation tasks as root
/vagrant/scripts/db11g/provision_postinstall.sh
;;
netcadbca)
# installs tns listener and creates empty db
/vagrant/scripts/db11g/provision_netca_dbca.sh
;;
import)
# imports the ATG db dump
/vagrant/scripts/db11g/provision_import.sh
;;
service)
# sets up the init.d service
/vagrant/scripts/db11g/provision_service_setup.sh
;;
none)
echo "No provisioners run"
;;
*)
echo "Invalid provisioning arg $p. Valid args are: $ALL_PROVISIONERS"
esac
done
|
kpath/Vagrant-CRS
|
scripts/db11g/provision.sh
|
Shell
|
mit
| 1,455 |
#!/bin/bash
set -e
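# Derive the gettext domain from the single .pot file under ./locale:
# ${mo##*/} strips the directory part, ${mo%.*} strips the .pot suffix.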
mo=$(/bin/ls -1 ./locale/*.pot)
mo="${mo##*/}"
mo="${mo%.*}"
for f in ./locale/*.po; do
echo -n "$f: "
l="${f%.po}"
l="${l##*/}"
mkdir -p "./locale/$l/LC_MESSAGES"
msgfmt -o "./locale/$l/LC_MESSAGES/$mo.mo" "$f"
echo "ok"
done
|
marguerite/susews-planet
|
l10n-compile-po.sh
|
Shell
|
gpl-2.0
| 272 |
#!/system/bin/sh
. /data/data/info.guardianproject.gpg/app_opt/aliases/common
/data/data/info.guardianproject.gpg/app_opt/bin/gpgsm-gencert.sh "$@"
|
guardianproject/gnupg-for-android
|
assets/aliases/gpgsm-gencert.sh
|
Shell
|
gpl-3.0
| 148 |
#!/bin/sh
#
# @file pkg_install_vl32.sh
# @brief OpenRTM-aist dependent packages install script for Vine Linux 3.2
# @author Noriaki Ando <[email protected]> and Shinji Kurihara
#
#
rpm_dir='/root/rpm/RPMS/i386'
ace=`rpm -qa ace`
ace_devel=`rpm -qa ace-devel`
### Fetching the source RPM files and rebuilding them takes a very long time, so the rpm packages are downloaded from the [OpenRTM install] page instead.
ace_pkg="http://www.is.aist.go.jp/rt/OpenRTM-aist-Tutorial/packages/Vine3.0/RPMS/i386/ace-5.4.1-1.i386.rpm"
### Fetching the source RPM files and rebuilding them takes a very long time, so the rpm packages are downloaded from the [OpenRTM install] page instead.
ace_devel_pkg="http://www.is.aist.go.jp/rt/OpenRTM-aist-Tutorial/packages/Vine3.0/RPMS/i386/ace-devel-5.4.1-1.i386.rpm"
boost=`rpm -qa boost`
boost_devel=`rpm -qa boost-devel`
boost_pkg="http://www.is.aist.go.jp/rt/OpenRTM-aist-Tutorial/packages/Vine3.0/RPMS/i386/boost-1.32.0-1.i386.rpm"
boost_dev_pkg="http://www.is.aist.go.jp/rt/OpenRTM-aist-Tutorial/packages/Vine3.0/RPMS/i386/boost-devel-1.32.0-1.i386.rpm"
omniorb=`rpm -qa omniORB`
omniorbpy=`rpm -qa omniORBpy`
omniorb_pkg="http://www.is.aist.go.jp/rt/OpenRTM-aist-Tutorial/packages/Vine3.0/RPMS/i386/omniORB-4.0.5-1.i386.rpm"
omniorb_dev_pkg="http://www.is.aist.go.jp/rt/OpenRTM-aist-Tutorial/packages/Vine3.0/RPMS/i386/omniORB-devel-4.0.5-1.i386.rpm"
omniorb_doc_pkg="http://www.is.aist.go.jp/rt/OpenRTM-aist-Tutorial/packages/Vine3.0/RPMS/i386/omniORB-doc-4.0.5-1.i386.rpm"
omniorbpy_pkg="http://www.is.aist.go.jp/rt/OpenRTM-aist-Tutorial/packages/Vine3.0/RPMS/i386/omniORBpy-2.3-1.i386.rpm"
wxpython_pkg="http://www.is.aist.go.jp/rt/OpenRTM-aist-Tutorial/packages/Vine3.0/RPMS/i386/wxPythonGTK-py2.3-2.5.1.5-1.i386.rpm"
pyxml_pkg="http://www.is.aist.go.jp/rt/OpenRTM-aist-Tutorial/packages/Vine3.0/RPMS/i386/PyXML-0.8.4-3.i386.rpm"
if test "x$boost" = "x" ; then
echo "boost is not installed."
echo "Installing boost"
wget $boost_pkg
rpm -ivh boost-1.32.0-1.i386.rpm
echo "done"
else
echo "boost is already installed."
fi
if test "x$boost_devel" = "x" ; then
echo "boost-devel is not installed."
echo "Installing boost-devel"
wget $boost_dev_pkg
rpm -ivh boost-devel-1.32.0-1.i386.rpm
echo "done"
else
echo "boost-devel is already installed."
fi
if test "x$ace" = "x" ; then
echo "ace is not installed."
echo "downloading ace...."
wget $ace_pkg
echo "Installing ace...."
rpm -ivh ace-5.4.1-1.i386.rpm
echo "done"
else
echo "ace is already installed."
fi
if test "x$ace_devel" = "x" ; then
echo "ace-devel is not installed."
echo "downloading ace-devel...."
wget $ace_devel_pkg
echo "Installing ace-devel...."
rpm -i --nodeps ace-devel-5.4.1-1.i386.rpm
echo "done"
else
echo "ace-devel is already installed."
fi
if test "x$omniorb" = "x" ; then
echo "omniORB is not installed."
echo "downloading omniORB...."
wget $omniorb_pkg
wget $omniorb_dev_pkg
wget $omniorb_doc_pkg
echo "Installing omniORB...."
rpm -ivh omniORB-4.0.5-1.i386.rpm
rpm -ivh omniORB-devel-4.0.5-1.i386.rpm
rpm -ivh omniORB-doc-4.0.5-1.i386.rpm
echo "done"
else
echo "omniORB is already installed."
fi
if test "x$omniorbpy" = "x" ; then
echo "omniORBpy is not installed."
echo "downloading omniORBpy...."
wget $omniorbpy_pkg
echo "Installing ommniORBpy...."
rpm -ivh omniORBpy-2.3-1.i386.rpm
echo "done"
else
echo "omniORBpy is already installed."
fi
echo "downloading wxPythonGTK...."
wget $wxpython_pkg
echo "Installing wxPythonGTK...."
rpm -ivh wxPythonGTK-py2.3-2.5.1.5-1.i386.rpm
echo "downloading PyXML...."
wget $pyxml_pkg
echo "Installing PyXML...."
rpm -ivh PyXML-0.8.4-3.i386.rpm
|
yosuke/OpenRTM-aist-portable
|
build/pkg_install_vl32.sh
|
Shell
|
lgpl-3.0
| 3,618 |
#!/bin/bash
# Utility script to update the ansible repo with the latest templates and image
# streams from several github repos
#
# This script should be run from openshift-ansible/roles/openshift_examples
XPAAS_VERSION=ose-v1.3.6
ORIGIN_VERSION=${1:-v1.5}
RHAMP_TAG=2.0.0.GA
RHAMP_TEMPLATE=https://raw.githubusercontent.com/3scale/rhamp-openshift-templates/${RHAMP_TAG}/apicast-gateway/apicast-gateway-template.yml
EXAMPLES_BASE=$(pwd)/files/examples/${ORIGIN_VERSION}
find ${EXAMPLES_BASE} -name '*.json' -delete
TEMP=`mktemp -d`
pushd $TEMP
wget https://github.com/openshift/origin/archive/master.zip -O origin-master.zip
wget https://github.com/jboss-fuse/application-templates/archive/GA.zip -O fis-GA.zip
wget https://github.com/jboss-openshift/application-templates/archive/${XPAAS_VERSION}.zip -O application-templates-master.zip
wget https://github.com/3scale/rhamp-openshift-templates/archive/${RHAMP_TAG}.zip -O amp.zip
unzip origin-master.zip
unzip application-templates-master.zip
unzip fis-GA.zip
unzip amp.zip
mv origin-master/examples/db-templates/* ${EXAMPLES_BASE}/db-templates/
mv origin-master/examples/quickstarts/* ${EXAMPLES_BASE}/quickstart-templates/
mv origin-master/examples/jenkins/jenkins-*template.json ${EXAMPLES_BASE}/quickstart-templates/
mv origin-master/examples/image-streams/* ${EXAMPLES_BASE}/image-streams/
mv application-templates-${XPAAS_VERSION}/jboss-image-streams.json ${EXAMPLES_BASE}/xpaas-streams/
# fis content from jboss-fuse/application-templates-GA would collide with jboss-openshift/application-templates
# as soon as they use the same branch/tag names
mv application-templates-GA/fis-image-streams.json ${EXAMPLES_BASE}/xpaas-streams/fis-image-streams.json
mv application-templates-GA/quickstarts/* ${EXAMPLES_BASE}/xpaas-templates/
find application-templates-${XPAAS_VERSION}/ -name '*.json' ! -wholename '*secret*' ! -wholename '*demo*' -exec mv {} ${EXAMPLES_BASE}/xpaas-templates/ \;
find 3scale-amp-openshift-templates-${RHAMP_TAG}/ -name '*.yml' -exec mv {} ${EXAMPLES_BASE}/quickstart-templates/ \;
popd
wget https://raw.githubusercontent.com/redhat-developer/s2i-dotnetcore/master/dotnet_imagestreams.json -O ${EXAMPLES_BASE}/image-streams/dotnet_imagestreams.json
wget https://raw.githubusercontent.com/redhat-developer/s2i-dotnetcore/master/templates/dotnet-example.json -O ${EXAMPLES_BASE}/quickstart-templates/dotnet-example.json
wget https://raw.githubusercontent.com/redhat-developer/s2i-dotnetcore/master/templates/dotnet-pgsql-persistent.json -O ${EXAMPLES_BASE}/quickstart-templates/dotnet-pgsql-persistent.json
git diff files/examples
|
rhdedgar/openshift-tools
|
openshift/installer/vendored/openshift-ansible-3.5.91/roles/openshift_examples/examples-sync.sh
|
Shell
|
apache-2.0
| 2,632 |
#!/bin/bash
# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# any command line arguments will be passed to hack/build_go.sh to build the
# cmd/integration binary. --use_go_build is a legitimate argument, as are
# any other build time arguments.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
source "${KUBE_ROOT}/hack/lib/init.sh"
# Lists of API Versions of each groups that should be tested, groups are
# separated by comma, lists are separated by semicolon. e.g.,
# "v1,compute/v1alpha1,experimental/v1alpha2;v1,compute/v2,experimental/v1alpha3"
# TODO: It's going to be:
# KUBE_TEST_API_VERSIONS=${KUBE_TEST_API_VERSIONS:-"v1,extensions/v1beta1"}
KUBE_TEST_API_VERSIONS=${KUBE_TEST_API_VERSIONS:-"v1,extensions/v1beta1;v1,autoscaling/v1,batch/v1,apps/v1alpha1,policy/v1alpha1,extensions/v1beta1"}
# Give integration tests longer to run
KUBE_TIMEOUT=${KUBE_TIMEOUT:--timeout 240s}
KUBE_INTEGRATION_TEST_MAX_CONCURRENCY=${KUBE_INTEGRATION_TEST_MAX_CONCURRENCY:-"-1"}
LOG_LEVEL=${LOG_LEVEL:-2}
cleanup() {
kube::etcd::cleanup
kube::log::status "Integration test cleanup complete"
}
runTests() {
kube::etcd::start
kube::log::status "Running integration test cases"
# TODO: Re-enable race detection when we switch to a thread-safe etcd client
# KUBE_RACE="-race"
KUBE_GOFLAGS="-tags 'integration no-docker' " \
KUBE_RACE="" \
KUBE_TIMEOUT="${KUBE_TIMEOUT}" \
KUBE_TEST_API_VERSIONS="$1" \
"${KUBE_ROOT}/hack/test-go.sh" test/integration
kube::log::status "Running integration test scenario with watch cache on"
KUBE_TEST_API_VERSIONS="$1" "${KUBE_OUTPUT_HOSTBIN}/integration" --v=${LOG_LEVEL} \
--max-concurrency="${KUBE_INTEGRATION_TEST_MAX_CONCURRENCY}" --watch-cache=true
cleanup
}
checkEtcdOnPath() {
kube::log::status "Checking etcd is on PATH"
which etcd && return
kube::log::status "Cannot find etcd, cannot run integration tests."
kube::log::status "Please see docs/devel/testing.md for instructions."
return 1
}
checkEtcdOnPath
"${KUBE_ROOT}/hack/build-go.sh" "$@" cmd/integration
# Run cleanup to stop etcd on interrupt or other kill signal.
trap cleanup EXIT
# Convert the CSV to an array of API versions to test
IFS=';' read -a apiVersions <<< "${KUBE_TEST_API_VERSIONS}"
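# e.g. "v1,extensions/v1beta1;v1,autoscaling/v1" splits into two entries,
# each a comma-separated group list passed to its own runTests invocation.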
for apiVersion in "${apiVersions[@]}"; do
runTests "${apiVersion}"
done
|
devops-docker/kubernetes
|
hack/test-integration.sh
|
Shell
|
apache-2.0
| 2,923 |
#!/bin/bash
set -e
#-------------------------------------------------------------------------------
# Start docker container
#-------------------------------------------------------------------------------
cd "$APP_FOLDER"
if [ -a src/main/docker/jhipster-registry.yml ]; then
docker-compose -f src/main/docker/jhipster-registry.yml up -d
fi
if [ -a src/main/docker/consul.yml ]; then
docker-compose -f src/main/docker/consul.yml up -d
fi
if [ -a src/main/docker/cassandra.yml ]; then
docker-compose -f src/main/docker/cassandra.yml up -d
fi
if [ -a src/main/docker/mongodb.yml ]; then
docker-compose -f src/main/docker/mongodb.yml up -d
fi
if [ -a src/main/docker/mysql.yml ]; then
docker-compose -f src/main/docker/mysql.yml up -d
fi
if [ -a src/main/docker/postgresql.yml ]; then
docker-compose -f src/main/docker/postgresql.yml up -d
fi
if [ -a src/main/docker/elasticsearch.yml ]; then
docker-compose -f src/main/docker/elasticsearch.yml up -d
fi
if [ -a src/main/docker/mariadb.yml ]; then
docker-compose -f src/main/docker/mariadb.yml up -d
fi
if [ -a src/main/docker/kafka.yml ]; then
docker-compose -f src/main/docker/kafka.yml up -d
fi
docker ps -a
|
stevehouel/generator-jhipster
|
travis/scripts/03-docker-compose.sh
|
Shell
|
apache-2.0
| 1,197 |
#!/usr/bin/env bash
usage()
{
echo "Runs our integration suite on Linux"
echo "usage: cibuild.sh [options]"
echo ""
echo "Options"
echo " --mono-path <path> Path to the mono installation to use for the run"
echo " --os <os> OS to run (Linux / Darwin)"
}
XUNIT_VERSION=2.0.0-alpha-build2576
BUILD_CONFIGURATION=Debug
OS_NAME=$(uname -s)
USE_CACHE=true
MONO_ARGS='--runtime=v4.0.30319 --gc=boehm --debug=mdb-optimizations --attach=disable'
# There are some stability issues that are causing Jenkins builds to fail at an
# unacceptable rate. To temporarily work around that we are going to retry the
# unstable tasks a number of times.
RETRY_COUNT=5
while [[ $# -gt 0 ]]
do
opt="$1"
case $opt in
-h|--help)
usage
exit 1
;;
--mono-path)
CUSTOM_MONO_PATH=$2
shift 2
;;
--os)
OS_NAME=$2
shift 2
;;
--debug)
BUILD_CONFIGURATION=Debug
shift 1
;;
--release)
BUILD_CONFIGURATION=Release
shift 1
;;
--nocache)
USE_CACHE=false
shift 1
;;
*)
usage
exit 1
;;
esac
done
restore_nuget()
{
curl -O https://dotnetci.blob.core.windows.net/roslyn/nuget.3.zip
unzip nuget.3.zip
rm nuget.3.zip
}
run_msbuild()
{
local is_good=false
for i in `seq 1 $RETRY_COUNT`
do
o=$(mono $MONO_ARGS packages/Microsoft.Build.Mono.Debug.14.1.0.0-prerelease/lib/MSBuild.exe /v:m /p:SignAssembly=false /p:DebugSymbols=false "$@")
if [ $? -eq 0 ]; then
echo "$o"
is_good=true
break
fi
echo Build retry $i
done
if [ "$is_good" != "true" ]; then
echo "$o"
echo Build failed
exit 1
fi
}
# NuGet crashes on occasion during restore. This isn't a fatal action so
# we re-run it a number of times.
run_nuget()
{
local is_good=false
for i in `seq 1 $RETRY_COUNT`
do
mono $MONO_ARGS .nuget/NuGet.exe "$@"
if [ $? -eq 0 ]; then
is_good=true
break
fi
done
if [ "$is_good" != "true" ]; then
echo NuGet failed
exit 1
fi
}
# Run the compilation. Can pass additional build arguments as parameters
compile_toolset()
{
echo Compiling the toolset compilers
echo -e "Compiling the C# compiler"
run_msbuild src/Compilers/CSharp/csc/csc.csproj /p:Configuration=$BUILD_CONFIGURATION
echo -e "Compiling the VB compiler"
run_msbuild src/Compilers/VisualBasic/vbc/vbc.csproj /p:Configuration=$BUILD_CONFIGURATION
}
# Save the toolset binaries from Binaries/BUILD_CONFIGURATION to Binaries/Bootstrap
save_toolset()
{
local compiler_binaries=(
csc.exe
Microsoft.CodeAnalysis.dll
Microsoft.CodeAnalysis.CSharp.dll
System.Collections.Immutable.dll
System.Reflection.Metadata.dll
vbc.exe
Microsoft.CodeAnalysis.VisualBasic.dll)
mkdir Binaries/Bootstrap
for i in ${compiler_binaries[@]}; do
cp Binaries/$BUILD_CONFIGURATION/${i} Binaries/Bootstrap/${i}
if [ $? -ne 0 ]; then
echo Saving bootstrap binaries failed
exit 1
fi
done
}
# Clean out all existing binaries. This ensures the bootstrap phase forces
# a rebuild instead of picking up older binaries.
clean_roslyn()
{
echo Cleaning the enlistment
mono $MONO_ARGS packages/Microsoft.Build.Mono.Debug.14.1.0.0-prerelease/lib/MSBuild.exe /v:m /t:Clean build/Toolset.sln /p:Configuration=$BUILD_CONFIGURATION
rm -rf Binaries/$BUILD_CONFIGURATION
}
build_roslyn()
{
local bootstrapArg=/p:BootstrapBuildPath=$(pwd)/Binaries/Bootstrap
echo Building CrossPlatform.sln
run_msbuild $bootstrapArg CrossPlatform.sln /p:Configuration=$BUILD_CONFIGURATION
}
# Install the specified Mono toolset from our Azure blob storage.
install_mono_toolset()
{
local target=/tmp/$1
echo "Installing Mono toolset $1"
if [ -d $target ]; then
if [ "$USE_CACHE" = "true" ]; then
echo "Already installed"
return
fi
fi
pushd /tmp
rm -r $target 2>/dev/null
rm $1.tar.bz2 2>/dev/null
curl -O https://dotnetci.blob.core.windows.net/roslyn/$1.tar.bz2
tar -jxf $1.tar.bz2
if [ $? -ne 0 ]; then
echo "Unable to download toolset"
exit 1
fi
popd
}
# This function will update the PATH variable to put the desired
# version of Mono ahead of the system one.
set_mono_path()
{
if [ "$CUSTOM_MONO_PATH" != "" ]; then
if [ ! -d "$CUSTOM_MONO_PATH" ]; then
echo "Not a valid directory $CUSTOM_MONO_PATH"
exit 1
fi
echo "Using mono path $CUSTOM_MONO_PATH"
PATH=$CUSTOM_MONO_PATH:$PATH
return
fi
if [ "$OS_NAME" = "Darwin" ]; then
MONO_TOOLSET_NAME=mono.mac.1
elif [ "$OS_NAME" = "Linux" ]; then
MONO_TOOLSET_NAME=mono.linux.1
else
echo "Error: Unsupported OS $OS_NAME"
exit 1
fi
install_mono_toolset $MONO_TOOLSET_NAME
PATH=/tmp/$MONO_TOOLSET_NAME/bin:$PATH
}
test_roslyn()
{
local xunit_runner=packages/xunit.runners.$XUNIT_VERSION/tools/xunit.console.x86.exe
local test_binaries=(
Roslyn.Compilers.CSharp.CommandLine.UnitTests
Roslyn.Compilers.CSharp.Syntax.UnitTests
Roslyn.Compilers.CSharp.Semantic.UnitTests
Roslyn.Compilers.CSharp.Symbol.UnitTests
Roslyn.Compilers.VisualBasic.Syntax.UnitTests)
local any_failed=false
for i in "${test_binaries[@]}"
do
mono $MONO_ARGS $xunit_runner Binaries/$BUILD_CONFIGURATION/$i.dll -xml Binaries/$BUILD_CONFIGURATION/$i.TestResults.xml -noshadow
if [ $? -ne 0 ]; then
any_failed=true
fi
done
if [ "$any_failed" = "true" ]; then
echo Unit test failed
exit 1
fi
}
echo Clean out the enlistment
git clean -dxf .
restore_nuget
set_mono_path
which mono
compile_toolset
save_toolset
clean_roslyn
build_roslyn
test_roslyn
|
VitalyTVA/roslyn
|
cibuild.sh
|
Shell
|
apache-2.0
| 6,156 |
#!/bin/bash
source ./shared.functions.sh
START_DIR=$PWD
WORK_DIR=$START_DIR/../../../../../../.macosbuild
mkdir -p $WORK_DIR
WORK_DIR=$(abspath "$WORK_DIR")
source ./mac.05.libvcx.env.sh
cd ../../../..
export ORIGINAL_PATH=$PATH
#export ORIGINAL_PKG_CONFIG_PATH=$PKG_CONFIG_PATH
# Commenting because we don't want to compile cargo everytime
# cargo clean
# cargo install
export OPENSSL_DIR_DARWIN=$OPENSSL_DIR
# KS: Commenting it out because we want to debug only on armv7 based device/simulator
# export PATH=$WORK_DIR/NDK/arm/bin:$ORIGINAL_PATH
# export OPENSSL_DIR=$WORK_DIR/openssl_for_ios_and_android/output/android/openssl-armeabi
# export ANDROID_SODIUM_LIB=$WORK_DIR/libzmq-android/libsodium/libsodium_arm/lib
# export ANDROID_ZMQ_LIB=$WORK_DIR/libzmq-android/zmq/libzmq_arm/lib
# export LIBINDY_DIR=$WORK_DIR/vcx-indy-sdk/libindy/target/arm-linux-androideabi/release
# cargo build --target arm-linux-androideabi --release --verbose
# export PATH=$WORK_DIR/NDK/arm/bin:$ORIGINAL_PATH
# export OPENSSL_DIR=$WORK_DIR/openssl_for_ios_and_android/output/android/openssl-armeabi-v7a
# export ANDROID_SODIUM_LIB=$WORK_DIR/libzmq-android/libsodium/libsodium_armv7/lib
# export ANDROID_ZMQ_LIB=$WORK_DIR/libzmq-android/zmq/libzmq_armv7/lib
# export LIBINDY_DIR=$WORK_DIR/vcx-indy-sdk/libindy/target/armv7-linux-androideabi/debug
# cargo build --target armv7-linux-androideabi
# export PATH=$WORK_DIR/NDK/arm64/bin:$ORIGINAL_PATH
# export OPENSSL_DIR=$WORK_DIR/openssl_for_ios_and_android/output/android/openssl-arm64-v8a
# export ANDROID_SODIUM_LIB=$WORK_DIR/libzmq-android/libsodium/libsodium_arm64/lib
# export ANDROID_ZMQ_LIB=$WORK_DIR/libzmq-android/zmq/libzmq_arm64/lib
# export LIBINDY_DIR=$WORK_DIR/vcx-indy-sdk/libindy/target/aarch64-linux-android/release
# cargo build --target aarch64-linux-android --release --verbose
export PATH=$WORK_DIR/NDK/x86/bin:$ORIGINAL_PATH
export OPENSSL_DIR=$WORK_DIR/openssl_for_ios_and_android/output/android/openssl-x86
export ANDROID_SODIUM_LIB=$WORK_DIR/libzmq-android/libsodium/libsodium_x86/lib
export ANDROID_ZMQ_LIB=$WORK_DIR/libzmq-android/zmq/libzmq_x86/lib
export LIBINDY_DIR=$WORK_DIR/vcx-indy-sdk/libindy/target/i686-linux-android/release
# export LIBINDY_DIR=$WORK_DIR/vcx-indy-sdk/libindy/target/i686-linux-android/debug
cargo build --target i686-linux-android
# cargo build --target i686-linux-android --release
# KS: Commenting it out because we want to debug only on armv7 based device/simulator
# export PATH=$WORK_DIR/NDK/x86_64/bin:$ORIGINAL_PATH
# export OPENSSL_DIR=$WORK_DIR/openssl_for_ios_and_android/output/android/openssl-x86_64
# export ANDROID_SODIUM_LIB=$WORK_DIR/libzmq-android/libsodium/libsodium_x86_64/lib
# export ANDROID_ZMQ_LIB=$WORK_DIR/libzmq-android/zmq/libzmq_x86_64/lib
# export LIBINDY_DIR=$WORK_DIR/vcx-indy-sdk/libindy/target/x86_64-linux-android/release
# cargo build --target x86_64-linux-android --release --verbose
# This builds the library for code that runs in OSX
# ln -sf $WORK_DIR/vcx-indy-sdk/libindy/target/x86_64-apple-darwin/debug/libindy.dylib /usr/local/lib/libindy.dylib
export PATH=$ORIGINAL_PATH
export OPENSSL_DIR=$OPENSSL_DIR_DARWIN
unset ANDROID_SODIUM_LIB
unset ANDROID_ZMQ_LIB
unset LIBINDY_DIR
# cargo build --target x86_64-apple-darwin --release --verbose
#cargo test
#export PKG_CONFIG_PATH=$ORIGINAL_PKG_CONFIG_PATH
# To build for macos
#cargo build
#export LIBINDY_DIR=/usr/local/lib
#export RUST_BACKTRACE=1
# To build for iOS
#LIBINDY_DIR=/usr/local/lib RUST_BACKTRACE=1 cargo lipo --release
#cargo lipo --release --verbose --targets="aarch64-apple-ios,armv7-apple-ios,armv7s-apple-ios,i386-apple-ios,x86_64-apple-ios"
#LIBINDY_DIR=/usr/local/lib RUST_BACKTRACE=1 cargo lipo
#LIBINDY_DIR=/usr/local/lib cargo test
|
anastasia-tarasova/indy-sdk
|
vcx/libvcx/build_scripts/android/mac/debug/mac.06.x86.libvcx.build.sh
|
Shell
|
apache-2.0
| 3,754 |
#!/usr/bin/env bash
# Copyright 2009 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
set -e
eval $(go env)
export GOROOT # the api test requires GOROOT to be set.
unset CDPATH # in case user has it set
unset GOPATH # we disallow local import for non-local packages, if $GOROOT happens
# to be under $GOPATH, then some tests below will fail
# no core files, please
ulimit -c 0
# Raise soft limits to hard limits for NetBSD/OpenBSD.
# We need at least 256 files and ~300 MB of bss.
# On OS X ulimit -S -n rejects 'unlimited'.
#
# Note that ulimit -S -n may fail if ulimit -H -n is set higher than a
# non-root process is allowed to set the high limit.
# This is a system misconfiguration and should be fixed on the
# broken system, not "fixed" by ignoring the failure here.
# See longer discussion on golang.org/issue/7381.
[ "$(ulimit -H -n)" == "unlimited" ] || ulimit -S -n $(ulimit -H -n)
[ "$(ulimit -H -d)" == "unlimited" ] || ulimit -S -d $(ulimit -H -d)
# Thread count limit on NetBSD 7.
if ulimit -T &> /dev/null; then
[ "$(ulimit -H -T)" == "unlimited" ] || ulimit -S -T $(ulimit -H -T)
fi
# allow all.bash to avoid double-build of everything
rebuild=true
if [ "$1" == "--no-rebuild" ]; then
shift
else
echo '# Building packages and commands.'
time go install -a -v std
echo
fi
# we must unset GOROOT_FINAL before tests, because runtime/debug requires
# correct access to source code, so if we have GOROOT_FINAL in effect,
# at least runtime/debug test will fail.
unset GOROOT_FINAL
# increase timeout for ARM up to 3 times the normal value
timeout_scale=1
[ "$GOARCH" == "arm" ] && timeout_scale=3
echo '# Testing packages.'
time go test std -short -timeout=$(expr 120 \* $timeout_scale)s
echo
# We set GOMAXPROCS=2 in addition to -cpu=1,2,4 in order to test runtime bootstrap code,
# creation of first goroutines and first garbage collections in the parallel setting.
echo '# GOMAXPROCS=2 runtime -cpu=1,2,4'
GOMAXPROCS=2 go test runtime -short -timeout=$(expr 300 \* $timeout_scale)s -cpu=1,2,4
echo
echo '# sync -cpu=10'
go test sync -short -timeout=$(expr 120 \* $timeout_scale)s -cpu=10
# Race detector only supported on Linux and OS X,
# and only on amd64, and only when cgo is enabled.
case "$GOHOSTOS-$GOOS-$GOARCH-$CGO_ENABLED" in
linux-linux-amd64-1 | darwin-darwin-amd64-1)
echo
echo '# Testing race detector.'
go test -race -i runtime/race flag
go test -race -run=Output runtime/race
go test -race -short flag
esac
xcd() {
echo
echo '#' $1
builtin cd "$GOROOT"/src/$1 || exit 1
}
# NOTE: "set -e" cannot help us in subshells. It works until you test it with ||.
#
# $ bash --version
# GNU bash, version 3.2.48(1)-release (x86_64-apple-darwin12)
# Copyright (C) 2007 Free Software Foundation, Inc.
#
# $ set -e; (set -e; false; echo still here); echo subshell exit status $?
# subshell exit status 1
# # subshell stopped early, set exit status, but outer set -e didn't stop.
#
# $ set -e; (set -e; false; echo still here) || echo stopped
# still here
# # somehow the '|| echo stopped' broke the inner set -e.
#
# To avoid this bug, every command in a subshell should have '|| exit 1' on it.
# Strictly speaking, the test may be unnecessary on the final command of
# the subshell, but it aids later editing and may avoid future bash bugs.
[ "$CGO_ENABLED" != 1 ] ||
[ "$GOHOSTOS" == windows ] ||
(xcd ../misc/cgo/stdio
go run $GOROOT/test/run.go - . || exit 1
) || exit $?
[ "$CGO_ENABLED" != 1 ] ||
(xcd ../misc/cgo/life
go run $GOROOT/test/run.go - . || exit 1
) || exit $?
[ "$CGO_ENABLED" != 1 ] ||
(xcd ../misc/cgo/test
go test -ldflags '-linkmode=auto' || exit 1
# linkmode=internal fails on dragonfly since errno is a TLS relocation.
[ "$GOHOSTOS" == dragonfly ] || go test -ldflags '-linkmode=internal' || exit 1
case "$GOHOSTOS-$GOARCH" in
openbsd-386 | openbsd-amd64)
# test linkmode=external, but __thread not supported, so skip testtls.
go test -ldflags '-linkmode=external' || exit 1
;;
darwin-386 | darwin-amd64)
# linkmode=external fails on OS X 10.6 and earlier == Darwin
# 10.8 and earlier.
case $(uname -r) in
[0-9].* | 10.*) ;;
*) go test -ldflags '-linkmode=external' || exit 1;;
esac
;;
dragonfly-386 | dragonfly-amd64 | freebsd-386 | freebsd-amd64 | freebsd-arm | linux-386 | linux-amd64 | linux-arm | netbsd-386 | netbsd-amd64)
go test -ldflags '-linkmode=external' || exit 1
go test -ldflags '-linkmode=auto' ../testtls || exit 1
go test -ldflags '-linkmode=external' ../testtls || exit 1
case "$GOHOSTOS-$GOARCH" in
netbsd-386 | netbsd-amd64) ;; # no static linking
freebsd-arm) ;; # -fPIC compiled tls code will use __tls_get_addr instead
# of __aeabi_read_tp, however, on FreeBSD/ARM, __tls_get_addr
# is implemented in rtld-elf, so -fPIC isn't compatible with
# static linking on FreeBSD/ARM with clang. (cgo depends on
# -fPIC fundamentally.)
*)
if ! $CC -xc -o /dev/null -static - 2>/dev/null <<<'int main() {}' ; then
echo "No support for static linking found (lacks libc.a?), skip cgo static linking test."
else
go test -ldflags '-linkmode=external -extldflags "-static -pthread"' ../testtls || exit 1
go test ../nocgo || exit 1
go test -ldflags '-linkmode=external' ../nocgo || exit 1
go test -ldflags '-linkmode=external -extldflags "-static -pthread"' ../nocgo || exit 1
fi
;;
esac
;;
esac
) || exit $?
# This tests cgo -godefs. That mode is not supported,
# so it's okay if it doesn't work on some systems.
# In particular, it works badly with clang on OS X.
[ "$CGO_ENABLED" != 1 ] || [ "$GOOS" == darwin ] ||
(xcd ../misc/cgo/testcdefs
./test.bash || exit 1
) || exit $?
[ "$CGO_ENABLED" != 1 ] ||
[ "$GOHOSTOS" == windows ] ||
(xcd ../misc/cgo/testso
./test.bash || exit 1
) || exit $?
[ "$CGO_ENABLED" != 1 ] ||
[ "$GOHOSTOS-$GOARCH" != linux-amd64 ] ||
(xcd ../misc/cgo/testasan
go run main.go || exit 1
) || exit $?
[ "$CGO_ENABLED" != 1 ] ||
[ "$GOHOSTOS" == windows ] ||
(xcd ../misc/cgo/errors
./test.bash || exit 1
) || exit $?
[ "$GOOS" == nacl ] ||
(xcd ../doc/progs
time ./run || exit 1
) || exit $?
[ "$GOOS" == nacl ] ||
[ "$GOARCH" == arm ] || # uses network, fails under QEMU
(xcd ../doc/articles/wiki
./test.bash || exit 1
) || exit $?
[ "$GOOS" == nacl ] ||
(xcd ../doc/codewalk
time ./run || exit 1
) || exit $?
[ "$GOOS" == nacl ] ||
[ "$GOARCH" == arm ] ||
(xcd ../test/bench/shootout
time ./timing.sh -test || exit 1
) || exit $?
[ "$GOOS" == openbsd ] || # golang.org/issue/5057
(
echo
echo '#' ../test/bench/go1
go test ../test/bench/go1 || exit 1
) || exit $?
(xcd ../test
unset GOMAXPROCS
GOOS=$GOHOSTOS GOARCH=$GOHOSTARCH go build -o runtest run.go || exit 1
time ./runtest || exit 1
rm -f runtest
) || exit $?
[ "$GOOS" == nacl ] ||
(
echo
echo '# Checking API compatibility.'
time go run $GOROOT/src/cmd/api/run.go || exit 1
) || exit $?
echo
echo ALL TESTS PASSED
|
pombredanne/go-deleteme
|
src/run.bash
|
Shell
|
bsd-3-clause
| 7,007 |
#!/bin/bash
FN="mgug4120a.db_3.2.3.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.13/data/annotation/src/contrib/mgug4120a.db_3.2.3.tar.gz"
"https://bioarchive.galaxyproject.org/mgug4120a.db_3.2.3.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-mgug4120a.db/bioconductor-mgug4120a.db_3.2.3_src_all.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-mgug4120a.db/bioconductor-mgug4120a.db_3.2.3_src_all.tar.gz"
)
MD5="bb57e8b2efe3d038ec2a0ace0313a4e7"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
curl $URL > $TARBALL
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks.
if [[ $(uname -s) == "Linux" ]]; then
if md5sum -c <<<"$MD5 $TARBALL"; then
SUCCESS=1
break
fi
elif [[ $(uname -s) == "Darwin" ]]; then
  if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
    SUCCESS=1
    break
  fi
fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
|
phac-nml/bioconda-recipes
|
recipes/bioconductor-mgug4120a.db/post-link.sh
|
Shell
|
mit
| 1,426 |
#!/usr/bin/env sh
# generated from catkin/cmake/template/setup.sh.in
# Sets various environment variables and sources additional environment hooks.
# It tries its best to undo changes from a previously sourced setup file beforehand.
# Supported command line options:
# --extend: skips the undoing of changes from a previously sourced setup file
_SETUP_UTIL="/usr/local/_setup_util.py"
if [ ! -f "$_SETUP_UTIL" ]; then
echo "Missing Python script: $_SETUP_UTIL"
return 22
fi
# detect if running on Darwin platform
_UNAME=`which uname`
_UNAME=`$_UNAME`
_IS_DARWIN=0
if [ "$_UNAME" = "Darwin" ]; then
_IS_DARWIN=1
fi
# make sure to export all environment variables
export CMAKE_PREFIX_PATH
export CPATH
if [ $_IS_DARWIN -eq 0 ]; then
export LD_LIBRARY_PATH
else
export DYLD_LIBRARY_PATH
fi
export PATH
export PKG_CONFIG_PATH
export PYTHONPATH
# remember type of shell if not already set
if [ -z "$CATKIN_SHELL" ]; then
CATKIN_SHELL=sh
fi
# invoke Python script to generate necessary exports of environment variables
_MKTEMP=`which mktemp`
_SETUP_TMP=`$_MKTEMP /tmp/setup.sh.XXXXXXXXXX`
if [ $? -ne 0 -o ! -f "$_SETUP_TMP" ]; then
echo "Could not create temporary file: $_SETUP_TMP"
return 1
fi
CATKIN_SHELL=$CATKIN_SHELL "$_SETUP_UTIL" $@ > $_SETUP_TMP
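# The generated temporary file contains plain export statements, e.g.
# (illustrative only; actual values depend on the workspace):
#   export CMAKE_PREFIX_PATH="/usr/local:$CMAKE_PREFIX_PATH"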
. $_SETUP_TMP
_RM=`which rm`
$_RM $_SETUP_TMP
# source all environment hooks
_IFS=$IFS
IFS=":"
for _envfile in $_CATKIN_ENVIRONMENT_HOOKS; do
IFS=$_IFS
. "$_envfile"
done
IFS=$_IFS
unset _CATKIN_ENVIRONMENT_HOOKS
|
Boberito25/ButlerBot
|
rosbuild_ws/src/intelligence/build/catkin_generated/installspace/setup.sh
|
Shell
|
bsd-3-clause
| 1,488 |
#!/bin/bash
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# # Use of this source code is governed by a BSD-style license that can be
# # found in the LICENSE file.
#
# This script is tested ONLY on Linux. It may not work correctly on
# Mac OS X.
#
if [ $# -lt 1 ];
then
echo "Usage: "$0" (android|android_extra|android_small|cast|chromeos|common|flutter|ios)" >&2
exit 1
fi
TOPSRC="$(dirname "$0")/.."
source "${TOPSRC}/scripts/data_common.sh"
function copy_common {
DATA_PREFIX="data/out/tmp/icudt${VERSION}"
TZRES_PREFIX="data/out/build/icudt${VERSION}l"
echo "Generating the big endian data bundle"
LD_LIBRARY_PATH=lib bin/icupkg -tb "${DATA_PREFIX}l.dat" "${DATA_PREFIX}b.dat"
echo "Copying icudtl.dat and icudtlb.dat"
for endian in l b
do
cp "${DATA_PREFIX}${endian}.dat" "${TOPSRC}/common/icudt${endian}.dat"
done
echo "Copying metaZones.res, timezoneTypes.res, zoneinfo64.res"
for tzfile in metaZones timezoneTypes zoneinfo64
do
cp "${TZRES_PREFIX}/${tzfile}.res" "${TOPSRC}/tzres/${tzfile}.res"
done
echo "Done with copying pre-built ICU data files."
}
function copy_data {
echo "Copying icudtl.dat for $1"
cp "data/out/tmp/icudt${VERSION}l.dat" "${TOPSRC}/$2/icudtl.dat"
echo "Done with copying pre-built ICU data file for $1."
}
function copy_android_extra {
echo "Copying icudtl_extra.dat for AndroidExtra"
LD_LIBRARY_PATH=lib/ bin/icupkg -r \
"${TOPSRC}/filters/android-extra-removed-resources.txt" \
"data/out/tmp/icudt${VERSION}l.dat"
echo "AFTER strip out the content is"
LD_LIBRARY_PATH=lib/ bin/icupkg -l \
"data/out/tmp/icudt${VERSION}l.dat"
cp "data/out/tmp/icudt${VERSION}l.dat" "${TOPSRC}/android_small/icudtl_extra.dat"
echo "Done with copying pre-built ICU data file for AndroidExtra."
}
BACKUP_DIR="dataout/$1"
function backup_outdir {
rm -rf "${BACKUP_DIR}"
mkdir "${BACKUP_DIR}"
find "data/out" | cpio -pdmv "${BACKUP_DIR}"
}
case "$1" in
"chromeos")
copy_data ChromeOS $1
backup_outdir $1
;;
"common")
copy_common
backup_outdir $1
;;
"android")
copy_data Android $1
backup_outdir $1
;;
"android_small")
copy_data AndroidSmall $1
backup_outdir $1
;;
"android_extra")
copy_android_extra
backup_outdir $1
;;
"ios")
copy_data iOS $1
backup_outdir $1
;;
"cast")
copy_data Cast $1
backup_outdir $1
;;
"flutter")
copy_data Flutter $1
backup_outdir $1
;;
esac
|
endlessm/chromium-browser
|
third_party/icu/scripts/copy_data.sh
|
Shell
|
bsd-3-clause
| 2,502 |
#!/bin/sh
git clone https://github.com/biicode/boost.git biicode-boost
mkdir -p blocks
cd biicode-boost
./generate $1 --no-publish
cp -r -p blocks/* ../blocks
|
bowlofstew/boost-examples-headeronly
|
bootstrap.sh
|
Shell
|
mit
| 161 |
#!/bin/sh
###
# = amsn.sh(3)
# Gabriel Craciunescu <[email protected]>
#
# == NAME
# amsn.sh - for Frugalware
#
# == SYNOPSIS
# Common schema for amsn plugins packages.
#
# == EXAMPLE
# --------------------------------------------------
# pkgname=amsn-plugin-amsnplus
# _F_amsn_name="amsnplus"
# pkgver=2.6.1
# pkgrel=1
# pkgdesc="aMSN plus plugin similar to MSN Plus!"
# _F_sourceforge_ext=".zip"
# _F_sourceforge_dirname="amsn"
# _F_sourceforge_name="$_F_amsn_name"
# _F_amsn_clean_files=(Makefile Snapshot.exe snapshot.c)
# Finclude sourceforge amsn
# archs=('i686')
# sha1sums=('62ec1c2b6a70e1c01d7d52d4a5a6418b99f5d720')
# --------------------------------------------------
#
# == OPTIONS
# * _F_archive_name: (defaults to $_F_amsn_name) see util.sh
# * _F_amsn_name: no default; it HAS to be set because we use amsn-plugin-xxxx as $pkgname
# * _F_amsn_clean_files: lists files that have to be removed from the package (e.g. foo.exe)
###
if [ -z "$_F_amsn_name" ]; then
Fmessage "You have to set _F_amsn_name!!"
Fmessage "Issue man amsn.sh for more info"
exit 1
fi
if [ -z "$_F_archive_name" ]; then
_F_archive_name=$_F_amsn_name
fi
###
# == OVERWRITTEN VARIABLES
# * groups
# * up2date
###
up2date="Flastarchive 'http://sourceforge.net/projects/amsn/files/amsn-plugins/0.97/' '\.zip'"
groups=('xapps-extra' 'amsn-plugins')
###
# == APPENDED VARIABLES
# * amsn to depends()
###
depends=(${depends[@]} 'amsn')
###
# == PROVIDED FUNCTIONS
# * Famsn_clean_files(): deletes all files from _F_amsn_clean_files
###
Famsn_clean_files()
{
if [ -n "$_F_amsn_clean_files" ]; then
for broken in "${_F_amsn_clean_files[@]}"
do
Frm usr/share/amsn/plugins/$_F_amsn_name/$broken
done
fi
}
###
# * Fbuild_amsn()
###
Fbuild_amsn()
{
Fmkdir usr/share/amsn/plugins
Fcpr $_F_amsn_name usr/share/amsn/plugins
# Some files are not world readable, so let's fix them
chmod -R a+r $Fdestdir/usr/share/amsn/plugins/* || Fdie
# Clean some junk
find $Fdestdir -name ".svn" | xargs rm -rf || Fdie
find $Fdestdir -name "CVS" | xargs rm -rf || Fdie
find $Fdestdir -name ".git" | xargs rm -rf || Fdie
# Clean more junk
Famsn_clean_files
}
###
# * build(): just calls Fbuild_amsn()
###
build()
{
Fbuild_amsn
}
|
frugalware/xorgtesting
|
source/include/amsn.sh
|
Shell
|
gpl-2.0
| 2,237 |
#!/bin/sh
# Ensure "ls --color" properly colors "normal" text and files.
# I.e., that it uses NORMAL to style non file name output and
# file names with no associated color (unless FILE is also set).
# Copyright (C) 2010-2013 Free Software Foundation, Inc.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
. "${srcdir=.}/tests/init.sh"; path_prepend_ ./src
print_ver_ ls
# Don't let a different umask perturb the results.
umask 22
# Output time as something constant
export TIME_STYLE="+norm"
# helper to strip ls columns up to "norm" time
qls() { sed 's/-r.*norm/norm/'; }
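# Illustrative only: qls reduces a long-format line such as
# "-rwxr-xr-x 1 user group 0 norm exe" to "norm exe", keeping the expected
# output below independent of permissions, ownership and size.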
touch exe || framework_failure_
chmod u+x exe || framework_failure_
touch nocolor || framework_failure_
TCOLORS="no=7:ex=01;32"
# Non coloured files inherit NORMAL attributes
LS_COLORS=$TCOLORS ls -gGU --color exe nocolor | qls >> out || fail=1
LS_COLORS=$TCOLORS ls -xU --color exe nocolor >> out || fail=1
LS_COLORS=$TCOLORS ls -gGU --color nocolor exe | qls >> out || fail=1
LS_COLORS=$TCOLORS ls -xU --color nocolor exe >> out || fail=1
# NORMAL does not override FILE though
LS_COLORS=$TCOLORS:fi=1 ls -gGU --color nocolor exe | qls >> out || fail=1
# Support uncolored ordinary files that do _not_ inherit from NORMAL.
# Note there is a redundant RESET output before a non colored
# file in this case which may be removed in future.
LS_COLORS=$TCOLORS:fi= ls -gGU --color nocolor exe | qls >> out || fail=1
LS_COLORS=$TCOLORS:fi=0 ls -gGU --color nocolor exe | qls >> out || fail=1
# A caveat worth noting is that commas (-m), indicator chars (-F)
# and the "total" line do not currently use NORMAL attributes
LS_COLORS=$TCOLORS ls -mFU --color nocolor exe >> out || fail=1
# Ensure no coloring is done unless enabled
LS_COLORS=$TCOLORS ls -gGU nocolor exe | qls >> out || fail=1
cat -A out > out.display || framework_failure_
mv out.display out || framework_failure_
cat <<\EOF > exp || framework_failure_
^[[0m^[[7mnorm ^[[m^[[01;32mexe^[[0m$
^[[7mnorm nocolor^[[0m$
^[[0m^[[7m^[[m^[[01;32mexe^[[0m ^[[7mnocolor^[[0m$
^[[0m^[[7mnorm nocolor^[[0m$
^[[7mnorm ^[[m^[[01;32mexe^[[0m$
^[[0m^[[7mnocolor^[[0m ^[[7m^[[m^[[01;32mexe^[[0m$
^[[0m^[[7mnorm ^[[m^[[1mnocolor^[[0m$
^[[7mnorm ^[[m^[[01;32mexe^[[0m$
^[[0m^[[7mnorm ^[[m^[[mnocolor^[[0m$
^[[7mnorm ^[[m^[[01;32mexe^[[0m$
^[[0m^[[7mnorm ^[[m^[[0mnocolor^[[0m$
^[[7mnorm ^[[m^[[01;32mexe^[[0m$
^[[0m^[[7mnocolor^[[0m, ^[[7m^[[m^[[01;32mexe^[[0m*$
norm nocolor$
norm exe$
EOF
compare exp out || fail=1
Exit $fail
|
bu2/coreutils
|
tests/ls/color-norm.sh
|
Shell
|
gpl-3.0
| 3,104 |
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e -x -u
SCRIPT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
source ${SCRIPT_DIR}/../base/build-base-docker.sh
IMAGE_NAME="geode/compile:${DOCKER_ENV_VERSION}"
pushd ${SCRIPT_DIR}
docker build -t ${IMAGE_NAME} .
popd
if [ "$(uname -s)" == "Linux" ]; then
USER_NAME=${SUDO_USER:=$USER}
USER_ID=$(id -u "${USER_NAME}")
GROUP_ID=$(id -g "${USER_NAME}")
else # boot2docker uid and gid
USER_NAME=$USER
USER_ID=1000
GROUP_ID=50
fi
docker build -t "${IMAGE_NAME}-${USER_NAME}" - <<UserSpecificDocker
FROM ${IMAGE_NAME}
RUN groupadd --non-unique -g ${GROUP_ID} ${USER_NAME}
RUN useradd -g ${GROUP_ID} -u ${USER_ID} -k /root -m ${USER_NAME}
ENV HOME /home/${USER_NAME}
UserSpecificDocker
# Go to root
pushd ${SCRIPT_DIR}/../../..
docker run -i -t \
--rm=true \
-w "/home/${USER_NAME}/incubator-geode" \
-u "${USER_NAME}" \
-v "$PWD:/home/${USER_NAME}/incubator-geode" \
-v "/home/${USER_NAME}/.m2:/home/${USER_NAME}/.m2" \
${IMAGE_NAME}-${USER_NAME} \
bash
popd
|
shankarh/geode
|
dev-tools/docker/compile/start-compile-docker.sh
|
Shell
|
apache-2.0
| 1,800 |
#!/usr/bin/env bash
#
# The Alluxio Open Foundation licenses this work under the Apache License, version 2.0
# (the "License"). You may not use this work except in compliance with the License, which is
# available at www.apache.org/licenses/LICENSE-2.0
#
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied, as more fully set forth in the License.
#
# See the NOTICE file distributed with this work for information regarding copyright ownership.
#
set -e
VARIABLES="\
ALLUXIO_MASTER_BIND_HOST \
ALLUXIO_MASTER_WEB_BIND_HOST \
ALLUXIO_WORKER_BIND_HOST \
ALLUXIO_WORKER_DATA_BIND_HOST \
ALLUXIO_WORKER_WEB_BIND_HOST \
ALLUXIO_MASTER_HOSTNAME \
ALLUXIO_MASTER_WEB_HOSTNAME \
ALLUXIO_WORKER_HOSTNAME \
ALLUXIO_WORKER_DATA_HOSTNAME \
ALLUXIO_WORKER_WEB_HOSTNAME"
for var in ${VARIABLES}; do
sed -i "s/# ${var}/${var}/g" /alluxio/conf/alluxio-env.sh
done
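# Illustrative effect, assuming the stock template ships these settings
# commented out: "# ALLUXIO_MASTER_HOSTNAME" becomes "ALLUXIO_MASTER_HOSTNAME",
# so the assignment takes effect when the file is sourced.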
|
EvilMcJerkface/alluxio
|
integration/vagrant/ansible/roles/mesos/files/config_alluxio.sh
|
Shell
|
apache-2.0
| 929 |
mdast -u mdast-slug -u mdast-validate-links ./*.md
mdast -u mdast-slug -u mdast-validate-links ./**/*.md
|
thanegill/RxSwift
|
scripts/validate-markdown.sh
|
Shell
|
mit
| 104 |
#!/bin/bash -x
#
# Generated - do not edit!
#
# Macros
TOP=`pwd`
CND_PLATFORM=GNU-Linux-x86
CND_CONF=Release
CND_DISTDIR=dist
CND_BUILDDIR=build
CND_DLIB_EXT=so
NBTMPDIR=${CND_BUILDDIR}/${CND_CONF}/${CND_PLATFORM}/tmp-packaging
TMPDIRNAME=tmp-packaging
OUTPUT_PATH=${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/libnative-font.${CND_DLIB_EXT}
OUTPUT_BASENAME=libnative-font.${CND_DLIB_EXT}
PACKAGE_TOP_DIR=libnative-font.so/
# Functions
function checkReturnCode
{
rc=$?
if [ $rc != 0 ]
then
exit $rc
fi
}
function makeDirectory
# $1 directory path
# $2 permission (optional)
{
mkdir -p "$1"
checkReturnCode
if [ "$2" != "" ]
then
chmod $2 "$1"
checkReturnCode
fi
}
function copyFileToTmpDir
# $1 from-file path
# $2 to-file path
# $3 permission
{
cp "$1" "$2"
checkReturnCode
if [ "$3" != "" ]
then
chmod $3 "$2"
checkReturnCode
fi
}
# Setup
cd "${TOP}"
mkdir -p ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package
rm -rf ${NBTMPDIR}
mkdir -p ${NBTMPDIR}
# Copy files and create directories and links
cd "${TOP}"
makeDirectory "${NBTMPDIR}/libnative-font.so/lib"
copyFileToTmpDir "${OUTPUT_PATH}" "${NBTMPDIR}/${PACKAGE_TOP_DIR}lib/${OUTPUT_BASENAME}" 0644
# Generate tar file
cd "${TOP}"
rm -f ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package/libnative-font.so.tar
cd ${NBTMPDIR}
tar -vcf ../../../../${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package/libnative-font.so.tar *
checkReturnCode
# Cleanup
cd "${TOP}"
rm -rf ${NBTMPDIR}
|
teamfx/openjfx-9-dev-rt
|
netbeans/native-font/nbproject/Package-Release.bash
|
Shell
|
gpl-2.0
| 1,531 |
#!/bin/bash
#
##################################################################################################################
# Written to be used on 64 bits computers
# Author : Erik Dubois
# Website : http://www.erikdubois.be
##################################################################################################################
##################################################################################################################
#
# DO NOT JUST RUN THIS. EXAMINE AND JUDGE. RUN AT YOUR OWN RISK.
#
##################################################################################################################
# Problem solving commands
# Read before using it.
# https://www.atlassian.com/git/tutorials/undoing-changes/git-reset
# git reset --hard origin/master
# ONLY if you are very sure and no coworkers are on your github.
# Command that have helped in the past
# Force git to overwrite local files on pull - no merge
# git fetch --all
# git push --set-upstream origin master
# git reset --hard origin/master
# installing git if not installed for specific distro's
if ! location="$(type -p "git")" || [ -z "$location" ]; then
echo "#################################################"
echo "installing git for this script to work"
echo "#################################################"
# check if apt is installed
if which apt > /dev/null; then
sudo apt install -y git
fi
# check if apt-git is installed
if which apt-get > /dev/null; then
sudo apt-get install -y git
fi
# check if pacman is installed
if which pacman > /dev/null; then
sudo pacman -S --noconfirm git
fi
# check if eopkg is installed
if which eopkg > /dev/null; then
sudo eopkg -y install git
fi
fi
#setting up git
#https://www.atlassian.com/git/tutorials/setting-up-a-repository/git-config
git init
git config --global user.name "Erik Dubois"
git config --global user.email "[email protected]"
sudo git config --system core.editor nano
git config --global credential.helper cache
git config --global credential.helper 'cache --timeout=18000'
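# the cache timeout is in seconds, so 18000 keeps cached credentials for five hours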
git config --global push.default simple
echo "################################################################"
echo "################### T H E E N D ######################"
echo "################################################################"
|
erikdubois/AntergosXfce4
|
setup-git-v1.sh
|
Shell
|
gpl-2.0
| 2,283 |
#!/bin/sh
# 'test cp --update A B' where A and B are both symlinks that point
# to the same file
# Copyright (C) 2000-2016 Free Software Foundation, Inc.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
. "${srcdir=.}/tests/init.sh"; path_prepend_ ./src
print_ver_ cp
touch file || framework_failure_
ln -s file a || framework_failure_
ln -s file b || framework_failure_
ln -s no-such-file c || framework_failure_
ln -s no-such-file d || framework_failure_
cp --update --no-dereference a b || fail=1
cp --update --no-dereference c d || fail=1
Exit $fail
|
yuxuanchen1997/coreutils
|
tests/cp/slink-2-slink.sh
|
Shell
|
gpl-3.0
| 1,143 |
#!/bin/bash
#
# Copyright 2018 The AMP HTML Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the license.
# This script adds a pre-push hook to .git/hooks/, which runs some basic tests
# before running "git push".
# To enable it, run this script: "./build-system/common/enable-git-pre-push.sh"
SCRIPT=${BASH_SOURCE[0]}
BUILD_SYSTEM_COMMON_DIR=$(dirname "$SCRIPT")
BUILD_SYSTEM_DIR=$(dirname "$BUILD_SYSTEM_COMMON_DIR")
AMPHTML_DIR=$(dirname "$BUILD_SYSTEM_DIR")
PRE_PUSH_SRC="build-system/common/default-pre-push"
GIT_HOOKS_DIR=".git/hooks"
PRE_PUSH_DEST="$GIT_HOOKS_DIR/pre-push"
PRE_PUSH_BACKUP="$GIT_HOOKS_DIR/pre-push.backup"
GREEN() { echo -e "\033[0;32m$1\033[0m"; }
CYAN() { echo -e "\033[0;36m$1\033[0m"; }
YELLOW() { echo -e "\033[0;33m$1\033[0m"; }
if [[ $SCRIPT != ./build-system/* ]] ;
then
echo $(YELLOW "This script must be run from the root") $(CYAN "amphtml") $(YELLOW "directory. Exiting.")
exit 1
fi
echo $(YELLOW "-----------------------------------------------------------------------------------------------------------------")
echo $(GREEN "Running") $(CYAN $SCRIPT)
echo $(GREEN "This script does the following:")
echo $(GREEN " 1. If already present, makes a backup of") $(CYAN "$PRE_PUSH_DEST") $(GREEN "at") $(CYAN "$PRE_PUSH_BACKUP")
echo $(GREEN " 2. Creates a new file") $(CYAN "$PRE_PUSH_DEST") $(GREEN "which calls") $(CYAN "$PRE_PUSH_SRC")
echo $(GREEN " 3. With this,") $(CYAN "git push") $(GREEN "will first run the checks in") $(CYAN "$PRE_PUSH_SRC")
echo $(GREEN " 4. You can edit") $(CYAN "$PRE_PUSH_DEST") $(GREEN "to change the pre-push hooks that are run before") $(CYAN "git push")
echo $(GREEN " 5. To skip the hook, run") $(CYAN "git push --no-verify")
echo $(GREEN " 6. To remove the hook, delete the file") $(CYAN "$PRE_PUSH_DEST")
echo $(YELLOW "-----------------------------------------------------------------------------------------------------------------")
echo -e "\n"
read -n 1 -s -r -p "$(GREEN 'Press any key to continue...')"
echo -e "\n"
if [ -f "$AMPHTML_DIR/$PRE_PUSH_DEST" ]; then
echo $(GREEN "Found") $(CYAN $PRE_PUSH_DEST)
mv $AMPHTML_DIR/$PRE_PUSH_DEST $AMPHTML_DIR/$PRE_PUSH_BACKUP
echo $(GREEN "Moved it to") $(CYAN $PRE_PUSH_BACKUP)
fi
cat > $AMPHTML_DIR/$PRE_PUSH_DEST <<- EOM
#!/bin/bash
# Pre-push hook for AMPHTML
eval $AMPHTML_DIR/$PRE_PUSH_SRC
EOM
chmod 755 $AMPHTML_DIR/$PRE_PUSH_DEST
echo $(GREEN "Successfully wrote") $(CYAN "$PRE_PUSH_DEST")
|
lannka/amphtml
|
build-system/common/enable-git-pre-push.sh
|
Shell
|
apache-2.0
| 2,961 |
#!/bin/bash
USERDIR=python-codec
VERSION=`PYTHONPATH=$USERDIR/src/ python get_version.py`
USERZIP=$USERDIR-$VERSION-win32.zip
DISTDIR=dist
WINDIR=$USERDIR-windows
rm -Rf $WINDIR
cp -R $USERDIR $WINDIR
svn export http://rl-glue-ext.googlecode.com/svn/trunk/projects/codecs/Python/WINDOWS-README.txt $WINDIR/README.txt
cd $WINDIR/src
python setup.py bdist_wininst
cp dist/RL_Glue_PythonCodec-$VERSION.win32.exe ../
cd ..
rm -Rf src
unix2dos *.txt
unix2dos ./examples/skeleton/*.py ./examples/skeleton/*.txt
unix2dos ./examples/mines-sarsa-example/*.py ./examples/mines-sarsa-example/*.txt
zip -r $USERZIP PythonCodec.pdf *.exe README.txt examples LICENSE-2.0.txt
# in case there already is one
rm -f ../dist/$USERZIP
cp $USERZIP ../dist
|
pclarke91/rl-glue-ext
|
projects/distribution_tools/Python-Codec/prepare-windows-package.bash
|
Shell
|
apache-2.0
| 738 |
#!/bin/bash
# usage:
# api-expect-error.sh <URL> <params> <message>
# curls the URL with the params, and expects result error="true", with result message if specified
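# illustrative invocation (hypothetical endpoint and message):
#   api-expect-error.sh "$APIURL/api/1/project/NONE" "" "project does not exist" 404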
SRC_DIR=$(cd `dirname $0` && pwd)
DIR=${TMP_DIR:-$SRC_DIR}
errorMsg() {
echo "$*" 1>&2
}
requrl="$1"
shift
params="$1"
shift
message="$1"
shift
code="${1:-400}"
shift
set -- $requrl
source $SRC_DIR/include.sh
# get listing
docurl -D $DIR/headers.out $CURL_REQ_OPTS ${requrl}?${params} > $DIR/curl.out
if [ 0 != $? ] ; then
errorMsg "FAIL: failed query request"
exit 2
fi
grep "HTTP/1.1 $code" -q $DIR/headers.out
okheader=$?
if [ 0 != $okheader ] ; then
errorMsg "FAIL: Response was not $code"
grep 'HTTP/1.1' $DIR/headers.out
exit 2
fi
rm $DIR/headers.out
$SHELL $SRC_DIR/api-test-error.sh $DIR/curl.out "$message"
|
tjordanchat/rundeck
|
test/api/api-expect-error.sh
|
Shell
|
apache-2.0
| 820 |
#!/bin/sh
# a u t o g e n . s h
#
# Copyright (c) 2005-2009 United States Government as represented by
# the U.S. Army Research Laboratory.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
###
#
# Script for automatically preparing the sources for compilation by
# performing the myriad of necessary steps. The script attempts to
# detect proper version support, and outputs warnings about particular
# systems that have autotool peculiarities.
#
# Basically, if everything is set up and installed correctly, the
# script will validate that minimum versions of the GNU Build System
# tools are installed, account for several common configuration
# issues, and then simply run autoreconf for you.
#
# If autoreconf fails, which can happen for many valid configurations,
# this script proceeds to run manual preparation steps, effectively
# providing a (mostly complete) POSIX shell script reimplementation of
# autoreconf.
#
# The AUTORECONF, AUTOCONF, AUTOMAKE, LIBTOOLIZE, ACLOCAL, AUTOHEADER
# environment variables and corresponding _OPTIONS variables (e.g.
# AUTORECONF_OPTIONS) may be used to override the default automatic
# detection behaviors. Similarly the _VERSION variables will override
# the minimum required version numbers.
#
# Examples:
#
# To obtain help on usage:
# ./autogen.sh --help
#
# To obtain verbose output:
# ./autogen.sh --verbose
#
# To skip autoreconf and prepare manually:
# AUTORECONF=false ./autogen.sh
#
# To verbosely try running with an older (unsupported) autoconf:
# AUTOCONF_VERSION=2.50 ./autogen.sh --verbose
#
# Author:
# Christopher Sean Morrison <[email protected]>
#
# Patches:
# Sebastian Pipping <[email protected]>
#
######################################################################
# set to minimum acceptable version of autoconf
if [ "x$AUTOCONF_VERSION" = "x" ] ; then
AUTOCONF_VERSION=2.52
fi
# set to minimum acceptable version of automake
if [ "x$AUTOMAKE_VERSION" = "x" ] ; then
AUTOMAKE_VERSION=1.11
fi
# set to minimum acceptable version of libtool
if [ "x$LIBTOOL_VERSION" = "x" ] ; then
LIBTOOL_VERSION=1.4.2
fi
##################
# ident function #
##################
ident ( ) {
# extract copyright from header
__copyright="`grep Copyright $AUTOGEN_SH | head -${HEAD_N}1 | awk '{print $4}'`"
if [ "x$__copyright" = "x" ] ; then
__copyright="`date +%Y`"
fi
# extract version from CVS Id string
__id="$Id: autogen.sh 33925 2009-03-01 23:27:06Z brlcad $"
__version="`echo $__id | sed 's/.*\([0-9][0-9][0-9][0-9]\)[-\/]\([0-9][0-9]\)[-\/]\([0-9][0-9]\).*/\1\2\3/'`"
if [ "x$__version" = "x" ] ; then
__version=""
fi
echo "autogen.sh build preparation script by Christopher Sean Morrison"
echo " + config.guess download patch by Sebastian Pipping (2008-12-03)"
echo "revised 3-clause BSD-style license, copyright (c) $__copyright"
echo "script version $__version, ISO/IEC 9945 POSIX shell script"
}
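# Illustrative only: for the Id string above, the sed expression collapses the
# embedded date "2009-03-01" into the version stamp "20090301".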
##################
# USAGE FUNCTION #
##################
usage ( ) {
echo "Usage: $AUTOGEN_SH [-h|--help] [-v|--verbose] [-q|--quiet] [-d|--download] [--version]"
echo " --help Help on $NAME_OF_AUTOGEN usage"
echo " --verbose Verbose progress output"
echo " --quiet Quiet suppressed progress output"
echo " --download Download the latest config.guess from gnulib"
echo " --version Only perform GNU Build System version checks"
echo
echo "Description: This script will validate that minimum versions of the"
echo "GNU Build System tools are installed and then run autoreconf for you."
echo "Should autoreconf fail, manual preparation steps will be run"
echo "potentially accounting for several common preparation issues. The"
echo "AUTORECONF, AUTOCONF, AUTOMAKE, LIBTOOLIZE, ACLOCAL, AUTOHEADER,"
echo "PROJECT, & CONFIGURE environment variables and corresponding _OPTIONS"
echo "variables (e.g. AUTORECONF_OPTIONS) may be used to override the"
echo "default automatic detection behavior."
echo
ident
return 0
}
##########################
# VERSION_ERROR FUNCTION #
##########################
version_error ( ) {
if [ "x$1" = "x" ] ; then
echo "INTERNAL ERROR: version_error was not provided a version"
exit 1
fi
if [ "x$2" = "x" ] ; then
echo "INTERNAL ERROR: version_error was not provided an application name"
exit 1
fi
$ECHO
$ECHO "ERROR: To prepare the ${PROJECT} build system from scratch,"
$ECHO " at least version $1 of $2 must be installed."
$ECHO
$ECHO "$NAME_OF_AUTOGEN does not need to be run on the same machine that will"
$ECHO "run configure or make. Either the GNU Autotools will need to be installed"
$ECHO "or upgraded on this system, or $NAME_OF_AUTOGEN must be run on the source"
$ECHO "code on another system and then transferred to here. -- Cheers!"
$ECHO
}
##########################
# VERSION_CHECK FUNCTION #
##########################
version_check ( ) {
if [ "x$1" = "x" ] ; then
echo "INTERNAL ERROR: version_check was not provided a minimum version"
exit 1
fi
_min="$1"
if [ "x$2" = "x" ] ; then
echo "INTERNAL ERROR: version check was not provided a comparison version"
exit 1
fi
_cur="$2"
# needed to handle versions like 1.10 and 1.4-p6
_min="`echo ${_min}. | sed 's/[^0-9]/./g' | sed 's/\.\././g'`"
_cur="`echo ${_cur}. | sed 's/[^0-9]/./g' | sed 's/\.\././g'`"
_min_major="`echo $_min | cut -d. -f1`"
_min_minor="`echo $_min | cut -d. -f2`"
_min_patch="`echo $_min | cut -d. -f3`"
_cur_major="`echo $_cur | cut -d. -f1`"
_cur_minor="`echo $_cur | cut -d. -f2`"
_cur_patch="`echo $_cur | cut -d. -f3`"
if [ "x$_min_major" = "x" ] ; then
_min_major=0
fi
if [ "x$_min_minor" = "x" ] ; then
_min_minor=0
fi
if [ "x$_min_patch" = "x" ] ; then
_min_patch=0
fi
if [ "x$_cur_minor" = "x" ] ; then
_cur_major=0
fi
if [ "x$_cur_minor" = "x" ] ; then
_cur_minor=0
fi
if [ "x$_cur_patch" = "x" ] ; then
_cur_patch=0
fi
$VERBOSE_ECHO "Checking if ${_cur_major}.${_cur_minor}.${_cur_patch} is greater than ${_min_major}.${_min_minor}.${_min_patch}"
if [ $_min_major -lt $_cur_major ] ; then
return 0
elif [ $_min_major -eq $_cur_major ] ; then
if [ $_min_minor -lt $_cur_minor ] ; then
return 0
elif [ $_min_minor -eq $_cur_minor ] ; then
if [ $_min_patch -lt $_cur_patch ] ; then
return 0
elif [ $_min_patch -eq $_cur_patch ] ; then
return 0
fi
fi
fi
return 1
}
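# Illustrative usage (not executed by this script):
#   version_check "1.4.2" "2.2.6" && echo "new enough"   # returns 0
#   version_check "2.52" "2.13" || echo "too old"        # returns 1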
######################################
# LOCATE_CONFIGURE_TEMPLATE FUNCTION #
######################################
locate_configure_template ( ) {
_pwd="`pwd`"
if test -f "./configure.ac" ; then
echo "./configure.ac"
elif test -f "./configure.in" ; then
echo "./configure.in"
elif test -f "$_pwd/configure.ac" ; then
echo "$_pwd/configure.ac"
elif test -f "$_pwd/configure.in" ; then
echo "$_pwd/configure.in"
elif test -f "$PATH_TO_AUTOGEN/configure.ac" ; then
echo "$PATH_TO_AUTOGEN/configure.ac"
elif test -f "$PATH_TO_AUTOGEN/configure.in" ; then
echo "$PATH_TO_AUTOGEN/configure.in"
fi
}
##################
# argument check #
##################
ARGS="$*"
PATH_TO_AUTOGEN="`dirname $0`"
NAME_OF_AUTOGEN="`basename $0`"
AUTOGEN_SH="$PATH_TO_AUTOGEN/$NAME_OF_AUTOGEN"
LIBTOOL_M4="${PATH_TO_AUTOGEN}/misc/libtool.m4"
if [ "x$HELP" = "x" ] ; then
HELP=no
fi
if [ "x$QUIET" = "x" ] ; then
QUIET=no
fi
if [ "x$VERBOSE" = "x" ] ; then
VERBOSE=no
fi
if [ "x$VERSION_ONLY" = "x" ] ; then
VERSION_ONLY=no
fi
if [ "x$DOWNLOAD" = "x" ] ; then
DOWNLOAD=no
fi
if [ "x$AUTORECONF_OPTIONS" = "x" ] ; then
AUTORECONF_OPTIONS="-i -f"
fi
if [ "x$AUTOCONF_OPTIONS" = "x" ] ; then
AUTOCONF_OPTIONS="-f"
fi
if [ "x$AUTOMAKE_OPTIONS" = "x" ] ; then
AUTOMAKE_OPTIONS="-a -c -f"
fi
ALT_AUTOMAKE_OPTIONS="-a -c"
if [ "x$LIBTOOLIZE_OPTIONS" = "x" ] ; then
LIBTOOLIZE_OPTIONS="--automake -c -f"
fi
ALT_LIBTOOLIZE_OPTIONS="--automake --copy --force"
if [ "x$ACLOCAL_OPTIONS" = "x" ] ; then
ACLOCAL_OPTIONS=""
fi
if [ "x$AUTOHEADER_OPTIONS" = "x" ] ; then
AUTOHEADER_OPTIONS=""
fi
if [ "x$CONFIG_GUESS_URL" = "x" ] ; then
CONFIG_GUESS_URL="http://git.savannah.gnu.org/gitweb/?p=gnulib.git;a=blob_plain;f=build-aux/config.guess;hb=HEAD"
fi
for arg in $ARGS ; do
case "x$arg" in
x--help) HELP=yes ;;
x-[hH]) HELP=yes ;;
x--quiet) QUIET=yes ;;
x-[qQ]) QUIET=yes ;;
x--verbose) VERBOSE=yes ;;
x-[dD]) DOWNLOAD=yes ;;
x--download) DOWNLOAD=yes ;;
x-[vV]) VERBOSE=yes ;;
x--version) VERSION_ONLY=yes ;;
*)
echo "Unknown option: $arg"
echo
usage
exit 1
;;
esac
done
#####################
# environment check #
#####################
# sanity check before recursions potentially begin
if [ ! -f "$AUTOGEN_SH" ] ; then
echo "INTERNAL ERROR: $AUTOGEN_SH does not exist"
if [ ! "x$0" = "x$AUTOGEN_SH" ] ; then
echo "INTERNAL ERROR: dirname/basename inconsistency: $0 != $AUTOGEN_SH"
fi
exit 1
fi
# force locale setting to C so things like date output as expected
LC_ALL=C
# commands that this script expects
for __cmd in echo head tail pwd ; do
echo "test" | $__cmd > /dev/null 2>&1
if [ $? != 0 ] ; then
echo "INTERNAL ERROR: '${__cmd}' command is required"
exit 2
fi
done
echo "test" | grep "test" > /dev/null 2>&1
if test ! x$? = x0 ; then
echo "INTERNAL ERROR: grep command is required"
exit 1
fi
echo "test" | sed "s/test/test/" > /dev/null 2>&1
if test ! x$? = x0 ; then
echo "INTERNAL ERROR: sed command is required"
exit 1
fi
# determine the behavior of echo
case `echo "testing\c"; echo 1,2,3`,`echo -n testing; echo 1,2,3` in
*c*,-n*) ECHO_N= ECHO_C='
' ECHO_T=' ' ;;
*c*,* ) ECHO_N=-n ECHO_C= ECHO_T= ;;
*) ECHO_N= ECHO_C='\c' ECHO_T= ;;
esac
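# Illustrative only: with a BSD-style echo, ECHO_N is "-n" and prompts are
# printed as `$ECHO $ECHO_N "message $ECHO_C"`; with a SysV echo, the "\c"
# in ECHO_C suppresses the trailing newline instead.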
# determine the behavior of head
case "x`echo 'head' | head -n 1 2>&1`" in
*xhead*) HEAD_N="n " ;;
*) HEAD_N="" ;;
esac
# determine the behavior of tail
case "x`echo 'tail' | tail -n 1 2>&1`" in
*xtail*) TAIL_N="n " ;;
*) TAIL_N="" ;;
esac
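# Illustrative only: when HEAD_N is "n ", "head -${HEAD_N}1" expands to the
# modern "head -n 1"; when empty, the legacy "head -1" form is used instead.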
VERBOSE_ECHO=:
ECHO=:
if [ "x$QUIET" = "xyes" ] ; then
if [ "x$VERBOSE" = "xyes" ] ; then
echo "Verbose output quelled by quiet option. Further output disabled."
fi
else
ECHO=echo
if [ "x$VERBOSE" = "xyes" ] ; then
echo "Verbose output enabled"
VERBOSE_ECHO=echo
fi
fi
# allow a recursive run to disable further recursions
if [ "x$RUN_RECURSIVE" = "x" ] ; then
RUN_RECURSIVE=yes
fi
################################################
# check for help arg and bypass version checks #
################################################
if [ "x`echo $ARGS | sed 's/.*[hH][eE][lL][pP].*/help/'`" = "xhelp" ] ; then
HELP=yes
fi
if [ "x$HELP" = "xyes" ] ; then
usage
$ECHO "---"
$ECHO "Help was requested. No preparation or configuration will be performed."
exit 0
fi
#######################
# set up signal traps #
#######################
untrap_abnormal ( ) {
for sig in 1 2 13 15; do
trap - $sig
done
}
# do this cleanup whenever we exit.
trap '
# start from the root
if test -d "$START_PATH" ; then
cd "$START_PATH"
fi
# restore/delete backup files
if test "x$PFC_INIT" = "x1" ; then
recursive_restore
fi
' 0
# trap SIGHUP (1), SIGINT (2), SIGPIPE (13), SIGTERM (15)
for sig in 1 2 13 15; do
trap '
$ECHO ""
$ECHO "Aborting $NAME_OF_AUTOGEN: caught signal '$sig'"
# start from the root
if test -d "$START_PATH" ; then
cd "$START_PATH"
fi
# clean up on abnormal exit
$VERBOSE_ECHO "rm -rf autom4te.cache"
rm -rf autom4te.cache
if test -f "acinclude.m4.$$.backup" ; then
$VERBOSE_ECHO "cat acinclude.m4.$$.backup > acinclude.m4"
chmod u+w acinclude.m4
cat acinclude.m4.$$.backup > acinclude.m4
$VERBOSE_ECHO "rm -f acinclude.m4.$$.backup"
rm -f acinclude.m4.$$.backup
fi
{ (exit 1); exit 1; }
' $sig
done
#############################
# look for a configure file #
#############################
if [ "x$CONFIGURE" = "x" ] ; then
CONFIGURE="`locate_configure_template`"
if [ ! "x$CONFIGURE" = "x" ] ; then
$VERBOSE_ECHO "Found a configure template: $CONFIGURE"
fi
else
$ECHO "Using CONFIGURE environment variable override: $CONFIGURE"
fi
if [ "x$CONFIGURE" = "x" ] ; then
if [ "x$VERSION_ONLY" = "xyes" ] ; then
CONFIGURE=/dev/null
else
$ECHO
$ECHO "A configure.ac or configure.in file could not be located implying"
$ECHO "that the GNU Build System is at least not used in this directory. In"
$ECHO "any case, there is nothing to do here without one of those files."
$ECHO
$ECHO "ERROR: No configure.in or configure.ac file found in `pwd`"
exit 1
fi
fi
####################
# get project name #
####################
if [ "x$PROJECT" = "x" ] ; then
PROJECT="`grep AC_INIT $CONFIGURE | grep -v '.*#.*AC_INIT' | tail -${TAIL_N}1 | sed 's/^[ ]*AC_INIT(\([^,)]*\).*/\1/' | sed 's/.*\[\(.*\)\].*/\1/'`"
if [ "x$PROJECT" = "xAC_INIT" ] ; then
# projects might be using the older/deprecated arg-less AC_INIT .. look for AM_INIT_AUTOMAKE instead
PROJECT="`grep AM_INIT_AUTOMAKE $CONFIGURE | grep -v '.*#.*AM_INIT_AUTOMAKE' | tail -${TAIL_N}1 | sed 's/^[ ]*AM_INIT_AUTOMAKE(\([^,)]*\).*/\1/' | sed 's/.*\[\(.*\)\].*/\1/'`"
fi
if [ "x$PROJECT" = "xAM_INIT_AUTOMAKE" ] ; then
PROJECT="project"
fi
if [ "x$PROJECT" = "x" ] ; then
PROJECT="project"
fi
else
$ECHO "Using PROJECT environment variable override: $PROJECT"
fi
$ECHO "Preparing the $PROJECT build system...please wait"
$ECHO
########################
# check for autoreconf #
########################
HAVE_AUTORECONF=no
if [ "x$AUTORECONF" = "x" ] ; then
for AUTORECONF in autoreconf ; do
$VERBOSE_ECHO "Checking autoreconf version: $AUTORECONF --version"
$AUTORECONF --version > /dev/null 2>&1
if [ $? = 0 ] ; then
HAVE_AUTORECONF=yes
break
fi
done
else
HAVE_AUTORECONF=yes
$ECHO "Using AUTORECONF environment variable override: $AUTORECONF"
fi
##########################
# autoconf version check #
##########################
_acfound=no
if [ "x$AUTOCONF" = "x" ] ; then
for AUTOCONF in autoconf ; do
$VERBOSE_ECHO "Checking autoconf version: $AUTOCONF --version"
$AUTOCONF --version > /dev/null 2>&1
if [ $? = 0 ] ; then
_acfound=yes
break
fi
done
else
_acfound=yes
$ECHO "Using AUTOCONF environment variable override: $AUTOCONF"
fi
_report_error=no
if [ ! "x$_acfound" = "xyes" ] ; then
$ECHO "ERROR: Unable to locate GNU Autoconf."
_report_error=yes
else
_version="`$AUTOCONF --version | head -${HEAD_N}1 | sed 's/[^0-9]*\([0-9\.][0-9\.]*\)/\1/'`"
if [ "x$_version" = "x" ] ; then
_version="0.0.0"
fi
$ECHO "Found GNU Autoconf version $_version"
version_check "$AUTOCONF_VERSION" "$_version"
if [ $? -ne 0 ] ; then
_report_error=yes
fi
fi
if [ "x$_report_error" = "xyes" ] ; then
version_error "$AUTOCONF_VERSION" "GNU Autoconf"
exit 1
fi
##########################
# automake version check #
##########################
_amfound=no
if [ "x$AUTOMAKE" = "x" ] ; then
for AUTOMAKE in automake ; do
$VERBOSE_ECHO "Checking automake version: $AUTOMAKE --version"
$AUTOMAKE --version > /dev/null 2>&1
if [ $? = 0 ] ; then
_amfound=yes
break
fi
done
else
_amfound=yes
$ECHO "Using AUTOMAKE environment variable override: $AUTOMAKE"
fi
_report_error=no
if [ ! "x$_amfound" = "xyes" ] ; then
$ECHO
$ECHO "ERROR: Unable to locate GNU Automake."
_report_error=yes
else
_version="`$AUTOMAKE --version | head -${HEAD_N}1 | sed 's/[^0-9]*\([0-9\.][0-9\.]*\)/\1/'`"
if [ "x$_version" = "x" ] ; then
_version="0.0.0"
fi
$ECHO "Found GNU Automake version $_version"
version_check "$AUTOMAKE_VERSION" "$_version"
if [ $? -ne 0 ] ; then
_report_error=yes
fi
fi
if [ "x$_report_error" = "xyes" ] ; then
version_error "$AUTOMAKE_VERSION" "GNU Automake"
exit 1
fi
########################
# check for libtoolize #
########################
HAVE_LIBTOOLIZE=yes
HAVE_ALT_LIBTOOLIZE=no
_ltfound=no
if [ "x$LIBTOOLIZE" = "x" ] ; then
LIBTOOLIZE=libtoolize
$VERBOSE_ECHO "Checking libtoolize version: $LIBTOOLIZE --version"
$LIBTOOLIZE --version > /dev/null 2>&1
if [ ! $? = 0 ] ; then
HAVE_LIBTOOLIZE=no
$ECHO
if [ "x$HAVE_AUTORECONF" = "xno" ] ; then
$ECHO "Warning: libtoolize does not appear to be available."
else
$ECHO "Warning: libtoolize does not appear to be available. This means that"
$ECHO "the automatic build preparation via autoreconf will probably not work."
$ECHO "Preparing the build by running each step individually, however, should"
$ECHO "work and will be done automatically for you if autoreconf fails."
fi
# look for some alternates
for tool in glibtoolize libtoolize15 libtoolize14 libtoolize13 ; do
$VERBOSE_ECHO "Checking libtoolize alternate: $tool --version"
_glibtoolize="`$tool --version > /dev/null 2>&1`"
if [ $? = 0 ] ; then
$VERBOSE_ECHO "Found $tool --version"
_glti="`which $tool`"
if [ "x$_glti" = "x" ] ; then
$VERBOSE_ECHO "Cannot find $tool with which"
continue;
fi
if test ! -f "$_glti" ; then
$VERBOSE_ECHO "Cannot use $tool, $_glti is not a file"
continue;
fi
_gltidir="`dirname $_glti`"
if [ "x$_gltidir" = "x" ] ; then
$VERBOSE_ECHO "Cannot find $tool path with dirname of $_glti"
continue;
fi
if test ! -d "$_gltidir" ; then
$VERBOSE_ECHO "Cannot use $tool, $_gltidir is not a directory"
continue;
fi
HAVE_ALT_LIBTOOLIZE=yes
LIBTOOLIZE="$tool"
$ECHO
$ECHO "Fortunately, $tool was found which means that your system may simply"
$ECHO "have a non-standard or incomplete GNU Autotools install. If you have"
$ECHO "sufficient system access, it may be possible to quell this warning by"
$ECHO "running:"
$ECHO
sudo -V > /dev/null 2>&1
if [ $? = 0 ] ; then
$ECHO " sudo ln -s $_glti $_gltidir/libtoolize"
$ECHO
else
$ECHO " ln -s $_glti $_gltidir/libtoolize"
$ECHO
$ECHO "Run that as root or with proper permissions to the $_gltidir directory"
$ECHO
fi
_ltfound=yes
break
fi
done
else
_ltfound=yes
fi
else
_ltfound=yes
$ECHO "Using LIBTOOLIZE environment variable override: $LIBTOOLIZE"
fi
############################
# libtoolize version check #
############################
_report_error=no
if [ ! "x$_ltfound" = "xyes" ] ; then
$ECHO
$ECHO "ERROR: Unable to locate GNU Libtool."
_report_error=yes
else
_version="`$LIBTOOLIZE --version | head -${HEAD_N}1 | sed 's/[^0-9]*\([0-9\.][0-9\.]*\)/\1/'`"
if [ "x$_version" = "x" ] ; then
_version="0.0.0"
fi
$ECHO "Found GNU Libtool version $_version"
version_check "$LIBTOOL_VERSION" "$_version"
if [ $? -ne 0 ] ; then
_report_error=yes
fi
fi
if [ "x$_report_error" = "xyes" ] ; then
version_error "$LIBTOOL_VERSION" "GNU Libtool"
exit 1
fi
#####################
# check for aclocal #
#####################
if [ "x$ACLOCAL" = "x" ] ; then
for ACLOCAL in aclocal ; do
$VERBOSE_ECHO "Checking aclocal version: $ACLOCAL --version"
$ACLOCAL --version > /dev/null 2>&1
if [ $? = 0 ] ; then
break
fi
done
else
$ECHO "Using ACLOCAL environment variable override: $ACLOCAL"
fi
########################
# check for autoheader #
########################
if [ "x$AUTOHEADER" = "x" ] ; then
for AUTOHEADER in autoheader ; do
$VERBOSE_ECHO "Checking autoheader version: $AUTOHEADER --version"
$AUTOHEADER --version > /dev/null 2>&1
if [ $? = 0 ] ; then
break
fi
done
else
$ECHO "Using AUTOHEADER environment variable override: $AUTOHEADER"
fi
#########################
# check if version only #
#########################
$VERBOSE_ECHO "Checking whether to only output version information"
if [ "x$VERSION_ONLY" = "xyes" ] ; then
$ECHO
ident
$ECHO "---"
$ECHO "Version requested. No preparation or configuration will be performed."
exit 0
fi
#################################
# PROTECT_FROM_CLOBBER FUNCTION #
#################################
protect_from_clobber ( ) {
PFC_INIT=1
# protect COPYING & INSTALL from overwrite by automake. the
# automake force option will (inappropriately) ignore the existing
# contents of a COPYING and/or INSTALL files (depending on the
# version) instead of just forcing *missing* files like it does
# for AUTHORS, NEWS, and README. this is broken but extremely
# prevalent behavior, so we protect against it by keeping a backup
# of the file that can later be restored.
for file in COPYING INSTALL ; do
if test -f ${file} ; then
if test -f ${file}.$$.protect_from_automake.backup ; then
$VERBOSE_ECHO "Already backed up ${file} in `pwd`"
else
$VERBOSE_ECHO "Backing up ${file} in `pwd`"
$VERBOSE_ECHO "cp -p ${file} ${file}.$$.protect_from_automake.backup"
cp -p ${file} ${file}.$$.protect_from_automake.backup
fi
fi
done
}
##############################
# RECURSIVE_PROTECT FUNCTION #
##############################
recursive_protect ( ) {
# for projects using recursive configure, run the build
# preparation steps for the subdirectories. this function assumes
# START_PATH was set to pwd before recursion begins so that
# relative paths work.
# git 'r done, protect COPYING and INSTALL from being clobbered
protect_from_clobber
if test -d autom4te.cache ; then
$VERBOSE_ECHO "Found an autom4te.cache directory, deleting it"
$VERBOSE_ECHO "rm -rf autom4te.cache"
rm -rf autom4te.cache
fi
# find configure template
_configure="`locate_configure_template`"
if [ "x$_configure" = "x" ] ; then
return
fi
# $VERBOSE_ECHO "Looking for configure template found `pwd`/$_configure"
# look for subdirs
# $VERBOSE_ECHO "Looking for subdirs in `pwd`"
_det_config_subdirs="`grep AC_CONFIG_SUBDIRS $_configure | grep -v '.*#.*AC_CONFIG_SUBDIRS' | sed 's/^[ ]*AC_CONFIG_SUBDIRS(\(.*\)).*/\1/' | sed 's/.*\[\(.*\)\].*/\1/'`"
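# Illustrative only: a template line "AC_CONFIG_SUBDIRS([src/other])"
# yields "src/other" here.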
CHECK_DIRS=""
for dir in $_det_config_subdirs ; do
if test -d "`pwd`/$dir" ; then
CHECK_DIRS="$CHECK_DIRS \"`pwd`/$dir\""
fi
done
# process subdirs
if [ ! "x$CHECK_DIRS" = "x" ] ; then
$VERBOSE_ECHO "Recursively scanning the following directories:"
$VERBOSE_ECHO " $CHECK_DIRS"
for dir in $CHECK_DIRS ; do
$VERBOSE_ECHO "Protecting files from automake in $dir"
cd "$START_PATH"
eval "cd $dir"
# recursively git 'r done
recursive_protect
done
fi
} # end of recursive_protect
##############################
# RESTORE_CLOBBERED FUNCTION #
##############################
restore_clobbered ( ) {
# The automake (and autoreconf by extension) -f/--force-missing
# option may overwrite COPYING and INSTALL even if they do exist.
# Here we restore the files if necessary.
spacer=no
for file in COPYING INSTALL ; do
if test -f ${file}.$$.protect_from_automake.backup ; then
if test -f ${file} ; then
# compare entire content, restore if needed
if test "x`cat ${file}`" != "x`cat ${file}.$$.protect_from_automake.backup`" ; then
if test "x$spacer" = "xno" ; then
$VERBOSE_ECHO
spacer=yes
fi
# restore the backup
$VERBOSE_ECHO "Restoring ${file} from backup (automake -f likely clobbered it)"
$VERBOSE_ECHO "rm -f ${file}"
rm -f ${file}
$VERBOSE_ECHO "mv ${file}.$$.protect_from_automake.backup ${file}"
mv ${file}.$$.protect_from_automake.backup ${file}
fi # check contents
elif test -f ${file}.$$.protect_from_automake.backup ; then
$VERBOSE_ECHO "mv ${file}.$$.protect_from_automake.backup ${file}"
mv ${file}.$$.protect_from_automake.backup ${file}
fi # -f ${file}
# just in case
$VERBOSE_ECHO "rm -f ${file}.$$.protect_from_automake.backup"
rm -f ${file}.$$.protect_from_automake.backup
fi # -f ${file}.$$.protect_from_automake.backup
done
CONFIGURE="`locate_configure_template`"
if [ "x$CONFIGURE" = "x" ] ; then
return
fi
_aux_dir="`grep AC_CONFIG_AUX_DIR $CONFIGURE | grep -v '.*#.*AC_CONFIG_AUX_DIR' | tail -${TAIL_N}1 | sed 's/^[ ]*AC_CONFIG_AUX_DIR(\(.*\)).*/\1/' | sed 's/.*\[\(.*\)\].*/\1/'`"
if test ! -d "$_aux_dir" ; then
_aux_dir=.
fi
for file in config.guess config.sub ltmain.sh ; do
if test -f "${_aux_dir}/${file}" ; then
$VERBOSE_ECHO "rm -f \"${_aux_dir}/${file}.backup\""
rm -f "${_aux_dir}/${file}.backup"
fi
done
} # end of restore_clobbered
##############################
# RECURSIVE_RESTORE FUNCTION #
##############################
recursive_restore ( ) {
# restore COPYING and INSTALL from backup if they were clobbered
# for each directory recursively.
# git 'r undone
restore_clobbered
# find configure template
_configure="`locate_configure_template`"
if [ "x$_configure" = "x" ] ; then
return
fi
# look for subdirs
_det_config_subdirs="`grep AC_CONFIG_SUBDIRS $_configure | grep -v '.*#.*AC_CONFIG_SUBDIRS' | sed 's/^[ ]*AC_CONFIG_SUBDIRS(\(.*\)).*/\1/' | sed 's/.*\[\(.*\)\].*/\1/'`"
CHECK_DIRS=""
for dir in $_det_config_subdirs ; do
if test -d "`pwd`/$dir" ; then
CHECK_DIRS="$CHECK_DIRS \"`pwd`/$dir\""
fi
done
# process subdirs
if [ ! "x$CHECK_DIRS" = "x" ] ; then
$VERBOSE_ECHO "Recursively scanning the following directories:"
$VERBOSE_ECHO " $CHECK_DIRS"
for dir in $CHECK_DIRS ; do
$VERBOSE_ECHO "Checking files for automake damage in $dir"
cd "$START_PATH"
eval "cd $dir"
# recursively git 'r undone
recursive_restore
done
fi
} # end of recursive_restore
#######################
# INITIALIZE FUNCTION #
#######################
initialize ( ) {
# this routine performs a variety of directory-specific
# initializations. some are sanity checks, some are preventive,
# and some are necessary setup detection.
#
# this function sets:
# CONFIGURE
# SEARCH_DIRS
# CONFIG_SUBDIRS
##################################
# check for a configure template #
##################################
CONFIGURE="`locate_configure_template`"
if [ "x$CONFIGURE" = "x" ] ; then
$ECHO
$ECHO "A configure.ac or configure.in file could not be located implying"
$ECHO "that the GNU Build System is at least not used in this directory. In"
$ECHO "any case, there is nothing to do here without one of those files."
$ECHO
$ECHO "ERROR: No configure.in or configure.ac file found in `pwd`"
exit 1
fi
#####################
# detect an aux dir #
#####################
_aux_dir="`grep AC_CONFIG_AUX_DIR $CONFIGURE | grep -v '.*#.*AC_CONFIG_AUX_DIR' | tail -${TAIL_N}1 | sed 's/^[ ]*AC_CONFIG_AUX_DIR(\(.*\)).*/\1/' | sed 's/.*\[\(.*\)\].*/\1/'`"
if test ! -d "$_aux_dir" ; then
_aux_dir=.
else
$VERBOSE_ECHO "Detected auxillary directory: $_aux_dir"
fi
################################
# detect a recursive configure #
################################
CONFIG_SUBDIRS=""
_det_config_subdirs="`grep AC_CONFIG_SUBDIRS $CONFIGURE | grep -v '.*#.*AC_CONFIG_SUBDIRS' | sed 's/^[ ]*AC_CONFIG_SUBDIRS(\(.*\)).*/\1/' | sed 's/.*\[\(.*\)\].*/\1/'`"
for dir in $_det_config_subdirs ; do
if test -d "`pwd`/$dir" ; then
$VERBOSE_ECHO "Detected recursive configure directory: `pwd`/$dir"
CONFIG_SUBDIRS="$CONFIG_SUBDIRS `pwd`/$dir"
fi
done
###########################################################
# make sure certain required files exist for GNU projects #
###########################################################
_marker_found=""
_marker_found_message_intro='Detected non-GNU marker "'
_marker_found_message_mid='" in '
for marker in foreign cygnus ; do
_marker_found_message=${_marker_found_message_intro}${marker}${_marker_found_message_mid}
_marker_found="`grep 'AM_INIT_AUTOMAKE.*'${marker} $CONFIGURE`"
if [ ! "x$_marker_found" = "x" ] ; then
$VERBOSE_ECHO "${_marker_found_message}`basename \"$CONFIGURE\"`"
break
fi
if test -f "`dirname \"$CONFIGURE\"/Makefile.am`" ; then
_marker_found="`grep 'AUTOMAKE_OPTIONS.*'${marker} Makefile.am`"
if [ ! "x$_marker_found" = "x" ] ; then
$VERBOSE_ECHO "${_marker_found_message}Makefile.am"
break
fi
fi
done
if [ "x${_marker_found}" = "x" ] ; then
_suggest_foreign=no
for file in AUTHORS COPYING ChangeLog INSTALL NEWS README ; do
if [ ! -f $file ] ; then
$VERBOSE_ECHO "Touching ${file} since it does not exist"
_suggest_foreign=yes
touch $file
fi
done
if [ "x${_suggest_foreign}" = "xyes" ] ; then
$ECHO
$ECHO "Warning: Several files expected of projects that conform to the GNU"
$ECHO "coding standards were not found. The files were automatically added"
$ECHO "for you since you do not have a 'foreign' declaration specified."
$ECHO
$ECHO "Considered adding 'foreign' to AM_INIT_AUTOMAKE in `basename \"$CONFIGURE\"`"
if test -f "`dirname \"$CONFIGURE\"`/Makefile.am" ; then
$ECHO "or to AUTOMAKE_OPTIONS in your top-level Makefile.am file."
fi
$ECHO
fi
fi
##################################################
# make sure certain generated files do not exist #
##################################################
for file in config.guess config.sub ltmain.sh ; do
if test -f "${_aux_dir}/${file}" ; then
$VERBOSE_ECHO "mv -f \"${_aux_dir}/${file}\" \"${_aux_dir}/${file}.backup\""
mv -f "${_aux_dir}/${file}" "${_aux_dir}/${file}.backup"
fi
done
############################
# search alternate m4 dirs #
############################
SEARCH_DIRS=""
for dir in m4 ; do
if [ -d $dir ] ; then
$VERBOSE_ECHO "Found extra aclocal search directory: $dir"
SEARCH_DIRS="$SEARCH_DIRS -I $dir"
fi
done
######################################
# remove any previous build products #
######################################
if test -d autom4te.cache ; then
$VERBOSE_ECHO "Found an autom4te.cache directory, deleting it"
$VERBOSE_ECHO "rm -rf autom4te.cache"
rm -rf autom4te.cache
fi
# tcl/tk (and probably others) have a customized aclocal.m4, so can't delete it
# if test -f aclocal.m4 ; then
# $VERBOSE_ECHO "Found an aclocal.m4 file, deleting it"
# $VERBOSE_ECHO "rm -f aclocal.m4"
# rm -f aclocal.m4
# fi
} # end of initialize()
##############
# initialize #
##############
# stash path
START_PATH="`pwd`"
# Before running autoreconf or manual steps, some prep detection work
# is necessary or useful. Only needs to occur once per directory, but
# does need to traverse the entire subconfigure hierarchy to protect
# files from being clobbered even by autoreconf.
recursive_protect
# start from where we started
cd "$START_PATH"
# get ready to process
initialize
#########################################
# DOWNLOAD_GNULIB_CONFIG_GUESS FUNCTION #
#########################################
# TODO - should make sure wget/curl exist and/or work before trying to
# use them.
download_gnulib_config_guess () {
# abuse gitweb to download gnulib's latest config.guess via HTTP
config_guess_temp="config.guess.$$.download"
ret=1
for __cmd in wget curl fetch ; do
$VERBOSE_ECHO "Checking for command ${__cmd}"
${__cmd} --version > /dev/null 2>&1
ret=$?
if [ ! $ret = 0 ] ; then
continue
fi
__cmd_version=`${__cmd} --version | head -n 1 | sed -e 's/^[^0-9]\+//' -e 's/ .*//'`
$VERBOSE_ECHO "Found ${__cmd} ${__cmd_version}"
opts=""
case ${__cmd} in
wget)
opts="-O"
;;
curl)
opts="-o"
;;
fetch)
opts="-t 5 -f"
;;
esac
$VERBOSE_ECHO "Running $__cmd \"${CONFIG_GUESS_URL}\" $opts \"${config_guess_temp}\""
eval "$__cmd \"${CONFIG_GUESS_URL}\" $opts \"${config_guess_temp}\"" > /dev/null 2>&1
if [ $? = 0 ] ; then
mv -f "${config_guess_temp}" ${_aux_dir}/config.guess
ret=0
break
fi
done
if [ ! $ret = 0 ] ; then
$ECHO "Warning: config.guess download failed from: $CONFIG_GUESS_URL"
rm -f "${config_guess_temp}"
fi
}
##############################
# LIBTOOLIZE_NEEDED FUNCTION #
##############################
libtoolize_needed () {
ret=1 # means no, don't need libtoolize
for feature in AC_PROG_LIBTOOL AM_PROG_LIBTOOL LT_INIT ; do
$VERBOSE_ECHO "Searching for $feature in $CONFIGURE"
found="`grep \"^$feature.*\" $CONFIGURE`"
if [ ! "x$found" = "x" ] ; then
ret=0 # means yes, need to run libtoolize
break
fi
done
return ${ret}
}
############################################
# prepare build via autoreconf or manually #
############################################
reconfigure_manually=no
if [ "x$HAVE_AUTORECONF" = "xyes" ] ; then
$ECHO
$ECHO $ECHO_N "Automatically preparing build ... $ECHO_C"
$VERBOSE_ECHO "$AUTORECONF $SEARCH_DIRS $AUTORECONF_OPTIONS"
autoreconf_output="`$AUTORECONF $SEARCH_DIRS $AUTORECONF_OPTIONS 2>&1`"
ret=$?
$VERBOSE_ECHO "$autoreconf_output"
if [ ! $ret = 0 ] ; then
if [ "x$HAVE_ALT_LIBTOOLIZE" = "xyes" ] ; then
if [ ! "x`echo \"$autoreconf_output\" | grep libtoolize | grep \"No such file or directory\"`" = "x" ] ; then
$ECHO
$ECHO "Warning: autoreconf failed but due to what is usually a common libtool"
$ECHO "misconfiguration issue. This problem is encountered on systems that"
$ECHO "have installed libtoolize under a different name without providing a"
$ECHO "symbolic link or without setting the LIBTOOLIZE environment variable."
$ECHO
$ECHO "Restarting the preparation steps with LIBTOOLIZE set to $LIBTOOLIZE"
export LIBTOOLIZE
RUN_RECURSIVE=no
export RUN_RECURSIVE
untrap_abnormal
$VERBOSE_ECHO sh $AUTOGEN_SH "$1" "$2" "$3" "$4" "$5" "$6" "$7" "$8" "$9"
sh "$AUTOGEN_SH" "$1" "$2" "$3" "$4" "$5" "$6" "$7" "$8" "$9"
exit $?
fi
fi
$ECHO "Warning: $AUTORECONF failed"
if test -f ltmain.sh ; then
$ECHO "libtoolize being run by autoreconf is not creating ltmain.sh in the auxillary directory like it should"
fi
$ECHO "Attempting to run the preparation steps individually"
reconfigure_manually=yes
else
if [ "x$DOWNLOAD" = "xyes" ] ; then
if libtoolize_needed ; then
download_gnulib_config_guess
fi
fi
fi
else
reconfigure_manually=yes
fi
############################
# LIBTOOL_FAILURE FUNCTION #
############################
libtool_failure ( ) {
# libtool is rather error-prone in comparison to the other
# autotools and this routine attempts to compensate for some
# common failures. the output after a libtoolize failure is
# parsed for an error related to AC_PROG_LIBTOOL and if found, we
# attempt to inject a project-provided libtool.m4 file.
_autoconf_output="$1"
if [ "x$RUN_RECURSIVE" = "xno" ] ; then
# we already tried the libtool.m4, don't try again
return 1
fi
if test -f "$LIBTOOL_M4" ; then
found_libtool="`$ECHO $_autoconf_output | grep AC_PROG_LIBTOOL`"
if test ! "x$found_libtool" = "x" ; then
if test -f acinclude.m4 ; then
rm -f acinclude.m4.$$.backup
$VERBOSE_ECHO "cat acinclude.m4 > acinclude.m4.$$.backup"
cat acinclude.m4 > acinclude.m4.$$.backup
fi
$VERBOSE_ECHO "cat \"$LIBTOOL_M4\" >> acinclude.m4"
chmod u+w acinclude.m4
cat "$LIBTOOL_M4" >> acinclude.m4
# don't keep doing this
RUN_RECURSIVE=no
export RUN_RECURSIVE
untrap_abnormal
$ECHO
$ECHO "Restarting the preparation steps with libtool macros in acinclude.m4"
$VERBOSE_ECHO sh $AUTOGEN_SH "$1" "$2" "$3" "$4" "$5" "$6" "$7" "$8" "$9"
sh "$AUTOGEN_SH" "$1" "$2" "$3" "$4" "$5" "$6" "$7" "$8" "$9"
exit $?
fi
fi
}
###########################
# MANUAL_AUTOGEN FUNCTION #
###########################
manual_autogen ( ) {
##################################################
# Manual preparation steps taken are as follows: #
# aclocal [-I m4] #
# libtoolize --automake -c -f #
# aclocal [-I m4] #
# autoconf -f #
# autoheader #
# automake -a -c -f #
##################################################
###########
# aclocal #
###########
$VERBOSE_ECHO "$ACLOCAL $SEARCH_DIRS $ACLOCAL_OPTIONS"
aclocal_output="`$ACLOCAL $SEARCH_DIRS $ACLOCAL_OPTIONS 2>&1`"
ret=$?
$VERBOSE_ECHO "$aclocal_output"
if [ ! $ret = 0 ] ; then $ECHO "ERROR: $ACLOCAL failed" && exit 2 ; fi
##############
# libtoolize #
##############
if libtoolize_needed ; then
if [ "x$HAVE_LIBTOOLIZE" = "xyes" ] ; then
$VERBOSE_ECHO "$LIBTOOLIZE $LIBTOOLIZE_OPTIONS"
libtoolize_output="`$LIBTOOLIZE $LIBTOOLIZE_OPTIONS 2>&1`"
ret=$?
$VERBOSE_ECHO "$libtoolize_output"
if [ ! $ret = 0 ] ; then $ECHO "ERROR: $LIBTOOLIZE failed" && exit 2 ; fi
else
if [ "x$HAVE_ALT_LIBTOOLIZE" = "xyes" ] ; then
$VERBOSE_ECHO "$LIBTOOLIZE $ALT_LIBTOOLIZE_OPTIONS"
libtoolize_output="`$LIBTOOLIZE $ALT_LIBTOOLIZE_OPTIONS 2>&1`"
ret=$?
$VERBOSE_ECHO "$libtoolize_output"
if [ ! $ret = 0 ] ; then $ECHO "ERROR: $LIBTOOLIZE failed" && exit 2 ; fi
fi
fi
###########
# aclocal #
###########
# re-run again as instructed by libtoolize
$VERBOSE_ECHO "$ACLOCAL $SEARCH_DIRS $ACLOCAL_OPTIONS"
aclocal_output="`$ACLOCAL $SEARCH_DIRS $ACLOCAL_OPTIONS 2>&1`"
ret=$?
$VERBOSE_ECHO "$aclocal_output"
# libtoolize might put ltmain.sh in the wrong place
if test -f ltmain.sh ; then
if test ! -f "${_aux_dir}/ltmain.sh" ; then
$ECHO
$ECHO "Warning: $LIBTOOLIZE is creating ltmain.sh in the wrong directory"
$ECHO
$ECHO "Fortunately, the problem can be worked around by simply copying the"
$ECHO "file to the appropriate location (${_aux_dir}/). This has been done for you."
$ECHO
$VERBOSE_ECHO "cp -p ltmain.sh \"${_aux_dir}/ltmain.sh\""
cp -p ltmain.sh "${_aux_dir}/ltmain.sh"
$ECHO $ECHO_N "Continuing build preparation ... $ECHO_C"
fi
fi # ltmain.sh
if [ "x$DOWNLOAD" = "xyes" ] ; then
download_gnulib_config_guess
fi
fi # libtoolize_needed
############
# autoconf #
############
$VERBOSE_ECHO
$VERBOSE_ECHO "$AUTOCONF $AUTOCONF_OPTIONS"
autoconf_output="`$AUTOCONF $AUTOCONF_OPTIONS 2>&1`"
ret=$?
$VERBOSE_ECHO "$autoconf_output"
if [ ! $ret = 0 ] ; then
# retry without the -f and check for usage of macros that are too new
ac2_59_macros="AC_C_RESTRICT AC_INCLUDES_DEFAULT AC_LANG_ASSERT AC_LANG_WERROR AS_SET_CATFILE"
ac2_55_macros="AC_COMPILER_IFELSE AC_FUNC_MBRTOWC AC_HEADER_STDBOOL AC_LANG_CONFTEST AC_LANG_SOURCE AC_LANG_PROGRAM AC_LANG_CALL AC_LANG_FUNC_TRY_LINK AC_MSG_FAILURE AC_PREPROC_IFELSE"
ac2_54_macros="AC_C_BACKSLASH_A AC_CONFIG_LIBOBJ_DIR AC_GNU_SOURCE AC_PROG_EGREP AC_PROG_FGREP AC_REPLACE_FNMATCH AC_FUNC_FNMATCH_GNU AC_FUNC_REALLOC AC_TYPE_MBSTATE_T"
macros_to_search=""
ac_major="`echo ${AUTOCONF_VERSION}. | cut -d. -f1 | sed 's/[^0-9]//g'`"
ac_minor="`echo ${AUTOCONF_VERSION}. | cut -d. -f2 | sed 's/[^0-9]//g'`"
if [ $ac_major -lt 2 ] ; then
macros_to_search="$ac2_59_macros $ac2_55_macros $ac2_54_macros"
else
if [ $ac_minor -lt 54 ] ; then
macros_to_search="$ac2_59_macros $ac2_55_macros $ac2_54_macros"
elif [ $ac_minor -lt 55 ] ; then
macros_to_search="$ac2_59_macros $ac2_55_macros"
elif [ $ac_minor -lt 59 ] ; then
macros_to_search="$ac2_59_macros"
fi
fi
configure_ac_macros=__none__
for feature in $macros_to_search ; do
$VERBOSE_ECHO "Searching for $feature in $CONFIGURE"
found="`grep \"^$feature.*\" $CONFIGURE`"
if [ ! "x$found" = "x" ] ; then
if [ "x$configure_ac_macros" = "x__none__" ] ; then
configure_ac_macros="$feature"
else
configure_ac_macros="$feature $configure_ac_macros"
fi
fi
done
if [ ! "x$configure_ac_macros" = "x__none__" ] ; then
$ECHO
$ECHO "Warning: Unsupported macros were found in $CONFIGURE"
$ECHO
$ECHO "The `basename \"$CONFIGURE\"` file was scanned in order to determine if any"
$ECHO "unsupported macros are used that exceed the minimum version"
$ECHO "settings specified within this file. As such, the following macros"
$ECHO "should be removed from configure.ac or the version numbers in this"
$ECHO "file should be increased:"
$ECHO
$ECHO "$configure_ac_macros"
$ECHO
$ECHO $ECHO_N "Ignorantly continuing build preparation ... $ECHO_C"
fi
###################
# autoconf, retry #
###################
$VERBOSE_ECHO
$VERBOSE_ECHO "$AUTOCONF"
autoconf_output="`$AUTOCONF 2>&1`"
ret=$?
$VERBOSE_ECHO "$autoconf_output"
if [ ! $ret = 0 ] ; then
# test if libtool is busted
libtool_failure "$autoconf_output"
# let the user know what went wrong
cat <<EOF
$autoconf_output
EOF
$ECHO "ERROR: $AUTOCONF failed"
exit 2
else
# autoconf sans -f and possibly sans unsupported options succeeded, so warn verbosely
$ECHO
$ECHO "Warning: autoconf seems to have succeeded by removing the following options:"
$ECHO " AUTOCONF_OPTIONS=\"$AUTOCONF_OPTIONS\""
$ECHO
$ECHO "Removing those options should not be necessary and indicate some other"
$ECHO "problem with the build system. The build preparation is highly suspect"
$ECHO "and may result in configuration or compilation errors. Consider"
if [ "x$VERBOSE_ECHO" = "x:" ] ; then
$ECHO "rerunning the build preparation with verbose output enabled."
$ECHO " $AUTOGEN_SH --verbose"
else
$ECHO "reviewing the minimum GNU Autotools version settings contained in"
$ECHO "this script along with the macros being used in your `basename \"$CONFIGURE\"` file."
fi
$ECHO
$ECHO $ECHO_N "Continuing build preparation ... $ECHO_C"
fi # autoconf ret = 0
fi # autoconf ret = 0
##############
# autoheader #
##############
need_autoheader=no
for feature in AM_CONFIG_HEADER AC_CONFIG_HEADER ; do
$VERBOSE_ECHO "Searching for $feature in $CONFIGURE"
found="`grep \"^$feature.*\" $CONFIGURE`"
if [ ! "x$found" = "x" ] ; then
need_autoheader=yes
break
fi
done
if [ "x$need_autoheader" = "xyes" ] ; then
$VERBOSE_ECHO "$AUTOHEADER $AUTOHEADER_OPTIONS"
autoheader_output="`$AUTOHEADER $AUTOHEADER_OPTIONS 2>&1`"
ret=$?
$VERBOSE_ECHO "$autoheader_output"
if [ ! $ret = 0 ] ; then $ECHO "ERROR: $AUTOHEADER failed" && exit 2 ; fi
fi # need_autoheader
############
# automake #
############
need_automake=no
for feature in AM_INIT_AUTOMAKE ; do
$VERBOSE_ECHO "Searching for $feature in $CONFIGURE"
found="`grep \"^$feature.*\" $CONFIGURE`"
if [ ! "x$found" = "x" ] ; then
need_automake=yes
break
fi
done
if [ "x$need_automake" = "xyes" ] ; then
$VERBOSE_ECHO "$AUTOMAKE $AUTOMAKE_OPTIONS"
automake_output="`$AUTOMAKE $AUTOMAKE_OPTIONS 2>&1`"
ret=$?
$VERBOSE_ECHO "$automake_output"
if [ ! $ret = 0 ] ; then
###################
# automake, retry #
###################
$VERBOSE_ECHO
$VERBOSE_ECHO "$AUTOMAKE $ALT_AUTOMAKE_OPTIONS"
# retry without the -f
automake_output="`$AUTOMAKE $ALT_AUTOMAKE_OPTIONS 2>&1`"
ret=$?
$VERBOSE_ECHO "$automake_output"
if [ ! $ret = 0 ] ; then
# test if libtool is busted
libtool_failure "$automake_output"
# let the user know what went wrong
cat <<EOF
$automake_output
EOF
$ECHO "ERROR: $AUTOMAKE failed"
exit 2
fi # automake retry
fi # automake ret = 0
fi # need_automake
} # end of manual_autogen
#####################################
# RECURSIVE_MANUAL_AUTOGEN FUNCTION #
#####################################
recursive_manual_autogen ( ) {
# run the build preparation steps manually for this directory
manual_autogen
# for projects using recursive configure, run the build
# preparation steps for the subdirectories.
if [ ! "x$CONFIG_SUBDIRS" = "x" ] ; then
$VERBOSE_ECHO "Recursively configuring the following directories:"
$VERBOSE_ECHO " $CONFIG_SUBDIRS"
for dir in $CONFIG_SUBDIRS ; do
$VERBOSE_ECHO "Processing recursive configure in $dir"
cd "$START_PATH"
cd "$dir"
# new directory, prepare
initialize
# run manual steps for the subdir and any others below
recursive_manual_autogen
done
fi
}
################################
# run manual preparation steps #
################################
if [ "x$reconfigure_manually" = "xyes" ] ; then
$ECHO
$ECHO $ECHO_N "Preparing build ... $ECHO_C"
recursive_manual_autogen
fi
#########################
# restore and summarize #
#########################
cd "$START_PATH"
# restore COPYING and INSTALL from backup if necessary
recursive_restore
# make sure we end up with a configure script
config_ac="`locate_configure_template`"
config="`echo $config_ac | sed 's/\.ac$//' | sed 's/\.in$//'`"
if [ "x$config" = "x" ] ; then
$VERBOSE_ECHO "Could not locate the configure template (from `pwd`)"
fi
# intltool
#intltoolize --copy --automake
# summarize
$ECHO "done"
$ECHO
if test "x$config" = "x" -o ! -f "$config" ; then
$ECHO "WARNING: The $PROJECT build system should now be prepared but there"
$ECHO "does not seem to be a resulting configure file. This is unexpected"
$ECHO "and likely the result of an error. You should run $NAME_OF_AUTOGEN"
$ECHO "with the --verbose option to get more details on a potential"
$ECHO "misconfiguration."
else
$ECHO "The $PROJECT build system is now prepared. To build here, run:"
$ECHO " $config"
$ECHO " make"
fi
# Local Variables:
# mode: sh
# tab-width: 8
# sh-basic-offset: 4
# sh-indentation: 4
# indent-tabs-mode: t
# End:
# ex: shiftwidth=4 tabstop=8
|
tempbottle/chan
|
autogen.sh
|
Shell
|
apache-2.0
| 47,427 |
#!/bin/bash
mkdir -p "$PREFIX/bin"
if [ "$(uname)" == "Darwin" ]; then
cp bedCommonRegions "$PREFIX/bin"
else
export MACHTYPE=x86_64
export BINDIR=$(pwd)/bin
mkdir -p "$BINDIR"
(cd kent/src/lib && make)
(cd kent/src/htslib && make)
(cd kent/src/jkOwnLib && make)
(cd kent/src/hg/lib && make)
(cd kent/src/utils/bedCommonRegions && make)
cp bin/bedCommonRegions "$PREFIX/bin"
fi
chmod +x "$PREFIX/bin/bedCommonRegions"
|
dmaticzka/bioconda-recipes
|
recipes/ucsc-bedcommonregions/build.sh
|
Shell
|
mit
| 458 |
mkdir -p "${PREFIX}/share"
mkdir -p "${PREFIX}/bin"
cp recognizer.py "${PREFIX}/share"
cp -r resources/* "${PREFIX}/share"
chmod +x "${PREFIX}/share/recognizer.py"
ln -s "${PREFIX}/share/recognizer.py" "${PREFIX}/bin"
|
cokelaer/bioconda-recipes
|
recipes/recognizer/build.sh
|
Shell
|
mit
| 218 |
mysql twelve_monkeys_test -utwelve_monkeys -ptwelve -e 'drop database twelve_monkeys_test; create database twelve_monkeys_test;';
#
# Copyright 2015 Comcast Cable Communications Management, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
mysql twelve_monkeys_test -utwelve_monkeys -ptwelve < create_tables.sql
|
dewrich/traffic_control
|
traffic_ops/app/db/reset.sh
|
Shell
|
apache-2.0
| 814 |
#
# Copyright (c) 2004, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
# @test
# @bug 4990825
# @run shell jps-q.sh
# @summary Test that output of 'jps -q' shows only the process ids
. ${TESTSRC-.}/../../jvmstat/testlibrary/utils.sh
setup
verify_os
JPS="${TESTJAVA}/bin/jps"
${JPS} -q | awk -f ${TESTSRC}/jps-q_Output1.awk
|
vicmarcal/JAVA_UNIT
|
test/sun/tools/jps/jps-q.sh
|
Shell
|
gpl-3.0
| 1,285 |
#!/bin/sh
###########################################################################
# lighttpd_debian.sh
# ---------------------
# Date : February 2014
# Copyright : (C) 2014 by Larry Shaffer
# Email : larrys at dakotacarto dot com
###########################################################################
# #
# This program is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 2 of the License, or #
# (at your option) any later version. #
# #
###########################################################################
#from init script
PATH=/sbin:/bin:/usr/sbin:/usr/bin
DAEMON=$2
NAME=lighttpd
DESC="web server"
PIDFILE=$3/var/$NAME.pid
SCRIPTNAME=$NAME
export QGIS_SERVER_TEMP_DIR=$3
if [ ! -z $4 ]; then
DAEMON_OPTS="-f ${4}"
fi
test -x $DAEMON || exit 1
set -e
check_syntax()
{
$DAEMON -t $DAEMON_OPTS > /dev/null || exit $?
}
. /lib/lsb/init-functions
case "$1" in
start)
check_syntax
log_daemon_msg "Starting $DESC" $NAME
if ! start-stop-daemon --start --oknodo --quiet \
--pidfile $PIDFILE --exec $DAEMON -- $DAEMON_OPTS
then
log_end_msg 1
else
log_end_msg 0
fi
;;
stop)
log_daemon_msg "Stopping $DESC" $NAME
if start-stop-daemon --stop --retry 30 --oknodo --quiet \
--pidfile $PIDFILE --exec $DAEMON
then
rm -f $PIDFILE
log_end_msg 0
else
log_end_msg 1
fi
;;
reload|force-reload)
check_syntax
log_daemon_msg "Reloading $DESC configuration" $NAME
if start-stop-daemon --stop --signal INT --quiet \
--pidfile $PIDFILE --exec $DAEMON
then
rm $PIDFILE
if start-stop-daemon --start --quiet \
--pidfile $PIDFILE --exec $DAEMON -- $DAEMON_OPTS ; then
log_end_msg 0
else
log_end_msg 1
fi
else
log_end_msg 1
fi
;;
reopen-logs)
log_daemon_msg "Reopening $DESC logs" $NAME
if start-stop-daemon --stop --signal HUP --oknodo --quiet \
--pidfile $PIDFILE --exec $DAEMON
then
log_end_msg 0
else
log_end_msg 1
fi
;;
restart)
check_syntax
$0 stop $2 $3
$0 start $2 $3 $4
;;
status)
status_of_proc -p "$PIDFILE" "$DAEMON" lighttpd && exit 0 || exit $?
;;
*)
echo "Usage: $SCRIPTNAME {start|stop|restart|reload|force-reload|status}" >&2
exit 1
;;
esac
exit 0
|
asiersarasua/QGIS
|
tests/testdata/qgis_local_server/lighttpd/scripts/lighttpd_debian.sh
|
Shell
|
gpl-2.0
| 2,973 |
#!/bin/bash
# Install using setuptools
$PYTHON setup.py install
|
gvlproject/bioconda-recipes
|
recipes/pathoscope/build.sh
|
Shell
|
mit
| 65 |
#!/bin/sh
export BASE_DIR="`dirname $0`"
if test -z "$NO_MAKE"; then
make -C "$BASE_DIR/../" > /dev/null || exit 1
fi
if test -z "$CUTTER"; then
CUTTER="`make -s -C "$BASE_DIR" echo-cutter`"
fi
"$CUTTER" --keep-opening-modules -s "$BASE_DIR" "$@" "$BASE_DIR"
# ^^^^^^^^^^^^^^^^^^^^^^
# FIXME: Remove this workaround once cutter has been fixed upstream.
# Bug report:
# http://sourceforge.net/mailarchive/forum.php?thread_name=20100626123941.GA258%40blogreen.org&forum_name=cutter-users-en
|
paulchabanon/libnfc-my
|
test/run-test.sh
|
Shell
|
lgpl-3.0
| 509 |
#! /usr/bin/env node
logfmt = require('./lib/logfmt_parser')
logfmt.debug = true;
var test_string = "foo=bar a=14 baz=\"hello kitty\" cool%story=bro f %^asdf ";
test_string += "code=H12 path=/hello/[email protected]/close";
console.log(logfmt.parse(test_string))
|
alexey0511/munikApp
|
testAuth/node_modules/logfmt/examples/quick.sh
|
Shell
|
bsd-2-clause
| 259 |
#! /bin/bash
# (c) 2015, Quentin Casasnovas <[email protected]>
obj=$1
file ${obj} | grep -q ELF || (echo "${obj} is not an ELF file." 1>&2 ; exit 0)
# Bail out early if there isn't an __ex_table section in this object file.
objdump -hj __ex_table ${obj} 2> /dev/null > /dev/null
[ $? -ne 0 ] && exit 0
white_list=.text,.fixup
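# note: "eval echo -e{.text,.fixup}" brace-expands to "-e.text -e.fixup",
# turning the comma-separated white list into glued grep -e patterns; to
# whitelist another section, append it, e.g. white_list=.text,.fixup,.mysec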
suspicious_relocs=$(objdump -rj __ex_table ${obj} | tail -n +6 |
grep -v $(eval echo -e{${white_list}}) | awk '{print $3}')
# No suspicious relocs in __ex_table, jobs a good'un
[ -z "${suspicious_relocs}" ] && exit 0
# After this point, something is seriously wrong since we just found out we
# have some relocations in __ex_table which point to sections which aren't
# white listed. If you're adding a new section in the Linux kernel, and
# you're expecting this section to contain code which can fault (i.e. the
# __ex_table relocation to your new section is expected), simply add your
# new section to the white_list variable above. If not, you're probably
# doing something wrong and the rest of this code is just trying to print
# you more information about it.
function find_section_offset_from_symbol()
{
eval $(objdump -t ${obj} | grep ${1} | sed 's/\([0-9a-f]\+\) .\{7\} \([^ \t]\+\).*/section="\2"; section_offset="0x\1" /')
# addr2line takes addresses in hexadecimal...
section_offset=$(printf "0x%016x" $(( ${section_offset} + $2 )) )
}
function find_symbol_and_offset_from_reloc()
{
# Extract symbol and offset from the objdump output
eval $(echo $reloc | sed 's/\([^+]\+\)+\?\(0x[0-9a-f]\+\)\?/symbol="\1"; symbol_offset="\2"/')
# When the relocation points to the beginning of a symbol or section, it
# won't print the offset since it is zero.
if [ -z "${symbol_offset}" ]; then
symbol_offset=0x0
fi
}
function find_alt_replacement_target()
{
# The target of the .altinstr_replacement is the relocation just before
# the .altinstr_replacement one.
eval $(objdump -rj .altinstructions ${obj} | grep -B1 "${section}+${section_offset}" | head -n1 | awk '{print $3}' |
sed 's/\([^+]\+\)+\(0x[0-9a-f]\+\)/alt_target_section="\1"; alt_target_offset="\2"/')
}
function handle_alt_replacement_reloc()
{
# This will define alt_target_section and alt_target_section_offset
find_alt_replacement_target ${section} ${section_offset}
echo "Error: found a reference to .altinstr_replacement in __ex_table:"
addr2line -fip -j ${alt_target_section} -e ${obj} ${alt_target_offset} | awk '{print "\t" $0}'
error=true
}
function is_executable_section()
{
objdump -hwj ${section} ${obj} | grep -q CODE
return $?
}
function handle_suspicious_generic_reloc()
{
if is_executable_section ${section}; then
# We've got a relocation to a non white listed _executable_
# section, print a warning so the developer adds the section to
# the white list or fix his code. We try to pretty-print the file
# and line number where that relocation was added.
echo "Warning: found a reference to section \"${section}\" in __ex_table:"
addr2line -fip -j ${section} -e ${obj} ${section_offset} | awk '{print "\t" $0}'
else
# Something is definitively wrong here since we've got a relocation
# to a non-executable section, there's no way this would ever be
# running in the kernel.
echo "Error: found a reference to non-executable section \"${section}\" in __ex_table at offset ${section_offset}"
error=true
fi
}
function handle_suspicious_reloc()
{
case "${section}" in
".altinstr_replacement")
handle_alt_replacement_reloc ${section} ${section_offset}
;;
*)
handle_suspicious_generic_reloc ${section} ${section_offset}
;;
esac
}
function diagnose()
{
for reloc in ${suspicious_relocs}; do
# Let's find out where the target of the relocation in __ex_table
# is, this will define ${symbol} and ${symbol_offset}
find_symbol_and_offset_from_reloc ${reloc}
# When there's a global symbol at the place of the relocation,
# objdump will use it instead of giving us a section+offset, so
# let's find out which section is this symbol in and the total
# offset within that section.
find_section_offset_from_symbol ${symbol} ${symbol_offset}
# In this case objdump was presenting us with a reloc to a symbol
# rather than a section. Now that we've got the actual section,
# we can skip it if it's in the white_list.
if [ -z "$( echo $section | grep -v $(eval echo -e{${white_list}}))" ]; then
continue;
fi
# Will either print a warning if the relocation happens to be in a
# section we do not know but has executable bit set, or error out.
handle_suspicious_reloc
done
}
function check_debug_info() {
objdump -hj .debug_info ${obj} 2> /dev/null > /dev/null ||
echo -e "${obj} does not contain debug information, the addr2line output will be limited.\n" \
"Recompile ${obj} with CONFIG_DEBUG_INFO to get a more useful output."
}
check_debug_info
diagnose
if [ "${error}" ]; then
exit 1
fi
exit 0
|
AiJiaZone/linux-4.0
|
virt/scripts/check_extable.sh
|
Shell
|
gpl-2.0
| 5,014 |
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
ALL_TESTS="
ping_ipv4
ecn_test
ecn_test_perband
ecn_nodrop_test
red_test
mc_backlog_test
red_mirror_test
red_trap_test
ecn_mirror_test
"
: ${QDISC:=ets}
source sch_red_core.sh
# do_ecn_test first build 2/3 of the requested backlog and expects no marking,
# and then builds 3/2 of it and does expect marking. The values of $BACKLOG1 and
# $BACKLOG2 are far enough not to overlap, so that we can assume that if we do
# see (do not see) marking, it is actually due to the configuration of that one
# TC, and not due to configuration of the other TC leaking over.
BACKLOG1=200000
BACKLOG2=500000
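# Illustration: with BACKLOG1=200000, do_ecn_test first builds ~133KB of
# backlog (2/3, no marking expected) and then ~300KB (3/2, marking expected).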
install_root_qdisc()
{
tc qdisc add dev $swp3 root handle 10: $QDISC \
bands 8 priomap 7 6 5 4 3 2 1 0
}
install_qdisc_tc0()
{
local -a args=("$@")
tc qdisc add dev $swp3 parent 10:8 handle 108: red \
limit 1000000 min $BACKLOG1 max $((BACKLOG1 + 1)) \
probability 1.0 avpkt 8000 burst 38 "${args[@]}"
}
install_qdisc_tc1()
{
local -a args=("$@")
tc qdisc add dev $swp3 parent 10:7 handle 107: red \
limit 1000000 min $BACKLOG2 max $((BACKLOG2 + 1)) \
probability 1.0 avpkt 8000 burst 63 "${args[@]}"
}
install_qdisc()
{
install_root_qdisc
install_qdisc_tc0 "$@"
install_qdisc_tc1 "$@"
sleep 1
}
uninstall_qdisc_tc0()
{
tc qdisc del dev $swp3 parent 10:8
}
uninstall_qdisc_tc1()
{
tc qdisc del dev $swp3 parent 10:7
}
uninstall_root_qdisc()
{
tc qdisc del dev $swp3 root
}
uninstall_qdisc()
{
uninstall_qdisc_tc0
uninstall_qdisc_tc1
uninstall_root_qdisc
}
ecn_test()
{
install_qdisc ecn
do_ecn_test 10 $BACKLOG1
do_ecn_test 11 $BACKLOG2
uninstall_qdisc
}
ecn_test_perband()
{
install_qdisc ecn
do_ecn_test_perband 10 $BACKLOG1
do_ecn_test_perband 11 $BACKLOG2
uninstall_qdisc
}
ecn_nodrop_test()
{
install_qdisc ecn nodrop
do_ecn_nodrop_test 10 $BACKLOG1
do_ecn_nodrop_test 11 $BACKLOG2
uninstall_qdisc
}
red_test()
{
install_qdisc
# Make sure that we get the non-zero value if there is any.
local cur=$(busywait 1100 until_counter_is "> 0" \
qdisc_stats_get $swp3 10: .backlog)
(( cur == 0 ))
check_err $? "backlog of $cur observed on non-busy qdisc"
log_test "$QDISC backlog properly cleaned"
do_red_test 10 $BACKLOG1
do_red_test 11 $BACKLOG2
uninstall_qdisc
}
mc_backlog_test()
{
install_qdisc
# Note that the backlog numbers here do not correspond to RED
# configuration, but are arbitrary.
do_mc_backlog_test 10 $BACKLOG1
do_mc_backlog_test 11 $BACKLOG2
uninstall_qdisc
}
red_mirror_test()
{
install_qdisc qevent early_drop block 10
do_drop_mirror_test 10 $BACKLOG1 early_drop
do_drop_mirror_test 11 $BACKLOG2 early_drop
uninstall_qdisc
}
red_trap_test()
{
install_qdisc qevent early_drop block 10
do_drop_trap_test 10 $BACKLOG1 early_drop
do_drop_trap_test 11 $BACKLOG2 early_drop
uninstall_qdisc
}
ecn_mirror_test()
{
install_qdisc ecn qevent mark block 10
do_mark_mirror_test 10 $BACKLOG1
do_mark_mirror_test 11 $BACKLOG2
uninstall_qdisc
}
trap cleanup EXIT
setup_prepare
setup_wait
bail_on_lldpad
tests_run
exit $EXIT_STATUS
|
mpe/powerpc
|
tools/testing/selftests/drivers/net/mlxsw/sch_red_ets.sh
|
Shell
|
gpl-2.0
| 3,104 |
#! /bin/bash
FORGE_VERSION="1.7.10-10.13.2.1352-1.7.10"
FORGE_JAR="forge-$FORGE_VERSION-installer.jar"
AS2="LinuxMac-AS2-1.0.33b.zip"
FASTC="fastcraft-1.21.jar"
#aquire the needed files
echo "Downloading and installing forge"
wget "http://files.minecraftforge.net/maven/net/minecraftforge/forge/$FORGE_VERSION/$FORGE_JAR" -O $FORGE_JAR
java -jar $FORGE_JAR --installServer
echo "downloading AS2"
wget "http://minecraft.curseforge.com/modpacks/225550-agrarian-skies-2/files/2233719/download" -O $AS2
unzip -o $AS2
mv minecraft/* .
rm -rf minecraft
rm -f instance.cfg #client only file
echo "disabling client only mods"
while read mod ; do
mv mods/$mod mods/$mod.disabled
done < disabledMods.list
echo "downloading fastcraft"
wget "http://files.player.to/$FASTC" -O mods/$FASTC
echo "cleaning world, copy the map you wan't to play from the maps forlder."
rm -rf world/*
|
gab1one/as2-scripts
|
setup.sh
|
Shell
|
isc
| 877 |
#!/bin/bash -ex
mkdir -p /tmp/P
cp openaddr/VERSION /tmp/P/FULL
cut -f1 -d. /tmp/P/FULL > /tmp/P/MAJOR
docker pull ubuntu:16.04
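# seed the local Docker layer cache from S3 so prereq layers are reused;
# "|| true" keeps a cache miss non-fatal and the image is rebuilt from scratch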
aws s3 cp --quiet s3://data.openaddresses.io/docker/openaddr-prereqs-`cat /tmp/P/MAJOR`.tar.gz /tmp/img && gunzip -c /tmp/img | docker load || true
docker build -f Dockerfile-prereqs -t openaddr/prereqs:`cat /tmp/P/MAJOR` .
docker build -f Dockerfile-machine -t openaddr/machine:`cat /tmp/P/MAJOR` .
|
openaddresses/machine
|
.circleci/prepare-docker-images.sh
|
Shell
|
isc
| 431 |
docker build -t sczyh30/pistachio .
|
0x5f3759df-Hacker/pistachio-docker
|
build.sh
|
Shell
|
mit
| 36 |
fsuffix="/*.png"
fsuff2="_output.gif"
fsuff3="_input.gif"
BASEPATH=${PWD##*/}
while getopts "h?vf:" opt; do
case "$opt" in
h|\?)
show_help
exit 0
;;
f) BASEPATH=$OPTARG
;;
esac
done
#echo $BASEPATH/exp_*/fittedimages
for f in $BASEPATH/exp_*/fittedimages; do
if [ -d "$f" ]; then
#echo "$f""$fsuffix"
arr1=(${f//// })
arr1len=${#arr1[@]}
arrind=$((arr1len-1))
dir1=$(dirname "$f")
dataname=$(basename "$dir1")
gifname="$f""/""$dataname""$fsuff2"
echo "Making ""$gifname"
convert "$f""$fsuffix" "$gifname"
fi
done
for f in $BASEPATH/exp_*/Inputimages; do
if [ -d "$f" ]; then
#echo "$f""$fsuffix"
arr1=(${f//// })
arr1len=${arr1}
arrind=${arr1len-1}
dir1=$(dirname "$f")
dataname=$(basename "$dir1")
gifname="$f""/""$dataname""$fsuff3"
echo "Making ""$gifname"
convert "$f""$fsuffix" "$gifname"
fi
done
|
jswoboda/PlaneProcessing
|
convertall.sh
|
Shell
|
mit
| 948 |
#!/bin/sh
set -e
# first arg is `-f` or `--some-option`
if [ "${1#-}" != "$1" ]; then
set -- php-fpm "$@"
fi
if [ "$1" = 'php-fpm' ] || [ "$1" = 'php' ] || [ "$1" = 'bin/console' ]; then
PHP_INI_RECOMMENDED="$PHP_INI_DIR/php.ini-production"
if [ "$APP_ENV" != 'prod' ]; then
PHP_INI_RECOMMENDED="$PHP_INI_DIR/php.ini-development"
fi
ln -sf "$PHP_INI_RECOMMENDED" "$PHP_INI_DIR/php.ini"
mkdir -p var/cache var/log
setfacl -R -m u:www-data:rwX -m u:"$(whoami)":rwX var
setfacl -dR -m u:www-data:rwX -m u:"$(whoami)":rwX var
if [ "$APP_ENV" != 'prod' ]; then
composer install --prefer-dist --no-progress --no-interaction
echo "Making sure public / private keys for JWT exist..."
php bin/console lexik:jwt:generate-keypair --skip-if-exists --no-interaction
setfacl -R -m u:www-data:rX -m u:"$(whoami)":rwX config/jwt
setfacl -dR -m u:www-data:rX -m u:"$(whoami)":rwX config/jwt
fi
if grep -q DATABASE_URL= .env; then
echo "Waiting for database to be ready..."
ATTEMPTS_LEFT_TO_REACH_DATABASE=60
until [ $ATTEMPTS_LEFT_TO_REACH_DATABASE -eq 0 ] || DATABASE_ERROR=$(php bin/console dbal:run-sql -q "SELECT 1" 2>&1); do
if [ $? -eq 255 ]; then
# If the Doctrine command exits with 255, an unrecoverable error occurred
ATTEMPTS_LEFT_TO_REACH_DATABASE=0
break
fi
sleep 1
ATTEMPTS_LEFT_TO_REACH_DATABASE=$((ATTEMPTS_LEFT_TO_REACH_DATABASE - 1))
echo "Still waiting for database to be ready... Or maybe the database is not reachable. $ATTEMPTS_LEFT_TO_REACH_DATABASE attempts left."
done
if [ $ATTEMPTS_LEFT_TO_REACH_DATABASE -eq 0 ]; then
echo "The database is not up or not reachable:"
echo "$DATABASE_ERROR"
exit 1
else
echo "The database is now ready and reachable"
fi
if ls -A migrations/*.php >/dev/null 2>&1; then
echo "Execute migrations"
bin/console doctrine:migrations:migrate --no-interaction
fi
if [ "$APP_ENV" != 'prod' ]; then
echo "Load fixtures"
bin/console hautelook:fixtures:load --no-interaction
fi
fi
fi
exec docker-php-entrypoint "$@"
|
api-platform/demo
|
api/docker/php/docker-entrypoint.sh
|
Shell
|
mit
| 2,052 |
#!/bin/bash
set -xv
#adam-BL# . BonnLogger.sh
#adam-BL# . log_start
# calculated fringe values with the method:
# mode of science frame/mode of background frame
#
# 30.05.04:
# temporary files go to a TEMPDIR directory
# $1: main directory (filter)
# $2: science directory
# $3: chips to be reduced
# preliminary work:
. ${INSTRUMENT:?}.ini
# get the modes of the background images
# create input file and config file on the fly for mode determination:
${S_LISTEXT_PARA} $1/$2/$2 _illum.fits immode $$ "$3"
echo "INPUT" > ${TEMPDIR}/immode.param_$$
echo " name ${TEMPDIR}/@in-immode_$$" >> ${TEMPDIR}/immode.param_$$
echo " dyn_min -66000.0" >> ${TEMPDIR}/immode.param_$$
echo " dyn_max 66000.0 " >> ${TEMPDIR}/immode.param_$$
echo "end" >> ${TEMPDIR}/immode.param_$$
echo "RASTER" >> ${TEMPDIR}/immode.param_$$
echo " xc ${STATSALLIM[1]}" >> ${TEMPDIR}/immode.param_$$
echo " yc ${STATSALLIM[2]}" >> ${TEMPDIR}/immode.param_$$
echo " sizex ${STATSALLIM[3]}" >> ${TEMPDIR}/immode.param_$$
echo " sizey ${STATSALLIM[4]}" >> ${TEMPDIR}/immode.param_$$
echo "end" >> ${TEMPDIR}/immode.param_$$
echo "STAT" >> ${TEMPDIR}/immode.param_$$
echo "percent 40" >> ${TEMPDIR}/immode.param_$$
echo "end" >> ${TEMPDIR}/immode.param_$$
echo "END" >> ${TEMPDIR}/immode.param_$$
${P_IMMODE} ${TEMPDIR}/immode.param_$$ \
${TEMPDIR}/immode.dat_$$ ${TEMPDIR}/immode-stat.dat_$$
# get the factors to normalise flats
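# (the gawk below collects the second column of immode.dat, one background
# mode per chip, into a space-separated list indexed by chip position)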
MODEILLUM=`${P_GAWK} '($1!="->" && $1!="") {printf ("%f ", $2)}' ${TEMPDIR}/immode.dat_$$`
i=1
for CHIP in $3
do
ACTUMODE=`echo ${MODEILLUM} | ${P_GAWK} '{print $'${i}'}'`
# get the modes of the science frames:
ls -1 $1/$2/*_${CHIP}OFCS.fits > ${TEMPDIR}/@in-immode_$$
echo "INPUT" > ${TEMPDIR}/immode.param_$$
echo " name ${TEMPDIR}/@in-immode_$$" >> ${TEMPDIR}/immode.param_$$
echo " dyn_min -66000.0" >> ${TEMPDIR}/immode.param_$$
echo " dyn_max 66000.0" >> ${TEMPDIR}/immode.param_$$
echo "end" >> ${TEMPDIR}/immode.param_$$
echo "RASTER" >> ${TEMPDIR}/immode.param_$$
echo " xc ${STATSALLIM[1]}" >> ${TEMPDIR}/immode.param_$$
echo " yc ${STATSALLIM[2]}" >> ${TEMPDIR}/immode.param_$$
echo " sizex ${STATSALLIM[3]}" >> ${TEMPDIR}/immode.param_$$
echo " sizey ${STATSALLIM[4]}" >> ${TEMPDIR}/immode.param_$$
echo "end" >> ${TEMPDIR}/immode.param_$$
echo "STAT" >> ${TEMPDIR}/immode.param_$$
echo "percent 40" >> ${TEMPDIR}/immode.param_$$
echo "end" >> ${TEMPDIR}/immode.param_$$
echo "END" >> ${TEMPDIR}/immode.param_$$
${P_IMMODE} ${TEMPDIR}/immode.param_$$ ${TEMPDIR}/immode.dat_$$ \
${TEMPDIR}/immode-stat.dat_$$
# write the fringevalues file
${P_GAWK} '($1!="->" && $1!="") {printf ("%s %f\n", $1, $2/'${ACTUMODE}')}' ${TEMPDIR}/immode.dat_$$ \
> ${TEMPDIR}/fringevalues_${CHIP}
i=$(( $i + 1 ))
done
#adam-BL# log_status $?
|
deapplegate/wtgpipeline
|
calculate_fringevalues_para.sh
|
Shell
|
mit
| 3,286 |
#!/bin/bash
#######################################
# #
# https://github.com/pedroguima #
# #
#######################################
function spinner {
local delay=0.5
local msg=$1
local spinstr='|/-\'
echo -e "\n$msg "
while [ true ]; do
local temp=${spinstr#?}
printf "[%c] " "$spinstr"
local spinstr=$temp${spinstr%"$temp"}
sleep $delay
printf "\b\b\b\b\b\b"
done
printf " \b\b\b\b"
}
function header {
echo -e "\n##################### Break My Box. Simple and neat! #####################"
}
function moreinfo {
echo -e "\nTry 'breakmybox.sh help' for more information\n"
}
function usage {
echo -e "\nbreakmybox.sh problem [options]"
}
function random_string {
local size=$1
cat /dev/urandom | tr -dc '0-9a-zA-Z_-' | head -c $size
}
function success {
echo -e "\n\nDone!\n"
exit 0
}
function error {
local msg=$1
if [ -n "$msg" ]; then
echo -e "\n$msg \n"
else
echo -e "\nAn error as occured. Please check."
fi
exit 1
}
function helpme {
header
usage
echo -e "\nOS problems:"
echo -e "\n\t\"nomorepids\"
- Drastically decreases the number of available PIDs, leaving the box unable to create further processes."
echo -e "\nFile system problems:"
echo -e "\n\t\"tmf\" \"directory\"
- Too many files - Fills a partition with temporary files until it runs out of inodes"
echo -e "\n\t\"ldf\" \"size in MB\" \"directory\"
- Large deleted file - Creates a deleted open file"
echo -e "\nFunny problems:"
echo -e "\n\t\"chmod\" - [chmod -x chmod]
- Remove execute permissions of $(which chmod)
"
}
function tmf {
local dir=$1
header
if [ ! -n "$dir" -o ! -d "$dir" ]; then
error "Please provide a valid directory."
fi
echo -en "\nAbout to create a lot of files in $dir. Are you sure? (y/n) "
read -n 2 reply
if [[ ! $reply =~ ^[Yy]$ ]]; then
echo "Leaving..."
exit 0
fi
spinner "Filling up $dir with dummy files. Please wait..." &
spinner_pid=$!
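# make sure the background spinner dies with us if the fill is interrupted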
trap "kill -9 $spinner_pid $BASHPID" SIGHUP SIGINT SIGTERM SIGQUIT
while [ true ]; do
mktemp -q -p $dir > /dev/null
if [ $? -ne 0 ]; then
kill $spinner_pid
success
fi
done
kill $spinner_pid
error
}
function ldf {
header
local size=$1
local dir=$2
local filename="naughty_file.log"
local naughtypath=$dir/$filename
if [[ ! $size =~ ^[0-9]+$ ]]; then
error "Please provide a valid size."
fi
if [ ! -n "$dir" -o ! -d "$dir" ]; then
error "Please provide a valid directory."
fi
echo -ne "\n\nCreate a $size MB deleted file in $naughtypath? (y/n) "
read -n 2 reply
if [[ ! $reply =~ ^[Yy]$ ]]; then
echo "Leaving..."
exit 0
fi
dd if=/dev/zero of=$naughtypath bs=1M count=$size &> /dev/null
tail -F $naughtypath &> /dev/null &
echo -en "PID locking file: $!\n\n"
rm -f $naughtypath
}
function chmdfun {
header
chmod_path=$(which chmod)
echo -ne "\nRemove execute permissions from $chmod_path? (y/n) "
read -n 2 reply
if [[ ! $reply =~ ^[Yy]$ ]]; then
echo "Leaving..."
exit 0
fi
$chmod_path -x $chmod_path
if [ $? -eq 0 ]; then
success
else
error
fi
}
function nomorepids {
local pid_max="/proc/sys/kernel/pid_max"
local lower_pid="301"
local res=0
header
echo -e "\nThe current value of \"$pid_max\" is $(cat $pid_max)"
echo -ne "Set the value to $lower_pid? (y/n) "
read -n 2 reply
if [[ ! $reply =~ ^[Yy]$ ]]; then
echo "Leaving..."
exit 0
fi
echo $lower_pid > $pid_max
while [ $res -eq 0 ]; do
tail -f /dev/null & disown
done
}
problem=$1
case "$problem" in
"help" )
helpme ;;
"tmf" )
tmf $2 ;;
"ldf" )
ldf $2 $3 ;;
"chmod" )
chmdfun ;;
"nomorepids" )
nomorepids ;;
* )
usage
moreinfo ;;
esac
exit 0
|
pedroguima/breakmybox
|
breakmybox.sh
|
Shell
|
mit
| 3,788 |
#!/bin/bash
sbcl --no-userinit --load dev.lisp
|
leosongwei/game_of_life
|
dev.sh
|
Shell
|
mit
| 48 |
echo "testing installation"
dpkg -i $DEB
echo "testing correct version present after install"
[ $VERSION = $(java -version 2>&1 | head -n1 | cut -d ' ' -f 3 | tr -d '"') ]
echo "testing removal"
dpkg -r $PACKAGE
echo "testing no java available after removal"
if update-alternatives --display java; then
/bin/false
else
/bin/true
fi
|
caligin/jdk-makefile
|
tests.sh
|
Shell
|
mit
| 340 |
"$CLOUD_REBUILD" CMouse 32 dll debug same
|
xylsxyls/xueyelingshuang
|
src/CMouse/version_debug.sh
|
Shell
|
mit
| 41 |
head -"${arg1}" "${sourceFile}"
head -"${arg1}" < "${sourceFile}"
head -n "${arg1}" "${sourceFile}"
head -n "${arg1}" < "${sourceFile}"
head -n"${arg1}" "${sourceFile}"
head -n"${arg1}" < "${sourceFile}"
head -n +"${arg1}" "${sourceFile}"
head -n +"${arg1}" < "${sourceFile}"
head -n+"${arg1}" "${sourceFile}"
head -n+"${arg1}" < "${sourceFile}"
sed -n "1,${arg1}p" "${sourceFile}"
sed -n "1,${arg1}p" < "${sourceFile}"
sed "${arg1}"'q' "${sourceFile}"
sed "${arg1}"'q' < "${sourceFile}"
sed '1,'"${arg1}"'!d' "${sourceFile}"
sed '1,'"${arg1}"'!d' < "${sourceFile}"
awk 'NR > '"${arg1}"' { exit } { print $0 }' "${sourceFile}"
awk 'NR > '"${arg1}"' { exit } { print $0 }' < "${sourceFile}"
awk '{ print $0 ; if( NR >= '"${arg1}"' ) exit }' "${sourceFile}"
awk '{ print $0 ; if( NR >= '"${arg1}"' ) exit }' < "${sourceFile}"
awk '{ if( NR > '"${arg1}"' ) exit ; print $0 }' "${sourceFile}"
awk '{ if( NR > '"${arg1}"' ) exit ; print $0 }' < "${sourceFile}"
awk '{ if( NR > '"${arg1}"' ) { exit } print $0 }' "${sourceFile}"
awk '{ if( NR > '"${arg1}"' ) { exit } print $0 }' < "${sourceFile}"
awk '{ print $0 ; if( NR >= '"${arg1}"' ) exit }' "${sourceFile}"
awk '{ print $0 ; if( NR >= '"${arg1}"' ) exit }' < "${sourceFile}"
awk '{ print $0 ; if( NR >= '"${arg1}"' ) { exit } }' "${sourceFile}"
awk '{ print $0 ; if( NR >= '"${arg1}"' ) { exit } }' < "${sourceFile}"
awk 'NR >= '"${arg1}"' { print $0 ; exit } { print $0 }' "${sourceFile}"
awk 'NR >= '"${arg1}"' { print $0 ; exit } { print $0 }' < "${sourceFile}"
awk 'NR <= '"${arg1}"' { print $0 }' "${sourceFile}"
awk 'NR <= '"${arg1}"' { print $0 }' < "${sourceFile}"
awk '{ if( NR <= '"${arg1}"' ) print $0 }' "${sourceFile}"
awk '{ if( NR <= '"${arg1}"' ) print $0 }' < "${sourceFile}"
perl -ne '1..'"${arg1}"' and print' "${sourceFile}"
perl -ne '1..'"${arg1}"' and print' < "${sourceFile}"
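# Example usage (hypothetical values): print the first 10 lines of a file
# arg1=10
# sourceFile=/etc/passwd
# head -n "${arg1}" "${sourceFile}"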
|
Sylvain-Bugat/linux-commands-reference
|
line-selections/select-N-first-lines/select-N-first-lines-commands.sh
|
Shell
|
mit
| 1,851 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for RHSA-2014:0783
#
# Security announcement date: 2014-06-23 18:51:07 UTC
# Script generation date: 2017-01-01 21:15:21 UTC
#
# Operating System: Red Hat 5
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - httpd.x86_64:2.2.22-27.ep6.el5
# - httpd-debuginfo.x86_64:2.2.22-27.ep6.el5
# - httpd-devel.x86_64:2.2.22-27.ep6.el5
# - httpd-manual.x86_64:2.2.22-27.ep6.el5
# - httpd-tools.x86_64:2.2.22-27.ep6.el5
# - mod_ssl.x86_64:2.2.22-27.ep6.el5
#
# Last versions recommanded by security team:
# - httpd.x86_64:2.2.3-92.el5_11
# - httpd-debuginfo.x86_64:2.2.3-92.el5_11
# - httpd-devel.x86_64:2.2.3-92.el5_11
# - httpd-manual.x86_64:2.2.3-92.el5_11
# - httpd-tools.x86_64:2.2.26-41.ep6.el5
# - mod_ssl.x86_64:2.2.3-92.el5_11
#
# CVE List:
# - CVE-2013-6438
# - CVE-2014-0098
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo yum install httpd.x86_64-2.2.3 -y
sudo yum install httpd-debuginfo.x86_64-2.2.3 -y
sudo yum install httpd-devel.x86_64-2.2.3 -y
sudo yum install httpd-manual.x86_64-2.2.3 -y
sudo yum install httpd-tools.x86_64-2.2.26 -y
sudo yum install mod_ssl.x86_64-2.2.3 -y
|
Cyberwatch/cbw-security-fixes
|
Red_Hat_5/x86_64/2014/RHSA-2014:0783.sh
|
Shell
|
mit
| 1,278 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for DSA-2813-1
#
# Security announcement date: 2013-12-09 00:00:00 UTC
# Script generation date: 2017-01-01 21:06:46 UTC
#
# Operating System: Debian 7 (Wheezy)
# Architecture: i386
#
# Vulnerable packages fix on version:
# - gimp:2.8.2-2+deb7u1
#
# Last versions recommanded by security team:
# - gimp:2.8.2-2+deb7u2
#
# CVE List:
# - CVE-2013-1913
# - CVE-2013-1978
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade gimp=2.8.2-2+deb7u2 -y
|
Cyberwatch/cbw-security-fixes
|
Debian_7_(Wheezy)/i386/2013/DSA-2813-1.sh
|
Shell
|
mit
| 623 |
export HOME=/root
mkdir -p /tmp
grep boot.no-mount /proc/cmdline && exec /init-tools/bin/sh
readlink -f /dev/ThinkPadMain/Swap > /sys/power/resume
udevadm trigger -c add
udevadm trigger
udevadm settle
modprobe dm-mod
vgchange -ay
udevadm settle
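# re-point the kernel resume device now that LVM is active and the swap LV
# node resolves to its final device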
readlink -f /dev/ThinkPadMain/Swap > /sys/power/resume
for i in /dev/sd?; do hdparm -B 255 $i; done
yes y | mkfs.ext4 /dev/ThinkPadMain/Tmp
mount /dev/ThinkPadMain/SystemRoot /new-root
{
mkdir -p /new-root/var
mount /dev/ThinkPadMain/Var /new-root/var
mkdir -p /new-root/var/db
mkdir -p /new-root/var/log
mount /dev/ThinkPadMain/VarDb /new-root/var/db &
mount /dev/ThinkPadMain/VarLog /new-root/var/log &
} &
mkdir -p /new-root/nix
mkdir -p /new-root/tmp
mkdir -p /new-root/home
mkdir -p /new-root/root
mkdir -p /new-root/boot
{
mount /dev/ThinkPadMain/Nix /new-root/nix
mount /new-root/nix/store /new-root/nix/store -o bind,ro
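# "ro" on the initial bind may be ignored; the remount pass enforces read-only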
mount /new-root/nix/store -o remount,ro,bind
} &
mount /dev/ThinkPadMain/Tmp /new-root/tmp &
mount /dev/ThinkPadMain/Home /new-root/home &
mount /dev/ThinkPadMain/Root /new-root/root &
{
mkswap /dev/ThinkPadMain/Swap
swapon $( readlink -f /dev/ThinkPadMain/Swap )
} &
mount /dev/disk/by-label/NIXOS_EFI /new-root/boot &
{
mkdir /dev/pstore
mount pstore -t pstore /dev/pstore
} &
while pgrep mount; do sleep 0.1; done
chmod a+rwxt /new-root/tmp
|
7c6f434c/7c6f434c-configurations
|
init-less-system/my/mount-partitions-thinkpad.sh
|
Shell
|
mit
| 1,357 |
#network interface on which to limit traffic
IF="eth0"
#limit of the network interface in question
LINKCEIL="1gbit"
#limit outbound Stardust protocol traffic to this rate
LIMIT="160kbit"
#defines the address space for which you wish to disable rate limiting
LOCALNET="192.168.0.0/16"
#delete existing rules
tc qdisc del dev ${IF} root
#add root class
tc qdisc add dev ${IF} root handle 1: htb default 10
#add parent class
tc class add dev ${IF} parent 1: classid 1:1 htb rate ${LINKCEIL} ceil ${LINKCEIL}
#add our two classes. one unlimited, another limited
tc class add dev ${IF} parent 1:1 classid 1:10 htb rate ${LINKCEIL} ceil ${LINKCEIL} prio 0
tc class add dev ${IF} parent 1:1 classid 1:11 htb rate ${LIMIT} ceil ${LIMIT} prio 1
#add handles to our classes so packets marked with <x> go into the class with "... handle <x> fw ..."
tc filter add dev ${IF} parent 1: protocol ip prio 1 handle 1 fw classid 1:10
tc filter add dev ${IF} parent 1: protocol ip prio 2 handle 2 fw classid 1:11
#delete any existing rules
#disable for now
#ret=0
#while [ $ret -eq 0 ]; do
# iptables -t mangle -D OUTPUT 1
# ret=$?
#done
#limit outgoing traffic to and from port 31460. but not when dealing with a host on the local network
# (defined by $LOCALNET)
# --set-mark marks packages matching these criteria with the number "2"
# these packages are filtered by the tc filter with "handle 2"
# this filter sends the packages into the 1:11 class, and this class is limited to ${LIMIT}
iptables -t mangle -A OUTPUT -p tcp -m tcp --dport 31460 ! -d ${LOCALNET} -j MARK --set-mark 0x2
iptables -t mangle -A OUTPUT -p tcp -m tcp --sport 31460 ! -d ${LOCALNET} -j MARK --set-mark 0x2
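# To verify the shaping (illustrative commands, not part of the setup above):
# tc -s class show dev ${IF}
# iptables -t mangle -L OUTPUT -v -n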
|
ctwiz/stardust
|
contrib/qos/tc.sh
|
Shell
|
mit
| 1,674 |
#!/usr/bin/ksh
#
# audit the loads relation and all other loads. check if the files
# exist under /lcsl100.
#
###############################################################################
#
CMD=${0}
#
integer OKNUM
integer NOTOKNUM
integer TOTALOKNUM
integer TOTALNOTOKNUM
#
verbose=no
exitonerror=no
#
tmp=/tmp/tmp$$
okcpuloads=/tmp/okcpuloads$$
notokcpuloads=/tmp/notokcpuloads$$
#
vecho() {
if [[ "${verbose}" == "yes" ]]
then
echo "${@}"
fi
}
#
usage() {
echo "usage: $CMD [-x?] [-V]"
echo
echo "where:"
echo " -x - enable debug mode"
echo " -? - print usage message"
echo " -V - enable verbose mode"
echo " -X - exit on error"
}
#
set -- $(getopt ?xXV ${*})
if [[ ${?} -ne 0 ]]
then
usage
exit 0
fi
#
for opt in ${*}
do
case "${opt}" in
-x)
set -x
shift
;;
-V)
verbose=yes
shift
;;
-X)
exitonerror=yes
shift
;;
--)
shift
break
;;
esac
done
#
echo
echo "Auditing LCS relation 'loads' and dependent relations ..."
#
if [[ -z "${LCSTOOLSDATA}" ]]
then
echo
echo "LCSTOOLSDATA not defined." >&2
exit 2
fi
#
cd "${LCSTOOLSDATA}"
#
RELATIONS="loads images "
EXIT=no
#
OKNUM=0
NOTOKNUM=0;
#
echo
echo "Verify relations exist ..."
#
for relation in ${RELATIONS}
do
OK=OK
#
vecho "Checking relation ${relation} ..."
#
relfile="${LCSTOOLSDATA}/${relation}"
drelfile="${LCSTOOLSDATA}/D${relation}"
if [[ ! -r "${relfile}" ]]
then
echo "${relfile} not readable." >&2
OK="NOT OK"
EXIT=yes
fi
if [[ ! -r "${drelfile}" ]]
then
echo "${drelfile} not readable." >&2
OK="NOT OK"
EXIT=yes
fi
#
case "${OK}" in
"OK")
OKNUM=$OKNUM+1
;;
"NOT OK")
NOTOKNUM=$NOTOKNUM+1
;;
esac
#
vecho "${OK}."
done
#
TOTALOKNUM=${TOTALOKNUM}+${OKNUM}
TOTALNOTOKNUM=${TOTALNOTOKNUM}+${NOTOKNUM}
#
if [[ "${EXIT}" == "yes" ]]
then
echo "OK=$OKNUM, NOTOK=$NOTOKNUM"
echo "Exit ${CMD}."
exit 2
else
echo "OK=$OKNUM, NOTOK=$NOTOKNUM"
fi
#
OKNUM=0
NOTOKNUM=0;
#
>${okcpuloads}
>${notokcpuloads}
#
echo
echo "Verify CPU loads exist ..."
#
cat loads |
while read branch cpuload basedir
do
OK=OK
#
cpuloadpath="${basedir}/${branch}/swCPU/${cpuload}_cpu.tar.gz"
vecho "Checking ${cpuloadpath} ..."
#
if [[ ! -r "${cpuloadpath}" ]]
then
echo "CPU load file ${cpuloadpath} does not exist." >&2
OK="NOT OK"
EXIT=${exitonerror}
fi
#
case "${OK}" in
"OK")
OKNUM=$OKNUM+1
echo "$branch $cpuload $basedir" >>${okcpuloads}
;;
"NOT OK")
NOTOKNUM=$NOTOKNUM+1
echo "$branch $cpuload $basedir" >>${notokcpuloads}
;;
esac
#
vecho "${OK}."
done
#
TOTALOKNUM=${TOTALOKNUM}+${OKNUM}
TOTALNOTOKNUM=${TOTALNOTOKNUM}+${NOTOKNUM}
#
if [[ "${EXIT}" == "yes" ]]
then
echo "OK=$OKNUM, NOTOK=$NOTOKNUM"
echo "Exit ${CMD}."
exit 2
else
echo "OK=$OKNUM, NOTOK=$NOTOKNUM"
fi
#
if [[ -s "${notokcpuloads}" ]]
then
echo
echo "Non-existant load(s) are:"
cat ${notokcpuloads}
fi
#
OKNUM=0
NOTOKNUM=0;
#
>${okcpuloads}
>${notokcpuloads}
#
echo
echo "Verify images exist ..."
#
cat images |
while read branch cpuload imagetype imagename
do
OK=OK
# get base directory for this branch and cpuload
grep "$branch $cpuload " ${okcpuloads} >$tmp
if [[ ! -s "$tmp" ]]
then
echo "No BASEDIR found for $branch $cpuload $imagename"
NOTOKNUM=$NOTOKNUM+1
continue
fi
cpuloadpath="${basedir}/${branch}/swCPU/${cpuload}_cpu.tar.gz"
vecho "Checking ${cpuloadpath} ..."
#
if [[ ! -r "${cpuloadpath}" ]]
then
echo "CPU load file ${cpuloadpath} does not exist." >&2
OK="NOT OK"
EXIT=${exitonerror}
fi
#
case "${OK}" in
"OK")
OKNUM=$OKNUM+1
echo "$branch $cpuload $basedir" >>${okcpuloads}
;;
"NOT OK")
NOTOKNUM=$NOTOKNUM+1
echo "$branch $cpuload $basedir" >>${notokcpuloads}
;;
esac
#
vecho "${OK}."
done
#
TOTALOKNUM=${TOTALOKNUM}+${OKNUM}
TOTALNOTOKNUM=${TOTALNOTOKNUM}+${NOTOKNUM}
#
if [[ "${EXIT}" == "yes" ]]
then
echo "OK=$OKNUM, NOTOK=$NOTOKNUM"
echo "Exit ${CMD}."
exit 2
else
echo "OK=$OKNUM, NOTOK=$NOTOKNUM"
fi
#
if [[ -s "${notokcpuloads}" ]]
then
echo
echo "Non-existant load(s) are:"
cat ${notokcpuloads}
fi
#
# all done
#
echo
echo "TOTAL OK=$TOTALOKNUM, TOTAL NOTOK=$TOTALNOTOKNUM"
#
if [[ "${TOTALNOTOKNUM}" == 0 ]]
then
echo "Everything OK."
else
echo "Errors detected."
fi
#
#
exit 0
|
ombt/ombt
|
lts08.lcstools/tools/src/saveload/SAVE/auditloads.sh
|
Shell
|
mit
| 4,198 |
#!/bin/bash
source ./scripts_config
echo "Importing watchers information from $PLAIN_WATCHERS_FILE to redis"
node ./import_watchers_to_redis.js $PLAIN_WATCHERS_FILE
|
anvaka/ghindex
|
03_import_watch_stream_to_redis.sh
|
Shell
|
mit
| 168 |
#!/usr/bin/env sh
vendor/eher/phpunit/bin/phpunit tests
|
app55/app55-php
|
runtests.sh
|
Shell
|
mit
| 56 |
#!/bin/bash
mkdir -p /opt/local/bin/RecordStream
pushd /opt/local/bin/RecordStream
apt-get -y install ia32-libs
if ! curl https://s3.amazonaws.com/breadcrumb.install/recs.tar.gz > recs.tar.gz; then
echo "Failed to download the RecordStream package"
exit 1
fi
rm -rf RecordStream
tar xzf recs.tar.gz
rm recs.tar.gz
for i in `ls RecordStream/bin`; do ln -sf /opt/local/bin/RecordStream/RecordStream/bin/$i /usr/local/bin/$i; done
popd
|
benbernard/HomeDir
|
bin/load-recordstream.sh
|
Shell
|
mit
| 448 |
#!/bin/bash
echo "Instalando o Apache..."
#Instalando o Apache.
apt-get -y install apache2 apache2-utils
#Ativando o mod_rewrite
cd /etc/apache2/mods-enabled
ln -sf ../mods-available/rewrite.load
cd -
#Reiniciando o apache
service apache2 restart
|
leonardothibes/trac
|
conf.d/16-apache/install.sh
|
Shell
|
mit
| 251 |
#!/usr/bin/env bash
set -e
if [ -z "$1" ]; then
echo "Missing PAAS space argument"
echo " deploy.sh staging|production"
exit 1
fi
PAAS_SPACE=$1
wget -q -O - https://packages.cloudfoundry.org/debian/cli.cloudfoundry.org.key | sudo apt-key add -
echo "deb http://packages.cloudfoundry.org/debian stable main" | sudo tee /etc/apt/sources.list.d/cloudfoundry-cli.list
sudo apt-get update && sudo apt-get install cf-cli
cf login -u $PAAS_USER -p $PAAS_PASSWORD -a https://api.cloud.service.gov.uk -o gds-performance-platform -s $PAAS_SPACE
# deploy apps
cf push -f manifest.yml
# create and map routes
cf map-route $PAAS_SERVICE cloudapps.digital --hostname $PAAS_SERVICE-$PAAS_SPACE
|
alphagov/performanceplatform-big-screen-view
|
etc/deploy.sh
|
Shell
|
mit
| 698 |
#!/bin/sh
source ".env"
mkdir -p ~/tmp/mongodump
mongodump -h ${MONGO_REMOTE_HOST} -d ${MONGO_REMOTE_DB} -u ${MONGO_REMOTE_USER} -p ${MONGO_REMOTE_PASS} -o ~/tmp/mongodump
mongorestore -h ${MONGO_LOCAL_HOST} -d ${MONGO_LOCAL_DB} --drop ~/tmp/mongodump/${MONGO_REMOTE_DB}
rm -Rf ~/tmp/mongodump
|
rhythmagency/rhythm.profiles
|
pullmongodb.sh
|
Shell
|
mit
| 297 |
#!/bin/bash
pushd /home/seanneal/tweetbot/
echo beginning tweet bot
export LC_ALL=C.UTF-8
export LANG=C.UTF-8
git pull
pipenv run python tweet_bot.py
popd
|
seanneal/tweetbot
|
scripted_tweets.sh
|
Shell
|
mit
| 157 |
#!/bin/sh
# init
cd src/
rm -rf vendor
mkdir vendor
cd vendor
cp -r ../../vendor/* .
# Doctrine
cd doctrine
rm -rf UPGRADE* build* bin tests tools lib/vendor/doctrine-common/build* lib/vendor/doctrine-common/tests lib/vendor/doctrine-dbal/bin lib/vendor/doctrine-dbal/tests lib/vendor/doctrine-dbal/tools lib/vendor/doctrine-dbal/build* lib/vendor/doctrine-dbal/UPGRADE*
cd ..
# Doctrine migrations
cd doctrine-migrations
rm -rf tests build*
cd ..
# Doctrine MongoDB
cd doctrine-mongodb
rm -rf tests build* tools
cd ..
# Propel
# git clone git://github.com/fzaninotto/propel.git propel
cd propel
rm -rf contrib docs test WHATS_NEW INSTALL CHANGELOG
cd ..
# Phing
cd phing
rm -rf README bin docs etc pear test
cd ..
# Swiftmailer
cd swiftmailer
rm -rf CHANGES README* build* docs notes test-suite tests create_pear_package.php package*
cd ..
# Symfony
cd symfony
rm -rf README phpunit.xml.dist tests
cd ..
# Twig
cd twig
rm -rf AUTHORS CHANGELOG README.markdown bin doc package.xml.tpl phpunit.xml test
cd ..
# Zend Framework
cd zend
rm -rf INSTALL.txt README* bin demos documentation resources tests tools working
mkdir library/tmp
mv library/Zend/Exception.php library/tmp/
mv library/Zend/Log library/tmp/
rm -rf library/Zend
mv library/tmp library/Zend
cd ..
# cleanup
find . -name .git | xargs rm -rf -
find . -name .gitignore | xargs rm -rf -
find . -name .gitmodules | xargs rm -rf -
find . -name .svn | xargs rm -rf -
cd ../..
|
noelg/symfony-news
|
prepare_vendors.sh
|
Shell
|
mit
| 1,446 |
#!/bin/bash
# Provide functions for provisioning.
RED="0;31"
GREEN="0;32"
BLUE="0;34"
# Output a string with a colour
# $1 - colour
# $2 - string
output() {
echo -e "\033[$1m[PROVISIONER] $2\033[0m"
}
box_install_apache2() {
output $BLUE "Installing Apache..."
apt-get -qq -y install apache2
cp /vagrant/vagrant/env/apache2/apache2.conf /etc/apache2/apache2.conf
cp /vagrant/vagrant/env/apache2/site.conf /etc/apache2/sites-available/vagrant.conf
sed -i "s@{DOCUMENT_ROOT}@$web_root_dir@g" /etc/apache2/sites-available/vagrant.conf
sed -i "s@{LOG_DIR}@$web_logs_dir@g" /etc/apache2/sites-available/vagrant
rm /etc/apache2/sites-enabled/*
ln -s /etc/apache2/sites-available/vagrant /etc/apache2/sites-enabled/
a2enmod proxy proxy_fcgi rewrite
service apache2 restart
output $GREEN "Apache installed and configured."
}
box_install_curl() {
output $BLUE "Installing cURL..."
apt-get -qq -y install curl
output $GREEN "cURL installed."
}
box_install_git() {
output $BLUE "Installing Git..."
apt-get -qq -y install git
output $GREEN "Git installed."
}
box_install_mariadb() {
output $BLUE "Installing Mariadb..."
apt-get -qq -y install mariadb-server mariadb-client
cp /vagrant/vagrant/env/mariadb/my.cnf /etc/mysql/my.cnf
service mysql restart
mysqladmin -u root password $db_password
echo "CREATE USER 'root'@'%' IDENTIFIED BY '$db_password';" | mysql -u root -p$db_password
echo "GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' WITH GRANT OPTION;" | mysql -u root -p$db_password
echo "DROP USER 'root'@'localhost';" | mysql -u root -p$db_password
echo "FLUSH PRIVILEGES;" | mysql -u root -p$db_password
output $GREEN "Mariadb installed and configured."
}
box_install_memcached() {
output $BLUE "Installing Memcached..."
apt-get -qq -y install memcached php-memcached
service php7.0-fpm restart
output $GREEN "Memcached installed."
}
box_install_mysql() {
output $BLUE "Installing MySQL..."
apt-get -qq -y install mysql-server mysql-client
cp /vagrant/vagrant/env/mysql/my.cnf /etc/mysql/my.cnf
service mysql restart
mysqladmin -u root password $db_password
echo "CREATE USER 'root'@'%' IDENTIFIED BY '$db_password';" | mysql -u root -p$db_password
echo "GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' WITH GRANT OPTION;" | mysql -u root -p$db_password
echo "DROP USER 'root'@'localhost';" | mysql -u root -p$db_password
echo "FLUSH PRIVILEGES;" | mysql -u root -p$db_password
output $GREEN "MySQL installed and configured."
}
box_install_nginx() {
output $BLUE "Installing Nginx..."
apt-get -qq -y install nginx
cp /vagrant/vagrant/env/nginx/nginx.conf /etc/nginx/nginx.conf
cp /vagrant/vagrant/env/nginx/site.conf /etc/nginx/sites-available/vagrant
sed -i "s@{DOCUMENT_ROOT}@${web_root_dir}@g" /etc/nginx/sites-available/vagrant
sed -i "s@{LOG_DIR}@${web_logs_dir}@g" /etc/nginx/sites-available/vagrant
rm /etc/nginx/sites-enabled/*
ln -s /etc/nginx/sites-available/vagrant /etc/nginx/sites-enabled/
service nginx restart
output $GREEN "Nginx installed and configured."
}
box_install_php() {
output $BLUE "Installing PHP..."
apt-get -qq -y install php-fpm php-cli php-curl php-gd php-json php-mcrypt \
php-mysql php-xml php-mbstring
sed -i "s/^user\s\=\swww\-data/user = ubuntu/g" /etc/php/7.0/fpm/pool.d/www.conf
sed -i "s/^group\s\=\swww\-data/group = ubuntu/g" /etc/php/7.0/fpm/pool.d/www.conf
cp /vagrant/vagrant/env/php/php.ini /etc/php/7.0/cli/php.ini
cp /vagrant/vagrant/env/php/php.ini /etc/php/7.0/fpm/php.ini
service php7.0-fpm restart
output $GREEN "PHP installed and configured."
}
box_install_php_composer() {
output $BLUE "Installing Composer..."
curl -sS https://getcomposer.org/installer | php
mv composer.phar /usr/local/bin/composer
output $GREEN "Composer installed."
}
box_install_ruby() {
output $BLUE "Installing Ruby..."
gpg --keyserver hkp://keys.gnupg.net --recv-keys D39DC0E3
curl -sSL https://get.rvm.io | bash -s stable --ruby
output $GREEN "Ruby installed."
}
box_install_zip() {
output $BLUE "Installing Zip..."
apt-get -qq -y install zip
output $GREEN "Zip installed."
}
# Make sure the box stores a hidden file to say when it was installed
box_register() {
echo "Box provisioned - $(date)" > /home/ubuntu/.provisioned
}
# Update the box - common functionality
box_update() {
# Perform /etc/hosts fix
sed -i "s@localhost@localhost ubuntu-xenial@g" /etc/hosts
output $BLUE "Updating apt-get sources..."
apt-get -qq -y update
apt-get -qq -y upgrade
}
|
jamiefdhurst/vagrant-dev
|
vagrant/sh/functions.sh
|
Shell
|
mit
| 4,701 |
#!/bin/bash
#give the owner read, write and execute permission on every file in the current directory
sudo chmod 700 *
|
htoo97/Janus
|
change_permissions.sh
|
Shell
|
mit
| 110 |
#!/bin/bash
DESCRIPTION="HyprIoT Cluster Lab."
DEPENDENCIES="avahi-utils, vlan, dnsmasq"
BUILD_DIR="./"
VERSION="$(cat VERSION)"
PACKAGE_VERSION=${VERSION}-${1:-"1"}
PROJECT_NAME=hypriot-cluster-lab
PACKAGE_NAME="${PROJECT_NAME}_${PACKAGE_VERSION}"
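# package/DEBIAN/control is treated as a template: the sed calls below fill
# in the <VERSION>, <NAME>, <SIZE>, <DESCRIPTION> and <DEPENDS> placeholders.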
mkdir -p ${BUILD_DIR}/${PACKAGE_NAME}
cp -r package/* ${BUILD_DIR}/${PACKAGE_NAME}/
sed -i'' "s/<VERSION>/${PACKAGE_VERSION}/g" ${BUILD_DIR}/${PACKAGE_NAME}/DEBIAN/control
sed -i'' "s/<NAME>/${PROJECT_NAME}/g" ${BUILD_DIR}/${PACKAGE_NAME}/DEBIAN/control
sed -i'' "s/<SIZE>/60/g" ${BUILD_DIR}/${PACKAGE_NAME}/DEBIAN/control
sed -i'' "s/<DESCRIPTION>/${DESCRIPTION}/g" ${BUILD_DIR}/${PACKAGE_NAME}/DEBIAN/control
sed -i'' "s/<DEPENDS>/${DEPENDENCIES}/g" ${BUILD_DIR}/${PACKAGE_NAME}/DEBIAN/control
cd ${BUILD_DIR} && dpkg-deb --build ${PACKAGE_NAME}
cd -
rm -rf ${BUILD_DIR}/${PACKAGE_NAME}
|
hypriot/cluster-lab
|
build.sh
|
Shell
|
mit
| 843 |
#summary of spree new app local generation
spree [railsappname] -d mysql
mysqladmin create [dbname] -u root -p
cd [railsappname]
#edit config/database.yml
#paste /vendor/lib/smtp_tls.rb for ruby1.8.7 compatibility bug
rake db:bootstrap
rake db:migrate
sudo ruby script/server -p 80 -u
#login to http://localhost/admin/products/ and edit "Available on:", setting it to a past date
Email [[email protected]]:
Password [spree]:
#spree normal installation ----------------------------------------------------
#NG: gem1.8 update spree
#version 0.9.1: the rubygems dependencies and version management are so convoluted that even installing, let alone updating, is difficult
#gem install spree hangs with no response; fetching it with git clone and running the steps below also failed.
#software that only its authors can update/install is not a package, just a bespoke system.
sudo gem1.8 update authlogic
sudo gem1.8 install chriseppstein-compass
sudo gem1.8 install rsl-stringex
sudo gem1.8 install javan-whenever
sudo gem1.8 install searchlogic
#mv config/database.yml.sample config/database.yml
#vim config/environment.rb ":
config.gem 'haml', :version => '>=2.2.0'
sudo rake db:migrate
rake db:admin:create
#if spree had a solid uninstall command, merging with 2chnrg below would also be worth considering:
#rename 2chnrg
#spree -s 2chnrg -d mysql
cd 2chnrg
#migrate config/database.yml
#paste /vendor/lib/smtp_tls.rb for ruby1.8.7 compatibility bug
rake db:bootstrap
rake db:migrate
#spree on heroku (failed)----------------------------------------------------
sudo gem1.8 sources -a http://gems.github.com
cd -
git clone git://github.com/schof/spree.git spree
rake -h;rails -v
vim config/boot.rb
vim lib/tasks/release.rake
rake gems:install
cd spree/config/
cp database.yml.example database.yml
#edit config/boot.rb line58 as follows
load_rails("2.2.2") # note: spree requires rails 2.2.2 (change at your own risk)
touch $RAILS_ROOT/lib/smtp_gmail.rb
#vim smtp_gmail.rb above
#vim database.yml
#rake gems:install
grep -R 1.15 *
vim config/environment.rb
sudo gem1.8 install has_many_polymorphs
sudo gem1.8 install highline --version 1.4.0
sudo gem1.8 install mini_magick
sudo gem1.8 install tlsmail
cp -R calendar_date_select-1.15/ /home/you/RoRapps/spree/spree/vendor/gems/
cp -R authlogic-2.0.9/ /home/you/RoRapps/spree/spree/vendor/gems/
cp -R activemerchant-1.4.1/ /home/you/RoRapps/spree/spree/vendor/gems/
cp -R has_many_polymorphs-2.13/ /home/you/RoRapps/spree/spree/vendor/gems/
cp -R highline-1.4.0/ /home/you/RoRapps/spree/spree/vendor/gems/
cp -R mini_magick-1.2.3/ /home/you/RoRapps/spree/spree/vendor/gems/
cp -R tlsmail-0.0.1/ /home/you/RoRapps/spree/spree/vendor/gems/
#install postgresql
sudo apt-get install postgresql postgresql-client postgresql-contrib pgadmin3
# PostgreSQL setup.
# passwd postgres ## set the postgres user's password.
# su postgres
$ psql template1
template1=# alter user postgres with password '******'; ## replace ****** with a suitable password.
template1=# \q ## quit with \q.
$ createuser -AdPE hoge ## replace hoge with a suitable regular user; this grants that user database-creation privileges.
$ vi /etc/postgresql/8.3/main/postgresql.conf
listen_addresses = 'localhost'
↓
listen_addresses = '*' ## change this to '*'.
$ vi /etc/postgresql/8.3/main/pg_hba.conf
## add the following at the bottom of the file; adjust 192.168.11.1 to your own network environment.
local all all trust
host all all 192.168.11.1 255.255.255.255 trust
host all all 0.0.0.0 0.0.0.0 password crypt
$ exit
# /etc/init.d/postgresql-8.3 restart
edit database.yml configuration:
development:
adapter: postgresql
host: localhost
port: 5432
database: postgres
username: postgres
password: [your-password]
rake db:bootstrap
Create the admin user (press enter for defaults).
Email [[email protected]]:
Password [spree]:
Load Sample Data? [y]:
Sample products have been loaded into to the store
Finished.
|
k-takami/dotfiles
|
CHEATSHEETS/_legacy/RoR/spree.sh
|
Shell
|
mit
| 4,927 |
#!/bin/bash
git pull
echo "Updating Sub Repos"
if [ ! -d "./app/drupalcore" ]; then
git clone https://git.drupalcode.org/project/drupal.git ./app/drupalcore
else
cd ./app/drupalcore
git remote update
git remote set-head origin -a
git checkout origin/HEAD
cd ../bin
fi
./cores.rb > ../../dist/next.html
./json.rb > ../../dist/next.json
cd ../../dist
mv next.html index.html
mv next.json index.json
|
lauriii/drupalcores
|
app/bin/build.sh
|
Shell
|
mit
| 414 |
minimal() {
# Use colors, but only if connected to a terminal, and that terminal
# supports them.
if which tput >/dev/null 2>&1; then
ncolors=$(tput colors)
fi
if [ -t 1 ] && [ -n "$ncolors" ] && [ "$ncolors" -ge 8 ]; then
RED="$(tput setaf 1)"
GREEN="$(tput setaf 2)"
YELLOW="$(tput setaf 3)"
BLUE="$(tput setaf 4)"
BOLD="$(tput bold)"
NORMAL="$(tput sgr0)"
else
RED=""
GREEN=""
YELLOW=""
BLUE=""
BOLD=""
NORMAL=""
fi
# Only enable exit-on-error after the non-critical colorization stuff,
# which may fail on systems lacking tput or terminfo
set -e
###################################
# Golang
printf "${BLUE}Installing BZIP2...${NORMAL}\n"
sudo apt-get install bzip2
printf "${BLUE}Installing LiteIDE for Go language...${NORMAL}\n"
wget http://downloads.sourceforge.net/project/liteide/X29/liteidex29.linux-64-qt4.tar.bz2 -O /tmp/liteide.tar.bz2
bzip2 -d /tmp/liteide.tar.bz2
tar -C ~ -xf /tmp/liteide.tar
}
# Run the install
minimal
|
jig/myubuntu
|
install-golangliteide.sh
|
Shell
|
mit
| 1,041 |
#!/bin/bash
# simple script to set up an environment to hack on.
# installs all strider-* deps as git clone's from the master branch.
# you can hack on them by editing under node_modules/strider-foo
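# Usage: ./dev.sh        clone each dep into ../<module> and npm-link it
#        ./dev.sh up     pull each dep and reinstall its node_modules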
DEPS="strider-custom strider-python strider-node strider-env strider-sauce strider-simple-runner strider-extension-loader strider-github strider-ruby"
# NOTE: There are other modules we should make sure work with 1.4 too:
# DEPS="$DEPS strider-jelly strider-qunit strider-browserstack"
BASE="[email protected]:/Strider-CD"
mkdir -p node_modules
for module in $DEPS
do
if [ "$1" = "up" ]
then
(cd ../$module && git pull && rm -rf node_modules && npm i)
else
git clone $BASE/$module ../$module
(cd ../$module && npm install)
npm link ../$module
fi
done
npm i
echo "> Strider is set up! Hack on modules under node_modules. They are git repos!"
|
d3ming/strider
|
dev.sh
|
Shell
|
mit
| 871 |
#!/bin/bash
QUERY_HASH="42323d64886122307be10013ad2dcc44"
#QUERY_HASH="a5164aed103f24b03e7b7747a2d94e3c"
URL="https://www.instagram.com/graphql/query/"
username="$1"
if [ "x" = "x$username" ]; then
username="modela_asia"
fi
rhx=
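# The web client (at the time this script worked) signed each GraphQL request
# with an "x-instagram-gis" header: md5 of "<rhx_gis>:<variables JSON>".
# graphql() below reproduces that scheme.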
graphql() {
json="$1"
magic="$rhx:$json"
md5=$(echo -n "$magic" | md5)
url="$URL?query_hash=$QUERY_HASH&variables=$json"
wget -q -O - -U Mozilla --header "x-instagram-gis: $md5" "$url"
}
get_user() {
user_id=$1; after="$2"
if [ "x" = "x$after" ]; then
json=$(echo "{'id':'$user_id','first':50,'after':null}" | sed "s/'/\"/g")
else
json=$(echo "{'id':'$user_id','first':50,'after':'$after'}" | sed "s/'/\"/g")
fi
graphql "$json"
}
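# parse_username() pulls every "text":"..." value out of the JSON using only
# bash parameter expansion (no jq), then extracts each @mention from those
# texts.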
parse_username() {
json="$1"
x1=${json}
x2=${x1#*text\":\"}
while [ "$x1" != "$x2" ]; do
username=${x2%%\"*}
echo $username
x1="$x2"
x2=${x1#*text\":\"}
done | while read n; do
y1=$(echo "$n" | sed 's/\\n/ /g')
y2=${y1#*@}
while [ "$y1" != "$y2" ]; do
echo ${y2%% *}
y1="$y2"
y2=${y1#*@}
done
done
}
page=$(wget -q -O - -U Mozilla "https://www.instagram.com/$username")
data=$(echo "$page" | grep _sharedData)
user_id=${data#*graphql}; user_id=${user_id#*id\":\"}; user_id=${user_id%%\"*}
rhx=${data#*rhx_gis\":\"}; rhx=${rhx%%\"*}
has_next_page="true"
end_cursor=
while [ "$has_next_page" = "true" ]; do
json=$(get_user $user_id $end_cursor)
has_next_page=${json#*has_next_page\":}; has_next_page=${has_next_page%%,*}
end_cursor=${json#*end_cursor\":\"}; end_cursor=${end_cursor%%\"*}
parse_username "$json"
done
|
wrenth04/download-scripts
|
ig/ids.sh
|
Shell
|
mit
| 1,583 |
#!/bin/sh
# This is the main script for provisioning. Out of this
# file, everything that builds the box is executed.
### Software ###
# NodeJS 4.4.5
# MongoDB 3.2.7
# MySQL
# Git
### Build the box
# Make /opt directory owned by vagrant user
sudo chown vagrant:vagrant /opt/
### Update the system
sudo apt-get update
# echo mysql-server mysql-server/root_password password password | sudo debconf-set-selections
# echo mysql-server mysql-server/root_password_again password password | sudo debconf-set-selections
### Install system dependencies
sudo apt-get install -y build-essential curl gcc g++ git libaio1 libaio-dev nfs-common openssl
### NodeJS ###
### Node 4.4.5
# Download the binary
# wget http://nodejs.org/dist/v4.4.5/node-v4.4.5-linux-x64.tar.gz -O /tmp/node-v4.4.5-linux-x64.tar.gz
# Unpack it
# cd /tmp
# tar -zxvf /tmp/node-v4.4.5-linux-x64.tar.gz
# mv /tmp/node-v4.4.5-linux-x64 /opt/node-v4.4.5-linux-x64
# ln -s /opt/node-v4.4.5-linux-x64 /opt/nodejs
# MY NODE INSTALL SHIT
curl -sL https://deb.nodesource.com/setup_6.x | sudo -E bash -
sudo apt-get install -y nodejs
# Set the node_path
export NODE_PATH=/opt/nodejs/lib/node_modules
export NODE_PATH=$NODE_PATH:/opt/dev/node_modules
export NODE_PATH=$NODE_PATH:/opt/dev/lib/node_modules
export NODE_PATH=$NODE_PATH:/usr/local/lib/node_modules
# Install global Node dependencies
# /opt/nodejs/bin/npm install -g n
sudo npm install -g n
# /opt/nodejs/bin/npm config set loglevel http
sudo npm config set loglevel http
### Add binaries to path ###
# First run the command
# export PATH=$PATH:/opt/mongodb/bin:/opt/mysql/server-5.6/bin:/opt/nodejs/bin
export NODE_PATH=/opt/nodejs/lib/node_modules
export NODE_PATH=$NODE_PATH:/opt/dev/node_modules
export NODE_PATH=$NODE_PATH:/opt/dev/lib/node_modules
export NODE_PATH=$NODE_PATH:/usr/local/lib/node_modules/lib/node_modules:/usr/local/lib/node_modules
# Now save to the /etc/bash.bashrc file so it works on reboot
cp /etc/bash.bashrc /tmp/bash.bashrc
printf "\n#Add binaries to path\n\nexport PATH=$PATH:/opt/mongodb/bin:/opt/mysql/server-5.6/bin:/opt/nodejs/bin\nexport NODE_PATH=/opt/nodejs/lib/node_modules\nexport NODE_PATH=$NODE_PATH:/opt/dev/node_modules\nexport NODE_PATH=$NODE_PATH:/opt/dev/lib/node_modules" > /tmp/path
cat /tmp/path >> /tmp/bash.bashrc
sudo chown root:root /tmp/bash.bashrc
sudo mv /tmp/bash.bashrc /etc/bash.bashrc
### Update the /etc/hosts file ###
printf '127.0.0.1 localhost\n127.0.1.1 debian-squeeze.caris.de debian-squeeze nodebox\n\n# The following lines are desirable for IPv6 capable hosts\n::1 ip6-localhost ip6-loopback\nfe00::0 ip6-localnet\nff00::0 ip6-mcastprefix\nff02::1 ip6-allnodes\nff02::2 ip6-allrouters' > /tmp/hosts
sudo mv /tmp/hosts /etc/hosts
### Install Git Aware Prompt ###
mkdir ~/.bash
cd ~/.bash
git clone git://github.com/jimeh/git-aware-prompt.git
printf 'export GITAWAREPROMPT=~/.bash/git-aware-prompt\nsource $GITAWAREPROMPT/main.sh\n\nexport PS1="\${debian_chroot:+(\\$debian_chroot)}\\[\\033[01;32m\\]\\u@\\h\\[\\033[00m\\]:\\[\\033[01;34m\\]\\w\\[\\033[00m\\] \\[$txtcyn\\]\\$git_branch\\[$txtred\\]\\$git_dirty\\[$txtrst\\]\$ "' > ~/.bash_profile
### Set a message of the day ###
sudo rm /etc/motd
sudo cp /vagrant/vagrant-scripts/motd.txt /etc/motd
### Test that everything is installed ok ###
printf "\n\n--- Running post-install checks ---\n\n"
node /vagrant/vagrant-scripts/postInstall.js
### Finished ###
printf "\n\n--- NodeBox is now built ---\n\n"
### MY STUFF
# sudo npm install -g npm@latest --no-bin-links
sudo npm install -g node-pre-gyp --no-bin-links
# npm install -g @angular/cli --no-bin-links
sudo npm install http-server -g
sudo npm install [email protected] -g
printf "\n\n--- App system prerequisites are now installed. Running npm install... ---\n\n"
sudo mkdir /home/vagrant/node_modules
sudo ln -s /home/vagrant/node_modules/ /opt/dev/backend/node_modules
sudo rm -rf /opt/dev/backend/node_modules/*
cd /opt/dev/backend/ && sudo npm install --no-bin-links
printf "\n\n--- NPM install for the server completed ---\n Run 'vagrant ssh' to access the VM \n"
# node /opt/dev/backend/index.js &
export PATH=$PATH:~/bin
sudo cp /vagrant/vagrant-scripts/runall.sh /bin
sudo mv /bin/runall.sh /bin/runall
# /vagrant/runall.sh
# cd /opt/dev/backend/ && node index.js &
|
JuroUhlar/kypo-vagrant
|
vagrant-scripts/provision.sh
|
Shell
|
mit
| 4,336 |
#! /usr/bin/env bash
# chkconfig: 2345 99 01
# description: Telegraf daemon
### BEGIN INIT INFO
# Provides: telegraf
# Required-Start: $all
# Required-Stop: $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Start telegraf at boot time
### END INIT INFO
# this init script supports three different variations:
# 1. New lsb that define start-stop-daemon
# 2. Old lsb that don't have start-stop-daemon but define, log, pidofproc and killproc
# 3. Centos installations without lsb-core installed
#
# In the third case we have to define our own functions which are very dumb
# and expect the args to be positioned correctly.
# Command-line options that can be set in /etc/default/telegraf. These will override
# any config file values.
TELEGRAF_OPTS=
if [ -r /lib/lsb/init-functions ]; then
source /lib/lsb/init-functions
fi
DEFAULT=/etc/default/telegraf
if [ -r $DEFAULT ]; then
source $DEFAULT
fi
if [ -z "$STDOUT" ]; then
STDOUT=/var/log/telegraf/telegraf.log
fi
if [ ! -f "$STDOUT" ]; then
mkdir -p `dirname $STDOUT`
fi
if [ -z "$STDERR" ]; then
STDERR=/var/log/telegraf/telegraf.log
fi
if [ ! -f "$STDERR" ]; then
mkdir -p `dirname $STDERR`
fi
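# Only the log directories are created here; the redirects in the start)
# branch below create the files themselves.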
OPEN_FILE_LIMIT=65536
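# Fallback implementations for systems without lsb-core (case 3 above); they
# expect exactly the argument layout used later in this script, e.g.:
#   pidofproc -p "$pidfile" "$daemon"
#   killproc  -p "$pidfile" SIGTERM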
function pidofproc() {
if [ $# -ne 3 ]; then
echo "Expected three arguments, e.g. $0 -p pidfile daemon-name"
fi
if [ ! -f "$2" ]; then
return 1
fi
local pidfile=`cat $2`
if [ "x$pidfile" == "x" ]; then
return 1
fi
if ps --pid "$pidfile" | grep -q $(basename $3); then
return 0
fi
return 1
}
function killproc() {
if [ $# -ne 3 ]; then
echo "Expected three arguments, e.g. $0 -p pidfile signal"
fi
pid=`cat $2`
kill -s $3 $pid
}
function log_failure_msg() {
echo "$@" "[ FAILED ]"
}
function log_success_msg() {
echo "$@" "[ OK ]"
}
# Process name ( For display )
name=telegraf
# Daemon name, where is the actual executable
daemon=/usr/bin/telegraf
# pid file for the daemon
pidfile=/var/run/telegraf/telegraf.pid
piddir=`dirname $pidfile`
if [ ! -d "$piddir" ]; then
mkdir -p $piddir
fi
# Configuration file
config=/etc/telegraf/telegraf.conf
confdir=/etc/telegraf/telegraf.d
# If the daemon is not there, then exit.
[ -x $daemon ] || exit 5
case $1 in
start)
# Checked the PID file exists and check the actual status of process
if [ -e $pidfile ]; then
pidofproc -p $pidfile $daemon > /dev/null 2>&1 && status="0" || status="$?"
# If the status is SUCCESS then don't need to start again.
if [ "x$status" = "x0" ]; then
log_failure_msg "$name process is running"
exit 0 # Exit
fi
fi
# Bump the file limits, before launching the daemon. These will carry over to
# launched processes.
ulimit -n $OPEN_FILE_LIMIT
if [ $? -ne 0 ]; then
log_failure_msg "set open file limit to $OPEN_FILE_LIMIT"
fi
log_success_msg "Starting the process" "$name"
if which start-stop-daemon > /dev/null 2>&1; then
start-stop-daemon --start --quiet --pidfile $pidfile --exec $daemon -- -pidfile $pidfile -config $config -config-directory $confdir $TELEGRAF_OPTS >>$STDOUT 2>>$STDERR &
else
nohup $daemon -pidfile $pidfile -config $config -config-directory $confdir $TELEGRAF_OPTS >>$STDOUT 2>>$STDERR &
fi
log_success_msg "$name process was started"
;;
stop)
# Stop the daemon.
if [ -e $pidfile ]; then
pidofproc -p $pidfile $daemon > /dev/null 2>&1 && status="0" || status="$?"
if [ "$status" = 0 ]; then
if killproc -p $pidfile SIGTERM && /bin/rm -rf $pidfile; then
log_success_msg "$name process was stopped"
else
log_failure_msg "$name failed to stop service"
fi
fi
else
log_failure_msg "$name process is not running"
fi
;;
restart)
# Restart the daemon.
$0 stop && sleep 2 && $0 start
;;
status)
# Check the status of the process.
if [ -e $pidfile ]; then
if pidofproc -p $pidfile $daemon > /dev/null; then
log_success_msg "$name Process is running"
exit 0
else
log_failure_msg "$name Process is not running"
exit 1
fi
else
log_failure_msg "$name Process is not running"
exit 3
fi
;;
version)
$daemon -version
;;
*)
# For invalid arguments, print the usage message.
echo "Usage: $0 {start|stop|restart|status|version}"
exit 2
;;
esac
|
aristanetworks/telegraf
|
Arista/ConfigFiles/init.sh
|
Shell
|
mit
| 4,878 |
#!/bin/bash
DEBUG_KEY='check-for-rebooting-machines'
debug() {
if [ -z "$DEBUG" ]; then
return 0
fi
local message="$1"
echo "$DEBUG_KEY: $message"
}
fatal() {
local message="$1"
echo "Error: $message"
exit 1
}
script_directory(){
local source="${BASH_SOURCE[0]}"
local dir=""
while [ -h "$source" ]; do # resolve $source until the file is no longer a symlink
dir="$( cd -P "$( dirname "$source" )" && pwd )"
source="$(readlink "$source")"
[[ $source != /* ]] && source="$dir/$source" # if $source was a relative symlink, we need to resolve it relative to the path where the symlink file was located
done
dir="$( cd -P "$( dirname "$source" )" && pwd )"
echo "$dir"
}
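# jq --compact-output keeps each instance's JSON on a single line, so main()
# can word-split the result into a bash array (assuming the selected fields
# contain no embedded spaces).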
get_service_instances() {
local tag="$1"
local region="$AWS_REGION"
if [ -z "$region" ]; then
region='us-west-2'
fi
aws ec2 describe-instances --region="$region" --filters "Name=tag:Name,Values=$tag" | jq --compact-output '.Reservations[].Instances[]'
}
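# test_ssh returns non-zero when an SSH connection to core@<ip> cannot be
# opened within 5 seconds; host key checking is disabled (presumably because
# the instances are ephemeral).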
test_ssh() {
local public_ip="$1"
ssh -o ConnectTimeout=5 -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -q core@$public_ip exit
}
usage(){
echo 'USAGE: check-for-rebooting-machines.sh <ec2-tag>'
echo ''
echo 'Arguments:'
echo ' -h, --help print this help text'
echo ' -v, --version print the version'
echo 'Environment:'
echo ' AWS_REGION: defaults to us-west-2'
echo ''
}
version(){
echo "1.0.0"
}
main() {
local tag="$1"
while [ "$1" != "" ]; do
local param="$1"
local value="$2"
case "$param" in
-h | --help)
usage
exit 0
;;
-v | --version)
version
exit 0
;;
*)
if [ "${param::1}" == '-' ]; then
echo "ERROR: unknown parameter \"$param\""
usage
exit 1
fi
if [ -n "$param" ]; then
tag="$param"
fi
;;
esac
shift
done
if [ -z "$tag" ]; then
echo "Missing EC2 instance tag as first argument"
usage
exit 1
fi
local service_instances=( $(get_service_instances "$tag") )
for service_instance in "${service_instances[@]}"; do
local public_ip="$(echo "$service_instance" | jq -r '.PublicIpAddress')"
local private_ip="$(echo "$service_instance" | jq -r '.PrivateIpAddress')"
if [ -z "$public_ip" -o -z "$private_ip" ]; then
continue;
fi
echo "public ip $public_ip"
echo "private ip $private_ip"
if [ "$private_ip" == "$private_ip" ]; then
test_ssh "$public_ip"
local ssh_exit_code=$?
if [ "$ssh_exit_code" != "0" ]; then
echo "OH NO! I can't ssh into $private_ip $public_ip"
fi
sleep 1
fi
done
}
main "$@"
|
octoblu/the-stack-services
|
scripts/check-for-rebooting-machines.sh
|
Shell
|
mit
| 2,714 |
#! /bin/bash
NAMES=(John Kate Ashish Lee Tom Omar)
echo "Names: "
for NAME in "${NAMES[@]}"
do
printf "\t%s\n" "$NAME"
done
|
jbelmont/unix-programming-and-regular-expressions-workshop
|
scripts/looping/for.sh
|
Shell
|
mit
| 127 |
#!/bin/bash
set -euf -o pipefail
#
# these variables come from docker --link feature
#
root_passwd=$DB_ENV_MYSQL_ROOT_PASSWORD
db_host=$DB_PORT_3306_TCP_ADDR
neuhome=$(readlink -f $(dirname $0)/..)
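# param() scans the key=value lines of config.ini (plus nonprod-config.ini
# when running from a NeuDev checkout) and reassembles values that themselves
# contain '=' characters.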
param() {
local configs="$neuhome/config/config.ini"
if [ x"$(basename "$neuhome")" = x"NeuDev" ] ; then
configs="$configs $neuhome/config/nonprod-config.ini"
fi
awk -vFS='=' '
$1 == "'$1'" {
res = ""
q = ""
for (i = 2; i <= NF ; i++) {
res = res q $i
q = "="
}
}
END {
print res
}
' $configs
}
dnsparam() {
param db_dns | tr ':;' '\n\n' | awk -vFS='=' '
$1 == "'$1'" {
print $2
}
'
}
db_user=$(param db_user)
db_pass=$(param db_pass)
db_name=$(dnsparam dbname)
exec mysql -u$db_user -p$db_pass -h$db_host $db_name
|
iliu-net/NeuSol
|
scripts/sqladm.sh
|
Shell
|
mit
| 798 |
#!/bin/bash
# this should not be found in this case
TEXTFILE=plugin_out.log
grep "this argument is passed to a variadic functions variadic part" $TEXTFILE
if [ $? -eq 0 ] ; then
exit 1
fi
exit 0
|
realincubus/clang_plugin_tests
|
1d/variadic_function_call_no_warning/extra_checks.sh
|
Shell
|
mit
| 201 |
docker exec virtuoso isql-v 1111 dba dba /usr/local/virtuoso-opensource/var/lib/virtuoso/db/script.sql
|
mommi84/horn-concerto
|
install-graph-exec.sh
|
Shell
|
mit
| 103 |
#!/bin/bash
#
# Passwords are locked with the `email-lock` key. Since gpg-agent is assumed
# to be running, signing a message with that key makes the passphrase
# available to offlineimap.
#
KEY_NAME=email-lock
FILE=$(mktemp)
dd iflag=fullblock if=/dev/random of=$FILE bs=128 count=20 2>/dev/null
gpg --local-user $KEY_NAME --sign $FILE
STATUS=$?
[[ $STATUS -eq 0 ]] && offlineimap -f INBOX 1>& /dev/null &
shred -u $FILE{,.gpg} 2> /dev/null || true
exit $STATUS
|
crvs/dotfiles
|
bin/fetchinbox.sh
|
Shell
|
mit
| 449 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for DSA-3267-1
#
# Security announcement date: 2015-05-22 00:00:00 UTC
# Script generation date: 2017-01-01 21:07:24 UTC
#
# Operating System: Debian 8 (Jessie)
# Architecture: i386
#
# Vulnerable packages fix on version:
# - chromium-browser:43.0.2357.65-1~deb8u1
#
# Last versions recommended by security team:
# - chromium-browser:43.0.2357.65-1~deb8u1
#
# CVE List:
# - CVE-2015-1251
# - CVE-2015-1252
# - CVE-2015-1253
# - CVE-2015-1254
# - CVE-2015-1255
# - CVE-2015-1256
# - CVE-2015-1257
# - CVE-2015-1258
# - CVE-2015-1259
# - CVE-2015-1260
# - CVE-2015-1261
# - CVE-2015-1262
# - CVE-2015-1263
# - CVE-2015-1264
# - CVE-2015-1265
# - CVE-2015-3910
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade chromium-browser=43.0.2357.65-1~deb8u1 -y
|
Cyberwatch/cbw-security-fixes
|
Debian_8_(Jessie)/i386/2015/DSA-3267-1.sh
|
Shell
|
mit
| 960 |
export_file vimrc $HOME/.vimrc
|
cprodescu/config
|
packages/vim/init.sh
|
Shell
|
mit
| 31 |
#!/bin/sh
if [ $# -lt 2 ]; then
echo "usage: $0 <build dir> <report file>";
exit -1;
fi
echo "###################### context coroutine (stack using memory pool) ###################" >> "$2";
echo "########## Cmd: sample/sample_benchmark_coroutine_mem_pool 10000 1000 200" >> "$2";
"$1"/sample/sample_benchmark_coroutine_mem_pool 10000 1000 200 >> "$2";
echo "########## Cmd: sample/sample_benchmark_coroutines_mem_pool 1000 1000 2048" >> "$2";
"$1"/sample/sample_benchmark_coroutine_mem_pool 1000 1000 2048 >> "$2";
echo "" >> "$2";
echo "###################### context coroutine (stack using default allocator[mmap]) ###################" >> "$2";
echo "########## Cmd: sample/sample_benchmark_coroutine 10000 1000 200" >> "$2";
"$1"/sample/sample_benchmark_coroutine 10000 1000 200 >> "$2";
echo "########## Cmd: sample/sample_benchmark_coroutine 1000 1000 2048" >> "$2";
"$1"/sample/sample_benchmark_coroutine 1000 1000 2048 >> "$2";
echo "" >> "$2";
echo "###################### context coroutine (stack using malloc/free [ptmalloc]) ###################" >> "$2";
echo "########## Cmd: sample/sample_benchmark_coroutine_malloc 10000 1000 200" >> "$2";
"$1"/sample/sample_benchmark_coroutine_malloc 10000 1000 200 >> "$2";
echo "########## Cmd: sample/sample_benchmark_coroutine_malloc 1000 1000 2048" >> "$2";
"$1"/sample/sample_benchmark_coroutine_malloc 1000 1000 2048 >> "$2";
echo "" >> "$2";
echo "###################### task (stack using memory pool) ###################" >> "$2";
echo "########## Cmd: sample/sample_benchmark_task_mem_pool 10000 1000 200" >> "$2";
"$1"/sample/sample_benchmark_task_mem_pool 10000 1000 200 >> "$2";
echo "########## Cmd: sample/sample_benchmark_task_mem_pool 1000 1000 2048" >> "$2";
"$1"/sample/sample_benchmark_task_mem_pool 1000 1000 2048 >> "$2";
echo "" >> "$2";
echo "###################### task (stack using default allocator[mmap]) ###################" >> "$2";
echo "########## Cmd: sample/sample_benchmark_task 10000 1000 200" >> "$2";
"$1"/sample/sample_benchmark_task 10000 1000 200 >> "$2";
echo "########## Cmd: sample/sample_benchmark_task 1000 1000 2048" >> "$2";
"$1"/sample/sample_benchmark_task 1000 1000 2048 >> "$2";
echo "" >> "$2";
echo "###################### task (stack using malloc/free [ptmalloc]) ###################" >> "$2";
echo "########## Cmd: sample/sample_benchmark_task_malloc 10000 1000 200" >> "$2";
"$1"/sample/sample_benchmark_task_malloc 10000 1000 200 >> "$2";
echo "########## Cmd: sample/sample_benchmark_task_malloc 1000 1000 2048" >> "$2";
"$1"/sample/sample_benchmark_task_malloc 1000 1000 2048 >> "$2";
echo "" >> "$2";
|
sotter/libcopp
|
doc/reports/linux_start_benchmark.sh
|
Shell
|
mit
| 2,635 |
for f in /var/*; do
echo "${f##*/}"
done
|
kkdg/bash-jogging
|
02/0223/loop.sh
|
Shell
|
mit
| 42 |
#!/bin/sh
echo "Installing dependencies for building your resume"
brew cask install basictex
sudo tlmgr update --self
sudo tlmgr install fontawesome
sudo tlmgr install nopageno
sudo tlmgr install framed
sudo tlmgr install cantarell
sudo tlmgr install slantsc
sudo tlmgr install numprint
|
lanxx019/resume
|
dependency.sh
|
Shell
|
mit
| 289 |
#!/bin/bash
source ./env.sh
cd $GAE_ROOTDIR
go test ./src/sample1/...
|
uryyyyyyy/GAESample
|
commands/test_sample1.sh
|
Shell
|
mit
| 72 |
#!/bin/sh
#
# Generate a series of sine tones ranging in pitch from C-1 (8.175 Hz)
# to G9 (12543.85395 Hz). All stereo, all 44100 Hz sampling rate.
# File names are xxx.wav, where xxx is the MIDI pitch number for that
# frequency.
#
# Thank you, sox
#
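# sox's "sin %n" pitch is n semitones relative to A4 (440 Hz), and MIDI note
# 69 is A4, so MIDI note $i maps to offset $i - 69. Each command is echoed
# before it runs so the batch can be inspected.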
for (( i = 0 ; i < 128 ; i++ ))
do
echo sox -c 2 -r 44100 -n `printf "%03d.wav" $i` synth 1 sin %`expr $i - 69` gain -10
sox -c 2 -r 44100 -n `printf "%03d.wav" $i` synth 1 sin %`expr $i - 69` gain -10
done
|
rdpoor/mu
|
SoundSets/Tones/gentones.sh
|
Shell
|
mit
| 469 |
#! /bin/bash
cd ./PhysicsJS/
npm install
npm install grunt-cli -g
grunt dist
|
peterdemartini/chad-the-dinosaur
|
run-physicsjs.sh
|
Shell
|
mit
| 78 |