| code (stringlengths 2-1.05M) | repo_name (stringlengths 5-110) | path (stringlengths 3-922) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 2-1.05M) |
|---|---|---|---|---|---|
#!/bin/sh
# Paper.js
#
# This file is part of Paper.js, a JavaScript Vector Graphics Library,
# based on Scriptographer.org and designed to be largely API compatible.
# http://scriptographer.org/
#
# Copyright (c) 2011, Juerg Lehni & Jonathan Puckey
# http://lehni.org/ & http://jonathanpuckey.com/
#
# Distributed under the MIT license. See LICENSE file for details.
#
# All rights reserved.
# Usage:
# build.sh MODE
#
# MODE:
# commented Preprocessed but still formatted and commented
# stripped Formatted but without comments (default)
# compressed Uses UglifyJS to reduce file size
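# Example invocations (illustrative; the script expects to run from the
# build/ directory, next to ../src and ../dist):
#   ./build.sh              # stripped build (default)
#   ./build.sh compressed   # minified with UglifyJS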
if [ $# -eq 0 ]
then
MODE="stripped"
else
MODE=$1
fi
# Create the dist folder if it does not exist yet.
if [ ! -d ../dist/ ]
then
mkdir ../dist/
fi
./preprocess.sh $MODE ../src/paper.js ../dist/paper.js '{ "browser": true }'
#./preprocess.sh $MODE ../src/paper.js ../dist/paper-server.js '{ "server": true }'
| NHQ/paper | build/build.sh | Shell | mit | 904 |
#!/bin/bash
# this script requires an existing installation of Anaconda Python
conda update conda
conda update anaconda
conda update python
conda update --all
| ChicagoBoothAnalytics/DataWrangling_inR | Python/Update-All-Anaconda-Python-Packages.sh | Shell | mit | 161 |
#!/bin/bash
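# Fail fast: abort on the first failed command, including failures inside pipelines.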
set -e -o pipefail
dub build -b release --compiler=$DC --config=${VIBED_DRIVER=libevent}
dub test --compiler=$DC #--config=${VIBED_DRIVER=libevent}
dub test :utils --compiler=$DC #--config=${VIBED_DRIVER}
dub test :data --compiler=$DC #--config=${VIBED_DRIVER}
dub test :core --compiler=$DC --config=${VIBED_DRIVER}
dub test :mail --compiler=$DC #--config=${VIBED_DRIVER}
dub test :http --compiler=$DC #--config=${VIBED_DRIVER}
dub test :diet --compiler=$DC #--config=${VIBED_DRIVER}
dub test :web --compiler=$DC #--config=${VIBED_DRIVER}
dub test :mongodb --compiler=$DC #--config=${VIBED_DRIVER}
dub test :redis --compiler=$DC #--config=${VIBED_DRIVER}
if [ ${BUILD_EXAMPLE=1} -eq 1 ]; then
for ex in $(\ls -1 examples/); do
echo "[INFO] Building example $ex"
(cd examples/$ex && dub build --compiler=$DC && dub clean)
done
fi
if [ ${RUN_TEST=1} -eq 1 ]; then
for ex in `\ls -1 tests/`; do
echo "[INFO] Running test $ex"
(cd tests/$ex && dub --compiler=$DC && dub clean)
done
fi
| redstar/vibe.d | travis-ci.sh | Shell | mit | 1,044 |
#!/bin/bash
# Apache
apt-get install -y apache2 libapache2-mod-fcgid
a2enmod rewrite expires headers proxy proxy_http proxy_fcgi actions fastcgi alias ssl
# OpenSSL
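# Build the POST payload from hosts.txt: the first non-comment hostname becomes
# the certificate CN (cn=...), the remaining hostnames are sent as alternative
# names (acn=...).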
echo -n 'cn=' > /tmp/openssl.payload
cat /vagrant/shell_provisioner/config/hosts.txt | grep -v '^#' | cut -d' ' -f2 | grep -v '^$' | sed -n '1p' | tr '\n' '&' >> /tmp/openssl.payload
echo -n 'acn=' >> /tmp/openssl.payload
cat /vagrant/shell_provisioner/config/hosts.txt | grep -v '^#' | cut -d' ' -f2 | grep -v '^$' | sed -n '1!p' | tr '\n' ' ' >> /tmp/openssl.payload
curl -X POST -d@/tmp/openssl.payload http://controller.testing.intracto.local/ca/createcert.php > cert.tar
tar --no-same-owner -xvf cert.tar
mv ${APP_DOMAIN}.crt /etc/ssl/certs/${APP_DOMAIN}.crt
mv ${APP_DOMAIN}.key /etc/ssl/private/${APP_DOMAIN}.key
mv ${APP_DOMAIN}.all.crt /etc/ssl/certs/${APP_DOMAIN}.all.crt
# Activate vhost
a2dissite 000-default
chmod -R a+rX /var/log/apache2
sed -i 's/640/666/' /etc/logrotate.d/apache2
cat ${CONFIG_PATH}/apache/app.vhost.conf > /etc/apache2/sites-available/${APP_DOMAIN}.conf
a2ensite ${APP_DOMAIN}.conf
service apache2 restart
| JanDC/critical-css-site | shell_provisioner/module/apache.sh | Shell | mit | 1,115 |
#!/bin/bash
#+======================================================================
# $HeadURL: https://svnpub.iter.org/codac/iter/codac/dev/units/m-iter-units-api/branches/codac-core-4.0/src/main/resources/test_template.sh $
# $Id: test_template.sh 33491 2013-01-20 18:21:08Z zagara $
#
# Project : CODAC Core System
#
# Description : Test script
#
# Author : Cosylab
#
# Copyright (c) : 2010-2018 ITER Organization,
# CS 90 046
# 13067 St. Paul-lez-Durance Cedex
# France
#
# This file is part of ITER CODAC software.
# For the terms and conditions of redistribution or use of this software
# refer to the file ITER-LICENSE.TXT located in the top level directory
# of the distribution package.
#
#-======================================================================
#######################################################
# 1.
# Rename this file to an appropriate name.
# The file name must end with ".sh".
#
#######################################################
TEST_SCRIPT_DIR=`dirname $0`
. ${TEST_SCRIPT_DIR}/util.sh
echo "***** Starting (test name) *****"
error=0
#######################################################
# 2.
# The main test logic must be written here.
#
# Anytime an unrecoverable error occurs, this shell
# script can give up by exiting with non-zero value.
#
# When a sustainable error occurs, "error" variable
# should be set to non-zero value and this script can
# continue executing test procedure.
#
# The example is shown below.
#######################################################
#caput ABC:XXX 10 || exit 1
#
#sleep 1
#
#epics_pv_eq ABC:XXX 10 || error=1
#epics_pv_not_eq ABC:XXX 11 || error=1
echo "***** End of (test name) *****"
#######################################################
# 3.
# Each test script must exit with 0 if there is no
# error, otherwise this must exit with non-zero value.
#######################################################
exit ${error}
| css-iter/org.csstudio.iter | products/org.csstudio.iter.alarm.beast.server.product/demo/m-TEST-BEAST/src/test/epics/test_template.sh | Shell | epl-1.0 | 1,985 |
#!/bin/bash
ant_nodes=({101..116})
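# Brace expansion yields octets 101..116; run killall for the given process on each 10.0.0.x node via clush.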
for i in ${ant_nodes[*]}; do clush -w 10.0.0.${i} killall $1; done
| dpaiton/OpenPV | projects/HyPerRetina/shell/killant.sh | Shell | epl-1.0 | 102 |
#!/bin/sh
export BASE_DIR="$( cd "$( dirname "$0" )" && pwd )"
TOP_DIR="$BASE_DIR/.."
if test x"$NO_MAKE" != x"yes"; then
make -C $TOP_DIR > /dev/null || exit 1
fi
if test -z "$CUTTER"; then
CUTTER="`make -s -C $BASE_DIR echo-cutter`"
fi
export CUTTER
if test -z "$VALGRIND"; then
VALGRIND="`make -s -C $BASE_DIR echo-valgrind`"
fi
export VALGRIND
if test -n "$CUTTER"; then
LOG_DIR=$BASE_DIR/log
if [ ! -d $LOG_DIR ]; then
mkdir $LOG_DIR
fi
LOG_FILE=$LOG_DIR/cutter_report.xml
CUTTER_ARGS="--keep-opening-modules --xml-report=$LOG_FILE -v v"
CUTTER_WRAPPER=""
if test x"$CUTTER_DEBUG" = x"yes"; then
CUTTER_WRAPPER="$TOP_DIR/libtool --mode=execute gdb --args"
elif test x"$CUTTER_CHECK_LEAKS" = x"yes"; then
VALGRIND_ARGS="--leak-check=full --show-reachable=yes -v"
CUTTER_WRAPPER="$TOP_DIR/libtool --mode=execute $VALGRIND $VALGRIND_ARGS"
fi
CUTTER_ARGS="$CUTTER_ARGS -s $BASE_DIR"
for i in `find . -name '*.so'|sed 's/.*\\/\\(.*\\).so/\\1/'`; do
$CUTTER_WRAPPER $CUTTER $CUTTER_ARGS "$@" $BASE_DIR -t $i
done
else
echo "cutter executable not found."
fi
| insionng/flare | test/run-tests.travis.sh | Shell | gpl-2.0 | 1,109 |
#!/bin/bash
###########################################################################
## ##
## Set eth connection ##
## ##
## This program is free software; you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation; either version 2 of the License, or ##
## (at your option) any later version. ##
## ##
###########################################################################
test -r /etc/default/network && source /etc/default/network
[ -z $NETWORK_MANAGER ] && NETWORK_MANAGER=ifupdown
source /usr/share/fabui/ext/bash/${NETWORK_MANAGER}_nm_functions.sh
usage()
{
cat << EOF
usage: $0 options
This script configures ethernet connection.
OPTIONS:
-h Show this message
-i Ethernet interface
-D DHCP address mode
-S STATIC address mode
-a IP address (ex: 192.168.0.15)
-n Netmask (ex: 255.255.255.0)
-g Gateway (ex: 192.168.0.1)
-N No nameservers
EOF
}
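# Example invocations (illustrative values):
#   ./set_ethernet.sh -i eth0 -D
#   ./set_ethernet.sh -i eth0 -S -a 192.168.0.15 -n 255.255.255.0 -g 192.168.0.1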
IFACE=
MODE=
IP=
NETMASK=
GATEWAY=
NO_NAMESERVER="no"
while getopts "hDSNAi:a:n:g:" OPTION
do
case $OPTION in
h)
usage
exit 1
;;
D)
MODE="dhcp"
;;
S)
MODE="static"
;;
N)
NO_NAMESERVER="yes"
;;
a)
IP=$OPTARG
;;
n)
NETMASK=$OPTARG
;;
g)
GATEWAY=$OPTARG
;;
i)
IFACE=$OPTARG
;;
?)
usage
exit
;;
esac
done
if [[ -z $MODE ]] || [[ -z $IFACE ]]
then
usage
exit 1
fi
if [[ $MODE == "static" ]]; then
#~ if [[ -z $IP ]] || [[ -z $NETMASK ]] || [[ -z $GATEWAY ]]; then
if [[ -z $IP ]] || [[ -z $NETMASK ]]; then
echo "error: In STATIC mode you must provide ip, netmask and gateway"
usage
exit 1
fi
fi
case $MODE in
dhcp)
config_ethernet_dhcp "$IFACE"
;;
static)
config_ethernet_static "$IFACE" "$IP" "$NETMASK" "$GATEWAY" "$NO_NAMESERVER"
;;
*)
echo "error: unknown mode \'$MODE\'"
usage
;;
esac
| FABtotum/fabui-colibri | fabui/ext/bash/set_ethernet.sh | Shell | gpl-2.0 | 2,587 |
#!/usr/bin/env bash
if [[ -z "$1" ]]; then
echo "Missing Payload URL" 1>&2
exit 1
fi
if [[ -z "$2" ]]; then
echo "Missing output filename " 1>&2
exit 1
fi
# Ensure the payload folder exists
mkdir -p dist/payload
# Basic Authentication
# $3 is a username/password combo
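# Example (hypothetical URL and credentials):
#   ./json-payload.sh https://example.com/payload.json payload.json 'user:pass'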
if [[ -z "$3" ]]; then
curl -f -L $1 -o dist/payload/$2
else
curl -f -u "$3" -L $1 -o dist/payload/$2
fi
| blueliquiddesigns/gravity-forms-pdf-extended | bin/json-payload.sh | Shell | gpl-2.0 | 384 |
#!/bin/sh
#
# Script which tries to bootstrap a Hawk development environment
BASE="$HOME/hawk"
echo "*** Add Virtualization repository"
if grep 'VERSION="Tumbleweed"' < /etc/os-release >/dev/null 2>&1; then
sudo zypper ar http://download.opensuse.org/repositories/Virtualization/openSUSE_Factory/ Virtualization
elif grep 'VERSION="42.1"' < /etc/os-release >/dev/null 2>&1; then
sudo zypper ar http://download.opensuse.org/repositories/Virtualization/openSUSE_Leap_42.1/ Virtualization
elif grep 'VERSION="42.2"' < /etc/os-release >/dev/null 2>&1; then
sudo zypper ar http://download.opensuse.org/repositories/Virtualization/openSUSE_Leap_42.2/ Virtualization
elif grep 'VERSION="42.3"' < /etc/os-release >/dev/null 2>&1; then
sudo zypper ar http://download.opensuse.org/repositories/Virtualization/openSUSE_Leap_42.3/ Virtualization
else
osver="$(grep 'VERSION=' < /etc/os-release)"
echo "Unknown OS version $osver"
exit
fi
echo "*** zypper refresh"
sudo zypper refresh
echo "*** Install development tools"
sudo zypper install git devel_C_C++ ruby-devel vagrant virtualbox nfs-client nfs-kernel-server
cd "$(dirname "$BASE")" || exit
if [ ! -d "$BASE" ]; then
echo "*** Clone hawk repository to $BASE..."
git clone [email protected]:ClusterLabs/hawk "$BASE"
fi
cd "$BASE" || exit
echo "*** Install vagrant-bindfs plugin"
if vagrant plugin list | grep bindfs >/dev/null 2>&1; then
echo "Already installed."
else
vagrant plugin install vagrant-bindfs || exit
fi
echo "*** Starting development VM"
vagrant up webui
| krig/hawk | scripts/bootstrap-development.sh | Shell | gpl-2.0 | 1,528 |
echo "Start Mysql Dump ..."
ssh root@ber "mysqldump -uroot -pDungaum33! mbox > /tmp/mbox.sql"
echo "Dump finished."
ssh root@ber "bzip2 /tmp/mbox.sql"
echo "Start secure copy ..."
scp root@ber:/tmp/mbox.sql.bz2 /tmp
echo "Secure copy finished."
bunzip2 /tmp/mbox.sql.bz2
echo "Start DB import ..."
mysql -uroot -pDungaum33! mbox -e"drop database mbox; create database mbox;use mbox;source /tmp/mbox.sql;"
echo "Import finished."
| AndreasSamjeske/tipmaster | tmsrc/utility/migration/migrateBoxDB.sh | Shell | gpl-2.0 | 428 |
#!/bin/sh
#
#scripts by robin 2013-11-11
#
#silent install oracle database software
#
#
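# The Oracle software owner is assumed to be the first member of the dba group.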
ouser=`groupmems -g dba -l|awk '{print $1}'`
if [ ! -d /robin ];then
mkdir /robin
fi
umount /dev/cdrom >/dev/null 2>&1
umount /robin >/dev/null 2>&1
mount /dev/cdrom /robin >/dev/null 2>&1
ls /robin/p10404530_112030_Linux-x86-64_{1,2}of7.zip
if [ $? -eq 0 ];then
echo "---------------------install 11.2.0.3 starting -----------------------------"
rm /tmp/database -rf >/dev/null 2>&1
unzip /robin/p10404530_112030_Linux-x86-64_1of7.zip -d /tmp >/dev/null 2>&1
unzip /robin/p10404530_112030_Linux-x86-64_2of7.zip -d /tmp >/dev/null 2>&1
source /home/$ouser/.bash_profile
rm $ORACLE_BASE/oraInventory/logs/silentInstall* -rf >/dev/null 2>&1
su - $ouser -c "/tmp/database/runInstaller -silent -responsefile /tmp/database/response/db_install.rsp oracle.install.option=INSTALL_DB_SWONLY ORACLE_BASE=$ORACLE_BASE ORACLE_HOME=$ORACLE_HOME UNIX_GROUP_NAME=oinstall INVENTORY_LOCATION=$ORACLE_BASE/oraInventory oracle.install.db.InstallEdition=EE SELECTED_LANGUAGES=en,zh_CN oracle.install.db.optionalComponents=oracle.rdbms.partitioning:11.2.0.3.0,oracle.oraolap:11.2.0.3.0,oracle.rdbms.dm:11.2.0.3.0,oracle.rdbms.dv:11.2.0.3.0,oracle.rdbms.lbac:11.2.0.3.0,oracle.rdbms.rat:11.2.0.3.0 DECLINE_SECURITY_UPDATES=true oracle.install.db.DBA_GROUP=dba oracle.install.db.OPER_GROUP=dba ORACLE_HOME_NAME=OraDbHome11g"
ocount=1
until [ -e $ORACLE_BASE/oraInventory/logs/silentInstall`date +%Y-%m-%d`*.log ]
do
sleep 2s
ocount=$((ocount+1))
done
sleep 30s
$ORACLE_BASE/oraInventory/orainstRoot.sh
echo y|$ORACLE_HOME/root.sh
sed 's/^ORACLE_HOME_LISTNER=/#ORACLE_HOME_LISTNER=/' $ORACLE_HOME/bin/dbshut -i
sed '/#ORACLE_HOME_LISTNER=/ a ORACLE_HOME_LISTNER=$ORACLE_HOME' $ORACLE_HOME/bin/dbshut -i
sed 's/^ORACLE_HOME_LISTNER=/#ORACLE_HOME_LISTNER=/' $ORACLE_HOME/bin/dbstart -i
sed '/#ORACLE_HOME_LISTNER=/ a ORACLE_HOME_LISTNER=$ORACLE_HOME' $ORACLE_HOME/bin/dbstart -i
echo "---------------------install 11.2.0.3 successefull--------------------------"
else
echo "please mounted database 11.2.0.3 software install CD media to /media dir."
echo "such command as : mount /dev/cdrom /media "
fi
rm /tmp/database -rf > /dev/null 2>&1
umount /robin > /dev/null 2>&1
rm /robin -rf > /dev/null 2>&1
| sdgdsffdsfff/tools-2 | oracle-install/install.sh | Shell | gpl-2.0 | 2,344 |
#!/bin/bash -
# libguestfs
# Copyright (C) 2013 Red Hat Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
set -e
export LANG=C
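# Normalize virtio device names (/dev/vd* -> /dev/sd*) in output so the comparisons below are stable.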
canonical="sed s,/dev/vd,/dev/sd,g"
# Allow the test to be skipped since btrfs is often broken.
if [ -n "$SKIP_TEST_MOUNTABLE_INSPECT_SH" ]; then
echo "$0: skipping test because environment variable is set."
exit 77
fi
if [ "$(guestfish get-backend)" = "uml" ]; then
echo "$0: skipping test because uml backend does not support qcow2"
exit 77
fi
# Bail if btrfs is not available.
if ! guestfish -a /dev/null run : available btrfs; then
echo "$0: skipping test because btrfs is not available"
exit 77
fi
rm -f root.tmp test.qcow2 test.output
# Start with the regular (good) fedora image, modify /etc/fstab
# and then inspect it.
guestfish -- \
disk-create test.qcow2 qcow2 -1 \
backingfile:../guests/fedora-btrfs.img backingformat:raw
# Test that basic inspection works and the expected filesystems are
# found
guestfish -a test.qcow2 -i <<'EOF' | sort | $canonical > test.output
inspect-get-roots | head -1 > root.tmp
<! echo inspect-get-mountpoints "`cat root.tmp`"
EOF
if [ "$(cat test.output)" != "/: btrfsvol:/dev/sda2/root
/boot: /dev/sda1
/home: btrfsvol:/dev/sda2/home" ]; then
echo "$0: error #1: unexpected output from inspect-get-mountpoints"
cat test.output
exit 1
fi
# Additional sanity check: did we get the release name right?
guestfish -a test.qcow2 -i <<'EOF' > test.output
inspect-get-roots | head -1 > root.tmp
<! echo inspect-get-product-name "`cat root.tmp`"
EOF
if [ "$(cat test.output)" != "Fedora release 14 (Phony)" ]; then
echo "$0: error #2: unexpected output from inspect-get-product-name"
cat test.output
exit 1
fi
rm root.tmp
rm test.qcow2
rm test.output
| pombredanne/libguestfs | tests/mountable/test-mountable-inspect.sh | Shell | gpl-2.0 | 2,446 |
#!/bin/sh
mkdir -p mnttmp
rm -f working.dmg
gunzip < template.dmg.gz > working.dmg
hdiutil attach working.dmg -noautoopen -quiet -mountpoint mnttmp
# NOTE: output of hdiutil changes every now and then.
# Verify that this is still working.
DEV=`hdiutil info|tail -1|awk '{print $1}'`
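# A possibly more robust alternative (untested assumption): capture the device
# node from the attach output itself instead of parsing 'hdiutil info', e.g.
#   DEV=$(hdiutil attach working.dmg -noautoopen -mountpoint mnttmp | awk 'NR==1 {print $1}')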
rm -rf mnttmp/Recipes.app
mv ./Recipes.app mnttmp
hdiutil detach ${DEV}
rm -rf mnttmp
rm -f recipes.dmg
hdiutil convert working.dmg -quiet -format UDZO -imagekey zlib-level=9 -o recipes.dmg
rm -f working.dmg
| matthiasclasen/gr | osx/build-osx-installer.sh | Shell | gpl-3.0 | 492 |
cat <<EOF
>>> This driver uses SNA as the default acceleration method. You can try
falling back to UXA if you run into trouble. To do so, save a file with
the following content as /etc/X11/xorg.conf.d/20-intel.conf :
Section "Device"
Identifier "Intel Graphics"
Driver "intel"
Option "AccelMethod" "uxa"
#Option "AccelMethod" "sna"
EndSection
EOF
| mkslack/pkgBuildBot | buildtools/xorgdrv1/xf86-video-intel/doinst.sh | Shell | gpl-3.0 | 419 |
#!/bin/bash
VALUE=$1
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
TMPDIR="/tmp/ps2_${USER}/"
mkdir -p $TMPDIR
cd $TMPDIR
# Download the pre-trained model file
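# flock serializes the download so that concurrent jobs sharing this TMPDIR
# (an assumption about how SLURM tasks may share a node) fetch the file only once.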
flock $TMPDIR -c "test ! -e resnet50_coco_best_v2.0.1.h5 && wget https://github.com/OlafenwaMoses/ImageAI/releases/download/1.0/resnet50_coco_best_v2.0.1.h5"
OUTPUT_DIR=$SCRATCH/PS2/object_recognition_$SLURM_JOBID
mkdir -p $OUTPUT_DIR
source $SCRATCH/PS2/venv/bin/activate
python $SCRIPT_DIR/FirstDetection.py $VALUE $OUTPUT_DIR
| ginolhac/tutorials | sequential/examples/images/run_object_recognition.sh | Shell | gpl-3.0 | 513 |
# ------------------------------------------------------------------------
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
# ==================================================================
# Helper script to run the IBM JMS performance harness against
# the ActiveMQ message broker.
#
# Sample Usage:
# ./perfharness-activemq.sh -d dynamicQueues/FOO -tc jms.r11.PutGet -nt 6
#
# It assumes that the apache-activemq-5.10.0.jar and
# perfharness.jar files are in the current directory. If they are not,
# set the ACTIVEMQ_HOME and PERFHARNESS_HOME env variable to the correct location.
#
# You can download the perfharness.jar file from:
# http://www.alphaworks.ibm.com/tech/perfharness
#
# By default the test connects to the vm://localhost broker.
# To change that, set BROKER_URL to the broker url you want to use.
#
# ==================================================================
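# Example override (illustrative broker URL):
#   BROKER_URL='tcp://localhost:61616' ./perfharness-activemq.sh -d dynamicQueues/FOO -tc jms.r11.PutGet -nt 6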
if [ -z "$PERFHARNESS_HOME" ] ; then
PERFHARNESS_HOME=.
fi
if [ -z "$ACTIVEMQ_HOME" ] ; then
ACTIVEMQ_HOME=../..
fi
if [ -z "$BROKER_URL" ] ; then
BROKER_URL='vm://(broker://()/localhost?useJmx=false)/localhost'
fi
java ${JAVA_OPTIONS} -cp ${ACTIVEMQ_HOME}/apache-activemq-5.10.0.jar:${PERFHARNESS_HOME}/perfharness.jar JMSPerfHarness -pc JNDI -ii org.apache.activemq.jndi.ActiveMQInitialContextFactory -iu $BROKER_URL -cf ConnectionFactory -d dynamic$DESTINATION $@
| ilarischeinin/chipster | ext/applications/apache-activemq-5.10.0/examples/other/perfharness/perfharness-activemq.sh | Shell | gpl-3.0 | 2,190 |
#!/bin/sh
# Ensure "ls --color" properly colors other-writable and sticky directories.
# Before coreutils-6.2, this test would fail, coloring all three
# directories the same as the first one -- but only on a file system
# with dirent.d_type support.
# Copyright (C) 2006-2017 Free Software Foundation, Inc.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
. "${srcdir=.}/tests/init.sh"; path_prepend_ ./src
print_ver_ ls
# Don't let a different umask perturb the results.
umask 22
mkdir d other-writable sticky || framework_failure_
chmod o+w other-writable || framework_failure_
chmod o+t sticky || framework_failure_
TERM=xterm ls --color=always > out || fail=1
cat -A out > o1 || fail=1
mv o1 out || fail=1
cat <<\EOF > exp || fail=1
^[[0m^[[01;34md^[[0m$
^[[34;42mother-writable^[[0m$
out$
^[[37;44msticky^[[0m$
EOF
compare exp out || fail=1
rm exp
# Turn off colors for other-writable dirs and ensure
# we fall back to the color for standard directories.
LS_COLORS="ow=:" ls --color=always > out || fail=1
cat -A out > o1 || fail=1
mv o1 out || fail=1
cat <<\EOF > exp || fail=1
^[[0m^[[01;34md^[[0m$
^[[01;34mother-writable^[[0m$
out$
^[[37;44msticky^[[0m$
EOF
compare exp out || fail=1
Exit $fail
| adtools/coreutils | tests/ls/color-dtype-dir.sh | Shell | gpl-3.0 | 1,803 |
#!/bin/bash
if [[ -z "$LIB_PATH" ]] ; then
echo "ERROR: No se ha declarado la ruta de librerias"
exit 1
fi
. $LIB_PATH/utils.sh
. $LIB_PATH/hard.sh
. $LIB_PATH/parted.sh
| Dte-ba/ett-bots | lib/source.sh | Shell | gpl-3.0 | 177 |
#!/bin/bash
#cd ../molmodel_legacy/simbody
#mkdir build-debug
#cd build-debug
#cmake ..
#make -j4
#sudo make install
#cd ../../
cd ../molmodel_legacy # EU
mkdir build-debug
cd build-debug
cmake ..
make -j12
sudo make install
#cd ../../openmm
#mkdir build-debug
#cd build-debug
#cmake ..
#make -j4
#sudo make install
cd ../../build-debug/
cmake ..
make -j12
| spirilaurentiu/GMolmodel | cmake_regenerateMolmodel.sh | Shell | gpl-3.0 | 364 |
#!/bin/bash
cd `dirname $BASH_SOURCE`
if [ -f log/server.out ]
then
echo 'INFO: Rotating log/server.out ...'
cat log/server.out > log/server.out.1
truncate --size 0 log/server.out
fi
if [ -f log/server.err ]
then
echo 'INFO: Rotating log/server.err ...'
cat log/server.err > log/server.err.1
truncate --size 0 log/server.err
fi
| bazgu/front-node | logrotate.sh | Shell | agpl-3.0 | 354 |
#!/bin/bash
slots=/sys/devices/bone_capemgr.*/slots
dtbs="gpio_hd44780 gpio_leds tlc5946 gpio_buttons spi1 jd-t18003-ctrl"
for a in $dtbs; do
echo $a > $slots
done
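# To verify which overlays were applied (assuming a capemgr-era 3.8 kernel):
#   cat /sys/devices/bone_capemgr.*/slots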
| piranha32/IOoo | examples/apply_overlays.sh | Shell | lgpl-3.0 | 168 |
#!/bin/bash
pandoc --from markdown --to html -c swiss.css -o index.html llamada.md
| guatejug/files | JavaDay2015/CFP/build.sh | Shell | apache-2.0 | 84 |
#!/bin/sh
curl http://127.0.0.1:9991/
| baldrailers/ari | _examples/script/sendDtmf.sh | Shell | apache-2.0 | 39 |
#!/bin/sh
DIR=$(cd $(dirname "$0") && pwd)
cd ${DIR}
case $(uname) in
Linux)
(cd ${DIR} && sudo docker build -t lumifyio/dev dev)
;;
Darwin)
(cd ${DIR} && docker build -t lumifyio/dev dev)
;;
*)
echo "unexpected uname: $(uname)"
exit -1
;;
esac
| Steimel/lumify | docker/build-dev.sh | Shell | apache-2.0 | 281 |
#!/bin/bash
export https_proxy=$http_proxy
export INSTALL_DIR=$HOME/raven_libs/root/opt/raven_libs
rm -Rvf $INSTALL_DIR
./pip_ve_osx.sh
#Create raven environment script.
mkdir -p $HOME/raven_libs/root/opt/raven_libs/environments
PROFILE_FILE=$HOME/raven_libs/root/opt/raven_libs/environments/raven_libs_profile
cat - > $PROFILE_FILE << RAVEN_PROFILE
source /opt/raven_libs/bin/activate
RAVEN_PROFILE
chmod +x $PROFILE_FILE
mkdir -p $HOME/raven_libs/scripts
cat - > $HOME/raven_libs/scripts/preflight <<PREFLIGHT
#!/bin/bash
rm -Rf /opt/raven_libs/
PREFLIGHT
chmod +x $HOME/raven_libs/scripts/preflight
rm -Rf raven_pip.pkg
pkgbuild --root $HOME/raven_libs/root --identifier raven_libs --scripts $HOME/raven_libs/scripts raven_pip.pkg
| idaholab/raven | developer_tools/packaging/pip_package.sh | Shell | apache-2.0 | 741 |
#!/bin/bash
set -e
case "$1" in
java8)
echo "https://github.com/adoptium/temurin8-binaries/releases/download/jdk8u312-b07/OpenJDK8U-jdk_x64_linux_hotspot_8u312b07.tar.gz"
;;
java11)
echo "https://github.com/adoptium/temurin11-binaries/releases/download/jdk-11.0.13%2B8/OpenJDK11U-jdk_x64_linux_hotspot_11.0.13_8.tar.gz"
;;
java17)
echo "https://github.com/adoptium/temurin17-binaries/releases/download/jdk-17.0.1%2B12/OpenJDK17U-jdk_x64_linux_hotspot_17.0.1_12.tar.gz"
;;
*)
echo $"Unknown java version"
exit 1
esac
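# Example usage (illustrative):
#   JDK_URL=$(./get-jdk-url.sh java17)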
| Buzzardo/spring-boot | ci/images/get-jdk-url.sh | Shell | apache-2.0 | 538 |
#!/usr/bin/env bash
TAGS=`git describe --abbrev=0 --tags`
VERSIONS=`echo $TAGS | sed 's/V//'`
RELEASE=`echo $CI_JOB_ID`
run_source() {
./util/makesrc $TAGS
}
run_build() {
mkdir -p ~/rpmbuild/SOURCES/
mv -f ~/nano-${VERSIONS}.tar.gz ~/rpmbuild/SOURCES/.
scl enable llvm-toolset-7 devtoolset-7 'rpmbuild -ba nanocurrency.spec'
scl enable llvm-toolset-7 devtoolset-7 'rpmbuild -ba nanocurrency-beta.spec'
}
run_update() {
for file in ./nanocurrency*.in; do
outfile="$(echo "${file}" | sed 's@\.in$@@')"
echo "Updating \"${outfile}\"..."
rm -f "${file}.new"
awk -v srch="@VERSION@" -v repl="$VERSIONS" -v srch2="@RELEASE@" -v repl2="$RELEASE" '{ sub(srch,repl,$0); sub(srch2,repl2, $0); print $0}' < ${file} > ${file}.new
rm -fr "${outfile}"
cat "${file}.new" > "${outfile}"
rm -f "${file}.new"
chmod 755 "${outfile}"
done
}
run_update
run_source
run_build
| clemahieu/raiblocks | ci/build-centos.sh | Shell | bsd-2-clause | 928 |
#!/bin/bash
#
# Copyright (C) 2010 Intel Corporation
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# Author:
# Zhang, GeX <[email protected]>
source $(dirname $0)/Common
widgets="widget-version-1.wgt widget-version-1-1.wgt widget-version-2.wgt \
widget-version-2-1.wgt widget-version-3.wgt widget-version-3-1.wgt"
for wgt in $widgets; do
    func_install_changename $wgt
    if [ $? -eq 1 ];then
        echo "Installation of $wgt failed"
        exit 1
    fi
done
for wgt in $widgets; do
    func_uninstall_changename $wgt
done
exit 0
| xiaojunwu/crosswalk-test-suite | wrt/tct-pm-wrt-tests/scripts/wrt_pm_OfflineUpdate_MultiApp.sh | Shell | bsd-3-clause | 1,837 |
#!/bin/sh
for package in $(go list ./...)
do
DIR=$(echo $package | sed 's%github.com/materials-commons/gohandy%.%')
mkdir -p docs/$DIR
godoc $package > docs/$DIR/package.txt
done
| materials-commons/gohandy | makedocs.sh | Shell | mit | 192 |
#!/bin/bash
#
# vercmptest - a test suite for the vercmp/libalpm program
#
# Copyright (c) 2009-2018 by Pacman Development Team <[email protected]>
# Copyright (c) 2008 by Dan McGee <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
source "$(dirname "$0")"/../tap.sh || exit 1
# default binary if one was not specified as $1
bin=${1:-${PMTEST_UTIL_DIR}vercmp}
# use first arg as our binary if specified
if ! type -p "$bin" &>/dev/null; then
tap_bail "vercmp binary ($bin) could not be located"
exit 1
fi
# args:
# runtest ver1 ver2 expected
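# expected: -1, 0 or 1 (ver1 older than, equal to, or newer than ver2)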
tap_runtest() {
local ver1=$1 ver2=$2 exp=$3
tap_is_str "$($bin "$ver1" "$ver2")" "$exp" "$ver1 $ver2"
# and run its mirror case just to be sure
(( exp *= -1 ))
tap_is_str "$($bin "$ver2" "$ver1")" "$exp" "$ver2 $ver1"
}
tap_plan 92
# all similar length, no pkgrel
tap_runtest 1.5.0 1.5.0 0
tap_runtest 1.5.1 1.5.0 1
# mixed length
tap_runtest 1.5.1 1.5 1
# with pkgrel, simple
tap_runtest 1.5.0-1 1.5.0-1 0
tap_runtest 1.5.0-1 1.5.0-2 -1
tap_runtest 1.5.0-1 1.5.1-1 -1
tap_runtest 1.5.0-2 1.5.1-1 -1
# with pkgrel, mixed lengths
tap_runtest 1.5-1 1.5.1-1 -1
tap_runtest 1.5-2 1.5.1-1 -1
tap_runtest 1.5-2 1.5.1-2 -1
# mixed pkgrel inclusion
tap_runtest 1.5 1.5-1 0
tap_runtest 1.5-1 1.5 0
tap_runtest 1.1-1 1.1 0
tap_runtest 1.0-1 1.1 -1
tap_runtest 1.1-1 1.0 1
# alphanumeric versions
tap_runtest 1.5b-1 1.5-1 -1
tap_runtest 1.5b 1.5 -1
tap_runtest 1.5b-1 1.5 -1
tap_runtest 1.5b 1.5.1 -1
# from the manpage
tap_runtest 1.0a 1.0alpha -1
tap_runtest 1.0alpha 1.0b -1
tap_runtest 1.0b 1.0beta -1
tap_runtest 1.0beta 1.0rc -1
tap_runtest 1.0rc 1.0 -1
# going crazy? alpha-dotted versions
tap_runtest 1.5.a 1.5 1
tap_runtest 1.5.b 1.5.a 1
tap_runtest 1.5.1 1.5.b 1
# alpha dots and dashes
tap_runtest 1.5.b-1 1.5.b 0
tap_runtest 1.5-1 1.5.b -1
# same/similar content, differing separators
tap_runtest 2.0 2_0 0
tap_runtest 2.0_a 2_0.a 0
tap_runtest 2.0a 2.0.a -1
tap_runtest 2___a 2_a 1
# epoch included version comparisons
tap_runtest 0:1.0 0:1.0 0
tap_runtest 0:1.0 0:1.1 -1
tap_runtest 1:1.0 0:1.0 1
tap_runtest 1:1.0 0:1.1 1
tap_runtest 1:1.0 2:1.1 -1
# epoch + sometimes present pkgrel
tap_runtest 1:1.0 0:1.0-1 1
tap_runtest 1:1.0-1 0:1.1-1 1
# epoch included on one version
tap_runtest 0:1.0 1.0 0
tap_runtest 0:1.0 1.1 -1
tap_runtest 0:1.1 1.0 1
tap_runtest 1:1.0 1.0 1
tap_runtest 1:1.0 1.1 1
tap_runtest 1:1.1 1.1 1
tap_finish
| eli-schwartz/pacman | test/util/vercmptest.sh | Shell | gpl-2.0 | 3,201 |
#!/bin/bash
if [ "$1" == "clean" ]; then
read -p "Do you really want to delete existing packages? [y/N]"
[[ $REPLY == [yY] ]] && { rm -fr zoneminder*.build zoneminder*.changes zoneminder*.deb; echo "Existing package files deleted"; } || { echo "Packages have NOT been deleted"; }
exit;
fi
DATE=`date -R`
DISTRO=$1
SNAPSHOT=$2
if [ "$SNAPSHOT" == "stable" ]; then
SNAPSHOT="";
fi;
TYPE=$3
if [ "$TYPE" == "" ]; then
TYPE="source";
fi;
BRANCH=$4
if [ ! -d 'zoneminder_release' ]; then
git clone https://github.com/ZoneMinder/ZoneMinder.git zoneminder_release
fi;
if [ "$BRANCH" != "" ]; then
cd zoneminder_release
if [ "$BRANCH" == "stable" ]; then
BRANCH=$(git describe --tags $(git rev-list --tags --max-count=1));
echo "Latest stable branch is $BRANCH";
fi
git checkout $BRANCH
cd ../
fi;
VERSION=`cat zoneminder_release/version`
if [ "$VERSION" == "" ]; then
exit 1;
fi;
echo "Doing $TYPE release zoneminder_$VERSION-$DISTRO-$SNAPSHOT";
mv zoneminder_release zoneminder_$VERSION-$DISTRO-$SNAPSHOT.orig
cd zoneminder_$VERSION-$DISTRO-$SNAPSHOT.orig
git submodule init
git submodule update --init --recursive
if [ $DISTRO == "trusty" ]; then
ln -sf distros/ubuntu1204 debian
else
ln -sf distros/ubuntu1604 debian
fi;
# Auto-install all ZoneMinder's dependencies using the Debian control file
sudo apt-get install devscripts equivs
sudo mk-build-deps -ir ./debian/control
if [ -z `hostname -d` ] ; then
AUTHOR="`getent passwd $USER | cut -d ':' -f 5 | cut -d ',' -f 1` <`whoami`@`hostname`.local>"
else
AUTHOR="`getent passwd $USER | cut -d ':' -f 5 | cut -d ',' -f 1` <`whoami`@`hostname`>"
fi
cat <<EOF > debian/changelog
zoneminder ($VERSION-$DISTRO-$SNAPSHOT) $DISTRO; urgency=medium
*
-- $AUTHOR $DATE
EOF
#rm -rf .git
#rm .gitignore
#cd ../
#tar zcf zoneminder_$VERSION-$DISTRO.orig.tar.gz zoneminder_$VERSION-$DISTRO-$SNAPSHOT.orig
#cd zoneminder_$VERSION-$DISTRO-$SNAPSHOT.orig
if [ $TYPE == "binary" ]; then
debuild
else
if [ $TYPE == "local" ]; then
debuild -i -us -uc -b
else
debuild -S -sa
fi;
fi;
cd ../
read -p "Do you want to keep the checked out version of Zoneminder (incase you want to modify it later) [y/N]"
[[ $REPLY == [yY] ]] && { mv zoneminder_$VERSION-$DISTRO-$SNAPSHOT.orig zoneminder_release; echo "The checked out copy is preserved in zoneminder_release"; } || { rm -fr zoneminder_$VERSION-$DISTRO-$SNAPSHOT.orig; echo "The checked out copy has been deleted"; }
echo "Done!"
| schrorg/ZoneMinder | utils/do_debian_package.sh | Shell | gpl-2.0 | 2,458 |
#!/usr/bin/env bash
# vim: set sw=4 sts=4 et :
# Copyright (c) 2006, 2007, 2008 Ciaran McCreesh
#
# Based in part upon ebuild.sh from Portage, which is Copyright 1995-2005
# Gentoo Foundation and distributed under the terms of the GNU General
# Public License v2.
#
# This file is part of the Paludis package manager. Paludis is free software;
# you can redistribute it and/or modify it under the terms of the GNU General
# Public License, version 2, as published by the Free Software Foundation.
#
# Paludis is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59 Temple
# Place, Suite 330, Boston, MA 02111-1307 USA
default_pkg_prerm()
{
verify_not_called_cross_phase ${FUNCNAME[0]#default_}
:
}
pkg_prerm()
{
default "$@"
}
exheres_internal_prerm()
{
local old_sandbox_write="${SANDBOX_WRITE}"
if [[ -z "${PALUDIS_DO_NOTHING_SANDBOXY}" ]]; then
SANDBOX_WRITE="${SANDBOX_WRITE+${SANDBOX_WRITE}:}${ROOT%/}/"
esandbox check >/dev/null 2>&1 && esandbox allow "${ROOT}"
fi
if hasq "prerm" ${SKIP_FUNCTIONS} ; then
ebuild_section "Skipping pkg_prerm (SKIP_FUNCTIONS)"
else
ebuild_section "Starting pkg_prerm"
pkg_prerm
ebuild_section "Done pkg_prerm"
fi
if [[ -z "${PALUDIS_DO_NOTHING_SANDBOXY}" ]]; then
SANDBOX_WRITE="${old_sandbox_write}"
esandbox check >/dev/null 2>&1 && esandbox disallow "${ROOT}"
fi
true
}
| chutzimir/paludis | paludis/repositories/e/ebuild/exheres-0/pkg_prerm.bash | Shell | gpl-2.0 | 1,755 |
# Refs of upstream : main(A)
# Refs of workbench: main(A) tags/v123
# git push : next(A) refs/for/main/topic(A)
test_expect_success "proc-receive: no hook, fail to push special ref ($PROTOCOL)" '
test_must_fail git -C workbench push origin \
HEAD:next \
HEAD:refs/for/main/topic \
>out-$test_count 2>&1 &&
make_user_friendly_and_stable_output <out-$test_count >actual &&
cat >expect <<-EOF &&
remote: # pre-receive hook
remote: pre-receive< <ZERO-OID> <COMMIT-A> refs/heads/next
remote: pre-receive< <ZERO-OID> <COMMIT-A> refs/for/main/topic
remote: error: cannot find hook "proc-receive"
remote: # post-receive hook
remote: post-receive< <ZERO-OID> <COMMIT-A> refs/heads/next
To <URL/of/upstream.git>
* [new branch] HEAD -> next
! [remote rejected] HEAD -> refs/for/main/topic (fail to run proc-receive hook)
EOF
test_cmp expect actual &&
test_cmp_refs -C "$upstream" <<-EOF
<COMMIT-A> refs/heads/main
<COMMIT-A> refs/heads/next
EOF
'
# Refs of upstream : main(A) next(A)
# Refs of workbench: main(A) tags/v123
test_expect_success "cleanup ($PROTOCOL)" '
git -C "$upstream" update-ref -d refs/heads/next
'
# Refs of upstream : main(A)
# Refs of workbench: main(A) tags/v123
# git push --atomic: (B) next(A) refs/for/main/topic(A)
test_expect_success "proc-receive: no hook, all failed for atomic push ($PROTOCOL)" '
test_must_fail git -C workbench push --atomic origin \
$B:main \
HEAD:next \
HEAD:refs/for/main/topic >out-$test_count 2>&1 &&
make_user_friendly_and_stable_output <out-$test_count >actual &&
cat >expect <<-EOF &&
remote: # pre-receive hook
remote: pre-receive< <COMMIT-A> <COMMIT-B> refs/heads/main
remote: pre-receive< <ZERO-OID> <COMMIT-A> refs/heads/next
remote: pre-receive< <ZERO-OID> <COMMIT-A> refs/for/main/topic
remote: error: cannot find hook "proc-receive"
To <URL/of/upstream.git>
! [remote rejected] <COMMIT-B> -> main (fail to run proc-receive hook)
! [remote rejected] HEAD -> next (fail to run proc-receive hook)
! [remote rejected] HEAD -> refs/for/main/topic (fail to run proc-receive hook)
EOF
test_cmp expect actual &&
test_cmp_refs -C "$upstream" <<-EOF
<COMMIT-A> refs/heads/main
EOF
'
| felipec/git | t/t5411/test-0011-no-hook-error.sh | Shell | gpl-2.0 | 2,248 |
#! /bin/sh
# Copyright (C) 2001-2014 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Make sure nobase_* works for libtool libraries and programs as well.
# This is just the libtool equivalent of 'nobase.sh', split up to allow
# greater exposure of that test.
required='cc libtoolize'
. test-init.sh
cat >> configure.ac <<'EOF'
AC_PROG_CC
AM_PROG_AR
AC_PROG_LIBTOOL
AC_OUTPUT
EOF
cat > Makefile.am << 'EOF'
fooexecdir = $(prefix)/foo
fooexec_LTLIBRARIES = sub/libbase.la
nobase_fooexec_LTLIBRARIES = sub/libnobase.la
fooexec_PROGRAMS = sub/base
nobase_fooexec_PROGRAMS = sub/nobase
sub_libbase_la_SOURCES = source2.c
sub_libnobase_la_SOURCES = source2.c
sub_base_SOURCES = source.c
sub_nobase_SOURCES = source.c
test-install-data: install-data
test ! -f inst/foo/sub/libnobase.la
test ! -f inst/foo/libbase.la
test-install-exec: install-exec
test -f inst/foo/sub/libnobase.la
test ! -f inst/foo/libnobase.la
test -f inst/foo/libbase.la
.PHONY: test-install-exec test-install-data
EOF
mkdir sub
cat >source.c <<'EOF'
int main (int argc, char *argv[])
{
return 0;
}
EOF
cp source.c source2.c
rm -f install-sh
libtoolize
$ACLOCAL
$AUTOCONF
$AUTOMAKE -a --copy
./configure --prefix "$(pwd)/inst" --program-prefix=p
$MAKE
$MAKE test-install-data
$MAKE test-install-exec
$MAKE uninstall
test $(find inst/foo -type f -print | wc -l) -eq 0
$MAKE install-strip
# Likewise, in a VPATH build.
$MAKE uninstall
$MAKE distclean
mkdir build
cd build
../configure --prefix "$(pwd)/inst" --program-prefix=p
$MAKE
$MAKE test-install-data
$MAKE test-install-exec
$MAKE uninstall
test $(find inst/foo -type f -print | wc -l) -eq 0
:
| kuym/openocd | tools/automake-1.15/t/nobase-libtool.sh | Shell | gpl-2.0 | 2,246 |
#!/bin/sh
./sim_isp_orig --out /tmp/out --simtime 10
./sim_background --dir /tmp/ --tau 10 --simtime 10
| idiot-z/auryn | examples/tests.sh | Shell | gpl-3.0 | 105 |
#!/usr/bin/env bash
set -eux
ANSIBLE_ROLES_PATH=../ ansible-playbook test.yml -e '@../../integration_config.yml' -i inventory "$@"
| aperigault/ansible | test/integration/targets/ansible-runner/runme.sh | Shell | gpl-3.0 | 133 |
#!/bin/bash -e
oc project demo &>/dev/null
for template in $(oc get templates -n openshift | sed 1d | awk '{print $1;}'); do
echo $template
if oc process -n openshift $template | oc create -f - &>/dev/null; then
for bc in $(oc get bc | sed 1d | awk '{print $1;}'); do
oc start-build --follow $bc
done
fi
oc delete all --all &>/dev/null
done
oc project default &>/dev/null
| ianmiell/demobuilder | layers/rhel-server-7:gui:ose-3.0:offline/@target/dobuilds.sh | Shell | gpl-3.0 | 395 |
#!/bin/sh
# Show that we've eliminated most of ls' failing getxattr syscalls,
# regardless of how many files are in a directory we list.
# This test is skipped on systems that lack LD_PRELOAD support; that's fine.
# Similarly, on a system that lacks getxattr altogether, skipping it is fine.
# Copyright (C) 2012-2013 Free Software Foundation, Inc.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
. "${srcdir=.}/tests/init.sh"; path_prepend_ ./src
print_ver_ ls
require_gcc_shared_
# Replace each getxattr and lgetxattr call with a call to these stubs.
# Count those and write the total number of calls to the file "x"
# via a global destructor.
cat > k.c <<'EOF' || framework_failure_
#include <errno.h>
#include <stdio.h>
#include <sys/types.h>
static unsigned long int n_calls;
static void __attribute__ ((destructor))
print_call_count (void)
{
FILE *fp = fopen ("x", "w"); if (!fp) return;
fprintf (fp, "%lu\n", n_calls); fclose (fp);
}
static ssize_t incr () { ++n_calls; errno = ENOTSUP; return -1; }
ssize_t getxattr (const char *path, const char *name, void *value, size_t size)
{ return incr (); }
ssize_t lgetxattr(const char *path, const char *name, void *value, size_t size)
{ return incr (); }
EOF
# Then compile/link it:
$CC -shared -fPIC -O2 k.c -o k.so \
|| framework_failure_ 'failed to build shared library'
# Create a few files:
seq 20 | xargs touch || framework_failure_
# Finally, run the test:
LD_PRELOAD=./k.so ls --color=always -l . || fail=1
test -f x || skip_ "internal test failure: maybe LD_PRELOAD doesn't work?"
# Ensure that there were no more than 3 *getxattr calls.
n_calls=$(cat x)
test "$n_calls" -le 3 || fail=1
Exit $fail
| mmayer/coreutils | tests/ls/getxattr-speedup.sh | Shell | gpl-3.0 | 2,262 |
#!/bin/bash -eu
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
exec ./build-aux/oss-fuzz.sh
| skia-dev/oss-fuzz | projects/spice-usbredir/build.sh | Shell | apache-2.0 | 704 |
#!/bin/bash
#
# This scripts starts the OpenShift server with a default configuration.
# No registry or router is setup.
# It is intended to test cli commands that may require docker and therefore
# cannot be run under Travis.
source "$(dirname "${BASH_SOURCE}")/../../hack/lib/init.sh"
os::util::environment::setup_time_vars
os::build::setup_env
function cleanup()
{
out=$?
docker rmi test/scratchimage
cleanup_openshift
os::log::info "Exiting"
return "${out}"
}
trap "exit" INT TERM
trap "cleanup" EXIT
os::log::info "Starting server"
os::util::environment::use_sudo
os::util::environment::setup_all_server_vars "test-extended/cmd/"
os::log::system::start
os::start::configure_server
os::start::server
export KUBECONFIG="${ADMIN_KUBECONFIG}"
oc login -u system:admin -n default
# let everyone be able to see stuff in the default namespace
oadm policy add-role-to-group view system:authenticated -n default
os::start::registry
oc rollout status dc/docker-registry
docker_registry="$( oc get service/docker-registry -n default -o jsonpath='{.spec.clusterIP}:{.spec.ports[0].port}' )"
os::test::junit::declare_suite_start "extended/cmd"
os::test::junit::declare_suite_start "extended/cmd/new-app"
os::log::info "Running newapp extended tests"
oc login "${MASTER_ADDR}" -u new-app -p password --certificate-authority="${MASTER_CONFIG_DIR}/ca.crt"
oc new-project new-app
oc delete all --all
# create a local-only docker image for testing
# image is removed in cleanup()
tmp=$(mktemp -d)
pushd "${tmp}"
cat <<-EOF >> Dockerfile
FROM scratch
EXPOSE 80
EOF
docker build -t test/scratchimage .
popd
rm -rf "${tmp}"
# ensure a local-only image gets a docker image(not imagestream) reference created.
VERBOSE=true os::cmd::expect_success "oc new-project test-scratchimage"
os::cmd::expect_success "oc new-app test/scratchimage~https://github.com/openshift/ruby-hello-world.git --strategy=docker"
os::cmd::expect_success_and_text "oc get bc ruby-hello-world -o jsonpath={.spec.strategy.dockerStrategy.from.kind}" "DockerImage"
os::cmd::expect_success_and_text "oc get bc ruby-hello-world -o jsonpath={.spec.strategy.dockerStrategy.from.name}" "test/scratchimage:latest"
os::cmd::expect_success "oc delete project test-scratchimage"
VERBOSE=true os::cmd::expect_success "oc project new-app"
# error due to partial match
os::cmd::expect_failure_and_text "oc new-app test/scratchimage2 -o yaml" "partial match"
# success with exact match
os::cmd::expect_success "oc new-app test/scratchimage"
os::log::info "newapp: ok"
os::test::junit::declare_suite_end
os::test::junit::declare_suite_start "extended/cmd/variable-expansion"
os::log::info "Running env variable expansion tests"
VERBOSE=true os::cmd::expect_success "oc new-project envtest"
os::cmd::expect_success "oc create -f test/extended/testdata/test-env-pod.json"
os::cmd::try_until_text "oc get pods" "Running"
os::cmd::expect_success_and_text "oc exec test-pod env" "podname=test-pod"
os::cmd::expect_success_and_text "oc exec test-pod env" "podname_composed=test-pod_composed"
os::cmd::expect_success_and_text "oc exec test-pod env" "var1=value1"
os::cmd::expect_success_and_text "oc exec test-pod env" "var2=value1"
os::cmd::expect_success_and_text "oc exec test-pod ps ax" "sleep 120"
os::log::info "variable-expansion: ok"
os::test::junit::declare_suite_end
os::test::junit::declare_suite_start "extended/cmd/image-pull-secrets"
os::log::info "Running image pull secrets tests"
VERBOSE=true os::cmd::expect_success "oc login '${MASTER_ADDR}' -u pull-secrets-user -p password --certificate-authority='${MASTER_CONFIG_DIR}/ca.crt'"
# create a new project and push a busybox image in there
VERBOSE=true os::cmd::expect_success "oc new-project image-ns"
os::cmd::expect_success "oc delete all --all"
token="$( oc sa get-token builder )"
os::cmd::expect_success "docker login -u imagensbuilder -p ${token} -e [email protected] ${docker_registry}"
os::cmd::expect_success "oc import-image busybox:latest --confirm"
os::cmd::expect_success "docker pull busybox"
os::cmd::expect_success "docker tag docker.io/busybox:latest ${docker_registry}/image-ns/busybox:latest"
os::cmd::expect_success "docker push ${docker_registry}/image-ns/busybox:latest"
os::cmd::expect_success "docker rmi -f ${docker_registry}/image-ns/busybox:latest"
DOCKER_CONFIG_JSON="${HOME}/.docker/config.json"
VERBOSE=true os::cmd::expect_success "oc new-project dc-ns"
os::cmd::expect_success "oc delete all --all"
os::cmd::expect_success "oc delete secrets --all"
os::cmd::expect_success "oc secrets new image-ns-pull .dockerconfigjson=${DOCKER_CONFIG_JSON}"
os::cmd::expect_success "oc secrets new-dockercfg image-ns-pull-old [email protected] --docker-username=imagensbuilder --docker-server=${docker_registry} --docker-password=${token}"
os::cmd::expect_success "oc process -f test/extended/testdata/image-pull-secrets/pod-with-no-pull-secret.yaml --value=DOCKER_REGISTRY=${docker_registry} | oc create -f - "
os::cmd::try_until_text "oc describe pod/no-pull-pod" "Back-off pulling image"
os::cmd::expect_success "oc delete pods --all"
os::cmd::expect_success "oc process -f test/extended/testdata/image-pull-secrets/pod-with-new-pull-secret.yaml --value=DOCKER_REGISTRY=${docker_registry} | oc create -f - "
os::cmd::try_until_text "oc get pods/new-pull-pod -o jsonpath='{.status.containerStatuses[0].imageID}'" "docker"
os::cmd::expect_success "oc delete pods --all"
os::cmd::expect_success "docker rmi -f ${docker_registry}/image-ns/busybox:latest"
os::cmd::expect_success "oc process -f test/extended/testdata/image-pull-secrets/pod-with-old-pull-secret.yaml --value=DOCKER_REGISTRY=${docker_registry} | oc create -f - "
os::cmd::try_until_text "oc get pods/old-pull-pod -o jsonpath='{.status.containerStatuses[0].imageID}'" "docker"
os::cmd::expect_success "oc delete pods --all"
os::cmd::expect_success "docker rmi -f ${docker_registry}/image-ns/busybox:latest"
os::cmd::expect_success "oc process -f test/extended/testdata/image-pull-secrets/dc-with-old-pull-secret.yaml --value=DOCKER_REGISTRY=${docker_registry} | oc create -f - "
os::cmd::try_until_text "oc get pods/my-dc-old-1-hook-pre -o jsonpath='{.status.containerStatuses[0].imageID}'" "docker"
os::cmd::expect_success "oc delete all --all"
os::cmd::expect_success "docker rmi -f ${docker_registry}/image-ns/busybox:latest"
os::cmd::expect_success "oc process -f test/extended/testdata/image-pull-secrets/dc-with-new-pull-secret.yaml --value=DOCKER_REGISTRY=${docker_registry} | oc create -f - "
os::cmd::try_until_text "oc get pods/my-dc-1-hook-pre -o jsonpath='{.status.containerStatuses[0].imageID}'" "docker"
os::cmd::expect_success "oc delete all --all"
os::cmd::expect_success "docker rmi -f ${docker_registry}/image-ns/busybox:latest"
os::test::junit::declare_suite_end
# Test to see that we're reporting the correct commit being used by the build
os::test::junit::declare_suite_start "extended/cmd/new-build"
os::cmd::expect_success "oc new-build https://github.com/openshift/ruby-hello-world.git#bd94cbb228465d30d9d3430e80b503757a2a1d97"
os::cmd::try_until_text "oc logs builds/ruby-hello-world-1" "Commit:[[:space:]]*bd94cbb228465d30d9d3430e80b503757a2a1d97"
os::cmd::expect_success "oc delete all --all"
os::test::junit::declare_suite_end
os::test::junit::declare_suite_start "extended/cmd/service-signer"
# check to make sure that service serving cert signing works correctly
# nginx currently needs to run as root
os::cmd::expect_success "oc login -u system:admin -n default"
os::cmd::expect_success "oadm policy add-scc-to-user anyuid system:serviceaccount:service-serving-cert-generation:default"
os::cmd::expect_success "oc login -u serving-cert -p asdf"
VERBOSE=true os::cmd::expect_success "oc new-project service-serving-cert-generation"
os::cmd::expect_success 'oc create dc nginx --image=nginx -- sh -c "nginx -c /etc/nginx/nginx.conf && sleep 86400"'
os::cmd::expect_success "oc expose dc/nginx --port=443"
os::cmd::expect_success "oc annotate svc/nginx service.alpha.openshift.io/serving-cert-secret-name=nginx-ssl-key"
os::cmd::expect_success "oc volumes dc/nginx --add --secret-name=nginx-ssl-key --mount-path=/etc/serving-cert"
os::cmd::expect_success "oc create configmap default-conf --from-file=test/extended/testdata/service-serving-cert/nginx-serving-cert.conf"
os::cmd::expect_success "oc set volumes dc/nginx --add --configmap-name=default-conf --mount-path=/etc/nginx/conf.d"
os::cmd::try_until_text "oc get pods -l deployment-config.name=nginx" 'Running'
# only show single pods in status if they are really single
os::cmd::expect_success 'oc create -f test/integration/testdata/test-deployment-config.yaml'
os::cmd::try_until_text 'oc status' 'dc\/test-deployment-config deploys docker\.io\/openshift\/origin-pod:latest' "$(( 2 * TIME_MIN ))"
os::cmd::try_until_text 'oc status' 'deployment #1 deployed.*- 1 pod' "$(( 2 * TIME_MIN ))"
os::cmd::expect_success_and_not_text 'oc status' 'pod\/test-deployment-config-1-[0-9a-z]{5} runs openshift\/origin-pod'
# break mac os
service_ip=$(oc get service/nginx -o=jsonpath={.spec.clusterIP})
os::cmd::try_until_success 'oc run --restart=Never --generator=run-pod/v1 --image=centos centos -- bash -c "curl --cacert /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt https://nginx.service-serving-cert-generation.svc:443"'
os::cmd::try_until_text 'oc get pods/centos -o jsonpath={.status.phase}' "Succeeded"
os::cmd::expect_success_and_text 'oc logs pods/centos' "Welcome to nginx"
os::test::junit::declare_suite_end
os::test::junit::declare_suite_end
os::test::junit::declare_suite_start "extended/cmd/oc-on-kube"
os::cmd::expect_success "oc login -u system:admin -n default"
os::cmd::expect_success "oc new-project kube"
os::cmd::expect_success "oc create -f test/testdata/kubernetes-server/apiserver.yaml"
os::cmd::try_until_text "oc get pods/kube-apiserver -o 'jsonpath={.status.conditions[?(@.type == "Ready")].status}'" "True"
os::cmd::try_until_text "oc get pods/kube-apiserver -o 'jsonpath={.status.podIP}'" "172"
kube_ip="$(oc get pods/kube-apiserver -o 'jsonpath={.status.podIP}')"
kube_kubectl="${tmp}/kube-kubeconfig"
os::cmd::try_until_text "oc login --config ${kube_kubectl}../kube-kubeconfig https://${kube_ip}:443 --token=secret --insecure-skip-tls-verify=true --loglevel=8" ' as "secret" using the token provided.'
os::test::junit::declare_suite_end
| jeffvance/origin | test/extended/cmd.sh | Shell | apache-2.0 | 10,393 |
#!/bin/bash
##
# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# For those usages not covered by the Apache License, Version 2.0 please
# contact with: [email protected]
##
#
#author Alfonso Tierno
#
#script to test openvim with the creation of flavors and interfaces
#in cloud style
#
echo " Creates 1 flavor, 3 nets, 2 VMs (US)
WITHOUT huge pages or NUMA assignment
network attach after creation"
echo
echo "Press enter to continue"
read kk
#image to load
imagePath=/mnt/powervault/virtualization/vnfs/os/US1404.qcow2
#image to load as an extra disk, can be any
imagePath_extra=/mnt/powervault/virtualization/vnfs/os/US1404user.qcow2
#default network to use
network_eth0=default
DIRNAME=`dirname $0`
function del_rubbish(){
echo "Press enter to delete the deployed things"
read kk
[ -n "$DEL_server" ] && ${DIRNAME}/test_openvim.py -f del server $DEL_server
[ -n "$DEL_network" ] && ${DIRNAME}/test_openvim.py -f del network $DEL_network
[ -n "$DEL_flavor" ] && ${DIRNAME}/test_openvim.py -f del flavor $DEL_flavor
[ -n "$DEL_image" ] && ${DIRNAME}/test_openvim.py -f del image $DEL_image
rm -f kk.out
}
function proccess_out(){ # args: <action text> [egrep pattern of the field to retrieve; defaults to '"id"']
if egrep -q "\"error\"" kk.out
then
echo "failed to" $1
cat kk.out
del_rubbish
exit -1
fi
if [ -z "$2" ] ; then pattern='"id"' ; else pattern="$2" ; fi
value=`egrep "$pattern" kk.out `
value=${value##* \"}
value=${value%\"*}
if [[ -z "$value" ]]
then
echo "not found the field" $2
cat kk.out
del_rubbish
exit -1
fi
}
#proccess_out "insert server tidgen1" '^ "id"'
#echo $value
#exit 0
echo -n "get ${imagePath##*/} image: "
${DIRNAME}/test_openvim.py -F"path=$imagePath" images > kk.out
proccess_out "get ${imagePath##*/}"
echo $value
image1=$value
echo -n "get ${imagePath_extra##*/} image: "
${DIRNAME}/test_openvim.py -F"path=$imagePath_extra" images > kk.out
proccess_out "get ${imagePath_extra##*/}"
echo $value
image2=$value
echo -n "get ${network_eth0} network: "
${DIRNAME}/test_openvim.py -F"name=$network_eth0" network > kk.out
proccess_out "get ${network_eth0} network"
echo $value
network_eth0=$value
echo -n "insert flavor: "
${DIRNAME}/test_openvim.py new flavor '
---
flavor:
name: CloudVM
description: normal cloud image with 1G, 1core
ram: 1024
vcpus: 1
' > kk.out
proccess_out "insert flavor"
echo $value
flavor1=$value
DEL_flavor="$DEL_flavor $flavor1"
echo
echo "Press enter to continue"
read kk
echo -n "insert bridge network net2: "
${DIRNAME}/test_openvim.py new network '
---
network:
name: network-bridge
type: bridge_data
' > kk.out
proccess_out "insert network 2"
echo $value
network2=$value
DEL_network="$DEL_network $value"
echo -n "insert test VM 1: "
${DIRNAME}/test_openvim.py new server "
---
server:
name: test_VM1
    description: US 1 core
imageRef: '$image1'
flavorRef: '$flavor1'
networks:
- name: mgmt0
vpci: '0000:00:0a.0'
uuid: ${network_eth0}
mac_address: '10:10:10:10:10:12'
- name: eth0
vpci: '0000:00:0b.0'
uuid: '$network2'
mac_address: '10:10:10:10:10:13'
" > kk.out
proccess_out "insert test VM 2" '^ "id"'
echo $value
server1=$value
DEL_server="$DEL_server $value"
echo
echo "Press enter to continue"
read kk
echo -n "insert test VM 2: "
${DIRNAME}/test_openvim.py new server "
---
server:
name: test_VM2
    description: US 1G 1core
imageRef: '$image1'
flavorRef: '$flavor1'
ram: 1024
vcpus: 1
networks:
- name: mgmt0
vpci: '0000:00:0a.0'
uuid: ${network_eth0}
mac_address: '10:10:10:10:aa:12'
- name: eth0
vpci: '0000:00:0b.0'
uuid: '$network2'
mac_address: '10:10:10:10:aa:13'
extended:
devices:
- type: disk
imageRef: '$image2'
" > kk.out
proccess_out "insert test VM 2" '^ "id"'
echo $value
server2=$value
DEL_server="$DEL_server $value"
echo
echo finished. Check connections!!
echo
del_rubbish
exit 0
|
nfvlabs/openmano
|
openvim/test/2VM_NoHuge.sh
|
Shell
|
apache-2.0
| 4,755 |
#! /bin/sh
# Quick hack to figure out where the space is coming from
FILE=$1
if [ -z "$FILE" -o "$FILE" = "-h" ] ; then
echo "Usage: $0 binary"
exit 1
fi
TMPRESULT=`mktemp /tmp/nm_out.XXX`
RESULT=$TMPRESULT
echo "Working on $FILE, intermediate nm result in $RESULT"
nm -C --defined-only --print-size -l $FILE > $RESULT
echo "nm done... analyzing..."
#RESULT=/dev/shm/nm.out
gawk '
{sizeHex="0x" $2; size=strtonum(sizeHex); sum+=size}
/:[0-9]+$/ {
if (size) {
ssum += size;
# not every line of nm output has file/line info; this matches
# lines like "xxxx/blah.cc:123" and returns "xxxx/blah"
# for the xxx part it tries to keep it shorter by not allowing .
# (nm sometimes outputs path/./tmp/morepath)
match($0, "([^ \t.]+)(\\.(h|c|cc|tcc|cpp|hpp|S|rl|ipp|y))?:[0-9]+$", arr);
src=arr[1]
# unknown extensions...
if (length(src) < 5) print src, " XXXX ", $0;
# get rid of irrelevant stuff
sub("/tmp/", "/", src)
# somehow lots of ....fbcode...////////morestuf
sub(".*///", "", src)
# x/ or y/ ...
sub("^./", "", src)
# 20/....
sub("^[0-9/]+", "", src)
sizes[src] += size;
match(src, "(.*)/[^/]*$", pkg);
sizes[pkg[1] "/*"]+= size;
}
}
END {
print "Extracted file attribution for", ssum/1024/1024., "Mb out of",
sum/1024/1024., "Mb :", int((1000*ssum/sum+.5))/10, "%"
for (s in sizes) {
print s, sizes[s]
}
}' $RESULT | sort -k 2 -n
rm $TMPRESULT
|
ldemailly/wdt
|
build/binary_analyzer.sh
|
Shell
|
bsd-3-clause
| 1,497 |
#!/bin/sh
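# Emits the SQL to undo status migration 6: rebuilds the status table (dropping
# any columns added by the migration) and removes the status_event table.
# $status_db_type/$db_type select the SQL dialect.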
[ -n "$status_db_type" ] && db_type=$status_db_type
if [ "$db_type" = "mysql" ]; then
ENGINE="ENGINE=MyISAM DEFAULT CHARSET=utf8"
else
ENGINE=
fi
cat <<EOF
CREATE TABLE IF NOT EXISTS status_new (
name VARCHAR(64) NOT NULL,
operation VARCHAR(64) NOT NULL,
progress INTEGER DEFAULT 0,
code INTEGER DEFAULT 0,
resource_uri VARCHAR(128) NOT NULL,
    parent_uri VARCHAR(128) DEFAULT '',
ctime INTEGER DEFAULT 0,
mtime INTEGER DEFAULT 0,
ytime INTEGER,
state VARCHAR(20) NOT NULL,
failures INTEGER DEFAULT 0,
PRIMARY KEY(name),
UNIQUE(name)
) $ENGINE;
INSERT INTO status_new SELECT name, operation, progress, code, resource_uri, parent_uri, ctime, mtime, ytime, state, failures FROM status;
DROP TABLE status_event;
ALTER TABLE status_new RENAME TO status;
EOF
|
yahoo/gearbox
|
workers/gearbox/sql/status-migration/6.undo.sql.sh
|
Shell
|
bsd-3-clause
| 830 |
#!/bin/sh
# Copyright (C) 1999-2006 ImageMagick Studio LLC
#
# This program is covered by multiple licenses, which are described in
# LICENSE. You should have received a copy of LICENSE with this
# package; otherwise see http://www.imagemagick.org/script/license.php.
. ${srcdir}/tests/common.shi
${RUNENV} ${MEMCHECK} ./rwfile ${SRCDIR}/input_truecolor16.pam MIFF
|
ipwndev/DSLinux-Mirror
|
user/imagemagick/src/tests/rwfile_MIFF_truecolor16.sh
|
Shell
|
gpl-2.0
| 366 |
#!/bin/sh
#
# Copyright (C) 2015, 2016 Internet Systems Consortium, Inc. ("ISC")
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
fail=0
if $PERL -e 'use File::Fetch;' 2>/dev/null
then
:
else
echo "I:This test requires the File::Fetch library." >&2
fail=1
fi
exit $fail
|
pecharmin/bind9
|
bin/tests/system/statschannel/prereq.sh
|
Shell
|
mpl-2.0
| 442 |
#!/bin/sh
PATH="/sbin:/bin:/usr/sbin:/usr/bin:/usr/lib:"
# We don't need to duplicate any of the env setup from rcS, since we will only ever run this to *restart* nickel, and not bootstrap it.
# Meaning we've already got most of the necessary env from nickel itself via both our launcher (fmon/KFMon) and our own startup script.
# NOTE: LD_LIBRARY_PATH is the only late export from rcS we don't siphon in koreader.sh, for obvious reasons ;).
export LD_LIBRARY_PATH="/usr/local/Kobo"
# Ditto, 4.28+
export QT_GSTREAMER_PLAYBIN_AUDIOSINK=alsasink
export QT_GSTREAMER_PLAYBIN_AUDIOSINK_DEVICE_PARAMETER=bluealsa:DEV=00:00:00:00:00:00
# Reset PWD, and clear up our own custom stuff from the env while we're there, otherwise, USBMS may become very wonky on newer FW...
# shellcheck disable=SC2164
cd /
unset OLDPWD
unset LC_ALL TESSDATA_PREFIX STARDICT_DATA_DIR EXT_FONT_DIR
unset KO_DONT_GRAB_INPUT
unset FBINK_FORCE_ROTA
# Ensures fmon will restart. Note that we don't have to worry about reaping this, nickel kills on-animator.sh on start.
(
if [ "${PLATFORM}" = "freescale" ] || [ "${PLATFORM}" = "mx50-ntx" ] || [ "${PLATFORM}" = "mx6sl-ntx" ]; then
usleep 400000
fi
/etc/init.d/on-animator.sh
) &
# Make sure we kill the Wi-Fi first, because nickel apparently doesn't like it if it's up... (cf. #1520)
if grep -q "^${WIFI_MODULE}" "/proc/modules"; then
killall -q -TERM restore-wifi-async.sh enable-wifi.sh obtain-ip.sh
cp -a "/etc/resolv.conf" "/tmp/resolv.ko"
old_hash="$(md5sum "/etc/resolv.conf" | cut -f1 -d' ')"
if [ -x "/sbin/dhcpcd" ]; then
env -u LD_LIBRARY_PATH dhcpcd -d -k "${INTERFACE}"
killall -q -TERM udhcpc default.script
else
killall -q -TERM udhcpc default.script dhcpcd
fi
# NOTE: dhcpcd -k waits for the signalled process to die, but busybox's killall doesn't have a -w, --wait flag,
# so we have to wait for udhcpc to die ourselves...
# NOTE: But if all is well, there *isn't* any udhcpc process or script left to begin with...
kill_timeout=0
while pkill -0 udhcpc; do
# Stop waiting after 5s
if [ ${kill_timeout} -ge 20 ]; then
break
fi
usleep 250000
kill_timeout=$((kill_timeout + 1))
done
new_hash="$(md5sum "/etc/resolv.conf" | cut -f1 -d' ')"
# Restore our network-specific resolv.conf if the DHCP client wiped it when releasing the lease...
if [ "${new_hash}" != "${old_hash}" ]; then
mv -f "/tmp/resolv.ko" "/etc/resolv.conf"
else
rm -f "/tmp/resolv.ko"
fi
wpa_cli terminate
[ "${WIFI_MODULE}" = "dhd" ] && wlarm_le -i "${INTERFACE}" down
ifconfig "${INTERFACE}" down
# NOTE: Kobo's busybox build is weird. rmmod appears to be modprobe in disguise, defaulting to the -r flag...
# But since there's currently no modules.dep file being shipped, nor do they include the depmod applet,
# go with what the FW is doing, which is rmmod.
# c.f., #2394?
usleep 250000
rmmod "${WIFI_MODULE}"
if grep -q "^sdio_wifi_pwr" "/proc/modules"; then
if [ -n "${CPUFREQ_DVFS}" ]; then
echo "0" >"/sys/devices/platform/mxc_dvfs_core.0/enable"
# Leave Nickel in its usual state, don't try to use conservative
echo "userspace" >"/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor"
cat "/sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq" >"/sys/devices/system/cpu/cpu0/cpufreq/scaling_setspeed"
fi
usleep 250000
rmmod sdio_wifi_pwr
fi
# Poke the kernel via ioctl on platforms without the dedicated power module...
if [ ! -e "/drivers/${PLATFORM}/wifi/sdio_wifi_pwr.ko" ]; then
usleep 250000
"${KOREADER_DIR}"/luajit "${KOREADER_DIR}"/frontend/device/kobo/ntx_io.lua 208 0
fi
fi
unset KOREADER_DIR
unset CPUFREQ_DVFS CPUFREQ_CONSERVATIVE
# Recreate Nickel's FIFO ourselves, like rcS does, because udev *will* write to it!
# Plus, we actually *do* want the stuff udev writes in there to be processed by Nickel, anyway.
rm -f "/tmp/nickel-hardware-status"
mkfifo "/tmp/nickel-hardware-status"
# Flush buffers to disk, who knows.
sync
# Handle the sdcard:
# We need to unmount it ourselves, or Nickel wigs out and shows an "unrecognized FS" popup until the next fake sd add event.
# The following udev trigger should then ensure there's a single sd add event enqueued in the FIFO for it to process,
# ensuring it gets sanely detected and remounted RO.
if [ -e "/dev/mmcblk1p1" ]; then
umount /mnt/sd
fi
# And finally, simply restart nickel.
# We don't care about horribly legacy stuff, because if people switch between nickel and KOReader in the first place, I assume they're using a decently recent enough FW version.
# Last tested on an H2O & a Forma running FW 4.7.x - 4.25.x
/usr/local/Kobo/hindenburg &
LIBC_FATAL_STDERR_=1 /usr/local/Kobo/nickel -platform kobo -skipFontLoad &
[ "${PLATFORM}" != "freescale" ] && udevadm trigger &
return 0
|
poire-z/koreader
|
platform/kobo/nickel.sh
|
Shell
|
agpl-3.0
| 5,023 |
#!/usr/bin/env bash
# Copyright 2009 Red Hat Inc., Durham, North Carolina.
# All Rights Reserved.
#
# OpenScap CVSS Module Test Suite.
#
# Authors:
# Maros Barabas <[email protected]>
# Ondrej Moris <[email protected]>
. $builddir/tests/test_common.sh
# Test cases.
# check vector against expected value
function test_api_cvss_vector {
./test_api_cvss --base $A $B $C $D $E $F >/dev/null
local ret vector value v
ret=0
while read vector value; do
v=`./test_api_cvss $vector`
[ "$value" != "$v" ] && ret=1
echo "$vector --> $v ($value)"
done <vectors.txt
return $ret
}
# Testing.
test_init
if [ -z ${CUSTOM_OSCAP+x} ] ; then
test_run "test_api_cvss_vector" test_api_cvss_vector
fi
test_exit
|
mpreisler/openscap
|
tests/API/CVSS/test_api_cvss.sh
|
Shell
|
lgpl-2.1
| 768 |
#!/bin/bash -x
# Make sure we are executing in this script's directory
cd "$( cd "$( dirname "$0" )" && pwd )"
rm -R Results
mkdir Results
DIR="$( pwd )"
if [ $# -lt 6 ]
then
# $0 $1 $2 $3 $4 $5 $6 $7 (optional)
echo Usage: $0 \<Application URL\> \<Application Key\> \<device\> \<zumotestuser password\> \<Blob URL\> \<Blob Token\> \<iOSsdkZip\>
echo Where
echo \<Application URL\> is the URL of the Mobile Service
echo \<Application key\> is the app key for that service
echo \<device\> is one of the following:
echo - iPad2Sim - iPadSimAir - iPadSimAir2
echo - iPadSimPro - iPadSimRetina - iPhoneSim4s
echo - iPhoneSim5 - iPhoneSim5s - iPhoneSim6
echo - iPhoneSim6Plus - iPhoneSim6s - iPhoneSim6sWatch
echo - iPhoneSim6sPlus - iPhoneSim6sPlusWatch
echo \<zumotestuser password\> - the password to use for login operations \(for zumotestuser account\)
echo \<iOSsdkZip\> is the zip file location of the framework to test against \(optional\)
exit 1
fi
echo "$3"
export DEVICE_ARG=
export APP_NAME=
export DEVICE_CMD_ARG=$3
echo Device: $DEVICE_CMD_ARG
# Build current app to test with
pushd ZumoE2ETestApp
if [ $7 ]
then
# Copy specified framework
cp -f $7 sdk.zip
else
# Copy in current version of the framework
curl --location --output sdk.zip --silent http://aka.ms/gc6fex
fi
unzip -o sdk.zip
xcodebuild -sdk iphonesimulator9.1 || exit 1
popd
if [ "$DEVICE_CMD_ARG" == "iPad2Sim" ]; then
echo Using iPad 2 Simulator
export DEVICE_ARG=iPad\ 2\ \(9.1\)
APP_NAME=$DIR/ZumoE2ETestApp/build/Release-iphonesimulator/ZumoE2ETestApp.app
fi
if [ "$DEVICE_CMD_ARG" == "iPadSimAir" ]; then
echo Using iPad Air Simulator
export DEVICE_ARG=iPad\ Air\ \(9.1\)
APP_NAME=$DIR/ZumoE2ETestApp/build/Release-iphonesimulator/ZumoE2ETestApp.app
fi
if [ "$DEVICE_CMD_ARG" == "iPadSimAir2" ]; then
echo Using iPad Air 2 Simulator
export DEVICE_ARG=iPad\ Air\ 2\ \(9.1\)
APP_NAME=$DIR/ZumoE2ETestApp/build/Release-iphonesimulator/ZumoE2ETestApp.app
fi
if [ "$DEVICE_CMD_ARG" == "iPadSimPro" ]; then
echo Using iPad Pro Simulator
export DEVICE_ARG=iPad\ Pro\ \(9.1\)
APP_NAME=$DIR/ZumoE2ETestApp/build/Release-iphonesimulator/ZumoE2ETestApp.app
fi
if [ "$DEVICE_CMD_ARG" == "iPadSimRetina" ]; then
echo Using iPad Retina Simulator
export DEVICE_ARG=iPad\ Retina\ \(9.1\)
APP_NAME=$DIR/ZumoE2ETestApp/build/Release-iphonesimulator/ZumoE2ETestApp.app
fi
if [ "$DEVICE_CMD_ARG" == "iPhoneSim4s" ]; then
echo Using iPhone 4s Simulator
export DEVICE_ARG=iPhone\ 4s\ \(9.1\)
APP_NAME=$DIR/ZumoE2ETestApp/build/Release-iphonesimulator/ZumoE2ETestApp.app
fi
if [ "$DEVICE_CMD_ARG" == "iPhoneSim5" ]; then
echo Using iPhone 5 Simulator
export DEVICE_ARG=iPhone\ 5\ \(9.1\)
APP_NAME=$DIR/ZumoE2ETestApp/build/Release-iphonesimulator/ZumoE2ETestApp.app
fi
if [ "$DEVICE_CMD_ARG" == "iPhoneSim5s" ]; then
echo Using iPhone 5s Simulator
export DEVICE_ARG=iPhone\ 5s\ \(9.1\)
APP_NAME=$DIR/ZumoE2ETestApp/build/Release-iphonesimulator/ZumoE2ETestApp.app
fi
if [ "$DEVICE_CMD_ARG" == "iPhoneSim6" ]; then
echo Using iPhone 6 Simulator
export DEVICE_ARG=iPhone\ 6\ \(9.1\)
APP_NAME=$DIR/ZumoE2ETestApp/build/Release-iphonesimulator/ZumoE2ETestApp.app
fi
if [ "$DEVICE_CMD_ARG" == "iPhoneSim6Plus" ]; then
echo Using iPhone 6 Plus Simulator
export DEVICE_ARG=iPhone\ 6\ Plus\ \(9.1\)
APP_NAME=$DIR/ZumoE2ETestApp/build/Release-iphonesimulator/ZumoE2ETestApp.app
fi
if [ "$DEVICE_CMD_ARG" == "iPhoneSim6s" ]; then
echo Using iPhone 6s Simulator
export DEVICE_ARG=iPhone\ 6s\ \(9.1\)
APP_NAME=$DIR/ZumoE2ETestApp/build/Release-iphonesimulator/ZumoE2ETestApp.app
fi
if [ "$DEVICE_CMD_ARG" == "iPhoneSim6sWatch" ]; then
echo Using iPhone 6s Simulator + Apple Watch
export DEVICE_ARG=iPhone\ 6s\ \(9.1\)\ +\ Apple\ Watch\ -\ 38mm\ \(2.0\)
APP_NAME=$DIR/ZumoE2ETestApp/build/Release-iphonesimulator/ZumoE2ETestApp.app
fi
if [ "$DEVICE_CMD_ARG" == "iPhoneSim6sPlus" ]; then
echo Using iPhone 6s Plus Simulator
export DEVICE_ARG=iPhone\ 6s\ Plus\ \(9.1\)
APP_NAME=$DIR/ZumoE2ETestApp/build/Release-iphonesimulator/ZumoE2ETestApp.app
fi
if [ "$DEVICE_CMD_ARG" == "iPhoneSim6sPlusWatch" ]; then
echo Using iPhone 6s Plus Simulator + Apple Watch
export DEVICE_ARG=iPhone\ 6s\ Plus\ \(9.1\)\ +\ Apple\ Watch\ -\ 42mm\ \(2.0\)
APP_NAME=$DIR/ZumoE2ETestApp/build/Release-iphonesimulator/ZumoE2ETestApp.app
fi
if [ "$APP_NAME" == "" ]
then
echo Unsupported device: "$3"
exit 1
fi
echo DEVICE_ARG: $DEVICE_ARG
echo APP_NAME: $APP_NAME
EscapedToken=${6//&/\\&}
sed -e "s|--APPLICATION_URL--|$1|g" ZumoAutomationTemplate.js > ZumoAutomationWithData.js
sed -e "s|--APPLICATION_KEY--|$2|g" -i "" ZumoAutomationWithData.js
sed -e "s|--BLOB_URL--|$5|g" -i "" ZumoAutomationWithData.js
sed -e "s|--BLOB_TOKEN--|$EscapedToken|g" -i "" ZumoAutomationWithData.js
sed -e "s|--AUTH_PASSWORD--|$4|g" -i "" ZumoAutomationWithData.js
echo Replaced data on template - now running instruments
echo Args: DEVICE_ARG = $DEVICE_ARG
echo APP_NAME = $APP_NAME
export INSTRUMENT_TEMPLATE=/Applications/Xcode.app/Contents/Applications/Instruments.app/Contents/PlugIns/AutomationInstrument.xrplugin/Contents/Resources/Automation.tracetemplate
echo Running instruments...
instruments -w "$DEVICE_ARG" -t "$INSTRUMENT_TEMPLATE" "$APP_NAME" -e UIASCRIPT "ZumoAutomationWithData.js" -e UIARESULTSPATH "Results" || exit 1
exit 0
|
Azure/azure-mobile-services-test
|
sdk/iOS/RunInstruments.sh
|
Shell
|
apache-2.0
| 5,595 |
#!/bin/bash
#/ trigger local ci test run
. rd_versions.sh
set -euo pipefail
IFS=$'\n\t'
readonly ARGS=("$@")
DOCKER_DIR=$PWD/test/docker
usage() {
grep '^#/' <"$0" | cut -c4- # prints the #/ lines above as usage info
}
die(){
echo >&2 "$@" ; exit 2
}
check_args(){
if [ ${#ARGS[@]} -gt 0 ] ; then
DOCKER_DIR=$1
fi
}
copy_jar(){
local FARGS=("$@")
local DIR=${FARGS[0]}
local -a VERS=( $( rd_get_version ) )
local JAR=rundeck-launcher-${VERS[0]}-${VERS[2]}.jar
local buildJar=$PWD/rundeck-launcher/launcher/build/libs/$JAR
test -f $buildJar || die "Jar file not found $buildJar"
mkdir -p $DIR
cp $buildJar $DIR/rundeck-launcher.jar
    echo $DIR/rundeck-launcher.jar
}
run_tests(){
local FARGS=("$@")
local DIR=${FARGS[0]}
cd $DIR
bash $DIR/test-api.sh
}
run_docker_test(){
local FARGS=("$@")
local DIR=${FARGS[0]}
    local launcherJar
    launcherJar=$( copy_jar $DIR ) || die "Failed to copy jar"
run_tests $DIR
}
main() {
check_args
run_docker_test $DOCKER_DIR
}
main
|
damageboy/rundeck
|
run-docker-api-tests.sh
|
Shell
|
apache-2.0
| 984 |
#!/usr/bin/env bash
# The master for this script exists in the Python '2.7' directory. Do
# not edit the version of this script found in other directories. When
# the version of the script in the Python '2.7' directory is modified,
# it must then be copied into other directories. This is necessary as
# Docker when building an image cannot copy in a file from outside of
# the directory where the Dockerfile resides.
# This script will execute the command passed as arguments.
# Setup the environment if not already done.
if [ x"$WHISKEY_PHASE" = x"" ]; then
. `which mod_wsgi-docker-environ`
fi
# Finally set an environment variable as marker to indicate that the
# environment has been set up.
WHISKEY_PHASE=entrypoint
export WHISKEY_PHASE
# Now execute the command passed as arguments. If running as process ID
# 1, we want to do that as a sub process to the 'tini' process, which
# will perform reaping of zombie processes for us.
if [ $$ = 1 ]; then
TINI="tini --"
fi
exec $TINI "$@"
|
GrahamDumpleton/mod_wsgi-docker
|
3.5/entrypoint.sh
|
Shell
|
apache-2.0
| 1,010 |
#!/bin/bash
# removing un-needed packages and apt cache to reduce disk space consumed
apt-get -y autoremove
apt-get -y clean
apt-get purge -y linux-headers-$(uname -r) build-essential zlib1g-dev libssl-dev libreadline-gplv2-dev
echo "Cleaning up mongodb"
echo "db.dropDatabase()" | mongo pxe
echo "Cleaning up rackhd log"
sudo pm2 flush
echo "Cleaning up apt cache"
rm -f /etc/apt/apt.conf.d/90aptcache
# Removing leftover leases and persistent rules
echo "cleaning up dhcp leases"
rm /var/lib/dhcp/*
# Make sure Udev doesn't block our network
echo "cleaning up udev rules"
rm -f /etc/udev/rules.d/70-persistent-net.rules
mkdir /etc/udev/rules.d/70-persistent-net.rules
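# NOTE: creating a directory with the rules file's name appears deliberate; it
# prevents udev from regenerating that file on the next boot.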
rm -rf /dev/.udev/
rm -f /lib/udev/rules.d/75-persistent-net-generator.rules
echo "Adding a 2 sec delay to the interface up, to make the dhclient happy"
echo "pre-up sleep 2" >> /etc/network/interfaces
# Modify the /etc/hosts to align with hostname setting in RackHD/packer/scripts/dep.sh
# by default, the hostname in both /etc/hosts & /etc/hostname are obtained from DHCP server during install.
# in RackHD/packer/scripts/dep.sh, it's modified , but /etc/hosts(127.0.1.1) never get changed.
NEW_HOST_NAME=$(cat /etc/hostname)
sed -i "/127.0.1.1/,/$/c127.0.1.1\t${NEW_HOST_NAME}" /etc/hosts
|
tldavies/RackHD
|
packer/scripts/cleanup.sh
|
Shell
|
apache-2.0
| 1,271 |
#!/bin/bash
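# Minimal example mkcloud configuration: clear any inherited values, pin the loop
# device, cloud source and node count, then pass all arguments through to mkcloud.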
unset cloudpv
unset cloudsource
unset nodenumber
unset want_sles12sp1
export cloudpv=/dev/loop0
export cloudsource=develcloud6
export nodenumber='2'
export want_sles12sp1=2
exec /path/to/mkcloud "$@"
|
gosipyan/automation
|
docs/basic-mkcloud-config.sh
|
Shell
|
apache-2.0
| 214 |
#! /bin/bash
#
# This file is part of khmer, https://github.com/dib-lab/khmer/, and is
# Copyright (C) Michigan State University, 2009-2015. It is licensed under
# the three-clause BSD license; see doc/LICENSE.txt.
# Contact: [email protected]
#
#
# download test data from:
#
# https://s3.amazonaws.com/public.ged.msu.edu/khmer/iowa-corn-50m.fa.gz
#
KHMER_PATH=$1
export PYTHONPATH=$KHMER_PATH/python
SCRIPTPATH=$KHMER_PATH/scripts
# the next command will create a '50m.ht' and a '50m.tagset',
# representing the de Bruijn graph
${SCRIPTPATH}/load-graph.py -k 32 -N 4 -x 12e9 50m iowa-corn-50m.fa.gz
# this will then partition that graph. should take a while.
# update threads to something higher if you have more cores.
# this creates a bunch of files, 50m.subset.*.pmap
${SCRIPTPATH}/partition-graph.py --threads 4 -s 1e5 50m
# now, merge the pmap files into one big pmap file, 50m.pmap.merged
${SCRIPTPATH}/merge-partitions.py 50m
# next, annotate the original sequences with their partition numbers.
# this will create iowa-corn-50m.fa.gz.part
${SCRIPTPATH}/annotate-partitions.py 50m iowa-corn-50m.fa.gz
# now, extract the partitions in groups into 'iowa-corn-50m.groupNNNN.fa'
${SCRIPTPATH}/extract-partitions.py iowa-corn-50m iowa-corn-50m.fa.gz.part
# at this point, you can assemble the group files individually. Note,
# however, that the last one of them is quite big: this is because it's
# the lump! yay!
# if you want to break up the lump, go through the partitioning bit
# on the group file, but this time with a twist:
mv iowa-corn-50m.group0007.fa corn-50m.lump.fa
# create graph,
${SCRIPTPATH}/load-graph.py -x 8e9 lump corn-50m.lump.fa
# create an initial set of stoptags to help in knot-traversal; otherwise,
# partitioning and knot-traversal (which is systematic) is really expensive.
${SCRIPTPATH}/make-initial-stoptags.py lump
# now partition the graph, using the stoptags file
${SCRIPTPATH}/partition-graph.py --stoptags lump.stoptags lump
# use the partitioned subsets to find the k-mers that nucleate the lump
${SCRIPTPATH}/find-knots.py -x 2e8 -N 4 lump
# remove those k-mers from the fasta files
${SCRIPTPATH}/filter-stoptags.py *.stoptags corn-50m.lump.fa
# now, reload the filtered data set in and partition again.
${SCRIPTPATH}/load-graph.py -x 8e9 lumpfilt corn-50m.lump.fa.stopfilt
${SCRIPTPATH}/partition-graph.py -T 4 lumpfilt
${SCRIPTPATH}/merge-partitions.py lumpfilt
${SCRIPTPATH}/annotate-partitions.py lumpfilt corn-50m.lump.fa.stopfilt
${SCRIPTPATH}/extract-partitions.py corn-50m-lump corn-50m.lump.fa.stopfilt.part
# and voila, after all that, you should now have your de-knotted lump in
# corn-50m-lump.group*.fa. The *.group????.fa files can now be
# assembled individually by your favorite assembler.
|
F1000Research/khmer
|
doc/run-corn-50m.sh
|
Shell
|
bsd-3-clause
| 2,777 |
#!/bin/sh
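# Arguments (inferred from the usage below): $1 = directory for the md5 output,
# $2 = duration passed to create_test_essence, $3 = essence type for
# create_test_essence, $4 = raw2bmx input option name, $5 = optional frame rate.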
MD5TOOL=../file_md5
TEMP_DIR=/tmp/rdd9test_temp$$
mkdir -p ${TEMP_DIR}
BASE_COMMAND="../../apps/raw2bmx/raw2bmx --regtest -t rdd9 -o ${TEMP_DIR}/rdd9test.mxf -y 10:11:12:13 --clip test --part 12 "
if [ "$5" != "" ]; then
BASE_COMMAND="$BASE_COMMAND -f $5 "
fi
# create essence data
../create_test_essence -t 1 -d $2 ${TEMP_DIR}/pcm.raw
../create_test_essence -t $3 -d $2 ${TEMP_DIR}/test_in.raw
# write and calculate md5sum
if $BASE_COMMAND -a 16:9 --$4 ${TEMP_DIR}/test_in.raw -q 16 --locked true --pcm ${TEMP_DIR}/pcm.raw -q 16 --locked true --pcm ${TEMP_DIR}/pcm.raw >/dev/null
then
$MD5TOOL < ${TEMP_DIR}/rdd9test.mxf > $1/$4$5.md5
RESULT=0
else
RESULT=1
fi
# clean-up
rm -Rf ${TEMP_DIR}
exit $RESULT
|
xunchangguo/BMX
|
test/rdd9_mxf/create.sh
|
Shell
|
bsd-3-clause
| 734 |
#!/bin/bash
curl http://www.usb.org/developers/tools/comp_dump > ../Resources/usb-vendors.txt
|
ajostergaard/ControlPlane
|
Utilities/update-usb-data.sh
|
Shell
|
bsd-3-clause
| 95 |
#! /bin/bash
set -u
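# parseGlide <glide.lock> <lib>: print the value on the line that follows the
# first match of <lib>, i.e. the commit hash pinned in the lock file.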
function parseGlide() {
cat $1 | grep -A1 $2 | grep -v $2 | awk '{print $2}'
}
# fetch and checkout vendored dep
glide=$1
lib=$2
echo "----------------------------------"
echo "Getting $lib ..."
go get -t github.com/tendermint/$lib/...
VENDORED=$(parseGlide $glide $lib)
cd $GOPATH/src/github.com/tendermint/$lib
MASTER=$(git rev-parse origin/master)
if [[ "$VENDORED" != "$MASTER" ]]; then
echo "... VENDORED != MASTER ($VENDORED != $MASTER)"
echo "... Checking out commit $VENDORED"
git checkout $VENDORED &> /dev/null
fi
|
wangluinc/ethermint
|
vendor/github.com/tendermint/tendermint/scripts/glide/checkout.sh
|
Shell
|
gpl-3.0
| 557 |
#!/bin/sh -ex
#EMBLEM_FILE="emblems.tar.gz"
NAUTILUS_EXTENSION="nautilus_panbox.py"
# copy python extension
if [ -f "$NAUTILUS_EXTENSION" ]
then
mkdir -p ~/.local/share/nautilus-python/extensions
mkdir -p ~/.local/share/nautilus-python/extensions/img && echo "[+] created python extension dirs"
cp nautilus_panbox.py ~/.local/share/nautilus-python/extensions/nautilus_panbox.py && echo "[+] copied python extension"
cp img/panbox-icon_22.png ~/.local/share/nautilus-python/extensions/img/panbox-icon_22.png && echo "[+] copied panbox icon"
cp -r locale/ ~/.local/share/nautilus-python/extensions/ && echo "[+] locale files copied"
else
echo "$NAUTILUS_EXTENSION missing"
fi
## extract symbols
#if [ -f "$EMBLEM_FILE" ]
#then
# mkdir -p ~/.icons && echo "[+] created icon dir"
# tar -xzf emblems.tar.gz -C ~/.icons/ && echo "[+] extracted emblem files"
#else
# echo "$EMBLEM_FILE missing"
#fi
|
tidatida/PanBox
|
panbox-linux/src/org/panbox/desktop/linux/nautilus/install.sh
|
Shell
|
gpl-3.0
| 905 |
#!/bin/sh
# Copyright 2012 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Starts up an impalad or a mini-impala-cluster with the specified command line
# arguments. An optional -build_type parameter can be passed to determine the build
# type to use for the impalad instance.
set -e
set -u
BUILD_TYPE=debug
IMPALAD_ARGS=""
BINARY_BASE_DIR=${IMPALA_HOME}/be/build
GDB_PREFIX=""
IN_PROCESS_BINARY=testutil/mini-impala-cluster
IMPALAD_BINARY=service/impalad
BINARY=${IMPALAD_BINARY}
JVM_DEBUG_PORT=""
JVM_SUSPEND="n"
JVM_ARGS=""
for ARG in $*
do
case "$ARG" in
-build_type=debug)
BUILD_TYPE=debug
;;
-build_type=release)
BUILD_TYPE=release
;;
-build_type=*)
echo "Invalid build type. Valid values are: debug, release"
exit 1
;;
-in-process)
BINARY=${IN_PROCESS_BINARY}
;;
-gdb)
echo "Starting Impala under gdb..."
GDB_PREFIX="gdb --args"
;;
-jvm_debug_port=*)
JVM_DEBUG_PORT="${ARG#*=}"
;;
-jvm_suspend)
JVM_SUSPEND="y"
;;
-jvm_args=*)
JVM_ARGS="${ARG#*=}"
;;
# Pass all other options as an Impalad argument
*)
IMPALAD_ARGS="${IMPALAD_ARGS} ${ARG}"
esac
done
IMPALA_CMD=${BINARY_BASE_DIR}/${BUILD_TYPE}/${BINARY}
# Temporarily disable unbound variable checking in case JAVA_TOOL_OPTIONS is not set.
set +u
# Optionally enable Java debugging.
if [ -n "$JVM_DEBUG_PORT" ]; then
export JAVA_TOOL_OPTIONS="-agentlib:jdwp=transport=dt_socket,address=localhost:${JVM_DEBUG_PORT},server=y,suspend=${JVM_SUSPEND} ${JAVA_TOOL_OPTIONS}"
fi
# Optionally add additional JVM args.
if [ -n "$JVM_ARGS" ]; then
export JAVA_TOOL_OPTIONS="${JAVA_TOOL_OPTIONS} ${JVM_ARGS}"
fi
set -u
. ${IMPALA_HOME}/bin/set-classpath.sh
exec ${GDB_PREFIX} ${IMPALA_CMD} ${IMPALAD_ARGS}
|
gistic/PublicSpatialImpala
|
bin/start-impalad.sh
|
Shell
|
apache-2.0
| 2,330 |
[ "$FREETZ_PATCH_ALARMCLOCK" == "y" ] || return 0
echo1 "adding alarm-clock"
modpatch "$FILESYSTEM_MOD_DIR" "${PATCHES_COND_DIR}/${FREETZ_TYPE_LANGUAGE}/alarmclock_${FREETZ_TYPE_PREFIX}.patch"
|
bs-github/freetz
|
patches/scripts/480-add_3rd_alarmclock.sh
|
Shell
|
gpl-2.0
| 194 |
# Copyright: 2017 Masatake YAMATO
# License: GPL-2
CTAGS="$1 --quiet --options=NONE --fields=+lK"
list_kinds()
{
echo '#'
echo '#' list kinds$2 $1
echo '#'
${CTAGS} -o - \
--options=./event.ctags \
--options=./hook.ctags \
--options=./plugin.ctags \
--list-kinds$2=$1
}
list_kinds C
list_kinds Event
list_kinds Hook
list_kinds Plugin
list_kinds C -full
list_kinds Event -full
list_kinds Hook -full
list_kinds Plugin -full
echo C only
${CTAGS} -o - input.c
echo
echo C + EVENT
${CTAGS} -o - \
--options=./event.ctags \
input.c
echo
echo C + EVENT + HOOK
${CTAGS} -o - \
--options=./event.ctags \
--options=./hook.ctags \
input.c
echo
echo C + EVENT + HOOK + PLUGIN
${CTAGS} -o - \
--options=./event.ctags \
--options=./hook.ctags \
--options=./plugin.ctags \
input.c
echo
echo C + EVENT + HOOK + PLUGIN + UA
${CTAGS} -o - \
--options=./event.ctags \
--options=./hook.ctags \
--options=./plugin.ctags \
--options=./unused-attr.ctags \
input.c
echo
echo 'C(disabled)' + EVENT + HOOK + PLUGIN + UA
${CTAGS} -o - \
--options=./event.ctags \
--options=./hook.ctags \
--options=./plugin.ctags \
--options=./unused-attr.ctags \
--languages=-C \
input.c
echo
echo C + 'EVENT(disabled)' + HOOK + PLUGIN + UA
${CTAGS} -o - \
--options=./event.ctags \
--options=./hook.ctags \
--options=./plugin.ctags \
--options=./unused-attr.ctags \
--languages=-Event \
input.c
echo
echo C + 'EVENT' + 'HOOK(disabled)' + PLUGIN + UA
${CTAGS} -o - \
--options=./event.ctags \
--options=./hook.ctags \
--options=./plugin.ctags \
--options=./unused-attr.ctags \
--languages=-Hook \
input.c
echo
echo C + 'EVENT' + 'HOOK' + PLUGIN + 'UA(-v)'
${CTAGS} -o - \
--options=./event.ctags \
--options=./hook.ctags \
--options=./plugin.ctags \
--options=./unused-attr.ctags \
--kinds-UnusedAttr=-v \
input.c
echo List subparsers of C '(' 'EVENT' + 'HOOK' + PLUGIN + 'UA' ')'
${CTAGS} \
--options=./event.ctags \
--options=./hook.ctags \
--options=./plugin.ctags \
--options=./unused-attr.ctags \
--list-subparsers=C
echo List subparsers of C '(' 'EVENT' + 'HOOK' + PLUGIN + 'UA' ')' without the header
${CTAGS} --with-list-header=no \
--options=./event.ctags \
--options=./hook.ctags \
--options=./plugin.ctags \
--options=./unused-attr.ctags \
--list-subparsers=C
echo List subparsers of C '(' 'EVENT' + 'HOOK' + PLUGIN + 'UA' ')' in machinable
${CTAGS} \
--options=./event.ctags \
--options=./hook.ctags \
--options=./plugin.ctags \
--options=./unused-attr.ctags \
--machinable \
--list-subparsers=C
echo List subparsers of C '(' 'EVENT' + 'HOOK' + PLUGIN + 'UA' ')' in machinable without the header
${CTAGS} --with-list-header=no \
--options=./event.ctags \
--options=./hook.ctags \
--options=./plugin.ctags \
--options=./unused-attr.ctags \
--machinable \
--list-subparsers=C
|
masatake/ctags
|
Tmain/nested-subparsers.d/run.sh
|
Shell
|
gpl-2.0
| 2,972 |
#!/bin/sh
# Demonstrate bug when using -d with an existing destination file
# that is a symlink.
# Copyright (C) 2000-2013 Free Software Foundation, Inc.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
. "${srcdir=.}/tests/init.sh"; path_prepend_ ./src
print_ver_ cp
touch f slink-target || framework_failure_
ln -s slink-target slink || framework_failure_
cp -d f slink || fail=1
Exit $fail
|
ZheJiuShiMing/coreutils
|
tests/cp/deref-slink.sh
|
Shell
|
gpl-3.0
| 982 |
#!/bin/sh
# copy a sparse file to a pipe, to exercise some seldom-used parts of copy.c
# Copyright (C) 2011-2013 Free Software Foundation, Inc.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
. "${srcdir=.}/tests/init.sh"; path_prepend_ ./src
print_ver_ cp
require_sparse_support_
mkfifo_or_skip_ pipe
timeout 10 cat pipe > copy &
truncate -s1M sparse || framework_failure_
cp sparse pipe || fail=1
# Ensure that the cat has completed before comparing.
wait
cmp sparse copy || fail=1
Exit $fail
|
mmayer/coreutils
|
tests/cp/sparse-to-pipe.sh
|
Shell
|
gpl-3.0
| 1,088 |
#!/usr/bin/env bash
#
# Copyright (c) 2015 Jeromy Johnson
# MIT Licensed; see the LICENSE file in this repository.
#
test_description="test bitswap commands"
. lib/test-lib.sh
test_init_ipfs
test_launch_ipfs_daemon
test_expect_success "'ipfs bitswap stat' succeeds" '
ipfs bitswap stat >stat_out
'
test_expect_success "'ipfs bitswap stat' output looks good" '
cat <<EOF | unexpand -t2 >expected &&
bitswap status
provides buffer: 0 / 256
blocks received: 0
blocks sent: 0
data received: 0
data sent: 0
dup blocks received: 0
dup data received: 0 B
wantlist [0 keys]
partners [0]
EOF
test_cmp expected stat_out
'
test_expect_success "ipfs peer id looks good" '
PEERID=$(ipfs config Identity.PeerID) &&
test_check_peerid "$PEERID"
'
test_expect_success "'ipfs bitswap wantlist -p' works" '
ipfs bitswap wantlist -p "$PEERID" >wantlist_p_out
'
test_expect_success "'ipfs bitswap wantlist -p' output looks good" '
test_must_be_empty wantlist_p_out
'
test_expect_success "hash was removed from wantlist" '
ipfs bitswap wantlist > wantlist_out &&
test_must_be_empty wantlist_out
'
test_expect_success "'ipfs bitswap stat' succeeds" '
ipfs bitswap stat >stat_out
'
test_expect_success "'ipfs bitswap stat' output looks good" '
cat <<EOF | unexpand -t2 >expected &&
bitswap status
provides buffer: 0 / 256
blocks received: 0
blocks sent: 0
data received: 0
data sent: 0
dup blocks received: 0
dup data received: 0 B
wantlist [0 keys]
partners [0]
EOF
test_cmp expected stat_out
'
test_expect_success "'ipfs bitswap wantlist -p' works" '
ipfs bitswap wantlist -p "$PEERID" >wantlist_p_out
'
test_expect_success "'ipfs bitswap wantlist -p' output looks good" '
test_cmp wantlist_out wantlist_p_out
'
test_kill_ipfs_daemon
test_done
|
disorganizer/brig
|
vendor/github.com/ipfs/go-ipfs/test/sharness/t0220-bitswap.sh
|
Shell
|
agpl-3.0
| 1,801 |
#!/bin/sh
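# Build a release tarball for the given commit (defaults to HEAD), named after
# the output of git-describe.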
PROJECT=libcork
COMMIT="$1"
if [ -z "$COMMIT" ]; then
COMMIT="HEAD"
fi
VERSION=$(git describe ${COMMIT})
git archive --prefix=${PROJECT}-${VERSION}/ --format=tar ${COMMIT} | \
bzip2 -c > ${PROJECT}-${VERSION}.tar.bz2
|
Jigsaw-Code/outline-client
|
third_party/shadowsocks-libev/libcork/make-dist.sh
|
Shell
|
apache-2.0
| 238 |
#! /bin/sh
#
# Copyright (c) 1999, 2006 Tanuki Software Inc.
#
# Java Service Wrapper sh script. Suitable for starting and stopping
# wrapped Java applications on UNIX platforms.
#
#-----------------------------------------------------------------------------
# These settings can be modified to fit the needs of your application
# Application
APP_NAME="Synapse"
APP_LONG_NAME="Apache Synapse (Synapse)"
# Wrapper
WRAPPER_CMD="./bin/native/wrapper"
WRAPPER_CONF="./repository/conf/wrapper.conf"
# Priority at which to run the wrapper. See "man nice" for valid priorities.
# nice is only used if a priority is specified.
PRIORITY=
# Location of the pid file.
PIDDIR="."
# If uncommented, causes the Wrapper to be shutdown using an anchor file.
# When launched with the 'start' command, it will also ignore all INT and
# TERM signals.
#IGNORE_SIGNALS=true
# If specified, the Wrapper will be run as the specified user.
# IMPORTANT - Make sure that the user has the required privileges to write
# the PID file and wrapper.log files. Failure to be able to write the log
# file will cause the Wrapper to exit without any way to write out an error
# message.
# NOTE - This will set the user which is used to run the Wrapper as well as
# the JVM and is not useful in situations where a privileged resource or
# port needs to be allocated prior to the user being changed.
#RUN_AS_USER=
# The following two lines are used by the chkconfig command. Change as is
# appropriate for your application. They should remain commented.
# chkconfig: 2345 20 80
# description: @app.long.name@
# Do not modify anything beyond this point
#-----------------------------------------------------------------------------
# Get the fully qualified path to the script
case $0 in
/*)
SCRIPT="$0"
;;
*)
PWD=`pwd`
SCRIPT="$PWD/$0"
;;
esac
# Resolve the true real path without any sym links.
CHANGED=true
while [ "X$CHANGED" != "X" ]
do
# Change spaces to ":" so the tokens can be parsed.
SAFESCRIPT=`echo $SCRIPT | sed -e 's; ;:;g'`
# Get the real path to this script, resolving any symbolic links
TOKENS=`echo $SAFESCRIPT | sed -e 's;/; ;g'`
REALPATH=
for C in $TOKENS; do
# Change any ":" in the token back to a space.
C=`echo $C | sed -e 's;:; ;g'`
REALPATH="$REALPATH/$C"
# If REALPATH is a sym link, resolve it. Loop for nested links.
while [ -h "$REALPATH" ] ; do
LS="`ls -ld "$REALPATH"`"
LINK="`expr "$LS" : '.*-> \(.*\)$'`"
if expr "$LINK" : '/.*' > /dev/null; then
# LINK is absolute.
REALPATH="$LINK"
else
# LINK is relative.
REALPATH="`dirname "$REALPATH"`""/$LINK"
fi
done
done
if [ "$REALPATH" = "$SCRIPT" ]
then
CHANGED=""
else
SCRIPT="$REALPATH"
fi
done
# Change the current directory to the location of the script
PRGDIR=`dirname "$REALPATH"`
REALDIR=`cd "$PRGDIR/..";pwd`
# If the PIDDIR is relative, set its value relative to the full REALPATH to avoid problems if
# the working directory is later changed.
FIRST_CHAR=`echo $PIDDIR | cut -c1,1`
if [ "$FIRST_CHAR" != "/" ]
then
PIDDIR=$REALDIR/$PIDDIR
fi
# Same test for WRAPPER_CMD
FIRST_CHAR=`echo $WRAPPER_CMD | cut -c1,1`
if [ "$FIRST_CHAR" != "/" ]
then
WRAPPER_CMD=$REALDIR/$WRAPPER_CMD
fi
# Same test for WRAPPER_CONF
FIRST_CHAR=`echo $WRAPPER_CONF | cut -c1,1`
if [ "$FIRST_CHAR" != "/" ]
then
WRAPPER_CONF=$REALDIR/$WRAPPER_CONF
fi
# Process ID
ANCHORFILE="$PIDDIR/$APP_NAME.anchor"
PIDFILE="$PIDDIR/$APP_NAME.pid"
LOCKDIR="/var/lock/subsys"
LOCKFILE="$LOCKDIR/$APP_NAME"
pid=""
# Resolve the location of the 'ps' command
PSEXE="/usr/bin/ps"
if [ ! -x "$PSEXE" ]
then
PSEXE="/bin/ps"
if [ ! -x "$PSEXE" ]
then
echo "Unable to locate 'ps'."
echo "Please report this message along with the location of the command on your system."
exit 1
fi
fi
# Resolve the os
DIST_OS=`uname -s | tr "[:upper:]" "[:lower:]" | tr -d "[:blank:]"`
case "$DIST_OS" in
'sunos')
DIST_OS="solaris"
;;
'hp-ux' | 'hp-ux64')
DIST_OS="hpux"
;;
'darwin')
DIST_OS="macosx"
;;
'unix_sv')
DIST_OS="unixware"
;;
esac
# Resolve the architecture
DIST_ARCH=`uname -p | tr "[:upper:]" "[:lower:]" | tr -d "[:blank:]"`
if [ "$DIST_ARCH" = "unknown" ]
then
    DIST_ARCH=`uname -m | tr "[:upper:]" "[:lower:]" | tr -d "[:blank:]"`
fi
case "$DIST_ARCH" in
'amd64' | 'athlon' | 'ia32' | 'ia64' | 'i386' | 'i486' | 'i586' | 'i686' | 'x86_64')
DIST_ARCH="x86"
;;
'ip27')
DIST_ARCH="mips"
;;
'power' | 'powerpc' | 'power_pc' | 'ppc64')
DIST_ARCH="ppc"
;;
'pa_risc' | 'pa-risc')
DIST_ARCH="parisc"
;;
'sun4u' | 'sparcv9')
DIST_ARCH="sparc"
;;
'9000/800')
DIST_ARCH="parisc"
;;
esac
outputFile() {
if [ -f "$1" ]
then
echo " $1 (Found but not executable.)";
else
echo " $1"
fi
}
# Decide on the wrapper binary to use.
# If a 32-bit wrapper binary exists then it will work on 32 or 64 bit
# platforms, if the 64-bit binary exists then the distribution most
# likely wants to use long names. Otherwise, look for the default.
# For macosx, we also want to look for universal binaries.
WRAPPER_TEST_CMD="$WRAPPER_CMD-$DIST_OS-$DIST_ARCH-32"
if [ -x "$WRAPPER_TEST_CMD" ]
then
WRAPPER_CMD="$WRAPPER_TEST_CMD"
else
if [ "$DIST_OS" = "macosx" ]
then
WRAPPER_TEST_CMD="$WRAPPER_CMD-$DIST_OS-universal-32"
if [ -x "$WRAPPER_TEST_CMD" ]
then
WRAPPER_CMD="$WRAPPER_TEST_CMD"
else
WRAPPER_TEST_CMD="$WRAPPER_CMD-$DIST_OS-$DIST_ARCH-64"
if [ -x "$WRAPPER_TEST_CMD" ]
then
WRAPPER_CMD="$WRAPPER_TEST_CMD"
else
WRAPPER_TEST_CMD="$WRAPPER_CMD-$DIST_OS-universal-64"
if [ -x "$WRAPPER_TEST_CMD" ]
then
WRAPPER_CMD="$WRAPPER_TEST_CMD"
else
if [ ! -x "$WRAPPER_CMD" ]
then
echo "Unable to locate any of the following binaries:"
outputFile "$WRAPPER_CMD-$DIST_OS-$DIST_ARCH-32"
outputFile "$WRAPPER_CMD-$DIST_OS-universal-32"
outputFile "$WRAPPER_CMD-$DIST_OS-$DIST_ARCH-64"
outputFile "$WRAPPER_CMD-$DIST_OS-universal-64"
outputFile "$WRAPPER_CMD"
exit 1
fi
fi
fi
fi
else
WRAPPER_TEST_CMD="$WRAPPER_CMD-$DIST_OS-$DIST_ARCH-64"
if [ -x "$WRAPPER_TEST_CMD" ]
then
WRAPPER_CMD="$WRAPPER_TEST_CMD"
else
if [ ! -x "$WRAPPER_CMD" ]
then
echo "Unable to locate any of the following binaries:"
outputFile "$WRAPPER_CMD-$DIST_OS-$DIST_ARCH-32"
outputFile "$WRAPPER_CMD-$DIST_OS-$DIST_ARCH-64"
outputFile "$WRAPPER_CMD"
exit 1
fi
fi
fi
fi
# Build the nice clause
if [ "X$PRIORITY" = "X" ]
then
CMDNICE=""
else
CMDNICE="nice -$PRIORITY"
fi
# Build the anchor file clause.
if [ "X$IGNORE_SIGNALS" = "X" ]
then
ANCHORPROP=
IGNOREPROP=
else
ANCHORPROP=wrapper.anchorfile=\"$ANCHORFILE\"
IGNOREPROP=wrapper.ignore_signals=TRUE
fi
# Build the lock file clause. Only create a lock file if the lock directory exists on this platform.
LOCKPROP=
if [ -d $LOCKDIR ]
then
if [ -w $LOCKDIR ]
then
LOCKPROP=wrapper.lockfile=\"$LOCKFILE\"
fi
fi
checkUser() {
# $1 touchLock flag
# $2 command
# Check the configured user. If necessary rerun this script as the desired user.
if [ "X$RUN_AS_USER" != "X" ]
then
# Resolve the location of the 'id' command
IDEXE="/usr/xpg4/bin/id"
if [ ! -x "$IDEXE" ]
then
IDEXE="/usr/bin/id"
if [ ! -x "$IDEXE" ]
then
echo "Unable to locate 'id'."
echo "Please report this message along with the location of the command on your system."
exit 1
fi
fi
if [ "`$IDEXE -u -n`" = "$RUN_AS_USER" ]
then
# Already running as the configured user. Avoid password prompts by not calling su.
RUN_AS_USER=""
fi
fi
if [ "X$RUN_AS_USER" != "X" ]
then
# If LOCKPROP and $RUN_AS_USER are defined then the new user will most likely not be
# able to create the lock file. The Wrapper will be able to update this file once it
# is created but will not be able to delete it on shutdown. If $2 is defined then
# the lock file should be created for the current command
if [ "X$LOCKPROP" != "X" ]
then
if [ "X$1" != "X" ]
then
# Resolve the primary group
RUN_AS_GROUP=`groups $RUN_AS_USER | awk '{print $3}' | tail -1`
if [ "X$RUN_AS_GROUP" = "X" ]
then
RUN_AS_GROUP=$RUN_AS_USER
fi
touch $LOCKFILE
chown $RUN_AS_USER:$RUN_AS_GROUP $LOCKFILE
fi
fi
# Still want to change users, recurse. This means that the user will only be
# prompted for a password once. Variables shifted by 1
su -m $RUN_AS_USER -c "\"$REALPATH\" $2"
# Now that we are the original user again, we may need to clean up the lock file.
if [ "X$LOCKPROP" != "X" ]
then
getpid
if [ "X$pid" = "X" ]
then
# Wrapper is not running so make sure the lock file is deleted.
if [ -f "$LOCKFILE" ]
then
rm "$LOCKFILE"
fi
fi
fi
exit 0
fi
}
getpid() {
if [ -f "$PIDFILE" ]
then
if [ -r "$PIDFILE" ]
then
pid=`cat "$PIDFILE"`
if [ "X$pid" != "X" ]
then
# It is possible that 'a' process with the pid exists but that it is not the
# correct process. This can happen in a number of cases, but the most
# common is during system startup after an unclean shutdown.
# The ps statement below looks for the specific wrapper command running as
# the pid. If it is not found then the pid file is considered to be stale.
pidtest=`$PSEXE -p $pid -o args | grep "$WRAPPER_CMD" | tail -1`
if [ "X$pidtest" = "X" ]
then
# This is a stale pid file.
rm -f "$PIDFILE"
echo "Removed stale pid file: $PIDFILE"
pid=""
fi
fi
else
echo "Cannot read $PIDFILE."
exit 1
fi
fi
}
testpid() {
pid=`$PSEXE -p $pid | grep $pid | grep -v grep | awk '{print $1}' | tail -1`
if [ "X$pid" = "X" ]
then
# Process is gone so remove the pid file.
rm -f "$PIDFILE"
pid=""
fi
}
console() {
echo "Running $APP_LONG_NAME..."
getpid
if [ "X$pid" = "X" ]
then
        # The string passed to eval must handle spaces in paths correctly.
COMMAND_LINE="$CMDNICE \"$WRAPPER_CMD\" \"$WRAPPER_CONF\" wrapper.syslog.ident=$APP_NAME wrapper.pidfile=\"$PIDFILE\" $ANCHORPROP $LOCKPROP"
eval $COMMAND_LINE
else
echo "$APP_LONG_NAME is already running."
exit 1
fi
}
start() {
echo "Starting $APP_LONG_NAME..."
getpid
if [ "X$pid" = "X" ]
then
        # The string passed to eval must handle spaces in paths correctly.
COMMAND_LINE="$CMDNICE \"$WRAPPER_CMD\" \"$WRAPPER_CONF\" wrapper.syslog.ident=$APP_NAME wrapper.pidfile=\"$PIDFILE\" wrapper.daemonize=TRUE $ANCHORPROP $IGNOREPROP $LOCKPROP"
eval $COMMAND_LINE
else
echo "$APP_LONG_NAME is already running."
exit 1
fi
}
stopit() {
echo "Stopping $APP_LONG_NAME..."
getpid
if [ "X$pid" = "X" ]
then
echo "$APP_LONG_NAME was not running."
else
if [ "X$IGNORE_SIGNALS" = "X" ]
then
# Running so try to stop it.
kill $pid
if [ $? -ne 0 ]
then
# An explanation for the failure should have been given
echo "Unable to stop $APP_LONG_NAME."
exit 1
fi
else
rm -f "$ANCHORFILE"
if [ -f "$ANCHORFILE" ]
then
# An explanation for the failure should have been given
echo "Unable to stop $APP_LONG_NAME."
exit 1
fi
fi
        # We cannot predict how long it will take for the wrapper to
# actually stop as it depends on settings in wrapper.conf.
# Loop until it does.
savepid=$pid
CNT=0
TOTCNT=0
while [ "X$pid" != "X" ]
do
# Show a waiting message every 5 seconds.
if [ "$CNT" -lt "5" ]
then
CNT=`expr $CNT + 1`
else
echo "Waiting for $APP_LONG_NAME to exit..."
CNT=0
fi
TOTCNT=`expr $TOTCNT + 1`
sleep 1
testpid
done
pid=$savepid
testpid
if [ "X$pid" != "X" ]
then
echo "Failed to stop $APP_LONG_NAME."
exit 1
else
echo "Stopped $APP_LONG_NAME."
fi
fi
}
status() {
getpid
if [ "X$pid" = "X" ]
then
echo "$APP_LONG_NAME is not running."
exit 1
else
echo "$APP_LONG_NAME is running ($pid)."
exit 0
fi
}
dump() {
echo "Dumping $APP_LONG_NAME..."
getpid
if [ "X$pid" = "X" ]
then
echo "$APP_LONG_NAME was not running."
else
kill -3 $pid
if [ $? -ne 0 ]
then
echo "Failed to dump $APP_LONG_NAME."
exit 1
else
echo "Dumped $APP_LONG_NAME."
fi
fi
}
case "$1" in
'console')
checkUser touchlock $1
console
;;
'start')
checkUser touchlock $1
start
;;
'stop')
checkUser "" $1
stopit
;;
'restart')
checkUser touchlock $1
stopit
start
;;
'status')
checkUser "" $1
status
;;
'dump')
checkUser "" $1
dump
;;
*)
echo "Usage: $0 { console | start | stop | restart | status | dump }"
exit 1
;;
esac
exit 0
|
maheshika/wso2-synapse
|
modules/distribution/src/main/bin/synapse-daemon.sh
|
Shell
|
apache-2.0
| 15,065 |
#!/usr/bin/env bash
echo "installing npm dependencies"
rm -rf node_modules > /dev/null 2>&1
npm cache clean
npm install
echo "running build task"
npm run build
|
kenzanlabs/pipeline-handyman
|
bin/build.sh
|
Shell
|
apache-2.0
| 161 |
#!/bin/bash
#
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#
user="wso2"
action=""
username=""
tenant_domain=""
cartridge=""
tenant_key=""
cartridge_key=""
gitolite_admin="/home/wso2/gitolite-admin/"
git_domain=""
git_repo="/home/git/repositories/"
ADC_repo_notification_url=""
function help {
echo "Usage: manage-git-repo <action> <mandatory arguments>"
echo " Action can be one of the following"
echo " create : create git repo"
echo " destroy: destroy git repo"
echo " delete : delete user from a git repo"
echo " Usage:"
echo " manage-git-repo create <username> <tenant domain> <cartridge alias/name> <ADC repo notification url> <git_domain>"
echo " manage-git-repo destroy <username> <tenant domain> <cartridge alias/name>"
echo " eg:"
echo " manage-git-repo create foo abc.com myphp https://localhost:9445/services/RepoNotificationService/"
echo ""
}
function main {
if [[ ( -z $action || ( -n $action && $action == "help" ) ) ]]; then
help
exit 1
fi
if [[ (( -n $action && $action == "create") && ( -z $tenant_domain || -z $username || -z $cartridge || -z $ADC_repo_notification_url)) ]]; then
help
exit 1
fi
if [[ (( -n $action && $action == "destroy") && ( -z $tenant_domain || -z $username || -z $cartridge)) ]]; then
help
exit 1
fi
}
action=$1
username=$2
tenant_domain=$3
cartridge=$4
ADC_repo_notification_url=$5
git_domain=$6
if [[ $action == "create" ]]; then
    # hack until stratos manager supports a key pair for every user
rm -fr /tmp/${username}*
ssh-keygen -t rsa -N '' -f /tmp/${username}
cd ${gitolite_admin}
git pull
# set public keys
cp -f /tmp/${username}.pub keydir/${username}.pub
    # remove temporarily created files
rm /tmp/${username}.pub
# add repo and permission to conf
echo "" > conf/repos/${tenant_domain}-${cartridge}.conf
echo "repo ${tenant_domain}/${cartridge}.git" >> conf/repos/${tenant_domain}-${cartridge}.conf
echo " RW+ = ${username} ${user} daemon" >> conf/repos/${tenant_domain}-${cartridge}.conf
echo " config gitweb.url = git@${git_domain}:${tenant_domain}/${cartridge}" >> conf/repos/${tenant_domain}-${cartridge}.conf
echo " config receive.denyNonFastforwards = true" >> conf/repos/${tenant_domain}-${cartridge}.conf
echo " config receive.denyDeletes = true" >> conf/repos/${tenant_domain}-${cartridge}.conf
echo "" >> conf/repos/${tenant_domain}-${cartridge}.conf
# git operations
git add keydir/${username}.pub
git add conf/repos/${tenant_domain}-${cartridge}.conf
git commit -a -m "${username} keys added and ${tenant_domain}/${cartridge} repo created"
git pull
git push
# set git push trigger
echo "<soapenv:Envelope xmlns:soapenv=\"http://schemas.xmlsoap.org/soap/envelope/\" xmlns:xsd=\"http://org.apache.axis2/xsd\">
<soapenv:Header/>
<soapenv:Body>
<xsd:notifyRepoUpdate>
<xsd:tenantDomain>${tenant_domain}</xsd:tenantDomain>
<xsd:cartridgeType>${cartridge}</xsd:cartridgeType>
</xsd:notifyRepoUpdate>
</soapenv:Body>
</soapenv:Envelope>" > /tmp/${tenant_domain}-request.xml
echo "#!/bin/bash" > /tmp/${tenant_domain}-post-update
echo "curl -X POST -H \"Content-Type: text/xml\" -d @${git_repo}${tenant_domain}/${cartridge}.git/hooks/request.xml \"${ADC_repo_notification_url}\" --insecure" >> /tmp/${tenant_domain}-post-update
echo "exec git update-server-info" >> /tmp/${tenant_domain}-post-update
sudo mv /tmp/${tenant_domain}-request.xml ${git_repo}${tenant_domain}/${cartridge}.git/hooks/request.xml
sudo mv /tmp/${tenant_domain}-post-update ${git_repo}${tenant_domain}/${cartridge}.git/hooks/post-update
sudo chown git:git ${git_repo}${tenant_domain}/${cartridge}.git/hooks/post-update
sudo chmod 700 ${git_repo}${tenant_domain}/${cartridge}.git/hooks/post-update
fi
if [[ $action == "destroy" ]]; then
cd ${gitolite_admin}
# remove user keys
git rm keydir/${username}.pub
# remove repo from config
git rm conf/repos/${tenant_domain}-${cartridge}.conf
# git push to execute
git pull
git push
    # remove the repo from the repositories dir. ** this should have been done by the step above, but it doesn't happen, so remove it manually.
sudo rm -fr /home/git/repositories/${tenant_domain}/${cartridge}.git
fi
main
|
pkdevbox/stratos
|
products/stratos/modules/distribution/src/bin/manage-git-repo.sh
|
Shell
|
apache-2.0
| 5,210 |
#!/bin/bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Script to fetch latest openapi spec.
# Puts the updated spec at api/openapi-spec/
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
OPENAPI_ROOT_DIR="${KUBE_ROOT}/api/openapi-spec"
source "${KUBE_ROOT}/hack/lib/init.sh"
kube::golang::setup_env
make -C "${KUBE_ROOT}" WHAT=cmd/kube-apiserver
function cleanup()
{
[[ -n ${APISERVER_PID-} ]] && kill ${APISERVER_PID} 1>&2 2>/dev/null
kube::etcd::cleanup
kube::log::status "Clean up complete"
}
trap cleanup EXIT SIGINT
kube::golang::setup_env
apiserver=$(kube::util::find-binary "kube-apiserver")
TMP_DIR=$(mktemp -d /tmp/update-openapi-spec.XXXX)
ETCD_HOST=${ETCD_HOST:-127.0.0.1}
ETCD_PORT=${ETCD_PORT:-2379}
API_PORT=${API_PORT:-8050}
API_HOST=${API_HOST:-127.0.0.1}
kube::etcd::start
echo "dummy_token,admin,admin" > $TMP_DIR/tokenauth.csv
# Start kube-apiserver
kube::log::status "Starting kube-apiserver"
"${KUBE_OUTPUT_HOSTBIN}/kube-apiserver" \
--insecure-bind-address="${API_HOST}" \
--bind-address="${API_HOST}" \
--insecure-port="${API_PORT}" \
--etcd-servers="http://${ETCD_HOST}:${ETCD_PORT}" \
--advertise-address="10.10.10.10" \
--cert-dir="${TMP_DIR}/certs" \
--token-auth-file=$TMP_DIR/tokenauth.csv \
--service-cluster-ip-range="10.0.0.0/24" >/tmp/openapi-api-server.log 2>&1 &
APISERVER_PID=$!
kube::util::wait_for_url "${API_HOST}:${API_PORT}/healthz" "apiserver: "
kube::log::status "Updating " ${OPENAPI_ROOT_DIR}
curl -w "\n" -fs "${API_HOST}:${API_PORT}/swagger.json" > "${OPENAPI_ROOT_DIR}/swagger.json"
kube::log::status "SUCCESS"
# ex: ts=2 sw=2 et filetype=sh
|
kshafiee/kubernetes
|
hack/update-openapi-spec.sh
|
Shell
|
apache-2.0
| 2,222 |
#!/bin/bash
# Datadog Agent install script for Mac OS X.
set -e
logfile=ddagent-install.log
dmg_file=/tmp/datadog-agent.dmg
dmg_url="https://s3.amazonaws.com/dd-agent/datadog-agent-5.4.2.dmg"
# Root user detection
if [ $(echo "$UID") = "0" ]; then
sudo_cmd=''
else
sudo_cmd='sudo'
fi
# get real user (in case of sudo)
real_user=`logname`
export TMPDIR=`sudo -u $real_user getconf DARWIN_USER_TEMP_DIR`
cmd_real_user="sudo -Eu $real_user"
# In order to install with the right user
rm -f /tmp/datadog-install-user
echo $real_user > /tmp/datadog-install-user
function on_error() {
printf "\033[31m$ERROR_MESSAGE
It looks like you hit an issue when trying to install the Agent.
Troubleshooting and basic usage information for the Agent are available at:
http://docs.datadoghq.com/guides/basic_agent_usage/
If you're still having problems, please send an email to [email protected]
with the contents of ddagent-install.log and we'll do our very best to help you
solve your problem.\n\033[0m\n"
}
trap on_error ERR
if [ -n "$DD_API_KEY" ]; then
apikey=$DD_API_KEY
fi
if [ ! $apikey ]; then
printf "\033[31mAPI key not available in DD_API_KEY environment variable.\033[0m\n"
exit 1;
fi
# Install the agent
printf "\033[34m\n* Downloading and installing datadog-agent\n\033[0m"
rm -f $dmg_file
curl $dmg_url > $dmg_file
if [ "$sudo_cmd" = "sudo" ]; then
printf "\033[34m\n Your password is needed to install and configure the agent \n\033[0m"
fi
$sudo_cmd hdiutil detach "/Volumes/datadog_agent" >/dev/null 2>&1 || true
$sudo_cmd hdiutil attach "$dmg_file" -mountpoint "/Volumes/datadog_agent" >/dev/null
cd / && $sudo_cmd /usr/sbin/installer -pkg `find "/Volumes/datadog_agent" -name \*.pkg 2>/dev/null` -target / >/dev/null
$sudo_cmd hdiutil detach "/Volumes/datadog_agent" >/dev/null
# Set the configuration
if egrep 'api_key:( APIKEY)?$' "/opt/datadog-agent/etc/datadog.conf" > /dev/null 2>&1; then
printf "\033[34m\n* Adding your API key to the Agent configuration: datadog.conf\n\033[0m\n"
$sudo_cmd sh -c "sed -i '' 's/api_key:.*/api_key: $apikey/' \"/opt/datadog-agent/etc/datadog.conf\""
$sudo_cmd chown $real_user:admin "/opt/datadog-agent/etc/datadog.conf"
printf "\033[34m* Restarting the Agent...\n\033[0m\n"
$cmd_real_user "/opt/datadog-agent/bin/datadog-agent" restart >/dev/null
else
printf "\033[34m\n* Keeping old datadog.conf configuration file\n\033[0m\n"
fi
# Starting the app
$cmd_real_user open -a 'Datadog Agent.app'
# Wait for metrics to be submitted by the forwarder
printf "\033[32m
Your Agent has started up for the first time. We're currently verifying that
data is being submitted. You should see your Agent show up in Datadog shortly
at:
https://app.datadoghq.com/infrastructure\033[0m
Waiting for metrics..."
c=0
while [ "$c" -lt "30" ]; do
sleep 1
echo -n "."
c=$(($c+1))
done
curl -f http://127.0.0.1:17123/status?threshold=0 > /dev/null 2>&1
success=$?
while [ "$success" -gt "0" ]; do
sleep 1
echo -n "."
curl -f http://127.0.0.1:17123/status?threshold=0 > /dev/null 2>&1
success=$?
done
# Metrics are submitted, echo some instructions and exit
printf "\033[32m
Your Agent is running and functioning properly. It will continue to run in the
background and submit metrics to Datadog.
If you ever want to stop the Agent, please use the Datadog Agent App or
datadog-agent command.
It will start automatically at login, if you want to enable it at startup,
run these commands: (the agent will still run as your user)
sudo cp '/opt/datadog-agent/etc/com.datadoghq.agent.plist' /Library/LaunchDaemons
sudo launchctl load -w /Library/LaunchDaemons/com.datadoghq.agent.plist
\033[0m"
|
yuecong/dd-agent
|
packaging/osx/install.sh
|
Shell
|
bsd-3-clause
| 3,728 |
#!/bin/sh
#
# show_files.sh
export XBPS_TARGET_ARCH="$2" XBPS_DISTDIR=/hostrepo
while read -r pkg; do
for subpkg in $(xsubpkg $pkg); do
/bin/echo -e "\x1b[32mFiles of $subpkg:\x1b[0m"
xbps-query --repository=$HOME/hostdir/binpkgs \
--repository=$HOME/hostdir/binpkgs/nonfree \
-f "$subpkg"
done
done < /tmp/templates
|
necrophcodr/void-packages
|
common/travis/show_files.sh
|
Shell
|
bsd-2-clause
| 339 |
#!/bin/sh
# Test the -I option added to coreutils 6.0
# Copyright (C) 2006-2015 Free Software Foundation, Inc.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
. "${srcdir=.}/tests/init.sh"; path_prepend_ ./src
print_ver_ rm
mkdir -p dir1-1 dir2-1 dir2-2 || framework_failure_
touch file1-1 file2-1 file2-2 file2-3 file3-1 file3-2 file3-3 file3-4 \
|| framework_failure_
echo y > in-y || framework_failure_
echo n > in-n || framework_failure_
rm -f out err || framework_failure_
# The prompt has a trailing space, and no newline, so an extra
# 'echo .' is inserted after each rm to make it obvious what was asked.
echo 'one file, no recursion' > err || framework_failure_
rm -I file1-* < in-n >> out 2>> err || fail=1
echo . >> err || fail=1
test -f file1-1 && fail=1
echo 'one file, read only, answer no' >> err || framework_failure_
if ls /dev/stdin >/dev/null 2>&1; then
touch file1-1 || framework_failure_
chmod a-w file1-1 || framework_failure_
if ! test -w file1-1; then
# root won't get prompted
write_prot_msg1="rm: remove write-protected regular empty file 'file1-1'? "
fi
rm ---presume-input-tty -I file1-* < in-n >> out 2>> err || fail=1
echo . >> err || fail=1
if test "$write_prot_msg1"; then
test -f file1-1 || fail=1
fi
else
echo '.' >> err || framework_failure_
fi
echo 'three files, no recursion' >> err || framework_failure_
rm -I file2-* < in-n >> out 2>> err || fail=1
echo . >> err || fail=1
test -f file2-1 && fail=1
test -f file2-2 && fail=1
test -f file2-3 && fail=1
echo 'four files, no recursion, answer no' >> err || framework_failure_
rm -I file3-* < in-n >> out 2>> err || fail=1
echo . >> err || fail=1
test -f file3-1 || fail=1
test -f file3-2 || fail=1
test -f file3-3 || fail=1
test -f file3-4 || fail=1
echo 'four files, no recursion, answer yes' >> err || framework_failure_
rm -I file3-* < in-y >> out 2>> err || fail=1
echo . >> err || fail=1
test -f file3-1 && fail=1
test -f file3-2 && fail=1
test -f file3-3 && fail=1
test -f file3-4 && fail=1
echo 'four files, no recursion, 1 read only, answer yes no' >> err \
|| framework_failure_
if ls /dev/stdin >/dev/null 2>&1; then
touch file3-1 file3-2 file3-3 file3-4 || framework_failure_
echo non_empty > file3-4 || framework_failure_ # to shorten diagnostic
chmod a-w file3-4 || framework_failure_
if ! test -w file3-4; then
# root won't get prompted
write_prot_msg2="rm: remove write-protected regular file 'file3-4'? "
fi
cat in-y in-n | rm ---presume-input-tty -I file3-* >> out 2>> err || fail=1
echo . >> err || fail=1
test -f file3-1 && fail=1
test -f file3-2 && fail=1
test -f file3-3 && fail=1
if test "$write_prot_msg2"; then
test -f file3-4 || fail=1
fi
else
echo 'rm: remove 4 arguments? .' >> err || framework_failure_
fi
echo 'one file, recursion, answer no' >> err || framework_failure_
rm -I -R dir1-* < in-n >> out 2>> err || fail=1
echo . >> err || fail=1
test -d dir1-1 || fail=1
echo 'one file, recursion, answer yes' >> err || framework_failure_
rm -I -R dir1-* < in-y >> out 2>> err || fail=1
echo . >> err || fail=1
test -d dir1-1 && fail=1
echo 'multiple files, recursion, answer no' >> err || framework_failure_
rm -I -R dir2-* < in-n >> out 2>> err || fail=1
echo . >> err || fail=1
test -d dir2-1 || fail=1
test -d dir2-2 || fail=1
echo 'multiple files, recursion, answer yes' >> err || framework_failure_
rm -I -R dir2-* < in-y >> out 2>> err || fail=1
echo . >> err || fail=1
test -d dir2-1 && fail=1
test -d dir2-2 && fail=1
cat <<\EOF > expout || fail=1
EOF
cat <<EOF > experr || fail=1
one file, no recursion
.
one file, read only, answer no
$write_prot_msg1.
three files, no recursion
.
four files, no recursion, answer no
rm: remove 4 arguments? .
four files, no recursion, answer yes
rm: remove 4 arguments? .
four files, no recursion, 1 read only, answer yes no
rm: remove 4 arguments? $write_prot_msg2.
one file, recursion, answer no
rm: remove 1 argument recursively? .
one file, recursion, answer yes
rm: remove 1 argument recursively? .
multiple files, recursion, answer no
rm: remove 2 arguments recursively? .
multiple files, recursion, answer yes
rm: remove 2 arguments recursively? .
EOF
compare expout out || fail=1
compare experr err || fail=1
Exit $fail
|
kdave/coreutils
|
tests/rm/interactive-once.sh
|
Shell
|
gpl-3.0
| 4,861 |
#!/bin/bash
#
# update-pandoc
#
# Copyright (C) 2009-12 by RStudio, Inc.
#
# Unless you have received this program directly from RStudio pursuant
# to the terms of a commercial license agreement with RStudio, then
# this program is licensed to you under the terms of version 3 of the
# GNU Affero General Public License. This program is distributed WITHOUT
# ANY EXPRESS OR IMPLIED WARRANTY, INCLUDING THOSE OF NON-INFRINGEMENT,
# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Please refer to the
# AGPL (http://www.gnu.org/licenses/agpl-3.0.txt) for more details.
#
#
set -e
# install dir
INSTALL_DIR=`pwd`
# script dir
SCRIPT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
cd $SCRIPT_DIR
# remove then re-install mathjax
MATHJAX_DIR=mathjax-23
rm -rf $MATHJAX_DIR
./install-mathjax
# back to install dir
cd $INSTALL_DIR
|
ppc64le/build-scripts
|
r/rstudio/Dockerfiles/0.99.903_ubuntu_14.04/common/update-mathjax.sh
|
Shell
|
apache-2.0
| 842 |
#!/bin/bash -l
set -eo pipefail
function setup_gpadmin_user() {
./gpdb_src/concourse/scripts/setup_gpadmin_user.bash "$TEST_OS"
}
# Get the ssh private key from REMOTE_KEY, which is assumed to
# be encoded in base64. We can't pass the key content directly
# since newlines don't work well in environment variables.
function import_remote_key() {
echo -n $REMOTE_KEY | base64 -d > ~/remote.key
chmod 400 ~/remote.key
eval `ssh-agent -s`
ssh-add ~/remote.key
ssh-keyscan -p $REMOTE_PORT $REMOTE_HOST > pubkey
awk '{printf "[%s]:", $1 }' pubkey > tmp
echo -n $REMOTE_PORT >> tmp
awk '{$1 = ""; print $0; }' pubkey >> tmp
cat tmp >> ~/.ssh/known_hosts
}
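# The awk rewrite above produces the known_hosts form for a non-default port,
# e.g. (illustrative): [build.example.com]:2222 ssh-rsa AAAAB3Nza...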
# Since we are cloning and building on a remote machine,
# files won't be deleted when the Concourse container is destroyed.
# We have to clean everything up for the build to succeed.
function cleanup() {
local SESSION_ID=$1
ssh -T -p $REMOTE_PORT $REMOTE_USER@$REMOTE_HOST <<- EOF
rm -rf $GPDB_DIR
rm -rf gpdb-compile/$SESSION_ID
EOF
}
function _main() {
if [ -z "$REMOTE_PORT" ]; then
REMOTE_PORT=22
fi
# Get session id from previous test task
SESSION_ID=$(cat session_id/session_id)
time setup_gpadmin_user
time import_remote_key
time cleanup $SESSION_ID
}
_main "$@"
|
Chibin/gpdb
|
concourse/scripts/aix_remote_cleanup.bash
|
Shell
|
apache-2.0
| 1,291 |
#!/bin/bash -e
[ -d m4 ] || mkdir m4
autopoint --force
libtoolize --copy --force
aclocal -I m4
autoconf
autoheader --force
automake --foreign --add-missing --copy
rm -rf autom4te.cache
( cd widgets && ./autogen.sh )
|
karmix/anaconda
|
autogen.sh
|
Shell
|
gpl-2.0
| 216 |
#!/usr/bin/env bash
# Init vars
if [[ -z "$SERVICE_PHPFPM_OPTS" ]]; then SERVICE_PHPFPM_OPTS=""; fi
source /opt/docker/bin/config.sh
includeScriptDir "/opt/docker/bin/service.d/php-fpm.d/"
exec /usr/local/bin/php-fpm --nodaemonize $SERVICE_PHPFPM_OPTS
|
webdevops/Dockerfile
|
provisioning/php/general/bin/service.d/php-fpm.sh
|
Shell
|
mit
| 256 |
#! /bin/bash
ARDUBLOCK_ZIP=ardublock.zip
ARDUBLOCK_DIR=ardublock.translations
if [ -f $ARDUBLOCK_ZIP ]; then
if [ -d $ARDUBLOCK_DIR ]; then
rm -rf $ARDUBLOCK_DIR
fi
mkdir $ARDUBLOCK_DIR
cd $ARDUBLOCK_DIR
unzip -q ../$ARDUBLOCK_ZIP
translated_files=`ls -l */* | grep -v " 93" | awk '{print $9}'`
for file in $translated_files; do
prop_file=`basename $file .properties`_`dirname $file`.properties
prop_file=`echo $prop_file | sed -e s/-/_/g`
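    # e.g. (illustrative): pt-BR/ardublock.properties -> ardublock_pt_BR.properties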
echo "process $file"
native2ascii -encoding UTF8 $file > ../$prop_file
done
cd ..
rm -rf $ARDUBLOCK_DIR
fi
|
Elecrow-keen/Ardublock
|
target/classes/com/ardublock/block/i18n.sh
|
Shell
|
gpl-3.0
| 574 |
#!/bin/bash
if [[ $target_platform =~ linux.* ]] || [[ $target_platform == win-32 ]] || [[ $target_platform == win-64 ]] || [[ $target_platform == osx-64 ]]; then
export DISABLE_AUTOBREW=1
$R CMD INSTALL --build .
else
mkdir -p $PREFIX/lib/R/library/leapp
mv * $PREFIX/lib/R/library/leapp
if [[ $target_platform == osx-64 ]]; then
pushd $PREFIX
for libdir in lib/R/lib lib/R/modules lib/R/library lib/R/bin/exec sysroot/usr/lib; do
pushd $libdir || exit 1
for SHARED_LIB in $(find . -type f -iname "*.dylib" -or -iname "*.so" -or -iname "R"); do
echo "fixing SHARED_LIB $SHARED_LIB"
install_name_tool -change /Library/Frameworks/R.framework/Versions/3.5.0-MRO/Resources/lib/libR.dylib "$PREFIX"/lib/R/lib/libR.dylib $SHARED_LIB || true
install_name_tool -change /Library/Frameworks/R.framework/Versions/3.5/Resources/lib/libR.dylib "$PREFIX"/lib/R/lib/libR.dylib $SHARED_LIB || true
install_name_tool -change /usr/local/clang4/lib/libomp.dylib "$PREFIX"/lib/libomp.dylib $SHARED_LIB || true
install_name_tool -change /usr/local/gfortran/lib/libgfortran.3.dylib "$PREFIX"/lib/libgfortran.3.dylib $SHARED_LIB || true
install_name_tool -change /Library/Frameworks/R.framework/Versions/3.5/Resources/lib/libquadmath.0.dylib "$PREFIX"/lib/libquadmath.0.dylib $SHARED_LIB || true
install_name_tool -change /usr/local/gfortran/lib/libquadmath.0.dylib "$PREFIX"/lib/libquadmath.0.dylib $SHARED_LIB || true
install_name_tool -change /Library/Frameworks/R.framework/Versions/3.5/Resources/lib/libgfortran.3.dylib "$PREFIX"/lib/libgfortran.3.dylib $SHARED_LIB || true
install_name_tool -change /usr/lib/libgcc_s.1.dylib "$PREFIX"/lib/libgcc_s.1.dylib $SHARED_LIB || true
install_name_tool -change /usr/lib/libiconv.2.dylib "$PREFIX"/sysroot/usr/lib/libiconv.2.dylib $SHARED_LIB || true
install_name_tool -change /usr/lib/libncurses.5.4.dylib "$PREFIX"/sysroot/usr/lib/libncurses.5.4.dylib $SHARED_LIB || true
install_name_tool -change /usr/lib/libicucore.A.dylib "$PREFIX"/sysroot/usr/lib/libicucore.A.dylib $SHARED_LIB || true
install_name_tool -change /usr/lib/libexpat.1.dylib "$PREFIX"/lib/libexpat.1.dylib $SHARED_LIB || true
install_name_tool -change /usr/lib/libcurl.4.dylib "$PREFIX"/lib/libcurl.4.dylib $SHARED_LIB || true
install_name_tool -change /usr/lib/libc++.1.dylib "$PREFIX"/lib/libc++.1.dylib $SHARED_LIB || true
install_name_tool -change /Library/Frameworks/R.framework/Versions/3.5/Resources/lib/libc++.1.dylib "$PREFIX"/lib/libc++.1.dylib $SHARED_LIB || true
done
popd
done
popd
fi
fi
|
ostrokach/bioconda-recipes
|
recipes/r-leapp/build.sh
|
Shell
|
mit
| 2,755 |
#!/bin/sh
set -x -e
make
cp RepeatScout build_lmer_table ${PREFIX}/bin
|
dmaticzka/bioconda-recipes
|
recipes/repeatscout/build.sh
|
Shell
|
mit
| 72 |
#!/usr/bin/env bash
./reinstate.py "$@"
./checkin.py "$@"
|
foobarbazblarg/stayclean
|
stayclean-2021-january/reinstate-and-checkin.sh
|
Shell
|
mit
| 59 |
npm uninstall $REMOVE_DEPS
npm install $DEPS
|
maratonato/slides
|
treinamento1/node_modules/grunt-retro/test/install_deps.sh
|
Shell
|
mit
| 44 |
#!/bin/sh
#
# Copyright (c) 2007 Andy Parkins
#
test_description='for-each-ref test'
. ./test-lib.sh
. "$TEST_DIRECTORY"/lib-gpg.sh
# Mon Jul 3 15:18:43 2006 +0000
datestamp=1151939923
setdate_and_increment () {
GIT_COMMITTER_DATE="$datestamp +0200"
datestamp=$(expr "$datestamp" + 1)
GIT_AUTHOR_DATE="$datestamp +0200"
datestamp=$(expr "$datestamp" + 1)
export GIT_COMMITTER_DATE GIT_AUTHOR_DATE
}
test_expect_success setup '
setdate_and_increment &&
echo "Using $datestamp" > one &&
git add one &&
git commit -m "Initial" &&
setdate_and_increment &&
git tag -a -m "Tagging at $datestamp" testtag &&
git update-ref refs/remotes/origin/master master &&
git remote add origin nowhere &&
git config branch.master.remote origin &&
git config branch.master.merge refs/heads/master &&
git remote add myfork elsewhere &&
git config remote.pushdefault myfork &&
git config push.default current
'
test_atom() {
case "$1" in
head) ref=refs/heads/master ;;
tag) ref=refs/tags/testtag ;;
*) ref=$1 ;;
esac
printf '%s\n' "$3" >expected
test_expect_${4:-success} $PREREQ "basic atom: $1 $2" "
git for-each-ref --format='%($2)' $ref >actual &&
sanitize_pgp <actual >actual.clean &&
test_cmp expected actual.clean
"
}
test_atom head refname refs/heads/master
test_atom head upstream refs/remotes/origin/master
test_atom head push refs/remotes/myfork/master
test_atom head objecttype commit
test_atom head objectsize 171
test_atom head objectname $(git rev-parse refs/heads/master)
test_atom head tree $(git rev-parse refs/heads/master^{tree})
test_atom head parent ''
test_atom head numparent 0
test_atom head object ''
test_atom head type ''
test_atom head '*objectname' ''
test_atom head '*objecttype' ''
test_atom head author 'A U Thor <[email protected]> 1151939924 +0200'
test_atom head authorname 'A U Thor'
test_atom head authoremail '<[email protected]>'
test_atom head authordate 'Mon Jul 3 17:18:44 2006 +0200'
test_atom head committer 'C O Mitter <[email protected]> 1151939923 +0200'
test_atom head committername 'C O Mitter'
test_atom head committeremail '<[email protected]>'
test_atom head committerdate 'Mon Jul 3 17:18:43 2006 +0200'
test_atom head tag ''
test_atom head tagger ''
test_atom head taggername ''
test_atom head taggeremail ''
test_atom head taggerdate ''
test_atom head creator 'C O Mitter <[email protected]> 1151939923 +0200'
test_atom head creatordate 'Mon Jul 3 17:18:43 2006 +0200'
test_atom head subject 'Initial'
test_atom head contents:subject 'Initial'
test_atom head body ''
test_atom head contents:body ''
test_atom head contents:signature ''
test_atom head contents 'Initial
'
test_atom head HEAD '*'
test_atom tag refname refs/tags/testtag
test_atom tag upstream ''
test_atom tag push ''
test_atom tag objecttype tag
test_atom tag objectsize 154
test_atom tag objectname $(git rev-parse refs/tags/testtag)
test_atom tag tree ''
test_atom tag parent ''
test_atom tag numparent ''
test_atom tag object $(git rev-parse refs/tags/testtag^0)
test_atom tag type 'commit'
test_atom tag '*objectname' '67a36f10722846e891fbada1ba48ed035de75581'
test_atom tag '*objecttype' 'commit'
test_atom tag author ''
test_atom tag authorname ''
test_atom tag authoremail ''
test_atom tag authordate ''
test_atom tag committer ''
test_atom tag committername ''
test_atom tag committeremail ''
test_atom tag committerdate ''
test_atom tag tag 'testtag'
test_atom tag tagger 'C O Mitter <[email protected]> 1151939925 +0200'
test_atom tag taggername 'C O Mitter'
test_atom tag taggeremail '<[email protected]>'
test_atom tag taggerdate 'Mon Jul 3 17:18:45 2006 +0200'
test_atom tag creator 'C O Mitter <[email protected]> 1151939925 +0200'
test_atom tag creatordate 'Mon Jul 3 17:18:45 2006 +0200'
test_atom tag subject 'Tagging at 1151939927'
test_atom tag contents:subject 'Tagging at 1151939927'
test_atom tag body ''
test_atom tag contents:body ''
test_atom tag contents:signature ''
test_atom tag contents 'Tagging at 1151939927
'
test_atom tag HEAD ' '
test_expect_success 'Check invalid atoms names are errors' '
test_must_fail git for-each-ref --format="%(INVALID)" refs/heads
'
test_expect_success 'Check format specifiers are ignored in naming date atoms' '
git for-each-ref --format="%(authordate)" refs/heads &&
git for-each-ref --format="%(authordate:default) %(authordate)" refs/heads &&
git for-each-ref --format="%(authordate) %(authordate:default)" refs/heads &&
git for-each-ref --format="%(authordate:default) %(authordate:default)" refs/heads
'
test_expect_success 'Check valid format specifiers for date fields' '
git for-each-ref --format="%(authordate:default)" refs/heads &&
git for-each-ref --format="%(authordate:relative)" refs/heads &&
git for-each-ref --format="%(authordate:short)" refs/heads &&
git for-each-ref --format="%(authordate:local)" refs/heads &&
git for-each-ref --format="%(authordate:iso8601)" refs/heads &&
git for-each-ref --format="%(authordate:rfc2822)" refs/heads
'
test_expect_success 'Check invalid format specifiers are errors' '
test_must_fail git for-each-ref --format="%(authordate:INVALID)" refs/heads
'
cat >expected <<\EOF
'refs/heads/master' 'Mon Jul 3 17:18:43 2006 +0200' 'Mon Jul 3 17:18:44 2006 +0200'
'refs/tags/testtag' 'Mon Jul 3 17:18:45 2006 +0200'
EOF
test_expect_success 'Check unformatted date fields output' '
(git for-each-ref --shell --format="%(refname) %(committerdate) %(authordate)" refs/heads &&
git for-each-ref --shell --format="%(refname) %(taggerdate)" refs/tags) >actual &&
test_cmp expected actual
'
test_expect_success 'Check format "default" formatted date fields output' '
f=default &&
(git for-each-ref --shell --format="%(refname) %(committerdate:$f) %(authordate:$f)" refs/heads &&
git for-each-ref --shell --format="%(refname) %(taggerdate:$f)" refs/tags) >actual &&
test_cmp expected actual
'
# Don't know how to do relative check because I can't know when this script
# is going to be run and can't fake the current time to git, and hence can't
# provide expected output. Instead, I'll just make sure that "relative"
# doesn't exit in error
#
#cat >expected <<\EOF
#
#EOF
#
test_expect_success 'Check format "relative" date fields output' '
f=relative &&
(git for-each-ref --shell --format="%(refname) %(committerdate:$f) %(authordate:$f)" refs/heads &&
git for-each-ref --shell --format="%(refname) %(taggerdate:$f)" refs/tags) >actual
'
cat >expected <<\EOF
'refs/heads/master' '2006-07-03' '2006-07-03'
'refs/tags/testtag' '2006-07-03'
EOF
test_expect_success 'Check format "short" date fields output' '
f=short &&
(git for-each-ref --shell --format="%(refname) %(committerdate:$f) %(authordate:$f)" refs/heads &&
git for-each-ref --shell --format="%(refname) %(taggerdate:$f)" refs/tags) >actual &&
test_cmp expected actual
'
cat >expected <<\EOF
'refs/heads/master' 'Mon Jul 3 15:18:43 2006' 'Mon Jul 3 15:18:44 2006'
'refs/tags/testtag' 'Mon Jul 3 15:18:45 2006'
EOF
test_expect_success 'Check format "local" date fields output' '
f=local &&
(git for-each-ref --shell --format="%(refname) %(committerdate:$f) %(authordate:$f)" refs/heads &&
git for-each-ref --shell --format="%(refname) %(taggerdate:$f)" refs/tags) >actual &&
test_cmp expected actual
'
cat >expected <<\EOF
'refs/heads/master' '2006-07-03 17:18:43 +0200' '2006-07-03 17:18:44 +0200'
'refs/tags/testtag' '2006-07-03 17:18:45 +0200'
EOF
test_expect_success 'Check format "iso8601" date fields output' '
f=iso8601 &&
(git for-each-ref --shell --format="%(refname) %(committerdate:$f) %(authordate:$f)" refs/heads &&
git for-each-ref --shell --format="%(refname) %(taggerdate:$f)" refs/tags) >actual &&
test_cmp expected actual
'
cat >expected <<\EOF
'refs/heads/master' 'Mon, 3 Jul 2006 17:18:43 +0200' 'Mon, 3 Jul 2006 17:18:44 +0200'
'refs/tags/testtag' 'Mon, 3 Jul 2006 17:18:45 +0200'
EOF
test_expect_success 'Check format "rfc2822" date fields output' '
f=rfc2822 &&
(git for-each-ref --shell --format="%(refname) %(committerdate:$f) %(authordate:$f)" refs/heads &&
git for-each-ref --shell --format="%(refname) %(taggerdate:$f)" refs/tags) >actual &&
test_cmp expected actual
'
test_expect_success 'Check format of strftime date fields' '
echo "my date is 2006-07-03" >expected &&
git for-each-ref \
--format="%(authordate:format:my date is %Y-%m-%d)" \
refs/heads >actual &&
test_cmp expected actual
'
test_expect_success 'exercise strftime with odd fields' '
echo >expected &&
git for-each-ref --format="%(authordate:format:)" refs/heads >actual &&
test_cmp expected actual &&
long="long format -- $_z40$_z40$_z40$_z40$_z40$_z40$_z40" &&
echo $long >expected &&
git for-each-ref --format="%(authordate:format:$long)" refs/heads >actual &&
test_cmp expected actual
'
cat >expected <<\EOF
refs/heads/master
refs/remotes/origin/master
refs/tags/testtag
EOF
test_expect_success 'Verify ascending sort' '
git for-each-ref --format="%(refname)" --sort=refname >actual &&
test_cmp expected actual
'
cat >expected <<\EOF
refs/tags/testtag
refs/remotes/origin/master
refs/heads/master
EOF
test_expect_success 'Verify descending sort' '
git for-each-ref --format="%(refname)" --sort=-refname >actual &&
test_cmp expected actual
'
cat >expected <<\EOF
'refs/heads/master'
'refs/remotes/origin/master'
'refs/tags/testtag'
EOF
test_expect_success 'Quoting style: shell' '
git for-each-ref --shell --format="%(refname)" >actual &&
test_cmp expected actual
'
test_expect_success 'Quoting style: perl' '
git for-each-ref --perl --format="%(refname)" >actual &&
test_cmp expected actual
'
test_expect_success 'Quoting style: python' '
git for-each-ref --python --format="%(refname)" >actual &&
test_cmp expected actual
'
cat >expected <<\EOF
"refs/heads/master"
"refs/remotes/origin/master"
"refs/tags/testtag"
EOF
test_expect_success 'Quoting style: tcl' '
git for-each-ref --tcl --format="%(refname)" >actual &&
test_cmp expected actual
'
for i in "--perl --shell" "-s --python" "--python --tcl" "--tcl --perl"; do
test_expect_success "more than one quoting style: $i" "
git for-each-ref $i 2>&1 | (read line &&
case \$line in
\"error: more than one quoting style\"*) : happy;;
*) false
esac)
"
done
cat >expected <<\EOF
master
testtag
EOF
test_expect_success 'Check short refname format' '
(git for-each-ref --format="%(refname:short)" refs/heads &&
git for-each-ref --format="%(refname:short)" refs/tags) >actual &&
test_cmp expected actual
'
cat >expected <<EOF
origin/master
EOF
test_expect_success 'Check short upstream format' '
git for-each-ref --format="%(upstream:short)" refs/heads >actual &&
test_cmp expected actual
'
test_expect_success 'setup for upstream:track[short]' '
test_commit two
'
cat >expected <<EOF
[ahead 1]
EOF
test_expect_success 'Check upstream:track format' '
git for-each-ref --format="%(upstream:track)" refs/heads >actual &&
test_cmp expected actual
'
cat >expected <<EOF
>
EOF
test_expect_success 'Check upstream:trackshort format' '
git for-each-ref --format="%(upstream:trackshort)" refs/heads >actual &&
test_cmp expected actual
'
test_expect_success 'Check that :track[short] cannot be used with other atoms' '
test_must_fail git for-each-ref --format="%(refname:track)" 2>/dev/null &&
test_must_fail git for-each-ref --format="%(refname:trackshort)" 2>/dev/null
'
test_expect_success 'Check that :track[short] works when upstream is invalid' '
cat >expected <<-\EOF &&
EOF
test_when_finished "git config branch.master.merge refs/heads/master" &&
git config branch.master.merge refs/heads/does-not-exist &&
git for-each-ref \
--format="%(upstream:track)$LF%(upstream:trackshort)" \
refs/heads >actual &&
test_cmp expected actual
'
test_expect_success '%(push) supports tracking specifiers, too' '
echo "[ahead 1]" >expected &&
git for-each-ref --format="%(push:track)" refs/heads >actual &&
test_cmp expected actual
'
cat >expected <<EOF
$(git rev-parse --short HEAD)
EOF
test_expect_success 'Check short objectname format' '
git for-each-ref --format="%(objectname:short)" refs/heads >actual &&
test_cmp expected actual
'
test_expect_success 'Check for invalid refname format' '
test_must_fail git for-each-ref --format="%(refname:INVALID)"
'
get_color ()
{
git config --get-color no.such.slot "$1"
}
cat >expected <<EOF
$(git rev-parse --short refs/heads/master) $(get_color green)master$(get_color reset)
$(git rev-parse --short refs/remotes/origin/master) $(get_color green)origin/master$(get_color reset)
$(git rev-parse --short refs/tags/testtag) $(get_color green)testtag$(get_color reset)
$(git rev-parse --short refs/tags/two) $(get_color green)two$(get_color reset)
EOF
test_expect_success 'Check %(color:...) ' '
git for-each-ref --format="%(objectname:short) %(color:green)%(refname:short)" >actual &&
test_cmp expected actual
'
cat >expected <<\EOF
heads/master
tags/master
EOF
test_expect_success 'Check ambiguous head and tag refs (strict)' '
git config --bool core.warnambiguousrefs true &&
git checkout -b newtag &&
echo "Using $datestamp" > one &&
git add one &&
git commit -m "Branch" &&
setdate_and_increment &&
git tag -m "Tagging at $datestamp" master &&
git for-each-ref --format "%(refname:short)" refs/heads/master refs/tags/master >actual &&
test_cmp expected actual
'
cat >expected <<\EOF
heads/master
master
EOF
test_expect_success 'Check ambiguous head and tag refs (loose)' '
git config --bool core.warnambiguousrefs false &&
git for-each-ref --format "%(refname:short)" refs/heads/master refs/tags/master >actual &&
test_cmp expected actual
'
cat >expected <<\EOF
heads/ambiguous
ambiguous
EOF
test_expect_success 'Check ambiguous head and tag refs II (loose)' '
git checkout master &&
git tag ambiguous testtag^0 &&
git branch ambiguous testtag^0 &&
git for-each-ref --format "%(refname:short)" refs/heads/ambiguous refs/tags/ambiguous >actual &&
test_cmp expected actual
'
test_expect_success 'an unusual tag with an incomplete line' '
git tag -m "bogo" bogo &&
bogo=$(git cat-file tag bogo) &&
bogo=$(printf "%s" "$bogo" | git mktag) &&
git tag -f bogo "$bogo" &&
git for-each-ref --format "%(body)" refs/tags/bogo
'
test_expect_success 'create tag with subject and body content' '
cat >>msg <<-\EOF &&
the subject line
first body line
second body line
EOF
git tag -F msg subject-body
'
test_atom refs/tags/subject-body subject 'the subject line'
test_atom refs/tags/subject-body body 'first body line
second body line
'
test_atom refs/tags/subject-body contents 'the subject line
first body line
second body line
'
test_expect_success 'create tag with multiline subject' '
cat >msg <<-\EOF &&
first subject line
second subject line
first body line
second body line
EOF
git tag -F msg multiline
'
test_atom refs/tags/multiline subject 'first subject line second subject line'
test_atom refs/tags/multiline contents:subject 'first subject line second subject line'
test_atom refs/tags/multiline body 'first body line
second body line
'
test_atom refs/tags/multiline contents:body 'first body line
second body line
'
test_atom refs/tags/multiline contents:signature ''
test_atom refs/tags/multiline contents 'first subject line
second subject line
first body line
second body line
'
test_expect_success GPG 'create signed tags' '
git tag -s -m "" signed-empty &&
git tag -s -m "subject line" signed-short &&
cat >msg <<-\EOF &&
subject line
body contents
EOF
git tag -s -F msg signed-long
'
sig='-----BEGIN PGP SIGNATURE-----
-----END PGP SIGNATURE-----
'
PREREQ=GPG
test_atom refs/tags/signed-empty subject ''
test_atom refs/tags/signed-empty contents:subject ''
test_atom refs/tags/signed-empty body "$sig"
test_atom refs/tags/signed-empty contents:body ''
test_atom refs/tags/signed-empty contents:signature "$sig"
test_atom refs/tags/signed-empty contents "$sig"
test_atom refs/tags/signed-short subject 'subject line'
test_atom refs/tags/signed-short contents:subject 'subject line'
test_atom refs/tags/signed-short body "$sig"
test_atom refs/tags/signed-short contents:body ''
test_atom refs/tags/signed-short contents:signature "$sig"
test_atom refs/tags/signed-short contents "subject line
$sig"
test_atom refs/tags/signed-long subject 'subject line'
test_atom refs/tags/signed-long contents:subject 'subject line'
test_atom refs/tags/signed-long body "body contents
$sig"
test_atom refs/tags/signed-long contents:body 'body contents
'
test_atom refs/tags/signed-long contents:signature "$sig"
test_atom refs/tags/signed-long contents "subject line
body contents
$sig"
cat >expected <<EOF
$(git rev-parse refs/tags/master) <[email protected]> refs/tags/master
$(git rev-parse refs/tags/bogo) <[email protected]> refs/tags/bogo
EOF
test_expect_success 'Verify sort with multiple keys' '
git for-each-ref --format="%(objectname) %(taggeremail) %(refname)" --sort=objectname --sort=taggeremail \
refs/tags/bogo refs/tags/master > actual &&
test_cmp expected actual
'
test_done
|
alex-zhang/git
|
t/t6300-for-each-ref.sh
|
Shell
|
gpl-2.0
| 17,048 |
SCRIPT_NAME=elf
OUTPUT_FORMAT="elf32-i386"
NO_RELA_RELOCS=yes
ENTRY=_main
TEXT_BASE=0x0
DYN_TEXT_BASE=0x00400000
TEXT_START_ADDR="(DEFINED(_DYNAMIC) ? ${DYN_TEXT_BASE} : ${TEXT_BASE})"
case ${LD_FLAG} in
n|N) TEXT_START_ADDR=0x1000 ;;
esac
MAXPAGESIZE="CONSTANT (MAXPAGESIZE)"
ARCH=i386
MACHINE=
NOP=0x9090
TEMPLATE_NAME=elf32
GENERATE_SHLIB_SCRIPT=yes
ELF_INTERPRETER_NAME=\"/usr/lib/ld.so.1\"
# Leave room of SIZEOF_HEADERS before text.
EMBEDDED=
|
jlspyaozhongkai/Uter
|
third_party_backup/binutils-2.25/ld/emulparams/i386lynx.sh
|
Shell
|
gpl-3.0
| 454 |
#!/usr/bin/env bash
#
# Bash wrapper for running groker.py in crontab
#
test_python () {
if ! ${PYTHON_BIN} -c 'import yaml ; import plumbum' &> /dev/null ; then
echo 'Error: Could not find pyyaml or plumbum modules!' >&2
echo 'Info: Tip: install them in a virtualenv, with:' >&2
echo 'Info: pip install -r requirements.txt' >&2
echo 'Info: See README.md for more details.' >&2
exit 1
fi
}
usage () {
echo 'Usage:'
echo "${SCRIPT} [virtualenv_path] [groker.py options]"
exit 0
}
# First check for --help or -h and print usage if requested
args="$@"
if [ -n "${args}" ] \
&& ([ -z "${args##*"--help"*}" ] || [ -z "${args##*"-h"*}" ]) ; then
usage
fi
# Setup some variables to support more flexible execution
SCRIPT_DIR=$(dirname $0)
SCRIPT="${SCRIPT_DIR%%/}/groker.py"
SCRIPT_ARGUMENTS=""
VIRTUALENV_PATH="${SCRIPT_DIR%%/}/virtualenv"
# Argument handling: make the wrapper aware of the virtualenv and pass arguments through
if [ "$#" -gt 0 ] ; then
if [ -d "${1}" ] ; then
VIRTUALENV_PATH="${1}"
shift
SCRIPT_ARGUMENTS="$@"
else
SCRIPT_ARGUMENTS="$@"
fi
fi
# If provided path contains a python exec, use it, else use system python
if [ -x "${VIRTUALENV_PATH%%/}/bin/python" ] ; then
PYTHON_BIN="${VIRTUALENV_PATH%%/}/bin/python"
else
PYTHON_BIN=$(which python)
fi
# Verify that provided python environment have our required modules
test_python
# Execute!
"${PYTHON_BIN}" "${SCRIPT}" ${SCRIPT_ARGUMENTS}
|
eripa/groker
|
groker-wrapper.sh
|
Shell
|
mit
| 1,521 |
for item in `awk -F ' ' '{print $1}' HICP.txt`
do
wget --header="Accept:application/vnd.sdmx.structurespecificdata+xml;version=2.1" https://sdw-wsrest.ecb.europa.eu/service/data/ICP/M.PT.N.$item.4.INX
echo "item $item is done"
done
|
BadWizard/Inflation
|
DATA/temp/PT/script-data.sh
|
Shell
|
mit
| 240 |
java -jar BON.jar -mcp ~/ksrc/githubTCPRKC/MinecraftForge/fml -from OBF -to MCP -side UNIVERSAL -in libs/quivering.zip -out quivernow.jar
|
Robotia/ExtraUtilities
|
ExtraUtilitiesBuilder/deobf.sh
|
Shell
|
mit
| 139 |
curl -o .bash_prompt https://raw.githubusercontent.com/mimiflynn/dotfiles/master/shared/bash/.bash_prompt
curl -o .bash_profile https://raw.githubusercontent.com/mimiflynn/dotfiles/master/shared/bash/.bash_profile
source .bash_profile
|
mimiflynn/dotfiles
|
shared/bash/install.sh
|
Shell
|
mit
| 234 |
#!/bin/bash
what="tests/"
if [[ $# -ge 1 ]]; then
what="$@"
fi
exec python3 -m pytest -v --log-level=DEBUG --junit-xml=unit-test-results.xml --cov=scuba --cov=tests $what
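# Example (test path is illustrative):
#   ./run_unit_tests.sh tests/test_config.py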
|
JonathonReinhart/scuba
|
run_unit_tests.sh
|
Shell
|
mit
| 176 |
#!/bin/sh
prog=ellip5OneM_test.m
depends="ellip5OneM_test.m test_common.m \
schurexpand.oct schurdecomp.oct schurOneMscale.m \
tf2schurOneMlattice.m schurOneMlatticeNoiseGain.m schurOneMlattice2Abcd.oct \
schurOneMlatticeFilter.m KW.m bin2SD.oct crossWelch.m p2n60.m qroots.m \
qzsolve.oct"
tmp=/tmp/$$
here=`pwd`
if [ $? -ne 0 ]; then echo "Failed pwd"; exit 1; fi
fail()
{
echo FAILED ${0#$here"/"} $prog 1>&2
cd $here
rm -rf $tmp
exit 1
}
pass()
{
echo PASSED ${0#$here"/"} $prog
cd $here
rm -rf $tmp
exit 0
}
trap "fail" 1 2 3 15
mkdir $tmp
if [ $? -ne 0 ]; then echo "Failed mkdir"; exit 1; fi
for file in $depends;do \
cp -R src/$file $tmp; \
if [ $? -ne 0 ]; then echo "Failed cp "$file; fail; fi \
done
cd $tmp
if [ $? -ne 0 ]; then echo "Failed cd"; fail; fi
#
# the output should look like this
#
cat > test.ok << 'EOF'
fc = 0.050000
n60 = 339
k =
-0.9657 0.9906 -0.9805 0.9620 -0.6961
epsilon =
1 1 1 1 1
p =
0.5798 4.3893 0.3018 3.0407 0.4233
c =
4.0898e-01 3.0904e-02 4.9245e-01 1.1926e-02 4.8375e-02 7.3597e-03
S =
0.0014 0 0 0 0 0
-0.0051 0.0053 0 0 0 0
0.0382 -0.0741 0.0386 0 0 0
-0.1923 0.5639 -0.5675 0.1961 0 0
0.6907 -2.7025 4.0505 -2.7547 0.7180 0
-0.6961 3.6326 -7.6908 8.2614 -4.5064 1.0000
ng = 2.5504
ngap = 9.0000
est_varyd = 0.2959
varyd = 0.3006
est_varyapd = 0.8333
varyapd = 0.7960
stdxxf =
129.01 128.33 129.65 129.36 128.47
EOF
if [ $? -ne 0 ]; then echo "Failed output cat"; fail; fi
#
# run and see if the results match
#
echo "Running $prog"
octave --no-gui -q $prog >test.out 2>&1
if [ $? -ne 0 ]; then echo "Failed running $prog"; fail; fi
diff -Bb test.ok test.out
if [ $? -ne 0 ]; then echo "Failed diff -Bb"; fail; fi
#
# this much worked
#
pass
|
robertgj/DesignOfIIRFilters
|
test/01/t0123a.sh
|
Shell
|
mit
| 1,972 |
#!/bin/bash
IFS=$'\n'
THIS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
TESTS_DIR="$( cd "${THIS_DIR}/.." && pwd )"
BIN_DIR="${TESTS_DIR}/bin"
NANO_PKG="github.com/pasztorpisti/nano"
NANO_DIR="$( cd "${TESTS_DIR}/../../../.." && pwd )"
SERVERS_DIR="$( cd "${TESTS_DIR}/.." && pwd )"
SERVERS_PKG="${NANO_PKG}/examples/example1/servers"
# The name of the docker network to create. We create a separate docker
# network because the embedded docker DNS works only with custom networks.
# The default docker bridge works in legacy mode without DNS.
NETWORK="nano"
# Docker image to use to run the static linked server executables.
IMAGE_NAME="alpine:3.4"
# Docker image to use to build the server executables.
GOLANG_BUILD_IMAGE="golang:1.7.4"
# Every docker container we create will have this prefix. This makes mass
# deletion of our containers easier by filter for this.
CONTAINER_NAME_PREFIX="nano_test_"
# List of package names to build under the nano/examples/example1/servers directory.
ALL=(
server1
server2a
server2b
server3a
server3b
server3c
server3d
test_client
)
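# Consumers of this env file typically iterate ALL, e.g. (illustrative):
#   for pkg in "${ALL[@]}"; do go build "${SERVERS_PKG}/${pkg}"; done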
|
pasztorpisti/nano
|
examples/example1/servers/tests/helpers/env.sh
|
Shell
|
mit
| 1,099 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for USN-2985-2
#
# Security announcement date: 2016-05-26 00:00:00 UTC
# Script generation date: 2017-01-01 21:05:27 UTC
#
# Operating System: Ubuntu 12.04 LTS
# Architecture: i386
#
# Vulnerable packages fixed in version:
# - libc-bin:2.15-0ubuntu10.15
# - libc6-dev:2.15-0ubuntu10.15
# - libc6:2.15-0ubuntu10.15
#
# Last versions recommended by the security team:
# - libc-bin:2.15-0ubuntu10.15
# - libc6-dev:2.15-0ubuntu10.15
# - libc6:2.15-0ubuntu10.15
#
# CVE List:
# - CVE-2014-9761
# - CVE-2013-2207
# - CVE-2016-2856
# - CVE-2014-8121
# - CVE-2015-1781
# - CVE-2015-5277
# - CVE-2015-8776
# - CVE-2015-8777
# - CVE-2015-8778
# - CVE-2015-8779
# - CVE-2016-3075
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade libc-bin=2.15-0ubuntu10.15 -y
sudo apt-get install --only-upgrade libc6-dev=2.15-0ubuntu10.15 -y
sudo apt-get install --only-upgrade libc6=2.15-0ubuntu10.15 -y
|
Cyberwatch/cbw-security-fixes
|
Ubuntu_12.04_LTS/i386/2016/USN-2985-2.sh
|
Shell
|
mit
| 1,081 |
#!/bin/bash
set -e
sudo rm /etc/apt/sources.list.d/mongodb*.list
sudo rm /etc/apt/sources.list.d/docker.list
sudo apt-get install hhvm && rm -rf /home/travis/.kiex/
sudo apt-get purge -y mysql-common mysql-server mysql-client
source ~/.nvm/nvm.sh
nvm install v8.10.0
pip install python-coveralls
wget https://raw.githubusercontent.com/frappe/bench/master/playbooks/install.py
sudo python install.py --develop --user travis --without-bench-setup
sudo pip install -e ~/bench
rm $TRAVIS_BUILD_DIR/.git/shallow
cd ~/ && bench init frappe-bench --python $(which python) --frappe-path $TRAVIS_BUILD_DIR
cp -r $TRAVIS_BUILD_DIR/test_sites/test_site ~/frappe-bench/sites/
cp -r $TRAVIS_BUILD_DIR/test_sites/test_site_postgres ~/frappe-bench/sites/
|
RicardoJohann/frappe
|
.travis/install.sh
|
Shell
|
mit
| 745 |
export PATH="$HOME/.plenv/shims:$HOME/.plenv/bin:$PATH"
export PLENV_SHELL=zsh
source "$HOME/.plenv/completions/plenv.zsh"
|
andrewalker/dotfiles
|
plenv/path.zsh
|
Shell
|
mit
| 123 |
#!/bin/bash
read -p "Insert fine or directory name:" VAR
if [ -d "$VAR" ]
then
echo "${VAR} is a directory"
elif [ -f "$VAR" ]
then
echo "${VAR} is a file"
else
echo "${VAR} is something else"
fi
ls -l $VAR
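# Example session (illustrative):
#   Insert file or directory name:/tmp
#   /tmp is a directory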
|
riccardotommasini/shell-scripting-course
|
section-two/exe6.sh
|
Shell
|
mit
| 219 |
archweb_get_pkgbase() {
local pkgbase
pkgbase=$(curl -LGs 'https://archlinux.org/packages/search/json/' --data-urlencode "q=$1" |
jq -r --arg pkgname "$1" 'limit(1; .results[] | select(.pkgname == $pkgname).pkgbase)')
[[ $pkgbase ]] || return
printf '%s\n' "$pkgbase"
}
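# Example (queries archlinux.org; package name is illustrative):
#   archweb_get_pkgbase linux-headers   # prints the pkgbase, e.g. "linux"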
|
falconindy/asp
|
archweb.inc.sh
|
Shell
|
mit
| 286 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for RHSA-2016:1551
#
# Security announcement date: 2016-08-03 08:14:20 UTC
# Script generation date: 2017-01-25 21:23:48 UTC
#
# Operating System: Red Hat 6
# Architecture: x86_64
#
# Vulnerable packages fixed in version:
# - firefox.i686:45.3.0-1.el6_8
# - firefox-debuginfo.i686:45.3.0-1.el6_8
# - firefox.x86_64:45.3.0-1.el6_8
# - firefox-debuginfo.x86_64:45.3.0-1.el6_8
#
# Last versions recommended by the security team:
# - firefox.i686:45.7.0-1.el6_8
# - firefox-debuginfo.i686:45.7.0-1.el6_8
# - firefox.x86_64:45.7.0-1.el6_8
# - firefox-debuginfo.x86_64:45.7.0-1.el6_8
#
# CVE List:
# - CVE-2016-2830
# - CVE-2016-2836
# - CVE-2016-2837
# - CVE-2016-2838
# - CVE-2016-5252
# - CVE-2016-5254
# - CVE-2016-5258
# - CVE-2016-5259
# - CVE-2016-5262
# - CVE-2016-5263
# - CVE-2016-5264
# - CVE-2016-5265
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo yum install firefox.i686-45.7.0 -y
sudo yum install firefox-debuginfo.i686-45.7.0 -y
sudo yum install firefox.x86_64-45.7.0 -y
sudo yum install firefox-debuginfo.x86_64-45.7.0 -y
|
Cyberwatch/cbw-security-fixes
|
Red_Hat_6/x86_64/2016/RHSA-2016:1551.sh
|
Shell
|
mit
| 1,218 |
#!/usr/bin/env bash
rm -rf bin
mkdir bin
# android
wget --no-check-certificate -P bin/release/android/armeabi-v7a https://s3.amazonaws.com/jlibtorrent1/release/android/armeabi-v7a/libjlibtorrent.so
wget --no-check-certificate -P bin/release/android/arm64-v8a https://s3.amazonaws.com/jlibtorrent1/release/android/arm64-v8a/libjlibtorrent.so
wget --no-check-certificate -P bin/release/android/x86 https://s3.amazonaws.com/jlibtorrent1/release/android/x86/libjlibtorrent.so
wget --no-check-certificate -P bin/release/android/x86_64 https://s3.amazonaws.com/jlibtorrent1/release/android/x86_64/libjlibtorrent.so
# linux
wget --no-check-certificate -P bin/release/linux/x86 https://s3.amazonaws.com/jlibtorrent1/release/linux/x86/libjlibtorrent.so
wget --no-check-certificate -P bin/release/linux/x86_64 https://s3.amazonaws.com/jlibtorrent1/release/linux/x86_64/libjlibtorrent.so
# windows
wget --no-check-certificate -P bin/release/windows/x86 https://s3.amazonaws.com/jlibtorrent1/release/windows/x86/jlibtorrent.dll
wget --no-check-certificate -P bin/release/windows/x86_64 https://s3.amazonaws.com/jlibtorrent1/release/windows/x86_64/jlibtorrent.dll
# macos
wget --no-check-certificate -P bin/release/macosx/x86_64 https://s3.amazonaws.com/jlibtorrent1/release/macosx/x86_64/libjlibtorrent.dylib
|
aldenml/frostwire-jlibtorrent
|
swig/get-binaries.sh
|
Shell
|
mit
| 1,302 |
#!/bin/bash
# URL list generator for YAMA
# Requirements: apt-get install pwgen
MYNAME=`basename $0`
if [ $# -ne 3 ]; then
echo "usage: ${MYNAME} <output file> <format> <opeartion>"
echo "<output file>: output filename to write URL list"
echo "<format>: csv or http"
echo "<operation>: GET operation to generate in URL: info, del, add or random"
exit 1
else
FILENAME=`basename ${1}`
FORMAT=${2}
GET_OP=${3}
fi
while true; do
FIRST=`pwgen 5 1`
  DOMAIN=`pwgen $((RANDOM%20+1)) 1`
PORTS=(80 443 8080)
I_PORT=$(($RANDOM%3))
PATH_RAND=`pwgen $((RANDOM%200+10)) 1`
QUERY_RAND=`pwgen $((RANDOM%60+10)) 1`
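  # pwgen LEN 1 prints a single random alphanumeric string of LEN characters,
  # so the pieces above yield a random hostname, path and query component.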
if [ "${FORMAT}" == "http" ]; then
if [ "${GET_OP}" == "random" ]; then
OPERATION=$(($RANDOM%3))
if [ ${OPERATION} == "0" ]; then
OP="urlinfo"
OP2="1"
elif [ ${OPERATION} == "1" ]; then
OP="urlupdate"
OP2="add"
elif [ ${OPERATION} == "2" ]; then
OP="urlupdate"
OP2="del"
fi
elif [ "${GET_OP}" == "info" ]; then
OP="urlinfo"
OP2="1"
elif [ "${GET_OP}" == "add" ]; then
OP="urlupdate"
OP2="add"
elif [ "${GET_OP}" == "del" ]; then
OP="urlupdate"
OP2="del"
fi
LINE="http://localhost/${OP}/${OP2}/${FIRST}${DOMAIN}:${PORTS[${I_PORT}]}/path/to/${PATH_RAND}/${QUERY_RAND}"
# Format == csv
else
LINE="${FIRST}${DOMAIN},${PORT},/path/to/something/index.html"
fi
echo "${LINE}" >> "${FILENAME}"
done
|
vitovitolo/yama
|
utils/url_file_generator.sh
|
Shell
|
mit
| 1,381 |
#!/bin/bash
# If executing
# ./7.exercise.11.ch04.04.test.bash | grep -v LIE | cut -f2 | sort -u | wc -l
# returns 100, we know each number from 1 to 100 is reachable with 8 answers
for i in {y,n}{y,n}{y,n}{y,n}{y,n}{y,n}{y,n}{y,n}; do
result=$(echo $i | ./a.out 2>&1 | tail -n2 | grep -o '[0-9]*\|LIE' | paste - -)
echo -e "$i\t$result"
done
|
0p3r4t4/PPPUCPP2nd
|
ch07/7.exercise.11.ch04.04.test.bash
|
Shell
|
mit
| 354 |
docker run -t -i jmarin/supervisor
|
jmarin/devops
|
supervisor/run.sh
|
Shell
|
mit
| 35 |