code (string, lengths 2 to 1.05M) | repo_name (string, lengths 5 to 110) | path (string, lengths 3 to 922) | language (string, 1 class) | license (string, 15 classes) | size (int64, 2 to 1.05M)
---|---|---|---|---|---|
#!/bin/bash -xe
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This script is executed inside post_test_hook function in devstack gate.
function generate_testr_results {
if [ -f .testrepository/0 ]; then
sudo .tox/functional/bin/testr last --subunit > $WORKSPACE/testrepository.subunit
sudo mv $WORKSPACE/testrepository.subunit $BASE/logs/testrepository.subunit
sudo .tox/functional/bin/python /usr/local/jenkins/slave_scripts/subunit2html.py $BASE/logs/testrepository.subunit $BASE/logs/testr_results.html
sudo gzip -9 $BASE/logs/testrepository.subunit
sudo gzip -9 $BASE/logs/testr_results.html
sudo chown jenkins:jenkins $BASE/logs/testrepository.subunit.gz $BASE/logs/testr_results.html.gz
sudo chmod a+r $BASE/logs/testrepository.subunit.gz $BASE/logs/testr_results.html.gz
fi
}
export NOVACLIENT_DIR="$BASE/new/python-novaclient"
sudo chown -R jenkins:stack $NOVACLIENT_DIR
# Get admin credentials
cd $BASE/new/devstack
source openrc admin admin
# pass the appropriate variables via a config file
CREDS_FILE=$NOVACLIENT_DIR/functional_creds.conf
cat <<EOF > $CREDS_FILE
# Credentials for functional testing
[auth]
uri = $OS_AUTH_URL
[admin]
user = $OS_USERNAME
tenant = $OS_TENANT_NAME
pass = $OS_PASSWORD
EOF
# Go to the novaclient dir
cd $NOVACLIENT_DIR
# Run tests
echo "Running novaclient functional test suite"
set +e
# Preserve env for OS_ credentials
sudo -E -H -u jenkins tox -efunctional
EXIT_CODE=$?
set -e
# Collect and parse result
generate_testr_results
exit $EXIT_CODE
| CCI-MOC/python-novaclient | novaclient/tests/functional/hooks/post_test_hook.sh | Shell | apache-2.0 | 2,059 |
if [ "$1" == "json" ]; then
json="json=1"
shift
elif [ "$1" == "csv" ]; then
json="csv=1"
shift
else
json="bash=1"
fi
if [ "$1" == "script" ]; then
fail="-f"
shift
else
fail=""
fi
curl ${fail} -k -s --data-urlencode "sync=sync" --data-urlencode "username=${USER}" --data-urlencode ${json} --data-urlencode "client=bash" --data-urlencode "q=$1" ${SERVER_URL}
| indeedeng/iql | src/main/resources/iql.sh | Shell | apache-2.0 | 390 |
#!/bin/bash
# Configure kubectl to work with this Vagrant cluster from the host machine.
# Run "kubectl get nodes" afterwards to verify that it's working.
kubectl config set-cluster vagrant --server=https://172.17.4.99:443 --certificate-authority=${PWD}/ssl/ca.pem
kubectl config set-credentials vagrant-admin --certificate-authority=${PWD}/ssl/ca.pem --client-key=${PWD}/ssl/admin-key.pem --client-certificate=${PWD}/ssl/admin.pem
kubectl config set-context vagrant --cluster=vagrant --user=vagrant-admin
kubectl config use-context vagrant
| DubFriend/coreos-kubernetes | single-node/kubectl.config.sh | Shell | apache-2.0 | 525 |
#!/system/bin/sh
# enable fm speaker, with volume 73
amixer -Dcodec cset numid=12, 7405641
| baidurom/devices-Coolpad8720L | vendor/system/bin/open_aud_path_fm_speaker.sh | Shell | apache-2.0 | 93 |
cp ../Font-Awesome/src/assets/js/jquery-1.10.2.min.map output/js/
| KansasLinuxFest/website | installmap.sh | Shell | apache-2.0 | 66 |
#!/usr/bin/env bash
if [ "`grep server /etc/puppet/puppet.conf`" ]; then
chkconfig puppetagent on
service puppetagent restart
# allow puppet to retrieve certificate from server
tries=0
while [ $tries -lt 10 ]; do
x=$(ls -l /var/lib/puppet/ssl/certs | grep `hostname` | cut -d ' ' -f 3)
if [ "$x" == "puppet" ]; then break; fi;
tries=$(($tries + 1))
echo "Waiting to obtain certificate from Puppet master"
sleep 3
done
fi
if [ -f /etc/lsb-release ] && (egrep -q 'DISTRIB_RELEASE.*16.04' /etc/lsb-release); then
#setup script for contrail-control package under systemd
for svc in control control-nodemgr dns named; do
chkconfig contrail-$svc on
service contrail-$svc restart
done
else
#setup script for contrail-control package under supervisord
chkconfig supervisor-control on
service supervisor-control restart
fi
| Juniper/contrail-provisioning | contrail_provisioning/control/scripts/control-server-setup.sh | Shell | apache-2.0 | 929 |
#!/usr/bin/env bash
if [ ! -d "src-java" ]; then
echo "ERROR: The script must be run from the project directory"
exit 1
fi
if [ -z $(docker images -q kilda/server42dpdk-protobuf:latest) ]; then
cd src-cpp/server42/
docker build -t kilda/server42dpdk-protobuf:latest . -f Dockerfile.protobuf
cd -
fi
if [ ! -f "src-java/server42/server42-control-messaging/src/main/java/org/openkilda/server42/control/messaging/flowrtt/Control.java" ]; then
docker run -it --rm \
--user $(id -u):$(id -g) \
-v $(pwd)/src-java/server42/server42-control-messaging/src/main/java:/src-java/server42/server42-control-messaging/src/main/java \
-v $(pwd)/src-cpp/server42/src:/src-cpp/server42/src \
kilda/server42dpdk-protobuf:latest \
protoc --java_out src-java/server42/server42-control-messaging/src/main/java --proto_path src-cpp/server42/src \
src-cpp/server42/src/control.proto
fi
if [ ! -f "src-java/server42/server42-stats-messaging/src/main/java/org/openkilda/server42/stats/messaging/flowrtt/Statistics.java" ]; then
docker run -it --rm \
--user $(id -u):$(id -g) \
-v $(pwd)/src-java/server42/server42-stats-messaging/src/main/java:/src-java/server42/server42-stats-messaging/src/main/java \
-v $(pwd)/src-cpp/server42/src:/src-cpp/server42/src \
kilda/server42dpdk-protobuf:latest \
protoc --java_out src-java/server42/server42-stats-messaging/src/main/java --proto_path src-cpp/server42/src \
src-cpp/server42/src/statistics.proto
fi
| telstra/open-kilda | src-cpp/server42/generate_java_protobuf.sh | Shell | apache-2.0 | 1,466 |
#!/bin/sh
docker pull alpine:latest
docker build -t peez/openhab:local .
| peez80/docker-openhab | build-openhab.sh | Shell | apache-2.0 | 73 |
#!/bin/bash
IMAGE="shanegibbs/snowy-dev"
| shanegibbs/snowy-lang | scripts/docker-common.sh | Shell | apache-2.0 | 42 |
#!/bin/sh
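# Default the polling interval: keep cycle_delay_secs if it is already set to a
# non-empty value in the environment, otherwise use the first argument,
# otherwise fall back to 30 seconds (":" evaluates the expansion without
# running a command).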
: ${cycle_delay_secs:=${1:-30}}
while true; do
sleep ${cycle_delay_secs}
printf '\n***** Kubernetes pod state at %s *****\n%s\n\n' \
"$(date +'%H:%M:%S')" \
"$(kubectl get --all-namespaces pods 2>&1 | grep -v ^kube-system)"
done
| twosigma/waiter | waiter/bin/ci/monitor-pods.sh | Shell | apache-2.0 | 265 |
#!/bin/sh
python set_ovs_hostconfigs.py --debug --ovs_hostconfigs='{"ODL L2": {"supported_vnic_types":[{"vnic_type":"normal", "vif_type":"ovs", "vif_details":{}}], "allowed_network_types":["local","vlan", "vxlan","gre"], "bridge_mappings":{"physnet1":"br-ex"}}, "ODL L3": {"some_details": "dummy_details"}}'
| openstack/networking-odl | networking_odl/cmd/test_setup_hostconfigs.sh | Shell | apache-2.0 | 310 |
#!/usr/bin/env bash
# desc: sync tables, install dependencies, collect static files.
# author: Elvin Zeng
# date: 2017-5-29
cd $(cd $(dirname $0) && pwd -P)
cd ../../
./manage.py migrate
pip install -r requirements.txt
python manage.py collectstatic -c -l --no-input
python manage.py crontab remove
python manage.py crontab add
| elvinzeng/wifi-attendance | script/shell/init_env.sh | Shell | apache-2.0 | 329 |
# -----------------------------------------------------------------------------
#
# Package : clone-stats
# Version : 1.0.0
# Source repo : https://github.com/hughsk/clone-stats
# Tested on : RHEL 8.3
# Script License: Apache License, Version 2 or later
# Maintainer : BulkPackageSearch Automation <[email protected]>
#
# Disclaimer: This script has been tested in root mode on given
# ========== platform using the mentioned version of the package.
# It may not work as expected with newer versions of the
# package and/or distribution. In such case, please
# contact "Maintainer" of this script.
#
# ----------------------------------------------------------------------------
PACKAGE_NAME=clone-stats
PACKAGE_VERSION=1.0.0
PACKAGE_URL=https://github.com/hughsk/clone-stats
yum -y update && yum install -y yum-utils nodejs nodejs-devel nodejs-packaging npm python38 python38-devel ncurses git gcc gcc-c++ libffi libffi-devel ncurses git jq make cmake
yum-config-manager --add-repo http://rhn.pbm.ihost.com/rhn/latest/8.3Server/ppc64le/appstream/
yum-config-manager --add-repo http://rhn.pbm.ihost.com/rhn/latest/8.3Server/ppc64le/baseos/
yum-config-manager --add-repo http://rhn.pbm.ihost.com/rhn/latest/7Server/ppc64le/optional/
yum install -y firefox liberation-fonts xdg-utils && npm install n -g && n latest && npm install -g npm@latest && export PATH="$PATH" && npm install --global yarn grunt-bump xo testem acorn
OS_NAME=`python3 -c "os_file_data=open('/etc/os-release').readlines();os_info = [i.replace('PRETTY_NAME=','').strip() for i in os_file_data if i.startswith('PRETTY_NAME')];print(os_info[0])"`
HOME_DIR=`pwd`
if ! git clone $PACKAGE_URL $PACKAGE_NAME; then
echo "------------------$PACKAGE_NAME:clone_fails---------------------------------------"
echo "$PACKAGE_URL $PACKAGE_NAME" > /home/tester/output/clone_fails
echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | GitHub | Fail | Clone_Fails" > /home/tester/output/version_tracker
exit 0
fi
cd $HOME_DIR/$PACKAGE_NAME
git checkout $PACKAGE_VERSION
PACKAGE_VERSION=$(jq -r ".version" package.json)
# install dependencies (the test command from test.sh is run further below)
if ! (npm install && npm audit fix && npm audit fix --force); then
echo "------------------$PACKAGE_NAME:install_fails-------------------------------------"
echo "$PACKAGE_URL $PACKAGE_NAME"
echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | GitHub | Fail | Install_Fails"
exit 0
fi
cd $HOME_DIR/$PACKAGE_NAME
if ! npm test; then
echo "------------------$PACKAGE_NAME:install_success_but_test_fails---------------------"
echo "$PACKAGE_URL $PACKAGE_NAME"
echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | GitHub | Fail | Install_success_but_test_Fails"
exit 0
else
echo "------------------$PACKAGE_NAME:install_&_test_both_success-------------------------"
echo "$PACKAGE_URL $PACKAGE_NAME"
echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | GitHub | Pass | Both_Install_and_Test_Success"
exit 0
fi
| ppc64le/build-scripts | c/clone-stats/clone-stats_rhel_8.3.sh | Shell | apache-2.0 | 3,060 |
# ----------------------------------------------------------------------------
#
# Package : strong-nginx-controller
# Version : 1.0.2
# Source repo : https://github.com/strongloop/strong-nginx-controller.git
# Tested on : ubuntu_16.04
# Script License: Apache License, Version 2 or later
# Maintainer : Atul Sowani <[email protected]>
#
# Disclaimer: This script has been tested in non-root mode on given
# ========== platform using the mentioned version of the package.
# It may not work as expected with newer versions of the
# package and/or distribution. In such case, please
# contact "Maintainer" of this script.
#
# ----------------------------------------------------------------------------
# Install dependencies.
sudo apt-get update -y
sudo apt-get install -y build-essential npm wget git
WDIR=`pwd`
# Build and install node.
cd $WDIR
wget https://nodejs.org/dist/v4.2.3/node-v4.2.3.tar.gz
tar -xzf node-v4.2.3.tar.gz
cd node-v4.2.3
./configure
make
sudo make install
# Clone and build source code.
cd $WDIR
git clone https://github.com/strongloop/strong-nginx-controller.git
cd strong-nginx-controller
npm install && npm test
| ppc64le/build-scripts | s/strong-nginx-controller/strong-nginx-controller_ubuntu_16.04.sh | Shell | apache-2.0 | 1,180 |
#!/bin/bash
sudo cp /etc/kubernetes/admin.conf ~/.kube/config
sudo chown pirate:pirate ~/.kube/config
| clim/k8s-anywhere | raspberrypi/ansible/setup_config.sh | Shell | apache-2.0 | 103 |
#!/bin/bash
set -euo pipefail
MAVEN_OPTS="-Duser.name=jenkins -Duser.home=/tmp/spring-plugin-maven-repository" ./mvnw -P${PROFILE} clean dependency:list test -Dsort -B
| spring-projects/spring-plugin | ci/test.sh | Shell | apache-2.0 | 170 |
#!/bin/sh
if [ -n "$CATALINA_HOME" ]
then
echo "CATALINA_HOME is set, removing it ..."
unset CATALINA_HOME
fi
if [ "${SAKAI_KERNEL_PROPERTIES}" = "" ]
then
export SAKAI_KERNEL_PROPERTIES=`pwd`/localkernel.properties
fi
echo "SAKAI_KERNEL_PROPERTIES is set to $SAKAI_KERNEL_PROPERTIES"
# for YourKit export JAVA_OPTS="-server -Dcom.sun.management.jmxremote -Djava.awt.headless=true -agentlib:yjpagent "
target/runtime/bin/catalina.sh $*
| sakai-mirror/k2 | bootstrap/start.sh | Shell | apache-2.0 | 452 |
# ----------------------------------------------------------------------------
#
# Package : MongoDB-driver-core
# Version : 3.8.1/ 4.1.2
# Source repo : https://github.com/mongodb/mongo-java-driver.git
# Tested on : UBI 8.0
# Script License : Apache License 2.0
# Maintainer : Manik Fulpagar <[email protected]>
#
# Disclaimer: This script has been tested in root mode on given
# ========== platform using the mentioned version of the package.
# It may not work as expected with newer versions of the
# package and/or distribution. In such case, please
# contact "Maintainer" of this script.
#
# ----------------------------------------------------------------------------
#!/bin/bash
# ----------------------------------------------------------------------------
# Prerequisites:
#
# MongoDB running on port 27017
#
# Java version 11 or later must be installed.
#
# ----------------------------------------------------------------------------
# variables
PKG_NAME="MongoDB-driver-core"
PKG_VERSION=r3.8.1
PKG_VERSION_LATEST=r4.3.1
REPOSITORY="https://github.com/mongodb/mongo-java-driver.git"
echo "Usage: $0 [r<PKG_VERSION>]"
echo " PKG_VERSION is an optional paramater whose default value is r3.8.1"
PKG_VERSION="${1:-$PKG_VERSION}"
# install tools and dependent packages
yum -y update
yum install -y git wget curl unzip nano vim make dos2unix
# setup java environment
yum install -y java-11 java-devel
which java
ls /usr/lib/jvm/
export JAVA_HOME=/usr/lib/jvm/$(ls /usr/lib/jvm/ | grep -P '^(?=.*java-11)(?=.*ppc64le)')
echo "JAVA_HOME is $JAVA_HOME"
# update the path env. variable
export PATH=$PATH:$JAVA_HOME/bin
# install gradle
GRADLE_VERSION=6.2.2
wget https://services.gradle.org/distributions/gradle-$GRADLE_VERSION-bin.zip
mkdir -p /usr/local/gradle
unzip -d /usr/local/gradle gradle-$GRADLE_VERSION-bin.zip
ls /usr/local/gradle/gradle-$GRADLE_VERSION/
rm -rf gradle-$GRADLE_VERSION-bin.zip
export GRADLE_HOME=/usr/local/gradle
# update the path env. variable
export PATH=$PATH:$GRADLE_HOME/gradle-$GRADLE_VERSION/bin
# create folder for saving logs
mkdir -p /logs
LOGS_DIRECTORY=/logs
LOCAL_DIRECTORY=/root
# clone, build and test latest version
cd $LOCAL_DIRECTORY
git clone $REPOSITORY $PKG_NAME-$PKG_VERSION
cd $PKG_NAME-$PKG_VERSION/
git checkout $PKG_VERSION
cd driver-core
gradle build | tee $LOGS_DIRECTORY/$PKG_NAME-$PKG_VERSION.txt
#gradle build -x test | tee $LOGS_DIRECTORY/$PKG_NAME-$PKG_VERSION.txt
| ppc64le/build-scripts | m/MongoDB-driver-core/MongoDB-driver-core_UBI_8.0.sh | Shell | apache-2.0 | 2,583 |
################## SOURCE THIS FILE #################
#
# !/bin/bash
# !/bin/ksh
#
# Copyright (C) 2002 by John P. Weiss
#
# This package is free software; you can redistribute it and/or modify
# it under the terms of the Artistic License, included as the file
# "LICENSE" in the source code archive.
#
# This package is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# You should have received a copy of the file "LICENSE", containing
# the License John Weiss originally placed this program under.
#
# $Id$
############
############
#
# Configuration Variables
#
############
############
#
# Includes & Other Global Variables
#
############
#. some.include.sh
#GREP=grep
#SED=sed
#AWK=awk
#LS=ls
############
#
# Functions
#
############
add_path() {
# Set some defaults. PATHVAR must be set before do_GetPathvarVal, since the
# latter expands ${PATHVAR} at assignment time.
PATHVAR="PATH"
do_GetPathvarVal="echo \${${PATHVAR}}"
do_Modify='${PATHVAR}=${pathvarContents}:${dirs_added}'
# Parse function args.
dirs=""
while [ -n "$1" ]; do
case "$1" in
-a|--app*)
do_Modify='${PATHVAR}=${pathvarContents}:${dirs_added}'
;;
-p|--pre*)
do_Modify='${PATHVAR}=${dirs_added}:${pathvarContents}'
;;
-P)
PATHVAR="PATH"
;;
-[mM])
PATHVAR="MANPATH"
;;
-[lL])
PATHVAR="LD_LIBRARY_PATH"
;;
-[cCjJ])
PATHVAR="CLASSPATH"
;;
-v|--var*)
shift
PATHVAR="$1"
;;
-h|--help)
echo "add_path [-a|-p|-P|-m|-l|-c] [-v <envvar>] <dirs...>"
;;
-*)
# Ignore any other options.
;;
*)
dirs="${dirs}:${1%\/}"
;;
esac
shift
done
# Trim leading ':' off of $dirs.
dirs_added="${dirs#:}"
# The "do nothing" case:
if [ -z "${dirs_added}" ]; then
return 1
fi
# Delete any paths that are already present.
delete_path --eval "PATHVAR=\"${PATHVAR}\"; dirs=\"${dirs_added}\";"
# Get the existing contents of the pathvar. Rebuild the accessor here so that
# a variable selected via -v/-m/-l/-c during option parsing is honored.
do_GetPathvarVal="echo \${${PATHVAR}}"
pathvarContents=`eval "${do_GetPathvarVal}"`
# Handle the empty PATHVAR case.
if [ -z "${pathvarContents}" ]; then
do_Modify='${PATHVAR}=${dirs_added}'
fi
# Okay, so this is messy. Here's what it's doing:
# - The inner 'eval' expands the contents of $do_Modify. The "echo"
# merely gives the 'eval' a command to execute.
# - The outer 'eval' expands the variables that were embedded inside of
# $do_Modify before executing the modification command.
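#
# Illustrative example (hypothetical values): with PATHVAR=PATH,
# pathvarContents=/usr/bin and dirs_added=/opt/foo/bin, the inner 'eval echo'
# expands $do_Modify into the string "PATH=/usr/bin:/opt/foo/bin", and the
# outer 'eval' then executes that assignment.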
eval `eval echo ${do_Modify}`
export "${PATHVAR}"
}
delete_path() {
# Set some defaults. As in add_path, PATHVAR must be set before
# do_GetPathvarVal.
PATHVAR="PATH"
do_GetPathvarVal="echo \${${PATHVAR}}"
# Parse function args.
newpath=""
dirs=""
while [ -n "$1" ]; do
case "$1" in
--eval)
shift
eval "$@"
# Eat the commandline
set --
;;
-P)
PATHVAR="PATH"
;;
-[mM])
PATHVAR="MANPATH"
;;
-[lL])
PATHVAR="LD_LIBRARY_PATH"
;;
-[cCjJ])
PATHVAR="CLASSPATH"
;;
-v|--var*)
shift
PATHVAR="$1"
;;
-h|--help)
echo "delete_path [-a|-P|-m|-l|-c] [-v <envvar>] <dirs...>"
;;
-*)
# Ignore any other options.
;;
*)
dirs="${dirs}:${1%\/}"
;;
esac
shift
done
# Trim leading ':' off of $dirs.
dirs="${dirs#:}"
# The "do nothing" case:
if [ -z "${dirs}" ]; then
return 1
fi
# Get the contents of the path as it stands now (the accessor is rebuilt so
# that options parsed above are honored). Also reset the IFS.
do_GetPathvarVal="echo \${${PATHVAR}}"
pathvarContents=`eval "${do_GetPathvarVal}"`
oldIFS="${IFS}"
IFS=":"
# Construct the new path. Yes, it's O(n^2)...
for d in ${pathvarContents}; do
keepdir=1
for delD in ${dirs}; do
# Nullify the old path element under examination if it matches one
# of the deletion targets.
if [ "${d}" == "${delD}" ]; then
keepdir=0
break;
fi
done
if [ ${keepdir} -eq 1 ]; then
newpath="${newpath}:${d}"
fi
done
IFS="${oldIFS}"
unset oldIFS pathvarContents
# Trim leading ':' off of $newpath.
newpath="${newpath#:}"
# Assign the rebuilt value back to the path variable.
eval "${PATHVAR}=${newpath}"
export "${PATHVAR}"
}
dedup_path() {
# The "do nothing" case:
if [ -z "$1" ]; then
return 1
fi
# Set some defaults. As above, PATHVAR must be set before do_GetPathvarVal.
PATHVAR="PATH"
do_GetPathvarVal="echo \${${PATHVAR}}"
# Parse function args.
case "$1" in
-P)
PATHVAR="PATH"
;;
-[mM])
PATHVAR="MANPATH"
;;
-[lL])
PATHVAR="LD_LIBRARY_PATH"
;;
-[cCjJ])
PATHVAR="CLASSPATH"
;;
*)
PATHVAR="$1"
;;
esac
# Get the contents of the path as it stands now (the accessor is rebuilt so
# the option parsed above is honored). Quit if there's nothing there.
do_GetPathvarVal="echo \${${PATHVAR}}"
pathvarContents=`eval "${do_GetPathvarVal}"`
if [ -z "${pathvarContents}" ]; then
return 0
fi
# Reset the IFS and set the positional parameters to the reverse-order of
# $PATHVAR
oldIFS="${IFS}"
IFS=":"
newpath=""
for d in ${pathvarContents}; do
newpath="${d}:${newpath}"
done
set -- ${newpath%\:}
newpath=""
# Construct the new path, eliminating duplicates. Yes, it's O(n^2)...
while [ -n "$1" ]; do
d="$1"
keepdir=1
shift
remainingDirs="$*"
for delD in ${remainingDirs}; do
# Nullify the present path element if it matches something else in
# the path. [Remember, ${remainingDirs} is in the reverse-order
# of the PATHVAR.]
if [ "${d}" == "${delD}" ]; then
keepdir=0
break;
fi
done
if [ ${keepdir} -eq 1 ]; then
newpath="${d}:${newpath}"
fi
done
IFS="${oldIFS}"
unset oldIFS pathvarContents
# Trim trailing ':' off of $newpath.
newpath="${newpath%:}"
# Assign the rebuilt value back to the path variable.
eval "${PATHVAR}=${newpath}"
export "${PATHVAR}"
}
############
#
# Main
#
############
#################
#
# End
| jpweiss/tools | sh.scripts/pathtools.sh | Shell | artistic-2.0 | 6,876 |
#!/bin/bash
#
# measures the runtime for a list of commits. will create REPO/results directory with a textfile for each commit and
# input-file.
#
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
COMMITS=$(cat $DIR/old_measurements.txt | grep '^commit' | sed -e 's/commit //')
BUILD_DIR=$DIR/../../build/
REPO_BASE=$DIR/../../
# result and testfiles need to be copied somewhere else. going back in git history will remove them from $DIR/testfiles
RESULT_DIR=$REPO_BASE/results/
TEST_FILES=$REPO_BASE/input_files/
rm -rf $RESULT_DIR
rm -rf $TEST_FILES
mkdir -p $RESULT_DIR
mkdir -p $TEST_FILES
cp $DIR/testfiles/* $TEST_FILES
for c in $COMMITS; do
# empty the build dir
rm -rf $BUILD_DIR
mkdir $BUILD_DIR
cd $BUILD_DIR
git checkout $c
cmake -DCMAKE_BUILD_TYPE=Release -DWITH_PERF=TRUE -DWITH_RECOMMENDER_BOOST=TRUE ..
make -j8
cd examples/
cp $TEST_FILES/* .
./example_recommendation -dataset 32MoviesUID1 -numMeasurements 7 -cpufreq 2300000000 > $RESULT_DIR/${c}_32MoviesUID1.txt
./example_recommendation -dataset 512MoviesUID1 -numMeasurements 7 -cpufreq 2300000000 > $RESULT_DIR/${c}_512MoviesUID1.txt
./example_recommendation -dataset ml100kfull -numMeasurements 7 -cpufreq 2300000000 > $RESULT_DIR/${c}_ml100kfull.txt
done
| flurischt/libDAI | scripts/runtime/measure_runtime.sh | Shell | bsd-2-clause | 1,293 |
#!/bin/bash
scripts/benchmark.sh xilinx_serial "DATA_BITS={16,64,256,1024,4096},T={2,4,8,12}"
| russdill/bch_verilog | benchmark/xilinx_serial.sh | Shell | bsd-2-clause | 96 |
#!/bin/bash
# Use getopt instead of getopts for long options
set -e
OPTS=`getopt -o o: --long output-dir:,bam-string:,bam-out:,out-script:,standalone -n 'MergeTN.sh' -- "$@"`
if [ $? != 0 ] ; then echo "Failed parsing options." >&2 ; exit 1 ; fi
#echo "$OPTS"
eval set -- "$OPTS"
MYDIR="$( cd "$( dirname "$0" )" && pwd )"
timestamp=$( date +"%Y-%m-%d_%H-%M-%S_%N" )
keep_intermediates=0
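# Hypothetical example invocation (file names are placeholders only):
#   MergeTN.sh --output-dir /path/to/outdir --bam-string "tumor.bam normal.bam" \
#       --bam-out merged.bam --standalone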
while true; do
case "$1" in
-o | --output-dir )
case "$2" in
"") shift 2 ;;
*) outdir=$2 ; shift 2 ;;
esac ;;
--bam-out )
case "$2" in
"") shift 2 ;;
*) outbam=$2 ; shift 2 ;;
esac ;;
--bam-string )
case "$2" in
"") shift 2 ;;
*) bam_string=$2 ; shift 2 ;;
esac ;;
--out-script )
case "$2" in
"") shift 2 ;;
*) out_script_name=$2 ; shift 2 ;;
esac ;;
--standalone )
standalone=1 ; shift ;;
-- ) shift; break ;;
* ) break ;;
esac
done
logdir=${outdir}/logs
mkdir -p ${logdir}
if [[ ${out_script_name} ]]
then
out_script="${out_script_name}"
else
out_script="${logdir}/mergeBams.${timestamp}.cmd"
fi
if [[ $standalone ]]
then
echo "#!/bin/bash" > $out_script
echo "" >> $out_script
echo "#$ -o ${logdir}" >> $out_script
echo "#$ -e ${logdir}" >> $out_script
echo "#$ -S /bin/bash" >> $out_script
echo '#$ -l h_vmem=8G' >> $out_script
echo 'set -e' >> $out_script
fi
echo "" >> $out_script
for file in ${bam_string}
do
input_file_string="I=/mnt/${file} ${input_file_string}"
done
# Merge the BAM files
echo "singularity exec --bind /:/mnt docker://lethalfang/bamsurgeon:1.1-3 \\" >> $out_script
echo "java -Xmx8g -jar /usr/local/bin/picard.jar MergeSamFiles \\" >> $out_script
echo "${input_file_string} \\" >> $out_script
echo "ASSUME_SORTED=true \\" >> $out_script
echo "CREATE_INDEX=true \\" >> $out_script
echo "O=/mnt/${outdir}/${outbam}" >> $out_script
echo "" >> $out_script
# Rename the index file produced by Picard so it sits next to the merged BAM
echo "mv ${outdir}/${outbam%.bam}.bai ${outdir}/${outbam}.bai" >> $out_script
echo "" >> $out_script
| bioinform/somaticseq | somaticseq/utilities/singularities/bamSimulator/bamSurgeon/mergeBamFiles.sh | Shell | bsd-2-clause | 2,272 |
#!/bin/sh
export DEBIAN_FRONTEND=noninteractive
sudo apt-get update
# to avoid system message about upgrades
# sudo apt-get upgrade -y
# gnome
sudo apt-get install -y gnome-core gnome-screenshot
# test dependencies
# sudo add-apt-repository --yes ppa:jan-simon/pqiv
# sudo apt-get update
sudo apt-get install -y x11-utils xvfb python3-pip
sudo python3 -m pip install tox
# tools
sudo apt-get install -y mc htop
# autologin
echo '
[daemon]
AutomaticLoginEnable = true
AutomaticLogin = vagrant
' > /etc/gdm3/daemon.conf
# Disable Lock Screen and Screen Saver Locking
sudo -H -u vagrant bash -c 'dbus-launch gsettings set org.gnome.desktop.session idle-delay 0'
# disable notifications
sudo -H -u vagrant bash -c 'dbus-launch gsettings set org.gnome.desktop.notifications show-banners false'
# autostart terminal
sudo -H -u vagrant bash -c 'mkdir -p /home/vagrant/.config/autostart/ && cp /usr/share/applications/org.gnome.Terminal.desktop /home/vagrant/.config/autostart/'
| ponty/pyscreenshot | tests/vagrant/debian10.gnome.wayland.sh | Shell | bsd-2-clause | 984 |
echo 'rsync tldp ing...'
/home/luyi/bin/rsync-tldp.sh
echo 'done rsync tldp'
echo '======================================='
echo 'tldp:' `date +'%F %T'` >> /home/luyi/code/rsync-logs/logs
echo '======================================='
echo 'rsync sagemath ing...'
/home/luyi/bin/rsync-sagemath.sh
echo 'done rsync sagemath'
echo '======================================='
echo 'sagemath:' `date +'%F %T'` >> /home/luyi/code/rsync-logs/logs
echo '======================================='
#echo 'rsync apache ing...'
#/home/luyi/bin/rsync-apache.sh
#echo 'done rsync apache'
#echo '======================================='
#echo 'apache:' `date +'%F %T'` >> /home/luyi/code/rsync-logs/logs
#echo '======================================='
echo 'rsync pypi ing...'
/home/luyi/bin/rsync-pypi.sh
echo 'done rsync pypi'
echo '======================================='
echo 'pypi:' `date +'%F %T'` >> /home/luyi/code/rsync-logs/logs
echo '======================================='
| eccstartup/qpalzm | bin/rsync-all2.sh | Shell | bsd-3-clause | 973 |
#!/bin/bash
#$PYTHON setup.py install --prefix=$PREFIX
mkdir -p $PREFIX/lib/python2.7/site-packages
cp python2/pyinotify.py $PREFIX/lib/python2.7/site-packages
# Add more build steps here, if they are necessary.
# See
# http://docs.continuum.io/conda/build.html
# for a list of environment variables that are set during the build process.
| hajs/pylodger | recipes/misc/pyinotify/build.sh | Shell | bsd-3-clause | 343 |
rm -rf master branch1 branch2 gitrepo
darcs init --repo master
cd master
echo a > a && darcs add a
echo 'before merges' > b && darcs add b
darcs rec -am 'Initial state'
cd ..
darcs get master branch1
darcs get master branch2
cd branch1
echo branch1 > b && darcs rec -am 'Add b branch1'
darcs tag -m 'darcs-fastconvert merge pre-source: foo_merge_id'
cd ../branch2
echo branch2 > b && darcs rec -am 'Add b branch2'
darcs tag -m 'darcs-fastconvert merge pre-source: foo_merge_id'
cd ../master
echo master > b && darcs rec -am 'Add b master'
darcs tag -m 'darcs-fastconvert merge pre-target: foo_merge_id'
darcs pull -a ../branch1
darcs pull -a ../branch2
darcs rev -a
echo 'master resolution' > b && darcs rec -am 'Resolve b conflict in master'
darcs tag -m 'darcs-fastconvert merge post: foo_merge_id'
cd ..
git init gitrepo
darcs-fastconvert export master branch1 branch2 | (cd gitrepo && git fast-import)
cd gitrepo
git reset --hard
[[ $(head b) == 'master resolution' ]]
git checkout branch1
[[ $(head b) == 'branch1' ]]
git checkout branch2
[[ $(head b) == 'branch2' ]]
git checkout master~1
[[ $(head b) == 'master' ]]
git checkout master~2
[[ $(head b) == 'before merges' ]]
| nh2/darcs-fastconvert | tests/darcs-export-conflicting-merges.sh | Shell | bsd-3-clause | 1,207 |
#!/bin/bash
#
# Copyright 2021 The Cobalt Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
git clone https://code.videolan.org/videolan/dav1d.git
cd dav1d
mkdir linux-x64
cd linux-x64
meson .. --default-library=static
ninja
| youtube/cobalt | third_party/libdav1d/platforms/linux-x64/libdav1d-linux-x64.sh | Shell | bsd-3-clause | 754 |
#!/bin/bash
# Derived from https://github.com/heroku/stack-images/blob/master/bin/cedar-14.sh
echo 'deb http://archive.ubuntu.com/ubuntu trusty main restricted' >/etc/apt/sources.list
echo 'deb http://archive.ubuntu.com/ubuntu trusty-updates main restricted' >>/etc/apt/sources.list
echo 'deb http://archive.ubuntu.com/ubuntu trusty universe' >>/etc/apt/sources.list
echo 'deb http://archive.ubuntu.com/ubuntu trusty-updates universe' >>/etc/apt/sources.list
echo 'deb http://archive.ubuntu.com/ubuntu trusty-security main restricted' >>/etc/apt/sources.list
echo 'deb http://archive.ubuntu.com/ubuntu trusty-security universe' >>/etc/apt/sources.list
apt-get update
apt-get dist-upgrade -y
apt-get install -y --force-yes \
autoconf \
bind9-host \
bison \
build-essential \
coreutils \
curl \
daemontools \
dnsutils \
ed \
git \
imagemagick \
iputils-tracepath \
language-pack-en \
libbz2-dev \
libcurl4-openssl-dev \
libevent-dev \
libglib2.0-dev \
libjpeg-dev \
libmagickwand-dev \
libmysqlclient-dev \
libncurses5-dev \
libpq-dev \
libpq5 \
libreadline6-dev \
libssl-dev \
libxml2-dev \
libxslt-dev \
netcat-openbsd \
openjdk-7-jdk \
openjdk-7-jre-headless \
openssh-client \
openssh-server \
postgresql-server-dev-9.3 \
python \
python-dev \
ruby \
ruby-dev \
socat \
stunnel \
syslinux \
tar \
telnet \
zip \
zlib1g-dev \
pigz
# Install locales
apt-cache search language-pack \
| cut -d ' ' -f 1 \
| grep -v '^language\-pack\-\(gnome\|kde\)\-' \
| grep -v '\-base$' \
| xargs apt-get install -y --force-yes --no-install-recommends
# Workaround for CVE-2016-3714 until new ImageMagick packages come out.
echo '<policymap> <policy domain="coder" rights="none" pattern="EPHEMERAL" /> <policy domain="coder" rights="none" pattern="URL" /> <policy domain="coder" rights="none" pattern="HTTPS" /> <policy domain="coder" rights="none" pattern="MVG" /> <policy domain="coder" rights="none" pattern="MSL" /> <policy domain="coder" rights="none" pattern="TEXT" /> <policy domain="coder" rights="none" pattern="SHOW" /> <policy domain="coder" rights="none" pattern="WIN" /> <policy domain="coder" rights="none" pattern="PLT" /> </policymap>' > /etc/ImageMagick/policy.xml
rm -rf /var/cache/apt/archives/*.deb
rm -rf /root/*
rm -rf /tmp/*
rm /etc/ssh/ssh_host_*
| philiplb/flynn | util/cedarish/img/build.sh | Shell | bsd-3-clause | 2,362 |
#!/bin/sh
DCS_HOST=http://localhost:8080/dcs/rest
# HTTP POST multipart
curl $DCS_HOST -# \
-F "dcs.c2stream=@../shared/data-mining.xml" \
-o clusters-from-local-file-multipart.xml
# HTTP POST www-form-urlencoded (less efficient)
curl $DCS_HOST -# \
--data-urlencode "dcs.c2stream@../shared/data-mining.xml" \
-o clusters-from-local-file-formencoded.xml
echo Results saved to clusters-from-local-file-*
| MjAbuz/carrot2 | applications/carrot2-dcs/examples/curl/clustering-from-local-file.sh | Shell | bsd-3-clause | 426 |
#!/bin/bash
export PATH=/opt/centos/devtoolset-1.0/root/usr/bin:$PATH
# http://qt-project.org/doc/qt-4.8/configure-options.html
chmod +x configure
./configure \
-dbus -svg \
-qt-libjpeg \
-release -fontconfig -verbose \
-no-qt3support -nomake examples -nomake demos \
-qt-libpng -qt-zlib \
-webkit \
-prefix $PREFIX
make
make install
cp $SRC_DIR/bin/* $PREFIX/bin/
cd $PREFIX
#rm -rf doc imports phrasebooks plugins q3porting.xml translations
rm -rf doc imports phrasebooks q3porting.xml translations
cd $PREFIX/bin
#mv ../plugins/* bin
#rmdir ../plugins
rm -f *.bat *.pl qt3to4 qdoc3
echo "[Paths]
prefix=../
#headers=../include/
#plugins=../plugins/
#translations=../translations/
#mkspecs=../mkspecs/
#libraries=../libs/
#[SDK]
#fixupRelativePrefixDefault=1
" > $PREFIX/bin/qt.conf
echo "unset QT_PLUGIN_PATH" > $PREFIX/bin/set_env.disable_kde
chmod +x $PREFIX/bin/set_env.disable_kde
echo "
[qt]
style=QtCurve
" > $PREFIX/Trolltech.conf
| hajs/pylodger | recipes/gui/qt/build.sh | Shell | bsd-3-clause | 1,007 |
#! /bin/sh
git clone https://github.com/http2jp/hpack-test-case.git
git clone https://github.com/http2jp/http2-frame-test-case.git
| kazu-yamamoto/http2 | test-misc/prepare.sh | Shell | bsd-3-clause | 132 |
#!/usr/bin/env bash
#
# Copyright (c) 2018-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
export LC_ALL=C.UTF-8
# Make sure default datadir does not exist and is never read by creating a dummy file
if [ "$TRAVIS_OS_NAME" == "osx" ]; then
echo > $HOME/Library/Application\ Support/Bitcoin
else
DOCKER_EXEC echo \> \$HOME/.bitcoin
fi
DOCKER_EXEC mkdir -p ${DEPENDS_DIR}/SDKs ${DEPENDS_DIR}/sdk-sources
if [ -n "$OSX_SDK" ] && [ ! -f ${DEPENDS_DIR}/sdk-sources/MacOSX${OSX_SDK}.sdk.tar.gz ]; then
curl --location --fail $SDK_URL/MacOSX${OSX_SDK}.sdk.tar.gz -o ${DEPENDS_DIR}/sdk-sources/MacOSX${OSX_SDK}.sdk.tar.gz
fi
if [ -n "$OSX_SDK" ] && [ -f ${DEPENDS_DIR}/sdk-sources/MacOSX${OSX_SDK}.sdk.tar.gz ]; then
DOCKER_EXEC tar -C ${DEPENDS_DIR}/SDKs -xf ${DEPENDS_DIR}/sdk-sources/MacOSX${OSX_SDK}.sdk.tar.gz
fi
if [[ $HOST = *-mingw32 ]]; then
DOCKER_EXEC update-alternatives --set $HOST-g++ \$\(which $HOST-g++-posix\)
fi
if [ -z "$NO_DEPENDS" ]; then
if [[ $DOCKER_NAME_TAG == centos* ]]; then
# CentOS has problems building the depends if the config shell is not explicitly set
# (i.e. for libevent a Makefile with an empty SHELL variable is generated, leading to
# an error as the first command is executed)
SHELL_OPTS="CONFIG_SHELL=/bin/bash"
else
SHELL_OPTS="CONFIG_SHELL="
fi
DOCKER_EXEC $SHELL_OPTS make $MAKEJOBS -C depends HOST=$HOST $DEP_OPTS
fi
if [ "$TEST_PREVIOUS_RELEASES" = "true" ]; then
BEGIN_FOLD previous-versions
DOCKER_EXEC contrib/devtools/previous_release.sh -b -t "$PREVIOUS_RELEASES_DIR" v0.17.1 v0.18.1 v0.19.0.1
END_FOLD
fi
| gjhiggins/vcoincore | ci/test/05_before_script.sh | Shell | mit | 1,722 |
#!/bin/bash
for k in `seq 10000`; do
ls
done
| shelljs/benchmarks | test/ls10k/ls10k.sh | Shell | mit | 47 |
#!/bin/bash
source globals
# slaves_fn="slaves.txt"
# mpihosts_fn="mpi_hosts"
# provisions the slave nodes
# if [ $# -ne 4 ] ; then
# echo " Usage: provision_slaves.sh num base_hostname domain keyname"
# exit 1
# fi
currentTime=$(date)
before=$(date +%s)
echo `date` .. starting to provision $NUM_SLAVES slaves
for j in `seq 1 $NUM_SLAVES`; do
./provision_slave.sh ${SLAVES_BASE_HOSTNAME}${j} &
done
echo `date` provision_slaves provisioning kicked off... now waiting...
wait
after=$(date +%s)
delta="$(expr $after - $before)"
echo `date` provision_slaves... all slaves done in $delta
# the slaves wrote the slaves file. Now, we need to update mpi_hosts
# cp -p $slaves_fn $mpihosts_fn
| bmwshop/simplecluster | provision_slaves.sh | Shell | mit | 692 |
#!/usr/bin/env bash
# WARNING: FIREBASE_TOKEN should NOT be printed.
set +x -eu -o pipefail
FIREBASE_PROJECT_ID=aio-staging
DEPLOYED_URL=https://$FIREBASE_PROJECT_ID.firebaseapp.com
cd "`dirname $0`/.."
# Build the app
yarn build
# Deploy to staging
firebase use "$FIREBASE_PROJECT_ID" --token "$FIREBASE_TOKEN"
firebase deploy --message "Commit: $TRAVIS_COMMIT" --non-interactive --token "$FIREBASE_TOKEN"
# Run PWA-score tests
# TODO(gkalpak): Figure out why this fails and re-enable.
sleep 10
yarn test-pwa-score -- "$DEPLOYED_URL" "$MIN_PWA_SCORE_STAGING" || true
cd -
| chrisse27/angular | aio/scripts/deploy-staging.sh | Shell | mit | 581 |
#!/bin/sh
##################################################################################
# Custom build tool for Realm Objective C binding.
#
# (C) Copyright 2011-2015 by realm.io.
##################################################################################
# Warning: pipefail is not a POSIX-compatible option, but on OS X it works just fine.
# OS X uses a POSIX-compliant version of bash as /bin/sh, but apparently it does
# not strip away this feature. Also, this will fail if somebody forces the script
# to be run with zsh.
set -o pipefail
set -e
# You can override the version of the core library
: ${REALM_CORE_VERSION:=0.89.4} # set to "current" to always use the current build
# You can override the xcmode used
: ${XCMODE:=xcodebuild} # must be one of: xcodebuild (default), xcpretty, xctool
PATH=/usr/local/bin:/usr/bin:/bin:/usr/libexec:$PATH
if ! [ -z "${JENKINS_HOME}" ]; then
XCPRETTY_PARAMS="--no-utf --report junit --output build/reports/junit.xml"
CODESIGN_PARAMS="CODE_SIGN_IDENTITY= CODE_SIGNING_REQUIRED=NO"
fi
export REALM_SKIP_DEBUGGER_CHECKS=YES
usage() {
cat <<EOF
Usage: sh $0 command [argument]
command:
clean: clean up/remove all generated files
download-core: downloads core library (binary version)
build: builds all iOS and OS X frameworks
ios-static: builds fat iOS static framework
ios-dynamic: builds iOS dynamic frameworks
ios-swift: builds RealmSwift frameworks for iOS
osx: builds OS X framework
osx-swift: builds RealmSwift framework for OS X
test: tests all iOS and OS X frameworks
test-all: tests all iOS and OS X frameworks in both Debug and Release configurations
test-ios-static: tests static iOS framework on 32-bit and 64-bit simulators
test-ios-dynamic: tests dynamic iOS framework on 32-bit and 64-bit simulators
test-ios-swift: tests RealmSwift iOS framework on 32-bit and 64-bit simulators
test-ios-devices: tests dynamic and Swift iOS frameworks on all attached iOS devices
test-osx: tests OS X framework
test-osx-swift: tests RealmSwift OS X framework
verify: verifies docs, osx, osx-swift, ios-static, ios-dynamic, ios-swift, ios-device in both Debug and Release configurations
docs: builds docs in docs/output
examples: builds all examples
examples-ios: builds all static iOS examples
examples-ios-swift: builds all Swift iOS examples
examples-osx: builds all OS X examples
browser: builds the Realm Browser
test-browser: tests the Realm Browser
get-version: get the current version
set-version version: set the version
cocoapods-setup: download realm-core and create a stub RLMPlatform.h file to enable building via CocoaPods
argument:
version: version in the x.y.z format
environment variables:
XCMODE: xcodebuild (default), xcpretty or xctool
CONFIGURATION: Debug or Release (default)
REALM_CORE_VERSION: version in x.y.z format or "current" to use local build
EOF
}
######################################
# Xcode Helpers
######################################
xcode() {
mkdir -p build/DerivedData
CMD="xcodebuild -IDECustomDerivedDataLocation=build/DerivedData $@"
echo "Building with command:" $CMD
eval $CMD
}
xc() {
if [[ "$XCMODE" == "xcodebuild" ]]; then
xcode "$@"
elif [[ "$XCMODE" == "xcpretty" ]]; then
mkdir -p build
xcode "$@" | tee build/build.log | xcpretty -c ${XCPRETTY_PARAMS} || {
echo "The raw xcodebuild output is available in build/build.log"
exit 1
}
elif [[ "$XCMODE" == "xctool" ]]; then
xctool "$@"
fi
}
xcrealm() {
PROJECT=Realm.xcodeproj
xc "-project $PROJECT $@"
}
xcrealmswift() {
PROJECT=RealmSwift.xcodeproj
xc "-project $PROJECT $@"
}
build_combined() {
local scheme="$1"
local module_name="$2"
local scope_suffix="$3"
local config="$CONFIGURATION"
# Derive build paths
local build_products_path="build/DerivedData/$module_name/Build/Products"
local product_name="$module_name.framework"
local binary_path="$module_name"
local iphoneos_path="$build_products_path/$config-iphoneos$scope_suffix/$product_name"
local iphonesimulator_path="$build_products_path/$config-iphonesimulator$scope_suffix/$product_name"
local out_path="build/ios$scope_suffix"
# Build for each platform
cmd=$(echo "xc$module_name" | tr '[:upper:]' '[:lower:]') # lowercase the module name to generate command (xcrealm or xcrealmswift)
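# e.g. module_name "Realm" yields cmd "xcrealm"; "RealmSwift" yields "xcrealmswift"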
$cmd "-scheme '$scheme' -configuration $config -sdk iphoneos"
$cmd "-scheme '$scheme' -configuration $config -sdk iphonesimulator ONLY_ACTIVE_ARCH=NO"
# Combine .swiftmodule
if [ -d $iphonesimulator_path/Modules/$module_name.swiftmodule ]; then
cp $iphonesimulator_path/Modules/$module_name.swiftmodule/* $iphoneos_path/Modules/$module_name.swiftmodule/
fi
# Retrieve build products
clean_retrieve $iphoneos_path $out_path $product_name
# Combine ar archives
xcrun lipo -create "$iphonesimulator_path/$binary_path" "$iphoneos_path/$binary_path" -output "$out_path/$product_name/$module_name"
}
clean_retrieve() {
mkdir -p $2
rm -rf $2/$3
cp -R $1 $2
}
######################################
# Device Test Helper
######################################
test_ios_devices() {
serial_numbers_str=$(system_profiler SPUSBDataType | grep "Serial Number: ")
serial_numbers=()
while read -r line; do
number=${line:15} # Serial number starts at position 15
if [[ ${#number} == 40 ]]; then
serial_numbers+=("$number")
fi
done <<< "$serial_numbers_str"
if [[ ${#serial_numbers[@]} == 0 ]]; then
echo "At least one iOS device must be connected to this computer to run device tests"
if [ -z "${JENKINS_HOME}" ]; then
# Don't fail if running locally and there's no device
exit 0
fi
exit 1
fi
cmd="$1"
configuration="$2"
failed=0
for device in "${serial_numbers[@]}"; do
$cmd "-scheme 'iOS Device Tests' -configuration $configuration -destination 'id=$device' test" || failed=1
done
return $failed
}
######################################
# Input Validation
######################################
if [ "$#" -eq 0 -o "$#" -gt 2 ]; then
usage
exit 1
fi
######################################
# Variables
######################################
download_core() {
echo "Downloading dependency: core ${REALM_CORE_VERSION}"
TMP_DIR="$TMPDIR/core_bin"
mkdir -p "${TMP_DIR}"
CORE_TMP_ZIP="${TMP_DIR}/core-${REALM_CORE_VERSION}.zip.tmp"
CORE_ZIP="${TMP_DIR}/core-${REALM_CORE_VERSION}.zip"
if [ ! -f "${CORE_ZIP}" ]; then
curl -L -s "http://static.realm.io/downloads/core/realm-core-${REALM_CORE_VERSION}.zip" -o "${CORE_TMP_ZIP}"
mv "${CORE_TMP_ZIP}" "${CORE_ZIP}"
fi
(
cd "${TMP_DIR}"
rm -rf core
unzip "${CORE_ZIP}"
mv core core-${REALM_CORE_VERSION}
)
rm -rf core-${REALM_CORE_VERSION} core
mv ${TMP_DIR}/core-${REALM_CORE_VERSION} .
ln -s core-${REALM_CORE_VERSION} core
}
COMMAND="$1"
# Use Debug config if command ends with -debug, otherwise default to Release
case "$COMMAND" in
*-debug)
COMMAND="${COMMAND%-debug}"
CONFIGURATION="Debug"
;;
*) CONFIGURATION=${CONFIGURATION:-Release}
esac
export CONFIGURATION
case "$COMMAND" in
######################################
# Clean
######################################
"clean")
find . -type d -name build -exec rm -r "{}" +
exit 0
;;
######################################
# Download Core Library
######################################
"download-core")
if [ "$REALM_CORE_VERSION" = "current" ]; then
echo "Using version of core already in core/ directory"
exit 0
fi
if [ -d core -a -d ../realm-core -a ! -L core ]; then
# Allow newer versions than expected for local builds as testing
# with unreleased versions is one of the reasons to use a local build
if ! $(grep -i "${REALM_CORE_VERSION} Release notes" core/release_notes.txt >/dev/null); then
echo "Local build of core is out of date."
exit 1
else
echo "The core library seems to be up to date."
fi
elif ! [ -L core ]; then
echo "core is not a symlink. Deleting..."
rm -rf core
download_core
elif ! $(head -n 1 core/release_notes.txt | grep -i ${REALM_CORE_VERSION} >/dev/null); then
download_core
else
echo "The core library seems to be up to date."
fi
exit 0
;;
######################################
# Building
######################################
"build")
sh build.sh ios-static
sh build.sh ios-dynamic
sh build.sh ios-swift
sh build.sh osx
sh build.sh osx-swift
exit 0
;;
"ios-static")
build_combined iOS Realm
exit 0
;;
"ios-dynamic")
build_combined "iOS Dynamic" Realm "-dynamic" "LD_DYLIB_INSTALL_NAME='@rpath/RealmSwift.framework/Frameworks/Realm.framework/Realm'"
exit 0
;;
"ios-swift")
build_combined "RealmSwift iOS" RealmSwift
mkdir build/ios/swift
cp -R build/ios/RealmSwift.framework build/ios/swift
exit 0
;;
"osx")
xcrealm "-scheme OSX -configuration $CONFIGURATION"
rm -rf build/osx
mkdir build/osx
cp -R build/DerivedData/Realm/Build/Products/$CONFIGURATION/Realm.framework build/osx
exit 0
;;
"osx-swift")
xcrealmswift "-scheme 'RealmSwift OSX' -configuration $CONFIGURATION build"
rm -rf build/osx
mkdir build/osx
cp -R build/DerivedData/RealmSwift/Build/Products/$CONFIGURATION/RealmSwift.framework build/osx
exit 0
;;
######################################
# Testing
######################################
"test")
set +e # Run both sets of tests even if the first fails
failed=0
sh build.sh test-ios-static || failed=1
sh build.sh test-ios-dynamic || failed=1
sh build.sh test-ios-swift || failed=1
sh build.sh test-ios-devices || failed=1
sh build.sh test-osx || failed=1
sh build.sh test-osx-swift || failed=1
exit $failed
;;
"test-all")
set +e
failed=0
sh build.sh test || failed=1
sh build.sh test-debug || failed=1
exit $failed
;;
"test-ios-static")
xcrealm "-scheme iOS -configuration $CONFIGURATION -sdk iphonesimulator -destination 'name=iPhone 6' test"
xcrealm "-scheme iOS -configuration $CONFIGURATION -sdk iphonesimulator -destination 'name=iPhone 4S' test"
exit 0
;;
"test-ios-dynamic")
xcrealm "-scheme 'iOS Dynamic' -configuration $CONFIGURATION -sdk iphonesimulator -destination 'name=iPhone 6' test"
xcrealm "-scheme 'iOS Dynamic' -configuration $CONFIGURATION -sdk iphonesimulator -destination 'name=iPhone 4S' test"
exit 0
;;
"test-ios-swift")
xcrealmswift "-scheme 'RealmSwift iOS' -configuration $CONFIGURATION -sdk iphonesimulator -destination 'name=iPhone 6' test"
xcrealmswift "-scheme 'RealmSwift iOS' -configuration $CONFIGURATION -sdk iphonesimulator -destination 'name=iPhone 4S' test"
exit 0
;;
"test-ios-devices")
failed=0
test_ios_devices xcrealm "$CONFIGURATION" || failed=1
test_ios_devices xcrealmswift "$CONFIGURATION" || failed=1
exit $failed
;;
"test-osx")
xcrealm "-scheme OSX -configuration $CONFIGURATION test GCC_GENERATE_TEST_COVERAGE_FILES=YES GCC_INSTRUMENT_PROGRAM_FLOW_ARCS=YES"
exit 0
;;
"test-osx-swift")
xcrealmswift "-scheme 'RealmSwift OSX' -configuration $CONFIGURATION test"
exit 0
;;
######################################
# Full verification
######################################
"verify")
sh build.sh verify-docs
sh build.sh verify-osx
sh build.sh verify-osx-debug
sh build.sh verify-osx-swift
sh build.sh verify-osx-swift-debug
sh build.sh verify-ios-static
sh build.sh verify-ios-static-debug
sh build.sh verify-ios-dynamic
sh build.sh verify-ios-dynamic-debug
sh build.sh verify-ios-swift
sh build.sh verify-ios-swift-debug
sh build.sh verify-ios-device
;;
"verify-osx")
sh build.sh test-osx
sh build.sh test-browser
sh build.sh examples-osx
(
cd examples/osx/objc/build/DerivedData/RealmExamples/Build/Products/$CONFIGURATION
DYLD_FRAMEWORK_PATH=. ./JSONImport >/dev/null
) || exit 1
exit 0
;;
"verify-osx-swift")
sh build.sh test-osx-swift
exit 0
;;
"verify-ios-static")
sh build.sh test-ios-static
sh build.sh examples-ios
;;
"verify-ios-dynamic")
sh build.sh test-ios-dynamic
;;
"verify-ios-swift")
sh build.sh test-ios-swift
sh build.sh examples-ios-swift
;;
"verify-ios-device")
sh build.sh test-ios-devices
exit 0
;;
"verify-docs")
sh scripts/build-docs.sh
if [ -s docs/swift_output/undocumented.txt ]; then
echo "Undocumented RealmSwift declarations"
exit 1
fi
exit 0
;;
# FIXME: remove these targets from ci
"verify-ios")
exit 0
;;
######################################
# Docs
######################################
"docs")
sh scripts/build-docs.sh
exit 0
;;
######################################
# Examples
######################################
"examples")
sh build.sh clean
sh build.sh examples-ios
sh build.sh examples-ios-swift
sh build.sh examples-osx
exit 0
;;
"examples-ios")
xc "-project examples/ios/objc/RealmExamples.xcodeproj -scheme Simple -configuration $CONFIGURATION build ${CODESIGN_PARAMS}"
xc "-project examples/ios/objc/RealmExamples.xcodeproj -scheme TableView -configuration $CONFIGURATION build ${CODESIGN_PARAMS}"
xc "-project examples/ios/objc/RealmExamples.xcodeproj -scheme Migration -configuration $CONFIGURATION build ${CODESIGN_PARAMS}"
xc "-project examples/ios/objc/RealmExamples.xcodeproj -scheme Backlink -configuration $CONFIGURATION build ${CODESIGN_PARAMS}"
xc "-project examples/ios/objc/RealmExamples.xcodeproj -scheme GroupedTableView -configuration $CONFIGURATION build ${CODESIGN_PARAMS}"
xc "-project examples/ios/objc/RealmExamples.xcodeproj -scheme Encryption -configuration $CONFIGURATION build ${CODESIGN_PARAMS}"
if [ ! -z "${JENKINS_HOME}" ]; then
xc "-project examples/ios/objc/RealmExamples.xcodeproj -scheme Extension -configuration $CONFIGURATION build ${CODESIGN_PARAMS}"
fi
exit 0
;;
"examples-ios-swift")
xc "-project examples/ios/swift/RealmExamples.xcodeproj -scheme Simple -configuration $CONFIGURATION build ${CODESIGN_PARAMS}"
xc "-project examples/ios/swift/RealmExamples.xcodeproj -scheme TableView -configuration $CONFIGURATION build ${CODESIGN_PARAMS}"
xc "-project examples/ios/swift/RealmExamples.xcodeproj -scheme Migration -configuration $CONFIGURATION build ${CODESIGN_PARAMS}"
xc "-project examples/ios/swift/RealmExamples.xcodeproj -scheme Encryption -configuration $CONFIGURATION build ${CODESIGN_PARAMS}"
xc "-project examples/ios/swift/RealmExamples.xcodeproj -scheme Backlink -configuration $CONFIGURATION build ${CODESIGN_PARAMS}"
xc "-project examples/ios/swift/RealmExamples.xcodeproj -scheme GroupedTableView -configuration $CONFIGURATION build ${CODESIGN_PARAMS}"
exit 0
;;
"examples-osx")
xc "-project examples/osx/objc/RealmExamples.xcodeproj -scheme JSONImport -configuration ${CONFIGURATION} build ${CODESIGN_PARAMS}"
;;
######################################
# Browser
######################################
"browser")
xc "-project tools/RealmBrowser/RealmBrowser.xcodeproj -scheme RealmBrowser -configuration $CONFIGURATION clean build ${CODESIGN_PARAMS}"
exit 0
;;
"test-browser")
xc "-project tools/RealmBrowser/RealmBrowser.xcodeproj -scheme RealmBrowser test ${CODESIGN_PARAMS}"
exit 0
;;
######################################
# Versioning
######################################
"get-version")
version_file="Realm/Realm-Info.plist"
echo "$(PlistBuddy -c "Print :CFBundleVersion" "$version_file")"
exit 0
;;
"set-version")
realm_version="$2"
version_files="Realm/Realm-Info.plist tools/RealmBrowser/RealmBrowser/RealmBrowser-Info.plist"
if [ -z "$realm_version" ]; then
echo "You must specify a version."
exit 1
fi
for version_file in $version_files; do
PlistBuddy -c "Set :CFBundleVersion $realm_version" "$version_file"
PlistBuddy -c "Set :CFBundleShortVersionString $realm_version" "$version_file"
done
exit 0
;;
######################################
# CocoaPods
######################################
"cocoapods-setup")
sh build.sh download-core
# CocoaPods doesn't support symlinks
if [ -L core ]; then
mv core core-tmp
mv $(readlink core-tmp) core
rm core-tmp
fi
# CocoaPods doesn't support multiple header_mappings_dir, so combine
# both sets of headers into a single directory
rm -rf include
cp -R core/include include
mkdir -p include/Realm
cp Realm/*.{h,hpp} include/Realm
touch include/Realm/RLMPlatform.h
;;
######################################
# Release packaging
######################################
"package-browser")
cd tightdb_objc
sh build.sh browser
cd ${WORKSPACE}/tightdb_objc/tools/RealmBrowser/build/DerivedData/RealmBrowser/Build/Products/Release
zip -r realm-browser.zip Realm\ Browser.app
mv realm-browser.zip ${WORKSPACE}
;;
"package-examples")
cd tightdb_objc
./scripts/package_examples.rb
zip --symlinks -r realm-examples.zip examples
;;
"package-test-examples")
VERSION=$(file realm-objc-*.zip | grep -o '\d*\.\d*\.\d*')
unzip realm-objc-${VERSION}.zip
cp $0 realm-objc-${VERSION}
cd realm-objc-${VERSION}
sh build.sh examples-ios
sh build.sh examples-osx
cd ..
rm -rf realm-objc-${VERSION}
unzip realm-swift-${VERSION}.zip
cp $0 realm-swift-${VERSION}
cd realm-swift-${VERSION}
sh build.sh examples-ios-swift
cd ..
rm -rf realm-swift-${VERSION}
;;
"package-ios-static")
cd tightdb_objc
sh build.sh test-ios-static
sh build.sh ios-static
cd build/ios
zip --symlinks -r realm-framework-ios.zip Realm.framework
;;
"package-ios-dynamic")
cd tightdb_objc
sh build.sh ios-dynamic
cd build/ios-dynamic
zip --symlinks -r realm-dynamic-framework-ios.zip Realm.framework
;;
"package-osx")
cd tightdb_objc
sh build.sh test-osx
cd build/DerivedData/Realm/Build/Products/Release
zip --symlinks -r realm-framework-osx.zip Realm.framework
;;
"package-ios-swift")
cd tightdb_objc
sh build.sh ios-swift
cd build/ios/swift
zip --symlinks -r realm-swift-framework-ios.zip RealmSwift.framework
;;
"package-osx-swift")
cd tightdb_objc
sh build.sh osx-swift
cd build/osx
zip --symlinks -r realm-swift-framework-osx.zip RealmSwift.framework
;;
"package-release")
LANG="$2"
TEMPDIR=$(mktemp -d $TMPDIR/realm-release-package-${LANG}.XXXX)
cd tightdb_objc
VERSION=$(sh build.sh get-version)
cd ..
FOLDER=${TEMPDIR}/realm-${LANG}-${VERSION}
mkdir -p ${FOLDER}/osx ${FOLDER}/ios ${FOLDER}/browser
if [[ "${LANG}" == "objc" ]]; then
mkdir -p ${FOLDER}/ios/static
mkdir -p ${FOLDER}/ios/dynamic
mkdir -p ${FOLDER}/Swift
(
cd ${FOLDER}/osx
unzip ${WORKSPACE}/realm-framework-osx.zip
)
(
cd ${FOLDER}/ios/static
unzip ${WORKSPACE}/realm-framework-ios.zip
)
(
cd ${FOLDER}/ios/dynamic
unzip ${WORKSPACE}/realm-dynamic-framework-ios.zip
)
else
(
cd ${FOLDER}/osx
unzip ${WORKSPACE}/realm-swift-framework-osx.zip
)
(
cd ${FOLDER}/ios
unzip ${WORKSPACE}/realm-swift-framework-ios.zip
)
fi
(
cd ${FOLDER}/browser
unzip ${WORKSPACE}/realm-browser.zip
)
(
cd ${WORKSPACE}/tightdb_objc
cp -R plugin ${FOLDER}
cp LICENSE ${FOLDER}/LICENSE.txt
if [[ "${LANG}" == "objc" ]]; then
cp Realm/Swift/RLMSupport.swift ${FOLDER}/Swift/
fi
)
(
cd ${FOLDER}
unzip ${WORKSPACE}/realm-examples.zip
cd examples
if [[ "${LANG}" == "objc" ]]; then
rm -rf ios/swift
else
rm -rf ios/objc ios/rubymotion osx
fi
)
cat > ${FOLDER}/docs.webloc <<EOF
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>URL</key>
<string>https://realm.io/docs/${LANG}/${VERSION}</string>
</dict>
</plist>
EOF
(
cd ${TEMPDIR}
zip --symlinks -r realm-${LANG}-${VERSION}.zip realm-${LANG}-${VERSION}
mv realm-${LANG}-${VERSION}.zip ${WORKSPACE}
)
;;
"test-package-release")
# Generate a release package locally for testing purposes
# Real releases should always be done via Jenkins
if [ -z "${WORKSPACE}" ]; then
echo 'WORKSPACE must be set to a directory to assemble the release in'
exit 1
fi
if [ -d "${WORKSPACE}" ]; then
echo 'WORKSPACE directory should not already exist'
exit 1
fi
REALM_SOURCE=$(pwd)
mkdir $WORKSPACE
cd $WORKSPACE
git clone $REALM_SOURCE tightdb_objc
echo 'Packaging iOS static'
sh tightdb_objc/build.sh package-ios-static
cp tightdb_objc/build/ios/realm-framework-ios.zip .
echo 'Packaging iOS dynamic'
sh tightdb_objc/build.sh package-ios-dynamic
cp tightdb_objc/build/ios-dynamic/realm-dynamic-framework-ios.zip .
echo 'Packaging OS X'
sh tightdb_objc/build.sh package-osx
cp tightdb_objc/build/DerivedData/Realm/Build/Products/Release/realm-framework-osx.zip .
echo 'Packaging examples'
(
cd tightdb_objc/examples
git clean -xfd
)
sh tightdb_objc/build.sh package-examples
cp tightdb_objc/realm-examples.zip .
echo 'Packaging browser'
sh tightdb_objc/build.sh package-browser
echo 'Packaging iOS Swift'
sh tightdb_objc/build.sh package-ios-swift
cp tightdb_objc/build/ios/swift/realm-swift-framework-ios.zip .
echo 'Packaging OS X Swift'
sh tightdb_objc/build.sh package-osx-swift
cp tightdb_objc/build/osx/realm-swift-framework-osx.zip .
echo 'Building final release packages'
sh tightdb_objc/build.sh package-release objc
sh tightdb_objc/build.sh package-release swift
echo 'Testing packaged examples'
sh tightdb_objc/build.sh package-test-examples
;;
*)
echo "Unknown command '$COMMAND'"
usage
exit 1
;;
esac
| rkawajiri/kNNRealm | Example/Pods/RealmSwift/build.sh | Shell | mit | 24,980 |
#!/bin/bash
set -e
# See http://stackoverflow.com/a/21189044/1935861
parse_yaml () {
prefix=$2
s='[[:space:]]*' w='[a-zA-Z0-9_]*' fs=$(echo @|tr @ '\034')
sed -ne "s|^\($s\):|\1|" \
-e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
-e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $1 |
awk -F$fs '{
indent = length($1)/2;
vname[indent] = $2;
for (i in vname) {if (i > indent) {delete vname[i]}}
if (length($3) > 0) {
vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
printf("%s%s%s=\"%s\"\n", "'$prefix'",vn, $2, $3);
}
}'
}
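# Illustrative sketch only (not part of the original test): parse_yaml turns a
# YAML file into prefixed shell assignments that can be eval'd. The file name
# "config.yml" and the "conf_" prefix below are assumptions for demonstration;
# a file containing
#   server:
#     host: "localhost"
#     port: 9000
# would yield
#   conf_server_host="localhost"
#   conf_server_port="9000"
if [[ -f config.yml ]]; then
  eval "$(parse_yaml config.yml "conf_")"
fi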
url="http://localhost:9000/"
http_code=`curl -sw "%{http_code}\\n" -o /dev/null ${url}`
response=`curl -s $url`
# TODO: Verify version of The Lounge in CLI stdout
if test $http_code != 200; then
printf "FAILURE: HTTP code should be 200, but was ${http_code} instead.\n"
exit 1
fi
if [[ $response != *"<title>The Lounge</title>"* ]]; then
printf "FAILURE: \"<title>The Lounge</title>\" was not found in the HTTP response.\n"
exit 1
fi
exit 0
|
astorije/ansible-role-shout
|
tests/tests.sh
|
Shell
|
mit
| 1,073 |
#!/bin/bash
#######################################
# Echo/log function
# Arguments:
# String: value to log
#######################################
log() {
if [[ "$@" ]]; then echo "[${T3APP_NAME^^}] $@";
else echo; fi
}
#########################################################
# Configure composer
#########################################################
function configure_composer() {
  # Increase timeout for a complete composer install - installing the whole Flow/Neos stack can sometimes take a while
composer config --global process-timeout 1800
  # This is an automated build, so if there are any changes in vendor packages, discard them without asking
composer config --global discard-changes true
}
#########################################################
# Configure git, so all git commands always works.
# Otherwise git shouts about missing configuration.
# Note: the actual values don't matter; what matters most
# is that they are configured.
#########################################################
function configure_git() {
git config --global user.email "${T3APP_USER_NAME}@local"
git config --global user.name $T3APP_USER_NAME
}
#########################################################
# Clone and compose Flow/Neos app
#
# Globals:
# T3APP_BUILD_REPO_URL
# T3APP_BUILD_BRANCH
# T3APP_BUILD_COMPOSER_PARAMS
# Arguments:
# String: target_path, where git clones the repository
#########################################################
function clone_and_compose() {
local target_path=$1
  # Pull from the Gerrit mirror instead of git.typo3.org (workaround for the instability of git.typo3.org)
git config --global url."http://git.typo3.org".insteadOf git://git.typo3.org
# Clone TYPO3 app code from provided repository
git clone $T3APP_BUILD_REPO_URL $target_path && cd $target_path
# Do composer install
git checkout $T3APP_BUILD_BRANCH
git log -10 --pretty=format:"%h %an %cr: %s" --graph
composer install $T3APP_BUILD_COMPOSER_PARAMS
echo && echo # Just to add an empty line after `composer` verbose output
}
#########################################################
# If the installed TYPO3 app contains a build hook script
# (@see $T3APP_USER_BUILD_SCRIPT), it will be run.
# This script can be used to perform all steps necessary to get
# the site up and running, e.g. compile CSS, configure extra
# settings in YAML files etc.
#
# Globals:
# T3APP_USER_BUILD_SCRIPT
# Arguments:
# String: param (optional), e.g. '--post-install'
# String: user to run script as (optional), e.g. 'www'
#########################################################
function hook_user_build_script() {
local param=${1:-''}
local user=${2:-''}
if [[ -f $T3APP_USER_BUILD_SCRIPT ]]; then
chmod +x $T3APP_USER_BUILD_SCRIPT
else
return 0
fi
log && log "Running user hook script with param '$param':"
if [[ -n "$param" && ! $(grep -- $param $T3APP_USER_BUILD_SCRIPT) ]]; then
log "No param '$param' found in $T3APP_USER_BUILD_SCRIPT script content. Skipping..."
return 0
fi
# Run ./build.sh script (as specific user, if provided)
if [[ -n "$user" ]]; then
    su $user -c "$T3APP_USER_BUILD_SCRIPT $param"
else
source $T3APP_USER_BUILD_SCRIPT $param
fi
}
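# Illustrative usage only (the '--post-install' param and 'www' user are the
# examples mentioned in the docstring above, not values defined in this file):
# a caller sourcing this file might invoke
#   hook_user_build_script                      # run the whole build script
#   hook_user_build_script --post-install www   # run only the --post-install part, as user 'www'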
|
dimaip/docker-typo3-flow-neos-abstract
|
container-files/build-typo3-app/include-functions-common.sh
|
Shell
|
mit
| 3,231 |
#!/bin/bash
fur favicon "favicon.png" --text="a" --color="n" --font="p" --shape="g" --font-size="92" --format="png"
|
fur-labo/fur-examples
|
example/09-a/render_favicon.sh
|
Shell
|
mit
| 116 |
#!/usr/bin/env bash
#
# Copyright (c) 2018-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Check for duplicate includes.
# Guard against accidental introduction of new Boost dependencies.
# Check includes: Check for duplicate includes. Enforce bracket syntax includes.
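# For example, a line such as
#   #include "util/strencodings.h"
# in a non-ignored source file would be flagged by the bracket-syntax check
# below and should be written as
#   #include <util/strencodings.h>
# (the file name is illustrative only).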
export LC_ALL=C
IGNORE_REGEXP="/(leveldb|secp256k1|univalue|crc32c)/"
# cd to root folder of git repo for git ls-files to work properly
cd "$(dirname $0)/../.." || exit 1
filter_suffix() {
git ls-files | grep -E "^src/.*\.${1}"'$' | grep -Ev "${IGNORE_REGEXP}"
}
EXIT_CODE=0
for HEADER_FILE in $(filter_suffix h); do
DUPLICATE_INCLUDES_IN_HEADER_FILE=$(grep -E "^#include " < "${HEADER_FILE}" | sort | uniq -d)
if [[ ${DUPLICATE_INCLUDES_IN_HEADER_FILE} != "" ]]; then
echo "Duplicate include(s) in ${HEADER_FILE}:"
echo "${DUPLICATE_INCLUDES_IN_HEADER_FILE}"
echo
EXIT_CODE=1
fi
done
for CPP_FILE in $(filter_suffix cpp); do
DUPLICATE_INCLUDES_IN_CPP_FILE=$(grep -E "^#include " < "${CPP_FILE}" | sort | uniq -d)
if [[ ${DUPLICATE_INCLUDES_IN_CPP_FILE} != "" ]]; then
echo "Duplicate include(s) in ${CPP_FILE}:"
echo "${DUPLICATE_INCLUDES_IN_CPP_FILE}"
echo
EXIT_CODE=1
fi
done
INCLUDED_CPP_FILES=$(git grep -E "^#include [<\"][^>\"]+\.cpp[>\"]" -- "*.cpp" "*.h")
if [[ ${INCLUDED_CPP_FILES} != "" ]]; then
echo "The following files #include .cpp files:"
echo "${INCLUDED_CPP_FILES}"
echo
EXIT_CODE=1
fi
EXPECTED_BOOST_INCLUDES=(
boost/algorithm/string.hpp
boost/algorithm/string/classification.hpp
boost/algorithm/string/replace.hpp
boost/algorithm/string/split.hpp
boost/chrono/chrono.hpp
boost/date_time/posix_time/posix_time.hpp
boost/filesystem.hpp
boost/filesystem/fstream.hpp
boost/multi_index/hashed_index.hpp
boost/multi_index/ordered_index.hpp
boost/multi_index/sequenced_index.hpp
boost/multi_index_container.hpp
boost/optional.hpp
boost/preprocessor/cat.hpp
boost/preprocessor/stringize.hpp
boost/signals2/connection.hpp
boost/signals2/last_value.hpp
boost/signals2/signal.hpp
boost/test/unit_test.hpp
boost/thread.hpp
boost/thread/condition_variable.hpp
boost/thread/mutex.hpp
boost/thread/thread.hpp
boost/variant.hpp
boost/variant/apply_visitor.hpp
boost/variant/static_visitor.hpp
)
for BOOST_INCLUDE in $(git grep '^#include <boost/' -- "*.cpp" "*.h" | cut -f2 -d: | cut -f2 -d'<' | cut -f1 -d'>' | sort -u); do
IS_EXPECTED_INCLUDE=0
for EXPECTED_BOOST_INCLUDE in "${EXPECTED_BOOST_INCLUDES[@]}"; do
if [[ "${BOOST_INCLUDE}" == "${EXPECTED_BOOST_INCLUDE}" ]]; then
IS_EXPECTED_INCLUDE=1
break
fi
done
if [[ ${IS_EXPECTED_INCLUDE} == 0 ]]; then
EXIT_CODE=1
echo "A new Boost dependency in the form of \"${BOOST_INCLUDE}\" appears to have been introduced:"
git grep "${BOOST_INCLUDE}" -- "*.cpp" "*.h"
echo
fi
done
for EXPECTED_BOOST_INCLUDE in "${EXPECTED_BOOST_INCLUDES[@]}"; do
if ! git grep -q "^#include <${EXPECTED_BOOST_INCLUDE}>" -- "*.cpp" "*.h"; then
echo "Good job! The Boost dependency \"${EXPECTED_BOOST_INCLUDE}\" is no longer used."
echo "Please remove it from EXPECTED_BOOST_INCLUDES in $0"
echo "to make sure this dependency is not accidentally reintroduced."
echo
EXIT_CODE=1
fi
done
QUOTE_SYNTAX_INCLUDES=$(git grep '^#include "' -- "*.cpp" "*.h" | grep -Ev "${IGNORE_REGEXP}")
if [[ ${QUOTE_SYNTAX_INCLUDES} != "" ]]; then
echo "Please use bracket syntax includes (\"#include <foo.h>\") instead of quote syntax includes:"
echo "${QUOTE_SYNTAX_INCLUDES}"
echo
EXIT_CODE=1
fi
exit ${EXIT_CODE}
|
gjhiggins/vcoincore
|
test/lint/lint-includes.sh
|
Shell
|
mit
| 3,903 |
#!/usr/bin/env bash
i=`cut -f1-5 $1 | bedtools intersect -a stdin -b /net/eichler/vol5/home/mchaisso/projects/PacBioSequencing/CHM1Sequencing/interstitial.bed | cut -f 5 | stats.py | tr '\n' ' ' | cut -f 6,7,9,10 -d ' '`
echo "interstitial $i"
c=`cut -f1-5 $1 | bedtools intersect -a stdin -b /net/eichler/vol5/home/mchaisso/projects/PacBioSequencing/CHM1Sequencing/centromeric.hg19.bed | cut -f 5 | stats.py | tr '\n' ' ' | cut -f 6,7,9,10 -d ' '`
echo "centromeric $c"
t=`cut -f1-5 $1 | bedtools intersect -a stdin -b /net/eichler/vol5/home/mchaisso/projects/PacBioSequencing/CHM1Sequencing/telomeric.bed | cut -f 5 | stats.py | tr '\n' ' ' | cut -f 6,7,9,10 -d ' '`
echo "telomeric $t"
|
yunlongliukm/chm1_scripts
|
SummarizeTRF.sh
|
Shell
|
mit
| 697 |
#! /bin/cat
_vm_name () {
VM_NAME=$(VBoxManage list runningvms | cut -d'"' -f2)
if [[ -z $VM_NAME ]]; then
vagrant up
VM_NAME=$(VBoxManage list runningvms | cut -d'"' -f2)
fi
}
v_up () {
_vm_name
vagrant halt
VBoxManage modifyvm $VM_NAME --natdnshostresolver1 on
vagrant up
restarts default
}
v_net () {
_vm_name
VBoxManage modifyvm $VM_NAME --natdnshostresolver1 on
}
|
jalanb/jab
|
src/bash/vbox.sh
|
Shell
|
mit
| 431 |
#!/usr/bin/env bash
## Test for issue1632 - 'darcs changes d/f' should not list any changes,
## where d is part of the repo and f is a non-existent file.
##
## Copyright (C) 2009 Ben Franksen
##
## Permission is hereby granted, free of charge, to any person
## obtaining a copy of this software and associated documentation
## files (the "Software"), to deal in the Software without
## restriction, including without limitation the rights to use, copy,
## modify, merge, publish, distribute, sublicense, and/or sell copies
## of the Software, and to permit persons to whom the Software is
## furnished to do so, subject to the following conditions:
##
## The above copyright notice and this permission notice shall be
## included in all copies or substantial portions of the Software.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
## EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
## MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
## NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
## BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
## ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
## CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
## SOFTWARE.
. lib # Load some portability helpers.
rm -rf R # Another script may have left a mess.
darcs init --repo R # Create our test repos.
cd R
mkdir d
darcs record -lam 'added directory d'
# darcs should not list any changes here:
darcs changes non-existent-file > log
not grep 'added directory d' log
# ...and neither here:
darcs changes d/non-existent-file > log
not grep 'added directory d' log
cd ..
|
DavidAlphaFox/darcs
|
tests/failing-issue1632_changes_nonexisting.sh
|
Shell
|
gpl-2.0
| 1,727 |
#!/bin/bash
RZCMD=./sd_report_zones/sd_report_zones
DEVICE=/dev/sdm
if [[ $# -eq 1 ]] ; then
DEVICE=${1}
fi
sudo ./sd_identify/sd_identify ${DEVICE} > /dev/null 2>&1
if [[ $? -eq 0 ]] ; then
sudo ./sd_reset_wp/sd_reset_wp 0xFFFFFFFFFFFFFFFF ${DEVICE} > /dev/null 2>&1
sudo ${RZCMD} ${DEVICE} 2>/dev/null | head -40 | diff - ./gold/empty.txt
if [[ $? -eq 0 ]] ; then
echo "Reset WP success."
fi
echo "... filling zones .. this may take a minute."
sudo dd if=/dev/zero of=${DEVICE} bs=1M count=2560
sudo ${RZCMD} ${DEVICE} 2>/dev/null | head -40 | diff - ./gold/ten.txt
if [[ $? -eq 0 ]] ; then
echo "Fill 10 zones success."
fi
sudo ./sd_reset_wp/sd_reset_wp $((0x80000 * 3)) ${DEVICE}
sudo ${RZCMD} ${DEVICE} 2>/dev/null | head -40 | diff - ./gold/clear-z3.txt
if [[ $? -eq 0 ]] ; then
echo "Reset Zone 3 success."
fi
sudo ./sd_reset_wp/sd_reset_wp $((0x80000 * 5)) ${DEVICE}
sudo ${RZCMD} ${DEVICE} 2>/dev/null | head -40 | diff - ./gold/clear-z5.txt
if [[ $? -eq 0 ]] ; then
echo "Reset Zone 5 success."
fi
else
echo "Not an SMR Device."
sudo ${RZCMD} ${DEVICE} > /dev/null 2>&1
if [[ $? -eq 0 ]] ; then
echo "Broken command."
else
echo "Kernel is good."
fi
fi
|
tejaswanjari/SMR_FS-EXT4
|
tools/sanity.sh
|
Shell
|
gpl-2.0
| 1,203 |
#!/usr/bin/env bash
#
# Manual creation of container assets for testing
#
pkg=$(basename $0)
container_default='buildpyCentOS8'
image='centos8:rpmbuildA'
function _git_root(){
##
## determines full path to current git project root
##
echo "$(git rev-parse --show-toplevel 2>/dev/null)"
}
function container_started(){
##
## check container status
##
    if [[ "$(docker ps | grep $container 2>/dev/null)" ]]; then
        return 0    # container running
    else
        return 1    # container stopped
    fi
}
# --- main -----------------------------------------------------------------
pkg_path=$(cd "$(dirname $0)"; pwd -P)
source "$(_git_root)/scripts/std_functions.sh"
source "$(_git_root)/scripts/colors.sh"
if [ "$1" ]; then
container="$1"
std_message "Creating container $container" "OK"
elif [ "$(docker ps -a | grep $container_default)" ]; then
tab=' '
std_message "Default container $container_default exists. You must provide a unique name as a parameter
\n${tab}$ sh $pkg 'xlinesCentOS'" "FAIL"
exit 1
else
container=$container_default
std_message "Creating default container name: $container_default" "INFO"
fi
# working directory
cd "$(_git_root)/packaging/docker/centos8" || false
# create image
std_message "Begin image build" "INFO"
docker build -t $image .
# create container
std_message "Creating and running container ($container) -- START" "INFO"
docker run -it \
--user='builder' \
--security-opt='label=disable' \
--publish='80:8080' \
--name=$container -d -v /tmp/rpm:/mnt/rpm $image tail -f /dev/null &
if container_started; then
std_message "Container ${container} started successfully" "OK"
else
std_message "Container ${container} failed to start" "FAIL"
fi
# --- closing -----------------------------------------------------------------
#std_message 'Ensuring host docker volume mnt owned by SUDO_USER (/tmp/rpm)' 'INFO'
#sudo chown -R $USER:$USER /tmp/rpm
# clean
std_message 'Cleaning up intermediate image artifacts' 'INFO'
docker image prune -f
cd $pkg_path || true
exit 0
|
t-stark/ec2cli
|
packaging/docker/test-centos8.sh
|
Shell
|
gpl-2.0
| 2,134 |
#!/bin/bash
# Helper for updating all of the packaging branches
if [ -z "$UBUNTU_RELEASES" ]; then
echo "Configure the distro platforms that you want to"
echo "build with a line like:"
echo ' export UBUNTU_RELEASES="dapper feisty gutsy hardy intrepid jaunty"'
exit 1
fi
for DISTRO in $UBUNTU_RELEASES; do
if [ -d "$PACKAGE-$DISTRO" ] ; then
echo "Updating $PACKAGE-$DISTRO"
bzr update $PACKAGE-$DISTRO
if [ "$PACKAGE" = "bzr-svn" ] ; then
cd $PACKAGE-$DISTRO
bzr merge http://bzr.debian.org/pkg-bazaar/bzr-svn/unstable/
cd ..
fi
else
SRC="lp:~bzr/ubuntu/$DISTRO/$PACKAGE/bzr-ppa"
echo "Checking out $SRC"
bzr co $SRC $PACKAGE-$DISTRO
fi
done
|
Distrotech/bzr
|
tools/packaging/update-packaging-branches.sh
|
Shell
|
gpl-2.0
| 765 |
# use as: sh load_test.sh 2>&1 | grep real
#HOST="localhost:3000"
HOST="verbcoach.herokuapp.com"
while [ 1 ]; do
time curl "http://${HOST}/cloud/generate-answers?semantics=%7B%22synsem%22%3A%7B%22sem%22%3A%7B%22aspect%22%3A%22progressive%22%2C%22subj%22%3A%7B%22mass%22%3Afalse%2C%22furniture%22%3Afalse%2C%22pred%22%3A%22lei%22%2C%22place%22%3Afalse%2C%22drinkable%22%3Afalse%2C%22human%22%3Atrue%2C%22animate%22%3Atrue%2C%22speakable%22%3Afalse%2C%22activity%22%3Afalse%2C%22physical-object%22%3Atrue%2C%22buyable%22%3Afalse%2C%22legible%22%3Afalse%2C%22artifact%22%3Afalse%2C%22edible%22%3Afalse%2C%22part-of-human-body%22%3Afalse%7D%2C%22tense%22%3A%22present%22%2C%22discrete%22%3Afalse%2C%22pred%22%3A%22dormire%22%7D%2C%22subcat%22%3A%5B%5D%2C%22cat%22%3A%22verb%22%7D%7D&_=1417843915349" | jq '.'
sleep 2
done
|
ekoontz/italianquiz
|
load_test.sh
|
Shell
|
gpl-3.0
| 826 |
#!/bin/bash
# Set the error mode so the script fails automatically if any command in it fails.
# This saves us a lot of error checking code down below.
set -e
# Load the build configuration variables.
source /OUTSIDE/script/config.sh
# Set a variable with the directory with the profile-specific files and config.
PROFILE_DIR="/OUTSIDE/profiles/$1"
# Verify command line arguments.
case $# in
0)
>&2 echo ""
>&2 echo "Missing parameter: profile"
>&2 echo "Available profiles:"
for i in profiles/*/
do
i=$(echo "$i" | sed "s/^profiles//")
i=${i::-1}
>&2 echo "* $i"
done
>&2 echo ""
exit 1
;;
1)
if [ ! -e "${PROFILE_DIR}/config" ]
then
>&2 echo ""
>&2 echo "Profile not found: $1"
>&2 echo ""
exit 1
fi
;;
*)
>&2 echo ""
>&2 echo "Too many arguments"
>&2 echo ""
exit 1
esac
# Randomize the work directory name.
OUTPUT_UUID="$(uuidgen)"
# Create the source directory where we will build the image, and switch to it.
mkdir -p "${BUILD_BASEDIR}/"
WORK_DIR="${BUILD_BASEDIR}/${OUTPUT_UUID}"
cd ~
if [ -e "${WORK_DIR}" ]
then
rm -fr -- "${WORK_DIR}/"
fi
mkdir "${WORK_DIR}"
cd "${WORK_DIR}"
# Get the name of the source code cache.
SOURCE_ID=$(echo "${REPO_URL}"@"${REPO_COMMIT}" | md5sum | cut -d ' ' -f 1)
TAR_FILE="/OUTSIDE/bin/source-${SOURCE_ID}.tar.bz2"
# If the source code was already downloaded, use it.
if [ -f "${TAR_FILE}" ]
then
echo "Fetching source code from cache..."
tar -xaf "${TAR_FILE}"
# If not, download it from the git repository.
else
echo "Downloading the source code from the repository..."
# Clone the OpenWrt source code.
git clone --progress "${REPO_URL}" . 2>&1
# If a code freeze is requested, go to that commit.
if [ -z ${REPO_COMMIT+x} ]
then
echo "Using latest commit."
else
echo "Freezing code to commit: ${REPO_COMMIT}"
git reset --hard "${REPO_COMMIT}"
fi
# Download and install the feeds.
./scripts/feeds update -a 2>&1
./scripts/feeds install -a 2>&1
# Delete the git repository data.
# This saves over a hundred megabytes of data, plus it makes everything faster.
rm -fr .git/
# Make a tarfile with a cache of the original code.
# That way we don't need to checkout the repository again on each build.
if [ -e "${TAR_FILE}" ]
then
rm -- "${TAR_FILE}"
fi
tar -caf "${TAR_FILE}" .
fi
# We already have the code, now let's customize it.
echo -e "Applying customizations...\n"
# Apply the OpenWrt patches.
if [ -e "${PROFILE_DIR}/patches" ]
then
grep -v '^[ \t]*#' "${PROFILE_DIR}/patches" | grep -v '^[ \t]*$' | while read p
do
if [ -e "/OUTSIDE/patches/$p.patch" ]
then
git apply -v "/OUTSIDE/patches/$p.patch"
fi
done
fi
# Copy the makefile configuration for this profile.
if [ -e "${PROFILE_DIR}/config" ]
then
cp "${PROFILE_DIR}/config" .config
else
if [ -e "${PROFILE_DIR}/diffconfig" ]
then
cp "${PROFILE_DIR}/diffconfig" .config
else
echo "ERROR: missing configuration file"
exit 1
fi
fi
# Delete the temporary files.
# If we miss this step, sometimes the 'make' command behaves strangely.
if [ -e tmp/ ]
then
rm -fr tmp/
mkdir tmp
fi
# If it was a full configuration file, fix the makefile if it was generated
# using an older version of OpenWrt. If it was a differential configuration
# file, convert it to a full configuration file.
if [ -e "${PROFILE_DIR}/config" ]
then
make oldconfig
else
make defconfig
fi
# Launch the menuconfig.
make menuconfig
# Copy the full and differential configuration files outside the VM.
mkdir -p "/OUTSIDE/profiles/${PROFILE_NAME}"
cp .config "${PROFILE_DIR}/config"
./scripts/diffconfig.sh > "${PROFILE_DIR}/diffconfig"
|
OpenWrt-HappyHacker/happyhacker-builder
|
script/guest/menuconfig.sh
|
Shell
|
gpl-3.0
| 3,872 |
rm 90s.db
rm last7days.db
rm last7daysupdates.db
rm a.db
# decade
./scan -d test.db -x 90s.db -w "where year >= 1990 and year < 2000"
# recently scanned
./scan -d test.db -x last7days.db -w "where (julianday(datetime('now')) - julianday(datetime(inserted, 'unixepoch'))) <= 7"
# recently changed
./scan -d test.db -x last7daysupdates.db -w "where (julianday(datetime('now')) - julianday(datetime(lastmodified, 'unixepoch'))) <= 7"
# alpha
./scan -d test.db -x a.db -w "where substr(lower(albumartist),1,1) = 'a'"
|
henkelis/sonospy
|
sonospy/gui/extracts.sh
|
Shell
|
gpl-3.0
| 523 |
#! /bin/sh
#usage:
#ngt-split.sh <input> <output> <order> <parts>
#It creates <parts> files (named <output.000>, ... <output.999>)
#containing ngram statistics (of <order> length) in Google format
#These files are a partition of the whole set of ngrams
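#Example (hypothetical file names): "split-ngt.sh corpus.txt ngrams 3 8" would
#split the dictionary of corpus.txt into 8 parts and write the 3-gram counts
#for each part, in Google format, to ngrams.000 ... ngrams.007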
basedir=$IRSTLM
bindir=$basedir/bin
scriptdir=$basedir/scripts
unset par
while [ $# -gt 0 ]
do
echo "$0: arg $1"
par[${#par[@]}]="$1"
shift
done
inputfile=${par[0]}
outputfile=${par[1]}
order=${par[2]}
parts=${par[3]}
dictfile=dict$$
$bindir/dict -i="$inputfile" -o=$dictfile -f=y -sort=n
$scriptdir/split-dict.pl --input $dictfile --output ${dictfile}. --parts $parts
rm $dictfile
for d in `ls ${dictfile}.*` ; do
w=`echo $d | perl -pe 's/.+(\.[0-9]+)$/$1/i'`
w="$outputfile$w"
echo "$bindir/ngt -i="$inputfile" -n=$order -gooout=y -o=$w -fd=$d > /dev/null"
$bindir/ngt -n=$order -gooout=y -o=$w -fd=$d -i="$inputfile" > /dev/null
rm $d
done
exit
|
besacier/WCE-LIG
|
tools/moses/irstlm-5.70.04/bin/split-ngt.sh
|
Shell
|
gpl-3.0
| 922 |
#!/bin/sh
export MROOT=$PWD
echo $MROOT
cd simp
make clean
make rs
cp minisat_static ../hhlmuc
|
swan1649/iprover
|
util/Haifa-HLMUC/build.sh
|
Shell
|
gpl-3.0
| 95 |
#!/bin/bash
usage(){
echo "
Written by Brian Bushnell
Last modified November 14, 2018
Description: Uses kmer counts to assemble contigs, extend sequences,
or error-correct reads. Tadpole has no upper bound for kmer length,
but some values are not supported. Specifically, it allows 1-31,
multiples of 2 from 32-62, multiples of 3 from 63-93, etc.
Please read bbmap/docs/guides/TadpoleGuide.txt for more information.
Usage:
Assembly: tadpole.sh in=<reads> out=<contigs>
Extension: tadpole.sh in=<reads> out=<extended> mode=extend
Correction: tadpole.sh in=<reads> out=<corrected> mode=correct
Extension and correction may be done simultaneously. Error correction on
multiple files may be done like this:
tadpole.sh in=libA_r1.fq,libA_merged.fq in2=libA_r2.fq,null extra=libB_r1.fq out=ecc_libA_r1.fq,ecc_libA_merged.fq out2=ecc_libA_r2.fq,null mode=correct
Extending contigs with reads could be done like this:
tadpole.sh in=contigs.fa out=extended.fa el=100 er=100 mode=extend extra=reads.fq k=62
Input parameters:
in=<file> Primary input file for reads to use as kmer data.
in2=<file> Second input file for paired data.
extra=<file> Extra files for use as kmer data, but not for error-
correction or extension.
reads=-1 Only process this number of reads, then quit (-1 means all).
NOTE: in, in2, and extra may also be comma-delimited lists of files.
Output parameters:
out=<file> Write contigs (in contig mode) or corrected/extended
reads (in other modes).
out2=<file> Second output file for paired output.
outd=<file> Write discarded reads, if using junk-removal flags.
dot=<file> Write a contigs connectivity graph (partially implemented)
dump=<file> Write kmers and their counts.
fastadump=t Write kmers and counts as fasta versus 2-column tsv.
mincounttodump=1 Only dump kmers with at least this depth.
showstats=t Print assembly statistics after writing contigs.
Prefiltering parameters:
prefilter=0 If set to a positive integer, use a countmin sketch
to ignore kmers with depth of that value or lower.
prehashes=2 Number of hashes for prefilter.
prefiltersize=0.2 (pff) Fraction of memory to use for prefilter.
minprobprefilter=t (mpp) Use minprob for the prefilter.
prepasses=1          Use this many prefiltering passes; higher is more thorough
if the filter is very full. Set to 'auto' to iteratively
prefilter until the remaining kmers will fit in memory.
onepass=f If true, prefilter will be generated in same pass as kmer
counts. Much faster but counts will be lower, by up to
prefilter's depth limit.
filtermem=0 Allows manually specifying prefilter memory in bytes, for
deterministic runs. 0 will set it automatically.
Hashing parameters:
k=31 Kmer length (1 to infinity). Memory use increases with K.
prealloc=t Pre-allocate memory rather than dynamically growing;
faster and more memory-efficient. A float fraction (0-1)
may be specified; default is 1.
minprob=0.5 Ignore kmers with overall probability of correctness below this.
minprobmain=t (mpm) Use minprob for the primary kmer counts.
threads=X Spawn X hashing threads (default is number of logical processors).
rcomp=t Store and count each kmer together and its reverse-complement.
coremask=t All kmer extensions share the same hashcode.
fillfast=t Speed up kmer extension lookups.
Assembly parameters:
mincountseed=3 (mcs) Minimum kmer count to seed a new contig or begin extension.
mincountextend=2 (mce) Minimum kmer count continue extension of a read or contig.
It is recommended that mce=1 for low-depth metagenomes.
mincountretain=0 (mincr) Discard kmers with count below this.
maxcountretain=INF (maxcr) Discard kmers with count above this.
branchmult1=20 (bm1) Min ratio of 1st to 2nd-greatest path depth at high depth.
branchmult2=3 (bm2) Min ratio of 1st to 2nd-greatest path depth at low depth.
branchlower=3 (blc) Max value of 2nd-greatest path depth to be considered low.
minextension=2 (mine) Do not keep contigs that did not extend at least this much.
mincontig=auto (minc) Do not write contigs shorter than this.
mincoverage=1 (mincov) Do not write contigs with average coverage below this.
maxcoverage=inf (maxcov) Do not write contigs with average coverage above this.
trimends=0 (trim) Trim contig ends by this much. Trimming by K/2
may yield more accurate genome size estimation.
trimcircular=t Trim one end of contigs ending in LOOP/LOOP by K-1,
to eliminate the overlapping portion.
contigpasses=16 Build contigs with decreasing seed depth for this many iterations.
contigpassmult=1.7 Ratio between seed depth of two iterations.
ownership=auto For concurrency; do not touch.
processcontigs=f Explore the contig connectivity graph. (partially implemented)
Processing modes:
mode=contig contig: Make contigs from kmers.
extend: Extend sequences to be longer, and optionally
perform error correction.
correct: Error correct only.
insert: Measure insert sizes.
discard: Discard low-depth reads, without error correction.
Extension parameters:
extendleft=100 (el) Extend to the left by at most this many bases.
extendright=100 (er) Extend to the right by at most this many bases.
ibb=t (ignorebackbranches) Do not stop at backward branches.
extendrollback=3 Trim a random number of bases, up to this many, on reads
that extend only partially. This prevents the creation
of sharp coverage discontinuities at branches.
Error-correction parameters:
ecc=f Error correct via kmer counts.
reassemble=t If ecc is enabled, use the reassemble algorithm.
pincer=f If ecc is enabled, use the pincer algorithm.
tail=f If ecc is enabled, use the tail algorithm.
eccfull=f If ecc is enabled, use tail over the entire read.
aggressive=f (aecc) Use aggressive error correction settings.
Overrides some other flags like errormult1 and deadzone.
conservative=f (cecc) Use conservative error correction settings.
Overrides some other flags like errormult1 and deadzone.
rollback=t Undo changes to reads that have lower coverage for
any kmer after correction.
markbadbases=0 (mbb) Any base fully covered by kmers with count below
this will have its quality reduced.
markdeltaonly=t (mdo) Only mark bad bases adjacent to good bases.
meo=t (markerrorreadsonly) Only mark bad bases in reads
containing errors.
markquality=0 (mq) Set quality scores for marked bases to this.
A level of 0 will also convert the base to an N.
errormult1=16 (em1) Min ratio between kmer depths to call an error.
errormult2=2.6 (em2) Alternate ratio between low-depth kmers.
errorlowerconst=3 (elc) Use mult2 when the lower kmer is at most this deep.
mincountcorrect=3 (mcc) Don't correct to kmers with count under this.
pathsimilarityfraction=0.45(psf) Max difference ratio considered similar.
Controls whether a path appears to be continuous.
pathsimilarityconstant=3 (psc) Absolute differences below this are ignored.
errorextensionreassemble=5 (eer) Verify this many kmers before the error as
having similar depth, for reassemble.
errorextensionpincer=5 (eep) Verify this many additional bases after the
error as matching current bases, for pincer.
errorextensiontail=9 (eet) Verify additional bases before and after
the error as matching current bases, for tail.
deadzone=0 (dz) Do not try to correct bases within this distance of
read ends.
window=12 (w) Length of window to use in reassemble mode.
windowcount=6 (wc) If more than this many errors are found within a
a window, halt correction in that direction.
qualsum=80 (qs) If the sum of the qualities of corrected bases within
a window exceeds this, halt correction in that direction.
rbi=t (requirebidirectional) Require agreement from both
directions when correcting errors in the middle part of
the read using the reassemble algorithm.
errorpath=1 (ep) For debugging purposes.
Junk-removal parameters (to only remove junk, set mode=discard):
tossjunk=f Remove reads that cannot be used for assembly.
This means they have no kmers above depth 1 (2 for paired
reads) and the outermost kmers cannot be extended.
Pairs are removed only if both reads fail.
tossdepth=-1 Remove reads containing kmers at or below this depth.
Pairs are removed if either read fails.
lowdepthfraction=0 (ldf) Require at least this fraction of kmers to be
low-depth to discard a read; range 0-1. 0 still
requires at least 1 low-depth kmer.
requirebothbad=f (rbb) Only discard pairs if both reads are low-depth.
tossuncorrectable (tu) Discard reads containing uncorrectable errors.
Requires error-correction to be enabled.
Shaving parameters:
shave=t Remove dead ends (aka hair).
rinse=t Remove bubbles.
wash= Set shave and rinse at the same time.
maxshavedepth=1 (msd) Shave or rinse kmers at most this deep.
exploredist=300 (sed) Quit after exploring this far.
discardlength=150 (sdl) Discard shavings up to this long.
Note: Shave and rinse can produce substantially better assemblies
for low-depth data, but they are very slow for large metagenomes.
Overlap parameters (for overlapping paired-end reads only):
merge=f Attempt to merge overlapping reads prior to
kmer-counting, and again prior to correction. Output
will still be unmerged pairs.
ecco=f Error correct via overlap, but do not merge reads.
testmerge=t Test kmer counts around the read merge junctions. If
it appears that the merge created new errors, undo it.
Java Parameters:
-Xmx This will set Java's memory usage, overriding autodetection.
-Xmx20g will specify 20 gigs of RAM, and -Xmx200m will specify 200 megs.
The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an
out-of-memory exception occurs. Requires Java 8u92+.
-da Disable assertions.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
CP="$DIR""current/"
z="-Xmx14g"
z2="-Xms14g"
EA="-ea"
EOOM=""
set=0
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
calcXmx () {
source "$DIR""/calcmem.sh"
parseXmx "$@"
if [[ $set == 1 ]]; then
return
fi
freeRam 15000m 84
z="-Xmx${RAM}m"
z2="-Xms${RAM}m"
}
calcXmx "$@"
tadpole() {
if [[ $SHIFTER_RUNTIME == 1 ]]; then
#Ignore NERSC_HOST
shifter=1
elif [[ $NERSC_HOST == genepool ]]; then
module unload oracle-jdk
module load oracle-jdk/1.8_144_64bit
module load pigz
elif [[ $NERSC_HOST == denovo ]]; then
module unload java
module load java/1.8.0_144
module load pigz
elif [[ $NERSC_HOST == cori ]]; then
module use /global/common/software/m342/nersc-builds/denovo/Modules/jgi
module use /global/common/software/m342/nersc-builds/denovo/Modules/usg
module unload java
module load java/1.8.0_144
module load pigz
fi
local CMD="java $EA $EOOM $z $z2 -cp $CP assemble.Tadpole $@"
echo $CMD >&2
eval $CMD
}
tadpole "$@"
|
abiswas-odu/Disco
|
bbmap/tadpole.sh
|
Shell
|
gpl-3.0
| 12,572 |
#!/usr/bin/env bash
thisdir=$(dirname $0)
cd $thisdir
if ! [ -e ./runner ]; then
# turning on -O2 is too variable accross different platforms, so leave off:
#
# the move and sleep steps here help to make sure that we don't get a "text file busy"
# error on the ./runner call below:
#
gcc ./runner.c -lm -o runner.tmp && mv runner.tmp runner && sleep 1
fi
./runner $1
|
Illumina/HapMix
|
pyflow/demo/subWorkflow/testtasks/runner.bash
|
Shell
|
gpl-3.0
| 396 |
#!/bin/bash
BIN_PATH="`dirname \"$0\"`"
DEFAULT_LOG_LEVEL=info
DEFAULT_CELERY_CONCURRENCY=30
LOOM_LOG_LEVEL=${LOOM_LOG_LEVEL:-$DEFAULT_LOG_LEVEL}
LOOM_WORKER_CELERY_CONCURRENCY=${LOOM_WORKER_CELERY_CONCURRENCY:-$DEFAULT_CELERY_CONCURRENCY}
# omitting --without-gossip causes missed heartbeat errors
celery -A loomengine_server.core -P eventlet worker --concurrency=${LOOM_WORKER_CELERY_CONCURRENCY} --loglevel=${LOOM_LOG_LEVEL} --workdir=${BIN_PATH}/../server --without-gossip
|
StanfordBioinformatics/xppf
|
bin/run-worker.sh
|
Shell
|
agpl-3.0
| 481 |
#!/bin/sh
################################################################################
#
# Copyright (C) 2018 Neighborhood Guard, Inc. All rights reserved.
# Original author: Douglas Kerr
#
# This file is part of FTP_Upload.
#
# FTP_Upload is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FTP_Upload is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with FTP_Upload. If not, see <http://www.gnu.org/licenses/>.
#
################################################################################
# tell the script under test not to proceed with normal execution
#
UNIT_TEST_IN_PROGRESS=1
. ../utils.sh
setUp() {
rm -rf _ttf_* _testingroot uploader.conf # remove the temporary test files
}
# test the functions for setting values in config files
#
test_set_config_value() {
# set up temporary test files
ttf_orig=_ttf_configvals_orig.conf
cat > $ttf_orig << 'END_OF_FILE'
server_name = examle.com
user_name=example_user
value_with_spaces = value with spaces
number1=999
number2 = 9
END_OF_FILE
ttf_expected=_ttf_configvals_expected.conf
cat > $ttf_expected << 'END_OF_FILE'
server_name = realname.org
user_name=realName
value_with_spaces = new value with spaces
number1=111
number2 = 222
END_OF_FILE
# substitute the values
set_config_value $ttf_orig server_name realname.org
set_config_value $ttf_orig user_name realName
set_config_value $ttf_orig value_with_spaces "new value with spaces"
set_config_value $ttf_orig number1 111
set_config_value $ttf_orig number2 222
# check the result
diff $ttf_expected $ttf_orig
assertEquals "config values set correctly" 0 $?
}
# test set_config_value when the specified file doesn't exist
test_set_config_value_no_file() {
set_config_value this_file_doesnt_exist server_name realname.org
assertNotEquals 0 $? # return value should be non-zero
}
# test set_config_value when the specified name doesn't exist in the file
test_set_config_value_no_name() {
ttf_config=_ttf_config.conf
touch $ttf_config
set_config_value "$ttf_config" newname newvalue
assertEquals 0 $? # set_config_value should return success (zero)
value=`get_config "$ttf_config" newname`
assertEquals newvalue "$value"
}
# test the function for returning values from config files
#
test_get_config() {
ttf_config=_ttf_config.py
cat > $ttf_config << 'END_OF_FILE'
[default]
cs_name: gooddomain.org
cs_user: theuser
long_one=string with spaces
var_space = wordWithTrailingSpaces
lastly: string with trailing spaces
END_OF_FILE
result=""
for name in cs_name cs_user long_one var_space lastly
do
result="${result}`get_config "$ttf_config" "$name"` "
done
expected="\
gooddomain.org \
theuser \
string with spaces \
wordWithTrailingSpaces \
string with trailing spaces "
assertEquals "get_config results" "$expected" "$result"
}
# test get_config when the name doesn't exist in the config file
# but the caller has supplied a default value
test_get_config_default_value() {
ttf_config=_ttf_config.conf
cat > $ttf_config << 'END_OF_FILE'
[default]
cs_name: gooddomain.org
cs_user: theuser
END_OF_FILE
stdout=`get_config $ttf_config foo defvalue`
assertEquals 0 $? # get_config returns zero
    assertEquals defvalue "$stdout"     # stdout is the default value
}
# test get_config when the config file does not exist
test_get_config_no_file() {
stdout=`get_config this_file_doesnt_exist foo`
assertNotEquals 0 $? # get_config returns non-zero
assertNull "$stdout" # stdout is empty string
}
# test get_config when the name doesn't exist in the config file
test_get_config_no_name() {
ttf_config=_ttf_config.conf
cat > $ttf_config << 'END_OF_FILE'
[default]
cs_name: gooddomain.org
cs_user: theuser
END_OF_FILE
stdout=`get_config $ttf_config foo`
assertEquals 0 $? # get_config returns zero
assertNull "$stdout" # stdout is empty string
}
# test get_config when the value doesn't exist in the config file
# but the name does
test_get_config_name_no_value() {
ttf_config=_ttf_config.conf
cat > $ttf_config << 'END_OF_FILE'
[default]
cs_name: gooddomain.org
cs_user:
END_OF_FILE
stdout=`get_config $ttf_config cs_user`
    assertEquals 0 $?           # get_config returns zero
assertNull "$stdout" # stdout is empty string
}
# test the function to create directories owned by root
# even if they already exist. Note: we're not testing
# for ownership by root so that the tests don't have to
# be run as root
#
test_create_dir() {
list="_ttf_dir1 _ttf_dir2 _ttf_dir3"
create_dir $list
for d in $list
do
test -d $d
assertTrue "directory $d exists" $?
done
}
# test the function to find the configuration file
#
test_find_config() {
local d
local troot=_testingroot
TESTING_ROOT=$troot
local tdirs="/etc /etc/ftp_upload /etc/opt/ftp_upload"
local tfile=uploader.conf
# create the testing directories
for d in $tdirs
do
mkdir --parents "$troot$d"
done
# test no config file
local res=`find_config`
    assertEquals "find_config: wrong no-file value returned" \
/etc/opt/ftp_upload/$tfile "$res"
# test config file detection in each standard directory
for d in $tdirs
do
touch "$troot$d/uploader.conf"
res=`find_config`
assertEquals "find_config: wrong value returned" "$troot$d/$tfile" "$res"
done
# test config file detection in current directory
touch uploader.conf
res=`find_config`
assertEquals "find_config: wrong value returned" "./uploader.conf" "$res"
}
. `which shunit2`
|
NeighborhoodGuard/ftp_upload
|
FTP_Upload/configupload/test/testUtils.sh
|
Shell
|
agpl-3.0
| 6,162 |
#!/bin/bash
# card_activation.sh: activate a card and send back login/pwd via SMS
#
# ARGV[1] = SMS from client
#
# ENV[HOME] = Base directory
# ENV[FILE_LOG] = Log file for command
# ENV[MSG_LOG] = Log separator
# ENV[DEBUG] = Enable debugging
# ENV[LDAP_HOST] = LDAP Host
# ENV[LDAP_PASSWORD] = file containing the password for LDAP binding
# ENV[MAIL_TO] = Email Address for workflow
# ENV[MAIL_FROM] = Email Address for workflow
SMS="$1"
if [ -z "${SMS}" ]; then
echo "SMS is empty" >&2
exit 1
fi
if [ -z "${HOME}" ]; then
echo "HOME is not set" >&2
exit 1
fi
cd ${HOME}
if [ ! -f WAGSM_command/.function ]; then
echo "Unable to found WAGSM_command/.function" >&2
exit 1
fi
. WAGSM_command/.function
chk_ENV `basename $0`
# if something wrong exec this...
EXIT_CMD="ESITO=0; echo 'Sorry, service not available'"
# start
DEBUG_INFORMATION="ENV[LDAP_HOST] = \"${LDAP_HOST}\"
ENV[LDAP_PASSWORD]= \"${LDAP_PASSWORD}\"
ENV[MAIL_TO] = \"${MAIL_TO}\"
ENV[MAIL_FROM] = \"${MAIL_FROM}\"
ARGV[1] (SMS) = \"${SMS}\"
"
begin_CMD
# extract information
run_CMD "PIN=`echo \"${SMS}\" | tail -n 1`"
run_CMD "PHONE_NUMBER=`echo ${SMS} | grep 'From:' | cut -f2 -d' '`"
# Search card by pin
run_CMD_with_output CARD "ldapsearch -x -h $LDAP_HOST -p 389 -D uid=unwired-portal-agent,ou=managers,o=unwired-portal -b ou=cards,o=unwired-portal -y $LDAP_PASSWORD -LLL waPin=${PIN}"
if [ -z "$CARD" ]
then
    EXIT_CMD="echo 'Wrong PIN'"
end_CMD 0
fi
# Search card by pin and verify that it is not already assigned to a user
run_CMD_with_output CARD "ldapsearch -x -h $LDAP_HOST -p 389 -D uid=unwired-portal-agent,ou=managers,o=unwired-portal -b ou=cards,o=unwired-portal -y $LDAP_PASSWORD -LLL (&(waPin=${PIN})(!(waUsedBy=*)))"
if [ -z "$CARD" ]
then
    EXIT_CMD="echo 'Card already in use!'"
end_CMD 0
fi
CARD_ID=${CARD:10:36}
# Search user by phone number
run_CMD_with_output USER "ldapsearch -x -h $LDAP_HOST -p 389 -D uid=unwired-portal-agent,ou=managers,o=unwired-portal -b ou=users,o=unwired-portal -y $LDAP_PASSWORD -LLL waCell=${PHONE_NUMBER}"
# generate LOGIN/PASSWORD
run_CMD_with_output UUID uuidgen
LOGIN=${UUID:0:8}
PASSWORD=${UUID:28:8}
if [ -n "$USER" ]
then
USER_ID=${USER:10:36}
else
    # If the user does not exist, generate USER_ID first and then create the user
USER_ID=$UUID
LDIF="dn: waUid=${USER_ID},ou=users,o=unwired-portal
objectClass: top
objectClass: waUser
waUid: ${USER_ID}
waCell: ${PHONE_NUMBER}
waActive: TRUE"
run_CMD_with_input "${LDIF}" "ldapadd -x -c -h $LDAP_HOST -p 389 -D cn=admin,o=unwired-portal -y $LDAP_PASSWORD"
fi
# Update card with USER_ID and a newly generated LOGIN/PASSWORD
LDIF="dn: waCid=${CARD_ID},ou=cards,o=unwired-portal
waUsedBy: ${USER_ID}
waLogin: ${LOGIN}
waPassword: ${PASSWORD}"
run_CMD_with_input "${LDIF}" "ldapmodify -x -c -h $LDAP_HOST -p 389 -D cn=admin,o=unwired-portal -y $LDAP_PASSWORD"
# send output for SMS to client
unset EXIT_CMD
echo "Login: ${LOGIN}"
echo "Password: ${PASSWORD}"
# sending a mail for workflow
run_CMD "GSMBOX_ID=`echo ${SMS} | grep 'From_SMSC:' | cut -f2 -d' '`"
run_CMD "waCardId=`echo \"${CARD}\" | cut -f5 -d':' | cut -f2 -d' '`"
run_CMD "waValidity=`echo \"${CARD}\" | cut -f8 -d':' | cut -f2 -d' '`"
run_CMD_with_output DATE "date -Iseconds"
MAIL="To: ${MAIL_TO}
From: ${MAIL_FROM}
MIME-Version: 1.0
Subject: cardact
Content-Type: multipart/mixed; boundary=\"_-_as978sd78fd912y_-_\"
--_-_as978sd78fd912y_-_
Content-Type: text/plain;
Content-Disposition: attachment; filename=\"fields.txt\"
waGsmBoxId=$GSMBOX_ID
waCell=$PHONE_NUMBER
waPin=$PIN
waSmsReceptionDate=$DATE
waCid=${CARD_ID}
waCardId=${waCardId}
waValidity=${waValidity}
waUsedBy=${USER_ID}
waLogin=${LOGIN}
waPassword=${PASSWORD}
--_-_as978sd78fd912y_-_
Content-Type: text/plain;
Content-Disposition: attachment; filename=\"sms.txt\"
Content-Transfer-Encoding: binary;
${SMS}
--_-_as978sd78fd912y_-_--"
run_CMD_with_input "${MAIL}" "sendmail -t"
end_CMD 0
|
indraj/ULib
|
tests/examples/WAGSM/WAGSM_command/card_activation.sh
|
Shell
|
lgpl-3.0
| 3,968 |
# Load the oh-my-zsh's library.
antigen use oh-my-zsh
# Bundles from the default repo (robbyrussell's oh-my-zsh).
# Syntax highlighting bundle.
antigen bundle zsh-users/zsh-syntax-highlighting
antigen bundle zsh-users/zsh-autosuggestions
#Load theme
antigen theme bhilburn/powerlevel9k powerlevel9k
# Tell Antigen that you're done.
antigen apply
|
shanril/setup
|
config/home/shanril/zsh/antigen-config.zsh
|
Shell
|
lgpl-3.0
| 350 |
#!/bin/bash
#
# This is job test1_4
#
#
## Start of header for backend 'local'.
#
set -e
set -u
ENVIRONMENT_DIR='.'
#
# Variables declared in MOLGENIS Compute headers/footers always start with an MC_ prefix.
#
declare MC_jobScript="test1_4.sh"
declare MC_jobScriptSTDERR="test1_4.err"
declare MC_jobScriptSTDOUT="test1_4.out"
declare MC_failedFile="molgenis.pipeline.failed"
declare MC_singleSeperatorLine=$(head -c 120 /dev/zero | tr '\0' '-')
declare MC_doubleSeperatorLine=$(head -c 120 /dev/zero | tr '\0' '=')
declare MC_tmpFolder='tmpFolder'
declare MC_tmpFile='tmpFile'
declare MC_tmpFolderCreated=0
#
##
### Header functions.
##
#
function errorExitAndCleanUp() {
local _signal="${1}"
local _problematicLine="${2}"
local _exitStatus="${3:-$?}"
local _executionHost="$(hostname)"
local _format='INFO: Last 50 lines or less of %s:\n'
local _errorMessage="FATAL: Trapped ${_signal} signal in ${MC_jobScript} running on ${_executionHost}. Exit status code was ${_exitStatus}."
if [ "${_signal}" == 'ERR' ]; then
_errorMessage="FATAL: Trapped ${_signal} signal on line ${_problematicLine} in ${MC_jobScript} running on ${_executionHost}. Exit status code was ${_exitStatus}."
fi
_errorMessage=${4:-"${_errorMessage}"} # Optionally use custom error message as 4th argument.
echo "${_errorMessage}"
echo "${MC_doubleSeperatorLine}" > "${MC_failedFile}"
echo "${_errorMessage}" >> "${MC_failedFile}"
if [ -f "${MC_jobScriptSTDERR}" ]; then
echo "${MC_singleSeperatorLine}" >> "${MC_failedFile}"
printf "${_format}" "${MC_jobScriptSTDERR}" >> "${MC_failedFile}"
echo "${MC_singleSeperatorLine}" >> "${MC_failedFile}"
tail -50 "${MC_jobScriptSTDERR}" >> "${MC_failedFile}"
fi
if [ -f "${MC_jobScriptSTDOUT}" ]; then
echo "${MC_singleSeperatorLine}" >> "${MC_failedFile}"
printf "${_format}" "${MC_jobScriptSTDOUT}" >> "${MC_failedFile}"
echo "${MC_singleSeperatorLine}" >> "${MC_failedFile}"
tail -50 "${MC_jobScriptSTDOUT}" >> "${MC_failedFile}"
fi
echo "${MC_doubleSeperatorLine}" >> "${MC_failedFile}"
}
#
# Create tmp dir per script/job.
# To be called with either a file or folder as first and only argument.
# Defines two globally set variables:
#  1. MC_tmpFolder: a tmp dir for this job/script. When the function is called multiple times MC_tmpFolder will always be the same.
# 2. MC_tmpFile: when the first argument was a folder, MC_tmpFile == MC_tmpFolder
# when the first argument was a file, MC_tmpFile will be a path to a tmp file inside MC_tmpFolder.
#
function makeTmpDir {
#
# Compile paths.
#
local _originalPath="${1}"
local _myMD5="$(md5sum ${MC_jobScript} | cut -d ' ' -f 1)"
local _tmpSubFolder="tmp_${MC_jobScript}_${_myMD5}"
local _dir
local _base
if [[ -d "${_originalPath}" ]]; then
_dir="${_originalPath}"
_base=''
else
_base=$(basename "${_originalPath}")
_dir=$(dirname "${_originalPath}")
fi
MC_tmpFolder="${_dir}/${_tmpSubFolder}/"
MC_tmpFile="${MC_tmpFolder}/${_base}"
echo "DEBUG ${MC_jobScript}::makeTmpDir: dir='${_dir}';base='${_base}';MC_tmpFile='${MC_tmpFile}'"
#
# Cleanup the previously created tmpFolder first if this script was resubmitted.
#
if [[ ${MC_tmpFolderCreated} -eq 0 && -d "${MC_tmpFolder}" ]]; then
rm -rf "${MC_tmpFolder}"
fi
#
# (Re-)create tmpFolder.
#
mkdir -p "${MC_tmpFolder}"
MC_tmpFolderCreated=1
}
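#
# Illustrative only (paths are assumptions, not part of the generated job):
# given a file argument such as
#   makeTmpDir "${rundir}/result.txt"
# MC_tmpFolder becomes "${rundir}/tmp_test1_4.sh_<md5>/" and MC_tmpFile points
# to "result.txt" inside that folder; given a folder argument such as
#   makeTmpDir "${rundir}"
# MC_tmpFile and MC_tmpFolder refer to the same tmp folder.
#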
trap 'errorExitAndCleanUp HUP NA $?' HUP
trap 'errorExitAndCleanUp INT NA $?' INT
trap 'errorExitAndCleanUp QUIT NA $?' QUIT
trap 'errorExitAndCleanUp TERM NA $?' TERM
trap 'errorExitAndCleanUp EXIT NA $?' EXIT
trap 'errorExitAndCleanUp ERR $LINENO $?' ERR
touch "${MC_jobScript}.started"
#
## End of header for backend 'local'
#
#
## Generated header
#
# Assign values to the parameters in this script
# Set taskId, which is the job name of this task
taskId="test1_4"
# Make compute.properties available
rundir="TEST_PROPERTY(project.basedir)/target/test/benchmark/run/testFoldingAssign"
runid="testFoldingAssign"
workflow="src/main/resources/workflows/testfolding/workflow.csv"
parameters="src/main/resources/workflows/testfolding/parameters.csv"
user="TEST_PROPERTY(user.name)"
database="none"
backend="localhost"
port="80"
interval="2000"
path="."
# Connect parameters to environment
chr="2"
chunk="b"
# Validate that each 'value' parameter has only identical values in its list
# We do that to protect you against parameter values that might not be correctly set at runtime.
if [[ ! $(IFS=$'\n' sort -u <<< "${chr[*]}" | wc -l | sed -e 's/^[[:space:]]*//') = 1 ]]; then echo "Error in Step 'test1': input parameter 'chr' is an array with different values. Maybe 'chr' is a runtime parameter with 'more variable' values than what was folded on generation-time?" >&2; exit 1; fi
if [[ ! $(IFS=$'\n' sort -u <<< "${chunk[*]}" | wc -l | sed -e 's/^[[:space:]]*//') = 1 ]]; then echo "Error in Step 'test1': input parameter 'chunk' is an array with different values. Maybe 'chunk' is a runtime parameter with 'more variable' values than what was folded on generation-time?" >&2; exit 1; fi
#
## Start of your protocol template
#
#string chr
#string chunk
#output outputLALA
outputLALA=${chr}_outvalue
#
## End of your protocol template
#
# Save output in environment file: '$ENVIRONMENT_DIR/test1_4.env' with the output vars of this step
if [[ -z "$outputLALA" ]]; then echo "In step 'test1', parameter 'outputLALA' has no value! Please assign a value to parameter 'outputLALA'." >&2; exit 1; fi
echo "test1__has__outputLALA[4]=\"${outputLALA[0]}\"" >> $ENVIRONMENT_DIR/test1_4.env
echo "" >> $ENVIRONMENT_DIR/test1_4.env
chmod 755 $ENVIRONMENT_DIR/test1_4.env
#
## Start of footer for backend 'local'.
#
if [ -d "${MC_tmpFolder:-}" ]; then
echo -n "INFO: Removing MC_tmpFolder ${MC_tmpFolder} ..."
rm -rf "${MC_tmpFolder}"
echo 'done.'
fi
tS=${SECONDS:-0}
tM=$((SECONDS / 60 ))
tH=$((SECONDS / 3600))
echo "On $(date +"%Y-%m-%d %T") ${MC_jobScript} finished successfully after ${tM} minutes." >> molgenis.bookkeeping.log
printf '%s:\t%d seconds\t%d minutes\t%d hours\n' "${MC_jobScript}" "${tS}" "${tM}" "${tH}" >> molgenis.bookkeeping.walltime
mv "${MC_jobScript}".{started,finished}
trap - EXIT
exit 0
|
pneerincx/molgenis-compute
|
molgenis-compute-core/src/test/resources/expected/testFoldingAssign/test1_4.sh
|
Shell
|
lgpl-3.0
| 6,316 |
#!/bin/bash
set -x
SOURCES=(`find src/main -type f -name "**.vala"`)
PACKAGES=('glib-2.0' 'gobject-2.0' 'gio-2.0' 'json-glib-1.0' 'libvala-0.32' 'gmodule-2.0' 'gee-0.8' 'bob-0.0.1')
VAPI_DIRS=('src/library/vapi')
SOURCES_PREFIXED=$(printf "%s " "${SOURCES[@]}")
PACKAGES_PREFIXED=$(printf " --pkg %s" "${PACKAGES[@]}")
VAPI_DIRS_PREFIXED=$(printf " --vapidir %s" "${VAPI_DIRS[@]}")
mkdir -p target/plugins
valac -g -v $SOURCES_PREFIXED -X -Wl,-rpath=\$ORIGIN/../lib -X -Ltarget/lib -X -lbob -X -Isrc/library/c $VAPI_DIRS_PREFIXED $PACKAGES_PREFIXED -o target/bin/bob
|
activey/roger-forger
|
build-cmdline.sh
|
Shell
|
lgpl-3.0
| 569 |
#!/bin/bash
#
# Copyright (c) 2010-2010 LinkedIn, Inc
# Portions Copyright (c) 2013 Yan Pujante
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
#
# locations
CURRDIR=`pwd`
BASEDIR=`cd $(dirname $0)/.. ; pwd`
GLU_VERSION=@glu.version@
cd $BASEDIR
LIB_DIR=lib
shopt -s nullglob
# first time... 'resolve' the links
for file in $LIB_DIR/*.jar.lnk
do
ln -s `head -n 1 $file` $LIB_DIR
rm $file
done
for file in `ls -1 $LIB_DIR/*.jar `; do
if [ -z "$JVM_CLASSPATH" ]; then
JVM_CLASSPATH="-classpath $JAVA_HOME/lib/tools.jar:$file"
else
JVM_CLASSPATH=$JVM_CLASSPATH:$file
fi
done
JVM_LOG4J="-Dlog4j.configuration=file:$BASEDIR/conf/log4j.xml -Djava.util.logging.config.file=$CONF_DIR/logging.properties"
JVM_DEBUG=
#JVM_DEBUG="-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=5005"
if [ -z "$JAVA_CMD" ]; then
if [ -f $JAVA_HOME/bin/java ]; then
JAVA_CMD=$JAVA_HOME/bin/java
else
JAVA_CMD=java
fi
fi
JAVA_VER=$("$JAVA_CMD" -version 2>&1 | grep 'java version' | sed 's/java version "\(.*\)\.\(.*\)\..*"/\1\2/; 1q')
if [ "$JAVA_VER" -lt 17 ]; then
echo "### ERROR START ###########"
echo "### Java @ $JAVA_CMD too old (required java 1.7)"
$JAVA_CMD -version
echo "### ERROR END ###########"
exit 1;
fi
$JAVA_CMD $JVM_LOG4J $JVM_DEBUG $JVM_CLASSPATH -Duser.pwd=$CURRDIR -Dglu.version=$GLU_VERSION org.pongasoft.glu.packaging.setup.SetupMain "$@"
|
pongasoft/glu
|
packaging/org.linkedin.glu.packaging-setup/src/cmdline/resources/bin/setup.sh
|
Shell
|
apache-2.0
| 1,896 |
#!/bin/bash
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
set -eu
function usage {
echo "Usage: $0 [OPTION]..."
echo "Run Keystone's test suite(s)"
echo ""
echo " -V, --virtual-env Always use virtualenv. Install automatically if not present"
echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment"
echo " -r, --recreate-db Recreate the test database (deprecated, as this is now the default)."
echo " -n, --no-recreate-db Don't recreate the test database."
echo " -x, --stop Stop running tests after the first error or failure."
echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added."
echo " -p, --pep8 Just run pep8"
echo " -P, --no-pep8 Don't run pep8"
echo " -c, --coverage Generate coverage report"
echo " -h, --help Print this usage message"
echo " -xintegration Ignore all keystoneclient test cases (integration tests)"
echo " --hide-elapsed Don't print the elapsed time for each test along with slow test list"
echo ""
echo "Note: with no options specified, the script will try to run the tests in a virtual environment,"
echo " If no virtualenv is found, the script will ask if you would like to create one. If you "
echo " prefer to run tests NOT in a virtual environment, simply pass the -N option."
exit
}
function process_option {
case "$1" in
-h|--help) usage;;
-V|--virtual-env) always_venv=1; never_venv=0;;
-N|--no-virtual-env) always_venv=0; never_venv=1;;
-r|--recreate-db) recreate_db=1;;
-n|--no-recreate-db) recreate_db=0;;
-f|--force) force=1;;
-p|--pep8) just_pep8=1;;
-P|--no-pep8) no_pep8=1;;
-c|--coverage) coverage=1;;
-xintegration) nokeystoneclient=1;;
-*) noseopts="$noseopts $1";;
*) noseargs="$noseargs $1"
esac
}
venv=.venv
with_venv=tools/with_venv.sh
always_venv=0
never_venv=0
force=0
noseargs=
noseopts=
wrapper=""
just_pep8=0
no_pep8=0
coverage=0
nokeystoneclient=0
recreate_db=1
for arg in "$@"; do
process_option $arg
done
# If enabled, tell nose to collect coverage data
if [ $coverage -eq 1 ]; then
noseopts="$noseopts --with-coverage --cover-package=keystone"
fi
if [ $nokeystoneclient -eq 1 ]; then
# disable the integration tests
noseopts="$noseopts -I test_keystoneclient*"
fi
function run_tests {
# Just run the test suites in current environment
${wrapper} $NOSETESTS 2> run_tests.log
# If we get some short import error right away, print the error log directly
RESULT=$?
if [ "$RESULT" -ne "0" ];
then
ERRSIZE=`wc -l run_tests.log | awk '{print \$1}'`
if [ "$ERRSIZE" -lt "40" ];
then
cat run_tests.log
fi
fi
return $RESULT
}
function run_pep8 {
echo "Running pep8 ..."
# Opt-out files from pep8
ignore_scripts="*.sh:"
ignore_files="*eventlet-patch:*pip-requires"
ignore_dirs="*ajaxterm*"
GLOBIGNORE="$ignore_scripts:$ignore_files:$ignore_dirs"
srcfiles=`find bin -type f ! -name .*.swp`
srcfiles+=" keystone"
# Just run PEP8 in current environment
${wrapper} pep8 --repeat --show-pep8 --show-source \
--exclude=vcsversion.py ${srcfiles} | tee pep8.txt
}
NOSETESTS="python run_tests.py $noseopts $noseargs"
if [ $never_venv -eq 0 ]
then
# Remove the virtual environment if --force used
if [ $force -eq 1 ]; then
echo "Cleaning virtualenv..."
rm -rf ${venv}
fi
if [ -e ${venv} ]; then
wrapper="${with_venv}"
else
if [ $always_venv -eq 1 ]; then
# Automatically install the virtualenv
python tools/install_venv.py
wrapper="${with_venv}"
else
echo -e "No virtual environment found...create one? (Y/n) \c"
read use_ve
if [ "x$use_ve" = "xY" -o "x$use_ve" = "x" -o "x$use_ve" = "xy" ]; then
# Install the virtualenv and run the test suite in it
python tools/install_venv.py
wrapper=${with_venv}
fi
fi
fi
fi
# Delete old coverage data from previous runs
if [ $coverage -eq 1 ]; then
${wrapper} coverage erase
fi
if [ $just_pep8 -eq 1 ]; then
run_pep8
exit
fi
if [ $recreate_db -eq 1 ]; then
rm -f tests.sqlite
fi
run_tests
# NOTE(sirp): we only want to run pep8 when we're running the full-test suite,
# not when we're running tests individually. To handle this, we need to
# distinguish between options (noseopts), which begin with a '-', and
# arguments (noseargs).
if [ -z "$noseargs" ]; then
if [ $no_pep8 -eq 0 ]; then
run_pep8
fi
fi
if [ $coverage -eq 1 ]; then
echo "Generating coverage report in covhtml/"
${wrapper} coverage html -d covhtml -i
fi
|
sileht/deb-openstack-keystone
|
run_tests.sh
|
Shell
|
apache-2.0
| 5,273 |
#!/bin/bash
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
home="$(cd "$(dirname $0)"; pwd)"
java -jar -Dskywalking.plugin.tomcat.collect_http_params=true ${agent_opts} ${home}/../libs/armeria-0.96minus-scenario.jar &
|
wu-sheng/sky-walking
|
test/plugin/scenarios/armeria-0.96minus-scenario/bin/startup.sh
|
Shell
|
apache-2.0
| 958 |
#!/bin/sh
mkdir -p "./download"
wget --continue --directory-prefix="./download" --html-extension --no-parent --recursive --no-directories "redux.js.org/docs/api/"
rm -rf "./download/index.html"
|
souravbadami/zeroclickinfo-fathead
|
lib/fathead/reduxjs/fetch.sh
|
Shell
|
apache-2.0
| 198 |
#!/bin/bash
chmod +x ./.travis/junit-errors-to-stdout.sh
./.travis/junit-errors-to-stdout.sh
|
simpledynamics/openid-connect-server-spring-boot
|
travis-after-failure.sh
|
Shell
|
apache-2.0
| 92 |
#!/bin/sh
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Bourne shell syntax, this should hopefully run on pretty much anything.
usage() {
echo "run on Windows inside Cygwin, or on Linux"
echo "Usage: cd to uimaj-distr, then do signRelease.sh <version> <passphrase> (e.g., signRelease.sh uimaj-2.2.0-incubating-SNAPSHOT \"Your passphrase\")"
}
if [ -n "$2" ]
then
release=$1
passphrase=$2
else
usage
exit 1
fi
# Create PGP signatures
for i in target/${release}*.zip; do gpg --passphrase "$passphrase" --output $i.asc --detach-sig --armor $i; done
for i in target/${release}*.gz; do gpg --passphrase "$passphrase" --output $i.asc --detach-sig --armor $i; done
for i in target/${release}*.bz2; do gpg --passphrase "$passphrase" --output $i.asc --detach-sig --armor $i; done
# Create MD5 checksums
for i in target/${release}*.zip; do md5sum --binary $i > $i.md5; done
for i in target/${release}*.gz; do md5sum --binary $i > $i.md5; done
for i in target/${release}*.bz2; do md5sum --binary $i > $i.md5; done
# Create SHA1 checksums
for i in target/${release}*.zip; do sha1sum --binary $i > $i.sha1; done
for i in target/${release}*.gz; do sha1sum --binary $i > $i.sha1; done
for i in target/${release}*.bz2; do sha1sum --binary $i > $i.sha1; done
|
apache/uima-sandbox
|
SandboxDistr/annotator-package/build/signRelease.sh
|
Shell
|
apache-2.0
| 2,030 |
loadedgnu=`${MODULESHOME}/bin/modulecmd sh -t list 2>&1 | grep PrgEnv-gnu`
loadedintel=`${MODULESHOME}/bin/modulecmd sh -t list 2>&1 | grep PrgEnv-intel`
loadedcray=`${MODULESHOME}/bin/modulecmd sh -t list 2>&1 | grep PrgEnv-cray`
loadeddarshan=`${MODULESHOME}/bin/modulecmd sh -t list 2>&1 | grep darshan`
if [ "x${loadedintel}" = x ]; then
if [ "x${loadedcray}" != x ]; then
module swap PrgEnv-cray PrgEnv-intel
fi
if [ "x${loadedgnu}" != x ]; then
module swap PrgEnv-gnu PrgEnv-intel
fi
fi
module load gcc
module load git
export CRAYPE_LINK_TYPE=dynamic
|
tskisner/pytoast
|
external/conf/edison-intel.sh
|
Shell
|
bsd-2-clause
| 585 |
#!/bin/sh
#
# Copyright (c) 2004-2005 Poul-Henning Kamp.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# $FreeBSD: soc2013/dpl/head/sys/tools/bus_macro.sh 150567 2005-09-24 20:11:07Z phk $
#
# Generate the convenience macros at the bottom of sys/bus.h
#
macro () {
n=${1}
shift
echo -n "#define bus_${n}(r"
for i
do
echo -n ", ${i}"
done
echo ") \\"
echo -n " bus_space_${n}((r)->r_bustag, (r)->r_bushandle"
for i
do
echo -n ", (${i})"
done
echo ")"
}
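# For illustration (hypothetical invocation, not emitted by this script itself):
# "macro read_1 o" prints the following two lines:
#   #define bus_read_1(r, o) \
#   	bus_space_read_1((r)->r_bustag, (r)->r_bushandle, (o))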
macro barrier o l f
for w in 1 2 4 8
do
# macro copy_region_$w so dh do c
# macro copy_region_stream_$w ?
# macro peek_$w
for s in "" stream_
do
macro read_$s$w o
macro read_multi_$s$w o d c
macro read_region_$s$w o d c
macro set_multi_$s$w o v c
macro set_region_$s$w o v c
macro write_$s$w o v
macro write_multi_$s$w o d c
macro write_region_$s$w o d c
done
done
|
dplbsd/zcaplib
|
head/sys/tools/bus_macro.sh
|
Shell
|
bsd-2-clause
| 2,104 |
#!/usr/bin/env bash
openssl req -newkey rsa:2048 -nodes -keyout key.pem -x509 -days 18262 -out certificate.pem -subj "/UID=com.apple.mgmt.External.18a16429-886b-41f1-9c30-2bd04ae4fc37/CN=APSP:17a16429-886b-41f1-8c90-3bd02ae9fc57/C=US"
|
groob/micromdm
|
pkg/crypto/testdata/create_mock_push_cert.sh
|
Shell
|
mit
| 236 |
#
# Copyright (c) 2001, 2002, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
#
OS=`uname -s`
case "$OS" in
SunOS | Linux | Darwin )
FS="/"
CHMOD="${FS}bin${FS}chmod"
;;
Windows* | CYGWIN* )
CHMOD="chmod"
;;
* )
echo "Unrecognized system!"
exit 1;
;;
esac
if [ "${TESTSRC}" = "" ]
then
echo "TESTSRC not set. Test cannot execute. Failed."
exit 1
fi
echo "TESTSRC=${TESTSRC}"
if [ "${TESTJAVA}" = "" ]
then
echo "TESTJAVA not set. Test cannot execute. Failed."
exit 1
fi
echo "TESTJAVA=${TESTJAVA}"
if [ "${TESTCLASSES}" = "" ]
then
echo "TESTCLASSES not set. Test cannot execute. Failed."
exit 1
fi
echo "TESTCLASSES=${TESTCLASSES}"
echo "CLASSPATH=${CLASSPATH}"
cp ${TESTSRC}/Assert.java .
cp -R ${TESTSRC}/package1 .
cp -R ${TESTSRC}/package2 .
${CHMOD} -R u+w *
${TESTJAVA}/bin/javac Assert.java
${TESTJAVA}/bin/java Assert
result=$?
if [ $result -eq 0 ]
then
echo "Passed"
else
echo "Failed"
fi
exit $result
|
greghaskins/openjdk-jdk7u-jdk
|
test/java/lang/ClassLoader/Assert.sh
|
Shell
|
gpl-2.0
| 1,938 |
#! /bin/sh
# Copyright (C) 2010-2014 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Test to make sure AC_CONFIG_AUX_DIR works correctly.
# This test calls AC_CONFIG_AUX_DIR with a '.' argument, thus explicitly
# making the top-level directory the config auxdir.
# Keep this in sync with sister tests 'auxdir6.sh' and 'auxdir8.sh'.
. test-init.sh
cat > configure.ac <<END
AC_INIT([$me], [1.0])
AC_CONFIG_AUX_DIR([.])
AM_INIT_AUTOMAKE
AC_CONFIG_FILES([Makefile subdir/Makefile])
END
mkdir subdir
cat > Makefile.am << 'END'
pkgdata_DATA =
END
cp Makefile.am subdir/Makefile.am
: > mkinstalldirs
$ACLOCAL
$AUTOMAKE
$FGREP '$(top_srcdir)/mkinstalldirs' Makefile.in
$FGREP '$(top_srcdir)/mkinstalldirs' subdir/Makefile.in
:
|
kuym/openocd
|
tools/automake-1.15/t/auxdir7.sh
|
Shell
|
gpl-2.0
| 1,335 |
# simple test for instanceof
# --output:start
# true
# false
# true
# true
# false
# false
# false
# true
# true
# true
# true
# false
# --output:end
interface Iface1 {}
interface Iface2 {}
class Test1: Iface1 {}
class Test2(Test1): Iface2 {}
class Foo {}
f = Foo()
t1 = Test1()
t2 = Test2()
print(f instanceof Foo)
print(f instanceof Iface1)
print(t1 instanceof Iface1)
print(t1 instanceof Test1)
print(t1 instanceof Test2)
print(t1 instanceof Iface2)
print(t1 instanceof Foo)
print(t2 instanceof Iface1)
print(t2 instanceof Test1)
print(t2 instanceof Test2)
print(t2 instanceof Iface2)
print(t2 instanceof Foo)
|
alexst07/seti
|
test/interpreter/oop/instanceof.sh
|
Shell
|
apache-2.0
| 618 |
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
if [ $# -lt 1 ];
then
echo "USAGE: $0 [-daemon] [-name servicename] [-loggc] classname [opts]"
exit 1
fi
# CYGINW == 1 if Cygwin is detected, else 0.
if [[ $(uname -a) =~ "CYGWIN" ]]; then
CYGWIN=1
else
CYGWIN=0
fi
if [ -z "$INCLUDE_TEST_JARS" ]; then
INCLUDE_TEST_JARS=false
fi
# Exclude jars not necessary for running commands.
regex="(-(test|src|scaladoc|javadoc)\.jar|jar.asc)$"
should_include_file() {
if [ "$INCLUDE_TEST_JARS" = true ]; then
return 0
fi
file=$1
if [ -z "$(echo "$file" | egrep "$regex")" ] ; then
return 0
else
return 1
fi
}
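# For illustration (hypothetical filenames): kafka-clients-0.10.1.0.jar passes the
# filter above, while kafka-clients-0.10.1.0-test.jar and
# kafka-clients-0.10.1.0-javadoc.jar are excluded by the regex.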
base_dir=$(dirname $0)/..
if [ -z "$SCALA_VERSION" ]; then
SCALA_VERSION=2.10.6
fi
if [ -z "$SCALA_BINARY_VERSION" ]; then
SCALA_BINARY_VERSION=$(echo $SCALA_VERSION | cut -f 1-2 -d '.')
fi
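# e.g. with SCALA_VERSION=2.10.6 the derived SCALA_BINARY_VERSION is 2.10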
# run ./gradlew copyDependantLibs to get all dependant jars in a local dir
shopt -s nullglob
for dir in "$base_dir"/core/build/dependant-libs-${SCALA_VERSION}*;
do
if [ -z "$CLASSPATH" ] ; then
CLASSPATH="$dir/*"
else
CLASSPATH="$CLASSPATH:$dir/*"
fi
done
for file in "$base_dir"/examples/build/libs/kafka-examples*.jar;
do
if should_include_file "$file"; then
CLASSPATH="$CLASSPATH":"$file"
fi
done
for file in "$base_dir"/clients/build/libs/kafka-clients*.jar;
do
if should_include_file "$file"; then
CLASSPATH="$CLASSPATH":"$file"
fi
done
for file in "$base_dir"/streams/build/libs/kafka-streams*.jar;
do
if should_include_file "$file"; then
CLASSPATH="$CLASSPATH":"$file"
fi
done
for file in "$base_dir"/streams/examples/build/libs/kafka-streams-examples*.jar;
do
if should_include_file "$file"; then
CLASSPATH="$CLASSPATH":"$file"
fi
done
for file in "$base_dir"/streams/build/dependant-libs-${SCALA_VERSION}/rocksdb*.jar;
do
CLASSPATH="$CLASSPATH":"$file"
done
for file in "$base_dir"/tools/build/libs/kafka-tools*.jar;
do
if should_include_file "$file"; then
CLASSPATH="$CLASSPATH":"$file"
fi
done
for dir in "$base_dir"/tools/build/dependant-libs-${SCALA_VERSION}*;
do
CLASSPATH="$CLASSPATH:$dir/*"
done
for cc_pkg in "api" "runtime" "file" "json" "tools"
do
for file in "$base_dir"/connect/${cc_pkg}/build/libs/connect-${cc_pkg}*.jar;
do
if should_include_file "$file"; then
CLASSPATH="$CLASSPATH":"$file"
fi
done
if [ -d "$base_dir/connect/${cc_pkg}/build/dependant-libs" ] ; then
CLASSPATH="$CLASSPATH:$base_dir/connect/${cc_pkg}/build/dependant-libs/*"
fi
done
# classpath addition for release
for file in "$base_dir"/libs/*;
do
if should_include_file "$file"; then
CLASSPATH="$CLASSPATH":"$file"
fi
done
for file in "$base_dir"/core/build/libs/kafka_${SCALA_BINARY_VERSION}*.jar;
do
if should_include_file "$file"; then
CLASSPATH="$CLASSPATH":"$file"
fi
done
shopt -u nullglob
# JMX settings
if [ -z "$KAFKA_JMX_OPTS" ]; then
KAFKA_JMX_OPTS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false "
fi
# JMX port to use
if [ $JMX_PORT ]; then
KAFKA_JMX_OPTS="$KAFKA_JMX_OPTS -Dcom.sun.management.jmxremote.port=$JMX_PORT "
fi
# Log directory to use
if [ "x$LOG_DIR" = "x" ]; then
LOG_DIR="$base_dir/logs"
fi
# Log4j settings
if [ -z "$KAFKA_LOG4J_OPTS" ]; then
# Log to console. This is a tool.
LOG4J_DIR="$base_dir/config/tools-log4j.properties"
# If Cygwin is detected, LOG4J_DIR is converted to Windows format.
(( CYGWIN )) && LOG4J_DIR=$(cygpath --path --mixed "${LOG4J_DIR}")
KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:${LOG4J_DIR}"
else
# create logs directory
if [ ! -d "$LOG_DIR" ]; then
mkdir -p "$LOG_DIR"
fi
fi
# If Cygwin is detected, LOG_DIR is converted to Windows format.
(( CYGWIN )) && LOG_DIR=$(cygpath --path --mixed "${LOG_DIR}")
KAFKA_LOG4J_OPTS="-Dkafka.logs.dir=$LOG_DIR $KAFKA_LOG4J_OPTS"
# Generic jvm settings you want to add
if [ -z "$KAFKA_OPTS" ]; then
KAFKA_OPTS=""
fi
# Set Debug options if enabled
if [ "x$KAFKA_DEBUG" != "x" ]; then
# Use default ports
DEFAULT_JAVA_DEBUG_PORT="5005"
if [ -z "$JAVA_DEBUG_PORT" ]; then
JAVA_DEBUG_PORT="$DEFAULT_JAVA_DEBUG_PORT"
fi
# Use the defaults if JAVA_DEBUG_OPTS was not set
DEFAULT_JAVA_DEBUG_OPTS="-agentlib:jdwp=transport=dt_socket,server=y,suspend=${DEBUG_SUSPEND_FLAG:-n},address=$JAVA_DEBUG_PORT"
if [ -z "$JAVA_DEBUG_OPTS" ]; then
JAVA_DEBUG_OPTS="$DEFAULT_JAVA_DEBUG_OPTS"
fi
echo "Enabling Java debug options: $JAVA_DEBUG_OPTS"
KAFKA_OPTS="$JAVA_DEBUG_OPTS $KAFKA_OPTS"
fi
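# Example invocation (illustrative): KAFKA_DEBUG=y JAVA_DEBUG_PORT=5006 bin/kafka-run-class.sh <classname>
# attaches a JDWP agent on port 5006 without suspending startup (DEBUG_SUSPEND_FLAG defaults to n).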
# Which java to use
if [ -z "$JAVA_HOME" ]; then
JAVA="java"
else
JAVA="$JAVA_HOME/bin/java"
fi
# Memory options
if [ -z "$KAFKA_HEAP_OPTS" ]; then
KAFKA_HEAP_OPTS="-Xmx256M"
fi
# JVM performance options
if [ -z "$KAFKA_JVM_PERFORMANCE_OPTS" ]; then
KAFKA_JVM_PERFORMANCE_OPTS="-server -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+DisableExplicitGC -Djava.awt.headless=true"
fi
while [ $# -gt 0 ]; do
COMMAND=$1
case $COMMAND in
-name)
DAEMON_NAME=$2
CONSOLE_OUTPUT_FILE=$LOG_DIR/$DAEMON_NAME.out
shift 2
;;
-loggc)
if [ -z "$KAFKA_GC_LOG_OPTS" ]; then
GC_LOG_ENABLED="true"
fi
shift
;;
-daemon)
DAEMON_MODE="true"
shift
;;
*)
break
;;
esac
done
# GC options
GC_FILE_SUFFIX='-gc.log'
GC_LOG_FILE_NAME=''
if [ "x$GC_LOG_ENABLED" = "xtrue" ]; then
GC_LOG_FILE_NAME=$DAEMON_NAME$GC_FILE_SUFFIX
KAFKA_GC_LOG_OPTS="-Xloggc:$LOG_DIR/$GC_LOG_FILE_NAME -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps "
fi
# If Cygwin is detected, classpath is converted to Windows format.
(( CYGWIN )) && CLASSPATH=$(cygpath --path --mixed "${CLASSPATH}")
# Launch mode
if [ "x$DAEMON_MODE" = "xtrue" ]; then
nohup $JAVA $KAFKA_HEAP_OPTS $KAFKA_JVM_PERFORMANCE_OPTS $KAFKA_GC_LOG_OPTS $KAFKA_JMX_OPTS $KAFKA_LOG4J_OPTS -cp $CLASSPATH $KAFKA_OPTS "$@" > "$CONSOLE_OUTPUT_FILE" 2>&1 < /dev/null &
else
exec $JAVA $KAFKA_HEAP_OPTS $KAFKA_JVM_PERFORMANCE_OPTS $KAFKA_GC_LOG_OPTS $KAFKA_JMX_OPTS $KAFKA_LOG4J_OPTS -cp $CLASSPATH $KAFKA_OPTS "$@"
fi
|
eribeiro/kafka
|
bin/kafka-run-class.sh
|
Shell
|
apache-2.0
| 6,939 |
#!/usr/bin/env bash
DIR=$(cd "$(dirname "${BASH_SOURCE[0]}" )" && pwd )
$DIR/../gradlew -p $DIR assemble
|
wiltonlazary/kotlin-native
|
samples/calculator/build.sh
|
Shell
|
apache-2.0
| 107 |
#!/bin/sh
# SUMMARY: Namespace stress with multiple instances of 5 concurrent long running TCP/IPv6 connections over a veth pair in reverse order
# LABELS:
# REPEAT:
set -e
# Source libraries. Uncomment if needed/defined
#. "${RT_LIB}"
. "${RT_PROJECT_ROOT}/_lib/lib.sh"
clean_up() {
find . -depth -iname "test-ns*" -not -iname "*.yml" -exec rm -rf {} \;
}
trap clean_up EXIT
# Test code goes here
moby build -output kernel+initrd test-ns.yml
RESULT="$(linuxkit run -cpus 2 test-ns)"
echo "${RESULT}" | grep -q "suite PASSED"
exit 0
|
radu-matei/linuxkit
|
test/cases/020_kernel/110_namespace/004_kernel-4.12.x/010_veth/027_echo-tcp-ipv6-long-5con-multi-reverse/test.sh
|
Shell
|
apache-2.0
| 542 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/bin/sh -Eux
# Trap non-normal exit signals: 1/HUP, 2/INT, 3/QUIT, 15/TERM, ERR
trap founderror 1 2 3 15 ERR
founderror()
{
exit 1
}
exitscript()
{
#remove lock file
#rm $lockfile
exit 0
}
apt-get -y update
apt-get install -y software-properties-common python-software-properties screen vim git wget
add-apt-repository -y ppa:webupd8team/java
apt-get -y update
/bin/echo debconf shared/accepted-oracle-license-v1-1 select true | /usr/bin/debconf-set-selections
apt-get -y install oracle-java7-installer oracle-java7-set-default
/vagrant/vagrant/kafka.sh #install kafka
IP=$(ifconfig | grep 'inet addr:'| grep 168 | grep 192|cut -d: -f2 | awk '{ print $1}')
sed 's/broker.id=0/'broker.id=$1'/' /opt/apache/kafka/config/server.properties > /tmp/prop1.tmp
sed 's/#advertised.host.name=<hostname routable by clients>/'advertised.host.name=$IP'/' /tmp/prop1.tmp > /tmp/prop2.tmp
sed 's/#host.name=localhost/'host.name=$IP'/' /tmp/prop2.tmp > /tmp/prop3.tmp
sed 's/zookeeper.connect=localhost:2181/'zookeeper.connect=192.168.22.5:2181'/' /tmp/prop3.tmp > /opt/server.properties
/opt/apache/kafka/bin/kafka-server-start.sh /opt/server.properties 1>> /tmp/broker.log 2>> /tmp/broker.log &
sleep 10
cd /vagrant
mkdir -p log
java -jar /vagrant/target/dropwizard-kafka-http-0.0.1-SNAPSHOT.jar server kafka-http.yml 1>> /vagrant/log/stealthly.log 2>> /vagrant/log/stealthly.log &
|
stealthly/dropwizard-kafka-http
|
vagrant/broker.sh
|
Shell
|
apache-2.0
| 2,195 |
#!/bin/bash
sphinx-autobuild . _build_html
|
jtjeferreira/supler
|
docs/autobuild.sh
|
Shell
|
apache-2.0
| 43 |
#!/usr/bin/env bash
set -u -e -o pipefail
TRAVIS=${TRAVIS:-}
CI_MODE=${CI_MODE:-}
# Setup environment
readonly thisDir=$(cd $(dirname $0); pwd)
source ${thisDir}/_travis-fold.sh
# If the previous commands in the `script` section of .travis.yaml failed, then abort.
# The variable is not set in early stages of the build, so we default to 0 there.
# https://docs.travis-ci.com/user/environment-variables/
if [[ ${TRAVIS_TEST_RESULT=0} == 1 ]]; then
exit 1;
fi
mkdir -p ${LOGS_DIR}
# TODO: install nvm?? it's already on travis so we don't need it
#curl -o- https://raw.githubusercontent.com/creationix/nvm/v0.31.0/install.sh | bash
# Install node
#nvm install ${NODE_VERSION}
# Install version of yarn that we are locked against
travisFoldStart "install-yarn"
curl -o- -L https://yarnpkg.com/install.sh | bash -s -- --version "${YARN_VERSION}"
travisFoldEnd "install-yarn"
# Install all npm dependencies according to yarn.lock
travisFoldStart "yarn-install"
(node tools/npm/check-node-modules --purge && yarn postinstall) || yarn install --frozen-lockfile --non-interactive
travisFoldEnd "yarn-install"
# Install bower packages
travisFoldStart "bower-install"
$(npm bin)/bower install
travisFoldEnd "bower-install"
if [[ ${TRAVIS} &&
${CI_MODE} == "aio" ||
${CI_MODE} == "aio_e2e" ||
${CI_MODE} == "aio_tools_test"
]]; then
# angular.io: Install all yarn dependencies according to angular.io/yarn.lock
travisFoldStart "yarn-install.aio"
(
# HACK (don't submit with this): Build Angular
./build.sh --packages=compiler,core,elements --examples=false
cd ${PROJECT_ROOT}/aio
yarn install --frozen-lockfile --non-interactive
)
travisFoldEnd "yarn-install.aio"
fi
# Install Chromium
if [[ ${TRAVIS} &&
${CI_MODE} == "js" ||
${CI_MODE} == "e2e" ||
${CI_MODE} == "e2e_2" ||
${CI_MODE} == "aio" ||
${CI_MODE} == "aio_e2e"
]]; then
travisFoldStart "install-chromium"
(
${thisDir}/install-chromium.sh
# Start xvfb for local Chrome used for testing
if [[ ${TRAVIS} ]]; then
travisFoldStart "install-chromium.xvfb-start"
sh -e /etc/init.d/xvfb start
travisFoldEnd "install-chromium.xvfb-start"
fi
)
travisFoldEnd "install-chromium"
fi
# Install Sauce Connect
if [[ ${TRAVIS} && (${CI_MODE} == "saucelabs_required" || ${CI_MODE} == "saucelabs_optional") ]]; then
travisFoldStart "install-sauceConnect"
(
${thisDir}/../sauce/sauce_connect_setup.sh
)
travisFoldEnd "install-sauceConnect"
fi
# Install BrowserStack Tunnel
if [[ ${TRAVIS} && (${CI_MODE} == "browserstack_required" || ${CI_MODE} == "browserstack_optional") ]]; then
travisFoldStart "install-browserstack"
(
${thisDir}/../browserstack/start_tunnel.sh
)
travisFoldEnd "install-browserstack"
fi
# Print return arrows as a log separator
travisFoldReturnArrows
|
kara/angular
|
scripts/ci/install.sh
|
Shell
|
mit
| 2,890 |
#!/bin/bash
bash $SCRIPT_DIR/start_proxy.sh
### Test that running "command --help" (or "-h") on the admin port does not cause a core dump ######
mysql_cmd="$MYSQL -h $MYSQL_PROXY_ADMIN_IP -P $MYSQL_PROXY_ADMIN_PORT -u$MYSQL_PROXY_ADMIN_USER -p$MYSQL_PROXY_ADMIN_PASSWD -ABs -e"
expect_result="ERROR 3026 (42000) at line 1: admin command syntax error"
test_sql1="addbackend --help"
###### Run the test ######
ret=$($mysql_cmd "$test_sql1" 2>&1)
if [ "$expect_result" != "$ret ];then
echo "expected ret:$expect_result"
echo "actual ret:$ret"
exit 1
fi
test_sql2="addbackend -h"
###### Run the test ######
ret=$($mysql_cmd "$test_sql2" 2>&1)
if [ "$expect_result" != "$ret ];then
echo "expected ret:$expect_result"
echo "actual ret:$ret"
exit 1
fi
ret=0
bash $SCRIPT_DIR/stop_proxy.sh
exit $ret
#eof
|
SOHUDBA/SOHU-DBProxy
|
tests/mytest/test-18-4.sh
|
Shell
|
gpl-2.0
| 804 |
#!/bin/bash
############################################################################################
## Copyright 2003, 2015 IBM Corp ##
## ##
## Redistribution and use in source and binary forms, with or without modification, ##
## are permitted provided that the following conditions are met: ##
## 1.Redistributions of source code must retain the above copyright notice, ##
## this list of conditions and the following disclaimer. ##
## 2.Redistributions in binary form must reproduce the above copyright notice, this ##
## list of conditions and the following disclaimer in the documentation and/or ##
## other materials provided with the distribution. ##
## ##
## THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS AND ANY EXPRESS ##
## OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF ##
## MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ##
## THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, ##
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ##
## SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) ##
## HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, ##
## OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ##
## SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ##
############################################################################################
# File : python-ethtool.sh #
# Description: Test the pethtool and pifconfig command. #
# Author: Kumuda G, [email protected] #
############################################################################################
################################################################################
# source the utility functions
################################################################################
#cd `dirname $0`
#LTPBIN=${PWD%%/testcases/*}/testcases/bin
source $LTPBIN/tc_utils.source
TESTS_DIR="${LTPBIN%/shared}/python_ethtool"
REQUIRED="pethtool pifconfig ethtool ifconfig"
################################################################################
# test functions
################################################################################
function tc_local_setup()
{
tc_exec_or_break $REQUIRED || return
tc_get_iface
interface="$TC_IFACE"
restore_tso=`ethtool -k $interface | grep "tcp-segmentation-offload:"|cut -d":" -f2`
}
function tc_local_cleanup()
{
#restoring the tcp segmentation offload, by default its on
ethtool -K $interface tso $restore_tso >> /dev/null
}
function pethtool_tests()
{
tc_register pethtool_Cc
prev=`pethtool -c $interface|grep rx-usecs:|cut -d" " -f2`
pethtool -C $interface rx-usecs $((prev+1)) 1>$stdout 2>$stderr
res=`ethtool -c $interface|grep rx-usecs:|cut -d" " -f2`
[ $res -gt $prev ]
tc_pass_or_fail $? "Failed to set and/or read rx-usecs"
tc_register pethtool_Kk
[ "$restore_tso" = " off" ] && tso=on || tso=off
pethtool -K $interface tso $tso 1>$stdout 2>$stderr
pethtool -k $interface | grep -q "tcp segmentation offload: $tso"
pret=$?
#pethtool failure is validated with ethtool to confirm whether the TCP off/on'loading
#operation is supported or not by network card, if its not supported then pethtool_Kk
#test will throw an info message for 'not supported operation' without failing in
#regression run. If the operation is supported and pethtool fails, then pethtool_Kk test fails.
if [ $pret -eq 0 ]
then
tc_pass
else
ethtool -K $interface tso $tso 1>$stdout 2>$stderr
grep -qi "Cannot change" $stderr
if [ $? -eq 0 ]
then
tc_info "pethtool_Kk: `cat $stderr` !!"
else
tc_fail "Test fails"
fi
fi
tc_register pethtool_i
pethtool -i $interface 1>$stdout 2>$stderr
tc_pass_or_fail $? "test fail"
}
function pifconfig_test()
{
tc_register pifconfig
ifconf=`ifconfig $interface|grep -w "inet"|awk '{print $2}'`
pifconfig $interface 1>$stdout 2>$stderr
[ `grep "inet addr" $stdout|cut -f2 -d':' | cut -f1 -d' '` = "$ifconf" ]
tc_pass_or_fail $? "test fail"
}
################################################################################
# MAIN
################################################################################
TST_TOTAL=4
tc_get_os_ver
tc_setup
[ $TC_OS_VER -le 73 ] && TST_TOTAL=3 || pifconfig_test
pethtool_tests
|
PoornimaNayak/autotest-client-tests
|
linux-tools/python_ethtool/python-ethtool.sh
|
Shell
|
gpl-2.0
| 5,078 |
#! /bin/sh
# Copyright (C) 2008-2013 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Make sure autoconf version checks in aclocal.m4 are robust.
am_create_testdirs=empty
. test-init.sh
cat > configure.ac <<END
m4_define([AC_AUTOCONF_VERSION], [9999a])
dnl!! m4_define([a], [oops])
AC_INIT([$me], [1.0])
AM_INIT_AUTOMAKE
AC_CONFIG_FILES([Makefile])
AC_OUTPUT
END
: > Makefile.am
$ACLOCAL
$AUTOCONF 2>stderr || { cat stderr >&2; exit 1; }
cat stderr >&2
grep 'You have another version of autoconf' stderr
grep 'aclocal.m4:.*this file was generated for' stderr
$AUTOMAKE -a
./configure
$MAKE
sed 's/^dnl!! //' < configure.ac > configure.tmp
cmp configure.ac configure.tmp && fatal_ 'failed to edit configure.ac'
mv -f configure.tmp configure.ac
run_make -E
grep 'You have another version of autoconf' stderr
grep 'aclocal.m4:.*this file was generated for autoconf 9999a' stderr
:
|
DDTChen/CookieVLC
|
vlc/extras/tools/automake/t/aclocal-autoconf-version-check.sh
|
Shell
|
gpl-2.0
| 1,491 |
#!/bin/sh
user_name=admin
user_home=/tmp/admin_home
mount_point=/tmp/jffs2_green_download
file_got=/tmp/green-download-file
file_new=/tmp/green_download.squashfs
file_prefix=/tmp/green_download_prefix
/bin/config set green_download_upgrade_stat=0
#umount
#flash_size=`cat /proc/flash_size`
#[ $flash_size -lt 16 ] && echo "Flash size is not 16MB, exit ..." && exit
[ ! -f /tmp/green-download-file ] && exit
#run /usr/sbin/green_download.sh stop in httpd.c/post_green_upg()
/bin/umount /dev/mtdblock12 && sleep 1
#do check
dd if=$file_got of=$file_prefix bs=1 count=32
if [ `grep -c "GreenDownload-" $file_prefix` -lt 1 ]; then
rm -f $file_prefix
/bin/config set green_download_upgrade_stat=2
/bin/config commit
echo "Unrecognized file, exit from upgrading..."
exit
fi
rm -f $file_prefix
#write to flash
dd if=$file_got of=$file_new bs=1 skip=32 && rm -rf $file_got
mtd erase /dev/mtd12 && sleep 1
/sbin/mtd write $file_new /dev/mtd12 && rm -rf $file_new
#mount
[ ! -d $mount_point ] && mkdir -p $mount_point && sleep 1
#mount -t jffs2 /dev/mtdblock9 $mount_point && sleep 1 && chown $user_name $mount_point/*
mount /dev/mtdblock12 $mount_point && sleep 1
#user should restart green download app mannually
/usr/sbin/green_download.sh start
/bin/config set green_download_upgrade_stat=1
/bin/config commit
|
paul-chambers/netgear-r7800
|
package/green-download/files/green_download_upgrade.sh
|
Shell
|
gpl-2.0
| 1,319 |
#!/bin/bash
../../src/getmissingandreport -s -c corrections.xml -t veiveg.osm.out -w existingways.osm existingnodes.osm newnodes.osm | tee response1-1.txt.out
../../src/getmissingandreport -s -c corrections.xml -t veiveg.osm.out existingnodes.osm newnodes.osm | tee response1-2.txt.out
../../src/getmissingandreport -s -t veiveg.osm.out existingnodes.osm newnodes.osm | tee response1-3.txt.out
../../src/getmissingandreport -s -t veiveg.osm.out -w existingways.osm existingnodes.osm newnodes.osm | tee response1-4.txt.out
../../src/getmissingandreport -s -c corrections.xml -t veiveg.osm.out -n notmatched.osm.out -d duplicates.osm.out -e otherobjects.osm.out -o output.osm.out -w existingways.osm existingnodes.osm newnodes.osm | tee response1-5.txt.out
../../src/getmissingandreport -s -c corrections.xml -w existingways.osm existingnodes.osm newnodes.osm | tee response1-6.txt.out
diff response1-1.txt response1-1.txt.out
if [ $? -ne 0 ]
then echo "Failed test"
exit -1
fi
diff response1-2.txt response1-2.txt.out
if [ $? -ne 0 ]
then echo "Failed test"
exit -1
fi
diff response1-3.txt response1-3.txt.out
if [ $? -ne 0 ]
then echo "Failed test"
exit -1
fi
diff response1-4.txt response1-4.txt.out
if [ $? -ne 0 ]
then echo "Failed test"
exit -1
fi
diff response1-1.txt response1-5.txt.out # The number shall be the same as for 1-1
if [ $? -ne 0 ]
then echo "Failed test"
exit -1
fi
diff response1-6.txt response1-6.txt.out
if [ $? -ne 0 ]
then echo "Failed test"
exit -1
fi
diff output.osm output.osm.out
if [ $? -ne 0 ]
then echo "Failed test"
exit -1
fi
diff veiveg.osm veiveg.osm.out
if [ $? -ne 0 ]
then echo "Failed test"
exit -1
fi
diff notmatched.osm notmatched.osm.out
if [ $? -ne 0 ]
then echo "Failed test"
exit -1
fi
|
rubund/addrnodeimport
|
test/2/runtest.sh
|
Shell
|
gpl-2.0
| 1,737 |
set -e;
BT=${BT-../../bin/bedtools}
FAILURES=0;
check()
{
if diff $1 $2; then
echo ok
else
FAILURES=$(expr $FAILURES + 1);
echo fail
fi
}
###########################################################
# Test that -n option is shown as deperecated
###########################################################
#echo -e " merge.t2...\c"
#echo "***** ERROR: -n option is deprecated. Please see the documentation for the -c and -o column operation options. *****" > exp
#$BT merge -i a.bed -n 2>&1 > /dev/null | head -2 | tail -1 > obs
#check obs exp
#rm obs exp
###########################################################
# Test basic grouping
###########################################################
echo -e " groupby.t1...\c"
echo \
"chr1 0 10 10
chr1 10 20 5
chr1 11 21 5
chr1 20 30 45
chr1 120 130 1
chr3 0 10 1
chr3 10 20 2
chr3 20 30 3
chr3 120 130 8" > exp
$BT groupby -i values3.header.bed -c 5 > obs
check obs exp
rm obs exp
###########################################################
# Test case insensitive grouping works
###########################################################
echo -e " groupby.t2...\c"
echo \
"chr1 0 10 10
cHr1 10 20 5
Chr1 11 21 5
chR1 20 30 45
Chr1 120 130 1
CHr3 0 10 1
cHR3 10 20 2
CHR3 20 30 3
chr3 120 130 8" > exp
$BT groupby -i values3_case.header.bed -c 5 -ignorecase > obs
check obs exp
rm obs exp
###########################################################
# Test -full option (print all columns, not just grouped
# ones)
###########################################################
echo -e " groupby.t3...\c"
echo \
"chr1 0 10 a1 10 + 10
chr1 10 20 a2 5 + 5
chr1 11 21 a3 5 + 5
chr1 20 30 a4 15 + 45
chr1 120 130 a7 1 + 1
chr3 0 10 a8 1 + 1
chr3 10 20 a9 2 + 2
chr3 20 30 a10 3 + 3
chr3 120 130 a11 4 + 8" > exp
$BT groupby -i values3.header.bed -c 5 -full > obs
check obs exp
rm obs exp
###########################################################
# Test -inheader option
###########################################################
echo -e " groupby.t4...\c"
echo \
"chr1 0 10 10
chr1 10 20 5
chr1 11 21 5
chr1 20 30 45
chr1 120 130 1
chr3 0 10 1
chr3 10 20 2
chr3 20 30 3
chr3 120 130 8" > exp
$BT groupby -i values3.header.bed -c 5 -inheader > obs
check obs exp
rm obs exp
###########################################################
# Test -inheader option when header not marked by
# recognized char
###########################################################
echo -e " groupby.t5...\c"
echo \
"chr1 0 10 10
chr1 10 20 5
chr1 11 21 5
chr1 20 30 45
chr1 120 130 1
chr3 0 10 1
chr3 10 20 2
chr3 20 30 3
chr3 120 130 8" > exp
$BT groupby -i values3.unmarked_header.bed -c 5 -inheader > obs
check obs exp
rm obs exp
###########################################################
# Test -inheader option when no header present will skip
# first line
###########################################################
echo -e " groupby.t6...\c"
echo \
"chr1 10 20 5
chr1 11 21 5
chr1 20 30 45
chr1 120 130 1
chr3 0 10 1
chr3 10 20 2
chr3 20 30 3
chr3 120 130 8" > exp
$BT groupby -i values3.no_header.bed -c 5 -inheader > obs
check obs exp
rm obs exp
###########################################################
# Test -outheader option will work automatically, even
# without -inheader, if header has normally marked start char.
###########################################################
echo -e " groupby.t7...\c"
echo \
"#chrom start end A B C
chr1 0 10 10
chr1 10 20 5
chr1 11 21 5
chr1 20 30 45
chr1 120 130 1
chr3 0 10 1
chr3 10 20 2
chr3 20 30 3
chr3 120 130 8" > exp
$BT groupby -i values3.header.bed -c 5 -outheader > obs
check obs exp
rm obs exp
###########################################################
# Test that unmarked header will be included by default.
###########################################################
echo -e " groupby.t8...\c"
echo \
"Chromz start end B
chr1 0 10 10
chr1 10 20 5
chr1 11 21 5
chr1 20 30 15
chr1 120 130 1
chr3 0 10 1
chr3 10 20 2
chr3 20 30 3
chr3 120 130 4" > exp
$BT groupby -i values3.unmarked_header.bed.2 -c 5 -o distinct > obs
check obs exp
rm obs exp
###########################################################
# Test that -outheader does nothing with unmarked header
###########################################################
echo -e " groupby.t9...\c"
echo \
"col_1 col_2 col_3 col_4 col_5 col_6
Chromz start end B
chr1 0 10 10
chr1 10 20 5
chr1 11 21 5
chr1 20 30 15
chr1 120 130 1
chr3 0 10 1
chr3 10 20 2
chr3 20 30 3
chr3 120 130 4" > exp
$BT groupby -i values3.unmarked_header.bed.2 -c 5 -o distinct -outheader > obs
check obs exp
rm obs exp
###########################################################
# Test that -header works with unmarked header
###########################################################
echo -e " groupby.t10...\c"
echo \
"Chrom start end A B C
chr1 0 10 10
chr1 10 20 5
chr1 11 21 5
chr1 20 30 15
chr1 120 130 1
chr3 0 10 1
chr3 10 20 2
chr3 20 30 3
chr3 120 130 4" > exp
$BT groupby -i values3.unmarked_header.bed -c 5 -o distinct -header > obs
check obs exp
rm obs exp
###########################################################
# Test that -header works normally with normal header
###########################################################
echo -e " groupby.t11...\c"
echo \
"#chrom start end A B C
chr1 0 10 10
chr1 10 20 5
chr1 11 21 5
chr1 20 30 45
chr1 120 130 1
chr3 0 10 1
chr3 10 20 2
chr3 20 30 3
chr3 120 130 8" > exp
$BT groupby -i values3.header.bed -c 5 -header > obs
check obs exp
rm obs exp
###########################################################
# Test a BedPlus file (7 fields)
###########################################################
echo -e " groupby.t12...\c"
echo \
"chr1 0 10 10
chr1 10 20 5
chr1 11 21 5
chr1 20 30 45
chr1 120 130 1
chr3 0 10 1
chr3 10 20 2
chr3 20 30 3
chr3 120 130 8" > exp
$BT groupby -i values3.7fields.header.bed -c 5 > obs
check obs exp
rm obs exp
###########################################################
# Test noPosPlus file (8 fields, not starting with
# chr, starte, end
###########################################################
echo -e " groupby.t13...\c"
echo \
"chr1 0 10 10
chr1 10 20 5
chr1 11 21 5
chr1 20 30 45
chr1 120 130 1
chr3 0 10 1
chr3 10 20 2
chr3 20 30 3
chr3 120 130 8" > exp
$BT groupby -g 2-4 -i noPosvalues.header.bed -c 6 > obs
check obs exp
rm obs exp
###########################################################
# Test noPosPlus file with mof columns (iterated and range)
###########################################################
echo -e " groupby.t14...\c"
echo \
"0 10 chr1 10
10 20 chr1 5
11 21 chr1 5
20 30 chr1 45
120 130 chr1 1
0 10 chr3 1
10 20 chr3 2
20 30 chr3 3
120 130 chr3 8" > exp
$BT groupby -g 3-4,2 -i noPosvalues.header.bed -c 6 > obs
check obs exp
rm obs exp
###########################################################
# Test a VCF file
###########################################################
echo -e " groupby.t16...\c"
echo \
"19 G 70.9
19 C 33.71
19 A 21.2" > exp
$BT groupby -i a_vcfSVtest.vcf -g 1,4 -o mean -c 6 > obs
check obs exp
rm obs exp
###########################################################
# Test a BAM file
###########################################################
echo -e " groupby.t17...\c"
echo \
"None chr2L 118.75" > exp
$BT groupby -i gdc.bam -g 1,3 -c 4 -o mean > obs
check obs exp
rm obs exp
###########################################################
# Test a single column of data
###########################################################
echo -e " groupby.t18...\c"
echo \
"chr1 chr1,chr1,chr1" > exp
cut -f 1 test.bed | $BT groupby -g 1 -i - -c 1 -o collapse > obs
check obs exp
rm obs exp
###########################################################
# Test fix for bug 569
###########################################################
echo " groupby.t19...\c"
echo \
"AAAACAATTGGTATTCTTGGAGG 3009041 3009064" > exp
$BT groupby -i bug569_problem.txt -g 1 -c 3,4 -o distinct,min > obs
check obs exp
rm obs exp
###########################################################
# Test fix for bug 569
###########################################################
echo " groupby.t20...\c"
echo "a 1253555555355577777777 7.777788889e+15" > exp
echo "a 1253555555355577777777 7777788888899999" | $BT groupby -i - -g 1 -c 2,3 -o distinct,min > obs
check obs exp
rm obs exp
[[ $FAILURES -eq 0 ]] || exit 1;
|
lindenb/bedtools2
|
test/groupby/test-groupby.sh
|
Shell
|
gpl-2.0
| 8,386 |
current_ip="$(get_ip)"
[ -z "$current_ip" ] && current_ip="-$(lang de:"keine" en:"none")-"
sec_begin 'get_ip'
cat << EOF
<p>
$(lang de:"Immer diese Methode nutzen" en:"Always use this method"): <input type="text" name="get_ip_method" size="20" maxlength="20" value="$(html "$MOD_GET_IP_METHOD")"></p>
</p>
<p>
$(lang de:"Server für STUN/VoIP Methode" en:"Server for STUN/VoIP method"): <input type="text" name="get_ip_stun" size="50" maxlength="250" value="$(html "$MOD_GET_IP_STUN")"></p>
</p>
<p>
<div style='margin-top:6px;'>$(lang de:"Mögliche Methoden" en:"Available methods"):</div>
<pre>
EOF
get_ip --help | grep "^[ \t]\+-" | grep -v -- "--help" | html
cat << EOF
</pre>
</p>
<p>
$(lang de:"Ermittelte IP" en:"Determined IP"): $current_ip
</p>
EOF
sec_end
|
Freetz/freetz
|
make/mod/files/root/usr/lib/cgi-bin/mod/conf/70-get_ip.sh
|
Shell
|
gpl-2.0
| 781 |
#! /bin/sh
$EXTRACTRC *.rc >> rc.cpp
$XGETTEXT *.cpp -o $podir/katemailfilesplugin.pot
|
DickJ/kate
|
addons/mailfiles/Messages.sh
|
Shell
|
lgpl-2.1
| 87 |
#!/bin/bash
ROOT=$1
VERSION=$2
for F in $(grep -rl "${ROOT}" .)
do
sed -i -r "s:import ${ROOT} [0-9]+.[0-9]+:import ${ROOT} ${VERSION}:g" $F
done
|
osechet/qml-material
|
scripts/normalize_imports.sh
|
Shell
|
lgpl-2.1
| 153 |
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# set EAGLE_HOME
export EAGLE_HOME=$(dirname $0)/..
# The java implementation to use. please use jdk 1.7 or later
# export JAVA_HOME=${JAVA_HOME}
# export JAVA_HOME=/usr/java/jdk1.7.0_80/
# nimbus.host, default is localhost
export EAGLE_NIMBUS_HOST=localhost
# EAGLE_SERVICE_HOST, default is `hostname -f`
export EAGLE_SERVICE_HOST=localhost
# EAGLE_SERVICE_PORT, default is 9099
export EAGLE_SERVICE_PORT=9099
# EAGLE_SERVICE_USER
export EAGLE_SERVICE_USER=admin
# EAGLE_SERVICE_PASSWORD
export EAGLE_SERVICE_PASSWD=secret
export EAGLE_CLASSPATH=$EAGLE_HOME/conf
# Add eagle shared library jars
for file in $EAGLE_HOME/lib/share/*;do
EAGLE_CLASSPATH=$EAGLE_CLASSPATH:$file
done
|
pkuwm/incubator-eagle
|
eagle-hadoop-metric/src/main/resources/eagle-env.sh
|
Shell
|
apache-2.0
| 1,481 |
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
set -e
source $TRAVIS_BUILD_DIR/ci/travis_env_common.sh
pushd $ARROW_C_GLIB_DIR
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$ARROW_CPP_INSTALL/lib
if [ $BUILD_SYSTEM = "autotools" ]; then
arrow_c_glib_lib_dir=$ARROW_C_GLIB_INSTALL/lib
else
arrow_c_glib_lib_dir=$ARROW_C_GLIB_INSTALL/lib/$(arch)-linux-gnu
fi
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$arrow_c_glib_lib_dir
export GI_TYPELIB_PATH=$arrow_c_glib_lib_dir/girepository-1.0
test/run-test.rb
if [ $BUILD_SYSTEM = "meson" ]; then
exit
fi
export PKG_CONFIG_PATH=$PKG_CONFIG_PATH:$ARROW_CPP_INSTALL/lib/pkgconfig
export PKG_CONFIG_PATH=$PKG_CONFIG_PATH:$arrow_c_glib_lib_dir/pkgconfig
pushd example/lua
if [ $TRAVIS_OS_NAME = "osx" ]; then
lua write-batch.lua
lua read-batch.lua
lua write-stream.lua
lua read-stream.lua
else
if [ $BUILD_TORCH_EXAMPLE = "yes" ]; then
. ~/torch/install/bin/torch-activate
luajit write-batch.lua
luajit read-batch.lua
luajit write-stream.lua
luajit read-stream.lua
luajit stream-to-torch-tensor.lua
else
lua write-batch.lua
lua read-batch.lua
lua write-stream.lua
lua read-stream.lua
fi
fi
popd
pushd example/go
make generate
make
./write-batch
./read-batch
./write-stream
./read-stream
popd
popd
|
yufeldman/arrow
|
ci/travis_script_c_glib.sh
|
Shell
|
apache-2.0
| 2,053 |
#!/bin/sh
NAME=`oasis query Name 2> /dev/null`
VERSION=`oasis query Version 2> /dev/null`
DARCS_REPO=`pwd`
export DARCS_REPO
exec darcs dist --dist-name $NAME-$VERSION
|
vbmithr/ocaml-text
|
dist.sh
|
Shell
|
bsd-3-clause
| 170 |
#!/bin/sh
mkdir build
cd build
cmake -DCMAKE_BUILD_TYPE=Debug ..
echo "Building with $(nproc) jobs"
make -j2
|
etnlGD/renderdoc
|
scripts/travis/osx_compile.sh
|
Shell
|
mit
| 110 |
#!/bin/sh
TEST_SCRIPT=./VMake/executableTester.sh
until test -r ${TEST_SCRIPT} ; do
TEST_SCRIPT=../${TEST_SCRIPT}
done
. ${TEST_SCRIPT}
runAndHandleSystemTest "testTimeMonitor " "$0" "$@"
|
kelchuan/snac_thesis
|
StGermain/Base/Foundation/tests/testTimeMonitor.0of1.sh
|
Shell
|
gpl-2.0
| 198 |
#! /bin/sh
# Copyright (C) 1999-2015 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Another test of conditional include statements.
. test-init.sh
cat >> configure.ac << 'END'
AM_CONDITIONAL([TOBE], [false])
END
cat > Makefile.am << 'END'
if TOBE
include adjunct
endif
END
cat > adjunct << 'END'
target: dependency
rule
endif
END
$ACLOCAL
AUTOMAKE_fails
grep 'adjunct:3: error: too many conditionals closed' stderr
cat > adjunct << 'END'
if TOBE
target: dependency
rule
END
AUTOMAKE_fails
grep 'unterminated conditionals' stderr
cat > adjunct << 'END'
if TOBE
target: dependency
rule
endif
END
$AUTOMAKE
:
|
hika-server/automake
|
t/condinc2.sh
|
Shell
|
gpl-2.0
| 1,227 |
#!/bin/bash
# Copyright 2012-2014 Johns Hopkins University (Author: Daniel Povey).
# 2013 Xiaohui Zhang
# 2013 Guoguo Chen
# 2014 Vimal Manohar
# Apache 2.0.
# train_pnorm_accel2.sh is a modified form of train_pnorm_simple2.sh (the "2"
# suffix is because they both use the the "new" egs format, created by
# get_egs2.sh). The "accel" part of the name refers to the fact that this
# script uses a number of jobs that can increase during training. You can
# specify --initial-num-jobs and --final-num-jobs to control these separately.
# Also, in this script, the learning rates specified by --initial-learning-rate
# and --final-learning-rate are the "effective learning rates" (defined as the
# learning rate divided by the number of jobs), and the actual learning rates
# used will be the specified learning rates multiplied by the current number
# of jobs. You'll want to set these lower than you normally would previously
# have set the learning rates, by a factor equal to the (previous) number of
# jobs.
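# For example (hypothetical values): with --initial-effective-lrate 0.01 and
# --num-jobs-initial 2, the actual learning rate on the first iteration is
# 0.01 * 2 = 0.02, and it grows with the job count as training accelerates
# towards --num-jobs-final.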
# Begin configuration section.
cmd=run.pl
num_epochs=15 # Number of epochs of training;
# the number of iterations is worked out from this.
initial_effective_lrate=0.01
final_effective_lrate=0.001
bias_stddev=0.5
pnorm_input_dim=3000
pnorm_output_dim=300
p=2
minibatch_size=128 # by default use a smallish minibatch size for neural net
# training; this controls instability which would otherwise
# be a problem with multi-threaded update.
samples_per_iter=400000 # each iteration of training, see this many samples
# per job. This option is passed to get_egs.sh
num_jobs_initial=1 # Number of neural net jobs to run in parallel at the start of training.
num_jobs_final=8 # Number of jobs to run in parallel at the end of training.
prior_subset_size=10000 # 10k samples per job, for computing priors. Should be
# more than enough.
num_jobs_compute_prior=10 # these are single-threaded, run on CPU.
get_egs_stage=0
online_ivector_dir=
max_models_combine=20 # The "max_models_combine" is the maximum number of models we give
# to the final 'combine' stage, but these models will themselves be averages of
# iteration-number ranges.
shuffle_buffer_size=5000 # This "buffer_size" variable controls randomization of the samples
# on each iter. You could set it to 0 or to a large value for complete
# randomization, but this would both consume memory and cause spikes in
# disk I/O. Smaller is easier on disk and memory but less random. It's
# not a huge deal though, as samples are anyway randomized right at the start.
# (the point of this is to get data in different minibatches on different iterations,
# since in the preconditioning method, 2 samples in the same minibatch can
# affect each others' gradients.
add_layers_period=2 # by default, add new layers every 2 iterations.
num_hidden_layers=3
stage=-4
splice_width=4 # meaning +- 4 frames on each side for second LDA
left_context= # if set, overrides splice-width
right_context= # if set, overrides splice-width.
randprune=4.0 # speeds up LDA.
alpha=4.0 # relates to preconditioning.
update_period=4 # relates to online preconditioning: says how often we update the subspace.
num_samples_history=2000 # relates to online preconditioning
max_change_per_sample=0.075
precondition_rank_in=20 # relates to online preconditioning
precondition_rank_out=80 # relates to online preconditioning
mix_up=0 # Number of components to mix up to (should be > #tree leaves, if
# specified.)
num_threads=16
parallel_opts="-pe smp 16 -l ram_free=1G,mem_free=1G"
# by default we use 16 threads; this lets the queue know.
# note: parallel_opts doesn't automatically get adjusted if you adjust num-threads.
combine_num_threads=8
combine_parallel_opts="-pe smp 8" # queue options for the "combine" stage.
cleanup=true
egs_dir=
lda_opts=
lda_dim=
egs_opts=
io_opts="-tc 5" # for jobs with a lot of I/O, limits the number running at one time.
transform_dir= # If supplied, overrides alidir
postdir=
cmvn_opts= # will be passed to get_lda.sh and get_egs.sh, if supplied.
# only relevant for "raw" features, not lda.
feat_type= # Can be used to force "raw" features.
align_cmd= # The cmd that is passed to steps/nnet2/align.sh
align_use_gpu= # Passed to use_gpu in steps/nnet2/align.sh [yes/no]
realign_times= # List of times on which we realign. Each time is
# floating point number strictly between 0 and 1, which
# will be multiplied by the num-iters to get an iteration
# number.
num_jobs_align=30 # Number of jobs for realignment
srand=0 # random seed used to initialize the nnet
# End configuration section.
echo "$0 $@" # Print the command line for logging
if [ -f path.sh ]; then . ./path.sh; fi
. parse_options.sh || exit 1;
if [ $# != 4 ]; then
echo "Usage: $0 [opts] <data> <lang> <ali-dir> <exp-dir>"
echo " e.g.: $0 data/train data/lang exp/tri3_ali exp/tri4_nnet"
echo ""
echo "Main options (for others, see top of script file)"
echo " --config <config-file> # config file containing options"
echo " --cmd (utils/run.pl|utils/queue.pl <queue opts>) # how to run jobs."
echo " --num-epochs <#epochs|15> # Number of epochs of training"
echo " --initial-effective-lrate <lrate|0.02> # effective learning rate at start of training,"
echo " # actual learning-rate is this time num-jobs."
echo " --final-effective-lrate <lrate|0.004> # effective learning rate at end of training."
echo " --num-hidden-layers <#hidden-layers|2> # Number of hidden layers, e.g. 2 for 3 hours of data, 4 for 100hrs"
echo " --add-layers-period <#iters|2> # Number of iterations between adding hidden layers"
echo " --mix-up <#pseudo-gaussians|0> # Can be used to have multiple targets in final output layer,"
echo " # per context-dependent state. Try a number several times #states."
echo " --num-jobs-initial <num-jobs|1> # Number of parallel jobs to use for neural net training, at the start."
echo " --num-jobs-final <num-jobs|8> # Number of parallel jobs to use for neural net training, at the end"
echo " --num-threads <num-threads|16> # Number of parallel threads per job (will affect results"
echo " # as well as speed; may interact with batch size; if you increase"
echo " # this, you may want to decrease the batch size."
echo " --parallel-opts <opts|\"-pe smp 16 -l ram_free=1G,mem_free=1G\"> # extra options to pass to e.g. queue.pl for processes that"
echo " # use multiple threads... note, you might have to reduce mem_free,ram_free"
echo " # versus your defaults, because it gets multiplied by the -pe smp argument."
echo " --io-opts <opts|\"-tc 10\"> # Options given to e.g. queue.pl for jobs that do a lot of I/O."
echo " --minibatch-size <minibatch-size|128> # Size of minibatch to process (note: product with --num-threads"
echo " # should not get too large, e.g. >2k)."
echo " --samples-per-iter <#samples|400000> # Number of samples of data to process per iteration, per"
echo " # process."
echo " --splice-width <width|4> # Number of frames on each side to append for feature input"
echo " # (note: we splice processed, typically 40-dimensional frames"
echo " --lda-dim <dim|250> # Dimension to reduce spliced features to with LDA"
echo " --realign-epochs <list-of-epochs|\"\"> # A list of space-separated epoch indices the beginning of which"
echo " # realignment is to be done"
echo " --align-cmd (utils/run.pl|utils/queue.pl <queue opts>) # passed to align.sh"
echo " --align-use-gpu (yes/no) # specify is gpu is to be used for realignment"
echo " --num-jobs-align <#njobs|30> # Number of jobs to perform realignment"
echo " --stage <stage|-4> # Used to run a partially-completed training process from somewhere in"
echo " # the middle."
exit 1;
fi
data=$1
lang=$2
alidir=$3
dir=$4
if [ ! -z "$realign_times" ]; then
[ -z "$align_cmd" ] && echo "$0: realign_times specified but align_cmd not specified" && exit 1
[ -z "$align_use_gpu" ] && echo "$0: realign_times specified but align_use_gpu not specified" && exit 1
fi
# Check some files.
for f in $data/feats.scp $lang/L.fst $alidir/final.mdl $alidir/tree; do
[ ! -f $f ] && echo "$0: no such file $f" && exit 1;
done
[ ! -f $postdir/post.1.scp ] && [ ! -f $alidir/ali.1.gz ] && echo "$0: no (soft) alignments provided" && exit 1;
trap 'for pid in $(jobs -pr); do kill -KILL $pid; done' INT QUIT TERM
# Set some variables.
num_leaves=`tree-info $alidir/tree 2>/dev/null | grep num-pdfs | awk '{print $2}'` || exit 1
[ -z $num_leaves ] && echo "\$num_leaves is unset" && exit 1
[ "$num_leaves" -eq "0" ] && echo "\$num_leaves is 0" && exit 1
nj=`cat $alidir/num_jobs` || exit 1; # number of jobs in alignment dir...
# in this dir we'll have just one job.
sdata=$data/split$nj
utils/split_data.sh $data $nj
mkdir -p $dir/log
echo $nj > $dir/num_jobs
cp $alidir/tree $dir
extra_opts=()
[ ! -z "$cmvn_opts" ] && extra_opts+=(--cmvn-opts "$cmvn_opts")
[ ! -z "$feat_type" ] && extra_opts+=(--feat-type $feat_type)
[ ! -z "$online_ivector_dir" ] && extra_opts+=(--online-ivector-dir $online_ivector_dir)
[ -z "$transform_dir" ] && transform_dir=$alidir
extra_opts+=(--transform-dir $transform_dir)
[ -z "$left_context" ] && left_context=$splice_width
[ -z "$right_context" ] && right_context=$splice_width
extra_opts+=(--left-context $left_context --right-context $right_context)
if [ $stage -le -4 ]; then
echo "$0: calling get_lda.sh"
steps/nnet2/get_lda.sh $lda_opts "${extra_opts[@]}" --cmd "$cmd" $data $lang $alidir $dir || exit 1;
fi
# these files will have been written by get_lda.sh
feat_dim=$(cat $dir/feat_dim) || exit 1;
ivector_dim=$(cat $dir/ivector_dim) || exit 1;
lda_dim=$(cat $dir/lda_dim) || exit 1;
if [ $stage -le -3 ] && [ -z "$egs_dir" ]; then
echo "$0: calling get_egs2.sh"
steps/nnet2/get_egs2.sh $egs_opts "${extra_opts[@]}" --io-opts "$io_opts" \
--postdir "$postdir" --samples-per-iter $samples_per_iter --stage $get_egs_stage \
--cmd "$cmd" $egs_opts $data $alidir $dir/egs || exit 1;
fi
if [ -z $egs_dir ]; then
egs_dir=$dir/egs
fi
frames_per_eg=$(cat $egs_dir/info/frames_per_eg) || { echo "error: no such file $egs_dir/info/frames_per_eg"; exit 1; }
num_archives=$(cat $egs_dir/info/num_archives) || { echo "error: no such file $egs_dir/info/num_archives"; exit 1; }
# num_archives_expanded considers each separate label-position from
# 0..frames_per_eg-1 to be a separate archive.
num_archives_expanded=$[$num_archives*$frames_per_eg]
[ $num_jobs_initial -gt $num_jobs_final ] && \
echo "$0: --num-jobs-initial cannot exceed --num-jobs-final" && exit 1;
[ $num_jobs_final -gt $num_archives_expanded ] && \
echo "$0: --num-jobs-final cannot exceed #archives $num_archives_expanded." && exit 1;
if ! [ $num_hidden_layers -ge 1 ]; then
echo "Invalid num-hidden-layers $num_hidden_layers"
exit 1
fi
if [ $stage -le -2 ]; then
echo "$0: initializing neural net";
lda_mat=$dir/lda.mat
tot_input_dim=$[$feat_dim+$ivector_dim]
online_preconditioning_opts="alpha=$alpha num-samples-history=$num_samples_history update-period=$update_period rank-in=$precondition_rank_in rank-out=$precondition_rank_out max-change-per-sample=$max_change_per_sample"
initial_lrate=$(perl -e "print ($initial_effective_lrate*$num_jobs_initial);")
stddev=`perl -e "print 1.0/sqrt($pnorm_input_dim);"`
cat >$dir/nnet.config <<EOF
SpliceComponent input-dim=$tot_input_dim left-context=$left_context right-context=$right_context const-component-dim=$ivector_dim
FixedAffineComponent matrix=$lda_mat
AffineComponentPreconditionedOnline input-dim=$lda_dim output-dim=$pnorm_input_dim $online_preconditioning_opts learning-rate=$initial_lrate param-stddev=$stddev bias-stddev=$bias_stddev
PnormComponent input-dim=$pnorm_input_dim output-dim=$pnorm_output_dim p=$p
NormalizeComponent dim=$pnorm_output_dim
AffineComponentPreconditionedOnline input-dim=$pnorm_output_dim output-dim=$num_leaves $online_preconditioning_opts learning-rate=$initial_lrate param-stddev=0 bias-stddev=0
SoftmaxComponent dim=$num_leaves
EOF
# write to hidden.config the part of the config corresponding to a
# single hidden layer; we need this when adding new layers.
cat >$dir/hidden.config <<EOF
AffineComponentPreconditionedOnline input-dim=$pnorm_output_dim output-dim=$pnorm_input_dim $online_preconditioning_opts learning-rate=$initial_lrate param-stddev=$stddev bias-stddev=$bias_stddev
PnormComponent input-dim=$pnorm_input_dim output-dim=$pnorm_output_dim p=$p
NormalizeComponent dim=$pnorm_output_dim
EOF
$cmd $dir/log/nnet_init.log \
nnet-am-init $alidir/tree $lang/topo "nnet-init --srand=$srand $dir/nnet.config -|" \
$dir/0.mdl || exit 1;
fi
if [ $stage -le -1 ]; then
echo "Training transition probabilities and setting priors"
$cmd $dir/log/train_trans.log \
nnet-train-transitions $dir/0.mdl "ark:gunzip -c $alidir/ali.*.gz|" $dir/0.mdl \
|| exit 1;
fi
# set num_iters so that as close as possible, we process the data $num_epochs
# times, i.e. $num_iters*$avg_num_jobs == $num_epochs*$num_archives_expanded,
# where avg_num_jobs=(num_jobs_initial+num_jobs_final)/2.
num_archives_to_process=$[$num_epochs*$num_archives_expanded]
num_archives_processed=0
num_iters=$[($num_archives_to_process*2)/($num_jobs_initial+$num_jobs_final)]
echo "$0: Will train for $num_epochs epochs = $num_iters iterations"
finish_add_layers_iter=$[$num_hidden_layers * $add_layers_period]
! [ $num_iters -gt $[$finish_add_layers_iter+2] ] \
&& echo "$0: Insufficient epochs" && exit 1
# mix up at the iteration where we've processed about half the data; this keeps
# the overall training procedure fairly invariant to the number of initial and
# final jobs.
# j = initial, k = final, n = num-iters, x = half-of-data epoch,
# p is proportion of data we want to process (e.g. p=0.5 here).
# solve for x if the amount of data processed by epoch x is p
# times the amount by iteration n.
# put this in wolfram alpha:
# solve { x*j + (k-j)*x*x/(2*n) = p * (j*n + (k-j)*n/2), {x} }
# got: x = (j n-sqrt(-n^2 (j^2 (p-1)-k^2 p)))/(j-k) and j!=k and n!=0
# simplified manually to: n * (sqrt(((1-p)j^2 + p k^2)/2) - j)/(j-k)
mix_up_iter=$(perl -e '($j,$k,$n,$p)=@ARGV; print int(0.5 + ($j==$k ? $n*$p : $n*(sqrt((1-$p)*$j*$j+$p*$k*$k)-$j)/($k-$j))); ' $num_jobs_initial $num_jobs_final $num_iters 0.5)
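# Illustrative sanity check of the formula above (hypothetical numbers, not
# defaults): with num-jobs-initial j=2, num-jobs-final k=8, num-iters n=100 and
# p=0.5, we get n*(sqrt(0.5*4 + 0.5*64) - 2)/(8-2) = 100*(sqrt(34)-2)/6 ~= 63.9,
# so mix_up_iter ~= 64. It lands past the midpoint because the early iterations
# run fewer jobs and therefore process less data per iteration.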
! [ $mix_up_iter -gt $finish_add_layers_iter ] && \
echo "Mix-up-iter is $mix_up_iter, should be greater than $finish_add_layers_iter -> add more epochs?" \
&& exit 1;
if [ $num_threads -eq 1 ]; then
parallel_suffix="-simple" # this enables us to use GPU code if
# we have just one thread.
parallel_train_opts=
if ! cuda-compiled; then
echo "$0: WARNING: you are running with one thread but you have not compiled"
echo " for CUDA. You may be running a setup optimized for GPUs. If you have"
echo " GPUs and have nvcc installed, go to src/ and do ./configure; make"
fi
else
parallel_suffix="-parallel"
parallel_train_opts="--num-threads=$num_threads"
fi
approx_iters_per_epoch_final=$[$num_archives_expanded/$num_jobs_final]
# First work out how many models we want to combine over in the final
# nnet-combine-fast invocation. This equals
# min(max(max_models_combine, approx_iters_per_epoch_final),
# 2/3 * iters_after_mixup)
num_models_combine=$max_models_combine
if [ $num_models_combine -lt $approx_iters_per_epoch_final ]; then
num_models_combine=$approx_iters_per_epoch_final
fi
iters_after_mixup_23=$[(($num_iters-$mix_up_iter-1)*2)/3]
if [ $num_models_combine -gt $iters_after_mixup_23 ]; then
num_models_combine=$iters_after_mixup_23
fi
first_model_combine=$[$num_iters-$num_models_combine+1]
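# Worked example of the rule above (illustrative numbers only): with
# max_models_combine=20, num_archives_expanded=120 and num_jobs_final=8,
# approx_iters_per_epoch_final=15 so num_models_combine stays at 20; with
# num_iters=100 and mix_up_iter=64, iters_after_mixup_23=((100-64-1)*2)/3=23,
# which does not cap it, so first_model_combine = 100 - 20 + 1 = 81.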
x=0
for realign_time in $realign_times; do
# Work out the iterations on which we will re-align, if the --realign-times
# option was used. This is slightly approximate.
! perl -e "exit($realign_time > 0.0 && $realign_time < 1.0 ? 0:1);" && \
echo "Invalid --realign-times option $realign_times: elements must be strictly between 0 and 1.";
# the next formula is based on the one for mix_up_iter above.
realign_iter=$(perl -e '($j,$k,$n,$p)=@ARGV; print int(0.5 + ($j==$k ? $n*$p : $n*(sqrt((1-$p)*$j*$j+$p*$k*$k)-$j)/($k-$j))); ' $num_jobs_initial $num_jobs_final $num_iters $realign_time) || exit 1;
realign_this_iter[$realign_iter]=$realign_time
done
cur_egs_dir=$egs_dir
while [ $x -lt $num_iters ]; do
this_num_jobs=$(perl -e "print int(0.5+$num_jobs_initial+($num_jobs_final-$num_jobs_initial)*$x/$num_iters);")
ilr=$initial_effective_lrate; flr=$final_effective_lrate; np=$num_archives_processed; nt=$num_archives_to_process;
this_learning_rate=$(perl -e "print (($x + 1 >= $num_iters ? $flr : $ilr*exp($np*log($flr/$ilr)/$nt))*$this_num_jobs);");
# TODO: remove this line.
echo "On iteration $x, learning rate is $this_learning_rate."
if [ ! -z "${realign_this_iter[$x]}" ]; then
prev_egs_dir=$cur_egs_dir
cur_egs_dir=$dir/egs_${realign_this_iter[$x]}
fi
if [ $x -ge 0 ] && [ $stage -le $x ]; then
if [ ! -z "${realign_this_iter[$x]}" ]; then
time=${realign_this_iter[$x]}
echo "Getting average posterior for purposes of adjusting the priors."
# Note: this just uses CPUs, using a smallish subset of data.
# always use the first egs archive, which makes the script simpler;
# we're using different random subsets of it.
rm $dir/post.$x.*.vec 2>/dev/null
$cmd JOB=1:$num_jobs_compute_prior $dir/log/get_post.$x.JOB.log \
nnet-copy-egs --srand=JOB --frame=random ark:$prev_egs_dir/egs.1.ark ark:- \| \
nnet-subset-egs --srand=JOB --n=$prior_subset_size ark:- ark:- \| \
nnet-compute-from-egs "nnet-to-raw-nnet $dir/$x.mdl -|" ark:- ark:- \| \
matrix-sum-rows ark:- ark:- \| vector-sum ark:- $dir/post.$x.JOB.vec || exit 1;
sleep 3; # make sure there is time for $dir/post.$x.*.vec to appear.
$cmd $dir/log/vector_sum.$x.log \
vector-sum $dir/post.$x.*.vec $dir/post.$x.vec || exit 1;
rm $dir/post.$x.*.vec;
echo "Re-adjusting priors based on computed posteriors"
$cmd $dir/log/adjust_priors.$x.log \
nnet-adjust-priors $dir/$x.mdl $dir/post.$x.vec $dir/$x.mdl || exit 1;
sleep 2
steps/nnet2/align.sh --nj $num_jobs_align --cmd "$align_cmd" --use-gpu $align_use_gpu \
--transform-dir "$transform_dir" --online-ivector-dir "$online_ivector_dir" \
--iter $x $data $lang $dir $dir/ali_$time || exit 1
steps/nnet2/relabel_egs2.sh --cmd "$cmd" --iter $x $dir/ali_$time \
$prev_egs_dir $cur_egs_dir || exit 1
if $cleanup && [[ $prev_egs_dir =~ $dir/egs* ]]; then
steps/nnet2/remove_egs.sh $prev_egs_dir
fi
fi
# Set off jobs doing some diagnostics, in the background.
# Use the egs dir from the previous iteration for the diagnostics
$cmd $dir/log/compute_prob_valid.$x.log \
nnet-compute-prob $dir/$x.mdl ark:$cur_egs_dir/valid_diagnostic.egs &
$cmd $dir/log/compute_prob_train.$x.log \
nnet-compute-prob $dir/$x.mdl ark:$cur_egs_dir/train_diagnostic.egs &
if [ $x -gt 0 ] && [ ! -f $dir/log/mix_up.$[$x-1].log ]; then
[ ! -f $dir/$x.mdl ] && sleep 10;
$cmd $dir/log/progress.$x.log \
nnet-show-progress --use-gpu=no $dir/$[$x-1].mdl $dir/$x.mdl \
ark:$cur_egs_dir/train_diagnostic.egs '&&' \
nnet-am-info $dir/$x.mdl &
fi
echo "Training neural net (pass $x)"
if [ $x -gt 0 ] && \
[ $x -le $[($num_hidden_layers-1)*$add_layers_period] ] && \
[ $[($x-1) % $add_layers_period] -eq 0 ]; then
do_average=false # if we've just added a new hidden layer, don't do averaging; take the best.
mdl="nnet-init --srand=$x $dir/hidden.config - | nnet-insert $dir/$x.mdl - - | nnet-am-copy --learning-rate=$this_learning_rate - -|"
else
do_average=true
if [ $x -eq 0 ]; then do_average=false; fi # on iteration 0, pick the best, don't average.
mdl="nnet-am-copy --learning-rate=$this_learning_rate $dir/$x.mdl -|"
fi
if $do_average; then
this_minibatch_size=$minibatch_size
else
# on iteration zero or when we just added a layer, use a smaller minibatch
# size and just one job: the model-averaging doesn't seem to be helpful
# when the model is changing too fast (i.e. it worsens the objective
# function), and the smaller minibatch size will help to keep
# the update stable.
this_minibatch_size=$[$minibatch_size/2];
fi
rm $dir/.error 2>/dev/null
( # this sub-shell is so that when we "wait" below,
# we only wait for the training jobs that we just spawned,
# not the diagnostic jobs that we spawned above.
# We can't easily use a single parallel SGE job to do the main training,
# because the computation of which archive and which --frame option
# to use for each job is a little complex, so we spawn each one separately.
for n in $(seq $this_num_jobs); do
k=$[$num_archives_processed + $n - 1]; # k is a zero-based index that we'll derive
# the other indexes from.
archive=$[($k%$num_archives)+1]; # work out the 1-based archive index.
frame=$[(($k/$num_archives)%$frames_per_eg)]; # work out the 0-based frame
# index; this increases more slowly than the archive index because the
# same archive with different frame indexes will give similar gradients,
# so we want to separate them in time.
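# Example of the indexing above (hypothetical sizes): with num_archives=10 and
# frames_per_eg=8, index k=23 reads archive (23%10)+1=4 with frame (23/10)%8=2,
# while k=57 reads archive 8 with frame 5; consecutive k values cycle through
# the archives first and only then advance the frame index.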
$cmd $parallel_opts $dir/log/train.$x.$n.log \
nnet-train$parallel_suffix $parallel_train_opts \
--minibatch-size=$this_minibatch_size --srand=$x "$mdl" \
"ark:nnet-copy-egs --frame=$frame ark:$cur_egs_dir/egs.$archive.ark ark:-|nnet-shuffle-egs --buffer-size=$shuffle_buffer_size --srand=$x ark:- ark:-|" \
$dir/$[$x+1].$n.mdl || touch $dir/.error &
done
wait
)
# the error message below is not that informative, but $cmd will
# have printed a more specific one.
[ -f $dir/.error ] && echo "$0: error on iteration $x of training" && exit 1;
nnets_list=
for n in `seq 1 $this_num_jobs`; do
nnets_list="$nnets_list $dir/$[$x+1].$n.mdl"
done
if $do_average; then
# average the output of the different jobs.
$cmd $dir/log/average.$x.log \
nnet-am-average $nnets_list $dir/$[$x+1].mdl || exit 1;
else
# choose the best from the different jobs.
n=$(perl -e '($nj,$pat)=@ARGV; $best_n=1; $best_logprob=-1.0e+10; for ($n=1;$n<=$nj;$n++) {
$fn = sprintf($pat,$n); open(F, "<$fn") || die "Error opening log file $fn";
undef $logprob; while (<F>) { if (m/log-prob-per-frame=(\S+)/) { $logprob=$1; } }
close(F); if (defined $logprob && $logprob > $best_logprob) { $best_logprob=$logprob;
$best_n=$n; } } print "$best_n\n"; ' $this_num_jobs $dir/log/train.$x.%d.log) || exit 1;
[ -z "$n" ] && echo "Error getting best model" && exit 1;
cp $dir/$[$x+1].$n.mdl $dir/$[$x+1].mdl || exit 1;
fi
if [ "$mix_up" -gt 0 ] && [ $x -eq $mix_up_iter ]; then
# mix up.
echo Mixing up from $num_leaves to $mix_up components
$cmd $dir/log/mix_up.$x.log \
nnet-am-mixup --min-count=10 --num-mixtures=$mix_up \
$dir/$[$x+1].mdl $dir/$[$x+1].mdl || exit 1;
fi
rm $nnets_list
[ ! -f $dir/$[$x+1].mdl ] && exit 1;
if [ -f $dir/$[$x-1].mdl ] && $cleanup && \
[ $[($x-1)%100] -ne 0 ] && [ $[$x-1] -lt $first_model_combine ]; then
rm $dir/$[$x-1].mdl
fi
fi
x=$[$x+1]
num_archives_processed=$[$num_archives_processed+$this_num_jobs]
done
if [ $stage -le $num_iters ]; then
echo "Doing final combination to produce final.mdl"
# Now do combination.
nnets_list=()
# the if..else..fi statement below sets 'nnets_list'.
if [ $max_models_combine -lt $num_models_combine ]; then
# The number of models to combine is too large, e.g. > 20. In this case,
# each argument to nnet-combine-fast will be an average of multiple models.
cur_offset=0 # current offset from first_model_combine.
for n in $(seq $max_models_combine); do
next_offset=$[($n*$num_models_combine)/$max_models_combine]
sub_list=""
for o in $(seq $cur_offset $[$next_offset-1]); do
iter=$[$first_model_combine+$o]
mdl=$dir/$iter.mdl
[ ! -f $mdl ] && echo "Expected $mdl to exist" && exit 1;
sub_list="$sub_list $mdl"
done
nnets_list[$[$n-1]]="nnet-am-average $sub_list - |"
cur_offset=$next_offset
done
else
nnets_list=
for n in $(seq 0 $[num_models_combine-1]); do
iter=$[$first_model_combine+$n]
mdl=$dir/$iter.mdl
[ ! -f $mdl ] && echo "Expected $mdl to exist" && exit 1;
nnets_list[$n]=$mdl
done
fi
# Below, use --use-gpu=no to disable nnet-combine-fast from using a GPU, as
# if there are many models it can give out-of-memory error; set num-threads to 8
# to speed it up (this isn't ideal...)
num_egs=`nnet-copy-egs ark:$cur_egs_dir/combine.egs ark:/dev/null 2>&1 | tail -n 1 | awk '{print $NF}'`
mb=$[($num_egs+$combine_num_threads-1)/$combine_num_threads]
[ $mb -gt 512 ] && mb=512
# Setting --initial-model to a large value makes it initialize the combination
# with the average of all the models. It's important not to start with a
# single model, or, due to the invariance to scaling that these nonlinearities
# give us, we get zero diagonal entries in the fisher matrix that
# nnet-combine-fast uses for scaling, which after flooring and inversion, has
# the effect that the initial model chosen gets much higher learning rates
# than the others. This prevents the optimization from working well.
$cmd $combine_parallel_opts $dir/log/combine.log \
nnet-combine-fast --initial-model=100000 --num-lbfgs-iters=40 --use-gpu=no \
--num-threads=$combine_num_threads \
--verbose=3 --minibatch-size=$mb "${nnets_list[@]}" ark:$cur_egs_dir/combine.egs \
$dir/final.mdl || exit 1;
# Normalize stddev for affine or block affine layers that are followed by a
# pnorm layer and then a normalize layer.
$cmd $dir/log/normalize.log \
nnet-normalize-stddev $dir/final.mdl $dir/final.mdl || exit 1;
# Compute the probability of the final, combined model with
# the same subset we used for the previous compute_probs, as the
# different subsets will lead to different probs.
$cmd $dir/log/compute_prob_valid.final.log \
nnet-compute-prob $dir/final.mdl ark:$cur_egs_dir/valid_diagnostic.egs &
$cmd $dir/log/compute_prob_train.final.log \
nnet-compute-prob $dir/final.mdl ark:$cur_egs_dir/train_diagnostic.egs &
fi
if [ $stage -le $[$num_iters+1] ]; then
echo "Getting average posterior for purposes of adjusting the priors."
# Note: this just uses CPUs, using a smallish subset of data.
rm $dir/post.$x.*.vec 2>/dev/null
$cmd JOB=1:$num_jobs_compute_prior $dir/log/get_post.$x.JOB.log \
nnet-copy-egs --frame=random --srand=JOB ark:$cur_egs_dir/egs.1.ark ark:- \| \
nnet-subset-egs --srand=JOB --n=$prior_subset_size ark:- ark:- \| \
nnet-compute-from-egs "nnet-to-raw-nnet $dir/final.mdl -|" ark:- ark:- \| \
matrix-sum-rows ark:- ark:- \| vector-sum ark:- $dir/post.$x.JOB.vec || exit 1;
sleep 3; # make sure there is time for $dir/post.$x.*.vec to appear.
$cmd $dir/log/vector_sum.$x.log \
vector-sum $dir/post.$x.*.vec $dir/post.$x.vec || exit 1;
rm $dir/post.$x.*.vec;
echo "Re-adjusting priors based on computed posteriors"
$cmd $dir/log/adjust_priors.final.log \
nnet-adjust-priors $dir/final.mdl $dir/post.$x.vec $dir/final.mdl || exit 1;
fi
if [ ! -f $dir/final.mdl ]; then
echo "$0: $dir/final.mdl does not exist."
# we don't want to clean up if the training didn't succeed.
exit 1;
fi
sleep 2
echo Done
if $cleanup; then
echo Cleaning up data
if [[ $cur_egs_dir =~ $dir/egs* ]]; then
steps/nnet2/remove_egs.sh $cur_egs_dir
fi
echo Removing most of the models
for x in `seq 0 $num_iters`; do
if [ $[$x%100] -ne 0 ] && [ $x -ne $num_iters ] && [ -f $dir/$x.mdl ]; then
# delete all but every 100th model; don't delete the ones which combine to form the final model.
rm $dir/$x.mdl
fi
done
fi
|
thorsonlinguistics/german-neutralization
|
steps/nnet2/train_pnorm_accel2.sh
|
Shell
|
apache-2.0
| 29,667 |
#!/bin/bash -eu
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Main deploy functions for the continuous build system.
# Just source this file and use the various methods:
# bazel_build: builds bazel and runs all of its tests
# bazel_release: uses the artifacts generated by bazel_build and pushes
# them to GitHub for a release and to GCS for a release candidate.
# Also prepares an email announcing the release.
# Load common.sh
SCRIPT_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
source $(dirname ${SCRIPT_DIR})/release/common.sh
: ${GIT_REPOSITORY_URL:=https://github.com/bazelbuild/bazel}
: ${GCS_BASE_URL:=https://storage.googleapis.com}
: ${GCS_BUCKET:=bucket-o-bazel}
: ${EMAIL_TEMPLATE_RC:=${SCRIPT_DIR}/rc_email.txt}
: ${EMAIL_TEMPLATE_RELEASE:=${SCRIPT_DIR}/release_email.txt}
: ${RELEASE_CANDIDATE_URL:="${GCS_BASE_URL}/${GCS_BUCKET}/%release_name%/rc%rc%/index.html"}
: ${RELEASE_URL:="${GIT_REPOSITORY_URL}/releases/tag/%release_name%"}
set -eu
PLATFORM="$(uname -s | tr 'A-Z' 'a-z')"
if [[ ${PLATFORM} == "darwin" ]]; then
function checksum() {
shasum -a 256 $1 | cut -f 1 -d " "
}
else
function checksum() {
sha256sum $1 | cut -f 1 -d " "
}
fi
GIT_ROOT="$(git rev-parse --show-toplevel)"
BUILD_SCRIPT_PATH="${GIT_ROOT}/compile.sh"
# Returns the full release name in the form NAME(rcRC)?
function get_full_release_name() {
local rc=$(get_release_candidate)
local name=$(get_release_name)
if [ -n "${rc}" ]; then
echo "${name}rc${rc}"
else
echo "${name}"
fi
}
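# Example (hypothetical values): if get_release_name prints "0.1.0" and
# get_release_candidate prints "2", this echoes "0.1.0rc2"; if there is no
# release candidate, it echoes just "0.1.0".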
function setup_android_repositories() {
if [ ! -f WORKSPACE.bak ] && [ -n "${ANDROID_SDK_PATH-}" ]; then
cp WORKSPACE WORKSPACE.bak
trap '[ -f WORKSPACE.bak ] && rm WORKSPACE && mv WORKSPACE.bak WORKSPACE' \
EXIT
cat >>WORKSPACE <<EOF
android_sdk_repository(
name = "androidsdk",
path = "${ANDROID_SDK_PATH}",
build_tools_version = "${ANDROID_SDK_BUILD_TOOLS_VERSION:-22.0.1}",
api_level = ${ANDROID_SDK_API_LEVEL:-21},
)
bind(
name = "android_sdk_for_testing",
actual = "@androidsdk//:files",
)
EOF
if [ -n "${ANDROID_NDK_PATH-}" ]; then
cat >>WORKSPACE <<EOF
android_ndk_repository(
name = "androidndk",
path = "${ANDROID_NDK_PATH}",
api_level = ${ANDROID_NDK_API_LEVEL:-21},
)
bind(
name = "android_ndk_for_testing",
actual = "@androidndk//:files",
)
EOF
fi
fi
}
# Main entry point for building bazel.
# It sets the embed label to the release name if any, runs the whole
# test suite, compiles the various packages, then copies the artifacts
# to the folder given in $1.
function bazel_build() {
local release_label="$(get_full_release_name)"
local embed_label_opts=
setup_android_repositories
if [ -n "${release_label}" ]; then
export EMBED_LABEL="${release_label}"
fi
${BUILD_SCRIPT_PATH} ${BAZEL_COMPILE_TARGET:-all} || exit $?
# Build the packages
./output/bazel --bazelrc=${BAZELRC:-/dev/null} --nomaster_bazelrc build \
--embed_label=${release_label} --stamp \
--workspace_status_command=scripts/ci/build_status_command.sh \
//scripts/packages/...
# Copy the results to the output directory
mkdir -p $1/packages
cp output/bazel $1/bazel
cp bazel-bin/scripts/packages/install.sh $1/bazel-${release_label}-installer.sh
cp bazel-genfiles/scripts/packages/README.md $1/README.md
}
# Generate a string from a template and a list of substitutions.
# The first parameter is the template string and each subsequent pair of
# parameters is taken as a couple: the first is the string to substitute
# and the second is the value to substitute it with.
function generate_from_template() {
local value="$1"
shift
while (( $# >= 2 )); do
value="${value//$1/$2}"
shift 2
done
echo "${value}"
}
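# Usage sketch (illustrative values only):
#   generate_from_template "Bazel %release_name% (rc%rc%)" "%release_name%" "0.1.0" "%rc%" "2"
# prints "Bazel 0.1.0 (rc2)"; substitutions are applied in the order the pairs
# are given, using bash ${value//pattern/replacement} expansion.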
# Generate the email for the release.
# The first line of the output will be the recipient, the second line
# the mail subject and the subsequent lines the mail content.
# If no release is planned, this function's output will be empty.
function generate_email() {
local release_name=$(get_release_name)
local rc=$(get_release_candidate)
local args=(
"%release_name%" "${release_name}"
"%rc%" "${rc}"
"%relnotes%" "# $(git_commit_msg)"
)
if [ -n "${rc}" ]; then
args+=(
"%url%"
"$(generate_from_template "${RELEASE_CANDIDATE_URL}" "${args[@]}")"
)
generate_from_template "$(cat ${EMAIL_TEMPLATE_RC})" "${args[@]}"
elif [ -n "${release_name}" ]; then
args+=(
"%url%"
"$(generate_from_template "${RELEASE_URL}" "${args[@]}")"
)
generate_from_template "$(cat ${EMAIL_TEMPLATE_RELEASE})" "${args[@]}"
fi
}
# Deploy a github release using a third party tool:
# https://github.com/c4milo/github-release
# This method expects the following arguments:
# $1..$n files generated by package_build (should not contain the README file)
# Please set GITHUB_TOKEN to talk to the Github API and GITHUB_RELEASE
# for the path to the https://github.com/c4milo/github-release tool.
# This method is also affected by GIT_REPOSITORY_URL which should be the
# URL to the github repository (defaulted to https://github.com/bazelbuild/bazel).
function release_to_github() {
local url="${GIT_REPOSITORY_URL}"
local release_name=$(get_release_name)
local rc=$(get_release_candidate)
local release_tool="${GITHUB_RELEASE:-$(which github-release 2>/dev/null || true)}"
if [ ! -x "${release_tool}" ]; then
echo "Please set GITHUB_RELEASE to the path to the github-release binary." >&2
echo "This probably means you haven't installed https://github.com/c4milo/github-release " >&2
echo "on this machine." >&2
return 1
fi
local github_repo="$(echo "$url" | sed -E 's|https?://github.com/([^/]*/[^/]*).*$|\1|')"
if [ -n "${release_name}" ] && [ -z "${rc}" ]; then
mkdir -p "${tmpdir}/to-github"
cp "${@}" "${tmpdir}/to-github"
"${GITHUB_RELEASE}" "${github_repo}" "${release_name}" "" "# $(git_commit_msg)" "${tmpdir}/to-github/"'*'
fi
}
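# Note: the sed above reduces the repository URL to the "owner/repo" form the
# github-release tool expects, e.g. the default https://github.com/bazelbuild/bazel
# becomes "bazelbuild/bazel". The function only publishes when a release name is
# set and there is no release candidate number (i.e. a full release).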
# Creates an index of the files contained in folder $1 in markdown format
function create_index_md() {
# First, add the README.md
local file=$1/__temp.md
if [ -f $1/README.md ]; then
cat $1/README.md
fi
# Then, add the list of files
echo
echo "## Index of files"
echo
for f in $1/*.sha256; do # just list the sha256 ones
local filename=$(basename $f .sha256);
echo " - [${filename}](${filename}) [[SHA-256](${filename}.sha256)]"
done
}
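# Example output (assuming the folder holds foo-installer.sh and
# foo-installer.sh.sha256): after the optional README.md contents, it prints an
# "## Index of files" section containing a line such as
# - [foo-installer.sh](foo-installer.sh) [[SHA-256](foo-installer.sh.sha256)]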
# Creates an index of the files contained in folder $1 in HTML format
# It assumes hoedown (https://github.com/hoedown/hoedown) is on the PATH;
# if not, set the HOEDOWN environment variable to its location.
function create_index_html() {
local hoedown="${HOEDOWN:-$(which hoedown 2>/dev/null || true)}"
# The second line is there to trick hoedown into behaving like GitHub
create_index_md "${@}" \
| sed -E 's/^(Baseline.*)$/\1\
/' | sed 's/^ + / - /' \
| "${hoedown}"
}
# Deploy a release candidate to Google Cloud Storage.
# It requires gsutil to be installed. You can force the path to gsutil
# by setting the GSUTIL environment variable. GCS_BUCKET should be the
# name of the Google Cloud Storage bucket to deploy to.
# This method expects the following arguments:
# $1..$n files generated by package_build
function release_to_gcs() {
local gs="${GSUTIL:-$(which gsutil 2>/dev/null || true)}"
local release_name=$(get_release_name)
local rc=$(get_release_candidate)
if [ ! -x "${gs}" ]; then
echo "Please set GSUTIL to the path the gsutil binary." >&2
echo "gsutil (https://cloud.google.com/storage/docs/gsutil/) is the" >&2
echo "command-line interface to google cloud." >&2
return 1
fi
if [ -z "${GCS_BUCKET-}" ]; then
echo "Please set GCS_BUCKET to the name of your Google Cloud Storage bucket." >&2
return 1
fi
if [ -n "${release_name}" ] && [ -n "${rc}" ]; then
# Make a temporary folder with the desired structure
local dir="$(mktemp -d ${TMPDIR:-/tmp}/tmp.XXXXXXXX)"
local prev_dir="$PWD"
trap "{ cd ${prev_dir}; rm -fr ${dir}; }" EXIT
mkdir -p "${dir}/${release_name}/rc${rc}"
cp "${@}" "${dir}/${release_name}/rc${rc}"
# Add a index.html file:
create_index_html "${dir}/${release_name}/rc${rc}" \
>"${dir}/${release_name}/rc${rc}"/index.html
cd ${dir}
"${gs}" cp -a public-read -r . "gs://${GCS_BUCKET}"
cd ${prev_dir}
rm -fr ${dir}
trap - EXIT
fi
}
# A wrapper around the release deployment methods.
function deploy_release() {
local github_args=()
# Filters out README.md for github releases
for i in "$@"; do
if ! [[ "$i" =~ README.md$ ]]; then
github_args+=("$i")
fi
done
release_to_github "${github_args[@]}"
release_to_gcs "$@"
}
# A wrapper for the whole release phase:
# Compute the SHA-256 checksums and arrange the input files
# Deploy the release
# Generate the email
# Input: $1 $2 [$3 $4 [$5 $6 ...]]
# Each pair is a couple (platform, folder) where platform is the platform
# the artifacts were built for and folder is the directory containing the
# artifacts for that platform.
# Outputs:
# RELEASE_EMAIL_RECIPIENT: who to send a mail to
# RELEASE_EMAIL_SUBJECT: the subject of the email to be sent
# RELEASE_EMAIL_CONTENT: the content of the email to be sent
function bazel_release() {
local README=$2/README.md
tmpdir=$(mktemp -d ${TMPDIR:-/tmp}/tmp.XXXXXXXX)
trap 'rm -fr ${tmpdir}' EXIT
while (( $# > 1 )); do
local platform=$1
local folder=$2
shift 2
for file in $folder/*; do
if [ $(basename $file) != README.md ]; then
if [[ "$file" =~ /([^/]*)(\.[^\./]+)$ ]]; then
local destfile=${tmpdir}/${BASH_REMATCH[1]}-${platform}${BASH_REMATCH[2]}
else
local destfile=${tmpdir}/$(basename $file)-${platform}
fi
mv $file $destfile
checksum $destfile > $destfile.sha256
fi
done
done
deploy_release $README $(find ${tmpdir} -type f)
export RELEASE_EMAIL="$(generate_email)"
export RELEASE_EMAIL_RECIPIENT="$(echo "${RELEASE_EMAIL}" | head -1)"
export RELEASE_EMAIL_SUBJECT="$(echo "${RELEASE_EMAIL}" | head -2 | tail -1)"
export RELEASE_EMAIL_CONTENT="$(echo "${RELEASE_EMAIL}" | tail -n +3)"
}
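# Hypothetical invocation (platform names and folders are only examples):
#   bazel_release linux-x86_64 output/ci/linux darwin-x86_64 output/ci/darwin
# Each artifact is renamed with its platform suffix before deployment, e.g.
# "bazel" becomes "bazel-linux-x86_64" and "bazel-0.1.0-installer.sh" becomes
# "bazel-0.1.0-installer-linux-x86_64.sh", each with a matching .sha256 file.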
|
manashmndl/bazel
|
scripts/ci/build.sh
|
Shell
|
apache-2.0
| 10,781 |
#!/bin/bash
cd `dirname $0`
cd ..
function getVersion {
local major=$(sed -nE 's/versionMajor = ([0-9]+);/\1/p' src/libclient/client.h | awk '{print $4}')
local minor=$(sed -nE 's/versionMinor = ([0-9]+);/\1/p' src/libclient/client.h | awk '{print $4}')
local patch=$(sed -nE 's/versionPatch = ([0-9]+);/\1/p' src/libclient/client.h | awk '{print $4}')
echo "$major.$minor.$patch"
}
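# Note: the extraction above assumes client.h declares the version as plain
# integer constants, e.g. a line like "static const int versionMajor = 1;"
# (sed reduces the assignment to the bare number and awk then picks it up as
# the fourth whitespace-separated field). If the declaration format changes,
# this parsing needs to be adjusted.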
MAINTAINER="Christoph Keller <[email protected]>"
VERSION=$(getVersion)
TIMESTAMP=$(date -R)
BUILDNUMBER=$1
# Debian/Ubuntu/Mint
cd debian
if [ "$BUILDNUMBER" = "" ]; then
echo "You have no BUILDNUMBER variable set, exiting"
exit 1
fi
echo "glimpse ($VERSION-$BUILDNUMBER) unstable; urgency=low" > changelog
echo "" >> changelog
echo " * Snapshot build #$BUILDNUMBER" >> changelog
echo "" >> changelog
echo " -- $MAINTAINER $TIMESTAMP" >> changelog
echo "Changelog for build #$BUILDNUMBER written."
# Fedora/CentOS/OpenSuse
cd ..
sed "s/^Version:.*/Version: $VERSION/" -i glimpse_client.spec
sed "s/^Release:.*/Release: $BUILDNUMBER/" -i glimpse_client.spec
# Archlinux
sed "s/^pkgver=.*/pkgver=$VERSION.$BUILDNUMBER/" -i PKGBUILD
# Windows
sed -E "s/(#define MyAppVersion) \"(.*)\"/\1 \"$VERSION-$BUILDNUMBER\"/" -i setup/glimpse.iss
# Android
sed -E "s/versionName=\"[^\"]+\"/versionName=\"$VERSION\"/" -i src/mobile/android/AndroidManifest.xml
sed -E "s/versionCode=\"[^\"]+\"/versionCode=\"$BUILDNUMBER\"/" -i src/mobile/android/AndroidManifest.xml
|
HSAnet/glimpse_client
|
buildscripts/update-changelog.sh
|
Shell
|
bsd-3-clause
| 1,468 |
#!/bin/bash
# Description: Script to fetch requirements for building this project.
# Last-modified: 2010-12-02 01:28:31
#
# Note that this script is not perfect and does not handle all errors.
# Any improvements are welcome.
# Either Git or Subversion is needed to retrieve Theos.
GIT=$(type -P git)
SVN=$(type -P svn)
if [ -z "$GIT" -a -z "$SVN" ]; then
echo "ERROR: This script requires either 'git' or 'svn' to be installed."
exit 1
fi
# Either wget or curl is needed to download package list and ldid.
WGET=$(type -P wget)
CURL=$(type -P curl)
if [ -z "$WGET" -a -z "$CURL" ]; then
echo "ERROR: This script requires either 'wget' or 'curl' to be installed."
exit 1
fi
# Download Theos
echo "Downloading Theos..."
if [ ! -z "$GIT" ]; then
git clone --quiet git://github.com/DHowett/theos.git theos
else
svn co http://svn.howett.net/svn/theos/trunk theos
fi
# Download MobileSubstrate header
echo "Downloading MobileSubstrate header..."
SUBSTRATE_REPO="http://apt.saurik.com"
pkg=""
if [ ! -z "$WGET" ]; then
wget -q "${SUBSTRATE_REPO}/dists/tangelo-3.7/main/binary-iphoneos-arm/Packages.bz2"
pkg_path=$(bzcat Packages.bz2 | grep "debs/mobilesubstrate" | awk '{print $2}')
pkg=$(basename $pkg_path)
wget -q "${SUBSTRATE_REPO}/${pkg_path}"
else
curl -s -L "${SUBSTRATE_REPO}/dists/tangelo-3.7/main/binary-iphoneos-arm/Packages.bz2" > Packages.bz2
pkg_path=$(bzcat Packages.bz2 | grep "debs/mobilesubstrate" | awk '{print $2}')
pkg=$(basename $pkg_path)
curl -s -L "${SUBSTRATE_REPO}/${pkg_path}" > $pkg
fi
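# The Packages index parsed above contains, for the MobileSubstrate package, a
# line of the form (version shown is only an example):
#   Filename: ./debs/mobilesubstrate_0.9.3999_iphoneos-arm.deb
# awk '{print $2}' extracts that path and basename reduces it to the local .deb
# file name that the ar command below extracts from.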
ar -p $pkg data.tar.gz | tar -zxf - ./Library/Frameworks/CydiaSubstrate.framework/Headers/CydiaSubstrate.h
mv ./Library/Frameworks/CydiaSubstrate.framework/Headers/CydiaSubstrate.h theos/include/substrate.h
rm -rf usr Packages.bz2 $pkg
# Download ldid
echo "Downloading ldid..."
if [ "$(uname)" == "Darwin" ]; then
if [ ! -z "$WGET" ]; then
wget -q http://dl.dropbox.com/u/3157793/ldid
else
curl -s http://dl.dropbox.com/u/3157793/ldid > ldid
fi
mv ldid theos/bin/ldid
chmod +x theos/bin/ldid
else
echo "... No pre-built version of ldid is available for your system."
echo "... You will need to provide your own copy of ldid."
fi
# Check if .deb creation tools are available (optional)
echo "Checking for dpkg-deb..."
if [ -z "$(type -P dpkg-deb)" ]; then
echo "... dpkg-deb not found."
echo "... If you wish to create a .deb package, you will need the 'dpkg-deb' tool."
fi
echo "Done."
|
r-plus/MultiIconMover
|
get_requirements.sh
|
Shell
|
bsd-3-clause
| 2,511 |
#!/usr/bin/expect
##
## configure ZodiacFX with recommended settings.
##
# Serial port assigned to ZodiacFX
set port /dev/ttyACM0
# ZodiacFX network settings
set configip "10.0.1.99"
set confignetmask "255.255.255.0"
set configgateway "10.0.1.1"
# OpenFlow controller network settings
set configofcontroller "10.0.1.8"
set configofport 6653
set timeout 5
set prompt {Zodiac_FX\#}
set configprompt {Zodiac_FX\(config\)\#}
set spawned [spawn -open [open $port w+]]
send_user "get initial prompt\n"
send "\r"
send "\r"
expect -re $prompt
send_user "found initial prompt\n"
send "config\r"
expect -re $configprompt
send_user "setting ethertype-filter\n"
send "set ethertype-filter enable\r"
expect -re $configprompt
send_user "setting IP address\n"
send "set ip-address $configip\r"
expect -re $configprompt
send "set netmask $confignetmask\r"
expect -re $configprompt
send "set gateway $configgateway\r"
expect -re $configprompt
send_user "setting OF controller\n"
send "set of-controller $configofcontroller\r"
expect -re $configprompt
send "set of-port $configofport\r"
expect -re $configprompt
send_user "save configuration\n"
send "show config\r"
expect -re $configprompt
send "save\r"
expect -re $configprompt
send "exit\r"
expect -re $prompt
send "restart\r"
expect -re "Restarting"
|
trungdtbk/faucet
|
docs/vendors/northboundnetworks/conf-zodiac.sh
|
Shell
|
apache-2.0
| 1,292 |
#!/bin/bash
# ----------------------------------------------------------------------------
# OGRE Documentation Generation Script
#
# This script generates the manuals and APIs from source files in this folder
# To run this script, you require:
# 1. Doxygen
# 2. Graphviz
# 3. texi2html
# Run from the Docs folder. For example:
# ./src/makedocs.sh
# ----------------------------------------------------------------------------
# Generate API docs using doxygen
doxygen src/html.cfg
# Remove old manual
rm -rf manual vbo-update
# Generate manuals from texi
for f in src/*.texi;
do
texi2html -Verbose --css-include=style.css --output=`basename $f .texi` -split=node -top_file=index.html $f
done
# Copy stylesheet to core docs folder
cp src/style.css .
# Copy images to the manual folder
mkdir -p manual/images
cp src/images/* manual/images/
|
MTASZTAKI/ApertusVR
|
plugins/render/ogreRender/3rdParty/ogre/Docs/src/makedocs.sh
|
Shell
|
mit
| 854 |
python MHDfluid.py
python MHDfluid3d.py
|
wathen/PhD
|
MHD/FEniCS/MHD/Stabilised/SaddlePointForm/Test/SplitMatrix/test.sh
|
Shell
|
mit
| 40 |
#! /bin/sh
awk=$1
prog=$2
infile=$3
out=$4
# GLIBC gives us ja_JP.EUC-JP but Mac OS X uses ja_JP.eucJP
cp $infile $out # set up default
for locale in ja_JP.EUC-JP ja_JP.eucJP
do
if locale -a 2>/dev/null | grep $locale > /dev/null
then
LANG=$locale
LC_ALL=$locale
export LANG LC_ALL
$awk -f $prog $infile >$out 2>&1 || echo EXIT CODE: $? >> $out
fi
done
|
mwcampbell/gawk
|
test/jarebug.sh
|
Shell
|
gpl-3.0
| 369 |