| code (string, 2-1.05M chars) | repo_name (string, 5-110 chars) | path (string, 3-922 chars) | language (1 class) | license (15 classes) | size (int64, 2-1.05M) |
|---|---|---|---|---|---|
#!/bin/bash
THRIFT_DIR=src_thrift
echo "-------------------------------------------------------------------------"
echo "1. generate files"
echo "-------------------------------------------------------------------------"
thrift -o $THRIFT_DIR -strict -r -v --gen erl $THRIFT_DIR/erlang_python.thrift
thrift -out src_py -strict -r -v --gen py $THRIFT_DIR/erlang_python.thrift
echo "-------------------------------------------------------------------------"
echo "2. list content of generated file directories"
echo "-------------------------------------------------------------------------"
ls -l $THRIFT_DIR/gen-erl
echo "-------------------------------------------------------------------------"
echo "3. move the generated files"
echo "-------------------------------------------------------------------------"
mkdir -p include
mv $THRIFT_DIR/gen-erl/*.erl src
mv $THRIFT_DIR/gen-erl/*.hrl include
echo "-------------------------------------------------------------------------"
echo "4. delete generated file directories"
echo "-------------------------------------------------------------------------"
rm -r $THRIFT_DIR/gen-erl
ls -l $THRIFT_DIR
| walter-weinmann/annlink_thrift | thrift_compile.sh | Shell | apache-2.0 | 1,167 |
#!/bin/sh
java -cp "./common-1.0-SNAPSHOT-shaded.jar:/etc/hadoop/conf:/usr/lib/hadoop/client/*" com.caseystella.util.common.hadoop.ingest.AtomicPut "$@"
| cestella/analytics_util | components/common/src/main/bash/atomic_put.sh | Shell | apache-2.0 | 154 |
#!/usr/bin/env bash
fission env create --name nodejs --image fission/node-env:0.3.0
kubectl apply -f https://raw.githubusercontent.com/fission/functions/master/slack/function.yaml
fission fn create --name tempconv --env nodejs --deploy ./tempconv.js
fission fn create --name wunderground-conditions --env nodejs --deploy ./wunderground-conditions.js
fission fn create --name formdata2json --env nodejs --deploy ./formdata2json.js
fission fn create --name slackweather --env workflow --src ./slackweather.wf.yaml
fission fn create --name slackslashweather --env workflow --src ./slackslashweather.wf.yaml
| fission/fission-workflows | examples/slackweather/deploy.sh | Shell | apache-2.0 | 607 |
#!/bin/bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
export CGO_ENABLED=0
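# Turn each package directory passed as an argument into a recursive Go
# package pattern, e.g. "pkg cmd" becomes "./pkg/... ./cmd/...".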
TARGETS=$(for d in "$@"; do echo ./$d/...; done)
echo "Running tests:"
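# ${VAR+x} expands to "x" only when VAR is set, so the branch below keys off
# whether a JUNIT_REPORT path was provided in the environment.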
if [ -z "${JUNIT_REPORT+x}" ]; then
go test -cover -pkgdir=${GOPATH}/pkg ${TARGETS}
else
go test -v -pkgdir=${GOPATH}/pkg ${TARGETS} | go-junit-report > ${JUNIT_REPORT}
fi
if [ -n "${COBERTURA_REPORT+x}" ]; then
gocov test ${TARGETS} | gocov-xml > ${COBERTURA_REPORT}
fi
echo
echo -n "Checking gofmt: "
ERRS=$(find "$@" -type f -name \*.go | xargs gofmt -l 2>&1 || true)
if [ -n "${ERRS}" ]; then
echo "FAIL - the following files need to be gofmt'ed:"
for e in ${ERRS}; do
echo " $e"
done
echo
exit 1
fi
echo "PASS"
echo
echo -n "Checking go vet: "
ERRS=$(go vet ${TARGETS} 2>&1 || true)
if [ -n "${ERRS}" ]; then
echo "FAIL"
echo "${ERRS}"
echo
exit 1
fi
echo "PASS"
echo
| Skatteetaten/aoc | build/test.sh | Shell | apache-2.0 | 1,481 |
#!/bin/bash
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
WORK_DIR="$(dirname "$0")"
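# Usage sketch (inferred from the cases below):
#   spark-shell-client.sh [start|stop|-h|--help] [args...]
# Any other invocation is forwarded to spark-shell.sh inside the running
# spark-client pod.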
case "$1" in
start)
shift 1
sh ${WORK_DIR}/spark-client.sh start "$@"
;;
stop)
shift 1
sh ${WORK_DIR}/spark-client.sh stop "$@"
;;
-h|--help)
shift 1 # past argument
sh ${WORK_DIR}/spark-client.sh --help "$@"
;;
*)
kubectl exec --stdin --tty spark-client -- /bin/bash ./spark-shell.sh "$@"
;;
esac
| Intel-bigdata/OAP | dev/kubernetes/spark/spark-shell-client.sh | Shell | apache-2.0 | 1,174 |
gunicorn -k flask_sockets.worker chat:app
| abramhindle/WebSocketsExamples | run-chat.sh | Shell | apache-2.0 | 42 |
#!/bin/bash -l
#SBATCH --job-name="Mallet Pubmed"
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --ntasks-per-node=1
#SBATCH --mem=4096
#SBATCH --time=30:30:00
#SBATCH --partition=batch
#SBATCH --mail-type=ALL
#SBATCH [email protected]
#SBATCH --output=/nfs4/bbp.epfl.ch/user/mazimmer/slurm-mallet-pubmed-stdout.log
#SBATCH --error=/nfs4/bbp.epfl.ch/user/mazimmer/slurm-mallet-pubmed-stderr.log
# In case there are per-group custom initialization files
#. /nfs4/bbp.epfl.ch/group/visualization/module/modules.bash
# Load your required module files here
#module load MODULE_TO_BE_LOADED
# To avoid Kerberos tickets becoming expired, run the following in
# the background to check every 30min and try to renew it
krenew -b -K 30
echo "On which node your job has been scheduled :"
echo $SLURM_JOB_NODELIST
echo "Print current shell limits :"
ulimit -a
./mallet_eval.sh $HOME/private/corpora/pubmed 100
| BlueBrain/bluima | modules/bluima_topic_models/scripts/cluster/evaluation/mallet_pubmed.sh | Shell | apache-2.0 | 924 |
#!/bin/sh
# SUMMARY: Test build and insertion of kernel modules
# LABELS:
# REPEAT:
set -e
# Source libraries. Uncomment if needed/defined
#. "${RT_LIB}"
. "${RT_PROJECT_ROOT}/_lib/lib.sh"
NAME=kmod
IMAGE_NAME=kmod-test
clean_up() {
docker rmi ${IMAGE_NAME} || true
rm -rf ${NAME}-*
}
trap clean_up EXIT
# Make sure we have the latest kernel image
docker pull linuxkit/kernel:5.3.2
# Build a package
docker build -t ${IMAGE_NAME} .
# Build and run a LinuxKit image with kernel module (and test script)
linuxkit build -format kernel+initrd -name "${NAME}" test.yml
RESULT="$(linuxkit run ${NAME})"
echo "${RESULT}" | grep -q "Hello LinuxKit"
exit 0
| yankcrime/linuxkit | test/cases/020_kernel/020_kmod_5.3.x/test.sh | Shell | apache-2.0 | 657 |
#!/bin/bash
# Copyright 2014-2016 Samsung Research America, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
TESTS=src/test/resources/testinput/endtoend/
PERFTESTS=src/test/resources/testinput/perf/
#OUT1=~/phone-bench/benchmarks-endtoend
#OUT2=~/phone-bench/benchmarks-perf
rm -rf ./phone-img
mkdir phone-img
BASE=`pwd`/phone-img
OUT2=$BASE/benchmarks-perf
mkdir $OUT2
gradle depJar
make -C src/main/resources/backend tizen
pushd $BASE
git clone https://github.com/ivmai/bdwgc boehm_hf
pushd boehm_hf
git clone https://github.com/ivmai/libatomic_ops
popd
popd
#for b in `ls -1 $TESTS | cut -f 1 -d '.'`;
#do
# echo Benchmarking $TESTS/$b.js
# cp $TESTS/$b.js $OUT1/$b.js;
# ./benchmark_phone.sh $OUT1/$b.js;
#done
for b in `ls -1 $PERFTESTS | cut -f 1 -d '.'`;
do
echo Benchmarking $PERFTESTS/$b.js
cp $PERFTESTS/$b.js $OUT2/$b.js;
./benchmark_phone.sh $OUT2/$b.js;
done
#cp external/gc/boehm_arm/extra/gc.o $OUT1/
##cp external/gc/boehm_arm/extra/gc.o $OUT2/
#cp armeabi.sh $OUT1/
cp armeabi.sh $OUT2/
#cp -r src/main/resources/backend $OUT1/backend
cp -r src/main/resources/backend $OUT2/backend
#cp phone_armbench.sh $OUT1/
cp phone_armbench.sh $OUT2/
#cp phone_nodebench.sh $OUT1/
cp phone_nodebench.sh $OUT2/
#cp -r external/gc/arm_tizen $OUT1/arm_tizen_gc
#cp -r external/gc/arm_tizen $OUT2/arm_tizen_gc
#cp phonebuild.sh $OUT1/
cp phonebuild.sh $OUT2/
cp phonegc.sh $BASE/
| csgordon/SJS | sjsc/phonebench.sh | Shell | apache-2.0 | 1,902 |
#!/bin/bash
PROTO_DEPS_DIR=$1
BUILD_DIR=$2
PROJECT_DIR=$(cd "$(dirname "$BASH_SOURCE")"; pwd)
PROTO_DIR=${PROJECT_DIR}/include/pb
mkdir -p ${BUILD_DIR}/include
${PROTO_DEPS_DIR}/bin/protoc --proto_path=${PROTO_DEPS_DIR}/include --proto_path=${PROJECT_DIR}/include \
--cpp_out=${BUILD_DIR}/include ${PROJECT_DIR}/include/yaraft/pb/raft.proto
| neverchanje/yaraft | compile_proto.sh | Shell | apache-2.0 | 344 |
#!/bin/bash
# Copyright 2016 Port Direct
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
echo "${OS_DISTRO}: Configuring database connection"
################################################################################
. /etc/os-container.env
. /opt/harbor/service-hosts.sh
. /opt/harbor/harbor-common.sh
. /opt/harbor/cinder/vars.sh
################################################################################
check_required_vars CINDER_CONFIG_FILE \
OS_DOMAIN \
CINDER_MARIADB_SERVICE_HOST_SVC \
CINDER_MARIADB_SERVICE_PORT \
CINDER_DB_CA \
CINDER_DB_KEY \
CINDER_DB_CERT \
AUTH_CINDER_DB_USER \
AUTH_CINDER_DB_PASSWORD \
AUTH_CINDER_DB_NAME
################################################################################
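# Write the TLS-enabled MySQL connection string into the [database] section
# of the cinder configuration file.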
crudini --set ${CINDER_CONFIG_FILE} database connection \
"mysql://${AUTH_CINDER_DB_USER}:${AUTH_CINDER_DB_PASSWORD}@${CINDER_MARIADB_SERVICE_HOST_SVC}:${CINDER_MARIADB_SERVICE_PORT}/${AUTH_CINDER_DB_NAME}?charset=utf8&ssl_ca=${CINDER_DB_CA}&ssl_key=${CINDER_DB_KEY}&ssl_cert=${CINDER_DB_CERT}&ssl_verify_cert"
| portdirect/harbor | docker/openstack/openstack-cinder/common-assets/opt/harbor/cinder/config-database.sh | Shell | apache-2.0 | 1,735 |
#!/bin/bash
# Note: BASH_SOURCE[1] resolves the caller's location, so this only finds the directory where the invoking bash command started
current_dir=$( cd "$( dirname "${BASH_SOURCE[1]}" )" && pwd )
echo $current_dir
source_dir=$(dirname $current_dir/$1)
echo $(dirname $current_dir)
g++ $source_dir"/cpp/sort_data.cpp" -o $source_dir"/cpp/cleandata" -std=c++0x
g++ $source_dir"/cpp/generate_retrieval_code.cpp" -o $source_dir"/cpp/generate_retrieval_code" -std=c++0x
python -m py_compile $source_dir"/quote.py" $source_dir"/compete.py"
chmod a+x $source_dir"/quote_start.sh"
chmod a+x $source_dir"/compete_start.sh"
| xiatian122/stockquote | Makefile.sh | Shell | apache-2.0 | 574 |
#!/bin/bash
set -o errexit
CONTEXT_PATH=""
[ -z "$DATABASE_URL" ] && echo "No DATABASE_URL provided" && exit 1
urldecode() {
local data=${1//+/ }
printf '%b' "${data//%/\\x}"
}
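# Worked example of the idiom above (illustrative): urldecode "p%40ss+word"
# prints "p@ss word" - '+' becomes a space and each %XX escape is rewritten
# to a \xXX sequence that printf '%b' expands.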
parse_url() {
local prefix=DATABASE
[ -n "$2" ] && prefix=$2
# extract the protocol
local proto="`echo $1 | grep '://' | sed -e's,^\(.*://\).*,\1,g'`"
local scheme="`echo $proto | sed -e 's,^\(.*\)://,\1,g'`"
# remove the protocol
local url=`echo $1 | sed -e s,$proto,,g`
# extract the user and password (if any)
local userpass="`echo $url | grep @ | cut -d@ -f1`"
local pass=`echo $userpass | grep : | cut -d: -f2`
if [ -n "$pass" ]; then
local user=`echo $userpass | grep : | cut -d: -f1`
else
local user=$userpass
fi
# extract the host -- updated
local hostport=`echo $url | sed -e s,$userpass@,,g | cut -d/ -f1`
local port=`echo $hostport | grep : | cut -d: -f2`
if [ -n "$port" ]; then
local host=`echo $hostport | grep : | cut -d: -f1`
else
local host=$hostport
fi
# extract the path (if any)
local full_path="`echo $url | grep / | cut -d/ -f2-`"
local path="`echo $full_path | cut -d? -f1`"
local query="`echo $full_path | grep ? | cut -d? -f2`"
local -i rc=0
[ -n "$proto" ] && eval "export ${prefix}_SCHEME=\"$scheme\"" || rc=$?
[ -n "$user" ] && eval "export ${prefix}_USER=\"`urldecode $user`\"" || rc=$?
[ -n "$pass" ] && eval "export ${prefix}_PASSWORD=\"`urldecode $pass`\"" || rc=$?
[ -n "$host" ] && eval "export ${prefix}_HOST=\"`urldecode $host`\"" || rc=$?
[ -n "$port" ] && eval "export ${prefix}_PORT=\"`urldecode $port`\"" || rc=$?
[ -n "$path" ] && eval "export ${prefix}_NAME=\"`urldecode $path`\"" || rc=$?
[ -n "$query" ] && eval "export ${prefix}_QUERY=\"$query\"" || rc=$?
}
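# Hypothetical example: parse_url "postgres://u:p@db.example.com:5432/jiradb" DB
# exports DB_SCHEME=postgres, DB_USER=u, DB_PASSWORD=p, DB_HOST=db.example.com,
# DB_PORT=5432 and DB_NAME=jiradb.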
download_mysql_driver() {
local driver="mysql-connector-java-5.1.34"
if [ ! -f "$1/$driver-bin.jar" ]; then
echo "Downloading MySQL JDBC Driver..."
curl -L http://dev.mysql.com/get/Downloads/Connector-J/$driver.tar.gz | tar zxv -C /tmp
cp /tmp/$driver/$driver-bin.jar $1/$driver-bin.jar
fi
}
read_var() {
eval "echo \$$1_$2"
}
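# e.g. read_var DB HOST echoes the value of $DB_HOST (an indirect lookup).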
extract_database_url() {
local url="$1"
local prefix="$2"
local mysql_install="$3"
eval "unset ${prefix}_PORT"
parse_url "$url" $prefix
case "$(read_var $prefix SCHEME)" in
postgres|postgresql)
if [ -z "$(read_var $prefix PORT)" ]; then
eval "${prefix}_PORT=5432"
fi
local host_port_name="$(read_var $prefix HOST):$(read_var $prefix PORT)/$(read_var $prefix NAME)"
local jdbc_driver="org.postgresql.Driver"
local jdbc_url="jdbc:postgresql://$host_port_name?ssl=true"
local hibernate_dialect="org.hibernate.dialect.PostgreSQLDialect"
local database_type="postgres72"
;;
mysql|mysql2)
download_mysql_driver "$mysql_install"
if [ -z "$(read_var $prefix PORT)" ]; then
eval "${prefix}_PORT=3306"
fi
local host_port_name="$(read_var $prefix HOST):$(read_var $prefix PORT)/$(read_var $prefix NAME)"
local jdbc_driver="com.mysql.jdbc.Driver"
local jdbc_url="jdbc:mysql://$host_port_name?autoReconnect=true&characterEncoding=utf8&useUnicode=true&sessionVariables=storage_engine%3DInnoDB"
local hibernate_dialect="org.hibernate.dialect.MySQLDialect"
local database_type="mysql"
;;
*)
echo "Unsupported database url scheme: $(read_var $prefix SCHEME)"
exit 1
;;
esac
eval "${prefix}_JDBC_DRIVER=\"$jdbc_driver\""
eval "${prefix}_JDBC_URL=\"$jdbc_url\""
eval "${prefix}_DIALECT=\"$hibernate_dialect\""
eval "${prefix}_TYPE=\"$database_type\""
}
chown jira:jira /opt/atlassian-home -R
rm -f /opt/atlassian-home/.jira-home.lock
if [ "$CONTEXT_PATH" == "ROOT" -o -z "$CONTEXT_PATH" ]; then
CONTEXT_PATH=
else
CONTEXT_PATH="/$CONTEXT_PATH"
fi
xmlstarlet ed -u '//Context/@path' -v "$CONTEXT_PATH" /opt/jira/conf/server-backup.xml > /opt/jira/conf/server.xml
if [ -n "$DATABASE_URL" ]; then
extract_database_url "$DATABASE_URL" DB /opt/jira/lib
DB_JDBC_URL="$(xmlstarlet esc "$DB_JDBC_URL")"
SCHEMA=''
if [ "$DB_TYPE" != "mysql" ]; then
SCHEMA='<schema-name>public</schema-name>'
fi
cat <<END > /opt/atlassian-home/dbconfig.xml
<?xml version="1.0" encoding="UTF-8"?>
<jira-database-config>
<name>defaultDS</name>
<delegator-name>default</delegator-name>
<database-type>$DB_TYPE</database-type>
$SCHEMA
<jdbc-datasource>
<url>$DB_JDBC_URL</url>
<driver-class>$DB_JDBC_DRIVER</driver-class>
<username>$DB_USER</username>
<password>$DB_PASSWORD</password>
<pool-min-size>20</pool-min-size>
<pool-max-size>20</pool-max-size>
<pool-max-wait>30000</pool-max-wait>
<pool-max-idle>20</pool-max-idle>
<pool-remove-abandoned>true</pool-remove-abandoned>
<pool-remove-abandoned-timeout>300</pool-remove-abandoned-timeout>
</jdbc-datasource>
</jira-database-config>
END
fi
[ -f /opt/atlassian-home/database.cert ] && /usr/bin/keytool -import -alias database -file /opt/atlassian-home/database.cert -keystore $JAVA_HOME/lib/security/cacerts -storepass changeit -noprompt
cat >/etc/supervisord.d/jira.ini<<EOF
[program:jira]
command=/opt/jira/bin/start-jira.sh -fg
user=jira
autorestart=true
redirect_stderr=true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
EOF
supervisord -c /etc/supervisord.conf
| idi-ops/docker-jira | start.sh | Shell | apache-2.0 | 5,353 |
# Copyright (C) 2010 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Create a standalone toolchain package for Android.
. `dirname $0`/prebuilt-common.sh
PROGRAM_PARAMETERS=""
PROGRAM_DESCRIPTION=\
"Generate a customized Android toolchain installation that includes
a working sysroot. The result is something that can more easily be
used as a standalone cross-compiler, e.g. to run configure and
make scripts."
# For now, this is the only toolchain that works reliably.
TOOLCHAIN_NAME=
register_var_option "--toolchain=<name>" TOOLCHAIN_NAME "Specify toolchain name"
LLVM_VERSION=
register_var_option "--llvm-version=<ver>" LLVM_VERSION "Specify LLVM version"
STL=gnustl
register_var_option "--stl=<name>" STL "Specify C++ STL"
ARCH=
register_option "--arch=<name>" do_arch "Specify target architecture" "arm"
do_arch () { ARCH=$1; }
NDK_DIR=`dirname $0`
NDK_DIR=`dirname $NDK_DIR`
NDK_DIR=`dirname $NDK_DIR`
register_var_option "--ndk-dir=<path>" NDK_DIR "Take source files from NDK at <path>"
if [ -d "$NDK_DIR/prebuilt/$HOST_TAG" ]; then
SYSTEM=$HOST_TAG
else
SYSTEM=$HOST_TAG32
fi
register_var_option "--system=<name>" SYSTEM "Specify host system"
PACKAGE_DIR=/tmp/ndk-$USER
register_var_option "--package-dir=<path>" PACKAGE_DIR "Place package file in <path>"
INSTALL_DIR=
register_var_option "--install-dir=<path>" INSTALL_DIR "Don't create package, install files to <path> instead."
PLATFORM=
register_option "--platform=<name>" do_platform "Specify target Android platform/API level." "android-3"
do_platform () { PLATFORM=$1; }
extract_parameters "$@"
# Check NDK_DIR
if [ ! -d "$NDK_DIR/build/core" ] ; then
echo "Invalid source NDK directory: $NDK_DIR"
echo "Please use --ndk-dir=<path> to specify the path of an installed NDK."
exit 1
fi
# Check ARCH
if [ -z "$ARCH" ]; then
case $TOOLCHAIN_NAME in
arm-*)
ARCH=arm
;;
x86-*)
ARCH=x86
;;
mipsel-*)
ARCH=mips
;;
aarch64-*)
ARCH=arm64
;;
x86_64-*)
ARCH=x86_64
;;
mips64el-*)
ARCH=mips64
;;
*)
ARCH=arm
;;
esac
ARCH_INC=$ARCH
log "Auto-config: --arch=$ARCH"
else
ARCH_INC=$ARCH
case $ARCH in
*arm)
ARCH=arm
;;
*x86)
ARCH=x86
;;
*mips)
ARCH=mips
;;
*arm64)
ARCH=arm64
;;
*x86_64)
ARCH=x86_64
;;
*mips64)
ARCH=mips64
;;
*)
ARCH=arm
;;
esac
fi
ARCH_LIB=$ARCH
ARCH_STL=$ARCH
if [ "$ARCH_INC" != "$ARCH" ]; then
test -n "`echo $ARCH_INC | grep bc$ARCH`" && NEED_BC2NATIVE=yes
test -z "`echo $ARCH_INC | grep $ARCH`" && NEED_BC_LIB=yes
ARCH_INC=$(find_ndk_unknown_archs)
test -z "$ARCH_INC" && ARCH_INC="$ARCH"
test "$NEED_BC_LIB" = "yes" && ARCH_LIB=$ARCH_INC
test "$NEED_BC_LIB" = "yes" -o "$NEED_BC2NATIVE" = "yes" && ARCH_STL=$ARCH_INC
fi
# Check toolchain name
if [ -z "$TOOLCHAIN_NAME" ]; then
TOOLCHAIN_NAME=$(get_default_toolchain_name_for_arch $ARCH)
echo "Auto-config: --toolchain=$TOOLCHAIN_NAME"
fi
if [ "$ARCH_STL" != "$ARCH" ]; then
if [ "$STL" != stlport ]; then
echo "Force-config: --stl=stlport"
STL=stlport
fi
fi
if [ "$ARCH_INC" != "$ARCH" ]; then
TARGET_ABI=$(convert_arch_to_abi $ARCH | tr ',' '\n' | tail -n 1)
if [ -z "$LLVM_VERSION" ]; then
LLVM_VERSION=$DEFAULT_LLVM_VERSION
fi
fi
# Detect LLVM version from toolchain name
if [ -z "$LLVM_VERSION" ]; then
LLVM_VERSION_EXTRACT=$(echo "$TOOLCHAIN_NAME" | grep 'clang[0-9]\.[0-9]$' | sed -e 's/.*-clang//')
if [ -n "$LLVM_VERSION_EXTRACT" ]; then
TOOLCHAIN_NAME=$(get_default_toolchain_name_for_arch $ARCH)
LLVM_VERSION=$LLVM_VERSION_EXTRACT
echo "Auto-config: --toolchain=$TOOLCHAIN_NAME, --llvm-version=$LLVM_VERSION"
fi
fi
# Check PLATFORM
if [ -z "$PLATFORM" -a "$ARCH_INC" = "$ARCH" ] ; then
case $ARCH in
arm) PLATFORM=android-3
;;
x86|mips)
PLATFORM=android-9
;;
arm64|x86_64|mips64)
PLATFORM=android-20
;;
*)
dump "ERROR: Unsupported NDK architecture $ARCH!"
esac
log "Auto-config: --platform=$PLATFORM"
elif [ -z "$PLATFORM" ] ; then
PLATFORM=android-9
log "Auto-config: --platform=$PLATFORM"
fi
if [ ! -d "$NDK_DIR/platforms/$PLATFORM" ] ; then
echo "Invalid platform name: $PLATFORM"
echo "Please use --platform=<name> with one of:" `(cd "$NDK_DIR/platforms" && ls)`
exit 1
fi
# Check toolchain name
TOOLCHAIN_PATH="$NDK_DIR/toolchains/$TOOLCHAIN_NAME"
if [ ! -d "$TOOLCHAIN_PATH" ] ; then
echo "Invalid toolchain name: $TOOLCHAIN_NAME"
echo "Please use --toolchain=<name> with the name of a toolchain supported by the source NDK."
echo "Try one of: " `(cd "$NDK_DIR/toolchains" && ls)`
exit 1
fi
# Extract architecture from platform name
parse_toolchain_name $TOOLCHAIN_NAME
# Check that there are any platform files for it!
(cd $NDK_DIR/platforms && ls -d */arch-$ARCH_INC >/dev/null 2>&1 )
if [ $? != 0 ] ; then
echo "Platform $PLATFORM doesn't have any files for this architecture: $ARCH_INC"
echo "Either use --platform=<name> or --toolchain=<name> to select a different"
echo "platform or arch-dependent toolchain name (respectively)!"
exit 1
fi
# Compute source sysroot
SRC_SYSROOT_INC="$NDK_DIR/platforms/$PLATFORM/arch-$ARCH_INC/usr/include"
SRC_SYSROOT_LIB="$NDK_DIR/platforms/$PLATFORM/arch-$ARCH_LIB/usr/lib"
if [ ! -d "$SRC_SYSROOT_INC" -o ! -d "$SRC_SYSROOT_LIB" ] ; then
echo "No platform files ($PLATFORM) for this architecture: $ARCH"
exit 1
fi
# Check that we have a prebuilt GCC toolchain here
if [ ! -d "$TOOLCHAIN_PATH/prebuilt" ] ; then
echo "Toolchain is missing prebuilt files: $TOOLCHAIN_NAME"
echo "You must point to a valid NDK release package!"
exit 1
fi
if [ ! -d "$TOOLCHAIN_PATH/prebuilt/$SYSTEM" ] ; then
echo "Host system '$SYSTEM' is not supported by the source NDK!"
echo "Try --system=<name> with one of: " `(cd $TOOLCHAIN_PATH/prebuilt && ls) | grep -v gdbserver`
exit 1
fi
TOOLCHAIN_PATH="$TOOLCHAIN_PATH/prebuilt/$SYSTEM"
TOOLCHAIN_GCC=$TOOLCHAIN_PATH/bin/$ABI_CONFIGURE_TARGET-gcc
if [ ! -f "$TOOLCHAIN_GCC" ] ; then
echo "Toolchain $TOOLCHAIN_GCC is missing!"
exit 1
fi
if [ -n "$LLVM_VERSION" ]; then
LLVM_TOOLCHAIN_PATH="$NDK_DIR/toolchains/llvm-$LLVM_VERSION"
# Check that we have a prebuilt LLVM toolchain here
if [ ! -d "$LLVM_TOOLCHAIN_PATH/prebuilt" ] ; then
echo "LLVM Toolchain is missing prebuilt files"
echo "You must point to a valid NDK release package!"
exit 1
fi
if [ ! -d "$LLVM_TOOLCHAIN_PATH/prebuilt/$SYSTEM" ] ; then
echo "Host system '$SYSTEM' is not supported by the source NDK!"
echo "Try --system=<name> with one of: " `(cd $LLVM_TOOLCHAIN_PATH/prebuilt && ls)`
exit 1
fi
LLVM_TOOLCHAIN_PATH="$LLVM_TOOLCHAIN_PATH/prebuilt/$SYSTEM"
fi
# Get GCC_BASE_VERSION. Note that GCC_BASE_VERSION may be slightly different from GCC_VERSION.
# eg. In gcc4.6 GCC_BASE_VERSION is "4.6.x-google"
LIBGCC_PATH=`$TOOLCHAIN_GCC -print-libgcc-file-name`
LIBGCC_BASE_PATH=${LIBGCC_PATH%/*} # base path of libgcc.a
GCC_BASE_VERSION=${LIBGCC_BASE_PATH##*/} # stuff after the last /
# Create temporary directory
TMPDIR=$NDK_TMPDIR/standalone/$TOOLCHAIN_NAME
dump "Copying prebuilt binaries..."
# Now copy the GCC toolchain prebuilt binaries
run copy_directory "$TOOLCHAIN_PATH" "$TMPDIR"
# Replace soft-link mcld by real file
ALL_LDS=`find $TMPDIR -name "*mcld"`
for LD in $ALL_LDS; do
rm -f "$LD"
cp -a "$NDK_DIR/toolchains/llvm-$DEFAULT_LLVM_VERSION/prebuilt/$SYSTEM/bin/ld.mcld" "$LD"
done
# Copy Python-related files for gdb.exe
PYTHON=python
PYTHON_x=python$(echo "$DEFAULT_PYTHON_VERSION" | cut -d . -f 1)
PYTHON_xdotx=python$(echo "$DEFAULT_PYTHON_VERSION" | cut -d . -f 1).$(echo "$DEFAULT_PYTHON_VERSION" | cut -d . -f 2)
copy_directory "$NDK_DIR/prebuilt/$SYSTEM/include/$PYTHON_xdotx" "$TMPDIR/include/$PYTHON_xdotx"
copy_directory "$NDK_DIR/prebuilt/$SYSTEM/lib/$PYTHON_xdotx" "$TMPDIR/lib/$PYTHON_xdotx"
copy_file_list "$NDK_DIR/prebuilt/$SYSTEM/bin" "$TMPDIR/bin" "$PYTHON$HOST_EXE" "$PYTHON_x$HOST_EXE" "$PYTHON_xdotx$HOST_EXE"
if [ "$HOST_TAG32" = "windows" ]; then
copy_file_list "$NDK_DIR/prebuilt/$SYSTEM/bin" "$TMPDIR/bin" lib$PYTHON_xdotx.dll
fi
# Copy yasm for x86
if [ "$ARCH" = "x86" ]; then
copy_file_list "$NDK_DIR/prebuilt/$SYSTEM/bin" "$TMPDIR/bin" "yasm$HOST_EXE"
fi
# Clang stuff
dump_extra_compile_commands () {
if [ "$NEED_BC2NATIVE" != "yes" ]; then
return
fi
if [ -z "$HOST_EXE" ]; then
echo '# Call bc2native if needed'
echo ''
echo 'if [ -n "`echo $@ | grep '\'\\ \\-c\''`" ] || [ "$1" = "-c" ]; then'
echo ' exit'
echo 'fi'
echo 'while [ -n "$1" ]; do'
echo ' if [ "$1" = "-o" ]; then'
echo ' output="$2"'
echo ' break'
echo ' fi'
echo ' shift'
echo 'done'
echo 'test -z "$output" && output=a.out'
echo 'if [ -f "`dirname $0`/ndk-bc2native" ]; then'
echo ' `dirname $0`/ndk-bc2native --sysroot=`dirname $0`/../sysroot --abi='$TARGET_ABI' --platform='$PLATFORM' --file $output $output'
echo 'else'
echo ' export PYTHONPATH=`dirname $0`/../lib/python2.7/'
echo ' `dirname $0`/python `dirname $0`/ndk-bc2native.py --sysroot=`dirname $0`/../sysroot --abi='$TARGET_ABI' --platform='$PLATFORM' --file $output $output'
echo 'fi'
else
echo 'rem Call bc2native if needed'
echo ''
echo ' if not "%1" == "-c" goto :keep_going'
echo ' echo %* | grep "\\ \\-c"'
echo ' if ERRORLEVEL 1 goto :keep_going'
echo ' exit'
echo ':keep_going'
echo ':keep_find_output'
echo ' if not "%1" == "-o" goto :check_next'
echo ' set output=%2'
echo ':check_next'
echo ' shift'
echo ' if "%1" == "" goto :keep_find_output'
echo ' if not "%output%" == "" goto :check_done'
echo ' set output=a.out'
echo ':check_done'
echo 'if exist %~dp0\\ndk-bc2native'$HOST_EXE' ('
echo ' %~dp0\\ndk-bc2native'$HOST_EXE' --sysroot=%~dp0\\..\\sysroot --abi='$TARGET_ABI' --platform='$PLATFORM' --file %output% %output%'
echo ') else ('
echo ' set PYTHONPATH=%~dp0\\..\\lib\\python2.7\\'
echo ' %~dp0\\python'$HOST_EXE' %~dp0\\ndk-bc2native.py --sysroot=%~dp0\\..\\sysroot --abi='$TARGET_ABI' --platform='$PLATFORM' --file %output% %output%'
echo ')'
fi
}
if [ -n "$LLVM_VERSION" ]; then
# Copy the clang/llvm toolchain prebuilt binaries
run copy_directory "$LLVM_TOOLCHAIN_PATH" "$TMPDIR"
# Move clang and clang++ to clang${LLVM_VERSION} and clang${LLVM_VERSION}++,
# then create scripts linking them with predefined -target flag. This is to
# make clang/++ an easier drop-in replacement for gcc/++ in NDK standalone mode.
# Note that the file name of "clang" isn't important, and the trailing
# "++" tells clang to compile in C++ mode
LLVM_TARGET=
case "$ARCH" in
arm) # Note: clang may change -target based on the
# presence of subsequent -march=armv7-a and/or -mthumb
LLVM_TARGET=armv5te-none-linux-androideabi
TOOLCHAIN_PREFIX=$DEFAULT_ARCH_TOOLCHAIN_PREFIX_arm
;;
x86)
LLVM_TARGET=i686-none-linux-android
TOOLCHAIN_PREFIX=$DEFAULT_ARCH_TOOLCHAIN_PREFIX_x86
;;
mips)
LLVM_TARGET=mipsel-none-linux-android
TOOLCHAIN_PREFIX=$DEFAULT_ARCH_TOOLCHAIN_PREFIX_mips
;;
arm64)
LLVM_TARGET=aarch64-linux-android
TOOLCHAIN_PREFIX=$DEFAULT_ARCH_TOOLCHAIN_PREFIX_arm64
;;
x86_64)
LLVM_TARGET=x86_64-none-linux-android
TOOLCHAIN_PREFIX=$DEFAULT_ARCH_TOOLCHAIN_PREFIX_x86_64
;;
mips64)
LLVM_TARGET=mips64el-none-linux-android
TOOLCHAIN_PREFIX=$DEFAULT_ARCH_TOOLCHAIN_PREFIX_mips64
;;
*)
dump "ERROR: Unsupported NDK architecture $ARCH!"
esac
# Need to remove '.' from LLVM_VERSION when constructing new clang name,
# otherwise clang3.3++ may still compile *.c code as C, not C++, which
# is not consistent with g++
LLVM_VERSION_WITHOUT_DOT=$(echo "$LLVM_VERSION" | sed -e "s!\.!!")
mv "$TMPDIR/bin/clang${HOST_EXE}" "$TMPDIR/bin/clang${LLVM_VERSION_WITHOUT_DOT}${HOST_EXE}"
if [ -h "$TMPDIR/bin/clang++${HOST_EXE}" ] ; then
## clang++ is a link to clang. Remove it and reconstruct
rm "$TMPDIR/bin/clang++${HOST_EXE}"
ln -sf "clang${LLVM_VERSION_WITHOUT_DOT}${HOST_EXE}" "$TMPDIR/bin/clang${LLVM_VERSION_WITHOUT_DOT}++${HOST_EXE}"
else
mv "$TMPDIR/bin/clang++${HOST_EXE}" "$TMPDIR/bin/clang$LLVM_VERSION_WITHOUT_DOT++${HOST_EXE}"
fi
EXTRA_CLANG_FLAGS=
EXTRA_CLANGXX_FLAGS=
if [ "$ARCH_STL" != "$ARCH" ]; then
LLVM_TARGET=le32-none-ndk
EXTRA_CLANG_FLAGS="-emit-llvm"
EXTRA_CLANGXX_FLAGS="$EXTRA_CLANG_FLAGS -I\`dirname \$0\`/../include/c++/$GCC_BASE_VERSION"
fi
cat > "$TMPDIR/bin/clang" <<EOF
if [ "\$1" != "-cc1" ]; then
\`dirname \$0\`/clang$LLVM_VERSION_WITHOUT_DOT -target $LLVM_TARGET "\$@" $EXTRA_CLANG_FLAGS
$(dump_extra_compile_commands)
else
# target/triple already spelled out.
\`dirname \$0\`/clang$LLVM_VERSION_WITHOUT_DOT "\$@" $EXTRA_CLANG_FLAGS
fi
EOF
cat > "$TMPDIR/bin/clang++" <<EOF
if [ "\$1" != "-cc1" ]; then
\`dirname \$0\`/clang$LLVM_VERSION_WITHOUT_DOT++ -target $LLVM_TARGET "\$@" $EXTRA_CLANGXX_FLAGS
$(dump_extra_compile_commands)
else
# target/triple already spelled out.
\`dirname \$0\`/clang$LLVM_VERSION_WITHOUT_DOT++ "\$@" $EXTRA_CLANGXX_FLAGS
fi
EOF
chmod 0755 "$TMPDIR/bin/clang" "$TMPDIR/bin/clang++"
cp -a "$TMPDIR/bin/clang" "$TMPDIR/bin/$TOOLCHAIN_PREFIX-clang"
cp -a "$TMPDIR/bin/clang++" "$TMPDIR/bin/$TOOLCHAIN_PREFIX-clang++"
if [ -n "$HOST_EXE" ] ; then
cat > "$TMPDIR/bin/clang.cmd" <<EOF
@echo off
if "%1" == "-cc1" goto :L
%~dp0\\clang${LLVM_VERSION_WITHOUT_DOT}${HOST_EXE} -target $LLVM_TARGET %* $EXTRA_CLANG_FLAGS
$(dump_extra_compile_commands)
if ERRORLEVEL 1 exit /b 1
goto :done
:L
rem target/triple already spelled out.
%~dp0\\clang${LLVM_VERSION_WITHOUT_DOT}${HOST_EXE} %* $EXTRA_CLANG_FLAGS
if ERRORLEVEL 1 exit /b 1
:done
EOF
cat > "$TMPDIR/bin/clang++.cmd" <<EOF
@echo off
if "%1" == "-cc1" goto :L
%~dp0\\clang${LLVM_VERSION_WITHOUT_DOT}++${HOST_EXE} -target $LLVM_TARGET %* $EXTRA_CLANGXX_FLAGS
$(dump_extra_compile_commands)
if ERRORLEVEL 1 exit /b 1
goto :done
:L
rem target/triple already spelled out.
%~dp0\\clang${LLVM_VERSION_WITHOUT_DOT}++${HOST_EXE} %* $EXTRA_CLANGXX_FLAGS
if ERRORLEVEL 1 exit /b 1
:done
EOF
chmod 0755 "$TMPDIR/bin/clang.cmd" "$TMPDIR/bin/clang++.cmd"
cp -a "$TMPDIR/bin/clang.cmd" "$TMPDIR/bin/$TOOLCHAIN_PREFIX-clang.cmd"
cp -a "$TMPDIR/bin/clang++.cmd" "$TMPDIR/bin/$TOOLCHAIN_PREFIX-clang++.cmd"
fi
fi
dump "Copying sysroot headers and libraries..."
# Copy the sysroot under $TMPDIR/sysroot. The toolchain was built to
# expect the sysroot files to be placed there!
run copy_directory_nolinks "$SRC_SYSROOT_INC" "$TMPDIR/sysroot/usr/include"
run copy_directory_nolinks "$SRC_SYSROOT_LIB" "$TMPDIR/sysroot/usr/lib"
# x86_64 toolchain is built multilib.
if [ "$ARCH" = "x86_64" ]; then
run copy_directory_nolinks "$SRC_SYSROOT_LIB/../lib64" "$TMPDIR/sysroot/usr/lib64"
run copy_directory_nolinks "$SRC_SYSROOT_LIB/../libx32" "$TMPDIR/sysroot/usr/libx32"
fi
if [ "$ARCH_INC" != "$ARCH" ]; then
cp -a $NDK_DIR/$GABIXX_SUBDIR/libs/$ABI/* $TMPDIR/sysroot/usr/lib
cp -a $NDK_DIR/$LIBPORTABLE_SUBDIR/libs/$ABI/* $TMPDIR/sysroot/usr/lib
cp -a $NDK_DIR/$GCCUNWIND_SUBDIR/libs/$ABI/* $TMPDIR/sysroot/usr/lib
if [ "$ARCH" = "${ARCH%%64*}" ]; then
cp -a $NDK_DIR/$COMPILER_RT_SUBDIR/libs/$ABI/* $TMPDIR/sysroot/usr/lib
fi
fi
if [ "$ARCH_LIB" != "$ARCH" ]; then
cp -a $NDK_DIR/platforms/$PLATFORM/arch-$ARCH/usr/lib/crt* $TMPDIR/sysroot/usr/lib
fi
dump "Copying libstdc++ headers and libraries..."
GNUSTL_DIR=$NDK_DIR/$GNUSTL_SUBDIR/$GCC_VERSION
GNUSTL_LIBS=$GNUSTL_DIR/libs
STLPORT_DIR=$NDK_DIR/$STLPORT_SUBDIR
STLPORT_LIBS=$STLPORT_DIR/libs
LIBCXX_DIR=$NDK_DIR/$LIBCXX_SUBDIR
LIBCXX_LIBS=$LIBCXX_DIR/libs
SUPPORT_DIR=$NDK_DIR/$SUPPORT_SUBDIR
COMPILER_RT_DIR=$NDK_DIR/$COMPILER_RT_SUBDIR
COMPILER_RT_LIBS=$COMPILER_RT_DIR/libs
ABI_STL="$TMPDIR/$ABI_CONFIGURE_TARGET"
ABI_STL_INCLUDE="$TMPDIR/include/c++/$GCC_BASE_VERSION"
ABI_STL_INCLUDE_TARGET="$ABI_STL_INCLUDE/$ABI_CONFIGURE_TARGET"
# $1: filenames of headers
copy_gabixx_headers () {
for header in $@; do
(cd $ABI_STL_INCLUDE && cp -a ../../gabi++/include/$header $header)
done
}
# Copy common STL headers (i.e. the non-arch-specific ones)
copy_stl_common_headers () {
case $STL in
gnustl)
copy_directory "$GNUSTL_DIR/include" "$ABI_STL_INCLUDE"
;;
libcxx|libc++)
copy_directory "$LIBCXX_DIR/libcxx/include" "$ABI_STL_INCLUDE"
copy_directory "$SUPPORT_DIR/include" "$ABI_STL_INCLUDE"
copy_directory "$STLPORT_DIR/../gabi++/include" "$ABI_STL_INCLUDE/../../gabi++/include"
copy_gabixx_headers cxxabi.h unwind.h unwind-arm.h unwind-itanium.h gabixx_config.h
;;
stlport)
copy_directory "$STLPORT_DIR/stlport" "$ABI_STL_INCLUDE"
copy_directory "$STLPORT_DIR/../gabi++/include" "$ABI_STL_INCLUDE/../../gabi++/include"
copy_gabixx_headers cxxabi.h unwind.h unwind-arm.h unwind-itanium.h gabixx_config.h
;;
esac
}
# $1: Source ABI (e.g. 'armeabi')
# $2: Optional destination directory, default to empty (e.g. "", "thumb", "armv7-a/thumb")
# $3: Optional source directory, default to empty (e.g. "", "thumb", "armv7-a/thumb")
# $4: Optional "yes" (default) or "no" about whether to copy additional header (eg. include/bits)
copy_stl_libs () {
local ABI=$1
local DEST_DIR=$2
local SRC_DIR=$3
local COPY_ADDITIONAL_HEADER=yes
case $STL in
gnustl)
# gnustl has thumb versions of the libraries. Append basename($DEST_DIR) to ABI if $DEST_DIR contains '/'
ABI_SRC_DIR=$ABI
if [ -n "$SRC_DIR" ]; then
ABI_SRC_DIR=$ABI/$SRC_DIR
else
if [ "$DEST_DIR" != "${DEST_DIR%%/*}" ] ; then
ABI_SRC_DIR=$ABI/`basename $DEST_DIR`
fi
fi
if [ "$COPY_ADDITIONAL_HEADER" != "no" ]; then
copy_directory "$GNUSTL_LIBS/$ABI/include/bits" "$ABI_STL_INCLUDE_TARGET/$DEST_DIR/bits"
fi
copy_file_list "$GNUSTL_LIBS/$ABI_SRC_DIR" "$ABI_STL/lib/$DEST_DIR" "libgnustl_shared.so"
copy_file_list "$GNUSTL_LIBS/$ABI_SRC_DIR" "$ABI_STL/lib/$DEST_DIR" "libsupc++.a"
cp -p "$GNUSTL_LIBS/$ABI_SRC_DIR/libgnustl_static.a" "$ABI_STL/lib/$DEST_DIR/libstdc++.a"
;;
libcxx|libc++)
if [ "$ARCH" = "${ARCH%%64*}" ]; then
copy_file_list "$COMPILER_RT_LIBS/$ABI" "$ABI_STL/lib/$DEST_DIR" "libcompiler_rt_shared.so" "libcompiler_rt_static.a"
fi
copy_file_list "$LIBCXX_LIBS/$ABI" "$ABI_STL/lib/$DEST_DIR" "libc++_shared.so"
cp -p "$LIBCXX_LIBS/$ABI/libc++_static.a" "$ABI_STL/lib/$DEST_DIR/libstdc++.a"
;;
stlport)
if [ "$ARCH_STL" != "$ARCH" ]; then
tmp_lib_dir=$TMPDIR/stl
$NDK_DIR/build/tools/build-cxx-stl.sh --stl=stlport --out-dir=$tmp_lib_dir --abis=unknown
cp -p "`ls $tmp_lib_dir/sources/cxx-stl/stlport/libs/*/libstlport_static.a`" "$ABI_STL/lib/$DEST_DIR/libstdc++.a"
cp -p "`ls $tmp_lib_dir/sources/cxx-stl/stlport/libs/*/libstlport_shared.bc`" "$ABI_STL/lib/$DEST_DIR/libstlport_shared.so"
rm -rf $tmp_lib_dir
else
copy_file_list "$STLPORT_LIBS/$ABI" "$ABI_STL/lib/$DEST_DIR" "libstlport_shared.so"
cp -p "$STLPORT_LIBS/$ABI/libstlport_static.a" "$ABI_STL/lib/$DEST_DIR/libstdc++.a"
fi
;;
*)
dump "ERROR: Unsupported STL: $STL"
exit 1
;;
esac
}
mkdir -p "$ABI_STL_INCLUDE_TARGET"
fail_panic "Can't create directory: $ABI_STL_INCLUDE_TARGET"
copy_stl_common_headers
case $ARCH in
arm)
copy_stl_libs armeabi ""
copy_stl_libs armeabi "/thumb"
copy_stl_libs armeabi-v7a "armv7-a"
copy_stl_libs armeabi-v7a "armv7-a/thumb"
copy_stl_libs armeabi-v7a-hard "armv7-a/hard" "." "no"
copy_stl_libs armeabi-v7a-hard "armv7-a/thumb/hard" "thumb" "no"
;;
arm64)
copy_stl_libs arm64-v8a ""
;;
x86|mips|mips64|x86_64)
copy_stl_libs "$ARCH" ""
;;
*)
dump "ERROR: Unsupported NDK architecture: $ARCH"
exit 1
;;
esac
# Install or Package
if [ -n "$INSTALL_DIR" ] ; then
dump "Copying files to: $INSTALL_DIR"
if [ ! -d "$INSTALL_DIR" ]; then
run move_directory "$TMPDIR" "$INSTALL_DIR"
else
run copy_directory "$TMPDIR" "$INSTALL_DIR"
fi
else
PACKAGE_FILE="$PACKAGE_DIR/$TOOLCHAIN_NAME.tar.bz2"
dump "Creating package file: $PACKAGE_FILE"
pack_archive "$PACKAGE_FILE" "`dirname $TMPDIR`" "$TOOLCHAIN_NAME"
fail_panic "Could not create tarball from $TMPDIR"
fi
dump "Cleaning up..."
run rm -rf $TMPDIR
dump "Done."
| efortuna/AndroidSDKClone | ndk_experimental/build/tools/make-standalone-toolchain.sh | Shell | apache-2.0 | 22,231 |
./vina_ori --config conf1.txt --log log
| JSI-CSAR/VinaSC | test/ori_run.sh | Shell | apache-2.0 | 40 |
#!/bin/sh
nohup java -Dserver.port=8090 -jar build/libs/quickat-0.0.1-SNAPSHOT.war > /var/www/log/quickat/quickat.log 2>&1 &
tail -100f /var/www/log/quickat/quickat.log
| castronu/quickat | server/scripts/start.sh | Shell | apache-2.0 | 170 |
cp ~/src/fusion/android/linphone/linphone-android/libs/armeabi/liblinphone.so .
cp ~/src/fusion/android/linphone/linphone-android/libs/armeabi-v7a/libavcodec.so .
cp ~/src/fusion/android/linphone/linphone-android/libs/armeabi-v7a/libavcore.so .
cp ~/src/fusion/android/linphone/linphone-android/libs/armeabi-v7a/libavutil.so .
cp ~/src/fusion/android/linphone/linphone-android/libs/armeabi-v7a/liblincrypto.so .
cp ~/src/fusion/android/linphone/linphone-android/libs/armeabi-v7a/liblinphone.so .
cp ~/src/fusion/android/linphone/linphone-android/libs/armeabi-v7a/liblinssl.so .
cp ~/src/fusion/android/linphone/linphone-android/libs/armeabi-v7a/libsrtp.so .
cp ~/src/fusion/android/linphone/linphone-android/libs/armeabi-v7a/libswscale.so .
| bayvictor/distributed-polling-system | bin/copy_arm7v_ffmeg_so_files__to_here.sh | Shell | apache-2.0 | 750 |
#!/bin/bash
set -o errexit -o pipefail -o noclobber -o nounset
# Save working directory
OLD_PWD="$PWD"
# Return to saved working directory on error/exit
trap_handler() {
cd "$OLD_PWD" || exit 9
}
trap "trap_handler" ERR EXIT INT TERM
cd "$(dirname "${BASH_SOURCE[0]}")" || return
echo "In case of build errors, please verify that recent versions of docker and docker-compose are installed."
echo ""
# Pull is allowed to fail, ignore if it happens.
! docker-compose -f docker-build/docker-compose.yml pull
if [ -z "$*" ]; then
docker-compose -f docker-build/docker-compose.yml run --rm build-container
else
docker-compose -f docker-build/docker-compose.yml run --rm build-container "$*"
fi
| industrial-data-space/trusted-connector | build.sh | Shell | apache-2.0 | 698 |
#!/bin/sh
#
# Copyright 2011-2014 eBusiness Information, Groupe Excilys (www.ebusinessinformation.fr)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
USER_ARGS="$@"
echo "Enter number of ramp users:"
read rampusers
echo "Enter ramp up time (seconds):"
read ramptime
echo "Enter password"
read -s password
OLDDIR=`pwd`
BIN_DIR=`dirname $0`
cd "${BIN_DIR}/.." && DEFAULT_GATLING_HOME=`pwd` && cd "${OLDDIR}"
GATLING_HOME="${GATLING_HOME:=${DEFAULT_GATLING_HOME}}"
GATLING_CONF="${GATLING_CONF:=$GATLING_HOME/conf}"
export GATLING_HOME GATLING_CONF
echo "GATLING_HOME is set to ${GATLING_HOME}"
JAVA_OPTS="-server -XX:+UseThreadPriorities -XX:ThreadPriorityPolicy=42 -Xms512M -Xmx512M -Xmn100M -XX:+HeapDumpOnOutOfMemoryError -XX:+AggressiveOpts -XX:+OptimizeStringConcat -XX:+UseFastAccessorMethods -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled -Djava.net.preferIPv4Stack=true -Djava.net.preferIPv6Addresses=false -Drampusers=$rampusers -Dramptime=$ramptime -Dendpoint=http://52.88.34.54 -Dpass=$password ${JAVA_OPTS}"
COMPILER_OPTS="$JAVA_OPTS -Xss10M"
# Setup classpaths
COMMON_CLASSPATH="$GATLING_CONF:${JAVA_CLASSPATH}"
COMPILER_CLASSPATH="$GATLING_HOME/lib/zinc/*:$COMMON_CLASSPATH"
GATLING_CLASSPATH="$GATLING_HOME/lib/*:$GATLING_HOME/user-files:$COMMON_CLASSPATH"
# Build compilation classpath
COMPILATION_CLASSPATH=`find $GATLING_HOME/lib -maxdepth 1 -name "*.jar" -type f -exec printf :{} ';'`
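# The find/printf pair above yields ":jar1:jar2:..." - a colon-separated list
# of every jar directly under $GATLING_HOME/lib.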
# Run the compiler
java $COMPILER_OPTS -cp "$COMPILER_CLASSPATH" io.gatling.compiler.ZincCompiler -ccp "$COMPILATION_CLASSPATH" $USER_ARGS 2> /dev/null
# Run Gatling
which java
java $JAVA_OPTS -cp "$GATLING_CLASSPATH" io.gatling.app.Gatling $USER_ARGS
| pbindels/github_exercise | bin/gatling.sh | Shell | apache-2.0 | 2,190 |
#!/bin/bash
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
rsync -a $DIR/client/outscripts/sbsbundle.js.min.js $DIR/cordova/www/scripts/sbsbundle.min.js
rsync -a $DIR/client/outscripts/sbsbundle.js $DIR/cordova/www/scripts/sbsbundle.js
rsync -a $DIR/client/content/sbsbundle.css.min.css $DIR/cordova/www/css/sbsbundle.min.css
rsync -a $DIR/client/content/sbsbundle.css $DIR/cordova/www/css/sbsbundle.css
rsync -a $DIR/client/content/fonts/ $DIR/cordova/www/fonts
| vishwa89/sbs-app | src/copybundles.sh | Shell | apache-2.0 | 471 |
#! /bin/sh
echo "running $0 $@"
# Defaults
ml_version=8
install_zip=true
install_java=true
install_mlcp=true
# Load the normalized project properties.
source /tmp/$1.project.properties
# Zip/unzip not required for MLCP (provided through Java)
if [ $install_zip == "true" ]; then
yum -y install zip unzip
elif [ $install_mlcp == "true" ]; then
# but installation does require unzip
yum -y install unzip
fi
if [ $install_mlcp == "true" ]; then
# Java required for MLCP
yum -y install java-1.8.0-openjdk-devel
# Determine installer to use.
if [ -n "${mlcp_installer}" ]; then
installer=${mlcp_installer}
elif [ $ml_version == "8" ]; then
installer=mlcp-8.0.7-bin.zip
elif [ $ml_version == "9" ]; then
installer=mlcp-9.0.4-bin.zip
elif [ $ml_version == "7" ]; then
installer=mlcp-7.0-6.4-bin.zip
else
installer=mlcp-1.3-3-bin.zip
fi
echo "Installing MLCP using $installer ..."
install_dir=$(echo $installer | sed -e "s/-bin.zip//g")
if [ ! -d /opt/$install_dir ]; then
cd /opt && unzip "/space/software/$installer"
fi
if [ ! -h /usr/local/mlcp ]; then
echo "setting sym-link: /opt/$install_dir for mlcp"
cd /usr/local && ln -s "/opt/$install_dir" mlcp
fi
elif [ $install_java == "true" ]; then
yum -y install java-1.8.0-openjdk-devel
fi
| vladistan/mlvagrant | opt/mlvagrant/install-mlcp.sh | Shell | apache-2.0 | 1,311 |
#!/bin/sh
docker run -d \
--network=host \
--uts=host \
--shm-size=2g \
--ulimit core=-1 \
--ulimit memlock=-1 \
--ulimit nofile=2448:38048 \
--cap-add=IPC_LOCK \
--cap-add=SYS_NICE \
--env 'username_admin_globalaccesslevel=admin' \
--env 'username_admin_password=admin' \
--name=solace solace-app:latest
| philscanlon/BLUE-GREEN | node-rest-wrapper/RESTGet/run-docker-vmr.sh | Shell | apache-2.0 | 305 |
#!/bin/bash
# Install the Phalcon framework and devtools (run with root privileges).
# ( No longer recommended; see the notes below. )
# @farwish.com BSD-License
# Please follow the latest official documentation instead.
# Example:
#
# git clone -b v3.0.2 https://github.com/phalcon/cphalcon.git
# cd cphalcon/build/php7/64bits/
# /usr/local/php7.0.14/bin/phpize
# ./configure --with-php-config=/usr/local/php7.0.14/bin/php-config
# make && make install
# First step: add extension=phalcon.so to php.ini!
# Then run this script.
# Restart php-fpm.
echo "
----------------------------------------------
| Welcome to the installation script.        |
| Enter the Phalcon and phalcon-devtools     |
| versions below.                            |
----------------------------------------------
"
current_path=`pwd`
cphalcon_url=git://github.com/phalcon/cphalcon.git
phalcon_devtools_url=git://github.com/phalcon/phalcon-devtools.git
phalcon_version=phalcon-v2.0.13
phalcon_tool_version=v2.0.13
read -p "Enter the Phalcon tag to install ( default: $phalcon_version ):" -t 30 cphalcon_tag
read -p "Enter the phalcon-devtools tag to install ( default: $phalcon_tool_version ):" -t 30 phalcon_devtools_tag
if [ -z $cphalcon_tag ]; then
cphalcon_tag=$phalcon_version
fi
if [ -z $phalcon_devtools_tag ]; then
phalcon_devtools_tag=$phalcon_tool_version
fi
if [ ! -d ${current_path}/cphalcon ]; then
yum -y install git
echo "Downloading cphalcon..."
git clone -b ${cphalcon_tag} ${cphalcon_url}
fi
echo "Installing phalcon..."
cd ${current_path}/cphalcon/build
./install
cd ${current_path}
if [ ! -d ${current_path}/phalcon-devtools ]; then
echo "Downloading phalcon-devtools..."
git clone -b ${phalcon_devtools_tag} ${phalcon_devtools_url}
fi
echo "Installing phalcon-devtools..."
sh ${current_path}/phalcon-devtools/phalcon.sh
echo "Adding the phalcon.php command as /usr/bin/phalcon"
ln -s ${current_path}/phalcon-devtools/phalcon.php /usr/bin/phalcon
echo "Complete! "
| farwish/delicateShell | lnmp/installPhalcon.sh | Shell | bsd-2-clause | 1,928 |
KERNCONF=IMX6
TARGET_ARCH=armv6
IMAGE_SIZE=$((1024 * 1000 * 1000))
WANDBOARD_UBOOT_SRC=${TOPDIR}/u-boot-2014.10
WANDBOARD_UBOOT_CONFIG=wandboard_solo_config
WANDBOARD_DT_BASENAME=wandboard-solo
#
# 3 partitions, a reserve one for uboot, a FAT one for the boot loader and a UFS one
#
# the kernel config (WANDBOARD.common) specifies:
# U-Boot stuff lives on slice 1, FreeBSD on slice 2.
# options ROOTDEVNAME=\"ufs:mmcsd0s2a\"
#
wandboard_partition_image ( ) {
disk_partition_mbr
wandboard_uboot_install
disk_fat_create 50m 16 16384
disk_ufs_create
}
strategy_add $PHASE_PARTITION_LWW wandboard_partition_image
#
# Wandboard uses U-Boot.
#
wandboard_check_uboot ( ) {
# Crochet needs to build U-Boot.
uboot_set_patch_version ${WANDBOARD_UBOOT_SRC} ${WANDBOARD_UBOOT_PATCH_VERSION}
uboot_test \
WANDBOARD_UBOOT_SRC \
"$WANDBOARD_UBOOT_SRC/board/wandboard/Makefile"
strategy_add $PHASE_BUILD_OTHER uboot_patch ${WANDBOARD_UBOOT_SRC} `uboot_patch_files`
strategy_add $PHASE_BUILD_OTHER uboot_configure $WANDBOARD_UBOOT_SRC $WANDBOARD_UBOOT_CONFIG
strategy_add $PHASE_BUILD_OTHER uboot_build $WANDBOARD_UBOOT_SRC
}
strategy_add $PHASE_CHECK wandboard_check_uboot
#
# install uboot
#
wandboard_uboot_install ( ) {
echo Installing U-Boot to /dev/${DISK_MD}
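# The i.MX6 boot ROM reads u-boot.imx starting at a 1 KiB offset on the card,
# hence bs=512 seek=2 (assumption based on the standard i.MX6 boot layout).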
dd if=${WANDBOARD_UBOOT_SRC}/u-boot.imx of=/dev/${DISK_MD} bs=512 seek=2
}
#
# ubldr
#
strategy_add $PHASE_BUILD_OTHER freebsd_ubldr_build UBLDR_LOADADDR=0x11000000
strategy_add $PHASE_BOOT_INSTALL freebsd_ubldr_copy_ubldr ubldr
#
# uEnv
#
wandboard_install_uenvtxt(){
echo "Installing uEnv.txt"
cp ${BOARDDIR}/files/uEnv.txt .
}
#strategy_add $PHASE_BOOT_INSTALL wandboard_install_uenvtxt
#
# DTS to FAT file system
#
wandboard_install_dts_fat(){
echo "Installing DTS to FAT"
freebsd_install_fdt $WANDBOARD_DT_BASENAME.dts $WANDBOARD_DT_BASENAME.dts
freebsd_install_fdt $WANDBOARD_DT_BASENAME.dts $WANDBOARD_DT_BASENAME.dtb
}
#strategy_add $PHASE_BOOT_INSTALL wandboard_install_dts_fat
#
# DTS to UFS file system. This is in PHASE_FREEBSD_BOARD_POST_INSTALL b/c it needs to happen *after* the kernel install
#
wandboard_install_dts_ufs(){
echo "Installing DTS to UFS"
freebsd_install_fdt $WANDBOARD_DT_BASENAME.dts boot/kernel/$WANDBOARD_DT_BASENAME.dts
freebsd_install_fdt $WANDBOARD_DT_BASENAME.dts boot/kernel/$WANDBOARD_DT_BASENAME.dtb
}
strategy_add $PHASE_FREEBSD_BOARD_POST_INSTALL wandboard_install_dts_ufs
#
# kernel
#
strategy_add $PHASE_FREEBSD_BOARD_INSTALL board_default_installkernel .
strategy_add $PHASE_FREEBSD_BOARD_INSTALL freebsd_ubldr_copy_ubldr_help boot
#
# Make a /boot/msdos directory so the running image
# can mount the FAT partition. (See overlay/etc/fstab.)
#
strategy_add $PHASE_FREEBSD_BOARD_INSTALL mkdir boot/msdos
#
# build the u-boot scr file
#
strategy_add $PHASE_BOOT_INSTALL uboot_mkimage ${WANDBOARD_UBOOT_SRC} "files/boot.txt" "boot.scr"
| MattDooner/crochet-freebsd | board/Wandboard-Solo/setup.sh | Shell | bsd-2-clause | 2,946 |
#!/bin/bash
cd ..
# Update bashrc
if grep -q "# LO-PHI" ~/.bashrc
then
echo "LO-PHI PYTHONPATH variables are already set."
else
echo "# LO-PHI" >> ~/.bashrc
echo "export PYTHONPATH=\$PYTHONPATH:$PWD/python-lophi-1.0" >> ~/.bashrc
echo "export PYTHONPATH=\$PYTHONPATH:$PWD/python-lophi-semanticgap-1.0" >> ~/.bashrc
echo "export PYTHONPATH=\$PYTHONPATH:$PWD/lophi-net-services-1.0" >> ~/.bashrc
echo "export PYTHONPATH=\$PYTHONPATH:$PWD/lophi-automation-1.0" >> ~/.bashrc
echo "export PYTHONPATH=\$PYTHONPATH:$PWD/lophi-analysis-1.0" >> ~/.bashrc
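# Note: sourcing ~/.bashrc here only affects this script's own shell; open a
# new terminal (or source it manually) for the exports to take effect.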
source ~/.bashrc
fi
| mit-ll/LO-PHI | scripts/setup_dev_environment.sh | Shell | bsd-3-clause | 591 |
#!/bin/bash
myname=$0
cd ${myname%/*}
if [ -z "${BOARD}" ]; then
echo "[ERROR] Please set BOARD."
exit 1
fi
# Remove the existing ebuilds and Manifest
cd ~/trunk/src/third_party/portage-stable/x11-drivers/xf86-video-vmware
rm *.ebuild
rm Manifest
# Copy in the 13.1.0 ebuild
cp /tmp/portage/x11-drivers/xf86-video-vmware/xf86-video-vmware-13.1.0.ebuild .
if [ 0 -ne $? ]; then
echo "[ERROR] Copying the ebuild to portage-stable failed. Aborting."
exit 1
fi
# Regenerate the Manifest
ebuild-x86-pentiumm xf86-video-vmware-13.1.0.ebuild manifest
if [ 0 -ne $? ]; then
echo "[ERROR] Failed to create the Manifest. Aborting."
exit 1
fi
# Run a test compile
ebuild-x86-pentiumm xf86-video-vmware-13.1.0.ebuild compile
if [ 0 -ne $? ]; then
echo "[ERROR] Failed to compile. Aborting."
exit 1
fi
| crosbuilder/CustomBuilds | script/downgrade_xf86-video-vmware-2.sh | Shell | bsd-3-clause | 792 |
#!/bin/sh
# Dumps the call graph, and cil ASTs to the DUMPROOT specified below
# Reads the information from "gcc-log"
# Requires the "duppy" script
# Remember to change RELAYROOT
# Uses the steensgaard callgraph (see dump*.sh to dump anders, etc)
CURROOT=$PWD
DUMPROOT=$PWD/ciltrees
RELAYROOT=/home/jan/research/relay-race
DUPPYROOT=$RELAYROOT/scripts
CILLYROOT=$RELAYROOT/cil/bin
LOG=$DUMPROOT/log.txt
#gcc-log has "cd" and "duppy" commands on each line
CMDS=$PWD/gcc-log.txt
CONFIG=client.cfg.steens
/bin/rm -rf $DUMPROOT
mkdir -p $DUMPROOT
/bin/rm -f $LOG
export CURROOT
export DUMPROOT
export CILLYROOT
export RELAYROOT
SKIP_AFTERCIL=1
export SKIP_AFTERCIL
NODEF=$PWD/nodef.h
export NODEF
# nodef?
# first dump the files
duppy ()
{
echo duppy $*
$DUPPYROOT/duppy $*
}
STARTTIME=$(date +%s)
(. $CMDS) 2>&1 | tee $LOG
# fix variable / struct ids + dump the call graph
cd $RELAYROOT; ./fix_id_cg.exe -su $CONFIG -cg $DUMPROOT >> $LOG 2>&1
# hard-coding the calls file...
cd $RELAYROOT; ./scc_stats.exe -su $CONFIG -cg $DUMPROOT/calls.steens >> $LOG 2>&1
ENDTIME=$(date +%s)
DIFF=$(( $ENDTIME - $STARTTIME ))
echo "Dumped in $DIFF seconds"
| cogumbreiro/relay | scripts/dump-calls.sh | Shell | bsd-3-clause | 1,166 |
#!/bin/bash
#
# build PCRE for iOS and iOS simulator
#
# make sure this is not set
unset MACOSX_DEPLOYMENT_TARGET
# be ridiculously conservative with regard to ios features
export IPHONEOS_DEPLOYMENT_TARGET="4.3"
# exit on error
set -e
ME=`basename $0`
DIR="$( cd "$( dirname "$0" )" && pwd )"
SDK_VER="7.0"
#SDK_VER="5.1"
DEST_DIR="${DIR}/../prebuilt/ios/${SDK_VER}-pcre-build"
if [ ! -f pcre_version.c ]; then
echo
echo "Cannot find pcre_version.c"
echo "Run script from within pcre directory:"
echo "pcre-8.31$ ../../../${ME}"
echo
exit
fi
mkdir -p ${DEST_DIR} &> /dev/null
# see http://stackoverflow.com/questions/2424770/floating-point-comparison-in-shell-script
if [ $(bc <<< "$SDK_VER >= 7.0") -eq 1 ]; then
DEV_ARCHS="-arch armv7 -arch armv7s -arch arm64"
elif [ $(bc <<< "$SDK_VER >= 6.1") -eq 1 ]; then
DEV_ARCHS="-arch armv7 -arch armv7s"
elif [ $(bc <<< "$SDK_VER >= 5.1") -eq 1 ]; then
DEV_ARCHS="-arch armv6 -arch armv7"
else
echo
echo "Building for SDK < 5.1 not supported"
exit
fi
#
# Build for Device
#
if [ ! -d ${DEST_DIR}/device ]; then
TOOLCHAIN_ROOT="/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer"
SYSROOT="${TOOLCHAIN_ROOT}/SDKs/iPhoneOS${SDK_VER}.sdk"
if [ $(bc <<< "$SDK_VER >= 7.0") -eq 1 ]; then
# export CC="/Applications/Xcode.app/Contents/Developer/usr/bin/gcc"
# export CXX="/Applications/Xcode.app/Contents/Developer/usr/bin/g++"
# export CPPFLAGS="-sysroot ${SYSROOT}"
# export CXXCPPFLAGS="-sysroot ${SYSROOT}"
# export LD=${TOOLCHAIN_ROOT}/usr/bin/ld
export LDFLAGS="-isysroot ${SYSROOT} ${DEV_ARCHS} -lstdc++"
else
export CC=${TOOLCHAIN_ROOT}/usr/bin/gcc
export CXX=${TOOLCHAIN_ROOT}/usr/bin/g++
export LD=${TOOLCHAIN_ROOT}/usr/bin/ld\ -r
export CPP=${TOOLCHAIN_ROOT}/usr/bin/cpp
export CXXCPP=${TOOLCHAIN_ROOT}/usr/bin/cpp
export LDFLAGS="-isysroot ${SYSROOT} ${DEV_ARCHS}"
export AR=${TOOLCHAIN_ROOT}/usr/bin/ar
export AS=${TOOLCHAIN_ROOT}/usr/bin/as
export LIBTOOL=${TOOLCHAIN_ROOT}/usr/bin/libtool
export STRIP=${TOOLCHAIN_ROOT}/usr/bin/strip
export RANLIB=${TOOLCHAIN_ROOT}/usr/bin/ranlib
fi
if [ ! -d ${SYSROOT} ]; then
echo
echo "Cannot find iOS developer tools at ${SYSROOT}."
echo
exit
fi
if [ -f Makefile ]; then
make clean
fi
mkdir -p ${DEST_DIR}/device &> /dev/null
./configure \
CFLAGS="-O -isysroot ${SYSROOT} ${DEV_ARCHS}" \
CXXFLAGS="-O -isysroot ${SYSROOT} ${DEV_ARCHS}" \
--disable-dependency-tracking \
--host=arm-apple-darwin10 \
--target=arm-apple-darwin10 \
--disable-shared \
--enable-utf8 \
--prefix=${DEST_DIR}/device
make -j2 install
else
echo
echo "${DEST_DIR}/device already exists - not rebuilding."
echo
fi
#
# Simulator
#
if [ ! -d ${DEST_DIR}/simulator ]; then
TOOLCHAIN_ROOT="/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneSimulator.platform/Developer"
SYSROOT="${TOOLCHAIN_ROOT}/SDKs/iPhoneSimulator${SDK_VER}.sdk"
if [ $(bc <<< "$SDK_VER >= 7.0") -eq 1 ]; then
# export CXX=${TOOLCHAIN_ROOT}/usr/bin/g++
# export CC=${TOOLCHAIN_ROOT}/usr/bin/gcc
# export AR=/Applications/Xcode.app/Contents/Developer/usr/bin/ar
# export AS=/Applications/Xcode.app/Contents/Developer/usr/bin/as
# export LIBTOOL=/Applications/Xcode.app/Contents/Developer/usr/bin/libtool
# export STRIP=/Applications/Xcode.app/Contents/Developer/usr/bin/strip
# export RANLIB=/Applications/Xcode.app/Contents/Developer/usr/bin/ranlib
# export LD=${TOOLCHAIN_ROOT}/usr/bin/ld
export LDFLAGS="-isysroot ${SYSROOT} -arch i386 -lstdc++"
else
export CXX=${TOOLCHAIN_ROOT}/usr/bin/llvm-g++
export CC=${TOOLCHAIN_ROOT}/usr/bin/llvm-gcc
export AR=${TOOLCHAIN_ROOT}/usr/bin/ar
export AS=${TOOLCHAIN_ROOT}/usr/bin/as
export LIBTOOL=${TOOLCHAIN_ROOT}/usr/bin/libtool
export LDFLAGS="-isysroot ${SYSROOT} -arch i386"
export STRIP=${TOOLCHAIN_ROOT}/usr/bin/strip
export RANLIB=${TOOLCHAIN_ROOT}/usr/bin/ranlib
export LD=${TOOLCHAIN_ROOT}/usr/bin/ld\ -r
fi
if [ ! -d ${SYSROOT} ]; then
echo
echo "Cannot find iOS developer tools at ${SYSROOT}."
echo
exit
fi
if [ -f Makefile ]; then
make clean
fi
mkdir -p ${DEST_DIR}/simulator &> /dev/null
./configure \
CFLAGS="-O -isysroot ${SYSROOT} -arch i386" \
CXXFLAGS="-O -isysroot ${SYSROOT} -arch i386" \
--disable-dependency-tracking \
--disable-shared \
--enable-utf8 \
--prefix=${DEST_DIR}/simulator
make -j2 install
else
echo
echo "${DEST_DIR}/simulator already exists - not rebuilding."
echo
fi
cp ${DEST_DIR}/device/include/* ${DIR}/../prebuilt/ios/include
echo
echo "- Creating universal binaries --------------------------------------"
echo
LIBS=`find ${DIR}/../prebuilt/ios/*pcre-build* -name *.a`
set +e
for LIB in ${LIBS}; do
LIB_BASE=`basename $LIB .a`
ARCHS=`xcrun -sdk iphoneos lipo -info $LIB`
ARCHS=`expr "$ARCHS" : '.*:\(.*\)$'`
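# 'lipo -info' prints e.g. "Architectures in the fat file: libpcre.a are: armv7 arm64";
# the expr above keeps only the architecture list after the final colon.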
for ARCH in ${ARCHS}; do
mkdir -p ${DIR}/../prebuilt/ios/arch/${ARCH} > /dev/null
xcrun -sdk iphoneos lipo -extract $ARCH $LIB -output ${DIR}/../prebuilt/ios/arch/${ARCH}/${LIB_BASE}.a \
|| cp $LIB ${DIR}/../prebuilt/ios/arch/${ARCH}/${LIB_BASE}.a
UNIQUE_LIBS=`ls ${DIR}/../prebuilt/ios/arch/${ARCH}`
done
done
for LIB in ${UNIQUE_LIBS}; do
FILELIST=""
for ARCH in `ls ${DIR}/../prebuilt/ios/arch/`; do
FILELIST="${FILELIST} ${DIR}/../prebuilt/ios/arch/${ARCH}/${LIB}"
done
xcrun -sdk iphoneos lipo -create ${FILELIST} -output ${DIR}/../prebuilt/ios/lib/${LIB}
done
rm -rf ${DIR}/../prebuilt/ios/arch/
| tklab-tud/umundo | contrib/build-scripts/build-pcre-ios.sh | Shell | bsd-3-clause | 5,631 |
#!/bin/bash
#set -x
#
# Copyright (c) 2013-2014, Microsoft Mobile
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the {organization} nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
source $(dirname $0)/utils.sh
# Minify JS and CSS files using YUI compressor if Java is present.
# Otherwise just link the min versions to full versions.
# 1. parameter is file type css|js
# 2. parameter is directory path
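# Example call (hypothetical path): minify js htdocs/scripts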
function minify {
if command_exists java; then
echo "Minifying $1 files ..."
regexp=".$1$:.min.$1"
java -jar $(dirname $0)/$YUI --type $1 -o $regexp $2/*.$1
else
echo "No Java found, just copy & rename JS and CSS files..."
for f in $2/*.$1; do
target=${f%.*}.min.${f##*.}
if [ ! -e $target ]; then
cp $f $target
fi
done
fi
}
|
mabrosim/Haggard
|
tools/minify.sh
|
Shell
|
bsd-3-clause
| 2,232 |
#!/bin/bash
usage="\
Usage: $0 <extract|update|compile>
Extract or compile gettext messages."
if [ -z "$1" ]; then
echo "$usage"
elif [ $1 == "extract" ]; then
echo "Extracting messages into the POT template…"
pybabel extract -F configs/babel.cfg -o tzos/translations/messages.pot tzos/ -c l10n -k _l
elif [ $1 == "update" ]; then
echo "Updating languages from the latest template…"
pybabel update -i tzos/translations/messages.pot -d tzos/translations/
elif [ $1 == "compile" ]; then
echo "Compiling translations…"
pybabel compile -d tzos/translations/
else
echo "Unrecognized option."
echo
echo "$usage"
fi
|
julen/tzos
|
gettext.sh
|
Shell
|
bsd-3-clause
| 658 |
#!/bin/rshell
#tests commands with single commands
./bin/rshell < single_command
|
rrios006/rshell
|
tests/single_command.sh
|
Shell
|
bsd-3-clause
| 84 |
#!/bin/bash
source etc/hosts.cfg
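# Example (assumed config): a line HOSTS_WEB=(web01 web02) in etc/hosts.cfg
# makes this loop print web01 and web02 on separate lines.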
for var in $(grep -o "HOSTS_.*=" etc/hosts.cfg|sed 's/=//g'); do
eval qtd="\${#${var}[@]}"
for((i=0;$i<${qtd};i++)); do
eval echo \${${var}[$i]}
done
done
|
lborguetti/scripts.utils
|
GetParameterForVetorList/GetParameterForVetorList.sh
|
Shell
|
bsd-3-clause
| 195 |
#!/bin/bash
gcc -std=c99 -o main main.c
|
sinomiko/project
|
cpp_project/basic-skills/int2char/exe.sh
|
Shell
|
bsd-3-clause
| 41 |
#!/bin/bash -e
PLUGIN=infamousPlugins
GIT_URI="https://github.com/BlokasLabs/${PLUGIN} -b gui"
TMP_DIR=/tmp/${PLUGIN}
CMAKE_TOOLCHAIN=$(mktemp /tmp/cmake.toolchain.XXXXXXX)
cat << EOT > $CMAKE_TOOLCHAIN
SET(CMAKE_SYSTEM_NAME Linux)
SET(CMAKE_SYSTEM_VERSION 1)
SET(CMAKE_C_COMPILER /usr/bin/arm-linux-gnueabihf-gcc)
SET(CMAKE_CXX_COMPILER /usr/bin/arm-linux-gnueabihf-g++)
SET(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
SET(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
SET(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
EOT
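# The generated toolchain file pins the ARM cross compilers and keeps library
# and header lookups inside the cross root, while build programs are still
# found on the host.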
export CMAKE_TOOLCHAIN
rm -rf ${TMP_DIR}
git clone --depth 1 ${GIT_URI} ${TMP_DIR}
pushd ${TMP_DIR}
mkdir build
cd build
cmake .. -DCMAKE_TOOLCHAIN_FILE=${CMAKE_TOOLCHAIN} -DCMAKE_INSTALL_PREFIX=${TMP_DIR}/install
make VERBOSE=1 -j4
make install
PLUGINS_TO_INSTALL="ewham.lv2 powercut.lv2 hip2b.lv2 stuck.lv2"
for p in ${PLUGINS_TO_INSTALL}; do
mv ${TMP_DIR}/install/lib/lv2/${p} ${LV2_DIR}/
done
popd
rm -rf ${TMP_DIR}
|
BlokasLabs/modep
|
stage5/05-infamousPlugins/01-run-chroot.sh
|
Shell
|
bsd-3-clause
| 972 |
#!/usr/bin/env bash
#
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
export LC_ALL=C.UTF-8
TRAVIS_COMMIT_LOG=$(git log --format=fuller -1)
export TRAVIS_COMMIT_LOG
OUTDIR=$BASE_OUTDIR/$TRAVIS_PULL_REQUEST/$TRAVIS_JOB_NUMBER-$HOST
BITCOIN_CONFIG_ALL="--disable-dependency-tracking --prefix=$TRAVIS_BUILD_DIR/depends/$HOST --bindir=$OUTDIR/bin --libdir=$OUTDIR/lib"
if [ -z "$NO_DEPENDS" ]; then
DOCKER_EXEC ccache --max-size=$CCACHE_SIZE
fi
BEGIN_FOLD autogen
if [ -n "$CONFIG_SHELL" ]; then
DOCKER_EXEC "$CONFIG_SHELL" -c "./autogen.sh"
else
DOCKER_EXEC ./autogen.sh
fi
END_FOLD
mkdir build
cd build || (echo "could not enter build directory"; exit 1)
BEGIN_FOLD configure
DOCKER_EXEC ../configure --cache-file=config.cache $BITCOIN_CONFIG_ALL $BITCOIN_CONFIG || ( cat config.log && false)
END_FOLD
BEGIN_FOLD distdir
DOCKER_EXEC make distdir VERSION=$HOST
END_FOLD
cd "myriadcoin-$HOST" || (echo "could not enter distdir myriadcoin-$HOST"; exit 1)
BEGIN_FOLD configure
DOCKER_EXEC ./configure --cache-file=../config.cache $BITCOIN_CONFIG_ALL $BITCOIN_CONFIG || ( cat config.log && false)
END_FOLD
set -o errtrace
trap 'DOCKER_EXEC "cat ${TRAVIS_BUILD_DIR}/sanitizer-output/* 2> /dev/null"' ERR
BEGIN_FOLD build
DOCKER_EXEC make $MAKEJOBS $GOAL || ( echo "Build failure. Verbose build follows." && DOCKER_EXEC make $GOAL V=1 ; false )
END_FOLD
cd ${TRAVIS_BUILD_DIR} || (echo "could not enter travis build dir $TRAVIS_BUILD_DIR"; exit 1)
|
myriadteam/myriadcoin
|
.travis/test_06_script_a.sh
|
Shell
|
mit
| 1,595 |
java -jar xbox-api-restful-shell-0.0.1-SNAPSHOT.jar
|
elminsterjimmy/XBoxApi
|
RESTfulShell/shell/start.sh
|
Shell
|
mit
| 51 |
#!/bin/bash -x
TEMPLATE_DIR=${CPP_U_TEST}/scripts/templates
LIBRARY=$1
if [ -e ${LIBRARY} ] ; then
echo "The directory ${LIBRARY} already exists"
exit 1;
fi
echo "Copy template project to ${LIBRARY}"
cp -R ${TEMPLATE_DIR}/ProjectTemplate/Project ${LIBRARY}
find ${LIBRARY} -name \.svn | xargs rm -rf
echo "Update to the new LIBRARY name"
substituteProjectName="-e s/Project/${LIBRARY}/g -i .bak"
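# e.g. LIBRARY=Timer (hypothetical) rewrites every "Project" to "Timer", keeping .bak backups.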
cd ${LIBRARY}
sed ${substituteProjectName} *.*
sed ${substituteProjectName} Makefile
for name in BuildTime.h BuildTime.cpp BuildTimeTest.cpp ; do
mv Project${name} ${LIBRARY}${name}
done
cd ..
sed -e "s/DIRS = /DIRS = ${LIBRARY} /g" -i .bak Makefile
find ${LIBRARY} -name \*.bak | xargs rm -f
echo "#include \"../${LIBRARY}/AllTests.h\"" >> AllTests/AllTests.cpp
echo "You have to manually add the library reference to the AllTests Makefile"
echo "and maybe change the order of the library builds in the main Makefile"
|
Pindar/common-data-structures-in-c
|
CppUTest/scripts/NewLibrary.sh
|
Shell
|
mit
| 931 |
#!/bin/bash
python ../weaknet.py -r local -s demo -R 127.0.0.1 -w 1 $@
|
vietor/pyweaknet
|
test/start_local.sh
|
Shell
|
mit
| 72 |
#!/bin/bash
go clean
go get && go build
./enforcer -tls -addr=0.0.0.0:443
|
pandrew/enforcer-base
|
hack/maketls.sh
|
Shell
|
mit
| 75 |
#!/bin/bash
#
# This script signs already built AOSP Android jars, and installs them in your local
# Maven repository. See: http://source.android.com/source/building.html for
# more information on building AOSP.
#
# Usage:
#   install-android-prebuilt.sh <jar directory path> <android version> <robolectric version>
#
set -ex
function usage() {
echo "Usage: ${0} <jar dir path> <android-version> <robolectric-sub-version>"
}
if [[ $# -ne 3 ]]; then
usage
exit 1
fi
if [[ -z "${SIGNING_PASSWORD}" ]]; then
echo "Please set the GPG passphrase as SIGNING_PASSWORD"
exit 1
fi
JAR_DIR=$1
ANDROID_VERSION=$2
ROBOLECTRIC_SUB_VERSION=$3
SCRIPT_DIR=$(cd $(dirname "$0"); pwd)
ROBOLECTRIC_VERSION=${ANDROID_VERSION}-robolectric-${ROBOLECTRIC_SUB_VERSION}
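# e.g. ANDROID_VERSION=7.1.0_r7 and ROBOLECTRIC_SUB_VERSION=0 (hypothetical) give 7.1.0_r7-robolectric-0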
# Final artifact names
ANDROID_ALL=android-all-${ROBOLECTRIC_VERSION}.jar
ANDROID_ALL_POM=android-all-${ROBOLECTRIC_VERSION}.pom
ANDROID_ALL_SRC=android-all-${ROBOLECTRIC_VERSION}-sources.jar
ANDROID_ALL_DOC=android-all-${ROBOLECTRIC_VERSION}-javadoc.jar
ANDROID_BUNDLE=android-all-${ROBOLECTRIC_VERSION}-bundle.jar
generate_empty_src_javadoc() {
TMP=`mktemp --directory`
cd ${TMP}
jar cf ${JAR_DIR}/${ANDROID_ALL_DOC} .
jar cf ${JAR_DIR}/${ANDROID_ALL_SRC} .
cd ${JAR_DIR}; rm -rf ${TMP}
}
build_signed_packages() {
echo "Robolectric: Building android-all.pom..."
sed s/VERSION/${ROBOLECTRIC_VERSION}/ ${SCRIPT_DIR}/pom_template.xml | sed s/ARTIFACT_ID/android-all/ > ${JAR_DIR}/${ANDROID_ALL_POM}
echo "Robolectric: Signing files with gpg..."
for ext in ".jar" "-javadoc.jar" "-sources.jar" ".pom"; do
( cd ${JAR_DIR} && gpg -ab --passphrase ${SIGNING_PASSWORD} android-all-${ROBOLECTRIC_VERSION}$ext )
done
echo "Robolectric: Creating bundle for Sonatype upload..."
cd ${JAR_DIR}; jar cf ${ANDROID_BUNDLE} *.jar *.pom *.asc
}
mavenize() {
local FILE_NAME_BASE=android-all-${ROBOLECTRIC_VERSION}
mvn install:install-file \
-Dfile=${JAR_DIR}/${FILE_NAME_BASE}.jar \
-DgroupId=org.robolectric \
-DartifactId=android-all \
-Dversion=${ROBOLECTRIC_VERSION} \
-Dpackaging=jar
mvn install:install-file \
-Dfile=${JAR_DIR}/${FILE_NAME_BASE}-sources.jar \
-DgroupId=org.robolectric \
-DartifactId=android-all \
-Dversion=${ROBOLECTRIC_VERSION} \
-Dpackaging=jar \
-Dclassifier=sources
mvn install:install-file \
-Dfile=${JAR_DIR}/${FILE_NAME_BASE}-javadoc.jar \
-DgroupId=org.robolectric \
-DartifactId=android-all \
-Dversion=${ROBOLECTRIC_VERSION} \
-Dpackaging=jar \
-Dclassifier=javadoc
}
generate_empty_src_javadoc
build_signed_packages
mavenize
echo "DONE!!"
|
spotify/robolectric
|
scripts/install-android-prebuilt.sh
|
Shell
|
mit
| 2,728 |
#!/usr/bin/bash
OUTPUT=$( mktemp ) # no-reboot
find . -type f | grep -v -e runtest.sh -e check-tempfiles.sh -e '.git' -e '\.swp' -e 'src/test' -e '\.pyc' -e 'Build/' | \
xargs grep -e mktemp -e mkstemp -e '/tmp/' | \
grep -v -e "# no-reboot" -e "__INTERNAL_PERSISTENT_TMP" &> $OUTPUT
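# grep exits 0 when it found matches, i.e. RC=0 means non-annotated temp file usage exists.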
RC=$?
if [ $RC -eq 0 ]
then
echo "Several non-annotated temporary file usages found:"
echo "=================================================="
cat $OUTPUT
echo "=================================================="
echo "Please annotate intentional /tmp directory usage with # no-reboot"
echo "comment, or change the directory to \$__INTERNAL_PERSISTENT_TMP"
rm -f $OUTPUT
exit 1
fi
rm -f $OUTPUT
|
petr-muller/beakerlib
|
check-tempfiles.sh
|
Shell
|
gpl-2.0
| 706 |
#!/usr/bin/env bash
#
# audio_menu.sh - Ripping/encode script.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# $Id: audio_menu.sh 28 2007-01-06 17:05:14Z k0k $
# Read in language file
source ${BBROOTDIR}/lang/${BBLANG}/audio_menu.lang
# Read in common functions
source ${BBROOTDIR}/misc/commonfunctions.sh
# Function: This function lets you swap cds if you only have one device. {{{1
#-----------------------------------------------------------------------------
# (CD writer and CD reader are the same device.)
function insert_new_CD()
{
while true; do
echo $bb_am_enter_2
read temp
if [[ "$temp" = "" ]]; then
break
else
continue
fi
done
}
# Function: Looks for mp3. {{{1
#-----------------------------------------------------------------------------
function check_for_mp3s()
{
cd ${BBBURNDIR}
while read MPTHREE; do
existing="yes"
done < <(find ${BBBURNDIR} -iname "*.[Mm][Pp]3" | sort)
if [[ "$existing" != "yes" ]]; then
StatusBar "$bb_am_nomp3s${BBBURNDIR}"
else
${BBROOTDIR}/convert/convert_mp3s.sh
fi
}
# Function: Checks for ogg files. {{{1
#-----------------------------------------------------------------------------
function check_for_oggs()
{
cd ${BBBURNDIR}
while read OGGS; do
existing="yup"
done < <(find ${BBBURNDIR} -iname "*.[Oo][Gg][Gg]" | sort)
if [[ "$existing" != "yup" ]]; then
StatusBar "$bb_am_nooggs${BBBURNDIR}"
else
${BBROOTDIR}/convert/convert_oggs.sh
fi
}
# Function: Checks for flac files. {{{1
#-----------------------------------------------------------------------------
function check_for_flacs()
{
cd ${BBBURNDIR}
while read FLACS; do
existing="aight"
done < <(find ${BBBURNDIR} -iname "*.[Ff][Ll][Aa][Cc]" | sort)
if [[ "$existing" != "aight" ]]; then
StatusBar "$bb_am_noflacs${BBBURNDIR}"
else
${BBROOTDIR}/convert/convert_flacs.sh
fi
}
# Function: Adjust the volume of wav audio files to a standard volume level. {{{1
#-----------------------------------------------------------------------------
function normalization()
{
if [[ "$BBNORMALIZE" = "yes" ]]; then
cd ${BBBURNDIR}
for i in *.wav; do
echo;echo -e "${BBTABLECOLOR}|>${BBSUBCOLOR}$bb_am_norm_1$i...${BBCOLOROFF}";
${BB_NORMCMD} -v -m $i;
done
fi
}
# Function: Validate the input of "y" or "n". {{{1
#-----------------------------------------------------------------------------
function conf_yes_no()
{
unset ANSWER
while [ "${ANSWER}" != 'y' ] && [ "${ANSWER}" != 'n' ]
do
echo -n $bb_am_conf_2
read ANSWER
done
}
# Function: Control errors. {{{1
#-----------------------------------------------------------------------------
function conf_error()
{
STDERROR=$?
# If there is any error return to main menu.
if [[ ${STDERROR} -ne 0 ]]; then
echo -e "${BBTABLECOLOR}$bb_am_err_1${BBCOLOROFF}"
sleep 3
exit
fi
}
# Function: Validate confirmation of song names. {{{1
#-----------------------------------------------------------------------------
function confirmation()
{
echo
if [[ ! -f "${BBBURNDIR}/song_name.txt" ]]; then
exit
else
echo -e "${BBTABLECOLOR}|>${BBMAINCOLOR}$bb_am_conf_1${BBCOLOROFF}"
cat -n ${BBBURNDIR}/song_name.txt
echo -e "${BBSUBCOLOR}"
conf_yes_no # Valid input.
echo -e "${BBCOLOROFF}"
if [[ ${ANSWER} = 'n' ]];
then
rm -f ${BBBURNDIR}/song_name.txt
rm -f ${BBBURNDIR}/tracks.txt
exit
fi
fi
}
# Function: Interactive naming of files. {{{1
#-----------------------------------------------------------------------------
function named()
{
  # Delete old lists of ripped songs.
  rm -f ${BBBURNDIR}/*.txt
  # cdda2wav shows information about the tracks to rip;
  # it is handy for seeing which tracks to rip and how
  # long each of them runs.
echo -e "${BBSUBCOLOR}$bb_am_named_1${BBCOLOROFF}"
sleep 1s
${BB_CDAUDIORIP} -D ${BBCDROM} -H -J -v toc
# If there is any error return to main menu.
conf_error
TRACK=0
while [ "${TRACK}" != "" ]; do
echo;echo -en "${BBMAINCOLOR}$bb_am_named_2"
echo;echo -en "${BBMAINCOLOR}$bb_am_named_3${BBTABLECOLOR}|>${BBCOLOROFF} "
read TRACK
if [ "${TRACK}" != "" ];
then
      # Zero-pad the track number so it matches the two-digit format shown at the prompt.
      number_track=`printf '%02d' ${TRACK}`
      # Append the track number read from standard input to tracks.txt.
echo "${number_track}" >> ${BBBURNDIR}/tracks.txt
else
      # If nothing was entered at the prompt, the while condition ends the loop.
continue
fi
echo
echo -e "${BBMAINCOLOR}$bb_am_named_4"
echo -e "${BBMAINCOLOR}$bb_am_named_5"
echo -en "${BBMAINCOLOR}$bb_am_named_6${number_track} ${BBTABLECOLOR}|>${BBCOLOROFF} "
read song_name
    # If song_name was left blank, fall back to a name based on the track number.
if [[ "${song_name}" = "" ]]; then
song_name=`echo "${number_track}.-Track"`
else
      # Strip special characters that make file naming awkward in the shell.
      # See the sed man page for how these expressions work.
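      # Example: a hypothetical input like "Track (live)?*" becomes "Track live".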
song_name=`echo "$song_name" | sed -e 's/(//g' \
-e 's/)//g' -e 's/*//g' \
-e 's/?//g' -e 's/¿//g' \
-e 's/\///g' -e 's/&//g'`
fi
    # Append the song name read from standard input to song_name.txt.
echo ${song_name} >> ${BBBURNDIR}/song_name.txt
done
}
# Function: Rip the tracks or songs selects. {{{1
#-----------------------------------------------------------------------------
function rip()
{
confirmation
cd ${BBBURNDIR}
track=0
while [ "${track}" != "" ]; do
    # Read the next track to rip from the list file in the temp directory.
track=`sed -ne '1p' ${BBBURNDIR}/tracks.txt`
if [[ "${track}" = "" ]]; then
continue
else
echo -e "${BBTABLECOLOR}|>${BBSUBCOLOR}$bb_am_rip_1${track}...${BBCOLOROFF}"
# Begin Rip.
${BB_CDAUDIORIP} -D ${BBCDROM} -x -t ${track} -O wav ${track} #Changed from cdparanoia to cdda2wav
sleep 2s
      # These two lines append '.wav' to the track and song_name values for the rename.
track=`sed -ne '1p' ${BBBURNDIR}/tracks.txt | sed -e 's/$/.wav/g'`
song_name=`sed -ne '1p' ${BBBURNDIR}/song_name.txt | sed -e 's/$/.wav/g'`
      # Rename the ripped track to the name the user entered at the prompt.
mv "${track}" "${song_name}"
# Remove the song that has been ripped.
sed -e '1d' ${BBBURNDIR}/song_name.txt >> ${BBBURNDIR}/temp_song.txt
mv ${BBBURNDIR}/temp_song.txt ${BBBURNDIR}/song_name.txt
sed -e '1d' ${BBBURNDIR}/tracks.txt >> ${BBBURNDIR}/temp_tracks.txt
mv ${BBBURNDIR}/temp_tracks.txt ${BBBURNDIR}/tracks.txt
fi
done
# Remove temp files.
rm -f ${BBBURNDIR}/tracks.txt
rm -f ${BBBURNDIR}/song_name.txt
rm -f ${BBBURNDIR}/*.inf
eject ${BBCDROM}
echo -e "${BBSUBCOLOR}$bb_am_rip_2${BBCOLOROFF}"
sleep 2s
}
# Function: Encode Filter Command. {{{1
#-----------------------------------------------------------------------------
function encode_filter()
{
if [[ "$ENCODEFILTER" != "" ]]; then
echo -e "${BBTABLECOLOR}|>${BBSUBCOLOR}$bb_am_encfilt(${ENCODEFILTER})${BBCOLOROFF}"
    ${ENCODEFILTER} ${BBBURNDIR}/*.${format}
fi
}
# Function: Copy an audio cd. {{{1
#-----------------------------------------------------------------------------
function copy_audio_cd()
{
cd ${BBBURNDIR}
if ${BB_CDAUDIORIP} -D ${BBCDROM} -v all -B -Owav; then #Changed from cdparanoia to cdda2wav
eject ${BBCDROM}
StatusBar "$bb_am_rip_2"
# Normalize WAV's.
normalization
if [[ ${BBNUMDEV} == 1 ]]; then #Check number of devices
insert_new_CD
fi
if eval "${BB_CDBURNCMD} -v dev=${BBCDWRITER} speed=${BBSPEED} \
${BBOPT_ONE:+\"driveropts=$BBOPT_ONE\"} ${BBDTAO} \
-useinfo ${BBBURNDIR}/*.[Ww][Aa][Vv]"; then
StatusBar "$bb_am_ch3_1"
ShowWarn && wait_for_enter
else
StatusBar "$bb_am_ch3_2"
ShowWarn && wait_for_enter
fi
else
StatusBar "$bb_am_ch3_3${BBCDROM}"
ShowWarn && wait_for_enter
fi
}
# Function: Copy an audio to HD. {{{1
#-----------------------------------------------------------------------------
function copy_cd_to_hd()
{
MakeTempFile
cd ${BBBURNDIR}
${BB_CDAUDIORIP} -D ${BBCDROM} -v all -B -Owav > ${TMPFILE} 2>&1 &
$DIALOG --backtitle " ${BACKTITLE} " --title " INFORMATION " \
--tailbox ${TMPFILE} 24 70
StatusBar "Eject ${BBCDROM}" 1.5
eject ${BBCDROM}
# Normalize WAV's.
normalization
$DIALOG --backtitle " ${BACKTITLE} " --title " INFORMATION " \
--msgbox "$bb_am_ch4_1${BBBURNDIR}.$bb_am_ch4_2 $bb_am_ch4_3" 0 0
}
# Function: Create Mp3s from Wavs in BURNDIR. {{{1
#-----------------------------------------------------------------------------
function create_mp3s_from_wavs()
{
cd ${BBBURNDIR}
while read WAV; do
if ${BB_MP3ENC} --preset cd "${WAV}" "${WAV%%.wav}.mp3"; then
StatusBar "${WAV%%.wav}.mp3$bb_am_ch6_1"
else
StatusBar "${WAV}:$bb_am_ch6_2"
fi
existing="yes"
done < <(find "${BBBURNDIR}" -iname "*.[Ww][Aa][Vv]" | sort)
if [[ "$existing" != "yes" ]]; then
StatusBar "$bb_am_ch6_3${BBBURNDIR}"
else
# Encode Filter Command.
format=mp3
encode_filter
fi
sleep 2s
continue
}
# Function: Create Oggs from Wavs in BURNDIR. {{{1
#-----------------------------------------------------------------------------
function create_oggs_from_wavs()
{
cd ${BBBURNDIR}
while read WAV; do
echo
if ${BB_OGGENC} -b ${BBBITRATE} "${WAV}"; then
StatusBar "$bb_am_ch7_1"
else
StatusBar "${WAV}:$bb_am_ch6_2"
fi
echo
existing="yes"
done < <(find "${BBBURNDIR}" -iname "*.[Ww][Aa][Vv]" | sort)
if [ "$existing" != "yes" ]; then
StatusBar "$bb_am_ch6_3${BBBURNDIR}"
else
# Encode Filter Command.
format=ogg
encode_filter
fi
sleep 2s
continue
}
# Function: Create flacs from Wavs in BURNDIR {{{1
#-----------------------------------------------------------------------------
function create_flacs_from_wavs()
{
cd ${BBBURNDIR}
while read WAV; do
echo
if ${BB_FLACCMD} "${WAV}"; then
echo $bb_am_ch7_1
else
echo "${WAV}:$bb_am_ch6_2"
fi
echo
existing="yes"
done < <(find "${BBBURNDIR}" -iname "*.[Ww][Aa][Vv]" | sort)
if [[ "$existing" != "yes" ]]; then
StatusBar "$bb_am_ch6_3${BBBURNDIR}"
else
#Encode Filter command
format=flac
encode_filter
fi
sleep 2s
continue
}
# Function: Create Mp3s from an audio cd. {{{1
#-----------------------------------------------------------------------------
function create_mp3s_from_cd()
{
#First, name and rip the tracks
# Give name to the tracks.
named
# Rip the tracks in wav audio file.
rip
# Normalize WAV's.
normalization
#Now create the Mp3s
while read WAV; do
echo;echo -e "${BBTABLECOLOR}|>${BBSUBCOLOR}$bb_am_ch9_1${BBCOLOROFF}"
if ${BB_MP3ENC} --preset cd ${WAV} ${WAV%%.wav}.mp3; then
StatusBar "${WAV%%.wav}.mp3$bb_am_ch6_1"
else
StatusBar "${WAV}:$bb_am_ch6_2"
fi
existing="yes"
done < <(find "$BURNDIR" -iname "*.[Ww][Aa][Vv]" | sort)
if [[ "$existing" != "yes" ]]; then
StatusBar "$bb_am_ch6_3${BBBURNDIR}" 2
continue
else
# Encode Filter Command.
format=mp3
encode_filter
fi
StatusBar "$bb_am_ch9_2${BBBURNDIR}"
ShowWarn && rm ${BBBURNDIR}/*.[Ww][Aa][Vv]
wait_for_enter
}
# Function: Create Oggs from an audio cd. {{{1
#-----------------------------------------------------------------------------
function create_oggs_from_cd()
{
#First, name and rip the tracks
# Give name to the tracks.
named
# Rip the tracks in wav audio file.
rip
# Normalize WAV's.
normalization
#Now create the Oggs.
while read WAV; do
echo;echo -e "${BBTABLECOLOR}|>${BBSUBCOLOR}$bb_am_ch10_1${BBCOLOROFF}"
if ${BB_OGGENC} -b ${BBBITRATE} "${WAV}"; then
echo $bb_am_ch7_1
else
echo "${WAV}:$bb_am_ch6_2"
fi
echo
existing="yes"
done < <(find "${BBBURNDIR}" -iname "*.[Ww][Aa][Vv]" | sort)
if [[ "$existing" != "yes" ]]; then
StatusBar "$bb_am_ch6_3${BBBURNDIR}" 2
continue
else
# Encode Filter Command.
format=ogg
encode_filter
fi
echo "$bb_am_ch10_2${BBBURNDIR}"
rm ${BBBURNDIR}/*.[Ww][Aa][Vv]
wait_for_enter
}
# Function: Create flacs from cd. {{{1
#-----------------------------------------------------------------------------
function create_flacs_from_cd()
{
# Give name to the tracks.
named
# Rip the tracks in wav audio file.
rip
# Normalize WAV's.
normalization
# Now create Flacs
while read WAV; do
echo
if ${BB_FLACCMD} "${WAV}"; then
echo $bb_am_ch7_1
else
echo "${WAV}:$bb_am_ch6_2"
fi
echo
existing="yes"
done < <(find "${BBBURNDIR}" -iname "*.[Ww][Aa][Vv]" | sort)
if [[ "$existing" != "yes" ]]; then
StatusBar "$bb_am_ch6_3${BBBURNDIR}" 2
continue
else
    # Encode Filter Command.
format=flac
encode_filter
fi
echo "$bb_am_ch11_1${BBBURNDIR}"
rm ${BBBURNDIR}/*.[Ww][Aa][Vv]
wait_for_enter
}
# Run: Main part. {{{1
#-----------------------------------------------------------------------------
####PROGRAM START#####
MakeTempFile
while true; do
# <menu>
$DIALOG $OPTS --help-label "$bb_help_button" \
--backtitle "${BACKTITLE}" --begin 2 2 \
--title " $bb_am_menu_title " \
--cancel-label $bb_return \
--menu "$bb_menu_input" 0 0 0 \
"1)" "$bb_am_menu_1" \
"2)" "$bb_am_menu_2" \
"3)" "$bb_am_menu_3" \
"4)" "$bb_am_menu_4" \
"5)" "$bb_am_menu_5" \
"6)" "$bb_am_menu_6" \
"7)" "$bb_am_menu_7" \
"8)" "$bb_am_menu_8" \
"9)" "$bb_am_menu_9" \
"10)" "$bb_am_menu_10" \
"11)" "$bb_am_menu_11" 2> ${TMPFILE}
STDOUT=$? # Return status
EventButtons
ReadAction
case $action in
1\)) # Burn Audio from Mp3s
check_for_mp3s
check_for_oggs
check_for_flacs
${BBROOTDIR}/burning/burning.sh --audio
;;
2\)) # Burn Audio Directly
${BBROOTDIR}/burning/burning.sh --pipeline
;;
3\))
copy_audio_cd
;;
4\))
copy_cd_to_hd
;;
5\)) # Burn a xmms playlist
if eval ${BBROOTDIR}/misc/xmmsread.sh; then
${BBROOTDIR}/burning/burning.sh --audio
else
echo $bb_am_ch5
wait_for_enter
fi
;;
6\))
create_mp3s_from_wavs
;;
7\))
create_oggs_from_wavs
;;
8\))
create_flacs_from_wavs
;;
9\))
create_mp3s_from_cd
;;
10\))
create_oggs_from_cd
;;
11\))
create_flacs_from_cd
;;
esac
done
# vim: set ft=sh nowrap nu foldmethod=marker:
|
ashumkin/MyBashBurn
|
menus/audio_menu.sh
|
Shell
|
gpl-2.0
| 15,527 |
#!/bin/bash -e
# This script is based on subsurface's packaging/windows/mxe-based-build.sh and
# works in the same fashion. Building needs to be done in a directory outside
# the source tree; please refer to said script for instructions on how to
# build.
#
# Subsurface *MUST* have been built before running this script, as the importer
# links against the libsubsurface_corelib.a library.
# Although it is possible to build the latest git version of the importer against
# any other version of subsurface, this should be avoided, and both versions,
# subsurface and smtk-import, should be the same.
#
# Flags and options:
# -i (--installer): Packs a windows installer. This should always be used.
# -t (--tag): Defines which git version we want to build. Defaults to
# latest. E.g. -t v4.6.4
# -b (--build): Values: debug or release. Defines the build we want to do.
# -d (--dir): Specify a directory where a copy of the installer will be
# placed. This is a *must* if the script runs in a VM, and
# refers -usually- to a local dir mounted on the VM.
#
# Examples: (provided Subsurface has been previously cross built)
#
# smtk2ssrf-mxe-build.sh -i -t master
# This will build an release installer of smtk2ssrf placed in a directory under
# the win-build directory where it has been launched, named smtk-import. It will
# build git latest master regardless of subsurface's cross built version.
#
# smtk2ssrf-mxe-build.sh -b debug
# This will build *just* a windows binary (no packing) of the latest master.
#
# smtk2ssrf-mxe-build.sh -i -t v4.6.4 -b release -d /mnt/data
# As I'm building in a fedora-25 docker VM, this should bring up a release
# installer of the v4.6.4 tag, and put a copy in my local mounted dir. In
# fact this *should* fail to build because of portability issues in v4.6.4.
#
exec 1> >(tee ./winbuild_smtk2ssrf.log) 2>&1
# for debugging
# trap "set +x; sleep 1; set -x" DEBUG
# Set some colors for pretty output
#
BLUE="\033[0;34m"
RED="\033[0;31m"
LIGHT_GRAY="\033[0;37m"
DEFAULT="\033[0m"
SSRF_TAG=""
RELEASE="Release"
# This is important if we are building in a VM or if we want to get a copy
# of the installer somewhere outside the build tree.
# In my case this is a mount point on the docker VM.
DATADIR=""
# Adjust desired build parallelism
JOBS="-j1"
EXECDIR=$(pwd)
BASEDIR=$(cd "$EXECDIR/.."; pwd)
BUILDDIR=$(cd "$EXECDIR"; pwd)
GITREPO=""
# Display an error message if we need to bail out
#
function aborting() {
echo -e "$RED----> $1. Aborting.$DEFAULT"
exit 1
}
echo -e "$BLUE-> $BUILDDIR$DEFAULT"
if [[ ! -d "$BASEDIR"/mxe ]] ; then
echo -e "$RED--> Please start this from the right directory"
echo -e "usually a winbuild directory parallel to the mxe directory $DEFAULT"
exit 1
fi
echo -e "$BLUE---> Building in$LIGHT_GRAY $BUILDDIR ...$DEFAULT"
# check for arguments and set options
if [ $# -eq 0 ]; then
echo -e "$BLUE---> No arguments given."
echo -e "---> Building actual git commit and Release type without installer $DEFAULT"
else
while [ $# -gt 0 ]; do
case $1 in
-t|--tag) SSRF_TAG="$2"
shift;;
-i|--installer) INSTALLER="installer"
;;
-b|--build) RELEASE="$2"
shift;;
-d|--dir) DATADIR="$2"
shift;;
-r|--repo) GITREPO="$2"
shift;;
esac
shift
done
echo -e "$BLUE---> Subsurface tagged to:$LIGHT_GRAY $SSRF_TAG"
echo -e "$BLUE---> Building type:$LIGHT_GRAY $RELEASE"
echo -e "$BLUE---> Installer set to:$LIGHT_GRAY $INSTALLER $DEFAULT"
fi
case "$RELEASE" in
debug|Debug) RELEASE=Debug
DLL_SUFFIX="d"
[[ -f Release ]] && rm -rf ./*
touch Debug
;;
release|Release) RELEASE=Release
DLL_SUFFIX=""
[[ -f Debug ]] && rm -rf ./*
touch Release
;;
esac
export PATH="$BASEDIR"/mxe/usr/bin:$PATH:"$BASEDIR"/mxe/usr/i686-w64-mingw32.shared/qt5/bin/
export CXXFLAGS=-std=c++11
export PKG_CONFIG_PATH_i686_w64_mingw32_static="$BASEDIR/mxe/usr/i686-w64-mingw32.static/lib/pkgconfig"
export PKG_CONFIG_PATH_i686_w64_mingw32_shared="$BASEDIR/mxe/usr/i686-w64-mingw32.shared/lib/pkgconfig"
export PKG_CONFIG_PATH="$PKG_CONFIG_PATH_i686_w64_mingw32_static":"$PKG_CONFIG_PATH_i686_w64_mingw32_shared"
#
# mdbtools
#
if [ ! -f "$BASEDIR"/mxe/usr/i686-w64-mingw32.static/lib/libmdb.a ]; then
echo -e "$BLUE---> Building mdbtools ... $DEFAULT "
mkdir -p --verbose "$BASEDIR"/mxe/usr/i686-w64-mingw32.static/include
mkdir -p --verbose "$BASEDIR"/mxe/usr/i686-w64-mingw32.static/lib
cd "$BUILDDIR"
[[ -d mdbtools ]] && rm -rf mdbtools
mkdir -p mdbtools
cd mdbtools
if [ ! -f "$BASEDIR"/mdbtools/configure ] ; then
( cd "$BASEDIR"/mdbtools
autoreconf -v -f -i )
fi
"$BASEDIR"/mdbtools/configure --host=i686-w64-mingw32.static \
--srcdir="$BASEDIR"/mdbtools \
--prefix="$BASEDIR"/mxe/usr/i686-w64-mingw32.static \
--enable-shared \
--disable-man \
--disable-gmdb2
make $JOBS >/dev/null || aborting "Building mdbtools failed."
make install
else
echo -e "$BLUE---> Prebuilt mxe mdbtools ... $DEFAULT"
fi
# Subsurface
#
cd "$BASEDIR/subsurface"
git reset --hard master && echo -e "$BLUE---> Uncommitted changes to Subsurface (if any) dropped$DEFAULT"
if [ ! -z "$GITREPO" ]; then
git pull --rebase "$GITREPO" master || aborting "git pull failed, Subsurface not updated"
else
git pull --rebase || aborting "git pull failed, Subsurface not updated"
fi
echo -e "$BLUE---> Subsurface updated$DEFAULT"
if [ "$SSRF_TAG" != "" ]; then
git checkout "$SSRF_TAG" || aborting "Failed to checkout Subsurface's $SSRF_TAG."
fi
# Every thing is ok. Go on.
cd "$BUILDDIR"
# Blow up smtk-import binary dir and make it again, just to be extra-clean
rm -rf smtk-import && echo -e "$BLUE---> Deleted$LIGHT_GRAY $BUILDDIR/smtk-import folder$DEFAULT"
mkdir -p smtk-import && echo -e "$BLUE---> Created new$LIGHT_GRAY $BUILDDIR/smtk-import folder$DEFAULT"
# first copy the Qt plugins in place
QT_PLUGIN_DIRECTORIES="$BASEDIR/mxe/usr/i686-w64-mingw32.shared/qt5/plugins/iconengines \
$BASEDIR/mxe/usr/i686-w64-mingw32.shared/qt5/plugins/imageformats \
$BASEDIR/mxe/usr/i686-w64-mingw32.shared/qt5/plugins/platforms"
# This comes from subsurface's mxe-based-build.sh. I'm not sure it is necessary
# but, well, it doesn't hurt.
EXTRA_MANUAL_DEPENDENCIES="$BASEDIR/mxe/usr/i686-w64-mingw32.shared/qt5/bin/Qt5Xml$DLL_SUFFIX.dll"
STAGING_DIR=$BUILDDIR/smtk-import/staging
mkdir -p "$STAGING_DIR"/plugins
for d in $QT_PLUGIN_DIRECTORIES
do
cp -a "$d" "$STAGING_DIR"/plugins
done
for f in $EXTRA_MANUAL_DEPENDENCIES
do
cp "$f" "$STAGING_DIR"
done
# This is absolutely hackish, but necessary. Libmdb (built or prebuilt) is linked against
# the shared glib-2.0, yet time and again the build tries to link against the static lib.
mv -vf "$BASEDIR"/mxe/usr/i686-w64-mingw32.static/lib/libglib-2.0.a "$BASEDIR"/mxe/usr/i686-w64-mingw32.static/lib/libglib-2.0.a.bak || \
echo -e "$BLUE------> libglib-2.0.a had been moved in a previous run$DEFAULT"
cd "$BUILDDIR"/smtk-import
mkdir -p staging
echo -e "$BLUE---> Building CMakeCache.txt$DEFAULT"
cmake -DCMAKE_TOOLCHAIN_FILE="$BASEDIR"/mxe/usr/i686-w64-mingw32.shared/share/cmake/mxe-conf.cmake \
-DPKG_CONFIG_EXECUTABLE="/usr/bin/pkg-config" \
-DCMAKE_PREFIX_PATH="$BASEDIR"/mxe/usr/i686-w64-mingw32.shared/qt5 \
-DCMAKE_BUILD_TYPE=$RELEASE \
-DMAKENSIS=i686-w64-mingw32.shared-makensis \
-DSSRF_CORELIB="$BUILDDIR"/subsurface/core/libsubsurface_corelib.a \
"$BASEDIR"/subsurface/smtk-import
echo -e "$BLUE---> Building ...$DEFAULT"
if [ ! -z "$INSTALLER" ]; then
make "$JOBS" "$INSTALLER"
else
make "$JOBS"
fi
# Undo previous hackery
echo -e "$BLUE---> Restoring system to initial state$DEFAULT"
mv -vf "$BASEDIR"/mxe/usr/i686-w64-mingw32.static/lib/libglib-2.0.a.bak "$BASEDIR"/mxe/usr/i686-w64-mingw32.static/lib/libglib-2.0.a
if [ ! -z "$DATADIR" ]; then
echo -e "$BLUE---> Copying Smtk2ssrf installer to data folder$DEFAULT"
cp -vf "$BUILDDIR"/smtk-import/smtk2ssrf-*.exe "$DATADIR"
fi
echo -e "$RED---> Building smtk2ssrf done$DEFAULT"
|
dirkhh/ssrftest
|
packaging/windows/smtk2ssrf-mxe-build.sh
|
Shell
|
gpl-2.0
| 8,023 |
#!/bin/bash
keep_existing=yes
# Htars 1000 brick files to hpss archive
export backup_dir=htar_backups
# Find all completed bricks
if [ "$NERSC_HOST" == "edison" ]; then
outdir=/scratch1/scratchdirs/desiproc/DRs/data-releases/dr4
else
outdir=/global/cscratch1/sd/desiproc/dr4/data_release/dr4
fi
# Use finished brick list from job_accounting
don=dr4_bricks_done.tmp
# Backing up all bricks existing on Today's date
year=`date|awk '{print $NF}'`
today=`date|awk '{print $3}'`
month=`date +"%F"|awk -F "-" '{print $2}'`
if [ "$keep_existing" == "yes" ];then
    echo "keeping bricks already on tape"
ontape=dr4_bricks_ontape.tmp
cat ${backup_dir}/fortape_*[pm][0-9][0-9][0-9].txt > $ontape
# List bricks NOT on tape
new=dr4_bricks_new.tmp
    rm -f $new
python ../bin/diff_list.py --completed $don --ontape $ontape --outfn $new
export bricklist=$new
else
export bricklist=$don
fi
# Every 1000 bricks to new file
nbricks=`wc -l $bricklist |awk '{print $1}'`
let chunks=$nbricks/1000
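# e.g. 3500 finished bricks (hypothetical) give chunks=3; the remaining 500 are left for a later run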
echo Every 1000 bricks
junk=fortape.txt
rm -f $junk
# loop over chunks each of 1000
echo Splitting $nbricks finished bricks into $chunks files of 1000 each
for i in `seq 1 $chunks`;do
let j=$i-1
let en=1000*$j+1000
let st=1000*$j+1
echo rows $st,$en of $bricklist
sed -n ${st},${en}p $bricklist > $junk
# Give a unique name
unique=`head -n 1 $junk`
fortape=fortape_${year}_${month}_${today}_$unique.txt
if [ ! -e "$fortape" ];then
mv $junk $fortape
# Replace whitespaces with newlines (one file per line)
sed -i -e 's/\s\+/\n/g' $fortape
fi
done
# List all files that exists for each completed brick
echo Looping over the files of 1000 bricks each, listing every file to back up
for fn in `ls fortape_${year}_${month}_${today}_*[mp][0-9][0-9][0-9].txt`;do
backup=`echo $fn|sed s/.txt/_allfiles.txt/g`
echo Writing $backup
if [ ! -e "$backup" ]; then
for brick in `cat $fn`;do
bri="$(echo $brick | head -c 3)"
echo $outdir/checkpoints/$bri/$brick.pickle >> $backup
echo $outdir/coadd/$bri/$brick >> $backup
echo $outdir/logs/$bri/$brick >> $backup
echo $outdir/metrics/$bri/*${brick}* >> $backup
echo $outdir/tractor/$bri/*${brick}* >> $backup
echo $outdir/tractor-i/$bri/*${brick}* >> $backup
done
# Replace whitespaces with newlines (one file per line)
sed -i -e 's/\s\+/\n/g' $backup
fi
done
# Write htar commands to file
# need to run them from command line NOT script
cmds=htar_cmds_${year}_${month}_${today}.txt
rm -f $cmds
# e.g. for cmd in `cat $cmds`;do $cmd;done
echo Htar-ing everything listed in fortape_${year}_${month}_${today}...txt
for fn in `ls fortape_${year}_${month}_${today}_*allfiles.txt`;do
nam=`echo $fn | sed s/.txt/.tar/g`
sout=`echo $fn | sed s/.txt/.out/g`
# If sout exists but htar not successful, rm sout and re-htar
if [ -e "$sout" ];then
good=`tail ${sout}|grep "HTAR SUCCESSFUL"|wc -c`
if [ "$good" -eq 0 ]; then
# Htar didn't work, rm sout and run htar again
rm $sout
echo Rerunning htar for $sout
fi
fi
# Just write core htar command to file, anything else confuses unix!
if [ ! -e "$sout" ];then
echo "${nam} -L ${fn} > ${sout}" >> $cmds
fi
done
echo Htar commands written to: $cmds
echo htar can interact with login nodes at most 8 times simultaneously
echo Submit 7 htars at one time, leaving extra to use hsi
echo when those are successful submit next 7, etc
echo See dr4-qdo-htar.sh for running htar
# sed -n 1,7p ..., sed -n 8,14p..., sed -n 15,21p
#sed -n 1,7p htar_cmds_2017_01_30.txt| while read line;do a=`echo $line|awk -F ">" '{print $1}'`;b=`echo $line|awk -F ">" '{print $2}'`;echo $b;nohup htar -Hcrc -cf $a > $b & done
#echo Execute them from command line with:
#echo echo "for cmd in `cat $cmds`;do $cmd;done"
# HTAR, store checksums
# Confirm HTAR SUCESSFUL then:
#for fn in `find fortape_*_allfiles.txt`;do sout=`echo $fn|sed s/.txt/.out/g`;blist=`echo $fn|sed s/_allfiles//g`;mv $fn $sout $blist htar_backups/;done
|
legacysurvey/pipeline
|
bin/dr4/dr4-htar.sh
|
Shell
|
gpl-2.0
| 4,154 |
#!/usr/bin/env bash
#
#
source `dirname $0`/common.sh
TEST_CASE="ATOS deps target"
cat > build.sh <<EOF
gcc -c $SRCDIR/examples/sha1-c/sha.c -o sha.o
gcc -c $SRCDIR/examples/sha1-c/sha1.c -o sha1.o
gcc -o sha1++-c sha.o sha1.o
EOF
$ROOT/bin/atos-init -r "echo user 1" -b "sh ./build.sh"
|
atos-tools/atos-utils
|
tests/test078.sh
|
Shell
|
gpl-2.0
| 291 |
#!/bin/bash
#PBS -l walltime=4:00:00
#PBS -l nodes=1:ppn=2
#PBS -l vmem=32G
#PBS -N Baltic_2_2_4_500_100_0_0_no_no
cd /zhome/fc/e/102910/maritime-vrp/build
LD_LIBRARY_PATH=/zhome/fc/e/102910/gcc/lib64 ./maritime_vrp ../data/old_thesis_data/program_params.json ../data/new/Baltic_2_2_4_500_100_0_0_no_no.json
|
OR-Bologna/maritime-vrp
|
opt/launchers/Baltic_2_2_4_500_100_0_0_no_no.sh
|
Shell
|
gpl-3.0
| 308 |
#!/bin/bash
# Torc - Copyright 2011 University of Southern California. All Rights Reserved.
# $HeadURL$
# $Id$
# This program is free software: you can redistribute it and/or modify it under the terms of the
# GNU General Public License as published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
# the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with this program. If
# not, see <http://www.gnu.org/licenses/>.
# Imports Torc projects into Eclipse from the command line.
#
# Inspired by http://lugendal.wordpress.com/2009/07/22/eclipse-ctd-new-project-fast/ and
# http://stackoverflow.com/questions/1087573/open-a-specific-eclipse-project-from-command-line/6777801#6777801
ECLIPSE_EXECUTABLE=eclipse
WORKSPACE_PATH=`pwd`
PROJECTS_PATH=`pwd`/..
# make sure we know where to find eclipse
if ! command -v ${ECLIPSE_EXECUTABLE} &> /dev/null; then
echo "Eclipse is not present in PATH."
exit 1
fi
# this script will not work if Eclipse is already running
if ps ax | grep eclipse | grep -v grep | grep -v $0 > /dev/null; then
echo "Please quit Eclipse before invoking this script."
exit 1
fi
# import the projects in the parent path
if ! ${ECLIPSE_EXECUTABLE} \
-data ${WORKSPACE_PATH} \
-application org.eclipse.cdt.managedbuilder.core.headlessbuild \
-importAll ${PROJECTS_PATH}; then
echo "Failed to import projects. Have you installed http://www.eclipse.org/cdt?"
exit 1
fi
# open Eclipse in this workspace
${ECLIPSE_EXECUTABLE} \
-data ${WORKSPACE_PATH} &
|
torc-isi/torc
|
eclipse/import-projects.sh
|
Shell
|
gpl-3.0
| 1,813 |
#!/bin/sh
# Just like p-1, but with an absolute path.
# Copyright (C) 1997-2012 Free Software Foundation, Inc.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
. "${srcdir=.}/tests/init.sh"; path_prepend_ ./src
print_ver_ mkdir
mkdir --parents "$(pwd)/t/u" || fail=1
test -d t/u || fail=1
Exit $fail
|
homer6/gnu_coreutils
|
tests/mkdir/p-2.sh
|
Shell
|
gpl-3.0
| 888 |
#!/usr/bin/env bash
# pipe_trim_adaptors.sh
#
#
# Created by Bruno Costa on 18/11/2016
# Copyright 2016 ITQB / UNL. All rights reserved.
#
# Call: pipe_trim_adaptors.sh [LIB_FIRST] [LIB_LAST]
#Name inputs
set -e
LIB_FIRST=$1
LIB_LAST=$2
#Gets the script directory
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
#Get config settings
. $DIR/"config/workdirs.cfg"
#Setting up log dir
mkdir -p $workdir"log/"
log_file=$workdir"log/"$(date +"%y%m%d:%H%M%S")":PPID$PPID:pipe_trim_adaptors:$1-$2.log"
echo $(date +"%y/%m/%d-%H:%M:%S")" - "$(basename ${log_file})
exec 2>&1 > ${log_file}
SCRIPT_DIR=$DIR"/scripts/"
#Chooses run mode based on input arguments
printf $(date +"%y/%m/%d-%H:%M:%S")" - Starting adaptor trimming for adaptor: ${ADAPTOR}\n\n"
NPROC=0
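# Run up to $THREADS trim jobs in the background and wait for the whole batch
# before launching more.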
cycle=$(eval echo {${LIB_FIRST}..${LIB_LAST}})
for i in $cycle
do
  # Parallel threading of trim-adaptors.sh
NPROC=$(($NPROC+1))
LIB_NOW=$i
printf $(date +"%y/%m/%d-%H:%M:%S")" - Trimming adaptors from Lib${LIB_NOW} fasta\n"
${SCRIPT_DIR}trim-adaptors.sh ${DIR} ${LIB_NOW} &
if [ "$NPROC" -ge "$THREADS" ]; then
wait
NPROC=0
fi
done
wait
printf $(date +"%y/%m/%d-%H:%M:%S")" - Trimmed all libraries.\n"
duration=$(date -u -d @${SECONDS} +"%T")
printf "\n-----------END--------------\nThis script ran in ${duration}\n${SECONDS}sec.\n"
printf $(date +"%y/%m/%d-%H:%M:%S")" - Trimming finished in "${SECONDS}" secs.\n" # Redundant; can be removed
ok_log=${log_file/.log/:OK.log}
echo $ok_log
mv $log_file $ok_log
exit 0
|
forestbiotech-lab/miRPursuit
|
pipe_trim_adaptors.sh
|
Shell
|
gpl-3.0
| 1,510 |
#!/bin/sh
#
# Build all files under directory "build".
#
# This should be invoked from "..", the top of the source archive.
#
# argument: any configure options except "--enable-setup=..." are allowed.
#
JULIUS_VERSION=4.3.1
######################################################################
mkdir build
dir=`pwd`
defconf="--without-sndfile"
# make julius and other tools with default setting
./configure --prefix=${dir}/build ${defconf} $*
make
make install
# make julius with another setting
rm ${dir}/build/bin/julius
cd julius
make install.bin INSTALLTARGET=julius-${JULIUS_VERSION}
# standard
cd ../libjulius
make distclean
./configure --prefix=${dir}/build ${defconf} --enable-setup=standard $*
make
cd ../julius
make clean
make install.bin INSTALLTARGET=julius-${JULIUS_VERSION}-std
# GMM-VAD
cd ../libjulius
make distclean
./configure --prefix=${dir}/build ${defconf} --enable-gmm-vad $*
make
cd ../julius
make clean
make install.bin INSTALLTARGET=julius-${JULIUS_VERSION}-gmm-vad
# Decoder-VAD
cd ../libjulius
make distclean
./configure --prefix=${dir}/build ${defconf} --enable-decoder-vad --enable-power-reject $*
make
cd ../julius
make clean
make install.bin INSTALLTARGET=julius-${JULIUS_VERSION}-decoder-vad
# finished
cd ..
make distclean
strip build/bin/*
echo '###### FINISHED ######'
|
ytyeung/Invictus
|
julius_pi/julius4/support/build-all.sh
|
Shell
|
gpl-3.0
| 1,307 |
#!/bin/bash
#
# Transforms Dutch local RWS NWB to INSPIRE TN
#
# Author: Just van den Broecke
#
cd ..
# ./local-to-inspire-tn-ro-sd.sh test/hectopunten.gml test/tn-roadnetwork-points.gml
./local-to-inspire-tn-ro-sd.sh test/wegvakken.gml test/tn-roadnetwork.gml
|
justb4/inspire-foss
|
etl/NL.RWS/TransportNetworks/test/transform.sh
|
Shell
|
gpl-3.0
| 269 |
#!/bin/bash
#
# Copyright (c) 2015-2016 Matthew Carter, Ralph M Goodberlet.
#
# This file is part of Zettaknight.
#
# Zettaknight is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Zettaknight is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zettaknight. If not, see <http://www.gnu.org/licenses/>.
#
#needs bc
running_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
setopts="${running_dir}/setopts.sh"
source $setopts || { echo "failed to source $setopts"; exit 1; }
pwd="$(pwd)"
date_time=$(date +'%Y%m%d')
test_file="${date_time}_test_file"
scp="/usr/bin/scp"
function check_previous () {
    # capture the exit status before the [ ] test overwrites $?
    local rc=$?
    if [ $rc -ne 0 ]; then
        echo -e "\n${rc}: $@\n" | tee -a "$logfile"
        exit 1
    fi
}
setopts var "-f|--file_size" "file_size" "test file size: eg. 10MB (Mega) or 10M (Mibbi)"
setopts var "-r|--remote_ssh" "remote_ssh" "credentials for remote server i.e <user>@<hostname>"
setopts var "-b|--block_size" "block_size" "block size"
setopts var "-n|--port_num" "port_num" "port number for ssh"
setopts var "-c|--cipher" "cipher" "cipher to use for ssh"
setopts var "-s|--scp" "scp" "alternate path for scp [default: $scp], enter it here ex. [/usr/local/bin/scp]"
setopts var "-d|--remote_dir" "remote_dir" "remote location to write file"
if [[ -z "$file_size" ]] || [[ -z "$block_size" ]] || [[ -z "$scp" ]]; then
show_help
exit 1
fi
if [[ -z "$port_num" ]]; then
port_num=22
fi
if [[ ! -z "$remote_ssh" ]]; then
if [[ -z "$remote_dir" ]]; then
echo "remote_dir is necessary when remote_ssh is specified"
show_help
exit 1
fi
fi
if [[ ! -x "$scp" ]]; then
echo -e "\ncan't use $scp, cannot execute\n"
show_help
exit 1
fi
#calculate block size and count to match file size
file_size_int=$( echo "$file_size" | tr -d "[A-Z][a-z]" ) #remove any non-interger
file_size_suffix=$( echo "$file_size" | tr -d "[0-9]" ) #MB GB KB or TB
if [ "$file_size_suffix" == "KB" ] || [ "$file_size_suffix" == "k" ] || [ "$file_size_suffix" == "K" ]; then
file_size_int=$( echo "$file_size_int * 1000" | bc )
elif [ "$file_size_suffix" == "MB" ] || [ "$file_size_suffix" == "m" ] || [ "$file_size_suffix" == "M" ]; then
file_size_int=$( echo "$file_size_int * 1000 * 1000" | bc )
elif [ "$file_size_suffix" == "GB" ] || [ "$file_size_suffix" == "g" ] || [ "$file_size_suffix" == "G" ]; then
file_size_int=$( echo "$file_size_int * 1000 * 1000 * 1000" | bc )
elif [ "$file_size_suffix" == "TB" ] || [ "$file_size_suffix" == "t" ] || [ "$file_size_suffix" == "T" ]; then
file_size_int=$( echo "$file_size_int * 1000 * 1000 * 1000 * 1000" | bc)
else
echo "acceptable arguments for file size are KB MB GB or TB, exiting"
show_help
exit 1
fi
#echo "$file_size converted to: $file_size_int bytes"
#determine block size in bytes
block_size_int=$( echo "$block_size" | tr -d "[A-Z][a-z]" )
block_size_suffix=$( echo "$block_size" | tr -d "[0-9]" )
if [ "$block_size_suffix" == "KB" ] || [ "$block_size_suffix" == "k" ] || [ "$block_size_suffix" == "K" ]; then
block_size_int=$( echo "$block_size_int * 1024" | bc )
elif [ "$block_size_suffix" == "MB" ] || [ "$block_size_suffix" == "m" ] || [ "$block_size_suffix" == "M" ]; then
block_size_int=$( echo "$block_size_int * 1024 * 1024" | bc )
else
echo "acceptable arguments for block size are KB and MB, exiting"
show_help
exit 1
fi
#echo "$block_size coverted to: $block_size_int bytes"
dd_count=$( echo "$file_size_int / $block_size_int" | bc ) #file size in bytes divided by block size in bytes to determine count
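# Worked example (hypothetical inputs): -f 10MB -b 1MB -> 10000000 / 1048576 -> dd_count=9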
#test 1
#dd if=/dev/zero of=${test_file} bs=${block_size_int} count=${dd_count}
#check_previous "failed to create test file"
#test 2, sync the entire file dd has written one time before returning
echo -e "\ndd if=/dev/zero of=${test_file} bs=${block_size_int} count=${dd_count} conv=fdatasync"
echo "writing file to ${pwd}/${test_file}"
dd if=/dev/zero of="${pwd}/${test_file}" bs=${block_size_int} count=${dd_count} conv=fdatasync
check_previous "dd if=/dev/zero of="${pwd}/${test_file}" bs=${block_size_int} count=${dd_count} conv=fdatasync"
#test 3 each write is commited to disk before returning, write cache is basically unused
#dd if=/dev/zero of=${test_file} bs=${block_size_int} count=${dd_count} oflag=dsync
#check_previous "failed to create test file"
du -h $test_file
if ! [ -z "$remote_ssh" ]; then
echo -e "\ntesting default ssh tunnel: $remote_ssh"
if [[ -z "$cipher" ]]; then
down_speed=$($scp -v -P $port_num $test_file ${remote_ssh}:${remote_dir}/${test_file} 2>&1 | grep "Bytes per second:" | tr -d [A-Z][a-z]:, | awk '{ print $1 }')
else
echo "cipher: $cipher"
down_speed=$($scp -v -c $cipher -P $port_num $test_file ${remote_ssh}:${remote_dir}/${test_file} 2>&1 | grep "Bytes per second:" | tr -d [A-Z][a-z]:, | awk '{ print $1 }')
fi
if ! which bc &> /dev/null; then
echo "bc is not installed, cannot provide MB/s or MiB/s conversions"
echo "down speed: $down_speed B/s"
echo "pull speed: $pull_speed B/s"
else
down_speed_mb=$(echo "($down_speed / 1000) / 1000" | bc)
#down_speed_mib=$(echo "($down_speed / 1024) / 1024" | bc)
echo "speed: $down_speed_mb MB/s"
#echo "speed: $down_speed_mib MiB/s"
fi
ssh $remote_ssh "rm ${remote_dir}/${test_file}"
check_previous "failed to remove remote ${remote_dir}/${test_file}"
rm ${pwd}/${test_file}
check_previous "failed to remove ${pwd}/${test_file}"
fi
|
M-Carter/zettaknight
|
zettaknight.d/ssh_speed_test.sh
|
Shell
|
gpl-3.0
| 5,999 |
#!/bin/bash
#SBATCH -N 1
#SBATCH -C haswell
#SBATCH -p regular
#SBATCH -J 0200-relax
#SBATCH [email protected]
#SBATCH -t 00:59:00
#SBATCH --mem-per-cpu 128
export OMP_NUM_THREADS=1
export OMP_PLACES=threads
export OMP_PROC_BIND=spread
module load espresso
#for i in 100 200 225 250 273 293; do
for i in 200; do
srun -n 32 -c 2 --cpu_bind=cores pw.x < "0$i.lv.relax.in" >> "0$i.lv.relax.out"
#export OMP_NUM_THREADS=1
cat > "0$i.lv.efg.in" << EOF
&inputgipaw
job='efg'
prefix='scf-0$i'
tmp_dir='./scratch/'
verbosity='high'
! spline_ps=.true.
q_gipaw=0.01
q_efg(2)=-8.165
/
EOF
# "$pw_prefix" gipaw.x < "efg.$i.in" >> "efg.$i.out"
srun -n 32 -c 2 --cpu_bind=cores gipaw.x < "0$i.lv.efg.in" >> "0$i.lv.efg.out"
done
|
Altoidnerd/paradichlorobenzene5
|
17.volume_dependence/relax/regular.bash
|
Shell
|
gpl-3.0
| 807 |
#!/bin/sh
set -e
CONFIG=$(readlink -f $(dirname $0)/..)
PROGRAM=".mutt"
cd ~
rm -rf $PROGRAM .mailcap
ln -s $CONFIG $PROGRAM
ln -s $CONFIG/.mailcap
|
mekanix/dotfiles
|
mutt/bin/init.sh
|
Shell
|
gpl-3.0
| 150 |
#!/usr/bin/env bash
# This file is part of The RetroPie Project
#
# The RetroPie Project is the legal property of its developers, whose names are
# too numerous to list here. Please refer to the COPYRIGHT.md file distributed with this source.
#
# See the LICENSE.md file at the top-level directory of this distribution and
# at https://raw.githubusercontent.com/RetroPie/RetroPie-Setup/master/LICENSE.md
#
rp_module_id="lr-mame"
rp_module_desc="MAME emulator - MAME (current) port for libretro"
rp_module_help="ROM Extension: .zip\n\nCopy your MAME roms to either $romdir/mame-libretro or\n$romdir/arcade"
rp_module_section="exp"
function _get_params_lr-mame() {
local params=(OSD=retro RETRO=1 NOWERROR=1 OS=linux TARGETOS=linux CONFIG=libretro NO_USE_MIDI=1 TARGET=mame)
isPlatform "64bit" && params+=(PTR64=1)
echo "${params[@]}"
}
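# PTR64=1 selects MAME's 64-bit pointer build on 64-bit platforms.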
function sources_lr-mame() {
gitPullOrClone "$md_build" https://github.com/libretro/mame.git
}
function build_lr-mame() {
rpSwap on 750
local params=($(_get_params_lr-mame) SUBTARGET=arcade)
make clean
make "${params[@]}"
rpSwap off
md_ret_require="$md_build/mamearcade_libretro.so"
}
function install_lr-mame() {
md_ret_files=(
'mamearcade_libretro.so'
)
}
function configure_lr-mame() {
mkRomDir "arcade"
mkRomDir "mame-libretro"
ensureSystemretroconfig "arcade"
ensureSystemretroconfig "mame-libretro"
addSystem 0 "$md_id" "arcade" "$md_inst/mamearcade_libretro.so"
addSystem 0 "$md_id" "mame-libretro arcade mame" "$md_inst/mamearcade_libretro.so"
}
|
j-r0dd/RetroPie-Setup
|
scriptmodules/libretrocores/lr-mame.sh
|
Shell
|
gpl-3.0
| 1,577 |
#!/bin/bash
echo "---------------------------------------------------------------------------------"
echo
echo " S P E C F E M 3 D _ G L O B E - Tests"
echo
echo "---------------------------------------------------------------------------------"
echo "This script runs a set of compilation and unit tests in directory tests/"
echo "It may take a few minutes to execute."
echo
echo "Please consider adding more test units to this directory here ..."
echo "Contributions can be sent to: $(tput bold)http://github.com/geodynamics/specfem3d_globe$(tput sgr0)"
echo
# directories
dir=`pwd`
# changes to subdirectory tests/ if called in root directory SPECFEM3D_GLOBE/
currentdir=`basename $dir`
echo "current directory: $currentdir"
if [ "$currentdir" == "SPECFEM3D_GLOBE" ]; then
cd tests/
dir=`pwd`
fi
# default sub-directories
tests=( compilations \
auxiliaries \
examples
)
# running tests
echo "main directory : $dir"
echo
date
if [ "$1" != "" ]; then
# specified test directory
echo "test $1 starting"
# runs all bash scripts in specified test-subdirectory
./run_tests.sh $1
# checks exit code
if [[ $? -ne 0 ]]; then
      dir=`basename $1`
echo "ERROR"
echo "ERROR test failed, please check file results.log in tests/$dir"
echo "ERROR"
exit 1
fi
  echo
  echo "test completed"
else
echo "all tests starting"
# loops over subdirectories
for testdir in ${tests[@]};
do
testdir=${testdir%*/}
if [[ "$testdir" == *buildbot* ]]; then
# skips this test directory
:
else
# runs all bash scripts in test-subdirectory
./run_tests.sh $testdir
# checks exit code
if [[ $? -ne 0 ]]; then
dir=`basename $testdir`
echo "ERROR"
echo "ERROR test failed, please check file results.log in tests/$dir"
echo "ERROR"
exit 1
fi
cd $dir/
fi
done
echo
echo "all tests completed"
fi
echo
date
echo
|
geodynamics/specfem3d_globe
|
tests/run_all_tests.sh
|
Shell
|
gpl-3.0
| 1,943 |
# Specify the path to the optical flow utility here.
# Also check line 44 and 47 whether the arguments are in the correct order.
# deepflow and deepmatching optical flow binaries
flowCommandLine="bash run-deepflow.sh"
if [ -z "$flowCommandLine" ]; then
echo "Please open make-opt-flow.sh and specify the command line for computing the optical flow."
exit 1
fi
if [ ! -f ./consistencyChecker/consistencyChecker ]; then
if [ ! -f ./consistencyChecker/Makefile ]; then
echo "Consistency checker makefile not found."
exit 1
fi
cd consistencyChecker/
make
cd ..
fi
filePattern=$1
folderName=$2
startFrame=${3:-1}
stepSize=${4:-1}
if [ "$#" -le 1 ]; then
echo "Usage: ./make-opt-flow <filePattern> <outputFolder> [<startNumber> [<stepSize>]]"
echo -e "\tfilePattern:\tFilename pattern of the frames of the videos."
echo -e "\toutputFolder:\tOutput folder."
echo -e "\tstartNumber:\tThe index of the first frame. Default: 1"
echo -e "\tstepSize:\tThe step size to create long-term flow. Default: 1"
exit 1
fi
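# Example invocation (hypothetical files): ./make-opt-flow.sh frame_%04d.ppm ./flow 1 1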
i=$[$startFrame]
j=$[$startFrame + $stepSize]
mkdir -p "${folderName}"
while true; do
file1=$(printf "$filePattern" "$i")
file2=$(printf "$filePattern" "$j")
if [ -a $file2 ]; then
if [ ! -f ${folderName}/forward_${i}_${j}.flo ]; then
eval $flowCommandLine "$file1" "$file2" "${folderName}/forward_${i}_${j}.flo"
fi
if [ ! -f ${folderName}/backward_${j}_${i}.flo ]; then
eval $flowCommandLine "$file2" "$file1" "${folderName}/backward_${j}_${i}.flo"
fi
./consistencyChecker/consistencyChecker "${folderName}/backward_${j}_${i}.flo" "${folderName}/forward_${i}_${j}.flo" "${folderName}/reliable_${j}_${i}.txt"
./consistencyChecker/consistencyChecker "${folderName}/forward_${i}_${j}.flo" "${folderName}/backward_${j}_${i}.flo" "${folderName}/reliable_${i}_${j}.txt"
else
break
fi
i=$[$i +1]
j=$[$j +1]
done
|
cysmith/neural-style-tf
|
video_input/make-opt-flow.sh
|
Shell
|
gpl-3.0
| 1,910 |
#!/bin/bash
set -o errexit
cd $(dirname $0)/..
DBENVS="test
integration"
# posix compliant escape sequence
esc=$'\033'"["
res="${esc}0m"
function print_heading() {
echo
# newline + bold magenta
echo -e "${esc}0;34;1m${1}${res}"
}
function exit_err() {
if [ ! -z "$1" ]; then
echo $1 > /dev/stderr
fi
exit 1
}
function exit_msg() {
# complain to STDERR and exit with error
echo "${*}" >&2
exit 2
}
function get_migrations() {
local db_schemas_path="${1}"
local migrations=()
for file in "${db_schemas_path}"/*.sql; do
[[ -f "${file}" ]] || continue
migrations+=("${file}")
done
if [[ "${migrations[@]}" ]]; then
echo "${migrations[@]}"
else
exit_msg "There are no migrations at path: "\"${db_schemas_path}\"""
fi
}
function create_empty_db() {
local db="${1}"
local dbconn="${2}"
create_script="drop database if exists \`${db}\`; create database if not exists \`${db}\`;"
mysql ${dbconn} -e "${create_script}" || exit_err "unable to create ${db}"
echo "created empty "$db" database"
}
function apply_migrations() {
local migrations="${1}"
local dbpath="${2}"
local dbenv="${3}"
local db="${4}"
if [[ "${migrations[@]}" ]]
then
echo "applying migrations from ${db_mig_path}"
goose -path="${dbpath}" -env="${dbenv}" up || exit_err "unable to migrate ${db} with ${dbpath}"
else
echo "no migrations at ${dbpath}"
fi
}
# set db connection for if running in a separate container or not
dbconn="-u root"
if [[ $MYSQL_CONTAINER ]]; then
dbconn="-u root -h boulder-mysql --port 3306"
fi
# MariaDB sets the default binlog_format to STATEMENT,
# which causes warnings that fail tests. Instead set it
# to the format we use in production, MIXED.
mysql $dbconn -e "SET GLOBAL binlog_format = 'MIXED';"
# MariaDB sets the default @@max_connections value to 100. The SA alone is
# configured to use up to 100 connections. We increase the max connections here
# to give headroom for other components (ocsp-updater for example).
mysql $dbconn -e "SET GLOBAL max_connections = 500;"
for dbenv in $DBENVS; do
db="boulder_sa_${dbenv}"
print_heading "Checking if ${db} exists"
if mysql ${dbconn} -e 'show databases;' | grep "${db}" > /dev/null; then
echo "${db} already exists - skipping create"
else
echo "${db} doesn't exist - creating"
create_empty_db "${db}" "${dbconn}"
fi
# Determine which $dbpath and $db_mig_path to use.
if [[ "${BOULDER_CONFIG_DIR}" == "test/config-next" ]]
then
dbpath="./sa/_db-next"
else
dbpath="./sa/_db"
fi
db_mig_path="${dbpath}/migrations"
# Populate an array with schema files present at $dbpath.
migrations=($(get_migrations "${db_mig_path}"))
# Goose up, this will work if there are schema files present at
# $dbpath with a newer timestamp than the current goose dbversion.
apply_migrations "${migrations}" "${dbpath}" "${dbenv}" "${db}"
# The (actual) latest migration should always be the last file or
# symlink at $db_mig_path.
latest_mig_path_filename="$(basename -- "${migrations[-1]}")"
# Goose's dbversion is the timestamp (first 14 characters) of the file
# that it last migrated to. We can figure out which goose dbversion we
# should be on by parsing the timestamp of the latest file at
# $db_mig_path.
latest_db_mig_version="${latest_mig_path_filename:0:14}"
# Ask Goose the timestamp (dbversion) our database is currently
# migrated to.
goose_dbversion="$(goose -path=${dbpath} -env=${dbenv} dbversion | sed 's/goose: dbversion //')"
# If the $goose_dbversion does not match the $latest_in_db_mig_path,
# trigger recreate
if [[ "${latest_db_mig_version}" != "${goose_dbversion}" ]]; then
print_heading "Detected latest migration version mismatch"
echo "dropping and recreating from migrations at ${db_mig_path}"
create_empty_db "${db}" "${dbconn}"
apply_migrations "${migrations}" "${dbpath}" "${dbenv}" "${db}"
fi
# With MYSQL_CONTAINER, patch the GRANT statements to
# use 127.0.0.1, not localhost, as MySQL may interpret
# 'username'@'localhost' to mean only users for UNIX
# socket connections. Use '-f' to ignore errors while
# we have migrations that haven't been applied but
# add new tables (TODO(#2931): remove -f).
USERS_SQL=test/sa_db_users.sql
if [[ ${MYSQL_CONTAINER} ]]; then
sed -e "s/'localhost'/'%'/g" < ${USERS_SQL} | \
mysql $dbconn -D $db -f || exit_err "unable to add users to ${db}"
else
sed -e "s/'localhost'/'127.%'/g" < $USERS_SQL | \
mysql $dbconn -D $db -f < $USERS_SQL || exit_err "unable to add users to ${db}"
fi
echo "added users to ${db}"
done
echo
echo "database setup complete"
|
letsencrypt/boulder
|
test/create_db.sh
|
Shell
|
mpl-2.0
| 4,692 |
#!/usr/bin/env bash
cd $(dirname $0)
tx pull -l de,es,ja,pt,ru,zh_CN,zh_TW,vi,th_TH,fa,fr,pt_BR,it,cs,pl_PL
translations="translations/bisq-desktop.displaystringsproperties"
i18n="src/main/resources/i18n"
mv "$translations/de.properties" "$i18n/displayStrings_de.properties"
mv "$translations/es.properties" "$i18n/displayStrings_es.properties"
mv "$translations/ja.properties" "$i18n/displayStrings_ja.properties"
mv "$translations/pt.properties" "$i18n/displayStrings_pt.properties"
mv "$translations/ru.properties" "$i18n/displayStrings_ru.properties"
mv "$translations/zh_CN.properties" "$i18n/displayStrings_zh-hans.properties"
mv "$translations/zh_TW.properties" "$i18n/displayStrings_zh-hant.properties"
mv "$translations/vi.properties" "$i18n/displayStrings_vi.properties"
mv "$translations/th_TH.properties" "$i18n/displayStrings_th.properties"
mv "$translations/fa.properties" "$i18n/displayStrings_fa.properties"
mv "$translations/fr.properties" "$i18n/displayStrings_fr.properties"
mv "$translations/pt_BR.properties" "$i18n/displayStrings_pt-br.properties"
mv "$translations/it.properties" "$i18n/displayStrings_it.properties"
mv "$translations/cs.properties" "$i18n/displayStrings_cs.properties"
mv "$translations/pl_PL.properties" "$i18n/displayStrings_pl.properties"
rm -rf $translations
|
bisq-network/exchange
|
core/update_translations.sh
|
Shell
|
agpl-3.0
| 1,308 |
#!/bin/bash
# (c) Copyright 2002-2016 by authors of the Tiki Wiki CMS Groupware Project
#
# All Rights Reserved. See copyright.txt for details and a complete list of authors.
# Licensed under the GNU LESSER GENERAL PUBLIC LICENSE. See license.txt for details.
# $Id$
# Script to remove _htaccess which can be browsed unless hidden
# these files give an attacker useful information
if [ ! -d 'db' ]; then
echo "You must launch this script from your (multi)tiki root dir."
	exit 1
fi
find . -name _htaccess -type f -exec rm -f {} \;
echo "Done."
|
oregional/tiki
|
doc/devtools/removehtaccess.sh
|
Shell
|
lgpl-2.1
| 567 |
#!/bin/sh
#
# Build a single large pcm for the entire basic set of ROOT libraries.
# Script takes as optional argument the source directory path.
#
# Copyright (c) 2013 Rene Brun and Fons Rademakers
# Author: Fons Rademakers, 19/2/2013
srcdir=$1
shift
modules=$1
shift
echo
echo Generating the one large pcm for $modules, patience...
echo
rm -f include/allHeaders.h include/allHeaders.h.pch include/allLinkDef.h all.h alldefs.h cppflags.txt
while ! [ "x$1" = "x" ]; do
echo '#include "'$1'"' >> all.h
shift
done
for dict in `find $modules -name 'G__*.cxx' 2> /dev/null | grep -v /G__Cling.cxx | grep -v core/metautils/src/G__std_`; do
dirname=`dirname $dict` # to get foo/src
dirname=`echo $dirname | sed 's,/src$,,'` # to get foo
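    # Each generated dictionary records its include paths and the headers it
    # was built from; pull those out with awk and append them to cppflags.txt
    # and all.h respectively.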
awk 'BEGIN{START=-1} /includePaths\[\] = {/, /^0$/ { if (START==-1) START=NR; else if ($0 != "0") { sub(/",/,"",$0); sub(/^"/,"-I",$0); print $0 } }' $dict >> cppflags.txt
echo "// $dict" >> all.h
# awk 'BEGIN{START=-1} /payloadCode =/, /^;$/ { if (START==-1) START=NR; else if ($1 != ";") { code=substr($0,2); sub(/\\n"/,"",code); print code } }' $dict >> all.h
awk 'BEGIN{START=-1} /headers\[\] = {/, /^0$/ { if (START==-1) START=NR; else if ($0 != "0") { sub(/,/,"",$0); print "#include",$0 } }' $dict >> all.h
if ! test "$dirname" = "`echo $dirname| sed 's,/qt,,'`"; then
# something qt; undef emit afterwards
cat <<EOF >> all.h
#ifdef emit
# undef emit
#endif
EOF
elif ! test "$dirname" = "`echo $dirname| sed 's,net/ldap,,'`"; then
# ldap; undef Debug afterwards
cat <<EOF >> all.h
#ifdef Debug
# undef Debug
#endif
#ifdef GSL_SUCCESS
# undef GSL_SUCCESS
#endif
EOF
fi
find $srcdir/$dirname/inc/ -name '*LinkDef*.h' | \
sed -e 's|^|#include "|' -e 's|$|"|' >> alldefs.h
done
mv all.h include/allHeaders.h
mv alldefs.h include/allLinkDef.h
# check if rootcling_tmp exists in the expected location (not the case for CMake builds)
if [ ! -x core/utils/src/rootcling_tmp ]; then
exit 0
fi
cxxflags="-D__CLING__ -D__STDC_LIMIT_MACROS -D__STDC_CONSTANT_MACROS -Iinclude -Ietc -Ietc/cling `cat cppflags.txt | sort | uniq`"
rm cppflags.txt
# generate one large pcm
rm -f allDict.* lib/allDict_rdict.pc*
touch etc/allDict.cxx.h
core/utils/src/rootcling_tmp -1 -f etc/allDict.cxx -noDictSelection -c $cxxflags -I$srcdir include/allHeaders.h include/allLinkDef.h
res=$?
if [ $res -eq 0 ] ; then
mv etc/allDict_rdict.pch etc/allDict.cxx.pch
res=$?
# actually we won't need the allDict.[h,cxx] files
#rm -f allDict.*
fi
exit $res
|
benni0815/root
|
build/unix/makeonepcm.sh
|
Shell
|
lgpl-2.1
| 2,596 |
rm -f log/test.log
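# Usage: ./unit_test.sh <target> [<method>]
# Runs test/unit/<target>_test.rb, optionally restricted to one test method
# via the test runner's -n flag.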
target=$1
method=$2
if [ -z "$method" ]
then
ruby -Itest test/unit/${target}_test.rb
else
ruby -Itest test/unit/${target}_test.rb -n $method
fi
|
KenichiTakemura/admin.okbrisbane
|
unit_test.sh
|
Shell
|
lgpl-3.0
| 163 |
#!/usr/bin/env bash
set -e
if ! [[ "$0" =~ "tests/rkt-monitor/build-stresser.sh" ]]; then
echo "must be run from repository root"
exit 255
fi
stressers="cpu mem log"
if [ -z "${1}" ]; then
echo Specify one of \""${stressers[@]}"\" or all
exit 1
fi
echo "Building worker..."
make rkt-monitor
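# End any in-progress acbuild session on exit, preserving the script's exit status.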
acbuildEnd() {
export EXIT=$?
if [ -d ".acbuild" ]; then
acbuild --debug end && exit $EXIT
fi
}
buildImages() {
acbuild --debug begin
trap acbuildEnd EXIT
acbuild --debug set-name appc.io/rkt-"${1}"-stresser
acbuild --debug copy build-rkt-1.15.0+git/target/bin/"${1}"-stresser /worker
acbuild --debug set-exec -- /worker
acbuild --debug write --overwrite "${1}"-stresser.aci
acbuild --debug end
}
if [ "${1}" = "all" ]; then
  for stresser in "${stressers[@]}"; do
buildImages ${stresser}
done
else
buildImages ${1}
fi
|
yifan-gu/rkt
|
tests/rkt-monitor/build-stresser.sh
|
Shell
|
apache-2.0
| 888 |
trap "echo 'FAILED!'; exit 1" ERR
set -x
# cd to the dir of this script, so paths are relative
cd "$(dirname "$0")"
arcyon='../../bin/arcyon'
$arcyon -h
$arcyon comment -h
$arcyon comment-inline -h
$arcyon get-diff -h
$arcyon paste -h
$arcyon query -h
$arcyon raw-diff -h
$arcyon show-config -h
$arcyon update-revision -h
$arcyon task-create -h
$arcyon task-query -h
id="$($arcyon create-revision -t title -p plan --summary ssss -f diff1 --format-id)"
$arcyon get-diff -r $id --ls
$arcyon update-revision $id update -f diff2
$arcyon get-diff -r $id --ls
$arcyon query --format-type ids | grep $id
$arcyon query --ids $id --format-string '$summary' | grep ssss
$arcyon query --format-type ids --order created | grep $id
$arcyon query --format-type ids --order modified | grep $id
diffid="$($arcyon raw-diff diff1)"
diffid2="$($arcyon raw-diff diff2)"
$arcyon get-diff -d $diffid --ls
$arcyon get-diff -d $diffid2 --ls
id2="$($arcyon create-revision -t title2 -p plan --diff-id $diffid --format-id)"
id3=$($arcyon update-revision $id2 update --diff-id $diffid2 --format-id)
$arcyon update-revision $id2 update --diff-id $diffid2 --format-url
$arcyon update-revision $id2 update --diff-id $diffid2 --format-url --ccs phab --reviewers bob
if [ "$id2" != "$id3" ]; then
false
fi
$arcyon query --format-type ids | grep $id2
$arcyon comment $id2 -m 'hello there!'
$arcyon comment-inline $id2 --start-line 51 --end-line-offset 0 --filepath 'bin/arcyon' -m 'inline comment!'
$arcyon comment-inline $id2 --start-line 51 --end-line-offset 0 --filepath 'bin/arcyon' -m 'old-side inline comment!' --left-side
$arcyon comment $id2 --attach-inlines
taskid=$($arcyon task-create 'exercise task-create' -d 'description' -p wish -o alice --ccs phab bob --format-id)
$arcyon task-query
taskid2=$($arcyon task-query --max-results 1 --format-ids)
if [ "$taskid" != "$taskid2" ]; then
false
fi
$arcyon task-create 'exercise task-create again'
$arcyon task-update $taskid -m 'just a comment'
$arcyon task-update $taskid -t 'exercise task-update' -d 'new description' -p low -o bob --ccs phab alice -m 'updated loads'
$arcyon paste "test paste" -f diff1
# -----------------------------------------------------------------------------
# Copyright (C) 2013-2015 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
|
cs-shadow/phabricator-tools
|
testbed/arcyon/exercise_arcyon.sh
|
Shell
|
apache-2.0
| 2,906 |
#!/bin/bash
# Exit immediately if a command exits with a non-zero status
set -e
# check for required environment variables
if [ -z "$DOMAIN_NAME" ] ; then
echo "DOMAIN_NAME environment varibale must be set"
exit 1
fi
if [ -z "$EMAIL" ] ; then
echo "EMAIL environment varibale must be set"
exit 1
fi
function get_certs {
echo "Requesting certificate for domain \"${DOMAIN_NAME}\"..."
certbot certonly \
--standalone \
-d $DOMAIN_NAME \
--email $EMAIL \
--rsa-key-size 4096 \
--text \
--agree-tos \
--verbose \
--server https://acme-v01.api.letsencrypt.org/directory \
--standalone-supported-challenges http-01 \
--renew-by-default
}
function auto_renew_certs {
echo TODO: implement auto renewal of certs
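  # A possible sketch (assumption, untested): `certbot renew` re-issues any
  # certificate that is close to expiry; with the standalone plugin, port 80
  # must be free while it runs.
  #   certbot renew --agree-tos --quiet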
}
if [ ! -e "/etc/letsencrypt/live/$DOMAIN_NAME" ]; then
get_certs
else
auto_renew_certs
fi
|
mrcnc/spatialconnect-server
|
web/certbot/run.sh
|
Shell
|
apache-2.0
| 839 |
#! /bin/bash
echo "The script for installing DNS using BIND9 starts now!"
currCommand='apt-get install -y bind9 bind9utils bind9-doc'
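# prefix the command with sudo when not running as root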
if [ `whoami` != 'root' ]
then
currCommand="sudo ${currCommand}"
fi
${currCommand}
|
anilveeramalli/cloudify-azure-plugin
|
blueprints/clustered-dns/dns/dns_install.sh
|
Shell
|
apache-2.0
| 228 |
#!/bin/bash
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
AWS_CLI_DOWNLOAD_URL=https://s3.amazonaws.com/aws-cli/awscli-bundle.zip
S3_ROOT=s3://bucket/folder
S3_DOWNLOADS=$S3_ROOT/test
S3_SYSTEM=$S3_ROOT/test1
IGNITE_DOWNLOAD_URL=$S3_DOWNLOADS/apache-ignite-fabric-1.6.0-SNAPSHOT-bin.zip
IGNITE_ZIP=apache-ignite-fabric-1.6.0-SNAPSHOT-bin.zip
IGNITE_UNZIP_DIR=apache-ignite-fabric-1.6.0-SNAPSHOT-bin
TESTS_PACKAGE_DOWNLOAD_URL=$S3_DOWNLOADS/ignite-cassandra-tests-1.6.0-SNAPSHOT.zip
TESTS_PACKAGE_ZIP=ignite-cassandra-tests-1.6.0-SNAPSHOT.zip
TESTS_PACKAGE_UNZIP_DIR=ignite-cassandra-tests
S3_LOGS_URL=$S3_SYSTEM/logs/i-logs
S3_LOGS_TRIGGER_URL=$S3_SYSTEM/logs-trigger
S3_BOOTSTRAP_SUCCESS_URL=$S3_SYSTEM/i-success
S3_BOOTSTRAP_FAILURE_URL=$S3_SYSTEM/i-failure
S3_CASSANDRA_NODES_DISCOVERY_URL=$S3_SYSTEM/c-discovery
S3_IGNITE_NODES_DISCOVERY_URL=$S3_SYSTEM/i-discovery
S3_IGNITE_FIRST_NODE_LOCK_URL=$S3_SYSTEM/i-first-node-lock
S3_IGNITE_NODES_JOIN_LOCK_URL=$S3_SYSTEM/i-join-lock
INSTANCE_REGION=us-west-2
INSTANCE_NAME_TAG=IGNITE-SERVER
[email protected]
INSTANCE_PROJECT_TAG=ignite
terminate()
{
if [[ "$S3_BOOTSTRAP_SUCCESS_URL" != */ ]]; then
S3_BOOTSTRAP_SUCCESS_URL=${S3_BOOTSTRAP_SUCCESS_URL}/
fi
if [[ "$S3_BOOTSTRAP_FAILURE_URL" != */ ]]; then
S3_BOOTSTRAP_FAILURE_URL=${S3_BOOTSTRAP_FAILURE_URL}/
fi
host_name=$(hostname -f | tr '[:upper:]' '[:lower:]')
msg=$host_name
if [ -n "$1" ]; then
echo "[ERROR] $1"
echo "[ERROR]-----------------------------------------------------"
echo "[ERROR] Ignite node bootstrap failed"
echo "[ERROR]-----------------------------------------------------"
msg=$1
reportFolder=${S3_BOOTSTRAP_FAILURE_URL}${host_name}
reportFile=$reportFolder/__error__
else
echo "[INFO]-----------------------------------------------------"
echo "[INFO] Ignite node bootstrap successfully completed"
echo "[INFO]-----------------------------------------------------"
reportFolder=${S3_BOOTSTRAP_SUCCESS_URL}${host_name}
reportFile=$reportFolder/__success__
fi
echo $msg > /opt/bootstrap-result
aws s3 rm --recursive $reportFolder
if [ $? -ne 0 ]; then
echo "[ERROR] Failed drop report folder: $reportFolder"
fi
aws s3 cp --sse AES256 /opt/bootstrap-result $reportFile
if [ $? -ne 0 ]; then
echo "[ERROR] Failed to report bootstrap result to: $reportFile"
fi
rm -f /opt/bootstrap-result
if [ -n "$1" ]; then
exit 1
fi
exit 0
}
tagInstance()
{
export EC2_HOME=/opt/aws/apitools/ec2
export JAVA_HOME=/opt/jdk1.8.0_77
export PATH=$JAVA_HOME/bin:$EC2_HOME/bin:$PATH
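    # 169.254.169.254 is the EC2 instance metadata service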
INSTANCE_ID=$(curl http://169.254.169.254/latest/meta-data/instance-id)
if [ $? -ne 0 ]; then
terminate "Failed to get instance metadata to tag it"
fi
if [ -n "$INSTANCE_NAME_TAG" ]; then
ec2-create-tags $INSTANCE_ID --tag Name=${INSTANCE_NAME_TAG} --region $INSTANCE_REGION
        if [ $? -ne 0 ]; then
terminate "Failed to tag EC2 instance with: Name=${INSTANCE_NAME_TAG}"
fi
fi
if [ -n "$INSTANCE_OWNER_TAG" ]; then
ec2-create-tags $INSTANCE_ID --tag owner=${INSTANCE_OWNER_TAG} --region $INSTANCE_REGION
        if [ $? -ne 0 ]; then
terminate "Failed to tag EC2 instance with: owner=${INSTANCE_OWNER_TAG}"
fi
fi
if [ -n "$INSTANCE_PROJECT_TAG" ]; then
ec2-create-tags $INSTANCE_ID --tag project=${INSTANCE_PROJECT_TAG} --region $INSTANCE_REGION
        if [ $? -ne 0 ]; then
terminate "Failed to tag EC2 instance with: project=${INSTANCE_PROJECT_TAG}"
fi
fi
}
downloadPackage()
{
    # $1 - source URL, $2 - destination path, $3 - human-readable package name
    echo "[INFO] Downloading $3 package from $1 into $2"
    for attempt in 1 2 3; do
        if [[ "$1" == s3* ]]; then
            aws s3 cp $1 $2
            code=$?
        else
            curl "$1" -o "$2"
            code=$?
            # tolerate curl exit code 6, as the original retry blocks intended
            if [ $code -eq 6 ]; then
                code=0
            fi
        fi
        if [ $code -eq 0 ]; then
            echo "[INFO] $3 package successfully downloaded from $1 into $2"
            return 0
        fi
        echo "[WARN] Failed to download $3 package (attempt ${attempt} of 3)"
        rm -Rf $2
        if [ ${attempt} -lt 3 ]; then
            sleep 10s
        fi
    done
    terminate "All three attempts to download $3 package from $1 failed"
}
if [[ "$S3_IGNITE_NODES_DISCOVERY_URL" != */ ]]; then
S3_IGNITE_NODES_DISCOVERY_URL=${S3_IGNITE_NODES_DISCOVERY_URL}/
fi
if [[ "$S3_CASSANDRA_NODES_DISCOVERY_URL" != */ ]]; then
S3_CASSANDRA_NODES_DISCOVERY_URL=${S3_CASSANDRA_NODES_DISCOVERY_URL}/
fi
echo "[INFO]-----------------------------------------------------------------"
echo "[INFO] Bootstrapping Ignite node"
echo "[INFO]-----------------------------------------------------------------"
echo "[INFO] Ignite download URL: $IGNITE_DOWNLOAD_URL"
echo "[INFO] Tests package download URL: $TESTS_PACKAGE_DONLOAD_URL"
echo "[INFO] Logs URL: $S3_LOGS_URL"
echo "[INFO] Logs trigger URL: $S3_LOGS_TRIGGER_URL"
echo "[INFO] Ignite node discovery URL: $S3_IGNITE_NODES_DISCOVERY_URL"
echo "[INFO] Ignite first node lock URL: $S3_IGNITE_FIRST_NODE_LOCK_URL"
echo "[INFO] Ignite nodes join lock URL: $S3_IGNITE_NODES_JOIN_LOCK_URL"
echo "[INFO] Cassandra node discovery URL: $S3_CASSANDRA_NODES_DISCOVERY_URL"
echo "[INFO] Bootsrap success URL: $S3_BOOTSTRAP_SUCCESS_URL"
echo "[INFO] Bootsrap failure URL: $S3_BOOTSTRAP_FAILURE_URL"
echo "[INFO]-----------------------------------------------------------------"
echo "[INFO] Installing 'wget' package"
yum -y install wget
if [ $? -ne 0 ]; then
terminate "Failed to install 'wget' package"
fi
echo "[INFO] Installing 'net-tools' package"
yum -y install net-tools
if [ $? -ne 0 ]; then
terminate "Failed to install 'net-tools' package"
fi
echo "[INFO] Installing 'python' package"
yum -y install python
if [ $? -ne 0 ]; then
terminate "Failed to install 'python' package"
fi
echo "[INFO] Installing 'unzip' package"
yum -y install unzip
if [ $? -ne 0 ]; then
terminate "Failed to install 'unzip' package"
fi
rm -Rf /opt/jdk1.8.0_77 /opt/jdk-8u77-linux-x64.tar.gz
echo "[INFO] Downloading 'jdk-8u77'"
wget --no-cookies --no-check-certificate --header "Cookie: gpw_e24=http%3A%2F%2Fwww.oracle.com%2F; oraclelicense=accept-securebackup-cookie" "http://download.oracle.com/otn-pub/java/jdk/8u77-b03/jdk-8u77-linux-x64.tar.gz" -O /opt/jdk-8u77-linux-x64.tar.gz
if [ $? -ne 0 ]; then
terminate "Failed to download 'jdk-8u77'"
fi
echo "[INFO] Unzipping 'jdk-8u77'"
tar -xvzf /opt/jdk-8u77-linux-x64.tar.gz -C /opt
if [ $? -ne 0 ]; then
terminate "Failed to untar 'jdk-8u77'"
fi
rm -Rf /opt/jdk-8u77-linux-x64.tar.gz
downloadPackage "https://bootstrap.pypa.io/get-pip.py" "/opt/get-pip.py" "get-pip.py"
echo "[INFO] Installing 'pip'"
python /opt/get-pip.py
if [ $? -ne 0 ]; then
terminate "Failed to install 'pip'"
fi
echo "[INFO] Installing 'awscli'"
pip install --upgrade awscli
if [ $? -ne 0 ]; then
echo "[ERROR] Failed to install 'awscli' using pip"
echo "[INFO] Trying to install awscli using zip archive"
echo "[INFO] Downloading awscli zip"
downloadPackage "$AWS_CLI_DOWNLOAD_URL" "/opt/awscli-bundle.zip" "awscli"
echo "[INFO] Unzipping awscli zip"
unzip /opt/awscli-bundle.zip -d /opt
if [ $? -ne 0 ]; then
terminate "Failed to unzip awscli zip"
fi
rm -fR /opt/awscli-bundle.zip
echo "[INFO] Installing awscli"
/opt/awscli-bundle/install -i /usr/local/aws -b /usr/local/bin/aws
if [ $? -ne 0 ]; then
terminate "Failed to install awscli"
fi
echo "[INFO] Successfully installed awscli from zip archive"
fi
tagInstance
echo "[INFO] Creating 'ignite' group"
exists=$(cat /etc/group | grep ignite)
if [ -z "$exists" ]; then
groupadd ignite
if [ $? -ne 0 ]; then
terminate "Failed to create 'ignite' group"
fi
fi
echo "[INFO] Creating 'ignite' user"
exists=$(cat /etc/passwd | grep ignite)
if [ -z "$exists" ]; then
useradd -g ignite ignite
if [ $? -ne 0 ]; then
terminate "Failed to create 'ignite' user"
fi
fi
rm -Rf /opt/ignite /opt/$IGNITE_ZIP
downloadPackage "$IGNITE_DOWNLOAD_URL" "/opt/$IGNITE_ZIP" "Ignite"
echo "[INFO] Unzipping Ignite package"
unzip /opt/$IGNITE_ZIP -d /opt
if [ $? -ne 0 ]; then
terminate "Failed to unzip Ignite package"
fi
rm -Rf /opt/$IGNITE_ZIP /opt/ignite-start.sh /opt/ignite-env.sh /opt/ignite
mv /opt/$IGNITE_UNZIP_DIR /opt/ignite
chown -R ignite:ignite /opt/ignite
downloadPackage "$TESTS_PACKAGE_DONLOAD_URL" "/opt/$TESTS_PACKAGE_ZIP" "Tests"
unzip /opt/$TESTS_PACKAGE_ZIP -d /opt
if [ $? -ne 0 ]; then
terminate "Failed to unzip tests package: $TESTS_PACKAGE_DONLOAD_URL"
fi
chown -R ignite:ignite /opt/$TESTS_PACKAGE_UNZIP_DIR
find /opt/$TESTS_PACKAGE_UNZIP_DIR -type f -name "*.sh" -exec chmod ug+x {} \;
if [ ! -f "/opt/$TESTS_PACKAGE_UNZIP_DIR/bootstrap/aws/ignite/ignite-start.sh" ]; then
terminate "There are no ignite-start.sh in tests package"
fi
if [ ! -f "/opt/$TESTS_PACKAGE_UNZIP_DIR/bootstrap/aws/ignite/ignite-cassandra-server-template.xml" ]; then
terminate "There are no ignite-cassandra-server-template.xml in tests package"
fi
if [ ! -f "/opt/$TESTS_PACKAGE_UNZIP_DIR/bootstrap/aws/logs-collector.sh" ]; then
terminate "There are no logs-collector.sh in tests package"
fi
testsJar=$(find /opt/$TESTS_PACKAGE_UNZIP_DIR -type f -name "*.jar" | grep ignite-cassandra- | grep tests.jar)
if [ -n "$testsJar" ]; then
echo "[INFO] Coping tests jar $testsJar into /opt/ignite/libs/optional/ignite-cassandra"
cp $testsJar /opt/ignite/libs/optional/ignite-cassandra
if [ $? -ne 0 ]; then
terminate "Failed copy $testsJar into /opt/ignite/libs/optional/ignite-cassandra"
fi
fi
mv -f /opt/$TESTS_PACKAGE_UNZIP_DIR/bootstrap/aws/ignite/ignite-start.sh /opt
mv -f /opt/$TESTS_PACKAGE_UNZIP_DIR/bootstrap/aws/ignite/ignite-cassandra-server-template.xml /opt/ignite/config
mv -f /opt/$TESTS_PACKAGE_UNZIP_DIR/bootstrap/aws/logs-collector.sh /opt
if [ -f "/opt/$TESTS_PACKAGE_UNZIP_DIR/bootstrap/aws/ignite/ignite-env.sh" ]; then
mv -f /opt/$TESTS_PACKAGE_UNZIP_DIR/bootstrap/aws/ignite/ignite-env.sh /opt
chown -R ignite:ignite /opt/ignite-env.sh
fi
rm -Rf /opt/$TESTS_PACKAGE_UNZIP_DIR
chown -R ignite:ignite /opt/ignite-start.sh /opt/logs-collector.sh /opt/ignite/config/ignite-cassandra-server-template.xml
#profile=/home/ignite/.bash_profile
profile=/root/.bash_profile
echo "export JAVA_HOME=/opt/jdk1.8.0_77" >> $profile
echo "export IGNITE_HOME=/opt/ignite" >> $profile
echo "export USER_LIBS=\$IGNITE_HOME/libs/optional/ignite-cassandra/*:\$IGNITE_HOME/libs/optional/ignite-slf4j/*" >> $profile
echo "export PATH=\$JAVA_HOME/bin:\IGNITE_HOME/bin:\$PATH" >> $profile
echo "export S3_BOOTSTRAP_SUCCESS_URL=$S3_BOOTSTRAP_SUCCESS_URL" >> $profile
echo "export S3_BOOTSTRAP_FAILURE_URL=$S3_BOOTSTRAP_FAILURE_URL" >> $profile
echo "export S3_CASSANDRA_NODES_DISCOVERY_URL=$S3_CASSANDRA_NODES_DISCOVERY_URL" >> $profile
echo "export S3_IGNITE_NODES_DISCOVERY_URL=$S3_IGNITE_NODES_DISCOVERY_URL" >> $profile
echo "export S3_IGNITE_NODES_JOIN_LOCK_URL=$S3_IGNITE_NODES_JOIN_LOCK_URL" >> $profile
echo "export S3_IGNITE_FIRST_NODE_LOCK_URL=$S3_IGNITE_FIRST_NODE_LOCK_URL" >> $profile
HOST_NAME=$(hostname -f | tr '[:upper:]' '[:lower:]')
/opt/logs-collector.sh "/opt/ignite/work/log" "$S3_LOGS_URL/$HOST_NAME" "$S3_LOGS_TRIGGER_URL" > /opt/ignite/logs-collector.log &
cmd="/opt/ignite-start.sh"
#sudo -u ignite -g ignite sh -c "$cmd | tee /opt/ignite/start.log"
$cmd | tee /opt/ignite/start.log
|
kromulan/ignite
|
modules/cassandra/src/test/bootstrap/aws/ignite/ignite-bootstrap.sh
|
Shell
|
apache-2.0
| 13,470 |
#!/bin/bash
TEST_SNIPPET="$(cat test.jsonnet)"
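# Both the inline snippet and the file under test must evaluate to the literal `true`.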
echo "Python testing Jsonnet snippet..."
OUTPUT="$(python jsonnet_test_snippet.py "${TEST_SNIPPET}")"
if [ "$?" != "0" ] ; then
echo "Jsonnet execution failed:"
echo "$OUTPUT"
exit 1
fi
if [ "$OUTPUT" != "true" ] ; then
echo "Got bad output:"
echo "$OUTPUT"
exit 1
fi
echo "Python testing Jsonnet file..."
OUTPUT="$(python jsonnet_test_file.py "test.jsonnet")"
if [ "$?" != "0" ] ; then
echo "Jsonnet execution failed:"
echo "$OUTPUT"
exit 1
fi
if [ "$OUTPUT" != "true" ] ; then
echo "Got bad output:"
echo "$OUTPUT"
exit 1
fi
echo "Python test passed."
|
lamuguo/jsonnet
|
python/run_tests.sh
|
Shell
|
apache-2.0
| 653 |
#!/bin/bash
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Bring up a Kubernetes cluster.
# Usage:
# wget -q -O - https://get.k8s.io | bash
# or
# curl -sS https://get.k8s.io | bash
#
# Advanced options
# Set KUBERNETES_PROVIDER to choose between different providers:
# Google Compute Engine [default]
# * export KUBERNETES_PROVIDER=gce; wget -q -O - https://get.k8s.io | bash
# Google Container Engine
# * export KUBERNETES_PROVIDER=gke; wget -q -O - https://get.k8s.io | bash
# Amazon EC2
# * export KUBERNETES_PROVIDER=aws; wget -q -O - https://get.k8s.io | bash
# Microsoft Azure
# * export KUBERNETES_PROVIDER=azure; wget -q -O - https://get.k8s.io | bash
# Vagrant (local virtual machines)
# * export KUBERNETES_PROVIDER=vagrant; wget -q -O - https://get.k8s.io | bash
# VMWare VSphere
# * export KUBERNETES_PROVIDER=vsphere; wget -q -O - https://get.k8s.io | bash
# Rackspace
# * export KUBERNETES_PROVIDER=rackspace; wget -q -O - https://get.k8s.io | bash
#
# Set KUBERNETES_SKIP_DOWNLOAD to non-empty to skip downloading a release.
# Set KUBERNETES_SKIP_CONFIRM to skip the installation confirmation prompt.
set -o errexit
set -o nounset
set -o pipefail
function create_cluster {
echo "Creating a kubernetes on ${KUBERNETES_PROVIDER:-gce}..."
(
cd kubernetes
./cluster/kube-up.sh
echo "Kubernetes binaries at ${PWD}/cluster/"
echo "You may want to add this directory to your PATH in \$HOME/.profile"
echo "Installation successful!"
)
}
if [[ "${KUBERNETES_SKIP_DOWNLOAD-}" ]]; then
create_cluster
exit 0
fi
function get_latest_version_number {
local -r latest_url="https://storage.googleapis.com/kubernetes-release/release/stable.txt"
if [[ $(which wget) ]]; then
wget -qO- ${latest_url}
elif [[ $(which curl) ]]; then
curl -Ss ${latest_url}
fi
}
release=$(get_latest_version_number)
release_url=https://storage.googleapis.com/kubernetes-release/release/${release}/kubernetes.tar.gz
uname=$(uname)
if [[ "${uname}" == "Darwin" ]]; then
platform="darwin"
elif [[ "${uname}" == "Linux" ]]; then
platform="linux"
else
echo "Unknown, unsupported platform: (${uname})."
echo "Supported platforms: Linux, Darwin."
echo "Bailing out."
exit 2
fi
machine=$(uname -m)
if [[ "${machine}" == "x86_64" ]]; then
arch="amd64"
elif [[ "${machine}" == "i686" ]]; then
arch="386"
elif [[ "${machine}" == "arm*" ]]; then
arch="arm"
else
echo "Unknown, unsupported architecture (${machine})."
echo "Supported architectures x86_64, i686, arm*"
echo "Bailing out."
exit 3
fi
file=kubernetes.tar.gz
echo "Downloading kubernetes release ${release} to ${PWD}/kubernetes.tar.gz"
if [[ -n "${KUBERNETES_SKIP_CONFIRM-}" ]]; then
echo "Is this ok? [Y]/n"
read confirm
if [[ "$confirm" == "n" ]]; then
echo "Aborting."
exit 0
fi
fi
if [[ $(which wget) ]]; then
wget -O ${file} ${release_url}
elif [[ $(which curl) ]]; then
curl -L -o ${file} ${release_url}
else
echo "Couldn't find curl or wget. Bailing out."
exit 1
fi
echo "Unpacking kubernetes release ${release}"
tar -xzf ${file}
rm ${file}
create_cluster
|
wojtek-t/kubernetes
|
cluster/get-kube.sh
|
Shell
|
apache-2.0
| 3,680 |
curl "http://www.codonsoft.net/OOBP/DefaultFonts/3HOURTOUR.eot" -O
curl "http://www.codonsoft.net/OOBP/DefaultFonts/A.C.M.E. EXPLOSIVE.eot" -O
curl "http://www.codonsoft.net/OOBP/DefaultFonts/ARCHITEXT.eot" -O
curl "http://www.codonsoft.net/OOBP/DefaultFonts/BANNER.eot" -O
curl "http://www.codonsoft.net/OOBP/DefaultFonts/BART.eot" -O
curl "http://www.codonsoft.net/OOBP/DefaultFonts/BERNIE.eot" -O
curl "http://www.codonsoft.net/OOBP/DefaultFonts/CHILLER.eot" -O
curl "http://www.codonsoft.net/OOBP/DefaultFonts/CHINESE TAKEAWAY.eot" -O
curl "http://www.codonsoft.net/OOBP/DefaultFonts/CHOPINSCRIPT.eot" -O
curl "http://www.codonsoft.net/OOBP/DefaultFonts/COTILLION.eot" -O
curl "http://www.codonsoft.net/OOBP/DefaultFonts/CURLZ MT.eot" -O
curl "http://www.codonsoft.net/OOBP/DefaultFonts/EDWARDIAN SCRIPT ITC.eot" -O
curl "http://www.codonsoft.net/OOBP/DefaultFonts/FIFTHAVE.eot" -O
curl "http://www.codonsoft.net/OOBP/DefaultFonts/FUNSTUFF.eot" -O
curl "http://www.codonsoft.net/OOBP/DefaultFonts/GASTON.eot" -O
curl "http://www.codonsoft.net/OOBP/DefaultFonts/GIGI LET.eot" -O
curl "http://www.codonsoft.net/OOBP/DefaultFonts/HARRINGTON.eot" -O
curl "http://www.codonsoft.net/OOBP/DefaultFonts/HOOTEROLL.eot" -O
curl "http://www.codonsoft.net/OOBP/DefaultFonts/INVITATION.eot" -O
curl "http://www.codonsoft.net/OOBP/DefaultFonts/JESTER.eot" -O
curl "http://www.codonsoft.net/OOBP/DefaultFonts/JOKERMAN LET.eot" -O
curl "http://www.codonsoft.net/OOBP/DefaultFonts/KRISTEN ITC.eot" -O
curl "http://www.codonsoft.net/OOBP/DefaultFonts/PT SCRIPT.eot" -O
curl "http://www.codonsoft.net/OOBP/DefaultFonts/QUILL.eot" -O
curl "http://www.codonsoft.net/OOBP/DefaultFonts/RAVIE.eot" -O
curl "http://www.codonsoft.net/OOBP/DefaultFonts/SIMPSON.eot" -O
curl "http://www.codonsoft.net/OOBP/DefaultFonts/STAGE COACH.eot" -O
curl "http://www.codonsoft.net/OOBP/DefaultFonts/TABITHA.eot" -O
curl "http://www.codonsoft.net/OOBP/DefaultFonts/TANGO.eot" -O
curl "http://www.codonsoft.net/OOBP/DefaultFonts/TINKER TOY.eot" -O
curl "http://www.codonsoft.net/OOBP/DefaultFonts/VIVALDI.eot" -O
|
chemouna/yui-examples
|
editor45/fonts/get.sh
|
Shell
|
bsd-3-clause
| 2,077 |
#!/bin/bash
#Build ctp/lts/ib api
pushd vnpy/api/ctp
bash build.sh
popd
pushd vnpy/api/lts
bash build.sh
popd
pushd vnpy/api/ib
bash build.sh
popd
#Install Python Modules
pip install -r requirements.txt
#Install Ta-Lib
conda config --add channels https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/free/
conda config --set show_channel_urls yes
conda install -c quantopian ta-lib=0.4.9
#Install vn.py
python setup.py install
|
jiangjinjinyxt/vnpy
|
install.sh
|
Shell
|
mit
| 432 |
#!/bin/sh
# Example for use of GNU gettext.
# Copyright (C) 2003 Free Software Foundation, Inc.
# This file is in the public domain.
#
# Script for cleaning all autogenerated files.
test ! -f Makefile || make distclean
# Brought in by explicit copy.
rm -f m4/nls.m4
rm -f m4/po.m4
rm -f po/remove-potcdate.sin
# Brought in by explicit copy.
rm -f m4/gcj.m4
rm -f m4/javacomp.m4
rm -f m4/javaexec.m4
rm -f javacomp.sh.in
rm -f javaexec.sh.in
# Generated by aclocal.
rm -f aclocal.m4
# Generated by autoconf.
rm -f configure
# Generated or brought in by automake.
rm -f Makefile.in
rm -f m4/Makefile.in
rm -f po/Makefile.in
rm -f install-sh
rm -f mkinstalldirs
rm -f missing
rm -f po/*.pot
rm -f po/stamp-po
rm -f hello-java-awt*.properties
|
jjenki11/blaze-chem-rendering
|
qca_designer/lib/gettext-0.14/gettext-tools/examples/hello-java-awt/autoclean.sh
|
Shell
|
mit
| 745 |
APP_ROOT="../../../../../../../160-patch";
if [ $# -gt 0 -a -z "$1" ]; then
APP_ROOT=$1
fi
echo App root here: ${APP_ROOT}
echo
echo 'Note: Ensure that the WSC module ('${APP_ROOT}'/core/wsc) source and classes are up-to-date.'
echo
echo "Metadata..."
echo ' jar...'
p4 edit lib/sfdc_metadatawsdl.jar
echo Before update...
ls -Llsrt $APP_ROOT/core/sfdc-test/func/java/ext-gen/sfdc_metadatawsdl.jar lib/sfdc_metadatawsdl.jar
echo 'cp '${APP_ROOT}'/core/sfdc-test/func/java/ext-gen/sfdc_metadatawsdl.jar lib/sfdc_metadatawsdl.jar'
cp $APP_ROOT/core/sfdc-test/func/java/ext-gen/sfdc_metadatawsdl.jar lib/sfdc_metadatawsdl.jar
echo After...
ls -Llsrt lib/sfdc_metadatawsdl.jar
echo ' wsdl...'
ls -Llsrt wsdl/metadata.wsdl
p4 edit wsdl/metadata.wsdl
echo 'cp '${APP_ROOT}'/core/sfdc-test/func/wsdl/metadata.wsdl wsdl/metadata.wsdl'
cp $APP_ROOT/core/sfdc-test/func/wsdl/metadata.wsdl wsdl/metadata.wsdl
ls -Llsrt wsdl/metadata.wsdl
echo
echo "Enterprise..."
echo ' wsdl...'
# wsc classes must exist
if [ ! -d $APP_ROOT/core/wsc/java/classes/com ]; then
echo ${APP_ROOT}'/core/wsc/java/classes/com not found'
    exit 1
fi
p4 edit lib/wsc.jar
echo Before update...
ls -Llsrt lib/wsc.jar
echo 'jar cvf lib/wsc.jar -C '${APP_ROOT}'/core/wsc/java/classes .'
jar cvf lib/wsc.jar -C ${APP_ROOT}/core/wsc/java/classes .
echo After...
ls -Llsrt lib/wsc.jar
echo ' wsdl...'
ls -Llsrt wsdl/enterprise.wsdl
p4 edit wsdl/enterprise.wsdl
echo 'cp '${APP_ROOT}'/core/sfdc-test/func/wsdl/enterprise.wsdl wsdl/enterprise.wsdl'
cp $APP_ROOT/core/sfdc-test/func/wsdl/enterprise.wsdl wsdl/enterprise.wsdl
ls -Llsrt wsdl/enterprise.wsdl
echo
echo "Apex..."
echo ' jar...'
p4 edit lib/apexwsdl.jar
echo Before update...
ls -Llsrt $APP_ROOT/core/shared/java/ext-gen/apexwsdl.jar lib/apexwsdl.jar
echo 'cp '${APP_ROOT}'/core/shared/java/ext-gen/apexwsdl.jar lib/apexwsdl.jar'
cp $APP_ROOT/core/shared/java/ext-gen/apexwsdl.jar lib/apexwsdl.jar
echo After...
ls -Llsrt lib/apexwsdl.jar
echo ' wsdl...'
ls -Llsrt wsdl/apex.wsdl
p4 edit wsdl/apex.wsdl
echo 'cp '${APP_ROOT}'/core/sfdc-test/func/wsdl/apex.wsdl wsdl/apex.wsdl'
cp $APP_ROOT/core/sfdc-test/func/wsdl/apex.wsdl wsdl/apex.wsdl
ls -Llsrt wsdl/apex.wsdl
echo
echo "Partner..."
echo ' jar...'
p4 edit lib/wsc_partnerwsdl.jar
echo Before update...
ls -Llsrt $APP_ROOT/core/shared/java/ext-gen/wsc_partnerwsdl.jar lib/wsc_partnerwsdl.jar
echo 'cp '${APP_ROOT}'/core/shared/java/ext-gen/wsc_partnerwsdl.jar lib/wsc_partnerwsdl.jar'
cp $APP_ROOT/core/shared/java/ext-gen/wsc_partnerwsdl.jar lib/wsc_partnerwsdl.jar
echo After...
ls -Llsrt lib/wsc_partnerwsdl.jar
echo ' wsdl...'
ls -Llsrt wsdl/partner.wsdl
p4 edit wsdl/partner.wsdl
echo 'cp '${APP_ROOT}'/core/sfdc-test/func/wsdl/partner.wsdl wsdl/partner.wsdl'
cp $APP_ROOT/core/sfdc-test/func/wsdl/partner.wsdl wsdl/partner.wsdl
ls -Llsrt wsdl/partner.wsdl
echo 'Done'
|
Simha009/idecore
|
com.salesforce.ide.api/refresh-api-jars-wsdls.sh
|
Shell
|
epl-1.0
| 2,850 |
#!/bin/sh
#
# Copyright (C) 2010 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Rebuild all prebuilts. This requires that you have a toolchain source tree
#
. `dirname $0`/prebuilt-common.sh
PROGDIR=`dirname $0`
NDK_DIR=$ANDROID_NDK_ROOT
register_var_option "--ndk-dir=<path>" NDK_DIR "Put binaries into NDK install directory"
BUILD_DIR=/tmp/ndk-$USER/build
register_var_option "--build-dir=<path>" BUILD_DIR "Specify temporary build directory"
ARCHS=$DEFAULT_ARCHS
register_var_option "--arch=<arch>" ARCHS "Specify target architectures"
SYSTEMS=$HOST_TAG32
if [ "$HOST_TAG32" = "linux-x86" ]; then
SYSTEMS=$SYSTEMS",windows"
fi
CUSTOM_SYSTEMS=
register_option "--systems=<list>" do_SYSTEMS "Specify host systems"
do_SYSTEMS () { CUSTOM_SYSTEMS=true; SYSTEMS=$1; }
RELEASE=`date +%Y%m%d`
PACKAGE_DIR=/tmp/ndk-$USER/prebuilt-$RELEASE
register_var_option "--package-dir=<path>" PACKAGE_DIR "Put prebuilt tarballs into <path>."
DARWIN_SSH=
if [ "$HOST_OS" = "linux" ] ; then
register_var_option "--darwin-ssh=<hostname>" DARWIN_SSH "Specify Darwin hostname for remote build."
fi
register_try64_option
PROGRAM_PARAMETERS="<toolchain-src-dir>"
PROGRAM_DESCRIPTION=\
"This script is used to rebuild all host and target prebuilts from scratch.
You will need to give the path of a toolchain source directory, one which
is typically created with the download-toolchain-sources.sh script.
Unless you use the --ndk-dir option, all binaries will be installed to the
current NDK directory.
All prebuilts will then be archived into tarball that will be stored into a
specific 'package directory'. Unless you use the --package-dir option, this
will be: $PACKAGE_DIR
Please read docs/DEV-SCRIPTS-USAGE.TXT for more usage information about this
script.
"
extract_parameters "$@"
SRC_DIR="$PARAMETERS"
check_toolchain_src_dir "$SRC_DIR"
if [ "$DARWIN_SSH" -a -z "$CUSTOM_SYSTEMS" ]; then
SYSTEMS=$SYSTEMS",darwin-x86"
fi
FLAGS=
if [ "$VERBOSE" = "yes" ]; then
FLAGS=$FLAGS" --verbose"
fi
if [ "$VERBOSE2" = "yes" ]; then
FLAGS=$FLAGS" --verbose"
fi
FLAGS=$FLAGS" --ndk-dir=$NDK_DIR"
FLAGS=$FLAGS" --package-dir=$PACKAGE_DIR"
FLAGS=$FLAGS" --arch=$(spaces_to_commas $ARCHS)"
HOST_FLAGS=$FLAGS" --systems=$(spaces_to_commas $SYSTEMS)"
if [ "$TRY64" = "yes" ]; then
HOST_FLAG=$HOST_FLAGS" --try-64"
fi
if [ "$DARWIN_SSH" ]; then
HOST_FLAGS=$HOST_FLAGS" --darwin-ssh=$DARWIN_SSH"
fi
$PROGDIR/build-host-prebuilts.sh $HOST_FLAGS "$SRC_DIR"
fail_panic "Could not build host prebuilts!"
TARGET_FLAGS=$FLAGS
$PROGDIR/build-target-prebuilts.sh $TARGET_FLAGS "$SRC_DIR"
fail_panic "Could not build target prebuilts!"
echo "Done, see $PACKAGE_DIR:"
ls -l $PACKAGE_DIR
exit 0
|
rex-xxx/mt6572_x201
|
ndk/build/tools/rebuild-all-prebuilt.sh
|
Shell
|
gpl-2.0
| 3,230 |
#!/bin/bash
vert='\e[0;32m'
blanc='\e[0;37m'
jaune='\e[1;33m'
neutre='\e[0;m'
rm -f *~
HOME_DSM_MAIN_DIR=/ccc/scratch/cont003/gen7165/durochtc/Codes/SPECFEM3Ds/specfem3d/utils/EXTERNAL_CODES_coupled_with_SPECFEM3D/DSM_for_SPECFEM3D/
export HOME_DSM_MAIN_DIR
echo " "
echo "Cleaning this directory, and remove the directory bin/ in HOME_DSM_MAIN_DIR"
echo -e "Check the path of HOME_DSM_MAIN_DIR in clean.sh, currently defined as: ${jaune}\033[1m${HOME_DSM_MAIN_DIR}\033[0m${neutre}"
echo " "
echo "To adapt for the LIGHT storage version of DSM using 2D"
|
kbai/specfem3d
|
utils/EXTERNAL_CODES_coupled_with_SPECFEM3D/DSM_for_SPECFEM3D/DSM-2-LIGHT_storage_version_using_2D/clean.sh
|
Shell
|
gpl-2.0
| 555 |
#!/bin/bash
export QGIS_DEBUG=0
export QGIS_LOG_FILE=/tmp/inasafe/realtime/logs/qgis.log
export QGIS_DEBUG_FILE=/tmp/inasafe/realtime/logs/qgis-debug.log
export QGIS_PREFIX_PATH=/usr/local/qgis-master/
export PYTHONPATH=${QGIS_PREFIX_PATH}/share/qgis/python/:`pwd`
export LD_LIBRARY_PATH=${QGIS_PREFIX_PATH}/lib
export INASAFE_WORK_DIR=/home/web/quake
export INASAFE_POPULATION_PATH=`pwd`/realtime/fixtures/exposure/population.tif
export INASAFE_LOCALE=id
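# xvfb-run provides a virtual X display so the QGIS renderer can run headless.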
for FILE in `xvfb-run -a --server-args="-screen 0, 1024x768x24" python realtime/make_map.py --list | grep -v inp | grep -v Proces`
do
# FILE=`echo $FILE | sed 's/ftp:\/\/118.97.83.243\///g'`
# FILE=`echo $FILE | sed 's/.out.zip//g'`
  # simple filter in case there is other output besides the event IDs
if [ 14 == ${#FILE} ] ; then
echo "Running: $FILE"
xvfb-run -a --server-args="-screen 0, 1024x768x24" python realtime/make_map.py $FILE
fi
done
exit
# Memory errors..
#xvfb-run -a --server-args="-screen 0, 1024x768x24" python realtime/make_map.py --run-all
|
rukku/inasafe
|
scripts/make-all-shakemaps.sh
|
Shell
|
gpl-3.0
| 1,036 |
#!/bin/bash
#
# batch-wiki.sh: Example batch processing script.
# This script is tailored for processing wikipedia articles.
# The articles are assumed to have been stripped of HTML
# markup already.
#
# This script outputs the so-called "compact format" which
# captures the full range of Link Grammar and RelEx output in a format
# that can be easily post-processed by other systems (typically by
# using regex's). The src/perl/cff-to-opencog.pl perl script provides
# an example of post-processing: it converts this output format into
# OpenCog hypergraphs.
#
# The idea behind the batch processing is that it is costly to parse
# large quantities of text: thus, it is convenient to parse the text
# once, save the results, and then perform post-processing at leisure,
# as needed. Thus, the form of post-processing can be changed at will,
# without requiring texts to be re-processed over and over again.
#
export LANG=en_US.UTF-8
VM_OPTS="-Xmx1024m"
RELEX_OPTS="\
-Djava.library.path=/usr/local/lib:/usr/local/lib/jni \
-DEnglishModelFilename=data/opennlp/models-1.5/en-sent.bin \
"
# -Drelex.algpath=data/relex-semantic-algs.txt \
# -Dwordnet.configfile=data/wordnet/file_properties.xml \
#
CLASSPATH="-classpath \
/usr/local/share/java/relex.jar:\
/usr/local/share/java/opennlp-tools-1.5.0.jar:\
/usr/local/share/java/maxent-3.0.0.jar:\
/usr/local/share/java/trove.jar:\
/usr/local/share/java/jwnl.jar:\
/usr/local/share/java/commons-logging.jar:\
/usr/local/share/java/linkgrammar.jar:\
/usr/share/java/linkgrammar.jar:\
/usr/share/java/gnu-getopt.jar:\
"
# IFS=$(echo -en "\n\b")
lettre=S
filepat=Sa*
FILES=enwiki-20080524-alpha/$lettre/$filepat
for fpath in $FILES
do
f=${fpath##*/}
echo "Processing \"${f}\""
url="http://en.wikipedia.org/wiki/${f}"
echo "url $url"
cat "${fpath}" | \
nice java $VM_OPTS $RELEX_OPTS $CLASSPATH relex.WebFormat -g -n 20 \
--url "${url}" > "parsed/$lettre/${f}.xml" 2> "err/$lettre/${f}"
mv "enwiki-20080524-alpha/$lettre/${f}" done/$lettre
done
|
keskival/2
|
relex/batch-wiki.sh
|
Shell
|
gpl-3.0
| 2,019 |
#!/bin/sh
rm -rf '/usr/local/var/postgres'
|
creatyvtype/upstage
|
delete_old_cluster.sh
|
Shell
|
apache-2.0
| 44 |
#!/bin/bash
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
bin=$(dirname "${BASH_SOURCE-$0}")
bin=$(cd "${bin}">/dev/null; pwd)
function usage() {
echo "usage) $0 -p <port> -d <interpreter dir to load> -l <local interpreter repo dir to load> -g <interpreter group name>"
}
while getopts "hc:p:d:l:v:u:g:" o; do
case ${o} in
h)
usage
exit 0
;;
d)
INTERPRETER_DIR=${OPTARG}
;;
c)
CALLBACK_HOST=${OPTARG} # This will be used callback host
;;
p)
PORT=${OPTARG} # This will be used callback port
;;
l)
LOCAL_INTERPRETER_REPO=${OPTARG}
;;
v)
. "${bin}/common.sh"
getZeppelinVersion
;;
u)
ZEPPELIN_IMPERSONATE_USER="${OPTARG}"
if [[ -z "$ZEPPELIN_IMPERSONATE_CMD" ]]; then
      ZEPPELIN_IMPERSONATE_RUN_CMD="ssh ${ZEPPELIN_IMPERSONATE_USER}@localhost"
else
ZEPPELIN_IMPERSONATE_RUN_CMD=$(eval "echo ${ZEPPELIN_IMPERSONATE_CMD} ")
fi
;;
g)
INTERPRETER_GROUP_NAME=${OPTARG}
;;
esac
done
if [ -z "${PORT}" ] || [ -z "${INTERPRETER_DIR}" ]; then
usage
exit 1
fi
. "${bin}/common.sh"
ZEPPELIN_INTP_CLASSPATH="${CLASSPATH}"
# construct classpath
if [[ -d "${ZEPPELIN_HOME}/zeppelin-interpreter/target/classes" ]]; then
ZEPPELIN_INTP_CLASSPATH+=":${ZEPPELIN_HOME}/zeppelin-interpreter/target/classes"
fi
# add test classes for unittest
if [[ -d "${ZEPPELIN_HOME}/zeppelin-interpreter/target/test-classes" ]]; then
ZEPPELIN_INTP_CLASSPATH+=":${ZEPPELIN_HOME}/zeppelin-interpreter/target/test-classes"
fi
if [[ -d "${ZEPPELIN_HOME}/zeppelin-zengine/target/test-classes" ]]; then
ZEPPELIN_INTP_CLASSPATH+=":${ZEPPELIN_HOME}/zeppelin-zengine/target/test-classes"
fi
addJarInDirForIntp "${ZEPPELIN_HOME}/zeppelin-interpreter/target/lib"
addJarInDirForIntp "${ZEPPELIN_HOME}/lib/interpreter"
addJarInDirForIntp "${INTERPRETER_DIR}"
HOSTNAME=$(hostname)
ZEPPELIN_SERVER=org.apache.zeppelin.interpreter.remote.RemoteInterpreterServer
INTERPRETER_ID=$(basename "${INTERPRETER_DIR}")
ZEPPELIN_PID="${ZEPPELIN_PID_DIR}/zeppelin-interpreter-${INTERPRETER_ID}-${ZEPPELIN_IDENT_STRING}-${HOSTNAME}.pid"
ZEPPELIN_LOGFILE="${ZEPPELIN_LOG_DIR}/zeppelin-interpreter-"
if [[ ! -z "$INTERPRETER_GROUP_NAME" ]]; then
ZEPPELIN_LOGFILE+="${INTERPRETER_GROUP_NAME}-"
fi
if [[ ! -z "$ZEPPELIN_IMPERSONATE_USER" ]]; then
ZEPPELIN_LOGFILE+="${ZEPPELIN_IMPERSONATE_USER}-"
fi
ZEPPELIN_LOGFILE+="${INTERPRETER_ID}-${ZEPPELIN_IDENT_STRING}-${HOSTNAME}.log"
JAVA_INTP_OPTS+=" -Dzeppelin.log.file=${ZEPPELIN_LOGFILE}"
if [[ ! -d "${ZEPPELIN_LOG_DIR}" ]]; then
echo "Log dir doesn't exist, create ${ZEPPELIN_LOG_DIR}"
$(mkdir -p "${ZEPPELIN_LOG_DIR}")
fi
# set spark related env variables
if [[ "${INTERPRETER_ID}" == "spark" ]]; then
if [[ -n "${SPARK_HOME}" ]]; then
export SPARK_SUBMIT="${SPARK_HOME}/bin/spark-submit"
SPARK_APP_JAR="$(ls ${ZEPPELIN_HOME}/interpreter/spark/zeppelin-spark*.jar)"
    # This eventually passes SPARK_APP_JAR to the classpath of SparkIMain
ZEPPELIN_INTP_CLASSPATH+=":${SPARK_APP_JAR}"
pattern="$SPARK_HOME/python/lib/py4j-*-src.zip"
py4j=($pattern)
# pick the first match py4j zip - there should only be one
export PYTHONPATH="$SPARK_HOME/python/:$PYTHONPATH"
export PYTHONPATH="${py4j[0]}:$PYTHONPATH"
else
# add Hadoop jars into classpath
if [[ -n "${HADOOP_HOME}" ]]; then
# Apache
addEachJarInDirRecursiveForIntp "${HADOOP_HOME}/share"
# CDH
addJarInDirForIntp "${HADOOP_HOME}"
addJarInDirForIntp "${HADOOP_HOME}/lib"
fi
addJarInDirForIntp "${INTERPRETER_DIR}/dep"
pattern="${ZEPPELIN_HOME}/interpreter/spark/pyspark/py4j-*-src.zip"
py4j=($pattern)
# pick the first match py4j zip - there should only be one
PYSPARKPATH="${ZEPPELIN_HOME}/interpreter/spark/pyspark/pyspark.zip:${py4j[0]}"
if [[ -z "${PYTHONPATH}" ]]; then
export PYTHONPATH="${PYSPARKPATH}"
else
export PYTHONPATH="${PYTHONPATH}:${PYSPARKPATH}"
fi
unset PYSPARKPATH
export SPARK_CLASSPATH+=":${ZEPPELIN_INTP_CLASSPATH}"
fi
if [[ -n "${HADOOP_CONF_DIR}" ]] && [[ -d "${HADOOP_CONF_DIR}" ]]; then
ZEPPELIN_INTP_CLASSPATH+=":${HADOOP_CONF_DIR}"
export HADOOP_CONF_DIR=${HADOOP_CONF_DIR}
else
    # autodetect HADOOP_CONF_DIR by heuristic
if [[ -n "${HADOOP_HOME}" ]] && [[ -z "${HADOOP_CONF_DIR}" ]]; then
if [[ -d "${HADOOP_HOME}/etc/hadoop" ]]; then
export HADOOP_CONF_DIR="${HADOOP_HOME}/etc/hadoop"
elif [[ -d "/etc/hadoop/conf" ]]; then
export HADOOP_CONF_DIR="/etc/hadoop/conf"
fi
fi
fi
elif [[ "${INTERPRETER_ID}" == "hbase" ]]; then
if [[ -n "${HBASE_CONF_DIR}" ]]; then
ZEPPELIN_INTP_CLASSPATH+=":${HBASE_CONF_DIR}"
elif [[ -n "${HBASE_HOME}" ]]; then
ZEPPELIN_INTP_CLASSPATH+=":${HBASE_HOME}/conf"
else
echo "HBASE_HOME and HBASE_CONF_DIR are not set, configuration might not be loaded"
fi
elif [[ "${INTERPRETER_ID}" == "pig" ]]; then
  # autodetect HADOOP_CONF_DIR by heuristic
if [[ -n "${HADOOP_HOME}" ]] && [[ -z "${HADOOP_CONF_DIR}" ]]; then
if [[ -d "${HADOOP_HOME}/etc/hadoop" ]]; then
export HADOOP_CONF_DIR="${HADOOP_HOME}/etc/hadoop"
elif [[ -d "/etc/hadoop/conf" ]]; then
export HADOOP_CONF_DIR="/etc/hadoop/conf"
fi
fi
if [[ -n "${HADOOP_CONF_DIR}" ]] && [[ -d "${HADOOP_CONF_DIR}" ]]; then
ZEPPELIN_INTP_CLASSPATH+=":${HADOOP_CONF_DIR}"
fi
# autodetect TEZ_CONF_DIR
if [[ -n "${TEZ_CONF_DIR}" ]]; then
ZEPPELIN_INTP_CLASSPATH+=":${TEZ_CONF_DIR}"
elif [[ -d "/etc/tez/conf" ]]; then
ZEPPELIN_INTP_CLASSPATH+=":/etc/tez/conf"
else
echo "TEZ_CONF_DIR is not set, configuration might not be loaded"
fi
fi
addJarInDirForIntp "${LOCAL_INTERPRETER_REPO}"
if [[ ! -z "$ZEPPELIN_IMPERSONATE_USER" ]]; then
suid="$(id -u ${ZEPPELIN_IMPERSONATE_USER})"
if [[ -n "${suid}" || -z "${SPARK_SUBMIT}" ]]; then
INTERPRETER_RUN_COMMAND=${ZEPPELIN_IMPERSONATE_RUN_CMD}" '"
if [[ -f "${ZEPPELIN_CONF_DIR}/zeppelin-env.sh" ]]; then
INTERPRETER_RUN_COMMAND+=" source "${ZEPPELIN_CONF_DIR}'/zeppelin-env.sh;'
fi
fi
fi
if [[ -n "${SPARK_SUBMIT}" ]]; then
if [[ -n "$ZEPPELIN_IMPERSONATE_USER" ]] && [[ "$ZEPPELIN_IMPERSONATE_SPARK_PROXY_USER" != "false" ]]; then
INTERPRETER_RUN_COMMAND+=' '` echo ${SPARK_SUBMIT} --class ${ZEPPELIN_SERVER} --driver-class-path \"${ZEPPELIN_INTP_CLASSPATH_OVERRIDES}:${ZEPPELIN_INTP_CLASSPATH}\" --driver-java-options \"${JAVA_INTP_OPTS}\" ${SPARK_SUBMIT_OPTIONS} ${ZEPPELIN_SPARK_CONF} --proxy-user ${ZEPPELIN_IMPERSONATE_USER} ${SPARK_APP_JAR} ${CALLBACK_HOST} ${PORT}`
else
INTERPRETER_RUN_COMMAND+=' '` echo ${SPARK_SUBMIT} --class ${ZEPPELIN_SERVER} --driver-class-path \"${ZEPPELIN_INTP_CLASSPATH_OVERRIDES}:${ZEPPELIN_INTP_CLASSPATH}\" --driver-java-options \"${JAVA_INTP_OPTS}\" ${SPARK_SUBMIT_OPTIONS} ${ZEPPELIN_SPARK_CONF} ${SPARK_APP_JAR} ${CALLBACK_HOST} ${PORT}`
fi
else
INTERPRETER_RUN_COMMAND+=' '` echo ${ZEPPELIN_RUNNER} ${JAVA_INTP_OPTS} ${ZEPPELIN_INTP_MEM} -cp ${ZEPPELIN_INTP_CLASSPATH_OVERRIDES}:${ZEPPELIN_INTP_CLASSPATH} ${ZEPPELIN_SERVER} ${CALLBACK_HOST} ${PORT} `
fi
if [[ ! -z "$ZEPPELIN_IMPERSONATE_USER" ]] && [[ -n "${suid}" || -z "${SPARK_SUBMIT}" ]]; then
INTERPRETER_RUN_COMMAND+="'"
fi
eval $INTERPRETER_RUN_COMMAND &
pid=$!
if [[ -z "${pid}" ]]; then
exit 1;
else
echo ${pid} > ${ZEPPELIN_PID}
fi
trap 'shutdown_hook;' SIGTERM SIGINT SIGQUIT
function shutdown_hook() {
local count
count=0
while [[ "${count}" -lt 10 ]]; do
    kill ${pid} > /dev/null 2> /dev/null
if kill -0 ${pid} > /dev/null 2>&1; then
sleep 3
let "count+=1"
else
rm -f "${ZEPPELIN_PID}"
break
fi
if [[ "${count}" == "5" ]]; then
      kill -9 ${pid} > /dev/null 2> /dev/null
rm -f "${ZEPPELIN_PID}"
fi
done
}
wait
|
astroshim/incubator-zeppelin
|
bin/interpreter.sh
|
Shell
|
apache-2.0
| 8,875 |
#!/bin/sh
#
# Copyright (c) 2015 Angelescu Ovidiu
#
# See COPYING for licence terms.
#
# $FreeBSD$
# $Id: installports.sh,v 1.5 2015/08/19 18:28:56 convbsd Exp $
#
# Install ports listed in the INSTALL_PORTS variable
# in the usual category/portname
# form, e.g.: x11/nvidia-driver audio/emu10kx ...
set -e -u
if [ -z "${LOGFILE:-}" ]; then
echo "This script can't run standalone."
echo "Please use launch.sh to execute it."
exit 1
fi
INSTALL_PORTS=${INSTALL_PORTS:-}
if ! ${USE_JAILS} ; then
if [ -z "$(mount | grep ${BASEDIR}/var/run)" ]; then
mount ${BASEDIR}/var/run
fi
fi
cp /etc/resolv.conf ${BASEDIR}/etc/resolv.conf
if [ ! -z "${INSTALL_PORTS}" ]; then
tmpmakeconf=$(TMPDIR=${BASEDIR}/tmp mktemp -t make.conf)
envvars="BATCH=true"
if [ ! -z "${MAKE_CONF:-}" ]; then
cat ${MAKE_CONF} > ${tmpmakeconf}
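    # strip the $BASEDIR prefix so the path is valid inside the chroot used below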
envvars="${envvars} __MAKE_CONF=${tmpmakeconf#$BASEDIR}"
fi
for i in ${INSTALL_PORTS}; do
echo "Compiling ${i}"
(script -aq ${LOGFILE} chroot ${BASEDIR} make -C /usr/ports/${i} \
${envvars} clean install clean;) | grep '^===>'
done
fi
if ! ${USE_JAILS} ; then
if [ -n "$(mount | grep ${BASEDIR}/var/run)" ]; then
umount ${BASEDIR}/var/run
fi
fi
rm -f ${BASEDIR}/etc/resolv.conf
cd ${LOCALDIR}
|
asxbsd/ghostbsd-build
|
scripts/installports.sh
|
Shell
|
bsd-2-clause
| 1,265 |
#!/bin/bash -e
# author: Hugh French and Fabian Buske
# date: March 2014
echo ">>>>> Count tables from htseqcount output"
echo ">>>>> startdate "`date`
echo ">>>>> hostname "`hostname`
echo ">>>>> job_name "$JOB_NAME
echo ">>>>> job_id "$JOB_ID
echo ">>>>> $(basename $0) $*"
function usage {
echo -e "usage: $(basename $0) -k NGSANE -f INPUTFILE -o OUTDIR [OPTIONS]"
exit
}
if [ ! $# -gt 3 ]; then usage ; fi
#INPUTS
while [ "$1" != "" ]; do
case $1 in
-k | --toolkit ) shift; CONFIG=$1 ;; # location of the NGSANE repository
-f | --file ) shift; FILES=$1 ;; # input file
-o | --outdir ) shift; OUTDIR=$1 ;; # output dir
--recover-from ) shift; NGSANE_RECOVERFROM=$1 ;; # attempt to recover from log file
-h | --help ) usage ;;
* ) echo "don't understand "$1
esac
shift
done
#PROGRAMS
. $CONFIG
. ${NGSANE_BASE}/conf/header.sh
. $CONFIG
################################################################################
NGSANE_CHECKPOINT_INIT "programs"
# save way to load modules that itself loads other modules
hash module 2>/dev/null && for MODULE in $MODULE_HTSEQCOUNT; do module load $MODULE; done && module list
export PATH=$PATH_HTSEQCOUNT:$PATH
echo "PATH=$PATH"
echo -e "--NGSANE --\n" $(trigger.sh -v 2>&1)
echo -e "--R --\n "$(R --version | head -n 3)
[ -z "$(which R)" ] && echo "[WARN] no R detected"
NGSANE_CHECKPOINT_CHECK
################################################################################
NGSANE_CHECKPOINT_INIT "parameters"
OLDIFS=$IFS
IFS=","
DATASETS=""
for f in $FILES; do
# get basename of f
n=${f/%$ASD.bam/}
FILE=${n/$INPUT_HTSEQCOUNT/$TASK_HTSEQCOUNT}
# get directory
d=$(dirname $f)
d=${d##*/} # add to dataset
if [ -n "$FILE" ]; then
DATASETS="${DATASETS[@]} ${FILE[@]}"
fi
done
IFS=" "
echo "[NOTE] Datasets: $DATASETS"
if [[ -n "$HTSEQCOUNT_USECUFFMERGEGTF" ]] && [[ -n "$MERGED_GTF_NAME" ]] && [[ -f $OUT/expression/$TASK_CUFFLINKS/$MERGED_GTF_NAME.gtf ]] ; then
GTF=$OUT/expression/$TASK_CUFFLINKS/$MERGED_GTF_NAME.gtf
echo "[NOTE] Using GTF from cuffmerge"
fi
annoF=${GTF##*/}
anno_version=${annoF%.*}
echo "[NOTE] GTF $anno_version"
# delete old output files unless attempting to recover
if [ -z "$NGSANE_RECOVERFROM" ]; then
if [ -d $OUTDIR ]; then rm -rf $OUTDIR/*.csv; fi
fi
mkdir -p $OUTDIR
# unique temp folder that should be used to store temporary files
THISTMP=$TMP"/"$(whoami)"/"$(echo $OUTDIR | md5sum | cut -d' ' -f1)
mkdir -p $THISTMP
NGSANE_CHECKPOINT_CHECK
################################################################################
NGSANE_CHECKPOINT_INIT "recall files from tape"
if [ -n "$DMGET" ]; then
dmget -a ${DATASETS[@]}
dmget -a $OUTDIR/*
# TODO add additional resources that are required and may need recovery from tape
fi
NGSANE_CHECKPOINT_CHECK
################################################################################
NGSANE_CHECKPOINT_INIT "create tables of counts"
if [[ $(NGSANE_CHECKPOINT_TASK) == "start" ]]; then
for GTF in "$anno_version" ; do
for MODE in "union" "intersection-strict" "intersection-nonempty"; do
for ATTR in "gene_id" "transcript_id"; do
[ -f ${THISTMP}/files.txt ] && rm ${THISTMP}/files.txt
touch ${THISTMP}/files.txt
array=(${DATASETS[@]})
array=( "${array[@]/%//${GTF}.${MODE}.${ATTR}}" )
for THISFILE in "${array[@]}"; do
[ -f $THISFILE ] && echo $THISFILE "Found" >> ${THISTMP}/files.txt || echo "Not found" >> ${THISTMP}/files.txt
done
if grep -q "Not found" ${THISTMP}/files.txt;then
echo "[NOTE] ${GTF}.${MODE}.${ATTR} - at least one sample does not have gtf, skipping."
else
echo "[NOTE] ${GTF}.${MODE}.${ATTR} - found, making count table."
[ -f ${THISTMP}/joinedfile.txt ] && rm ${THISTMP}/joinedfile.txt
for i in "${array[@]}"; do
if [ ! -f ${THISTMP}/joinedfile.txt ]; then
cat ${i} > ${THISTMP}/joinedfile.txt
else
cut -f 2 ${i} | paste ${THISTMP}/joinedfile.txt - > ${THISTMP}/tmp.txt
mv ${THISTMP}/tmp.txt ${THISTMP}/joinedfile.txt
fi
done
echo "${array[@]##*${TASK_HTSEQCOUNT}}" | sed 's/ /,/g' | sed "s/\/${GTF}.${MODE}.${ATTR}//g" | sed 's/\///g' > ${THISTMP}/tmp.txt
awk '{print "gene," $0;}' ${THISTMP}/tmp.txt > ${THISTMP}/out.csv
if [[ ! -s ${THISTMP}/joinedfile.txt ]]; then
echo "[ERROR] non of the samples could be joined into a counts table"
exit 1
fi
cat ${THISTMP}/joinedfile.txt | sed 's/\t/,/g' >> ${THISTMP}/out.csv
mv ${THISTMP}/out.csv ${OUTDIR}/${anno_version}.${MODE}.${ATTR}.csv
echo "[NOTE] OUTFILE ${OUTDIR}/${anno_version}.${MODE}.${ATTR}.csv"
fi
done
done
done
# mark checkpoint
NGSANE_CHECKPOINT_CHECK
fi
################################################################################
NGSANE_CHECKPOINT_INIT "MDS plot"
if hash Rscript 2>&- ; then
for TABLE in $OUTDIR/*.csv; do
COLUMNS=$(cat $TABLE| head -n 1 | tr ',' '\n' | wc -l | cut -f 1)
    if [[ $COLUMNS -lt 4 ]]; then
echo "[NOTE] At least 3 columns needed for MDS plot, skipping $TABLE"
else
cat > $TABLE.R <<EOF
library(limma)
pdf("${TABLE}.pdf", width=12, height=3)
dt <- read.csv("$TABLE", row.names = 1)
par(mfrow=c(1,4), mar=c(5,4,2,2))
for (top in c(100, 500, 1000, 5000)) {
plotMDS(dt,main=top, top=top)
}
dev.off()
EOF
Rscript --vanilla $TABLE.R
fi
done
fi
NGSANE_CHECKPOINT_CHECK
################################################################################
NGSANE_CHECKPOINT_INIT "cleanup"
[ -f ${THISTMP}/out.csv ] && rm ${THISTMP}/out.csv
[ -f ${THISTMP}/joinedfile.txt ] && rm ${THISTMP}/joinedfile.txt
[ -f ${THISTMP}/files.txt ] && rm ${THISTMP}/files.txt
NGSANE_CHECKPOINT_CHECK
################################################################################
echo ">>>>> Count tables from htseqcount output - FINISHED"
echo ">>>>> enddate "`date`
|
aehrc/ngsane
|
mods/htseqcount_countsTable.sh
|
Shell
|
bsd-3-clause
| 7,004 |
#!/bin/bash
FN="geneLenDataBase_1.22.0.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.10/data/experiment/src/contrib/geneLenDataBase_1.22.0.tar.gz"
"https://bioarchive.galaxyproject.org/geneLenDataBase_1.22.0.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-genelendatabase/bioconductor-genelendatabase_1.22.0_src_all.tar.gz"
)
MD5="0b403076ee853de74895940fe6190ac5"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
curl $URL > $TARBALL
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks.
if [[ $(uname -s) == "Linux" ]]; then
  if md5sum -c <<<"$MD5  $TARBALL"; then
    SUCCESS=1
    break
  fi
elif [[ $(uname -s) == "Darwin" ]]; then
  if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
    SUCCESS=1
    break
  fi
fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
|
Luobiny/bioconda-recipes
|
recipes/bioconductor-genelendatabase/post-link.sh
|
Shell
|
mit
| 1,327 |
#!/bin/bash
#
# Developed by Rafael Corrêa Gomes
# Contact [email protected]
#
ready(){
echo "
======================
Executed!
"
}
|
Altamir/simplesh
|
base/message.sh
|
Shell
|
mit
| 149 |
#!/bin/bash
# Add environment java vars
export JAVA_HOME=/usr/lib/jvm/java-8-oracle
export JRE_HOME=/usr/lib/jvm/java-8-oracle
# Download orient
wget -O $HOME/orientdb-community-$ORIENT_VERSION.tar.gz "http://www.orientechnologies.com/download.php?file=orientdb-community-$ORIENT_VERSION.tar.gz"
tar -xzf $HOME/orientdb-community-$ORIENT_VERSION.tar.gz -C $HOME/
#update config with correct user/password
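# sed's 'a' command appends the new <user> element on the line after the <users> opening tag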
sed -i '/<users>/a <user name="root" password="root" resources="*"><\/user>' $HOME/orientdb-community-$ORIENT_VERSION/config/orientdb-server-config.xml
# run and wait for it to init
$HOME/orientdb-community-$ORIENT_VERSION/bin/server.sh > /dev/null 2>&1 &
sleep 15
|
talib570/spider
|
CI/orient/install.sh
|
Shell
|
mit
| 677 |
# Install/unInstall package files in LAMMPS
if (test $1 = 1) then
cp atom_vec_dipole.cpp ..
cp pair_dipole_cut.cpp ..
cp atom_vec_dipole.h ..
cp pair_dipole_cut.h ..
elif (test $1 = 0) then
rm ../atom_vec_dipole.cpp
rm ../pair_dipole_cut.cpp
rm ../atom_vec_dipole.h
rm ../pair_dipole_cut.h
fi
|
nchong/icliggghts
|
src/DIPOLE/Install.sh
|
Shell
|
gpl-2.0
| 316 |
#!/bin/sh
# Copyright (C) 1999-2006 ImageMagick Studio LLC
#
# This program is covered by multiple licenses, which are described in
# LICENSE. You should have received a copy of LICENSE with this
# package; otherwise see http://www.imagemagick.org/script/license.php.
. ${srcdir}/tests/common.shi
${RUNENV} ${MEMCHECK} ./rwfile ${SRCDIR}/input_pallette.miff SUN
|
ipwndev/DSLinux-Mirror
|
user/imagemagick/src/tests/rwfile_SUN_pallette.sh
|
Shell
|
gpl-2.0
| 363 |
#!/bin/sh
# Copyright (C) 1999-2006 ImageMagick Studio LLC
#
# This program is covered by multiple licenses, which are described in
# LICENSE. You should have received a copy of LICENSE with this
# package; otherwise see http://www.imagemagick.org/script/license.php.
. ${srcdir}/tests/common.shi
${RUNENV} ${MEMCHECK} ./rwfile ${SRCDIR}/input_truecolor10.pam PICT
|
ipwndev/DSLinux-Mirror
|
user/imagemagick/src/tests/rwfile_PICT_truecolor10.sh
|
Shell
|
gpl-2.0
| 366 |
#!/bin/bash
dir=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
source "${dir}/helpers.bash"
# dir might have been overwritten by helpers.bash
dir=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
TEST_NAME=$(get_filename_without_extension $0)
LOGS_DIR="${dir}/cilium-files/${TEST_NAME}/logs"
redirect_debug_logs ${LOGS_DIR}
set -ex
NETPERF_IMAGE="tgraf/nettools"
TEST_TIME=30
SERVER_LABEL="id.server"
CLIENT_LABEL="id.client"
SERVER_NAME="server"
CLIENT_NAME="client"
HEADERS=${HEADERS_OFF:+"-P 0"}
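# when HEADERS_OFF is set and non-empty, pass "-P 0" to netperf to suppress the result banner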
# Only run these tests if BENCHMARK=1 and GCE=1 has been set
if [ -z "${BENCHMARK}" ] || [ -z "${GCE}" ]; then
exit 0
fi
function create_k8s_files {
sed -e "s+NETPERF_IMAGE+${NETPERF_IMAGE}+" \
-e "s+CLIENT_NAME+${CLIENT_NAME}+" \
-e "s+CLIENT_LABEL+${CLIENT_LABEL}+" \
./gce-deployment/client.json.sed > ./gce-deployment/client.json
sed -e "s+NETPERF_IMAGE+${NETPERF_IMAGE}+" \
-e "s+SERVER_NAME+${SERVER_NAME}+" \
-e "s+SERVER_LABEL+${SERVER_LABEL}+" \
./gce-deployment/server.json.sed > ./gce-deployment/server.json
}
create_k8s_files
function cleanup_k8s {
kubectl delete -f ./gce-deployment/client.json || true
kubectl delete -f ./gce-deployment/server.json || true
}
trap cleanup_k8s EXIT
kubectl create -f ./gce-deployment/client.json
kubectl create -f ./gce-deployment/server.json
wait_for_running_pod ${CLIENT_NAME}
wait_for_running_pod ${SERVER_NAME}
echo "Getting Client and Server IPv6, IPv4 and ID from containers"
server_pod=$(kubectl get pods --output=jsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}' | grep "${SERVER_NAME}")
client_pod=$(kubectl get pods --output=jsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}' | grep "${CLIENT_NAME}")
server_worker=$(kubectl get pods --output=jsonpath='{range .items[*]}{.metadata.name} {.spec.nodeName}{"\n"}{end}' | grep "${SERVER_NAME}" | cut -d' ' -f2)
client_worker=$(kubectl get pods --output=jsonpath='{range .items[*]}{.metadata.name} {.spec.nodeName}{"\n"}{end}' | grep "${CLIENT_NAME}" | cut -d' ' -f2)
server_cilium=$(kubectl get pods --output=jsonpath='{range .items[*]}{.metadata.name} {.spec.nodeName}{"\n"}{end}' | grep cilium | grep "${server_worker}" | cut -d' ' -f1)
client_cilium=$(kubectl get pods --output=jsonpath='{range .items[*]}{.metadata.name} {.spec.nodeName}{"\n"}{end}' | grep cilium | grep "${client_worker}" | cut -d' ' -f1)
echo "..."
function cleanup_cilium {
cleanup_k8s
for line in ${server_cilium} ${client_cilium}; do
kubectl exec -i ${line} -- cilium config DropNotification=true TraceNotification=true Debug=true
done
}
trap cleanup_cilium EXIT
CLIENT_IP=$(kubectl exec ${client_pod} -- ip -6 a s | grep global | tr -s ' ' | cut -d' ' -f 3 | sed 's:/.*::')
CLIENT_IP4=$(kubectl exec ${client_pod} -- ip -4 a s | grep global | tr -s ' ' | cut -d' ' -f 3 | sed 's:/.*::')
CLIENT_ID=$(kubectl exec ${client_cilium} -- cilium endpoint list | grep $CLIENT_LABEL | awk '{ print $1}')
SERVER_IP=$(kubectl exec ${server_pod} -- ip -6 a s | grep global | tr -s ' ' | cut -d' ' -f 3 | sed 's:/.*::')
SERVER_IP4=$(kubectl exec ${server_pod} -- ip -4 a s | grep global | tr -s ' ' | cut -d' ' -f 3 | sed 's:/.*::')
SERVER_ID=$(kubectl exec ${server_cilium} -- cilium endpoint list | grep $SERVER_LABEL | awk '{ print $1}')
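# derive the host address by rewriting the last 16-bit group of the server's IPv6 address to ffff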
HOST_IP=$(echo $SERVER_IP | sed -e 's/:[0-9a-f]\{4\}$/:ffff/')
SERVER_DEV=$(kubectl exec ${server_cilium} -- cilium endpoint get $SERVER_ID | grep interface-name | awk '{print $2}' | sed 's/"//g' | sed 's/,$//')
NODE_MAC=$(kubectl exec ${server_cilium} -- cilium endpoint get $SERVER_ID | grep host-mac | awk '{print $2}' | sed 's/"//g' | sed 's/,$//')
LXC_MAC=$(kubectl exec ${server_cilium} -- cilium endpoint get $SERVER_ID | grep mac | awk '{print $2}' | sed 's/"//g' | sed 's/,$//')
echo "... Done"
cat <<EOF | kubectl exec -i "${server_cilium}" -- cilium -D policy import -
[{
"endpointSelector": {"matchLabels":{"k8s:${SERVER_LABEL}":""}},
"ingress": [{
"fromEndpoints": [
{"matchLabels":{"k8s:${CLIENT_LABEL}":""}}
]
}]
}]
EOF
function perf_test() {
kubectl exec ${client_pod} -- netperf -6 $HEADERS -l $TEST_TIME -t TCP_STREAM -H $SERVER_IP || {
abort "Error: Unable to reach netperf TCP endpoint"
}
if [ $SERVER_IP4 ]; then
kubectl exec ${client_pod} -- netperf -4 $HEADERS -l $TEST_TIME -t TCP_STREAM -H $SERVER_IP4 || {
abort "Error: Unable to reach netperf TCP endpoint"
}
fi
kubectl exec ${client_pod} -- netperf -6 $HEADERS -l $TEST_TIME -t TCP_SENDFILE -H $SERVER_IP || {
abort "Error: Unable to reach netperf TCP endpoint"
}
if [ $SERVER_IP4 ]; then
kubectl exec ${client_pod} -- netperf -4 $HEADERS -l $TEST_TIME -t TCP_SENDFILE -H $SERVER_IP4 || {
abort "Error: Unable to reach netperf TCP endpoint"
}
fi
kubectl exec ${client_pod} -- netperf -6 $HEADERS -l $TEST_TIME -t UDP_STREAM -H $SERVER_IP -- -R1 || {
abort "Error: Unable to reach netperf UDP endpoint"
}
if [ $SERVER_IP4 ]; then
kubectl exec ${client_pod} -- netperf -4 $HEADERS -l $TEST_TIME -t UDP_STREAM -H $SERVER_IP4 -- -R1 || {
abort "Error: Unable to reach netperf UDP endpoint"
}
fi
kubectl exec ${client_pod} -- netperf -6 $HEADERS -l $TEST_TIME -t TCP_SENDFILE -H $SERVER_IP -- -m 256 || {
abort "Error: Unable to reach netperf TCP endpoint"
}
kubectl exec ${client_pod} -- super_netperf 8 -6 -l $TEST_TIME -t TCP_SENDFILE -H $SERVER_IP || {
abort "Error: Unable to reach netperf TCP endpoint"
}
if [ $SERVER_IP4 ]; then
kubectl exec ${client_pod} -- super_netperf 8 -4 -l $TEST_TIME -t TCP_SENDFILE -H $SERVER_IP4 || {
abort "Error: Unable to reach netperf TCP endpoint"
}
fi
kubectl exec ${client_pod} -- netperf -6 $HEADERS -l $TEST_TIME -t TCP_RR -H $SERVER_IP || {
abort "Error: Unable to reach netperf TCP endpoint"
}
if [ $SERVER_IP4 ]; then
kubectl exec ${client_pod} -- netperf -4 $HEADERS -l $TEST_TIME -t TCP_RR -H $SERVER_IP4 || {
abort "Error: Unable to reach netperf TCP endpoint"
}
fi
# FIXME
# kubectl exec ${client_pod} -- netperf -6 $HEADERS -l $TEST_TIME -t TCP_CRR -H $SERVER_IP || {
# abort "Error: Unable to reach netperf TCP endpoint"
# }
#
# if [ $SERVER_IP4 ]; then
# kubectl exec ${client_pod} -- netperf -4 $HEADERS -l $TEST_TIME -t TCP_CRR -H $SERVER_IP4 || {
# abort "Error: Unable to reach netperf TCP endpoint"
# }
# fi
kubectl exec ${client_pod} -- netperf -6 $HEADERS -l $TEST_TIME -t UDP_RR -H $SERVER_IP -- -R1 || {
abort "Error: Unable to reach netperf UDP endpoint"
}
if [ $SERVER_IP4 ]; then
kubectl exec ${client_pod} -- netperf -4 $HEADERS -l $TEST_TIME -t UDP_RR -H $SERVER_IP4 -- -R1 || {
abort "Error: Unable to reach netperf UDP endpoint"
}
fi
}
kubectl exec ${server_cilium} -- cilium config DropNotification=false TraceNotification=false Debug=false
kubectl exec ${client_cilium} -- cilium config DropNotification=false TraceNotification=false Debug=false
kubectl exec ${server_cilium} -- cilium endpoint config $SERVER_ID DropNotification=false TraceNotification=false Debug=false
kubectl exec ${client_cilium} -- cilium endpoint config $CLIENT_ID DropNotification=false TraceNotification=false Debug=false
perf_test
kubectl exec ${server_cilium} -- cilium endpoint config $SERVER_ID ConntrackAccounting=false
kubectl exec ${client_cilium} -- cilium endpoint config $CLIENT_ID ConntrackAccounting=false
perf_test
# FIXME
echo "Conntrack=false test won't be run!"
#kubectl exec ${server_cilium} -- cilium endpoint config $SERVER_ID Conntrack=false
#kubectl exec ${client_cilium} -- cilium endpoint config $CLIENT_ID Conntrack=false
#perf_test
kubectl exec ${server_cilium} -- cilium endpoint config $SERVER_ID IngressPolicy=false
kubectl exec ${server_cilium} -- cilium endpoint config $SERVER_ID EgressPolicy=false
kubectl exec ${client_cilium} -- cilium endpoint config $CLIENT_ID IngressPolicy=false
kubectl exec ${client_cilium} -- cilium endpoint config $CLIENT_ID EgressPolicy=false
perf_test
kubectl exec ${server_cilium} -- cilium policy delete "${SERVER_LABEL}"
|
eloycoto/cilium
|
tests/09-perf-gce.sh
|
Shell
|
apache-2.0
| 8,116 |
# send 100 packets (-c 100) in raw-IP mode (-0) to 10.0.0.2, each carrying a
# 12000-byte payload (-d) filled from test.txt (-E)
hping3 -c 100 -0 10.0.0.2 -d 12000 -E test.txt
|
tashaband/RYU295
|
hping_9.sh
|
Shell
|
apache-2.0
| 47 |
#!/bin/bash
VERSION=$(nvcc --version | grep release | grep -oEi "release ([0-9]+)\.([0-9])"| sed "s/release //")
if [ "$VERSION" == "9.0" ]; then
DEB="nccl-repo-ubuntu1604-2.1.15-ga-cuda9.0_1-1_amd64.deb"
URL="http://nccl2-deb.gz.bcebos.com/nccl-repo-ubuntu1604-2.1.15-ga-cuda9.0_1-1_amd64.deb"
else
DEB="nccl-repo-ubuntu1604-2.1.15-ga-cuda8.0_1-1_amd64.deb"
URL="http://nccl2-deb.gz.bcebos.com/nccl-repo-ubuntu1604-2.1.15-ga-cuda8.0_1-1_amd64.deb"
fi
DIR="/nccl2"
mkdir -p $DIR
# we cached the nccl2 deb package in BOS, so we can download it with wget
# install nccl2: http://docs.nvidia.com/deeplearning/sdk/nccl-install-guide/index.html#down
wget -O $DIR/$DEB $URL
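# a .deb is an 'ar' archive whose payload sits in data.tar.xz; unpack the repo
# package and then each bundled .deb so the files can be copied without dpkg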
cd $DIR && ar x $DEB && tar xf data.tar.xz
DEBS=$(find ./var/ -name "*.deb")
for sub_deb in $DEBS; do
echo $sub_deb
ar x $sub_deb && tar xf data.tar.xz
done
mv -f usr/include/nccl.h /usr/local/include/
mv -f usr/lib/libnccl* /usr/local/lib/
rm -rf $DIR
|
pkuyym/Paddle
|
tools/manylinux1/build_scripts/install_nccl2.sh
|
Shell
|
apache-2.0
| 935 |
#!/bin/bash
# for use this script run
# chmod a+x fakeswap.sh
# and
# sh ./fakeswap.sh 4086
# for adding 4086MB swapsize
SWAP="${1:-512}"
NEW="$[SWAP*1024]"; TEMP="${NEW//?/ }"; OLD="${TEMP:1}0"
umount /proc/meminfo 2> /dev/null
sed "/^Swap\(Total\|Free\):/s,$OLD,$NEW," /proc/meminfo > /etc/fake_meminfo
mount --bind /etc/fake_meminfo /proc/meminfo
free -m
|
tkyryliuk/cibox
|
files/fakeswap/fakeswap.sh
|
Shell
|
bsd-3-clause
| 364 |
#!/bin/bash
FN="LRBase.Mmu.eg.db_2.0.0.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.13/data/annotation/src/contrib/LRBase.Mmu.eg.db_2.0.0.tar.gz"
"https://bioarchive.galaxyproject.org/LRBase.Mmu.eg.db_2.0.0.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-lrbase.mmu.eg.db/bioconductor-lrbase.mmu.eg.db_2.0.0_src_all.tar.gz"
)
MD5="91845d7309638f8800f0e21e72718dcd"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
curl $URL > $TARBALL
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks.
if [[ $(uname -s) == "Linux" ]]; then
  if md5sum -c <<<"$MD5  $TARBALL"; then
    SUCCESS=1
    break
  fi
elif [[ $(uname -s) == "Darwin" ]]; then
  if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
    SUCCESS=1
    break
  fi
fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
|
phac-nml/bioconda-recipes
|
recipes/bioconductor-lrbase.mmu.eg.db/post-link.sh
|
Shell
|
mit
| 1,328 |
#!/bin/sh
INDENT_PROGRAM="uncrustify"
DIR="tools"
CFG="cheese-indent.cfg"
LANG="VALA"
[[ $# -lt 1 ]] && { echo "$0 [files]" 1>&2; exit 1; }
if ! which $INDENT_PROGRAM > /dev/null 2>&1; then
echo "$INDENT_PROGRAM was not found on your computer, please install it"
exit 1
fi
$INDENT_PROGRAM -l $SRC_LANG -c $DIR/$CFG --no-backup --replace "$@"
|
Patriciasc/Cheese-GTK-3-Port
|
tools/indent.sh
|
Shell
|
gpl-2.0
| 344 |
#!/bin/sh
#
# Copyright (c) 2005 Junio C Hamano
#
test_description='Pathspec restrictions
Prepare:
file0
path1/file1
'
. ./test-lib.sh
. "$TEST_DIRECTORY"/diff-lib.sh ;# test-lib chdir's into trash
test_expect_success \
setup \
'echo frotz >file0 &&
mkdir path1 &&
echo rezrov >path1/file1 &&
before0=$(git hash-object file0) &&
before1=$(git hash-object path1/file1) &&
git update-index --add file0 path1/file1 &&
tree=$(git write-tree) &&
echo "$tree" &&
echo nitfol >file0 &&
echo yomin >path1/file1 &&
after0=$(git hash-object file0) &&
after1=$(git hash-object path1/file1) &&
git update-index file0 path1/file1'
cat >expected <<\EOF
EOF
test_expect_success \
'limit to path should show nothing' \
'git diff-index --cached $tree -- path >current &&
compare_diff_raw current expected'
cat >expected <<EOF
:100644 100644 $before1 $after1 M path1/file1
EOF
test_expect_success \
'limit to path1 should show path1/file1' \
'git diff-index --cached $tree -- path1 >current &&
compare_diff_raw current expected'
cat >expected <<EOF
:100644 100644 $before1 $after1 M path1/file1
EOF
test_expect_success \
'limit to path1/ should show path1/file1' \
'git diff-index --cached $tree -- path1/ >current &&
compare_diff_raw current expected'
cat >expected <<EOF
:100644 100644 $before1 $after1 M path1/file1
EOF
test_expect_success \
'"*file1" should show path1/file1' \
'git diff-index --cached $tree -- "*file1" >current &&
compare_diff_raw current expected'
cat >expected <<EOF
:100644 100644 $before0 $after0 M file0
EOF
test_expect_success \
'limit to file0 should show file0' \
'git diff-index --cached $tree -- file0 >current &&
compare_diff_raw current expected'
cat >expected <<\EOF
EOF
test_expect_success \
'limit to file0/ should emit nothing.' \
'git diff-index --cached $tree -- file0/ >current &&
compare_diff_raw current expected'
test_expect_success 'diff-tree pathspec' '
tree2=$(git write-tree) &&
echo "$tree2" &&
git diff-tree -r --name-only $tree $tree2 -- pa path1/a >current &&
test_must_be_empty current
'
test_expect_success 'diff-tree with wildcard shows dir also matches' '
git diff-tree --name-only $EMPTY_TREE $tree -- "f*" >result &&
echo file0 >expected &&
test_cmp expected result
'
test_expect_success 'diff-tree -r with wildcard' '
git diff-tree -r --name-only $EMPTY_TREE $tree -- "*file1" >result &&
echo path1/file1 >expected &&
test_cmp expected result
'
test_expect_success 'diff-tree with wildcard shows dir also matches' '
git diff-tree --name-only $tree $tree2 -- "path1/f*" >result &&
echo path1 >expected &&
test_cmp expected result
'
test_expect_success 'diff-tree -r with wildcard from beginning' '
git diff-tree -r --name-only $tree $tree2 -- "path1/*file1" >result &&
echo path1/file1 >expected &&
test_cmp expected result
'
test_expect_success 'diff-tree -r with wildcard' '
git diff-tree -r --name-only $tree $tree2 -- "path1/f*" >result &&
echo path1/file1 >expected &&
test_cmp expected result
'
test_expect_success 'setup submodules' '
test_tick &&
git init submod &&
( cd submod && test_commit first ) &&
git add submod &&
git commit -m first &&
( cd submod && test_commit second ) &&
git add submod &&
git commit -m second
'
test_expect_success 'diff-tree ignores trailing slash on submodule path' '
git diff --name-only HEAD^ HEAD submod >expect &&
git diff --name-only HEAD^ HEAD submod/ >actual &&
test_cmp expect actual &&
git diff --name-only HEAD^ HEAD -- submod/whatever >actual &&
test_must_be_empty actual
'
test_expect_success 'diff multiple wildcard pathspecs' '
mkdir path2 &&
echo rezrov >path2/file1 &&
git update-index --add path2/file1 &&
tree3=$(git write-tree) &&
git diff --name-only $tree $tree3 -- "path2*1" "path1*1" >actual &&
cat <<-\EOF >expect &&
path1/file1
path2/file1
EOF
test_cmp expect actual
'
test_expect_success 'diff-cache ignores trailing slash on submodule path' '
git diff --name-only HEAD^ submod >expect &&
git diff --name-only HEAD^ submod/ >actual &&
test_cmp expect actual
'
test_done
|
tacker66/git
|
t/t4010-diff-pathspec.sh
|
Shell
|
gpl-2.0
| 4,204 |
#!@PERL_PATH@
# Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; version 2
# of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the Free
# Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
# MA 02110-1301, USA
# mysqldumpslow - parse and summarize the MySQL slow query log
# Original version by Tim Bunce, sometime in 2000.
# Further changes by Tim Bunce, 8th March 2001.
# Handling of strings with \ and double '' by Monty 11 Aug 2001.
use strict;
use Getopt::Long;
# t=time, l=lock time, r=rows
# at, al, and ar are the corresponding averages
my %opt = (
s => 'at',
h => '*',
);
GetOptions(\%opt,
'v|verbose+',# verbose
'help+', # write usage info
'd|debug+', # debug
's=s', # what to sort by (al, at, ar, c, t, l, r)
'r!', # reverse the sort order (largest last instead of first)
't=i', # just show the top n queries
'a!', # don't abstract all numbers to N and strings to 'S'
'n=i', # abstract numbers with at least n digits within names
'g=s', # grep: only consider stmts that include this string
'h=s', # hostname of db server for *-slow.log filename (can be wildcard)
'i=s', # name of server instance (if using mysql.server startup script)
'l!', # don't subtract lock time from total time
) or usage("bad option");
$opt{'help'} and usage();
unless (@ARGV) {
my $defaults = `my_print_defaults mysqld`;
my $basedir = ($defaults =~ m/--basedir=(.*)/)[0]
or die "Can't determine basedir from 'my_print_defaults mysqld' output: $defaults";
warn "basedir=$basedir\n" if $opt{v};
my $datadir = ($defaults =~ m/--datadir=(.*)/)[0];
my $slowlog = ($defaults =~ m/--slow-query-log-file=(.*)/)[0];
if (!$datadir or $opt{i}) {
# determine the datadir from the instances section of /etc/my.cnf, if any
my $instances = `my_print_defaults instances`;
die "Can't determine datadir from 'my_print_defaults mysqld' output: $defaults"
unless $instances;
my @instances = ($instances =~ m/^--(\w+)-/mg);
die "No -i 'instance_name' specified to select among known instances: @instances.\n"
unless $opt{i};
die "Instance '$opt{i}' is unknown (known instances: @instances)\n"
unless grep { $_ eq $opt{i} } @instances;
$datadir = ($instances =~ m/--$opt{i}-datadir=(.*)/)[0]
or die "Can't determine --$opt{i}-datadir from 'my_print_defaults instances' output: $instances";
warn "datadir=$datadir\n" if $opt{v};
}
if ( -f $slowlog ) {
@ARGV = ($slowlog);
die "Can't find '$slowlog'\n" unless @ARGV;
} else {
@ARGV = <$datadir/$opt{h}-slow.log>;
die "Can't find '$datadir/$opt{h}-slow.log'\n" unless @ARGV;
}
}
warn "\nReading mysql slow query log from @ARGV\n";
my @pending;
my %stmt;
$/ = ";\n#"; # read entire statements using paragraph mode
while ( defined($_ = shift @pending) or defined($_ = <>) ) {
warn "[[$_]]\n" if $opt{d}; # show raw paragraph being read
my @chunks = split /^\/.*Version.*started with[\000-\377]*?Time.*Id.*Command.*Argument.*\n/m;
if (@chunks > 1) {
unshift @pending, map { length($_) ? $_ : () } @chunks;
warn "<<".join(">>\n<<",@chunks).">>" if $opt{d};
next;
}
s/^#? Time: \d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d+(Z|[+-]\d{2}:\d{2}).*\n//;
my ($user,$host,$dummy,$thread_id) = s/^#? User\@Host:\s+(\S+)\s+\@\s+(\S+)\s+\S+(\s+Id:\s+(\d+))?.*\n// ? ($1,$2,$3,$4) : ('','','','');
s/^# Query_time: ([0-9.]+)\s+Lock_time: ([0-9.]+)\s+Rows_sent: ([0-9.]+).*\n//;
my ($t, $l, $r) = ($1, $2, $3);
$t -= $l unless $opt{l};
# remove fluff that mysqld writes to log when it (re)starts:
s!^/.*Version.*started with:.*\n!!mg;
s!^Tcp port: \d+ Unix socket: \S+\n!!mg;
s!^Time.*Id.*Command.*Argument.*\n!!mg;
s/^use \w+;\n//; # not consistently added
s/^SET timestamp=\d+;\n//;
s/^[ ]*\n//mg; # delete blank lines
s/^[ ]*/ /mg; # normalize leading whitespace
s/\s*;\s*(#\s*)?$//; # remove trailing semicolon(+newline-hash)
next if $opt{g} and !m/$opt{g}/io;
unless ($opt{a}) {
s/\b\d+\b/N/g;
s/\b0x[0-9A-Fa-f]+\b/N/g;
s/''/'S'/g;
s/""/"S"/g;
s/(\\')//g;
s/(\\")//g;
s/'[^']+'/'S'/g;
s/"[^"]+"/"S"/g;
# -n=8: turn log_20001231 into log_NNNNNNNN
s/([a-z_]+)(\d{$opt{n},})/$1.('N' x length($2))/ieg if $opt{n};
# abbreviate massive "in (...)" statements and similar
s!(([NS],){100,})!sprintf("$2,{repeated %d times}",length($1)/2)!eg;
}
my $s = $stmt{$_} ||= { users=>{}, hosts=>{} };
$s->{c} += 1;
$s->{t} += $t;
$s->{l} += $l;
$s->{r} += $r;
$s->{users}->{$user}++ if $user;
$s->{hosts}->{$host}++ if $host;
warn "{{$_}}\n\n" if $opt{d}; # show processed statement string
}
foreach (keys %stmt) {
my $v = $stmt{$_} || die;
my ($c, $t, $l, $r) = @{ $v }{qw(c t l r)};
$v->{at} = $t / $c;
$v->{al} = $l / $c;
$v->{ar} = $r / $c;
}
my @sorted = sort { $stmt{$b}->{$opt{s}} <=> $stmt{$a}->{$opt{s}} } keys %stmt;
@sorted = @sorted[0 .. $opt{t}-1] if $opt{t};
@sorted = reverse @sorted if $opt{r};
foreach (@sorted) {
my $v = $stmt{$_} || die;
my ($c, $t,$at, $l,$al, $r,$ar) = @{ $v }{qw(c t at l al r ar)};
my @users = keys %{$v->{users}};
my $user = (@users==1) ? $users[0] : sprintf "%dusers",scalar @users;
my @hosts = keys %{$v->{hosts}};
my $host = (@hosts==1) ? $hosts[0] : sprintf "%dhosts",scalar @hosts;
printf "Count: %d Time=%.2fs (%ds) Lock=%.2fs (%ds) Rows=%.1f (%d), $user\@$host\n%s\n\n",
$c, $at,$t, $al,$l, $ar,$r, $_;
}
sub usage {
my $str= shift;
my $text= <<HERE;
Usage: mysqldumpslow [ OPTS... ] [ LOGS... ]
Parse and summarize the MySQL slow query log. Options are
--verbose verbose
--debug debug
--help write this text to standard output
-v verbose
-d debug
-s ORDER what to sort by (al, at, ar, c, l, r, t), 'at' is default
al: average lock time
ar: average rows sent
at: average query time
c: count
l: lock time
r: rows sent
t: query time
-r reverse the sort order (largest last instead of first)
-t NUM just show the top n queries
-a don't abstract all numbers to N and strings to 'S'
-n NUM abstract numbers with at least n digits within names
-g PATTERN grep: only consider stmts that include this string
-h HOSTNAME hostname of db server for *-slow.log filename (can be wildcard),
default is '*', i.e. match all
-i NAME name of server instance (if using mysql.server startup script)
-l don't subtract lock time from total time
HERE
if ($str) {
print STDERR "ERROR: $str\n\n";
print STDERR $text;
exit 1;
} else {
print $text;
exit 0;
}
}
|
sensssz/mysql-server
|
scripts/mysqldumpslow.sh
|
Shell
|
gpl-2.0
| 7,462 |
#!/bin/bash
BASEDIR=$(dirname $0)
WARS=`find . -name '*.war' -print`
for i in $WARS
do
asadmin --passwordfile $BASEDIR/gf-password.txt deploy --force $i
done
|
Daniel-Dos/ozark
|
bin/deploy-tests-gf.sh
|
Shell
|
apache-2.0
| 165 |
#!/bin/bash
#this script does the following:
#- copies the bgpd.conf file to the Quagga directory of the lxc container
#- turns off the ospf daemon of quagga
#- turns on the bgp daemon of quagga
#- restarts the quagga service
# this file should be run after the necessary bgp configuration has been done
if [ "$EUID" != "0" ]; then
echo "You must be root to run this script. Sorry, dude!"
exit 1
fi
QUAGGA_DIR="/etc/quagga"
CUR_DIR=`pwd`
#copying the bgpd.conf file to the quagga directory
cp -f $CUR_DIR/bgpd.conf $QUAGGA_DIR
#opening the file 'daemons'
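# sketch of the step described above (assumes the Debian-style /etc/quagga/daemons
# file; keys and paths may differ on other distributions)
sed -i 's/^ospfd=yes/ospfd=no/' $QUAGGA_DIR/daemons
sed -i 's/^bgpd=no/bgpd=yes/' $QUAGGA_DIR/daemons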
#restarting the quagga service
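# sketch, assuming a SysV-style init wrapper is present
service quagga restart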
|
ralph-mikera/routeflow5
|
rftest/confd_socket/bgp.sh
|
Shell
|
apache-2.0
| 605 |
# $NetBSD: t_change.sh,v 1.4 2013/02/19 21:08:25 joerg Exp $
#
# Copyright (c) 2011 The NetBSD Foundation, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
netserver=\
"rump_server -lrumpnet -lrumpnet_net -lrumpnet_netinet"
export RUMP_SERVER=unix://commsock
atf_test_case reject2blackhole cleanup
reject2blackhole_head()
{
atf_set "descr" "Change a reject route to blackhole"
atf_set "require.progs" "rump_server"
}
reject2blackhole_body()
{
atf_check -s exit:0 ${netserver} ${RUMP_SERVER}
atf_check -s exit:0 -o ignore \
rump.route add 207.46.197.32 127.0.0.1 -reject
atf_check -s exit:0 -o match:UGHR -x \
"rump.route -n show -inet | grep ^207.46"
atf_check -s exit:0 -o ignore \
rump.route change 207.46.197.32 127.0.0.1 -blackhole
atf_check -s exit:0 -o match:' UGHBS ' -e ignore -x \
"rump.netstat -rn -f inet | grep ^207.46| grep ^207.46"
}
reject2blackhole_cleanup()
{
env RUMP_SERVER=unix://commsock rump.halt
}
atf_init_test_cases()
{
atf_add_test_case reject2blackhole
}
|
veritas-shine/minix3-rpi
|
tests/net/route/t_change.sh
|
Shell
|
apache-2.0
| 2,289 |
#!/bin/bash
assert_ok "$FLOW" get-def --strip-root --json ignore/test.js 3 2
assert_ok "$FLOW" type-at-pos --strip-root --json ignore/test.js 3 2
assert_ok "$FLOW" get-def --strip-root --json no_flow/test.js 3 2
assert_ok "$FLOW" type-at-pos --strip-root --json no_flow/test.js 3 2
|
AgentME/flow
|
tests/contents/test.sh
|
Shell
|
bsd-3-clause
| 282 |
#!/bin/sh
## (GEN004540: CAT II) The SA will ensure the help sendmail command is
## disabled.
echo '==================================================='
echo ' Patching GEN004540: Disable sendmail help.'
echo '==================================================='
if [ -e /etc/mail/helpfile ]; then
mv /etc/mail/helpfile /etc/mail/helpfile.bak
echo "" > /etc/mail/helpfile
sed -i '/HelpFile/s/^/#/' /etc/mail/sendmail.cf
fi
|
jpschaaf/hardening-script-el6
|
scripts/gen004540.sh
|
Shell
|
gpl-2.0
| 427 |