code (stringlengths 2-1.05M) | repo_name (stringlengths 5-110) | path (stringlengths 3-922) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 2-1.05M) |
---|---|---|---|---|---|
#!/bin/bash
set -e
. /opt/kolla/kolla-common.sh
check_required_vars ADMIN_TENANT_NAME \
GLANCE_DB_NAME \
GLANCE_DB_PASSWORD \
GLANCE_DB_USER \
GLANCE_KEYSTONE_PASSWORD \
GLANCE_KEYSTONE_USER \
KEYSTONE_PUBLIC_SERVICE_HOST \
MARIADB_SERVICE_HOST
dump_vars
cat > /openrc <<EOF
export OS_AUTH_URL="http://${KEYSTONE_PUBLIC_SERVICE_HOST}:5000/v2.0"
export OS_USERNAME="${GLANCE_KEYSTONE_USER}"
export OS_PASSWORD="${GLANCE_KEYSTONE_PASSWORD}"
export OS_TENANT_NAME="${ADMIN_TENANT_NAME}"
EOF
for cfg in /etc/glance/glance-api.conf /etc/glance/glance-registry.conf; do
crudini --set $cfg \
DEFAULT \
log_file \
""
for option in auth_protocol auth_host auth_port; do
crudini --del $cfg \
keystone_authtoken \
$option
done
crudini --set $cfg \
keystone_authtoken \
auth_uri \
"http://${KEYSTONE_PUBLIC_SERVICE_HOST}:5000/"
crudini --set $cfg \
keystone_authtoken \
admin_tenant_name \
"${ADMIN_TENANT_NAME}"
crudini --set $cfg \
keystone_authtoken \
admin_user \
"${GLANCE_KEYSTONE_USER}"
crudini --set $cfg \
keystone_authtoken \
admin_password \
"${GLANCE_KEYSTONE_PASSWORD}"
crudini --set $cfg \
paste_deploy \
flavor \
keystone
crudini --set $cfg \
database \
connection \
"mysql://${GLANCE_DB_USER}:${GLANCE_DB_PASSWORD}@${MARIADB_SERVICE_HOST}/${GLANCE_DB_NAME}"
done
|
invenfantasy/kolla
|
docker/common/glance/glance-base/config-glance.sh
|
Shell
|
apache-2.0
| 1,652 |
#!/bin/bash
# Copyright 2017 The Nuclio Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# exit on failure
set -o errexit
# print each command before executing it
set -o xtrace
# remove cached Python artifacts; the name tests are grouped so that
# -print0 applies to every alternative, not just the last one
find ./py \
    \( -name ".pytest_cache" -type d \
    -o -name "*.pyc" \
    -o -name "__pycache__" -type d \) \
    -print0 \
    | xargs --null rm -rf
# run tests
python -m pytest -v .
|
nuclio/nuclio
|
pkg/processor/runtime/python/test/test.sh
|
Shell
|
apache-2.0
| 882 |
#! /bin/bash
set -o errexit
ROOT=$PWD
rm -rf temp
mkdir temp
if [ ! -d lib/bundles ]; then
mkdir lib/bundles
fi
cd temp
git clone https://github.com/Microsoft/vscode
# cp -r /Users/denofevil/WebstormProjects/vscode .
cd vscode/extensions
for f in *; do
if [ -d "$f/syntaxes" ]; then
echo "Adding $f"
cp -r "$f" "$ROOT/lib/bundles"
rm -rf "$ROOT/lib/bundles/$f/test"
rm -rf "$ROOT/lib/bundles/$f/build"
rm -rf "$ROOT/lib/bundles/$f/resources"
fi
done
rm -rf "$ROOT/temp"
|
leafclick/intellij-community
|
plugins/textmate/loadVSCBundles.sh
|
Shell
|
apache-2.0
| 500 |
#!/bin/sh
##export http_proxy="http://user:[email protected]:8080"
wget \
--recursive \
--no-clobber \
--limit-rate=20k \
--wait=45 \
--page-requisites \
--html-extension \
--convert-links \
--restrict-file-names=windows \
--domains website.org \
--no-parent \
www.website.org/tutorials/html/
|
jeffedlund/my_scripts
|
wget_impl_2.sh
|
Shell
|
apache-2.0
| 351 |
#!/bin/bash
docker run \
-it \
-v $(pwd):/home/pshmem \
quay.io/pypa/manylinux2014_x86_64:latest \
/bin/bash
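# Typical follow-up steps once inside the container (kept commented out here):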
# export PATH=/opt/python/cp38-cp38/bin:${PATH}
# python3 -m pip install --upgrade pip
# yum -y update
# yum -y install mpich-3.2-devel.x86_64 mpich-3.2-autoload.x86_64
# source /etc/profile.d/modules.sh
# source /etc/profile.d/mpich-3.2-x86_64.sh
|
tskisner/mpi_shmem
|
test_scripts/test_cibuild.sh
|
Shell
|
bsd-2-clause
| 360 |
#!/bin/bash
#
# vim: set ts=4 sw=4 et:
#
# Passed arguments:
# $1 - pkgname to build [REQUIRED]
# $2 - cross target [OPTIONAL]
if [ $# -lt 1 -o $# -gt 2 ]; then
echo "$(basename $0): invalid number of arguments: pkgname [cross-target]"
exit 1
fi
PKGNAME="$1"
XBPS_CROSS_BUILD="$2"
for f in $XBPS_SHUTILSDIR/*.sh; do
. $f
done
setup_pkg "$PKGNAME" $XBPS_CROSS_BUILD
for f in $XBPS_COMMONDIR/environment/build/*.sh; do
source_file "$f"
done
if [ -z "$pkgname" -o -z "$version" ]; then
msg_error "$1: pkgname/version not set in pkg template!\n"
exit 1
fi
XBPS_BUILD_DONE="$wrksrc/.xbps_${XBPS_CROSS_BUILD}_build_done"
XBPS_PRE_BUILD_DONE="$wrksrc/.xbps_${XBPS_CROSS_BUILD}_pre_build_done"
XBPS_POST_BUILD_DONE="$wrksrc/.xbps_${XBPS_CROSS_BUILD}_post_build_done"
if [ -f "$XBPS_BUILD_DONE" ]; then
exit 0
fi
cd $wrksrc || msg_error "$pkgver: cannot access wrksrc directory [$wrksrc]\n"
if [ -n "$build_wrksrc" ]; then
cd $build_wrksrc || \
msg_error "$pkgver: cannot access build_wrksrc directory [$build_wrksrc]\n"
fi
run_pkg_hooks pre-build
# Run pre_build()
if [ ! -f $XBPS_PRE_BUILD_DONE ]; then
if declare -f pre_build >/dev/null; then
run_func pre_build
touch -f $XBPS_PRE_BUILD_DONE
fi
fi
# Run do_build()
if declare -f do_build >/dev/null; then
run_func do_build
else
if [ -n "$build_style" ]; then
if [ ! -r $XBPS_BUILDSTYLEDIR/${build_style}.sh ]; then
msg_error "$pkgver: cannot find build helper $XBPS_BUILDSTYLEDIR/${build_style}.sh!\n"
fi
. $XBPS_BUILDSTYLEDIR/${build_style}.sh
if declare -f do_build >/dev/null; then
run_func do_build
fi
fi
fi
touch -f $XBPS_BUILD_DONE
# Run post_build()
if [ ! -f $XBPS_POST_BUILD_DONE ]; then
if declare -f post_build >/dev/null; then
run_func post_build
touch -f $XBPS_POST_BUILD_DONE
fi
fi
run_pkg_hooks post-build
exit 0
|
kokakolako/void-packages
|
common/xbps-src/libexec/xbps-src-dobuild.sh
|
Shell
|
bsd-2-clause
| 1,948 |
#!/bin/bash
set -eux
set -o pipefail
if [ ! -f /.dockerenv ]; then
echo "This should be run within the container";
exit 0;
fi
source ~/openrc.sh;
# Delete all the things. Who needs things anyway
openstack port delete $(openstack port list -c ID -f value --device-owner=network:dhcp) || true;
openstack security group delete $(openstack security group list -c Name -f value) || true;
openstack stack delete -y --wait $(openstack stack list -c ID -f value) || true;
openstack server delete $(openstack server list -c Name -f value) || true;
openstack keypair delete $(openstack keypair list -c Name -f value) || true;
openstack network delete $(openstack network list -c Name -f value) || true;
|
d0ugal/tripleo-util
|
clean-all.sh
|
Shell
|
bsd-2-clause
| 705 |
#! /bin/bash
prefix=$1
# source info
mac_src="00:60:dd:45:39:c7"
ip_src=192.168.2.1
port_src=5000
# destination info
mac_dst="00:60:dd:45:39:5a"
ip_dst=192.168.2.2
port_dst=5000
# packet info
pkt_cnt=150000
idle=13738
pkt_len=1518
#SONIC_SERVER="sonic2.cs.cornell.edu" # FIXME
SONIC_PATH="$HOME/afrl/sonic/driver"
echo
echo "Demo 2: Covert Channel (epsilon=256)"
epsilon=256
COVERT_FILE=$SONIC_PATH/bin/sonic.py # FIXME
COVERT_GENERATED=$SONIC_PATH/bin/tmp.info
$SONIC_PATH/bin/covert.py -i $COVERT_FILE -o $COVERT_GENERATED -e $epsilon -g $idle -l $pkt_len -n $pkt_cnt
$SONIC_PATH/bin/run_sonic.sh -m pkt_rpt,pkt_cap -d 5 -w 2 --mac_src $mac_src --ip_src $ip_src --port_src $port_src --mac_dst $mac_dst --ip_dst $ip_dst --port_dst $port_dst -c $pkt_cnt -l $pkt_len -i $idle -e $epsilon -g 3 -p $SONIC_PATH/result/${prefix}_covert_${epsilon} -r $SONIC_PATH/bin/tmp.info
$SONIC_PATH/bin/ber_separate2.py -i $SONIC_PATH/result/${prefix}_covert_${epsilon}_${pkt_len}_${idle}.tmp -d $idle -m $COVERT_FILE -n $pkt_cnt -o $SONIC_PATH/result/decoded
#scp -i ~/.ssh/id_rsa2 $SONIC_SERVER:$SONIC_PATH/result/${prefix}_covert_${epsilon}_${pkt_len}_${idle}* result/.
#gnuplot -e "input_file1=\"result/${prefix}_covert_${epsilon}_${pkt_len}_${idle}.tmp.one.ipd\"; input_file2=\"result/${prefix}_covert_${epsilon}_${pkt_len}_${idle}.tmp.zero.ipd\"; output_file=\"result/${prefix}_covert_${epsilon}_${pkt_len}_${idle}_pdf.eps\";" covert1.gnuplot
#open result/${prefix}_covert_${epsilon}_${pkt_len}_${idle}_pdf.eps
|
hanw/sonic
|
demo/demo3_local.sh
|
Shell
|
bsd-3-clause
| 1,522 |
php /var/www/Generator/bin/classmap_generator.php -l "/var/www/mystra-pve/module/Application" -w -s
php /var/www/Generator/bin/classmap_generator.php -l "/var/www/mystra-pve/module/Accueil" -w -s
php /var/www/Generator/bin/classmap_generator.php -l "/var/www/mystra-pve/module/Core/" -w -s
php /var/www/Generator/bin/classmap_generator.php -l "/var/www/mystra-pve/module/Commun/" -w -s
php /var/www/Generator/bin/classmap_generator.php -l "/var/www/mystra-pve/module/Bnet/" -w -s
php /var/www/Generator/bin/classmap_generator.php -l "/var/www/mystra-pve/module/Backend/" -w -s
|
antarus/mystra-pve
|
bin/generate_Classmap_Module.sh
|
Shell
|
bsd-3-clause
| 576 |
#!/bin/bash
set -e
SCRIPT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
source $SCRIPT_DIR/../../functional-tests/common.sh
${HADOOP_HOME}/bin/hdfs dfs -rm -f -r /input
${HADOOP_HOME}/bin/hdfs dfs -rm -f -r /converted
${HADOOP_HOME}/bin/hdfs dfs -mkdir /input
${HADOOP_HOME}/bin/hdfs dfs -put $SPARK_DATASETS/imagenet/4.zipped/* /input/
spark-submit --class ImagenetMultiplier --jars ${SWAT_JARS} \
--master spark://localhost:7077 \
--conf "spark.driver.maxResultSize=4g" \
--conf "spark.storage.memoryFraction=0.3" \
${SWAT_HOME}/dataset-transformations/imagenet/target/imagenet-0.0.0.jar \
hdfs://$(hostname):54310/input hdfs://$(hostname):54310/converted 6
rm -rf $SPARK_DATASETS/imagenet/5.multiply
${HADOOP_HOME}/bin/hdfs dfs -get /converted $SPARK_DATASETS/imagenet/5.multiply
|
agrippa/spark-swat
|
dataset-transformations/imagenet/5.multiply.sh
|
Shell
|
bsd-3-clause
| 837 |
#!/usr/bin/env bash
testdir=$(readlink -f $(dirname $0))
rootdir="$testdir/../../.."
source $rootdir/scripts/autotest_common.sh
$testdir/unit/nvme_ns_cmd_c/nvme_ns_cmd_ut
$testdir/unit/nvme_c/nvme_ut
$testdir/unit/nvme_qpair_c/nvme_qpair_ut
$testdir/unit/nvme_ctrlr_c/nvme_ctrlr_ut
$testdir/unit/nvme_ctrlr_cmd_c/nvme_ctrlr_cmd_ut
$testdir/aer/aer
process_core
$rootdir/examples/nvme/identify/identify
process_core
$rootdir/examples/nvme/perf/perf -q 128 -w read -s 4096 -t 5
process_core
|
changhuilin/spdk
|
test/lib/nvme/nvme.sh
|
Shell
|
bsd-3-clause
| 494 |
@INCLUDE_COMMON@
echo
echo ELEKTRA CHECK EXTERNAL HIGHLEVEL
echo
if command -v pkg-config; then
if ! pkg-config elektra; then
echo "Elektra not installed, will skip"
exit 0
fi
else
echo "pkg-config not installed, will skip"
exit 0
fi
if ! kdb plugin-list | grep -xq ni; then
echo "ni plugin not found, will skip"
exit 0
fi
check_version
EXTERNAL_FOLDER="@CMAKE_SOURCE_DIR@/examples/highlevel"
do_tests() {
KEY=/sw/example/highlevel/#0/current
UKEY="user$KEY"
SPECKEY="spec$KEY"
"$KDB" umount "$SPECKEY"
"$KDB" umount "$KEY"
"$KDB" rm -r "$UKEY"
"$KDB" rm -r "$SPECKEY"
"$KDB" mount "highlevel_spec.ini" "$SPECKEY" ni
"$KDB" import "$SPECKEY" ni < "$EXTERNAL_FOLDER/spec.ini"
"$KDB" spec-mount "$KEY"
./application
succeed_if "application could not read default config (spec)"
MYSTRING="Hello World"
MYINT="29"
MYDOUBLE="4.4242"
MYFLOATSIZE="5"
MYFLOAT0="3.14"
MYFLOAT1="15"
MYFLOAT2="9265.359"
MYFLOAT3="2.718282"
MYFLOAT4="1.4142"
"$KDB" set "$UKEY/mystring" "$MYSTRING"
"$KDB" set "$UKEY/myint" "$MYINT"
"$KDB" set "$UKEY/mydouble" "$MYDOUBLE"
"$KDB" set "$UKEY/myfloatarray/#0" "$MYFLOAT0"
"$KDB" set "$UKEY/myfloatarray/#1" "$MYFLOAT1"
"$KDB" set "$UKEY/myfloatarray/#2" "$MYFLOAT2"
"$KDB" set "$UKEY/myfloatarray/#3" "$MYFLOAT3"
"$KDB" set "$UKEY/myfloatarray/#4" "$MYFLOAT4"
"$KDB" meta-set "$UKEY/myfloatarray" array "#4"
./application
succeed_if "application could not read changed config"
"$KDB" set "$UKEY/print" "1"
./application
succeed_if "application could not read changed config"
./application | grep "mystring: $MYSTRING"
succeed_if "application did not print mystring"
./application | grep "myint: $MYINT"
succeed_if "application did not print myint"
./application | grep "mydouble: $MYDOUBLE"
succeed_if "application did not print mydouble"
./application | grep "sizeof(myfloatarray): $MYFLOATSIZE"
succeed_if "application did not print size of myfloatarray"
./application | grep "myfloatarray\\[0\\]: $MYFLOAT0"
succeed_if "application did not print myfloatarray[0]"
./application | grep "myfloatarray\\[1\\]: $MYFLOAT1"
succeed_if "application did not print myfloatarray[1]"
./application | grep "myfloatarray\\[2\\]: $MYFLOAT2"
succeed_if "application did not print myfloatarray[2]"
./application | grep "myfloatarray\\[3\\]: $MYFLOAT3"
succeed_if "application did not print myfloatarray[3]"
./application | grep "myfloatarray\\[4\\]: $MYFLOAT4"
succeed_if "application did not print myfloatarray[4]"
"$KDB" rm -r "$UKEY"
"$KDB" rm -r "$SPECKEY"
"$KDB" umount "$SPECKEY"
"$KDB" umount "$KEY"
}
echo "Testing build with cmake"
cd "$EXTERNAL_FOLDER"
mkdir build
cd build
# manually set Elektra_DIR and KDB to support non-standard install locations
cmake ../cmake -DElektra_DIR:PATH="$(realpath $(dirname $0)/../../cmake/Elektra)"
succeed_if "could not run cmake"
cmake --build .
succeed_if "could not build cmake project"
do_tests
do_tests
cd ..
rm -r build
echo "Testing build with pkgconfig"
cd "$EXTERNAL_FOLDER/pkgconfig"
make
succeed_if "could not build pkgconfig project"
do_tests
do_tests
rm application
end_script gen
|
BernhardDenner/libelektra
|
tests/shell/external/example_highlevel.sh
|
Shell
|
bsd-3-clause
| 3,138 |
rm -rf build
rm -rf bin
rm -rf doc/html
rm -f *~
|
AADC-Fruit/AADC_2015_FRUIT
|
Freestyle/clsquare/scripts/do_clean.sh
|
Shell
|
bsd-3-clause
| 50 |
#!/bin/bash -e
#
# Usage (assuming: shopt -s extglob):
#
# $ cd examples/ && ../scripts/render_index.sh !(index).html
#
mkdir -p thumbs
tmpdir=$(mktemp -d)
trap "rm -r $tmpdir" INT TERM EXIT
cat <<EOF>index.html
<!DOCTYPE html>
<html>
<head>
<title>Notebook gallery</title>
</head>
<body>
EOF
for f in "$@"; do
if [[ $f == _* ]]; then
continue # don't include notebooks starting with underscore
fi
img=$(basename $f .html).png
QT_QPA_PLATFORM=offscreen phantomjs $(unset CDPATH && cd "$(dirname "$0")" && echo $PWD)/rasterize.js $f $tmpdir/$img 1200px*900px
convert $tmpdir/$img -resize 400x300 thumbs/$img
cat <<EOF>>index.html
<p style='text-align: center'>
<a href='$f' style='font-family: sans-serif'>
<img src='thumbs/$img'><br>
$f
</a></p>
EOF
done
cat <<EOF>>index.html
</body>
</html>
EOF
|
sympy/scipy-2017-codegen-tutorial
|
bin/render_index.sh
|
Shell
|
bsd-3-clause
| 833 |
#!/bin/sh
BASE_DIR=`pwd`
JEMALLOC_PATH="$BASE_DIR/deps/jemalloc-4.1.0"
LEVELDB_PATH="$BASE_DIR/deps/leveldb-1.18"
SNAPPY_PATH="$BASE_DIR/deps/snappy-1.1.0"
if test -z "$TARGET_OS"; then
TARGET_OS=`uname -s`
fi
if test -z "$MAKE"; then
MAKE=make
fi
if test -z "$CC"; then
CC=gcc
fi
if test -z "$CXX"; then
CXX=g++
fi
case "$TARGET_OS" in
Darwin)
#PLATFORM_CLIBS="-pthread"
#PLATFORM_CFLAGS=""
;;
Linux)
PLATFORM_CLIBS="-pthread -lrt"
;;
OS_ANDROID_CROSSCOMPILE)
PLATFORM_CLIBS="-pthread"
SNAPPY_HOST="--host=i386-linux"
;;
CYGWIN_*)
PLATFORM_CLIBS="-lpthread"
;;
SunOS)
PLATFORM_CLIBS="-lpthread -lrt"
;;
FreeBSD)
PLATFORM_CLIBS="-lpthread"
MAKE=gmake
;;
NetBSD)
PLATFORM_CLIBS="-lpthread -lgcc_s"
;;
OpenBSD)
PLATFORM_CLIBS="-pthread"
;;
DragonFly)
PLATFORM_CLIBS="-lpthread"
;;
HP-UX)
PLATFORM_CLIBS="-pthread"
;;
*)
echo "Unknown platform!" >&2
exit 1
esac
DIR=`pwd`
cd $SNAPPY_PATH
if [ ! -f Makefile ]; then
echo ""
echo "##### building snappy... #####"
./configure $SNAPPY_HOST
# Note: the snappy build fails on some Linux systems unless file
# timestamps are refreshed first, hence the touch below.
find . | xargs touch
make
echo "##### building snappy finished #####"
echo ""
fi
cd "$DIR"
case "$TARGET_OS" in
CYGWIN*|FreeBSD|OS_ANDROID_CROSSCOMPILE)
echo "not using jemalloc on $TARGET_OS"
;;
*)
DIR=`pwd`
cd $JEMALLOC_PATH
if [ ! -f Makefile ]; then
echo ""
echo "##### building jemalloc... #####"
sh ./autogen.sh
./configure
make
echo "##### building jemalloc finished #####"
echo ""
fi
cd "$DIR"
;;
esac
rm -f src/version.h
echo "#ifndef SSDB_DEPS_H" >> src/version.h
echo "#ifndef SSDB_VERSION" >> src/version.h
echo "#define SSDB_VERSION \"`cat version`\"" >> src/version.h
echo "#endif" >> src/version.h
echo "#endif" >> src/version.h
case "$TARGET_OS" in
CYGWIN*|FreeBSD)
;;
OS_ANDROID_CROSSCOMPILE)
echo "#define OS_ANDROID 1" >> src/version.h
;;
*)
echo "#ifndef IOS" >> src/version.h
echo "#include <stdlib.h>" >> src/version.h
echo "#include <jemalloc/jemalloc.h>" >> src/version.h
echo "#endif" >> src/version.h
;;
esac
rm -f build_config.mk
echo CC=$CC >> build_config.mk
echo CXX=$CXX >> build_config.mk
echo "MAKE=$MAKE" >> build_config.mk
echo "LEVELDB_PATH=$LEVELDB_PATH" >> build_config.mk
echo "JEMALLOC_PATH=$JEMALLOC_PATH" >> build_config.mk
echo "SNAPPY_PATH=$SNAPPY_PATH" >> build_config.mk
echo "CFLAGS=" >> build_config.mk
echo "CFLAGS = -DNDEBUG -D__STDC_FORMAT_MACROS -Wall -O2 -Wno-sign-compare" >> build_config.mk
echo "CFLAGS += ${PLATFORM_CFLAGS}" >> build_config.mk
echo "CFLAGS += -I \"$LEVELDB_PATH/include\"" >> build_config.mk
echo "CLIBS=" >> build_config.mk
echo "CLIBS += \"$LEVELDB_PATH/libleveldb.a\"" >> build_config.mk
echo "CLIBS += \"$SNAPPY_PATH/.libs/libsnappy.a\"" >> build_config.mk
case "$TARGET_OS" in
CYGWIN*|FreeBSD|OS_ANDROID_CROSSCOMPILE)
;;
*)
echo "CLIBS += \"$JEMALLOC_PATH/lib/libjemalloc.a\"" >> build_config.mk
echo "CFLAGS += -I \"$JEMALLOC_PATH/include\"" >> build_config.mk
;;
esac
echo "CLIBS += ${PLATFORM_CLIBS}" >> build_config.mk
if test -z "$TMPDIR"; then
TMPDIR=/tmp
fi
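# Probe whether the compiler accepts C++11 <unordered_map>; if it does,
# define NEW_MAC (presumably so the sources can use that implementation).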
g++ -x c++ - -o $TMPDIR/ssdb_build_test.$$ 2>/dev/null <<EOF
#include <unordered_map>
int main() {}
EOF
if [ "$?" = 0 ]; then
echo "CFLAGS += -DNEW_MAC" >> build_config.mk
fi
|
left2right/ssdb
|
build.sh
|
Shell
|
bsd-3-clause
| 3,492 |
#!/bin/sh
EGOS=noos EGARCH=cortexm3 EGTARGET=f40_41xxx egc "$@"
|
ziutek/emgo
|
egpath/src/stm32/examples/emw3162/build.sh
|
Shell
|
bsd-3-clause
| 62 |
#!/bin/sh
SPACE="space search key int number attributes bit01, bit02, bit03, bit04, bit05, bit06, bit07, bit08, bit09, bit10, bit11, bit12, bit13, bit14, bit15, bit16, bit17, bit18, bit19, bit20, bit21, bit22, bit23, bit24, bit25, bit26, bit27, bit28, bit29, bit30, bit31, bit32 primary_index bit01, bit02, bit03, bit04, bit05, bit06, bit07, bit08, bit09, bit10, bit11, bit12, bit13, bit14, bit15, bit16, bit17, bit18, bit19, bit20, bit21, bit22, bit23, bit24, bit25, bit26, bit27, bit28, bit29, bit30, bit31, bit32 create 8 partitions tolerate 1 failures"
exec python "${HYPERDEX_SRCDIR}"/test/runner.py --daemons=8 --space="${SPACE}" -- \
"${HYPERDEX_BUILDDIR}"/test/search-stress-test --quiet -h {HOST} -p {PORT} -k int
|
hyc/HyperDex
|
test/sh/search.index.keytype=int,daemons=8.fault-tolerance=1.sh
|
Shell
|
bsd-3-clause
| 764 |
#!/bin/bash
# (C)opyright L.P.Klyne 2013
source /opt/fileserver/config.sh
source /opt/fileserver/functions.sh
if is_mounted $DISKA && is_mounted $DISKB ; then
do_rsync "$DISKA/*" "$DISKB/"
fi
|
LawrenceK/fs_monitor
|
rsync_a_b.sh
|
Shell
|
bsd-3-clause
| 194 |
#!/bin/sh
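# usage: ./serv.sh {pack|pack:dev|stop}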
case "$1" in
pack)
npm run clean
npm run build &
npm run dev
;;
pack:dev)
npm run clean
npm run build:dev &
npm run dev
;;
stop)
# the [bracketed] first character keeps grep from matching its own process
kill $(ps aux | grep '[n]pm' | awk '{print $2}')
kill $(ps aux | grep '[r]ails s' | awk '{print $2}')
#kill $(ps aux | grep 'ssh -ND 3128' | awk '{print $2}')
;;
esac
|
motephyr/socketio_template
|
serv.sh
|
Shell
|
mit
| 360 |
#!/bin/bash
#Author: mirage335
#Date: 07-01-2011 (MM-DD-YYYY)
#Version: 1.0 (Minor versions reflect compatible updates.)
#Dependencies: ubiquitous_bash.sh
#Usage: enterChroot.sh
#Purpose: Enters the associated ChRoot folder.
. ubiquitous_bash.sh
mustBeRoot #Non-superuser has no ability to mount filesystems or execute chroot.
ChRootDir="$(getScriptAbsoluteFolder)/ChRoot"/
env -i HOME="/root" TERM="${TERM}" SHELL="/bin/bash" PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" DISPLAY="$DISPLAY" $(which chroot) "$ChRootDir" /bin/bash
|
mirage335/mirage335OS
|
enterChRoot.sh
|
Shell
|
mit
| 569 |
#!/bin/bash
# Handle command-line arguments
source $KDBTESTS/flagparse.sh
# Path to test directory
testpath=${KDBTESTS}/dataaccess/common
# Start procs
${TORQHOME}/torq.sh start discovery1
# Start test proc
/usr/bin/rlwrap q ${TORQHOME}/torq.q \
-proctype rdb -procname dailyrdb1 \
-test ${testpath} \
-load ${KDBTESTS}/helperfunctions.q ${testpath}/../settings.q ${testpath}/settings.q ${testpath}/../mockdata.q \
-testresults ${KDBTESTS}/results/ \
-runtime $run \
-procfile ${testpath}/config/process.csv \
-dataaccess ${testpath}/config/tableproperties.csv \
$debug $stop $write $quiet
|
FlyingOE/TorQ
|
tests/dataaccess/common/run.sh
|
Shell
|
mit
| 610 |
# -------------------------------------------------------------------------------------------------
# Copyright (c) 2015 zsh-syntax-highlighting contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted
# provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this list of conditions
# and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of
# conditions and the following disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the zsh-syntax-highlighting contributors nor the names of its contributors
# may be used to endorse or promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -------------------------------------------------------------------------------------------------
# -*- mode: zsh; sh-indentation: 2; indent-tabs-mode: nil; sh-basic-offset: 2; -*-
# vim: ft=zsh sw=2 ts=2 et
# -------------------------------------------------------------------------------------------------
# Similar to double-quoted2.zsh
ZSH_HIGHLIGHT_STYLES[back-dollar-quoted-argument]=$unused_highlight
# This test checks that the '1' gets highlighted correctly. Do not append to the BUFFER.
BUFFER=": \$'\xa1"
expected_region_highlight=(
"3 4 ${(q-)ZSH_HIGHLIGHT_STYLES[dollar-quoted-argument]}" # $'
"5 8 ${(q-)ZSH_HIGHLIGHT_STYLES[back-dollar-quoted-argument]}" # \xa1
)
|
admean/dotfiles
|
.oh-my-zsh/custom/plugins/zsh-syntax-highlighting/highlighters/main/test-data/dollar-quoted3.zsh
|
Shell
|
mit
| 2,370 |
#!/bin/bash
# rpm-chksec
#
# Copyright (c) 2011-2013 Steve Grubb. ALL RIGHTS RESERVED.
# [email protected]
#
# This software may be freely redistributed under the terms of the GNU
# public license.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
# Given an rpm, it will look at each file to check that its compiled with
# the intended flags to make it more secure. Things that are green are OK.
# Anything in yellow could be better but is passable. Anything in red needs
# attention.
#
# If the --all option is given, it will generate a list of rpms and then
# summarize the rpm's state. For yes, then all files are in the expected
# state. Just one file not compiled with the right flags can turn the
# answer to no. Re-run passing that package (instead of --all) for the details.
#
# To save to file: ./rpm-chksec | sed -r "s/\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]//g" | tee output.txt
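# Examples ("coreutils" is just an illustrative package name):
# ./rpm-chksec coreutils
# ./rpm-chksec --all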
VERSION="0.5.2"
usage () {
echo "rpm-chksec [--version|--all|<rpmname>...]"
if [ ! -x /usr/bin/filecap ] ; then
echo "You need to install libcap-ng-utils to test capabilities"
fi
if [ $EUID != 0 ] ; then
echo "You might need to be root to read some files"
fi
exit 0
}
if [ "$1" = "--help" -o $# -eq 0 ] ; then
usage
fi
if [ "$1" = "--version" ] ; then
echo "rpm-chksec $VERSION"
exit 0
fi
if [ "$1" = "--all" ] ; then
MODE="all"
else
MODE="single"
fi
do_one () {
if ! rpm -q $1 >/dev/null 2>&1 ; then
if [ "$MODE" = "single" ] ; then
echo "$1 is not installed"
exit 1
else
echo "not installed"
return
fi
fi
files=`rpm -ql $1`
# Look for daemons, need this for later...
DAEMON=""
for f in $files
do
if [ ! -f "$f" ] ; then
continue
fi
if [ `echo "$f" | grep '\/etc\/rc.d\/init.d'` ] ; then
n=`basename "$f"`
t=`which "$n" 2>/dev/null`
if [ x"$t" != "x" ] ; then
DAEMON="$DAEMON $t"
continue
fi
t=`which "$n"d 2>/dev/null`
if [ x"$t" != "x" ] ; then
DAEMON="$DAEMON $t"
continue
fi
t=`cat "$f" 2>/dev/null | grep 'bin' | grep 'exit 5' | grep -v '\$'`
if [ x"$t" != "x" ] ; then
DAEMON="$DAEMON $t"
continue
fi
if [ "$MODE" = "single" ] ; then
echo "Can't find the executable in $f but daemon rules would apply"
fi
elif [ `echo "$f" | grep '\/lib\/systemd\/'` ] ; then
t=`cat "$f" | grep -i '^ExecStart=' | tr '=' ' ' | awk '{ print $2 }'`
if [ x"$t" != "x" ] ; then
DAEMON="$DAEMON $t"
continue
fi
fi
done
# Prevent garbled output when doing --all.
skip_current=0
for f in $files
do
if [ ! -f "$f" ] ; then
continue
fi
# Some packages have files with ~ in them. This avoids it.
if ! echo "$f" | grep '^/' >/dev/null ; then
continue
fi
if [ ! -r "$f" ] && [ $EUID != 0 ] ; then
if [ $MODE = "single" ] ; then
echo "Please re-test $f as the root user"
else
# Don't print results.
skip_current=1
echo "Please re-test $1 as the root user"
fi
continue
fi
if ! file "$f" | grep -qw 'ELF'; then
continue
fi
RELRO="no"
if readelf -l "$f" 2>/dev/null | grep -q 'GNU_RELRO'; then
RELRO="partial"
fi
if readelf -d "$f" 2>/dev/null | grep -q 'BIND_NOW'; then
RELRO="full"
fi
PIE="no"
if readelf -h "$f" 2>/dev/null | grep -q 'Type:[[:space:]]*DYN'; then
PIE="DSO"
if readelf -d "$f" 2>/dev/null | grep -q '(DEBUG)'; then
PIE="yes"
fi
fi
APP=""
if [ x"$DAEMON" != "x" ] ; then
for d in $DAEMON
do
if [ "$f" = "$d" ] ; then
APP="daemon"
break
fi
done
fi
if [ x"$APP" = "x" ] ; then
# See if this is a library or a setuid app
if [ `echo "$f" | grep '\/lib' | grep '\.so'` ] ; then
APP="library"
elif [ `find "$f" -perm -004000 -type f -print` ] ; then
APP="setuid"
elif [ `find "$f" -perm -002000 -type f -print` ] ; then
APP="setgid"
elif [ -x /usr/bin/filecap ] && [ `filecap "$f" 2> /dev/null | wc -w` -gt 0 ] ; then
APP="setcap"
else
syms1=`/usr/bin/readelf -s "$f" 2>/dev/null | egrep ' connect@.*GLIBC| listen@.*GLIBC| accept@.*GLIBC|accept4@.*GLIBC'`
syms2=`/usr/bin/readelf -s "$f" 2>/dev/null | egrep ' getaddrinfo@.*GLIBC| getnameinfo@.*GLIBC| getservent@.*GLIBC| getservbyname@.*GLIBC| getservbyport@.*GLIBC|gethostbyname@.*GLIBC| gethostbyname2@.*GLIBC| gethostbyaddr@.*GLIBC| gethostbyaddr2@.*GLIBC'`
if [ x"$syms1" != "x" ] ; then
if [ x"$syms2" != "x" ] ; then
APP="network-ip"
else
APP="network-local"
fi
fi
fi
fi
if [ x"$APP" = "x" ] ; then
APP="exec"
fi
# OK, ready for the output
if [ "$MODE" = "single" ] ; then
printf "%-56s %-10s " "$f" $APP
if [ "$APP" = "daemon" -o "$APP" = "setuid" -o "$APP" = "setgid" -o "$APP" = "setcap" -o "$APP" = "network-ip" -o "$APP" = "network-local" ] ; then
if [ "$RELRO" = "full" ] ; then
printf "\033[32m%-7s\033[m " $RELRO
elif [ "$RELRO" = "partial" ] ; then
printf "\033[33m%-7s\033[m " $RELRO
else
printf "\033[31m%-7s\033[m " $RELRO
fi
if [ "$PIE" = "yes" ] ; then
printf "\033[32m%-4s\033[m" $PIE
else
printf "\033[31m%-4s\033[m" $PIE
fi
elif [ "$APP" = "library" ] ; then
if [ "$RELRO" = "full" -o "$RELRO" = "partial" ] ; then
printf "\033[32m%-7s\033[m " $RELRO
else
printf "\033[31m%-7s\033[m " $RELRO
fi
printf "\033[32m%-4s\033[m" $PIE
else
# $APP = exec - we want partial relro
if [ "$RELRO" = "no" ] ; then
printf "\033[31m%-7s\033[m " $RELRO
else
printf "\033[32m%-7s\033[m " $RELRO
fi
printf "\033[32m%-4s\033[m" $PIE
fi
echo
else
if [ "$APP" = "daemon" -o "$APP" = "setuid" -o "$APP" = "setgid" -o "$APP" = "setcap" -o "$APP" = "network-ip" -o "$APP" = "network-local" ] ; then
if [ "$RELRO" = "no" ] ; then
RELRO_SUM="no"
APP_SUM="$APP"
fi
if [ "$PIE" = "no" ] ; then
PIE_SUM="no"
APP_SUM="$APP"
fi
elif [ "$APP" = "library" ] ; then
if [ "$RELRO" = "no" ] ; then
RELRO_SUM="no"
APP_SUM="$APP"
fi
# $APP = exec - must have partial or full relro
elif [ "$RELRO" = "no" ] ; then
RELRO_SUM="no"
APP_SUM="$APP"
fi
fi
done
}
if [ "$MODE" = "single" ] ; then
printf "%-56s %-10s %-7s %-4s" "FILE" "TYPE" "RELRO" "PIE"
echo
for i; do
f=$(basename "$i")
# Strip the .rpm extension, if present.
do_one "${f%%.rpm}"
done
exit 0
fi
# Skip the kernel as it's special
packages=`rpm -qa | egrep -v 'kernel.|debuginfo.|.noarch|gpg-pubkey' | sort`
printf "%-50s %-5s %-4s %-14s" "PACKAGE" "RELRO" "PIE" "CLASS"
echo
for p in $packages
do
RELRO_SUM="yes"
PIE_SUM="yes"
APP_SUM=""
printf "%-50s " $p
do_one $p
if [[ $skip_current -eq 1 ]] ; then
continue
fi
if [ "$RELRO_SUM" = "yes" ] ; then
printf "\033[32m%-5s\033[m " "$RELRO_SUM"
else
printf "\033[31m%-5s\033[m " "$RELRO_SUM"
fi
if [ "$PIE_SUM" = "yes" ] ; then
printf "\033[32m%-4s\033[m" "$PIE_SUM"
if [ "$RELRO_SUM" = "no" ] ; then
printf " %-14s" "$APP_SUM"
fi
else
if [ "$APP_SUM" = "network-local" ] ; then
printf "\033[33m%-4s\033[m %-14s" "$PIE_SUM" "$APP_SUM"
else
printf "\033[31m%-4s\033[m %-14s" "$PIE_SUM" "$APP_SUM"
fi
fi
echo
done
exit 0
|
akuster/meta-security
|
recipes-security/redhat-security/files/rpm-chksec.sh
|
Shell
|
mit
| 7,353 |
#!/bin/sh
set -e
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
install_framework()
{
local source="${BUILT_PRODUCTS_DIR}/Pods-BSImagePickerExampleTests/$1"
local destination="${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
if [ -L "${source}" ]; then
echo "Symlinked..."
source=$(readlink "${source}")
fi
# use filter instead of exclude so missing patterns don't throw errors
echo "rsync -av --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers/" --filter "- PrivateHeaders/" --filter "- Modules/" ${source} ${destination}"
rsync -av --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers/" --filter "- PrivateHeaders/" --filter "- Modules/" "${source}" "${destination}"
# Resign the code if required by the build settings to avoid unstable apps
if [ "${CODE_SIGNING_REQUIRED}" == "YES" ]; then
code_sign "${destination}/$1"
fi
# Embed linked Swift runtime libraries
local basename
basename=$(echo $1 | sed -E s/\\..+// && exit ${PIPESTATUS[0]})
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}/$1/${basename}" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]})
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
if [ "${CODE_SIGNING_REQUIRED}" == "YES" ]; then
code_sign "${destination}/${lib}"
fi
done
}
# Signs a framework with the provided identity
code_sign() {
# Use the current code_sign_identitiy
echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
echo "/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} --preserve-metadata=identifier,entitlements $1"
/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} --preserve-metadata=identifier,entitlements "$1"
}
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_framework 'BSImagePicker.framework'
install_framework 'UIImageViewModeScaleAspect.framework'
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_framework 'BSImagePicker.framework'
install_framework 'UIImageViewModeScaleAspect.framework'
fi
|
aaronsakowski/BSImagePicker
|
Example/Pods/Target Support Files/Pods-BSImagePickerExampleTests/Pods-BSImagePickerExampleTests-frameworks.sh
|
Shell
|
mit
| 2,491 |
#!/bin/sh
# download-tools-sources.sh
#
# Copyright (c) 2012, 2013, 2014 Anthony Green
#
# The above named program is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License
# version 2 as published by the Free Software Foundation.
#
# The above named program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this work; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301, USA.
# A basic script to download the upstream GNU toolchain sources.
echo "Downloading GCC sources..."
if [ ! -d gcc ]
then
svn checkout svn://gcc.gnu.org/svn/gcc/trunk gcc
else
( cd gcc && svn up )
fi
echo "Downloading binutils sources..."
if [ ! -d binutils-gdb ]
then
git clone --depth=1 git://sourceware.org/git/binutils-gdb.git
else
( cd binutils-gdb && git checkout -f && git pull )
fi
echo "Downloading newlib and libgloss..."
cvs -z3 -d:pserver:[email protected]:/cvs/src co \
newlib \
libgloss
echo "Updating binutils and newlib/libgloss with gcc config.sub"
cp gcc/config.sub binutils-gdb
cp gcc/config.sub src
|
jgarzik/moxiebox
|
contrib/download-tools-sources.sh
|
Shell
|
mit
| 1,391 |
#!/bin/bash
FN="RnaSeqSampleSizeData_1.20.0.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.11/data/experiment/src/contrib/RnaSeqSampleSizeData_1.20.0.tar.gz"
"https://bioarchive.galaxyproject.org/RnaSeqSampleSizeData_1.20.0.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-rnaseqsamplesizedata/bioconductor-rnaseqsamplesizedata_1.20.0_src_all.tar.gz"
)
MD5="a4808cb5915eaecf7d1a89afba751c7f"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
curl $URL > $TARBALL
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks.
if [[ $(uname -s) == "Linux" ]]; then
if md5sum -c <<<"$MD5 $TARBALL"; then
SUCCESS=1
break
fi
elif [[ $(uname -s) == "Darwin" ]]; then
if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
SUCCESS=1
break
fi
fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
|
roryk/recipes
|
recipes/bioconductor-rnaseqsamplesizedata/post-link.sh
|
Shell
|
mit
| 1,352 |
# -*- mode: bash; tab-width: 2; -*-
# vim: ts=2 sw=2 ft=bash noet
# Copy the code into the live directory which will be used to run the app
publish_release() {
nos_print_bullet "Moving code into app directory..."
rsync -a $(nos_code_dir)/ $(nos_app_dir)
}
# Determine the nodejs runtime to install. This will first check
# within the Boxfile, then will rely on default_runtime to
# provide a sensible default
runtime() {
echo $(nos_validate \
"$(nos_payload "config_runtime")" \
"string" "$(default_runtime)")
}
# Provide a default nodejs version.
default_runtime() {
packagejs_runtime=$(package_json_runtime)
if [[ "$packagejs_runtime" = "false" ]]; then
echo "nodejs-6.11"
else
echo $packagejs_runtime
fi
}
# Determine the python runtime to install. This will first check
# within the boxfile.yml, then will rely on python_default_runtime to
# provide a sensible default
python_version() {
echo $(nos_validate \
"$(nos_payload "config_python_version")" \
"string" "$(default_python_version)")
}
# Provide a default python version.
default_python_version() {
echo "python-3.6"
}
# todo: extract the contents of package.json
# Will need https://stedolan.github.io/jq/
# https://github.com/heroku/heroku-buildpack-nodejs/blob/master/lib/json.sh#L17
# https://github.com/heroku/heroku-buildpack-nodejs/blob/master/bin/compile#L73
package_json_runtime() {
echo "false"
}
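# A possible sketch for the todo above (assumption: jq would have to be
# added as a build dependency; it is not one today):
#   jq -r '.engines.node // "false"' "$(nos_code_dir)/package.json"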
# Determine which dependency manager to use (yarn/npm)
dep_manager() {
echo $(nos_validate \
"$(nos_payload "config_dep_manager")" \
"string" "$(default_dep_manager)")
}
# Use yarn as the default dep manager
default_dep_manager() {
echo "yarn"
}
# Install the nodejs runtime along with any dependencies.
install_runtime_packages() {
pkgs=("$(runtime)" "$(python_version)")
# add any client dependencies
pkgs+=("$(query_dependencies)")
nos_install ${pkgs[@]}
}
# Uninstall build dependencies
uninstall_build_dependencies() {
# pkgin doesn't remove packages specified with partial version numbers.
nos_uninstall "python"
}
# compiles a list of dependencies that will need to be installed
query_dependencies() {
deps=()
# mysql
if [[ `grep 'mysql' $(nos_code_dir)/package.json` ]]; then
deps+=(mysql-client)
fi
# memcache
if [[ `grep 'memcache' $(nos_code_dir)/package.json` ]]; then
deps+=(libmemcached)
fi
# postgres
if [[ `grep 'postgres' $(nos_code_dir)/package.json` ]]; then
deps+=(postgresql94-client)
fi
# redis
if [[ `grep 'redis\|spade\|rebridge' $(nos_code_dir)/package.json` ]]; then
deps+=(redis)
fi
echo "${deps[@]}"
}
# installs npm deps via yarn or npm
install_npm_deps() {
# if yarn is available, let's use that
if [[ "$(dep_manager)" = "yarn" ]]; then
yarn_install
else # fallback to npm (slow)
npm_install
fi
}
# install dependencies via yarn
yarn_install() {
if [[ -f $(nos_code_dir)/package.json ]]; then
cd $(nos_code_dir)
nos_run_process "Installing npm modules" "yarn"
cd - > /dev/null
fi
}
# Installing dependencies from the package.json is done with npm install.
npm_install() {
if [[ -f $(nos_code_dir)/package.json ]]; then
cd $(nos_code_dir)
nos_run_process "Installing npm modules" "npm install"
cd - > /dev/null
fi
}
|
nanobox-io/nanobox-engine-nodejs
|
lib/nodejs.sh
|
Shell
|
mit
| 3,296 |
# XXX #0f0 should show up green
# XXX f00 should not show up red
# FIXME ff0 should show up yellow but are known not to
echo '#0f0'
echo ##0f0
echo \#ff0
echo #f00
echo ##0f0
echo # #0f0
cmd '#0f0'
cmd "#0f0"
cmd \#ff0
cmd #f00
cmd ##0f0
cmd # #0f0
#f00
##0f0 XXX
# #0f0 XXX
# XXX #0f0 XXX
cat << ''
#0f0
|
ruchee/vimrc
|
vimfiles/bundle/css-color/tests/example.zsh
|
Shell
|
mit
| 310 |
#!/bin/bash
## Give the Job a descriptive name
#PBS -N testjob
## Output and error files
#PBS -o testjob.out
#PBS -e testjob.err
## Limit memory, runtime etc.
#PBS -l walltime=01:00:00
#PBS -l pmem=100mb
## How many nodes should we get?
#PBS -l nodes=8
## Start
#echo "PBS_NODEFILE = $PBS_NODEFILE"
#cat $PBS_NODEFILE
## Run the job (use full paths to make sure we execute the correct thing)
mpirun -np 8 -hostfile $PBS_NODEFILE --bynode /home/parallel/parlab40/fw/fw_MPI 2048
|
VHarisop/Parallel
|
ex1/code/scripts/old_scripts/mpirun_on_lab.sh
|
Shell
|
gpl-2.0
| 486 |
#!/bin/bash
set -eu
find ../../ '(' \
-iname '*.pas' -or \
-iname '*.inc' -or \
-iname '*.css' -or \
-iname '*.dpr' ')' \
-execdir sed --in-place -e 's|Copyright 1998-2016 PasDoc developers|Copyright 1998-2018 PasDoc developers|' '{}' ';'
|
pasdoc/pasdoc
|
source/tools/adjust_copyrights.sh
|
Shell
|
gpl-2.0
| 250 |
#!/bin/bash
set -e # exit immediately if a command returns with a nonzero exit code
echo "*** Building distcc/base image"
docker build -t distcc/base -f base/Dockerfile base
if [ $# -eq 0 ]; then
compilers=("gcc-4.8" "gcc-5" "clang-3.8")
else
compilers=("$1")
fi
for compiler in "${compilers[@]}"
do
echo "*** Building distcc/$compiler image"
docker build -t distcc/$compiler -f compilers/Dockerfile.$compiler .
done
echo "*** Building distcc"
for compiler in "${compilers[@]}"
do
echo "*** Building distcc with distcc/$compiler image"
set -x
docker run --rm -it -v /tmp:/tmp -v `pwd`/..:/src:rw -w /src distcc/$compiler bash -c "./autogen.sh && ./configure && make clean && make && make install && make check" &> distcc-$compiler.log
set +x
done
|
itensionanders/distcc
|
docker/build.sh
|
Shell
|
gpl-2.0
| 768 |
#!/bin/bash
header "User"
ROLE_NAME="tmp_user_role_$RAND"
#testing user
test_success "user update" user update --username="$TEST_USER" --password=password
test_success "user list" user list
test_success "user info" user info --username="$TEST_USER"
#test role assignment
test_success "user_role create" user_role create --name="$ROLE_NAME"
test_success "user assign_role" user assign_role --username="$TEST_USER" --role="$ROLE_NAME"
test_success "user unassign_role" user unassign_role --username="$TEST_USER" --role="$ROLE_NAME"
# test default environment
TEST_USER2=${TEST_USER}2
test_success "user create with default environment" user create --username="$TEST_USER2" [email protected] \
--password=password --default_organization="$TEST_ORG" --default_environment="$TEST_ENV"
test_own_cmd_success "user has default environment" $CMD user info --username="$TEST_USER2" | grep "$TEST_ENV"
test_success "user default_environment_update" user update --username="$TEST_USER2" \
--default_organization="$TEST_ORG" --default_environment="$TEST_ENV_2"
test_own_cmd_success "user has default environment" $CMD user info --username="$TEST_USER2" | grep "$TEST_ENV_2"
test_success "user default_environment_update" user update --username="$TEST_USER2" \
--no_default_environment
test_own_cmd_success "user has default environment" $CMD user info --username="$TEST_USER2" | grep "$TEST_ENV_2"
test_success "user deletion" user delete --username="$TEST_USER2"
|
beav/katello
|
scripts/system-test/cli_tests/user.sh
|
Shell
|
gpl-2.0
| 1,474 |
#!/bin/bash
TOPDIR=/N/u/bcmcpher/Karst/nhp_reg
dim=3
m=bias2dwi.nii.gz
f=t2.nii.gz
its=[15x50x15,1e-6,10]
smth=2x1x0
down=3x2x1
antsRegistration -d $dim \
--metric CC[$f,$m,1,4] \
--transform SyN[0.1,3.0,0] \
--convergence $its \
--smoothing-sigmas $smth \
--shrink-factors $down \
--use-histogram-matching 0 \
--write-composite-transform 0 --output [antsDemo_,antsDemo_diff.nii.gz,antsDemo_diff_inv.nii.gz]
CreateWarpedGridImage $dim antsDemo_0InverseWarp.nii.gz antsDemo_grid.nii.gz
#ConvertToJpg antsDemo_grid.nii.gz antsDemo_grid.png
CreateJacobianDeterminantImage $dim antsDemo_0Warp.nii.gz antsDemo_jac.nii.gz 0 1
CreateJacobianDeterminantImage $dim antsDemo_0InverseWarp.nii.gz antsDemo_jac_inv.nii.gz 0 1
|
bacaron/pestillilab_projects
|
nhp/shell/examples/antsDemoReg.sh
|
Shell
|
gpl-2.0
| 752 |
#! /bin/bash
if [ -z "$LIOBIN" ] ; then
LIOBIN=../../../liosolo/liosolo
fi
SALIDA=output
if [ -n "$1" ]
then
SALIDA=$1
fi
$LIOBIN -i chloride.in -c chloride.xyz -v > $SALIDA
|
ramirezfranciscof/lio
|
test/LIO_test/07_TDDFTHCL/run.sh
|
Shell
|
gpl-2.0
| 185 |
#!/bin/bash
# run as root...
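# usage: ./turn_offload_off.sh <interface>   (e.g. eth0)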
NIC=$1
ifconfig $NIC down
ifconfig $NIC up
ethtool -G $NIC rx 4096
ethtool -K $NIC tso off
ethtool -K $NIC gro off
ethtool -K $NIC lro off
ethtool -K $NIC gso off
ethtool -K $NIC rx off
ethtool -K $NIC tx off
ethtool -K $NIC sg off
|
google-code/amico
|
utils/turn_offload_off.sh
|
Shell
|
gpl-2.0
| 265 |
#!/bin/bash
echo "Configuring..."
source configure.sh
./notifyCommit.sh
echo "Building..."
./gradlew clean assembleRelease
[ -z "$BUILD_TAG" ] || python3.5 gen_manifest.py
[ $? -eq 0 ] || echo "Build Failed!"
|
MSF-Jarvis/AndroidFileHost_Browser
|
buildReleaseApp.sh
|
Shell
|
gpl-3.0
| 213 |
#!/bin/bash
read -p 'What date (YYYY-MM-DD) should we get contributions since? (i.e. date of previous release): ' from_date
read -sp 'Provide a personal access token (you must): ' auth_token
ignored_users="renovate-bot,apps/renovate,renovate,renovate[bot]"
output_file="contributors.html"
common_arguments="--owner woocommerce --fromDate $from_date --authToken $auth_token --cols 6 --sortBy contributions --format html --sortOrder desc --showlogin true --filter $ignored_users"
echo ""
echo "<h2>WooCommerce core</h2>" > $output_file
echo "Generating contributor list for WC core since $from_date"
./node_modules/.bin/githubcontrib --repo woocommerce $common_arguments >> $output_file
echo "<h2>WooCommerce Admin</h2>" >> $output_file
echo "Generating contributor list for WC Admin since $from_date"
./node_modules/.bin/githubcontrib --repo woocommerce-admin $common_arguments >> $output_file
echo "<h2>WooCommerce Blocks</h2>" >> $output_file
echo "Generating contributor list for WC Blocks since $from_date"
./node_modules/.bin/githubcontrib --repo woocommerce-gutenberg-products-block $common_arguments >> $output_file
echo "<h2>Action Scheduler</h2>" >> $output_file
echo "Generating contributor list for Action Scheduler since $from_date"
./node_modules/.bin/githubcontrib --repo action-scheduler $common_arguments >> $output_file
echo "<h2>REST API</h2>" >> $output_file
echo "Generating contributor list for REST API since $from_date"
./node_modules/.bin/githubcontrib --repo woocommerce-rest-api $common_arguments >> $output_file
echo "Output generated to $output_file."
|
greguly/woocommerce
|
bin/contributors.sh
|
Shell
|
gpl-3.0
| 1,589 |
#!/bin/sh
xrandr --output HDMI1 --off --output DP1 --mode 1920x1080 --pos 1920x0 --rotate left --output eDP1 --primary --mode 1920x1080 --pos 0x840 --rotate normal --output VIRTUAL1 --off
|
TheKK/customize-settings
|
.screenlayout/right_rotate_multi.sh
|
Shell
|
gpl-3.0
| 188 |
for a; do
echo $a
done
|
legionus/shell_parser
|
t/shfmt/shell-0030.sh
|
Shell
|
gpl-3.0
| 27 |
#!/bin/bash
#===================================================================================
#
# FILE: compile.sh
#
# USAGE: compile.sh <FILE> [FOLDER] [PARALLEL]
#
# DESCRIPTION: Creates the default build directory for the QSS Solver GUI
# and runs the MicroModelica compiler
#
# PARAMETERS: <FILE> MicroModelica file to be compiled.
# OPTIONS: [FOLDER] path where the MicroModelica file is located.
#          [PARALLEL] "true" enables the compiler's parallel flag (-p).
# REQUIREMENTS: $MMOC_BUILD must point to the default build directory
# used by the QSS Solver GUI.
# $MMOC_BIN must point to the default bin directory
# used by the QSS Solver GUI.
# NOTES: ---
# AUTHOR: Joaquin Fernandez, [email protected]
# PROJECT: QSS Solver
# VERSION: 3.2
#===================================================================================
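# EXAMPLE: compile.sh mymodel /path/to/models true
# (compiles /path/to/models/mymodel.mo with the parallel flag; names are illustrative)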
FILE=$1
FOLDER=$2
PARALLEL=$3
mkdir -p $MMOC_BUILD/$FILE
if [ "$PARALLEL" == "true" ]; then
FLAGS="-p"
else
FLAGS=""
fi
if [ -z "$FOLDER" ]; then
$MMOC_BIN/mmoc $FLAGS -o $MMOC_BUILD/$FILE/$FILE $FILE.mo
else
$MMOC_BIN/mmoc $FLAGS -o $MMOC_BUILD/$FILE/$FILE $FOLDER/$FILE.mo
fi
cd $MMOC_BUILD/$FILE
make -f $FILE.makefile clean
make -f $FILE.makefile
|
CIFASIS/qss-solver
|
deploy/linux/scripts/compile.sh
|
Shell
|
gpl-3.0
| 1,217 |
#!/bin/sh
#
# file: distclean.sh
#
# author: Copyright (C) 2015-2018 Kamil Szczygiel http://www.distortec.com http://www.freddiechopin.info
#
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not
# distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
set -e
set -u
if [ -d output ]; then
echo 'Removing build outputs...'
rm -rf output/
fi
if [ -d documentation/html ]; then
echo 'Removing doxygen outputs...'
rm -rf documentation/html/
fi
if [ -f selectedConfiguration.mk ]; then
echo 'Removing selectedConfiguration.mk file...'
rm -f selectedConfiguration.mk
fi
echo 'Project cleaned successfully.'
|
jasmin-j/distortos
|
scripts/distclean.sh
|
Shell
|
mpl-2.0
| 702 |
#!/bin/bash
#docker run -t --rm -e "JENKINS_TOKEN_NAME=$JENKINS_TOKEN_NAME" -v $(pwd):/work -w /work smashwilson/curl /bin/bash get_latest_version_of_dependencies.sh
set -e
TEMPDIR=`mktemp -d`
#for each line in the nightly-repos folder
#get the latest file from the repo
curl -o ${TEMPDIR}/nightlies.xml http://openrov-software-nightlies.s3-us-west-2.amazonaws.com
ls ${TEMPDIR}/nightlies.xml
while read package; do
#need to parse package name and prefix out of nightlies
S3prefix=$(echo "$package" | cut -d= -f2)
packagename=$(echo "$package" | cut -d' ' -f1)
echo "Package:" $packagename
echo "Prefix" $S3prefix
# cat ${TEMPDIR}/nightlies.xml | ./getLatestFileFromS3.sh ${package}
cat ${TEMPDIR}/nightlies.xml | ./getLatestFileFromS3.sh ${S3prefix} >> ${TEMPDIR}/latest_files.txt
done < nightly-repos
#publish those files to the debian repo
BASEURL=http://openrov-software-nightlies.s3-website-us-west-2.amazonaws.com/
IFS=$'\n'
BRANCH='master'
cat ${TEMPDIR}/latest_files.txt
#get the list of dependent files
while read item; do
DEBURL=${BASEURL}${item}
echo $item
echo "publishing $DEBURL"
curl -g http://openrov-build-test.elasticbeanstalk.com:8080/job/OpenROV-generic-upload-deb-to-repo/buildWithParameters -d token=$JENKINS_TOKEN_NAME -d urlToDebPackage=$DEBURL -d branch=$BRANCH
done < ${TEMPDIR}/latest_files.txt
rm -rf $TEMPDIR
|
OpenROV/openrov-rov-suite
|
get_latest_version_of_dependencies.sh
|
Shell
|
agpl-3.0
| 1,342 |
#!/bin/bash -e
SVNFLAGS="-q"
CURDIR=$(pwd)
SCRIPT_DIR=$(cd $(dirname $0) && pwd)
cd $SCRIPT_DIR
./setup.sh
cd svn-test/checkout/testrepo
FILE_CONTENT=$1
echo $1 > README.txt
svn add $SVNFLAGS README.txt
svn commit $SVNFLAGS -m "Added readme." # r1
svn rm $SVNFLAGS README.txt
svn commit $SVNFLAGS -m "Deleting file." # r2
svn cp $SVNFLAGS README.txt@1 OTHER.txt
svn commit $SVNFLAGS -m "Copied readme from r1." # r3
svn mkdir $SVNFLAGS dir1
svn cp $SVNFLAGS OTHER.txt dir1/OTHER.txt
svn commit $SVNFLAGS -m "Copied readme again." # r4
svn rm $SVNFLAGS dir1
svn commit $SVNFLAGS -m "Deleting directory." # r5
svn cp $SVNFLAGS dir1@4 otherdir1
svn commit $SVNFLAGS -m "Copied directory from r4." # r6
svn cp $SVNFLAGS otherdir1/OTHER.txt otherdir1/NEWNAME.txt
svn commit $SVNFLAGS -m "Renamed file again." # r7
cd $SCRIPT_DIR
./export.sh
cd $CURDIR
|
cstroe/svndumpapi
|
src/test/resources/dumps/scripts/svn_copy_and_delete.sh
|
Shell
|
agpl-3.0
| 861 |
#! /bin/bash
# cycle various about setting and test success
# environment :
# STORE_ACCOUNT : account name
# STORE_URL : host http url
# STORE_REPOSITORY : individual repository
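# STORE_TOKEN : authentication token
# example (hypothetical values):
# export STORE_URL=http://dydra.example STORE_ACCOUNT=test STORE_REPOSITORY=mem-rdf STORE_TOKEN=secret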
curl -w "%{http_code}\n" -f -s -X PUT \
-H "Content-Type: application/x-www-form-urlencoded" \
--data-urlencode @- \
${STORE_URL}/accounts/${STORE_ACCOUNT}/repositories/${STORE_REPOSITORY}/about?auth_token=${STORE_TOKEN}<<EOF \
| fgrep -q "204"
_method=PUT&repository[summary]=three word summary&repository[license_url]=http://creativecommons.org/publicdomain/zero/1.0
EOF
curl -f -s -S -X GET\
-H "Accept: application/json" \
${STORE_URL}/accounts/${STORE_ACCOUNT}/repositories/${STORE_REPOSITORY}/about?auth_token=${STORE_TOKEN} \
| json_reformat -m | fgrep mem-rdf | fgrep commons | fgrep -q three
initialize_about | fgrep -q 204
curl -f -s -S -X GET \
-H "Accept: application/json" \
${STORE_URL}/accounts/${STORE_ACCOUNT}/repositories/${STORE_REPOSITORY}/about?auth_token=${STORE_TOKEN} \
| json_reformat -m \
| fgrep '"name":"mem-rdf"' \
| fgrep '"homepage":"http://example.org/test"' \
| fgrep '"summary":"a summary"' \
| fgrep '"description":"a description"' \
| fgrep -q '"license_url":"http://unlicense.org"'
|
dydra/http-api-tests
|
accounts-api/accounts/openrdf-sesame/repositories/mem-rdf/profile/PUT-form.sh
|
Shell
|
unlicense
| 1,261 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
cp ../../../bigtop_toolchain/bin/puppetize.sh .
docker build -t bigtop/puppet:opensuse-42.1 .
|
Altiscale/bigtop
|
docker/bigtop-puppet/opensuse-42.1/build.sh
|
Shell
|
apache-2.0
| 875 |
sudo dpkg -i trendz-1.7.0.deb
|
volodymyr-babak/thingsboard.github.io
|
docs/trendz/install/resources/1.7.0/trendz-ubuntu-installation.sh
|
Shell
|
apache-2.0
| 29 |
#!/bin/bash
set -e
function get_admin_key {
# No-op for static
log "static: does not generate the admin key, so we can not get it."
log "static: make it available with the help of your configuration management system."
log "static: ceph-ansible is a good candidate to deploy a containerized version of Ceph."
log "static: ceph-ansible will help you fetching the keys and push them on the right nodes."
log "static: if you're interested, please visit: https://github.com/ceph/ceph-ansible"
}
function get_mon_config {
# IPv4 is the default unless we specify it
IP_LEVEL=${1:-4}
if [ ! -e /etc/ceph/"${CLUSTER}".conf ]; then
local fsid
fsid=$(uuidgen)
if [[ "$CEPH_DAEMON" == demo ]]; then
fsid=$(uuidgen)
cat <<ENDHERE >/etc/ceph/"${CLUSTER}".conf
[global]
fsid = $fsid
mon initial members = ${MON_NAME}
mon host = ${MON_IP}
osd crush chooseleaf type = 0
osd journal size = 100
public network = ${CEPH_PUBLIC_NETWORK}
cluster network = ${CEPH_PUBLIC_NETWORK}
log file = /dev/null
osd pool default size = 1
ENDHERE
# For ext4
if [ "$(findmnt -n -o FSTYPE -T /var/lib/ceph)" = "ext4" ]; then
cat <<ENDHERE >> /etc/ceph/"${CLUSTER}".conf
osd max object name len = 256
osd max object namespace len = 64
ENDHERE
fi
else
cat <<ENDHERE >/etc/ceph/"${CLUSTER}".conf
[global]
fsid = $fsid
mon initial members = ${MON_NAME}
mon host = ${MON_IP}
public network = ${CEPH_PUBLIC_NETWORK}
cluster network = ${CEPH_CLUSTER_NETWORK}
osd journal size = ${OSD_JOURNAL_SIZE}
log file = /dev/null
ENDHERE
fi
if [ "$IP_LEVEL" -eq 6 ]; then
echo "ms bind ipv6 = true" >> /etc/ceph/"${CLUSTER}".conf
fi
else
# extract fsid from ceph.conf
fsid=$(grep "fsid" /etc/ceph/"${CLUSTER}".conf | awk '{print $NF}')
fi
CLI+=("--set-uid=0")
if [ ! -e "$ADMIN_KEYRING" ]; then
if [ -z "$ADMIN_SECRET" ]; then
# Automatically generate administrator key
CLI+=(--gen-key)
else
# Generate custom provided administrator key
CLI+=("--add-key=$ADMIN_SECRET")
fi
ceph-authtool "$ADMIN_KEYRING" --create-keyring -n client.admin "${CLI[@]}" --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *'
fi
if [ ! -e "$MON_KEYRING" ]; then
# Generate the mon. key
ceph-authtool "$MON_KEYRING" --create-keyring --gen-key -n mon. --cap mon 'allow *'
fi
if [ ! -e "$OSD_BOOTSTRAP_KEYRING" ]; then
# Generate the OSD bootstrap key
ceph-authtool "$OSD_BOOTSTRAP_KEYRING" --create-keyring --gen-key -n client.bootstrap-osd --cap mon 'allow profile bootstrap-osd'
fi
if [ ! -e "$MDS_BOOTSTRAP_KEYRING" ]; then
# Generate the MDS bootstrap key
ceph-authtool "$MDS_BOOTSTRAP_KEYRING" --create-keyring --gen-key -n client.bootstrap-mds --cap mon 'allow profile bootstrap-mds'
fi
if [ ! -e "$RGW_BOOTSTRAP_KEYRING" ]; then
# Generate the RGW bootstrap key
ceph-authtool "$RGW_BOOTSTRAP_KEYRING" --create-keyring --gen-key -n client.bootstrap-rgw --cap mon 'allow profile bootstrap-rgw'
fi
if [ ! -e "$RBD_MIRROR_BOOTSTRAP_KEYRING" ]; then
# Generate the RBD Mirror bootstrap key
ceph-authtool "$RBD_MIRROR_BOOTSTRAP_KEYRING" --create-keyring --gen-key -n client.bootstrap-rbd --cap mon 'allow profile bootstrap-rbd'
fi
# Apply proper permissions to the keys
chown "${CHOWN_OPT[@]}" ceph. "$MON_KEYRING" "$OSD_BOOTSTRAP_KEYRING" "$MDS_BOOTSTRAP_KEYRING" "$RGW_BOOTSTRAP_KEYRING" "$RBD_MIRROR_BOOTSTRAP_KEYRING"
if [ ! -e "$MONMAP" ]; then
if [ -e /etc/ceph/monmap ]; then
# Rename old monmap
mv /etc/ceph/monmap "$MONMAP"
else
# Generate initial monitor map
monmaptool --create --add "${MON_NAME}" "${MON_IP}:6789" --fsid "${fsid}" "$MONMAP"
fi
chown "${CHOWN_OPT[@]}" ceph. "$MONMAP"
fi
}
function get_config {
# No-op for static
log "static: does not generate config"
}
|
ceph/ceph-docker
|
ceph-releases/mimic/daemon/config.static.sh
|
Shell
|
apache-2.0
| 3,919 |
#!/bin/bash
# Copyright (c) 2016 Baidu, Inc. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
DIR="$( cd "$(dirname "$0")" ; pwd -P )"
cd $DIR
mkdir model
cd model
echo "Downloading ResNet models..."
for file in resnet_50.tar.gz resnet_101.tar.gz resnet_152.tar.gz mean_meta_224.tar.gz
do
wget http://paddlepaddle.bj.bcebos.com/model_zoo/imagenet/$file
tar -xvf $file
rm $file
done
echo "Done."
|
alvations/Paddle
|
demo/model_zoo/resnet/get_model.sh
|
Shell
|
apache-2.0
| 933 |
#!/bin/bash
#
# Publishes the coverage report to http://coveralls.io.
#
# Copyright 2015 Mohiva Organisation (license at mohiva dot com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
set -o nounset -o errexit
SCRIPTS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
echo ""
echo "Publish coverage report"
bash ${SCRIPTS_DIR}/sbt.sh coveralls
echo ""
echo "Report published"
echo ""
|
mohiva/swagger-codegen-play-scala
|
scripts/coveralls.sh
|
Shell
|
apache-2.0
| 894 |
#!/bin/sh
# $NetBSD: prepare-import.sh,v 1.1 2014/03/09 16:58:03 christos Exp $
# Copy the FreeBSD src/lib/elftoolchain directory contents to dist. Run
# this script and you're done. This does not add NetBSD RCSIDs; it just cleans
# existing ones to avoid conflicts in the future.
#
# lib/ is built as SUBDIR from lib/Makefile.
#
# Use the following template to import
# cvs import src/external/bsd/elftoolchain/dist FreeBSD FreeBSD-X-Y-Z
#
# don't forget to bump the lib/shlib_version if necessary
#
set -e
if [ -z "$1" ]
then
echo "$0: <distdir>" 1>&2
exit 1
fi
cleantags "$1"
|
execunix/vinos
|
external/bsd/elftoolchain/prepare-import.sh
|
Shell
|
apache-2.0
| 582 |
#!/usr/bin/env bash
#
# Copyright © 2012-2014 Cask Data, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# quick and dirty test suite to validate bash json key lookups
find "test" -type f -name "*.sh" ! -name '_expect*' | while read TESTSH
do
TESTJSON="${TESTSH%.sh}.json"
echo "--- running tests `basename $TESTSH` ---" >&2
./coopr_wrapper.sh $TESTJSON $TESTSH
done
|
caskdata/coopr-provisioner
|
lib/provisioner/worker/plugins/automators/shell_automator/lib/test.sh
|
Shell
|
apache-2.0
| 900 |
#!/bin/bash
plotkmerspectrum.py ../repeatresolutionpaper/counts-validationgenomedata/SRR000333.fastq.21 -g 25
plotkmerspectrum.py ../repeatresolutionpaper/counts-validationgenomedata/SRR000333.fastq.21 -g 26
cp ../repeatresolutionpaper/counts-validationgenomedata/SRR000333.fastq.21.26.pdf .
cp ../repeatresolutionpaper/counts-validationgenomedata/SRR000333.fastq.21.25.pdf .
|
MG-RAST/kmerspectrumanalyzer
|
img/test-25.sh
|
Shell
|
bsd-2-clause
| 377 |
#!/bin/sh
maj_min=1
maj_max=8
base=clang-format
format=""
#Redirect output to stderr.
exec 1>&2
#check if clang-format is installed
type "$base" >/dev/null 2>&1 && format="$base"
#if not, check all possible versions
#(i.e. clang-format-<$maj_min-$maj_max>.<0-9>)
if [ -z "$format" ]
then
for j in `seq $maj_max -1 $maj_min`
do
for i in `seq 0 9`
do
type "$base-$j.$i" >/dev/null 2>&1 && format="$base-$j.$i" && break
done
[ -z "$format" ] || break
done
fi
#no versions of clang-format are installed
if [ -z "$format" ]
then
echo "$base is not installed. Pre-commit hook will not be executed."
exit 0
fi
#do the formatting
for file in `find . -name '*.cpp' -o -name '*.h' -o -name '*.hpp'`
do
echo $file
"$format" -i "$file"
done
|
cogsys-tuebingen/csapex_core_plugins
|
.apply_clang_format.sh
|
Shell
|
bsd-3-clause
| 815 |
#!/bin/bash
echo -e "Initializing symbolic link between SimpleStorefront and public_html folders.";
cd /home/vagrant/
rm -rf public_html
ln -s SimpleStorefront public_html
cd /home/vagrant/public_html
echo -e "Setting up SSH.";
chmod 700 ~/.ssh/id_rsa*
eval $(ssh-agent -s)
ssh-add
echo -e "Cleaning scripts in bin.";
chmod +x ./bin/*.sh
perl -p -i -e 's/\r\n$/\n/g' ./bin/*.sh
echo -e "Running composer install and update and bower installs";
composer config --global discard-changes true
composer install
composer update
|
ethos71/LemonadeStand
|
bin/initialize.sh
|
Shell
|
mit
| 527 |
###########################################################################
# before_install.sh
# ---------------------
# Date : March 2016
# Copyright : (C) 2016 by Matthias Kuhn
# Email : matthias at opengis dot ch
###########################################################################
# #
# This program is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 2 of the License, or #
# (at your option) any later version. #
# #
###########################################################################
export DEBIAN_FRONTEND=noninteractive
pushd ${HOME}
curl -L https://github.com/opengisch/osgeo4travis/archive/qt4bin.tar.gz | tar -xzC /home/travis --strip-components=1
curl -L https://cmake.org/files/v3.5/cmake-3.5.0-Linux-x86_64.tar.gz | tar --strip-components=1 -zxC /home/travis/osgeo4travis
popd
pip install --user autopep8 nose2 pyyaml mock future
|
alexbruy/QGIS
|
ci/travis/linux/qt4/before_install.sh
|
Shell
|
gpl-2.0
| 1,240 |
#! /bin/sh
# Copyright (C) 2011-2014 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This test checks that dependent files are updated before including
# in the distribution. 'parse.c' depends on 'parse.y'. The latter is
# updated so that 'parse.c' should be rebuilt. Then we are running
# 'make' and 'make distdir' and check whether the version of 'parse.c'
# to be distributed is up to date.
# Please keep this in sync with sister test 'yaccvpath.sh'.
required='cc yacc'
. test-init.sh
cat >> configure.ac << 'END'
AC_PROG_CC
AC_PROG_YACC
AC_OUTPUT
END
cat > Makefile.am << 'END'
bin_PROGRAMS = foo
foo_SOURCES = parse.y foo.c
AM_YFLAGS = -d
END
# Original parser, with 'foobar'.
cat > parse.y << 'END'
%{
int yylex () { return 0; }
void yyerror (char *s) {}
%}
%token FOOBAR
%%
foobar : 'f' 'o' 'o' 'b' 'a' 'r' {};
END
cat > foo.c << 'END'
#include "parse.h"
int main () { return 0; }
END
$ACLOCAL
$AUTOCONF
$AUTOMAKE -a
$YACC -d parse.y
mv y.tab.c parse.c
mv y.tab.h parse.h
# Sanity checks.
grep foobar parse.c
grep FOOBAR parse.h
mkdir sub
cd sub
../configure
$sleep
# New parser, with 'fubar'.
cat > ../parse.y << 'END'
%{
int yylex () { return 0; }
void yyerror (char *s) {}
%}
%token FUBAR
%%
fubar : 'f' 'u' 'b' 'a' 'r' {};
END
$MAKE
$MAKE distdir
$FGREP fubar $distdir/parse.c
$FGREP FUBAR $distdir/parse.h
# Now check to make sure that 'make dist' will rebuild the parser.
$sleep
# New parser, with 'maude'.
cat > ../parse.y << 'END'
%{
int yylex () { return 0; }
void yyerror (char *s) {}
%}
%token MAUDE
%%
maude : 'm' 'a' 'u' 'd' 'e' {};
END
$MAKE distdir
$FGREP maude $distdir/parse.c
$FGREP MAUDE $distdir/parse.h
:
|
kuym/openocd
|
tools/automake-1.15/t/yacc-d-vpath.sh
|
Shell
|
gpl-2.0
| 2,259 |
#!/bin/sh
echo "static const char selfsignedCer[] = " > selfsigned.cer.h
cat selfsigned.cer | sed 's/^/"/' | sed 's/$/\\n"/' >> selfsigned.cer.h
echo ";" >> selfsigned.cer.h
echo "static const char selfsignedKey[] = " > selfsigned.key.h
cat selfsigned.key | sed 's/^/"/' | sed 's/$/\\n"/' >> selfsigned.key.h
echo ";" >> selfsigned.key.h
|
MoSync/MoSync
|
tools/makesis-4/src/makecertheader.sh
|
Shell
|
gpl-2.0
| 340 |
#!/bin/sh
# Copyright (C) 2014 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# Test parallel use of lvm commands and check locks aren't dropped
# RHBZ: https://bugzilla.redhat.com/show_bug.cgi?id=1049296
. lib/inittest
which mkfs.ext3 || skip
aux prepare_vg
lvcreate -L10 -n $lv1 $vg
lvcreate -l1 -n $lv2 $vg
mkfs.ext3 "$DM_DEV_DIR/$vg/$lv1"
# Slowdown PV for resized LV
aux delay_dev "$dev1" 20 20
lvresize -L-5 -r $vg/$lv1 &
# Let's wait till resize starts
sleep 2
lvremove -f $vg/$lv2
wait
aux enable_dev "$dev1"
# Check removed $lv2 does not reappear
not check lv_exists $vg $lv2
vgremove -ff $vg
|
vgmoose/lvm
|
test/shell/lock-parallel.sh
|
Shell
|
gpl-2.0
| 997 |
#!/bin/bash
. xsl-fo.sh
/cygdrive/c/Applications/fop/fop.bat -dpi 150 -fo cg3.fo -pdf cg3.pdf
|
TinoDidriksen/cg3
|
manual/pdf.sh
|
Shell
|
gpl-3.0
| 94 |
#!/bin/sh -ex
rm -rf bootstrap
mkdir bootstrap
cd bootstrap
curl -OL http://caml.inria.fr/pub/distrib/ocaml-4.01/ocaml-4.01.0.tar.gz
tar -zxvf ocaml-4.01.0.tar.gz
cd ocaml-4.01.0
./configure -prefix `pwd`/../ocaml
make world opt
make install
|
chambart/opam
|
shell/bootstrap-ocaml.sh
|
Shell
|
gpl-3.0
| 243 |
#!/bin/sh
# base16-shell (https://github.com/chriskempson/base16-shell)
# Base16 Shell template by Chris Kempson (http://chriskempson.com)
# Chalk scheme by Chris Kempson (http://chriskempson.com)
color00="15/15/15" # Base 00 - Black
color01="fb/9f/b1" # Base 08 - Red
color02="ac/c2/67" # Base 0B - Green
color03="dd/b2/6f" # Base 0A - Yellow
color04="6f/c2/ef" # Base 0D - Blue
color05="e1/a3/ee" # Base 0E - Magenta
color06="12/cf/c0" # Base 0C - Cyan
color07="d0/d0/d0" # Base 05 - White
color08="50/50/50" # Base 03 - Bright Black
color09=$color01 # Base 08 - Bright Red
color10=$color02 # Base 0B - Bright Green
color11=$color03 # Base 0A - Bright Yellow
color12=$color04 # Base 0D - Bright Blue
color13=$color05 # Base 0E - Bright Magenta
color14=$color06 # Base 0C - Bright Cyan
color15="f5/f5/f5" # Base 07 - Bright White
color16="ed/a9/87" # Base 09
color17="de/af/8f" # Base 0F
color18="20/20/20" # Base 01
color19="30/30/30" # Base 02
color20="b0/b0/b0" # Base 04
color21="e0/e0/e0" # Base 06
color_foreground="d0/d0/d0" # Base 05
color_background="15/15/15" # Base 00
if [ -n "$TMUX" ]; then
# Tell tmux to pass the escape sequences through
# (Source: http://permalink.gmane.org/gmane.comp.terminal-emulators.tmux.user/1324)
put_template() { printf '\033Ptmux;\033\033]4;%d;rgb:%s\033\033\\\033\\' $@; }
put_template_var() { printf '\033Ptmux;\033\033]%d;rgb:%s\033\033\\\033\\' $@; }
put_template_custom() { printf '\033Ptmux;\033\033]%s%s\033\033\\\033\\' $@; }
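# e.g. (illustrative): put_template 1 "fb/9f/b1" emits the OSC 4 color sequence
# \033]4;1;rgb:fb/9f/b1\033\\ wrapped in a tmux DCS passthrough so the outer
# terminal receives it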
elif [ "${TERM%%[-.]*}" = "screen" ]; then
# GNU screen (screen, screen-256color, screen-256color-bce)
put_template() { printf '\033P\033]4;%d;rgb:%s\007\033\\' $@; }
put_template_var() { printf '\033P\033]%d;rgb:%s\007\033\\' $@; }
put_template_custom() { printf '\033P\033]%s%s\007\033\\' $@; }
elif [ "${TERM%%-*}" = "linux" ]; then
put_template() { [ $1 -lt 16 ] && printf "\e]P%x%s" $1 $(echo $2 | sed 's/\///g'); }
put_template_var() { true; }
put_template_custom() { true; }
else
put_template() { printf '\033]4;%d;rgb:%s\033\\' $@; }
put_template_var() { printf '\033]%d;rgb:%s\033\\' $@; }
put_template_custom() { printf '\033]%s%s\033\\' $@; }
fi
# 16 color space
put_template 0 $color00
put_template 1 $color01
put_template 2 $color02
put_template 3 $color03
put_template 4 $color04
put_template 5 $color05
put_template 6 $color06
put_template 7 $color07
put_template 8 $color08
put_template 9 $color09
put_template 10 $color10
put_template 11 $color11
put_template 12 $color12
put_template 13 $color13
put_template 14 $color14
put_template 15 $color15
# 256 color space
put_template 16 $color16
put_template 17 $color17
put_template 18 $color18
put_template 19 $color19
put_template 20 $color20
put_template 21 $color21
# foreground / background / cursor color
if [ -n "$ITERM_SESSION_ID" ]; then
# iTerm2 proprietary escape codes
put_template_custom Pg d0d0d0 # foreground
put_template_custom Ph 151515 # background
put_template_custom Pi d0d0d0 # bold color
put_template_custom Pj 303030 # selection color
put_template_custom Pk d0d0d0 # selected text color
put_template_custom Pl d0d0d0 # cursor
put_template_custom Pm 151515 # cursor text
else
put_template_var 10 $color_foreground
if [ "$BASE16_SHELL_SET_BACKGROUND" != false ]; then
put_template_var 11 $color_background
if [ "${TERM%%-*}" = "rxvt" ]; then
put_template_var 708 $color_background # internal border (rxvt)
fi
fi
put_template_custom 12 ";7" # cursor (reverse video)
fi
# clean up
unset -f put_template
unset -f put_template_var
unset -f put_template_custom
unset color00
unset color01
unset color02
unset color03
unset color04
unset color05
unset color06
unset color07
unset color08
unset color09
unset color10
unset color11
unset color12
unset color13
unset color14
unset color15
unset color16
unset color17
unset color18
unset color19
unset color20
unset color21
unset color_foreground
unset color_background
|
longshorej/dotfiles
|
src/os/linux/.config/scripts/base16-chalk.sh
|
Shell
|
gpl-3.0
| 3,958 |
#!/bin/bash
## note: you have to be in the dir with the build.xml to run this
cmd="ant -DshortHost=rc remote-int-test"
echo $cmd
eval $cmd
|
esdc-esac-esa-int/caom2db
|
caom2-repo-server/test/scripts/int-test-rc.sh
|
Shell
|
agpl-3.0
| 146 |
#!/bin/bash
set -e -o pipefail
name=$(basename $0 .sh)
result=$(mktemp ${name}.out.XXXXXX)
echo "result file: $result"
stderr=$(mktemp ${name}.err.XXXXXX)
echo "stderr file: $stderr"
$OSCAP oval eval --results $result --variables $srcdir/external_variables.xml $srcdir/$name.oval.xml 2> $stderr
[ ! -s $stderr ] && rm $stderr
[ -s $result ]
assert_exists 10 '/oval_results/oval_definitions/variables/external_variable'
assert_exists 1 '/oval_results/oval_definitions/variables/external_variable[@id="oval:x:var:1"]'
assert_exists 1 '/oval_results/oval_definitions/variables/external_variable[@id="oval:x:var:1"]/possible_restriction'
assert_exists 1 '/oval_results/oval_definitions/variables/external_variable[@id="oval:x:var:1"]/possible_restriction[@hint="hint"]'
assert_exists 1 '/oval_results/oval_definitions/variables/external_variable[@id="oval:x:var:1"]/possible_restriction/restriction'
assert_exists 1 '/oval_results/oval_definitions/variables/external_variable[@id="oval:x:var:1"]/possible_restriction/restriction[@operation="pattern match"]'
assert_exists 1 '/oval_results/oval_definitions/variables/external_variable[@id="oval:x:var:1"]/possible_restriction/restriction[text()="^[0-9]{3}-[0-9]{3}-[0-9]{4}$"]'
assert_exists 1 '/oval_results/oval_definitions/variables/external_variable[@id="oval:x:var:2"]'
assert_exists 2 '/oval_results/oval_definitions/variables/external_variable[@id="oval:x:var:2"]/possible_restriction'
assert_exists 2 '/oval_results/oval_definitions/variables/external_variable[@id="oval:x:var:2"]/possible_restriction/restriction'
assert_exists 2 '/oval_results/oval_definitions/variables/external_variable[@id="oval:x:var:2"]/possible_restriction/restriction[@operation="pattern match"]'
assert_exists 1 '/oval_results/oval_definitions/variables/external_variable[@id="oval:x:var:2"]/possible_restriction[@hint="This restricts the variable value(s) to the 10 digit telephone number format xxx-xxx-xxxx"]'
assert_exists 1 '/oval_results/oval_definitions/variables/external_variable[@id="oval:x:var:2"]/possible_restriction[@hint="This restricts the variable value(s) to the 1 plus 10 digit telephone number format x-xxx-xxx-xxxx"]'
assert_exists 1 '/oval_results/oval_definitions/variables/external_variable[@id="oval:x:var:2"]/possible_restriction/restriction[text()="^[0-9]{3}-[0-9]{3}-[0-9]{4}$"]'
assert_exists 1 '/oval_results/oval_definitions/variables/external_variable[@id="oval:x:var:2"]/possible_restriction/restriction[text()="^1-[0-9]{3}-[0-9]{3}-[0-9]{4}$"]'
assert_exists 1 '/oval_results/oval_definitions/variables/external_variable[@id="oval:x:var:3"]'
assert_exists 1 '/oval_results/oval_definitions/variables/external_variable[@id="oval:x:var:3"]/possible_value'
assert_exists 1 '/oval_results/oval_definitions/variables/external_variable[@id="oval:x:var:3"]/possible_value[text()="0"]'
assert_exists 1 '/oval_results/oval_definitions/variables/external_variable[@id="oval:x:var:4"]'
assert_exists 2 '/oval_results/oval_definitions/variables/external_variable[@id="oval:x:var:4"]/possible_value'
assert_exists 1 '/oval_results/oval_definitions/variables/external_variable[@id="oval:x:var:4"]/possible_value[@hint="This restricts the variable value(s) to 0."]'
assert_exists 1 '/oval_results/oval_definitions/variables/external_variable[@id="oval:x:var:4"]/possible_value[text()="0"]'
assert_exists 1 '/oval_results/oval_definitions/variables/external_variable[@id="oval:x:var:4"]/possible_value[text()="1"]'
assert_exists 1 '/oval_results/oval_definitions/variables/external_variable[@id="oval:x:var:4"]/possible_value[@hint="This restricts the variable value(s) to 1."]'
CO='/oval_results/results/system/oval_system_characteristics/collected_objects'
assert_exists 10 $CO'/object'
assert_exists 1 $CO'/object[@id="oval:x:obj:1"]'
assert_exists 1 $CO'/object[@id="oval:x:obj:1"][@flag="does not exist"]'
assert_exists 1 $CO'/object[@id="oval:x:obj:1"]/message'
assert_exists 1 $CO'/object[@id="oval:x:obj:1"]/message[text()="Referenced variable has no values (oval:x:var:1)."]'
assert_exists 1 $CO'/object[@id="oval:x:obj:2"]'
assert_exists 1 $CO'/object[@id="oval:x:obj:2"]/variable_value'
assert_exists 1 $CO'/object[@id="oval:x:obj:2"]/variable_value[@variable_id="oval:x:var:2"]'
assert_exists 1 $CO'/object[@id="oval:x:obj:2"]/variable_value[text()="123-456-7890"]'
assert_exists 1 $CO'/object[@id="oval:x:obj:2"]/reference'
assert_exists 1 $CO'/object[@id="oval:x:obj:3"]'
assert_exists 1 $CO'/object[@id="oval:x:obj:3"][@flag="does not exist"]'
assert_exists 1 $CO'/object[@id="oval:x:obj:3"]/message'
assert_exists 1 $CO'/object[@id="oval:x:obj:3"]/message[text()="Referenced variable has no values (oval:x:var:3)."]'
assert_exists 1 $CO'/object[@id="oval:x:obj:4"]'
assert_exists 1 $CO'/object[@id="oval:x:obj:4"]/variable_value'
assert_exists 1 $CO'/object[@id="oval:x:obj:4"]/variable_value[@variable_id="oval:x:var:4"]'
assert_exists 1 $CO'/object[@id="oval:x:obj:4"]/variable_value[text()="1"]'
assert_exists 1 $CO'/object[@id="oval:x:obj:4"]/reference'
assert_exists 1 $CO'/object[@id="oval:x:obj:5"]'
assert_exists 1 $CO'/object[@id="oval:x:obj:5"]/variable_value'
assert_exists 1 $CO'/object[@id="oval:x:obj:5"]/variable_value[@variable_id="oval:x:var:5"]'
assert_exists 1 $CO'/object[@id="oval:x:obj:5"]/variable_value[text()="13"]'
assert_exists 1 $CO'/object[@id="oval:x:obj:5"]/reference'
assert_exists 1 $CO'/object[@id="oval:x:obj:6"]'
assert_exists 1 $CO'/object[@id="oval:x:obj:6"][@flag="does not exist"]'
assert_exists 1 $CO'/object[@id="oval:x:obj:6"]/message'
assert_exists 1 $CO'/object[@id="oval:x:obj:6"]/message[text()="Referenced variable has no values (oval:x:var:6)."]'
assert_exists 1 $CO'/object[@id="oval:x:obj:7"]'
assert_exists 1 $CO'/object[@id="oval:x:obj:7"]/variable_value'
assert_exists 1 $CO'/object[@id="oval:x:obj:7"]/variable_value[@variable_id="oval:x:var:7"]'
assert_exists 1 $CO'/object[@id="oval:x:obj:7"]/variable_value[text()="300"]'
assert_exists 1 $CO'/object[@id="oval:x:obj:7"]/reference'
assert_exists 1 $CO'/object[@id="oval:x:obj:8"]'
assert_exists 1 $CO'/object[@id="oval:x:obj:8"][@flag="does not exist"]'
assert_exists 1 $CO'/object[@id="oval:x:obj:8"]/message'
assert_exists 1 $CO'/object[@id="oval:x:obj:8"]/message[text()="Referenced variable has no values (oval:x:var:8)."]'
assert_exists 1 $CO'/object[@id="oval:x:obj:9"]'
assert_exists 1 $CO'/object[@id="oval:x:obj:9"][@flag="does not exist"]'
assert_exists 1 $CO'/object[@id="oval:x:obj:9"]/message'
assert_exists 1 $CO'/object[@id="oval:x:obj:9"]/message[text()="Referenced variable has no values (oval:x:var:9)."]'
assert_exists 1 $CO'/object[@id="oval:x:obj:10"]'
assert_exists 1 $CO'/object[@id="oval:x:obj:10"]/variable_value'
assert_exists 1 $CO'/object[@id="oval:x:obj:10"]/variable_value[@variable_id="oval:x:var:10"]'
assert_exists 1 $CO'/object[@id="oval:x:obj:10"]/variable_value[text()="17"]'
assert_exists 1 $CO'/object[@id="oval:x:obj:10"]/reference'
SD='/oval_results/results/system/oval_system_characteristics/system_data'
assert_exists 5 $SD'/ind-sys:variable_item'
assert_exists 1 $SD'/ind-sys:variable_item/ind-sys:var_ref[text()="oval:x:var:2"]'
assert_exists 1 $SD'/ind-sys:variable_item/ind-sys:value[text()="123-456-7890"]'
assert_exists 1 $SD'/ind-sys:variable_item/ind-sys:var_ref[text()="oval:x:var:4"]'
assert_exists 1 $SD'/ind-sys:variable_item/ind-sys:value[text()="1"]'
assert_exists 1 $SD'/ind-sys:variable_item/ind-sys:var_ref[text()="oval:x:var:5"]'
assert_exists 1 $SD'/ind-sys:variable_item/ind-sys:value[text()="13"]'
assert_exists 1 $SD'/ind-sys:variable_item/ind-sys:var_ref[text()="oval:x:var:7"]'
assert_exists 1 $SD'/ind-sys:variable_item/ind-sys:value[text()="300"]'
assert_exists 1 $SD'/ind-sys:variable_item/ind-sys:var_ref[text()="oval:x:var:10"]'
assert_exists 1 $SD'/ind-sys:variable_item/ind-sys:value[text()="17"]'
rm $result
|
openprivacy/openscap
|
tests/API/OVAL/unittests/test_external_variable.sh
|
Shell
|
lgpl-2.1
| 7,866 |
#!/bin/bash
#This is a quickly thrown together utility to build the message file that
# we send off to be translated.
#It will likely need to be updated each time it is used.
#The output is written to `pwd`/messages_$$.txt
if [ `uname` != "Linux" ]; then
echo "ERROR: this script is for Linux only."
exit 1
fi
function process() {
grep "public static final StringId " $file | perl -ne '$line = $_; chomp($line); $line =~ /new StringId\s*\(\s*(\d+)\s*,\s*(\".*\")\s*\);\s*$/; printf("%-6d = %s\n", $1, $2);' | sort -n
#End of inlined perl
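# e.g. (illustrative input/output): a source line such as
#   public static final StringId HELLO = new StringId(1001, "Hello {0}");
# is emitted, sorted by id, as:
#   1001   = "Hello {0}"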
if [ "x$?" != "x0" ]; then
echo "ERROR: error processing file=$file"
exit 3
fi
}
base="`echo $0 | xargs dirname | xargs dirname | xargs dirname`/src"
package=com/gemstone/gemfire/internal/i18n
output=`pwd`/messages_$$.txt
files="$base/$package/LocalizedStrings.java $base/$package/JGroupsStrings.java"
for file in $files; do
if [ ! -f $file ]; then
echo "ERROR: missing file: base=$base, file=$file, output=$output"
exit 2
fi
process >> $output
done
exit 0
|
SnappyDataInc/snappy-store
|
gemfirexd/tools/bin/i18nBuildMsgFile.sh
|
Shell
|
apache-2.0
| 1,036 |
#!/bin/bash
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A utility script to clean up the servers created by servers.sh
source servers.conf
sudo killall dnsmasq
sudo killall freeradius
sudo ip netns del ${NS}
|
zhiweicen/link022
|
demo/util/cleanup_servers.sh
|
Shell
|
apache-2.0
| 749 |
# npm install latest versions
for dir in */
do
cd "$dir"
echo $dir
npm install
echo ------------------------------
cd ..
done
|
mehulsbhatt/nscale
|
scripts/dev/npminstall.sh
|
Shell
|
artistic-2.0
| 146 |
#!/bin/bash
# From: https://circleci.com/docs/1.0/nightly-builds/
_project=$1
_branch=$2
_circle_token=$3
if [ "$_circle_token" == "" ]; then
echo "Skip triggering $_project"
exit 0
fi
trigger_build_url=https://circleci.com/api/v1.1/project/github/${_project}/tree/${_branch}?circle-token=${_circle_token}
post_data=$(cat <<EOF
{
"build_parameters": {
"TRIGGERED_BUILD": "true",
"ETA_BRANCH": "$4",
"ETA_TAG": "$5",
"ETA_PR": "$6"
}
}
EOF
)
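# Illustrative invocation (hypothetical project/branch/token values):
#   ./circleci-trigger.sh some-org/some-repo master "$CIRCLE_TOKEN" master "" ""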
curl \
--header "Accept: application/json" \
--header "Content-Type: application/json" \
--data "${post_data}" \
--request POST ${trigger_build_url}
|
pparkkin/eta
|
utils/scripts/circleci-trigger.sh
|
Shell
|
bsd-3-clause
| 619 |
#! /bin/bash
PAGES=(ethernet features wireless)
for page in ${PAGES[@]}; do
rm -f ${page}.html
pandoc -s --toc -w html --email-obfuscation=javascript -c header.css -o ${page}.html $page
done
|
iphitus/netcfg-updates
|
docs/make.sh
|
Shell
|
bsd-3-clause
| 197 |
#!/bin/sh
# Copyright (C) 2006 OpenWrt.org
# Copyright (C) 2006 Fokus Fraunhofer <[email protected]>
alias debug=${DEBUG:-:}
alias mount='busybox mount'
# newline
N="
"
_C=0
NO_EXPORT=1
LOAD_STATE=1
LIST_SEP=" "
hotplug_dev() {
env -i ACTION=$1 INTERFACE=$2 /sbin/hotplug-call net
}
append() {
local var="$1"
local value="$2"
local sep="${3:- }"
eval "export ${NO_EXPORT:+-n} -- \"$var=\${$var:+\${$var}\${value:+\$sep}}\$value\""
}
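# Illustrative use (hypothetical variable and values): build a semicolon-
# separated list one element at a time:
#   append PIDS "$pid" ";"   # PIDS grows like "123;456;789"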
list_contains() {
local var="$1"
local str="$2"
local val
eval "val=\" \${$var} \""
[ "${val%% $str *}" != "$val" ]
}
list_remove() {
local var="$1"
local remove="$2"
local val
eval "val=\" \${$var} \""
val1="${val%% $remove *}"
[ "$val1" = "$val" ] && return
val2="${val##* $remove }"
[ "$val2" = "$val" ] && return
val="${val1## } ${val2%% }"
val="${val%% }"
eval "export ${NO_EXPORT:+-n} -- \"$var=\$val\""
}
config_load() {
[ -n "$IPKG_INSTROOT" ] && return 0
# uci_load "$@"
}
reset_cb() {
config_cb() { return 0; }
option_cb() { return 0; }
list_cb() { return 0; }
}
reset_cb
package() {
return 0
}
config () {
local cfgtype="$1"
local name="$2"
export ${NO_EXPORT:+-n} CONFIG_NUM_SECTIONS=$(($CONFIG_NUM_SECTIONS + 1))
name="${name:-cfg$CONFIG_NUM_SECTIONS}"
append CONFIG_SECTIONS "$name"
[ -n "$NO_CALLBACK" ] || config_cb "$cfgtype" "$name"
export ${NO_EXPORT:+-n} CONFIG_SECTION="$name"
export ${NO_EXPORT:+-n} "CONFIG_${CONFIG_SECTION}_TYPE=$cfgtype"
}
option () {
local varname="$1"; shift
local value="$*"
export ${NO_EXPORT:+-n} "CONFIG_${CONFIG_SECTION}_${varname}=$value"
[ -n "$NO_CALLBACK" ] || option_cb "$varname" "$*"
}
list() {
local varname="$1"; shift
local value="$*"
local len
config_get len "$CONFIG_SECTION" "${varname}_LENGTH"
len="$((${len:-0} + 1))"
config_set "$CONFIG_SECTION" "${varname}_ITEM$len" "$value"
config_set "$CONFIG_SECTION" "${varname}_LENGTH" "$len"
append "CONFIG_${CONFIG_SECTION}_${varname}" "$value" "$LIST_SEP"
list_cb "$varname" "$*"
}
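# Illustrative config snippet (hypothetical section/option names) as consumed
# by the config/option/list callbacks above:
#   config 'interface' 'lan'
#   option 'proto' 'static'
#   list 'dns' '192.168.1.1'
#   list 'dns' '192.168.1.2'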
config_rename() {
local OLD="$1"
local NEW="$2"
local oldvar
local newvar
[ -n "$OLD" -a -n "$NEW" ] || return
for oldvar in `set | grep ^CONFIG_${OLD}_ | \
sed -e 's/\(.*\)=.*$/\1/'` ; do
newvar="CONFIG_${NEW}_${oldvar##CONFIG_${OLD}_}"
eval "export ${NO_EXPORT:+-n} \"$newvar=\${$oldvar}\""
unset "$oldvar"
done
export ${NO_EXPORT:+-n} CONFIG_SECTIONS="$(echo " $CONFIG_SECTIONS " | sed -e "s, $OLD , $NEW ,")"
[ "$CONFIG_SECTION" = "$OLD" ] && export ${NO_EXPORT:+-n} CONFIG_SECTION="$NEW"
}
config_unset() {
config_set "$1" "$2" ""
}
config_clear() {
local SECTION="$1"
local oldvar
list_remove CONFIG_SECTIONS "$SECTION"
export ${NO_EXPORT:+-n} CONFIG_SECTIONS="${SECTION:+$CONFIG_SECTIONS}"
for oldvar in `set | grep ^CONFIG_${SECTION:+${SECTION}_} | \
sed -e 's/\(.*\)=.*$/\1/'` ; do
unset $oldvar
done
}
config_get() {
case "$3" in
"") eval "echo \"\${CONFIG_${1}_${2}}\"";;
*) eval "export ${NO_EXPORT:+-n} -- \"$1=\${CONFIG_${2}_${3}}\"";;
esac
}
# config_get_bool <variable> <section> <option> [<default>]
config_get_bool() {
local _tmp
config_get "_tmp" "$2" "$3"
case "$_tmp" in
1|on|true|enabled) export ${NO_EXPORT:+-n} "$1=1";;
0|off|false|disabled) export ${NO_EXPORT:+-n} "$1=0";;
*) eval "$1=$4";;
esac
}
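# Illustrative use (hypothetical section/option names): read a boolean option
# into 'enabled', defaulting to 0 when unset or unrecognized:
#   config_get_bool enabled lan dhcp_enabled 0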
config_set() {
local section="$1"
local option="$2"
local value="$3"
local old_section="$CONFIG_SECTION"
CONFIG_SECTION="$section"
option "$option" "$value"
CONFIG_SECTION="$old_section"
}
config_foreach() {
local function="$1"
[ "$#" -ge 1 ] && shift
local type="$1"
[ "$#" -ge 1 ] && shift
local section cfgtype
[ -z "$CONFIG_SECTIONS" ] && return 0
for section in ${CONFIG_SECTIONS}; do
config_get cfgtype "$section" TYPE
[ -n "$type" -a "x$cfgtype" != "x$type" ] && continue
eval "$function \"\$section\" \"\$@\""
done
}
config_list_foreach() {
[ "$#" -ge 3 ] || return 0
local section="$1"; shift
local option="$1"; shift
local function="$1"; shift
local val
local len
local c=1
config_get len "${section}" "${option}_LENGTH"
[ -z "$len" ] && return 0
while [ $c -le "$len" ]; do
config_get val "${section}" "${option}_ITEM$c"
eval "$function \"\$val\" \"$@\""
c="$(($c + 1))"
done
}
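# Illustrative use (hypothetical names): invoke a handler for every 'dns' list
# entry of section 'lan':
#   print_dns() { echo "dns server: $1"; }
#   config_list_foreach lan dns print_dns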
load_modules() {
[ -d /etc/modules.d ] && {
cd /etc/modules.d
sed 's/^[^#]/insmod &/' $* | ash 2>&- || :
}
}
include() {
local file
for file in $(ls $1/*.sh 2>/dev/null); do
. $file
done
}
find_mtd_part() {
local PART="$(grep "\"$1\"" /proc/mtd | awk -F: '{print $1}')"
local PREFIX=/dev/mtdblock
PART="${PART##mtd}"
[ -d /dev/mtdblock ] && PREFIX=/dev/mtdblock/
echo "${PART:+$PREFIX$PART}"
}
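# Illustrative result (hypothetical /proc/mtd line 'mtd3: 00800000 00020000 "rootfs"'):
#   find_mtd_part rootfs   # echoes /dev/mtdblock3, or /dev/mtdblock/3 when a
#   devfs-style /dev/mtdblock/ directory exists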
strtok() { # <string> { <variable> [<separator>] ... }
local tmp
local val="$1"
local count=0
shift
while [ $# -gt 1 ]; do
tmp="${val%%$2*}"
[ "$tmp" = "$val" ] && break
val="${val#$tmp$2}"
export ${NO_EXPORT:+-n} "$1=$tmp"; count=$((count+1))
shift 2
done
if [ $# -gt 0 -a -n "$val" ]; then
export ${NO_EXPORT:+-n} "$1=$val"; count=$((count+1))
fi
return $count
}
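# Illustrative use (hypothetical values): split host:port into two variables:
#   strtok "192.168.1.1:8080" host ":" port
# leaves host=192.168.1.1 and port=8080; the return status is the token count (2)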
jffs2_mark_erase() {
local part="$(find_mtd_part "$1")"
[ -z "$part" ] && {
echo Partition not found.
return 1
}
echo -e "\xde\xad\xc0\xde" | mtd -qq write - "$1"
}
#uci_apply_defaults() {(
# cd /etc/uci-defaults || return 0
# files="$(ls)"
# [ -z "$files" ] && return 0
# mkdir -p /tmp/.uci
# for file in $files; do
# ( . "./$(basename $file)" ) && rm -f "$file"
# done
# uci commit
#)}
#
#[ -z "$IPKG_INSTROOT" -a -f /lib/config/uci.sh ] && . /lib/config/uci.sh
if_timeout_check_lock() {
nsecs=$1
lock_file=$2
pid_be_check=$3
looptimes=0
#wait for up to $nsecs one-second rounds
while [ $looptimes -lt $nsecs ];
do
sleep 1
LOCK_STAT=$(ps | sed '/grep/d' | grep "lock $lock_file" 2>&1 | wc -l)
[ ! -f "${lock_file}.pid" ] && {
#nothing to wait, leave
lock -u $lock_file
[ -f "${lock_file}.pid" ] && rm ${lock_file}.pid
return
}
WAIT_PID=$(cat ${lock_file}.pid)
[ $LOCK_STAT -gt 0 ] && {
[ ! -f ${lock_file}.pid -o ! -d /proc/$WAIT_PID ] && {
#process had gone, unlock and let our task keep go
lock -u $lock_file
[ -f "${lock_file}.pid" ] && rm ${lock_file}.pid
return
}
}
[ "$LOCK_STAT" = "0" ] && return
looptimes=$(($looptimes + 1))
done
#before we left, unlock for timeout
[ $LOCK_STAT -gt 0 ] && {
lock -u $lock_file
[ -f "${lock_file}.pid" ] && rm ${lock_file}.pid
}
}
if_serialize_lock() {
lock_file=$1
pid_be_check=$2
LOCK_STAT=$(ps | sed '/grep/d' | grep "lock $lock_file" 2>&1 | wc -l)
# If interface action is processing and serialization is locked
if [ $LOCK_STAT -gt 0 ]; then
if [ -f ${lock_file}.pid ]; then
# check if the process is still running
WAIT_PID=$(cat ${lock_file}.pid)
if [ -d /proc/$WAIT_PID ]; then
# the process is running; wait for up to 60 seconds
if_timeout_check_lock 60 $lock_file $pid_be_check &
[ -n "$pid_be_check" -a -d "/proc/${pid_be_check}" ] && {
# lock and wait
lock $lock_file
echo "$pid_be_check" > ${lock_file}.pid
if_timeout_check_lock 20 $lock_file $pid_be_check &
}
else
[ -n "$pid_be_check" -a -d "/proc/${pid_be_check}" ] && {
# if the process is not running, then the daemon we were
# waiting for ended abnormally, e.g. pppoe failing
# to get an IP.
# In this case, we just need to unlock and lock again
# lock -u $lock_file
# lock $lock_file
# So we reuse the lock
echo "$pid_be_check" > ${lock_file}.pid
if_timeout_check_lock 20 $lock_file $pid_be_check &
}
fi
else
[ -n "$pid_be_check" -a -d "/proc/${pid_be_check}" ] && {
# Basically here we should do
# lock -u ${lock_file}
# lock ${lock_file}
# Just leave it and replace the lock and been killed process
# So we reuse the lock
echo "$pid_be_check" > ${lock_file}.pid
if_timeout_check_lock 20 $lock_file $pid_be_check &
}
fi
else
[ -n "$pid_be_check" -a -d "/proc/${pid_be_check}" ] && {
# Just lock it and record the process to be killed
lock $lock_file
# if we are asked to wait something, then wait for timeout
# else, we just lock
echo "$pid_be_check" > ${lock_file}.pid
if_timeout_check_lock 20 $lock_file $pid_be_check &
}
fi
}
if_serialize_ulock() {
lock_file=$1
pid_be_check=$2
LOCK_STAT=$(ps | sed '/grep/d' | grep "lock $lock_file" 2>&1 | wc -l)
[ "$LOCK_STAT" -gt "0" ] && {
lock -u $lock_file
[ -f "${lock_file}.pid" ] && rm ${lock_file}.pid
}
}
if_serialize_check() {
lock_file=$1
pid_file=${lock_file}.pid
LOCK_STAT=$(ps | sed '/grep/d' | grep "lock $lock_file" 2>&1 | wc -l)
# we should check three things :
# 1. locked
# 2. waiting for some process
# 3. the process is alive
[ "$LOCK_STAT" -gt "0" -a -f "$pid_file" ] && {
pid_be_check="$(cat $pid_file)"
[ -d "/proc/${pid_be_check}" ] && {
lock -w $lock_file
return
}
}
[ "$LOCK_STAT" -gt "0" ] && lock -u $lock_file
[ -f "$pid_file" ] && rm $pid_file
}
#kill_and_retry <process_name> <max times> <when to start force kill>
kill_and_retry() {
local oldprocess=$1
local maxtimes=$2
local forcetimes=$3
kill $(pidof $oldprocess)
local count=0
while [ -n "$(pidof $oldprocess)" ]
do
count=$(($count +1))
[ "$count" -gt "$maxtimes" ] && {
return
} || {
[ "$count" -gt "$forcetimes" ] && {
kill -9 $(pidof $oldprocess)
}
}
sleep 1
done
}
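# Illustrative use (hypothetical process name): terminate udhcpc, escalating to
# SIGKILL after 5 one-second attempts and giving up after 10:
#   kill_and_retry udhcpc 10 5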
#killpid_and_retry <process_id> <max times> <when to start force kill>
killpid_and_retry() {
local oldpid=$1
local maxtimes=$2
local forcetimes=$3
kill $oldpid
local count=0
while [ -d "/proc/$oldpid" ]
do
count=$(($count +1))
[ "$count" -gt "$maxtimes" ] && {
return
} || {
[ "$count" -gt "$forcetimes" ] && {
kill -9 $oldpid
}
}
sleep 1
done
}
kill_and_wait() {
oldpid=$1
kill $oldpid
local count=0
while [ -d "/proc/$oldpid" ]
do
count=$(($count +1))
[ "$count" -gt "30" ] && {
err=1
return
}
sleep 1
done
}
# added by Murphy on 2011/12/09
# we want the system uptime with 1/100-second accuracy, so we read the
# first field of /proc/uptime and multiply by 100 by stripping the decimal point
#
get_system_time() {
now=$(cat /proc/uptime | tr -s " "| tr -d . | cut -d" " -f1)
echo $now
}
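# e.g. (illustrative): if /proc/uptime reads "1234.56 5678.90", get_system_time
# echoes 123456, i.e. centiseconds since boot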
get_3g_stats() {
local AC341U="$(cat /proc/bus/usb/devices | grep "Vendor=1199 ProdID=9057")"
local AC340U="$(cat /proc/bus/usb/devices | grep "Vendor=1199 ProdID=9051")"
[ -n "$AC341U" ] && {
ezpcom -s /etc/chatscripts/script.GobiSig > /tmp/conn.info
# LTE signal
let RSRP_dBm=$(grep "RSRP" /tmp/conn.info | sed 's/^.*(dBm): //g' | sed 's/TAC.*$//g')
# if 4G(X), try to find 3G signal
if [ -z "$RSRP_dBm" ]; then
let RSSI_dBm=$(grep "RSSI" /tmp/conn.info | sed 's/^.*(dBm): //g' | sed 's/ECIO.*$//g')
[ -z "$RSSI_dBm" ] && {
RSSI_dBm=-115
}
# dBm to Quality:
local RSSI=0
RSSI=$((($RSSI_dBm + 115) *20 / 9))
local SIG=0
[ -n "$RSSI" ] && {
[ $RSSI -lt 0 ] && SIG=0
[ $RSSI -ge 0 ] && [ $RSSI -lt 15 ] && SIG=0
[ $RSSI -ge 15 ] && [ $RSSI -lt 32 ] && SIG=1
[ $RSSI -ge 32 ] && [ $RSSI -lt 49 ] && SIG=2
[ $RSSI -ge 49 ] && [ $RSSI -lt 66 ] && SIG=3
[ $RSSI -ge 66 ] && [ $RSSI -le 83 ] && SIG=4
[ $RSSI -gt 83 ] && SIG=5
nvram replace attr wan_wwan_probe_rule $2 signal "$RSSI"
nvram replace attr wan_wwan_probe_rule $2 sig "$SIG"
}
else
# dBm to Quality:
local RSRP=0
RSRP=$((($RSRP_dBm * 217 / 100) + 256))
local SIG=0
[ -n "$RSRP" ] && {
[ $RSRP -lt 0 ] && SIG=0
[ $RSRP -ge 0 ] && [ $RSRP -lt 15 ] && SIG=0
[ $RSRP -ge 15 ] && [ $RSRP -lt 32 ] && SIG=1
[ $RSRP -ge 32 ] && [ $RSRP -lt 49 ] && SIG=2
[ $RSRP -ge 49 ] && [ $RSRP -lt 66 ] && SIG=3
[ $RSRP -ge 66 ] && [ $RSRP -le 83 ] && SIG=4
[ $RSRP -gt 83 ] && SIG=5
nvram replace attr wan_wwan_probe_rule $2 signal "$RSRP"
nvram replace attr wan_wwan_probe_rule $2 sig "$SIG"
}
fi
nvram replace attr wan_wwan_probe_rule $2 provider "Sprint"
nvram replace attr wan_status_rule $2 trycount 0
return
}
[ -n "$AC340U" ] && {
nvram replace attr wan_wwan_probe_rule $2 provider "AT&T"
nvram replace attr wan_status_rule $2 trycount 0
return
}
local UML295="$(cat /proc/bus/usb/devices | grep "Vendor=10a9 ProdID=6064")"
[ -n "$UML295" ] && {
curl http://192.168.32.2/condata > /tmp/conn.info
let LTE_RSSI_TO_PERCENT=$(grep "signal strength" /tmp/conn.info | sed 's/\(.*\)\(<percent>\)\(.*\)\(<\/percent>\)\(.*\)/\3/')
local LTE_SIG=0
[ -n "$LTE_RSSI_TO_PERCENT" ] && {
[ $LTE_RSSI_TO_PERCENT -lt 0 ] && LTE_SIG=0
[ $LTE_RSSI_TO_PERCENT -ge 0 ] && [ $LTE_RSSI_TO_PERCENT -lt 15 ] && LTE_SIG=0
[ $LTE_RSSI_TO_PERCENT -ge 15 ] && [ $LTE_RSSI_TO_PERCENT -lt 32 ] && LTE_SIG=1
[ $LTE_RSSI_TO_PERCENT -ge 32 ] && [ $LTE_RSSI_TO_PERCENT -lt 49 ] && LTE_SIG=2
[ $LTE_RSSI_TO_PERCENT -ge 49 ] && [ $LTE_RSSI_TO_PERCENT -lt 66 ] && LTE_SIG=3
[ $LTE_RSSI_TO_PERCENT -ge 66 ] && [ $LTE_RSSI_TO_PERCENT -le 83 ] && LTE_SIG=4
[ $LTE_RSSI_TO_PERCENT -gt 83 ] && LTE_SIG=5
nvram replace attr wan_wwan_probe_rule $2 signal "$LTE_RSSI_TO_PERCENT"
nvram replace attr wan_wwan_probe_rule $2 sig "$LTE_SIG"
}
local LTE_PROVIDER=$(awk '/<network><serving><name>/,/<\/name>/' /tmp/conn.info | sed 's/\(.*\)\(<network><serving><name>\)\(.*\)\(<\/name>\)\(.*\)/\3/')
nvram replace attr wan_wwan_probe_rule $2 provider "$LTE_PROVIDER"
nvram replace attr wan_status_rule $2 trycount 0
return
}
times=$(($times+1))
/usr/sbin/ezpcom stats -d /dev/$1 > /tmp/3g_stats
[ -s "/tmp/3g_stats" ] && {
IMEI=$(cat /tmp/3g_stats | sed -n '1p' | awk '{FS=":";print $2}' | tr -d " ")
# some datacards will return the IMEI value is ERROR, change it to UNKNOWN
echo $IMEI | grep -q "ERROR" && IMEI='UNKNOWN'
PROVIDER=$(cat /tmp/3g_stats | sed -n '2p' | awk '{FS="\"";print $2}' | tr -d "\"")
# sometimes, AT&T will be retrieved incorrectly as AT& or AT
# if AT& or AT retrieved, we will fix it to AT&T
echo $PROVIDER | grep -q "AT" && PROVIDER='AT&T'
RSSI=$(cat /tmp/3g_stats | sed -n '3p' | awk '{FS=":";print $2$3}' | awk '{FS=",";print $1}' | tr -d " " | tr -d "+CSQ")
FIRMWARE=$(cat /tmp/3g_stats | sed -n '4p' | awk '{FS=":";print $2}' | tr -d " ")
APN=$(cat /tmp/3g_stats | sed -n '5p' | awk '{FS="\"";print $4}')
let dBm=0
let RSSI_TO_PERCENT="NA"
[ -n "$RSSI" ] && {
let dBm=$RSSI*2-113
let RSSI_TO_PERCENT=$RSSI*100/31
[ $RSSI -ge 0 ] && [ $RSSI -lt 6 ] && SIG=1
[ $RSSI -ge 6 ] && [ $RSSI -lt 12 ] && SIG=2
[ $RSSI -ge 12 ] && [ $RSSI -lt 18 ] && SIG=3
[ $RSSI -ge 18 ] && [ $RSSI -lt 24 ] && SIG=4
[ $RSSI -ge 24 ] && [ $RSSI -le 31 ] && SIG=5
[ $RSSI -eq 99 ] && SIG=0 && RSSI_TO_PERCENT=0
}
[ "$RSSI_TO_PERCENT" = "NA" -o "$RSSI_TO_PERCENT" = "0" ] && \
[ "$times" -lt "5" ] && {
sleep 1
get_3g_stats $1 $2
return
}
nvram replace attr wan_wwan_probe_rule $2 signal "$RSSI_TO_PERCENT"
nvram replace attr wan_wwan_probe_rule $2 sig "$SIG"
nvram replace attr wan_wwan_probe_rule $2 imei "$IMEI"
nvram replace attr wan_wwan_probe_rule $2 provider "$PROVIDER"
nvram replace attr wan_wwan_probe_rule $2 firmware "$FIRMWARE"
nvram replace attr wan_wwan_probe_rule $2 apn "$APN"
nvram replace attr wan_status_rule $2 trycount 0
logger EZP_USR 3G Signal Strength [RSSI:$RSSI][\($dBm dBm\)]
return
}
}
ping_get_host(){
ping -c 1 -w 1 $1 | grep PING | cut -d'(' -f2 | cut -d')' -f1
}
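# Illustrative use (assumes busybox-style ping output): for a reply line like
# "PING example.org (93.184.216.34): 56 data bytes",
#   ping_get_host example.org   # echoes 93.184.216.34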
togo_find(){
local MAC=$1
local PROTO=$2
local TOGO_NUM=$3
local i=0
local NUM=-1
while [ $i -lt $TOGO_NUM ]; do
[ "$(nvram show togo_rule $i mac)" = "$MAC" -a \
"$(nvram show togo_rule $i proto)" = "$PROTO" ] && {
NUM=$i
break;
}
i=$(($i+1))
done
echo $NUM
}
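# Illustrative use (hypothetical values): look up a rule index, -1 when absent:
#   NUM=$(togo_find "00:11:22:33:44:55" http "$(nvram get togo_rule_num)")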
togo_add(){
local NAME=$1
local PROTO=$2
local IP=$3
local PORT=$4
local MAC=$5
local CONFIG=$6
local TOGO="$(nvram get togo_rule)"
local TOGO_NUM="$(nvram get togo_rule_num)"
local PIPE="|"
NUM=$(togo_find $MAC $PROTO $TOGO_NUM)
[ "$NUM" = "-1" ] && {
[ "$TOGO_NUM" = "0" ] && PIPE=""
nvram set togo_rule="${TOGO}${PIPE}1^$NAME^$PROTO^$IP^$PORT^$MAC^$CONFIG"
nvram set togo_rule_num=$(($TOGO_NUM+1))
} || {
nvram replace attr togo_rule $NUM status 1
nvram replace attr togo_rule $NUM name $NAME
nvram replace attr togo_rule $NUM proto $PROTO
nvram replace attr togo_rule $NUM ip $IP
nvram replace attr togo_rule $NUM port $PORT
nvram replace attr togo_rule $NUM mac $MAC
nvram replace attr togo_rule $NUM config $CONFIG
}
}
togo_del(){
local MAC=$1
local PROTO=$2
local TOGO_NUM="$(nvram get togo_rule_num)"
NUM=$(togo_find $MAC $PROTO $TOGO_NUM)
[ "$NUM" != "-1" ] && nvram replace attr togo_rule $NUM status 0
}
USBSTORAGE_SERVICE() {
sleep 1
[ -z "$(mount | grep /tmp/mnt)" ] && {
SERVICE_ACTION=stop
} || {
SERVICE_ACTION=start
}
for i in /etc/rc.d/U*; do
[ -x $i ] && /etc/rc.common $i $SERVICE_ACTION 2>&1
done
}
|
choushane/MaRa-a1a0a5aNaL
|
package/base-files/files/etc/functions.sh
|
Shell
|
gpl-2.0
| 19,110 |
#!/bin/sh -x
#
# minor helper script for the fwknop test suite in --enable-cores-pattern mode
#
DIR=/tmp/fwknop-cores
service apport stop
ulimit -c unlimited
mkdir -p $DIR
echo "$DIR/core.%e.%p.%h.%t" > /proc/sys/kernel/core_pattern
exit
|
akerl/fwknop
|
test/generate_cores.sh
|
Shell
|
gpl-2.0
| 238 |
#!/bin/sh
#This script will be called via mini X session on behalf of file owner, after
#installed in /etc/mini_x/session.d/. Any auto start jobs including X apps can
#be put here
# start hob here
export PSEUDO_PREFIX=/usr
export PSEUDO_LOCALSTATEDIR=/home/builder/pseudo
export PSEUDO_LIBDIR=/usr/lib/pseudo/lib64
export GIT_PROXY_COMMAND=/home/builder/poky/scripts/oe-git-proxy
#start pcmanfm in daemon mode to allow asynchronous launch
pcmanfm -d&
#register handlers for some file types
if [ ! -d /home/builder/.local/share/applications ]; then
mkdir -p /home/builder/.local/share/applications/
#register folders to open with PCManFM filemanager
xdg-mime default pcmanfm.desktop inode/directory
#register html links and files with epiphany
xdg-mime default epiphany.desktop x-scheme-handler/http
xdg-mime default epiphany.desktop x-scheme-handler/https
xdg-mime default epiphany.desktop text/html
#register text files with l3afpad text editor
xdg-mime default l3afpad.desktop text/plain
fi
cd /home/builder/poky
. ./oe-init-build-env
matchbox-terminal&
|
bkauler/oe-qky-src
|
sumo/openembedded-core/meta/recipes-graphics/builder/files/builder_session.sh
|
Shell
|
gpl-3.0
| 1,099 |
#!/bin/bash
# This script builds bits of text for HTML page, to load and display examples
ls ../ezhil_tests/*.n -c1 | cut -d'/' -f3 | sort -u > examples.txt
for i in `cat examples.txt`
do
echo '<option value="'"$i"'">'$i'</option>'
done
echo "we have a total of " `wc -l examples.txt` " to show the user on Ezhil website"
rm examples.txt
|
kracekumar/Ezhil-Lang
|
website/build_examples.sh
|
Shell
|
gpl-3.0
| 344 |
#!/bin/bash
rm -rf docs/api/; ringo-doc --file-urls -s lib/ -d docs/api/ -p package.json -n "Stick API"
|
waybarrios/worldmap
|
src/geonode-client/externals/stick/update-docs.sh
|
Shell
|
gpl-3.0
| 105 |
#!/bin/bash
# This script holds library functions for setting up the shell environment for OpenShift scripts
# os::util::environment::use_sudo updates $USE_SUDO to be 'true', so that later scripts choosing between
# execution using 'sudo' and execution without it choose to use 'sudo'
#
# Globals:
# None
# Arguments:
# None
# Returns:
# - export USE_SUDO
function os::util::environment::use_sudo() {
USE_SUDO=true
export USE_SUDO
}
readonly -f os::util::environment::use_sudo
# os::util::environment::setup_time_vars sets up environment variables that describe durations of time
# These variables can be used to specify times for other utility functions
#
# Globals:
# None
# Arguments:
# None
# Returns:
# - export TIME_MS
# - export TIME_SEC
# - export TIME_MIN
function os::util::environment::setup_time_vars() {
TIME_MS=1
export TIME_MS
TIME_SEC="$(( 1000 * ${TIME_MS} ))"
export TIME_SEC
TIME_MIN="$(( 60 * ${TIME_SEC} ))"
export TIME_MIN
}
readonly -f os::util::environment::setup_time_vars
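# Illustrative usage (hypothetical caller code): after setup_time_vars runs,
# durations can be composed from the exported units, e.g.:
#   os::util::environment::setup_time_vars
#   timeout="$(( 30 * TIME_SEC ))"   # 30 seconds, expressed in milliseconds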
# os::util::environment::setup_all_server_vars sets up all environment variables necessary to configure and start an OpenShift server
#
# Globals:
# - OS_ROOT
# - PATH
# - TMPDIR
# - LOG_DIR
# - ARTIFACT_DIR
# - KUBELET_SCHEME
# - KUBELET_BIND_HOST
# - KUBELET_HOST
# - KUBELET_PORT
# - BASETMPDIR
# - ETCD_PORT
# - ETCD_PEER_PORT
# - API_BIND_HOST
# - API_HOST
# - API_PORT
# - API_SCHEME
# - PUBLIC_MASTER_HOST
# - USE_IMAGES
# Arguments:
# - 1: the path under the root temporary directory for OpenShift where these subdirectories should be made
# Returns:
# - export PATH
# - export BASETMPDIR
# - export LOG_DIR
# - export VOLUME_DIR
# - export ARTIFACT_DIR
# - export FAKE_HOME_DIR
# - export HOME
# - export KUBELET_SCHEME
# - export KUBELET_BIND_HOST
# - export KUBELET_HOST
# - export KUBELET_PORT
# - export ETCD_PORT
# - export ETCD_PEER_PORT
# - export ETCD_DATA_DIR
# - export API_BIND_HOST
# - export API_HOST
# - export API_PORT
# - export API_SCHEME
# - export SERVER_CONFIG_DIR
# - export MASTER_CONFIG_DIR
# - export NODE_CONFIG_DIR
# - export USE_IMAGES
# - export TAG
function os::util::environment::setup_all_server_vars() {
local subtempdir=$1
os::util::environment::setup_tmpdir_vars "${subtempdir}"
os::util::environment::setup_kubelet_vars
os::util::environment::setup_etcd_vars
os::util::environment::setup_server_vars
os::util::environment::setup_images_vars
}
readonly -f os::util::environment::setup_all_server_vars
# os::util::environment::update_path_var updates $PATH so that OpenShift binaries are available
#
# Globals:
# - OS_ROOT
# - PATH
# Arguments:
# None
# Returns:
# - export PATH
function os::util::environment::update_path_var() {
PATH="${OS_OUTPUT_BINPATH}/$(os::util::host_platform):${PATH}"
export PATH
}
readonly -f os::util::environment::update_path_var
# os::util::environment::setup_tmpdir_vars sets up temporary directory path variables
#
# Globals:
# - TMPDIR
# - LOG_DIR
# - ARTIFACT_DIR
# - USE_SUDO
# Arguments:
# - 1: the path under the root temporary directory for OpenShift where these subdirectories should be made
# Returns:
# - export BASETMPDIR
# - export LOG_DIR
# - export VOLUME_DIR
# - export ARTIFACT_DIR
# - export FAKE_HOME_DIR
# - export HOME
function os::util::environment::setup_tmpdir_vars() {
local sub_dir=$1
BASETMPDIR="${TMPDIR:-/tmp}/openshift/${sub_dir}"
export BASETMPDIR
LOG_DIR="${LOG_DIR:-${BASETMPDIR}/logs}"
export LOG_DIR
VOLUME_DIR="${BASETMPDIR}/volumes"
export VOLUME_DIR
ARTIFACT_DIR="${ARTIFACT_DIR:-${BASETMPDIR}/artifacts}"
export ARTIFACT_DIR
# change the location of $HOME so no one does anything naughty
FAKE_HOME_DIR="${BASETMPDIR}/openshift.local.home"
export FAKE_HOME_DIR
HOME="${FAKE_HOME_DIR}"
export HOME
# ensure that the directories are clean
for target in $( ${USE_SUDO:+sudo} findmnt --output TARGET --list ); do
if [[ "${target}" == "${BASETMPDIR}"* ]]; then
${USE_SUDO:+sudo} umount "${target}"
fi
done
for directory in "${BASETMPDIR}" "${LOG_DIR}" "${VOLUME_DIR}" "${ARTIFACT_DIR}" "${HOME}"; do
${USE_SUDO:+sudo} rm -rf "${directory}"
mkdir -p "${directory}"
done
}
readonly -f os::util::environment::setup_tmpdir_vars
# os::util::environment::setup_kubelet_vars sets up environment variables necessary for interacting with the kubelet
#
# Globals:
# - KUBELET_SCHEME
# - KUBELET_BIND_HOST
# - KUBELET_HOST
# - KUBELET_PORT
# Arguments:
# None
# Returns:
# - export KUBELET_SCHEME
# - export KUBELET_BIND_HOST
# - export KUBELET_HOST
# - export KUBELET_PORT
function os::util::environment::setup_kubelet_vars() {
KUBELET_SCHEME="${KUBELET_SCHEME:-https}"
export KUBELET_SCHEME
KUBELET_BIND_HOST="${KUBELET_BIND_HOST:-$(openshift start --print-ip || echo "127.0.0.1")}"
export KUBELET_BIND_HOST
KUBELET_HOST="${KUBELET_HOST:-${KUBELET_BIND_HOST}}"
export KUBELET_HOST
KUBELET_PORT="${KUBELET_PORT:-10250}"
export KUBELET_PORT
}
readonly -f os::util::environment::setup_kubelet_vars
# os::util::environment::setup_etcd_vars sets up environment variables necessary for interacting with etcd
#
# Globals:
# - BASETMPDIR
# - ETCD_HOST
# - ETCD_PORT
# - ETCD_PEER_PORT
# Arguments:
# None
# Returns:
# - export ETCD_HOST
# - export ETCD_PORT
# - export ETCD_PEER_PORT
# - export ETCD_DATA_DIR
function os::util::environment::setup_etcd_vars() {
ETCD_HOST="${ETCD_HOST:-127.0.0.1}"
export ETCD_HOST
ETCD_PORT="${ETCD_PORT:-4001}"
export ETCD_PORT
ETCD_PEER_PORT="${ETCD_PEER_PORT:-7001}"
export ETCD_PEER_PORT
ETCD_DATA_DIR="${BASETMPDIR}/etcd"
export ETCD_DATA_DIR
mkdir -p "${ETCD_DATA_DIR}"
}
readonly -f os::util::environment::setup_etcd_vars
# os::util::environment::setup_server_vars sets up environment variables necessary for interacting with the server
#
# Globals:
# - BASETMPDIR
# - KUBELET_HOST
# - API_BIND_HOST
# - API_HOST
# - API_PORT
# - API_SCHEME
# - PUBLIC_MASTER_HOST
# Arguments:
# None
# Returns:
# - export API_BIND_HOST
# - export API_HOST
# - export API_PORT
# - export API_SCHEME
# - export SERVER_CONFIG_DIR
# - export MASTER_CONFIG_DIR
# - export NODE_CONFIG_DIR
function os::util::environment::setup_server_vars() {
# turn on cache mutation detector every time we start a server
KUBE_CACHE_MUTATION_DETECTOR="${KUBE_CACHE_MUTATION_DETECTOR:-true}"
export KUBE_CACHE_MUTATION_DETECTOR
API_BIND_HOST="${API_BIND_HOST:-$(openshift start --print-ip || echo "127.0.0.1")}"
export API_BIND_HOST
API_HOST="${API_HOST:-${API_BIND_HOST}}"
export API_HOST
API_PORT="${API_PORT:-8443}"
export API_PORT
API_SCHEME="${API_SCHEME:-https}"
export API_SCHEME
MASTER_ADDR="${API_SCHEME}://${API_HOST}:${API_PORT}"
export MASTER_ADDR
PUBLIC_MASTER_HOST="${PUBLIC_MASTER_HOST:-${API_HOST}}"
export PUBLIC_MASTER_HOST
SERVER_CONFIG_DIR="${BASETMPDIR}/openshift.local.config"
export SERVER_CONFIG_DIR
MASTER_CONFIG_DIR="${SERVER_CONFIG_DIR}/master"
export MASTER_CONFIG_DIR
NODE_CONFIG_DIR="${SERVER_CONFIG_DIR}/node-${KUBELET_HOST}"
export NODE_CONFIG_DIR
mkdir -p "${SERVER_CONFIG_DIR}" "${MASTER_CONFIG_DIR}" "${NODE_CONFIG_DIR}"
}
readonly -f os::util::environment::setup_server_vars
# os::util::environment::setup_images_vars sets up environment variables necessary for interacting with release images
#
# Globals:
# - OS_ROOT
# - USE_IMAGES
# Arguments:
# None
# Returns:
# - export USE_IMAGES
# - export TAG
# - export MAX_IMAGES_BULK_IMPORTED_PER_REPOSITORY
function os::util::environment::setup_images_vars() {
# Use either the latest release built images, or latest.
if [[ -z "${USE_IMAGES-}" ]]; then
TAG='latest'
export TAG
USE_IMAGES="openshift/origin-\${component}:latest"
export USE_IMAGES
if [[ -e "${OS_ROOT}/_output/local/releases/.commit" ]]; then
TAG="$(cat "${OS_ROOT}/_output/local/releases/.commit")"
export TAG
USE_IMAGES="openshift/origin-\${component}:${TAG}"
export USE_IMAGES
fi
fi
export MAX_IMAGES_BULK_IMPORTED_PER_REPOSITORY="${MAX_IMAGES_BULK_IMPORTED_PER_REPOSITORY:-3}"
}
readonly -f os::util::environment::setup_images_vars
|
chmouel/origin
|
hack/lib/util/environment.sh
|
Shell
|
apache-2.0
| 8,444 |
#!/bin/bash -e
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# shellcheck disable=SC1090
source "$DIR/common.env.sh"
# shellcheck disable=SC1090
source "$DIR/../awsutil.sh"
# if $VERSION is not set, then we should not continue
if [ -z "${VERSION}" ]; then
echo "VERSION needs to be set. Exiting."
exit 1
else
echo "Found tag ${VERSION}, retrieving binaries from S3"
fi
# if AWS credentials are not set, then we should not continue
check_aws_creds
# get_bin <bucket> <remote> <dest>
function get_bin() {
mkdir -p "$(dirname "$3")"
aws_download_file "$2" "$3" "$1" "application/octet-stream"
chmod +x "$3"
}
#get_bin "$TECTONIC_BINARY_BUCKET" "build-artifacts/installer/$VERSION/bin/windows/installer.exe" "$INSTALLER_RELEASE_DIR/windows/installer.exe"
get_bin "$TECTONIC_BINARY_BUCKET" "build-artifacts/installer/$VERSION/bin/darwin/installer" "$INSTALLER_RELEASE_DIR/darwin/installer"
get_bin "$TECTONIC_BINARY_BUCKET" "build-artifacts/installer/$VERSION/bin/linux/installer" "$INSTALLER_RELEASE_DIR/linux/installer"
|
estroz/tectonic-installer
|
installer/scripts/release/get_installer_bins.sh
|
Shell
|
apache-2.0
| 1,067 |
#!/bin/bash
#Aqueduct - Compliance Remediation Content
#Copyright (C) 2011,2012 Vincent C. Passaro ([email protected])
#
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; either version 2
#of the License, or (at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor,
#Boston, MA 02110-1301, USA.
######################################################################
#By Tummy a.k.a Vincent C. Passaro #
#Vincent[.]Passaro[@]gmail[.]com #
#www.vincentpassaro.com #
######################################################################
#_____________________________________________________________________
#| Version | Change Information | Author | Date |
#|__________|_______________________|____________________|____________|
#| 1.0 | Initial Script | Vincent C. Passaro | 20-oct-2011|
#| | Creation | | |
#|__________|_______________________|____________________|____________|
#
#
# - Updated by Shannon Mitchell([email protected])
# on 14-jan-2012 to modify user check for users >= 500, fixed the dotfiles to
# be excluded and added the fix to the find. Fixed secondary gid check.
#######################DISA INFORMATION###############################
#Group ID (Vulid): V-22351
#Group Title: GEN001550
#Rule ID: SV-26453r1_rule
#Severity: CAT II
#Rule Version (STIG-ID): GEN001550
#Rule Title: All files and directories contained in user home directories
#must be group-owned by a group of which the home directory's owner is a member.
#
#Vulnerability Discussion: If a user's files are group-owned by a
#group of which the user is not a member, unintended users may be able
#to access them.
#
#Responsibility: System Administrator
#IAControls: ECLP-1
#
#Check Content:
#Check the contents of user home directories for files group-owned by a
#group of which the home directory's owner is not a member.
#1. List the user accounts.
# cut -d : -f 1 /etc/passwd
#2. For each user account, get a list of GIDs for files in the user's
#home directory.
# find ~username -printf %G\\n | sort | uniq
#3. Obtain the list of GIDs where the user is a member.
# id -G username
#4. Check the GID lists. If there are GIDs in the file list not present
#in the user list, this is a finding.
#Fix Text: Change the group of a file not group-owned by a group of
#which the home directory's owner is a member.
# chgrp <group with user as member> <file with bad group ownership>
#Document all changes.
#######################DISA INFORMATION###############################
#Global Variables#
PDI=GEN001550
#Start-Lockdown
DotFiles='( -name .cshrc
-o -name .login
-o -name .logout
-o -name .profile
-o -name .bash_profile
-o -name .bashrc
-o -name .env
-o -name .dtprofile
-o -name .dispatch
-o -name .emacs
-o -name .exrc )'
for UserName in `awk -F':' '!/nfsnobody/{if($3 >= 500) print $1}' /etc/passwd`
do
if [ `echo $UserName | cut -c1` != '+' ]
then
PwTest=`grep "^${UserName}:" /etc/passwd | cut -d: -f6`
PwHomeDir=${PwTest:-NOVALUE}
if [ "${PwHomeDir}" != "NOVALUE" -a "${PwHomeDir}" != " " ]
then
if [ -d ${PwHomeDir} ]
then
if [ ${PwHomeDir} = '/' ]
then
echo 'WARNING: Home directory for "'${UserName}'"' \
'("'${PwHomeDir}'") excluded from check.'
else
# The rules says 'all' files and directories so here we go
# -xdev keeps it on the same filesystem
# ! -fstype nfs: keep from changing nfs filesystem files ?
# ! ${DotFiles}: do not check the . user init files ?
# ! -gid ( -gid primary_gid -o -gid secondary_gid...
# makes sure the file is grp owned by either the primary
# or one of the secondary groups of the user.
PWGID=`grep "^${UserName}:" /etc/passwd | cut -d: -f4`
GIDLINE="! ( -gid ${PWGID}"
for CGID in `id -G ${UserName}`
do
if [ "$CGID" != "$PWGID" ]
then
GIDLINE="${GIDLINE} -o -gid ${CGID}"
fi
done
GIDLINE="$GIDLINE )"
find ${PwHomeDir} -xdev ${GIDLINE} ! -fstype nfs ! ${DotFiles} -exec chgrp ${PWGID} {} \;
fi
fi
fi
fi
done
|
quark-pat/CLIP
|
packages/aqueduct/aqueduct/compliance/Bash/STIG/rhel-5-beta/prod/GEN001550.sh
|
Shell
|
apache-2.0
| 5,022 |
#!/bin/bash
FN="RTCGA.miRNASeq_1.20.0.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.13/data/experiment/src/contrib/RTCGA.miRNASeq_1.20.0.tar.gz"
"https://bioarchive.galaxyproject.org/RTCGA.miRNASeq_1.20.0.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-rtcga.mirnaseq/bioconductor-rtcga.mirnaseq_1.20.0_src_all.tar.gz"
)
MD5="ad24ae391e1b6491b6828447b23f06e4"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
curl $URL > $TARBALL
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks.
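# (GNU md5sum -c expects check lines of the form "<md5>  <filename>",
# with two spaces between the hash and the file name.)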
if [[ $(uname -s) == "Linux" ]]; then
if md5sum -c <<<"$MD5  $TARBALL"; then
SUCCESS=1
break
fi
elif [[ $(uname -s) == "Darwin" ]]; then
if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
SUCCESS=1
break
fi
fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
|
phac-nml/bioconda-recipes
|
recipes/bioconductor-rtcga.mirnaseq/post-link.sh
|
Shell
|
mit
| 1,322 |
# Install/unInstall package files in LAMMPS
if (test $1 = 1) then
cp fix_append_atoms.cpp ..
cp fix_msst.cpp ..
cp fix_nphug.cpp ..
cp fix_wall_piston.cpp ..
cp fix_append_atoms.h ..
cp fix_msst.h ..
cp fix_nphug.h ..
cp fix_wall_piston.h ..
elif (test $1 = 0) then
rm -f ../fix_append_atoms.cpp
rm -f ../fix_msst.cpp
rm -f ../fix_nphug.cpp
rm -f ../fix_wall_piston.cpp
rm -f ../fix_append_atoms.h
rm -f ../fix_msst.h
rm -f ../fix_nphug.h
rm -f ../fix_wall_piston.h
fi
|
tm1249wk/WASHLIGGGHTS-2.3.7
|
src/SHOCK/Install.sh
|
Shell
|
gpl-2.0
| 508 |
#### This script is meant to be sourced by ltconfig.
# ltcf-gcj.sh - Create a GCJ compiler specific configuration
#
# Copyright (C) 1996-1999, 2000, 2001, 2003 Free Software Foundation, Inc.
# Originally by Gordon Matzigkeit <[email protected]>, 1996
#
# Original GCJ support by:
# Alexandre Oliva <[email protected]>
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# As a special exception to the GNU General Public License, if you
# distribute this file as part of a program that contains a
# configuration script generated by Autoconf, you may include it under
# the same distribution terms that you use for the rest of that program.
# Source file extension for Java test sources.
ac_ext=java
# Object file extension for compiled Java test sources.
objext=o
# Code to be used in simple compile tests
lt_simple_compile_test_code="class foo {}"
# Code to be used in simple link tests
lt_simple_link_test_code='public class conftest { public static void main(String[] argv) {}; }'
## Linker Characteristics
case $host_os in
cygwin* | mingw*)
# FIXME: the MSVC++ port hasn't been tested in a loooong time
# When not using gcc, we currently assume that we are using
# Microsoft Visual C++.
if test "$with_gcc" != yes; then
with_gnu_ld=no
fi
;;
esac
ld_shlibs=yes
if test "$with_gnu_ld" = yes; then
# If archive_cmds runs LD, not CC, wlarc should be empty
wlarc='${wl}'
# See if GNU ld supports shared libraries.
case $host_os in
aix3* | aix4* | aix5*)
# On AIX/PPC, the GNU linker is very broken
if test "$host_cpu" != ia64; then
ld_shlibs=no
cat <<EOF 1>&2
*** Warning: the GNU linker, at least up to release 2.9.1, is reported
*** to be unable to reliably create shared libraries on AIX.
*** Therefore, libtool is disabling shared libraries support. If you
*** really care for shared libraries, you may want to modify your PATH
*** so that a non-GNU linker is found, and then restart.
EOF
fi
;;
amigaos*)
archive_cmds='$rm $output_objdir/a2ixlibrary.data~$echo "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$echo "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$echo "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$echo "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)'
hardcode_libdir_flag_spec='-L$libdir'
hardcode_minus_L=yes
# Samuel A. Falvo II <[email protected]> reports
# that the semantics of dynamic libraries on AmigaOS, at least up
# to version 4, is to share data among multiple programs linked
# with the same dynamic library. Since this doesn't match the
# behavior of shared libraries on other platforms, we can use
# them.
ld_shlibs=no
;;
beos*)
if $LD --help 2>&1 | egrep ': supported targets:.* elf' > /dev/null; then
allow_undefined_flag=unsupported
# Joseph Beckenbach <[email protected]> says some releases of gcc
# support --undefined. This deserves some investigation. FIXME
archive_cmds='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
else
ld_shlibs=no
fi
;;
cygwin* | mingw*)
# hardcode_libdir_flag_spec is actually meaningless, as there is
# no search path for DLLs.
hardcode_libdir_flag_spec='-L$libdir'
allow_undefined_flag=unsupported
always_export_symbols=yes
extract_expsyms_cmds='test -f $output_objdir/impgen.c || \
sed -e "/^# \/\* impgen\.c starts here \*\//,/^# \/\* impgen.c ends here \*\// { s/^# //; p; }" -e d < $0 > $output_objdir/impgen.c~
test -f $output_objdir/impgen.exe || (cd $output_objdir && \
if test "x$BUILD_CC" != "x" ; then $BUILD_CC -o impgen impgen.c ; \
else $CC -o impgen impgen.c ; fi)~
$output_objdir/impgen $dir/$soroot > $output_objdir/$soname-def'
old_archive_from_expsyms_cmds='$DLLTOOL --as=$AS --dllname $soname --def $output_objdir/$soname-def --output-lib $output_objdir/$newlib'
# cygwin and mingw dlls have different entry points and sets of symbols
# to exclude.
# FIXME: what about values for MSVC?
dll_entry=__cygwin_dll_entry@12
dll_exclude_symbols=DllMain@12,_cygwin_dll_entry@12,_cygwin_noncygwin_dll_entry@12~
case $host_os in
mingw*)
# mingw values
dll_entry=_DllMainCRTStartup@12
dll_exclude_symbols=DllMain@12,DllMainCRTStartup@12,DllEntryPoint@12~
;;
esac
# mingw and cygwin differ, and it's simplest to just exclude the union
# of the two symbol sets.
dll_exclude_symbols=DllMain@12,_cygwin_dll_entry@12,_cygwin_noncygwin_dll_entry@12,DllMainCRTStartup@12,DllEntryPoint@12
# recent cygwin and mingw systems supply a stub DllMain which the user
# can override, but on older systems we have to supply one (in ltdll.c)
if test "x$lt_cv_need_dllmain" = "xyes"; then
ltdll_obj='$output_objdir/$soname-ltdll.'"$objext "
ltdll_cmds='test -f $output_objdir/$soname-ltdll.c || sed -e "/^# \/\* ltdll\.c starts here \*\//,/^# \/\* ltdll.c ends here \*\// { s/^# //; p; }" -e d < $0 > $output_objdir/$soname-ltdll.c~
test -f $output_objdir/$soname-ltdll.$objext || (cd $output_objdir && $CC -c $soname-ltdll.c)~'
else
ltdll_obj=
ltdll_cmds=
fi
# Extract the symbol export list from an `--export-all' def file,
# then regenerate the def file from the symbol export list, so that
# the compiled dll only exports the symbol export list.
# Be careful not to strip the DATA tag left by newer dlltools.
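# An illustrative $soname-def produced this way might look like:
#   EXPORTS
#   foo @ 1 ;
#   bar @ 2 DATA ;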
export_symbols_cmds="$ltdll_cmds"'
$DLLTOOL --export-all --exclude-symbols '$dll_exclude_symbols' --output-def $output_objdir/$soname-def '$ltdll_obj'$libobjs $convenience~
sed -e "1,/EXPORTS/d" -e "s/ @ [0-9]*//" -e "s/ *;.*$//" < $output_objdir/$soname-def > $export_symbols'
# If the export-symbols file already is a .def file (1st line
# is EXPORTS), use it as is.
# If DATA tags from a recent dlltool are present, honour them!
archive_expsym_cmds='if test "x`head -1 $export_symbols`" = xEXPORTS; then
cp $export_symbols $output_objdir/$soname-def;
else
echo EXPORTS > $output_objdir/$soname-def;
_lt_hint=1;
cat $export_symbols | while read symbol; do
set dummy \$symbol;
case \[$]# in
2) echo " \[$]2 @ \$_lt_hint ; " >> $output_objdir/$soname-def;;
*) echo " \[$]2 @ \$_lt_hint \[$]3 ; " >> $output_objdir/$soname-def;;
esac;
_lt_hint=`expr 1 + \$_lt_hint`;
done;
fi~
'"$ltdll_cmds"'
$CC -Wl,--base-file,$output_objdir/$soname-base '$lt_cv_cc_dll_switch' -Wl,-e,'$dll_entry' -o $output_objdir/$soname '$ltdll_obj'$libobjs $deplibs $compiler_flags~
$DLLTOOL --as=$AS --dllname $soname --exclude-symbols '$dll_exclude_symbols' --def $output_objdir/$soname-def --base-file $output_objdir/$soname-base --output-exp $output_objdir/$soname-exp~
$CC -Wl,--base-file,$output_objdir/$soname-base $output_objdir/$soname-exp '$lt_cv_cc_dll_switch' -Wl,-e,'$dll_entry' -o $output_objdir/$soname '$ltdll_obj'$libobjs $deplibs $compiler_flags~
$DLLTOOL --as=$AS --dllname $soname --exclude-symbols '$dll_exclude_symbols' --def $output_objdir/$soname-def --base-file $output_objdir/$soname-base --output-exp $output_objdir/$soname-exp --output-lib $output_objdir/$libname.dll.a~
$CC $output_objdir/$soname-exp '$lt_cv_cc_dll_switch' -Wl,-e,'$dll_entry' -o $output_objdir/$soname '$ltdll_obj'$libobjs $deplibs $compiler_flags'
;;
netbsd* | knetbsd*-gnu)
if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then
archive_cmds='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib'
wlarc=
else
archive_cmds='$CC -shared -nodefaultlibs $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
archive_expsym_cmds='$CC -shared -nodefaultlibs $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
fi
;;
solaris* | sysv5*)
if $LD -v 2>&1 | egrep 'BFD 2\.8' > /dev/null; then
ld_shlibs=no
cat <<EOF 1>&2
*** Warning: The releases 2.8.* of the GNU linker cannot reliably
*** create shared libraries on Solaris systems. Therefore, libtool
*** is disabling shared libraries support. We urge you to upgrade GNU
*** binutils to release 2.9.1 or newer. Another option is to modify
*** your PATH or compiler configuration so that the native linker is
*** used, and then restart.
EOF
elif $LD --help 2>&1 | egrep ': supported targets:.* elf' > /dev/null; then
archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
else
ld_shlibs=no
fi
;;
sunos4*)
archive_cmds='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags'
wlarc=
hardcode_direct=yes
hardcode_shlibpath_var=no
;;
*)
if $LD --help 2>&1 | egrep ': supported targets:.* elf' > /dev/null; then
archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
else
ld_shlibs=no
fi
;;
esac
if test "$ld_shlibs" = yes; then
runpath_var=LD_RUN_PATH
hardcode_libdir_flag_spec='${wl}--rpath ${wl}$libdir'
export_dynamic_flag_spec='${wl}--export-dynamic'
case $host_os in
cygwin* | mingw*)
# dlltool doesn't understand --whole-archive et al.
whole_archive_flag_spec=
;;
*)
# ancient GNU ld didn't support --whole-archive et al.
if $LD --help 2>&1 | egrep 'no-whole-archive' > /dev/null; then
whole_archive_flag_spec="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive'
else
whole_archive_flag_spec=
fi
;;
esac
fi
else
# PORTME fill in a description of your system's linker (not GNU ld)
case $host_os in
aix3*)
allow_undefined_flag=unsupported
always_export_symbols=yes
archive_expsym_cmds='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname'
# Note: this linker hardcodes the directories in LIBPATH if there
# are no directories specified by -L.
hardcode_minus_L=yes
if test "$with_gcc" = yes && test -z "$link_static_flag"; then
# Neither direct hardcoding nor static linking is supported with a
# broken collect2.
hardcode_direct=unsupported
fi
;;
aix4* | aix5*)
hardcode_direct=yes
hardcode_libdir_separator=':'
link_all_deplibs=yes
# When large executables or shared objects are built, AIX ld can
# have problems creating the table of contents. If linking a library
# or program results in "error TOC overflow" add -mminimal-toc to
# CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not
# enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS.
if test "$with_gcc" = yes; then
case $host_os in aix4.[012]|aix4.[012].*)
# We only want to do this on AIX 4.2 and lower, the check
# below for broken collect2 doesn't work under 4.3+
collect2name=`${CC} -print-prog-name=collect2`
if test -f "$collect2name" && \
strings "$collect2name" | grep resolve_lib_name >/dev/null
then
# We have reworked collect2
hardcode_direct=yes
else
# We have old collect2
hardcode_direct=unsupported
# It fails to find uninstalled libraries when the uninstalled
# path is not listed in the libpath. Setting hardcode_minus_L
# to unsupported forces relinking
hardcode_minus_L=yes
hardcode_libdir_flag_spec='-L$libdir'
hardcode_libdir_separator=
fi
esac
shared_flag='-shared'
else
# not using gcc
if test "$host_cpu" = ia64; then
shared_flag='${wl}-G'
else
shared_flag='${wl}-bM:SRE'
fi
fi
if test "$host_cpu" = ia64; then
# On IA64, the linker does run time linking by default, so we don't
# have to do anything special.
aix_use_runtimelinking=no
if test $with_gnu_ld = no; then
exp_sym_flag='-Bexport'
no_entry_flag=""
fi
else
# Test if we are trying to use run time linking, or normal AIX style linking.
# If -brtl is somewhere in LDFLAGS, we need to do run time linking.
aix_use_runtimelinking=no
for ld_flag in $LDFLAGS; do
if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl" ); then
aix_use_runtimelinking=yes
break
fi
done
exp_sym_flag='-bexport'
no_entry_flag='-bnoentry'
fi
# -bexpall does not export symbols beginning with underscore (_)
always_export_symbols=yes
if test "$aix_use_runtimelinking" = yes; then
# Warning - without using the other run time loading flags (-brtl), -berok will
# link without error, but may produce a broken library.
allow_undefined_flag=' ${wl}-berok'
hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:/usr/lib:/lib'
archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs $compiler_flags ${allow_undefined_flag} '"\${wl}$no_entry_flag \${wl}$exp_sym_flag:\$export_symbols"
else
if test "$host_cpu" = ia64; then
if test $with_gnu_ld = no; then
hardcode_libdir_flag_spec='${wl}-R $libdir:/usr/lib:/lib'
allow_undefined_flag="-z nodefs"
archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$no_entry_flag \${wl}$exp_sym_flag:\$export_symbols"
fi
else
allow_undefined_flag=' ${wl}-berok'
# -bexpall does not export symbols beginning with underscore (_)
always_export_symbols=yes
# Exported symbols can be pulled into shared objects from archives
whole_archive_flag_spec=' '
build_libtool_need_lc=yes
hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:/usr/lib:/lib'
# This is similar to how AIX traditionally builds its shared libraries.
archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs $compiler_flags ${wl}-bE:$export_symbols ${wl}-bnoentry${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname'
fi
fi
;;
amigaos*)
archive_cmds='$rm $output_objdir/a2ixlibrary.data~$echo "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$echo "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$echo "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$echo "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)'
hardcode_libdir_flag_spec='-L$libdir'
hardcode_minus_L=yes
# see comment about different semantics on the GNU ld section
ld_shlibs=no
;;
cygwin* | mingw*)
# When not using gcc, we currently assume that we are using
# Microsoft Visual C++.
# hardcode_libdir_flag_spec is actually meaningless, as there is
# no search path for DLLs.
hardcode_libdir_flag_spec=' '
allow_undefined_flag=unsupported
# Tell ltmain to make .lib files, not .a files.
libext=lib
# FIXME: Setting linknames here is a bad hack.
archive_cmds='$CC -o $lib $libobjs $compiler_flags `echo "$deplibs" | sed -e '\''s/ -lc$//'\''` -link -dll~linknames='
# The linker will automatically build a .lib file if we build a DLL.
old_archive_from_new_cmds='true'
# FIXME: Should let the user specify the lib program.
old_archive_cmds='lib /OUT:$oldlib$oldobjs$old_deplibs'
fix_srcfile_path='`cygpath -w "$srcfile"`'
;;
freebsd1*)
ld_shlibs=no
;;
# FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor
# support. Future versions do this automatically, but an explicit c++rt0.o
# does not break anything, and helps significantly (at the cost of a little
# extra space).
freebsd2.2*)
archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o'
hardcode_libdir_flag_spec='-R$libdir'
hardcode_direct=yes
hardcode_shlibpath_var=no
;;
# Unfortunately, older versions of FreeBSD 2 do not have this feature.
freebsd2*)
archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags'
hardcode_direct=yes
hardcode_minus_L=yes
hardcode_shlibpath_var=no
;;
# FreeBSD 3 and greater uses gcc -shared to do shared libraries.
freebsd* | kfreebsd*-gnu)
archive_cmds='$CC -shared -o $lib $libobjs $deplibs $compiler_flags'
hardcode_libdir_flag_spec='-R$libdir'
hardcode_direct=yes
hardcode_shlibpath_var=no
;;
hpux9* | hpux10* | hpux11*)
case $host_os in
hpux9*) archive_cmds='$rm $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' ;;
*) archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' ;;
esac
hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir'
hardcode_libdir_separator=:
hardcode_direct=yes
hardcode_minus_L=yes # Not in the search PATH, but as the default
# location of the library.
export_dynamic_flag_spec='${wl}-E'
;;
irix5* | irix6*)
if test "$with_gcc" = yes; then
archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${objdir}/so_locations -o $lib'
else
archive_cmds='$LD -shared $libobjs $deplibs $linker_flags -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${objdir}/so_locations -o $lib'
fi
hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
hardcode_libdir_separator=:
link_all_deplibs=yes
;;
netbsd* | knetbsd*-gnu)
if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then
archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out
else
archive_cmds='$LD -shared -nodefaultlibs -o $lib $libobjs $deplibs $linker_flags' # ELF
fi
hardcode_libdir_flag_spec='${wl}-R$libdir'
hardcode_direct=yes
hardcode_shlibpath_var=no
;;
openbsd*)
archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags'
hardcode_libdir_flag_spec='-R$libdir'
hardcode_direct=yes
hardcode_shlibpath_var=no
;;
os2*)
hardcode_libdir_flag_spec='-L$libdir'
hardcode_minus_L=yes
allow_undefined_flag=unsupported
archive_cmds='$echo "LIBRARY $libname INITINSTANCE" > $output_objdir/$libname.def~$echo "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~$echo DATA >> $output_objdir/$libname.def~$echo " SINGLE NONSHARED" >> $output_objdir/$libname.def~$echo EXPORTS >> $output_objdir/$libname.def~emxexp $libobjs >> $output_objdir/$libname.def~$CC -Zdll -Zcrtdll -o $lib $libobjs $deplibs $compiler_flags $output_objdir/$libname.def'
old_archive_from_new_cmds='emximp -o $output_objdir/$libname.a $output_objdir/$libname.def'
;;
osf3*)
if test "$with_gcc" = yes; then
allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*'
archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${objdir}/so_locations -o $lib'
else
allow_undefined_flag=' -expect_unresolved \*'
archive_cmds='$LD -shared${allow_undefined_flag} $libobjs $deplibs $linker_flags -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${objdir}/so_locations -o $lib'
fi
hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
hardcode_libdir_separator=:
;;
osf4* | osf5*) # as osf3* with the addition of -msym flag
if test "$with_gcc" = yes; then
allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*'
archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${objdir}/so_locations -o $lib'
else
allow_undefined_flag=' -expect_unresolved \*'
archive_cmds='$LD -shared${allow_undefined_flag} $libobjs $deplibs $linker_flags -msym -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${objdir}/so_locations -o $lib'
fi
hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
hardcode_libdir_separator=:
;;
sco3.2v5*)
archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
hardcode_shlibpath_var=no
runpath_var=LD_RUN_PATH
hardcode_runpath_var=yes
;;
solaris*)
no_undefined_flag=' ${wl}-z ${wl}defs'
archive_cmds='$CC -shared -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib'
archive_expsym_cmds='$echo "{ global:" > $lib.exp~cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~
$CC -shared -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$rm $lib.exp'
# Commands to make compiler produce verbose output that lists
# what "hidden" libraries, object files and flags are used when
# linking a shared library.
output_verbose_link_cmds="$CC -shared $CFLAGS -v conftest.$objext 2>&1 | egrep \"\-L\""
hardcode_libdir_flag_spec='${wl}-R $wl$libdir'
hardcode_shlibpath_var=no
case $host_os in
solaris2.[0-5] | solaris2.[0-5].*) ;;
*) # Supported since Solaris 2.6 (maybe 2.5.1?)
whole_archive_flag_spec='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract' ;;
esac
link_all_deplibs=yes
;;
sunos4*)
archive_cmds='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags'
hardcode_libdir_flag_spec='-L$libdir'
hardcode_direct=yes
hardcode_minus_L=yes
hardcode_shlibpath_var=no
;;
sysv4)
archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
runpath_var='LD_RUN_PATH'
hardcode_shlibpath_var=no
hardcode_direct=no #Motorola manual says yes, but my tests say they lie
;;
sysv4.3*)
archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
hardcode_shlibpath_var=no
export_dynamic_flag_spec='-Bexport'
;;
sysv5*)
no_undefined_flag=' -z text'
# $CC -shared without GNU ld will not create a library from C++
# object files and a static libstdc++, so better to avoid it for now
archive_cmds='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags'
archive_expsym_cmds='$echo "{ global:" > $lib.exp~cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~
$LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$rm $lib.exp'
hardcode_libdir_flag_spec=
hardcode_shlibpath_var=no
runpath_var='LD_RUN_PATH'
;;
uts4*)
archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
hardcode_libdir_flag_spec='-L$libdir'
hardcode_shlibpath_var=no
;;
dgux*)
archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
hardcode_libdir_flag_spec='-L$libdir'
hardcode_shlibpath_var=no
;;
sysv4*MP*)
if test -d /usr/nec; then
archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
hardcode_shlibpath_var=no
runpath_var=LD_RUN_PATH
hardcode_runpath_var=yes
ld_shlibs=yes
fi
;;
sysv4.2uw2*)
archive_cmds='$LD -G -o $lib $libobjs $deplibs $linker_flags'
hardcode_direct=yes
hardcode_minus_L=no
hardcode_shlibpath_var=no
hardcode_runpath_var=yes
runpath_var=LD_RUN_PATH
;;
sysv5uw7* | unixware7*)
no_undefined_flag='${wl}-z ${wl}text'
if test "$GCC" = yes; then
archive_cmds='$CC -shared ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags'
else
archive_cmds='$CC -G ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags'
fi
runpath_var='LD_RUN_PATH'
hardcode_shlibpath_var=no
;;
*)
ld_shlibs=no
;;
esac
fi
## Compiler Characteristics: PIC flags, static flags, etc
# We don't use cached values here since only the C compiler
# characteristics should be cached.
ac_cv_prog_cc_pic=
ac_cv_prog_cc_shlib=
ac_cv_prog_cc_wl=
ac_cv_prog_cc_static=
ac_cv_prog_cc_no_builtin=
ac_cv_prog_cc_can_build_shared=$can_build_shared
ac_cv_prog_cc_wl='-Wl,'
ac_cv_prog_cc_static='-static'
case $host_os in
beos* | irix5* | irix6* | osf3* | osf4* | osf5*)
# PIC is the default for these OSes.
;;
aix*)
# Below there is a dirty hack to force normal static linking with -ldl.
# The problem is that libdl is dynamically linked with both libc and
# libC (the AIX C++ library), which is obviously not included in the
# library list by gcc. This causes undefined symbols with the -static flag.
# This hack allows C programs to be linked with "-static -ldl", but
# we are not sure about C++ programs.
ac_cv_prog_cc_static="$ac_cv_prog_cc_static ${ac_cv_prog_cc_wl}-lC"
;;
*djgpp*)
# DJGPP does not support shared libraries at all
ac_cv_prog_cc_pic=
;;
cygwin* | mingw* | os2*)
# This hack is so that the source file can tell whether it is being
# built for inclusion in a dll (and should export symbols for example).
ac_cv_prog_cc_pic='-DDLL_EXPORT'
;;
amigaos*)
# FIXME: we need at least 68020 code to build shared libraries, but
# adding the `-m68020' flag to GCC prevents building anything better,
# like `-m68040'.
ac_cv_prog_cc_pic='-m68020 -resident32 -malways-restore-a4'
;;
sysv4*MP*)
if test -d /usr/nec; then
ac_cv_prog_cc_pic=-Kconform_pic
fi
;;
*)
ac_cv_prog_cc_pic='-fPIC'
;;
esac
# GCJ did not exist at the time when GCC did not yet implicitly link
# libc in, so libc never needs to be linked explicitly.
need_lc=no
# All existing releases of GCJ support `-c -o'.
lt_cv_compiler_c_o=yes
|
ZHAW-INES/rioxo-uClinux-dist
|
user/gdb/ltcf-gcj.sh
|
Shell
|
gpl-2.0
| 27,419 |
#!/bin/bash
echo "installing autossh..."
apt-get -y install autossh
echo "..DONE.."
exit
|
xtr4nge/module_meterpreter
|
includes/install.sh
|
Shell
|
gpl-3.0
| 91 |
#!/bin/sh
# Show that split -a works.
# Copyright (C) 2002-2014 Free Software Foundation, Inc.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
. "${srcdir=.}/tests/init.sh"; path_prepend_ ./src
print_ver_ split
a_z='a b c d e f g h i j k l m n o p q r s t u v w x y z'
# Generate a 27-byte file
printf %s $a_z 0 |tr -d ' ' > in || framework_failure_
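# 'in' now holds "abcdefghijklmnopqrstuvwxyz0" (26 letters plus "0" = 27 bytes).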
files=
for i in $a_z; do
files="${files}xa$i "
done
files="${files}xba"
for f in $files; do
printf "creating file '%s'"'\n' $f
done > exp || framework_failure_
echo split: output file suffixes exhausted \
> exp-too-short || framework_failure_
# This should fail.
split -b 1 -a 1 in 2> err && fail=1
test -f xa || fail=1
test -f xz || fail=1
test -f xaa && fail=1
test -f xaz && fail=1
rm -f x*
compare exp-too-short err || fail=1
# With a longer suffix, it must succeed.
split --verbose -b 1 -a 2 in > err || fail=1
compare exp err || fail=1
# Ensure that xbb is *not* created.
test -f xbb && fail=1
# Ensure that the 27 others files *were* created, and with expected contents.
n=1
for f in $files; do
expected_byte=$(cut -b $n in)
b=$(cat $f) || fail=1
test "$b" = "$expected_byte" || fail=1
n=$(expr $n + 1)
done
# Ensure that -a is independent of -[bCl]
split -a2 -b1000 < /dev/null || fail=1
split -a2 -l1000 < /dev/null || fail=1
split -a2 -C1000 < /dev/null || fail=1
# Ensure that -a fails early with a -n that is too large
rm -f x*
split -a2 -n1000 < /dev/null && fail=1
test -f xaa && fail=1
Exit $fail
|
houwentaoff/coreutils
|
tests/split/suffix-length.sh
|
Shell
|
gpl-3.0
| 2,076 |
#!/usr/bin/env bash
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# This script is intended to be run inside a docker container to provide a
# hermetic process. See release.sh for the expected invocation.
RELEASE_URL_PREFIX="https://storage.googleapis.com/tensorflow/libtensorflow"
TF_ECOSYSTEM_URL="https://github.com/tensorflow/ecosystem.git"
# By default we deploy to both ossrh and bintray. These two
# environment variables can be set to skip either repository.
DEPLOY_BINTRAY="${DEPLOY_BINTRAY:-true}"
DEPLOY_OSSRH="${DEPLOY_OSSRH:-true}"
IS_SNAPSHOT="false"
if [[ "${TF_VERSION}" == *"-SNAPSHOT" ]]; then
IS_SNAPSHOT="true"
# Bintray does not allow snapshots.
DEPLOY_BINTRAY="false"
fi
PROTOC_RELEASE_URL="https://github.com/google/protobuf/releases/download/v3.5.1/protoc-3.5.1-linux-x86_64.zip"
if [[ "${DEPLOY_BINTRAY}" != "true" && "${DEPLOY_OSSRH}" != "true" ]]; then
echo "Must deploy to at least one of Bintray or OSSRH" >&2
exit 2
fi
set -ex
clean() {
# Clean up any existing artifacts
# (though if run inside a clean docker container, there won't be any dirty
# artifacts lying around)
mvn -q clean
rm -rf libtensorflow_jni/src libtensorflow_jni/target libtensorflow_jni_gpu/src libtensorflow_jni_gpu/target \
libtensorflow/src libtensorflow/target tensorflow-android/target proto/src proto/target \
hadoop/src hadoop/target spark-connector/src spark-connector/target
}
update_version_in_pom() {
mvn versions:set -DnewVersion="${TF_VERSION}"
}
# Fetch a property from pom files for a given profile.
# Arguments:
# profile - name of the selected profile.
# property - name of the property to be retrieved.
# Output:
# Echo property value to stdout
mvn_property() {
local profile="$1"
local prop="$2"
mvn -q --non-recursive exec:exec -P "${profile}" \
-Dexec.executable='echo' \
-Dexec.args="\${${prop}}"
}
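# e.g. (as used by deploy_profile below):
#   url=$(mvn_property ossrh project.distributionManagement.repository.url)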
download_libtensorflow() {
if [[ "${IS_SNAPSHOT}" == "true" ]]; then
URL="http://ci.tensorflow.org/view/Nightly/job/nightly-libtensorflow/TYPE=cpu-slave/lastSuccessfulBuild/artifact/lib_package/libtensorflow-src.jar"
else
URL="${RELEASE_URL_PREFIX}/libtensorflow-src-${TF_VERSION}.jar"
fi
curl -L "${URL}" -o /tmp/src.jar
cd "${DIR}/libtensorflow"
jar -xvf /tmp/src.jar
rm -rf META-INF
cd "${DIR}"
}
# Fetch the android aar artifact from the CI build system, and update
# its associated pom file.
update_tensorflow_android() {
TARGET_DIR="${DIR}/tensorflow-android/target"
mkdir -p "${TARGET_DIR}"
python "${DIR}/tensorflow-android/update.py" \
--version "${TF_VERSION}" \
--template "${DIR}/tensorflow-android/pom-android.xml.template" \
--dir "${TARGET_DIR}"
}
download_libtensorflow_jni() {
NATIVE_DIR="${DIR}/libtensorflow_jni/src/main/resources/org/tensorflow/native"
mkdir -p "${NATIVE_DIR}"
cd "${NATIVE_DIR}"
mkdir linux-x86_64
mkdir windows-x86_64
mkdir darwin-x86_64
if [[ "${IS_SNAPSHOT}" == "true" ]]; then
# Nightly builds from http://ci.tensorflow.org/view/Nightly/job/nightly-libtensorflow/
# and http://ci.tensorflow.org/view/Nightly/job/nightly-libtensorflow-windows/
curl -L "http://ci.tensorflow.org/view/Nightly/job/nightly-libtensorflow/TYPE=cpu-slave/lastSuccessfulBuild/artifact/lib_package/libtensorflow_jni-cpu-linux-x86_64.tar.gz" | tar -xvz -C linux-x86_64
curl -L "http://ci.tensorflow.org/view/Nightly/job/nightly-libtensorflow/TYPE=mac-slave/lastSuccessfulBuild/artifact/lib_package/libtensorflow_jni-cpu-darwin-x86_64.tar.gz" | tar -xvz -C darwin-x86_64
curl -L "http://ci.tensorflow.org/view/Nightly/job/nightly-libtensorflow-windows/lastSuccessfulBuild/artifact/lib_package/libtensorflow_jni-cpu-windows-x86_64.zip" -o /tmp/windows.zip
else
curl -L "${RELEASE_URL_PREFIX}/libtensorflow_jni-cpu-linux-x86_64-${TF_VERSION}.tar.gz" | tar -xvz -C linux-x86_64
curl -L "${RELEASE_URL_PREFIX}/libtensorflow_jni-cpu-darwin-x86_64-${TF_VERSION}.tar.gz" | tar -xvz -C darwin-x86_64
curl -L "${RELEASE_URL_PREFIX}/libtensorflow_jni-cpu-windows-x86_64-${TF_VERSION}.zip" -o /tmp/windows.zip
fi
unzip /tmp/windows.zip -d windows-x86_64
rm -f /tmp/windows.zip
# Updated timestamps seem to be required to get Maven to pick up the file.
touch linux-x86_64/*
touch darwin-x86_64/*
touch windows-x86_64/*
cd "${DIR}"
}
download_libtensorflow_jni_gpu() {
NATIVE_DIR="${DIR}/libtensorflow_jni_gpu/src/main/resources/org/tensorflow/native"
mkdir -p "${NATIVE_DIR}"
cd "${NATIVE_DIR}"
mkdir linux-x86_64
if [[ "${IS_SNAPSHOT}" == "true" ]]; then
# Nightly builds from http://ci.tensorflow.org/view/Nightly/job/nightly-libtensorflow/
# and http://ci.tensorflow.org/view/Nightly/job/nightly-libtensorflow-windows/
curl -L "http://ci.tensorflow.org/view/Nightly/job/nightly-libtensorflow/TYPE=gpu-linux/lastSuccessfulBuild/artifact/lib_package/libtensorflow_jni-gpu-linux-x86_64.tar.gz" | tar -xvz -C linux-x86_64
else
curl -L "${RELEASE_URL_PREFIX}/libtensorflow_jni-gpu-linux-x86_64-${TF_VERSION}.tar.gz" | tar -xvz -C linux-x86_64
fi
# Updated timestamps seem to be required to get Maven to pick up the file.
touch linux-x86_64/*
cd "${DIR}"
}
# Ideally, the .jar for generated Java code for TensorFlow protocol buffer files
# would have been produced by bazel rules. However, protocol buffer library
# support in bazel is in flux. Once
# https://github.com/bazelbuild/bazel/issues/2626 has been resolved, perhaps
# TensorFlow can move to something like
# https://bazel.build/blog/2017/02/27/protocol-buffers.html
# for generating C++, Java and Python code for protocol buffers.
#
# At that point, perhaps the libtensorflow build scripts
# (tensorflow/tools/ci_build/builds/libtensorflow.sh) can build .jars for
# generated code and this function would not need to download protoc to generate
# code.
generate_java_protos() {
# Clean any previous attempts
rm -rf "${DIR}/proto/tmp"
# Download protoc
curl -L "${PROTOC_RELEASE_URL}" -o "/tmp/protoc.zip"
mkdir -p "${DIR}/proto/tmp/protoc"
unzip -d "${DIR}/proto/tmp/protoc" "/tmp/protoc.zip"
rm -f "/tmp/protoc.zip"
# Download the release archive of TensorFlow protos.
if [[ "${IS_SNAPSHOT}" == "true" ]]; then
URL="http://ci.tensorflow.org/view/Nightly/job/nightly-libtensorflow/TYPE=cpu-slave/lastSuccessfulBuild/artifact/lib_package/libtensorflow_proto.zip"
else
URL="${RELEASE_URL_PREFIX}/libtensorflow_proto-${TF_VERSION}.zip"
fi
curl -L "${URL}" -o /tmp/libtensorflow_proto.zip
mkdir -p "${DIR}/proto/tmp/src"
unzip -d "${DIR}/proto/tmp/src" "/tmp/libtensorflow_proto.zip"
rm -f "/tmp/libtensorflow_proto.zip"
# Generate Java code
mkdir -p "${DIR}/proto/src/main/java"
find "${DIR}/proto/tmp/src" -name "*.proto" | xargs \
${DIR}/proto/tmp/protoc/bin/protoc \
--proto_path="${DIR}/proto/tmp/src" \
--java_out="${DIR}/proto/src/main/java"
# Cleanup
rm -rf "${DIR}/proto/tmp"
}
# Download the TensorFlow ecosystem source from git.
# The pom files from this repo do not inherit from the parent pom so the maven version
# is updated for each module.
download_tf_ecosystem() {
ECOSYSTEM_DIR="/tmp/tensorflow-ecosystem"
HADOOP_DIR="${DIR}/hadoop"
SPARK_DIR="${DIR}/spark-connector"
# Clean any previous attempts
rm -rf "${ECOSYSTEM_DIR}"
# Clone the TensorFlow ecosystem project
mkdir -p "${ECOSYSTEM_DIR}"
cd "${ECOSYSTEM_DIR}"
git clone "${TF_ECOSYSTEM_URL}"
cd ecosystem
git checkout r${TF_VERSION}
# Copy the TensorFlow Hadoop source
cp -r "${ECOSYSTEM_DIR}/ecosystem/hadoop/src" "${HADOOP_DIR}"
cp "${ECOSYSTEM_DIR}/ecosystem/hadoop/pom.xml" "${HADOOP_DIR}"
cd "${HADOOP_DIR}"
update_version_in_pom
# Copy the TensorFlow Spark connector source
cp -r "${ECOSYSTEM_DIR}/ecosystem/spark/spark-tensorflow-connector/src" "${SPARK_DIR}"
cp "${ECOSYSTEM_DIR}/ecosystem/spark/spark-tensorflow-connector/pom.xml" "${SPARK_DIR}"
cd "${SPARK_DIR}"
update_version_in_pom
# Cleanup
rm -rf "${ECOSYSTEM_DIR}"
cd "${DIR}"
}
# Deploy artifacts using a specific profile.
# Arguments:
# profile - name of selected profile.
# Outputs:
# n/a
deploy_profile() {
local profile="$1"
# Deploy the non-android pieces.
mvn deploy -P"${profile}"
# Determine the correct pom file property to use
# for the repository url.
local rtype
if [[ "${IS_SNAPSHOT}" == "true" ]]; then
rtype='snapshotRepository'
else
rtype='repository'
fi
local url=$(mvn_property "${profile}" "project.distributionManagement.${rtype}.url")
local repositoryId=$(mvn_property "${profile}" "project.distributionManagement.${rtype}.id")
mvn gpg:sign-and-deploy-file \
-Dfile="${DIR}/tensorflow-android/target/tensorflow.aar" \
-DpomFile="${DIR}/tensorflow-android/target/pom-android.xml" \
-Durl="${url}" \
-DrepositoryId="${repositoryId}"
}
# If successfully built, try to deploy.
# If successfully deployed, clean.
# If deployment fails, debug with
# ./release.sh ${TF_VERSION} ${SETTINGS_XML} bash
# to get a shell to poke around the maven artifacts with.
deploy_artifacts() {
# Deploy artifacts to ossrh if requested.
if [[ "${DEPLOY_OSSRH}" == "true" ]]; then
deploy_profile 'ossrh'
fi
# Deploy artifacts to bintray if requested.
if [[ "${DEPLOY_BINTRAY}" == "true" ]]; then
deploy_profile 'bintray'
fi
# Clean up when everything works
clean
}
if [ -z "${TF_VERSION}" ]
then
echo "Must set the TF_VERSION environment variable"
exit 1
fi
DIR="$(realpath $(dirname $0))"
cd "${DIR}"
# The meat of the script.
# Comment lines out appropriately if debugging/tinkering with the release
# process.
# gnupg2 is required for signing
apt-get -qq update && apt-get -qqq install -y gnupg2 git
clean
update_version_in_pom
download_libtensorflow
download_libtensorflow_jni
download_libtensorflow_jni_gpu
update_tensorflow_android
generate_java_protos
download_tf_ecosystem
# Build the release artifacts
mvn verify
# Push artifacts to repository
deploy_artifacts
set +ex
if [[ "${IS_SNAPSHOT}" == "false" ]]; then
echo "Uploaded to the staging repository"
echo "After validating the release: "
if [[ "${DEPLOY_OSSRH}" == "true" ]]; then
echo "* Login to https://oss.sonatype.org/#stagingRepositories"
echo "* Find the 'org.tensorflow' staging release and click either 'Release' to release or 'Drop' to abort"
fi
if [[ "${DEPLOY_BINTRAY}" == "true" ]]; then
echo "* Login to https://bintray.com/google/tensorflow/tensorflow"
echo "* Either 'Publish' unpublished items to release, or 'Discard' to abort"
fi
else
echo "Uploaded to the snapshot repository"
fi
|
gojira/tensorflow
|
tensorflow/java/maven/run_inside_container.sh
|
Shell
|
apache-2.0
| 11,272 |
#!/bin/sh
cat ../nuklear.h | ./doc > nuklear.html
|
OpenMusicKontrollers/sherlock.lv2
|
subprojects/nk_pugl/nuklear/doc/build.sh
|
Shell
|
artistic-2.0
| 50 |
#!/bin/sh
# test splitting into newline delineated chunks (-n l/...)
# Copyright (C) 2010-2016 Free Software Foundation, Inc.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
. "${srcdir=.}/tests/init.sh"; path_prepend_ ./src
print_ver_ split
# invalid number of chunks
echo "split: invalid number of chunks: '1o'" > exp
returns_ 1 split -n l/1o 2>err || fail=1
compare exp err || fail=1
echo "split: -: cannot determine file size" > exp
: | returns_ 1 split -n l/1 2>err || fail=1
compare exp err || fail=1
# N can be greater than the file size
# in which case no data is extracted, or empty files are written
split -n l/10 /dev/null || fail=1
test "$(stat -c %s x* | uniq -c | sed 's/^ *//; s/ /x/')" = "10x0" || fail=1
rm x??
# 'split' should reject any attempt to create an infinitely
# long output file.
returns_ 1 split -n l/2 /dev/zero || fail=1
rm x??
# Repeat the above, but with 1/2, not l/2:
returns_ 1 split -n 1/2 /dev/zero || fail=1
rm x??
# Ensure --elide-empty-files is honored
split -e -n l/10 /dev/null || fail=1
returns_ 1 stat x?? 2>/dev/null || fail=1
# 80 bytes. ~ transformed to \n below
lines=\
12345~1~12345~1~12345~1~12345~1~12345~~~12345~1~12345~1~12345~1~12345~1~12345~1~
printf "%s" "$lines" | tr '~' '\n' > in || framework_failure_
echo "split: invalid chunk number: '16'" > exp
returns_ 1 split -n l/16/15 in 2>err.t || fail=1
sed "s/': .*/'/" < err.t > err || framework_failure_
compare exp err || fail=1
printf '%s' "\
14 16 09 15 16 10
14 08 08 10 14 08 08 10
06 08 08 02 06 08 08 02 06 08 08 10
06 08 02 06 08 00 08 02 06 08 02 06 08 00 10
06 00 08 00 02 06 00 02 06 00 08 00 01 07 00 02 06 00 08 00 02 16
" > exp || framework_failure_
sed 's/00 *//g' exp > exp.elide_empty || framework_failure_
DEBUGGING=
test "$DEBUGGING" && test "$VERBOSE" && set +x
for ELIDE_EMPTY in '' '-e'; do
for IO_BLKSIZE in 1 2 5 10 80 100; do
> out
test "$DEBUGGING" && printf "\n---io-blk-size=$IO_BLKSIZE $ELIDE_EMPTY\n"
for N in 6 8 12 15 22; do
rm -f x*
if test -z "$ELIDE_EMPTY"; then
split ---io-blksize=$IO_BLKSIZE -n l/2/$N in > chunk.k
returns_ 1 stat x* 2>/dev/null || fail=1
fi
split ---io-blksize=$IO_BLKSIZE $ELIDE_EMPTY -n l/$N in
echo $(stat -c "%02s" x*) >> out
if test -z "$ELIDE_EMPTY"; then
compare chunk.k xab || fail=1
fi
if test "$DEBUGGING"; then
# Output partition pattern
size=$(printf "%s" "$lines" | wc -c)
chunk_size=$(($size/$N))
end_size=$(($chunk_size + ($size % $N)))
{
yes "$(printf %${chunk_size}s ])" | head -n$(($N-1))
printf %${end_size}s ]
} | tr -d '\n' | sed "s/\\(^.\\{1,$size\\}\\).*/\\1/"
echo
# Output pattern generated for comparison
for s in $(stat -c "%s" x*); do
#s=0 transitions are not shown
test "$m" = "_" && m=- || m=_
printf "%${s}s" '' | tr ' ' $m
done
echo
# Output lines for reference
echo "$lines"
fi
done
test -z "$ELIDE_EMPTY" && EXP=exp || EXP=exp.elide_empty
compare out $EXP || fail=1
done
done
test "$DEBUGGING" && test "$VERBOSE" && set -x
# Check extraction of particular chunks
> out
printf '1\n12345\n' > exp
split -n l/13/15 in > out
compare exp out || fail=1
> out
printf '' > exp
split -n l/14/15 in > out
compare exp out || fail=1
> out
printf '1\n12345\n1\n' > exp
split -n l/15/15 in > out
compare exp out || fail=1
# test input with no \n at end
printf '12\n34\n5' > in
printf '5' > exp
split -n l/7/7 in > out
compare exp out || fail=1
Exit $fail
|
mfragkoulis/coreutils
|
tests/split/l-chunk.sh
|
Shell
|
gpl-3.0
| 4,207 |
#!/bin/sh
#
# Copyright (C) 2014 Free Software Foundation, Inc.
# Contributed by ARM Ltd.
#
# This file is part of GCC.
#
# GCC is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GCC is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GCC; see the file COPYING3. If not see
# <http://www.gnu.org/licenses/>.
# Generate aarch64-builtin-iterators.h, a file containing a series of
# BUILTIN_<ITERATOR> macros, which expand to VAR<N> Macros covering the
# same set of modes as the iterator in iterators.md
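# For example (illustrative), an iterators.md entry such as
#   (define_mode_iterator VDQF [V2SF V4SF V2DF])
# becomes
#   #define BUILTIN_VDQF(T, N, MAP) \
#     VAR3 (T, N, MAP, v2sf, v4sf, v2df)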
echo "/* -*- buffer-read-only: t -*- */"
echo "/* Generated automatically by geniterators.sh from iterators.md. */"
echo "#ifndef GCC_AARCH64_ITERATORS_H"
echo "#define GCC_AARCH64_ITERATORS_H"
# Strip newlines, create records marked ITERATOR, and strip junk (anything
# which does not have a matching brace because it contains characters we
# don't want to or can't handle (e.g P, PTR iterators change depending on
# Pmode and ptr_mode).
cat $1 | tr "\n" " " \
| sed 's/(define_mode_iterator \([A-Za-z0-9_]*\) \([]\[A-Z0-9 \t]*\)/\n#define BUILTIN_\1(T, N, MAP) \\ \2\n/g' \
| grep '#define [A-Z0-9_(), \\]* \[[A-Z0-9[:space:]]*]' \
| sed 's/\t//g' \
| sed 's/ \+/ /g' \
| sed 's/ \[\([A-Z0-9 ]*\)]/\n\L\1/' \
| awk ' BEGIN { FS = " " ; OFS = ", "} \
/#/ { print } \
! /#/ { $1 = $1 ; printf " VAR%d (T, N, MAP, %s)\n", NF, $0 }'
echo "#endif /* GCC_AARCH64_ITERATORS_H */"
|
xinchoubiology/gcc
|
gcc/config/aarch64/geniterators.sh
|
Shell
|
gpl-2.0
| 1,880 |
# Set Hadoop-specific environment variables here.
# The only required environment variable is JAVA_HOME. All others are
# optional. When running a distributed configuration it is best to
# set JAVA_HOME in this file, so that it is correctly defined on
# remote nodes.
# The java implementation to use. Required.
# export JAVA_HOME=/usr/lib/j2sdk1.5-sun
# Extra Java CLASSPATH elements. Optional.
# export HADOOP_CLASSPATH=
# The maximum amount of heap to use, in MB. Default is 1000.
# export HADOOP_HEAPSIZE=2000
# Extra Java runtime options. Empty by default.
# export HADOOP_OPTS=-server
# Command specific options appended to HADOOP_OPTS when specified
export HADOOP_NAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_NAMENODE_OPTS"
export HADOOP_SECONDARYNAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_SECONDARYNAMENODE_OPTS"
export HADOOP_DATANODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_DATANODE_OPTS"
export HADOOP_BALANCER_OPTS="-Dcom.sun.management.jmxremote $HADOOP_BALANCER_OPTS"
export HADOOP_JOBTRACKER_OPTS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.port=8997 -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false $HADOOP_JOBTRACKER_OPTS"
export HADOOP_RAIDNODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_RAIDNODE_OPTS"
export HADOOP_NAMENODE_OPTS="-Dcom.sun.management.jmxremote.port=8998 -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false"
# The only user who can start hadoop daemons.
# If this is not set, any user can start hadoop daemons.
export HADOOP_USERNAME=""
# Java Runtime garbage collection options to pass to all Hadoop
# servers (Namenode, Jobtracker, Datanode, Tasktracker). This must end
# with a colon, to which the dynamically generated gc log filename will
# be appended. The defaults below work for the Sun JVM; for the IBM JVM,
# for example, use '-Xverbosegclog:'.
#export HADOOP_GC_LOG_OPTS="-XX:+PrintGCDateStamps -XX:+PrintGCDetails -Xloggc:"
export HADOOP_TASKTRACKER_OPTS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.port=8994 -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false"
# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
# export HADOOP_CLIENT_OPTS
# Extra ssh options. Empty by default.
# export HADOOP_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HADOOP_CONF_DIR"
# Where log files are stored. $HADOOP_HOME/logs by default.
# export HADOOP_LOG_DIR=${HADOOP_HOME}/logs
# File naming remote slave hosts. $HADOOP_HOME/conf/slaves by default.
# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
# host:path where hadoop code should be rsync'd from. Unset by default.
# export HADOOP_MASTER=master:/home/$USER/src/hadoop
# Seconds to sleep between slave commands. Unset by default. This
# can be useful in large clusters, where, e.g., slave rsyncs can
# otherwise arrive faster than the master can service them.
# export HADOOP_SLAVE_SLEEP=0.1
# The directory where pid files are stored. /tmp by default.
# export HADOOP_PID_DIR=/var/hadoop/pids
# A string representing this instance of hadoop. $USER by default.
# export HADOOP_IDENT_STRING=$USER
# The scheduling priority for daemon processes. See 'man nice'.
# export HADOOP_NICENESS=10
|
shakamunyi/hadoop-20
|
singleNodeHadoop/mapredConf/hadoop-env.sh
|
Shell
|
apache-2.0
| 3,307 |
#!/bin/bash
set -x -e
#export CXXFLAGS="${CXXFLAGS} -std=c++11 -stdlib=libstdc++ -stdlib=libc++ -DUSE_BOOST"
export CXXFLAGS="${CXXFLAGS} -std=c++11 -DUSE_BOOST -I${PREFIX}/include/bamtools"
mkdir -p ${PREFIX}/bin
mkdir -p ${PREFIX}/scripts
mkdir -p ${PREFIX}/config
## Make the software
if [ "$(uname)" = Darwin ] ; then
# SQLITE disabled due to compile issue, see: https://svn.boost.org/trac10/ticket/13501
sqlite=
else
sqlite='SQLITE=true'
fi
make \
CC="${CC}" \
CXX="${CXX}" \
BAMTOOLS_CC="${CC}" \
BAMTOOLS_CXX="${CXX}" \
BAMTOOLS="${PREFIX}" \
COMPGENPRED=true \
$sqlite
## Build Perl
mkdir perl-build
find scripts -name "*.pl" | xargs -I {} mv {} perl-build
cd perl-build
# affects tests for Augustus 3.3:
# https://github.com/Gaius-Augustus/Augustus/commit/7ca3ab
sed -i'' -e '1s/perl -w/perl/' *.pl
cp ${RECIPE_DIR}/Build.PL ./
perl ./Build.PL
perl ./Build manifest
perl ./Build install --installdirs site
cd ..
## End build perl
mv bin/* $PREFIX/bin/
mv scripts/* $PREFIX/bin/
mv config/* $PREFIX/config/
## Set AUGUSTUS variables on env activation
mkdir -p ${PREFIX}/etc/conda/activate.d ${PREFIX}/etc/conda/deactivate.d
cat <<EOF >> ${PREFIX}/etc/conda/activate.d/augustus.sh
export AUGUSTUS_CONFIG_PATH=${PREFIX}/config/
export AUGUSTUS_SCRIPTS_PATH=${PREFIX}/bin/
export AUGUSTUS_BIN_PATH=${PREFIX}/bin/
EOF
cat <<EOF >> ${PREFIX}/etc/conda/deactivate.d/augustus.sh
unset AUGUSTUS_CONFIG_PATH
unset AUGUSTUS_SCRIPTS_PATH
unset AUGUSTUS_BIN_PATH
EOF
|
ostrokach/bioconda-recipes
|
recipes/augustus/build.sh
|
Shell
|
mit
| 1,520 |
#!/bin/sh
# set jvm startup argument
JAVA_OPTS="-Xms2g \
-Xmx2g \
-Xmn1g \
-XX:PermSize=128m \
-XX:MaxPermSize=256m \
-XX:-DisableExplicitGC \
-Djava.awt.headless=true \
-Dcom.sun.management.jmxremote.port=8333 \
-Dcom.sun.management.jmxremote.authenticate=false \
-Dcom.sun.management.jmxremote.ssl=false \
-Dfile.encoding=utf-8 \
-XX:+PrintGC \
-XX:+PrintGCDetails \
-XX:+PrintGCDateStamps \
-Xloggc:../logs/gc.log \
-XX:-OmitStackTraceInFastThrow \
-XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/opt/logs/hermes/
"
export JAVA_OPTS=${JAVA_OPTS}
|
ctripcorp/hermes
|
hermes-example/script/env.sh
|
Shell
|
apache-2.0
| 756 |
#!/bin/bash
#
# Create an ASDF tarball of a source directory under the rl-glue Lisp codec
# src path, omitting the .svn directories.
#
# $Revision$
# $Date$
if [ ${#} != 1 ]; then
echo "Usage: ${0} <asdf-system-name>"
echo "Example: ${0} rl-glue-codec"
exit 1
fi
tooldir="`dirname ${0}`"
package="${1}"
###############################################################################
function make_asdf_package {
local system=${1}
echo -n "Creating package ${system}.tar.gz ... "
tar -zcf ../${system}.tar.gz --exclude '.svn/*' --exclude '.svn' ${system}
if [ ${?} = 0 ]; then
echo "done"
else
echo "failed (${?})"
fi
}
###############################################################################
cd ${tooldir}/../src
make_asdf_package ${package}
cd - >/dev/null
exit 0
|
GarethNelson/rl-glue-ext
|
projects/codecs/Lisp/tools/make-asdf-package.sh
|
Shell
|
apache-2.0
| 834 |
#!/bin/bash
#Bash disables history in noninteractive shells by default, but we turn it on here.
HISTFILE=~/.bash_history
set -o history
cd /opt/QA/sources/suricata/ && \
/usr/bin/suricata --build-info
|
pevma/SQARD-S
|
staging-area/tests/ubuntu/sequence/01-generic/60-verify/01-verify.sh
|
Shell
|
gpl-2.0
| 202 |
#!/bin/bash
# Make sure VUFIND_HOME is set:
if [ -z "$VUFIND_HOME" ]
then
# set VUFIND_HOME to the absolute path of the directory containing this script
# https://stackoverflow.com/questions/4774054/reliable-way-for-a-bash-script-to-get-the-full-path-to-itself
export VUFIND_HOME="$(cd "$(dirname "$0")" && pwd -P)"/..
if [ "$VUFIND_HOME" = /.. ]
then
exit 1
fi
fi
# Find harvest directory for future use
HARVEST_DIR="$VUFIND_LOCAL_DIR/harvest"
if [ ! -d $HARVEST_DIR ]
then
HARVEST_DIR="$VUFIND_HOME/harvest"
fi
BASEPATH_UNDER_HARVEST=true
LOGGING=true
MOVE_DATA=true
function usage {
cat <<EOF
This script processes a batch of harvested MARC records.
Usage: $(basename $0) [-dhmz] [-p properties_file] _harvest_subdirectory_
_harvest_subdirectory_ is a directory name created by the OAI-PMH harvester.
This script will search the harvest subdirectories of the directories defined
by the VUFIND_LOCAL_DIR or VUFIND_HOME environment variables.
Example: $(basename $0) oai_source
Options:
-d: Use the directory path as-is, do not append it to $HARVEST_DIR.
Useful for non-OAI batch loading.
-h: Print this message
-m: Do not move the data files after importing.
-p: Use the specified SolrMarc configuration properties file
-z: No logging.
EOF
}
while getopts ":dhmp:z" OPT
do
case $OPT in
d) BASEPATH_UNDER_HARVEST=false;;
h) usage;
exit 0;;
m) MOVE_DATA=false;;
p) PROPERTIES_FILE="$OPTARG"; export PROPERTIES_FILE;;
z) LOGGING=false;;
:)
echo "argument to '-$OPTARG' is missing" >&2
exit 1;;
\?) echo "Unrecognized option '-$OPTARG'" >&2;;
esac
done
# Decrement the argument pointer so it points to the next argument
shift $(($OPTIND - 1))
# Make sure command line parameter was included:
if [ -z "$1" ]
then
usage
exit 1
fi
# Set up BASEPATH and check if the path is valid:
if [ $BASEPATH_UNDER_HARVEST == false ]
then
BASEPATH=$1
else
BASEPATH="$HARVEST_DIR/$1"
fi
if [ ! -d $BASEPATH ]
then
echo "Directory $BASEPATH does not exist!"
exit 1
fi
# Create log/processed directories as needed:
if [ $LOGGING == true ]
then
if [ ! -d $BASEPATH/log ]
then
mkdir $BASEPATH/log
fi
fi
if [ $MOVE_DATA == true ]
then
if [ ! -d $BASEPATH/processed ]
then
mkdir $BASEPATH/processed
fi
fi
# The log() function can be redefined to suit a variety of logging needs
# Positional parameters must be consistent:
# $1 = name of the file being imported
if [ $LOGGING == false ]
then
function log {
cat - > /dev/null
}
else
function log {
local FILE=$1
cat -u - > $BASEPATH/log/`basename $FILE`.log
}
fi
# Process all the files in the target directory:
for file in $BASEPATH/*.xml $BASEPATH/*.mrc
do
if [ -f $file ]
then
# Logging output handled by log() function
# PROPERTIES_FILE passed via environment
$VUFIND_HOME/import-marc.sh $file 2> >(log $file)
if [ $MOVE_DATA == true ]
then
mv $file $BASEPATH/processed/`basename $file`
fi
fi
done
|
j4lib/vufind
|
harvest/batch-import-marc.sh
|
Shell
|
gpl-2.0
| 3,000 |
_opam_add()
{
_opam_reply="$_opam_reply $1"
}
_opam_add_f()
{
local cmd
cmd=$1; shift
_opam_add "$($cmd "$@" 2>/dev/null)"
}
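# e.g. "_opam_add_f opam list --safe -i -s" appends the names of the
# installed packages to the completion candidates.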
_opam_flags()
{
opam "$@" --help=groff 2>/dev/null | \
sed -n \
-e 's%\\-%-%g' \
-e 's%^\\fB\(-[^,= ]*\)\\fR.*%\1%p'
}
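# e.g. "_opam_flags install" prints one long option per line,
# such as --deps-only or --destdir.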
_opam_commands()
{
opam "$@" --help=groff 2>/dev/null | \
sed -n \
-e 's%\\-%-%g' \
-e '/^\.SH COMMANDS$/,/^\.SH/ s%^\\fB\([^,= ]*\)\\fR.*%\1%p'
echo '--help'
}
_opam_vars()
{
opam config list --safe 2>/dev/null | \
sed -n \
-e '/^PKG:/d' \
-e 's%^\([^# ][^ ]*\).*%\1%p'
}
_opam()
{
local cmd subcmd cur prev compgen_opt
COMPREPLY=()
cmd=${COMP_WORDS[1]}
subcmd=${COMP_WORDS[2]}
cur=${COMP_WORDS[COMP_CWORD]}
prev=${COMP_WORDS[COMP_CWORD-1]}
compgen_opt=""
_opam_reply=""
if [ $COMP_CWORD -eq 1 ]; then
_opam_add_f opam help topics
COMPREPLY=( $(compgen -W "$_opam_reply" -- $cur) )
unset _opam_reply
return 0
fi
case "$cmd" in
install|show|info)
_opam_add_f opam list --safe -a -s
if [ $COMP_CWORD -gt 2 ]; then
_opam_add_f _opam_flags "$cmd"
fi;;
reinstall|remove|uninstall)
_opam_add_f opam list --safe -i -s
if [ $COMP_CWORD -gt 2 ]; then
_opam_add_f _opam_flags "$cmd"
fi;;
upgrade)
_opam_add_f opam list --safe -i -s
_opam_add_f _opam_flags "$cmd"
;;
switch)
case $COMP_CWORD in
2)
_opam_add_f _opam_commands "$cmd"
_opam_add_f opam switch list --safe -s -i;;
3)
case "$subcmd" in
install|set)
_opam_add_f opam switch list --safe -s -a;;
remove|reinstall)
_opam_add_f opam switch list --safe -s -i;;
import|export)
compgen_opt="-o filenames -f";;
*)
_opam_add_f _opam_flags "$cmd"
esac;;
*)
_opam_add_f _opam_flags "$cmd"
esac;;
config)
if [ $COMP_CWORD -eq 2 ]; then
_opam_add_f _opam_commands "$cmd"
else
if [ $COMP_CWORD -eq 3 ] && [ "$subcmd" = "var" ]; then
_opam_add_f _opam_vars
else
_opam_add_f _opam_flags "$cmd"
fi
fi;;
repository|remote)
case $COMP_CWORD in
2)
_opam_add_f _opam_commands "$cmd";;
3)
case "$subcmd" in
add)
if [ $COMP_CWORD -gt 3 ]; then
compgen_opt="-o filenames -f"
fi;;
remove|priority|set-url)
_opam_add_f opam repository list --safe -s;;
*)
_opam_add_f _opam_flags "$cmd"
esac;;
*)
_opam_add_f _opam_flags "$cmd"
case "$subcmd" in
set-url|add) compgen_opt="-o filenames -f";;
esac;;
esac;;
update)
_opam_add_f opam repository list --safe -s
_opam_add_f opam pin list --safe -s
;;
source)
_opam_add_f opam list --safe -A -s
_opam_add_f _opam_flags "$cmd"
;;
pin)
case $COMP_CWORD in
2)
_opam_add_f _opam_commands "$cmd";;
3)
case "$subcmd" in
add)
_opam_add_f opam list --safe -A -s;;
remove|edit)
_opam_add_f opam pin list --safe -s;;
*)
_opam_add_f _opam_flags "$cmd"
esac;;
*)
case "$subcmd" in
add)
compgen_opt="-o filenames -f";;
*)
_opam_add_f _opam_flags "$cmd"
esac
esac;;
unpin)
if [ $COMP_CWORD -eq 2 ]; then
_opam_add_f opam pin list --safe -s
else
_opam_add_f _opam_flags "$cmd"
fi;;
*)
_opam_add_f _opam_commands "$cmd"
_opam_add_f _opam_flags "$cmd"
esac
COMPREPLY=( $(compgen -W "$_opam_reply" $compgen_opt -- "$cur") )
unset _opam_reply
return 0
}
complete -F _opam opam
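# To enable completion in the current shell, source this file, e.g.:
#   . shell/opam_completion.sh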
|
monkeytest15/opam
|
shell/opam_completion.sh
|
Shell
|
gpl-3.0
| 4,673 |
#!/bin/bash
set -e -x -o pipefail
mkdir -p $PREFIX/bin
if [ "$(uname)" == "Darwin" ]; then
echo "Platform: Mac"
# export CFLAGS='-O3'
# export CXXFLAGS='-O3'
cp $SRC_DIR/bin/* $PREFIX/bin
elif [ "$(expr substr $(uname -s) 1 5)" == "Linux" ]; then
echo "Platform: Linux"
cp $SRC_DIR/bin/* $PREFIX/bin/
fi
chmod +x $PREFIX/bin/*
|
hardingnj/bioconda-recipes
|
recipes/blast/2.2.21/build.sh
|
Shell
|
mit
| 367 |
# Source this script to set up the ROOT build that this script is part of.
#
# Conveniently an alias like this can be defined in .bashrc:
# alias thisroot=". bin/thisroot.sh"
#
# This script is for bash-like shells; see thisroot.csh for csh-like shells.
#
# Author: Fons Rademakers, 18/8/2006
drop_from_path()
{
# Assert that we got enough arguments
if test $# -ne 2 ; then
echo "drop_from_path: needs 2 arguments"
return 1
fi
local p=$1
local drop=$2
newpath=`echo $p | sed -e "s;:${drop}:;:;g" \
-e "s;:${drop}\$;;g" \
-e "s;^${drop}:;;g" \
-e "s;^${drop}\$;;g"`
}
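# Illustration: drop_from_path "/usr/bin:/old/root/bin" "/old/root/bin"
# sets newpath to "/usr/bin".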
clean_environment()
{
if [ -n "${old_rootsys}" ] ; then
if [ -n "${PATH}" ]; then
drop_from_path "$PATH" "${old_rootsys}/bin"
PATH=$newpath
fi
if [ -n "${LD_LIBRARY_PATH}" ]; then
drop_from_path "$LD_LIBRARY_PATH" "${old_rootsys}/lib"
LD_LIBRARY_PATH=$newpath
fi
if [ -n "${DYLD_LIBRARY_PATH}" ]; then
drop_from_path "$DYLD_LIBRARY_PATH" "${old_rootsys}/lib"
DYLD_LIBRARY_PATH=$newpath
fi
if [ -n "${SHLIB_PATH}" ]; then
drop_from_path "$SHLIB_PATH" "${old_rootsys}/lib"
SHLIB_PATH=$newpath
fi
if [ -n "${LIBPATH}" ]; then
drop_from_path "$LIBPATH" "${old_rootsys}/lib"
LIBPATH=$newpath
fi
if [ -n "${PYTHONPATH}" ]; then
drop_from_path "$PYTHONPATH" "${old_rootsys}/lib"
PYTHONPATH=$newpath
fi
if [ -n "${MANPATH}" ]; then
drop_from_path "$MANPATH" "${old_rootsys}/man"
MANPATH=$newpath
fi
if [ -n "${CMAKE_PREFIX_PATH}" ]; then
drop_from_path "$CMAKE_PREFIX_PATH" "${old_rootsys}"
CMAKE_PREFIX_PATH=$newpath
fi
if [ -n "${JUPYTER_PATH}" ]; then
drop_from_path "$JUPYTER_PATH" "${old_rootsys}/etc/notebook"
JUPYTER_PATH=$newpath
fi
if [ -n "${JUPYTER_CONFIG_DIR}" ]; then
drop_from_path "$JUPYTER_CONFIG_DIR" "${old_rootsys}/etc/notebook"
JUPYTER_CONFIG_DIR=$newpath
fi
fi
if [ -z "${MANPATH}" ]; then
# Grab the default man path before setting the path to avoid duplicates
if command -v manpath >/dev/null; then
default_manpath=`manpath`
elif command -v man >/dev/null; then
default_manpath=`man -w 2> /dev/null`
else
default_manpath=""
fi
fi
}
set_environment()
{
if [ -z "${PATH}" ]; then
PATH=@bindir@; export PATH
else
PATH=@bindir@:$PATH; export PATH
fi
if [ -z "${LD_LIBRARY_PATH}" ]; then
LD_LIBRARY_PATH=@libdir@
export LD_LIBRARY_PATH # Linux, ELF HP-UX
else
LD_LIBRARY_PATH=@libdir@:$LD_LIBRARY_PATH
export LD_LIBRARY_PATH
fi
if [ -z "${DYLD_LIBRARY_PATH}" ]; then
DYLD_LIBRARY_PATH=@libdir@
      export DYLD_LIBRARY_PATH # macOS
else
DYLD_LIBRARY_PATH=@libdir@:$DYLD_LIBRARY_PATH
export DYLD_LIBRARY_PATH
fi
if [ -z "${SHLIB_PATH}" ]; then
SHLIB_PATH=@libdir@
      export SHLIB_PATH # HP-UX
else
SHLIB_PATH=@libdir@:$SHLIB_PATH
export SHLIB_PATH
fi
if [ -z "${LIBPATH}" ]; then
LIBPATH=@libdir@
      export LIBPATH # AIX
else
LIBPATH=@libdir@:$LIBPATH
export LIBPATH
fi
if [ -z "${PYTHONPATH}" ]; then
PYTHONPATH=@libdir@
      export PYTHONPATH
else
PYTHONPATH=@libdir@:$PYTHONPATH
export PYTHONPATH
fi
if [ -z "${MANPATH}" ]; then
MANPATH=@mandir@:${default_manpath}; export MANPATH
else
MANPATH=@mandir@:$MANPATH; export MANPATH
fi
if [ -z "${CMAKE_PREFIX_PATH}" ]; then
      CMAKE_PREFIX_PATH=$ROOTSYS; export CMAKE_PREFIX_PATH
else
CMAKE_PREFIX_PATH=$ROOTSYS:$CMAKE_PREFIX_PATH; export CMAKE_PREFIX_PATH
fi
if [ -z "${JUPYTER_PATH}" ]; then
      JUPYTER_PATH=$ROOTSYS/etc/notebook; export JUPYTER_PATH
else
JUPYTER_PATH=$ROOTSYS/etc/notebook:$JUPYTER_PATH; export JUPYTER_PATH
fi
if [ -z "${JUPYTER_CONFIG_DIR}" ]; then
      JUPYTER_CONFIG_DIR=$ROOTSYS/etc/notebook; export JUPYTER_CONFIG_DIR
else
JUPYTER_CONFIG_DIR=$ROOTSYS/etc/notebook:$JUPYTER_CONFIG_DIR; export JUPYTER_CONFIG_DIR
fi
}
### main ###
if [ -n "${ROOTSYS}" ] ; then
old_rootsys=${ROOTSYS}
fi
SOURCE=${BASH_ARGV[0]}
if [ "x$SOURCE" = "x" ]; then
SOURCE=${(%):-%N} # for zsh
fi
if [ "x${SOURCE}" = "x" ]; then
if [ -f bin/thisroot.sh ]; then
ROOTSYS="$PWD"; export ROOTSYS
elif [ -f ./thisroot.sh ]; then
ROOTSYS=$(cd .. > /dev/null; pwd); export ROOTSYS
else
      echo 'ERROR: must "cd where/root/is" before calling ". bin/thisroot.sh" for this version of bash!'
ROOTSYS=; export ROOTSYS
return 1
fi
else
# get param to "."
thisroot=$(dirname ${SOURCE})
ROOTSYS=$(cd ${thisroot}/.. > /dev/null;pwd); export ROOTSYS
fi
clean_environment
set_environment
# Prevent Cppyy from checking the PCH (and avoid warning)
export CLING_STANDARD_PCH=none
if [ "x`root-config --arch | grep -v win32gcc | grep -i win32`" != "x" ]; then
ROOTSYS="`cygpath -w $ROOTSYS`"
fi
unset old_rootsys
unset thisroot
unset -f drop_from_path
unset -f clean_environment
unset -f set_environment
|
root-mirror/root
|
config/thisroot.sh
|
Shell
|
lgpl-2.1
| 5,473 |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# Copyright (C) 2012-4 Michael D. Taht, Toke Høiland-Jørgensen, Sebastian Moeller
# Improve the logread output
sqm_logger() {
logger -t SQM -s ${1}
}
insmod() {
lsmod | grep -q ^$1 || $INSMOD $1
}
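# Run a rule through both iptables and ip6tables; the -A/-I to -D rewrites
# below first delete any identical pre-existing rule, so repeated calls
# stay idempotent (no duplicated rules).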
ipt() {
d=`echo $* | sed s/-A/-D/g`
[ "$d" != "$*" ] && {
iptables $d > /dev/null 2>&1
ip6tables $d > /dev/null 2>&1
}
d=`echo $* | sed s/-I/-D/g`
[ "$d" != "$*" ] && {
iptables $d > /dev/null 2>&1
ip6tables $d > /dev/null 2>&1
}
iptables $* > /dev/null 2>&1
ip6tables $* > /dev/null 2>&1
}
do_modules() {
    #sm TODO: check first whether the modules exist and only then load them
insmod act_ipt
insmod sch_$QDISC
insmod sch_ingress
insmod act_mirred
insmod cls_fw
insmod sch_htb
}
# You need to jiggle these parameters. Note limits are tuned towards a <10Mbit uplink, <60Mbit downlink
[ -z "$UPLINK" ] && UPLINK=2302
[ -z "$DOWNLINK" ] && DOWNLINK=14698
[ -z "$IFACE" ] && IFACE=ge00
[ -z "$QDISC" ] && QDISC=fq_codel
[ -z "$LLAM" ] && LLAM="tc_stab"
[ -z "$LINKLAYER" ] && LINKLAYER="none"
[ -z "$OVERHEAD" ] && OVERHEAD=0
[ -z "$STAB_MTU" ] && STAB_MTU=2047
[ -z "$STAB_MPU" ] && STAB_MPU=0
[ -z "$STAB_TSIZE" ] && STAB_TSIZE=512
[ -z "$AUTOFLOW" ] && AUTOFLOW=0
[ -z "$LIMIT" ] && LIMIT=1001 # sane global default for *LIMIT for fq_codel on a small memory device
[ -z "$ILIMIT" ] && ILIMIT=
[ -z "$ELIMIT" ] && ELIMIT=
[ -z "$ITARGET" ] && ITARGET=
[ -z "$ETARGET" ] && ETARGET=
[ -z "$IECN" ] && IECN="ECN"
[ -z "$EECN" ] && EECN="NOECN"
[ -z "$SQUASH_DSCP" ] && SQUASH_DSCP="1"
[ -z "$SQUASH_INGRESS" ] && SQUASH_INGRESS="1"
[ -z "$IQDISC_OPTS" ] && IQDISC_OPTS=""
[ -z "$EQDISC_OPTS" ] && EQDISC_OPTS=""
[ -z "$TC" ] && TC=`which tc`
#[ -z "$TC" ] && TC="sqm_logger tc" # this redirects all tc calls into the log
[ -z "$IP" ] && IP=$( which ip )
[ -z "$INSMOD" ] && INSMOD=`which insmod`
[ -z "$TARGET" ] && TARGET="5ms"
[ -z "$IPT_MASK" ] && IPT_MASK="0xff"
[ -z "$IPT_MASK_STRING" ] && IPT_MASK_STRING="/${IPT_MASK}" # for set-mark
#sqm_logger "${0} IPT_MASK: ${IPT_MASK_STRING}"
# find the ifb device associated with a specific interface, return nothing if no ifb is associated with IF
get_ifb_associated_with_if() {
CUR_IF=$1
# CUR_IFB=$( tc -p filter show parent ffff: dev ${CUR_IF} | grep -o -e ifb'[[:digit:]]\+' )
    CUR_IFB=$( tc -p filter show parent ffff: dev ${CUR_IF} | grep -o -e ifb'[^)]\+' ) # my editor's syntax colouring is limited, so I need a single quote in this line (between editor and s)
sqm_logger "ifb associated with interface ${CUR_IF}: ${CUR_IFB}"
echo ${CUR_IFB}
}
# ATTENTION: IFB names can only be 15 characters, so we chop off excess characters at the start of the interface name
# if required
create_new_ifb_for_if() {
CUR_IF=$1
MAX_IF_NAME_LENGTH=15
IFB_PREFIX="ifb4"
NEW_IFB="${IFB_PREFIX}${CUR_IF}"
IFB_NAME_LENGTH=${#NEW_IFB}
if [ ${IFB_NAME_LENGTH} -gt ${MAX_IF_NAME_LENGTH} ];
then
        sqm_logger "The requested IFB name ${NEW_IFB} is longer than the allowed 15 characters, trying to make it shorter"
OVERLIMIT=$(( ${#NEW_IFB} - ${MAX_IF_NAME_LENGTH} ))
NEW_IFB=${IFB_PREFIX}${CUR_IF:${OVERLIMIT}:$(( ${MAX_IF_NAME_LENGTH} - ${#IFB_PREFIX} ))}
fi
sqm_logger "trying to create new IFB: ${NEW_IFB}"
$IP link add name ${NEW_IFB} type ifb #>/dev/null 2>&1 # better be verbose
echo ${NEW_IFB}
}
# the best match is either the IFB already associated with the current interface or a new named IFB
get_ifb_for_if() {
CUR_IF=$1
# if an ifb is already associated return that
CUR_IFB=$( get_ifb_associated_with_if ${CUR_IF} )
[ -z "$CUR_IFB" ] && CUR_IFB=$( create_new_ifb_for_if ${CUR_IF} )
[ -z "$CUR_IFB" ] && sqm_logger "Could not find existing IFB for ${CUR_IF}, nor create a new IFB instead..."
echo ${CUR_IFB}
}
#sm: we need the functions above before trying to set the ingress IFB device
[ -z "$DEV" ] && DEV=$( get_ifb_for_if ${IFACE} ) # automagically get the right IFB device for the IFACE"
get_htb_adsll_string() {
ADSLL=""
if [ "$LLAM" = "htb_private" -a "$LINKLAYER" != "none" ];
then
# HTB defaults to MTU 1600 and an implicit fixed TSIZE of 256, but HTB as of around 3.10.0
# does not actually use a table in the kernel
ADSLL="mpu ${STAB_MPU} linklayer ${LINKLAYER} overhead ${OVERHEAD} mtu ${STAB_MTU}"
sqm_logger "ADSLL: ${ADSLL}"
fi
echo ${ADSLL}
}
get_stab_string() {
STABSTRING=""
if [ "${LLAM}" = "tc_stab" -a "$LINKLAYER" != "none" ];
then
STABSTRING="stab mtu ${STAB_MTU} tsize ${STAB_TSIZE} mpu ${STAB_MPU} overhead ${OVERHEAD} linklayer ${LINKLAYER}"
sqm_logger "STAB: ${STABSTRING}"
fi
echo ${STABSTRING}
}
sqm_stop() {
$TC qdisc del dev $IFACE ingress
$TC qdisc del dev $IFACE root
$TC qdisc del dev $DEV root
}
# Note this has side effects on the prio variable
# and depends on the interface global too
fc() {
$TC filter add dev $interface protocol ip parent $1 prio $prio u32 match ip tos $2 0xfc classid $3
prio=$(($prio + 1))
$TC filter add dev $interface protocol ipv6 parent $1 prio $prio u32 match ip6 priority $2 0xfc classid $3
prio=$(($prio + 1))
}
fc_pppoe() {
PPPOE_SESSION_ETHERTYPE="0x8864"
PPPOE_DISCOVERY_ETHERTYPE="0x8863"
PPP_PROTO_IP4="0x0021"
PPP_PROTO_IP6="0x0057"
ARP_PROTO_IP4="0x0806"
$TC filter add dev $interface protocol ip parent $1 prio $prio u32 match ip tos $2 0xfc classid $3
$TC filter add dev $interface parent $1 protocol ${PPPOE_SESSION_ETHERTYPE} prio $(( 400 + ${prio} )) u32 \
match u16 ${PPP_PROTO_IP4} 0xffff at 6 \
match u8 $2 0xfc at 9 \
flowid $3
prio=$(($prio + 1))
$TC filter add dev $interface protocol ipv6 parent $1 prio $prio u32 match ip6 priority $2 0xfc classid $3
$TC filter add dev $interface parent $1 protocol ${PPPOE_SESSION_ETHERTYPE} prio $(( 600 + ${prio} )) u32 \
match u16 ${PPP_PROTO_IP6} 0xffff at 6 \
match u16 0x0${2:2:2}0 0x0fc0 at 8 \
flowid $3
prio=$(($prio + 1))
}
# FIXME: actually you need to get the underlying MTU on the PPPoE thing
get_mtu() {
BW=$2
F=`cat /sys/class/net/$1/mtu`
if [ -z "$F" ]
then
F=1500
fi
if [ $BW -gt 20000 ]
then
F=$(($F * 2))
fi
if [ $BW -gt 30000 ]
then
F=$(($F * 2))
fi
if [ $BW -gt 40000 ]
then
F=$(($F * 2))
fi
if [ $BW -gt 50000 ]
then
F=$(($F * 2))
fi
if [ $BW -gt 60000 ]
then
F=$(($F * 2))
fi
if [ $BW -gt 80000 ]
then
F=$(($F * 2))
fi
echo $F
}
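# Illustration: with a 1500-byte MTU and a bandwidth of 45000 kbps, F is
# doubled three times and get_mtu echoes 12000.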
# FIXME should also calculate the limit
# Frankly I think Xfq_codel can pretty much always run with high numbers of flows
# now that it does fate sharing
# But right now I'm trying to match the ns2 model behavior better
# So SET the autoflow variable to 1 if you want the cablelabs behavior
get_flows() {
if [ "$AUTOFLOW" -eq "1" ]
then
FLOWS=8
[ $1 -gt 999 ] && FLOWS=16
[ $1 -gt 2999 ] && FLOWS=32
[ $1 -gt 7999 ] && FLOWS=48
[ $1 -gt 9999 ] && FLOWS=64
[ $1 -gt 19999 ] && FLOWS=128
[ $1 -gt 39999 ] && FLOWS=256
[ $1 -gt 69999 ] && FLOWS=512
[ $1 -gt 99999 ] && FLOWS=1024
case $QDISC in
codel|ns2_codel|pie|*fifo|pfifo_fast) ;;
fq_codel|*fq_codel|sfq) echo flows $FLOWS ;;
esac
fi
}
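# Illustration: get_flows 5000 with AUTOFLOW=1 and QDISC=fq_codel echoes
# "flows 32".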
# set the target parameter, also try to only take well formed inputs
# Note, the link bandwidth in the current direction (ingress or egress)
# is required to adjust the target for slow links
get_target() {
local CUR_TARGET=${1}
local CUR_LINK_KBPS=${2}
[ ! -z "$CUR_TARGET" ] && sqm_logger "cur_target: ${CUR_TARGET} cur_bandwidth: ${CUR_LINK_KBPS}"
CUR_TARGET_STRING=
# either e.g. 100ms or auto
CUR_TARGET_VALUE=$( echo ${CUR_TARGET} | grep -o -e \^'[[:digit:]]\+' )
CUR_TARGET_UNIT=$( echo ${CUR_TARGET} | grep -o -e '[[:alpha:]]\+'\$ )
#[ ! -z "$CUR_TARGET" ] && sqm_logger "CUR_TARGET_VALUE: $CUR_TARGET_VALUE"
#[ ! -z "$CUR_TARGET" ] && sqm_logger "CUR_TARGET_UNIT: $CUR_TARGET_UNIT"
AUTO_TARGET=
UNIT_VALID=
case $QDISC in
*codel|*pie)
if [ ! -z "${CUR_TARGET_VALUE}" -a ! -z "${CUR_TARGET_UNIT}" ];
then
case ${CUR_TARGET_UNIT} in
# permissible units taken from: tc_util.c get_time()
s|sec|secs|ms|msec|msecs|us|usec|usecs)
CUR_TARGET_STRING="target ${CUR_TARGET_VALUE}${CUR_TARGET_UNIT}"
UNIT_VALID="1"
;;
esac
fi
# empty field in GUI or undefined GUI variable now defaults to auto
if [ -z "${CUR_TARGET_VALUE}" -a -z "${CUR_TARGET_UNIT}" ];
then
if [ ! -z "${CUR_LINK_KBPS}" ];
then
TMP_TARGET_US=$( adapt_target_to_slow_link $CUR_LINK_KBPS )
TMP_INTERVAL_STRING=$( adapt_interval_to_slow_link $TMP_TARGET_US )
CUR_TARGET_STRING="target ${TMP_TARGET_US}us ${TMP_INTERVAL_STRING}"
AUTO_TARGET="1"
sqm_logger "get_target defaulting to auto."
else
sqm_logger "required link bandwidth in kbps not passed to get_target()."
fi
fi
# but still allow explicit use of the keyword auto for backward compatibility
case ${CUR_TARGET_UNIT} in
auto|Auto|AUTO)
if [ ! -z "${CUR_LINK_KBPS}" ];
then
TMP_TARGET_US=$( adapt_target_to_slow_link $CUR_LINK_KBPS )
TMP_INTERVAL_STRING=$( adapt_interval_to_slow_link $TMP_TARGET_US )
CUR_TARGET_STRING="target ${TMP_TARGET_US}us ${TMP_INTERVAL_STRING}"
AUTO_TARGET="1"
else
sqm_logger "required link bandwidth in kbps not passed to get_target()."
fi
;;
esac
case ${CUR_TARGET_UNIT} in
default|Default|DEFAULT)
if [ ! -z "${CUR_LINK_KBPS}" ];
then
CUR_TARGET_STRING="" # return nothing so the default target is not over-ridden...
AUTO_TARGET="1"
#sqm_logger "get_target using qdisc default, no explicit target string passed."
else
sqm_logger "required link bandwidth in kbps not passed to get_target()."
fi
;;
esac
if [ ! -z "${CUR_TARGET}" ];
then
if [ -z "${CUR_TARGET_VALUE}" -o -z "${UNIT_VALID}" ];
then
[ -z "$AUTO_TARGET" ] && sqm_logger "${CUR_TARGET} is not a well formed tc target specifier; e.g.: 5ms (or s, us), or one of the strings auto or default."
fi
fi
;;
esac
# sqm_logger "target: ${CUR_TARGET_STRING}"
echo $CUR_TARGET_STRING
}
# for low bandwidth links fq_codel's default target of 5ms does not work too well,
# so increase the target for slow links (note: below roughly 2500kbps a single packet
# takes more than 5ms to be transferred over the wire)
adapt_target_to_slow_link() {
CUR_LINK_KBPS=$1
CUR_EXTENDED_TARGET_US=
    MAX_PACKET_DELAY_IN_US_AT_1KBPS=$(( 1000 * 1000 * 1540 * 8 / 1000 ))
    CUR_EXTENDED_TARGET_US=$(( ${MAX_PACKET_DELAY_IN_US_AT_1KBPS} / ${CUR_LINK_KBPS} ))	# note this truncates the decimals
# do not change anything for fast links
[ "$CUR_EXTENDED_TARGET_US" -lt 5000 ] && CUR_EXTENDED_TARGET_US=5000
case ${QDISC} in
*codel|pie)
echo "${CUR_EXTENDED_TARGET_US}"
;;
esac
}
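# Illustration: for CUR_LINK_KBPS=500 this computes 12320000 / 500 = 24640,
# i.e. a target of roughly 24.6ms instead of the 5ms default.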
# codel looks at a whole interval to figure out whether observed latency stayed below target;
# if target >= interval that will not work well, so increase the interval by the same amount that target was increased
adapt_interval_to_slow_link() {
CUR_TARGET_US=$1
case ${QDISC} in
*codel)
CUR_EXTENDED_INTERVAL_US=$(( (100 - 5) * 1000 + ${CUR_TARGET_US} ))
echo "interval ${CUR_EXTENDED_INTERVAL_US}us"
;;
pie)
## not sure if pie needs this, probably not
#CUR_EXTENDED_TUPDATE_US=$(( (30 - 20) * 1000 + ${CUR_TARGET_US} ))
#echo "tupdate ${CUR_EXTENDED_TUPDATE_US}us"
;;
esac
}
# set quantum parameter if available for this qdisc
get_quantum() {
case $QDISC in
*fq_codel|fq_pie|drr) echo quantum $1 ;;
*) ;;
esac
}
# only show limits to qdiscs that can handle them...
# Note that $LIMIT contains the default limit
get_limit() {
CURLIMIT=$1
case $QDISC in
*codel|*pie|pfifo_fast|sfq|pfifo) [ -z ${CURLIMIT} ] && CURLIMIT=${LIMIT} # use the global default limit
;;
    bfifo) [ -z "$CURLIMIT" ] && [ ! -z "$LIMIT" ] && CURLIMIT=$(( ${LIMIT} * $( cat /sys/class/net/${IFACE}/mtu ) )) # bfifo defaults to txqueuelen * MTU,
;;
*) sqm_logger "${QDISC} does not support a limit"
;;
esac
sqm_logger "get_limit: $1 CURLIMIT: ${CURLIMIT}"
if [ ! -z "$CURLIMIT" ]
then
echo "limit ${CURLIMIT}"
fi
}
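# Illustration: get_limit "" with QDISC=fq_codel falls back to the global
# default above and echoes "limit 1001".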
get_ecn() {
CURECN=$1
#sqm_logger CURECN: $CURECN
case ${CURECN} in
ECN)
case $QDISC in
*codel|*pie|*red)
CURECN=ecn
;;
*)
CURECN=""
;;
esac
;;
NOECN)
case $QDISC in
*codel|*pie|*red)
CURECN=noecn
;;
*)
CURECN=""
;;
esac
;;
*)
sqm_logger "ecn value $1 not handled"
;;
esac
#sqm_logger "get_ECN: $1 CURECN: ${CURECN} IECN: ${IECN} EECN: ${EECN}"
echo ${CURECN}
}
# This could be a complete diffserv implementation
diffserv() {
interface=$1
prio=1
# Catchall
$TC filter add dev $interface parent 1:0 protocol all prio 999 u32 \
match ip protocol 0 0x00 flowid 1:12
# Find the most common matches fast
    #fc_pppoe() instead of fc() with effective ingress classification for pppoe is very expensive and destroys LUL
fc 1:0 0x00 1:12 # BE
fc 1:0 0x20 1:13 # CS1
fc 1:0 0x10 1:11 # IMM
fc 1:0 0xb8 1:11 # EF
fc 1:0 0xc0 1:11 # CS3
fc 1:0 0xe0 1:11 # CS6
fc 1:0 0x90 1:11 # AF42 (mosh)
# Arp traffic
$TC filter add dev $interface protocol arp parent 1:0 prio $prio handle 500 fw flowid 1:11
prio=$(($prio + 1))
}
diffserv_pppoe() {
interface=$1
prio=1
# Catchall
$TC filter add dev $interface parent 1:0 protocol all prio 999 u32 \
match ip protocol 0 0x00 flowid 1:12
# Find the most common matches fast
    #fc_pppoe() instead of fc() with effective ingress classification for pppoe is very expensive and destroys LUL
fc_pppoe 1:0 0x00 1:12 # BE
fc_pppoe 1:0 0x20 1:13 # CS1
fc_pppoe 1:0 0x10 1:11 # IMM
fc_pppoe 1:0 0xb8 1:11 # EF
fc_pppoe 1:0 0xc0 1:11 # CS3
fc_pppoe 1:0 0xe0 1:11 # CS6
fc_pppoe 1:0 0x90 1:11 # AF42 (mosh)
# Arp traffic
$TC filter add dev $interface protocol arp parent 1:0 prio $prio handle 500 fw flowid 1:11
prio=$(($prio + 1))
}
|
clehner/openwrt-packages
|
net/sqm-scripts/files/usr/lib/sqm/functions.sh
|
Shell
|
gpl-2.0
| 14,652 |
#!/bin/sh
COMMIT_SHA=$(git describe --dirty)
TARGET_DIR=/Users/admin/dev_src/APMPlanner
WEBSITE_USER=TeamCityAdmin
mv apm_planner2_${BUILD_NUMBER}_osx.dmg ${TARGET_DIR}/apm_planner_${COMMIT_SHA}_osx.dmg
cp ${TARGET_DIR}/apm_planner_${COMMIT_SHA}_osx.dmg ${TARGET_DIR}/apm_planner2_latest_osx.dmg
rsync -avh --password-file=../../../data/rsyncpass ${TARGET_DIR}/apm_planner_${COMMIT_SHA}_osx.dmg ${WEBSITE_USER}@firmware.diydrones.com::APMPlanner/
rsync -avh --password-file=../../../data/rsyncpass ${TARGET_DIR}/apm_planner2_latest_osx.dmg ${WEBSITE_USER}@firmware.diydrones.com::APMPlanner/
|
duststorm/apm_planner
|
scripts/upload_stable_osx.sh
|
Shell
|
agpl-3.0
| 594 |
R CMD REMOVE --library=$PREFIX/lib/R/library/ rtu34cdf
|
joachimwolff/bioconda-recipes
|
recipes/bioconductor-rtu34cdf/pre-unlink.sh
|
Shell
|
mit
| 55 |
#!/bin/bash
. ./common.sh
if [ -z "$1" ]; then echo "Specify DB node to kill."; exit 1; fi
killNode $1
|
thomasdarimont/keycloak
|
testsuite/performance/db-failover/kill-node.sh
|
Shell
|
apache-2.0
| 103 |
#!/bin/sh
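# Usage (inferred from the cd below): ./bump.sh <package-dir>
# bumps the patch version of ~/src/<package-dir>, pushes, and publishes to npm.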
(
cd ~/src/$1 &&
# Get current branch name so we can make sure it's master
# http://stackoverflow.com/questions/6245570/get-current-branch-name
  branch=$(git rev-parse --abbrev-ref HEAD)
if [ "master" != "$branch" ]; then
echo "Not on the master branch!"
exit 1
fi
npm version patch &&
git push &&
npm publish &&
echo "Done."
)
|
punkave/punkave-jobs
|
scripts/bump.sh
|
Shell
|
mit
| 365 |
#!/bin/bash
export CFLAGS="-I$PREFIX/include"
export LDFLAGS="-L$PREFIX/lib"
./configure --prefix=$PREFIX
make
make install
|
gvlproject/bioconda-recipes
|
recipes/libbambamc/0.5.00/build.sh
|
Shell
|
mit
| 126 |