code (stringlengths 2–1.05M) | repo_name (stringlengths 5–110) | path (stringlengths 3–922) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 2–1.05M) |
---|---|---|---|---|---|
#!/bin/bash
make
#for i in 1 2 3 4 5 6 7 8 9 10 11 12; do
i=1
while [ "$i" -ne 41 ]; do
#for i in 9 10 11 12; do
    for j in 1 2 3 4; do
        echo "$i" "$j"
        echo "Hello" | ./qrencode "$i" "$j" > "Hello$i$j.pbm"
        diff -q "Hello$i$j.pbm" regout/
    done
    i=$((i+1))
done
echo
#md5sum Hello??.pbm base/Hello??.pbm | sort | uniq -c -w32 | sort -nr
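# Usage sketch (hedged): regout/ is assumed to hold golden Hello<version><eclevel>.pbm
# files; the diff above compares each fresh render against its golden twin, e.g.:
#   echo "Hello" | ./qrencode 1 1 > Hello11.pbm
#   diff -q Hello11.pbm regout/Hello11.pbm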
|
tz1/qrduino
|
regtest.sh
|
Shell
|
gpl-3.0
| 351 |
#!/bin/sh
mkdir -p "$HOME/gnupg_"
tar -jxvf gnupg-1.4.22.tar.bz2
cd gnupg-1.4.22/
./configure --prefix=$HOME/gnupg_
make -j $NUM_CPU_JOBS
echo $? > ~/install-exit-status
make install
cd ..
rm -rf gnupg-1.4.22/
rm -rf gnupg_/share/
echo pts-1234567890 > passphrase
echo "#!/bin/sh
./gnupg_/bin/gpg -c --no-options --passphrase-file passphrase -o /dev/null encryptfile 2>&1" > gnupg
chmod +x gnupg
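# Hedged usage sketch: the test harness is expected to provide 'encryptfile'
# and time the wrapper; the file size below is illustrative, not from this profile:
#   dd if=/dev/urandom of=encryptfile bs=1M count=256
#   time ./gnupg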
|
phoronix-test-suite/phoronix-test-suite
|
ob-cache/test-profiles/pts/gnupg-2.4.0/install.sh
|
Shell
|
gpl-3.0
| 395 |
#!/usr/bin/env bash
# This file is part of The RetroPie Project
#
# The RetroPie Project is the legal property of its developers, whose names are
# too numerous to list here. Please refer to the COPYRIGHT.md file distributed with this source.
#
# See the LICENSE.md file at the top-level directory of this distribution and
# at https://raw.githubusercontent.com/RetroPie/RetroPie-Setup/master/LICENSE.md
#
rp_module_id="cgenius"
rp_module_desc="Commander Genius - Modern Interpreter for the Commander Keen Games (Vorticon and Galaxy Games)"
rp_module_licence="GPL2 https://raw.githubusercontent.com/gerstrong/Commander-Genius/master/COPYRIGHT"
rp_module_section="exp"
function depends_cgenius() {
getDepends build-essential libvorbis-dev libogg-dev libsdl2-dev libsdl2-image-dev libboost-dev
}
function sources_cgenius() {
wget -O- -q "https://github.com/gerstrong/Commander-Genius/archive/v180release.tar.gz" | tar -xvz --strip-components=1 -C "$md_build"
}
function build_cgenius() {
cd "$md_build"
cmake . -DUSE_SDL2=yes -DCMAKE_INSTALL_PREFIX="$md_inst"
make
md_ret_require="$md_build"
}
function install_cgenius() {
md_ret_files=(
'hqp'
'vfsroot/games'
'src/Build/LINUX/CGeniusExe'
)
}
function configure_cgenius() {
addPort "$md_id" "cgenius" "Commander Genius" "pushd $md_inst; ./CGeniusExe; popd"
mkRomDir "ports/$md_id"
moveConfigDir "$home/.CommanderGenius" "$md_conf_root/$md_id"
mv "$md_inst/games" "$romdir/ports/$md_id/"
mv "$md_inst/hqp" "$romdir/ports/$md_id/"
ln -snf "$romdir/ports/$md_id/games" "$md_inst"
chown -R $user:$user "$romdir/ports/$md_id"
}
|
superjamie/RetroPie-Setup
|
scriptmodules/ports/cgenius.sh
|
Shell
|
gpl-3.0
| 1,664 |
#!/bin/bash
mode=$1
clientname=$2
if [ -z "$mode" ] ; then
echo "Usage: $0 newserver|newclient [clientname]"
exit 1
fi
if [ "$mode" == "newserver" ] ; then
# Note: the EC key below is what is actually used; an RSA key generated here
# would be overwritten immediately.
#openssl genrsa -out server.key 2048
openssl ecparam -genkey -name secp384r1 -out server.key
openssl req -new -x509 -sha256 -key server.key -out server.crt -days 3650
fi
if [ "$mode" == "newclient" ] ; then
if [ -z "$clientname" ] ; then
echo "Please provide client name"
exit 1
fi
openssl genrsa -out "$clientname".key 2048
yes "" | head -n 9 | \
openssl req -new -key "$clientname".key -out "$clientname".csr
# Sign the csr
openssl x509 -req -days 3650 -in "$clientname".csr -CA server.crt -CAkey server.key \
-CAcreateserial -CAserial ca.seq -out "$clientname".crt
fi
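# Hedged verification sketch ("client1" is an illustrative client name):
#   ./generate-keys.sh newserver
#   ./generate-keys.sh newclient client1
#   openssl verify -CAfile server.crt client1.crt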
|
co60ca/beaconpi
|
etc/x509/generate-keys.sh
|
Shell
|
gpl-3.0
| 762 |
#!/bin/bash
# Blink PCB LED upon scanning
# gcc -Wall -o greenLight greenLight.c -lwiringPi
# sudo ./greenLight
# Server Connection
HOST='*********'
USER='*********'
PASSWD='*******'
FILE='entry.csv'
# Vars
datamtx=''
# Current Working directory to var
cwd=$(pwd)
# tmp data-matrix holder
tmp="$cwd/zbartmp"
# Name of scan results file
ScanResult="$cwd/entry.csv"
# Name of temp file
Temp="$cwd/tmp.txt"
function scan()
{
rm -f "$ScanResult" "$Temp"
clear
zbarcam --raw --prescale=320x240 /dev/video0 > $tmp &
# Last job running in background eg. zbarcam
pid=$!
# cleanup - a trap that removes $tmp and kills zbarcam if any of
# the signals SIGHUP SIGINT SIGTERM is received.
trap "rm -f $tmp; kill -s 9 $pid; exit" SIGHUP SIGINT SIGTERM
# Sleep loop until $tmp file has content
while [[ ! -s $tmp ]]
do
sleep 1
done
# Kill tasks, free up space and call test.py to blink L.E.D.
kill -s 9 $pid
# python test.py
datamtx=$(cat $tmp)
rm -f $tmp
# Append scan results to file
echo $datamtx >> $ScanResult
LastScanned=`cat $ScanResult`
# Search for appointments
clear
echo -e "\nSearching for appointments for" $LastScanned "..."
ftp -n $HOST <<END_SCRIPT
quote USER $USER
quote PASS $PASSWD
put $FILE
quit
END_SCRIPT
curl www.***********/***.php -# -o $Temp | tail -n 1
echo ""
# Fetch results from the server
Result=`awk '{print $1}' $Temp | tail -n 1`
while :
do
if [ "$Result" == "Success!" ]||[ "$Result" == "Denied!" ];
then
cat $Temp | tail -n 1
sleep 5
echo -e "\n\nAuto-refreshing in 10 seconds"
sleep 8
scan
else echo -e "\nError estashblishing connection with the server."
exit
fi
done
exit
}
# Call the scan function
scan
|
mndk0076/mndk0076.github.io
|
Final LabFlow (safe).sh
|
Shell
|
agpl-3.0
| 1,724 |
#!/bin/bash
python3 manage.py collectstatic --noinput
python3 manage.py migrate --noinput
if [[ "${DJANGO_DEBUG}" == "1" ]]; then
echo "Starting debug server..."
python3 manage.py runserver 0.0.0.0:8000
else
echo "Starting uwsgi server..."
uwsgi --ini uwsgi.ini
fi
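# Hedged usage sketch: DJANGO_DEBUG is assumed to be injected by the container
# environment; anything other than "1" falls through to uwsgi:
#   DJANGO_DEBUG=1 ./run_django.sh   # Django dev server on 0.0.0.0:8000
#   ./run_django.sh                  # production path via uwsgi.ini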
|
coddingtonbear/inthe.am
|
run_django.sh
|
Shell
|
agpl-3.0
| 281 |
#!/bin/sh
if [ $# -eq 0 ]; then
echo "Finds the millisecond stuff for a smil/mp3 combo in a daisy book"
echo " Usage maketestdata.sh path-to-file"
exit
fi
if [ ! -f "$1".smil ]; then
echo "$1.smil does not exist"
exit 1
fi
if [ ! -f "$1".mp3 ]; then
echo "$1.mp3 does not exist"
exit 1
fi
for a in `cat $1.smil |grep clip-begin | cut -c45-82 | sed -e 's/clip-end=\"npt=//g' | sed -e 's/[\.\"s]//g' | sed -e 's/id//g' | sed -e 's/[=i]//g' | sed -e 's/ /,/g' `; do
echo "urls.push_back(playitem(\"$1.mp3\",$a ));" | sed -e 's/, //g'
done
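# Hedged example ("chapter1" and the timestamps are illustrative): running
#   ./maketestdata.sh chapter1
# is expected to emit C++ lines of the form
#   urls.push_back(playitem("chapter1.mp3",0,2340));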
|
kolibre/libkolibre-player
|
tests/scripts/maketestdata.sh
|
Shell
|
lgpl-2.1
| 531 |
#!/bin/sh
cd ..
python3 -m src.hingechat.main --relay 127.0.0.1 --port 34484
|
HingeChat/HingeChat
|
darwin/client.sh
|
Shell
|
lgpl-3.0
| 77 |
#!/bin/sh
cd deployment
deploy.py oncasu_two_arena_xinhib_cross_between.assisi
cd /home/rmm/repos/gh/rmm-assisi-workshops/2015_graz/binary_choice/two_arenas_real_real
for f in casu_utils.py loggers.py parsing.py two-real-with-cross.conf xinhib_heaters.py; do
scp ${f} assisi@casu-001:deploy/sym_breaking_arena/casu-001
scp ${f} assisi@casu-003:deploy/sym_breaking_arena/casu-003
scp ${f} assisi@casu-004:deploy/sym_breaking_arena/casu-004
scp ${f} assisi@casu-006:deploy/sym_breaking_arena/casu-006
done
cd -
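# Equivalent host-loop sketch (same four CASUs as above):
#   for h in casu-001 casu-003 casu-004 casu-006; do
#       scp "${f}" "assisi@${h}:deploy/sym_breaking_arena/${h}"
#   done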
|
rmm-fcul/workshops
|
2015_graz/binary_choice/two_arenas_real_real/_deploy_two_real_xinhib.sh
|
Shell
|
lgpl-3.0
| 528 |
#!/bin/zsh
source ~/.credentials.sh
source teamcity_env
DB_CONFIG_FILE=${TEAMCITY_BIN_PATH}/conf/database.properties
AGENT_DB_CONFIG_FILE=${TEAMCITY_AGENT_PATH}/conf/database.properties
echo 'connectionUrl=jdbc:mysql://127.0.0.1/teamcity' > ${AGENT_DB_CONFIG_FILE}
echo 'connectionProperties.user=tcuser' >> ${AGENT_DB_CONFIG_FILE}
echo 'connectionProperties.password='${TEAMCITY_PWD} >> ${AGENT_DB_CONFIG_FILE}
echo 'connectionUrl=jdbc:mysql://127.0.0.1/teamcity' > ${DB_CONFIG_FILE}
echo 'connectionProperties.user=tcuser' >> ${DB_CONFIG_FILE}
echo 'connectionProperties.password='${TEAMCITY_PWD} >> ${DB_CONFIG_FILE}
|
stormleoxia/leoxiadotcom
|
scripts/teamcity/config_db.sh
|
Shell
|
lgpl-3.0
| 631 |
#!/bin/bash
pause(){
read -p "Press [Enter] key to continue..." fackEnterKey
}
build(){
./gradlew jar
pause
}
solveHSP(){
read -p "Enter domain file [path to the file]: " domainFile
read -p "Enter problem file [path to the file]: " problemFile
read -p "Timeout [int]: " timeOut
show_heuristic
read -p "Choose heuristic [0 - 8]: " heuristic
java -javaagent:build/libs/pddl4j-3.8.3.jar -server -Xms2048m -Xmx2048m fr.uga.pddl4j.planners.statespace.StateSpacePlannerFactory -p 0 -o $domainFile -f $problemFile -t $timeOut -u $heuristic
pause
}
solveFF(){
read -p "Enter domain file [path to the file]: " domainFile
read -p "Enter problem file [path to the file]: " problemFile
read -p "Timeout [int]: " timeOut
show_heuristic
read -p "Choose heuristic [0 - 8]: " heuristic
java -javaagent:build/libs/pddl4j-3.8.3.jar -server -Xms2048m -Xmx2048m fr.uga.pddl4j.planners.statespace.StateSpacePlannerFactory -p 1 -o $domainFile -f $problemFile -t $timeOut -u $heuristic
pause
}
about() {
show_title
echo ""
echo "PDDL4J is an open source library under LGPL license."
echo ""
echo "The purpose of PDDL4J is to facilitate the development of JAVA tools for Automated Planning based on PDDL language (Planning Domain Description Language). Automated planning and scheduling, in the relevant literature often denoted as simply planning, is a branch of artificial intelligence that concerns the realization of strategies or action sequences, typically for execution by intelligent agents, autonomous robots and unmanned vehicles."
echo ""
echo "PDDL was originally developed by Drew McDermott and the 1998 planning competition committee. It was inspired by the need to encourage the empirical comparison of planning systems and the exchange of planning benchmarks within the community. Its development improved the communication of research results and triggered an explosion in performance, expressivity and robustness of planning systems."
echo ""
echo "PDDL has become a de facto standard language for describing planning domains, not only for the competition but more widely, as it offers an opportunity to carry out empirical evaluation of planning systems on a growing collection of generally adopted standard benchmark domains. The emergence of a language standard will have an impact on the entire field, influencing what is seen as central and what peripheral in the development of planning systems."
pause
}
clean() {
./gradlew clean
pause
}
show_title() {
clear
echo " _____ ____ ____ __ ___ __ "
echo "| _ | \| \| | | | |__| |"
echo "| __| | | | | |__|_ | | |"
echo "|__| |____/|____/|_____| |_|_____|"
}
show_status() {
echo "|"
echo "| PDDL4J `awk '/^version/' build.gradle`"
if [ -d build/libs ]; then
echo -e "| PDDL4J status [\e[92mBuilt\e[39m]"
echo "|"
echo " ----------"
else
echo -e "| PDDL4J status [\e[91mNot built\e[39m]"
echo "|"
echo " ----------"
fi
}
show_menus_l() {
echo "| 0. Build jar"
echo "| ..."
echo "| 4. About"
echo "| 5. Exit"
echo " ----------"
}
read_options_l(){
local choice
read -p "Enter choice [0 - 5] : " choice
case $choice in
0) build ;;
4) about ;;
5) exit 0;;
*) echo "Error..." && sleep 1
esac
}
show_menus() {
echo "| 0. Build jar"
echo "| 1. Solve with HSP"
echo "| 2. Solve with FF"
echo "| 3. Clean"
echo "| 4. About"
echo "| 5. Exit"
echo " ----------"
}
show_heuristic() {
echo "0. ff heuristic"
echo "1. sum heuristic"
echo "2. sum mutex heuristic"
echo "3. djusted sum heuristic"
echo "4. adjusted sum 2 heuristic"
echo "5. adjusted sum 2M heuristic"
echo "6. combo heuristic"
echo "7. max heuristic"
echo "8. set-level heuristic"
}
read_options(){
local choice
read -p "Enter choice [0 - 5] : " choice
case $choice in
0) build ;;
1) solveHSP ;;
2) solveFF ;;
3) clean ;;
4) about ;;
5) exit 0;;
*) echo "Error..." && sleep 1
esac
}
# ----------------------------------------------
# Trap CTRL+C, CTRL+Z and quit signals
# ----------------------------------------------
trap '' SIGINT SIGQUIT SIGTSTP
# -----------------------------------
# Main logic - infinite loop
# ------------------------------------
while true
do
show_title
show_status
if [ -d build/libs ]; then
show_menus
read_options
else
show_menus_l
read_options_l
fi
done
|
pellierd/pddl4j
|
pddl4j.sh
|
Shell
|
lgpl-3.0
| 4,481 |
#!/bin/bash
source /root/bin/setup.conf
mkdir -p $WEBROOT/lib
echo "<?php
// Import the necessary classes
use Cartalyst\Sentinel\Native\Facades\Sentinel;
use Illuminate\Database\Capsule\Manager as Capsule;
// Include the composer autoload file
require '$WEBROOT/vendor/autoload.php';
// Setup a new Eloquent Capsule instance
\$capsule = new Capsule;
\$capsule->addConnection([
'driver' => 'mysql',
'host' => 'localhost',
'database' => '$SENTINELDB',
'username' => '$DEFAULTSITEDBUSER',
'password' => '$DEFAULTSITEDBPASSWORD',
'charset' => 'utf8',
'collation' => 'utf8_unicode_ci',
]);
\$capsule->bootEloquent();
?>" > "$WEBROOT/lib/defaultDbConn.php"
echo "<?php
require $WEBROOT/lib/defaultDbConn.php;
class userRegistration {
$email;
$password;
$error;
$errorType;
$errorDescription;
public function registerUser(dirtyEmail, dirtyPassword) {
this.email=saniVali::validateEmail(email);
this.email=saniVali::sanitizePassword(password);
if (isset(this.email) {
// Register a new user
}
else {
Sentinel::register([
'email' => '[email protected]',
'password' => 'foobar',
]);
}
|
bradchesney79/2015DebianJessieWebserverProject
|
scripts/implement-sentinel.sh
|
Shell
|
unlicense
| 1,182 |
#!/bin/sh
sed -i 's/\/\/ #define DUMB_LOGGING_NO_MACROS/#define DUMB_LOGGING_NO_MACROS/' logging.h
sed -i 's/\/\/ #define DUMB_LOGGING_FUNCTIONS/#define DUMB_LOGGING_FUNCTIONS/' logging.h
|
sirnuke/dumb-c-logging
|
tests/only-functions/setup.sh
|
Shell
|
unlicense
| 188 |
#!/bin/sh
#
#
irb "hello world"
|
keenes/rubycrown
|
.install.sh
|
Shell
|
unlicense
| 44 |
#!/bin/bash
#
#
#Author : tensorchen
#Time   : 2016-05-15
#
read -p "Please input your first name: " firstname
read -p "Please input your last name: " lastname
echo -e "\nYour full name is: $firstname $lastname"
|
tensorchen/language-lab
|
shell/study/output_your_name.sh
|
Shell
|
apache-2.0
| 223 |
function open_url () {
cmd //c start "${@//&/^&}"
}
function one_ping () {
ping -n 1 $1
}
function credential_helper () {
echo wincred
}
function is_admin() {
net session > /dev/null 2>&1
}
function install_git_lfs () {
local KIT_PATH=$1
local VERSION=$2
export GIT_LFS_INSTALLER_LIB="$KIT_PATH/install-helper.ps1"
export GIT_LFS_INSTALLER_URL="https://github.com/git-lfs/git-lfs/releases/download/v$VERSION/git-lfs-windows-$VERSION.exe"
export GIT_LFS_INSTALLER_SHA256='f11ee43eae6ae33c258418e6e4ee221eb87d2e98955c498f572efa7b607f9f9b'
# Previous versions of this installer installed Git LFS into the wrong
# directory. The current installer wouldn't update these files. If they
# are earlier in the $PATH then Git would always find an outdated Git LFS
# binary.
rm -f /cmd/git-lfs.exe
powershell -InputFormat None -ExecutionPolicy Bypass -File "$KIT_PATH/lib/win/install-git-lfs.ps1"
check_git_lfs no-install
}
function install_git () {
local USERNAME=$1
local TOKEN=$2
warning 'The upgrade will close all your git-bash windows.'
read -n 1 -s -r -p "Press any key to continue"
INSTALLBAT=$(mktemp -t "git-install-XXXXXXX.bat")
cp "$KIT_PATH/install.bat" "$INSTALLBAT"
# remove the first two arguments from the arguments array so that they
# can be re-arranged.
shift 2
start "" "$INSTALLBAT" -password $TOKEN -username $USERNAME "$@"
}
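# Hedged usage sketch: these helpers are sourced, not executed; e.g. (URL is
# illustrative):
#   source lib/win/setup_helpers.sh
#   open_url "https://example.com/?a=1&b=2"   # ^-escapes & for cmd.exe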
|
Autodesk/enterprise-config-for-git
|
lib/win/setup_helpers.sh
|
Shell
|
apache-2.0
| 1,449 |
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
usage() {
echo "
usage: $0 <options>
Required not-so-options:
--distro-dir=DIR path to distro specific files (debian/RPM)
--build-dir=DIR path to dist dir
--prefix=PREFIX path to install into
Optional options:
--doc-dir=DIR path to install docs into [/usr/share/doc/solr]
--lib-dir=DIR path to install bits [/usr/lib/solr]
--installed-lib-dir=DIR path where lib-dir will end up on target system
--bin-dir=DIR path to install bins [/usr/bin]
--examples-dir=DIR path to install examples [doc-dir/examples]
... [ see source for more similar options ]
"
exit 1
}
OPTS=$(getopt \
-n $0 \
-o '' \
-l 'prefix:' \
-l 'distro-dir:' \
-l 'doc-dir:' \
-l 'lib-dir:' \
-l 'installed-lib-dir:' \
-l 'bin-dir:' \
-l 'examples-dir:' \
-l 'build-dir:' -- "$@")
if [ $? != 0 ] ; then
usage
fi
eval set -- "$OPTS"
while true ; do
case "$1" in
--prefix)
PREFIX=$2 ; shift 2
;;
--distro-dir)
DISTRO_DIR=$2 ; shift 2
;;
--build-dir)
BUILD_DIR=$2 ; shift 2
;;
--doc-dir)
DOC_DIR=$2 ; shift 2
;;
--lib-dir)
LIB_DIR=$2 ; shift 2
;;
--installed-lib-dir)
INSTALLED_LIB_DIR=$2 ; shift 2
;;
--bin-dir)
BIN_DIR=$2 ; shift 2
;;
--examples-dir)
EXAMPLES_DIR=$2 ; shift 2
;;
--)
shift ; break
;;
*)
echo "Unknown option: $1"
usage
exit 1
;;
esac
done
for var in PREFIX BUILD_DIR DISTRO_DIR ; do
if [ -z "$(eval "echo \$$var")" ]; then
echo Missing param: $var
usage
fi
done
MAN_DIR=${MAN_DIR:-/usr/share/man/man1}
DOC_DIR=${DOC_DIR:-/usr/share/doc/solr}
LIB_DIR=${LIB_DIR:-/usr/lib/solr}
INSTALLED_LIB_DIR=${INSTALLED_LIB_DIR:-/usr/lib/solr}
EXAMPLES_DIR=${EXAMPLES_DIR:-$DOC_DIR/examples}
BIN_DIR=${BIN_DIR:-/usr/bin}
CONF_DIR=${CONF_DIR:-/etc/solr/conf}
DEFAULT_DIR=${ETC_DIR:-/etc/default}
VAR_DIR=$PREFIX/var
install -d -m 0755 $PREFIX/$LIB_DIR
cp -ra ${BUILD_DIR}/dist $PREFIX/$LIB_DIR/lib
# create symlink as configsets and post script reference 'dist' dir
ln -s $LIB_DIR/lib $PREFIX/$LIB_DIR/dist
install -d -m 0755 $PREFIX/$LIB_DIR/contrib
cp -ra ${BUILD_DIR}/contrib/* $PREFIX/$LIB_DIR/contrib
install -d -m 0755 $PREFIX/$LIB_DIR/server
cp -ra ${BUILD_DIR}/server/* $PREFIX/$LIB_DIR/server
install -d -m 0755 $PREFIX/$LIB_DIR/bin
cp -a ${BUILD_DIR}/bin/solr $PREFIX/$LIB_DIR/bin
cp -a ${BUILD_DIR}/bin/post $PREFIX/$LIB_DIR/bin
cp -a ${BUILD_DIR}/bin/oom_solr.sh $PREFIX/$LIB_DIR/bin
#cp -a ${BUILD_DIR}/server/scripts/cloud-scripts/*.sh $PREFIX/$LIB_DIR/bin
#cp -a $DISTRO_DIR/zkcli.sh $PREFIX/$LIB_DIR/bin
chmod 755 $PREFIX/$LIB_DIR/bin/*
install -d -m 0755 $PREFIX/$LIB_DIR/licenses
cp -a ${BUILD_DIR}/licenses/* $PREFIX/$LIB_DIR/licenses
install -d -m 0755 $PREFIX/$DOC_DIR
cp -a ${BUILD_DIR}/*.txt $PREFIX/$DOC_DIR
cp -ra ${BUILD_DIR}/docs/* $PREFIX/$DOC_DIR
cp -ra ${BUILD_DIR}/example/ $PREFIX/$DOC_DIR/
# Copy in the configuration files
install -d -m 0755 $PREFIX/$DEFAULT_DIR
cp $DISTRO_DIR/solr.default $PREFIX/$DEFAULT_DIR/solr
install -d -m 0755 $PREFIX/${CONF_DIR}.dist
#cp -a ${BUILD_DIR}/server/resources/log4j.properties $PREFIX/${CONF_DIR}.dist
cp -a ${BUILD_DIR}/server/resources/* $PREFIX/${CONF_DIR}.dist
# Copy in the wrapper
cp -a ${DISTRO_DIR}/solrd $PREFIX/$LIB_DIR/bin/solrd
chmod 755 $PREFIX/$LIB_DIR/bin/solrd
# installing the only script that goes into /usr/bin
install -D -m 0755 $DISTRO_DIR/solrctl.sh $PREFIX/usr/bin/solrctl
# precreating /var layout
install -d -m 0755 $VAR_DIR/log/solr
install -d -m 0755 $VAR_DIR/run/solr
install -d -m 0755 $VAR_DIR/lib/solr
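# Hedged invocation sketch (paths are illustrative):
#   ./install_solr.sh --build-dir=/tmp/solr-build \
#                     --distro-dir=./src/common/solr \
#                     --prefix=/tmp/pkgroot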
|
arenadata/bigtop
|
bigtop-packages/src/common/solr/install_solr.sh
|
Shell
|
apache-2.0
| 4,637 |
#!/bin/bash
set -e
set -x
DIR=$(realpath $(dirname "$0"))/../
MANIFESTDIR=$(realpath "$DIR")/manifest
CHARTS="fission-all"
source $(realpath "${DIR}"/test/init_tools.sh)
doit() {
echo "! $*"
"$@"
}
check_charts_repo() {
local chartsrepo=$1
if [ ! -d "$chartsrepo" ]; then
echo "Error finding chart repo at $chartsrepo"
exit 1
fi
echo "check_charts_repo == PASSED"
}
update_chart_version() {
pushd "$DIR"/charts
local version=$1
for c in $CHARTS; do
sed -i "s/^version.*/version\: ${version}/" $c/Chart.yaml
sed -i "s/appVersion.*/appVersion\: ${version}/" $c/Chart.yaml
sed -i "s/\bimageTag:.*/imageTag\: ${version}/" $c/values.yaml
done
popd
}
lint_charts() {
pushd "$DIR"/charts
for c in $CHARTS; do
doit helm lint $c
if [ $? -ne 0 ]; then
echo "helm lint failed"
exit 1
fi
done
popd
}
build_charts() {
mkdir -p "$MANIFESTDIR"/charts
pushd "$DIR"/charts
find . -iname '*.~?~' | xargs -r rm
for c in $CHARTS; do
doit helm package -u $c/
mv ./*.tgz "$MANIFESTDIR"/charts/
done
popd
}
build_yamls() {
local version=$1
mkdir -p "${MANIFESTDIR}"/yamls
pushd "${DIR}"/charts
find . -iname '*.~?~' | xargs -r rm
releaseName=fission-$(echo "${version}" | sed 's/\./-/g')
for c in $CHARTS; do
# fetch dependencies
pushd ${c}
doit helm dependency update
popd
echo "Release name", "$releaseName"
cmdprefix="helm template ${releaseName} ${c} --namespace fission --validate"
# for minikube and other environments that don't support LoadBalancer
command="$cmdprefix --set analytics=false,analyticsNonHelmInstall=true,serviceType=NodePort,routerServiceType=NodePort"
echo "$command"
$command >${c}-"${version}"-minikube.yaml
# for environments that support LoadBalancer
command="$cmdprefix --set analytics=false,analyticsNonHelmInstall=true"
echo "$command"
$command >${c}-"${version}".yaml
# for OpenShift
command="$cmdprefix --set analytics=false,analyticsNonHelmInstall=true,logger.enableSecurityContext=true"
echo "$command"
$command >${c}-"${version}"-openshift.yaml
# copy yaml files to build directory
mv ./*.yaml "${MANIFESTDIR}"/yamls/
done
popd
}
update_github_charts_repo() {
local version=$1
local chartsrepo=$2
pushd "$chartsrepo"
for c in $CHARTS; do
cp "$MANIFESTDIR"/charts/$c-"${version}".tgz .
./index.sh
done
popd
}
version=$1
if [ -z "$version" ]; then
echo "Release version not mentioned"
exit 1
fi
echo "Current version for release: $version"
chartsrepo="$DIR../fission-charts"
check_charts_repo "$chartsrepo"
# Build manifests and charts
lint_charts
update_chart_version "$version"
lint_charts
build_yamls "$version"
build_charts
update_github_charts_repo "$version" "$chartsrepo"
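# Hedged invocation sketch (version string is illustrative); expects a
# fission-charts checkout next to this repo, as computed above:
#   hack/generate-helm-manifest.sh 1.4.0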
|
fission/fission
|
hack/generate-helm-manifest.sh
|
Shell
|
apache-2.0
| 3,039 |
#!/bin/bash
# This script compares two KAMs
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2011 Selventa, Inc. All rights reserved.
. $(dirname $0)/../setenv.sh
java $JAVA_OPTS -classpath $BELCOMPILER_CLASSPATH org.openbel.framework.tools.kamstore.KamComparator "$@"
|
OpenBEL/openbel-framework
|
org.openbel.framework.tools/tools/KamComparator.sh
|
Shell
|
apache-2.0
| 554 |
#!/bin/sh
# [email protected]
# 2014.03.22
#
PORT=<%= config.port %>
ENV=development
CONFIG="`pwd`/config.json"
LOGNAME="<%= config.logName %>"
cd app/
node app.js --env $ENV --configfile $CONFIG --logfile "${HOME}/logs/${LOGNAME}-dev.log"
|
darrylwest/platform-mvc
|
templates/node-server-standard/bin/run.sh
|
Shell
|
apache-2.0
| 244 |
#!/bin/bash
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
bin=`dirname "$0"`
bin=`cd "$bin"; pwd`
# get Flink config
. "$bin"/config.sh
if [ "$FLINK_IDENT_STRING" = "" ]; then
FLINK_IDENT_STRING="$USER"
fi
JVM_ARGS="$JVM_ARGS -Xmx512m"
# auxiliary function to construct a lightweight classpath for the
# Flink CLI client
constructCLIClientClassPath() {
for jarfile in $FLINK_LIB_DIR/*.jar ; do
if [[ $CC_CLASSPATH = "" ]]; then
CC_CLASSPATH=$jarfile;
else
CC_CLASSPATH=$CC_CLASSPATH:$jarfile
fi
done
echo $CC_CLASSPATH
}
CC_CLASSPATH=`manglePathList $(constructCLIClientClassPath)`
log=$FLINK_LOG_DIR/flink-$FLINK_IDENT_STRING-yarn-session-$HOSTNAME.log
log_setting="-Dlog.file="$log" -Dlog4j.configuration=file:"$FLINK_CONF_DIR"/log4j-yarn-session.properties -Dlogback.configurationFile=file:"$FLINK_CONF_DIR"/logback-yarn.xml"
export FLINK_CONF_DIR
$JAVA_RUN $JVM_ARGS -classpath $CC_CLASSPATH $log_setting org.apache.flink.yarn.Client -ship $bin/../ship/ -confDir $FLINK_CONF_DIR -j $FLINK_LIB_DIR/*yarn-uberjar.jar $*
|
citlab/vs.msc.ws14
|
flink-0-7-custom/flink-dist/src/main/flink-bin/yarn-bin/yarn-session.sh
|
Shell
|
apache-2.0
| 1,971 |
#!/bin/bash
echo ""
echo "The run_tests script is deprecated and will be removed in the "
echo "Queens Release Cycle (13.0), in favor of tox."
echo ""
set -o errexit
function usage {
echo "Usage: $0 [OPTION]..."
echo "Run Horizon's test suite(s)"
echo ""
echo " -V, --virtual-env Always use virtualenv. Install automatically"
echo " if not present"
echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local"
echo " environment"
echo " -c, --coverage Generate reports using Coverage"
echo " -f, --force Force a clean re-build of the virtual"
echo " environment. Useful when dependencies have"
echo " been added."
echo " -m, --manage Run a Django management command."
echo " --makemessages Create/Update English translation files."
echo " --compilemessages Compile all translation files."
echo " --check-only Do not update translation files (--makemessages only)."
echo " --pseudo Pseudo translate a language."
echo " -p, --pep8 Just run pep8"
echo " -8, --pep8-changed [<basecommit>]"
echo " Just run PEP8 and HACKING compliance check"
echo " on files changed since HEAD~1 (or <basecommit>)"
echo " -P, --no-pep8 Don't run pep8 by default"
echo " -t, --tabs Check for tab characters in files."
echo " -y, --pylint Just run pylint"
echo " -e, --eslint Just run eslint"
echo " -k, --karma Just run karma"
echo " -q, --quiet Run non-interactively. (Relatively) quiet."
echo " Implies -V if -N is not set."
echo " --only-selenium Run only the Selenium unit tests"
echo " --with-selenium Run unit tests including Selenium tests"
echo " --selenium-headless Run Selenium tests headless"
echo " --selenium-phantomjs Run Selenium tests using phantomjs (headless)"
echo " --integration Run the integration tests (requires a running "
echo " OpenStack environment)"
echo " --runserver Run the Django development server for"
echo " openstack_dashboard in the virtual"
echo " environment."
echo " --docs Just build the documentation"
echo " --backup-environment Make a backup of the environment on exit"
echo " --restore-environment Restore the environment before running"
echo " --destroy-environment Destroy the environment and exit"
echo " -h, --help Print this usage message"
echo ""
echo "Note: with no options specified, the script will try to run the tests in"
echo " a virtual environment, If no virtualenv is found, the script will ask"
echo " if you would like to create one. If you prefer to run tests NOT in a"
echo " virtual environment, simply pass the -N option."
exit
}
# DEFAULTS FOR RUN_TESTS.SH
#
root=`pwd -P`
venv=$root/.venv
with_venv=tools/with_venv.sh
included_dirs="openstack_dashboard horizon"
always_venv=0
backup_env=0
command_wrapper=""
destroy=0
force=0
just_pep8=0
just_pep8_changed=0
no_pep8=0
just_pylint=0
just_docs=0
just_tabs=0
just_eslint=0
just_karma=0
never_venv=0
quiet=0
restore_env=0
runserver=0
only_selenium=0
with_selenium=0
selenium_headless=0
selenium_phantomjs=0
integration=0
testopts=""
testargs=""
with_coverage=0
makemessages=0
compilemessages=0
check_only=0
pseudo=0
manage=0
# NOTE(tonyb): the release team will automatically update tox.ini to point at
# the correct requirements branch when creating stable/* from master. So go to
# a little effort to get the default from there to avoid drift and having to
# update this when branching
_default_uc=$(sed -n 's/^.*{env:UPPER_CONSTRAINTS_FILE\:\([^}]*\)}.*$/\1/p' \
tox.ini | head -n1)
# Jenkins sets a "JOB_NAME" variable, if it's not set, we'll make it "default"
[ "$JOB_NAME" ] || JOB_NAME="default"
function process_option {
# If running manage command, treat the rest of options as arguments.
if [ $manage -eq 1 ]; then
testargs="$testargs $1"
return 0
fi
case "$1" in
-h|--help) usage;;
-V|--virtual-env) always_venv=1; never_venv=0;;
-N|--no-virtual-env) always_venv=0; never_venv=1;;
-p|--pep8) just_pep8=1;;
-8|--pep8-changed) just_pep8_changed=1;;
-P|--no-pep8) no_pep8=1;;
-y|--pylint) just_pylint=1;;
-e|--eslint) just_eslint=1;;
-k|--karma) just_karma=1;;
-f|--force) force=1;;
-t|--tabs) just_tabs=1;;
-q|--quiet) quiet=1;;
-c|--coverage) with_coverage=1;;
-m|--manage) manage=1;;
--makemessages) makemessages=1;;
--compilemessages) compilemessages=1;;
--check-only) check_only=1;;
--pseudo) pseudo=1;;
--only-selenium) only_selenium=1;;
--with-selenium) with_selenium=1;;
--selenium-headless) selenium_headless=1;;
--selenium-phantomjs) selenium_phantomjs=1;;
--integration) integration=1;;
--docs) just_docs=1;;
--runserver) runserver=1;;
--backup-environment) backup_env=1;;
--restore-environment) restore_env=1;;
--destroy-environment) destroy=1;;
-*) testopts="$testopts $1";;
*) testargs="$testargs $1"
esac
}
function run_management_command {
${command_wrapper} python $root/manage.py $testopts $testargs
}
function run_server {
echo "Starting Django development server..."
${command_wrapper} python $root/manage.py runserver $testopts $testargs
echo "Server stopped."
}
function run_pylint {
echo "Running pylint ..."
CODE=0
PYTHONPATH=$root ${command_wrapper} pylint --rcfile=.pylintrc -f parseable $included_dirs > pylint.txt || CODE=$?
grep Global -A2 pylint.txt
if [ $CODE -lt 32 ]; then
echo "Completed successfully."
exit 0
else
echo "Completed with problems."
exit $CODE
fi
}
function run_eslint {
echo "Running eslint ..."
if [ "`which npm`" == '' ] ; then
echo "npm is not present; please install, e.g. sudo apt-get install npm"
else
npm install
npm run lint
fi
}
function run_karma {
echo "Running karma ..."
npm install
npm run test
}
function warn_on_flake8_without_venv {
set +o errexit
${command_wrapper} python -c "import hacking" 2>/dev/null
no_hacking=$?
set -o errexit
if [ $never_venv -eq 1 -a $no_hacking -eq 1 ]; then
echo "**WARNING**:" >&2
echo "OpenStack hacking is not installed on your host. Its detection will be missed." >&2
echo "Please install or use virtual env if you need OpenStack hacking detection." >&2
fi
}
function run_pep8 {
echo "Running flake8 ..."
warn_on_flake8_without_venv
DJANGO_SETTINGS_MODULE=openstack_dashboard.test.settings ${command_wrapper} flake8
}
function run_pep8_changed {
# NOTE(gilliard) We want use flake8 to check the entirety of every file that has
# a change in it. Unfortunately the --filenames argument to flake8 only accepts
# file *names* and there are no files named (eg) "nova/compute/manager.py". The
# --diff argument behaves surprisingly as well, because although you feed it a
# diff, it actually checks the file on disk anyway.
local base_commit=${testargs:-HEAD~1}
files=$(git diff --name-only $base_commit | tr '\n' ' ')
echo "Running flake8 on ${files}"
warn_on_flake8_without_venv
diff -u --from-file /dev/null ${files} | DJANGO_SETTINGS_MODULE=openstack_dashboard.test.settings ${command_wrapper} flake8 --diff
exit
}
function run_sphinx {
echo "Building sphinx..."
DJANGO_SETTINGS_MODULE=openstack_dashboard.test.settings ${command_wrapper} python setup.py build_sphinx
echo "Build complete."
}
function tab_check {
TAB_VIOLATIONS=`find $included_dirs -type f -regex ".*\.\(css\|js\|py\|html\)" -print0 | xargs -0 awk '/\t/' | wc -l`
if [ $TAB_VIOLATIONS -gt 0 ]; then
echo "TABS! $TAB_VIOLATIONS of them! Oh no!"
HORIZON_FILES=`find $included_dirs -type f -regex ".*\.\(css\|js\|py\|html\)"`
for TABBED_FILE in $HORIZON_FILES
do
TAB_COUNT=`awk '/\t/' $TABBED_FILE | wc -l`
if [ $TAB_COUNT -gt 0 ]; then
echo "$TABBED_FILE: $TAB_COUNT"
fi
done
fi
return $TAB_VIOLATIONS;
}
function destroy_venv {
echo "Cleaning environment..."
echo "Removing virtualenv..."
rm -rf $venv
echo "Virtualenv removed."
}
function sanity_check {
# Anything that should be determined prior to running the tests, server, etc.
# Don't sanity-check anything environment-related if the -N flag is set
if [ $never_venv -eq 0 ]; then
if [ ! -e ${venv} ]; then
echo "Virtualenv not found at $venv. Did install_venv.py succeed?"
exit 1
fi
fi
# Remove .pyc files. This is sanity checking because they can linger
# after old files are deleted.
find . -name "*.pyc" -exec rm -rf {} \;
}
function backup_environment {
if [ $backup_env -eq 1 ]; then
echo "Backing up environment \"$JOB_NAME\"..."
if [ ! -e ${venv} ]; then
echo "Environment not installed. Cannot back up."
return 0
fi
if [ -d /tmp/.horizon_environment/$JOB_NAME ]; then
mv /tmp/.horizon_environment/$JOB_NAME /tmp/.horizon_environment/$JOB_NAME.old
rm -rf /tmp/.horizon_environment/$JOB_NAME
fi
mkdir -p /tmp/.horizon_environment/$JOB_NAME
cp -r $venv /tmp/.horizon_environment/$JOB_NAME/
# Remove the backup now that we've completed successfully
rm -rf /tmp/.horizon_environment/$JOB_NAME.old
echo "Backup completed"
fi
}
function restore_environment {
if [ $restore_env -eq 1 ]; then
echo "Restoring environment from backup..."
if [ ! -d /tmp/.horizon_environment/$JOB_NAME ]; then
echo "No backup to restore from."
return 0
fi
cp -r /tmp/.horizon_environment/$JOB_NAME/.venv ./ || true
echo "Environment restored successfully."
fi
}
function install_venv {
# Install with install_venv.py
export UPPER_CONSTRAINTS_FILE=${UPPER_CONSTRAINTS_FILE:-$_default_uc}
export PIP_DOWNLOAD_CACHE=${PIP_DOWNLOAD_CACHE-/tmp/.pip_download_cache}
export PIP_USE_MIRRORS=true
if [ $quiet -eq 1 ]; then
export PIP_NO_INPUT=true
fi
echo "Fetching new src packages..."
rm -rf $venv/src
python tools/install_venv.py
command_wrapper="$root/${with_venv}"
# Make sure it worked and record the environment version
sanity_check
chmod -R 754 $venv
}
function run_tests {
sanity_check
if [ $with_selenium -eq 1 ]; then
export WITH_SELENIUM=1
elif [ $only_selenium -eq 1 ]; then
export WITH_SELENIUM=1
export SKIP_UNITTESTS=1
fi
if [ $with_selenium -eq 0 -a $integration -eq 0 ]; then
testopts="$testopts --exclude-dir=openstack_dashboard/test/integration_tests"
fi
if [ $selenium_headless -eq 1 ]; then
export SELENIUM_HEADLESS=1
fi
if [ $selenium_phantomjs -eq 1 ]; then
export SELENIUM_PHANTOMJS=1
fi
if [ -z "$testargs" ]; then
run_tests_all
else
run_tests_subset
fi
}
function run_tests_subset {
project=`echo $testargs | awk -F. '{print $1}'`
${command_wrapper} python $root/manage.py test --settings=$project.test.settings $testopts $testargs
}
function run_tests_all {
echo "Running Horizon application tests"
export NOSE_XUNIT_FILE=horizon/nosetests.xml
if [ "$NOSE_WITH_HTML_OUTPUT" = '1' ]; then
export NOSE_HTML_OUT_FILE='horizon_nose_results.html'
fi
if [ $with_coverage -eq 1 ]; then
${command_wrapper} python -m coverage.__main__ erase
coverage_run="python -m coverage.__main__ run -p"
fi
${command_wrapper} ${coverage_run} $root/manage.py test horizon --settings=horizon.test.settings $testopts
# get results of the Horizon tests
HORIZON_RESULT=$?
echo "Running openstack_dashboard tests"
export NOSE_XUNIT_FILE=openstack_dashboard/nosetests.xml
if [ "$NOSE_WITH_HTML_OUTPUT" = '1' ]; then
export NOSE_HTML_OUT_FILE='dashboard_nose_results.html'
fi
${command_wrapper} ${coverage_run} $root/manage.py test openstack_dashboard --settings=openstack_dashboard.test.settings $testopts
# get results of the openstack_dashboard tests
DASHBOARD_RESULT=$?
if [ $with_coverage -eq 1 ]; then
echo "Generating coverage reports"
${command_wrapper} python -m coverage.__main__ combine
${command_wrapper} python -m coverage.__main__ xml -i --include="horizon/*,openstack_dashboard/*" --omit='/usr*,setup.py,*egg*,.venv/*'
${command_wrapper} python -m coverage.__main__ html -i --include="horizon/*,openstack_dashboard/*" --omit='/usr*,setup.py,*egg*,.venv/*' -d reports
fi
# Remove the leftover coverage files from the -p flag earlier.
rm -f .coverage.*
PEP8_RESULT=0
if [ $no_pep8 -eq 0 ] && [ $only_selenium -eq 0 ]; then
run_pep8
PEP8_RESULT=$?
fi
TEST_RESULT=$(($HORIZON_RESULT || $DASHBOARD_RESULT || $PEP8_RESULT))
if [ $TEST_RESULT -eq 0 ]; then
echo "Tests completed successfully."
else
echo "Tests failed."
fi
exit $TEST_RESULT
}
function run_integration_tests {
export INTEGRATION_TESTS=1
if [ $selenium_headless -eq 1 ]; then
export SELENIUM_HEADLESS=1
fi
if [ $selenium_phantomjs -eq 1 ]; then
export SELENIUM_PHANTOMJS=1
fi
echo "Running Horizon integration tests..."
if [ -z "$testargs" ]; then
${command_wrapper} nosetests openstack_dashboard/test/integration_tests/tests
else
${command_wrapper} nosetests $testargs
fi
exit 0
}
function babel_extract {
local MODULE_NAME=$1
local DOMAIN=$2
local KEYWORDS="-k gettext_noop -k gettext_lazy -k ngettext_lazy:1,2"
KEYWORDS+=" -k ugettext_noop -k ugettext_lazy -k ungettext_lazy:1,2"
KEYWORDS+=" -k npgettext:1c,2,3 -k pgettext_lazy:1c,2 -k npgettext_lazy:1c,2,3"
${command_wrapper} pybabel extract -F babel-${DOMAIN}.cfg \
--add-comments Translators: -o $MODULE_NAME/locale/${DOMAIN}.pot \
$KEYWORDS $MODULE_NAME
}
function run_makemessages {
echo -n "horizon: "
babel_extract horizon django
HORIZON_PY_RESULT=$?
echo -n "horizon javascript: "
babel_extract horizon djangojs
HORIZON_JS_RESULT=$?
echo -n "openstack_dashboard: "
babel_extract openstack_dashboard django
DASHBOARD_RESULT=$?
echo -n "openstack_dashboard javascript: "
babel_extract openstack_dashboard djangojs
DASHBOARD_JS_RESULT=$?
if [ $check_only -eq 1 ]; then
rm horizon/locale/django*.pot
rm openstack_dashboard/locale/django*.pot
fi
exit $(($HORIZON_PY_RESULT || $HORIZON_JS_RESULT || $DASHBOARD_RESULT || $DASHBOARD_JS_RESULT))
}
function run_compilemessages {
cd horizon
${command_wrapper} $root/manage.py compilemessages
HORIZON_PY_RESULT=$?
cd ../openstack_dashboard
${command_wrapper} $root/manage.py compilemessages
DASHBOARD_RESULT=$?
exit $(($HORIZON_PY_RESULT || $DASHBOARD_RESULT))
}
function run_pseudo {
for lang in $testargs
# Use English pot file as the source file/pot file just like real Horizon translations
do
${command_wrapper} $root/tools/pseudo.py openstack_dashboard/locale/django.pot openstack_dashboard/locale/$lang/LC_MESSAGES/django.po $lang
${command_wrapper} $root/tools/pseudo.py openstack_dashboard/locale/djangojs.pot openstack_dashboard/locale/$lang/LC_MESSAGES/djangojs.po $lang
${command_wrapper} $root/tools/pseudo.py horizon/locale/django.pot horizon/locale/$lang/LC_MESSAGES/django.po $lang
${command_wrapper} $root/tools/pseudo.py horizon/locale/djangojs.pot horizon/locale/$lang/LC_MESSAGES/djangojs.po $lang
done
exit $?
}
# ---------PREPARE THE ENVIRONMENT------------ #
# PROCESS ARGUMENTS, OVERRIDE DEFAULTS
for arg in "$@"; do
process_option $arg
done
if [ $quiet -eq 1 ] && [ $never_venv -eq 0 ] && [ $always_venv -eq 0 ]
then
always_venv=1
fi
# If destroy is set, just blow it away and exit.
if [ $destroy -eq 1 ]; then
destroy_venv
exit 0
fi
# Ignore all of this if the -N flag was set
if [ $never_venv -eq 0 ]; then
# Restore previous environment if desired
if [ $restore_env -eq 1 ]; then
restore_environment
fi
# Remove the virtual environment if --force used
if [ $force -eq 1 ]; then
destroy_venv
fi
# Create or update venv.
install_venv
# Create a backup of the up-to-date environment if desired
if [ $backup_env -eq 1 ]; then
backup_environment
fi
fi
# ---------EXERCISE THE CODE------------ #
# Run management commands
if [ $manage -eq 1 ]; then
run_management_command
exit $?
fi
# Build the docs
if [ $just_docs -eq 1 ]; then
run_sphinx
exit $?
fi
# Update translation files
if [ $makemessages -eq 1 ]; then
run_makemessages
exit $?
fi
# Compile translation files
if [ $compilemessages -eq 1 ]; then
run_compilemessages
exit $?
fi
# Generate Pseudo translation
if [ $pseudo -eq 1 ]; then
run_pseudo
exit $?
fi
# PEP8
if [ $just_pep8 -eq 1 ]; then
run_pep8
exit $?
fi
if [ $just_pep8_changed -eq 1 ]; then
run_pep8_changed
exit $?
fi
# Pylint
if [ $just_pylint -eq 1 ]; then
run_pylint
exit $?
fi
# ESLint
if [ $just_eslint -eq 1 ]; then
run_eslint
exit $?
fi
# Karma
if [ $just_karma -eq 1 ]; then
run_karma
exit $?
fi
# Tab checker
if [ $just_tabs -eq 1 ]; then
tab_check
exit $?
fi
# Integration tests
if [ $integration -eq 1 ]; then
run_integration_tests
exit $?
fi
# Django development server
if [ $runserver -eq 1 ]; then
run_server
exit $?
fi
# Full test suite
run_tests || exit
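# Usage examples drawn from the option list above:
#   ./run_tests.sh -V -p            # flake8 inside the virtualenv
#   ./run_tests.sh -N horizon.test  # subset run without a virtualenv
#   ./run_tests.sh -m migrate       # arbitrary manage.py command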
|
kogotko/carburetor
|
run_tests.sh
|
Shell
|
apache-2.0
| 17,636 |
#!/bin/bash
pushd ./base
./buildimage.sh
popd
pushd ./basenginx
./buildimage.sh
popd
pushd ./baseservice
./buildimage.sh
popd
|
antoinehage/soajs.buildImages
|
images/buildimages.sh
|
Shell
|
apache-2.0
| 128 |
#!/usr/bin/env bash
#-------------------------------------------------------------------
# test script for watchdog
source $TESTLIBS
TESTDIR=master
rm -fr $TESTDIR
mkdir $TESTDIR
cd $TESTDIR
# create master environment
echo -n "creating master pgpool..."
$PGPOOL_SETUP -m s -n 2 -p 11000|| exit 1
echo "done."
source ./bashrc.ports
cat ../master.conf >> etc/pgpool.conf
./startall
wait_for_pgpool_startup
cd ..
# create standby environment
sdir=standby
rm -fr $sdir
mkdir $sdir
cd $sdir
echo -n "creating standby pgpool..."
$PGPOOL_SETUP -m s -n 2 -p 11100|| exit 1
echo "done."
source ./bashrc.ports
cat ../standby.conf >> etc/pgpool.conf
egrep 'backend_data_directory0|backend_data_directory1|failover_command|follow_master_command' ../$TESTDIR/etc/pgpool.conf >> etc/pgpool.conf
./startall
wait_for_pgpool_startup
cd ..
# stop master pgpool and see if the standby takes over
$PGPOOL_INSTALL_DIR/bin/pgpool -f master/etc/pgpool.conf -m f stop
echo "Standby pgpool-II is detecting master went down and is escalating to master..."
for i in 1 2 3 4 5 6 7 8 9 10
do
RESULT=`grep "watchdog escalation successful, escalated to master pgpool" standby/log/pgpool.log`
if [ ! -z "$RESULT" ]; then
echo "Master escalation done."
break;
fi
sleep 1
done
cd master
./shutdownall
cd ../standby
./shutdownall
if [ -z "$RESULT" ]; then
exit 1
fi
exit 0
|
treasure-data/prestogres
|
src/test/regression/tests/004.watchdog/test.sh
|
Shell
|
apache-2.0
| 1,347 |
#!/bin/bash
set -x
set -e
if [ -z "$SOURCE_DIR" ] ; then
echo "Expected SOURCE_DIR in environment"
exit 1
fi
if [ -z "$BUILD_DIR" ] ; then
echo "Expected BUILD_DIR in environment"
exit 1
fi
if test -d $BUILD_DIR ; then
rm -rf $BUILD_DIR/*
fi
# SETUP development environment
yum groupinstall -y 'Development Tools'
yum install -y libpcap libpcap-devel \
java-1.7.0-openjdk java-1.7.0-openjdk-devel \
zlib-devel zlib \
jansson-devel
# NOTE: only needed when building ndt from svn-source
# pushd $SOURCE_DIR/I2util/
# ./bootstrap.sh
# ./configure --prefix=$BUILD_DIR/build
# make
# make install
# popd
# NOTE: unpacked from tar-archives by bootstrap.sh
pushd $SOURCE_DIR/web100_userland-1.8
./configure --prefix=$BUILD_DIR/build --disable-gtk2 --disable-gtktest
make
make install
popd
# NOTE: unpacked from tar-archives by bootstrap.sh
pushd $SOURCE_DIR/ndt
export CPPFLAGS="-I$BUILD_DIR/build/include -I$BUILD_DIR/build/include/web100"
export LDFLAGS="-L$BUILD_DIR/build/lib"
export LD_LIBRARY_PATH="$BUILD_DIR/build/lib"
export NDT_HOSTNAME="localhost"
./bootstrap
./configure --enable-fakewww --prefix=$BUILD_DIR/build
# Run unit tests on source before making and installing
make
make install
# The Node.js WebSocket tests in "make check" require these modules
pushd $SOURCE_DIR/ndt/src/node_tests
npm install [email protected] minimist
popd
make check || { echo "Unit testing of the source code failed." >&2; exit 1; }
# Applet gets remade if we do this before 'make install'
# NOTE: call helper script for signing jar
# NOTE: but, skip for now
while true; do
$SOURCE_DIR/init/signedpackage.sh $BUILD_DIR/build/ndt/Tcpbw100.jar
if [[ $? -eq 0 ]]; then
break
fi
echo "Opening a new shell so that you can sign a newly-produced jar, and/or investigate further."
echo "When you are done, simply exit the shell, and the package build process will proceed."
bash
done
popd
cp -r $SOURCE_DIR/init $BUILD_DIR/
cp $SOURCE_DIR/tcpbw100.html $BUILD_DIR/
cp $SOURCE_DIR/flashpolicy.xml $BUILD_DIR/
install -m 0755 $SOURCE_DIR/flashpolicyd.py $BUILD_DIR/
# NOTE: admin.html is automatically generated and should not be included.
rm -f $BUILD_DIR/build/ndt/admin.html
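# Hedged invocation sketch: both variables are required by the guards at the top.
#   SOURCE_DIR=$PWD BUILD_DIR=/tmp/ndt-build ./init/prepare.sh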
|
nkinkade/ndt-support
|
init/prepare.sh
|
Shell
|
apache-2.0
| 2,404 |
#!/bin/bash
# Copyright 2015 The QingYuan Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
export ELASTICSEARCH_URL=${ELASTICSEARCH_URL:-"http://localhost:9200"}
echo ELASTICSEARCH_URL=${ELASTICSEARCH_URL}
/kibana-4.0.2-linux-x64/bin/kibana -e ${ELASTICSEARCH_URL}
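# Hedged usage sketch: point the image at a non-local cluster by overriding the
# variable defaulted above (hostname illustrative):
#   ELASTICSEARCH_URL=http://elasticsearch:9200 ./run.sh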
|
qingyuancloud/qingyuan
|
cluster/addons/fluentd-elasticsearch/kibana-image/run.sh
|
Shell
|
apache-2.0
| 792 |
#!/bin/sh
etcdctl --peers http://etcd:4001 get /coreos.com/network/config
|
justasabc/kubernetes-ubuntu
|
ke/scripts/get_flanneld_network.sh
|
Shell
|
apache-2.0
| 73 |
#!/bin/sh
echo "hello, $BUILD1!"
|
gravitational/workshop
|
docker/busybox-arg/hello.sh
|
Shell
|
apache-2.0
| 35 |
#!/bin/bash
# sudo portion of npm package installations
echo "Running npm root installation"
ROOT_SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
. $ROOT_SCRIPT_DIR/rootcheck.sh
SCRIPT_DIR=$1
ME=$2
node-gyp -g install
# put yelp_uri in back to override downloaded version
for repo in ijavascript jupyter/configurable-http-proxy ; do
npm install -g $repo
done
|
linkmax91/bitquant
|
web/scripts/install-npm-sudo.sh
|
Shell
|
apache-2.0
| 376 |
#!/bin/bash
# At least a very basic sanity check on installation.
# We don't have a full unit test coverage. -bcg'17
(cd src/DOM141M && make)
./dominions.py install --version 141 /tmp/141
./dominions.py install --version 500 /tmp/500
./dominions.py install --version 2000 /tmp/2000
docker build --build-arg VERSION=141 -t dominions141 .
docker build --build-arg VERSION=500 -t dominions500 .
docker build --build-arg VERSION=2000 -t dominions2000 .
|
braddockcg/dominions
|
test.sh
|
Shell
|
apache-2.0
| 452 |
#!/bin/bash
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -eo pipefail
# Always run the cleanup script, regardless of the success of bouncing into
# the container.
function cleanup() {
chmod +x ${KOKORO_GFILE_DIR}/trampoline_cleanup.sh
${KOKORO_GFILE_DIR}/trampoline_cleanup.sh
echo "cleanup";
}
trap cleanup EXIT
$(dirname $0)/populate-secrets.sh # Secret Manager secrets.
$(dirname $0)/trampoline_v2.sh
|
googleapis/repo-automation-bots
|
.kokoro/trampoline.sh
|
Shell
|
apache-2.0
| 954 |
#!/bin/bash -e
# bash imports
source ./virtualbox_env.sh
set -x
if ! hash ruby 2> /dev/null ; then
echo 'No ruby in path!'
exit 1
fi
if [[ -f ./proxy_setup.sh ]]; then
. ./proxy_setup.sh
fi
# CURL is exported by proxy_setup.sh
if [[ -z "$CURL" ]]; then
echo 'CURL is not defined'
exit 1
fi
# BOOTSTRAP_NAME is exported by automated_install.sh
if [[ -z "$BOOTSTRAP_NAME" ]]; then
echo 'BOOTSTRAP_NAME is not defined'
exit 1
fi
# Bootstrap VM Defaults (these need to be exported for Vagrant's Vagrantfile)
export BOOTSTRAP_VM_MEM=${BOOTSTRAP_VM_MEM-2048}
export BOOTSTRAP_VM_CPUs=${BOOTSTRAP_VM_CPUs-1}
# Use this if you intend to make an apt-mirror in this VM (see the
# instructions on using an apt-mirror towards the end of bootstrap.md)
# -- Vagrant VMs do not use this size --
#BOOTSTRAP_VM_DRIVE_SIZE=120480
# Is this a Hadoop or Kafka cluster?
# (Kafka clusters, being 6 nodes, will require more RAM.)
export CLUSTER_TYPE=${CLUSTER_TYPE:-Hadoop}
# Cluster VM Defaults
export CLUSTER_VM_MEM=${CLUSTER_VM_MEM-2048}
export CLUSTER_VM_CPUs=${CLUSTER_VM_CPUs-1}
export CLUSTER_VM_EFI=${CLUSTER_VM_EFI:-true}
export CLUSTER_VM_DRIVE_SIZE=${CLUSTER_VM_DRIVE_SIZE-20480}
if ! hash vagrant 2> /dev/null ; then
echo 'Vagrant not detected - we need Vagrant!' >&2
exit 1
fi
# Require exactly one Chef environment definition in environments/*.json
environments=( ./environments/*.json )
if (( ${#environments[*]} > 1 )); then
echo 'Need one and only one environment in environments/*.json; got: ' \
"${environments[*]}" >&2
exit 1
fi
# The root drive on cluster nodes must allow for a RAM-sized swap volume.
CLUSTER_VM_ROOT_DRIVE_SIZE=$((CLUSTER_VM_DRIVE_SIZE + CLUSTER_VM_MEM - 2048))
VBOX_DIR="`dirname ${BASH_SOURCE[0]}`/vbox"
[[ -d $VBOX_DIR ]] || mkdir $VBOX_DIR
VBOX_DIR_PATH=`python -c "import os.path; print os.path.abspath(\"${VBOX_DIR}/\")"`
# Populate the VM list array from cluster.txt
code_to_produce_vm_list="
require './lib/cluster_data.rb';
include BACH::ClusterData;
cp=ENV.fetch('BACH_CLUSTER_PREFIX', '');
cp += '-' unless cp.empty?;
vms = parse_cluster_txt(File.readlines('cluster.txt'))
puts vms.map{|e| cp + e[:hostname]}.join(' ')
"
export VM_LIST=( $(/usr/bin/env ruby -e "$code_to_produce_vm_list") )
######################################################
# Function to download files necessary for VM stand-up
#
function download_VM_files {
pushd $VBOX_DIR_PATH
# Grab the Ubuntu 14.04 installer image
if [[ ! -f ubuntu-14.04-mini.iso ]]; then
$CURL -o ubuntu-14.04-mini.iso http://archive.ubuntu.com/ubuntu/dists/trusty-updates/main/installer-amd64/current/images/trusty-netboot/mini.iso
fi
# Can we create the bootstrap VM via Vagrant
if [[ ! -f trusty-server-cloudimg-amd64-vagrant-disk1.box ]]; then
$CURL -o trusty-server-cloudimg-amd64-vagrant-disk1.box http://cloud-images.ubuntu.com/vagrant/trusty/current/trusty-server-cloudimg-amd64-vagrant-disk1.box
fi
popd
}
################################################################################
# Function to snapshot VirtualBox VMs
# Argument: name of snapshot to take
# Post-Condition: If snapshot did not previously exist for VM: VM snapshot taken
# If snapshot previously exists for that VM: Nothing for that VM
function snapshotVMs {
local snapshot_name="$1"
printf "Snapshotting ${snapshot_name}\n"
for vm in ${VM_LIST[*]} ${BOOTSTRAP_NAME}; do
$VBM snapshot $vm list --machinereadable | grep -q "^SnapshotName=\"${snapshot_name}\"\$" || \
$VBM snapshot $vm take "${snapshot_name}" &
done
wait && printf "Done Snapshotting\n"
}
################################################################################
# Function to enumerate VirtualBox hostonly interfaces
# in use from VM's.
# Argument: name of an associative array defined in calling context
# Post-Condition: Updates associative array provided by name with keys being
# all interfaces in use and values being the number of VMs on
# each network
function discover_VBOX_hostonly_ifs {
# make used_ifs a typeref to the passed-in associative array
local -n used_ifs=$1
for net in $($VBM list hostonlyifs | grep '^Name:' | sed 's/^Name:[ ]*//'); do
used_ifs[$net]=0
done
for vm in $($VBM list vms | sed -e 's/^[^{]*{//' -e 's/}$//'); do
ifs=$($VBM showvminfo --machinereadable $vm | \
egrep '^hostonlyadapter[0-9]*' | \
sed -e 's/^hostonlyadapter[0-9]*="//' -e 's/"$//')
for interface in $ifs; do
used_ifs[$interface]=$((${used_ifs[$interface]} + 1))
done
done
}
################################################################################
# Function to remove VirtualBox DHCP servers
# By default, checks for any DHCP server on networks without VM's & removes them
# (expecting if a remove fails the function should bail)
# If a network is provided, removes that network's DHCP server
# (or passes the vboxmanage error and return code up to the caller)
#
function remove_DHCPservers {
local network_name=${1-}
if [[ -z "$network_name" ]]; then
local vms=$($VBM list vms|sed 's/^.*{\([0-9a-f-]*\)}/\1/')
# will produce a list of networks like ^vboxnet0$|^vboxnet1$ which are in use by VMs
local existing_nets_reg_ex=$(sed -e 's/^/^/' -e 's/$/$/' -e 's/ /$|^/g' <<< $(for vm in $vms; do $VBM showvminfo --details --machinereadable $vm | grep -i 'adapter[2-9]=' | sed -e 's/^.*=//' -e 's/"//g'; done | sort -u))
$VBM list dhcpservers | grep -E "^NetworkName:\s+HostInterfaceNetworking" | sed 's/^.*-//' |
while read -r network_name; do
[[ -n $existing_nets_reg_ex ]] && ! egrep -q $existing_nets_reg_ex <<< $network_name && continue
remove_DHCPservers $network_name
done
else
$VBM dhcpserver remove --ifname "$network_name" && local return=0 || local return=$?
return $return
fi
}
###################################################################
# Function to create the bootstrap VM
#
function create_bootstrap_VM {
pushd $VBOX_DIR_PATH
remove_DHCPservers
echo "Vagrant detected - using Vagrant to initialize bcpc-bootstrap VM"
cp ../Vagrantfile .
if [[ -f ../Vagrantfile.local.rb ]]; then
cp ../Vagrantfile.local.rb .
fi
if [[ ! -f insecure_private_key ]]; then
# Ensure that the private key has been created by running vagrant at least once
vagrant status
cp $HOME/.vagrant.d/insecure_private_key .
fi
vagrant up --provision
popd
}
###################################################################
# Function to create the ipxe disk
# Args: Location to use for the ipxe disk
# Post-Condition: The ipxe disk is added as a hdd in virtualbox
#
function create_vbox_ipxe_disk {
cp files/default/ipxe.vdi $1
$VBM modifyhd -type immutable $1
}
###################################################################
# Function to create the BCPC cluster VMs
#
function create_cluster_VMs {
# Gather VirtualBox networks in use by bootstrap VM
oifs="$IFS"
IFS=$'\n'
bootstrap_interfaces=($($VBM showvminfo ${BOOTSTRAP_NAME} \
--machinereadable | \
egrep '^hostonlyadapter[0-9]=' | \
sort | \
sed -e 's/.*=//' -e 's/"//g'))
IFS="$oifs"
VBN0="${bootstrap_interfaces[0]?Need a Virtualbox network 1 for the bootstrap}"
VBN1="${bootstrap_interfaces[1]?Need a Virtualbox network 2 for the bootstrap}"
VBN2="${bootstrap_interfaces[2]?Need a Virtualbox network 3 for the bootstrap}"
if [[ $CLUSTER_VM_EFI == true ]]; then
#
# Add the ipxe USB key to the vbox storage registry as an immutable
# disk, so we can share it between several VMs.
#
current_ipxe=$(vboxmanage list hdds | egrep '^Location:.*ipxe.vdi$')
# we have an ipxe disk added
if [[ -n "$current_ipxe" ]]; then
ipxe_location=$(echo "$current_ipxe" | sed 's/^Location:[ ]*//')
# ensure the location is available -- if not blow it away and recreate
if $VBM showmediuminfo "$ipxe_location" | egrep -q '^State:.*inaccessible'; then
$VBM closemedium disk "$ipxe_location"
# update if we changed ipxe_location to the local workspace
ipxe_location="$VBOX_DIR_PATH/ipxe.vdi"
create_vbox_ipxe_disk "$ipxe_location"
fi
else
ipxe_location="$VBOX_DIR_PATH/ipxe.vdi"
create_vbox_ipxe_disk "$ipxe_location"
fi
# provide the IPXE disk location so we know if it is from
# another cluster
echo "NOTE: Using IPXE volume at: $ipxe_location"
fi
# Create each VM
for vm in ${VM_LIST[*]}; do
# Only if VM doesn't exist
if ! $VBM list vms | grep "^\"${vm}\"" ; then
$VBM createvm --name $vm --ostype Ubuntu_64 \
--basefolder $VBOX_DIR_PATH --register
$VBM modifyvm $vm --memory $CLUSTER_VM_MEM
$VBM modifyvm $vm --cpus $CLUSTER_VM_CPUs
if [[ $CLUSTER_VM_EFI == true ]]; then
# Force UEFI firmware.
$VBM modifyvm $vm --firmware efi
fi
# Add the network interfaces
$VBM modifyvm $vm --nic1 hostonly --hostonlyadapter1 "$VBN0"
$VBM modifyvm $vm --nic2 hostonly --hostonlyadapter2 "$VBN1"
$VBM modifyvm $vm --nic3 hostonly --hostonlyadapter3 "$VBN2"
# Create a disk controller to hang disks off of.
DISK_CONTROLLER="SATA_Controller"
$VBM storagectl $vm --name $DISK_CONTROLLER --add sata
#
# Create the root disk, /dev/sda.
#
# (/dev/sda is hardcoded into the preseed file.)
#
port=0
DISK_PATH=$VBOX_DIR_PATH/$vm/$vm-a.vdi
$VBM createhd --filename $DISK_PATH \
--size $CLUSTER_VM_ROOT_DRIVE_SIZE
$VBM storageattach $vm --storagectl $DISK_CONTROLLER \
--device 0 --port $port --type hdd --medium $DISK_PATH
port=$((port+1))
if [[ $CLUSTER_VM_EFI == true ]]; then
# Attach the iPXE boot medium as /dev/sdb.
$VBM storageattach $vm --storagectl $DISK_CONTROLLER \
--device 0 --port $port --type hdd --medium $ipxe_location
port=$((port+1))
else
# If we're not using EFI, force the BIOS to boot net.
$VBM modifyvm $vm --boot1 net
fi
#
# Create our data disks
#
# For these to be used properly, we will need to override
# the attribute default[:bcpc][:hadoop][:disks] in a role or
# environment.
#
for disk in c d e f; do
DISK_PATH=$VBOX_DIR_PATH/$vm/$vm-$disk.vdi
$VBM createhd --filename $DISK_PATH \
--size $CLUSTER_VM_DRIVE_SIZE
$VBM storageattach $vm --storagectl $DISK_CONTROLLER \
--device 0 --port $port --type hdd --medium $DISK_PATH
port=$((port+1))
done
# Set hardware acceleration options
$VBM modifyvm $vm \
--largepages on \
--vtxvpid on \
--hwvirtex on \
--nestedpaging on \
--ioapic on
# Add serial ports
$VBM modifyvm $vm --uart1 0x3F8 4
$VBM modifyvm $vm --uartmode1 server /tmp/serial-${vm}-ttyS0
fi
done
# update cluster.txt to match VirtualBox MAC's
./vm-to-cluster.sh
}
###################################################################
# Function to setup the bootstrap VM
# Assumes cluster VMs are created
#
function install_cluster {
environment=${1-Test-Laptop}
ip=${2-10.0.100.3}
# N.B. As of Aug 2013, grub-pc gets confused and wants to prompt re: 3-way
# merge. Sigh.
#vagrant ssh -c "sudo ucf -p /etc/default/grub"
#vagrant ssh -c "sudo ucfr -p grub-pc /etc/default/grub"
vagrant ssh -c "test -f /etc/default/grub.ucf-dist && sudo mv /etc/default/grub.ucf-dist /etc/default/grub" || true
# Duplicate what d-i's apt-setup generators/50mirror does when set in preseed
if [ -n "$http_proxy" ]; then
proxy_found=true
vagrant ssh -c "grep Acquire::http::Proxy /etc/apt/apt.conf" || proxy_found=false
if [ $proxy_found == "false" ]; then
vagrant ssh -c "echo 'Acquire::http::Proxy \"$http_proxy\";' | sudo tee -a /etc/apt/apt.conf"
fi
fi
echo "Bootstrap complete - setting up Chef server"
echo "N.B. This may take approximately 30-45 minutes to complete."
vagrant ssh -c 'sudo rm -f /var/chef/cache/chef-stacktrace.out'
./bootstrap_chef.sh --vagrant-remote $ip $environment
if vagrant ssh -c 'sudo grep -i no_lazy_load /var/chef/cache/chef-stacktrace.out'; then
vagrant ssh -c 'sudo rm /var/chef/cache/chef-stacktrace.out'
elif vagrant ssh -c 'test -e /var/chef/cache/chef-stacktrace.out' || \
! vagrant ssh -c 'test -d /etc/chef-server'; then
echo '========= Failed to Chef!' >&2
exit 1
fi
vagrant ssh -c 'cd chef-bcpc; ./cluster-enroll-cobbler.sh remove' || true
vagrant ssh -c 'cd chef-bcpc; ./cluster-enroll-cobbler.sh add'
}
# only execute functions if being run and not sourced
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
download_VM_files
create_bootstrap_VM
create_cluster_VMs
install_cluster
fi
|
NinjaLabib/chef-bach
|
vbox_create.sh
|
Shell
|
apache-2.0
| 12,856 |
#!/bin/bash
container_domain=
container_hostname=
container_image=rstiller/wordpress
container_command=/start.sh
container_name=$container_hostname
container_port=
|
rstiller/docker-wordpress
|
modules/server/files/vars.sh
|
Shell
|
apache-2.0
| 165 |
su
mount -o rw,remount /system
rm /system/media/bootanimation.zip
rm -r /system/media/audio -f
rm -r /system/customizecenter -f
cd system
unzip -o /system/storage/deepmod/deepazure.zip
chmod -R 755 /system
find /system -type f -exec chmod 644 {} \;
|
Zavgorodnij/01030140120931204214
|
1.0.0.2/alternative1.3.sh
|
Shell
|
apache-2.0
| 258 |
#!/bin/sh
# Builds and runs tests for a particular target passed as an argument to this
# script.
set -ex
TARGET=$1
case "$TARGET" in
*-apple-ios)
cargo rustc --manifest-path libc-test/Cargo.toml --target $TARGET -- \
-C link-args=-mios-simulator-version-min=7.0
;;
*)
cargo build --manifest-path libc-test/Cargo.toml --target $TARGET
;;
esac
case "$TARGET" in
arm-linux-androideabi)
emulator @arm-18 -no-window &
adb wait-for-device
adb push /tmp/$TARGET/debug/libc-test /data/libc-test
adb shell /data/libc-test 2>&1 | tee /tmp/out
grep "^PASSED .* tests" /tmp/out
;;
arm-unknown-linux-gnueabihf)
qemu-arm -L /usr/arm-linux-gnueabihf libc-test/target/$TARGET/debug/libc-test
;;
mips-unknown-linux-gnu)
qemu-mips -L /usr/mips-linux-gnu libc-test/target/$TARGET/debug/libc-test
;;
aarch64-unknown-linux-gnu)
qemu-aarch64 -L /usr/aarch64-linux-gnu/ \
libc-test/target/$TARGET/debug/libc-test
;;
*-rumprun-netbsd)
rumprun-bake hw_virtio /tmp/libc-test.img /tmp/$TARGET/debug/libc-test
qemu-system-x86_64 -nographic -vga none -m 64 \
-kernel /tmp/libc-test.img 2>&1 | tee /tmp/out &
sleep 5
grep "^PASSED .* tests" /tmp/out
;;
*-apple-ios)
libc-test/target/$TARGET/debug/libc-test
;;
*)
cargo run --manifest-path libc-test/Cargo.toml --target $TARGET
;;
esac
|
DiamondLovesYou/libc
|
ci/run.sh
|
Shell
|
apache-2.0
| 1,407 |
#!/bin/bash
# Copyright 2020 Google LLC All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Provide the number of times you want allocation test to be run
testRunsCount=3
if [ -z "$1" ]
then
echo "No test run count provided, using default which is 3"
else
testRunsCount=$1
if ! [[ $testRunsCount =~ ^[0-9]+$ ]] ; then
echo "error: Not a positive number provided" >&2; exit 1
fi
fi
counter=1
while [ $counter -le $testRunsCount ]
do
echo "Run number: " $counter
go run allocationload.go 2>>./allocation_test_results.txt
sleep 500
((counter++))
done
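# Usage sketch (illustrative): "./runAllocation.sh 5" performs five runs,
# sleeping 500 seconds between them and appending stderr to allocation_test_results.txt.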
|
googleforgames/agones
|
test/load/allocation/runAllocation.sh
|
Shell
|
apache-2.0
| 1,135 |
#!/bin/bash
#
# Author: Ambud Sharma
#
#
source /etc/sidewinder/sidewinder-env.sh
$JAVA_HOME/bin/java $JAVA_OPTS -cp $SIDEWINDER_HOME/lib/*.jar com.srotya.sidewinder.core.SidewinderServer server $SIDEWINDER_CONF/config.yaml
|
srotya/sidewinder
|
dist/standalone/src/main/resources/install/sidewinder.sh
|
Shell
|
apache-2.0
| 226 |
#!/bin/bash -ex
# Set current ip to nats server
NISE_IP_ADDRESS=${NISE_IP_ADDRESS:-`ip addr | grep 'inet .*global' | cut -f 6 -d ' ' | cut -f1 -d '/' | head -n 1`}
NISE_IP_ADDRESS=${NISE_IP_ADDRESS} ./common/launch_nsie_bosh.sh
(
cd nise_bosh
bundle install
# Old spec format
sudo env PATH=$PATH bundle exec ./bin/nise-bosh -y ../cf-services-contrib-release ../manifests/deploy.yml micro
)
|
yudai/cf_nise_installer_services
|
local/launch_nise_bosh.sh
|
Shell
|
apache-2.0
| 409 |
#!/usr/bin/env bash
if ([ "$TRAVIS_BRANCH" = 'master' ] && [ "$TRAVIS_PULL_REQUEST" == 'false' ]) || [ "$TRAVIS_TAG" != '' ] ; then
openssl aes-256-cbc -K $encrypted_b4f84f3a0738_key -iv $encrypted_b4f84f3a0738_iv -in codesigning.asc.enc -out codesigning.asc -d
gpg --fast-import codesigning.asc
fi
|
mgargadennec/blossom
|
cd/before-deploy.sh
|
Shell
|
apache-2.0
| 308 |
#!/bin/bash
# REPLACE WITH YOUR CHIME2 PATH HERE
chime2_path='/data1/swisdom/chime2/chime2-wsj0'
# write taskfiles for training data
find ${chime2_path}/isolated/si_tr_s -name '*.wav' -type f | sort -u > taskfile_chime2_train_noisy.txt
find ${chime2_path}/scaled/si_tr_s -name '*.wav' -type f | sort -u > taskfile_chime2_train_clean.txt
# write taskfiles for development (validation) data
find ${chime2_path}/isolated/si_dt_05 -name '*.wav' -type f | sort -u > taskfile_chime2_valid_noisy.txt
find ${chime2_path}/scaled/si_dt_05 -name '*.wav' -type f | sort -u > taskfile_chime2_valid_clean.txt
# write taskfiles for evaluation data
find ${chime2_path}/isolated/si_et_05 -name '*.wav' -type f | sort -u > taskfile_chime2_test_noisy.txt
find ${chime2_path}/scaled/si_et_05 -name '*.wav' -type f | sort -u > taskfile_chime2_test_clean.txt
|
stwisdom/dr-nmf
|
create_taskfiles.sh
|
Shell
|
apache-2.0
| 841 |
#!/bin/bash
if [ -z "$DOCKER_BUILD_OPTS" ] ; then
docker build -t udiabon/centos-dev-env .
else
docker build $DOCKER_BUILD_OPTS -t udiabon/centos-dev-env .
fi
|
udiabon/centos-nginx-php-fpm
|
build.sh
|
Shell
|
apache-2.0
| 167 |
#!/bin/bash -e
# Starting OpenBSD Secure Shell server: sshd
service ssh start
# Format the filesystem
hdfs namenode -format
# Start NameNode daemon and DataNode daemon
start-dfs.sh
# Make the HDFS directories required to execute MapReduce jobs
hdfs dfs -mkdir /user \
&& hdfs dfs -mkdir /user/`whoami`
# Start ResourceManager daemon and NodeManager daemon
start-yarn.sh
|
ljishen/SRL
|
docker/hadoop/master/entrypoint.sh
|
Shell
|
apache-2.0
| 381 |
#!/bin/sh
#
# See the paper "Recursive Make Considered Harmful"
# by Peter Miller (http://aegis.sourceforge.net/auug97.pdf)
#
DIR="$1"
shift 1
case "$DIR" in
"" | ".")
gcc -MM -MG "$@" | sed -e "s!^\(.*\)\.o:!\1.s \1.d \1.o:!"
;;
*)
gcc -MM -MG "$@" | sed -e "s!^\(.*\)\.o:!$DIR/\1.s $DIR/\1.d $DIR/\1.o:!"
;;
esac
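# Example (illustrative): "depend.sh src foo.c" rewrites gcc's generated
# "foo.o: foo.c foo.h" rule into "src/foo.s src/foo.d src/foo.o: foo.c foo.h",
# so a single dependency file covers the .s/.d/.o targets in the build directory.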
|
libamqp/libamqp
|
src/depend.sh
|
Shell
|
apache-2.0
| 328 |
#!/usr/bin/env bash
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
STAGE_COMPILE="compile"
STAGE_CORE="core"
STAGE_PYTHON="python"
STAGE_LIBRARIES="libraries"
STAGE_BLINK_PLANNER="blink_planner"
STAGE_CONNECTORS="connectors"
STAGE_KAFKA_GELLY="kafka/gelly"
STAGE_TESTS="tests"
STAGE_MISC="misc"
STAGE_CLEANUP="cleanup"
STAGE_LEGACY_SLOT_MANAGEMENT="legacy_slot_management"
MODULES_CORE="\
flink-annotations,\
flink-test-utils-parent/flink-test-utils,\
flink-state-backends/flink-statebackend-rocksdb,\
flink-clients,\
flink-core,\
flink-java,\
flink-optimizer,\
flink-runtime,\
flink-runtime-web,\
flink-scala,\
flink-streaming-java,\
flink-streaming-scala,\
flink-metrics,\
flink-metrics/flink-metrics-core,\
flink-external-resources,\
flink-external-resources/flink-external-resource-gpu"
MODULES_LIBRARIES="\
flink-libraries/flink-cep,\
flink-libraries/flink-cep-scala,\
flink-libraries/flink-state-processing-api,\
flink-table/flink-table-common,\
flink-table/flink-table-api-java,\
flink-table/flink-table-api-scala,\
flink-table/flink-table-api-java-bridge,\
flink-table/flink-table-api-scala-bridge,\
flink-table/flink-table-planner,\
flink-table/flink-sql-client"
MODULES_BLINK_PLANNER="\
flink-table/flink-table-planner-blink,\
flink-table/flink-table-runtime-blink"
MODULES_CONNECTORS="\
flink-contrib/flink-connector-wikiedits,\
flink-filesystems,\
flink-filesystems/flink-fs-hadoop-shaded,\
flink-filesystems/flink-hadoop-fs,\
flink-filesystems/flink-mapr-fs,\
flink-filesystems/flink-oss-fs-hadoop,\
flink-filesystems/flink-s3-fs-base,\
flink-filesystems/flink-s3-fs-hadoop,\
flink-filesystems/flink-s3-fs-presto,\
flink-filesystems/flink-swift-fs-hadoop,\
flink-fs-tests,\
flink-formats,\
flink-formats/flink-avro-confluent-registry,\
flink-formats/flink-avro,\
flink-formats/flink-parquet,\
flink-formats/flink-sequence-file,\
flink-formats/flink-json,\
flink-formats/flink-csv,\
flink-formats/flink-orc,\
flink-formats/flink-orc-nohive,\
flink-connectors/flink-connector-hbase-base,\
flink-connectors/flink-connector-hbase-1.4,\
flink-connectors/flink-connector-hbase-2.2,\
flink-connectors/flink-hcatalog,\
flink-connectors/flink-hadoop-compatibility,\
flink-connectors,\
flink-connectors/flink-connector-jdbc,\
flink-connectors/flink-connector-cassandra,\
flink-connectors/flink-connector-elasticsearch5,\
flink-connectors/flink-connector-elasticsearch6,\
flink-connectors/flink-connector-elasticsearch7,\
flink-connectors/flink-sql-connector-elasticsearch6,\
flink-connectors/flink-sql-connector-elasticsearch7,\
flink-connectors/flink-connector-elasticsearch-base,\
flink-connectors/flink-connector-nifi,\
flink-connectors/flink-connector-rabbitmq,\
flink-connectors/flink-connector-twitter,\
flink-connectors/flink-connector-kinesis,\
flink-metrics/flink-metrics-dropwizard,\
flink-metrics/flink-metrics-graphite,\
flink-metrics/flink-metrics-jmx,\
flink-metrics/flink-metrics-influxdb,\
flink-metrics/flink-metrics-prometheus,\
flink-metrics/flink-metrics-statsd,\
flink-metrics/flink-metrics-datadog,\
flink-metrics/flink-metrics-slf4j,\
flink-queryable-state/flink-queryable-state-runtime,\
flink-queryable-state/flink-queryable-state-client-java"
MODULES_KAFKA_GELLY="\
flink-libraries/flink-gelly,\
flink-libraries/flink-gelly-scala,\
flink-libraries/flink-gelly-examples,\
flink-connectors/flink-connector-kafka,\
flink-connectors/flink-sql-connector-kafka,"
MODULES_TESTS="\
flink-tests"
MODULES_LEGACY_SLOT_MANAGEMENT=${MODULES_CORE},${MODULES_TESTS}
# we can only build the Scala Shell when building for Scala 2.11
if [[ $PROFILE == *"scala-2.11"* ]]; then
MODULES_CORE="$MODULES_CORE,flink-scala-shell"
fi
function get_compile_modules_for_stage() {
local stage=$1
case ${stage} in
(${STAGE_CORE})
echo "-pl $MODULES_CORE -am"
;;
(${STAGE_LIBRARIES})
echo "-pl $MODULES_LIBRARIES -am"
;;
(${STAGE_BLINK_PLANNER})
echo "-pl $MODULES_BLINK_PLANNER -am"
;;
(${STAGE_CONNECTORS})
echo "-pl $MODULES_CONNECTORS -am"
;;
(${STAGE_KAFKA_GELLY})
echo "-pl $MODULES_KAFKA_GELLY -am"
;;
(${STAGE_TESTS})
echo "-pl $MODULES_TESTS -am"
;;
(${STAGE_MISC})
# compile everything; using the -am switch does not work with negated module lists!
# the negation takes precedence, thus not all required modules would be built
echo ""
;;
(${STAGE_PYTHON})
# compile everything for PyFlink.
echo ""
;;
(${STAGE_LEGACY_SLOT_MANAGEMENT})
echo "-pl $MODULES_LEGACY_SLOT_MANAGEMENT -am"
;;
esac
}
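# Example (illustrative): get_compile_modules_for_stage "$STAGE_CORE" prints
# "-pl <the MODULES_CORE list> -am"; Maven's -am ("also make") additionally
# builds every module those projects depend on.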
function get_test_modules_for_stage() {
local stage=$1
local modules_core=$MODULES_CORE
local modules_libraries=$MODULES_LIBRARIES
local modules_blink_planner=$MODULES_BLINK_PLANNER
local modules_connectors=$MODULES_CONNECTORS
local modules_tests=$MODULES_TESTS
local negated_core=\!${MODULES_CORE//,/,\!}
local negated_libraries=\!${MODULES_LIBRARIES//,/,\!}
local negated_blink_planner=\!${MODULES_BLINK_PLANNER//,/,\!}
local negated_kafka_gelly=\!${MODULES_KAFKA_GELLY//,/,\!}
local negated_connectors=\!${MODULES_CONNECTORS//,/,\!}
local negated_tests=\!${MODULES_TESTS//,/,\!}
local modules_misc="$negated_core,$negated_libraries,$negated_blink_planner,$negated_connectors,$negated_kafka_gelly,$negated_tests"
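# The \! prefix negates entries for Maven's -pl option, e.g. (illustrative)
# "flink-core,flink-java" becomes "!flink-core,!flink-java", i.e. "everything
# except these modules".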
local modules_legacy_slot_management=$MODULES_LEGACY_SLOT_MANAGEMENT
case ${stage} in
(${STAGE_CORE})
echo "-pl $modules_core"
;;
(${STAGE_LIBRARIES})
echo "-pl $modules_libraries"
;;
(${STAGE_BLINK_PLANNER})
echo "-pl $modules_blink_planner"
;;
(${STAGE_CONNECTORS})
echo "-pl $modules_connectors"
;;
(${STAGE_KAFKA_GELLY})
echo "-pl $MODULES_KAFKA_GELLY"
;;
(${STAGE_TESTS})
echo "-pl $modules_tests"
;;
(${STAGE_MISC})
echo "-pl $modules_misc"
;;
(${STAGE_LEGACY_SLOT_MANAGEMENT})
echo "-pl $modules_legacy_slot_management"
;;
esac
}
|
aljoscha/flink
|
tools/ci/stage.sh
|
Shell
|
apache-2.0
| 7,168 |
#!/bin/bash -
#===============================================================================
#
# FILE: vi2html.sh
#
# USAGE: ./vi2html.sh file
#
# DESCRIPTION: turns a file into a html syntax-colored file.
#
# OPTIONS: file - the file that would appear colored in vim.
# REQUIREMENTS: vim with syntax coloring
# BUGS: ---
# NOTES: ---
# AUTHOR: Cláudio Sampaio (Patola), [email protected]
# ORGANIZATION: IBM
# CREATED: 07-08-2012 18:51:05 BRT
# REVISION: ---
#===============================================================================
set -o nounset # Treat unset variables as an error
file="$1";vim ${file} -e -s -c 'set background=light' -c 'runtime! syntax/syntax.vim' -c 'runtime! syntax/2html.vim' -c "w ${file}.html" -c 'q!' -c 'q!' > /dev/null
|
Patola/patolascripts
|
vi2html.sh
|
Shell
|
apache-2.0
| 861 |
#!/bin/bash
function print_help {
echo "Usage:
--all Display info for all wallets
--key {key} Display info for wallet with key: {key}"
}
if [[ $1 == "--all" ]];
then
curl http://stg.parzam.com:8000/api/services/cryptocurrency/v1/wallets
elif [[ $1 == "--key" ]];
then
curl http://stg.parzam.com:8000/api/services/cryptocurrency/v1/wallet/$2
else
print_help
exit
fi
|
sadonsergey/exonum-test
|
examples/wallets_info2.sh
|
Shell
|
apache-2.0
| 417 |
#!/bin/bash
if [[ "${REPORT_DATE}" == "" || "${REPORT_DATE}" == "LATEST" ]]
then
REPORT_DATE=$(date +%Y_%m_01)
CRUX_REPORT_DATE=$(date -d "-1 month" +%Y_%m_01)
else
CRUX_REPORT_DATE="${REPORT_DATE}"
fi
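# Example (hypothetical run on 2023-05-15): REPORT_DATE=2023_05_01 and
# CRUX_REPORT_DATE=2023_04_01, since CrUX data lags the current month by one.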
FAIL=0
NUM_TESTS=0
FAIL_LOG=""
TITLE=""
# These dated report URLs are tested for 200 status
# We test the first and last report for each lens
REPORT_MONTHLY_URLS=$(cat <<-END
https://cdn.httparchive.org/reports/${REPORT_DATE}/bootupJs.json
https://cdn.httparchive.org/reports/${REPORT_DATE}/vulnJs.json
https://cdn.httparchive.org/reports/drupal/${REPORT_DATE}/bootupJs.json
https://cdn.httparchive.org/reports/drupal/${REPORT_DATE}/vulnJs.json
https://cdn.httparchive.org/reports/magento/${REPORT_DATE}/bootupJs.json
https://cdn.httparchive.org/reports/magento/${REPORT_DATE}/vulnJs.json
https://cdn.httparchive.org/reports/wordpress/${REPORT_DATE}/bootupJs.json
https://cdn.httparchive.org/reports/wordpress/${REPORT_DATE}/vulnJs.json
https://cdn.httparchive.org/reports/${CRUX_REPORT_DATE}/cruxCls.json
https://cdn.httparchive.org/reports/${CRUX_REPORT_DATE}/cruxOl.json
END
)
# These timeseries URLs are tested if the date exists in the returned body
# We test the first and last report for each lens
TIMESERIES_URLS=$(cat <<-END
https://cdn.httparchive.org/reports/numUrls.json
https://cdn.httparchive.org/reports/a11yButtonName.json
https://cdn.httparchive.org/reports/drupal/numUrls.json
https://cdn.httparchive.org/reports/drupal/a11yButtonName.json
https://cdn.httparchive.org/reports/magento/numUrls.json
https://cdn.httparchive.org/reports/magento/a11yButtonName.json
https://cdn.httparchive.org/reports/wordpress/numUrls.json
https://cdn.httparchive.org/reports/wordpress/a11yButtonName.json
END
)
# These timeseries URLs are tested if the date exists in the returned body
# For CrUX we always test the month before (unless an explicit date was passed)
# We test the first and last report
CRUX_TIMESERIES_URLS=$(cat <<-END
https://cdn.httparchive.org/reports/cruxFastDcl.json
https://cdn.httparchive.org/reports/cruxSmallCls.json
END
)
echo "Starting testing"
for TEST_URL in ${REPORT_MONTHLY_URLS}
do
NUM_TESTS=$((NUM_TESTS+1))
STATUS_CODE=$(curl -s -o /dev/null -w "%{http_code}" "${TEST_URL}")
if [[ "${STATUS_CODE}" == "200" ]]
then
echo "200 Status code found for ${TEST_URL}"
else
echo "Incorrect Status code ${STATUS_CODE} found for ${TEST_URL}"
FAIL_LOG="${FAIL_LOG}Incorrect Status code ${STATUS_CODE} found for ${TEST_URL}\n"
FAIL=$((FAIL+1))
fi
done
for TEST_URL in ${TIMESERIES_URLS}
do
NUM_TESTS=$((NUM_TESTS+1))
if curl -s "${TEST_URL}" | grep -q "${REPORT_DATE}"
then
echo "${REPORT_DATE} found in body for ${TEST_URL}"
else
echo "${REPORT_DATE} not found in body for ${TEST_URL}"
FAIL_LOG="${FAIL_LOG}${REPORT_DATE} not found in body for ${TEST_URL}\n"
FAIL=$((FAIL+1))
fi
done
for TEST_URL in ${CRUX_TIMESERIES_URLS}
do
NUM_TESTS=$((NUM_TESTS+1))
if curl -s "${TEST_URL}" | grep -q "${CRUX_REPORT_DATE}"
then
echo "${CRUX_REPORT_DATE} found in body for ${TEST_URL}"
else
echo "${CRUX_REPORT_DATE} not found in body for ${TEST_URL}"
FAIL_LOG="${FAIL_LOG}${CRUX_REPORT_DATE} not found in body for ${TEST_URL}\n"
FAIL=$((FAIL+1))
fi
done
FAIL_LOG="${FAIL_LOG}\nSee latest log in [GitHub Actions](https://github.com/HTTPArchive/httparchive.org/actions/workflows/monthly-report-checks.yml)
"
if [[ ${FAIL} -ne 0 && ${FAIL} -eq ${NUM_TESTS} ]]
then
TITLE="All reports have failed for ${REPORT_DATE}"
elif [[ ${FAIL} -ne 0 && ${FAIL} -lt ${NUM_TESTS} ]]
then
TITLE="Some reports have failed for ${REPORT_DATE}"
fi
# Export the number of fails to GitHub env
if [[ "$GITHUB_ENV" ]]
then
# shellcheck disable=SC2129
echo "REPORT_TITLE=${TITLE}" >> "$GITHUB_ENV"
echo "REPORT_FAILS=${FAIL}" >> "$GITHUB_ENV"
echo "REPORT_FAIL_LOG<<EOF" >> "$GITHUB_ENV"
echo -e "${FAIL_LOG}" >> "$GITHUB_ENV"
echo "EOF" >> "$GITHUB_ENV"
fi
if [[ ${FAIL} -gt 0 ]]
then
echo "${FAIL} test(s) failed. Exiting 1"
exit 1
fi
echo "All tests passed"
exit 0
|
HTTPArchive/beta.httparchive.org
|
tools/scripts/test_reports.sh
|
Shell
|
apache-2.0
| 4,180 |
#/**
#* Copyright IBM Corporation 2016
#*
#* Licensed under the Apache License, Version 2.0 (the "License");
#* you may not use this file except in compliance with the License.
#* You may obtain a copy of the License at
#*
#* http://www.apache.org/licenses/LICENSE-2.0
#*
#* Unless required by applicable law or agreed to in writing, software
#* distributed under the License is distributed on an "AS IS" BASIS,
#* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#* See the License for the specific language governing permissions and
#* limitations under the License.
#**/
INPUT_FILE=$1
OUTPUT_FILE=$2
echo "--- Generating ${OUTPUT_FILE}"
cat <<'EOF' > ${OUTPUT_FILE}
/**
* Copyright IBM Corporation 2016
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
**/
// MARK Router
extension Router {
EOF
for VERB in `sed '/^$/d' ${INPUT_FILE} | sed '/^#/d'`; do
VERB_LOW_CASE=`echo $VERB | cut -c1 | tr '[:upper:]' '[:lower:]'``echo $VERB | cut -c2-`
VERB_UPPER_CASE=`echo $VERB | tr '[:lower:]' '[:upper:]'`
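# e.g. (illustrative) VERB "Get" yields VERB_LOW_CASE "get" and VERB_UPPER_CASE "GET"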
if [ "${VERB_UPPER_CASE}" == "ALL" ]; then
DOC_TEXT_1="any"
DOC_TEXT_2=""
else
DOC_TEXT_1="HTTP $VERB_UPPER_CASE"
DOC_TEXT_2="s"
fi
cat <<EOF >> ${OUTPUT_FILE}
// MARK: $VERB
/// Setup a set of one or more closures of the type \`RouterHandler\` that will be
/// invoked when $DOC_TEXT_1 request$DOC_TEXT_2 comes to the server. If a path pattern is
/// specified, the handlers will only be invoked if the pattern is matched.
///
/// - Parameter path: An optional String specifying the pattern that needs to be
/// matched, in order for the handlers to be invoked.
/// - Parameter handler: A comma delimited set of \`RouterHandler\`s that will be
/// invoked when $DOC_TEXT_1 request$DOC_TEXT_2 comes to the server.
@discardableResult
public func $VERB_LOW_CASE(_ path: String?=nil, handler: RouterHandler...) -> Router {
return routingHelper(.$VERB_LOW_CASE, pattern: path, handler: handler)
}
/// Setup an array of one or more closures of the type \`RouterHandler\` that will be
/// invoked when $DOC_TEXT_1 request$DOC_TEXT_2 comes to the server. If a path pattern is
/// specified, the handlers will only be invoked if the pattern is matched.
///
/// - Parameter path: An optional String specifying the pattern that needs to be
/// matched, in order for the handlers to be invoked.
/// - Parameter handler: The array of \`RouterHandler\`s that will be
/// invoked when $DOC_TEXT_1 request$DOC_TEXT_2 comes to the server.
@discardableResult
public func $VERB_LOW_CASE(_ path: String?=nil, handler: [RouterHandler]) -> Router {
return routingHelper(.$VERB_LOW_CASE, pattern: path, handler: handler)
}
/// Setup a set of one or more \`RouterMiddleware\` that will be
/// invoked when $DOC_TEXT_1 request$DOC_TEXT_2 comes to the server. If a path pattern is
/// specified, the \`RouterMiddleware\` will only be invoked if the pattern is matched.
///
/// - Parameter path: An optional String specifying the pattern that needs to be
/// matched, in order for the \`RouterMiddleware\` to be invoked.
/// - Parameter allowPartialMatch: A Bool that indicates whether or not a partial match of
/// the path by the pattern is sufficient.
/// - Parameter handler: A comma delimited set of \`RouterMiddleware\` that will be
/// invoked when $DOC_TEXT_1 request$DOC_TEXT_2 comes to the server.
@discardableResult
public func $VERB_LOW_CASE(_ path: String?=nil, allowPartialMatch: Bool = true, middleware: RouterMiddleware...) -> Router {
return routingHelper(.$VERB_LOW_CASE, pattern: path, allowPartialMatch: allowPartialMatch, middleware: middleware)
}
/// Setup an array of one or more \`RouterMiddleware\` that will be
/// invoked when $DOC_TEXT_1 request$DOC_TEXT_2 comes to the server. If a path pattern is
/// specified, the \`RouterMiddleware\` will only be invoked if the pattern is matched.
///
/// - Parameter path: An optional String specifying the pattern that needs to be
/// matched, in order for the \`RouterMiddleware\` to be invoked.
/// - Parameter allowPartialMatch: A Bool that indicates whether or not a partial match of
/// the path by the pattern is sufficient.
/// - Parameter handler: The array of \`RouterMiddleware\` that will be
/// invoked when $DOC_TEXT_1 request$DOC_TEXT_2 comes to the server.
@discardableResult
public func $VERB_LOW_CASE(_ path: String?=nil, allowPartialMatch: Bool = true, middleware: [RouterMiddleware]) -> Router {
return routingHelper(.$VERB_LOW_CASE, pattern: path, allowPartialMatch: allowPartialMatch, middleware: middleware)
}
EOF
done
echo "}" >> ${OUTPUT_FILE}
|
IBM-Swift/Kitura
|
Scripts/generate_router_verbs.sh
|
Shell
|
apache-2.0
| 5,499 |
#!/bin/bash
set -e
source /bd_build/buildconfig
set -x
apt-get update
# Install common scripts
# cp -rT /bd_build/stations/scripts/ /usr/local/bin
# cp -rT /bd_build/stations/startup_scripts/. /etc/my_init.d/
cp -rT /bd_build/stations/service.minimal/. /etc/service.minimal/
# cp -rT /bd_build/stations/service.full/. /etc/service.full/
# Run service setup for all setup scripts
for f in /bd_build/stations/setup/*.sh; do
bash "$f" -H
done
|
AzuraCast/AzuraCast
|
util/docker/stations/setup.sh
|
Shell
|
apache-2.0
| 449 |
PREPS=(
'echo "ERROR: CDIST2700E The operator requires an attribute called analysisResult on the output port"'
'copyOnly'
)
STEPS=(
"splCompileInterceptAndError"
'linewisePatternMatchInterceptAndSuccess "$TT_evaluationFile" "" "*CDIST2700E*"'
)
|
IBMStreams/streamsx.sparkMLLib
|
tests/frameworktests/tests/StreamsxSparkmllib/TranslationCompile/SparkCollaborativeFilteringALS/TestCase.sh
|
Shell
|
apache-2.0
| 252 |
#! /bin/bash
echo --- start apache spark
/Library/spark-1.3.0-SNAPSHOT-hadoop2.2.0/sbin/start-all.sh
|
faustineinsun/SparkPlayground
|
Scala/scripts/startApacheSpark.sh
|
Shell
|
apache-2.0
| 103 |
#!/usr/bin/env bash
ID=$(basename "$0" | sed "s/\.sh$//")
ABS_PATH=$(readlink -f $0)
cd $(dirname $(dirname $(dirname ${ABS_PATH})))
MYDIR=logs/${ID}
mkdir -p ${MYDIR}
cp ${ABS_PATH} ${MYDIR}
CUDA_VISIBLE_DEVICES=3 \
python -u main.py \
--experiment_id ${ID} \
--data_name scan \
--train_file SCAN/add_prim_split/tasks_train_addprim_turn_left.txt \
--test_file SCAN/add_prim_split/tasks_test_addprim_turn_left.txt \
--model_name rand_reg \
--random_seed 11 \
--batch_size 4096 \
--switch_temperature 0.1 \
--attention_temperature 1 \
--num_units 64 \
--epochs 5000 \
--learning_rate 0.01 \
--max_gradient_norm 1.0 \
--use_input_length \
--use_embedding \
--embedding_size 8 \
--function_embedding_size 8 \
--bidirectional_encoder \
--random_batch \
--decay_steps 100 \
--remove_switch \
--function_noise \
--content_noise_coe 0.1 \
--noise_weight 1 \
--sample_wise_content_noise \
--masked_attention \
--random_random \
| tee ${MYDIR}/log.txt
python attention_visualization.py \
--hide_switch \
--experiment_id ${ID}
|
yli1/CGPS
|
experiments/turn_ablation_prim/turn_ablation_prim_D.sh
|
Shell
|
apache-2.0
| 1,020 |
#!/bin/bash
/Volumes/Data/Code/ARK/PARKive/pri/PARK-make/Intro-To-HARK/Intro-To-HARK-make.sh /Volumes/Data/Code/ARK/PARKive/pri/PARK-make/Intro-To-HARK/Latest Intro-To-HARK /Volumes/Data/Code/ARK/PARKive/pri/PARK/Intro-To-HARK-Custom
|
econ-ark/PARK
|
source/Intro-To-HARK/Intro-To-HARK-make-Custom.sh
|
Shell
|
apache-2.0
| 237 |
#!/bin/bash
# strict mode http://redsymbol.net/articles/unofficial-bash-strict-mode/
set -euo pipefail
IFS=$'\n\t'
# ensure a modern version of sort is installed
# get the version of sort, find the line that has the version number
# get the last word with awk, which is the version number
# if that version number is greater than 7, we're good,
# else, we need to install coreutils
if [ $(echo "$(sort --version | grep '(GNU' | awk 'NF>1{print $NF}') > 7" | bc) != 1 ]; then
echo "Installing coreutils to give you a modern shell experience"
brew install coreutils
fi
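# (On GNU systems the last field of the version line, e.g. "sort (GNU coreutils) 8.32",
# is compared against 7; on stock macOS there is no "(GNU" line, so the check fails
# and coreutils is installed to provide the "sort -V" used further below.)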
function lint(){
eslint --no-eslintrc --config .eslintrc ${@-.} --ext .jsx --ext .js --ext .es6
}
function git_require_clean_work_tree(){
git diff --exit-code
}
function find_changelog_file(){
# find the changelog file
local CHANGELOG=""
if test "$CHANGELOG" = ""; then
CHANGELOG="$(ls | egrep '^(change|history)' -i | head -n1)"
if test "$CHANGELOG" = ""; then
CHANGELOG="CHANGELOG.md";
fi
fi
echo $CHANGELOG
}
function find_last_git_tag(){
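  # sort -V (GNU coreutils) does a natural version sort, so e.g. v1.10.0
  # correctly sorts after v1.9.0 where a plain lexical sort would not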
PATH="/usr/local/opt/coreutils/libexec/gnubin:$PATH" && git tag -l | sort -V | tail -n 1
}
# based on https://github.com/tj/git-extras/blob/master/bin/git-changelog
function generate_git_changelog(){
GIT_LOG_OPTS="--no-merges"
local DATE=$(date +'%Y-%m-%d')
local HEAD='## '
# get the commits between the most recent tag and the second most recent
local lasttag=$(find_last_git_tag)
local version=$(git describe --tags --abbrev=0 "$lasttag" 2>/dev/null)
local previous_version=$(git describe --tags --abbrev=0 "$lasttag^" 2>/dev/null)
# if we don't have a previous version to look at
if test -z "$version"; then
local head="$HEAD$DATE"
local changes=$(git log $GIT_LOG_OPTS --pretty="format:* %s%n" 2>/dev/null)
  # the more common case: there's a version to get the changes between
else
local head="$HEAD$version | $DATE"
# tail to get remove the first line, which will always just be the version commit
# awk to remove empty lines
local changes=$(tail -n +2 <<< "$(git log $GIT_LOG_OPTS --pretty="format:* %s%n" "$previous_version..$version" 2>/dev/null)" | awk NF)
fi
local CHANGELOG=$(find_changelog_file)
echo "Editing $CHANGELOG"
# insert the changes after the header (assumes markdown)
# this shells out to node b/c I couldn't figure out how to do it with awk
local tmp_changelog=/tmp/changelog
node -e "console.log(require('fs').readFileSync(process.argv[1]).toString().replace(/(#.*?\n\n)/, '\$1' + process.argv.slice(2).join('\n') + '\n\n'))" "$CHANGELOG" "$head" "$changes" > $tmp_changelog
echo $EDITOR
# open the changelog in the editor for editing
test -n "$EDITOR" && $EDITOR $tmp_changelog
mv $tmp_changelog "$CHANGELOG"
}
function git_amend_tag(){
git add "$(find_changelog_file)"
git commit --amend --no-edit --no-verify
git tag "$(find_last_git_tag)" -f
}
function npm_release(){
local version
if [ -z "${1:-}" ]; then
version="patch"
else
version="$1"
fi
npm version "$version" && generate_git_changelog && git_ammend_tag && npm run gitPush && npm publish
}
|
Getable/rentable-footer
|
scripts.sh
|
Shell
|
artistic-2.0
| 3,130 |
#! /bin/bash
cd build
ninja tidy
|
kaimast/yael
|
ci/clang-tidy.sh
|
Shell
|
bsd-2-clause
| 34 |
#!/usr/bin/env bash
#exec 1> >(logger -p error -t $(basename $0)) 2>&1
# by default we only get the full repo every 6 minutes
# which if the cron job runs every 3 mins, then it is every other time
fulleveryxmins=6
myminarg=$1
if [ $myminarg ] ; then
fulleveryxmins=$1
fi
cleanup() {
/usr/bin/sqlite3 ~/.offlineimap/Account-mswork/LocalStatus-sqlite/INBOX 'pragma integrity_check;'
}
# Check roughly once a minute whether the process identified as $1 is still
# running. After ${fulleveryxmins} checks, send SIGKILL. Return non-zero to
# indicate something was killed.
monitor() {
local pid=$1 i=0
while ps $pid &>/dev/null; do
# wait for at least as many minutes as we expect a run instead of a fixed amount
if (( i++ > ${fulleveryxmins} )); then
logger -p crit "$USER mailrun Max checks reached. Sending SIGKILL to ${pid}..."
# echo "Max checks reached. Sending SIGKILL to ${pid}..." >&2
kill -9 $pid; return 1
fi
# wait for up to a minute instead of just 10 seconds now that we get large archives
sleep 59
# cleanup
done
return 0
}
read -r pid < ~/.offlineimap/pid
if ps $pid &>/dev/null; then
logger -p crit "$USER mailrun Process ${pid} already running. Exiting..."
# echo "Process $pid already running. Exiting..." >&2
exit 1
fi
repo=mswork
verbosity=quiet
if [ $USER == 'aditya' ] ; then
repo=gmail-grot
fi
#every fulleveryxmins mins get the full repository, otherwise only the INBOX and moc
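# (the 10# prefix below forces base-10 so minutes like "08"/"09" are not
# rejected as invalid octal numbers)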
if [ $(( 10#$(date +%M) % ${fulleveryxmins} )) -eq 0 ] ; then
folders=""
else
if [ $repo == 'mswork' ] ; then
folders="-f INBOX,moc"
else
folders="-f INBOX"
fi
fi
logger -p info -t ${USER}-imap "[${$}] start ${repo} ${folders}"
`offlineimap -o -u ${verbosity} -q -a ${repo} ${folders}| sed -e 's/\; skipping it. Please see FAQ and manual on how to handle this.//g' | sed -e 's/\;//g' | logger -t ${USER}-imap -p error` & monitor $!
logger -p info -t ${USER}-imap "[${$}] end"
|
rpaditya/dotfiles
|
bin/mailrun.sh
|
Shell
|
bsd-2-clause
| 1,958 |
# Global aliases
alias -g ...='../..'
alias -g ....='../../..'
alias -g .....='../../../..'
alias -g C='| wc -l'
alias -g H='| head'
alias -g L="| less"
alias -g N="| /dev/null"
alias -g S='| sort'
alias -g G='| grep' # now you can do: ls foo G something
# Functions
#
# (f)ind by (n)ame
# usage: fn foo
# to find all files containing 'foo' in the name
function fn() {
fd $1 | fpp
}
alias yt='noglob yt'
function yt() {
youtube-dl -f 'bestvideo[ext=mp4][height <=? 720]+bestaudio[ext=m4a]/mp4' $1 -o '~/Movies/youtube/%(uploader)s - %(title)s.%(ext)s' &>/dev/null &
}
alias ytlist='noglob ytlist'
function ytlist() {
youtube-dl --yes-playlist --ignore-errors -f 'bestvideo[ext=mp4][height <=? 720]+bestaudio[ext=m4a]/mp4' $1 -o '~/Movies/youtube/%(playlist)s/%(playlist)s - %(playlist_index)s - %(title)s.%(ext)s'
}
alias ytaudio='noglob ytaudio'
function ytaudio() {
youtube-dl -f 'bestaudio[ext=m4a]' $1 -o '~/Music/youtubemusic/%(title)s.%(ext)s' &>/dev/null &
}
alias ytaudiolist='noglob ytaudiolist'
function ytaudiolist() {
youtube-dl --yes-playlist --ignore-errors -f 'bestaudio[ext=m4a]' $1 -o '~/Movies/youtube/%(playlist)s/%(playlist)s - %(playlist_index)s - %(title)s.%(ext)s'
}
function gif() {
gifify -r 30@2 -p 215:447 $1
}
function fix_mosh_server() {
local fw='/usr/libexec/ApplicationFirewall/socketfilterfw'
local mosh_sym="$(which mosh-server)"
local mosh_abs="$(greadlink -f $mosh_sym)"
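  # greadlink -f (GNU readlink from coreutils) resolves the symlink to the real
  # binary; the firewall needs rules for both the symlink and its target below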
sudo "$fw" --setglobalstate off
sudo "$fw" --add "$mosh_sym"
sudo "$fw" --unblockapp "$mosh_sym"
sudo "$fw" --add "$mosh_abs"
sudo "$fw" --unblockapp "$mosh_abs"
sudo "$fw" --setglobalstate on
}
|
tkuichooseyou/dotfiles
|
zsh/zsh-aliases.zsh
|
Shell
|
bsd-2-clause
| 1,641 |
export GUSTO_PROJECTS_ROOT=/Users/matan.zruya/workspace/gusto
function zp() {
cd $GUSTO_PROJECTS_ROOT/zenpayroll
}
function ste() {
if [ "$*" = 'run' ]; then
(ste; ./bin/ste_docker)
else
cd $GUSTO_PROJECTS_ROOT/ste
fi
}
|
mzruya/dotfiles
|
gusto/aliases.zsh
|
Shell
|
bsd-2-clause
| 238 |
#!/bin/bash
qemu-img create test.img 100M
|
emhj/opendev
|
qemu/workshop_1/2_demo/script_1.sh
|
Shell
|
bsd-3-clause
| 44 |
#!/usr/local/bin/bash
font='Roboto Medium:size=13'
workspace() {
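  # xprop prints something like "_NET_CURRENT_DESKTOP = 3" (illustrative output);
  # EWMH desktops are zero-based, hence the +1 below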
let desktop=$(xprop -root -notype _NET_CURRENT_DESKTOP | awk -F' = ' '{print $2}';)+1
echo "^fn(DroidSansMonoForPowerline Nerd Font:size=22)^fn($font) $desktop"
return
}
while :; do
buf=""
buf="${buf}^pa(1620) $(workspace)"
echo $buf
sleep 0.25
done | dzen2 -h 28 -w 1920 -fn 'Roboto Medium:size=13' -ta l -e "button7=exit"
|
dszidi/knoxbug-dzen
|
scripts/workspace.sh
|
Shell
|
bsd-3-clause
| 411 |
#!/bin/sh -login
#PBS -l nodes=1:ppn=4,mem=24gb,walltime=12:00:00
#PBS -M [email protected]
#PBS -m abe
#PBS -N Bowtie
module load bowtie/1.0.0
cd ${PBS_O_WORKDIR}
bowtie -M 100 -S -t -p 4 ${index} ${input} ${output}
|
likit/gimme_protocols
|
bowtie_job.sh
|
Shell
|
bsd-3-clause
| 217 |
#!/bin/sh -
#
# Copyright (c) 1992 Diomidis Spinellis.
# Copyright (c) 1992, 1993
# The Regents of the University of California. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 4. Neither the name of the University nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# @(#)sed.test 8.1 (Berkeley) 6/6/93
#
# $FreeBSD$
#
# sed Regression Tests
#
# The directory regress.test.out contains the expected test results
#
# These are the regression tests mostly created during the development
# of the BSD sed. Each test should have a unique mark name, which is
# used for naming the corresponding file in regress.multitest.out.
SRCDIR=$(dirname $0)
main()
{
REGRESS=${SRCDIR}/regress.multitest.out
DICT=/usr/share/dict/words
awk 'END { for (i = 1; i < 15; i++) print "l1_" i}' </dev/null >lines1
awk 'END { for (i = 1; i < 10; i++) print "l2_" i}' </dev/null >lines2
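	# lines1 now holds l1_1 .. l1_14 and lines2 holds l2_1 .. l2_9; the test
	# cases below match against these fixed names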
echo "1..129"
exec 4>&1 5>&2
tests
exec 1>&4 2>&5
# Remove temporary files
rm -f current.out lines[1-4] script[1-2]
}
tests()
{
SED=sed
MARK=0
test_args
test_addr
test_group
test_acid
test_branch
test_pattern
test_print
test_subst
test_error
# Handle the result of the last test
result
}
# Display a test's result
result()
{
if [ "$TODO" = '1' ] ; then
TODO='TODO '
else
TODO=''
fi
if ! [ -r $REGRESS/${TESTNAME} ] ; then
echo "Seeding $REGRESS/${TESTNAME} with current result" 1>&2
cp current.out $REGRESS/${TESTNAME}
fi
if diff -c $REGRESS/${TESTNAME} current.out ; then
echo "ok $MARK $TESTNAME # $TODO$COMMENT"
else
echo "not ok $MARK $TESTNAME # $TODO$COMMENT"
fi 1>&4 2>&5
}
# Mark the beginning of each test
mark()
{
[ $MARK -gt 0 ] && result
MARK=`expr $MARK + 1`
TESTNAME=$1
exec 1>&4 2>&5
exec >"current.out"
}
test_args()
{
COMMENT='Argument parsing - first type'
mark '1.1'
$SED 's/^/e1_/p' lines1
mark '1.2' ; $SED -n 's/^/e1_/p' lines1
mark '1.3'
$SED 's/^/e1_/p' <lines1
mark '1.4' ; $SED -n 's/^/e1_/p' <lines1
COMMENT='Argument parsing - second type'
mark '1.4.1'
$SED -e '' <lines1
echo 's/^/s1_/p' >script1
echo 's/^/s2_/p' >script2
mark '1.5'
$SED -f script1 lines1
mark '1.6'
$SED -f script1 <lines1
mark '1.7'
$SED -e 's/^/e1_/p' lines1
mark '1.8'
$SED -e 's/^/e1_/p' <lines1
mark '1.9' ; $SED -n -f script1 lines1
mark '1.10' ; $SED -n -f script1 <lines1
mark '1.11' ; $SED -n -e 's/^/e1_/p' lines1
mark '1.12'
$SED -n -e 's/^/e1_/p' <lines1
mark '1.13'
$SED -e 's/^/e1_/p' -e 's/^/e2_/p' lines1
mark '1.14'
$SED -f script1 -f script2 lines1
mark '1.15'
$SED -e 's/^/e1_/p' -f script1 lines1
mark '1.16'
$SED -e 's/^/e1_/p' lines1 lines1
# POSIX D11.2:11251
mark '1.17' ; $SED p <lines1 lines1
cat >script1 <<EOF
#n
# A comment
p
EOF
mark '1.18' ; $SED -f script1 <lines1 lines1
}
test_addr()
{
COMMENT='Address ranges'
mark '2.1' ; $SED -n -e '4p' lines1
mark '2.2' ; $SED -n -e '20p' lines1 lines2
mark '2.3' ; $SED -n -e '$p' lines1
mark '2.4' ; $SED -n -e '$p' lines1 lines2
mark '2.5' ; $SED -n -e '$a\
hello' /dev/null
mark '2.6' ; $SED -n -e '$p' lines1 /dev/null lines2
# Should not print anything
mark '2.7' ; $SED -n -e '20p' lines1
mark '2.8' ; $SED -n -e '/NOTFOUND/p' lines1
mark '2.9' ; $SED -n '/l1_7/p' lines1
mark '2.10' ; $SED -n ' /l1_7/ p' lines1
mark '2.11' ; $SED -n '\_l1\_7_p' lines1
mark '2.12' ; $SED -n '1,4p' lines1
mark '2.13' ; $SED -n '1,$p' lines1 lines2
mark '2.14' ; $SED -n '1,/l2_9/p' lines1 lines2
mark '2.15' ; $SED -n '/4/,$p' lines1 lines2
mark '2.16' ; $SED -n '/4/,20p' lines1 lines2
mark '2.17' ; $SED -n '/4/,/10/p' lines1 lines2
mark '2.18' ; $SED -n '/l2_3/,/l1_8/p' lines1 lines2
mark '2.19' ; $SED -n '12,3p' lines1 lines2
mark '2.20' ; $SED -n '/l1_7/,3p' lines1 lines2
mark '2.21' ; $SED -n '13,+4p' lines1 lines2
mark '2.22' ; $SED -n '/l1_6/,+2p' lines1 lines2
}
test_group()
{
COMMENT='Brace and other grouping'
mark '3.1' ; $SED -e '
4,12 {
s/^/^/
s/$/$/
s/_/T/
}' lines1
mark '3.2' ; $SED -e '
4,12 {
s/^/^/
/6/,/10/ {
s/$/$/
/8/ s/_/T/
}
}' lines1
mark '3.3' ; $SED -e '
4,12 !{
s/^/^/
/6/,/10/ !{
s/$/$/
/8/ !s/_/T/
}
}' lines1
mark '3.4' ; $SED -e '4,12!s/^/^/' lines1
}
test_acid()
{
COMMENT='Commands a c d and i'
mark '4.1' ; $SED -n -e '
s/^/before_i/p
20i\
inserted
s/^/after_i/p
' lines1 lines2
mark '4.2' ; $SED -n -e '
5,12s/^/5-12/
s/^/before_a/p
/5-12/a\
appended
s/^/after_a/p
' lines1 lines2
mark '4.3'
$SED -n -e '
s/^/^/p
/l1_/a\
appended
8,10N
s/$/$/p
' lines1 lines2
mark '4.4' ; $SED -n -e '
c\
hello
' lines1
mark '4.5' ; $SED -n -e '
8c\
hello
' lines1
mark '4.6' ; $SED -n -e '
3,14c\
hello
' lines1
# SunOS and GNU sed behave differently. We follow POSIX
mark '4.7' ; $SED -n -e '
8,3c\
hello
' lines1
mark '4.8' ; $SED d <lines1
}
test_branch()
{
COMMENT='Labels and branching'
mark '5.1' ; $SED -n -e '
b label4
:label3
s/^/label3_/p
b end
:label4
2,12b label1
b label2
:label1
s/^/label1_/p
b
:label2
s/^/label2_/p
b label3
:end
' lines1
mark '5.2'
$SED -n -e '
s/l1_/l2_/
t ok
b
:ok
s/^/tested /p
' lines1 lines2
# SunOS and GNU sed behave as follows: lines 9-$ aren't printed at all
mark '5.3' ; $SED -n -e '
5,8b inside
1,5 {
s/^/^/p
:inside
s/$/$/p
}
' lines1
# Check that t clears the substitution done flag
mark '5.4' ; $SED -n -e '
1,8s/^/^/
t l1
:l1
t l2
s/$/$/p
b
:l2
s/^/ERROR/
' lines1
# Check that reading a line clears the substitution done flag
mark '5.5'
$SED -n -e '
t l2
1,8s/^/^/p
2,7N
b
:l2
s/^/ERROR/p
' lines1
mark '5.6' ; $SED 5q lines1
mark '5.7' ; $SED -e '
5i\
hello
5q' lines1
# Branch across block boundary
mark '5.8' ; $SED -e '
{
:b
}
s/l/m/
tb' lines1
}
test_pattern()
{
COMMENT='Pattern space commands'
# Check that the pattern space is deleted
mark '6.1' ; $SED -n -e '
c\
changed
p
' lines1
mark '6.2' ; $SED -n -e '
4d
p
' lines1
mark '6.3'
$SED -e 'N;N;N;D' lines1
mark '6.4' ; $SED -e '
2h
3H
4g
5G
6x
6p
6x
6p
' lines1
mark '6.5' ; $SED -e '4n' lines1
mark '6.6' ; $SED -n -e '4n' lines1
}
test_print()
{
COMMENT='Print and file routines'
awk 'END {for (i = 1; i < 256; i++) printf("%c", i);print "\n"}' \
</dev/null >lines3
# GNU and SunOS sed behave differently here
mark '7.1'
$SED -n l lines3
mark '7.2' ; $SED -e '/l2_/=' lines1 lines2
rm -f lines4
mark '7.3' ; $SED -e '3,12w lines4' lines1
COMMENT='w results'
cat lines4
mark '7.4' ; $SED -e '4r lines2' lines1
mark '7.5' ; $SED -e '5r /dev/dds' lines1
mark '7.6' ; $SED -e '6r /dev/null' lines1
mark '7.7'
sed '200q' $DICT | sed 's$.*$s/^/&/w tmpdir/&$' >script1
rm -rf tmpdir
mkdir tmpdir
$SED -f script1 lines1
cat tmpdir/*
rm -rf tmpdir
mark '7.8'
echo line1 > lines3
echo "" >> lines3
TODO=1
$SED -n -e '$p' lines3 /dev/null
}
test_subst()
{
COMMENT='Substitution commands'
mark '8.1' ; $SED -e 's/./X/g' lines1
mark '8.2' ; $SED -e 's,.,X,g' lines1
# SunOS sed thinks we are escaping . as wildcard, not as separator
mark '8.3'
$SED -e 's.\..X.g' lines1
mark '8.4' ; $SED -e 's/[\/]/Q/' lines1
mark '8.5' ; $SED -e 's_\__X_' lines1
mark '8.6' ; $SED -e 's/./(&)/g' lines1
mark '8.7' ; $SED -e 's/./(\&)/g' lines1
mark '8.8' ; $SED -e 's/\(.\)\(.\)\(.\)/x\3x\2x\1/g' lines1
mark '8.9' ; $SED -e 's/_/u0\
u1\
u2/g' lines1
mark '8.10'
$SED -e 's/./X/4' lines1
rm -f lines4
mark '8.11' ; $SED -e 's/1/X/w lines4' lines1
COMMENT='s wfile results'
cat lines4
mark '8.12' ; $SED -e 's/[123]/X/g' lines1
mark '8.13' ; $SED -e 'y/0123456789/9876543210/' lines1
mark '8.14' ;
$SED -e 'y10\123456789198765432\101' lines1
mark '8.15' ; $SED -e '1N;2y/\n/X/' lines1
mark '8.16'
echo 'eeefff' | $SED -e '
p
s/e/X/p
:x
s//Y/p
# Establish limit counter in the hold space
# GNU sed version 3.02 enters into an infinite loop here
x
/.\{10\}/ {
s/.*/ERROR/
b
}
s/.*/&./
x
/f/bx
'
# POSIX does not say that this should work,
# but it does for GNU, BSD, and SunOS
mark '8.17' ; $SED -e 's/[/]/Q/' lines1
COMMENT='[ as an s delimiter and its escapes'
mark '8.18' ; $SED -e 's[_[X[' lines1
# This is a matter of interpretation
# POSIX 1003.1, 2004 says "Within the BRE and the replacement,
# the BRE delimiter itself can be used as a *literal* character
# if it is preceded by a backslash"
# SunOS 5.1 /usr/bin/sed and Mac OS X follow the literal POSIX
# interpretation.
# GNU sed version 4.1.5 treats \[ as the beginning of a character
# set specification (both with --posix and without).
mark '8.19' ; sed 's/l/[/' lines1 | $SED -e 's[\[.[X['
mark '8.20' ; sed 's/l/[/' lines1 | $SED -e 's[\[.[X\[['
COMMENT='\ in y command'
mark '8.21'
echo 'a\b(c' |
$SED 'y%ABCDEFGHIJKLMNOPQRSTUVWXYZ, /\\()"%abcdefghijklmnopqrstuvwxyz,------%'
COMMENT='\n in a character class and a BRE'
mark '8.22' ; (echo 1; echo 2) | $SED -n '1{;N;s/[\n]/X/;p;}'
mark '8.23' ; (echo 1; echo 2) | $SED -n '1{;N;s/\n/X/;p;}'
}
test_error()
{
COMMENT='Error cases'
mark '9.1' ; $SED -x 2>/dev/null ; echo $?
mark '9.2' ; $SED -f 2>/dev/null ; echo $?
mark '9.3' ; $SED -e 2>/dev/null ; echo $?
mark '9.4' ; $SED -f /dev/xyzzyxyzy 2>/dev/null ; echo $?
mark '9.5' ; $SED p /dev/xyzzyxyzy 2>/dev/null ; echo $?
mark '9.6' ; $SED -f /bin/sh 2>/dev/null ; echo $?
mark '9.7' ; $SED '{' 2>/dev/null ; echo $?
mark '9.8' ; $SED '{' 2>/dev/null ; echo $?
mark '9.9' ; $SED '/hello/' 2>/dev/null ; echo $?
mark '9.10' ; $SED '1,/hello/' 2>/dev/null ; echo $?
mark '9.11' ; $SED -e '-5p' 2>/dev/null ; echo $?
mark '9.12' ; $SED '/jj' 2>/dev/null ; echo $?
mark '9.13' ; $SED 'a hello' 2>/dev/null ; echo $?
mark '9.14' ; $SED 'a \ hello' 2>/dev/null ; echo $?
mark '9.15' ; $SED 'b foo' 2>/dev/null ; echo $?
mark '9.16' ; $SED 'd hello' 2>/dev/null ; echo $?
mark '9.17' ; $SED 's/aa' 2>/dev/null ; echo $?
mark '9.18' ; $SED 's/aa/' 2>/dev/null ; echo $?
mark '9.19' ; $SED 's/a/b' 2>/dev/null ; echo $?
mark '9.20' ; $SED 's/a/b/c/d' 2>/dev/null ; echo $?
mark '9.21' ; $SED 's/a/b/ 1 2' 2>/dev/null ; echo $?
mark '9.22' ; $SED 's/a/b/ 1 g' 2>/dev/null ; echo $?
mark '9.23' ; $SED 's/a/b/w' 2>/dev/null ; echo $?
mark '9.24' ; $SED 'y/aa' 2>/dev/null ; echo $?
mark '9.25' ; $SED 'y/aa/b/' 2>/dev/null ; echo $?
mark '9.26' ; $SED 'y/aa/' 2>/dev/null ; echo $?
mark '9.27' ; $SED 'y/a/b' 2>/dev/null ; echo $?
mark '9.28' ; $SED 'y/a/b/c/d' 2>/dev/null ; echo $?
mark '9.29' ; $SED '!' 2>/dev/null ; echo $?
mark '9.30' ; $SED supercalifrangolisticexprialidociussupercalifrangolisticexcius 2>/dev/null ; echo $?
mark '9.31' ; $SED '' /dev/null 2>/dev/null ; echo $?
}
main
|
jrobhoward/SCADAbase
|
usr.bin/sed/tests/multi_test.sh
|
Shell
|
bsd-3-clause
| 11,866 |
export MONGO_URL="mongodb://localhost:27017/MedBook"
|
UCSC-MedBook/MedBook_
|
wrangler/config/development/env.sh
|
Shell
|
bsd-3-clause
| 53 |
#!/usr/bin/env bash
# Sets up gfal plugins for use in a CMSSW environment.
# In fact, all plugins seem to work out of the box except for the xrootd plugin.
# Therefore, all other plugins are symlinked into a specified directory, and a precompiled xrootd
# plugin is copied from eos through a CERNBox link.
# Tested with SCRAM_ARCH's slc{6,7}_amd64_gcc{630,700,820} and CMSSW {9,10}.
# Arguments:
# 1. the absolute path to the new gfal plugin directory
# 2. (optional) the path to the initial gfal plugin directory, defaults to $GFAL_PLUGIN_DIR or, when
# empty, to the default plugin directory, obtained with _law_gfal2_default_plugin_dir
action() {
local dst_dir="$1"
if [ -z "$dst_dir" ]; then
>&2 echo "please provide the path to the new GFAL_PLUGIN_DIR"
return "1"
fi
local src_dir="$2"
if [ -z "$src_dir" ]; then
echo "no plugin source directory passed"
if [ ! -z "$GFAL_PLUGIN_DIR" ]; then
echo "using GFAL_PLUGIN_DIR variable ($GFAL_PLUGIN_DIR)"
src_dir="$GFAL_PLUGIN_DIR"
else
src_dir="$( _law_gfal2_default_plugin_dir )"
if [ -z "$src_dir" ]; then
>&2 echo "could not detect the default gfal2 plugin directory"
return "1"
fi
echo "detected the default gfal2 plugin directory ($src_dir)"
fi
fi
# check of the src_dir exists
if [ ! -d "$src_dir" ]; then
>&2 echo "source directory '$src_dir' does not exist"
return "1"
fi
# create the dst_dir if required
mkdir -p "$dst_dir"
if [ "$?" != "0" ]; then
>&2 echo "destination directory '$dst_dir' could not be created"
return "1"
fi
# symlink all plugins
( \
cd "$dst_dir" && \
ln -s $src_dir/libgfal_plugin_*.so .
)
# remove the xrootd plugin and download a version that was compiled ontop of CMSSW
rm -f "$dst_dir/libgfal_plugin_xrootd.so"
local plugin_url="https://cernbox.cern.ch/index.php/s/qgrogVY4bwcuCXt/download"
if [ ! -z "$( type curl 2> /dev/null )" ]; then
( \
cd "$dst_dir" && \
curl "$plugin_url" > libgfal_plugin_xrootd.so
)
elif [ ! -z "$( type wget 2> /dev/null )" ]; then
( \
cd "$dst_dir" && \
wget "$plugin_url" && \
mv download libgfal_plugin_xrootd.so
)
else
>&2 echo "could not download xrootd plugin, neither wget nor curl installed"
return "1"
fi
# export the new GFAL_PLUGIN_DIR
export GFAL_PLUGIN_DIR="$dst_dir"
echo "new GFAL_PLUGIN_DIR is '$GFAL_PLUGIN_DIR'"
}
_law_gfal2_default_plugin_dir() {
# there is no easy way to access the default gfal2 plugin dir within python
# in fact, the value is set through a preprocessor variable during gfal2 compilation and only
# used here: https://gitlab.cern.ch/dmc/gfal2/blob/a8a64e16427ec5a718bd77bcdbf80abe96de995e/src/core/common/gfal_plugin.c#L290
# although it is very dirty, one can parse the "[gfal_module_load]" log
local pycmd="\
import os, logging, gfal2\n\
try: from cStringIO import StringIO\n\
except: from io import StringIO\n\
s = StringIO()\n\
logger = logging.getLogger('gfal2')\n\
logger.addHandler(logging.StreamHandler(s))\n\
logger.setLevel(logging.DEBUG)\n\
gfal2.creat_context()\n\
for line in s.getvalue().split('\\\\n'):\n\
line = line.strip()\n\
start = '[gfal_module_load] plugin '\n\
end = ' loaded with success'\n\
if line.startswith(start) and line.endswith(end):\n\
plugin = line[len(start):-len(end)].strip()\n\
plugin_dir = os.path.dirname(os.path.normpath(plugin))\n\
print(plugin_dir)\n\
break\n\
"
    echo -e "$pycmd" | GFAL_PLUGIN_DIR= python
}
action "$@"
|
riga/law
|
law/contrib/cms/scripts/setup_gfal_plugins.sh
|
Shell
|
bsd-3-clause
| 3,787 |
#! /bin/bash
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
# update PATH to include kconfiglib scripts
export PATH=${DIR}:${PATH}
make --no-print-directory --silent CONFIG_ARCH_BOARD_CUSTOM=y CONFIG_APPS_DIR="../apps" olddefconfig
|
PX4/Firmware
|
platforms/nuttx/NuttX/tools/px4_nuttx_make_olddefconfig.sh
|
Shell
|
bsd-3-clause
| 260 |
#!/bin/bash
curl http://localhost:8080/myapp/conceptmapper/test.txt
|
croeder/ccp_nlp
|
ws/curlcm.sh
|
Shell
|
bsd-3-clause
| 70 |
#!/bin/bash
set -e
examples="\
la_umfpack01 \
la_sparsecomplex01 \
num_deriv01 \
vtk_isosurf01 "
for ex in $examples; do
echo
echo
echo "[1;32m>>> running $ex <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<[0m"
go run "$ex".go
done
|
PaddySchmidt/gosl
|
examples/all.bash
|
Shell
|
bsd-3-clause
| 252 |
#!/usr/bin/env bash
set -eux
mkdir ${WORKDIR}/test-loop
ln -s ${WORKDIR}/test-loop ${WORKDIR}/test-loop/test-loop
set +e
# check that ttap does not run into an infinite loop on the self-referencing symlink
ttap ${WORKDIR}
if [[ $? == 0 ]]; then
exit 1
fi
|
fjkz/ttap
|
test/scripts/test-loop.sh
|
Shell
|
mit
| 212 |
#! /bin/bash
#-------------------------------------------------------
export OPENSARKIT="change me"
# path to snap installation folder
export SNAP="change me"
export SNAP_EXE="${SNAP}/bin/gpt"
# Folder of OFST database
# 1.) download the database here: https://www.dropbox.com/s/qvujm3l0ba0frch/OFST_db.sqlite
# 2.) place somewhere
# e.g. if placed into ${OPENSARKIT}/Database folder it should look like this:
# export DB_GLOBAL="${OPENSARKIT}/Database/OFST_db.sqlite"
export DB_GLOBAL="change me"
#-------------------------------------------------------
# this does not have to be changed
# versionin etc
export OSK_VERSION=0.1-beta
# source worklows/graphs
export SNAP_GRAPHS="${OPENSARKIT}/workflows/SNAP"
# source bins
export SNAP_BIN="${OPENSARKIT}/bins/SNAP"
export ASF_BIN="${OPENSARKIT}/bins/ASF"
export KC_BIN="${OPENSARKIT}/bins/KC"
export DOWNLOAD_BIN="${OPENSARKIT}/bins/Download"
# export to Path
export PATH=$PATH:${PYTHON_BIN}:${ASF_BIN}:${SNAP_BIN}:${GDAL_BIN}:${DOWNLOAD_BIN}:${ASF_EXE}:${SNAP}:${KC_BIN}:${REMOTE_BIN}:${SAGA_BIN}:${POLSAR_BIN}:${NEST_BIN}
#----------------------------------------------------------
|
BuddyVolly/OpenSARKit
|
template_source.bash
|
Shell
|
mit
| 1,143 |
#!/bin/sh
TIMES=0
while [ $TIMES -lt 2 ]
do
sleep 2
TIMES=$((TIMES+1))
echo "$TIMES"
done
echo "------------"
|
junkainiu/learning
|
bash/joy.sh
|
Shell
|
mit
| 146 |
start=`date +%s.%N`
cntk configFile=AlexNet.cntk configName=AlexNet >1GPU.log 2>&1
end=`date +%s.%N`
runtime=$( echo "$end - $start" | bc -l )
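# bc -l does the floating-point subtraction, since date's %N gives fractional
# seconds (e.g. 12.50 - 10.00 -> 2.50)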
echo "finished with execute time: ${runtime}" >>1GPU.log
|
linmajia/dlbench
|
synthetic/experiments/cntk/cnn/alexnet/alexnet_1GPU.sh
|
Shell
|
mit
| 201 |
#!/bin/bash
#
# The Unix toolset used in the script is installed along with
# MSysGit/Git for Windows.
#
set -e
set -u
CURDIR=$(dirname "$0")
cd $CURDIR/..
APPNAME=$(basename `pwd`)
./build/Debug/$APPNAME.exe
|
mrts/snippets-cpp
|
template/scripts/run.sh
|
Shell
|
mit
| 211 |
#!/bin/bash
FN="faahKO_1.24.0.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.9/data/experiment/src/contrib/faahKO_1.24.0.tar.gz"
"https://bioarchive.galaxyproject.org/faahKO_1.24.0.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-faahko/bioconductor-faahko_1.24.0_src_all.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-faahko/bioconductor-faahko_1.24.0_src_all.tar.gz"
)
MD5="c2d5a234c32448986696671867e5fbca"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
curl $URL > $TARBALL
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks.
if [[ $(uname -s) == "Linux" ]]; then
    if md5sum -c <<<"$MD5  $TARBALL"; then
SUCCESS=1
break
fi
  elif [[ $(uname -s) == "Darwin" ]]; then
    if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
      SUCCESS=1
      break
    fi
  fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
|
jfallmann/bioconda-recipes
|
recipes/bioconductor-faahko/post-link.sh
|
Shell
|
mit
| 1,388 |
#!/usr/bin/env bash
PWD=`pwd`
zip -r function.zip package.json node_modules/* lib/* index.js environment/* environment/.env
aws lambda update-function-code \
--function-name distributed-job-runner \
--zip-file fileb://$PWD/function.zip \
--region us-east-1
rm function.zip
aws s3 sync ./client s3://tesera.svc.distributed-job-runner/client
|
tesera/distributed-job-runner-awslambda
|
deploy.sh
|
Shell
|
mit
| 349 |
#!/bin/bash
sudo apt-get install autoconf2.64 automake1.11 gcc-4.6-multilib libtool-bin texinfo gperf bison flex
|
LazyOxen/redox
|
libc/ubuntu.sh
|
Shell
|
mit
| 113 |
#
# Sets key bindings.
#
# Authors:
# Sorin Ionescu <[email protected]>
#
# Return if requirements are not found.
if [[ "$TERM" == 'dumb' ]]; then
return 1
fi
#
# Options
#
# Beep on error in line editor.
setopt BEEP
#
# Variables
#
# Treat these characters as part of a word.
WORDCHARS='*?_-.[]~&;!#$%^(){}<>'
# Use human-friendly identifiers.
zmodload zsh/terminfo
typeset -gA key_info
key_info=(
'Control' '\C-'
'ControlLeft' '\e[1;5D \e[5D \e\e[D \eOd'
'ControlRight' '\e[1;5C \e[5C \e\e[C \eOc'
'ControlPageUp' '\e[5;5~'
'ControlPageDown' '\e[6;5~'
'Escape' '\e'
'Meta' '\M-'
'Backspace' "^?"
'Delete' "^[[3~"
'F1' "$terminfo[kf1]"
'F2' "$terminfo[kf2]"
'F3' "$terminfo[kf3]"
'F4' "$terminfo[kf4]"
'F5' "$terminfo[kf5]"
'F6' "$terminfo[kf6]"
'F7' "$terminfo[kf7]"
'F8' "$terminfo[kf8]"
'F9' "$terminfo[kf9]"
'F10' "$terminfo[kf10]"
'F11' "$terminfo[kf11]"
'F12' "$terminfo[kf12]"
'Insert' "$terminfo[kich1]"
'Home' "$terminfo[khome]"
'PageUp' "$terminfo[kpp]"
'End' "$terminfo[kend]"
'PageDown' "$terminfo[knp]"
'Up' "$terminfo[kcuu1]"
'Left' "$terminfo[kcub1]"
'Down' "$terminfo[kcud1]"
'Right' "$terminfo[kcuf1]"
'BackTab' "$terminfo[kcbt]"
)
# Set empty $key_info values to an invalid UTF-8 sequence to induce silent
# bindkey failure.
for key in "${(k)key_info[@]}"; do
if [[ -z "$key_info[$key]" ]]; then
key_info[$key]='�'
fi
done
#
# External Editor
#
# Allow command line editing in an external editor.
autoload -Uz edit-command-line
zle -N edit-command-line
#
# Functions
#
# Runs bindkey but for all of the keymaps. Running it with no arguments will
# print out the mappings for all of the keymaps.
function bindkey-all {
local keymap=''
for keymap in $(bindkey -l); do
[[ "$#" -eq 0 ]] && printf "#### %s\n" "${keymap}" 1>&2
bindkey -M "${keymap}" "$@"
done
}
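# Example usage (illustrative; these bindings are not made by this module):
#   bindkey-all                                      # list every keymap's bindings
#   bindkey-all "$key_info[Control]L" clear-screen   # bind Ctrl-L in all keymaps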
# Exposes information about the Zsh Line Editor via the $editor_info associative
# array.
function editor-info {
# Clean up previous $editor_info.
unset editor_info
typeset -gA editor_info
if [[ "$KEYMAP" == 'vicmd' ]]; then
zstyle -s ':prezto:module:editor:info:keymap:alternate' format 'REPLY'
editor_info[keymap]="$REPLY"
else
zstyle -s ':prezto:module:editor:info:keymap:primary' format 'REPLY'
editor_info[keymap]="$REPLY"
if [[ "$ZLE_STATE" == *overwrite* ]]; then
zstyle -s ':prezto:module:editor:info:keymap:primary:overwrite' format 'REPLY'
editor_info[overwrite]="$REPLY"
else
zstyle -s ':prezto:module:editor:info:keymap:primary:insert' format 'REPLY'
editor_info[overwrite]="$REPLY"
fi
fi
unset REPLY
zle zle-reset-prompt
}
zle -N editor-info
# Reset the prompt based on the current context and
# the ps-context option.
function zle-reset-prompt {
if zstyle -t ':prezto:module:editor' ps-context; then
# If we aren't within one of the specified contexts, then we want to reset
# the prompt with the appropriate editor_info[keymap] if there is one.
if [[ $CONTEXT != (select|cont) ]]; then
zle reset-prompt
zle -R
fi
else
zle reset-prompt
zle -R
fi
}
zle -N zle-reset-prompt
# Updates editor information when the keymap changes.
function zle-keymap-select {
zle editor-info
}
zle -N zle-keymap-select
# Enables terminal application mode and updates editor information.
function zle-line-init {
# The terminal must be in application mode when ZLE is active for $terminfo
# values to be valid.
if (( $+terminfo[smkx] )); then
# Enable terminal application mode.
echoti smkx
fi
# Update editor information.
zle editor-info
}
zle -N zle-line-init
# Disables terminal application mode and updates editor information.
function zle-line-finish {
# The terminal must be in application mode when ZLE is active for $terminfo
# values to be valid.
if (( $+terminfo[rmkx] )); then
# Disable terminal application mode.
echoti rmkx
fi
# Update editor information.
zle editor-info
}
zle -N zle-line-finish
# Toggles emacs overwrite mode and updates editor information.
function overwrite-mode {
zle .overwrite-mode
zle editor-info
}
zle -N overwrite-mode
# Enters vi insert mode and updates editor information.
function vi-insert {
zle .vi-insert
zle editor-info
}
zle -N vi-insert
# Moves to the first non-blank character then enters vi insert mode and updates
# editor information.
function vi-insert-bol {
zle .vi-insert-bol
zle editor-info
}
zle -N vi-insert-bol
# Enters vi replace mode and updates editor information.
function vi-replace {
zle .vi-replace
zle editor-info
}
zle -N vi-replace
# Expands .... to ../..
function expand-dot-to-parent-directory-path {
if [[ $LBUFFER = *.. ]]; then
LBUFFER+='/..'
else
LBUFFER+='.'
fi
}
zle -N expand-dot-to-parent-directory-path
# Displays an indicator when completing.
function expand-or-complete-with-indicator {
local indicator
zstyle -s ':prezto:module:editor:info:completing' format 'indicator'
# This is included to work around a bug in zsh which shows up when interacting
# with multi-line prompts.
if [[ -z "$indicator" ]]; then
zle expand-or-complete
return
fi
print -Pn "$indicator"
zle expand-or-complete
zle redisplay
}
zle -N expand-or-complete-with-indicator
# Inserts 'sudo ' at the beginning of the line.
function prepend-sudo {
if [[ "$BUFFER" != su(do|)\ * ]]; then
BUFFER="sudo $BUFFER"
(( CURSOR += 5 ))
fi
}
zle -N prepend-sudo
# Expand aliases
function glob-alias {
zle _expand_alias
zle expand-word
zle magic-space
}
zle -N glob-alias
# Reset to default key bindings.
bindkey -d
#
# Emacs Key Bindings
#
for key in "$key_info[Escape]"{B,b} "${(s: :)key_info[ControlLeft]}"
bindkey -M emacs "$key" emacs-backward-word
for key in "$key_info[Escape]"{F,f} "${(s: :)key_info[ControlRight]}"
bindkey -M emacs "$key" emacs-forward-word
# Kill to the beginning of the line.
for key in "$key_info[Escape]"{K,k}
bindkey -M emacs "$key" backward-kill-line
# Redo.
bindkey -M emacs "$key_info[Escape]_" redo
# Search previous character.
bindkey -M emacs "$key_info[Control]X$key_info[Control]B" vi-find-prev-char
# Match bracket.
bindkey -M emacs "$key_info[Control]X$key_info[Control]]" vi-match-bracket
# Edit command in an external editor.
bindkey -M emacs "$key_info[Control]X$key_info[Control]E" edit-command-line
if (( $+widgets[history-incremental-pattern-search-backward] )); then
bindkey -M emacs "$key_info[Control]R" \
history-incremental-pattern-search-backward
bindkey -M emacs "$key_info[Control]S" \
history-incremental-pattern-search-forward
fi
#
# Vi Key Bindings
#
# Edit command in an external editor emacs style (v is used for visual mode)
bindkey -M vicmd "$key_info[Control]X$key_info[Control]E" edit-command-line
# Undo/Redo
bindkey -M vicmd "u" undo
bindkey -M vicmd "$key_info[Control]R" redo
if (( $+widgets[history-incremental-pattern-search-backward] )); then
bindkey -M vicmd "?" history-incremental-pattern-search-backward
bindkey -M vicmd "/" history-incremental-pattern-search-forward
else
bindkey -M vicmd "?" history-incremental-search-backward
bindkey -M vicmd "/" history-incremental-search-forward
fi
#
# Emacs and Vi Key Bindings
#
# Unbound keys in vicmd and viins mode will cause really odd things to happen
# such as the casing of all the characters you have typed changing or other
# undefined things. In emacs mode they just insert a tilde, but bind these keys
# in the main keymap to a no-op, so that if there is no keybind in the user's
# mode it will fall back and do nothing.
function _prezto-zle-noop { ; }
zle -N _prezto-zle-noop
local -a unbound_keys
unbound_keys=(
"${key_info[F1]}"
"${key_info[F2]}"
"${key_info[F3]}"
"${key_info[F4]}"
"${key_info[F5]}"
"${key_info[F6]}"
"${key_info[F7]}"
"${key_info[F8]}"
"${key_info[F9]}"
"${key_info[F10]}"
"${key_info[F11]}"
"${key_info[F12]}"
"${key_info[PageUp]}"
"${key_info[PageDown]}"
"${key_info[ControlPageUp]}"
"${key_info[ControlPageDown]}"
)
for keymap in $unbound_keys; do
bindkey -M viins "${keymap}" _prezto-zle-noop
bindkey -M vicmd "${keymap}" _prezto-zle-noop
done
# Keybinds for all keymaps
for keymap in 'emacs' 'viins' 'vicmd'; do
bindkey -M "$keymap" "$key_info[Home]" beginning-of-line
bindkey -M "$keymap" "$key_info[End]" end-of-line
done
# Keybinds for all vi keymaps
for keymap in viins vicmd; do
# Ctrl + Left and Ctrl + Right bindings to forward/backward word
for key in "${(s: :)key_info[ControlLeft]}"
bindkey -M "$keymap" "$key" vi-backward-word
for key in "${(s: :)key_info[ControlRight]}"
bindkey -M "$keymap" "$key" vi-forward-word
done
# Keybinds for emacs and vi insert mode
for keymap in 'emacs' 'viins'; do
bindkey -M "$keymap" "$key_info[Insert]" overwrite-mode
bindkey -M "$keymap" "$key_info[Delete]" delete-char
bindkey -M "$keymap" "$key_info[Backspace]" backward-delete-char
bindkey -M "$keymap" "$key_info[Left]" backward-char
bindkey -M "$keymap" "$key_info[Right]" forward-char
# Expand history on space.
bindkey -M "$keymap" ' ' magic-space
# Clear screen.
bindkey -M "$keymap" "$key_info[Control]L" clear-screen
# Expand command name to full path.
for key in "$key_info[Escape]"{E,e}
bindkey -M "$keymap" "$key" expand-cmd-path
# Duplicate the previous word.
for key in "$key_info[Escape]"{M,m}
bindkey -M "$keymap" "$key" copy-prev-shell-word
# Use a more flexible push-line.
for key in "$key_info[Control]Q" "$key_info[Escape]"{q,Q}
bindkey -M "$keymap" "$key" push-line-or-edit
# Bind Shift + Tab to go to the previous menu item.
bindkey -M "$keymap" "$key_info[BackTab]" reverse-menu-complete
# Complete in the middle of word.
bindkey -M "$keymap" "$key_info[Control]I" expand-or-complete
# Expand .... to ../..
if zstyle -t ':prezto:module:editor' dot-expansion; then
bindkey -M "$keymap" "." expand-dot-to-parent-directory-path
fi
# Display an indicator when completing.
bindkey -M "$keymap" "$key_info[Control]I" \
expand-or-complete-with-indicator
# Insert 'sudo ' at the beginning of the line.
bindkey -M "$keymap" "$key_info[Control]X$key_info[Control]S" prepend-sudo
# control-space expands all aliases, including global
bindkey -M "$keymap" "$key_info[Control] " glob-alias
done
# Delete key deletes a character in vicmd mode instead of the weird default behavior
bindkey -M vicmd "$key_info[Delete]" delete-char
# Do not expand .... to ../.. during incremental search.
if zstyle -t ':prezto:module:editor' dot-expansion; then
bindkey -M isearch . self-insert 2> /dev/null
fi
#
# Layout
#
# Set the key layout.
zstyle -s ':prezto:module:editor' key-bindings 'key_bindings'
if [[ "$key_bindings" == (emacs|) ]]; then
bindkey -e
elif [[ "$key_bindings" == vi ]]; then
bindkey -v
else
print "prezto: editor: invalid key bindings: $key_bindings" >&2
fi
unset key{,map,_bindings}
|
vbwx/prezto
|
modules/editor/init.zsh
|
Shell
|
mit
| 11,240 |
#!/bin/sh
# update-itar.sh - update from the interim trust anchor repository
# Copyright 2009, W.C.A. Wijngaards
# This file is BSD licensed, see doc/LICENSE.
# --- Some settings
# directory where unbound works
thedir="."
# where is the file that unbound is going to read
ub_ta_file="$thedir/anchors.mf"
# where is the itar master file format
itar_url="ftp://ftp.iana.org/itar/anchors.mf"
# where is the itar PGP signature
itar_sig="ftp://ftp.iana.org/itar/anchors.mf.sig"
# which command to fetch urls, cmd $dest $url. "wget -O" "curl -o"
fetch_cmd="wget -O"
# file with pgp public key
pgp_pub_key_file="$thedir/update-itar.key"
# our pgp keyring (goes into .gnupg directory)
pgp_keyring_file="update-itar.ring"
# pgp command to use
pgp_cmd="gpg"
# --- The script is below
usage ( )
{
echo "usage: update-itar"
echo " Updates the trust anchors from the interim trust"
echo " anchor repository, https://itar.iana.org, and checks PGP sig."
echo
echo " Updates $ub_ta_file with the latest keys."
echo " Read that file from the unbound config with"
echo " trust-anchor-file: "'"'"$ub_ta_file"'"'
echo
echo " Exit code 0 means anchors updated, 1 no changes, "
echo " others are errors. So, in a cronjob you can do:"
echo " cd /usr/local/etc/unbound # your unbound work dir"
echo " ./update-itar.sh && unbound-control reload"
exit 2
}
if test $# -ne 0; then
usage
fi
tmpf="/tmp/update-itar.$$"
# one argument: explanation string
error_exit ( )
{
if test -f $tmpf.log; then cat $tmpf.log; fi
rm -f $tmpf $tmpf.sig $tmpf.log
echo "Error updating trust anchors: $1"
exit 2
}
if test ! -f $pgp_pub_key_file || test ! -f $HOME/.gnupg/$pgp_keyring_file || \
test "$pgp_pub_key_file" -nt $HOME/.gnupg/$pgp_keyring_file; then
# default key contents right here
if test ! -f $pgp_pub_key_file; then
echo "creating default IANA ITAR pgp key file"
cat >$pgp_pub_key_file <<EOF
-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: GnuPG v1.4.5
mQGiBElr2DcRBAC+6YK6eSP7rzstvnMPQXMrpvVfuIR5FeTpGuwae9JP78V/iOXr
N0yW8Dn6kdAztCMuRizL1Ij9IgaD7pjn8h09VgR4cN4LDv75rcQeWLzNxKy4UNRF
aStL77RcIoTblBeCgHAK9FLzd0XfTGZCNaLNy9BYVSLyADOVDIqgBcrvBwCglz03
QhOMIgaSx/XuRh6kYtynZ6kD/2GXx6pFs57b7rww8yOpdurCSOMB1wuEXiIXznTI
06ARiib0G5VDvOdpy0LDU2526Q9f/WAERlhcExTgnTFigG4mRksUiDrrai4GIr+6
JaivcGFVYdZZ4mZ088jcwujS/UY3C0ryGR9ufYUDAnfx6frhSl6o6j5is+jeGndF
JYRAA/9B/1OXNVwFSiIxnP2aPUwsT1li1vaW8dhA/5PcuPLOVvEjPc1Pc16HGLhE
8CRmMn66LqB1ccInE5hLKGGvV3pctjan+IOhaq3OHt/a+buDtTPgykchMZ2k1AzT
RYk+gksxpIl6yTZsBH4hoRt8auxEJW8AiYbNtXXkNuWcoQL40bQsSUFOQSBUcnVz
dCBBbmNob3IgUmVwb3NpdG9yeSA8aXRhckBpYW5hLm9yZz6IYAQTEQIAIAUCSWvY
NwIbAwYLCQgHAwIEFQIIAwQWAgMBAh4BAheAAAoJEPR9+zCB1GT0GUAAn29/TacF
Teh87dls8pmkjxS4pKf1AKCJS/MvzR54AblO4DNMyc9q0G4frrkCDQRJa9g3EAgA
ywoLxF4HVb9o926UIXw8JxNIzDPkt8galAcKRUmHQMNa/QA80WMl9Ia6DIxavGlV
I5O1fvms297RV2KSSBjKWn6G+0me80A7aw0eHNg7habM5VtzDz5DhJbQFdJV9aYV
DoTSnY6uR6iSSRrdZNaYdlCwDS8lBCnOKoGMArHzVCa2EdCBeCUY/eObOXtu8Gm3
nDkuWeLPv08/0lvtr6d6VoDUEVPEsJAPONEYtpz/D+EZddUraF+3JscWqfRApBOz
/8WHaeTNdzIH+h1ntktiizA6eH40EM6coQQxtIRsxg1DPTxofdovreMkcMI0EUUP
awDn8gDtgG3g6Ud5zCdknwAEDQf/W3rxrEN6OZxJvWti8Iu6KOgxtuINiAsimPPX
qy9KHowyEE9EMPzgADjWC9Reyusr4CwcootjWw7ryUwU0fXvVULKhg32NzLsx/Ci
WtgCPSR58WZ1KKhnoB05+PTrwzhU+u64Cd/vJtFqGxSrANl2FAkPswHJMr8dMwAz
uni7zzLJ+homW1T5AaixwmN3jeDHWshJH9E9JIhr5Y/8AzMl1V10r2u1c2ej0lBJ
Y4GegI5cYAEBerS9d+mrbPlvbJ8AfuAuEf0y/PWJh0z1+Kck5qIbXMn/rpMBYvLJ
Uj5CfqWlh8+hxqSYJDXtLm8hBiQFiMEew0jOc2Tw4F91OZ+jyYhJBBgRAgAJBQJJ
a9g3AhsMAAoJEPR9+zCB1GT0AUwAn2ZtBwAyVxppdeTqilXufUvAkvjbAJ9dUpR1
9a17/5BvDDJcIxSEKTJmDw==
=zCNZ
-----END PGP PUBLIC KEY BLOCK-----
EOF
fi
# import the new key
$pgp_cmd --no-default-keyring --keyring $pgp_keyring_file \
--primary-keyring $pgp_keyring_file \
--import $pgp_pub_key_file >$tmpf.log 2>&1 \
|| error_exit "could not import pgp public key into keyring"
fi
$fetch_cmd $tmpf $itar_url >$tmpf.log 2>&1 \
|| error_exit "fetching $itar_url failed"
tail -2 $tmpf | grep "; End of file" >/dev/null 2>&1 || \
error_exit "The file fetched from $itar_url was partial"
$fetch_cmd $tmpf.sig $itar_sig >$tmpf.log 2>&1 \
|| error_exit "fetching $itar_sig failed"
# check the file with pgp
$pgp_cmd --no-default-keyring --keyring $pgp_keyring_file \
--verify $tmpf.sig $tmpf >$tmpf.log 2>&1 \
|| error_exit "the PGP signature failed!"
# check for differences
val=1
if diff "$ub_ta_file" $tmpf 2>/dev/null ; then
# echo "The interim trust anchor repository did not change."
:
else
echo "Updating $ub_ta_file"
cp $tmpf $ub_ta_file
val=0
fi
rm -f $tmpf $tmpf.sig $tmpf.log
exit $val
|
davidhubbard/cwave
|
unbound-svn/contrib/update-itar.sh
|
Shell
|
gpl-2.0
| 4,577 |
#!/sbin/sh
#Features:
#maxkhz/minkhz/gov/maxscroff added to the kernels cmdline
#clean cmdline of foreigns in case of something wicked is going on in there
#(supports my kernel edits, so that the kernel boots with that values)
##Get CPU MINCLOCK from aroma tmp
val=$(cat /tmp/aroma-data/minkhz.prop | cut -d"=" -f2)
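# (each aroma-data .prop file is assumed to hold a single name=value line,
# e.g. a hypothetical "selected.1=2", so cut -d"=" -f2 yields the menu index)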
case $val in
1)
minkhz="192000"
;;
2)
minkhz="384000"
;;
3)
minkhz="432000"
;;
esac
##end Get cpu minclock from aroma tmp
##Get CPU MAXCLOCK from aroma tmp
val=$(cat /tmp/aroma-data/maxkhz.prop | cut -d"=" -f2)
case $val in
1)
maxkhz="1512000"
;;
2)
maxkhz="1539000"
;;
3)
maxkhz="1674000"
;;
4)
maxkhz="1755000"
;;
5)
maxkhz="1782000"
;;
6)
maxkhz="1809000"
;;
7)
maxkhz="1836000"
;;
8)
maxkhz="1863000"
;;
9)
maxkhz="1890000"
;;
10)
maxkhz="1910000"
;;
esac
##end Get cpu maxclock from aroma tmp
##Get CPU max screen off clock from aroma tmp
val=$(cat /tmp/aroma-data/scroff.prop | cut -d"=" -f2)
case $val in
1)
maxscroff="594000"
;;
2)
maxscroff="540000"
;;
3)
maxscroff="486000"
;;
esac
##end Get CPU max screen off clock from aroma tmp
##Get 3dgpuoc from aroma tmp
val=$(cat /tmp/aroma-data/3dgpu.prop | cut -d"=" -f2)
case $val in
1)
gpu3d="550000000"
;;
2)
gpu3d="500000000"
;;
3)
gpu3d="450000000"
;;
4)
gpu3d="400000000"
;;
5)
gpu3d="300000000"
;;
6)
gpu3d="266667000"
;;
esac
##end Get 3dgpuoc from aroma tmp
##Get 2dgpuoc from aroma tmp
val=$(cat /tmp/aroma-data/2dgpu.prop | cut -d"=" -f2)
case $val in
1)
gpu2d="300000000"
;;
2)
gpu2d="266667000"
;;
3)
gpu2d="200000000"
;;
esac
##end Get 2dgpuoc from aroma tmp
##Get governor from aroma tmp
val=$(cat /tmp/aroma-data/gov.prop | cut -d"=" -f2)
case $val in
1)
gov="ondemand"
;;
2)
gov="Lionheart"
;;
3)
gov="conservative"
;;
4)
gov="userspace"
;;
5)
gov="powersave"
;;
6)
gov="performance"
;;
7)
gov="lagfree"
;;
8)
gov="msm-dcvs"
;;
9)
gov="smartassv2"
;;
10)
gov="interactivex2"
;;
11)
gov="intellidemand"
;;
12)
gov="badass"
;;
13)
gov="interactive"
;;
esac
##end Get governor from aroma tmp
#clean cmdline from foreigns. failsafe
#needed since some cmdlines are full of rubbish :)
sed -i 's/no_console_suspend=1[^$]*$/no_console_suspend=1/g' /tmp/boot.img-cmdline
#Add maxkhz to the kernels cmdline.
cmdline=$(cat /tmp/boot.img-cmdline)
searchString="maxkhz="
maxkhz="maxkhz="$maxkhz
case $cmdline in
"$searchString"* | *" $searchString"*)
echo $(cat /tmp/boot.img-cmdline | sed -e 's/maxkhz=[^ ]\+//')>/tmp/boot.img-cmdline
echo $(cat /tmp/boot.img-cmdline)\ $maxkhz>/tmp/boot.img-cmdline
;;
*)
echo $(cat /tmp/boot.img-cmdline)\ $maxkhz>/tmp/boot.img-cmdline
;;
esac
#end maxkhz
#Add minkhz to the kernels cmdline.
cmdline=$(cat /tmp/boot.img-cmdline)
searchString="minkhz="
minkhz="minkhz="$minkhz
case $cmdline in
"$searchString"* | *" $searchString"*)
echo $(cat /tmp/boot.img-cmdline | sed -e 's/minkhz=[^ ]\+//')>/tmp/boot.img-cmdline
echo $(cat /tmp/boot.img-cmdline)\ $minkhz>/tmp/boot.img-cmdline
;;
*)
echo $(cat /tmp/boot.img-cmdline)\ $minkhz>/tmp/boot.img-cmdline
;;
esac
#end minkhz
#Add gov to the kernels cmdline.
cmdline=$(cat /tmp/boot.img-cmdline)
searchString="gov="
gov="gov="$gov
case $cmdline in
"$searchString"* | *" $searchString"*)
echo $(cat /tmp/boot.img-cmdline | sed -e 's/gov=[^ ]\+//')>/tmp/boot.img-cmdline
echo $(cat /tmp/boot.img-cmdline)\ $gov>/tmp/boot.img-cmdline
;;
*)
echo $(cat /tmp/boot.img-cmdline)\ $gov>/tmp/boot.img-cmdline
;;
esac
#end gov
#Add maxscroff to the kernels cmdline.
cmdline=$(cat /tmp/boot.img-cmdline)
searchString="maxscroff="
maxscroff="maxscroff="$maxscroff
case $cmdline in
"$searchString"* | *" $searchString"*)
echo $(cat /tmp/boot.img-cmdline | sed -e 's/maxscroff=[^ ]\+//')>/tmp/boot.img-cmdline
echo $(cat /tmp/boot.img-cmdline)\ $maxscroff>/tmp/boot.img-cmdline
;;
*)
echo $(cat /tmp/boot.img-cmdline)\ $maxscroff>/tmp/boot.img-cmdline
;;
esac
#end maxscroff
#Add 3dgpu to the kernels cmdline.
cmdline=$(cat /tmp/boot.img-cmdline)
searchString="3dgpu="
gpu3d="3dgpu="$gpu3d
case $cmdline in
"$searchString"* | *" $searchString"*)
echo $(cat /tmp/boot.img-cmdline | sed -e 's/3dgpu=[^ ]\+//')>/tmp/boot.img-cmdline
echo $(cat /tmp/boot.img-cmdline)\ $gpu3d>/tmp/boot.img-cmdline
;;
*)
echo $(cat /tmp/boot.img-cmdline)\ $gpu3d>/tmp/boot.img-cmdline
;;
esac
#end 3dgpu
#Add 2dgpu to the kernels cmdline.
cmdline=$(cat /tmp/boot.img-cmdline)
searchString="2dgpu="
gpu2d="2dgpu="$gpu2d
case $cmdline in
"$searchString"* | *" $searchString"*)
echo $(cat /tmp/boot.img-cmdline | sed -e 's/2dgpu=[^ ]\+//')>/tmp/boot.img-cmdline
echo $(cat /tmp/boot.img-cmdline)\ $gpu2d>/tmp/boot.img-cmdline
;;
*)
echo $(cat /tmp/boot.img-cmdline)\ $gpu2d>/tmp/boot.img-cmdline
;;
esac
mv /system/bin/thermald /system/bin/thermald_old
mv /system/bin/mpdecision /system/bin/mpdecision_old
#end 2dgpu
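# The six add-or-replace blocks above repeat one pattern; a generic helper
# (a sketch only, not wired into the updater, and assuming the recovery
# shell supports functions and 'local' as busybox ash does) could be:
set_cmdline_param() {
    # $1 = key (e.g. maxkhz), $2 = value
    local key="$1" value="$2" cmdline
    cmdline=$(cat /tmp/boot.img-cmdline)
    case $cmdline in
        "$key="* | *" $key="*)
            # strip the old key=value before appending the new one
            cmdline=$(echo "$cmdline" | sed -e "s/$key=[^ ]\+//")
            ;;
    esac
    echo "$cmdline $key=$value" > /tmp/boot.img-cmdline
}
# e.g.: set_cmdline_param maxscroff 594000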
|
Team-Blackout/temp
|
dna/tools/cmdline.sh
|
Shell
|
gpl-2.0
| 5,219 |
#! /usr/bin/env bash
# EMACS settings: -*- tab-width: 2; indent-tabs-mode: t -*-
# vim: tabstop=2:shiftwidth=2:noexpandtab
# kate: tab-width 2; replace-tabs off; indent-width 2;
#
# ==============================================================================
# Authors: Patrick Lehmann
#
# Bash Script: Script to compile the simulation libraries from Xilinx Vivado
# for GHDL on Linux
#
# Description:
# ------------------------------------
# This is a Bash script (executable) which:
# - creates a subdirectory in the current working directory
# - compiles all Xilinx Vivado simulation libraries and packages
#
# ==============================================================================
# Copyright (C) 2015-2016 Patrick Lehmann - Dresden, Germany
#
# GHDL is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2, or (at your option) any later
# version.
#
# GHDL is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License
# along with GHDL; see the file COPYING. If not, write to the Free
# Software Foundation, 59 Temple Place - Suite 330, Boston, MA
# 02111-1307, USA.
# ==============================================================================
# ---------------------------------------------
# work around for Darwin (Mac OS)
READLINK=readlink; if [[ $(uname) == "Darwin" ]]; then READLINK=greadlink; fi
# save working directory
WorkingDir=$(pwd)
ScriptDir="$(dirname $0)"
ScriptDir="$($READLINK -f $ScriptDir)"
# source configuration file from GHDL's 'vendors' library directory
. $ScriptDir/../ansi_color.sh
. $ScriptDir/config.sh
. $ScriptDir/shared.sh
# command line argument processing
NO_COMMAND=1
SKIP_EXISTING_FILES=0
SKIP_LARGE_FILES=0
SUPPRESS_WARNINGS=0
HALT_ON_ERROR=0
VHDLStandard=93
GHDLBinDir=""
DestDir=""
SrcDir=""
while [[ $# -gt 0 ]]; do
key="$1"
case $key in
-c|--clean)
CLEAN=TRUE
NO_COMMAND=0
;;
-a|--all)
COMPILE_ALL=TRUE
NO_COMMAND=0
;;
--unisim)
COMPILE_UNISIM=TRUE
NO_COMMAND=0
;;
--unimacro)
COMPILE_UNIMACRO=TRUE
NO_COMMAND=0
;;
--unifast)
COMPILE_UNIFAST=TRUE
NO_COMMAND=0
;;
--secureip)
COMPILE_SECUREIP=TRUE
;;
-h|--help)
HELP=TRUE
NO_COMMAND=0
;;
-s|--skip-existing)
SKIP_EXISTING_FILES=1
;;
-S|--skip-largefiles)
SKIP_LARGE_FILES=1
;;
-n|--no-warnings)
SUPPRESS_WARNINGS=1
;;
-H|--halt-on-error)
HALT_ON_ERROR=1
;;
--vhdl93)
VHDLStandard=93
;;
--vhdl2008)
VHDLStandard=2008
;;
--ghdl)
GHDLBinDir="$2"
shift # skip argument
;;
--src)
SrcDir="$2"
shift # skip argument
;;
--out)
DestDir="$2"
shift # skip argument
;;
*) # unknown option
echo 1>&2 -e "${COLORED_ERROR} Unknown command line option '$key'.${ANSI_NOCOLOR}"
exit -1
;;
esac
shift # past argument or value
done
if [ $NO_COMMAND -eq 1 ]; then
HELP=TRUE
fi
if [ "$HELP" == "TRUE" ]; then
test $NO_COMMAND -eq 1 && echo 1>&2 -e "/n${COLORED_ERROR} No command selected."
echo ""
echo "Synopsis:"
echo " A script to compile the Xilinx Vivado simulation libraries for GHDL on Linux."
echo " One library folder 'lib/v??' per VHDL library will be created relative to the current"
echo " working directory."
echo ""
echo " Use the adv. options or edit 'config.sh' to supply paths and default params."
echo ""
echo "Usage:"
echo " compile-xilinx-vivado.sh <common command>|<library> [<options>] [<adv. options>]"
echo ""
echo "Common commands:"
echo " -h --help Print this help page"
echo " -c --clean Remove all generated files"
echo ""
echo "Libraries:"
echo " -a --all Compile all Xilinx simulation libraries."
echo " --unisim Compile the unisim library."
echo " --unimacro Compile the unimacro library."
echo " --unifast Compile the unifast library."
echo " --secureip Compile the secureip library."
echo ""
echo "Library compile options:"
echo " --vhdl93 Compile the libraries with VHDL-93."
echo " --vhdl2008 Compile the libraries with VHDL-2008."
echo " -s --skip-existing Skip already compiled files (an *.o file exists)."
echo " -S --skip-largefiles Don't compile large entities like DSP and PCIe primitives."
echo " -H --halt-on-error Halt on error(s)."
echo ""
echo "Advanced options:"
echo " --ghdl <GHDL bin dir> Path to GHDL's binary directory, e.g. /usr/local/bin"
echo " --out <dir name> Name of the output directory, e.g. xilinx-vivado"
echo " --src <Path to lib> Path to the sources, e.g. /opt/Xilinx/Vivado/2016.3/data/vhdl/src"
echo ""
echo "Verbosity:"
echo " -n --no-warnings Suppress all warnings. Show only error messages."
echo ""
exit 0
fi
if [ "$COMPILE_ALL" == "TRUE" ]; then
COMPILE_UNISIM=TRUE
COMPILE_UNIMACRO=TRUE
COMPILE_UNIFAST=TRUE
COMPILE_SECUREIP=TRUE
fi
if [ $VHDLStandard -eq 2008 ]; then
echo -e "${ANSI_RED}Not all Xilinx primitives are VHDL-2008 compatible! Setting HALT_ON_ERROR to FALSE.${ANSI_NOCOLOR}"
HALT_ON_ERROR=0
fi
DefaultDirectories=("/opt/Xilinx/Vivado" "/opt/xilinx/Vivado")
if [ ! -z $XILINX_VIVADO ]; then
EnvSourceDir=$XILINX_VIVADO/vhdl/src
else
for DefaultDir in ${DefaultDirectories[@]}; do
for Major in 2017 2016 2015 2014; do
for Minor in 4 3 2 1; do
Dir=$DefaultDir/${Major}.${Minor}
if [ -d $Dir ]; then
EnvSourceDir=$Dir/${SourceDirectories[XilinxVivado]}
break 3
fi
done
done
done
fi
# -> $SourceDirectories
# -> $DestinationDirectories
# -> $SrcDir
# -> $EnvSourceDir
# -> $DestDir
# -> $GHDLBinDir
# <= $SourceDirectory
# <= $DestinationDirectory
# <= $GHDLBinary
SetupDirectories XilinxVivado "Xilinx Vivado"
# create "xilinx-vivado" directory and change to it
# => $DestinationDirectory
CreateDestinationDirectory
cd $DestinationDirectory
# => $SUPPRESS_WARNINGS
# <= $GRC_COMMAND
SetupGRCat
# -> $VHDLStandard
# <= $VHDLVersion
# <= $VHDLStandard
# <= $VHDLFlavor
GHDLSetup
# define global GHDL Options
GHDL_OPTIONS=(-fexplicit -frelaxed-rules --no-vital-checks --warn-binding --mb-comments)
GHDL_PARAMS=(${GHDL_OPTIONS[@]})
GHDL_PARAMS+=(--ieee=$VHDLFlavor --std=$VHDLStandard -P$DestinationDirectory)
STOPCOMPILING=0
ERRORCOUNT=0
# Cleanup directory
# ==============================================================================
if [ "$CLEAN" == "TRUE" ]; then
echo 1>&2 -e "${COLORED_ERROR} '--clean' is not implemented!"
exit -1
echo -e "${ANSI_YELLOW}Cleaning up vendor directory ...${ANSI_NOCOLOR}"
rm *.o 2> /dev/null
rm *.cf 2> /dev/null
fi
# Library unisim
# ==============================================================================
# compile unisim packages
if [ $STOPCOMPILING -eq 0 ] && [ "$COMPILE_UNISIM" == "TRUE" ]; then
Library="unisim"
Files=(
${Library}s/unisim_VPKG.vhd
${Library}s/unisim_VCOMP.vhd
${Library}s/retarget_VCOMP.vhd
${Library}s/unisim_retarget_VCOMP.vhd
)
# append absolute source path
SourceFiles=()
for File in ${Files[@]}; do
SourceFiles+=("$SourceDirectory/$File")
done
GHDLCompilePackages
fi
# compile unisim primitives
if [ $STOPCOMPILING -eq 0 ] && [ "$COMPILE_UNISIM" == "TRUE" ]; then
Library="unisim"
SourceFiles=()
while IFS= read -r File; do
SourceFiles+=("$SourceDirectory/${Library}s/primitive/$File")
done < <(grep --no-filename -R '^[a-zA-Z]' "$SourceDirectory/${Library}s/primitive/vhdl_analyze_order")
GHDLCompileLibrary
fi
# compile unisim retarget primitives
if [ $STOPCOMPILING -eq 0 ] && [ "$COMPILE_UNISIM" == "TRUE" ]; then
Library="unisim"
SourceFiles="$(LC_COLLATE=C ls $SourceDirectory/${Library}s/retarget/*.vhd)"
GHDLCompileLibrary
fi
# compile unisim secureip primitives
if [ $STOPCOMPILING -eq 0 ] && [ "$COMPILE_UNISIM" == "TRUE" ] && [ "$COMPILE_SECUREIP" == "TRUE" ]; then
Library="secureip"
SourceFiles="$(LC_COLLATE=C ls $SourceDirectory/unisims/$Library/*.vhd)"
GHDLCompileLibrary
fi
# Library unimacro
# ==============================================================================
# compile unimacro packages
if [ $STOPCOMPILING -eq 0 ] && [ "$COMPILE_UNIMACRO" == "TRUE" ]; then
Library="unimacro"
Files=(
$Library/unimacro_VCOMP.vhd
)
# append absolute source path
SourceFiles=()
for File in ${Files[@]}; do
SourceFiles+=("$SourceDirectory/$File")
done
GHDLCompilePackages
fi
# compile unimacro macros
if [ $STOPCOMPILING -eq 0 ] && [ "$COMPILE_UNIMACRO" == "TRUE" ]; then
Library="unimacro"
SourceFiles=()
while IFS= read -r File; do
SourceFiles+=("$SourceDirectory/${Library}/$File")
done < <(grep --no-filename -R '^[a-zA-Z]' "$SourceDirectory/${Library}/vhdl_analyze_order")
GHDLCompileLibrary
fi
# Library UNIFAST
# ==============================================================================
# compile unisim primitives
if [ $STOPCOMPILING -eq 0 ] && [ "$COMPILE_UNIFAST" == "TRUE" ]; then
Library="unifast"
SourceFiles=()
while IFS= read -r File; do
SourceFiles+=("$SourceDirectory/${Library}/primitive/$File")
done < <(grep --no-filename -R '^[a-zA-Z]' "$SourceDirectory/${Library}/primitive/vhdl_analyze_order")
GHDLCompileLibrary
fi
echo "--------------------------------------------------------------------------------"
echo -n "Compiling Xilinx Vivado libraries "
if [ $ERRORCOUNT -gt 0 ]; then
echo -e $COLORED_FAILED
else
echo -e $COLORED_SUCCESSFUL
fi
|
emogenet/ghdl
|
libraries/vendors/compile-xilinx-vivado.sh
|
Shell
|
gpl-2.0
| 9,677 |
echo "***************************"
echo "XTrie Big Test with variable # of threads"
for threads in 1 10 20 30 40 50 60 70 80
do
echo "*********** $threads"
time perf stat -e cache-references -e cache-misses ./perf_test $threads 100000
done
|
trougnouf/School
|
ProgrammingAssignment3/Resources/concurrent_ds-master/skiptrie/xtree/perf_test/runners/run_small_cache.sh
|
Shell
|
gpl-3.0
| 248 |
#!/bin/bash
# SPDX-License-Identifier: GPL-3.0-or-later AND MIT
# Copyright (c) 2017-2021 Maxim Biro <[email protected]>
# Copyright (c) 2021 by The qTox Project Contributors
set -euo pipefail
usage()
{
echo "Download and build qrencode for the windows cross compiling environment"
echo "Usage: $0 --arch {win64|win32}"
}
ARCH=""
while (( $# > 0 )); do
case $1 in
--arch) ARCH=$2; shift 2 ;;
-h|--help) usage; exit 1 ;;
*) echo "Unexpected argument $1"; usage; exit 1;;
esac
done
if [[ "$ARCH" == "win64" ]]; then
HOST="x86_64-w64-mingw32"
elif [[ "$ARCH" == "win32" ]]; then
HOST="i686-w64-mingw32"
else
echo "Unexpected arch $ARCH"
usage
exit 1
fi
"$(dirname "$0")"/download/download_qrencode.sh
CFLAGS="-O2 -g0" ./configure --host="${HOST}" \
--prefix=/windows \
--enable-shared \
--disable-static \
--disable-sdltest \
--without-tools \
--without-debug
make -j $(nproc)
make install
|
qTox/qTox
|
buildscripts/build_qrencode_windows.sh
|
Shell
|
gpl-3.0
| 1,145 |
# Plowshare openload.io module
# Copyright (c) 2015 ljsdoug <[email protected]>
#
# This file is part of Plowshare.
#
# Plowshare is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Plowshare is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Plowshare. If not, see <http://www.gnu.org/licenses/>.
MODULE_OPENLOAD_IO_REGEXP_URL='https\?://openload\.io/'
MODULE_OPENLOAD_IO_DOWNLOAD_OPTIONS=""
MODULE_OPENLOAD_IO_DOWNLOAD_RESUME=yes
MODULE_OPENLOAD_IO_DOWNLOAD_FINAL_LINK_NEEDS_COOKIE=no
MODULE_OPENLOAD_IO_DOWNLOAD_SUCCESSIVE_INTERVAL=
MODULE_OPENLOAD_IO_PROBE_OPTIONS=""
# Output a openload.io file download URL
# $1: cookie file (unused here)
# $2: openload.io url
# stdout: real file download link
openload_io_download() {
local -r URL=$2
local PAGE WAIT FILE_URL FILE_NAME
PAGE=$(curl -L "$URL") || return
if match "<p class=\"lead\">We can't find the file you are looking for" "$PAGE"; then
return $ERR_LINK_DEAD
fi
WAIT=$(parse_tag 'id="secondsleft"' span <<< "$PAGE") || return
wait $(($WAIT)) seconds || return
FILE_URL=$(parse_attr 'id="realdownload"' href <<< "$PAGE")
FILE_NAME=$(parse_tag 'id="filename"' span <<< "$PAGE")
echo "$FILE_URL"
echo "$FILE_NAME"
return 0
}
# Probe a download URL
# $1: cookie file (unused here)
# $2: openload.io url
# $3: requested capability list
# stdout: 1 capability per line
openload_io_probe() {
local -r URL=$2
local -r REQ_IN=$3
local PAGE REQ_OUT FILE_SIZE
PAGE=$(curl -L "$URL") || return
if match "<p class=\"lead\">We can't find the file you are looking for" "$PAGE"; then
return $ERR_LINK_DEAD
fi
REQ_OUT=c
if [[ $REQ_IN = *f* ]]; then
parse_tag 'id="filename"' span <<< "$PAGE" && REQ_OUT="${REQ_OUT}f"
fi
if [[ $REQ_IN = *s* ]]; then
FILE_SIZE=$(parse_tag 'class="count"' span <<< "$PAGE") && \
translate_size "$FILE_SIZE" && REQ_OUT="${REQ_OUT}s"
fi
echo $REQ_OUT
return 0
}
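# Illustrative use via Plowshare's front-ends (the link id is hypothetical):
#   plowprobe 'https://openload.io/f/AbCdEf123'
#   plowdown 'https://openload.io/f/AbCdEf123'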
|
dataoscar/plowshare-modules-legacy
|
openload_io.sh
|
Shell
|
gpl-3.0
| 2,437 |
#!/bin/bash
# T&M Hansson IT AB © - 2021, https://www.hanssonit.se/
true
SCRIPT_NAME="Not-supported Menu"
# shellcheck source=lib.sh
source /var/scripts/fetch_lib.sh || source <(curl -sL https://raw.githubusercontent.com/nextcloud/vm/master/lib.sh)
# Check for errors + debug code and abort if something isn't right
# 1 = ON
# 0 = OFF
DEBUG=0
debug_mode
# Must be root
root_check
print_text_in_color "$ICyan" "Running the Not-supported Menu script..."
if network_ok
then
# Delete, download, run
run_script NOT_SUPPORTED_FOLDER not-supported_menu
fi
exit
|
nextcloud/vm
|
not-supported/not-supported.sh
|
Shell
|
gpl-3.0
| 570 |
timeout_set 3 minutes
master_cfg="MAGIC_DISABLE_METADATA_DUMPS = 1"
master_cfg+="|AUTO_RECOVERY = 1"
master_cfg+="|EMPTY_TRASH_PERIOD = 1"
master_cfg+="|EMPTY_RESERVED_INODES_PERIOD = 1"
CHUNKSERVERS=3 \
MOUNTS=2 \
USE_RAMDISK="YES" \
MOUNT_0_EXTRA_CONFIG="mfsacl,mfscachemode=NEVER,mfsreportreservedperiod=1" \
MOUNT_1_EXTRA_CONFIG="mfsmeta" \
MFSEXPORTS_EXTRA_OPTIONS="allcanchangequota,ignoregid" \
MFSEXPORTS_META_EXTRA_OPTIONS="nonrootmeta" \
MASTER_EXTRA_CONFIG="$master_cfg" \
DEBUG_LOG_FAIL_ON="master.fs.checksum.mismatch" \
setup_local_empty_lizardfs info
# Save path of meta-mount in MFS_META_MOUNT_PATH for metadata generators
export MFS_META_MOUNT_PATH=${info[mount1]}
# Save path of changelog.mfs in CHANGELOG to make it possible to verify generated changes
export CHANGELOG="${info[master_data_path]}"/changelog.mfs
lizardfs_metalogger_daemon start
# Generate some metadata and remember it
cd "${info[mount0]}"
metadata_generate_all
metadata=$(metadata_print)
# Check if the metadata checksum is fine.
# Possible checksum mismatch will be reported at the end of the test.
assert_success lizardfs_admin_master magic-recalculate-metadata-checksum
# simulate master server failure and recovery
sleep 3
cd
lizardfs_master_daemon kill
# leave only files written by metalogger
rm ${info[master_data_path]}/{changelog,metadata,sessions}.*
mfsmetarestore -a -d "${info[master_data_path]}"
lizardfs_master_daemon start
# check restored filesystem
cd "${info[mount0]}"
assert_no_diff "$metadata" "$(metadata_print)"
lizardfs_wait_for_all_ready_chunkservers
metadata_validate_files
|
jedisct1/lizardfs
|
tests/test_suites/ShortSystemTests/test_metadata_recovery.sh
|
Shell
|
gpl-3.0
| 1,604 |
#!/usr/bin/env bash
# create local user accounts
/rstudio/vagrant/provision-create-users.sh
# install packages needed for development environment
if [ ! -f /etc/redhat-release ]; then
apt-get install -y vim
apt-get install -y silversearcher-ag
apt-get install -y python-dev
# install NPM and utilities for JavaScript development
apt-get install -y npm
update-alternatives --install /usr/bin/node node /usr/bin/nodejs 10
npm install -g tern
npm install -g jshint
npm install -g grunt-cli
fi
# perform remainder of the install script as regular user
sudo --login --set-home -u vagrant /rstudio/vagrant/provision-primary-user.sh
|
jar1karp/rstudio
|
vagrant/provision-primary.sh
|
Shell
|
agpl-3.0
| 668 |
g++ rpc_client.cpp -o rpc_client -I../../ ../../lib/openbsd-4.6/libulxmlrpcpp.a /usr/lib/libexpat.a -lssl -lcrypto -pthread
|
kindkaktus/ulxmlrpcpp
|
ulxmlrpcpp/test_ipv6/build_client.sh
|
Shell
|
lgpl-2.1
| 124 |
#!/usr/bin/env bash
#
# bootstrap.sh: Build and configuration script for nlp-webtools in Vagrant
# --------------------------------------------------------------------------------------
# This script is only the *first time* you issue the command:
#
# vagrant up
#
# Or, following the commands:
#
# (vagrant halt)
# vagrant destroy
# vagrant up
#
#===============================================================================
# vim: softtabstop=4 shiftwidth=4 expandtab fenc=utf-8 spell spelllang=en cc=81
#===============================================================================
#
# Script Version
__ScriptVersion="0.1"
# Base directory for build log
LOG_BASE=/var/log
WWW_ROOT=/var/www
#--- FUNCTION ----------------------------------------------------------------
# NAME: __function_defined
#  DESCRIPTION:  Checks if a function is defined within this script's scope
# PARAMETERS: function name
# RETURNS: 0 or 1 as in defined or not defined
#-------------------------------------------------------------------------------
__function_defined() {
FUNC_NAME=$1
if [ "$(command -v $FUNC_NAME)x" != "x" ]; then
echoinfo "Found function $FUNC_NAME"
return 0
fi
echodebug "$FUNC_NAME not found...."
return 1
}
#--- FUNCTION ----------------------------------------------------------------
# NAME: __strip_duplicates
# DESCRIPTION: Strip duplicate strings
#-------------------------------------------------------------------------------
__strip_duplicates() {
echo "$@" | tr -s '[:space:]' '\n' | awk '!x[$0]++'
}
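# e.g. __strip_duplicates "b a b c a" prints one token per line, keeping the
# first occurrence of each: b, a, c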
#--- FUNCTION ----------------------------------------------------------------
# NAME: echoerr
# DESCRIPTION: Echo errors to stderr.
#-------------------------------------------------------------------------------
echoerror() {
printf "%s * ERROR%s: %s\n" "${RC}" "${EC}" "$@" 1>&2;
}
#--- FUNCTION ----------------------------------------------------------------
# NAME: echoinfo
# DESCRIPTION: Echo information to stdout.
#-------------------------------------------------------------------------------
echoinfo() {
printf "%s * STATUS%s: %s\n" "${GC}" "${EC}" "$@";
}
#--- FUNCTION ----------------------------------------------------------------
# NAME: echowarn
# DESCRIPTION: Echo warning informations to stdout.
#-------------------------------------------------------------------------------
echowarn() {
printf "%s * WARN%s: %s\n" "${YC}" "${EC}" "$@";
}
#--- FUNCTION ----------------------------------------------------------------
# NAME: echodebug
# DESCRIPTION: Echo debug information to stdout.
#-------------------------------------------------------------------------------
echodebug() {
if [ $_ECHO_DEBUG -eq $BS_TRUE ]; then
printf "${BC} * DEBUG${EC}: %s\n" "$@";
fi
}
#--- FUNCTION ----------------------------------------------------------------
# NAME: __apt_get_install_noinput
# DESCRIPTION: (DRY) apt-get install with noinput options
#-------------------------------------------------------------------------------
__apt_get_install_noinput() {
apt-get install -y -o DPkg::Options::=--force-confold "$@"; return $?
}
#--- FUNCTION ----------------------------------------------------------------
# NAME: __apt_get_upgrade_noinput
# DESCRIPTION: (DRY) apt-get upgrade with noinput options
#-------------------------------------------------------------------------------
__apt_get_upgrade_noinput() {
apt-get upgrade -y -o DPkg::Options::=--force-confold; return $?
}
#--- FUNCTION ----------------------------------------------------------------
# NAME: __pip_install_noinput
# DESCRIPTION: (DRY)
#-------------------------------------------------------------------------------
__pip_install_noinput() {
#pip install --upgrade "$@"; return $?
# Uncomment for Python 3
pip3 install --upgrade $@; return $?
}
#--- FUNCTION ----------------------------------------------------------------
# NAME: __pip_install_noinput
# DESCRIPTION: (DRY)
#-------------------------------------------------------------------------------
__pip_pre_install_noinput() {
#pip install --pre --upgrade "$@"; return $?
# Uncomment for Python 3
pip3 install --pre --upgrade $@; return $?
}
#--- FUNCTION ----------------------------------------------------------------
# NAME: __check_apt_lock
# DESCRIPTION: (DRY)
#-------------------------------------------------------------------------------
__check_apt_lock() {
lsof /var/lib/dpkg/lock > /dev/null 2>&1
RES=`echo $?`
return $RES
}
__enable_universe_repository() {
if [ "x$(grep -R universe /etc/apt/sources.list /etc/apt/sources.list.d/ | grep -v '#')" != "x" ]; then
# The universe repository is already enabled
return 0
fi
echodebug "Enabling the universe repository"
# Ubuntu versions higher than 12.04 do not live in the old repositories
if [ $DISTRO_MAJOR_VERSION -gt 12 ] || ([ $DISTRO_MAJOR_VERSION -eq 12 ] && [ $DISTRO_MINOR_VERSION -gt 04 ]); then
add-apt-repository -y "deb http://archive.ubuntu.com/ubuntu $(lsb_release -sc) universe" || return 1
elif [ $DISTRO_MAJOR_VERSION -lt 11 ] && [ $DISTRO_MINOR_VERSION -lt 10 ]; then
# Below Ubuntu 11.10, the -y flag to add-apt-repository is not supported
add-apt-repository "deb http://old-releases.ubuntu.com/ubuntu $(lsb_release -sc) universe" || return 1
fi
add-apt-repository -y "deb http://old-releases.ubuntu.com/ubuntu $(lsb_release -sc) universe" || return 1
return 0
}
__check_unparsed_options() {
shellopts="$1"
# grep alternative for SunOS
if [ -f /usr/xpg4/bin/grep ]; then
grep='/usr/xpg4/bin/grep'
else
grep='grep'
fi
unparsed_options=$( echo "$shellopts" | ${grep} -E '(^|[[:space:]])[-]+[[:alnum:]]' )
if [ "x$unparsed_options" != "x" ]; then
usage
echo
echoerror "options are only allowed before install arguments"
echo
exit 1
fi
}
configure_cpan() {
(echo y;echo o conf prerequisites_policy follow;echo o conf commit)|cpan > /dev/null
}
usage() {
echo "usage"
exit 1
}
install_ubuntu_17.04_deps() {
echoinfo "Updating your APT Repositories ... "
apt-get update >> $LOG_BASE/nlp-install.log 2>&1 || return 1
echoinfo "Installing Python Software Properies ... "
__apt_get_install_noinput software-properties-common >> $LOG_BASE/nlp-install.log 2>&1 || return 1
echoinfo "Enabling Universal Repository ... "
__enable_universe_repository >> $LOG_BASE/nlp-install.log 2>&1 || return 1
echoinfo "Updating Repository Package List ..."
apt-get update >> $LOG_BASE/nlp-install.log 2>&1 || return 1
echoinfo "Upgrading all packages to latest version ..."
__apt_get_upgrade_noinput >> $LOG_BASE/nlp-install.log 2>&1 || return 1
return 0
}
install_ubuntu_17.04_packages() {
packages="antiword
automake
dkms
ffmpeg
flac
g++-5
gcc-5
lame
libffi-dev
libjpeg-dev
libmad0
libpulse-dev
libsox-fmt-mp3
libtool
libxml2-dev
libxslt1-dev
poppler-utils
pstotext
python
python-dev
python-pip
python3-dev
python3-pip
sox
swig
swig3.0
tesseract-ocr
unrtf
virtualbox-guest-utils
virtualenv
virtualenvwrapper
zlib1g-dev"
if [ "$@" = "dev" ]; then
packages="$packages"
elif [ "$@" = "stable" ]; then
packages="$packages"
fi
for PACKAGE in $packages; do
__apt_get_install_noinput $PACKAGE >> $LOG_BASE/nlp-install.log 2>&1
ERROR=$?
if [ $ERROR -ne 0 ]; then
echoerror "Install Failure: $PACKAGE (Error Code: $ERROR)"
else
echoinfo "Installed Package: $PACKAGE"
fi
done
return 0
}
install_ubuntu_17.04_pip_packages() {
pip_packages="textract
gensim
pyLDAvis
configobj"
pip_special_packages="textacy"
if [ "$@" = "dev" ]; then
pip_packages="$pip_packages"
elif [ "$@" = "stable" ]; then
pip_packages="$pip_packages"
fi
ERROR=0
for PACKAGE in $pip_packages; do
CURRENT_ERROR=0
echoinfo "Installed Python Package: $PACKAGE"
__pip_install_noinput $PACKAGE >> $LOG_BASE/nlp-install.log 2>&1 || (let ERROR=ERROR+1 && let CURRENT_ERROR=1)
if [ $CURRENT_ERROR -eq 1 ]; then
echoerror "Python Package Install Failure: $PACKAGE"
fi
done
# Prep environment for special packages, install cld2-cffi
env CC=/usr/bin/gcc-5 pip3 install -U cld2-cffi
for PACKAGE in $pip_special_packages; do
CURRENT_ERROR=0
echoinfo "Installed Python (special setup) Package: $PACKAGE"
__pip_pre_install_noinput $PACKAGE >> $LOG_BASE/nlp-install.log 2>&1 || (let ERROR=ERROR+1 && let CURRENT_ERROR=1)
if [ $CURRENT_ERROR -eq 1 ]; then
echoerror "Python Package Install Failure: $PACKAGE"
fi
done
if [ $ERROR -ne 0 ]; then
echoerror
return 1
fi
return 0
}
install_source_packages() {
#echoinfo "nlp-webtools: Nothing to be installed currently. Continuing..."
# Install libuna from specific release
echoinfo "nlp-webtools: Building and installing libuna"
CDIR=$(pwd)
# Newer versions break a lot of stuff. Keep 20150927 for now.
cd /tmp
wget -q https://github.com/libyal/libuna/releases/download/20170112/libuna-alpha-20170112.tar.gz
tar zxf libuna-alpha-20170112.tar.gz >> $HOME/nlp-install.log 2>&1
cd libuna-20170112
./configure >> $HOME/nlp-install.log 2>&1
make -s >> $HOME/nlp-install.log 2>&1
make install >> $HOME/nlp-install.log 2>&1
ldconfig >> $HOME/nlp-install.log 2>&1
# Now clean up
cd /tmp
rm -rf libuna-20170112
rm libuna-alpha-20170112.tar.gz
# Install libewf from current sources
echoinfo "nlp-webtools: Building and installing libewf"
CDIR=$(pwd)
# Newer versions break a lot of stuff. Keep 20140608 for now.
cd /tmp
cp /vagrant/externals/libewf-20140608.tar.gz .
tar zxf libewf-20140608.tar.gz >> $HOME/nlp-install.log 2>&1
cd libewf-20140608
./configure --enable-python --enable-v1-api >> $HOME/nlp-install.log 2>&1
make -s >> $HOME/nlp-install.log 2>&1
make install >> $HOME/nlp-install.log 2>&1
ldconfig >> $HOME/nlp-install.log 2>&1
# Now clean up
cd /tmp
rm -rf libewf-20140608
rm libewf-20140608.tar.gz
echoinfo "nlp-webtools: Adding DFXML tools and libraries"
CDIR=$(pwd)
git clone https://github.com/simsong/dfxml /usr/share/dfxml >> $HOME/nlp-install.log 2>&1
# No cleanup needed
cd /tmp
# Install The Sleuth Kit (TSK) from current sources
echoinfo "nlp-webtools: Building and installing The Sleuth Kit"
CDIR=$(pwd)
git clone --recursive https://github.com/sleuthkit/sleuthkit /usr/share/sleuthkit >> $HOME/nlp-install.log 2>&1
cd /usr/share/sleuthkit
git fetch
git checkout master >> $HOME/nlp-install.log 2>&1
./bootstrap >> $HOME/nlp-install.log 2>&1
./configure >> $HOME/nlp-install.log 2>&1
make -s >> $HOME/nlp-install.log 2>&1
make install >> $HOME/nlp-install.log 2>&1
ldconfig >> $HOME/nlp-install.log 2>&1
# Install PyTSK
echoinfo "nlp-webtools: Building and installing PyTSK (Python bindings for TSK)"
echoinfo " -- Please be patient. This may take several minutes..."
CDIR=$(pwd)
cd /tmp
git clone https://github.com/py4n6/pytsk
cd pytsk
python setup.py update >> $HOME/nlp-install.log 2>&1
python setup.py build >> $HOME/nlp-install.log 2>&1
python setup.py install >> $HOME/nlp-install.log 2>&1
# Now clean up
cd /tmp
#rm -rf pytsk3-20170508
rm -rf pytsk
}
complete_message() {
echo
echo "Installation Complete!"
echo
}
OS=$(lsb_release -si)
ARCH=$(uname -m | sed 's/x86_//;s/i[3-6]86/32/')
VER=$(lsb_release -sr)
if [ $OS != "Ubuntu" ]; then
echo "nlp-webtools is only installable on Ubuntu operating systems at this time."
exit 1
fi
if [ $VER != "17.04" ]; then
echo "nlp-webtools is only installable on Ubuntu 17.04 at this time."
exit 3
fi
if [ "`whoami`" != "root" ]; then
echoerror "The nlp-webtools bootstrap script must run as root."
echoinfo "Preferred Usage: sudo bootstrap.sh (options)"
echo ""
exit 3
fi
if [ "$SUDO_USER" = "" ]; then
echo "The SUDO_USER variable doesn't seem to be set"
exit 4
fi
# while getopts ":hvcsiyu" opt
while getopts ":hv" opt
do
case "${opt}" in
h ) usage; exit 0 ;;
v ) echo "$0 -- Version $__ScriptVersion"; exit 0 ;;
\?) echo
echoerror "Option does not exist: $OPTARG"
usage
exit 1
;;
esac
done
shift $(($OPTIND-1))
if [ "$#" -eq 0 ]; then
ITYPE="stable"
else
__check_unparsed_options "$*"
ITYPE=$1
shift
fi
# Check installation type
if [ "$(echo $ITYPE | egrep '(dev|stable)')x" = "x" ]; then
echoerror "Installation type \"$ITYPE\" is not known..."
exit 1
fi
echoinfo "****************************************************************"
echoinfo "The nlp-webtools provisioning script will now configure your system."
echoinfo "****************************************************************"
echoinfo ""
#if [ "$YESTOALL" -eq 1 ]; then
# echoinfo "You supplied the -y option, this script will not exit for any reason"
#fi
echoinfo "OS: $OS"
echoinfo "Arch: $ARCH"
echoinfo "Version: $VER"
echoinfo "The current user is: $SUDO_USER"
export DEBIAN_FRONTEND=noninteractive
install_ubuntu_${VER}_deps $ITYPE
install_ubuntu_${VER}_packages $ITYPE
install_ubuntu_${VER}_pip_packages $ITYPE
install_source_packages
complete_message
|
BitCurator/bitcurator-nlp-tools
|
attic/provision/bootstrap.sh
|
Shell
|
lgpl-3.0
| 13,880 |
#!/usr/bin/env bash
/Applications/OpenSCAD.app/Contents/MacOS/OpenSCAD "$@"
|
kak-bo-che/zen_toolworks_quelab
|
openscad.sh
|
Shell
|
unlicense
| 74 |
#!/bin/bash
#
# Launcher for the fuzz runner engine.
# See https://github.com/bazelbuild/rules_fuzzing for more info.
if (( ! FUZZER_IS_REGRESSION )); then
echo "NOTE: Non-regression mode is not supported by this engine."
fi
command_line=("${FUZZER_BINARY}")
if [[ -n "${FUZZER_SEED_CORPUS_DIR}" ]]; then
command_line+=("${FUZZER_SEED_CORPUS_DIR}")
fi
exec "${command_line[@]}"
|
envoyproxy/envoy
|
test/fuzz/fuzz_runner_launcher.sh
|
Shell
|
apache-2.0
| 389 |
#!/bin/bash -e
set -o pipefail
# Copyright 2016 The Kythe Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script verifies and formats a single Kythe example, which is expected
# to be piped in on standard input from example.sh.
#
# The script assumes its working directory is the schema output directory and
# requires the following environment variables:
# TMP
# LANGUAGE
# LABEL
# CXX_INDEXER_BIN
# VERIFIER_BIN
# SHASUM_TOOL
# SHOWGRAPH
# VERIFIER_ARGS
SRCS="$TMP/example"
mkdir "$SRCS"
ARGS_FILE="$TMP/args"
touch "$ARGS_FILE"
# This filter assumes that its stdin is a full C++ source file which will be
# placed into $TEST_MAIN for compilation/verification. Optionally, after the
# main source text, more files can be specified with header lines formatted like
# "#example filename". The lines proceeding these header lines will be placed
# next to test.cc in "$SRCS/filename".
export TEST_MAIN="$SRCS/test.cc"
# The raw filter input will be placed into this file for later syntax highlighting
RAW_EXAMPLE="$TMP/raw.hcc"
# Test entries will be dropped here.
TEST_ENTRIES="$TMP/test.entries"
# Example filter input:
# #include "test.h"
# //- @C completes Decl1
# //- @C completes Decl2
# //- @C defines Defn
# class C { };
#
# #example test.h
# //- @C defines Decl1
# class C;
# //- @C defines Decl2
# class C;
#
# The above input will generate/verify two files: test.cc and test.h
# Split collected_files.hcc into files via "#example file.name" delimiter lines.
{ echo "#example test.cc";
tee "$RAW_EXAMPLE";
} | awk -v argsfile="$ARGS_FILE" -v root="$SRCS/" '
/#example .*/ {
x=root $2;
next;
}
/#arguments / {
$1 = "";
print > argsfile;
next;
}
{print > x;}'
CXX_ARGS="-std=c++1y $(cat "$ARGS_FILE")"
for TEST_CC in "${SRCS}"/*.cc
do
# shellcheck disable=SC2086
"$CXX_INDEXER_BIN" --ignore_unimplemented=false \
--experimental_record_dataflow_edges -i "${TEST_CC}" -- $CXX_ARGS \
>> "${TEST_ENTRIES}"
done
"$VERIFIER_BIN" "${VERIFIER_ARGS}" --ignore_dups "${SRCS}"/* < "${TEST_ENTRIES}"
trap 'error FORMAT' ERR
EXAMPLE_ID=$($SHASUM_TOOL "$RAW_EXAMPLE" | cut -c 1-64)
if [[ -n "${DIV_STYLE}" ]]; then
echo "<div style=\"${DIV_STYLE}\">"
else
echo "<div>"
fi
echo "<h5 id=\"_${LABEL}\">${LABEL}"
if [[ "${SHOWGRAPH}" == 1 ]]; then
"$VERIFIER_BIN" "${VERIFIER_ARGS}" --ignore_dups --graphviz < "${TEST_ENTRIES}" > "$TMP/${EXAMPLE_ID}.dot"
dot -Tsvg -o "$EXAMPLE_ID.svg" "$TMP/${EXAMPLE_ID}.dot"
echo "(<a href=\"${EXAMPLE_ID}.svg\" target=\"_blank\">${LANGUAGE}</a>)</h5>"
else
echo " (${LANGUAGE})</h5>"
fi
source-highlight --failsafe --output=STDOUT --src-lang cpp -i "$RAW_EXAMPLE"
echo "</div>"
|
kythe/kythe
|
kythe/docs/schema/example-cxx.sh
|
Shell
|
apache-2.0
| 3,230 |
#!/bin/bash
source "${SH_LIBRARY_PATH}/common.sh"
source "${SH_LIBRARY_PATH}/vault.sh"
source "${SH_LIBRARY_PATH}/githubapi.sh"
check_env REPOSITORY
DIR=$(dirname ${BASH_SOURCE[0]})
NAME=$(basename -s ".sh" ${BASH_SOURCE[0]})
githubapi_setup_environment
#
# To avoid the password being present in the Jenkins job console page,
# we pass the SMTP password to the ruby script via the processes stdin.
#
echo $(vault_read_smtp_password) | log_must ruby "${DIR}/${NAME}.rb" \
--netrc-file netrc-file \
--repository "$REPOSITORY" \
--smtp-user "$(vault_read_smtp_user)" \
--smtp-password "-"
# vim: tabstop=4 softtabstop=4 shiftwidth=4 expandtab textwidth=72 colorcolumn=80
|
openzfs/openzfs-ci
|
jenkins/sh/send-illumos-mails/send-illumos-mails.sh
|
Shell
|
apache-2.0
| 691 |
#!/bin/sh -eux
# # Delete all Linux headers
# dpkg --list \
# | awk '{ print $2 }' \
# | grep 'linux-headers' \
# | xargs apt-get -y purge;
# Remove specific Linux kernels, such as linux-image-3.11.0-15-generic but
# keeps the current kernel and does not touch the virtual packages,
# e.g. 'linux-image-generic', etc.
dpkg --list \
| awk '{ print $2 }' \
| grep 'linux-image-3.*-generic' \
| grep -v `uname -r` \
| xargs apt-get -y purge;
# # Delete Linux source
# dpkg --list \
# | awk '{ print $2 }' \
# | grep linux-source \
# | xargs apt-get -y purge;
# # Delete development packages
# dpkg --list \
# | awk '{ print $2 }' \
# | grep -- '-dev$' \
# | xargs apt-get -y purge;
# # Delete compilers and other development tools
# apt-get -y purge cpp gcc g++;
# # Delete X11 libraries
# apt-get -y purge libx11-data xauth libxmuu1 libxcb1 libx11-6 libxext6;
# # Delete obsolete networking
# apt-get -y purge ppp pppconfig pppoeconf;
# Delete oddities
apt-get -y purge popularity-contest;
apt-get -y autoremove;
apt-get -y clean;
|
darrenleeweber/vagrant_boxes
|
ubuntu1404-desktop/scripts/cleanup.sh
|
Shell
|
apache-2.0
| 1,084 |
# Publish data as an object without timestamp (server-side timestamp will be used)
mqtt pub -v -q 1 -h "mqtt.thingsboard.cloud" -t "v1/devices/me/telemetry" -u '$ACCESS_TOKEN' -s -m '{"temperature":42}'
# Publish data as an object without timestamp (server-side timestamp will be used)
cat telemetry-data-as-object.json | mqtt pub -v -h "mqtt.thingsboard.cloud" -t "v1/devices/me/telemetry" -u '$ACCESS_TOKEN' -s
# Publish data as an array of objects without timestamp (server-side timestamp will be used)
cat telemetry-data-as-array.json | mqtt pub -v -h "mqtt.thingsboard.cloud" -t "v1/devices/me/telemetry" -u '$ACCESS_TOKEN' -s
# Publish data as an object with timestamp (telemetry timestamp will be used)
cat telemetry-data-with-ts.json | mqtt pub -v -h "mqtt.thingsboard.cloud" -t "v1/devices/me/telemetry" -u '$ACCESS_TOKEN' -s
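# For reference, the payload files above could look like this (values are
# examples; the with-ts shape follows ThingsBoard's ts/values convention):
#   telemetry-data-as-object.json : {"temperature":42,"humidity":73}
#   telemetry-data-as-array.json  : [{"temperature":42},{"humidity":73}]
#   telemetry-data-with-ts.json   : {"ts":1634712287000,"values":{"temperature":42}}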
|
thingsboard/thingsboard.github.io
|
docs/edge/reference/resources/mqtt-js-telemetry.sh
|
Shell
|
apache-2.0
| 834 |
#!/usr/bin/env bash
# set -e # exit on any errors (jspm install errors on the belmgr-plugin install)
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"/../
EDITOR_DIR="$DIR"/webeditor
PATH="$EDITOR_DIR"/node_modules/.bin:$PATH
cd "$EDITOR_DIR"
if [ ! -L gulp ]; then
echo "Linking gulp command into \"$EDITOR_DIR\" ..."
ln -s ./node_modules/.bin/gulp .
fi
if [ ! -L jspm ]; then
echo "Linking jspm command into \"$EDITOR_DIR\" ..."
ln -s ./node_modules/.bin/jspm .
fi
echo "Running 'npm install' for BELMgr application ... "
npm install
echo "Running 'jspm install -y' for BELMgr application ... "
jspm install -y
jspm install belmgr-plugin -o "{jspmNodeConversion: false}"
echo "Running 'gulp build' for BELMgr application ... "
gulp build
echo "Enter webeditor directory and run 'gulp serve' to test application"
|
nbargnesi/belmgr
|
scripts/build.sh
|
Shell
|
apache-2.0
| 840 |
#!/bin/bash
if [ $# -lt 2 ]; then
echo "Illegal number of parameters. Usage: `basename $0` rdf-host rdf-port"
exit 1
fi
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
TEMPLATE=$DIR/repositories-config/owlim-owl-horst-optimized.ttl
echo "Creating iServe repository in $1:$2"
curl -X POST -H "Content-Type:application/x-turtle" -T $TEMPLATE http://$1:$2/openrdf-sesame/repositories/SYSTEM/rdf-graphs/service?graph=http://iserve.kmi.open.ac.uk/data#g1
RET=$?
while [[ ${RET} -ne 0 ]]; do
echo "=> Waiting for confirmation of Sesame Repository creation..."
sleep 5
curl -X POST -H "Content-Type:application/x-turtle" -T $TEMPLATE http://$1:$2/openrdf-sesame/repositories/SYSTEM/rdf-graphs/service?graph=http://iserve.kmi.open.ac.uk/data#g1
RET=$?
done
curl -X POST -H "Content-Type:application/x-turtle" -d "<http://iserve.kmi.open.ac.uk/data#g1> a <http://www.openrdf.org/config/repository#RepositoryContext>." http://$1:$2/openrdf-sesame/repositories/SYSTEM/statements
RET=$?
while [[ ${RET} -ne 0 ]]; do
echo "=> Waiting for confirmation of Context creation..."
sleep 5
curl -X POST -H "Content-Type:application/x-turtle" -d "<http://iserve.kmi.open.ac.uk/data#g1> a <http://www.openrdf.org/config/repository#RepositoryContext>." http://$1:$2/openrdf-sesame/repositories/SYSTEM/statements
RET=$?
done
echo "Configuring free text indexing..."
curl -X POST -d "update=INSERT+DATA+%7B%0A++%3Chttp%3A%2F%2Fwww.ontotext.com%2Fowlim%2Flucene%23include%3E+%3Chttp%3A%2F%2Fwww.ontotext.com%2Fowlim%2Flucene%23setParam%3E+%22literal+uri%22+.%0A++%3Chttp%3A%2F%2Fwww.ontotext.com%2Fowlim%2Flucene%23index%3E+%3Chttp%3A%2F%2Fwww.ontotext.com%2Fowlim%2Flucene%23setParam%3E+%22literals%2C+uri%22+.%0A++%3Chttp%3A%2F%2Fwww.ontotext.com%2Fowlim%2Flucene%23moleculeSize%3E+%3Chttp%3A%2F%2Fwww.ontotext.com%2Fowlim%2Flucene%23setParam%3E+%221%22+.%0A++%3Chttp%3A%2F%2Fwww.ontotext.com%2Fowlim%2Flucene%23entityIndex%3E+%3Chttp%3A%2F%2Fwww.ontotext.com%2Fowlim%2Flucene%23createIndex%3E+%22true%22+.%0A%7D%0A" http://$1:$2/openrdf-sesame/repositories/iserve/statements
echo "iServe repository created"
|
kmi/iserve
|
scripts/setup-owlim.sh
|
Shell
|
apache-2.0
| 2,137 |
#!/bin/sh
if [ -f /deployment/init.sh ];
then
echo "Running custom init script"
chmod +x /deployment/init.sh
/deployment/init.sh
fi
if [ -d /deployment ];
then
echo "Mapping deployed wars"
rm -rf /var/lib/tomcat7/webapps
ln -s /deployment /var/lib/tomcat7/webapps
fi
if [ -n "${Xmx}" ];
then
# Replace only the Xmx value itself, leaving any options after it intact
sed -i "s/Xmx[^ \"]*/Xmx${Xmx}/g" /etc/default/tomcat7
fi
if [ -n "${JAVA_OPTS}" ];
then
# Add any Java opts that are set in the container
echo "Adding JAVA OPTS"
echo "JAVA_OPTS=\"\${JAVA_OPTS} ${JAVA_OPTS} \"" >> /etc/default/tomcat7
fi
if [ -n "${JAVA_HOME}" ];
then
# Add java home if set in container
echo "Adding JAVA_HOME"
echo "JAVA_HOME=\"${JAVA_HOME}\"" >> /etc/default/tomcat7
fi
chown tomcat7:tomcat7 /deployment
service tomcat7 restart
#Override the exit command to prevent accidental container destruction
echo 'alias exit="echo Are you sure? this will kill the container. use Ctrl + p, Ctrl + q to detach or ctrl + d to exit"' > ~/.bashrc
#Run bash to keep container running and provide interactive mode
bash
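# Illustrative invocation (image name and host paths are assumptions, not
# defined by this script):
#   docker run -it -e Xmx=512m -e JAVA_OPTS="-Dfoo=bar" \
#       -v /host/wars:/deployment my-tomcat7-java8-image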
|
Maluuba/docker-files
|
docker-tomcat7-java8/start-tomcat.sh
|
Shell
|
apache-2.0
| 1,106 |
#!/bin/bash
################################################################################
# Sample application to run from your dialplan.
################################################################################
# Location for your log4php.properties
export log4php_properties=/tmp/log4php.properties
export beans_xml_dir=/tmp
# Make sure this is in the include path.
export PAGIBootstrap=example.php
# Your copy of PAGI, where src/ is.
pagi=/export/users/marcelog/src/sts/PAGI
ding=/export/users/marcelog/src/sts/Ding
# Your copy of log4php (optional)
log4php=/export/users/marcelog
# PHP to run and options
php=/usr/php-5.3/bin/php
phpoptions="-d include_path=${log4php}:${pagi}/src/mg:${ding}/src/mg:${ding}/docs/examples/PAGI"
# Standard.. the idea is to have a common launcher.
launcher=${ding}/src/mg/Ding/Helpers/Pagi/PagiHelper.php
# Go!
${php} ${phpoptions} ${launcher}
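# Illustrative Asterisk dialplan hook (an assumption -- adapt the path to
# your setup):
#   exten => 100,1,AGI(/path/to/run.sh)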
|
iaejean/Ding
|
docs/examples/PAGI/run.sh
|
Shell
|
apache-2.0
| 898 |
#! /usr/bin/env bash
usage() {
echo "usage: gsutil_upload.sh s3://bucket/boto.config gs://gsbucket/path/ true|false"
echo
echo "The contents of INPUT1_STAGING_DIR (${INPUT1_STAGING_DIR}) will be uploaded to the GS path."
exit 3
}
if ([ -z "$1" ] || [ -z "$2" ]); then
echo "ERROR: missing arguments"
usage
fi
if [ -z "${INPUT1_STAGING_DIR}" ]; then
echo "ERROR: INPUT1_STAGING_DIR must be specified"
usage
fi
declare -a on_exit_items
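# on_exit runs every command registered via add_on_exit when the script
# exits; add_on_exit installs the EXIT trap on the first registration.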
function on_exit() {
for i in "${on_exit_items[@]}"; do
eval ${i}
done
}
function add_on_exit() {
local n=${#on_exit_items[*]}
on_exit_items[$n]="$*"
if [[ ${n} -eq 0 ]]; then
trap on_exit EXIT
fi
}
set -xe
WORKING_DIR=$(mktemp -d)
cd ${WORKING_DIR}
add_on_exit rm -rf ${WORKING_DIR}
# This is the name of the tarball
TARBALL="gsutil.tar.gz"
# This is the download location for the latest gsutil release
GSUTIL_URL="https://storage.googleapis.com/pub/${TARBALL}"
# This is the configuration file for gsutil
export BOTO_CONFIG="boto.gsutil"
# This is the source of the boto config file
BOTO_CONFIG_SOURCE="$1"
# This is the destination Google Storage location
OUTPUT_GOOGLE_STORAGE="$2"
# This is the flag to determine whether to use recursive option or not.
RECURSIVE="${3:-false}"
# Download and extract the tarball.
# We use --no-check-certificate because Google are naughty with their certificates.
wget --no-verbose --no-check-certificate ${GSUTIL_URL}
tar -xzf ${TARBALL}
# Download the boto configuration
aws s3 cp ${BOTO_CONFIG_SOURCE} ${BOTO_CONFIG}
NUM_INPUT_FILES=$(ls ${INPUT1_STAGING_DIR}/* | wc -l | awk '{print $1}')
if [ "${NUM_INPUT_FILES}" -eq "0" ]; then
echo "ERROR: no input files provided - not uploading"
exit 3
fi
if [[ ${RECURSIVE} == "true" ]]; then
# To perform a parallel (multi-threaded/multi-processing) copy use -m option.
# To copy an entire directory tree use the -r option.
./gsutil/gsutil -m cp -r ${INPUT1_STAGING_DIR}/* ${OUTPUT_GOOGLE_STORAGE}
else
./gsutil/gsutil cp ${INPUT1_STAGING_DIR}/* ${OUTPUT_GOOGLE_STORAGE}
fi
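# Example invocation matching the usage() text above (bucket names are
# placeholders):
#   INPUT1_STAGING_DIR=/tmp/staging ./gsutil-upload.sh \
#       s3://my-bucket/boto.config gs://my-gs-bucket/path/ true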
|
realstraw/hyperion
|
scripts/activities/gsutil-upload.sh
|
Shell
|
bsd-3-clause
| 2,066 |
#!/bin/sh
# weak_as_needed.sh -- a test case for version handling with weak symbols
# and --as-needed libraries.
# Copyright (C) 2018-2020 Free Software Foundation, Inc.
# Written by Cary Coutant <[email protected]>.
# This file is part of gold.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
# MA 02110-1301, USA.
# This test verifies that a weak reference is properly bound to
# an as-needed library, when it is first resolved to a symbol in
# a library that ends up being not needed.
# Ref: https://stackoverflow.com/questions/50751421/undefined-behavior-in-shared-lib-using-libpthread-but-not-having-it-in-elf-as-d
check()
{
if ! grep -q "$2" "$1"
then
echo "Did not find expected output in $1:"
echo " $2"
echo ""
echo "Actual output below:"
cat "$1"
exit 1
fi
}
check_missing()
{
if grep -q "$2" "$1"
then
echo "Found unexpected output in $1:"
echo " $2"
echo ""
echo "Actual output below:"
cat "$1"
exit 1
fi
}
check weak_as_needed.stdout "WEAK .* UND *bar@v2"
check weak_as_needed.stdout "NEEDED.*weak_as_needed_c\.so"
check_missing weak_as_needed.stdout "NEEDED.*weak_as_needed_b\.so"
exit 0
|
mattstock/binutils-bexkat1
|
gold/testsuite/weak_as_needed.sh
|
Shell
|
gpl-2.0
| 1,790 |
#!/bin/bash
PROG="${GRINS_BUILDSRC_DIR}/grins"
INPUT="${GRINS_TEST_INPUT_DIR}/ins_invalid_pin_location_unit.in"
${LIBMESH_RUN:-} $PROG $INPUT
|
nicholasmalaya/grins
|
test/error_xfail/ins_invalid_pin_location.sh
|
Shell
|
lgpl-2.1
| 145 |