code (string, 2-1.05M chars) | repo_name (string, 5-110 chars) | path (string, 3-922 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 2-1.05M) |
---|---|---|---|---|---|
#!/bin/bash
declare -a cleanupARR
declare -a cleanupLBARR
declare -a dbInstanceARR
aws ec2 describe-instances --filter Name=instance-state-code,Values=16 --output table | grep InstanceId | sed "s/|//g" | tr -d ' ' | sed "s/InstanceId//g"
mapfile -t cleanupARR < <(aws ec2 describe-instances --filter Name=instance-state-code,Values=16 --output table | grep InstanceId | sed "s/|//g" | tr -d ' ' | sed "s/InstanceId//g")
echo "the output is ${cleanupARR[@]}"
if [ ${#cleanupARR[@]} -gt 0 ]; then
aws ec2 terminate-instances --instance-ids ${cleanupARR[@]}
fi
echo "Cleaning up existing Load Balancers"
mapfile -t cleanupLBARR < <(aws elb describe-load-balancers --output json | grep LoadBalancerName | sed "s/[\"\:\, ]//g" | sed "s/LoadBalancerName//g")
echo "The LBs are ${cleanupLBARR[@]}"
LENGTH=${#cleanupLBARR[@]}
echo "ARRAY LENGTH IS $LENGTH"
for (( i=0; i<${LENGTH}; i++));
do
aws elb delete-load-balancer --load-balancer-name ${cleanupLBARR[i]} --output text
sleep 1
done
# Delete Launchconf and Autoscaling groups
LAUNCHCONF=(`aws autoscaling describe-launch-configurations --output json | grep LaunchConfigurationName | sed "s/[\"\:\, ]//g" | sed "s/LaunchConfigurationName//g"`)
SCALENAME=(`aws autoscaling describe-auto-scaling-groups --output json | grep AutoScalingGroupName | sed "s/[\"\:\, ]//g" | sed "s/AutoScalingGroupName//g"`)
echo "The asgs are: " ${SCALENAME[@]}
echo "the number is: " ${#SCALENAME[@]}
if [ ${#SCALENAME[@]} -gt 0 ]
then
echo "SCALING GROUPS to delete..."
#aws autoscaling detach-launch-.
# Note: only the first auto scaling group and launch configuration returned are removed.
aws autoscaling update-auto-scaling-group --auto-scaling-group-name ${SCALENAME[0]} --min-size 0 --max-size 0 --desired-capacity 0
aws autoscaling disable-metrics-collection --auto-scaling-group-name ${SCALENAME[0]}
sleep 10
aws autoscaling delete-auto-scaling-group --auto-scaling-group-name ${SCALENAME[0]} --force-delete
sleep 5
aws autoscaling delete-launch-configuration --launch-configuration-name ${LAUNCHCONF[0]}
fi
echo "All done"
|
MitPat/Environment_FinalMP
|
cleanup.sh
|
Shell
|
gpl-3.0
| 1,938 |
#!/bin/bash
function send_tcp_message()
{
local address=$1
local port=$2
local message=$3
exec 3<>/dev/tcp/$address/$port
echo -e "$message" >&3
}
send_tcp_message $1 $2 $3
## Ref https://gist.github.com/akhin/6fe3987af338f8a55ca31eea0733e480
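# A minimal usage sketch (hypothetical address, port and payload; /dev/tcp is a
# bash feature, so run this with bash):
#   bash tcp_client.sh 127.0.0.1 5000 "hello"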
|
tonight-halfmoon/shabang
|
elementary/tcp/tcp_client.sh
|
Shell
|
gpl-3.0
| 254 |
#!/bin/bash
#samples bamlistdir sitesfile anc
sbatch /n/holylfs/LABS/edwards_lab/ashultz/HFWGReseq/whole-genome-reseq/ANGSD/run_SAF_100MB.sbatch AU_90 /n/holylfs/LABS/edwards_lab/ashultz/HFWGReseq/whole-genome-reseq/BamLists_NoRel /n/holylfs/LABS/edwards_lab/ashultz/HFWGReseq/genome/bedfiles/genome_subset_100MB_shuffled_sorted.angsdsites /n/holylfs/LABS/edwards_lab/ashultz/HFWGReseq/gatk/anc_fasta/HF_ancestral_alleles.fasta
sleep 1
sbatch /n/holylfs/LABS/edwards_lab/ashultz/HFWGReseq/whole-genome-reseq/ANGSD/run_SAF_100MB.sbatch AU_01 /n/holylfs/LABS/edwards_lab/ashultz/HFWGReseq/whole-genome-reseq/BamLists_NoRel /n/holylfs/LABS/edwards_lab/ashultz/HFWGReseq/genome/bedfiles/genome_subset_100MB_shuffled_sorted.angsdsites /n/holylfs/LABS/edwards_lab/ashultz/HFWGReseq/gatk/anc_fasta/HF_ancestral_alleles.fasta
sleep 1
sbatch /n/holylfs/LABS/edwards_lab/ashultz/HFWGReseq/whole-genome-reseq/ANGSD/run_SAF_100MB.sbatch AU_14 /n/holylfs/LABS/edwards_lab/ashultz/HFWGReseq/whole-genome-reseq/BamLists_NoRel /n/holylfs/LABS/edwards_lab/ashultz/HFWGReseq/genome/bedfiles/genome_subset_100MB_shuffled_sorted.angsdsites /n/holylfs/LABS/edwards_lab/ashultz/HFWGReseq/gatk/anc_fasta/HF_ancestral_alleles.fasta
sleep 1
sbatch /n/holylfs/LABS/edwards_lab/ashultz/HFWGReseq/whole-genome-reseq/ANGSD/run_SAF_100MB.sbatch CA_01 /n/holylfs/LABS/edwards_lab/ashultz/HFWGReseq/whole-genome-reseq/BamLists_NoRel /n/holylfs/LABS/edwards_lab/ashultz/HFWGReseq/genome/bedfiles/genome_subset_100MB_shuffled_sorted.angsdsites /n/holylfs/LABS/edwards_lab/ashultz/HFWGReseq/gatk/anc_fasta/HF_ancestral_alleles.fasta
sleep 1
sbatch /n/holylfs/LABS/edwards_lab/ashultz/HFWGReseq/whole-genome-reseq/ANGSD/run_SAF_100MB.sbatch CA_14 /n/holylfs/LABS/edwards_lab/ashultz/HFWGReseq/whole-genome-reseq/BamLists_NoRel /n/holylfs/LABS/edwards_lab/ashultz/HFWGReseq/genome/bedfiles/genome_subset_100MB_shuffled_sorted.angsdsites /n/holylfs/LABS/edwards_lab/ashultz/HFWGReseq/gatk/anc_fasta/HF_ancestral_alleles.fasta
sleep 1
sbatch /n/holylfs/LABS/edwards_lab/ashultz/HFWGReseq/whole-genome-reseq/ANGSD/run_SAF_100MB.sbatch IL_14 /n/holylfs/LABS/edwards_lab/ashultz/HFWGReseq/whole-genome-reseq/BamLists_NoRel /n/holylfs/LABS/edwards_lab/ashultz/HFWGReseq/genome/bedfiles/genome_subset_100MB_shuffled_sorted.angsdsites /n/holylfs/LABS/edwards_lab/ashultz/HFWGReseq/gatk/anc_fasta/HF_ancestral_alleles.fasta
sleep 1
sbatch /n/holylfs/LABS/edwards_lab/ashultz/HFWGReseq/whole-genome-reseq/ANGSD/run_SAF_100MB.sbatch MA_14 /n/holylfs/LABS/edwards_lab/ashultz/HFWGReseq/whole-genome-reseq/BamLists_NoRel /n/holylfs/LABS/edwards_lab/ashultz/HFWGReseq/genome/bedfiles/genome_subset_100MB_shuffled_sorted.angsdsites /n/holylfs/LABS/edwards_lab/ashultz/HFWGReseq/gatk/anc_fasta/HF_ancestral_alleles.fasta
sleep 1
sbatch /n/holylfs/LABS/edwards_lab/ashultz/HFWGReseq/whole-genome-reseq/ANGSD/run_SAF_100MB.sbatch ME_01 /n/holylfs/LABS/edwards_lab/ashultz/HFWGReseq/whole-genome-reseq/BamLists_NoRel /n/holylfs/LABS/edwards_lab/ashultz/HFWGReseq/genome/bedfiles/genome_subset_100MB_shuffled_sorted.angsdsites /n/holylfs/LABS/edwards_lab/ashultz/HFWGReseq/gatk/anc_fasta/HF_ancestral_alleles.fasta
sleep 1
sbatch /n/holylfs/LABS/edwards_lab/ashultz/HFWGReseq/whole-genome-reseq/ANGSD/run_SAF_100MB.sbatch ME_90 /n/holylfs/LABS/edwards_lab/ashultz/HFWGReseq/whole-genome-reseq/BamLists_NoRel /n/holylfs/LABS/edwards_lab/ashultz/HFWGReseq/genome/bedfiles/genome_subset_100MB_shuffled_sorted.angsdsites /n/holylfs/LABS/edwards_lab/ashultz/HFWGReseq/gatk/anc_fasta/HF_ancestral_alleles.fasta
sleep 1
sbatch /n/holylfs/LABS/edwards_lab/ashultz/HFWGReseq/whole-genome-reseq/ANGSD/run_SAF_100MB.sbatch NY_01 /n/holylfs/LABS/edwards_lab/ashultz/HFWGReseq/whole-genome-reseq/BamLists_NoRel /n/holylfs/LABS/edwards_lab/ashultz/HFWGReseq/genome/bedfiles/genome_subset_100MB_shuffled_sorted.angsdsites /n/holylfs/LABS/edwards_lab/ashultz/HFWGReseq/gatk/anc_fasta/HF_ancestral_alleles.fasta
sleep 1
sbatch /n/holylfs/LABS/edwards_lab/ashultz/HFWGReseq/whole-genome-reseq/ANGSD/run_SAF_100MB.sbatch NY_90 /n/holylfs/LABS/edwards_lab/ashultz/HFWGReseq/whole-genome-reseq/BamLists_NoRel /n/holylfs/LABS/edwards_lab/ashultz/HFWGReseq/genome/bedfiles/genome_subset_100MB_shuffled_sorted.angsdsites /n/holylfs/LABS/edwards_lab/ashultz/HFWGReseq/gatk/anc_fasta/HF_ancestral_alleles.fasta
sleep 1
sbatch /n/holylfs/LABS/edwards_lab/ashultz/HFWGReseq/whole-genome-reseq/ANGSD/run_SAF_100MB.sbatch OH_01 /n/holylfs/LABS/edwards_lab/ashultz/HFWGReseq/whole-genome-reseq/BamLists_NoRel /n/holylfs/LABS/edwards_lab/ashultz/HFWGReseq/genome/bedfiles/genome_subset_100MB_shuffled_sorted.angsdsites /n/holylfs/LABS/edwards_lab/ashultz/HFWGReseq/gatk/anc_fasta/HF_ancestral_alleles.fasta
sleep 1
sbatch /n/holylfs/LABS/edwards_lab/ashultz/HFWGReseq/whole-genome-reseq/ANGSD/run_SAF_100MB.sbatch OH_90 /n/holylfs/LABS/edwards_lab/ashultz/HFWGReseq/whole-genome-reseq/BamLists_NoRel /n/holylfs/LABS/edwards_lab/ashultz/HFWGReseq/genome/bedfiles/genome_subset_100MB_shuffled_sorted.angsdsites /n/holylfs/LABS/edwards_lab/ashultz/HFWGReseq/gatk/anc_fasta/HF_ancestral_alleles.fasta
sleep 1
sbatch /n/holylfs/LABS/edwards_lab/ashultz/HFWGReseq/whole-genome-reseq/ANGSD/run_SAF_100MB.sbatch WA_01 /n/holylfs/LABS/edwards_lab/ashultz/HFWGReseq/whole-genome-reseq/BamLists_NoRel /n/holylfs/LABS/edwards_lab/ashultz/HFWGReseq/genome/bedfiles/genome_subset_100MB_shuffled_sorted.angsdsites /n/holylfs/LABS/edwards_lab/ashultz/HFWGReseq/gatk/anc_fasta/HF_ancestral_alleles.fasta
sleep 1
sbatch /n/holylfs/LABS/edwards_lab/ashultz/HFWGReseq/whole-genome-reseq/ANGSD/run_SAF_100MB.sbatch WA_14 /n/holylfs/LABS/edwards_lab/ashultz/HFWGReseq/whole-genome-reseq/BamLists_NoRel /n/holylfs/LABS/edwards_lab/ashultz/HFWGReseq/genome/bedfiles/genome_subset_100MB_shuffled_sorted.angsdsites /n/holylfs/LABS/edwards_lab/ashultz/HFWGReseq/gatk/anc_fasta/HF_ancestral_alleles.fasta
sleep 1
|
ajshultz/whole-genome-reseq
|
ANGSD/submit_SAF_subpops_100MB.sh
|
Shell
|
gpl-3.0
| 5,897 |
#!/bin/sh
rm -rf to-compress
rm -f to-compress.tar.gz
tar -xf linux-4.13.tar.gz
cp -va linux-4.13 to-compress
cp -va linux-4.13 to-compress/copy
rm -rf linux-4.13
|
phoronix-test-suite/phoronix-test-suite
|
ob-cache/test-profiles/pts/compress-gzip-1.2.0/pre.sh
|
Shell
|
gpl-3.0
| 164 |
#! /bin/bash
# Verify the script is being run as root
if [ "$(id -u)" != "0" ]; then
echo "This script must be run as root" 1>&2
exit 1
fi
# Verify ruby is installed
if ! type "ruby" > /dev/null; then
echo "makefile-gen depends on ruby to work"
exit 1
fi
# Copy the script to /usr/bin
cp srcs/makefile-gen.rb /usr/bin/makefile-gen
# Installing the man page
cp man/makefile-gen.1 /usr/share/man/man1/
echo "executable and man installed"
makefile-gen -h
|
kayofeld/makefile-gen
|
install.sh
|
Shell
|
gpl-3.0
| 474 |
#! /bin/bash
echo Tripartite bell scenario in probability space
echo Two binary measurements per party
# The results show that there are no non-trivial inequalities on the
# target subspace.
# For testing inside Travis:
set -e
set -x
here=$(dirname $BASH_SOURCE)
data=$here/data
subs="AB Ab aB ab AC Ac aC ac BC Bc bC bc"
symm="Aa <> aA; AaBb <> BbAa; BbCc <> CcBb"
# create system of elemental inequalities:
makesys -b "A a B b C c" -o init.txt
# methods other than AFI are too slow...
time afi init.txt -s "$subs" -o fin-sym.txt -y "$symm" -r 2 -v
# consistency check
equiv init.txt $data/init-bell3x2s.txt
equiv fin-sym.txt $data/final-bell3x2s-2margs.txt
pretty fin-sym.txt -y "$symm"
|
coldfix/pystif
|
example/bell/pspace.sh
|
Shell
|
gpl-3.0
| 690 |
paths=$@
if [[ "$paths" == "" ]]; then
paths=wallet-db-user
fi
for i in $paths; do
mkdir "$i"
rscoin-user --wallet-path "$i" update
rscoin-user --wallet-path "$i" list
done
|
input-output-hk/rscoin-haskell
|
admin/initDemoUser.sh
|
Shell
|
gpl-3.0
| 183 |
#!/bin/sh
set -e
OS=$(uname -s)
PROJECT=strusWebService
# set up environment
case $OS in
Linux)
;;
Darwin)
if test "X$CC" = "Xgcc"; then
# gcc on OSX is a mere frontend to clang, force using gcc 4.8
export CXX=g++-4.8
export CC=gcc-4.8
fi
# forcing brew versions (of gettext) over Mac versions
export CFLAGS="-I/usr/local"
export CXXFLAGS="-I/usr/local"
export LDFLAGS="-L/usr/local/lib"
;;
*)
echo "ERROR: unknown operating system '$OS'."
;;
esac
# build pre-requisites
DEPS="strusBase strus strusAnalyzer strusTrace strusModule"
GITURL=`git config remote.origin.url`
cd ..
for i in $DEPS; do
git clone `echo $GITURL | sed "s@/$PROJECT\.@/$i.@g"` $i
cd $i
git checkout travis
case $OS in
Linux)
mkdir build
cd build
cmake -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_BUILD_TYPE=Release \
-DLIB_INSTALL_DIR=lib -DCMAKE_CXX_FLAGS=-g \
..
make VERBOSE=1
make VERBOSE=1 test
sudo make VERBOSE=1 install
cd ..
;;
Darwin)
if test "X$CC" = "Xgcc-4.8"; then
mkdir build
cd build
cmake \
-DCMAKE_INSTALL_PREFIX=/usr/local -DCMAKE_BUILD_TYPE=Release \
-DCMAKE_CXX_FLAGS=-g -G 'Unix Makefiles' \
..
make VERBOSE=1
make VERBOSE=1 test
sudo make VERBOSE=1 install
cd ..
else
mkdir build
cd build
cmake \
-DCMAKE_INSTALL_PREFIX=/usr/local -DCMAKE_BUILD_TYPE=Release \
-DCMAKE_CXX_FLAGS=-g -G Xcode \
..
xcodebuild -configuration Release -target ALL_BUILD
xcodebuild -configuration Release -target RUN_TESTS
sudo xcodebuild -configuration Release -target install
cd ..
fi
;;
*)
echo "ERROR: unknown operating system '$OS'."
;;
esac
cd ..
done
cd $PROJECT
# build the package itself
case $OS in
Linux)
mkdir build
cd build
cmake -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_BUILD_TYPE=Release \
-DLIB_INSTALL_DIR=lib -DCMAKE_CXX_FLAGS=-g \
..
make VERBOSE=1
make run &
sleep 10
make VERBOSE=1 test
pkill strusWebService
sudo make VERBOSE=1 install
cd ..
;;
Darwin)
if test "X$CC" = "Xgcc-4.8"; then
mkdir build
cd build
cmake \
-DCMAKE_INSTALL_PREFIX=/usr/local -DCMAKE_BUILD_TYPE=Release \
-DCMAKE_CXX_FLAGS=-g -G 'Unix Makefiles' \
..
make VERBOSE=1
make run &
sleep 10
make VERBOSE=1 test
pkill strusWebService
sudo make VERBOSE=1 install
cd ..
else
mkdir build
cd build
cmake \
-DCMAKE_INSTALL_PREFIX=/usr/local -DCMAKE_BUILD_TYPE=Release \
-DCMAKE_CXX_FLAGS=-g -G Xcode \
..
xcodebuild -configuration Release -target ALL_BUILD
Release/strusWebService -v -c ../config.js &
sleep 10
xcodebuild -configuration Release -target RUN_TESTS
pkill strusWebService
sudo xcodebuild -configuration Release -target install
cd ..
fi
;;
*)
echo "ERROR: unknown operating system '$OS'."
;;
esac
|
Eurospider/strusWebService
|
dist/travis/script.sh
|
Shell
|
mpl-2.0
| 2,878 |
#!/usr/bin/env sh
# Open a Bash shell into the workspace Docker container
cd laradock && docker-compose exec --user=laradock workspace bash
|
WaveHack/OpenDominion
|
bin/02-login.sh
|
Shell
|
agpl-3.0
| 141 |
#!/bin/sh
source venv/bin/activate
ping
exec gunicorn -b :5000 --access-logfile - --error-logfile - VKUpdate:app
|
stsouko/MWUI
|
vk_update/boot.sh
|
Shell
|
agpl-3.0
| 114 |
python pq.py printParallelParams | parallel 'gs -q -dSAFER -sDEVICE=bbox -dFirstPage=1 -dLastPage=1 -f "{}" 2>&1 | grep %BoundingBox | sed -e s/%%BoundingBox://g | tr -s " " "\t" | sed -e s/^/{/.}\\t/g'
|
alexbyrnes/FCC-Political-Ads_The-Code
|
fpa/get_bboxes.sh
|
Shell
|
agpl-3.0
| 204 |
#!/bin/bash
set -ex
VENV_ROOT=$WORKSPACE/venvs
mkdir -p $VENV_ROOT
rm -rf $WORKSPACE/logs
if [ ! -d "$VENV_ROOT/analytics-tasks" ]
then
virtualenv $VENV_ROOT/analytics-tasks
fi
if [ ! -d "$VENV_ROOT/analytics-configuration" ]
then
virtualenv $VENV_ROOT/analytics-configuration
fi
TASKS_BIN=$VENV_ROOT/analytics-tasks/bin
CONF_BIN=$VENV_ROOT/analytics-configuration/bin
. $CONF_BIN/activate
make -C analytics-configuration provision.emr
function terminate_cluster() {
. $CONF_BIN/activate
make -C analytics-configuration terminate.emr
}
if [ "$TERMINATE" = "true" ]; then
trap terminate_cluster EXIT
fi
. $TASKS_BIN/activate
make -C analytics-tasks bootstrap
TASKS_REPO=${TASKS_REPO:-https://github.com/edx/edx-analytics-pipeline.git}
VIRTUALENV_EXTRA_ARGS="${VIRTUALENV_EXTRA_ARGS:-}"
# Define task on the command line, including the task name and all of its arguments.
# All arguments provided on the command line are passed through to the remote-task call.
remote-task --job-flow-name="$CLUSTER_NAME" --repo $TASKS_REPO --branch $TASKS_BRANCH --wait --log-path $WORKSPACE/logs/ --remote-name automation --user $TASK_USER --virtualenv-extra-args="$VIRTUALENV_EXTRA_ARGS" --secure-config-branch="$SECURE_BRANCH" --secure-config-repo="$SECURE_REPO" --secure-config="$SECURE_CONFIG" --override-config="$OVERRIDE_CONFIG" "$@"
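# A hypothetical invocation sketch (the task name and arguments below are
# illustrative only; real tasks and options come from edx-analytics-pipeline):
#   ./run-automated-task.sh SomeTaskName --some-arg some-value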
cat $WORKSPACE/logs/* || true
. $CONF_BIN/activate
make -C analytics-configuration collect.metrics
|
open-craft/edx-analytics-configuration
|
automation/run-automated-task.sh
|
Shell
|
agpl-3.0
| 1,454 |
#!/bin/bash
#
for file in $1/*.tif
do
if test -f "$file"
then
echo "processing $file..."
/usr/local/bin/sipi --file $file --format jpx ${file%%.*}.jp2
fi
done
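# Example invocation (hypothetical directory; every .tif inside it is converted):
#   ./test_tifs.sh /path/to/tif_directory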
|
dhlab-basel/Sipi
|
test_tifs.sh
|
Shell
|
agpl-3.0
| 173 |
#!/bin/bash
cd /opt/superdesk/client &&
npm install &&
#bower --allow-root install &&
grunt --force server --server='http://localhost:5000/api' --ws='ws://localhost:5100' &
cd /opt/superdesk &&
bash ./scripts/fig_wrapper.sh honcho start
|
hlmnrmr/liveblog
|
docker/start-dev.sh
|
Shell
|
agpl-3.0
| 238 |
#!/usr/bin/env bash
echo "Are you sure that the files"
echo " * 'lib/core/libimagrt/src/version.rs'"
echo " * 'scripts/version-updated'"
echo "contain the right version setting?"
echo "If yes, pass '--I-AM-SURE-VERSION-IS-UPDATED' as parameter"
[[ "--I-AM-SURE-VERSION-IS-UPDATED" == $1 ]] || exit 1
CRATES=(
./lib/etc/libimagutil
./lib/etc/libimagtimeui
./lib/core/libimagerror
./lib/core/libimagstore
./lib/etc/libimaginteraction
./lib/core/libimagrt
./lib/entry/libimagentrylink
./lib/entry/libimagentryurl
./lib/entry/libimagentrytag
./lib/entry/libimagentryfilter
./lib/entry/libimagentrygps
./lib/entry/libimagentryedit
./lib/entry/libimagentryview
./lib/entry/libimagentrydatetime
./lib/entry/libimagentryutil
./lib/entry/libimagentrycategory
./lib/entry/libimagentryref
./lib/entry/libimagentrymarkdown
./lib/entry/libimagentryannotation
./lib/domain/libimagbookmark
./lib/domain/libimagcalendar
./lib/domain/libimaghabit
./lib/domain/libimagnotes
./lib/domain/libimagcontact
./lib/domain/libimagdiary
./lib/domain/libimaglog
./lib/domain/libimagtimetrack
./lib/domain/libimagtodo
./lib/domain/libimagmail
./lib/domain/libimagwiki
./bin/domain/imag-habit
./bin/domain/imag-diary
./bin/domain/imag-calendar
./bin/domain/imag-contact
./bin/domain/imag-notes
./bin/domain/imag-bookmark
./bin/domain/imag-timetrack
./bin/domain/imag-mail
./bin/domain/imag-todo
./bin/domain/imag-log
./bin/domain/imag-wiki
./bin/core/imag-markdown
./bin/core/imag-ref
./bin/core/imag-gps
./bin/core/imag-diagnostics
./bin/core/imag-mv
./bin/core/imag-store
./bin/core/imag-tag
./bin/core/imag-grep
./bin/core/imag-annotate
./bin/core/imag-link
./bin/core/imag-view
./bin/core/imag-init
./bin/core/imag-edit
./bin/core/imag-ids
./bin/core/imag-id-in-collection
./bin/core/imag-git
./bin/core/imag-category
./bin/core/imag-header
./bin/core/imag-create
./bin/core/imag
)
for crate in ${CRATES[*]}; do
echo -e "\t[CARGO][CHECK ]\t$crate"
RUST_BACKTRACE=1 cargo publish --manifest-path $crate/Cargo.toml || exit 1
echo -e "\t[Waiting...]"
sleep 15
done
|
matthiasbeyer/imag
|
scripts/release.sh
|
Shell
|
lgpl-2.1
| 2,302 |
#!/bin/sh
#
# A simple RTP server
# sends the output of autoaudiosrc as alaw encoded RTP on port 5002; RTCP is sent on
# port 5003. The destination is 127.0.0.1.
# The receiver's RTCP reports are received on port 5007.
#
# .--------. .-------. .-------. .----------. .-------.
# |audiosrc| |alawenc| |pcmapay| | rtpbin | |udpsink| RTP
# | src->sink src->sink src->send_rtp send_rtp->sink | port=5002
# '--------' '-------' '-------' | | '-------'
# | |
# | | .-------.
# | | |udpsink| RTCP
# | send_rtcp->sink | port=5003
# .-------. | | '-------' sync=false
# RTCP |udpsrc | | | async=false
# port=5007 | src->recv_rtcp |
# '-------' '----------'
# change this to send the RTP data and RTCP to another host
DEST=127.0.0.1
#AELEM=autoaudiosrc
AELEM=audiotestsrc
# PCMA encode from the source
ASOURCE="$AELEM ! audioconvert"
AENC="alawenc ! rtppcmapay"
gst-launch-1.0 -v rtpbin name=rtpbin \
$ASOURCE ! $AENC ! rtpbin.send_rtp_sink_0 \
rtpbin.send_rtp_src_0 ! udpsink port=5002 host=$DEST \
rtpbin.send_rtcp_src_0 ! udpsink port=5003 host=$DEST sync=false async=false \
udpsrc port=5007 ! rtpbin.recv_rtcp_sink_0
|
dgerlach/gst-plugins-good
|
tests/examples/rtp/server-alsasrc-PCMA.sh
|
Shell
|
lgpl-2.1
| 1,619 |
# Manage authentication keys.
_ku_cmd xxx 'manage xxx'
function _ku_xxx() {
# Help message
function _ku_help() {
echo ''
echo 'Usage: kuero keys:COMMAND [options] help'
echo ''
echo 'add # add a key for the current user'
echo 'clear # remove all authentication keys from the current user'
echo 'remove # remove a key from the current user'
echo ''
}
# Detailed help message
function _ku_cmd_help() {
case $1 in
'aaa')
echo 'Still to be done...'
echo ''
;;
*)
echo "$1 does not exist!"
;;
esac
exit 1
}
# main
case $1 in
'aaa')
if [[ $2 == 'help' ]]; then _ku_cmd_help $1; fi;
# Usage message
_ku_usage-aaa() { echo 'Usage: [-u <string>]' 1>&2; echo 'type COMMAND help for more details'; exit 1; }
# Read option arguments.
shift
while getopts "u:" opt; do
case ${opt} in
u) user=${OPTARG} ;;
\?) _ku_usage-aaa ;;
esac
done
# Check if all the mandatory arguments are provided.
if [[ -z ${user} ]]; then
echo "The options [u] are mandatory. Type help for more details."
exit 1
fi
# Well done. Do
echo 'Do ...'
;;
*)
_ku_help
;;
esac
}
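# Usage sketch for this template (hypothetical values; the exact CLI form depends
# on the surrounding kuero dispatcher, following the help text above):
#   kuero xxx:aaa -u alice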
|
jclo/kuero
|
src/client/lib/template-kuero-cmd.sh
|
Shell
|
lgpl-2.1
| 1,326 |
#!/bin/bash
echo -e "Creating javadoc...\n"
./gradlew javadoc
echo -e "Publishing javadoc...\n"
cp -R html/javadoc $HOME/javadoc-latest
echo -e "Installing requirements...\n"
cd docs
pip3 install -r requirements.txt
echo -e "Generating static HTML pages for documentation...\n"
make html
echo -e "Publishing documentation...\n"
cp -Rf _build/html $HOME/docs
cd $HOME
git config --global user.email "[email protected]"
git config --global user.name "travis-ci"
git clone --quiet --branch=gh-pages [email protected]:loklak/loklak_server.git gh-pages
cd gh-pages
git rm -rf ./*
cp -Rf $HOME/docs/* .
cp -Rf $HOME/javadoc-latest ./javadoc
touch .nojekyll
git add -f .
git commit -m "Latest javadoc on successful travis build $TRAVIS_BUILD_NUMBER auto-pushed to gh-pages"
git push -fq origin gh-pages > /dev/null 2>&1
if [ $? -eq 0 ]; then
echo -e "Published Javadoc to gh-pages.\n"
exit 0
else
echo -e "Publishing failed. Maybe the access-token was invalid or had insufficient permissions.\n"
exit 1
fi
|
shivenmian/loklak_server
|
.utility/push-docs-to-gh-pages.sh
|
Shell
|
lgpl-2.1
| 1,028 |
#!/bin/bash
coverage run -m pytest && coverage html -d coverage
|
adamreeve/npTDMS
|
coverage.sh
|
Shell
|
lgpl-3.0
| 64 |
#!/bin/bash -x
#
# Generated - do not edit!
#
# Macros
TOP=`pwd`
CND_PLATFORM=GNU-Linux-x86
CND_CONF=Default
CND_DISTDIR=dist
CND_BUILDDIR=build
NBTMPDIR=${CND_BUILDDIR}/${CND_CONF}/${CND_PLATFORM}/tmp-packaging
TMPDIRNAME=tmp-packaging
OUTPUT_PATH=nio.bin
OUTPUT_BASENAME=nio.bin
PACKAGE_TOP_DIR=nio/
# Functions
function checkReturnCode
{
rc=$?
if [ $rc != 0 ]
then
exit $rc
fi
}
function makeDirectory
# $1 directory path
# $2 permission (optional)
{
mkdir -p "$1"
checkReturnCode
if [ "$2" != "" ]
then
chmod $2 "$1"
checkReturnCode
fi
}
function copyFileToTmpDir
# $1 from-file path
# $2 to-file path
# $3 permission
{
cp "$1" "$2"
checkReturnCode
if [ "$3" != "" ]
then
chmod $3 "$2"
checkReturnCode
fi
}
# Setup
cd "${TOP}"
mkdir -p ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package
rm -rf ${NBTMPDIR}
mkdir -p ${NBTMPDIR}
# Copy files and create directories and links
cd "${TOP}"
makeDirectory "${NBTMPDIR}/nio/bin"
copyFileToTmpDir "${OUTPUT_PATH}" "${NBTMPDIR}/${PACKAGE_TOP_DIR}bin/${OUTPUT_BASENAME}" 0755
# Generate tar file
cd "${TOP}"
rm -f ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package/nio.tar
cd ${NBTMPDIR}
tar -vcf ../../../../${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package/nio.tar *
checkReturnCode
# Cleanup
cd "${TOP}"
rm -rf ${NBTMPDIR}
|
dalmirdasilva/ArduinoNIOLibrary
|
nbproject/Package-Default.bash
|
Shell
|
lgpl-3.0
| 1,370 |
#!/system/busybox sh
CONF_FILE="/data/userdata/anticensorship"
CURRENT_MODE="$(cat $CONF_FILE)"
echo $CURRENT_MODE
if [[ "$1" == "get" ]]
then
[[ "$CURRENT_MODE" == "" ]] && exit 0
[[ "$CURRENT_MODE" == "0" ]] && exit 0
[[ "$CURRENT_MODE" == "1" ]] && exit 1
# error
exit 255
fi
if [[ "$1" == "set_next" ]]
then
[[ "$CURRENT_MODE" == "" ]] || [[ "$CURRENT_MODE" == "0" ]] && echo "1" > $CONF_FILE && /etc/anticensorship.sh 1
[[ "$CURRENT_MODE" == "1" ]] && echo "0" > $CONF_FILE && /etc/anticensorship.sh 0
fi
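# Usage sketch (assumes the script is installed on the device as shown above):
#   ./anticensorship.sh get; echo $?   # exit status reports the current mode
#   ./anticensorship.sh set_next       # toggles the mode and applies it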
|
ValdikSS/huawei_oled_hijack
|
scripts/E5885/anticensorship.sh
|
Shell
|
lgpl-3.0
| 543 |
alias lg='lazygit'
alias gs='git status'
alias gl='git log'
alias ga='git add'
alias gr='git rm'
alias gd='git diff'
alias gdc='git diff --cached'
alias gg='git grep --break --heading --line-number'
alias gcm='git commit'
alias gcmm='git commit -m'
alias gcma='git commit --amend'
alias gpl='git pull --rebase'
alias gps='git push'
alias gpsb='git push && gbackup'
alias gpsf='git push --force-with-lease'
alias gpsfb='git push --force-with-lease && gbackup'
alias gpst='git push --tags'
alias gsth='git stash'
alias gstp='git stash pop'
alias gfp='git fetch --prune'
alias gfpup='git fetch --prune upstream'
alias gcho='git checkout'
alias gchrp='git cherry-pick -x'
alias grbs='git fetch --prune upstream && git rebase upstream/master'
alias gdbr='git branch -d'
alias gDbr='git branch -D'
gnbr() {
git checkout -b $1
git push --set-upstream origin $1
echo
echo "\e[33mTip: use \e[0m\e[1;33mgchb $1\e[0m\e[33m next time!\e[0m"
}
# smarter git checkout, combination of gcho & gnbr
gchb() {
if ! git checkout $1; then
git checkout -b $1
git push --set-upstream origin $1
fi
}
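# Usage sketch (hypothetical branch name): `gchb feature/login` checks the branch
# out if it exists, otherwise creates it and sets the upstream on origin.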
gdcm() {
echo 'Commits that exist in '$1' but not in '$2':'
git log --graph --pretty=format:'%Cred%h%Creset %s' --abbrev-commit $2..$1
echo 'Commits that exist in '$2' but not in '$1':'
git log --graph --pretty=format:'%Cred%h%Creset %s' --abbrev-commit $1..$2
}
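# Usage sketch (hypothetical branch names): `gdcm feature/login master` lists the
# commits unique to each of the two branches.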
gbackup() {
if git remote -v | grep backup > /dev/null; then
git push --mirror backup
elif [[ "$1" == "add" ]]; then
git remote add backup [email protected]:synaptiko/$(basename `git rev-parse --show-toplevel`).git
else
echo 'Backup remote is not added, you need to add it first with:'
echo 'gbackup add'
fi
}
gsync() {
fetchResult=$(mktemp)
if [[ "$1" == "" ]]; then
echo "git fetch --prune"
git fetch --prune 2>&1 | tee $fetchResult
if git remote -v | grep -E "^upstream\s+" > /dev/null; then
echo "git fetch --prune upstream"
git fetch --prune upstream 2>&1 | tee -a $fetchResult
fi
elif [[ "$1" == "all" ]]; then
echo "git fetch --all --prune"
git fetch --all --prune 2>&1 | tee $fetchResult
else
echo "git fetch --prune $1"
git fetch --prune $1 2>&1 | tee $fetchResult
fi
if git branch --show-current | grep -E "^master$" > /dev/null; then
if git remote -v | grep -E "^upstream\s+" > /dev/null; then
if git rev-parse --abbrev-ref HEAD@{upstream} | grep -E "^origin/master$" > /dev/null; then
echo "git rebase upstream/master"
git rebase upstream/master 2>&1 | cat
echo "git push"
git push 2>&1 | cat
else
echo "git pull --rebase"
git pull --rebase 2>&1 | cat
fi
else
echo "git pull --rebase"
git pull --rebase 2>&1 | cat
fi
else
echo "git pull --rebase"
git pull --rebase 2>&1 | cat
fi
echo
branchesToDelete=$(mktemp)
# TODO jprokop: improve later, somehow detect that branches are related to the remote and also that they do not contain unpushed changes
sed -e "/^ - \[deleted\]/!d" $fetchResult | sed -e "s/^.*\/\([^/]*\)$/\1/" | sort | uniq > $branchesToDelete
if [[ -s $branchesToDelete ]]; then
cat $branchesToDelete | xargs -p git branch -D
fi
rm $branchesToDelete $fetchResult
}
|
synaptiko/.files
|
zsh/git-aliases.zsh
|
Shell
|
unlicense
| 3,160 |
#! /bin/bash
# exercise the query state functions
curl_sparql_request <<EOF \
| jq '.results.bindings[] | .[].value' | fgrep -q 'true'
# prefix xsd: <http://www.w3.org/2001/XMLSchema-datatypes>
select ((( xsd:gYearMonth('1976-02-05:00') = '1976-02-05:00'^^xsd:gYearMonth) &&
( xsd:gYearMonth('1976-02Z') = '1976-02Z'^^xsd:gYearMonth) &&
( xsd:gYearMonth('1976-02Z') != '1976-02'^^xsd:gYearMonth) &&
( xsd:gYearMonth('1976-02+12:00') = '1976-02+12:00'^^xsd:gYearMonth) &&
( xsd:gYearMonth('1976-02-12:00') != '1976-02+12:00'^^xsd:gYearMonth) &&
( xsd:gYearMonth('1976-02-10:00') != '1976-02Z'^^xsd:gYearMonth) &&
# test coercion
( xsd:gYearMonth(xsd:dateTime('2014-12-31T23:59:58Z')) = '2014-12Z'^^xsd:gYearMonth ) &&
( xsd:gYearMonth(xsd:date('2014-12-31')) = '2014-12Z'^^xsd:gYearMonth ) &&
# no order, but also not incommensurable
( xsd:gYearMonth('1975-02') < xsd:gYearMonth('1976-02') ) &&
(! ( xsd:gYearMonth('1976-02') < xsd:gYearMonth('1975-02') )) &&
( xsd:gYearMonth('1975-02') <= xsd:gYearMonth('1975-03') ) &&
(! ( xsd:gYearMonth('1976-02') <= xsd:gYearMonth('1975-03') ))
)
as ?ok)
where {
}
EOF
|
dydra/http-api-tests
|
extensions/sparql-protocol/temporal-data/gYearMonth.sh
|
Shell
|
unlicense
| 1,260 |
#!/bin/bash
# set $GEM_HOME/bin/ for CocoaPods.
PATH="/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:$GEM_HOME/bin"; export PATH
version=$(cat version)
echo "make clean build"
make clean build | ruby files/extra/reduce-logs.rb
if [ ${PIPESTATUS[0]} -ne 0 ]; then
exit 99
fi
# --------------------------------------------------
# http://developer.apple.com/documentation/Darwin/Conceptual/KEXTConcept/KEXTConceptPackaging/packaging_kext.html
echo "Copy Files"
rm -rf pkgroot
mkdir -p pkgroot
mkdir -p "pkgroot/Applications"
cp -R "src/core/server/build/Release/Seil.app" "pkgroot/Applications"
basedir="pkgroot/Applications/Seil.app/Contents/Applications"
mkdir -p "$basedir"
cp -R "src/util/preferences/build/Release/Seil Preferences.app" "$basedir"
basedir="pkgroot/Applications/Seil.app/Contents/Library/bin"
mkdir -p "$basedir"
cp -R src/util/cli/build/Release/seil "$basedir"
mkdir -p "pkgroot/Library"
cp -R files/LaunchDaemons "pkgroot/Library"
basedir="pkgroot/Library/Application Support/org.pqrs/Seil"
mkdir -p "$basedir"
for ostype in 10.11; do
# We should sign kext after OS X 10.9.
cp -R src/core/kext/${ostype}/build/Release/Seil.kext "$basedir/Seil.${ostype}.signed.kext"
done
cp -R pkginfo/Scripts/preinstall "$basedir/uninstall_core.sh"
for f in \
files/extra/setpermissions.sh \
files/extra/startup.sh \
files/extra/uninstall.sh \
files/extra/uninstaller.applescript \
;
do
cp -R "$f" "$basedir"
done
# Sign with Developer ID
bash files/extra/codesign.sh "pkgroot"
# Setting file permissions.
#
# Note:
# If target files are already exists in system disk,
# PackageMaker uses their permissions.
#
# For example:
# If /Applications/Seil.app permission is 0777 by accidental reasons,
# the directory permission will be 0777 in Archive.bom
# even if we set this directory permission to 0755 by setpermissions.sh.
#
# Then, we need to repair file permissions in postinstall script.
# Please also see postinstall.
#
sh "files/extra/setpermissions.sh" pkgroot
sh "files/extra/setpermissions.sh" pkginfo
chmod 755 \
pkginfo/Scripts/postinstall \
pkginfo/Scripts/preinstall \
pkginfo/fixbom.rb
# --------------------------------------------------
echo "Create pkg"
pkgName="Seil.sparkle_guided.pkg"
pkgIdentifier="org.pqrs.driver.Seil"
archiveName="Seil-${version}"
rm -rf $archiveName
mkdir $archiveName
pkgbuild \
--root pkgroot \
--component-plist pkginfo/pkgbuild.plist \
--scripts pkginfo/Scripts \
--identifier $pkgIdentifier \
--version $version \
--install-location "/" \
$archiveName/Installer.pkg
echo "Fix Archive.bom"
pkgutil --expand $archiveName/Installer.pkg $archiveName/expanded
ruby pkginfo/fixbom.rb $archiveName/expanded/Bom pkgroot/
pkgutil --flatten $archiveName/expanded $archiveName/Installer.pkg
rm -r $archiveName/expanded
productbuild \
--distribution pkginfo/Distribution.xml \
--package-path $archiveName \
$archiveName/$pkgName
rm -f $archiveName/Installer.pkg
# --------------------------------------------------
echo "Sign with Developer ID"
bash files/extra/codesign-pkg.sh $archiveName/$pkgName
# --------------------------------------------------
echo "Make Archive"
# Note:
# Some third-party archivers fail to extract zip archives.
# Therefore, we use dmg instead of zip.
rm -f $archiveName.dmg
hdiutil create -nospotlight $archiveName.dmg -srcfolder $archiveName
rm -rf $archiveName
chmod 644 $archiveName.dmg
|
tekezo/Seil
|
make-package.sh
|
Shell
|
unlicense
| 3,507 |
#!/bin/bash
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
P_OPERATOR_VERSION=${1:-"v0.19.0"}
P_OPERATOR_ADDON_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd ${P_OPERATOR_ADDON_DIR}
git clone --depth 1 https://github.com/coreos/prometheus-operator/
cp prometheus-operator/bundle.yaml ${P_OPERATOR_VERSION}.yaml
mkdir tmp
cp prometheus-operator/contrib/kube-prometheus/manifests/* tmp
for i in `ls tmp`
do
echo "---" >> ${P_OPERATOR_VERSION}.yaml
cat tmp/$i >> ${P_OPERATOR_VERSION}.yaml
done
rm -rf ${P_OPERATOR_ADDON_DIR}/prometheus-operator ${P_OPERATOR_ADDON_DIR}/tmp/
cd -
|
blakebarnett/kops
|
addons/prometheus-operator/sync-repo.sh
|
Shell
|
apache-2.0
| 1,185 |
#!/bin/sh -e
# Copyright 2015 realglobe, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Assuming nginx with Lua support and the required Lua libraries are already installed,
# install edo-auth.
project_dir=$(cd $(dirname $0)/.. && pwd)
install_dir=${install_dir:=${project_dir}/root}
# Convert install_dir to an absolute path.
if ! [ -d ${install_dir} ]; then
mkdir -p ${install_dir}
echo "${install_dir} was created"
fi
install_dir=$(cd ${install_dir} && pwd)
nginx_dir=${nginx_dir:=${install_dir}/opt/nginx}
if ! [ -d ${nginx_dir}/lua/lib ]; then
mkdir -p ${nginx_dir}/lua/lib
fi
cp ${project_dir}/*.lua ${nginx_dir}/lua/
cp ${project_dir}/lib/*.lua ${nginx_dir}/lua/lib/
echo "---------- edo-auth was installed in ${install_dir} ----------"
|
realglobe-Inc/edo-auth
|
script/install_edo-auth.sh
|
Shell
|
apache-2.0
| 1,280 |
#!/usr/bin/env bash
source /etc/profile.d/chruby.sh
chruby ruby
function cp_artifacts {
mv $HOME/.bosh director-state/
cp -r director.yml director-creds.yml director-state.json cpi-release/ director-state/
}
trap cp_artifacts EXIT
: ${BAT_INFRASTRUCTURE:?}
: ${BOSH_SL_VM_NAME_PREFIX:?}
: ${BOSH_SL_VM_DOMAIN:?}
mv bosh-cli/bosh-cli-* /usr/local/bin/bosh-cli
chmod +x /usr/local/bin/bosh-cli
echo -e "\n\033[32m[INFO] Generating local cpi release manifest.\033[0m"
export CPI_RELEASE=$(echo cpi-release/*.tgz)
cat > cpi-replace.yml <<EOF
---
- type: replace
path: /releases/name=bosh-softlayer-cpi?
value:
name: bosh-softlayer-cpi
url: file://$CPI_RELEASE
EOF
echo -e "\n\033[32m[INFO] Generating manifest director.yml.\033[0m"
powerdns_yml_path=$(find "$(pwd)" -name powerdns.yml | head -n 1)
bosh-cli interpolate bosh-deployment/bosh.yml \
-o bosh-deployment/$BAT_INFRASTRUCTURE/cpi.yml \
-o ./cpi-replace.yml \
-o ${powerdns_yml_path} \
-o bosh-deployment/jumpbox-user.yml \
-o bosh-cpi-release/ci/bats/ops/remove-health-monitor.yml \
-v dns_recursor_ip=8.8.8.8 \
-v director_name=bats-director \
-v sl_director_fqn=$BOSH_SL_VM_NAME_PREFIX.$BOSH_SL_VM_DOMAIN \
--vars-file <( bosh-cpi-release/ci/bats/iaas/$BAT_INFRASTRUCTURE/director-vars ) \
--vars-store director-creds.yml \
> director.yml
export BOSH_LOG_LEVEL=DEBUG
export BOSH_LOG_PATH=./run.log
echo -e "\n\033[32m[INFO] Deploying director.\033[0m"
bosh-cli create-env \
--state director-state.json \
--vars-store director-creds.yml \
director.yml
|
mattcui/bosh-softlayer-cpi-release
|
ci/bats/tasks/deploy-director-softlayer.sh
|
Shell
|
apache-2.0
| 1,560 |
#!/bin/bash
mkdir -p mashupcamp_editor/static
cp ../mashupcamp_editor/README.txt mashupcamp_editor/
cp ../mashupcamp_editor/app.yaml mashupcamp_editor/
cp ../mashupcamp_editor/article.py mashupcamp_editor/
cp ../mashupcamp_editor/article.html mashupcamp_editor/
cp ../mashupcamp_editor/main.py mashupcamp_editor/
cp ../mashupcamp_editor/static/json.js mashupcamp_editor/static
cp ../mashupcamp_editor/static/q12-min.js mashupcamp_editor/static
zip -r mashupcamp_editor-0.1.zip mashupcamp_editor
|
jscud/sippycode
|
simple_releases/mashupcamp_editor.sh
|
Shell
|
apache-2.0
| 496 |
#!/bin/sh
set -e
source ./shared.functions.sh
START_DIR=$PWD
WORK_DIR=$START_DIR/../../../../../.macosbuild
mkdir -p $WORK_DIR
WORK_DIR=$(abspath "$WORK_DIR")
INDY_SDK=$WORK_DIR/vcx-indy-sdk
VCX_SDK=$START_DIR/../../../../..
VCX_SDK=$(abspath "$VCX_SDK")
export IOS_TARGETS=$3
source ./mac.05.libvcx.env.sh
cd ../../..
DEBUG_SYMBOLS="debuginfo"
if [ ! -z "$1" ]; then
DEBUG_SYMBOLS=$1
fi
if [ "$DEBUG_SYMBOLS" = "nodebug" ]; then
sed -i .bak 's/debug = true/debug = false/' Cargo.toml
fi
if [ -z "${IOS_TARGETS}" ]; then
echo "please provide the targets e.g aarch64-apple-ios,armv7-apple-ios,i386-apple-ios,x86_64-apple-ios"
exit 1
fi
CLEAN_BUILD="cleanbuild"
if [ ! -z "$2" ]; then
CLEAN_BUILD=$2
fi
if [ "${CLEAN_BUILD}" = "cleanbuild" ]; then
echo "cleanbuild"
cargo clean
rm -rf ${BUILD_CACHE}/target
rm -rf ${BUILD_CACHE}/arch_libs
fi
git log -1 > $WORK_DIR/evernym.vcx-sdk.git.commit.log
export OPENSSL_LIB_DIR_DARWIN=${OPENSSL_LIB_DIR}
bkpIFS="$IFS"
IFS=',()][' read -r -a targets <<<"${IOS_TARGETS}"
echo "Building targets: ${targets[@]}" ##Or printf "%s\n" ${array[@]}
IFS="$bkpIFS"
to_combine=""
for target in ${targets[*]}
do
if [ "${target}" = "aarch64-apple-ios" ]; then
target_arch="arm64"
elif [ "${target}" = "armv7-apple-ios" ]; then
target_arch="armv7"
elif [ "${target}" = "armv7s-apple-ios" ]; then
target_arch="armv7s"
elif [ "${target}" = "i386-apple-ios" ]; then
target_arch="i386"
elif [ "${target}" = "x86_64-apple-ios" ]; then
target_arch="x86_64"
fi
libtool="/usr/bin/libtool"
libindy_dir="${BUILD_CACHE}/libindy/${LIBINDY_VERSION}"
if [ -e ${libindy_dir}/${target_arch}/libindy.a ]; then
echo "${target_arch} libindy architecture already extracted"
else
mkdir -p ${libindy_dir}/${target_arch}
lipo -extract $target_arch ${libindy_dir}/libindy.a -o ${libindy_dir}/${target_arch}/libindy.a
${libtool} -static ${libindy_dir}/${target_arch}/libindy.a -o ${libindy_dir}/${target_arch}/libindy_libtool.a
mv ${libindy_dir}/${target_arch}/libindy_libtool.a ${libindy_dir}/${target_arch}/libindy.a
fi
export OPENSSL_LIB_DIR=$WORK_DIR/OpenSSL-for-iPhone/lib/${target_arch}
export IOS_SODIUM_LIB=$WORK_DIR/libzmq-ios/libsodium-ios/dist/ios/lib/${target_arch}
export IOS_ZMQ_LIB=$WORK_DIR/libzmq-ios/dist/ios/lib/${target_arch}
export LIBINDY_DIR=${libindy_dir}/${target_arch}
cargo build --target "${target}" --release --no-default-features --features "ci"
to_combine="${to_combine} ./target/${target}/release/libvcx.a"
done
mkdir -p ./target/universal/release
lipo -create $to_combine -o ./target/universal/release/libvcx.a
# echo "Copying iOS target folder into directory: $(abspath "${BUILD_CACHE}")"
# cp -rfp ./target ${BUILD_CACHE}
export OPENSSL_LIB_DIR=$OPENSSL_LIB_DIR_DARWIN
|
Artemkaaas/indy-sdk
|
vcx/libvcx/build_scripts/ios/mac/mac.06.libvcx.build.sh
|
Shell
|
apache-2.0
| 2,909 |
#!/bin/sh
./server.py -p 8092 -d config &
|
ekivemark/BlueButtonDev
|
oidc/pyoidc-master/oidc_example/op2/start.sh
|
Shell
|
apache-2.0
| 42 |
#!/bin/bash
# Copyright 2015 Insight Data Science
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
. ~/.profile
MASTER_IP=$1
NUM_WORKERS=$2
MEMINFO=($(free -m | sed -n '2p' | sed -e "s/[[:space:]]\+/ /g"))
TOTMEM=${MEMINFO[1]}
TASKMANAGER_HEAP=$(printf "%.0f" $(echo "0.90 * ( $TOTMEM - 1000 )" | awk '{print $1*($4-$6)}'))
TASK_SLOTS=$(nproc)
PARALLELISM=$(echo "$TASK_SLOTS * $NUM_WORKERS" | awk '{print $1*$3}')
TMP_DIRS=/var/flink/tmp
sudo mkdir -p $TMP_DIRS
sudo chown -R ubuntu $TMP_DIRS
cp ${HADOOP_HOME}/share/hadoop/tools/lib/aws-java-sdk-*.jar ${FLINK_HOME}/lib
cp ${HADOOP_HOME}/share/hadoop/tools/lib/hadoop-aws-*.jar ${FLINK_HOME}/lib
cp ${HADOOP_HOME}/share/hadoop/tools/lib/httpclient-*.jar ${FLINK_HOME}/lib
cp ${HADOOP_HOME}/share/hadoop/tools/lib/httpcore-*.jar ${FLINK_HOME}/lib
sed -i "[email protected]: [email protected]: $MASTER_IP@g" $FLINK_HOME/conf/flink-conf.yaml
sed -i "[email protected]: [email protected]: 1024@g" $FLINK_HOME/conf/flink-conf.yaml
sed -i "[email protected]: [email protected]: $TASKMANAGER_HEAP@g" $FLINK_HOME/conf/flink-conf.yaml
sed -i "[email protected]: [email protected]: $TASK_SLOTS@g" $FLINK_HOME/conf/flink-conf.yaml
sed -i "[email protected]: [email protected]: $PARALLELISM@g" $FLINK_HOME/conf/flink-conf.yaml
sed -i "s@# taskmanager.tmp.dirs: /[email protected]: $TMP_DIRS@g" $FLINK_HOME/conf/flink-conf.yaml
sed -i "s@# fs.hdfs.hadoopconf: /path/to/hadoop/conf/@fs.hdfs.hadoopconf: $HADOOP_HOME/etc/hadoop@g" $FLINK_HOME/conf/flink-conf.yaml
|
InsightDataScience/pegasus
|
config/flink/setup_single.sh
|
Shell
|
apache-2.0
| 2,088 |
#!/bin/bash
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
declare -a algs=("SharedGlobal" "SharedMultiZone" "Local" "LocalWeighted" "LocalOpt" "LocalShared" "Original")
if [ $# -gt 0 ]
then file=$1
else file=./data/range-input.csv
fi
if [ ! -f $file ]
then echo "input file $file doesn't exist, please run ./hack/range-input-generator.py first"; exit
fi
for alg in ${algs[@]}; do
echo "Running $alg"
go run main.go -input=$file -alg=$alg -output=./data/$alg-range-output.csv
done
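# Example invocation (hypothetical input path; defaults to ./data/range-input.csv
# when no argument is given):
#   ./run-all.sh ./data/my-range-input.csv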
|
googleinterns/k8s-topology-simulator
|
run-all.sh
|
Shell
|
apache-2.0
| 1,023 |
#!/bin/bash
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
set -e
set -x
source tensorflow/tools/ci_build/release/common.sh
set_bazel_outdir
install_ubuntu_16_pip_deps pip3.5
pip3.7 install --upgrade auditwheel --user
install_bazelisk
python2.7 tensorflow/tools/ci_build/update_version.py --nightly
# Run configure.
export TF_NEED_GCP=1
export TF_NEED_HDFS=1
export TF_NEED_S3=1
export TF_NEED_CUDA=0
export CC_OPT_FLAGS='-mavx'
export PYTHON_BIN_PATH=$(which python3.5)
yes "" | "$PYTHON_BIN_PATH" configure.py
# Build the pip package
bazel build --config=opt --config=v2 \
--crosstool_top=//third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda10.1:toolchain \
tensorflow/tools/pip_package:build_pip_package
./bazel-bin/tensorflow/tools/pip_package/build_pip_package pip_pkg --cpu --nightly_flag
# Upload the built packages to pypi.
for WHL_PATH in $(ls pip_pkg/tf_nightly_cpu-*dev*.whl); do
WHL_DIR=$(dirname "${WHL_PATH}")
WHL_BASE_NAME=$(basename "${WHL_PATH}")
AUDITED_WHL_NAME="${WHL_DIR}"/$(echo "${WHL_BASE_NAME//linux/manylinux2010}")
auditwheel repair --plat manylinux2010_x86_64 -w "${WHL_DIR}" "${WHL_PATH}"
# test the whl pip package
chmod +x tensorflow/tools/ci_build/builds/nightly_release_smoke_test.sh
./tensorflow/tools/ci_build/builds/nightly_release_smoke_test.sh ${AUDITED_WHL_NAME}
RETVAL=$?
# Upload the PIP package if whl test passes.
if [ ${RETVAL} -eq 0 ]; then
echo "Basic PIP test PASSED, Uploading package: ${AUDITED_WHL_NAME}"
twine upload -r pypi-warehouse "${AUDITED_WHL_NAME}" || echo
else
echo "Basic PIP test FAILED, will not upload ${AUDITED_WHL_NAME} package"
exit 1
fi
done
|
jhseu/tensorflow
|
tensorflow/tools/ci_build/release/ubuntu_16/cpu_py35_full/nightly_release.sh
|
Shell
|
apache-2.0
| 2,333 |
#!/bin/bash
jflex --nobak -d "$(dirname $0)/../src/org/dflow/compiler/parser/" dflow.l
bison -o "$(dirname $0)/../src/org/dflow/compiler/parser/ApplicationParser.java" application.y
bison -o "$(dirname $0)/../src/org/dflow/compiler/parser/DataModelParser.java" datamodel.y
|
leonardo-fernandes/dflow
|
org.dflow.compiler/src-parsers/generate-parsers.sh
|
Shell
|
apache-2.0
| 274 |
#!/bin/bash
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -eo pipefail
if [[ -z "${PROJECT_ROOT:-}" ]]; then
PROJECT_ROOT="github/python-appengine-admin"
fi
cd "${PROJECT_ROOT}"
# Disable buffering, so that the logs stream through.
export PYTHONUNBUFFERED=1
# Debug: show build environment
env | grep KOKORO
# Setup service account credentials.
export GOOGLE_APPLICATION_CREDENTIALS=${KOKORO_GFILE_DIR}/service-account.json
# Setup project id.
export PROJECT_ID=$(cat "${KOKORO_GFILE_DIR}/project-id.json")
# Remove old nox
python3 -m pip uninstall --yes --quiet nox-automation
# Install nox
python3 -m pip install --upgrade --quiet nox
python3 -m nox --version
# If this is a continuous build, send the test log to the FlakyBot.
# See https://github.com/googleapis/repo-automation-bots/tree/main/packages/flakybot.
if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"continuous"* ]]; then
cleanup() {
chmod +x $KOKORO_GFILE_DIR/linux_amd64/flakybot
$KOKORO_GFILE_DIR/linux_amd64/flakybot
}
trap cleanup EXIT HUP
fi
# If NOX_SESSION is set, it only runs the specified session,
# otherwise run all the sessions.
if [[ -n "${NOX_SESSION:-}" ]]; then
python3 -m nox -s ${NOX_SESSION:-}
else
python3 -m nox
fi
|
googleapis/python-appengine-admin
|
.kokoro/build.sh
|
Shell
|
apache-2.0
| 1,765 |
#!/usr/bin/env bash
#===----------------------------------------------------------------------===#
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2019 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
#===----------------------------------------------------------------------===#
# SYNOPSIS
# format-diff.sh FILE [OPTION]...
#
# DESCRIPTION
# Runs the formatter and displays a side-by-side diff of the original file
# and the formatted results. The script will use `colordiff` for the output
# if it is present; otherwise, regular `diff` will be used.
#
# The first argument to this script must be the `.swift` source file to be
# formatted. Any remaining arguments after that will be passed directly to
# `swift-format`.
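# EXAMPLE
#     Scripts/format-diff.sh Sources/Example.swift
#     (hypothetical file path; any extra arguments are passed to swift-format as-is)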
set -euo pipefail
SRCFILE="$1" ; shift
# Use `colordiff` if it's present; otherwise, fall back to `diff`.
if which colordiff >/dev/null ; then
DIFF="$(which colordiff)"
else
DIFF="$(which diff)"
fi
# Make sure the formatter is built in debug mode so we can reference the
# executable easily.
swift build --product swift-format
# Run a side-by-side diff with the original source file on the left and the
# formatted output on the right.
"$DIFF" -y -W 210 "$SRCFILE" <(.build/debug/swift-format "$@" "$SRCFILE")
|
apple/swift-format
|
Scripts/format-diff.sh
|
Shell
|
apache-2.0
| 1,502 |
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
STARTTIME=$(date +%s)
OS_ROOT=$(dirname "${BASH_SOURCE}")/..
source "${OS_ROOT}/hack/lib/init.sh"
os::log::install_errexit
EXAMPLES=examples
OUTPUT_PARENT=${OUTPUT_ROOT:-$OS_ROOT}
pushd ${OS_ROOT}/Godeps/_workspace > /dev/null
godep_path=$(pwd)
pushd src/github.com/jteeuwen/go-bindata > /dev/null
GOPATH=$godep_path go install ./...
popd > /dev/null
popd > /dev/null
pushd "${OS_ROOT}" > /dev/null
Godeps/_workspace/bin/go-bindata -nocompress -nometadata -prefix "bootstrap" -pkg "bootstrap" \
-o "${OUTPUT_PARENT}/pkg/bootstrap/bindata.go" -ignore "README.md" \
${EXAMPLES}/image-streams/... \
${EXAMPLES}/db-templates/... \
${EXAMPLES}/jenkins/pipeline/... \
${EXAMPLES}/quickstarts/...
popd > /dev/null
ret=$?; ENDTIME=$(date +%s); echo "$0 took $(($ENDTIME - $STARTTIME)) seconds"; exit "$ret"
|
sgallagher/origin
|
hack/gen-bootstrap-bindata.sh
|
Shell
|
apache-2.0
| 1,050 |
#!/bin/sh
wget https://downloads.raspberrypi.org/raspios_lite_armhf/images/raspios_lite_armhf-2021-03-25/2021-03-04-raspios-buster-armhf-lite.zip
|
javier-ruiz-b/docker-rasppi-images
|
rpi-image-builder/download-raspios.sh
|
Shell
|
apache-2.0
| 145 |
#!/bin/bash
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This shell script is used to build a cluster and create a namespace from our
# argo workflow
set -o errexit
set -o nounset
set -o pipefail
CLUSTER_NAME="${CLUSTER_NAME}"
REGION="${AWS_REGION:-us-west-2}"
REGISTRY="${ECR_REGISTRY:-public.ecr.aws/j1r0q0g6/training/training-operator}"
VERSION="${PULL_BASE_SHA}"
GO_DIR=${GOPATH}/src/github.com/${REPO_OWNER}/${REPO_NAME}
echo "Configuring kubeconfig.."
aws eks update-kubeconfig --region=${REGION} --name=${CLUSTER_NAME}
echo "Update Training Operator manifest with new name $REGISTRY and tag $VERSION"
cd manifests/overlays/standalone
kustomize edit set image public.ecr.aws/j1r0q0g6/training/training-operator=${REGISTRY}:${VERSION}
echo "Installing Training Operator manifests"
kustomize build . | kubectl apply -f -
TIMEOUT=30
until kubectl get pods -n kubeflow | grep tf-job-operator | grep 1/1 || [[ $TIMEOUT -eq 1 ]]; do
sleep 10
TIMEOUT=$((TIMEOUT - 1))
done
kubectl describe all -n kubeflow
kubectl describe pods -n kubeflow
|
kubeflow/training-operator
|
scripts/setup-tf-operator.sh
|
Shell
|
apache-2.0
| 1,595 |
#!/bin/bash
set -e
# Sends JSON message '{"command":"function", "arg1": "Hello world"}' to user '1234' with namespace 'custom-api'.
# Returns false if no user '1234' is connected. Returns true otherwise.
curl -X POST -d 'user=1234&event=custom-api&message={"command":"function", "arg1": "Hello world"}' http://localhost:8000/messenger/send_json
|
EducationalTestingService/halef-messenger
|
examples/http-post.sh
|
Shell
|
apache-2.0
| 347 |
#!/bin/bash
# Copyright 2016 Port Direct
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
echo "${OS_DISTRO}: Setting environment Vars For Keystone"
################################################################################
. /etc/os-container.env
. /opt/harbor/service-hosts.sh
. /opt/harbor/harbor-common.sh
. /opt/harbor/magnum/vars.sh
################################################################################
check_required_vars OS_DOMAIN \
AUTH_MAGNUM_KEYSTONE_REGION \
AUTH_KEYSTONE_ADMIN_PROJECT \
AUTH_KEYSTONE_ADMIN_USER \
AUTH_KEYSTONE_ADMIN_PASSWORD \
KEYSTONE_API_SERVICE_HOST_SVC
################################################################################
unset OS_AUTH_URL
unset OS_REGION_NAME
unset OS_CACERT
unset OS_IDENTITY_API_VERSION
unset OS_PROJECT_NAME
unset OS_PROJECT_DOMAIN_NAME
unset OS_DOMAIN_NAME
unset OS_PASSWORD
unset OS_USERNAME
unset OS_USER_DOMAIN_NAME
################################################################################
export OS_AUTH_URL="https://${KEYSTONE_API_SERVICE_HOST_SVC}:5000/v3"
export OS_REGION_NAME="${AUTH_MAGNUM_KEYSTONE_REGION}"
export OS_CACERT="${MAGNUM_DB_CA}"
export OS_IDENTITY_API_VERSION="3"
export OS_PROJECT_NAME="${AUTH_KEYSTONE_ADMIN_PROJECT}"
export OS_PROJECT_DOMAIN_NAME="default"
export OS_DOMAIN_NAME="default"
export OS_PASSWORD="${AUTH_KEYSTONE_ADMIN_PASSWORD}"
export OS_USERNAME="${AUTH_KEYSTONE_ADMIN_USER}"
export OS_USER_DOMAIN_NAME="default"
echo "${OS_DISTRO}: testing ${OS_USERNAME}:${OS_PROJECT_NAME}@${OS_DOMAIN_NAME}"
################################################################################
openstack token issue
|
portdirect/harbor
|
docker/openstack/openstack-magnum/common-assets/opt/harbor/magnum/manage/env-keystone-admin-auth.sh
|
Shell
|
apache-2.0
| 2,247 |
#!/bin/bash
#
# controller-build - Build the SDN controller
#
# Copyright (C) 2016 HCL Technologies
#
# Author: Paolo Rovelli <[email protected]>
#
if [ $# -ne 1 ]; then
echo "Usage: controller-build <config.json>"
exit 0
fi
. $(dirname $(readlink -f ${0}))/../utils/spinner-utils.sh
CONTROLLER_URL=$(jq -r '.["controller-odl"]'.url ${1})
CONTROLLER_IMAGE=$(jq -r '.["controller-odl"]'.image ${1})
CONTROLLER_VERSION=$(jq -r '.["controller-odl"]'.version ${1})
CONTROLLER_REPO=${CONTROLLER_URL}/${CONTROLLER_IMAGE}
CONTROLLER_DIR=${CONTROLLER_IMAGE}-${CONTROLLER_VERSION}
CONTROLLER_FILE=${CONTROLLER_IMAGE}-${CONTROLLER_VERSION}.tar.gz
CONTROLLER_PATH=$(pwd)/.controllers
# Download the SDN controller distribution
mkdir -p ${CONTROLLER_PATH}
if [ ! -e ${CONTROLLER_PATH}/${CONTROLLER_FILE} ]; then
spinner_exec "Download the SDN controller: " \
curl -L -o ${CONTROLLER_PATH}/${CONTROLLER_FILE} \
${CONTROLLER_REPO}/${CONTROLLER_VERSION}/${CONTROLLER_FILE}
rc=$?
if [ $rc -ne 0 ]; then
exit $rc
fi
fi
# Install the SDN controller distribution
rm -fr ${CONTROLLER_PATH}/${CONTROLLER_DIR}
spinner_exec "Extract the SDN controller: " \
tar zxf ${CONTROLLER_PATH}/${CONTROLLER_FILE} -C ${CONTROLLER_PATH}
rc=$?
if [ $rc -ne 0 ]; then
exit $rc
fi
# Install SDN boot features
spinner_exec "Install default SDN features: " \
sed -i 's/config,standard,region,package,kar,ssh,management/config,standard,region,package,kar,ssh,management,odl-netconf-connector-all,odl-restconf,odl-mdsal-apidocs,odl-dlux-all/g' \
${CONTROLLER_PATH}/${CONTROLLER_DIR}/etc/org.apache.karaf.features.cfg
|
demx8as6/CENTENNIAL
|
02-MWTN-PoC/test/test-env/controller-odl/controller-build.sh
|
Shell
|
apache-2.0
| 1,637 |
#!/bin/bash
### Builds and runs the TolaActivity Docker environment ###
cd /home/TolaActivity
git stash
git pull origin master
docker-compose build
docker-compose up
|
mercycorps/TolaActivity
|
docker.sh
|
Shell
|
apache-2.0
| 146 |
#! /usr/bin/env bash
#
# This script builds all the YAMLs that Knative container-freezer publishes.
# What it does may vary between branches, but the
# following usage must be observed:
#
# generate-yamls.sh <repo-root-dir> <generated-yaml-list>
# repo-root-dir the root directory of the repository.
# generated-yaml-list an output file that will contain the list of all
# YAML files. The first file listed must be our
# manifest that contains all images to be tagged.
# Different versions of our scripts should be able to call this script under
# that assumption, so that the test/publishing/tagging steps can evolve
# independently of how the YAMLs are built.
# The following environment variables affect the behavior of this script:
# * `$KO_FLAGS` Any extra flags that will be passed to ko.
# * `$YAML_OUTPUT_DIR` Where to put the generated YAML files, otherwise a
# random temporary directory will be created. **All existing YAML files in
# this directory will be deleted.**
# * `$KO_DOCKER_REPO` If not set, use ko.local as the registry.
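# Example invocation (hypothetical arguments, matching the usage described above):
#   ./hack/generate-yamls.sh "$(pwd)" "$(mktemp)"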
set -o errexit
set -o pipefail
readonly YAML_REPO_ROOT=${1:?"First argument must be the repo root dir"}
readonly YAML_LIST_FILE=${2:?"Second argument must be the output file"}
readonly YAML_ENV_FILE=${3:-$(mktemp)}
# Set output directory
if [[ -z "${YAML_OUTPUT_DIR:-}" ]]; then
readonly YAML_OUTPUT_DIR="$(mktemp -d)"
fi
rm -fr ${YAML_OUTPUT_DIR}/*.yaml
# Generated Knative component YAML files
readonly CONTAINER_FREEZER_YAML=${YAML_OUTPUT_DIR}/container-freezer.yaml
# Flags for all ko commands
KO_YAML_FLAGS="-P"
KO_FLAGS="${KO_FLAGS:-}"
[[ "${KO_DOCKER_REPO}" != gcr.io/* ]] && KO_YAML_FLAGS=""
if [[ "${KO_FLAGS}" != *"--platform"* ]]; then
KO_YAML_FLAGS="${KO_YAML_FLAGS} --platform=all"
fi
readonly KO_YAML_FLAGS="${KO_YAML_FLAGS} ${KO_FLAGS}"
if [[ -n "${TAG:-}" ]]; then
LABEL_YAML_CMD=(sed -e "s|serving.knative.dev/release: devel|serving.knative.dev/release: \"${TAG}\"|" -e "s|app.kubernetes.io/version: devel|app.kubernetes.io/version: \"${TAG:1}\"|")
else
LABEL_YAML_CMD=(cat)
fi
: ${KO_DOCKER_REPO:="ko.local"}
export KO_DOCKER_REPO
cd "${YAML_REPO_ROOT}"
echo "Building Knative Container-Freezer"
ko resolve ${KO_YAML_FLAGS} -f config/ | "${LABEL_YAML_CMD[@]}" > "${CONTAINER_FREEZER_YAML}"
echo "All manifests generated"
|
knative-sandbox/container-freezer
|
hack/generate-yamls.sh
|
Shell
|
apache-2.0
| 2,400 |
#!/usr/bin/env bash
# Copyright 2013, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author [email protected]
# chkconfig: 2345 20 20
# Description: Build and Rebuild a virtual environment
### BEGIN INIT INFO
# Provides:
# Required-Start: $remote_fs $network $syslog
# Required-Stop: $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Rackspace Appliance init script
# Description: Build and Rebuild a virtual environment
### END INIT INFO
# Set HOME
export HOME="/root"
# Set the Path
export PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
# Set the location of the script
SCRIPT_DIR='/opt/vm-rebuilder'
# Set the systems IP ADDRESS
SYS_IP=$(/opt/vm-rebuilder/getip.py)
PUB_IP=$(/opt/vm-rebuilder/getip.py eth0)
# What is the Name of this Script, and what are we starting
PROGRAM="VM_REBUILDER At: ${SYS_IP}"
# ==============================================================================
# DO NOT EDIT THIS AREA UNLESS YOU KNOW WHAT YOU ARE DOING
# ==============================================================================
set -e
function sys_ip_check() {
if [ ! "${SYS_IP}" ];then
cat > /etc/motd <<EOF
THIS INSTALLATION HAS FAILED!
The system does not seem to have "a valid network device"
Please check your VM's Settings and try again.
EOF
error_exit "No Network Device Found."
fi
}
# Kill all the Openstack things
function os_kill() {
set +e
# General Services
SERVICES="cinder glance nova keystone ceilometer heat apache httpd"
# Stop Service
for service in ${SERVICES}; do
find /etc/init.d/ -name "*${service}*" -exec {} stop \;
done
set -e
}
# Reset nova endpoints
function reset_nova_endpoint() {
set +e
echo "Resetting Nova Endpoints"
# Load the Openstack Credentials
MYSQLCRD="/root/.my.cnf"
USERNAME="$(awk -F'=' '/user/ {print $2}' ${MYSQLCRD})"
PASSWORD="$(awk -F'=' '/password/ {print $2}' ${MYSQLCRD})"
NUKECMD="delete from endpoint where region=\"RegionOne\";"
mysql -u "${USERNAME}" -p"${PASSWORD}" -o keystone -e "${NUKECMD}"
set -e
}
# Reconfigure RabbitMQ
function reset_rabbitmq() {
set +e
echo "Resetting RabbitMQ"
# Replace IP address for Rabbit
sed "s/NODE_IP_ADDRESS=.*/NODE_IP_ADDRESS=\"\"/" /etc/rabbitmq/rabbitmq-env.conf > /tmp/rabbitmq-env.conf2
mv /tmp/rabbitmq-env.conf2 /etc/rabbitmq/rabbitmq-env.conf
set -e
}
# Stop Rabbit MQ
function rabbitmq_kill() {
set +e
# Replace IP address for Rabbit
echo "Stopping RabbitMQ"
service rabbitmq-server stop
set -e
}
# Stop and then Start RabbitMQ
function restart_rabbitmq(){
set +e
service rabbitmq-server stop
sleep 2
service rabbitmq-server start
set -e
}
# Set MOTD with new information
function reset_motd() {
cp /etc/motd.old /etc/motd
echo "Resetting MOTD"
# Change the Horizon URL in the MOTD
HORIZON="https:\/\/${SYS_IP}:443"
sed "s/Horizon URL is.*/Horizon URL is : ${HORIZON}/" /etc/motd > /etc/motd2
mv /etc/motd2 /etc/motd
CHEF_SERVER="https:\/\/${SYS_IP}:4000"
sed "s/Chef Server URL is.*/Chef Server URL is : ${CHEF_SERVER}/" /etc/motd > /etc/motd2
mv /etc/motd2 /etc/motd
}
# Rebuild Knife
function reset_knife_rb() {
echo "Resetting Knife"
# Create Chef Dir if not found
if [ ! -d "/root/.chef" ];then
mkdir -p /root/.chef
fi
# Set knife.rb
cat > /root/.chef/knife.rb <<EOF
log_level :info
log_location STDOUT
node_name 'admin'
client_key '/etc/chef-server/admin.pem'
validation_client_name 'chef-validator'
validation_key '/etc/chef-server/chef-validator.pem'
chef_server_url "https://${SYS_IP}:4000"
cache_options( :path => '/root/.chef/checksums' )
cookbook_path [ '/opt/allinoneinone/chef-cookbooks/cookbooks' ]
EOF
}
# Graceful Shutdown of ChefServer
function chef_kill() {
set +e
retryerator chef-server-ctl graceful-kill
rm /etc/chef-server/chef-server-running.json
rm /etc/chef-server/chef-server-secrets.json
rm /var/chef/cache/remote_file/*.json
set -e
}
# Reconfigure Chef Server and client.rb
function reset_chef_server() {
echo "Resetting Chef Server"
cat > /etc/chef/client.rb <<EOF
log_level :auto
log_location STDOUT
chef_server_url "https://${SYS_IP}:4000"
validation_client_name "chef-validator"
EOF
cat > /etc/chef-server/chef-server.rb <<EOF
erchef['s3_url_ttl'] = 3600
nginx["ssl_port"] = 4000
nginx["non_ssl_port"] = 4080
nginx["enable_non_ssl"] = true
rabbitmq["enable"] = false
rabbitmq["password"] = "Passw0rd"
chef_server_webui['web_ui_admin_default_password'] = "Passw0rd"
bookshelf['url'] = "https://${SYS_IP}:4000"
EOF
# Reconfigure Chef-server
retryerator chef-server-ctl reconfigure
sleep 2
set +e
retryerator chef-server-ctl restart
set -e
echo "Resting Post Chef Restart"
sleep 10
}
# Rebuild Chef Environment
function reset_chef_env() {
echo "Resetting Chef Environment"
# Munge the Base JSON Environment
ORIG_JSON="${SCRIPT_DIR}/base.json"
NEW_ENV=$(${SCRIPT_DIR}/env-rebuilder.py ${ORIG_JSON})
# Overwrite the OLD Environment with a NEW environment
retryerator knife environment from file ${NEW_ENV}
sleep 5
}
# Retry a command until it succeeds, up to MAX_RETRIES attempts
function retryerator() {
set +v
set +e
MAX_RETRIES=${MAX_RETRIES:-5}
RETRY=0
# Set the initial return value to failure
false
while [ $? -ne 0 -a ${RETRY} -lt ${MAX_RETRIES} ];do
# Begin Cooking
RETRY=$((${RETRY}+1))
$@
done
if [ ${RETRY} -eq ${MAX_RETRIES} ];then
cat > /etc/motd<<EOF
THIS INSTALLATION HAS FAILED!
Please reinstall/import the OVA.
You can also run: touch /opt/first.boot
Then reboot to reattempt another deployment.
EOF
error_exit "Hit maximum number of retries (${MAX_RETRIES}), giving up..."
fi
set -v
set -e
}
function chef_rebuild_group() {
reset_knife_rb
reset_chef_server
}
# Package For Distribution
function package_prep() {
echo "Performing package prep"
ORIG_JSON="${SCRIPT_DIR}/base.json"
NEW_ENV=$(${SCRIPT_DIR}/env-rebuilder.py ${ORIG_JSON} "override")
# Overwrite the OLD Environment with BASE environment
if [ -f "/opt/last_user.ip.lock" ];then
rm /opt/last_user.ip.lock
fi
if [ -f "/opt/last_public.ip.lock" ];then
rm /opt/last_public.ip.lock
fi
# Overwrite the OLD Environment with a NEW environment
retryerator knife environment from file ${NEW_ENV}
# Nuke our history
echo '' | tee /root/.bash_history
history -c
sync
}
# Clear all of the cache things we can find
function clear_cache() {
apt-get clean
}
# Start Everything
function start_vm() {
start_swap
sys_ip_check
reset_nova_endpoint
reset_rabbitmq
restart_rabbitmq
chef_rebuild_group
sleep 5
reset_chef_env
echo "Running chef-client"
retryerator chef-client
reset_motd
}
# Disable Swap
function start_swap() {
# Enable swap from script
if [ -f "/opt/swap.sh" ];then
/opt/swap.sh
fi
# Enable all the swaps
swapon -a
}
# Fill all remaining Disk with Zero's
function zero_fill() {
echo "Performing A Zero Fill"
set +e
pushd /tmp
cat /dev/zero > zero.fill
sync
sleep 1
rm -f zero.fill
sync
sleep 1
popd
set -e
sync
sleep 1
}
# Truncate the contents of our net rules
function udev_truncate() {
cat > /etc/udev/rules.d/70-persistent-net.rules<<EOF
# Net Device Rules
EOF
}
# Truncate the root bash history
function root_history() {
if [ -f "/root/.bash_history" ];then
echo '' | tee /root/.bash_history
fi
sync
}
# Stop the VM services
function stop_vm() {
reset_rabbitmq
rabbitmq_kill
echo "Last System IP address was: \"$SYS_IP\"" | tee /opt/last_user.ip.lock
echo "Last Public IP address was: \"$PUB_IP\"" | tee /opt/last_public.ip.lock
# Flush all of the routes on the system
ip route flush all
sync
}
# Perform all packaging operations
function package_vm() {
reset_nova_endpoint
touch /opt/first.boot
SYS_IP="127.0.0.1"
package_prep
retryerator chef-client
clear_cache
os_kill
chef_rebuild_group
reset_rabbitmq
chef_kill
rabbitmq_kill
stop_swap
zero_fill
udev_truncate
root_history
touch /forcefsck
shutdown_server
}
# Stop Swap
function stop_swap() {
SWAPFILE="/tmp/SwapFile"
echo "Stopping Swap"
swapoff -a
sleep 2
if [ -f "${SWAPFILE}" ];then
echo "Removing Swap File."
rm ${SWAPFILE}
fi
}
# System Stop
function shutdown_server() {
shutdown -P now
}
# Check before Rebuilding
function rebuild_check() {
echo "Checking the environment."
set +e
SYS="$(grep -w \"${SYS_IP}\" /opt/last_user.ip.lock)"
PUB="$(grep -w \"${PUB_IP}\" /opt/last_public.ip.lock)"
set -e
if [ -f "/opt/first.boot" ];then
echo "Warming up for first boot process..."
rm /opt/first.boot
elif [ ! -z "${SYS}" ] && [ ! -z "${PUB}" ];then
echo "No System Changes Detected, Continuing with Regular Boot..."
exit 0
else
echo "Changes detected Recooking the box..."
fi
}
# ==============================================================================
# DO NOT EDIT THIS AREA UNLESS YOU KNOW WHAT YOU ARE DOING
# ==============================================================================
# Service functions
case "$1" in
start)
clear
echo "${PROGRAM} is Initializing..."
reset_motd
rebuild_check
start_vm
;;
stop)
echo "${PROGRAM} is Shutting Down..."
stop_vm
stop_swap
udev_truncate
;;
restart)
echo "${PROGRAM} is Restarting..."
stop_vm
rebuild_check
start_vm
;;
os-kill)
os_kill
;;
force-rebuild)
start_vm
;;
nuke-endpoints)
reset_nova_endpoint
;;
package-instance)
package_vm
;;
*)
USAGE="{start|stop|restart|os-kill|force-rebuild|nuke-endpoints|package-instance}"
echo "Usage: $0 ${USAGE}" >&2
exit 1
;;
esac
|
cloudnull/rcbops_virt_builder
|
rebuild-env.sh
|
Shell
|
apache-2.0
| 10,414 |
#create a fork, clone it and set its upstream value
#1. get github authentication token:
# https://github.com/settings/applications#personal-access-tokens
# Click generate personal toke
token=""
##############################
# Creates fork of taulia repo
# Clones repo source code in current folder/repoName
# Configs upstream to taulia repo
#
# reponame - name of taulia's repository
# username - your github username
# token - your github authentication token (PERSONAL)
#
##############################
forkRepo(){
reponame=$1
username="lenikirilov-taulia"
#token=$3
#2. Create a fork under your own user
curl -X POST -u $token:x-oauth-basic https://api.github.com/repos/taulia/$reponame/forks
#3. Clone locally
git clone [email protected]:$username/$reponame.git
#4. Setup upstream to the default branch in the repo
cd $reponame
git remote add upstream [email protected]:taulia/$reponame.git
}
#5. Setup git sync alias
# Allows you to sync your develop or master branch with taulia's equivalent
# and makes a push to your fork so that it's in sync with the taulia repo
#git config --global alias.sync 'pull; push origin'
#6. Example
#forkRepo "pod-con" "lenikirilov-taulia" $token
|
leni-kirilov/scripts
|
bash/github_fork.bash
|
Shell
|
apache-2.0
| 1,212 |
#!/bin/sh
JDBC_IMPORTER_HOME=/data-importer
bin=$JDBC_IMPORTER_HOME/bin
lib=$JDBC_IMPORTER_HOME/lib
echo '{
"type" : "jdbc",
"jdbc" : {
"schedule" : "0 0/2 * * * ?",
"statefile" : "statefile.json",
"url" : "jdbc:jtds:sqlserver://dbserver:1433;databasename=dbname;",
"user" : "myuser",
"password" : "find-out",
"sql" :[
{
"statement" : "select *, id as _id from Logtable where TimeStamp > ?",
"parameter" : [ "$metrics.lastexecutionstart" ]
}
],
"metrics" : {
"lastexecutionend" : "2014-07-06T09:08:06.076Z",
"counter" : "1",
"lastexecutionstart" : "2014-07-06T09:08:00.948Z"
},
"index" : "vexierelog3.2",
"type" : "auditlog",
"elasticsearch" : {
"cluster" : "elasticsearch",
"host" : "myeshost",
"port" : 9300
}
}
}' | java \
-cp "${lib}/*" \
-Dlog4j.configurationFile=${bin}/log4j2.xml \
org.xbib.tools.Runner \
org.xbib.tools.JDBCImporter
|
vinayakbhadage/data-importer
|
bin/dataimport.sh
|
Shell
|
apache-2.0
| 1,118 |
#!/bin/bash
NET_OC_CONF_DOC="https://raw.githubusercontent.com/fanyueciyuan/eazy-for-ss/master/ocservauto"
rm -f ocservauto.sh
wget -c --no-check-certificate http://git.io/p9r8 -O ocservauto.sh
[ ! -d /etc/ocserv ] && mkdir /etc/ocserv
cd /etc/ocserv
[ -f /etc/init.d/ocserv ] && rm -f /etc/init.d/ocserv
[ -f ocserv-up.sh ] && rm -f ocserv-up.sh
[ -f ocserv-down.sh ] && rm -f ocserv-down.sh
wget -c --no-check-certificate $NET_OC_CONF_DOC/ocserv -O /etc/init.d/ocserv
chmod 755 /etc/init.d/ocserv
pgrep systemd-journal > /dev/null 2>&1 && systemctl daemon-reload > /dev/null 2>&1
wget -c --no-check-certificate $NET_OC_CONF_DOC/ocserv-up.sh
chmod +x ocserv-up.sh
wget -c --no-check-certificate $NET_OC_CONF_DOC/ocserv-down.sh
chmod +x ocserv-down.sh
|
satifanie/anyconnect
|
eazy-for-ss-master/ocservauto/ocservautofordebian.sh
|
Shell
|
apache-2.0
| 757 |
#!/usr/local/bin/bash
#
# Destroy an existing VM and its ZVOL
# See the README.md file for general documentation
#
# This script takes one argument:
# - the VM name, with an optional path prefix
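#
# Example (hypothetical VM path, not taken from this repo):
#   ./vbox-vm-destroy.sh devel/test-vm
# would unregister the VirtualBox VM "test-vm" and destroy the ZVOLs under
# tank/vm/disks/devel/test-vm.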
### Constants
# The ZFS mount point where the ZVOLs are stored
zvol_path='tank/vm/disks/'
# The ZFS pool where the ZVOLs are stored
zfs_pool='tank/vm/disks/'
# VirtualBox VM root
vbox_root='/tank/vm/'
# TODO: add the possibility to use other snapshot names
curr='Clean'
prev='Previous'
### User supplied variables
# $1: the full VM path, it can take subdirectories under $zvol_path
if [ -z $1 ]; then
echo -e "\033[31mPlease provide a VM name/path to destroy.\033[0m"
exit 1
fi
VM_path=$1
VM=${VM_path/*\//}
### Checks
# Check VM exists and has ZVOL disks
VBoxManage showvminfo $VM > /dev/null
if [ $? -ne 0 ]; then
echo -e "\033[31mThe VM \033[38;5;12m$VM\033[0;31m doesn't seem to be existing.\033[0m"
exit 1
fi
zfs list $zfs_pool$VM_path > /dev/null
if [ $? -ne 0 ]; then
echo -e "\033[31mThe VM \033[38;5;12m$VM\033[0;31m doesn't seem to have a ZVOL disk at \033[38;5;12m$VM_path\033[0m"
exit 1
fi
### Ask for confirmation
#
echo -e "We're going to delete \033[38;5;12m$VM\033[0m and destroy all of its ZVOL under \033[38;5;12m$VM_path\033[0m. Here is a simulation:"
zfs destroy -rnv $zfs_pool$VM_path
echo -e "\033[1mIs that ok?\033[0m"
unset ANSWER
read -p "(y/N) " -n 1 ANSWER
echo
if [[ "${ANSWER:=n}" == "n" || "${ANSWER:=N}" == "N" ]]; then
echo "Ok, I quit"
exit
fi
### Take action
# Delete VM
VBoxManage unregistervm $VM --delete
if [ $? -ne 0 ]; then
echo -e "\033[31mThere was an error trying to delete the VM \033[38;5;12m$VM\033[0m."
echo -e "It's better I stop here."
exit 1
fi
# Remove remaining directory if any
rmdir $vbox_root$VM_path 2>/dev/null
# Destroy ZVOL and associated snapshots
zfs destroy -rv $zfs_pool$VM_path
if [ $? -eq 0 ]; then
echo -e "\033[38;5;12m${VM}\033[0m was deleted and its disk is destroyed."
else
echo -e "\033[31mSomething went wrong trying to destroy \033[38;5;12m$zfs_pool$VM_path\033[0m"
echo -e "But the VM \033[38;5;12m$VM\033[0;31m is deleted."
echo -e "Check the error messages above."
fi
|
tonin/vbox-helpers
|
vbox-vm-destroy.sh
|
Shell
|
apache-2.0
| 2,206 |
#!/bin/bash
# Copyright 2015 Backstop Solutions Group, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# =======================================================================
#
# this horrific thing checks to make sure that Puppetfile and site-modules
# are in sync
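# For reference, the Puppetfile entries it looks for have roughly this shape
# (the module name is illustrative):
#   site_module 'ntp'
# and each such entry should have a matching site-modules/ntp directory on disk.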
ERROR=0
# find modules that are imported via Puppetfile but not on disk
while read sitemodule
do
if [ ! -d "site-modules/${sitemodule}" ]
then
echo "Yo, ${sitemodule} is still in Puppetfile, but not on disk" >&2
ERROR=1
fi
done < <( grep site_module Puppetfile | awk -F\' '{print $2}' )
while read diskmodule
do
realname=$( echo "${diskmodule}" | sed 's/^site-modules\///g' )
egrep -q "site_module\s+'${realname}'" Puppetfile
if [[ $? -gt 0 ]] ; then
echo "Yo, ${diskmodule} is on disk but not in Puppetfile"
ERROR=1
fi
done < <( find site-modules -mindepth 1 -maxdepth 1 -type d )
exit $ERROR
|
BillWeiss/2015PuppetCampMaterials
|
precommit/40_site-modules-Puppetfile.sh
|
Shell
|
apache-2.0
| 1,428 |
#!/bin/bash
DATA_PATH=$1
[ $# -lt 1 ] && DATA_PATH=fio_file
CWD=$(pwd)
trap "wait && exit" SIGINT SIGTERM
# Gather system snapshot
[ ! -e linux_summary ] && git clone https://github.com/jschaub30/linux_summary
cd linux_summary
git pull
./linux_summary.sh
HOST=$(hostname -s)
SNAPSHOT=$CWD/linux_summary/${HOST}.html
mv index.html $SNAPSHOT
cd $CWD
CONFIG_FILE=config.job
SIZE=4G
# Setup the output run directory
rm -f last
RUNDIR=rundir/$(date +"%Y%m%d-%H%M%S")
mkdir -p $RUNDIR
ln -sf $RUNDIR last
cp analyze.R $RUNDIR/.
cp tidy.sh $RUNDIR/.
cp csv2html.sh $RUNDIR/.
cp $SNAPSHOT $RUNDIR/.
cp $DATA_PATH $CONFIG_FILE $RUNDIR/.
for QD in 1 4 16 64 256
do
echo ======= QD=$QD =======
#./run_once.sh OLTP1 4k $SIZE randrw 60 40 $QD $DATA_PATH $CONFIG_FILE
#./run_once.sh OLTP2 8k $SIZE randrw 90 10 $QD $DATA_PATH $CONFIG_FILE
#random read/write mix with cache hits; send the same offset 2 times before generating a random offset
#./run_once.sh OLTP3 4k $SIZE randrw:2 70 30 $QD $DATA_PATH $CONFIG_FILE
./run_once.sh "Random read IOPS" 4k $SIZE randread 100 0 $QD $DATA_PATH $CONFIG_FILE
./run_once.sh "Random write IOPS" 4k $SIZE randwrite 0 100 $QD $DATA_PATH $CONFIG_FILE
./run_once.sh "Random r50/w50 IOPS" 4k $SIZE randrw 50 50 $QD $DATA_PATH $CONFIG_FILE
./run_once.sh "Read BW" 256k $SIZE rw 100 0 $QD $DATA_PATH $CONFIG_FILE
./run_once.sh "Write BW" 256k $SIZE rw 0 100 $QD $DATA_PATH $CONFIG_FILE
./run_once.sh "r50/w50 BW" 256k $SIZE rw 50 50 $QD $DATA_PATH $CONFIG_FILE
done
mv FIO_OUT* $RUNDIR/.
cd $RUNDIR
./tidy.sh
./analyze.R
./csv2html.sh output.csv > output.html
./csv2html.sh output.G.csv > output.G.html
[ $? -ne 0 ] && echo ****** Problem generating image files ******
cd $CWD
cat template.html | perl -pe "s/TAG_HOSTNAME/$HOST.html/" > $RUNDIR/index.html
IP=$(hostname -I | cut -d' ' -f1)
echo
echo "#### PID MONITOR ####: All data saved to $RUNDIR"
echo "#### PID MONITOR ####: View the html output using the following command:"
echo "#### PID MONITOR ####: $ cd $RUNDIR"
echo "#### PID MONITOR ####: $ python -m SimpleHTTPServer 12345"
echo "#### PID MONITOR ####: Then navigate to http://${IP}:12345"
|
jschaub30/fio_scripts
|
collect.sh
|
Shell
|
apache-2.0
| 2,155 |
#!/bin/bash
if [ "$#" -lt 3 ];
then
echo "Usage: <nworkers> <nthreads> <path_in_HDFS>"
exit -1
fi
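# Example (illustrative values): ./dmlc.sh 4 2 /user/me/sf-crimes
# launches 4 workers with 2 threads each and stages the data under /user/me/sf-crimes in HDFS.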
# put the data into HDFS
hadoop fs -mkdir $3/data
hadoop fs -put ../data/clean_train.txt.train $3/data
hadoop fs -put ../data/clean_train.txt.test $3/data
# TODO: remember to move the DMLC path into a variable (or compute it)
# launch rabit, passing the address in HDFS
../../dmlc-core/tracker/dmlc_yarn.py -n $1 --vcores $2 ../../xgboost.dmlc sf-crimes.conf nthread=$2\
data=hdfs://$3/data/clean_train.txt.train\
eval[test]=hdfs://$3/data/clean_train.txt.test\
model_out=hdfs://$3/sf-crimes.final.model
# fetch the model
hadoop fs -get $3/sf-crimes.final.model final.model
# TODO: run wormhole_configurer
# output the prediction; the task is set in the config (TODO: echo the task)
../../repo/dmlc-core/yarn/run_hdfs_prog.py ../../bin/xgboost.dmlc sf-crimes.conf task=pred model_in=final.model test:data=../data/clean_train.txt.test
# dump the boosters of model final.model into dump.raw.txt
../../repo/dmlc-core/yarn/run_hdfs_prog.py ../../bin/xgboost.dmlc sf-crimes.conf task=dump model_in=final.model name_dump=dump.raw.txt
# visualize the features
../../repo/dmlc-core/yarn/run_hdfs_prog.py ../../bin/xgboost.dmlc sf-crimes.conf task=dump model_in=final.model fmap=../data/featmap.txt name_dump=dump.nice.txt
# ta-da!
cat dump.nice.txt
|
SammyVimes/san_francisco_crimes
|
hadoop/dmlc.sh
|
Shell
|
apache-2.0
| 1,495 |
# -----------------------------------------------------------------------------
#
# Package : github.com/containerd/continuity
# Version : v0.0.0-20200228182428-0f16d7a0959c
# Source repo : https://github.com/containerd/continuity
# Tested on : RHEL 8.3
# Script License: Apache License, Version 2 or later
# Maintainer : BulkPackageSearch Automation <[email protected]>
#
# Disclaimer: This script has been tested in root mode on given
# ========== platform using the mentioned version of the package.
# It may not work as expected with newer versions of the
# package and/or distribution. In such case, please
# contact "Maintainer" of this script.
#
# ----------------------------------------------------------------------------
PACKAGE_NAME=github.com/containerd/continuity
PACKAGE_VERSION=v0.0.0-20200228182428-0f16d7a0959c
PACKAGE_URL=https://github.com/containerd/continuity
yum -y update && yum install -y nodejs nodejs-devel nodejs-packaging npm python38 python38-devel ncurses git jq wget gcc-c++
wget https://golang.org/dl/go1.16.1.linux-ppc64le.tar.gz && tar -C /bin -xf go1.16.1.linux-ppc64le.tar.gz && mkdir -p /home/tester/go/src /home/tester/go/bin /home/tester/go/pkg
export PATH=$PATH:/bin/go/bin
export GOPATH=/home/tester/go
OS_NAME=`python3 -c "os_file_data=open('/etc/os-release').readlines();os_info = [i.replace('PRETTY_NAME=','').strip() for i in os_file_data if i.startswith('PRETTY_NAME')];print(os_info[0])"`
export PATH=$GOPATH/bin:$PATH
export GO111MODULE=on
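# Overall flow (summarized from the functions below): the main body first tries
# "go get -d -u -t $PACKAGE_NAME@$PACKAGE_VERSION"; on failure it retries without
# the -u flag, and then falls back to the master-branch helpers. In every case the
# result is recorded under /home/tester/output.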
function test_with_master_without_flag_u(){
echo "Building $PACKAGE_PATH with master branch"
export GO111MODULE=auto
if ! go get -d -t $PACKAGE_NAME; then
echo "------------------$PACKAGE_NAME:install_fails-------------------------------------"
echo "$PACKAGE_VERSION $PACKAGE_NAME" > /home/tester/output/install_fails
echo "$PACKAGE_NAME | $PACKAGE_VERSION | master | $OS_NAME | GitHub | Fail | Install_Fails" > /home/tester/output/version_tracker
exit 0
else
cd $(ls -d $GOPATH/pkg/mod/$PACKAGE_NAME*)
echo "Testing $PACKAGE_PATH with master branch without flag -u"
# Ensure go.mod file exists
go mod init
if ! go test ./...; then
echo "------------------$PACKAGE_NAME:install_success_but_test_fails---------------------"
echo "$PACKAGE_VERSION $PACKAGE_NAME" > /home/tester/output/test_fails
echo "$PACKAGE_NAME | $PACKAGE_VERSION | master | $OS_NAME | GitHub | Fail | Install_success_but_test_Fails" > /home/tester/output/version_tracker
exit 0
else
echo "------------------$PACKAGE_NAME:install_&_test_both_success-------------------------"
echo "$PACKAGE_VERSION $PACKAGE_NAME" > /home/tester/output/test_success
echo "$PACKAGE_NAME | $PACKAGE_VERSION | master | $OS_NAME | GitHub | Pass | Both_Install_and_Test_Success" > /home/tester/output/version_tracker
exit 0
fi
fi
}
function test_with_master(){
echo "Building $PACKAGE_PATH with master"
export GO111MODULE=auto
if ! go get -d -u -t $PACKAGE_NAME@$PACKAGE_VERSION; then
test_with_master_without_flag_u
exit 0
fi
cd $(ls -d $GOPATH/pkg/mod/$PACKAGE_NAME*)
echo "Testing $PACKAGE_PATH with $PACKAGE_VERSION"
# Ensure go.mod file exists
go mod init
if ! go test ./...; then
test_with_master_without_flag_u
exit 0
else
echo "------------------$PACKAGE_NAME:install_&_test_both_success-------------------------"
echo "$PACKAGE_VERSION $PACKAGE_NAME" > /home/tester/output/test_success
echo "$PACKAGE_NAME | $PACKAGE_VERSION | $PACKAGE_VERSION | $OS_NAME | GitHub | Pass | Both_Install_and_Test_Success" > /home/tester/output/version_tracker
exit 0
fi
}
function test_without_flag_u(){
echo "Building $PACKAGE_PATH with $PACKAGE_VERSION and without -u flag"
if ! go get -d -t $PACKAGE_NAME@$PACKAGE_VERSION; then
test_with_master
exit 0
fi
cd $(ls -d $GOPATH/pkg/mod/$PACKAGE_NAME*)
echo "Testing $PACKAGE_PATH with $PACKAGE_VERSION"
# Ensure go.mod file exists
go mod init
if ! go test ./...; then
test_with_master
exit 0
else
echo "------------------$PACKAGE_NAME:install_&_test_both_success-------------------------"
echo "$PACKAGE_VERSION $PACKAGE_NAME" > /home/tester/output/test_success
echo "$PACKAGE_NAME | $PACKAGE_VERSION | $PACKAGE_VERSION | $OS_NAME | GitHub | Pass | Both_Install_and_Test_Success" > /home/tester/output/version_tracker
exit 0
fi
}
echo "Building $PACKAGE_PATH with $PACKAGE_VERSION"
if ! go get -d -u -t $PACKAGE_NAME@$PACKAGE_VERSION; then
test_without_flag_u
exit 0
fi
cd $(ls -d $GOPATH/pkg/mod/$PACKAGE_NAME*)
echo "Testing $PACKAGE_PATH with $PACKAGE_VERSION"
# Ensure go.mod file exists
go mod init
if ! go test ./...; then
test_with_master
exit 0
else
echo "------------------$PACKAGE_NAME:install_&_test_both_success-------------------------"
echo "$PACKAGE_VERSION $PACKAGE_NAME" > /home/tester/output/test_success
echo "$PACKAGE_NAME | $PACKAGE_VERSION | $PACKAGE_VERSION | $OS_NAME | GitHub | Pass | Both_Install_and_Test_Success" > /home/tester/output/version_tracker
exit 0
fi
|
ppc64le/build-scripts
|
g/github.com__containerd__continuity/github.com__containerd__continuity_rhel_8.3.sh
|
Shell
|
apache-2.0
| 5,141 |
#!/bin/bash -x
echo "##################### EXECUTE: kurento_ci_container_dnat_hook_handler #####################"
# Path information
BASEPATH="$(cd -P -- "$(dirname -- "$0")" && pwd -P)" # Absolute canonical path
PATH="${BASEPATH}:${PATH}"
exec >> hook.log
exec 2>&1
echo "Arguments: $*"
event=$1
container=$2
echo "Event:|$event| Container:|$container|"
if [ $event = 'start' ]; then
# Check if this container has been started by our job
name=$(docker inspect -f '{{.Name}}' $container)
if [[ ! ${name:1} == ${BUILD_TAG}* ]]; then
echo "It's not my container"
exit 0
fi
docker inspect $container
inspect=$(docker inspect -f '{{.Config.Labels.KurentoDnat}}' $container)
if [[ ! $inspect == 'true' ]]; then
echo "It's not a dnat container. Skip."
exit 0
fi
echo "[$container] **** Starting container $name with dnat label. Preparing dnat."
#Check ip
ip=$(docker inspect -f '{{.Config.Labels.IpAddress}}' $container)
# Check transport
transport=$(docker inspect -f '{{.Config.Labels.Transport}}' $container)
docker_pid=$(docker inspect -f '{{.State.Pid}}' $container)
echo $docker_pid > $container.id
echo "[$container] >>>> Calling dnat script"
sudo "${BASEPATH}/kurento_ci_container_dnat.sh" $container $event $docker_pid $transport $ip >> dnat2.log
fi
if [ $event = 'stop' ]; then
echo "[$container] ++++ Stopping container $name with id $container"
fi
if [ $event = 'destroy' ]; then
echo "[$container] ---- Destroying container $name with id $container"
if [ -f $container.id ]; then
echo "Container with dnat found. Deleting dnat rules."
docker_pid=$(cat $container.id)
echo "Calling dnat script"
sudo "${BASEPATH}/kurento_ci_container_dnat.sh" $container $event $docker_pid >> dnat2destroy.log
else
echo "Container not found. Ignoring."
fi
fi
if [ $event == 'die' ]; then
echo "[$container] ???? Dying container $name with id $container"
fi
|
Kurento/adm-scripts
|
kurento_ci_container_dnat_hook_handler.sh
|
Shell
|
apache-2.0
| 1,943 |
function install_runurl {
if [ ! -a /usr/bin/runurl ]; then
wget -qO/usr/bin/runurl run.alestic.com/runurl
chmod 755 /usr/bin/runurl
fi
}
|
ekoontz/whirr
|
core/src/main/resources/functions/install_runurl.sh
|
Shell
|
apache-2.0
| 150 |
#! /bin/bash
# shellcheck disable=SC2206
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
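# PROJECT_ID must be set to the target GCP/Apigee X project before running,
# e.g. (the project name is illustrative):
#   export PROJECT_ID=my-gcp-project && ./setup_bigquery.sh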
echo
echo Using Apigee X project \""$PROJECT_ID"\" and dataset bqml
bq --project_id="$PROJECT_ID" --location=us mk --dataset "$PROJECT_ID":bqml
bq --project_id="$PROJECT_ID" mk --table "$PROJECT_ID":bqml.prod_recommendations userId:STRING,itemId:STRING,predicted_session_duration_confidence:FLOAT
bq --project_id="$PROJECT_ID" load --autodetect --replace --source_format=NEWLINE_DELIMITED_JSON "$PROJECT_ID":bqml.prod_recommendations ./prod_recommendations_json.txt
bq --project_id="$PROJECT_ID" query --nouse_legacy_sql \
"SELECT * FROM \`$PROJECT_ID.bqml.prod_recommendations\` AS A" \
ORDER BY A.userId ASC, predicted_session_duration_confidence DESC
|
apigee/devrel
|
references/product-recommendations/setup_bigquery.sh
|
Shell
|
apache-2.0
| 1,280 |
#!/usr/bin/env bash
###############################################################################
# Copyright 2020 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
# Fail on first error.
set -e
CURR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd -P)"
. ${CURR_DIR}/installer_base.sh
apt_get_update_and_install \
git \
vim \
silversearcher-ag
# More:
# lrzsz
bash ${CURR_DIR}/install_bosfs.sh
# Clean up cache to reduce layer size.
apt-get clean && \
rm -rf /var/lib/apt/lists/*
|
jinghaomiao/apollo
|
docker/build/installers/install_release_deps.sh
|
Shell
|
apache-2.0
| 1,120 |
#!/bin/bash
#
# Push the current fuzzilli image (built from the Docker/ directory) to the GCE docker registry.
#
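# config.sh is expected to define PROJECT_ID and CONTAINER_NAME, e.g. (values
# are illustrative, not taken from this repo):
#   PROJECT_ID=my-gce-project
#   CONTAINER_NAME=fuzzilli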
set -e
source config.sh
docker tag fuzzilli gcr.io/$PROJECT_ID/$CONTAINER_NAME
docker push gcr.io/$PROJECT_ID/$CONTAINER_NAME
|
googleprojectzero/fuzzilli
|
Cloud/GCE/push.sh
|
Shell
|
apache-2.0
| 242 |
#!/bin/sh
##########################################################################
# If not stated otherwise in this file or this component's Licenses.txt
# file the following copyright and licenses apply:
#
# Copyright 2016 RDK Management
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
#
. /etc/include.properties
. /etc/device.properties
. $RDK_PATH/utils.sh
# exit if an instance is already running
if [ ! -f /etc/os-release ];then
if [ ! -f /tmp/.log-rotate-daemon.pid ];then
# store the PID
echo $$ > /tmp/.log-rotate-daemon.pid
else
pid=`cat /tmp/.log-rotate-daemon.pid`
if [ -d /proc/$pid ];then
exit 0
fi
fi
fi
pidCleanup()
{
# PID file cleanup
if [ -f /tmp/.log-rotate-daemon.pid ];then
rm -rf /tmp/.log-rotate-daemon.pid
fi
}
# Disable the log rotate in stand by mode
if [ -f $RAMDISK_PATH/.standby ]; then
if [ ! -f /etc/os-release ];then pidCleanup; fi
exit 0
fi
# Set the log rotate property file
propertyFile="/etc/logRotate.properties"
if [ "$BUILD_TYPE" != "prod" ]; then
if [ -f $PERSISTENT_PATH/logRotate.properties ]; then
propertyFile="$PERSISTENT_PATH/logRotate.properties"
fi
fi
# include the log rotate property file
. $propertyFile
if [ "$logRotateEnable" ] && [ "$logRotateEnable" != "true" ]; then
echo "Log Rotate Disabled"
if [ ! -f /etc/os-release ];then pidCleanup; fi
exit 0
fi
# Verify the log rotate flag
if [ ! -f /etc/os-release ]; then
if [ ! -f $RAMDISK_PATH/.normalRebootFlag ]; then
echo "Log Rotate Disabled"
if [ ! -f /etc/os-release ];then pidCleanup; fi
exit 0
fi
fi
backupRotatedFile()
{
dirName=$1
fileName=$2
. /etc/dcm.properties
# Get Mac Address of eSTB
MAC=`getMacAddressOnly`
count=`cat /tmp/.rotateCount`
extn="$count.tgz"
Name="_Rotate"
file=$MAC$Name$extn
tar -zcvf ${dirName}/$file ${dirName}/${fileName}.${COUNT}
tftp -b 8192 -p -r $file -l ${dirName}/$file $LOG_SERVER
sleep 3
echo "Rotating the file: $file & uploading" >> /opt/logs/messages.txt
rm -rf ${dirName}/$file
count=`expr $count + 1`
echo $count > /tmp/.rotateCount
}
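# logrotate <dir> <file> <count> <size>:
# if <file> in <dir> is larger than <size> bytes, shift the numbered segments
# (<file>.N-1 -> <file>.N, keeping at most <count>), copy the live file to
# <file>.1 and truncate it.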
logrotate()
{
ret=0
DIRNAME=$1
FILENAME=$2
COUNT=$3
SIZE=$4
#FILESIZE=`du -ks ${DIRNAME}/${FILENAME} | cut -f1`
FILESIZE=`ls -l ${DIRNAME}/${FILENAME} | awk -F" " '{print $5}'`
file="${DIRNAME}/${FILENAME}.${COUNT}"
#if [ -f $file ]; then
# backupRotatedFile $DIRNAME $FILENAME
#fi
cd $DIRNAME
if [ $FILESIZE -gt $SIZE ];
then
i=$COUNT
while [ $i -ge 2 ]
do
j=`expr $i - 1`
segment2=${FILENAME}.${i}
segment1=${FILENAME}.${j}
if [ -f ${DIRNAME}/${segment1} ]
then
mv $segment1 $segment2
fi
i=`expr $i - 1`
done
cp ${FILENAME} ${FILENAME}."1"
cat /dev/null > ${DIRNAME}/${FILENAME}
fi
}
logRotateFramework()
{
logFile=$1
rotationCount=$2
rotationSize=$3
if [ "$logFile" != "" ] && [ -f "$logFile" ] ; then
log=`basename $logFile`
else
echo "Log File: $logFile not exist..!"
return 0
fi
if [ ! -s $logFile ]; then echo "$logFile is empty"; return 0; fi
if [ $rotationCount -eq 0 ] || [ $rotationSize -eq 0 ] ; then
echo "RotateCount:$rotationCount,RotateSize:$rotationSize, No rotation needed"; return 0;
fi
# rotate the file
logrotate $logFileBase $log $rotationCount $rotationSize
}
if [ ! -h $pumalog ]; then
logRotateFramework $pumalog $pumalogRotatCount $pumalogRotatSize
fi
echo "Triggering logrotate ..."
if [ "$DEVICE_TYPE" != "mediaclient" ]; then
logRotateFramework $ecmLog $ecmLogRotatCount $ecmLogRotatSize
logRotateFramework $vodLog $vodLogRotateCount $vodLogRotateSize
logRotateFramework $podLog $podLogRotateCount $podLogRotateSize
logRotateFramework $mainMONLog $mainMONLogRotatCount $mainMONLogRotatSize
logRotateFramework $receiverMONLog $receiverMONLogRotatCount $receiverMONLogRotatSize
logRotateFramework $mfrLog $mfrLogRotateCount $mfrLogRotateSize
logRotateFramework $snmpdLog $snmpdLogRotateCount $snmpdLogRotateSize
logRotateFramework $dibblerLog $dibblerLogRotateCount $dibblerLogRotateSize
logRotateFramework $upstreamStatsLog $upstreamStatsLogRotateCount $upstreamStatsLogRotateSize
logRotateFramework $xiRecoveryLog $xiRecoveryLogRotateCount $xiRecoveryLogRotateSize
else
logRotateFramework $wifiTelemetryLog $wifiTelemetryLogRotateCount $wifiTelemetryLogRotateSize
logRotateFramework $tr69AgentLog $tr69AgentHttpLogRotateCount $tr69AgentHttpLogRotateSize
logRotateFramework $tr69AgentHttpLog $tr69AgentHttpLogRotateCount $tr69AgentHttpLogRotateSize
logRotateFramework $tr69AgentSoapLog $tr69AgentSoapLogRotateCount $tr69AgentSoapLogRotateSize
logRotateFramework $xiConnectionStatusLog $xiConnectionStatusLogRotateCount $xiConnectionStatusLogRotateSize
fi
if [ "$DEVICE_TYPE" == "XHC1" ];then
logRotateFramework $streamsrvLog $streamsrvLogRotateCount $streamsrvLogRotateSize
logRotateFramework $stunnelHttpsLog $stunnelHttpsLogRotateCount $stunnelHttpsLogRotateSize
logRotateFramework $upnpLog $upnpLogRotateCount $upnpLogRotateSize
logRotateFramework $upnpigdLog $upnpigdLogRotateCount $upnpigdLogRotateSize
logRotateFramework $cgiLog $cgiLogRotateCount $cgiLogRotateSize
logRotateFramework $systemLog $systemLogRotateCount $systemLogRotateSize
logRotateFramework $eventLog $eventLogRotateCount $eventLogRotateSize
logRotateFramework $xw3MonitorLog $xw3MonitorLogRotateCount $xw3MonitorLogRotateSize
logRotateFramework $sensorDLog $sensorDLogRotateCount $sensorDLogRotateSize
logRotateFramework $webpaLog $webpaLogRotateCount $webpaLogRotateSize
logRotateFramework $userLog $userLogRotateCount $userLogRotateSize
logRotateFramework $webrtcStreamingLog $webrtcStreamingLogRotateCount $webrtcStreamingLogRotateSize
logRotateFramework $cvrPollLog $cvrPollLogRotateCount $cvrPollLogRotateSize
logRotateFramework $thumbnailUploadLog $thumbnailUploadLogRotateCount $thumbnailUploadLogRotateSize
logRotateFramework $metricsLog $metricsLogRotateCount $metricsLogRotateSize
logRotateFramework $wifiLog $wifiLogRotateCount $wifiLogRotateSize
logRotateFramework $netsrvLog $netsrvmgrLogRotateCount $netsrvmgrLogRotateSize
logRotateFramework $dropbearLog $dropbearLogRotateCount $dropbearLogRotateSize
logRotateFramework $dcmLog $dcmLogRotateCount $dcmLogRotateSize
logRotateFramework $applnLog $applnLogRotateCount $applnLogRotateSize
logRotateFramework $diskStatusLog $diskStatusLogRotateCount $diskStatusLogRotateSize
logRotateFramework $rfcLog $rfcLogRotateCount $rfcLogRotateSize
logRotateFramework $overlayLog $overlayLogRotateCount $overlayLogRotateSize
logRotateFramework $sysLog $sysLogRotatCount $sysLogRotatSize
logRotateFramework $sysDmesgLog $sysLogRotatCount $sysLogRotatSize
logRotateFramework $xvisionLog $xvisionLogRotateCount $xvisionLogRotateSize
logRotateFramework $ivaDaemonLog $ivaDaemonLogRotateCount $ivaDaemonLogRotateSize
logRotateFramework $evoLog $evoLogRotateCount $evoLogRotateSize
else
logRotateFramework $receiverLog $receiverLogRotatCount $receiverLogRotatSize
logRotateFramework $applnLog $applnLogRotateCount $applnLogRotateSize
logRotateFramework $rmfLog $rmfLogRotateCount $rmfLogRotateSize
logRotateFramework $runXreLog $runXreLogRotatCount $runXreLogRotatSize
logRotateFramework $sysLog $sysLogRotatCount $sysLogRotatSize
logRotateFramework $ntpLog $ntpLogRotateCount $ntpLogRotateSize
logRotateFramework $fusionDaleLog $fusionDaleLogRotateCount $fusionDaleLogRotateSize
logRotateFramework $xDiscoveryLog $xdisRotateCount $xdisRotateSize
logRotateFramework $xDiscoveryLogList $xdisRotateCount $xdisRotateSize
logRotateFramework $lighttpdErrorLog $lighttpdRotateCount $lighttpdRotateSize
logRotateFramework $lighttpdAccessLog $lighttpdRotateCount $lighttpdRotateSize
logRotateFramework $dcmLog $dcmLogRotateCount $dcmLogRotateSize
logRotateFramework $fdsLog $fdsRotateCount $fdsRotateSize
logRotateFramework $uimngrFile $uimngrRotateCount $uimngrRotateSize
logRotateFramework $storagemgrLog $storagemgrLogRotateCount $storagemgrLogRotateSize
logRotateFramework $rf4ceFile $rf4ceRotateCount $rf4ceRotateSize
logRotateFramework $trmLog $trmRotateCount $trmRotateSize
logRotateFramework $trmMgrLog $trmRotateCount $trmRotateSize
logRotateFramework $xDeviceLog $xDeviceRotateCount $xDeviceRotateSize
logRotateFramework $socProvLog $socProvRotateCount $socProvRotateSize
logRotateFramework $socProvCryptoLog $socProvRotateCount $socProvRotateSize
logRotateFramework $vlThreadLog $vlThreadRotateCount $vlThreadRotateSize
logRotateFramework $mocaStatusLog $mocaStatRotateCount $mocaStatRotateSize
logRotateFramework $snmp2jsonLog $snmp2jsonLogRotateCount $snmp2jsonLogRotateSize
logRotateFramework $decoderStatusLog $decoderStatusLogRotateCount $decoderStatusLogRotateSize
logRotateFramework $mfrLog $mfrLogRotateCount $mfrLogRotateSize
logRotateFramework $sysDmesgLog $sysLogRotatCount $sysLogRotatSize
logRotateFramework $cefLog $cefLogRotatCount $cefLogRotatSize
logRotateFramework $cecLog $cecLogRotateCount $cecLogRotateSize
logRotateFramework $diskStatusLog $diskStatusLogRotateCount $diskStatusLogRotateSize
logRotateFramework $systemLog $systemLogRotateCount $systemLogRotateSize
logRotateFramework $netsrvLog $netsrvmgrLogRotateCount $netsrvmgrLogRotateSize
logRotateFramework $samhainLog $samhainLogRotateCount $samhainLogRotateSize
logRotateFramework $fogLog $fogLogRotateCount $fogLogRotateSize
logRotateFramework $hddStatusLog $hddStatusLogRotateCount $hddStatusLogRotateSize
logRotateFramework $dropbearLog $dropbearLogRotateCount $dropbearLogRotateSize
logRotateFramework $mountLog $mountLogRotateCount $mountLogRotateSize
logRotateFramework $rbiDaemonLogLog $rbiDaemonLogRotateCount $rbiDaemonLogRotateSize
logRotateFramework $rfcLog $rfcLogRotateCount $rfcLogRotateSize
logRotateFramework $tlsLog $tlsLogRotateCount $tlsLogRotateSize
logRotateFramework $playreadycdmiLog $playreadycdmiLogRotateCount $playreadycdmiLogRotateSize
logRotateFramework $pingTelemetryLog $pingTelemetryLogRotateCount $pingTelemetryLogRotateSize
logRotateFramework $dnsmasqLog $dnsmasqLogRotateCount $dnsmasqLogRotateSize
logRotateFramework $zramLog $zramLogRotateCount $zramLogRotateSize
fi
if [ "$WIFI_SUPPORT" == "true" ];then
logRotateFramework $wpaSupplicantLog $wpaSupplicantLogRotateCount $wpaSupplicantLogRotateSize
fi
if [ "$CONTAINER_SUPPORT" == "true" ];then
logRotateFramework $lxcxreLog $lxcxreLogRotateCount $lxcxreLogRotateSize
logRotateFramework $lxcxreAppLog $lxcxreAppLogRotateCount $lxcxreAppLogRotateSize
fi
if [ "$SOC" = "BRCM" ];then
logRotateFramework $nxSvrLog $nxSvrLogRotateCount $nxSvrLogRotateSize
fi
trap "" 13
if [ "$DEVICE_TYPE" != "mediaclient" ]; then
if [ -f ${riLog} ]; then
if [ "$BUILD_TYPE" = "dev" ] && [ "$HDD_ENABLED" = "false" ]; then
logrotate $logFileBase `basename $riLog` 4 $riLogRotateSize
else
logrotate $logFileBase `basename $riLog` $riLogRotateCount $riLogRotateSize
maxFile=`find /opt/logs -type f | grep -v -E 'PreviousLogs|pcap|logbackup|[0-9]$|\.tgz$|\.gz$' | xargs ls -S | head -n 1`
if [ -f $maxFile ]; then
size=`stat $maxFile | grep Size: | cut -d ":" -f2 | awk '{print $1}'`
if [ $size -gt 22020096 ]; then
if [ ! -s $maxFile ]; then
echo "$maxFile is empty"
else
cp $maxFile $maxFile.1
cat /dev/null > $maxFile
fi
fi
fi
fi
fi
fi
if [ "$HDD_ENABLED" = "false" ]; then
maxFile=`find /opt/logs -type f | grep -v PreviousLogs | xargs ls -S | grep -v -E '[0-9]$|\.tgz$|\.gz$' | grep -v pcap | head -n 1 | grep -v -E "txt.\*|.log.\*"`
if [ -f $maxFile ]; then
size=`stat $maxFile | grep Size: | cut -d ":" -f2 | awk '{print $1}'`
if [ $size -gt 2097152 ]; then
if [ ! -s $maxFile ]; then
echo "$maxFile is empty"
else
mv $maxFile $maxFile.1
cat /dev/null > $maxFile
fi
fi
fi
fi
if [ ! -f /etc/os-release ];then
#Workaround to create core_log.txt with restricted user privileges
#when Linux multi-user support is enabled
if [ "$ENABLE_MULTI_USER" == "true" ] ; then
if [ "$BUILD_TYPE" == "prod" ] ; then
touch /opt/logs/core_log.txt
chown restricteduser:restrictedgroup /opt/logs/core_log.txt
else
if [ ! -f /opt/disable_chrootXREJail ]; then
touch /opt/logs/core_log.txt
chown restricteduser:restrictedgroup /opt/logs/core_log.txt
fi
fi
fi
fi
if [ ! -f /etc/os-release ];then pidCleanup; fi
exit 0
|
rdkcmf/rdk-sysint
|
lib/rdk/logRotateDaemon.sh
|
Shell
|
apache-2.0
| 13,919 |
#!/usr/bin/env bash
#
# Copyright 2009-2019 OpenEstate.org.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
MSGFMT="msgfmt"
MSGFMT_PARAMS=""
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
#cd "$DIR/src"
#
# Compile global translations.
#
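# Each i18n/<lang>.po file below is compiled to src/locale/<lang>.mo; for
# example, a hypothetical i18n/de.po would produce src/locale/de.mo.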
rm -f "$DIR/src/locale/*.mo"
for i in $(ls "$DIR/i18n"); do
path="$DIR/i18n/$i"
if [ ! -f $path ]; then
continue
fi
suffix="${path##*.}"
if [ "$suffix" != "po" ]; then
continue
fi
lang="$(basename ${path%%.*})"
#echo "$lang"
mkdir -p "$DIR/src/locale"
"$MSGFMT" -o "$DIR/src/locale/$lang.mo" --language="$lang" ${MSGFMT_PARAMS} "$path"
done
#
# Compile separate translations for each theme.
#
for i in $(ls -d ${DIR}/i18n/*/); do
themePath=${i%%/}
themeName=$(basename ${themePath})
rm -f "$DIR/src/themes/$themeName/locale/*.mo"
for j in $(ls "$themePath"); do
path="$themePath/$j"
#echo "$path"
if [ ! -f $path ]; then
continue
fi
suffix="${path##*.}"
if [ "$suffix" != "po" ]; then
continue
fi
lang="$(basename ${path%%.*})"
#echo "$lang"
mkdir -p "$DIR/src/themes/$themeName/locale"
"$MSGFMT" -o "$DIR/src/themes/$themeName/locale/$lang.mo" --language="$lang" ${MSGFMT_PARAMS} "$path"
done
done
|
OpenEstate/OpenEstate-PHP-Export
|
i18n-compile.sh
|
Shell
|
apache-2.0
| 1,848 |
#!/bin/bash
readonly ROOT="${ROOT:-.}"
readonly FILENAME=${FILENAME:-$(basename $(pwd))}
readonly TARGET_DIR=$(mktemp -d)
readonly TARGET="${TARGET_DIR}/${FILENAME}.zip"
zip -r "${TARGET}" "${ROOT}" -x bin/\* -x ./target/\* -x .project -x .settings/\* -x .classpath > /dev/null
if [ "${?}" -eq 0 ]; then
echo "Project compressed:"
du -hs "${TARGET}"
else
echo "Compress failed."
fi
|
rpelisse/weld-se-log-issue
|
src/main/bash/compress-project.sh
|
Shell
|
apache-2.0
| 391 |
#!/bin/bash
nTop=$1
for protFile in $( ls sparseData2/protInfo_expr*.csv ); do
# echo - $protFile -:
cat $protFile.desc
echo -n ";"
python3.5 ../../getAUC.py $protFile $nTop
done
|
ameenetemady/DeepPep
|
gatherResults.sh
|
Shell
|
apache-2.0
| 182 |
#!/bin/bash
set -e
ROOT="tests"
for f in $ROOT/*.sysl; do
f=`basename $f`
$GOPATH/bin/sysl -v pb --mode textpb --root $ROOT -o $ROOT/$f.out.txt /$f
done;
rm $ROOT/*.out.txt
$GOPATH/bin/sysl -v sd -a 'Project' $ROOT/sequence_diagram_project.sysl
rm _.png
$GOPATH/bin/sysl -v sd -s 'WebFrontend <- RequestProfile' -o sd.png $ROOT/sequence_diagram_project.sysl
rm sd.png
$GOPATH/bin/sysl -v ints -j 'Project' $ROOT/integration_test.sysl
rm _.png
version=`$GOPATH/bin/sysl --version 2>&1 >/dev/null`
if [[ ${version} = "unspecified" ]]; then
echo "version is unspecified"
exit 1
fi
echo "gosysl version is ${version}"
$GOPATH/bin/sysl info
|
ANZ-bank/Sysl
|
scripts/test-gosysl.sh
|
Shell
|
apache-2.0
| 653 |
#!/bin/bash
# Copyright 2021 The IREE Authors
#
# Licensed under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
# The version of tbb installed on manylinux2014 is too old to support the
# parallel STL libraries on the installed GCC9-based toolchain. Further,
# Intel *broke* compatibility starting in 2021 for GCC<=10.
# To make matters worse, the prior 2020 versions did not have cmake or
# install support.
# Shame on you Intel.
# See: https://community.intel.com/t5/Intel-oneAPI-Threading-Building/tbb-task-has-not-been-declared/m-p/1254418
# Since this is unlikely to be helpful outside of the old centos systems
# that manylinux2014 is based on (newer ones are based on Debian),
# we just tailor this specifically for docker images of that distro.
# You can test this with either an official manylinux2014 docker image or
# our special one (which is really only special in that it includes bazel):
# docker run --rm -it -v $(pwd):/work stellaraccident/manylinux2014_x86_64-bazel-4.2.2:latest /bin/bash
set -e
mkdir -p /tmp/libtbb_build
cd /tmp/libtbb_build
curl -o tbbsrc.tgz -L https://github.com/oneapi-src/oneTBB/archive/refs/tags/v2020.3.tar.gz
tar xzf tbbsrc.tgz
cd oneTBB-*/
echo "****** BUILDING TBB ******"
make -j$(nproc)
cp -R include/* /usr/include
cp build/*_release/* /usr/lib64
echo "prefix=/usr
exec_prefix=\${prefix}
libdir=\${exec_prefix}/lib64
includedir=\${prefix}/include
Name: Threading Building Blocks
Description: Intel's parallelism library for C++
URL: http://www.threadingbuildingblocks.org/
Version:
Libs: -ltbb
Cflags:
" > /usr/lib64/pkgconfig/tbb.pc
echo "****** DONE BUILDING TBB ******"
cd /
rm -Rf /tmp/libtbb_build
|
google/iree
|
build_tools/github_actions/install_tbb_manylinux2014.sh
|
Shell
|
apache-2.0
| 1,774 |
#!/bin/bash
DATA_DIR=../data
BIN_DIR=../bin
SRC_DIR=../src
pushd ${SRC_DIR} && make; popd
sh ./create-lowercase-phrases-data.sh
set -x
$BIN_DIR/distance $DATA_DIR/lowercase-vectors-phrase.bin
|
dav/word2vec
|
scripts/demo-phrases.sh
|
Shell
|
apache-2.0
| 195 |
#!/bin/sh
case `uname -s` in
HP-UX)
OS="Unix";;
SunOS)
OS="Unix";;
IRIX*)
OS="Unix";;
Linux)
OS="Unix";;
*)
OS="Windows";;
esac
./make/${OS}/make.log_presentation.sh $*
#
# -- end:
#
|
kit-transue/software-emancipation-discover
|
log_presentation/make.log_presentation.sh
|
Shell
|
bsd-2-clause
| 203 |
# Aliases in this file are bash and zsh compatible
# Don't change. The following determines where YADR is installed.
yadr=$HOME/.yadr
# YADR support
alias yav='yadr vim-add-plugin'
alias ydv='yadr vim-delete-plugin'
alias ylv='yadr vim-list-plugin'
alias yup='yadr update-plugins'
alias yip='yadr init-plugins'
# PS
alias psa="ps aux"
alias psg="ps aux | grep "
alias psr='ps aux | grep ruby'
# Moving around
alias cdb='cd -'
# Show human friendly numbers and colors
alias df='df -h'
alias ll='ls -alGh'
alias ls='ls -Gh'
alias la='ls -aG'
alias du='du -h -d 2'
# show me files matching "ls grep"
alias lsg='la | grep'
# Alias Editing
alias ae='vim $yadr/zsh/aliases.zsh' #alias edit
alias ar='source $yadr/zsh/aliases.zsh' #alias reload
# vim using
mvim --version > /dev/null 2>&1
MACVIM_INSTALLED=$?
if [ $MACVIM_INSTALLED -eq 0 ]; then
: # no-op placeholder; uncomment the next line to map vim to MacVim
# alias vim="mvim -v"
fi
# vimrc editing
alias ve='vim ~/.vimrc'
# zsh profile editing
alias ze='vim ~/.zshrc'
alias zr='source ~/.zshrc'
# Git Aliases
alias gs='git status'
alias gstsh='git stash'
alias gst='git stash'
alias gsp='git stash pop'
alias gsa='git stash apply'
alias gsh='git show'
alias gshw='git show'
alias gshow='git show'
alias gi='vim .gitignore'
alias gcm='git ci -m'
alias gcim='git ci -m'
alias gci='git ci'
alias gco='git co'
alias gcp='git cp'
alias ga='git add -A'
alias guns='git unstage'
alias gunc='git uncommit'
alias gm='git merge'
alias gms='git merge --squash'
alias gam='git amend --reset-author'
alias grv='git remote -v'
alias grr='git remote rm'
alias grad='git remote add'
alias gr='git rebase'
alias gra='git rebase --abort'
alias ggrc='git rebase --continue'
alias gbi='git rebase --interactive'
alias gl='git l'
alias glg='git l'
alias glog='git l'
alias co='git co'
alias gf='git fetch'
alias gfch='git fetch'
alias gd='git diff'
alias gb='git b'
alias gbd='git b -D -w'
alias gdc='git diff --cached -w'
alias gpub='grb publish'
alias gtr='grb track'
alias gpl='git pull'
alias gplr='git pull --rebase'
alias gps='git push'
alias gpsh='git push'
alias gnb='git nb' # new branch aka checkout -b
alias grs='git reset'
alias grsh='git reset --hard'
alias gcln='git clean'
alias gclndf='git clean -df'
alias gclndfx='git clean -dfx'
alias gsm='git submodule'
alias gsmi='git submodule init'
alias gsmu='git submodule update'
alias gt='git t'
alias gbg='git bisect good'
alias gbb='git bisect bad'
# Common shell functions
alias less='less -r'
alias tf='tail -f'
alias l='less'
alias lh='ls -alt | head' # see the last modified files
alias screen='TERM=screen screen'
alias c='clear'
# Zippin
alias gz='tar -zcvf'
# Ruby
alias rc='rails c' # Rails 3
alias co='script/console --irb=pry' # Rails 2
alias ts='thin start'
alias ms='mongrel_rails start'
alias tfdl='tail -f log/development.log'
alias tftl='tail -f log/test.log'
alias ka9='killall -9'
alias k9='kill -9'
# Gem install
alias sgi='sudo gem install --no-ri --no-rdoc'
# TODOS
# This uses NValt (NotationalVelocity alt fork) - http://brettterpstra.com/project/nvalt/
# to find the note called 'todo'
alias todo='open nvalt://find/todo'
# Forward port 80 to 3000
alias portforward='sudo ipfw add 1000 forward 127.0.0.1,3000 ip from any to any 80 in'
alias rdm='rake db:migrate'
alias rdmr='rake db:migrate:redo'
# Zeus
alias zs='zeus server'
alias zc='zeus console'
# Rspec
alias rs='rspec spec'
alias sr='spring rspec'
alias srgm='spring rails g migration'
alias srdm='spring rake db:migrate'
alias srdt='spring rake db:migrate'
alias srdmt='spring rake db:migrate db:test:prepare'
# Sprintly - https://github.com/nextbigsoundinc/Sprintly-GitHub
alias sp='sprintly'
# spb = sprintly branch - create a branch automatically based on the bug you're working on
alias spb="git checkout -b \`sp | tail -2 | grep '#' | sed 's/^ //' | sed 's/[^A-Za-z0-9 ]//g' | sed 's/ /-/g' | cut -d"-" -f1,2,3,4,5\`"
# Misc
alias whereami='pwd'
alias brew_python='/usr/local/bin/python'
alias brew_pip='/usr/local/bin/pip-2.7'
|
neanias/dotfiles
|
zsh/aliases.zsh
|
Shell
|
bsd-2-clause
| 3,964 |
#!/bin/bash
## First, setup the ace hack
# copy blang file
cp blang.js ace-master/lib/ace/mode/
cp xtend.js ace-master/lib/ace/mode/
cd ace-master
npm clean
npm install
node Makefile.dryice.js
cd ..
rm -rf www/ace
cp -r ace-master/build/src/ www/ace
## Then, generate the actual documentation
# Rebuild source
cd ..
./setup-cli.sh
cd -
# Run the document generator
cd www
java -cp ../../build/install/blang/lib/\* blang.runtime.internals.doc.MakeHTMLDoc
cd -
##### Javadocs
## DSL
cd ../../blangDSL/ca.ubc.stat.blang.parent
./gradlew assemble
cd -
rm -rf www/javadoc-dsl
mv ../../blangDSL/ca.ubc.stat.blang.parent/ca.ubc.stat.blang/build/docs/javadoc www/javadoc-dsl
## xlinear
cd ../../xlinear
./gradlew assemble
cd -
rm -rf www/javadoc-xlinear
mv ../../xlinear/build/docs/javadoc www/javadoc-xlinear
## inits
cd ../../inits
./gradlew assemble
cd -
rm -rf www/javadoc-inits
mv ../../inits/build/docs/javadoc www/javadoc-inits
## SDK
cd ..
./gradlew assemble
cd -
rm -rf www/javadoc-sdk
mv ../build/docs/javadoc www/javadoc-sdk
|
UBC-Stat-ML/blangSDK
|
doc/build.sh
|
Shell
|
bsd-2-clause
| 1,055 |
#!/bin/bash
if [ "`whoami`" != 'postgres' ] ; then
echo "Please run this script as the user 'postgres'."
exit 2
fi
if [ $# != 3 ] ; then
echo "Usage: $0 database original_user new_user"
echo "Example: $0 FB2010_05 joachim gazebo"
exit 1
fi
DB=$1
ORIGINAL_USER=$2
NEW_USER=$3
echo "CREATE DATABASE $DB OWNER ${NEW_USER}" | psql postgres
for i in *.dump ; do
cat $i | sed s/${ORIGINAL_USER}/${NEW_USER}/g | psql $DB
done
|
strawlab/flystockdb
|
support/scripts/restore_x.sh
|
Shell
|
bsd-2-clause
| 433 |
#!/bin/bash
$1/planet-dump-ng --generator "planet-dump-ng test X.Y.Z" --changeset-discussions discussions.osm.bz2 --changeset-discussions-no-userinfo discussions-no-userinfo.osm.bz2 --dump-file $1/test/liechtenstein-2013-08-03.dmp
|
zerebubuth/planet-dump-ng
|
test/discussions.xml.case/cmd.sh
|
Shell
|
bsd-2-clause
| 232 |
#!/bin/bash
# 50-genresp.sh
# Generates responses for iRODS' setup_irods.sh script.
# Zone SID, agent key, database admin, and admin password are all randomized.
RESPFILE="/opt/irods-aws/setup_responses"
EC2_INSTANCE_ID=$(ec2metadata --instance-id)
echo "irods" > $RESPFILE # service account user ID
echo "irods" >> $RESPFILE # service account group ID
echo "tempZone" >> $RESPFILE # initial zone name
echo "1247" >> $RESPFILE # service port #
echo "20000" >> $RESPFILE # transport starting port #
echo "20199" >> $RESPFILE # transport ending port #
echo "/var/lib/irods/Vault" >> $RESPFILE # vault path
(openssl rand -base64 16 | sed 's,/,S,g' | sed 's,+,_,g' | cut -c 1-16 \
| tr -d '\n' ; echo "") >> $RESPFILE # zone SID
openssl rand -base64 32 | sed 's,/,S,g' | sed 's,+,_,g' | cut -c 1-32 >> $RESPFILE # neg key
echo "1248" >> $RESPFILE # control plane port
openssl rand -base64 32 | sed 's,/,S,g' | sed 's,+,_,g' | cut -c 1-32 >> $RESPFILE # control plane key
echo "https://schemas.irods.org/configuration" >> $RESPFILE # schema validation URI
echo "rods" >> $RESPFILE # iRODS admin account
echo $EC2_INSTANCE_ID >> $RESPFILE # iRODS admin password
echo "yes" >> $RESPFILE # confirm iRODS settings
echo "localhost" >> $RESPFILE # database hostname
echo "5432" >> $RESPFILE # database port
echo "ICAT" >> $RESPFILE # database DB name
echo "irods" >> $RESPFILE # database admin username
echo $EC2_INSTANCE_ID >> $RESPFILE # database admin password
echo "yes" >> $RESPFILE # confirm database settings
|
DICE-UNC/contrib
|
ec2-irods4.0.3-idw2/per-instance/50-genresp.sh
|
Shell
|
bsd-3-clause
| 1,898 |
#!/bin/sh
#
# Copyright (c) 2011, University of Konstanz, Distributed Systems Group
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the University of Konstanz nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
###SCRIPT FOR CI###
###(SVN->GITHUB SYNCING)###
###FOURTH STEP, MERGING SVN TO GITHUB
set +e
#merging the svn data
git merge refs/remotes/git-svn
exit 0
|
sebastiangraf/treetank
|
scripts/svnToGit/02.sh
|
Shell
|
bsd-3-clause
| 1,759 |
#!/bin/bash
cd "/export/home/ssakdeo/softwares/Tomcat 7/apache-tomcat-7.0.5/apache-tomcat-7.0.5/webapps/WISE/WEB-INF/classes"
/export/home/ssakdeo/softwares/jdk1.6.0_10/jre/bin/java -classpath ../lib -jar email_scheduler.jar /WISE
|
ctsidev/SecureWise
|
latest binaries/run scripts/scripts with other functions/email_schedule.sh
|
Shell
|
bsd-3-clause
| 232 |
#!/usr/bin/env bash
set -x
set -o pipefail
set -e
diff <(./minn.awk -v group=10 example_input.txt) <(echo 'SNP BP P'; echo 'line6 31529180 0.120407155')
diff <(./minn.awk -v group=10 -v key=2 example_input.txt) <(echo 'SNP BP P'; echo 'line1 31528339 0.329333277')
diff <(./minn.awk -v group=5 -v key=2 example_input.txt) <(echo 'SNP BP P'; echo 'line1 31528339 0.329333277'; echo 'line6 31529180 0.120407155')
diff <(./minn.awk -v group=5 -v key=3 example_input.txt) <(echo 'SNP BP P'; echo 'line2 31528360 0.235504901'; echo 'line6 31529180 0.120407155')
diff <(./minn.awk -v group=3 example_input.txt) <(echo 'SNP BP P'; echo 'line2 31528360 0.235504901'; echo 'line6 31529180 0.120407155';echo 'line9 31529434 0.124976827')
|
glennsb/minn
|
test.sh
|
Shell
|
bsd-3-clause
| 734 |
#!/usr/bin/env bash
# go to the clang-format dir because this is where the .clang-format file is
# located
cd "$( dirname "${BASH_SOURCE[0]}" )"
# format files
clang-format -style=file ../input/coding_conventions.hpp > ../output/coding_conventions.hpp
clang-format -style=file ../input/coding_conventions.cpp > ../output/coding_conventions.cpp
#
echo "To check the output use:"
echo "> meld input output"
|
ekiwi/xpcc-playground
|
format/clang-format/format_coding_conventions.sh
|
Shell
|
bsd-3-clause
| 408 |
#!/bin/bash
set -e -u -o pipefail
usage() {
echo "Usage:"
echo "$0 -h|--help"
echo "$0 -f|--flow1-prio {0..7} -F|--flow2-prio {0..7} -t|--flow1-time-ms {0..50} -T|--flow2-time-ms {0..50}"
}
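# Example invocation (illustrative values, all within the ranges listed above):
#   ./scheduling.sh --flow1-prio 3 --flow2-prio 5 --flow1-time-ms 10 --flow2-time-ms 20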
[ -z "${TOPDIR+x}" ] && { echo "Please source envsetup before running this script."; exit 1; }
# Chassis ETH2: Switch port RGMII 1
# Chassis ETH3: Switch port RGMII 2
# Chassis ETH4: Switch port RGMII 3
# Chassis ETH5: Switch port RGMII 0
# To LS1021: Switch port RGMII 4
#
sja1105-tool config default ls1021atsn
O=`getopt -l help,flow1-prio:,flow2-prio:,flow1-time-ms:,flow2-time-ms: -- hf:F:t:T: "$@"` || exit 1
eval set -- "$O"
while true; do
case "$1" in
-h|--help)
usage; exit 0;;
-f|--flow1-prio)
flow1_prio="$2"; shift 2;;
-F|--flow2-prio)
flow2_prio="$2"; shift 2;;
-t|--flow1-time-ms)
flow1_time="$2"; shift 2;;
-T|--flow2-time-ms)
flow2_time="$2"; shift 2;;
--)
shift; break;;
*)
echo "error parsing $1"; exit 1;;
esac
done
[ "$#" -gt 0 ] && { echo "error: trailing arguments: $@"; exit 1; }
[ -z "${flow1_prio+x}" ] && { echo "please provide an argument to --flow1-prio"; exit 1; }
[ -z "${flow2_prio+x}" ] && { echo "please provide an argument to --flow2-prio"; exit 1; }
[ -z "${flow1_time+x}" ] && { echo "please provide an argument to --flow1-time-ms"; exit 1; }
[ -z "${flow2_time+x}" ] && { echo "please provide an argument to --flow2-time-ms"; exit 1; }
# Content starts here
# Flow 1 is coming from Board 1. From Board 2's perspective, this
# is connected to chassis port ETH3 (RGMII 2).
# Flow 2 is coming from Board 2. This is the internal port RGMII4.
sja1105-tool config modify mac-config[2] vlanprio ${flow1_prio}
sja1105-tool config modify mac-config[4] vlanprio ${flow2_prio}
# Egress port for both flows is ETH2 (RGMII 1).
# This port will be configured for Qbv scheduling.
scheduler-create << EOF
{
"clksrc": "standalone",
"cycles": [
{
"start-time-ms": "1",
"timeslots": [
{
"duration-ms": "${flow1_time}",
"ports": [1],
"gates-open": [${flow1_prio}],
"comment": "Flow 1 timeslot"
},
{
"duration-ms": "${flow2_time}",
"ports": [1],
"gates-open": [${flow2_prio}],
"comment": "Flow 2 timeslot"
}
]
}
]
}
EOF
# Content ends here
xml_name="${BASH_SOURCE[0]/.sh}.xml"
rm -f ${xml_name}
sja1105-tool config save ${xml_name}
echo "Configuration saved as ${xml_name}."
echo "View with: \"sja1105-tool config load ${xml_name}; sja1105-tool config show | less\""
|
openil/sja1105-tool
|
src/helpers/configs/rate-limiting/scheduling.sh
|
Shell
|
bsd-3-clause
| 2,485 |
#!/usr/bin/env bash
set -e
pip_cache="$HOME/.cache"
docker_pip_cache="/tmp/cache/pip"
cd tests
docker build --build-arg PYTHON_IMAGE=python:3.6 -t python-linters .
docker run \
-e LOCAL_USER_ID=$UID \
-e PIP_CACHE=${docker_pip_cache} \
-v ${pip_cache}:$(dirname ${docker_pip_cache}) \
-v "$(dirname $(pwd))":/app \
-w /app \
--rm python-linters \
/bin/bash \
-c "pip install --user -U pip
pip install --user -r tests/requirements/lint-isort.txt --cache-dir ${docker_pip_cache}
/home/user/.local/bin/isort -c -df"
|
beniwohli/apm-agent-python
|
tests/scripts/docker/isort.sh
|
Shell
|
bsd-3-clause
| 545 |
# run all examples just to ensure they don't throw any errors
node ./examples/simple/render.js ./examples/stylesheet.xml /tmp/map.png
node ./examples/simple/blank.js & PID=$!; sleep 2; kill $PID
node ./examples/simple/simple.js & PID=$!; sleep 2; kill $PID
node ./examples/simple/simple_pool.js & PID=$!; sleep 2; kill $PID
node ./examples/simple/simple_express.js & PID=$!; sleep 2; kill $PID
node ./examples/js_datasource/simple.js; open js_points.png
node ./examples/js_datasource/usgs_quakes.js; open quakes.png
node ./examples/memory_datasource/simple.js; open memory_points.png
node ./examples/wms/wms.js ./examples/stylesheet.xml 8000 & PID=$!; sleep 2; kill $PID
node ./examples/wms/wms_pool.js ./examples/stylesheet.xml 8000 & PID=$!; sleep 2; kill $PID
node ./examples/tile/pool/app.js ./examples/stylesheet.xml 8000 & PID=$!; sleep 2; kill $PID
node ./examples/tile/database/app.js ./examples/stylesheet.xml 8000 & PID=$!; sleep 2; kill $PID
node ./examples/tile/elastic/app.js 8000 & PID=$!; sleep 2; kill $PID
# cleanup
sleep 2;
rm memory_points*
rm js_points.png
rm quakes.png
|
springmeyer/node-mapnik-heroku
|
test/run_examples.sh
|
Shell
|
bsd-3-clause
| 1,091 |
#!/bin/bash -e
IMG_FILE="${STAGE_WORK_DIR}/${IMG_FILENAME}.img"
IMGID="$(dd if="${IMG_FILE}" skip=440 bs=1 count=4 2>/dev/null | xxd -e | cut -f 2 -d' ')"
BOOT_PARTUUID="${IMGID}-01"
ROOT_PARTUUID="${IMGID}-02"
sed -i "s/BOOTDEV/PARTUUID=${BOOT_PARTUUID}/" "${ROOTFS_DIR}/etc/fstab"
sed -i "s/ROOTDEV/PARTUUID=${ROOT_PARTUUID}/" "${ROOTFS_DIR}/etc/fstab"
sed -i "s/ROOTDEV/PARTUUID=${ROOT_PARTUUID}/" "${ROOTFS_DIR}/boot/cmdline.txt"
|
WLANThermo/pi-gen
|
export-image/03-set-partuuid/00-run.sh
|
Shell
|
bsd-3-clause
| 439 |
#!/bin/bash
#############################################################################
# Copyright (c) 2018, Intel Corporation #
# All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# 1. Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# 2. Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in the #
# documentation and/or other materials provided with the distribution. #
# 3. Neither the name of the copyright holder nor the names of its #
# contributors may be used to endorse or promote products derived #
# from this software without specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR #
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT #
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, #
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED #
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR #
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF #
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING #
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS #
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #
#############################################################################
if [ $# -eq 1 ]
then
OUTFILE=$1
else
echo "you have to provide an outfile (stdout of test_matops.sh) and csc/csr!"
exit -1
fi
TOTALFLOPS=0
WEIGHTAVGGFLOPS=0
for i in `cat ${OUTFILE} | grep PERFDUMP | awk -F"," '{print $3 "," $7 "," $8 "," $10 "," $11}'`
do
FLOPS=`echo $i | awk -F"," '{print $2}'`
TOTALFLOPS=`echo $TOTALFLOPS+$FLOPS | bc`
done
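# Second pass: FLOP-weighted average of the per-kernel GFLOPS, i.e.
#   WEIGHTAVGGFLOPS = sum_i (FLOPS_i / TOTALFLOPS) * GFLOPS_i
# Illustrative example (made-up numbers): kernels with 100 and 300 FLOPs
# running at 10 and 20 GFLOPS average to 0.25*10 + 0.75*20 = 17.5 GFLOPS.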
for i in `cat ${OUTFILE} | grep PERFDUMP | awk -F"," '{print $3 "," $7 "," $8 "," $10 "," $11}'`
do
FLOPS=`echo $i | awk -F"," '{print $2}'`
GFLOPS=`echo $i | awk -F"," '{print $5}'`
WEIGHT=`echo $FLOPS/$TOTALFLOPS | bc -l`
WEIGHTGFLOPS=`echo $GFLOPS*$WEIGHT | bc -l`
WEIGHTAVGGFLOPS=`echo $WEIGHTAVGGFLOPS+$WEIGHTGFLOPS | bc -l`
done
echo $OUTFILE","$WEIGHTAVGGFLOPS
|
egeor/libxsmm
|
samples/edge/analyze_test_matops_out.sh
|
Shell
|
bsd-3-clause
| 2,934 |
#!/bin/bash
# iOS
npx react-native bundle \
--platform ios \
--dev false \
--entry-file index.js \
--bundle-output bundle/devlauncher.ios.bundle
tee > ios/EXDevLauncherBundle.cpp <<EOF
// NOTE: Autogenerated using 'yarn bundle' in 'expo-dev-launcher' package
#include "EXDevLauncherBundle.h"
const unsigned int EXDevLauncherBundleLength = $(wc -c < bundle/devlauncher.ios.bundle) + 2;
const char *EXDevLauncherBundle = R"DELIM(
$(cat bundle/devlauncher.ios.bundle)
)DELIM";
EOF
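# The "+ 2" above is assumed to account for the leading and trailing newlines
# that the R"DELIM(...)DELIM" raw string adds around the bundle text.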
rm bundle/devlauncher.ios.bundle
# Android
npx react-native bundle \
--platform android \
--dev false \
--entry-file index.js \
--bundle-output android/src/debug/res/raw/expo_dev_launcher_android.bundle
|
exponent/exponent
|
packages/expo-dev-launcher/write_embedded_bundle.sh
|
Shell
|
bsd-3-clause
| 731 |
#!/bin/bash
# Script takes the merged bam files generated by call.chipreadhandler.sh
# and calls peaks with MACS under standard settings - the qsub script is written on the fly.
OUTDIR=/magnuson-lab/jraab/analysis/swi_snf_final/output/macs_peaks/
INPUT=/magnuson-lab/jraab/analysis/swi_snf_final/data/chip/processed/input.sorted.bam
IDIR=/magnuson-lab/jraab/analysis/swi_snf_final/data/chip/processed/
if [ ! -d $OUTDIR ]; then
mkdir $OUTDIR
fi
stubs="arid1a arid1b arid2 snf5"
for s in $stubs; do
sfile=$(find $IDIR -name $s*.merged.sorted.bam)
echo $sfile
echo "#!/bin/bash" >> tmp.sh
echo "#$ -cwd
#$ -e tmp.err
#$ -o tmp.out
#$ -j ${s}.peaks" >> tmp.sh
echo "module load python/2.7.6" >> tmp.sh
echo "source /magnuson-lab/jraab/virtualenvs/base/bin/activate" >> tmp.sh
echo "macs2 callpeak -t $sfile -c $INPUT -n $OUTDIR/$s -g 2.7e9 -q 0.01 --broad " >> tmp.sh
qsub tmp.sh
rm tmp.sh
done
|
jraab/raab_swisnf_2015
|
code/callMacsNormal.sh
|
Shell
|
mit
| 936 |
#!/bin/bash
set -eu
: ${1:? Usage: $0 RELEASE_VERSION}
SCRIPTS=`dirname "$0"`
RELEASE_VERSION="$1"
if [[ ! "$RELEASE_VERSION" =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
echo "Error: RELEASE_VERSION must be in X.Y.Z format, but was $RELEASE_VERSION"
exit 1
fi
function contains-line() {
grep --line-regexp --quiet --fixed-strings -e "$1"
}
function demand-file-contains-line() {
local file="$1"
local expected="$2"
cat "$file" | contains-line "$expected" || (echo "Add this line to $file and try again:"; echo "$expected"; exit 1)
}
function assert-file-contains-substring() {
local file="$1"
local expected="$2"
cat "$file" | grep --quiet --fixed-strings -e "$expected" || (echo "Error: file $file did not contain $expected"; exit 1)
}
function set-project-version()
{
local file="pom.xml"
local version="$1"
mvn versions:set \
-DgenerateBackupPoms=false \
-DnewVersion="$version" \
--file "$file"
assert-file-contains-substring "$file" "<version>$version</version>"
}
function set-documentation-version()
{
local file="README.md"
local version="$1"
sed -i -r -e "s/^(\\s*<version>).+(<\\/version>)\$/\1$version\2/" "$file"
assert-file-contains-substring "$file" "<version>$version</version>"
}
function next-snapshot-version()
{
local prefix=`echo $1 | sed -n -r 's/([0-9]+\.[0-9]+\.)[0-9]+/\1/p'`
local suffix=`echo $1 | sed -n -r 's/[0-9]+\.[0-9]+\.([0-9]+)/\1/p'`
((suffix++))
echo "$prefix$suffix-SNAPSHOT"
}
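# For example, `next-snapshot-version 1.2.3` prints "1.2.4-SNAPSHOT"
# (prefix "1.2.", suffix 3 incremented to 4).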
APP_NAME="DataTree"
NEXT_VERSION=`next-snapshot-version $RELEASE_VERSION`
demand-file-contains-line README.md "### $APP_NAME $RELEASE_VERSION (`date --iso-8601`)"
set -x
set-project-version "$RELEASE_VERSION"
set-documentation-version "$RELEASE_VERSION"
git add -u
git commit -m "Release $RELEASE_VERSION"
git tag -s -m "$APP_NAME $RELEASE_VERSION" "v$RELEASE_VERSION"
$SCRIPTS/stage.sh "$APP_NAME $RELEASE_VERSION"
set-project-version "$NEXT_VERSION"
git add -u
git commit -m "Prepare for next development iteration"
$SCRIPTS/publish.sh "$APP_NAME $RELEASE_VERSION"
|
orfjackal/datatree
|
scripts/release.sh
|
Shell
|
mit
| 2,097 |
#!/usr/bin/env bash
ENV_NAME=$1
echo "--- Setting up hostname ---"
sudo rm /etc/hostname
sudo echo "$ENV_NAME" >> /etc/hostname
sudo echo "127.0.1.1 $ENV_NAME" >> /etc/hosts
sudo hostname $ENV_NAME
hostname
echo "--- Updating packages list ---"
sudo apt-get update
echo "--- Installing base packages ---"
sudo apt-get install -y vim curl git-core build-essential openssl libssl-dev python-software-properties python g++ make zip unzip
|
mandado/Lagrant
|
vagrant/scripts/init.sh
|
Shell
|
mit
| 445 |
#!/bin/bash
fur favicon "favicon.png" --text="api" --color="n" --font="p" --shape="i" --font-size="92" --format="png"
|
fur-labo/fur-examples
|
example/08-apeman-api/render_favicon.sh
|
Shell
|
mit
| 118 |
#!/bin/bash
#
# SCRIPT: generic_rsync.bash
# AUTHOR: Randy Michael
# DATE: 11/18/2007
# REV: 1.0
# PURPOSE: This is a generic shell script to copy files
# using rsync.
#
# set -n # Uncomment to check script syntax without execution
# set -x # Uncomment to debug this script
#
# REV LIST:
#
#
##############################################
# DEFINE FILES AND VARIABLES HERE
##############################################
# Define the source and destination files/directories
SOURCE_FL="/scripts/"
DESTIN_FL="booboo:/scripts"
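# Note: the trailing slash on SOURCE_FL makes rsync copy the *contents* of
# /scripts/ into booboo:/scripts rather than creating booboo:/scripts/scripts.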
##############################################
# BEGINNING OF MAIN
##############################################
# Start the rsync copy
rsync -avz "$SOURCE_FL" "$DESTIN_FL"
# End of generic_rsync.bash
|
quchunguang/test
|
bookshell/chapter7/generic_rsync.bash
|
Shell
|
mit
| 732 |
source $stdenv/setup
export DESTDIR=$out
mkdir -p $out/usr/bin
genericBuild
|
SymbiFlow/nixpkgs
|
pkgs/os-specific/linux/cramfsswap/builder.sh
|
Shell
|
mit
| 78 |
#!/usr/bin/env bash
# ddns_updater.sh: Checks DDNS sites and updates the IPs if needed.
# author: [email protected]
# bomb on any error
set -e
# make sure basic paths are set
export PATH=/sbin:/usr/sbin:/bin:/usr/bin:$PATH
CDW=$(cd -P -- "$(dirname -- "$0")" && pwd -P)
. ${CDW}/functions
SQLITE_DB=${CDW}/../auth/db/auth.db
# obtain the interface with the default gateway
IFACE=$(get_iface 4)
LIST=`sudo $(which sqlite3) ${SQLITE_DB} "SELECT domain,last_ipaddr FROM DDNS"`
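# Each ROW is a "domain|last_ipaddr" pair, e.g. "home.example.org|203.0.113.7"
# (values shown are only illustrative).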
for ROW in $LIST; do
DOMAIN=`echo $ROW | awk '{split($0,a,"|"); print a[1]}'`
OLDIP=`echo $ROW | awk '{split($0,a,"|"); print a[2]}'`
NEWIP=`dig +short $DOMAIN`
if [ "$OLDIP" != "$NEWIP" ]; then
echo "$(date): Updating $DOMAIN"
if [ -n "$OLDIP" ]; then
iptables -t nat -D PREROUTING -s $OLDIP/32 -i $IFACE -j ACCEPT -v && iptables-save > /etc/iptables/rules.v4 || iptables-save > /etc/iptables.rules
fi
iptables -t nat -I PREROUTING -s $NEWIP/32 -i $IFACE -j ACCEPT -v && iptables-save > /etc/iptables/rules.v4 || iptables-save > /etc/iptables.rules
#UPDATE database
sudo $(which sqlite3) ${SQLITE_DB} "UPDATE DDNS SET last_ipaddr = '${NEWIP}' WHERE domain = '${DOMAIN}'"
fi
done
exit 0
|
ab77/netflix-proxy
|
scripts/ddns_updater.sh
|
Shell
|
mit
| 1,337 |
#!/usr/bin/env bash
printf "%$(tput cols)s\n"|tr " " "="
echo "Starting vagrant machine"
printf "%$(tput cols)s\n"|tr " " "="
vagrant up
printf "%$(tput cols)s\n"|tr " " "="
echo "Starting ssh session"
printf "%$(tput cols)s\n"|tr " " "="
vagrant ssh
|
christian-rizza/vagrant-tutorial
|
vagrant-ssh.sh
|
Shell
|
mit
| 251 |
#!/bin/bash
# run_lstm_1c.sh is like run_lstm_1b.sh but changing from the old LSTM
# implementation to our new 'fast' LSTM layer. The xconfig changes from
# 'lstm-layer' to 'fast-lstm-layer'. It's as good as or maybe slightly better
# than the old setup.
# steps/info/chain_dir_info.pl exp/chain_cleaned/lstm1c_sp_bi
# exp/chain_cleaned/lstm1c_sp_bi: num-iters=253 nj=2..12 num-params=9.6M dim=40+100->3607 combine=-0.09->-0.09 xent:train/valid[167,252,final]=(-1.26,-1.14,-1.14/-1.34,-1.27,-1.27) logprob:train/valid[167,252,final]=(-0.092,-0.078,-0.078/-0.116,-0.111,-0.111)
# local/chain/compare_wer_general.sh exp/chain_cleaned/lstm1b_sp_bi exp/chain_cleaned/lstm1c_sp_bi
# System lstm1b_sp_bi lstm1c_sp_bi
# WER on dev(orig) 11.3 11.2
# WER on dev(rescored) 10.7 10.5
# WER on test(orig) 10.6 10.6
# WER on test(rescored) 10.0 10.1
# Final train prob -0.0787 -0.0777
# Final valid prob -0.1104 -0.1108
# Final train prob (xent) -1.1442 -1.1445
# Final valid prob (xent) -1.2782 -1.2692
## how you run this (note: this assumes that the run_lstm.sh soft link points here;
## otherwise call it directly in its location).
# by default, with cleanup:
# local/chain/run_lstm.sh
# without cleanup:
# local/chain/run_lstm.sh --train-set train --gmm tri3 --nnet3-affix "" &
# note, if you have already run one of the non-chain nnet3 systems
# (e.g. local/nnet3/run_tdnn.sh), you may want to run with --stage 14.
set -e -o pipefail
# First the options that are passed through to run_ivector_common.sh
# (some of which are also used in this script directly).
stage=0
nj=30
decode_nj=30
min_seg_len=1.55
chunk_left_context=40
chunk_right_context=0
label_delay=5
xent_regularize=0.1
train_set=train_cleaned
gmm=tri3_cleaned # the gmm for the target data
num_threads_ubm=32
nnet3_affix=_cleaned # cleanup affix for nnet3 and chain dirs, e.g. _cleaned
# decode options
extra_left_context=50
extra_right_context=0
frames_per_chunk=150
# The rest are configs specific to this script. Most of the parameters
# are just hardcoded at this level, in the commands below.
train_stage=-10
tree_affix= # affix for tree directory, e.g. "a" or "b", in case we change the configuration.
lstm_affix=1c #affix for LSTM directory, e.g. "a" or "b", in case we change the configuration.
common_egs_dir= # you can set this to use previously dumped egs.
# End configuration section.
echo "$0 $@" # Print the command line for logging
. cmd.sh
. ./path.sh
. ./utils/parse_options.sh
if ! cuda-compiled; then
cat <<EOF && exit 1
This script is intended to be used with GPUs but you have not compiled Kaldi with CUDA
If you want to use GPUs (and have them), go to src/, and configure and make on a machine
where "nvcc" is installed.
EOF
fi
local/nnet3/run_ivector_common.sh --stage $stage \
--nj $nj \
--min-seg-len $min_seg_len \
--train-set $train_set \
--gmm $gmm \
--num-threads-ubm $num_threads_ubm \
--nnet3-affix "$nnet3_affix"
gmm_dir=exp/$gmm
ali_dir=exp/${gmm}_ali_${train_set}_sp_comb
tree_dir=exp/chain${nnet3_affix}/tree_bi${tree_affix}
lat_dir=exp/chain${nnet3_affix}/${gmm}_${train_set}_sp_comb_lats
dir=exp/chain${nnet3_affix}/lstm${lstm_affix}_sp_bi
train_data_dir=data/${train_set}_sp_hires_comb
lores_train_data_dir=data/${train_set}_sp_comb
train_ivector_dir=exp/nnet3${nnet3_affix}/ivectors_${train_set}_sp_hires_comb
for f in $gmm_dir/final.mdl $train_data_dir/feats.scp $train_ivector_dir/ivector_online.scp \
$lores_train_data_dir/feats.scp $ali_dir/ali.1.gz $gmm_dir/final.mdl; do
[ ! -f $f ] && echo "$0: expected file $f to exist" && exit 1
done
if [ $stage -le 14 ]; then
echo "$0: creating lang directory with one state per phone."
# Create a version of the lang/ directory that has one state per phone in the
# topo file. [note, it really has two states.. the first one is only repeated
# once, the second one has zero or more repeats.]
if [ -d data/lang_chain ]; then
if [ data/lang_chain/L.fst -nt data/lang/L.fst ]; then
echo "$0: data/lang_chain already exists, not overwriting it; continuing"
else
echo "$0: data/lang_chain already exists and seems to be older than data/lang..."
echo " ... not sure what to do. Exiting."
exit 1;
fi
else
cp -r data/lang data/lang_chain
silphonelist=$(cat data/lang_chain/phones/silence.csl) || exit 1;
nonsilphonelist=$(cat data/lang_chain/phones/nonsilence.csl) || exit 1;
# Use our special topology... note that later on may have to tune this
# topology.
steps/nnet3/chain/gen_topo.py $nonsilphonelist $silphonelist >data/lang_chain/topo
fi
fi
if [ $stage -le 15 ]; then
# Get the alignments as lattices (gives the chain training more freedom).
# use the same num-jobs as the alignments
steps/align_fmllr_lats.sh --nj 100 --cmd "$train_cmd" ${lores_train_data_dir} \
data/lang $gmm_dir $lat_dir
rm $lat_dir/fsts.*.gz # save space
fi
if [ $stage -le 16 ]; then
# Build a tree using our new topology. We know we have alignments for the
# speed-perturbed data (local/nnet3/run_ivector_common.sh made them), so use
# those.
if [ -f $tree_dir/final.mdl ]; then
echo "$0: $tree_dir/final.mdl already exists, refusing to overwrite it."
exit 1;
fi
steps/nnet3/chain/build_tree.sh --frame-subsampling-factor 3 \
--context-opts "--context-width=2 --central-position=1" \
--leftmost-questions-truncate -1 \
--cmd "$train_cmd" 4000 ${lores_train_data_dir} data/lang_chain $ali_dir $tree_dir
fi
if [ $stage -le 17 ]; then
mkdir -p $dir
echo "$0: creating neural net configs using the xconfig parser";
num_targets=$(tree-info $tree_dir/tree |grep num-pdfs|awk '{print $2}')
learning_rate_factor=$(echo "print 0.5/$xent_regularize" | python)
mkdir -p $dir/configs
cat <<EOF > $dir/configs/network.xconfig
input dim=100 name=ivector
input dim=40 name=input
# please note that it is important to have input layer with the name=input
# as the layer immediately preceding the fixed-affine-layer to enable
# the use of short notation for the descriptor
fixed-affine-layer name=lda input=Append(-2,-1,0,1,2,ReplaceIndex(ivector, t, 0)) affine-transform-file=$dir/configs/lda.mat
# check steps/libs/nnet3/xconfig/lstm.py for the other options and defaults
fast-lstm-layer name=lstm1 cell-dim=512 delay=-3
fast-lstm-layer name=lstm2 cell-dim=512 delay=-3
fast-lstm-layer name=lstm3 cell-dim=512 delay=-3
## adding the layers for chain branch
output-layer name=output input=lstm3 output-delay=$label_delay include-log-softmax=false dim=$num_targets max-change=1.5
# adding the layers for xent branch
# This block prints the configs for a separate output that will be
# trained with a cross-entropy objective in the 'chain' models... this
# has the effect of regularizing the hidden parts of the model. we use
# 0.5 / args.xent_regularize as the learning rate factor- the factor of
# 0.5 / args.xent_regularize is suitable as it means the xent
# final-layer learns at a rate independent of the regularization
# constant; and the 0.5 was tuned so as to make the relative progress
# similar in the xent and regular final layers.
output-layer name=output-xent input=lstm3 output-delay=$label_delay dim=$num_targets learning-rate-factor=$learning_rate_factor max-change=1.5
EOF
steps/nnet3/xconfig_to_configs.py --xconfig-file $dir/configs/network.xconfig --config-dir $dir/configs/
fi
if [ $stage -le 18 ]; then
if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! -d $dir/egs/storage ]; then
utils/create_split_dir.pl \
/export/b0{5,6,7,8}/$USER/kaldi-data/egs/ami-$(date +'%m_%d_%H_%M')/s5/$dir/egs/storage $dir/egs/storage
fi
steps/nnet3/chain/train.py --stage $train_stage \
--cmd "$decode_cmd" \
--feat.online-ivector-dir $train_ivector_dir \
--feat.cmvn-opts "--norm-means=false --norm-vars=false" \
--chain.xent-regularize 0.1 \
--chain.leaky-hmm-coefficient 0.1 \
--chain.l2-regularize 0.00005 \
--chain.apply-deriv-weights false \
--chain.lm-opts="--num-extra-lm-states=2000" \
--egs.dir "$common_egs_dir" \
--egs.opts "--frames-overlap-per-eg 0" \
--egs.chunk-width "$frames_per_chunk" \
--egs.chunk-left-context "$chunk_left_context" \
--egs.chunk-right-context "$chunk_right_context" \
--trainer.num-chunk-per-minibatch 128 \
--trainer.frames-per-iter 1500000 \
--trainer.max-param-change 2.0 \
--trainer.num-epochs 4 \
--trainer.deriv-truncate-margin 10 \
--trainer.optimization.shrink-value 0.99 \
--trainer.optimization.num-jobs-initial 2 \
--trainer.optimization.num-jobs-final 12 \
--trainer.optimization.initial-effective-lrate 0.001 \
--trainer.optimization.final-effective-lrate 0.0001 \
--trainer.optimization.momentum 0.0 \
--cleanup.remove-egs true \
--feat-dir $train_data_dir \
--tree-dir $tree_dir \
--lat-dir $lat_dir \
--dir $dir
fi
if [ $stage -le 19 ]; then
# Note: it might appear that this data/lang_chain directory is mismatched, and it is as
# far as the 'topo' is concerned, but this script doesn't read the 'topo' from
# the lang directory.
utils/mkgraph.sh --self-loop-scale 1.0 data/lang $dir $dir/graph
fi
if [ $stage -le 20 ]; then
rm $dir/.error 2>/dev/null || true
for dset in dev test; do
(
steps/nnet3/decode.sh --num-threads 4 --nj $decode_nj --cmd "$decode_cmd" \
--acwt 1.0 --post-decode-acwt 10.0 \
--extra-left-context $extra_left_context \
--extra-right-context $extra_right_context \
--frames-per-chunk "$frames_per_chunk" \
--online-ivector-dir exp/nnet3${nnet3_affix}/ivectors_${dset}_hires \
--scoring-opts "--min-lmwt 5 " \
$dir/graph data/${dset}_hires $dir/decode_${dset} || exit 1;
steps/lmrescore_const_arpa.sh --cmd "$decode_cmd" data/lang data/lang_rescore \
data/${dset}_hires ${dir}/decode_${dset} ${dir}/decode_${dset}_rescore || exit 1
) || touch $dir/.error &
done
wait
if [ -f $dir/.error ]; then
echo "$0: something went wrong in decoding"
exit 1
fi
fi
exit 0
|
michellemorales/OpenMM
|
kaldi/egs/tedlium/s5_r2/local/chain/tuning/run_lstm_1c.sh
|
Shell
|
gpl-2.0
| 10,427 |
#!/usr/bin/env bash
cd ../../default
drush updb -y
cd ..
find . -maxdepth 1 -type d -name "mercy.edu.*" -exec sh -c '(cd {} && drush updb -y )' ';'
|
eleanorw13/development
|
sites/all/scripts/deploy-prod.sh
|
Shell
|
gpl-2.0
| 148 |
#!/bin/sh
cat <<EOF
Usage: t-seqall <additional-sbagen-options-if-required>
Please note: These tone-sets are based on data from a document that
appeared on USENET, from someone who appears to have measured the
frequencies used on some of the Monroe Institute tapes. Although the
frequencies were measured, the relative amplitudes of the tones were
not, so the results may not be identical to that of the Monroe
Institute tapes. Also I believe that the Monroe Institute uses a
guiding voice that leads the subject through the experience.
Despite that, these tone-sets (or Focus levels) may be useful for
experimenters to base their own tone-sets on. This experimental
sequence lasts 1 hour 40 minutes, and takes the subject through the
listed tone-sets in order. The possibilities associated with some of
the focus-levels are from a book by Ken Eagle-Feather, but these are
certainly not the last word on the effects of the focus levels.
EOF
echo -n "Press return to continue: "
read xx
cat <<'EOF'
Don't get disappointed if you don't zip off to another world on the
first attempt ! I think that the tones are intended to point the
direction - to nudge your awareness to the place where these things
are possible. If you're too inflexible, or if you don't have enough
awareness or energy to see what there is to see when you get there (my
case a lot of the time), you're going to think nothing's happening.
You also don't have the support of the Monroe Institute belief system
when you're experimenting alone like this, and, as I say, we don't
know for certain that these tone-sets accurately reproduce the Monroe
Institute tapes. But this is certainly fertile raw material to work
with - if you're ready to play, then there's nothing to stop you !
10 - Brain awake, body asleep
12 - Expanded awareness, in the mood to head out and explore
15 - No time, 3 hours seems like 2 minutes
21 - Alternate energy systems, bridge into worlds with non-human entities
22 - Place where people with `loose, chaotic thoughts' go after death
23 - Belief system territories, where dogmatic people go when dead
24 -
25 -
26 -
27 - The park, a way-station, a place of high creativity, a stepping
stone to areas beyond.
EOF
echo -n "Press return to continue: "
read xx
pre="$*";
pink=''
#pink="pink/30" # Uncomment this line to add pink noise
cat <<EOF |
# tN is the tone-set I'm using.
# tNa is an alternate tone-set for the same focus level
# tNi is a tone-set I'm using as an introduction, to scramble the phases
#
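# Reading used here: an entry such as 100+1.5/10 is a 100 Hz carrier with a
# 1.5 Hz binaural beat at 10% amplitude (see the sbagen documentation).
#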
t0: $pink 100+1.5/10 200+4.0/10 250+4.0/10 300+4.0/10
t0a: $pink 100+1.5/10 200+4.0/10 250+4.0/10 300+4.0/10
t1: $pink 50+0.25/10 100+1.5/10 200+4.0/10 250+4.0/10 300+4.0/10 400+10.0/10 500+10.1/10 600+4.8/10
t1a: $pink 100+1.5/10 200+4.0/10 250+4.0/10 300+4.0/10 400+10.0/10 500+10.1/10 600+4.8/10
t2: $pink 100+1.5/10 200+4.0/10 250+4.0/10 300+4.0/10 500+7.05/10 630+7.1/10 750+7.0/10
t2a: $pink 100+1.5/10 200+4.0/10 250+4.0/10 300+4.0/10 500+7.05/10 630+7.0/10 750+7.0/10
t3: $pink 200+4.0/10 250+5.4/10 300+5.4/10 600+16.2/10 750+16.2/10 900+16.2/10
t3a: $pink 200+4.0/10 250+4.0/10 300+4.0/10 600+16.2/10 750+15.9/10 900+16.2/10
t4: $pink 200+4.0/10 250+4.0/10 300+4.0/10 600+16.2/10 750+15.9/10 900+16.2/10
t4a: $pink 200+4.0/10 250+4.0/10 300+4.0/10 600+16.2/10 750+16.2/10 900+16.2/10
t5: $pink 400+3.9/10 503+4.0/10 600+4.0/10 750+3.9/10 900+4.0/10
t5a: $pink 400+3.9/10 503+4.0/10 600+4.0/10 750+3.9/10 900+4.0/10
t6: $pink 50+0.75/10 200+1.5/10 400+3.9/10 503+4.0/10 600+4.0/10 750+4.0/10 900+4.0/10
t6a: $pink 50+0.75/10 400+3.9/10 503+4.0/10 600+4.0/10 750+4.0/10 900+4.0/10
t7: $pink 503+4.0/10 600+4.0/10 750+4.0/10 900+4.0/10
t7a: $pink 503+4.0/10 600+4.0/10 750+4.0/10 900+4.0/10
t8: $pink 400+3.9/10 503+4.2/10 600+4.0/10 750+4.0/10 900+4.0/10
t8a: $pink 400+3.9/10 503+4.2/10 600+4.0/10 750+4.0/10 900+4.0/10
t9: $pink 50+0.80/10 400+4.0/10 503+4.2/10 600+4.0/10 750+4.0/10 900+4.0/10
t9a: $pink 400+3.9/10 503+4.2/10 600+4.0/10 750+4.0/10 900+4.0/10
t0i: $pink 100+1.5/0 200+4.0/0 250+4.03/0 300+4.07/0
t1i: $pink 50+0.25/0 100+1.5/0 200+4.0/0 250+4.03/0 300+4.07/0 400+10.0/0 500+10.1/0 600+4.8/0
t2i: $pink 100+1.5/0 200+4.0/0 250+4.03/0 300+4.07/0 500+7.05/0 630+7.1/0 750+7.0/0
t3i: $pink 200+4.0/0 250+5.4/0 300+5.45/0 600+16.2/0 750+16.23/0 900+16.27/0
t4i: $pink 200+4.0/0 250+4.03/0 300+4.07/0 600+16.2/0 750+15.9/0 900+16.25/0
t5i: $pink 400+3.9/0 503+4.0/0 600+4.03/0 750+3.95/0 900+4.07/0
t6i: $pink 50+0.75/0 200+1.5/0 400+3.9/0 503+4.0/0 600+4.03/0 750+4.05/0 900+4.08/0
t7i: $pink 503+4.0/0 600+4.03/0 750+4.05/0 900+4.08/0
t8i: $pink 400+3.9/0 503+4.2/0 600+4.0/0 750+4.03/0 900+4.07/0
t9i: $pink 50+0.80/0 400+4.0/0 503+4.2/0 600+4.03/0 750+4.05/0 900+4.08/0
off: -
0:00:00 t0i ->
0:00:20 t0 ->
0:09:50 t0 ->
0:09:59 off
0:10:00 t1i ->
0:10:20 t1 ->
0:19:50 t1 ->
0:19:59 off
0:20:00 t2i ->
0:20:20 t2 ->
0:29:50 t2 ->
0:29:59 off
0:30:00 t3i ->
0:30:20 t3 ->
0:39:50 t3 ->
0:39:59 off
0:40:00 t4i ->
0:40:20 t4 ->
0:49:50 t4 ->
0:49:59 off
0:50:00 t5i ->
0:50:20 t5 ->
0:59:50 t5 ->
0:59:59 off
1:00:00 t6i ->
1:00:20 t6 ->
1:09:50 t6 ->
1:09:59 off
1:10:00 t7i ->
1:10:20 t7 ->
1:19:50 t7 ->
1:19:59 off
1:20:00 t8i ->
1:20:20 t8 ->
1:29:50 t8 ->
1:29:59 off
1:30:00 t9i ->
1:30:20 t9 ->
1:39:50 t9 ->
1:39:59 off
EOF
sbagen $pre -S -E -
|
jave/sbagen-alsa
|
scripts/t-seqall.sh
|
Shell
|
gpl-2.0
| 5,331 |
#!/bin/sh
#==============================================================================
# Copyright (c) Linux Test Project, 2014
# Copyright (c) 2015 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#==============================================================================
. test.sh
TST_CLEANUP=netns_ns_exec_cleanup
# Set to 1 only for test cases using ifconfig (ioctl).
USE_IFCONFIG=0
##
# Variables which can be used in test cases (set by netns_setup() function):
###############################################################################
# Use in test cases to execute commands inside a namespace. Set to 'ns_exec' or
# 'ip netns exec' command according to NS_EXEC_PROGRAM argument specified in
# netns_setup() function call.
NS_EXEC=""
# Set to "net" for ns_create/ns_exec as their options requires
# to specify a namespace type. Empty for ip command.
NS_TYPE=""
# IP addresses of veth0 (IP0) and veth1 (IP1) devices (ipv4/ipv6 variant
# is determined according to the IP_VERSION argument specified in netns_setup()
# function call.
IP0=""
IP1=""
NETMASK=""
# 'ping' or 'ping6' according to the IP_VERSION argument specified
# in netns_setup() function call.
tping=""
# Network namespaces handles for manipulating and executing commands inside
# namespaces. For 'ns_exec' handles are PIDs of daemonized processes running
# in namespaces.
NS_HANDLE0=""
NS_HANDLE1=""
# Adds "inet6 add" to the 'ifconfig' arguments which is required for the ipv6
# version. Always use with 'ifconfig', even if ipv4 version of a test case is
# used, in which case IFCONF_IN6_ARG will be empty string. Usage:
# ifconfig <device> $IFCONF_IN6_ARG IP/NETMASK
IFCONF_IN6_ARG=""
###############################################################################
tst_check_iproute()
{
local cur_ipver="$(ip -V)"
local spe_ipver="$1"
cur_ipver=${cur_ipver##*s}
if [ -z $cur_ipver ] || [ -z $spe_ipver ]; then
tst_brkm TBROK "don't obtain valid iproute version"
fi
if [ $cur_ipver -lt $spe_ipver ]; then
tst_brkm TCONF \
"The commands in iproute tools do not support required objects"
fi
}
##
# Sets up global variables which can be used in test cases (documented above),
# creates two network namespaces and a pair of virtual ethernet devices, each
# device in one namespace. Each device is then enabled and assigned an IP
# address according to the function parameters. IFCONF_IN6_ARG variable is set
# only if ipv6 variant of test case is used (determined by IP_VERSION argument).
#
# SYNOPSIS:
# netns_setup <NS_EXEC_PROGRAM> <IP_VERSION> <COMM_TYPE> <IP4_VETH0>
# <IP4_VETH1> <IP6_VETH0> <IP6_VETH1>
#
# OPTIONS:
# * NS_EXEC_PROGRAM (ns_exec|ip)
# Program which will be used to enter and run other commands
# inside a network namespace.
# * IP_VERSION (ipv4|ipv6)
# Version of IP. (ipv4|ipv6)
# * COMM_TYPE (netlink|ioctl)
# Communication type between kernel and user space
# for enabling and assigning IP addresses to the virtual
# ethernet devices. Uses 'ip' command for netlink and 'ifconfig'
#		for ioctl. (If set to ioctl, function also checks the existence
# of the 'ifconfig' command.)
# * IP4_VETH0, IP4_VETH1
# IPv4 addresses for veth0 and veth1 devices.
# * IP6_VETH0, IP6_VETH1
# IPv6 addresses for veth0 and veth1 devices.
#
# On success function returns, on error tst_brkm is called and TC is terminated.
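#
# EXAMPLE (addresses are arbitrary illustrative values):
# netns_setup ns_exec ipv6 netlink 192.168.99.1 192.168.99.2 fd00:1::1 fd00:1::2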
netns_setup()
{
tst_require_root
tst_check_cmds ip
case "$1" in
ns_exec)
setns_check
if [ $? -eq 32 ]; then
tst_brkm TCONF "setns not supported"
fi
NS_TYPE="net"
netns_ns_exec_setup
TST_CLEANUP=netns_ns_exec_cleanup
;;
ip)
netns_ip_setup
TST_CLEANUP=netns_ip_cleanup
;;
*)
tst_brkm TBROK \
"first argument must be a program used to enter a network namespace (ns_exec|ip)"
;;
esac
case "$3" in
netlink)
;;
ioctl)
USE_IFCONFIG=1
tst_check_cmds ifconfig
;;
*)
tst_brkm TBROK \
"third argument must be a comm. type between kernel and user space (netlink|ioctl)"
;;
esac
if [ -z "$4" ]; then
tst_brkm TBROK "fourth argument must be the IPv4 address for veth0"
fi
if [ -z "$5" ]; then
tst_brkm TBROK "fifth argument must be the IPv4 address for veth1"
fi
if [ -z "$6" ]; then
tst_brkm TBROK "sixth argument must be the IPv6 address for veth0"
fi
if [ -z "$7" ]; then
tst_brkm TBROK "seventh argument must be the IPv6 address for veth1"
fi
case "$2" in
ipv4)
IP0=$4; IP1=$5
tping="ping"; NETMASK=24
;;
ipv6)
IFCONF_IN6_ARG="inet6 add"
IP0=$6; IP1=$7;
tping="ping6"; NETMASK=64
;;
*)
tst_brkm TBROK "second argument must be an ip version (ipv4|ipv6)"
;;
esac
netns_set_ip
}
##
# Sets up NS_EXEC to use 'ns_exec', creates two network namespaces and stores
# their handles into NS_HANDLE0 and NS_HANDLE1 variables (in this case handles
# are PIDs of daemonized processes running in these namespaces). Virtual
# ethernet device is then created for each namespace.
netns_ns_exec_setup()
{
NS_EXEC="ns_exec"
NS_HANDLE0=$(ns_create $NS_TYPE)
if [ $? -eq 1 ]; then
tst_resm TINFO "$NS_HANDLE0"
tst_brkm TBROK "unable to create a new network namespace"
fi
NS_HANDLE1=$(ns_create $NS_TYPE)
if [ $? -eq 1 ]; then
tst_resm TINFO "$NS_HANDLE1"
tst_brkm TBROK "unable to create a new network namespace"
fi
$NS_EXEC $NS_HANDLE0 $NS_TYPE ip link add veth0 type veth peer name veth1 || \
tst_brkm TBROK "unable to create veth pair devices"
$NS_EXEC $NS_HANDLE0 $NS_TYPE ns_ifmove veth1 $NS_HANDLE1
ret=$?
if [ $ret -eq 0 ]; then
return;
fi
if [ $ret -eq 32 ]; then
tst_brkm TCONF "IFLA_NET_NS_PID not supported"
fi
tst_brkm TBROK "unable to add device veth1 to the separate network namespace"
}
##
# Sets up NS_EXEC to use 'ip netns exec', creates two network namespaces
# and stores their handles into NS_HANDLE0 and NS_HANDLE1 variables. Virtual
# ethernet device is then created for each namespace.
netns_ip_setup()
{
tst_check_iproute 111010
NS_EXEC="ip netns exec"
NS_HANDLE0=tst_net_ns0
NS_HANDLE1=tst_net_ns1
ip netns del $NS_HANDLE0 2>/dev/null
ip netns del $NS_HANDLE1 2>/dev/null
ip netns add $NS_HANDLE0 || \
tst_brkm TBROK "unable to create a new network namespace"
ip netns add $NS_HANDLE1 || \
tst_brkm TBROK "unable to create a new network namespace"
$NS_EXEC $NS_HANDLE0 ip link add veth0 type veth peer name veth1 || \
tst_brkm TBROK "unable to create veth pair devices"
$NS_EXEC $NS_HANDLE0 ip link set veth1 netns $NS_HANDLE1 || \
tst_brkm TBROK "unable to add device veth1 to the separate network namespace"
}
##
# Enables virtual ethernet devices and assigns IP addresses for both
# of them (IPv4/IPv6 variant is decided by netns_setup() function).
netns_set_ip()
{
if [ -z "$NS_EXEC" ]; then
tst_brkm TBROK "netns_setup() function must be called first"
fi
# This applies only for ipv6 variant:
# Do not accept Router Advertisements (accept_ra) and do not use
# Duplicate Address Detection (accept_dad) which uses Neighbor
# Discovery Protocol - the problem is that until DAD can confirm that
# there is no other host with the same address, the address is
# considered to be "tentative" (attempts to bind() to the address fail
# with EADDRNOTAVAIL) which may cause problems for tests using ipv6.
echo 0 | $NS_EXEC $NS_HANDLE0 $NS_TYPE \
tee /proc/sys/net/ipv6/conf/veth0/accept_dad \
/proc/sys/net/ipv6/conf/veth0/accept_ra >/dev/null
echo 0 | $NS_EXEC $NS_HANDLE1 $NS_TYPE \
tee /proc/sys/net/ipv6/conf/veth1/accept_dad \
/proc/sys/net/ipv6/conf/veth1/accept_ra >/dev/null
case $USE_IFCONFIG in
1)
$NS_EXEC $NS_HANDLE0 $NS_TYPE ifconfig veth0 $IFCONF_IN6_ARG $IP0/$NETMASK ||
tst_brkm TBROK "adding address to veth0 failed"
$NS_EXEC $NS_HANDLE1 $NS_TYPE ifconfig veth1 $IFCONF_IN6_ARG $IP1/$NETMASK ||
tst_brkm TBROK "adding address to veth1 failed"
$NS_EXEC $NS_HANDLE0 $NS_TYPE ifconfig veth0 up ||
tst_brkm TBROK "enabling veth0 device failed"
$NS_EXEC $NS_HANDLE1 $NS_TYPE ifconfig veth1 up ||
tst_brkm TBROK "enabling veth1 device failed"
;;
*)
$NS_EXEC $NS_HANDLE0 $NS_TYPE ip address add $IP0/$NETMASK dev veth0 ||
tst_brkm TBROK "adding address to veth0 failed"
$NS_EXEC $NS_HANDLE1 $NS_TYPE ip address add $IP1/$NETMASK dev veth1 ||
tst_brkm TBROK "adding address to veth1 failed"
$NS_EXEC $NS_HANDLE0 $NS_TYPE ip link set veth0 up ||
tst_brkm TBROK "enabling veth0 device failed"
$NS_EXEC $NS_HANDLE1 $NS_TYPE ip link set veth1 up ||
tst_brkm TBROK "enabling veth1 device failed"
;;
esac
}
netns_ns_exec_cleanup()
{
if [ -z "$NS_EXEC" ]; then
return
fi
# removes veth0 device (which also removes the paired veth1 device)
$NS_EXEC $NS_HANDLE0 $NS_TYPE ip link delete veth0
kill -9 $NS_HANDLE0 2>/dev/null
kill -9 $NS_HANDLE1 2>/dev/null
}
netns_ip_cleanup()
{
if [ -z "$NS_EXEC" ]; then
return
fi
# removes veth0 device (which also removes the paired veth1 device)
$NS_EXEC $NS_HANDLE0 ip link delete veth0
ip netns del $NS_HANDLE0 2>/dev/null
ip netns del $NS_HANDLE1 2>/dev/null
}
|
richiejp/ltp
|
testcases/kernel/containers/netns/netns_helper.sh
|
Shell
|
gpl-2.0
| 9,605 |
#!/bin/bash
#
# MIT Alexander Couzens <[email protected]>
set -e
SDK_HOME="$HOME/sdk"
SDK_PATH=https://downloads.lede-project.org/snapshots/targets/ar71xx/generic/
SDK=lede-sdk-ar71xx-generic_gcc-5.4.0_musl.Linux-x86_64
PACKAGES_DIR="$PWD"
echo_red() { printf "\033[1;31m$*\033[m\n"; }
echo_green() { printf "\033[1;32m$*\033[m\n"; }
echo_blue() { printf "\033[1;34m$*\033[m\n"; }
exec_status() {
PATTERN="$1"
shift
while :;do sleep 590;echo "still running (please don't kill me Travis)";done &
("$@" 2>&1) | tee logoutput
R=${PIPESTATUS[0]}
kill $! && wait $! 2>/dev/null
if [ $R -ne 0 ]; then
echo_red "=> '$*' failed (return code $R)"
return 1
fi
if grep -qE "$PATTERN" logoutput; then
echo_red "=> '$*' failed (log matched '$PATTERN')"
return 1
fi
echo_green "=> '$*' successful"
return 0
}
# download will run on the `before_script` step.
# The travis cache will be used (all files under $HOME/sdk/), meaning
# we don't have to download the file again.
download_sdk() {
mkdir -p "$SDK_HOME"
cd "$SDK_HOME"
echo_blue "=== download SDK"
wget "$SDK_PATH/sha256sums" -O sha256sums
wget "$SDK_PATH/sha256sums.gpg" -O sha256sums.asc
# LEDE Build System (LEDE GnuPG key for unattended build jobs)
gpg --recv 0xCD84BCED626471F1
# LEDE Release Builder (17.01 "Reboot" Signing Key)
gpg --recv 0x833C6010D52BBB6B
gpg --verify sha256sums.asc
grep "$SDK" sha256sums > sha256sums.small
# if missing, outdated or invalid, download again
if ! sha256sum -c ./sha256sums.small ; then
wget "$SDK_PATH/$SDK.tar.xz" -O "$SDK.tar.xz"
fi
# check again and fail here if the file is still bad
sha256sum -c ./sha256sums.small
echo_blue "=== SDK is up-to-date"
}
# test_package will run on the `script` step.
# test_package calls 'make download' and 'make check' for every new/modified package
test_packages2() {
	# search for new or modified packages. PKGS will hold a list of packages like 'admin/muninlite admin/monit ...'
PKGS=$(git diff --diff-filter=d --name-only "$TRAVIS_COMMIT_RANGE" | grep 'Makefile$' | grep -v '/files/' | awk -F'/Makefile' '{ print $1 }')
if [ -z "$PKGS" ] ; then
echo_blue "No new or modified packages found!"
return 0
fi
echo_blue "=== Found new/modified packages:"
for pkg in $PKGS ; do
echo "===+ $pkg"
done
echo_blue "=== Setting up SDK"
tmp_path=$(mktemp -d)
cd "$tmp_path"
tar Jxf "$SDK_HOME/$SDK.tar.xz" --strip=1
# use github mirrors to spare lede servers
cat > feeds.conf <<EOF
src-git base https://github.com/lede-project/source.git
src-link packages $PACKAGES_DIR
src-git luci https://github.com/openwrt/luci.git
EOF
# enable BUILD_LOG
sed -i '1s/^/config BUILD_LOG\n\tbool\n\tdefault y\n\n/' Config-build.in
./scripts/feeds update -a
./scripts/feeds install -a
make defconfig
echo_blue "=== Setting up SDK done"
RET=0
# E.g: pkg_dir => admin/muninlite
# pkg_name => muninlite
for pkg_dir in $PKGS ; do
pkg_name=$(echo "$pkg_dir" | awk -F/ '{ print $NF }')
echo_blue "=== $pkg_name: Starting quick tests"
exec_status 'WARNING|ERROR' make "package/$pkg_name/download" V=s || RET=1
exec_status 'WARNING|ERROR' make "package/$pkg_name/check" V=s || RET=1
echo_blue "=== $pkg_name: quick tests done"
done
[ $RET -ne 0 ] && return $RET
for pkg_dir in $PKGS ; do
pkg_name=$(echo "$pkg_dir" | awk -F/ '{ print $NF }')
echo_blue "=== $pkg_name: Starting compile test"
		# we can't enable a verbose build or we often hit Travis limits
		# on log size and the job gets killed
exec_status '^ERROR' make "package/$pkg_name/compile" -j3 || RET=1
echo_blue "=== $pkg_name: compile test done"
echo_blue "=== $pkg_name: begin compile logs"
for f in $(find logs/package/feeds/packages/$pkg_name/ -type f); do
echo_blue "Printing $f"
cat "$f"
done
echo_blue "=== $pkg_name: end compile logs"
echo_blue "=== $pkg_name: begin packages sizes"
du -ba bin/
echo_blue "=== $pkg_name: end packages sizes"
done
return $RET
}
test_commits() {
RET=0
for commit in $(git rev-list ${TRAVIS_COMMIT_RANGE/.../..}); do
echo_blue "=== Checking commit '$commit'"
if git show --format='%P' -s $commit | grep -qF ' '; then
echo_red "Pull request should not include merge commits"
RET=1
fi
author="$(git show -s --format=%aN $commit)"
if echo $author | grep -q '\S\+\s\+\S\+'; then
echo_green "Author name ($author) seems ok"
else
echo_red "Author name ($author) need to be your real name 'firstname lastname'"
RET=1
fi
subject="$(git show -s --format=%s $commit)"
if echo "$subject" | grep -q -e '^[0-9A-Za-z,/-]\+: ' -e '^Revert '; then
echo_green "Commit subject line seems ok ($subject)"
else
echo_red "Commit subject line MUST start with '<package name>: ' ($subject)"
RET=1
fi
body="$(git show -s --format=%b $commit)"
sob="$(git show -s --format='Signed-off-by: %aN <%aE>' $commit)"
if echo "$body" | grep -qF "$sob"; then
echo_green "Signed-off-by match author"
else
echo_red "Signed-off-by is missing or doesn't match author (should be '$sob')"
RET=1
fi
done
return $RET
}
test_packages() {
test_commits && test_packages2 || return 1
}
echo_blue "=== Travis ENV"
env
echo_blue "=== Travis ENV"
while true; do
# if clone depth is too small, git rev-list / diff return incorrect or empty results
C="$(git rev-list ${TRAVIS_COMMIT_RANGE/.../..} | tail -n1)" 2>/dev/null
[ -n "$C" -a "$C" != "a22de9b74cf9579d1ce7e6cf1845b4afa4277b00" ] && break
echo_blue "Fetching 50 commits more"
git fetch origin --deepen=50
done
if [ "$TRAVIS_PULL_REQUEST" = false ] ; then
echo "Only Pull Requests are supported at the moment." >&2
exit 0
fi
if [ $# -ne 1 ] ; then
cat <<EOF
Usage: $0 (download_sdk|test_packages)
download_sdk - download the SDK to $HOME/sdk.tar.xz
test_packages - do a make check on the package
EOF
exit 1
fi
$@
|
pfzim/openwrt-packages
|
.travis_do.sh
|
Shell
|
gpl-2.0
| 5,819 |
#!/bin/bash -ue
# Copyright (C) 2013 Percona Inc
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to the
# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston
# MA 02110-1301 USA.
# Documentation: http://www.percona.com/doc/percona-xtradb-cluster/manual/xtrabackup_sst.html
# Make sure to read that before proceeding!
. $(dirname $0)/wsrep_sst_common
ealgo=""
ekey=""
ekeyfile=""
encrypt=0
nproc=1
ecode=0
ssyslog=""
ssystag=""
XTRABACKUP_PID=""
SST_PORT=""
REMOTEIP=""
REMOTEHOST=""
tca=""
tcert=""
tkey=""
sockopt=""
progress=""
ttime=0
totime=0
lsn=""
ecmd=""
rlimit=""
# Initially
stagemsg="${WSREP_SST_OPT_ROLE}"
cpat=""
ib_home_dir=""
ib_log_dir=""
ib_undo_dir=""
sfmt="tar"
strmcmd=""
tfmt=""
tcmd=""
rebuild=0
rebuildcmd=""
payload=0
pvformat="-F '%N => Rate:%r Avg:%a Elapsed:%t %e Bytes: %b %p' "
pvopts="-f -i 10 -N $WSREP_SST_OPT_ROLE "
STATDIR=""
uextra=0
disver=""
tmpopts=""
itmpdir=""
xtmpdir=""
scomp=""
sdecomp=""
ssl_dhparams=""
ssl_cert=""
ssl_ca=""
ssl_key=""
# Required for backup locks
# For backup locks it is 1 sent by joiner
# 5.6.21 PXC and later can't donate to an older joiner
sst_ver=1
if which pv &>/dev/null && pv --help | grep -q FORMAT;then
pvopts+=$pvformat
fi
pcmd="pv $pvopts"
declare -a RC
INNOBACKUPEX_BIN=innobackupex
DATA="${WSREP_SST_OPT_DATA}"
INFO_FILE="xtrabackup_galera_info"
IST_FILE="xtrabackup_ist"
MAGIC_FILE="${DATA}/${INFO_FILE}"
# Setting the path for ss and ip
export PATH="/usr/sbin:/sbin:$PATH"
timeit(){
local stage=$1
shift
local cmd="$@"
local x1 x2 took extcode
if [[ $ttime -eq 1 ]];then
x1=$(date +%s)
wsrep_log_info "Evaluating $cmd"
eval "$cmd"
extcode=$?
x2=$(date +%s)
took=$(( x2-x1 ))
wsrep_log_info "NOTE: $stage took $took seconds"
totime=$(( totime+took ))
else
wsrep_log_info "Evaluating $cmd"
eval "$cmd"
extcode=$?
fi
return $extcode
}
get_keys()
{
# $encrypt -eq 1 is for internal purposes only
if [[ $encrypt -ge 2 || $encrypt -eq -1 ]];then
return
fi
if [[ $encrypt -eq 0 ]];then
if $MY_PRINT_DEFAULTS xtrabackup | grep -q encrypt;then
wsrep_log_error "Unexpected option combination. SST may fail. Refer to http://www.percona.com/doc/percona-xtradb-cluster/manual/xtrabackup_sst.html "
fi
return
fi
if [[ $sfmt == 'tar' ]];then
wsrep_log_info "NOTE: Xtrabackup-based encryption - encrypt=1 - cannot be enabled with tar format"
encrypt=-1
return
fi
wsrep_log_info "Xtrabackup based encryption enabled in my.cnf - Supported only from Xtrabackup 2.1.4"
if [[ -z $ealgo ]];then
wsrep_log_error "FATAL: Encryption algorithm empty from my.cnf, bailing out"
exit 3
fi
if [[ -z $ekey && ! -r $ekeyfile ]];then
wsrep_log_error "FATAL: Either key or keyfile must be readable"
exit 3
fi
if [[ -z $ekey ]];then
ecmd="xbcrypt --encrypt-algo=$ealgo --encrypt-key-file=$ekeyfile"
else
wsrep_log_warning "Using the 'encrypt-key' option causes the encryption key"
wsrep_log_warning "to be set via the command-line and is considered insecure."
wsrep_log_warning "It is recommended to use the 'encrypt-key-file' option instead."
ecmd="xbcrypt --encrypt-algo=$ealgo --encrypt-key=$ekey"
fi
if [[ "$WSREP_SST_OPT_ROLE" == "joiner" ]];then
ecmd+=" -d"
fi
stagemsg+="-XB-Encrypted"
}
#
# If the ssl_dhparams variable is already set, uses that as a source
# of dh parameters for OpenSSL. Otherwise, looks for dhparams.pem in the
# datadir, and creates it there if it can't find the file.
# No input parameters
#
check_for_dhparams()
{
if [[ -z "$ssl_dhparams" ]]; then
if ! [[ -r "$DATA/dhparams.pem" ]]; then
wsrep_check_programs openssl
wsrep_log_info "Could not find dhparams file, creating $DATA/dhparams.pem"
if ! openssl dhparam -out "$DATA/dhparams.pem" 2048 >/dev/null 2>&1
then
wsrep_log_error "******** FATAL ERROR ********************************* "
wsrep_log_error "* Could not create the dhparams.pem file with OpenSSL. "
wsrep_log_error "****************************************************** "
exit 22
fi
fi
ssl_dhparams="$DATA/dhparams.pem"
fi
}
#
# verifies that the certificate matches the private key
# doing this will save us having to wait for a timeout that would
# otherwise occur.
#
# 1st param: path to the cert
# 2nd param: path to the private key
#
verify_cert_matches_key()
{
local cert_path=$1
local key_path=$2
wsrep_check_programs openssl diff
# generate the public key from the cert and the key
# they should match (otherwise we can't create an SSL connection)
if ! diff <(openssl x509 -in "$cert_path" -pubkey -noout) <(openssl rsa -in "$key_path" -pubout 2>/dev/null) >/dev/null 2>&1
then
wsrep_log_error "******** FATAL ERROR ************************* "
wsrep_log_error "* The certifcate and private key do not match. "
wsrep_log_error "* Please check your certificate and key files. "
wsrep_log_error "********************************************** "
exit 22
fi
}
# Checks to see if the file exists
# If the file does not exist (or cannot be read), issues an error
# and exits
#
# 1st param: file name to be checked (for read access)
# 2nd param: 1st error message (header)
# 3rd param: 2nd error message (footer, optional)
#
verify_file_exists()
{
local file_path=$1
local error_message1=$2
local error_message2=$3
if ! [[ -r "$file_path" ]]; then
wsrep_log_error "******** FATAL ERROR ************************* "
wsrep_log_error "* $error_message1 "
wsrep_log_error "* Could not find/access : $file_path "
if ! [[ -z "$error_message2" ]]; then
wsrep_log_error "* $error_message2 "
fi
wsrep_log_error "********************************************** "
exit 22
fi
}
get_transfer()
{
if [[ -z $SST_PORT ]];then
TSST_PORT=4444
else
TSST_PORT=$SST_PORT
fi
if [[ $tfmt == 'nc' ]];then
if [[ ! -x `which nc` ]];then
wsrep_log_error "nc(netcat) not found in path: $PATH"
exit 2
fi
if [[ $encrypt -eq 2 || $encrypt -eq 3 || $encrypt -eq 4 ]]; then
wsrep_log_error "******** FATAL ERROR *********************** "
wsrep_log_error "* Using SSL encryption (encrypt= 2, 3, or 4) "
wsrep_log_error "* is not supported when using nc(netcat). "
wsrep_log_error "******************************************** "
exit 22
fi
wsrep_log_info "Using netcat as streamer"
if [[ "$WSREP_SST_OPT_ROLE" == "joiner" ]];then
if nc -h 2>&1 | grep -q ncat; then
tcmd="nc $sockopt -l ${TSST_PORT}"
else
tcmd="nc $sockopt -dl ${TSST_PORT}"
fi
else
# netcat doesn't understand [] around IPv6 address
tcmd="nc ${REMOTEIP//[\[\]]/} ${TSST_PORT}"
fi
else
tfmt='socat'
wsrep_log_info "Using socat as streamer"
if [[ ! -x `which socat` ]];then
wsrep_log_error "socat not found in path: $PATH"
exit 2
fi
donor_extra=""
joiner_extra=""
if [[ $encrypt -eq 2 || $encrypt -eq 3 || $encrypt -eq 4 ]]; then
if ! socat -V | grep -q WITH_OPENSSL; then
wsrep_log_error "******** FATAL ERROR ****************** "
wsrep_log_error "* socat is not openssl enabled. "
wsrep_log_error "* Unable to encrypt SST communications. "
wsrep_log_error "*************************************** "
exit 2
fi
# Determine the socat version
SOCAT_VERSION=`socat -V 2>&1 | grep -oe '[0-9]\.[0-9][\.0-9]*' | head -n1`
if [[ -z "$SOCAT_VERSION" ]]; then
wsrep_log_error "******** FATAL ERROR **************** "
wsrep_log_error "* Cannot determine the socat version. "
wsrep_log_error "************************************* "
exit 2
fi
# socat versions < 1.7.3 will have 512-bit dhparams (too small)
# so create 2048-bit dhparams and send that as a parameter
# socat version >= 1.7.3, checks to see if the peername matches the hostname
# set commonname="" to disable the peername checks
#
if ! check_for_version "$SOCAT_VERSION" "1.7.3"; then
if [[ "$WSREP_SST_OPT_ROLE" == "joiner" ]]; then
# dhparams check (will create ssl_dhparams if needed)
check_for_dhparams
joiner_extra=",dhparam=$ssl_dhparams"
fi
fi
if check_for_version "$SOCAT_VERSION" "1.7.3"; then
donor_extra=',commonname=""'
fi
fi
if [[ $encrypt -eq 2 ]]; then
wsrep_log_warning "**** WARNING **** encrypt=2 is deprecated and will be removed in a future release"
wsrep_log_info "Using openssl based encryption with socat: with crt and ca"
verify_file_exists "$tcert" "Both certificate and CA files are required." \
"Please check the 'tcert' option. "
verify_file_exists "$tca" "Both certificate and CA files are required." \
"Please check the 'tca' option. "
stagemsg+="-OpenSSL-Encrypted-2"
if [[ "$WSREP_SST_OPT_ROLE" == "joiner" ]];then
wsrep_log_info "Decrypting with CERT: $tcert, CA: $tca"
tcmd="socat -u openssl-listen:${TSST_PORT},reuseaddr,cert=${tcert},cafile=${tca}${joiner_extra}${sockopt} stdio"
else
wsrep_log_info "Encrypting with CERT: $tcert, CA: $tca"
tcmd="socat -u stdio openssl-connect:${REMOTEIP}:${TSST_PORT},cert=${tcert},cafile=${tca}${donor_extra}${sockopt}"
fi
elif [[ $encrypt -eq 3 ]];then
wsrep_log_warning "**** WARNING **** encrypt=3 is deprecated and will be removed in a future release"
wsrep_log_info "Using openssl based encryption with socat: with key and crt"
verify_file_exists "$tcert" "Both certificate and key files are required." \
"Please check the 'tcert' option. "
verify_file_exists "$tkey" "Both certificate and key files are required." \
"Please check the 'tkey' option. "
stagemsg+="-OpenSSL-Encrypted-3"
if [[ "$WSREP_SST_OPT_ROLE" == "joiner" ]];then
wsrep_log_info "Decrypting with CERT: $tcert, KEY: $tkey"
tcmd="socat -u openssl-listen:${TSST_PORT},reuseaddr,cert=${tcert},key=${tkey},verify=0${joiner_extra}${sockopt} stdio"
else
wsrep_log_info "Encrypting with CERT: $tcert, KEY: $tkey"
tcmd="socat -u stdio openssl-connect:${REMOTEIP}:${TSST_PORT},cert=${tcert},key=${tkey},verify=0${sockopt}"
fi
elif [[ $encrypt -eq 4 ]]; then
wsrep_log_info "Using openssl based encryption with socat: with key, crt, and ca"
verify_file_exists "$ssl_ca" "CA, certificate, and key files are required." \
"Please check the 'ssl-ca' option. "
verify_file_exists "$ssl_cert" "CA, certificate, and key files are required." \
"Please check the 'ssl-cert' option. "
verify_file_exists "$ssl_key" "CA, certificate, and key files are required." \
"Please check the 'ssl-key' option. "
# Check to see that the key matches the cert
verify_cert_matches_key $ssl_cert $ssl_key
stagemsg+="-OpenSSL-Encrypted-4"
if [[ "$WSREP_SST_OPT_ROLE" == "joiner" ]]; then
wsrep_log_info "Decrypting with CERT: $ssl_cert, KEY: $ssl_key, CA: $ssl_ca"
tcmd="socat -u openssl-listen:${TSST_PORT},reuseaddr,cert=${ssl_cert},key=${ssl_key},cafile=${ssl_ca},verify=1${joiner_extra}${sockopt} stdio"
else
wsrep_log_info "Encrypting with CERT: $ssl_cert, KEY: $ssl_key, CA: $ssl_ca"
tcmd="socat -u stdio openssl-connect:${REMOTEIP}:${TSST_PORT},cert=${ssl_cert},key=${ssl_key},cafile=${ssl_ca},verify=1${donor_extra}${sockopt}"
fi
else
if [[ $encrypt -eq 1 ]]; then
wsrep_log_warning "**** WARNING **** encrypt=1 is deprecated and will be removed in a future release"
fi
if [[ "$WSREP_SST_OPT_ROLE" == "joiner" ]]; then
tcmd="socat -u TCP-LISTEN:${TSST_PORT},reuseaddr${sockopt} stdio"
else
tcmd="socat -u stdio TCP:${REMOTEIP}:${TSST_PORT}${sockopt}"
fi
fi
fi
}
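# Estimates the size of the data to transfer (InnoDB/MyISAM data files
# under the datadir) and passes it to pv via $pcmd so that progress
# reporting has a meaningful total. Halves the estimate when xtrabackup
# compression is enabled.
#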
get_footprint()
{
pushd $WSREP_SST_OPT_DATA 1>/dev/null
payload=$(find . -regex '.*\.ibd$\|.*\.MYI$\|.*\.MYD$\|.*ibdata1$' -type f -print0 | du --files0-from=- --block-size=1 -c | awk 'END { print $1 }')
if $MY_PRINT_DEFAULTS xtrabackup | grep -q -- "--compress";then
        # QuickLZ has around a 50% compression ratio
        # When compression/compaction is used, the progress is only approximate.
payload=$(( payload*1/2 ))
fi
popd 1>/dev/null
pcmd+=" -s $payload"
adjust_progress
}
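# Sets up the pv command ($pcmd) used for progress reporting and, on the
# donor, rate-limiting. Disables both if pv is not in the PATH.
#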
adjust_progress()
{
if [[ ! -x `which pv` ]];then
wsrep_log_error "pv not found in path: $PATH"
wsrep_log_error "Disabling all progress/rate-limiting"
pcmd=""
rlimit=""
progress=""
return
fi
if [[ -n $progress && $progress != '1' ]];then
if [[ -e $progress ]];then
pcmd+=" 2>>$progress"
else
pcmd+=" 2>$progress"
fi
elif [[ -z $progress && -n $rlimit ]];then
# When rlimit is non-zero
pcmd="pv -q"
fi
if [[ -n $rlimit && "$WSREP_SST_OPT_ROLE" == "donor" ]];then
wsrep_log_info "Rate-limiting SST to $rlimit"
pcmd+=" -L \$rlimit"
fi
}
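# Reads all SST-related settings from the server configuration
# ([sst], [xtrabackup], [mysqld] and [mysqld_safe] sections) into
# global variables.
#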
read_cnf()
{
sfmt=$(parse_cnf sst streamfmt "xbstream")
tfmt=$(parse_cnf sst transferfmt "socat")
tca=$(parse_cnf sst tca "")
tcert=$(parse_cnf sst tcert "")
tkey=$(parse_cnf sst tkey "")
encrypt=$(parse_cnf sst encrypt 0)
sockopt=$(parse_cnf sst sockopt "")
progress=$(parse_cnf sst progress "")
rebuild=$(parse_cnf sst rebuild 0)
ttime=$(parse_cnf sst time 0)
cpat=$(parse_cnf sst cpat '.*\.pem$\|.*init\.ok$\|.*galera\.cache$\|.*sst_in_progress$\|.*\.sst$\|.*gvwstate\.dat$\|.*grastate\.dat$\|.*\.err$\|.*\.log$\|.*RPM_UPGRADE_MARKER$\|.*RPM_UPGRADE_HISTORY$')
ealgo=$(parse_cnf xtrabackup encrypt "")
ekey=$(parse_cnf xtrabackup encrypt-key "")
ekeyfile=$(parse_cnf xtrabackup encrypt-key-file "")
scomp=$(parse_cnf sst compressor "")
sdecomp=$(parse_cnf sst decompressor "")
# Refer to http://www.percona.com/doc/percona-xtradb-cluster/manual/xtrabackup_sst.html
if [[ -z $ealgo ]];then
ealgo=$(parse_cnf sst encrypt-algo "")
ekey=$(parse_cnf sst encrypt-key "")
ekeyfile=$(parse_cnf sst encrypt-key-file "")
fi
# Pull the parameters needed for encrypt=4
ssl_ca=$(parse_cnf sst ssl-ca "")
if [[ -z "$ssl_ca" ]]; then
ssl_ca=$(parse_cnf mysqld ssl-ca "")
fi
ssl_cert=$(parse_cnf sst ssl-cert "")
if [[ -z "$ssl_cert" ]]; then
ssl_cert=$(parse_cnf mysqld ssl-cert "")
fi
ssl_key=$(parse_cnf sst ssl-key "")
if [[ -z "$ssl_key" ]]; then
ssl_key=$(parse_cnf mysqld ssl-key "")
fi
rlimit=$(parse_cnf sst rlimit "")
uextra=$(parse_cnf sst use-extra 0)
iopts=$(parse_cnf sst inno-backup-opts "")
iapts=$(parse_cnf sst inno-apply-opts "")
impts=$(parse_cnf sst inno-move-opts "")
stimeout=$(parse_cnf sst sst-initial-timeout 100)
ssyslog=$(parse_cnf sst sst-syslog 0)
ssystag=$(parse_cnf mysqld_safe syslog-tag "${SST_SYSLOG_TAG:-}")
ssystag+="-"
if [[ $ssyslog -ne -1 ]];then
if $MY_PRINT_DEFAULTS mysqld_safe | tr '_' '-' | grep -q -- "--syslog";then
ssyslog=1
fi
fi
}
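# Selects the streaming format and builds $strmcmd: xbstream (default)
# or tar, with different arguments for the joiner and donor roles.
#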
get_stream()
{
if [[ $sfmt == 'xbstream' ]];then
wsrep_log_info "Streaming with xbstream"
if [[ "$WSREP_SST_OPT_ROLE" == "joiner" ]];then
strmcmd="xbstream -x"
else
strmcmd="xbstream -c \${INFO_FILE}"
fi
else
sfmt="tar"
wsrep_log_info "Streaming with tar"
if [[ "$WSREP_SST_OPT_ROLE" == "joiner" ]];then
strmcmd="tar xfi - "
else
strmcmd="tar cf - \${INFO_FILE} "
fi
fi
}
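# Determines the number of available processors; falls back to 1.
#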
get_proc()
{
set +e
nproc=$(grep -c processor /proc/cpuinfo)
[[ -z $nproc || $nproc -eq 0 ]] && nproc=1
set -e
}
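# Signal handler on the joiner: removes the magic (GTID) file so that a
# partial transfer is not mistaken for a completed one.
#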
sig_joiner_cleanup()
{
wsrep_log_error "Removing $MAGIC_FILE file due to signal"
rm -f "$MAGIC_FILE"
}
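# EXIT handler for the joiner: logs a non-zero exit status, removes the
# sst_in_progress marker on success, cleans up the progress fifo and the
# temporary state directory, and kills the process group if the script
# was terminated by a signal.
#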
cleanup_joiner()
{
# Since this is invoked just after exit NNN
local estatus=$?
if [[ $estatus -ne 0 ]];then
wsrep_log_error "Cleanup after exit with status:$estatus"
elif [ "${WSREP_SST_OPT_ROLE}" = "joiner" ];then
wsrep_log_info "Removing the sst_in_progress file"
wsrep_cleanup_progress_file
fi
if [[ -n $progress && -p $progress ]];then
wsrep_log_info "Cleaning up fifo file $progress"
rm $progress
fi
if [[ -n ${STATDIR:-} ]];then
[[ -d $STATDIR ]] && rm -rf $STATDIR
fi
# Final cleanup
pgid=$(ps -o pgid= $$ | grep -o '[0-9]*')
# This means no setsid done in mysqld.
# We don't want to kill mysqld here otherwise.
if [[ $$ -eq $pgid ]];then
# This means a signal was delivered to the process.
# So, more cleanup.
if [[ $estatus -ge 128 ]];then
kill -KILL -$$ || true
fi
fi
exit $estatus
}
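# Succeeds if the given pid file is readable and the process it refers
# to is still running.
#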
check_pid()
{
local pid_file="$1"
[ -r "$pid_file" ] && ps -p $(cat "$pid_file") >/dev/null 2>&1
}
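# EXIT handler for the donor: kills a still-running xtrabackup process,
# removes the IST marker, progress fifo and temporary directories, and
# kills the process group if the script was terminated by a signal.
#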
cleanup_donor()
{
# Since this is invoked just after exit NNN
local estatus=$?
if [[ $estatus -ne 0 ]];then
wsrep_log_error "Cleanup after exit with status:$estatus"
fi
if [[ -n ${XTRABACKUP_PID:-} ]];then
if check_pid $XTRABACKUP_PID
then
wsrep_log_error "xtrabackup process is still running. Killing... "
kill_xtrabackup
fi
fi
rm -f ${DATA}/${IST_FILE} || true
if [[ -n $progress && -p $progress ]];then
wsrep_log_info "Cleaning up fifo file $progress"
rm -f $progress || true
fi
wsrep_log_info "Cleaning up temporary directories"
if [[ -n $xtmpdir ]];then
[[ -d $xtmpdir ]] && rm -rf $xtmpdir || true
fi
if [[ -n $itmpdir ]];then
[[ -d $itmpdir ]] && rm -rf $itmpdir || true
fi
# Final cleanup
pgid=$(ps -o pgid= $$ | grep -o '[0-9]*')
# This means no setsid done in mysqld.
# We don't want to kill mysqld here otherwise.
if [[ $$ -eq $pgid ]];then
# This means a signal was delivered to the process.
# So, more cleanup.
if [[ $estatus -ge 128 ]];then
kill -KILL -$$ || true
fi
fi
exit $estatus
}
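# Kills the xtrabackup process recorded in $XTRABACKUP_PID and removes
# the pid file.
#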
kill_xtrabackup()
{
local PID=$(cat $XTRABACKUP_PID)
[ -n "$PID" -a "0" != "$PID" ] && kill $PID && (kill $PID && kill -9 $PID) || :
wsrep_log_info "Removing xtrabackup pid file $XTRABACKUP_PID"
rm -f "$XTRABACKUP_PID" || true
}
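# Derives the SST port from the wsrep options; on the donor also
# extracts the remote IP, LSN and SST version from WSREP_SST_OPT_PATH.
#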
setup_ports()
{
if [[ "$WSREP_SST_OPT_ROLE" == "donor" ]];then
SST_PORT=$WSREP_SST_OPT_PORT
REMOTEIP=$WSREP_SST_OPT_HOST
lsn=$(echo $WSREP_SST_OPT_PATH | awk -F '[/]' '{ print $2 }')
sst_ver=$(echo $WSREP_SST_OPT_PATH | awk -F '[/]' '{ print $3 }')
else
SST_PORT=$WSREP_SST_OPT_PORT
fi
}
# waits ~1 minute for nc/socat to open the port and then reports ready
# (regardless of timeout)
wait_for_listen()
{
local HOST=$1
local PORT=$2
local MODULE=$3
for i in {1..300}
do
ss -p state listening "( sport = :$PORT )" | grep -qE 'socat|nc' && break
sleep 0.2
done
echo "ready ${HOST}:${PORT}/${MODULE}//$sst_ver"
}
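# If use-extra is enabled and the thread pool is active, directs
# xtrabackup to the configured extra-port on 127.0.0.1; otherwise the
# local socket is used.
#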
check_extra()
{
local use_socket=1
if [[ $uextra -eq 1 ]];then
if $MY_PRINT_DEFAULTS --mysqld | tr '_' '-' | grep -- "--thread-handling=" | grep -q 'pool-of-threads';then
local eport=$($MY_PRINT_DEFAULTS mysqld | tr '_' '-' | grep -- "--extra-port=" | cut -d= -f2)
if [[ -n $eport ]];then
# Xtrabackup works only locally.
# Hence, setting host to 127.0.0.1 unconditionally.
wsrep_log_info "SST through extra_port $eport"
INNOEXTRA+=" --host=127.0.0.1 --port=$eport "
use_socket=0
else
wsrep_log_error "Extra port $eport null, failing"
exit 1
fi
else
wsrep_log_info "Thread pool not set, ignore the option use_extra"
fi
fi
if [[ $use_socket -eq 1 ]] && [[ -n "${WSREP_SST_OPT_SOCKET}" ]];then
INNOEXTRA+=" --socket=${WSREP_SST_OPT_SOCKET}"
fi
}
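# Receives a stream on the joiner into the given directory, optionally
# under a timeout, checks every exit code in the pipeline and, if
# requested, verifies that the magic (GTID) file was received.
#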
recv_joiner()
{
local dir=$1
local msg=$2
local tmt=$3
local checkf=$4
local ltcmd
if [[ ! -d ${dir} ]];then
# This indicates that IST is in progress
return
fi
pushd ${dir} 1>/dev/null
set +e
if [[ $tmt -gt 0 && -x `which timeout` ]];then
if timeout --help | grep -q -- '-k';then
ltcmd="timeout -k $(( tmt+10 )) $tmt $tcmd"
else
ltcmd="timeout -s9 $tmt $tcmd"
fi
timeit "$msg" "$ltcmd | $strmcmd; RC=( "\${PIPESTATUS[@]}" )"
else
timeit "$msg" "$tcmd | $strmcmd; RC=( "\${PIPESTATUS[@]}" )"
fi
set -e
popd 1>/dev/null
if [[ ${RC[0]} -eq 124 ]];then
wsrep_log_error "Possible timeout in receving first data from donor in gtid stage"
exit 32
fi
for ecode in "${RC[@]}";do
if [[ $ecode -ne 0 ]];then
wsrep_log_error "Error while getting data from donor node: " \
"exit codes: ${RC[@]}"
exit 32
fi
done
if [[ $checkf -eq 1 && ! -r "${MAGIC_FILE}" ]];then
# this message should cause joiner to abort
wsrep_log_error "xtrabackup process ended without creating '${MAGIC_FILE}'"
wsrep_log_info "Contents of datadir"
wsrep_log_info "$(ls -l ${dir}/*)"
exit 32
fi
}
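# Streams the given directory from the donor to the joiner and checks
# every exit code in the pipeline.
#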
send_donor()
{
local dir=$1
local msg=$2
pushd ${dir} 1>/dev/null
set +e
timeit "$msg" "$strmcmd | $tcmd; RC=( "\${PIPESTATUS[@]}" )"
set -e
popd 1>/dev/null
for ecode in "${RC[@]}";do
if [[ $ecode -ne 0 ]];then
wsrep_log_error "Error while getting data from donor node: " \
"exit codes: ${RC[@]}"
exit 32
fi
done
}
# Returns the version string in a standardized format
# Input "1.2.3" => echoes "010203"
# Wrongly formatted values => echoes "000000"
normalize_version()
{
local major=0
local minor=0
local patch=0
# Only parses purely numeric version numbers, 1.2.3
# Everything after the first three values are ignored
if [[ $1 =~ ^([0-9]+)\.([0-9]+)\.?([0-9]*)([\.0-9])*$ ]]; then
major=${BASH_REMATCH[1]}
minor=${BASH_REMATCH[2]}
patch=${BASH_REMATCH[3]}
fi
printf %02d%02d%02d $major $minor $patch
}
# Compares two version strings
# The first parameter is the version to be checked
# The second parameter is the minimum version required
# Returns 0 (success) if $1 >= $2, 1 (failure) otherwise
check_for_version()
{
local local_version_str="$( normalize_version $1 )"
local required_version_str="$( normalize_version $2 )"
if [[ "$local_version_str" < "$required_version_str" ]]; then
return 1
else
return 0
fi
}
if [[ ! -x `which $INNOBACKUPEX_BIN` ]];then
wsrep_log_error "innobackupex not in path: $PATH"
exit 2
fi
# check the version; we require at least XB_REQUIRED_VERSION (2.3.5) so that
# the datadir can be passed via the command-line option
XB_REQUIRED_VERSION="2.3.5"
XB_VERSION=`$INNOBACKUPEX_BIN --version 2>&1 | grep -oe '[0-9]\.[0-9][\.0-9]*' | head -n1`
if [[ -z $XB_VERSION ]]; then
wsrep_log_error "FATAL: Cannot determine the $INNOBACKUPEX_BIN version. Needs xtrabackup-$XB_REQUIRED_VERSION or higher to perform SST"
exit 2
fi
if ! check_for_version $XB_VERSION $XB_REQUIRED_VERSION; then
wsrep_log_error "FATAL: The $INNOBACKUPEX_BIN version is $XB_VERSION. Needs xtrabackup-$XB_REQUIRED_VERSION or higher to perform SST"
exit 2
fi
rm -f "${MAGIC_FILE}"
if [[ ! ${WSREP_SST_OPT_ROLE} == 'joiner' && ! ${WSREP_SST_OPT_ROLE} == 'donor' ]];then
wsrep_log_error "Invalid role ${WSREP_SST_OPT_ROLE}"
exit 22
fi
read_cnf
setup_ports
if ${INNOBACKUPEX_BIN} /tmp --help 2>/dev/null | grep -q -- '--version-check'; then
disver="--no-version-check"
fi
if [[ ${FORCE_FTWRL:-0} -eq 1 ]];then
wsrep_log_info "Forcing FTWRL due to environment variable FORCE_FTWRL equal to $FORCE_FTWRL"
iopts+=" --no-backup-locks "
fi
INNOEXTRA=""
if [[ $ssyslog -eq 1 ]];then
if [[ ! -x `which logger` ]];then
wsrep_log_error "logger not in path: $PATH. Ignoring"
else
wsrep_log_info "Logging all stderr of SST/Innobackupex to syslog"
exec 2> >(logger -p daemon.err -t ${ssystag}wsrep-sst-$WSREP_SST_OPT_ROLE)
wsrep_log_error()
{
logger -p daemon.err -t ${ssystag}wsrep-sst-$WSREP_SST_OPT_ROLE "$@"
}
wsrep_log_info()
{
logger -p daemon.info -t ${ssystag}wsrep-sst-$WSREP_SST_OPT_ROLE "$@"
}
INNOAPPLY="${INNOBACKUPEX_BIN} $disver $iapts --apply-log \$rebuildcmd \${DATA} 2>&1 | logger -p daemon.err -t ${ssystag}innobackupex-apply "
INNOMOVE="${INNOBACKUPEX_BIN} ${WSREP_SST_OPT_DEFAULT} $disver $impts --datadir=${DATA} --move-back --force-non-empty-directories \${DATA} 2>&1 | logger -p daemon.err -t ${ssystag}innobackupex-move "
INNOBACKUP="${INNOBACKUPEX_BIN} ${WSREP_SST_OPT_DEFAULT} $disver $iopts \$tmpopts \$INNOEXTRA --galera-info --stream=\$sfmt \$itmpdir 2> >(logger -p daemon.err -t ${ssystag}innobackupex-backup)"
fi
else
INNOAPPLY="${INNOBACKUPEX_BIN} $disver $iapts --apply-log \$rebuildcmd \${DATA} &>\${DATA}/innobackup.prepare.log"
INNOMOVE="${INNOBACKUPEX_BIN} ${WSREP_SST_OPT_DEFAULT} --defaults-group=mysqld${WSREP_SST_OPT_CONF_SUFFIX} $disver $impts --datadir=${DATA} --move-back --force-non-empty-directories \${DATA} &>\${DATA}/innobackup.move.log"
INNOBACKUP="${INNOBACKUPEX_BIN} ${WSREP_SST_OPT_DEFAULT} $disver $iopts \$tmpopts \$INNOEXTRA --galera-info --stream=\$sfmt \$itmpdir 2>\${DATA}/innobackup.backup.log"
fi
get_stream
get_transfer
if [ "$WSREP_SST_OPT_ROLE" = "donor" ]
then
trap cleanup_donor EXIT
if [ $WSREP_SST_OPT_BYPASS -eq 0 ]
then
usrst=0
if [[ -z $sst_ver ]];then
wsrep_log_error "Upgrade joiner to 5.6.21 or higher for backup locks support"
wsrep_log_error "The joiner is not supported for this version of donor"
exit 93
fi
if [[ -z $(parse_cnf mysqld tmpdir "") && -z $(parse_cnf xtrabackup tmpdir "") ]];then
xtmpdir=$(mktemp -d)
tmpopts=" --tmpdir=$xtmpdir "
wsrep_log_info "Using $xtmpdir as xtrabackup temporary directory"
fi
itmpdir=$(mktemp -d)
wsrep_log_info "Using $itmpdir as innobackupex temporary directory"
if [[ -n "${WSREP_SST_OPT_USER:-}" && "$WSREP_SST_OPT_USER" != "(null)" ]]; then
INNOEXTRA+=" --user=$WSREP_SST_OPT_USER"
usrst=1
fi
if [ -n "${WSREP_SST_OPT_PSWD:-}" ]; then
INNOEXTRA+=" --password=$WSREP_SST_OPT_PSWD"
elif [[ $usrst -eq 1 ]];then
# Empty password, used for testing, debugging etc.
INNOEXTRA+=" --password="
fi
get_keys
check_extra
wsrep_log_info "Streaming GTID file before SST"
# Store donor's wsrep GTID (state ID) and wsrep_gtid_domain_id
# (separated by a space).
echo "${WSREP_SST_OPT_GTID} ${WSREP_SST_OPT_GTID_DOMAIN_ID}" > "${MAGIC_FILE}"
ttcmd="$tcmd"
if [[ $encrypt -eq 1 ]];then
if [[ -n $scomp ]];then
tcmd=" \$ecmd | $scomp | $tcmd "
else
tcmd=" \$ecmd | $tcmd "
fi
elif [[ -n $scomp ]];then
tcmd=" $scomp | $tcmd "
fi
send_donor $DATA "${stagemsg}-gtid"
        # Restore the transport command to its original state
tcmd="$ttcmd"
if [[ -n $progress ]];then
get_footprint
tcmd="$pcmd | $tcmd"
elif [[ -n $rlimit ]];then
adjust_progress
tcmd="$pcmd | $tcmd"
fi
wsrep_log_info "Sleeping before data transfer for SST"
sleep 10
wsrep_log_info "Streaming the backup to joiner at ${REMOTEIP} ${SST_PORT:-4444}"
# Add compression to the head of the stream (if specified)
if [[ -n $scomp ]]; then
tcmd="$scomp | $tcmd"
fi
# Add encryption to the head of the stream (if specified)
if [[ $encrypt -eq 1 ]]; then
tcmd=" \$ecmd | $tcmd "
fi
set +e
timeit "${stagemsg}-SST" "$INNOBACKUP | $tcmd; RC=( "\${PIPESTATUS[@]}" )"
set -e
if [ ${RC[0]} -ne 0 ]; then
wsrep_log_error "${INNOBACKUPEX_BIN} finished with error: ${RC[0]}. " \
"Check ${DATA}/innobackup.backup.log"
exit 22
elif [[ ${RC[$(( ${#RC[@]}-1 ))]} -eq 1 ]];then
wsrep_log_error "$tcmd finished with error: ${RC[1]}"
exit 22
fi
# innobackupex implicitly writes PID to fixed location in $xtmpdir
XTRABACKUP_PID="$xtmpdir/xtrabackup_pid"
else # BYPASS FOR IST
wsrep_log_info "Bypassing the SST for IST"
echo "continue" # now server can resume updating data
# Store donor's wsrep GTID (state ID) and wsrep_gtid_domain_id
# (separated by a space).
echo "${WSREP_SST_OPT_GTID} ${WSREP_SST_OPT_GTID_DOMAIN_ID}" > "${MAGIC_FILE}"
echo "1" > "${DATA}/${IST_FILE}"
get_keys
if [[ $encrypt -eq 1 ]];then
if [[ -n $scomp ]];then
tcmd=" \$ecmd | $scomp | $tcmd "
else
tcmd=" \$ecmd | $tcmd "
fi
elif [[ -n $scomp ]];then
tcmd=" $scomp | $tcmd "
fi
strmcmd+=" \${IST_FILE}"
send_donor $DATA "${stagemsg}-IST"
fi
echo "done ${WSREP_SST_OPT_GTID}"
wsrep_log_info "Total time on donor: $totime seconds"
elif [ "${WSREP_SST_OPT_ROLE}" = "joiner" ]
then
[[ -e $SST_PROGRESS_FILE ]] && wsrep_log_info "Stale sst_in_progress file: $SST_PROGRESS_FILE"
[[ -n $SST_PROGRESS_FILE ]] && touch $SST_PROGRESS_FILE
ib_home_dir=$(parse_cnf mysqld innodb-data-home-dir "")
ib_log_dir=$(parse_cnf mysqld innodb-log-group-home-dir "")
ib_undo_dir=$(parse_cnf mysqld innodb-undo-directory "")
stagemsg="Joiner-Recv"
sencrypted=1
nthreads=1
MODULE="xtrabackup_sst"
rm -f "${DATA}/${IST_FILE}"
# May need xtrabackup_checkpoints later on
rm -f ${DATA}/xtrabackup_binary ${DATA}/xtrabackup_galera_info ${DATA}/xtrabackup_logfile
wait_for_listen ${WSREP_SST_OPT_HOST} ${WSREP_SST_OPT_PORT:-4444} ${MODULE} &
trap sig_joiner_cleanup HUP PIPE INT TERM
trap cleanup_joiner EXIT
if [[ -n $progress ]];then
adjust_progress
tcmd+=" | $pcmd"
fi
get_keys
if [[ $encrypt -eq 1 && $sencrypted -eq 1 ]];then
if [[ -n $sdecomp ]];then
strmcmd=" $sdecomp | \$ecmd | $strmcmd"
else
strmcmd=" \$ecmd | $strmcmd"
fi
elif [[ -n $sdecomp ]];then
strmcmd=" $sdecomp | $strmcmd"
fi
STATDIR=$(mktemp -d)
MAGIC_FILE="${STATDIR}/${INFO_FILE}"
recv_joiner $STATDIR "${stagemsg}-gtid" $stimeout 1
if ! ps -p ${WSREP_SST_OPT_PARENT} &>/dev/null
then
wsrep_log_error "Parent mysqld process (PID:${WSREP_SST_OPT_PARENT}) terminated unexpectedly."
exit 32
fi
if [ ! -r "${STATDIR}/${IST_FILE}" ]
then
if [[ -d ${DATA}/.sst ]];then
wsrep_log_info "WARNING: Stale temporary SST directory: ${DATA}/.sst from previous state transfer. Removing"
rm -rf ${DATA}/.sst
fi
mkdir -p ${DATA}/.sst
(recv_joiner $DATA/.sst "${stagemsg}-SST" 0 0) &
jpid=$!
wsrep_log_info "Proceeding with SST"
wsrep_log_info "Cleaning the existing datadir and innodb-data/log directories"
find $ib_home_dir $ib_log_dir $ib_undo_dir $DATA -mindepth 1 -regex $cpat -prune -o -exec rm -rfv {} 1>&2 \+
tempdir=$(parse_cnf mysqld log-bin "")
if [[ -n ${tempdir:-} ]];then
binlog_dir=$(dirname $tempdir)
binlog_file=$(basename $tempdir)
if [[ -n ${binlog_dir:-} && $binlog_dir != '.' && $binlog_dir != $DATA ]];then
pattern="$binlog_dir/$binlog_file\.[0-9]+$"
wsrep_log_info "Cleaning the binlog directory $binlog_dir as well"
find $binlog_dir -maxdepth 1 -type f -regex $pattern -exec rm -fv {} 1>&2 \+ || true
rm $binlog_dir/*.index || true
fi
fi
TDATA=${DATA}
DATA="${DATA}/.sst"
MAGIC_FILE="${DATA}/${INFO_FILE}"
wsrep_log_info "Waiting for SST streaming to complete!"
wait $jpid
get_proc
if [[ ! -s ${DATA}/xtrabackup_checkpoints ]];then
wsrep_log_error "xtrabackup_checkpoints missing, failed innobackupex/SST on donor"
exit 2
fi
# Rebuild indexes for compact backups
if grep -q 'compact = 1' ${DATA}/xtrabackup_checkpoints;then
wsrep_log_info "Index compaction detected"
rebuild=1
fi
if [[ $rebuild -eq 1 ]];then
nthreads=$(parse_cnf xtrabackup rebuild-threads $nproc)
wsrep_log_info "Rebuilding during prepare with $nthreads threads"
rebuildcmd="--rebuild-indexes --rebuild-threads=$nthreads"
fi
if test -n "$(find ${DATA} -maxdepth 1 -type f -name '*.qp' -print -quit)";then
wsrep_log_info "Compressed qpress files found"
if [[ ! -x `which qpress` ]];then
wsrep_log_error "qpress not found in path: $PATH"
exit 22
fi
if [[ -n $progress ]] && pv --help | grep -q 'line-mode';then
count=$(find ${DATA} -type f -name '*.qp' | wc -l)
count=$(( count*2 ))
if pv --help | grep -q FORMAT;then
pvopts="-f -s $count -l -N Decompression -F '%N => Rate:%r Elapsed:%t %e Progress: [%b/$count]'"
else
pvopts="-f -s $count -l -N Decompression"
fi
pcmd="pv $pvopts"
adjust_progress
dcmd="$pcmd | xargs -n 2 qpress -T${nproc}d"
else
dcmd="xargs -n 2 qpress -T${nproc}d"
fi
# Decompress the qpress files
wsrep_log_info "Decompression with $nproc threads"
timeit "Joiner-Decompression" "find ${DATA} -type f -name '*.qp' -printf '%p\n%h\n' | $dcmd"
extcode=$?
if [[ $extcode -eq 0 ]];then
wsrep_log_info "Removing qpress files after decompression"
find ${DATA} -type f -name '*.qp' -delete
if [[ $? -ne 0 ]];then
wsrep_log_error "Something went wrong with deletion of qpress files. Investigate"
fi
else
wsrep_log_error "Decompression failed. Exit code: $extcode"
exit 22
fi
fi
if [[ ! -z $WSREP_SST_OPT_BINLOG ]];then
BINLOG_DIRNAME=$(dirname $WSREP_SST_OPT_BINLOG)
BINLOG_FILENAME=$(basename $WSREP_SST_OPT_BINLOG)
# To avoid comparing data directory and BINLOG_DIRNAME
mv $DATA/${BINLOG_FILENAME}.* $BINLOG_DIRNAME/ 2>/dev/null || true
pushd $BINLOG_DIRNAME &>/dev/null
for bfiles in $(ls -1 ${BINLOG_FILENAME}.[0-9]*);do
echo ${BINLOG_DIRNAME}/${bfiles} >> ${BINLOG_FILENAME}.index
done
popd &> /dev/null
fi
wsrep_log_info "Preparing the backup at ${DATA}"
timeit "Xtrabackup prepare stage" "$INNOAPPLY"
if [ $? -ne 0 ];
then
wsrep_log_error "${INNOBACKUPEX_BIN} apply finished with errors. Check ${DATA}/innobackup.prepare.log"
exit 22
fi
MAGIC_FILE="${TDATA}/${INFO_FILE}"
set +e
rm $TDATA/innobackup.prepare.log $TDATA/innobackup.move.log
set -e
wsrep_log_info "Moving the backup to ${TDATA}"
timeit "Xtrabackup move stage" "$INNOMOVE"
if [[ $? -eq 0 ]];then
wsrep_log_info "Move successful, removing ${DATA}"
rm -rf $DATA
DATA=${TDATA}
else
wsrep_log_error "Move failed, keeping ${DATA} for further diagnosis"
wsrep_log_error "Check ${DATA}/innobackup.move.log for details"
exit 22
fi
else
wsrep_log_info "${IST_FILE} received from donor: Running IST"
fi
if [[ ! -r ${MAGIC_FILE} ]];then
wsrep_log_error "SST magic file ${MAGIC_FILE} not found/readable"
exit 2
fi
wsrep_log_info "Galera co-ords from recovery: $(cat ${MAGIC_FILE})"
cat "${MAGIC_FILE}" # Output : UUID:seqno wsrep_gtid_domain_id
wsrep_log_info "Total time on joiner: $totime seconds"
fi
exit 0
|
tempesta-tech/mariadb_10.2
|
scripts/wsrep_sst_xtrabackup-v2.sh
|
Shell
|
gpl-2.0
| 39,189 |
#!/usr/bin/bash
pandoc KGDB.md -s -f markdown -t html -o KGDB.html
pandoc NOTES.md -s -f markdown -t html -o NOTES.html
pandoc README.md -s -f markdown -t html -o README.html
pandoc SKINNING.md -s -f markdown -t html -o SKINNING.html
|
Wintermute0110/advanced-emulator-launcher
|
make_docs.sh
|
Shell
|
gpl-2.0
| 234 |
#!/bin/bash
# NIC IRQ affinity
# show IRQs for network interfaces
# cat /proc/interrupts | egrep "eth[0-9]+-Tx"
for i in {119..126}; do echo "Setting IRQ $i"; echo 00000100 > /proc/irq/$i/smp_affinity; done
for i in {129..136}; do echo "Setting IRQ $i"; echo 00000200 > /proc/irq/$i/smp_affinity; done
for i in {155..162}; do echo "Setting IRQ $i"; echo 00000400 > /proc/irq/$i/smp_affinity; done
for i in {164..171}; do echo "Setting IRQ $i"; echo 00000800 > /proc/irq/$i/smp_affinity; done
for i in {190..197}; do echo "Setting IRQ $i"; echo 00001000 > /proc/irq/$i/smp_affinity; done
for i in {199..206}; do echo "Setting IRQ $i"; echo 00002000 > /proc/irq/$i/smp_affinity; done
|
google-code/amico
|
utils/set_nic_irq_smp_affinity_multiqueue.sh
|
Shell
|
gpl-2.0
| 686 |
#!/bin/bash
MY_PATH="`dirname \"$0\"`"
if [ -f "$MY_PATH/build_number" ]
then
number=`cat $MY_PATH/build_number`
else
number=0
fi
if [ ! -f $MY_PATH/../build_number.h ]
then
# This is needed to build, so make sure it's available
echo "const int build_number = ""$number;" | tee $MY_PATH/../build_number.h
fi
if [ ! -f "$MY_PATH/ReleaseManager" ]
then
    # Script only needs to be run by Release Managers
exit;
fi
if [ "$1" == "release" ]
then
echo "Updating build number"
let number++
echo "$number" > $MY_PATH/build_number
echo "const int build_number = ""$number;" | tee $MY_PATH/../build_number.h
else
echo "Skipping build number update"
fi
|
daniel-holder/sleepyhead-cloud
|
sleepyhead/scripts/inc_build.sh
|
Shell
|
gpl-3.0
| 653 |
#!/bin/sh
. "${TEST_SCRIPTS_DIR}/unit.sh"
define_test "Release 1 IP, 10 connections killed, 3 fail"
setup_ctdb
ctdb_get_1_public_address |
while read dev ip bits ; do
ok_null
simple_test_event "takeip" $dev $ip $bits
count=10
setup_tcp_connections $count \
"$ip" 445 10.254.254.0 12300
setup_tcp_connections_unkillable 3 \
"$ip" 445 10.254.254.0 43210
ok <<EOF
Killed 10/13 TCP connections to released IP 10.0.0.3
Remaining connections:
10.0.0.3:445 10.254.254.1:43211
10.0.0.3:445 10.254.254.2:43212
10.0.0.3:445 10.254.254.3:43213
EOF
simple_test_event "releaseip" $dev $ip $bits
done
|
lidan-fnst/samba
|
ctdb/tests/eventscripts/10.interface.012.sh
|
Shell
|
gpl-3.0
| 625 |
#!/bin/sh
#
# srecord - Manipulate EPROM load files
# Copyright (C) 2009, 2011 Peter Miller
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
TEST_SUBJECT="write MIF"
. test_prelude
cat > test.in << 'fubar'
S00600004844521B
S111000048656C6C6F2C20576F726C64210A7B
S5030001FB
S9030000FC
fubar
if test $? -ne 0; then no_result; fi
cat > test.ok << 'fubar'
-- HDR
--
-- Generated automatically by srec_cat -o --mif
--
DEPTH = 14;
WIDTH = 8;
ADDRESS_RADIX = HEX;
DATA_RADIX = HEX;
CONTENT BEGIN
0000: 48 65 6C 6C 6F 2C 20 57 6F 72 6C 64 21 0A;
-- start address = 0000
END;
fubar
if test $? -ne 0; then no_result; fi
srec_cat test.in -o test.out -mif
if test $? -ne 0; then fail; fi
diff test.ok test.out
if test $? -ne 0; then fail; fi
#
# The things tested here, worked.
# No other guarantees are made.
#
pass
# vim: set ts=8 sw=4 et :
|
freyc/SRecord
|
test/01/t0169a.sh
|
Shell
|
gpl-3.0
| 1,426 |