code (string, length 2–1.05M) | repo_name (string, length 5–110) | path (string, length 3–922) | language (1 class) | license (15 classes) | size (int64, 2–1.05M)
---|---|---|---|---|---|
#!/bin/bash -e
[ -n "${GOPATH:-}" ] && export "PATH=${GOPATH}/bin:${PATH}"
# Always ignore SC2230 ('which' is non-standard. Use builtin 'command -v' instead.)
export SHELLCHECK_OPTS="-e SC2230 -e SC2039 -e SC2028 -e SC2002 -e SC2005"
export BOOTSTRAP_REUSE_LOCAL="${BOOTSTRAP_REUSE_LOCAL:-}"
export BOOTSTRAP_REUSE="${BOOTSTRAP_REUSE:-false}"
export BOOTSTRAP_PROVIDER="${BOOTSTRAP_PROVIDER:-lxd}"
export BOOTSTRAP_SERIES="${BOOTSTRAP_SERIES:-}"
export BUILD_AGENT="${BUILD_AGENT:-false}"
export RUN_SUBTEST="${RUN_SUBTEST:-}"
export CURRENT_LTS="focal"
current_pwd=$(pwd)
export CURRENT_DIR="${current_pwd}"
OPTIND=1
VERBOSE=1
RUN_ALL="false"
SKIP_LIST=""
RUN_LIST=""
ARTIFACT_FILE=""
OUTPUT_FILE=""
import_subdir_files() {
test "$1"
local file
for file in "$1"/*.sh; do
# shellcheck disable=SC1090
. "$file"
done
}
import_subdir_files includes
# If adding a test suite, ensure it is added here so that it gets picked up
# (see the illustrative note after the list). Please keep these in alphabetical order.
TEST_NAMES="agents \
appdata \
backup \
bootstrap \
branches \
caasadmission \
charmhub \
cli \
controller \
deploy \
expose_ec2 \
hooks \
hooktools \
machine \
manual \
model \
network \
ovs_maas \
relations \
resources \
smoke \
spaces_ec2 \
static_analysis \
unit \
upgrade"
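# Illustrative note (the suite name "foo" is hypothetical): each name listed
# above is expected to have a matching suites/foo/ directory whose *.sh files
# define a test_foo function, since run_test sources suites/<name> and then
# invokes test_<name>.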
# Show test suites; can be used to check whether a test suite is available.
show_test_suites() {
output=""
for test in ${TEST_NAMES}; do
name=$(echo "${test}" | sed -E "s/^run_//g" | sed -E "s/_/ /g")
# shellcheck disable=SC2086
output="${output}\n${test}"
done
echo -e "${output}" | column -t -s "|"
exit 0
}
show_help() {
version=$(juju version)
echo ""
echo "$(red 'Juju test suite')"
echo "¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯"
# shellcheck disable=SC2016
echo 'The Juju test suite expects you to have a Juju binary available on your $PATH,'
echo "so that if a test needs to bootstrap it can just use that one"
echo "directly."
echo ""
echo "Juju Version:"
echo "¯¯¯¯¯¯¯¯¯¯¯¯¯"
echo "Using juju version: $(green "${version}")"
echo ""
echo "Usage:"
echo "¯¯¯¯¯¯"
echo "Flags should appear $(red 'before') arguments."
echo ""
echo "cmd [-h] [-v] [-A] [-s test] [-a file] [-x file] [-r] [-l controller] [-p provider type <lxd|aws|manual|microk8s|vsphere|maas>]"
echo ""
echo " $(green './main.sh -h') Display this help message"
echo " $(green './main.sh -v') Verbose and debug messages"
echo " $(green './main.sh -A') Run all the test suites"
echo " $(green './main.sh -s') Skip tests using a comma-separated list"
echo " $(green './main.sh -a') Create an artifact file"
echo " $(green './main.sh -x') Output file from streaming the output"
echo " $(green './main.sh -r') Reuse bootstrapped controller between testing suites"
echo " $(green './main.sh -l') Local bootstrapped controller name to reuse"
echo " $(green './main.sh -p') Bootstrap provider to use when bootstrapping <lxd|aws|manual|k8s|openstack|vsphere|maas>"
echo " vsphere assumes juju boston vsphere for image metadata generation"
echo " openstack assumes providing image data directly is not required"
echo " $(green './main.sh -c') Cloud name to use when bootstrapping, must be one of provider types listed above"
echo " $(green './main.sh -R') Region to use with cloud"
echo " $(green './main.sh -S') Bootstrap series to use <default is host>, priority over -l"
echo ""
echo "Tests:"
echo "¯¯¯¯¯¯"
echo "Available tests:"
echo ""
# Let's use the TEST_NAMES to print out what's available
output=""
for test in ${TEST_NAMES}; do
name=$(echo "${test}" | sed -E "s/^run_//g" | sed -E "s/_/ /g")
# shellcheck disable=SC2086
output="${output}\n $(green ${test})|Runs the ${name} tests"
done
echo -e "${output}" | column -t -s "|"
echo ""
echo "Examples:"
echo "¯¯¯¯¯¯¯¯¯"
echo "Run a singular test:"
echo ""
echo " $(green './main.sh static_analysis test_static_analysis_go')"
echo ""
echo "Run static analysis tests, but skip the go static analysis tests:"
echo ""
echo " $(green './main.sh -s test_static_analysis_go static_analysis')"
echo ""
echo "Run a more verbose output and save that to an artifact tar (it"
echo "requires piping the output from stdout and stderr into a output.log,"
echo "which is then copied into the artifact tar file on test cleanup):"
echo ""
echo " $(green './main.sh -v -a artifact.tar.gz -x output.log 2>&1|tee output.log')"
exit 1
}
while getopts "hH?vAs:a:x:rl:p:c:R:S:" opt; do
case "${opt}" in
h | \?)
show_help
;;
H)
show_test_suites
;;
v)
VERBOSE=2
alias juju="juju --debug"
;;
A)
RUN_ALL="true"
;;
s)
SKIP_LIST="${OPTARG}"
;;
a)
ARTIFACT_FILE="${OPTARG}"
;;
x)
OUTPUT_FILE="${OPTARG}"
;;
r)
export BOOTSTRAP_REUSE="true"
;;
l)
export BOOTSTRAP_REUSE_LOCAL="${OPTARG}"
export BOOTSTRAP_REUSE="true"
CLOUD=$(juju show-controller "${OPTARG}" --format=json 2>/dev/null | jq -r ".[\"${OPTARG}\"] | .details | .cloud")
PROVIDER=$(juju clouds --client 2>/dev/null | grep "${CLOUD}" | awk '{print $4}' | head -n 1)
if [[ -z ${PROVIDER} ]]; then
PROVIDER="${CLOUD}"
fi
export BOOTSTRAP_PROVIDER="${PROVIDER}"
export BOOTSTRAP_CLOUD="${PROVIDER}"
;;
p)
export BOOTSTRAP_PROVIDER="${OPTARG}"
;;
c)
PROVIDER=$(juju clouds --client --all --format=json 2>/dev/null | jq -r ".[\"${OPTARG}\"] | .type")
export BOOTSTRAP_PROVIDER="${PROVIDER}"
num_regions=$(juju clouds --client --all --format=json 2>/dev/null | jq -r ".[\"${OPTARG}\"] | .regions | length")
if [[ ${num_regions} -gt 1 ]]; then
echo "cloud '${OPTARG}' has more than 1 region; a region must be specified"
exit 1
fi
CLOUD="${OPTARG}"
REGION=$(juju clouds --client --all --format=json 2>/dev/null | jq -r ".[\"${CLOUD}\"] | .regions | keys[0]")
export BOOTSTRAP_REGION="${REGION}"
export BOOTSTRAP_CLOUD="${CLOUD}"
;;
R)
export BOOTSTRAP_REGION="${OPTARG}"
;;
S)
export BOOTSTRAP_SERIES="${OPTARG}"
;;
*)
echo "Unexpected argument ${opt}" >&2
exit 1
;;
esac
done
shift $((OPTIND - 1))
[[ ${1:-} == "--" ]] && shift
export VERBOSE="${VERBOSE}"
export SKIP_LIST="${SKIP_LIST}"
if [[ $# -eq 0 ]]; then
if [[ ${RUN_ALL} != "true" ]]; then
echo "$(red '---------------------------------------')"
echo "$(red 'Run with -A to run all the test suites.')"
echo "$(red '---------------------------------------')"
echo ""
show_help
exit 1
fi
fi
echo ""
echo "==> Checking for dependencies"
check_dependencies curl jq shellcheck
if [[ ${USER:-'root'} == "root" ]]; then
echo "The testsuite must not be run as root." >&2
exit 1
fi
cleanup() {
# Allow for failures and stop tracing everything
set +ex
# Allow for inspection
if [[ -n ${TEST_INSPECT:-} ]]; then
if [[ ${TEST_RESULT} != "success" ]]; then
echo "==> TEST DONE: ${TEST_CURRENT_DESCRIPTION}"
fi
echo "==> Test result: ${TEST_RESULT}"
echo "Tests Completed (${TEST_RESULT}): hit enter to continue"
# shellcheck disable=SC2034
read -r nothing
fi
echo "==> Cleaning up"
archive_logs "partial"
cleanup_jujus
cleanup_funcs
echo ""
if [[ ${TEST_RESULT} != "success" ]]; then
echo "==> TESTS DONE: ${TEST_CURRENT_DESCRIPTION}"
if [[ -f "${TEST_DIR}/${TEST_CURRENT}.log" ]]; then
echo "==> RUN OUTPUT: ${TEST_CURRENT}"
cat "${TEST_DIR}/${TEST_CURRENT}.log" | sed 's/^/ | /g'
echo ""
fi
fi
echo "==> Test result: ${TEST_RESULT}"
archive_logs "full"
if [ "${TEST_RESULT}" = "success" ]; then
rm -rf "${TEST_DIR}"
echo "==> Tests Removed: ${TEST_DIR}"
fi
echo "==> TEST COMPLETE"
}
# Move any artifacts to the chosen location
archive_logs() {
if [[ -z ${ARTIFACT_FILE} ]]; then
return
fi
archive_type="${1}"
echo "==> Test ${archive_type} artifact: ${ARTIFACT_FILE}"
if [[ -f ${OUTPUT_FILE} ]]; then
cp "${OUTPUT_FILE}" "${TEST_DIR}"
fi
TAR_OUTPUT=$(tar -C "${TEST_DIR}" --transform s/./artifacts/ -zcvf "${ARTIFACT_FILE}" ./ 2>&1)
# shellcheck disable=SC2181
if [[ $? -eq 0 ]]; then
echo "==> Test ${archive_type} artifact: COMPLETED"
else
echo "${TAR_OUTPUT}"
TEST_RESULT=failure
fi
}
TEST_CURRENT=setup
TEST_RESULT=failure
trap cleanup EXIT HUP INT TERM
# Setup test directory
TEST_DIR=$(mktemp -d tmp.XXX | xargs -I % echo "$(pwd)/%")
run_test() {
TEST_CURRENT=${1}
TEST_CURRENT_DESCRIPTION=${2:-${1}}
TEST_CURRENT_NAME=${TEST_CURRENT#"test_"}
if [[ -n ${4} ]]; then
TEST_CURRENT=${4}
fi
import_subdir_files "suites/${TEST_CURRENT_NAME}"
# shellcheck disable=SC2046,SC2086
echo "==> TEST BEGIN: ${TEST_CURRENT_DESCRIPTION} ($(green $(basename ${TEST_DIR})))"
START_TIME=$(date +%s)
${TEST_CURRENT}
END_TIME=$(date +%s)
echo "==> TEST DONE: ${TEST_CURRENT_DESCRIPTION} ($((END_TIME - START_TIME))s)"
}
# allow for running a specific set of tests
if [[ $# -gt 0 ]]; then
# shellcheck disable=SC2143
if [[ "$(echo "${2}" | grep -E "^run_")" ]]; then
TEST="$(grep -lr "run \"${2}\"" "suites/${1}" | xargs sed -rn 's/.*(test_\w+)\s+?\(\)\s+?\{/\1/p')"
if [[ -z ${TEST} ]]; then
echo "==> Unable to find parent test for ${2}."
echo " Try and run the parent test directly."
exit 1
fi
export RUN_SUBTEST="${2}"
echo "==> Running subtest: ${2}"
run_test "test_${1}" "" "" "${TEST}"
TEST_RESULT=success
exit
fi
# shellcheck disable=SC2143
if [[ "$(echo "${2}" | grep -E "^test_")" ]]; then
TEST="$(grep -lr "${2}" "suites/${1}")"
if [[ -z ${TEST} ]]; then
echo "==> Unable to find test ${2} in ${1}."
echo " Try and run the test suite directly."
exit 1
fi
export RUN_LIST="test_${1},${2}"
echo "==> Running subtest ${2} for ${1} suite"
run_test "test_${1}" "${1}" "" ""
TEST_RESULT=success
exit
fi
run_test "test_${1}" "" "$@" ""
TEST_RESULT=success
exit
fi
for test in ${TEST_NAMES}; do
name=$(echo "${test}" | sed -E "s/^run_//g" | sed -E "s/_/ /g")
run_test "test_${test}" "${name}" "" ""
done
TEST_RESULT=success
| jameinel/juju | tests/main.sh | Shell | agpl-3.0 | 10,328 |
java -cp "/Applications/mallet-2.0.7/class:/Applications/mallet-2.0.7/lib/mallet-deps.jar" cc.mallet.fst.SimpleTagger --model-file crf.model.train1 dataset.test1 > crf.out.train1
| negacy/TIMEXCCP-PYTHON | runTest.sh | Shell | lgpl-2.1 | 182 |
#!/bin/bash
# Note that h2def.py should be in PATH for this script to work and
# JHBUILD_SOURCES should be defined to contain the path to the root of the
# jhbuild sources. Also this script should reside in
# gstreamermm/gstreamer/src.
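# For illustration only (the paths below are hypothetical), the environment
# might be prepared along these lines before running this script:
#   export JHBUILD_SOURCES="$HOME/jhbuild/checkout"
#   export PATH="$PATH:$HOME/jhbuild/checkout/pygobject/codegen"  # directory containing h2def.py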
if [ -z "$JHBUILD_SOURCES" -o ! -x "`which h2def.py`" ]; then
echo -e "JHBUILD_SOURCES must contain the path to the jhbuild sources and \
h2def.py\nneeds to be executable and in PATH."
exit 1;
fi
DIR=`dirname "$0"`
PREFIX="$JHBUILD_SOURCES/gstreamer"
h2def.py "$PREFIX"/gst/*.h "$PREFIX"/libs/gst/{base,controller,dataprotocol,\
net,check}/*.h > "$DIR/gst_methods.defs"
PREFIX="$JHBUILD_SOURCES/gst-plugins-base"
h2def.py "$PREFIX"/gst-libs/gst/{app,audio,cdda,fft,floatcast,interfaces,\
netbuffer,riff,rtp,rtsp,sdp,tag,pbutils,video}/*.h >> "$DIR/gst_methods.defs"
#Patch generated file.
patch "$DIR/gst_methods.defs" "$DIR/gst_methods.defs.patch"
| peper0/gstreamermm-plugins | gstreamer/src/generate-methods.sh | Shell | lgpl-2.1 | 894 |
#PBS -l nodes=1:ppn=16,mem=32gb,walltime=24:00:00 -j oe -N HTC.ATCC
module load picard
INDIR=bam
GENOME=/bigdata/stajichlab/shared/projects/Candida/HMAC/Clus_reseq/New_ref_assembly/ref/S9.fasta
OUTDIR=Variants
LIST=samples.info
b=`basename $GENOME .fasta`
dir=`dirname $GENOME`
mkdir -p $OUTDIR
CPU=$PBS_NUM_PPN
if [ ! $CPU ]; then
CPU=1
fi
if [ ! -f $dir/$b.dict ]; then
module load picard
java -jar $PICARD CreateSequenceDictionary \
R=$GENOME OUTPUT=$dir/$b.dict \
SPECIES="Candida lusitaniae" TRUNCATE_NAMES_AT_WHITESPACE=true
module unload picard
fi
if [ ! $PBS_ARRAYID ]; then
PBS_ARRAYID=1;
fi
# guarantee java 7 is running not java 8
module load gatk
module list
java -version
O=$(sed -n ${PBS_ARRAYID}p $LIST | awk '{print $1}')
BAM=$INDIR/$O.realign.bam
if [ ! -f $BAM ]; then
echo "No $BAM file"
exit
fi
if [ ! -f $OUTDIR/$O.g.vcf ]; then
java -Xmx32g -jar $GATK \
-T HaplotypeCaller \
--max_alternate_alleles 10 \
-stand_emit_conf 10 -stand_call_conf 30 \
-ERC GVCF \
-ploidy 1 \
-I $BAM -R $GENOME \
-o $OUTDIR/$O.g.vcf -nct $CPU
fi
| stajichlab/Candida_lusitaniae | New_ref_genome/pipeline/03_HTC.sh | Shell | lgpl-3.0 | 1,079 |
#
# Configuration file for using the XML library in GNOME applications
#
XML2_LIBDIR="-L/target/lib"
XML2_LIBS="-lxml2 -L/target/lib -lz -lm "
XML2_INCLUDEDIR="-I/target/include/libxml2"
MODULE_VERSION="xml2-2.9.7"
| Haini/dotfiles | .joplin-bin/lib/node_modules/joplin/node_modules/sharp/vendor/lib/xml2Conf.sh | Shell | unlicense | 222 |
#!/usr/bin/env bash
# Come on, give us at least one argument
if [ $# -eq 0 ]; then
echo "No arguments supplied; please supply at least one thread"
exit 1
fi
start=0
cache=""
while IFS=: read -r field_name field_contents
do
# Strip leading white space
field_name="${field_name#"${field_name%%[![:space:]]*}"}"
field_contents="${field_contents#"${field_contents%%[![:space:]]*}"}"
# 4chan jams everything into a giant array for some reason; [ signifies the start
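# Illustrative shape of the pretty-printed JSON this loop walks through
# (the field names and values below are made-up examples, not real thread data):
# {
#   "posts": [
#     {
#       "no": 123456789,
#       "com": "post body"
#     },
#     ...
#   ]
# }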
if [[ "$field_contents" == "[" ]]; then
start=1
continue
fi
# Ignore everything before start
if [[ $start -eq 0 ]]; then
continue
fi
# ] signifies the end of the giant array; stop when past it
if [[ "$field_name" == "]" ]]; then
break
fi
# { means start of the post, reset cache and start bundling!
if [[ "$field_name" == "{" ]]; then
cache=""
continue
fi
# } means end of the post, output post!
if [[ "$field_name" == "}," || "$field_name" == "}" ]]; then
echo "$cache"
continue
fi
cache="$cache$field_name:$field_contents"
done < <(curl -sL "http://a.4cdn.org/b/thread/$1.json" | jq -M .)
| alicemargatroid/Misaka-Network | LastOrder/getThread.sh | Shell | unlicense | 1,214 |
#!/bin/sh
DoExitAsm ()
{ echo "An error occurred while assembling $1"; exit 1; }
DoExitLink ()
{ echo "An error occurred while linking $1"; exit 1; }
echo Linking passwords
OFS=$IFS
IFS="
"
/usr/bin/ld.bfd -b elf64-x86-64 -m elf_x86_64 --dynamic-linker=/lib64/ld-linux-x86-64.so.2 -L. -o passwords link.res
if [ $? != 0 ]; then DoExitLink passwords; fi
IFS=$OFS
echo Linking passwords
OFS=$IFS
IFS="
"
/usr/bin/objcopy --only-keep-debug passwords passwords.dbg
if [ $? != 0 ]; then DoExitLink passwords; fi
IFS=$OFS
echo Linking passwords
OFS=$IFS
IFS="
"
/usr/bin/objcopy --add-gnu-debuglink=passwords.dbg passwords
if [ $? != 0 ]; then DoExitLink passwords; fi
IFS=$OFS
echo Linking passwords
OFS=$IFS
IFS="
"
/usr/bin/strip --strip-unneeded passwords
if [ $? != 0 ]; then DoExitLink passwords; fi
IFS=$OFS
| Insaned79/pass | ppas.sh | Shell | apache-2.0 | 813 |
#!/usr/bin/env bash
# Migrate databases if necessary, sleep allows postgres container to finish launching
sleep 3
python ampcrowd/manage.py syncdb --noinput
# Generate static content
python ampcrowd/manage.py collectstatic --noinput
# Process options
DEVELOP=0
SSL=0
FOREGROUND=0
while getopts "dsf" OPTION
do
case $OPTION in
d)
DEVELOP=1
;;
s)
SSL=1
;;
f)
FOREGROUND=1
;;
esac
done
export DEVELOP
export SSL
export FOREGROUND
if [ "$DEVELOP" -eq "1" ]
then
echo "Celery launched in debug mode"
python ampcrowd/manage.py celery worker -l DEBUG --beat &
if [ "$SSL" -eq "1" ]
then
echo "Gunicorn starting"
(cd ampcrowd && gunicorn -c ../deploy/gunicorn_config.py crowd_server.wsgi:application)
else
python ampcrowd/manage.py runserver 0.0.0.0:8000
fi
else
echo "Celery launched in production mode"
python ampcrowd/manage.py celery worker --beat --detach
echo "Gunicorn starting"
(cd ampcrowd && gunicorn -c ../deploy/gunicorn_config.py crowd_server.wsgi:application)
fi
| amplab/ampcrowd | ampcrowd/docker-entrypoint.sh | Shell | apache-2.0 | 1,017 |
#!/bin/bash
clip_name=$1
VOLUME_INCREASE=${2:-1}
output_mp4=$3
OUT_DIR=$4
echo $clip_name $VOLUME_INCREASE $output_mp4 $OUT_DIR
offset_clip_id=0
mkdir -p "${OUT_DIR}" &&
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" &&
~/projects/study/code/video/ex/process_shadowing_pronunciation_video_clips/srt2tsv.sh "${clip_name}.srt" "${OUT_DIR}/marks.tsv" &&
if [ ! -f "${clip_name}.tr.srt" ]; then
echo "
1
00:00:00,000 --> 00:00:01,000
test satırı
" > "${clip_name}.tr.srt"
fi
~/projects/study/code/video/ex/process_shadowing_pronunciation_video_clips/srt2tsv.sh "${clip_name}.tr.srt" "${OUT_DIR}/marks.tr.tsv" &&
R --vanilla -e "one.video.to.multiple.clips::main_generate_ffmpeg_cmd_for_splitting_videos(path = '$OUT_DIR/marks.tsv', offset_clip_id = ${offset_clip_id}, original_video = '${output_mp4}', clip_name = '${clip_name}')" &&
bash ./$OUT_DIR/split01.sh &&
bash ./$OUT_DIR/split02.sh &&
silence01=$OUT_DIR/silence01.mp4 &&
VOLUME_INCREASE=0.01 &&
SILENCE_DURATION=02 &&
ffmpeg -ss 00:00 -to 00:$SILENCE_DURATION -i "${output_mp4}" -c:v libx264 -crf 23 -c:a aac -filter:a "volume=${VOLUME_INCREASE}" $silence01 &&
out_silence=$OUT_DIR/silence.mp4 &&
ffmpeg -i ${silence01} -t 00:$SILENCE_DURATION -c:v copy -c:a copy $out_silence &&
ffmpeg -f concat -safe 0 -i $OUT_DIR/video_files_merge.in -c copy "$OUT_DIR/${clip_name}_silence.mp4"
echo "done"
| mertnuhoglu/study | code/video/ex/process_shadowing_pronunciation_video_clips/make_shadowing_video_clips_step02.sh | Shell | apache-2.0 | 1,387 |
#!/bin/bash
IMAGE=--image-family=tf-latest-cpu
INSTANCE_NAME=dlvm
[email protected] # CHANGE THIS
echo "Looking for Jupyter URL on $INSTANCE_NAME"
while true; do
proxy=$(gcloud compute instances describe ${INSTANCE_NAME} 2> /dev/null | grep dot-datalab-vm)
if [ -z "$proxy" ]
then
echo -n "."
sleep 1
else
echo "done!"
echo "$proxy"
break
fi
done
| GoogleCloudPlatform/bigquery-oreilly-book | 05_devel/find_url.sh | Shell | apache-2.0 | 405 |
#!/bin/bash
# ----------------------
# KUDU Deployment Script
# Version: 0.1.11
# ----------------------
# Helpers
# -------
exitWithMessageOnError () {
if [ ! $? -eq 0 ]; then
echo "An error has occurred during web site deployment."
echo $1
exit 1
fi
}
# Prerequisites
# -------------
# Verify node.js installed
hash node 2>/dev/null
exitWithMessageOnError "Missing node.js executable, please install node.js, if already installed make sure it can be reached from current environment."
# Setup
# -----
SCRIPT_DIR="${BASH_SOURCE[0]%\\*}"
SCRIPT_DIR="${SCRIPT_DIR%/*}"
ARTIFACTS=$SCRIPT_DIR/../artifacts
KUDU_SYNC_CMD=${KUDU_SYNC_CMD//\"}
if [[ ! -n "$DEPLOYMENT_SOURCE" ]]; then
DEPLOYMENT_SOURCE=$SCRIPT_DIR
fi
if [[ ! -n "$NEXT_MANIFEST_PATH" ]]; then
NEXT_MANIFEST_PATH=$ARTIFACTS/manifest
if [[ ! -n "$PREVIOUS_MANIFEST_PATH" ]]; then
PREVIOUS_MANIFEST_PATH=$NEXT_MANIFEST_PATH
fi
fi
if [[ ! -n "$DEPLOYMENT_TARGET" ]]; then
DEPLOYMENT_TARGET=$ARTIFACTS/wwwroot
else
KUDU_SERVICE=true
fi
if [[ ! -n "$KUDU_SYNC_CMD" ]]; then
# Install kudu sync
echo Installing Kudu Sync
npm install kudusync -g --silent
exitWithMessageOnError "npm failed"
if [[ ! -n "$KUDU_SERVICE" ]]; then
# In case we are running locally this is the correct location of kuduSync
KUDU_SYNC_CMD=kuduSync
else
# In case we are running on kudu service this is the correct location of kuduSync
KUDU_SYNC_CMD=$APPDATA/npm/node_modules/kuduSync/bin/kuduSync
fi
fi
############################################################################
# Build
############################################################################
# Install go if needed
export GOROOT=$HOME/go
export PATH=$PATH:$GOROOT/bin
export GOPATH=$DEPLOYMENT_SOURCE
if [ ! -e "$GOROOT" ]; then
GO_ARCHIVE=$HOME/tmp/go.zip
mkdir -p ${GO_ARCHIVE%/*}
curl https://storage.googleapis.com/golang/go1.4.1.windows-amd64.zip -o $GO_ARCHIVE
unzip $GO_ARCHIVE -d $HOME
fi
# Create and store unique artifact name
DEPLOYMENT_ID=${SCM_COMMIT_ID:0:10}
ARTIFACT_NAME=$WEBSITE_SITE_NAME-$DEPLOYMENT_ID.exe
TARGET_ARTIFACT=$DEPLOYMENT_SOURCE/_target/$ARTIFACT_NAME
echo $TARGET_ARTIFACT > _artifact.txt
echo Building go artifact $TARGET_ARTIFACT from commit $DEPLOYMENT_ID
go build -v -o $TARGET_ARTIFACT
##################################################################################################################################
# Deployment
# ----------
echo Handling Basic Web Site deployment.
if [[ "$IN_PLACE_DEPLOYMENT" -ne "1" ]]; then
"$KUDU_SYNC_CMD" -v 50 -f "$DEPLOYMENT_SOURCE" -t "$DEPLOYMENT_TARGET" -n "$NEXT_MANIFEST_PATH" -p "$PREVIOUS_MANIFEST_PATH" -i ".git;.hg;.deployment;deploy.sh;.gitignore"
exitWithMessageOnError "Kudu Sync failed"
fi
echo Removing old artifacts
find ${TARGET_ARTIFACT%/*} $DEPLOYMENT_TARGET/_target -type f -name ${ARTIFACT_NAME%-*}-*.exe -maxdepth 1 -print0 |
grep -zv $ARTIFACT_NAME |
xargs -0 rm -v
##################################################################################################################################
# Post deployment stub
if [[ -n "$POST_DEPLOYMENT_ACTION" ]]; then
POST_DEPLOYMENT_ACTION=${POST_DEPLOYMENT_ACTION//\"}
cd "${POST_DEPLOYMENT_ACTION_DIR%\\*}"
"$POST_DEPLOYMENT_ACTION"
exitWithMessageOnError "post deployment action failed"
fi
echo "Finished successfully."
| hruan/go-azure | deploy.sh | Shell | apache-2.0 | 3,382 |
#!/usr/bin/bash
grep -q '^user:' /etc/passwd || echo "user:x:$(id -u):0:USER:/root:/bin/bash" >> /etc/passwd
grep -q '^group:' /etc/group || echo "group:x:$(id -G | awk '{print $2}'):user" >> /etc/group
# get initial content for /var/www/html
GIT_DIR=/srv/git/security_meeting.git /root/postreceive.sh
LANG=C /usr/sbin/httpd -DFOREGROUND
| joelsmith/images | meeting-oosecurity/root/start.sh | Shell | apache-2.0 | 341 |
#!/bin/bash
. release-common.sh
git_reset() {
git checkout master
git branch -D version-$CEYLON_RELEASE_VERSION
git tag -d $CEYLON_RELEASE_VERSION
git push origin :version-$CEYLON_RELEASE_VERSION
git push origin :$CEYLON_RELEASE_VERSION
git reset --hard $CEYLON_BRANCHING_TAG
}
cd ../ceylon
git_reset
cd ../ceylon-sdk
git_reset
cd ../ceylon-debian-repo
git_reset
cd ../ceylon-rpm-repo
git_reset
cd ../ceylon.formatter
git_reset
cd ../ceylon.tool.converter.java2ceylon
git_reset
cd ../ceylon-ide-common
git_reset
cd ../ceylon
| ceylon/ceylon | release-abort.sh | Shell | apache-2.0 | 548 |
#!/usr/bin/env bash
INTERNAL_IP=${ip}
sudo mkdir -p /var/lib/{kubelet,kube-proxy,kubernetes}
sudo mkdir -p /var/run/kubernetes
sudo mv bootstrap.kubeconfig /var/lib/kubelet
sudo mv kube-proxy.kubeconfig /var/lib/kube-proxy
sudo mv ca.pem /var/lib/kubernetes/
# install docker
curl -sLO https://get.docker.com/builds/Linux/x86_64/docker-1.12.6.tgz
tar -xvf docker-1.12.6.tgz
sudo cp docker/docker* /usr/bin/
cat > docker.service <<EOF
[Unit]
Description=Docker Application Container Engine
Documentation=http://docs.docker.io
[Service]
ExecStart=/usr/bin/docker daemon \\
--iptables=false \\
--ip-masq=false \\
--host=unix:///var/run/docker.sock \\
--log-level=error \\
--storage-driver=overlay
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
sudo mv docker.service /etc/systemd/system/docker.service
sudo systemctl daemon-reload
sudo systemctl enable docker
sudo systemctl start docker
sudo docker version
# install cni plugins
sudo mkdir -p /opt/cni
wget https://storage.googleapis.com/kubernetes-release/network-plugins/cni-amd64-0799f5732f2a11b329d9e3d51b9c8f2e3759f2ff.tar.gz
sudo tar -xvf cni-amd64-0799f5732f2a11b329d9e3d51b9c8f2e3759f2ff.tar.gz -C /opt/cni
# install kubelet
curl -sLO https://storage.googleapis.com/kubernetes-release/release/v1.7.4/bin/linux/amd64/kubectl
curl -sLO https://storage.googleapis.com/kubernetes-release/release/v1.7.4/bin/linux/amd64/kube-proxy
curl -sLO https://storage.googleapis.com/kubernetes-release/release/v1.7.4/bin/linux/amd64/kubelet
chmod +x kubectl kube-proxy kubelet
sudo mv kubectl kube-proxy kubelet /usr/bin/
API_SERVERS=$(sudo cat /var/lib/kubelet/bootstrap.kubeconfig | \
grep server | cut -d ':' -f2,3,4 | tr -d '[:space:]')
cat > kubelet.service <<EOF
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Requires=docker.service
[Service]
ExecStart=/usr/bin/kubelet \\
--api-servers=$API_SERVERS \\
--allow-privileged=true \\
--cluster-dns=10.32.0.10 \\
--cluster-domain=cluster.local \\
--container-runtime=docker \\
--experimental-bootstrap-kubeconfig=/var/lib/kubelet/bootstrap.kubeconfig \\
--network-plugin=kubenet \\
--kubeconfig=/var/lib/kubelet/kubeconfig \\
--serialize-image-pulls=false \\
--register-node=true \\
--tls-cert-file=/var/lib/kubelet/kubelet-client.crt \\
--tls-private-key-file=/var/lib/kubelet/kubelet-client.key \\
--cert-dir=/var/lib/kubelet \\
--v=2
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
sudo mv kubelet.service /etc/systemd/system/kubelet.service
sudo systemctl daemon-reload
sudo systemctl enable kubelet
sudo systemctl start kubelet
sudo systemctl status kubelet --no-pager
# install kube-proxy
cat > kube-proxy.service <<EOF
[Unit]
Description=Kubernetes Kube Proxy
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
[Service]
ExecStart=/usr/bin/kube-proxy \\
--cluster-cidr=10.200.0.0/16 \\
--masquerade-all=true \\
--kubeconfig=/var/lib/kube-proxy/kube-proxy.kubeconfig \\
--proxy-mode=iptables \\
--v=2
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
sudo mv kube-proxy.service /etc/systemd/system/kube-proxy.service
sudo systemctl daemon-reload
sudo systemctl enable kube-proxy
sudo systemctl start kube-proxy
sudo systemctl status kube-proxy --no-pager
| yamamoto-febc/kube-sacloud-template | provisioning/bootstrap_kube_workers.sh | Shell | apache-2.0 | 3,381 |
# Delete keychain
security delete-keychain ios-build.keychain
# Remove provisioning profile
rm -f ~/Library/MobileDevice/Provisioning\ Profiles/*
echo "*********************************"
echo "* KeyChain Destroyed *"
echo "*********************************"
| GMSLabs/Hyber-SDK-iOS | travis/scripts/remove-key.sh | Shell | apache-2.0 | 271 |
#!/bin/bash
sh clean.sh
./autogen.sh
AXIS2C_HOME=`pwd`/deploy
export AXIS2C_HOME
./configure --prefix=${AXIS2C_HOME} --enable-tests=no --enable-trace=yes --enable-diclient=no --enable-static=no --enable-openssl=no --with-apache2=/usr/local/apache2/include --with-archive=/usr/include
make
make install
cd tools/tcpmon
./autogen.sh
./configure --prefix=${AXIS2C_HOME} --enable-tests=no
make
make install
cd ../../samples
./autogen.sh
./configure --prefix=${AXIS2C_HOME} --with-axis2=${AXIS2C_HOME}/include/axis2-1.6.0
make
make install
make dist
tar xf axis2c-sample-src-1.6.0.tar.gz
mv axis2c-sample-src-1.6.0 samples
#$rm -rf ${AXIS2C_HOME}/samples
mv samples ${AXIS2C_HOME}/samples/src
cd ..
rm -rf xdocs/api/html
maven site
cd xdocs/api
doxygen doxygenconf
cd ../..
cp -r xdocs/api/html target/docs/api/
cp xdocs/docs/mod_log/module.xml target/docs/docs/mod_log
cp -r target/docs ${AXIS2C_HOME}
cd ${AXIS2C_HOME}
# rm -rf config.sub missing config.guess depcomp ltmain.sh
for i in `find . -name "*.la"`
do
rm $i
done
for i in `find . -name "*.a"`
do
rm $i
done
strip -s ./lib/*
strip -s modules/addressing/*
strip -s modules/logging/*
strip -s ./bin/axis2_http_server
strip -s ./samples/bin/*
strip -s ./bin/tools/tcpmon
rm -rf ./lib/pkgconfig
| axbannaz/axis2-c | bindist.sh | Shell | apache-2.0 | 1,272 |
#!/usr/bin/env sh
# generated from catkin/cmake/template/setup.sh.in
# Sets various environment variables and sources additional environment hooks.
# It tries its best to undo changes from a previously sourced setup file.
# Supported command line options:
# --extend: skips the undoing of changes from a previously sourced setup file
# since this file is sourced either use the provided _CATKIN_SETUP_DIR
# or fall back to the destination set at configure time
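# e.g. (illustrative): this file is meant to be sourced, typically as
#   . /home/tom/x_os/devel/setup.sh
# or, to keep the environment of a previously sourced workspace, as
#   . /home/tom/x_os/devel/setup.sh --extend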
: ${_CATKIN_SETUP_DIR:=/home/tom/x_os/devel}
_SETUP_UTIL="$_CATKIN_SETUP_DIR/_setup_util.py"
unset _CATKIN_SETUP_DIR
if [ ! -f "$_SETUP_UTIL" ]; then
echo "Missing Python script: $_SETUP_UTIL"
return 22
fi
# detect if running on Darwin platform
_UNAME=`uname -s`
_IS_DARWIN=0
if [ "$_UNAME" = "Darwin" ]; then
_IS_DARWIN=1
fi
unset _UNAME
# make sure to export all environment variables
export CMAKE_PREFIX_PATH
export CPATH
if [ $_IS_DARWIN -eq 0 ]; then
export LD_LIBRARY_PATH
else
export DYLD_LIBRARY_PATH
fi
unset _IS_DARWIN
export PATH
export PKG_CONFIG_PATH
export PYTHONPATH
# remember type of shell if not already set
if [ -z "$CATKIN_SHELL" ]; then
CATKIN_SHELL=sh
fi
# invoke Python script to generate necessary exports of environment variables
# use TMPDIR if it exists, otherwise fall back to /tmp
if [ -d "${TMPDIR}" ]; then
_TMPDIR="${TMPDIR}"
else
_TMPDIR=/tmp
fi
_SETUP_TMP=`mktemp "${_TMPDIR}/setup.sh.XXXXXXXXXX"`
unset _TMPDIR
if [ $? -ne 0 -o ! -f "$_SETUP_TMP" ]; then
echo "Could not create temporary file: $_SETUP_TMP"
return 1
fi
CATKIN_SHELL=$CATKIN_SHELL "$_SETUP_UTIL" $@ >> "$_SETUP_TMP"
_RC=$?
if [ $_RC -ne 0 ]; then
if [ $_RC -eq 2 ]; then
echo "Could not write the output of '$_SETUP_UTIL' to temporary file '$_SETUP_TMP': maybe the disk is full?"
else
echo "Failed to run '\"$_SETUP_UTIL\" $@': return code $_RC"
fi
unset _RC
unset _SETUP_UTIL
rm -f "$_SETUP_TMP"
unset _SETUP_TMP
return 1
fi
unset _RC
unset _SETUP_UTIL
. "$_SETUP_TMP"
rm -f "$_SETUP_TMP"
unset _SETUP_TMP
# source all environment hooks
_i=0
while [ $_i -lt $_CATKIN_ENVIRONMENT_HOOKS_COUNT ]; do
eval _envfile=\$_CATKIN_ENVIRONMENT_HOOKS_$_i
unset _CATKIN_ENVIRONMENT_HOOKS_$_i
eval _envfile_workspace=\$_CATKIN_ENVIRONMENT_HOOKS_${_i}_WORKSPACE
unset _CATKIN_ENVIRONMENT_HOOKS_${_i}_WORKSPACE
# set workspace for environment hook
CATKIN_ENV_HOOK_WORKSPACE=$_envfile_workspace
. "$_envfile"
unset CATKIN_ENV_HOOK_WORKSPACE
_i=$((_i + 1))
done
unset _i
unset _CATKIN_ENVIRONMENT_HOOKS_COUNT
| Txion001/src_x_os | x_os/devel/setup.sh | Shell | apache-2.0 | 2,533 |
#!/bin/sh
#
# Provisioning script for a vagrant machine.
#
# install Cloudera repo and update the lists
wget http://archive.cloudera.com/cm4/installer/latest/cloudera-manager-installer.bin
chmod u+x cloudera-manager-installer.bin
update-locale LANG=en_US.UTF-8
export LANG=en_US.utf8
export LC_ALL=en_US.UTF-8
export LANGUAGE=en_US
# we need expect to install cloudera manager
apt-get install -y expect
/usr/bin/expect -c "set timeout 600; spawn ./cloudera-manager-installer.bin --ui=stdio --noprompt --noreadme --nooptions --i-agree-to-all-licenses; expect EOF"
# Register Cloudera package repositories
cat > /etc/apt/sources.list.d/cloudera-cdh3.list <<END
deb http://archive.cloudera.com/debian lucid-cdh3 contrib
END
wget http://extjs.com/deploy/ext-2.2.zip
mkdir -p /usr/lib/oozie/libext && cp ext-2.2.zip /usr/lib/oozie/libext/
# Install all the required packages for CDH3 (Core Hadoop) components
apt-get update && apt-get install -y --force-yes oracle-j2sdk1.6 hadoop-0.20 hadoop-0.20-native hadoop-hive hadoop-pig oozie oozie-client
apt-get install -y --force-yes cloudera-manager-daemons cloudera-manager-agent
# disable oozie auto-start as part of CDH
update-rc.d -f oozie remove
| axemblr/cloudera-manager-api | src/test/resources/vagrant/build-image/install-cm-and-cdh.sh | Shell | apache-2.0 | 1,204 |
#!/bin/bash
#Change NDK to your Android NDK location
NDK=/home/joe/Android/sdk/ndk-bundle
PLATFORM=$NDK/platforms/android-21/arch-arm64/
PREBUILT=$NDK/toolchains/aarch64-linux-android-4.9/prebuilt/linux-x86_64
GENERAL="\
--enable-small \
--enable-cross-compile \
--extra-libs="-lgcc" \
--arch=aarch64 \
--cc=$PREBUILT/bin/aarch64-linux-android-gcc \
--cross-prefix=$PREBUILT/bin/aarch64-linux-android- \
--nm=$PREBUILT/bin/aarch64-linux-android-nm \
--extra-cflags="-I../x264/android/arm64/include" \
--extra-ldflags="-L../x264/android/arm64/lib" "
MODULES="\
--enable-gpl \
--enable-libx264"
function build_arm64
{
./configure \
--logfile=conflog.txt \
--target-os=linux \
--prefix=./android/arm64-v8a \
${GENERAL} \
--sysroot=$PLATFORM \
--extra-cflags="" \
--extra-ldflags="-lx264 -Wl,-rpath-link=$PLATFORM/usr/lib -L$PLATFORM/usr/lib -nostdlib -lc -lm -ldl -llog" \
--enable-shared \
--disable-static \
--disable-doc \
--enable-zlib \
${MODULES}
make clean
make -j4
make install
}
build_arm64
echo Android ARM64 builds finished
| joetang1989/Android-Universal-Image-Loader-Study | note/11.FFmpeg编译/build-scripts-of-ffmpeg-x264-for-android-ndk/ffmpeg/build_android_arm64_v8a.sh | Shell | apache-2.0 | 1,073 |
cd /root/event-go-server
NOW="$(date)"
mv nohup.log log/nohup-"$NOW".log
echo "" > nohup.log
nohup npm start > nohup.log 2>&1 &
echo $! > app.pid
| qhccthanh/event-go-server | start.sh | Shell | apache-2.0 | 149 |
#!/usr/bin/env bash
cd "$(dirname "${BASH_SOURCE}")"
git pull origin master
function doIt() {
rsync --exclude ".git/" --exclude ".DS_Store" --exclude "bootstrap.sh" \
--exclude "README.md" --exclude "LICENSE.md" -av --no-perms . ~
source ~/.bash_profile
}
if [ "$1" == "--force" -o "$1" == "-f" ]; then
doIt
else
read -p "This may overwrite existing files in your home directory. Are you sure? (y/n) " -n 1
echo
if [[ $REPLY =~ ^[Yy]$ ]]; then
doIt
fi
fi
unset doIt
| sporkmonger/dotfiles | bootstrap.sh | Shell | apache-2.0 | 554 |
java TopKIdentifier ../Analysis/Caida/caidaSplit1.csv ../Analysis/Caida/caidaSplit1SizeBySrcIp.csv runTrial Basic 2> ../Analysis/Caida/Caida1MTop300BaselineVal1ProbEvictions.csv
java TopKIdentifier ../Analysis/Caida/caidaSplit1.csv ../Analysis/Caida/caidaSplit1SizeBySrcIp.csv runTrial SpaceSaving 2> ../Analysis/Caida/Caida1MTop300SpaceSavingMVal1ProbEvictions.csv
| vibhaa/iw15 | code/runProblematicEvictions2.sh | Shell | apache-2.0 | 365 |
#!/bin/bash
DATE=$(date +"%Y%m%d")
echo Using $DATE as date
ffmpeg -f image2 -r 10 -i "/mnt/nas_plants/$DATE/${DATE}_%03d.jpg" -r 10 -s hd720 -vcodec libx264 /mnt/nas_plants/videos/$DATE.mp4
| rahulpopuri/plants | scripts/timelapse.sh | Shell | apache-2.0 | 193 |
#!/usr/bin/env bash
set -e
set -x
cp -r /root/ssh /root/.ssh
chown -R root:root /root/.ssh
chmod 0600 /root/.ssh/id_rsa
eval $(ssh-agent -s)
ssh -o StrictHostKeychecking=no aptly@${APTLY_SERVER} mkdir -p incoming/lg_ros_nodes/origin/master/
cd /src/lg_ros_nodes/catkin/
scp -o StrictHostKeychecking=no ./debs/*.deb aptly@${APTLY_SERVER}:incoming/lg-ros/origin/master/
ssh -o StrictHostKeychecking=no aptly@${APTLY_SERVER} bash /home/aptly/bin/publish-incoming.sh --project lg-ros --branch origin/master --rosrel "melodic" --distro "bionic"
ssh -o StrictHostKeychecking=no aptly@${APTLY_SERVER} bash /home/aptly/bin/publish-incoming-separate-repos.sh --project lg-ros --branch origin/master --rosrel "melodic" --distro "bionic"
| EndPointCorp/lg_ros_nodes | scripts/deploy.sh | Shell | apache-2.0 | 732 |
ssh [email protected] 'bash ~/Orchard/Scripts/send_log.sh; bash ~/Orchard/Scripts/send_scp.sh'
| useorchard/orchard2 | Python/send_orchard1.sh | Shell | apache-2.0 | 97 |
#!/bin/sh
set -e
mkdir -p "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
RESOURCES_TO_COPY=${PODS_ROOT}/resources-to-copy-${TARGETNAME}.txt
> "$RESOURCES_TO_COPY"
XCASSET_FILES=()
realpath() {
DIRECTORY="$(cd "${1%/*}" && pwd)"
FILENAME="${1##*/}"
echo "$DIRECTORY/$FILENAME"
}
install_resource()
{
case $1 in
*.storyboard)
echo "ibtool --reference-external-strings-file --errors --warnings --notices --output-format human-readable-text --compile ${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$1\" .storyboard`.storyboardc ${PODS_ROOT}/$1 --sdk ${SDKROOT}"
ibtool --reference-external-strings-file --errors --warnings --notices --output-format human-readable-text --compile "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$1\" .storyboard`.storyboardc" "${PODS_ROOT}/$1" --sdk "${SDKROOT}"
;;
*.xib)
echo "ibtool --reference-external-strings-file --errors --warnings --notices --output-format human-readable-text --compile ${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$1\" .xib`.nib ${PODS_ROOT}/$1 --sdk ${SDKROOT}"
ibtool --reference-external-strings-file --errors --warnings --notices --output-format human-readable-text --compile "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$1\" .xib`.nib" "${PODS_ROOT}/$1" --sdk "${SDKROOT}"
;;
*.framework)
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
echo "rsync -av ${PODS_ROOT}/$1 ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
rsync -av "${PODS_ROOT}/$1" "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
;;
*.xcdatamodel)
echo "xcrun momc \"${PODS_ROOT}/$1\" \"${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1"`.mom\""
xcrun momc "${PODS_ROOT}/$1" "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1" .xcdatamodel`.mom"
;;
*.xcdatamodeld)
echo "xcrun momc \"${PODS_ROOT}/$1\" \"${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1" .xcdatamodeld`.momd\""
xcrun momc "${PODS_ROOT}/$1" "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1" .xcdatamodeld`.momd"
;;
*.xcmappingmodel)
echo "xcrun mapc \"${PODS_ROOT}/$1\" \"${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1" .xcmappingmodel`.cdm\""
xcrun mapc "${PODS_ROOT}/$1" "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1" .xcmappingmodel`.cdm"
;;
*.xcassets)
ABSOLUTE_XCASSET_FILE=$(realpath "${PODS_ROOT}/$1")
XCASSET_FILES+=("$ABSOLUTE_XCASSET_FILE")
;;
/*)
echo "$1"
echo "$1" >> "$RESOURCES_TO_COPY"
;;
*)
echo "${PODS_ROOT}/$1"
echo "${PODS_ROOT}/$1" >> "$RESOURCES_TO_COPY"
;;
esac
}
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_resource "DateTools/DateTools/DateTools.bundle"
install_resource "MJRefresh/MJRefresh/MJRefresh.bundle"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_resource "DateTools/DateTools/DateTools.bundle"
install_resource "MJRefresh/MJRefresh/MJRefresh.bundle"
fi
mkdir -p "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
rsync -avr --copy-links --no-relative --exclude '*/.svn/*' --files-from="$RESOURCES_TO_COPY" / "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
if [[ "${ACTION}" == "install" ]]; then
mkdir -p "${INSTALL_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
rsync -avr --copy-links --no-relative --exclude '*/.svn/*' --files-from="$RESOURCES_TO_COPY" / "${INSTALL_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
fi
rm -f "$RESOURCES_TO_COPY"
if [[ -n "${WRAPPER_EXTENSION}" ]] && [ "`xcrun --find actool`" ] && [ -n "$XCASSET_FILES" ]
then
case "${TARGETED_DEVICE_FAMILY}" in
1,2)
TARGET_DEVICE_ARGS="--target-device ipad --target-device iphone"
;;
1)
TARGET_DEVICE_ARGS="--target-device iphone"
;;
2)
TARGET_DEVICE_ARGS="--target-device ipad"
;;
*)
TARGET_DEVICE_ARGS="--target-device mac"
;;
esac
# Find all other xcassets (this unfortunately includes those of path pods and other targets).
OTHER_XCASSETS=$(find "$PWD" -iname "*.xcassets" -type d)
while read line; do
if [[ $line != "`realpath $PODS_ROOT`*" ]]; then
XCASSET_FILES+=("$line")
fi
done <<<"$OTHER_XCASSETS"
printf "%s\0" "${XCASSET_FILES[@]}" | xargs -0 xcrun actool --output-format human-readable-text --notices --warnings --platform "${PLATFORM_NAME}" --minimum-deployment-target "${IPHONEOS_DEPLOYMENT_TARGET}" ${TARGET_DEVICE_ARGS} --compress-pngs --compile "${BUILT_PRODUCTS_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
fi
| ClegeJJ/CJWeiBo | Pods/Target Support Files/Pods/Pods-resources.sh | Shell | apache-2.0 | 4,913 |
#!/usr/bin/env bash
cd "$(dirname "$0")"
rm -rf google
PROTOC=./node_modules/grpc-tools/bin/protoc.js
for p in $(find ../third_party/googleapis/google -type f -name "*.proto"); do
$PROTOC \
--proto_path=../third_party/googleapis \
--js_out=import_style=commonjs,binary:./ \
--grpc_out=./ \
"$p"
done
| GoogleCloudPlatform/grpc-gcp-node | cloudprober/codegen.sh | Shell | apache-2.0 | 319 |
#meta keywords below
echo -n '"' | tee --append $metadatatargetpath$uuid/"current-import.csv"
while read keyword
do
echo -n ''$keyword', ' | tee --append $metadatatargetpath"$uuid/current-import.csv"
done < "tmp/"$uuid/$sku"keywords.txt"
| fredzannarbor/pagekicker-community | scripts/includes/keywordreader.sh | Shell | apache-2.0 | 242 |
curl -XPUT 'localhost:9200/mystackoverflow?pretty' -d '
{
"settings" : {
"index" : {
"number_of_shards" : 10,
"number_of_replicas" : 1
}
}
}
'
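# To check the resulting index settings afterwards (illustrative follow-up command):
#   curl 'localhost:9200/mystackoverflow/_settings?pretty'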
| stefansavev/elasticsearch-mystackoverflow-example | scripts/create_index.sh | Shell | apache-2.0 | 191 |
#!/bin/sh
ln -sf /etc/nginx/conf.d/health-check-return--drain /etc/nginx/conf.d/health-check-return-cmd
/usr/sbin/nginx -s reload
| eyal-lupu/docker-nginx-healthcheck-webserver | filesystem/usr/sbin/mark-drain.sh | Shell | apache-2.0 | 130 |
echo "Building monkey-ops docker image"
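# Usage (illustrative): ./build.sh <image-tag> [https-proxy-url]
#   $1 -> docker image tag; $2 (optional) -> https_proxy build argument passed to docker build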
TAG=$1
PROXY=""
if [ "$#" -gt 1 ]; then
PROXY="--build-arg https_proxy=$2"
fi
CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o ./image/monkey-ops ./go/*.go
if [ $? = 0 ]
then
docker build ${PROXY} -t produban/monkey-ops:${TAG} -f ./image/Dockerfile ./image
fi
| Produban/monkey-ops | build.sh | Shell | apache-2.0 | 319 |
#!/usr/bin/env bash
set -f
# docker stack deploy --compose-file $COMPOSE_PATH/docker-proxy/docker-compose.yml docker-proxy
docker stack deploy --compose-file $COMPOSE_PATH/proxy/docker-compose.yml proxy
sleep 15
docker stack deploy --compose-file $COMPOSE_PATH/visualizer/docker-compose.yml visualizer
docker stack deploy --compose-file $COMPOSE_PATH/portainer/docker-compose.yml portainer
docker stack deploy --compose-file $COMPOSE_PATH/filebrowser/docker-compose.yml filebrowser
docker stack deploy --compose-file $COMPOSE_PATH/monitor/docker-compose.yml monitor
docker stack deploy --compose-file $COMPOSE_PATH/logging/docker-compose.yml logging
docker stack deploy --compose-file $COMPOSE_PATH/selenium/docker-compose.yml selenium
# docker stack deploy --compose-file $COMPOSE_PATH/guacamole/guacamole-mysql-initdb/docker-compose.yml guacamole-setup
# shutdown setup program (or change to use remote db)
docker stack deploy --compose-file $COMPOSE_PATH/guacamole/docker-compose.yml guacamole
docker stack deploy --compose-file $COMPOSE_PATH/sonarqube/docker-compose.yml sonarqube
docker stack deploy --compose-file $COMPOSE_PATH/security/clair/docker-compose.yml clair
docker stack deploy --compose-file $COMPOSE_PATH/jenkins/docker-compose.yml jenkins
#docker stack deploy --compose-file $COMPOSE_PATH/jenkins-swarm-agent/docker-compose.yml jenkins-swarm-agent
docker stack deploy --compose-file $COMPOSE_PATH/nexus/docker-compose.yml nexus
docker stack deploy --compose-file $COMPOSE_PATH/wordpress/docker-compose.yml wordpress
docker stack deploy --compose-file $COMPOSE_PATH/windows/docker-compose.yml windows
docker stack deploy --compose-file $COMPOSE_PATH/hello-dual/docker-compose.yml hello-dual
docker stack deploy --compose-file $DEVOPS_PATH/docker-compose.yml devops-example
docker stack deploy --compose-file $COMPOSE_PATH/wordpress/wordpress-selenium-test/docker-compose.yml wordpress-test
set +f
| uscdev/compose | scripts/setup/05-docker-deploy.sh | Shell | apache-2.0 | 1,925 |
#!/bin/bash
# Translated languages (update these also to qlcplus.pro)
languages="de_DE es_ES fr_FR it_IT nl_NL cz_CZ pt_BR ca_ES ja_JP"
# Compile all files for the given language into one common qlcplus_<lang>.qm file
function compile {
echo Processing $1
lrelease -silent `find . -name "*_$1.ts"` -qm qlcplus_$1.qm
}
# Compile all translated languages present in $languages
for lang in $languages; do
compile $lang
done
| hveld/qlcplus | translate.sh | Shell | apache-2.0 | 435 |
#!/bin/bash
CURRENT_CONFIG_HASH=$(sha1sum /etc/haproxy/haproxy.cfg | cut -f1 -d' ')
NEW_CONFIG_HASH=$(sha1sum /opt/kolla/config_files/haproxy.cfg | cut -f1 -d' ')
if [[ $CURRENT_CONFIG_HASH != $NEW_CONFIG_HASH ]]; then
changed=changed
source /opt/kolla/config-external.sh
/usr/sbin/haproxy -f /etc/haproxy/haproxy.cfg -p /run/haproxy.pid -sf $(cat /run/haproxy.pid)
fi
echo $changed
| fdumpling/kolla | docker/haproxy/ensure_latest_config.sh | Shell | apache-2.0 | 398 |
#!/usr/bin/env bash
repo_dir="$(dirname $0)/.."
# repo_dir=$(git rev-parse --show-toplevel)
. "${repo_dir}/bin/functions"
if [ -n "$1" ]; then
app=$1
else
echo "You must specify a (valid) app name to install via tp"
exit 1
fi
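# Example invocation (the app name "redis" is hypothetical): ./bin/tp_install.sh redis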
PATH=$PATH:/opt/puppetlabs/puppet/bin
echo_title "Running tp::install { ${app}: }. Puppet version $(puppet --version)"
# Run puppet apply with correct configs
puppet apply --verbose --report --show_diff --summarize \
--modulepath "${repo_dir}/site:${repo_dir}/modules:/etc/puppetlabs/code/modules" \
--environmentpath "${repo_dir}" \
--hiera_config="${repo_dir}/hiera.yaml" \
--detailed-exitcodes -e "tp::install { $app: auto_prereq => true }"
result=$?
# Puppet exit codes 0 and 2 both imply an error-free run (with --detailed-exitcodes, 2 means changes were applied)
if [ "x$result" == "x0" ] || [ "x$result" == "x2" ]; then
echo_success "Puppet run without errors"
exit 0
else
echo_failure "There were errors in the Puppet run"
exit 1
fi
| example42/psick | bin/tp_install.sh | Shell | apache-2.0 | 927 |
#!/bin/bash
if [ -z "$local_war" ]; then
echo "local_war is not set"
exit 1
else
echo "local_war is ${local_war}"
fi
if [ -z "$uploaded_war" ]; then
echo "uploaded_war is not set"
exit 1
else
echo "uploaded_war is ${uploaded_war}"
fi
if [ -z "$nested_uploaded_war" ]; then
echo "nested_uploaded_war is not set"
exit 1
else
echo "nested_uploaded_war is ${nested_uploaded_war}"
fi
if [ -z "$remote_war" ]; then
echo "remote_war is not set"
exit 1
else
echo "remote_war is ${remote_war}"
fi
| alien4cloud/samples | demo-input-artifact/scripts/assert.sh | Shell | apache-2.0 | 540 |
#! /bin/sh
#
# File: swarm-setup-d8.sh
#
# Purpose: Complete setup steps for the Jenkins worker node instance
#
# Pre-conditions:
# Debian 8 OS
# slave.jar is downloaded to the user home directory
# This script is run from the Git repo directory
#
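# For illustration (the URL assumes the Jenkins master referenced later in
# swarm.conf; the /jnlpJars path is the conventional download location):
#   wget -q http://jenkins-master:8080/jnlpJars/slave.jar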
echo 'Create jenkins user'
sudo addgroup build
sudo adduser --disabled-password --system --ingroup build jenkins
sudo mkdir /home/jenkins/build
sudo chown jenkins:build /home/jenkins/build
echo 'Changing to user home directory'
cd
echo 'Installing Docker...'
sudo apt-get purge lxc-docker*
sudo apt-get purge docker.io*
sudo apt-get update
sudo apt-get -y -qq install apt-transport-https ca-certificates
sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
sudo bash -c "echo deb https://apt.dockerproject.org/repo debian-jessie main >> /etc/apt/sources.list.d/docker.list"
sudo apt-get update
sudo apt-cache policy docker-engine
sudo apt-get -y -qq install docker-engine
sudo service docker start
sudo docker run hello-world
sudo gpasswd -a $USER docker
sudo gpasswd -a jenkins docker
sudo service docker restart
echo 'Installing Supervisor...'
# Install supervisor package
sudo apt-get -y -qq install supervisor
echo 'Installing Jenkins Slave software'
# Install the Jenkins build agent code
sudo mkdir -p /opt/jenkins-slave
sudo mv slave.jar /opt/jenkins-slave
sudo chown -R root:root /opt/jenkins-slave
echo 'Installing Jenkins Swarm plugin client-side software'
wget -q -O swarm-client-2.0.jar \
http://repo.jenkins-ci.org/releases/org/jenkins-ci/plugins/swarm-client/2.0/swarm-client-2.0-jar-with-dependencies.jar
sudo mkdir -p /opt/swarm-client
sudo mv swarm-client*.jar /opt/swarm-client/
sudo chown -R root:root /opt/swarm-client
echo 'Installing Swarm supervisor config'
cd
tee swarm.conf << 'EOF' > /dev/null
[program:swarm]
directory=/home/jenkins
command=java -Xmx256m -Xmx256m -Dfile.encoding=UTF-8 -jar /opt/swarm-client/swarm-client-2.0.jar -master http://jenkins-master:8080 -username admin -password JPW -fsroot /home/jenkins -description 'auto' -labels 'slave' -name 'slave-auto' -executors 1 -mode exclusive
autostart=true
autorestart=true
user=jenkins
stdout_logfile=syslog
stderr_logfile=syslog
EOF
sudo chown root:root swarm.conf
sudo mv swarm.conf /etc/supervisor/conf.d/
sudo chmod 755 /etc/supervisor/conf.d/swarm.conf
sudo sed -i "s|JPW|$JENKINS_PW|g" /etc/supervisor/conf.d/swarm.conf
echo 'Installing UNZIP program'
cd
sudo apt-get -y -qq install unzip
# Configure the Swarm service to start when the instance boots
sudo supervisorctl reread
sudo supervisorctl update
echo 'Finished with installation script'
| GoogleCloudPlatformTraining/cpo200-Google-Container-Registry | swarm-setup-d8.sh | Shell | apache-2.0 | 2,673 |
#!/bin/sh -x
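# Usage (illustrative): ./valgrind.sh ./some_binary
#   $1 is the program to run under valgrind's memcheck tool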
sudo valgrind --tool=memcheck -v --demangle=yes \
--leak-check=full \
--leak-resolution=high \
--show-reachable=yes \
--show-possibly-lost=yes \
$1
| thortex/rpi3-webiopi | webiopi_0.7.1/python/native/valgrind.sh | Shell | apache-2.0 | 183 |
#!/bin/bash
# Copyright 2014 Commonwealth Bank of Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
set -u
set -v
readonly location="$( cd $(dirname $0) && pwd -P )"
source $location/settings.sh
# a warning indicating that this script is deprecated in favour of sbt-deploy-to.sh
echo "WARNING: the sbt-deploy.sh CI script is deprecated; please use sbt-deploy-to.sh instead."
echo "REASON FOR DEPRECATION:"
echo " sbt-deploy.sh assumes that deployment will happen to the public artifactoryonline"
echo " repository (ext-releases-local). Some private repositories, however, should be"
echo " deployed to a non-public location. Hence, for the purposes of uniformity, a new script,"
echo " sbt-deploy-to.sh, has been created that requires a target repository to be explicitly"
echo " specified."
set +e
isReleaseBranch $TRAVIS_BRANCH
IS_RELEASE=$?
set -e
if [[ $TRAVIS_PULL_REQUEST == "false" && $IS_RELEASE -eq 0 ]]; then
if [ $# -eq 0 ]; then
echo "DEPRECATION MIGRATION: this script can be replaced by: sbt-deploy-to.sh ext-releases-local"
sbt -Dsbt.global.base=$TRAVIS_BUILD_DIR/ci ';set publishTo in ThisBuild := Some("commbank-releases" at "http://commbank.artifactoryonline.com/commbank/ext-releases-local"); set publishMavenStyle in ThisBuild := true; + publish'
else
echo "DEPRECATION MIGRATION: this script can be replaced by: sbt-deploy-to.sh ext-releases-local" $@
for project in $@; do
echo "Publishing $project"
sbt -Dsbt.global.base=$TRAVIS_BUILD_DIR/ci ";project $project; set publishTo in ThisBuild := Some(\"commbank-releases\" at \"http://commbank.artifactoryonline.com/commbank/ext-releases-local\"); set publishMavenStyle in ThisBuild := true; + publish"
done
fi
else
echo "Not a release branch. Nothing to deploy."
fi
| wrouesnel/ci | sbt-deploy.sh | Shell | apache-2.0 | 2,362 |
#!/bin/bash
#
conda update -y -n base conda
| evansde77/cirrus | containers/ca-anaconda3/install_python.sh | Shell | apache-2.0 | 53 |
#!/bin/bash
docker build --rm -t whlee21/centos6-oracle-jdk17 `dirname $0`
docker images
| whlee21/docker | centos6-oracle-jdk17/docker-build.sh | Shell | apache-2.0 | 90 |
#! /bin/sh
echo "TESTING --first option: "
cp -f "$DIR/first.in" tmp.first.out
$MSRP --first -f -d "Ab" "!!" tmp.first.out
cmp --quiet tmp.first.out "$DIR/first.out" || exit 1
echo "PASSED!"
rm tmp.first.out
| malex984/msrp | test/first.sh | Shell | bsd-2-clause | 216 |
#!/bin/bash
set -e
flake8 cms --exclude migrations
coverage erase
coverage run `which py.test` --ds=test_settings --verbose cms "$@"
coverage report
| universalcore/unicore-cms-django | run_tests.sh | Shell | bsd-2-clause | 152 |
cd $(dirname $0)
if [ ! -d _virtualenv ];
then
virtualenv _virtualenv
fi
_virtualenv/bin/pip install -r requirements.txt
source _virtualenv/bin/activate
nosetests installation_test.py -m'^$'
| mwilliamson/whack-package-apache2-mod-php5 | tests/run-tests.sh | Shell | bsd-2-clause | 197 |
#!/bin/bash
# Copyright © Jonathan G. Rennison 2014 <[email protected]>
# License: New BSD License, see BSD-LICENSE.txt
# This script enables password-less sudo access for the following commands for the current login user:
CMDLIST=(
'/usr/bin/apt-get update'
'/usr/bin/apt-get upgrade'
)
# This creates the file /etc/sudoers.d/nopw-USERNAME
# This assumes that a suitable includedir directive is already in /etc/sudoers
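# (Illustration: on most systems /etc/sudoers already carries a line such as
#  "#includedir /etc/sudoers.d", which is what makes the generated file take effect.)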
. "`dirname "$0"`/../common/util.sh"
sudoers_add_prechecks
sudoers_add_tmps "nopw-$USER"
cat > "$TMPSUDOERS" << EOL
# This file was created by `readlink -f "$0"` at `date "+%F %T %z"`
EOL
for cmd in "${CMDLIST[@]}"; do
echo "$USER ALL = NOPASSWD: $cmd" >> "$TMPSUDOERS"
done
sudoers_add_install
| JGRennison/dotfiles | misc/install_user_nopw_sudoers_d.sh | Shell | bsd-3-clause | 734 |
#!/bin/sh
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This script generates a set of test (end-entity, intermediate, root)
# certificates that can be used to test fetching of an intermediate via AIA.
try() {
"$@" || (e=$?; echo "$@" > /dev/stderr; exit $e)
}
try rm -rf out
try mkdir out
try /bin/sh -c "echo 01 > out/2048-sha256-root-serial"
touch out/2048-sha256-root-index.txt
# Generate the key
try openssl genrsa -out out/2048-sha256-root.key 2048
# Generate the root certificate
CA_COMMON_NAME="Test Root CA" \
try openssl req \
-new \
-key out/2048-sha256-root.key \
-out out/2048-sha256-root.req \
-config ca.cnf
CA_COMMON_NAME="Test Root CA" \
try openssl x509 \
-req -days 3650 \
-in out/2048-sha256-root.req \
-signkey out/2048-sha256-root.key \
-extfile ca.cnf \
-extensions ca_cert \
-text > out/2048-sha256-root.pem
# Generate the leaf certificate requests
try openssl req \
-new \
-keyout out/expired_cert.key \
-out out/expired_cert.req \
-config ee.cnf
try openssl req \
-new \
-keyout out/ok_cert.key \
-out out/ok_cert.req \
-config ee.cnf
SUBJECT_NAME=req_localhost_cn \
try openssl req \
-new \
-keyout out/localhost_cert.key \
-out out/localhost_cert.req \
-reqexts req_localhost_san \
-config ee.cnf
# Generate the leaf certificates
CA_COMMON_NAME="Test Root CA" \
try openssl ca \
-batch \
-extensions user_cert \
-startdate 060101000000Z \
-enddate 070101000000Z \
-in out/expired_cert.req \
-out out/expired_cert.pem \
-config ca.cnf
CA_COMMON_NAME="Test Root CA" \
try openssl ca \
-batch \
-extensions user_cert \
-days 3650 \
-in out/ok_cert.req \
-out out/ok_cert.pem \
-config ca.cnf
CA_COMMON_NAME="Test Root CA" \
try openssl ca \
-batch \
-extensions name_constraint_bad \
-subj "/CN=Leaf certificate/" \
-days 3650 \
-in out/ok_cert.req \
-out out/name_constraint_bad.pem \
-config ca.cnf
CA_COMMON_NAME="Test Root CA" \
try openssl ca \
-batch \
-extensions name_constraint_good \
-subj "/CN=Leaf Certificate/" \
-days 3650 \
-in out/ok_cert.req \
-out out/name_constraint_good.pem \
-config ca.cnf
CA_COMMON_NAME="Test Root CA" \
try openssl ca \
-batch \
-extensions user_cert \
-days 3650 \
-in out/localhost_cert.req \
-out out/localhost_cert.pem \
-config ca.cnf
try /bin/sh -c "cat out/ok_cert.key out/ok_cert.pem \
> ../certificates/ok_cert.pem"
try /bin/sh -c "cat out/localhost_cert.key out/localhost_cert.pem \
> ../certificates/localhost_cert.pem"
try /bin/sh -c "cat out/expired_cert.key out/expired_cert.pem \
> ../certificates/expired_cert.pem"
try /bin/sh -c "cat out/2048-sha256-root.key out/2048-sha256-root.pem \
> ../certificates/root_ca_cert.pem"
try /bin/sh -c "cat out/ok_cert.key out/name_constraint_bad.pem \
> ../certificates/name_constraint_bad.pem"
try /bin/sh -c "cat out/ok_cert.key out/name_constraint_good.pem \
> ../certificates/name_constraint_good.pem"
# Now generate the one-off certs
## SHA-256 general test cert
try openssl req -x509 -days 3650 \
-config ../scripts/ee.cnf -newkey rsa:2048 -text \
-sha256 \
-out ../certificates/sha256.pem
## Self-signed cert for SPDY/QUIC/HTTP2 pooling testing
try openssl req -x509 -days 3650 -extensions req_spdy_pooling \
-config ../scripts/ee.cnf -newkey rsa:2048 -text \
-out ../certificates/spdy_pooling.pem
## SubjectAltName parsing
try openssl req -x509 -days 3650 -extensions req_san_sanity \
-config ../scripts/ee.cnf -newkey rsa:2048 -text \
-out ../certificates/subjectAltName_sanity_check.pem
## Punycode handling
SUBJECT_NAME="req_punycode_dn" \
try openssl req -x509 -days 3650 -extensions req_punycode \
-config ../scripts/ee.cnf -newkey rsa:2048 -text \
-out ../certificates/punycodetest.pem
## Reject intranet hostnames in "publicly" trusted certs
# 365 * 3 = 1095
SUBJECT_NAME="req_dn" \
try openssl req -x509 -days 1095 \
-config ../scripts/ee.cnf -newkey rsa:2048 -text \
-out ../certificates/reject_intranet_hosts.pem
## Validity too long unit test support.
try openssl req -config ../scripts/ee.cnf \
-newkey rsa:2048 -text -out ../certificates/10_year_validity.req
CA_COMMON_NAME="Test Root CA" \
try openssl ca \
-batch \
-extensions user_cert \
-startdate 081030000000Z \
-enddate 181029000000Z \
-in ../certificates/10_year_validity.req \
-out ../certificates/10_year_validity.pem \
-config ca.cnf
# 365 * 11 = 4015
try openssl req -config ../scripts/ee.cnf \
-newkey rsa:2048 -text -out ../certificates/11_year_validity.req
CA_COMMON_NAME="Test Root CA" \
try openssl ca \
-batch \
-extensions user_cert \
-startdate 141030000000Z \
-days 4015 \
-in ../certificates/11_year_validity.req \
-out ../certificates/11_year_validity.pem \
-config ca.cnf
try openssl req -config ../scripts/ee.cnf \
-newkey rsa:2048 -text -out ../certificates/39_months_after_2015_04.req
CA_COMMON_NAME="Test Root CA" \
try openssl ca \
-batch \
-extensions user_cert \
-startdate 150402000000Z \
-enddate 180702000000Z \
-in ../certificates/39_months_after_2015_04.req \
-out ../certificates/39_months_after_2015_04.pem \
-config ca.cnf
try openssl req -config ../scripts/ee.cnf \
-newkey rsa:2048 -text -out ../certificates/40_months_after_2015_04.req
CA_COMMON_NAME="Test Root CA" \
try openssl ca \
-batch \
-extensions user_cert \
-startdate 150402000000Z \
-enddate 180801000000Z \
-in ../certificates/40_months_after_2015_04.req \
-out ../certificates/40_months_after_2015_04.pem \
-config ca.cnf
try openssl req -config ../scripts/ee.cnf \
-newkey rsa:2048 -text -out ../certificates/60_months_after_2012_07.req
CA_COMMON_NAME="Test Root CA" \
try openssl ca \
-batch \
-extensions user_cert \
-startdate 141030000000Z \
-enddate 190930000000Z \
-in ../certificates/60_months_after_2012_07.req \
-out ../certificates/60_months_after_2012_07.pem \
-config ca.cnf
try openssl req -config ../scripts/ee.cnf \
-newkey rsa:2048 -text -out ../certificates/61_months_after_2012_07.req
# 30 * 61 = 1830
CA_COMMON_NAME="Test Root CA" \
try openssl ca \
-batch \
-extensions user_cert \
-startdate 141030000000Z \
-days 1830 \
-in ../certificates/61_months_after_2012_07.req \
-out ../certificates/61_months_after_2012_07.pem \
-config ca.cnf
# start date after expiry date
try openssl req -config ../scripts/ee.cnf \
-newkey rsa:2048 -text -out ../certificates/start_after_expiry.req
CA_COMMON_NAME="Test Root CA" \
try openssl ca \
-batch \
-extensions user_cert \
-startdate 180901000000Z \
-enddate 150402000000Z \
-in ../certificates/start_after_expiry.req \
-out ../certificates/start_after_expiry.pem \
-config ca.cnf
try openssl req -config ../scripts/ee.cnf \
-newkey rsa:2048 -text -out ../certificates/start_after_expiry.req
# Issued pre-BRs, lifetime < 120 months, expires before 2019-07-01
try openssl req -config ../scripts/ee.cnf \
-newkey rsa:2048 -text -out ../certificates/pre_br_validity_ok.req
CA_COMMON_NAME="Test Root CA" \
try openssl ca \
-batch \
-extensions user_cert \
-startdate 080101000000Z \
-enddate 150101000000Z \
-in ../certificates/pre_br_validity_ok.req \
-out ../certificates/pre_br_validity_ok.pem \
-config ca.cnf
try openssl req -config ../scripts/ee.cnf \
-newkey rsa:2048 -text -out ../certificates/pre_br_validity_ok.req
# Issued pre-BRs, lifetime > 120 months, expires before 2019-07-01
try openssl req -config ../scripts/ee.cnf \
-newkey rsa:2048 -text -out ../certificates/pre_br_validity_bad_121.req
CA_COMMON_NAME="Test Root CA" \
try openssl ca \
-batch \
-extensions user_cert \
-startdate 080101000000Z \
-enddate 180501000000Z \
-in ../certificates/pre_br_validity_bad_121.req \
-out ../certificates/pre_br_validity_bad_121.pem \
-config ca.cnf
try openssl req -config ../scripts/ee.cnf \
-newkey rsa:2048 -text -out ../certificates/pre_br_validity_bad_121.req
# Issued pre-BRs, lifetime < 120 months, expires after 2019-07-01
try openssl req -config ../scripts/ee.cnf \
-newkey rsa:2048 -text -out ../certificates/pre_br_validity_bad_2020.req
CA_COMMON_NAME="Test Root CA" \
try openssl ca \
-batch \
-extensions user_cert \
-startdate 120501000000Z \
-enddate 190703000000Z \
-in ../certificates/pre_br_validity_bad_2020.req \
-out ../certificates/pre_br_validity_bad_2020.pem \
-config ca.cnf
try openssl req -config ../scripts/ee.cnf \
-newkey rsa:2048 -text -out ../certificates/pre_br_validity_bad_2020.req
# Regenerate CRLSets
## Block a leaf cert directly by SPKI
try python crlsetutil.py -o ../certificates/crlset_by_leaf_spki.raw \
<<CRLBYLEAFSPKI
{
"BlockedBySPKI": ["../certificates/ok_cert.pem"]
}
CRLBYLEAFSPKI
## Block a leaf cert by issuer-hash-and-serial (ok_cert.pem == serial 2, by
## virtue of the serial file and ordering above).
try python crlsetutil.py -o ../certificates/crlset_by_root_serial.raw \
<<CRLBYROOTSERIAL
{
"BlockedByHash": {
"../certificates/root_ca_cert.pem": [2]
}
}
CRLBYROOTSERIAL
## Block a leaf cert by issuer-hash-and-serial. However, this will be issued
## from an intermediate CA issued underneath a root.
try python crlsetutil.py -o ../certificates/crlset_by_intermediate_serial.raw \
<<CRLSETBYINTERMEDIATESERIAL
{
"BlockedByHash": {
"../certificates/quic_intermediate.crt": [3]
}
}
CRLSETBYINTERMEDIATESERIAL
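# Illustrative sanity check (not part of the original script): print the serial
# of the leaf that the CRLSets above reference, so the hash-and-serial entries
# can be compared by hand. Assumes the files generated earlier are still in out/.
openssl x509 -noout -subject -serial -in out/ok_cert.pem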
|
ltilve/chromium
|
net/data/ssl/scripts/generate-test-certs.sh
|
Shell
|
bsd-3-clause
| 9,814 |
#!/bin/bash
scala -classpath ./target/sparse_vector_output-0.0.0.jar GenerateInput ./input 10 10000
|
agrippa/spark-swat
|
functional-tests/sparse-vector-output/generate.sh
|
Shell
|
bsd-3-clause
| 101 |
../src/slim --ltl -p1 --no-dump --use-mcndfs --use-Ncore=48 --psym queen0.psym --nc queen0.nc queen/queen_13.lmn 1>/dev/null 2>queen/result_mcndfs/queen13
cat queen/result_mcndfs/queen13
../src/slim --ltl -p1 --no-dump --use-mcndfs --use-Ncore=48 --psym queen0.psym --nc queen0.nc queen/queen_14.lmn 1>/dev/null 2>queen/result_mcndfs/queen14
cat queen/result_mcndfs/queen14
../src/slim --ltl -p1 --no-dump --use-mcndfs --use-Ncore=48 --psym queen0.psym --nc queen0.nc queen/queen_15.lmn 1>/dev/null 2>queen/result_mcndfs/queen15
cat queen/result_mcndfs/queen15
../src/slim --ltl -p1 --no-dump --use-mcndfs --use-Ncore=48 --psym queen0.psym --nc queen0.nc queen/queen_16.lmn 1>/dev/null 2>queen/result_mcndfs/queen16
cat queen/result_mcndfs/queen16
../src/slim --ltl -p1 --no-dump --use-mcndfs --use-Ncore=48 --psym queen0.psym --nc queen0.nc queen/queen_17.lmn 1>/dev/null 2>queen/result_mcndfs/queen17
cat queen/result_mcndfs/queen17
../src/slim --ltl -p1 --no-dump --use-mcndfs --use-Ncore=48 --psym queen0.psym --nc queen0.nc queen/queen_18.lmn 1>/dev/null 2>queen/result_mcndfs/queen18
cat queen/result_mcndfs/queen18
../src/slim --ltl -p1 --no-dump --use-mcndfs --use-Ncore=48 --psym queen0.psym --nc queen0.nc queen/queen_19.lmn 1>/dev/null 2>queen/result_mcndfs/queen19
cat queen/result_mcndfs/queen19
../src/slim --ltl -p1 --no-dump --use-mcndfs --use-Ncore=48 --psym queen0.psym --nc queen0.nc queen/queen_20.lmn 1>/dev/null 2>queen/result_mcndfs/queen20
cat queen/result_mcndfs/queen20
../src/slim --ltl -p1 --no-dump --use-mcndfs --use-Ncore=48 --psym queen0.psym --nc queen0.nc queen/queen_21.lmn 1>/dev/null 2>queen/result_mcndfs/queen21
cat queen/result_mcndfs/queen21
../src/slim --ltl -p1 --no-dump --use-mcndfs --use-Ncore=48 --psym queen0.psym --nc queen0.nc queen/queen_22.lmn 1>/dev/null 2>queen/result_mcndfs/queen22
cat queen/result_mcndfs/queen22
../src/slim --ltl -p1 --no-dump --use-mcndfs --use-Ncore=48 --psym queen0.psym --nc queen0.nc queen/queen_23.lmn 1>/dev/null 2>queen/result_mcndfs/queen23
cat queen/result_mcndfs/queen23
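# The runs above differ only in the board size; an equivalent loop-based form,
# shown here purely as an illustrative sketch (kept commented out so the
# benchmarks are not executed twice):
# for n in $(seq 13 23); do
#   ../src/slim --ltl -p1 --no-dump --use-mcndfs --use-Ncore=48 --psym queen0.psym --nc queen0.nc queen/queen_${n}.lmn 1>/dev/null 2>queen/result_mcndfs/queen${n}
#   cat queen/result_mcndfs/queen${n}
# done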
|
ysm001/slim
|
benchmarkset/queen/queen_bench_mcndfs.sh
|
Shell
|
bsd-3-clause
| 2,057 |
#!/usr/bin/env bash
set -ex
echo Installing driver dependencies
curl https://packages.microsoft.com/keys/microsoft.asc | sudo apt-key add -
curl https://packages.microsoft.com/config/ubuntu/16.04/prod.list | sudo tee /etc/apt/sources.list.d/mssql.list
sudo apt-get update
ACCEPT_EULA=Y sudo apt-get install -qy msodbcsql17 unixodbc unixodbc-dev libssl1.0.0
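# Optional post-install check (illustrative, not part of the original script):
# list the ODBC drivers that unixODBC now knows about, which should include
# the msodbcsql17 driver installed above.
odbcinst -q -d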
|
BenMorel/dbal
|
tests/travis/install-sqlsrv-dependencies.sh
|
Shell
|
mit
| 360 |
#!/bin/sh
set -e
mkdir -p "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
RESOURCES_TO_COPY=${PODS_ROOT}/resources-to-copy-${TARGETNAME}.txt
> "$RESOURCES_TO_COPY"
install_resource()
{
case $1 in
*.storyboard)
echo "ibtool --reference-external-strings-file --errors --warnings --notices --output-format human-readable-text --compile ${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$1\" .storyboard`.storyboardc ${PODS_ROOT}/$1 --sdk ${SDKROOT}"
ibtool --reference-external-strings-file --errors --warnings --notices --output-format human-readable-text --compile "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$1\" .storyboard`.storyboardc" "${PODS_ROOT}/$1" --sdk "${SDKROOT}"
;;
*.xib)
echo "ibtool --reference-external-strings-file --errors --warnings --notices --output-format human-readable-text --compile ${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$1\" .xib`.nib ${PODS_ROOT}/$1 --sdk ${SDKROOT}"
ibtool --reference-external-strings-file --errors --warnings --notices --output-format human-readable-text --compile "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$1\" .xib`.nib" "${PODS_ROOT}/$1" --sdk "${SDKROOT}"
;;
*.framework)
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
echo "rsync -av ${PODS_ROOT}/$1 ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
rsync -av "${PODS_ROOT}/$1" "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
;;
*.xcdatamodel)
echo "xcrun momc \"${PODS_ROOT}/$1\" \"${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1"`.mom\""
xcrun momc "${PODS_ROOT}/$1" "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1" .xcdatamodel`.mom"
;;
*.xcdatamodeld)
echo "xcrun momc \"${PODS_ROOT}/$1\" \"${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1" .xcdatamodeld`.momd\""
xcrun momc "${PODS_ROOT}/$1" "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1" .xcdatamodeld`.momd"
;;
*.xcassets)
;;
/*)
echo "$1"
echo "$1" >> "$RESOURCES_TO_COPY"
;;
*)
echo "${PODS_ROOT}/$1"
echo "${PODS_ROOT}/$1" >> "$RESOURCES_TO_COPY"
;;
esac
}
install_resource "${BUILT_PRODUCTS_DIR}/XBTransition.bundle"
rsync -avr --copy-links --no-relative --exclude '*/.svn/*' --files-from="$RESOURCES_TO_COPY" / "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
if [[ "${ACTION}" == "install" ]]; then
rsync -avr --copy-links --no-relative --exclude '*/.svn/*' --files-from="$RESOURCES_TO_COPY" / "${INSTALL_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
fi
rm -f "$RESOURCES_TO_COPY"
if [[ -n "${WRAPPER_EXTENSION}" ]] && [ "`xcrun --find actool`" ] && [ `find . -name '*.xcassets' | wc -l` -ne 0 ]
then
case "${TARGETED_DEVICE_FAMILY}" in
1,2)
TARGET_DEVICE_ARGS="--target-device ipad --target-device iphone"
;;
1)
TARGET_DEVICE_ARGS="--target-device iphone"
;;
2)
TARGET_DEVICE_ARGS="--target-device ipad"
;;
*)
TARGET_DEVICE_ARGS="--target-device mac"
;;
esac
find "${PWD}" -name "*.xcassets" -print0 | xargs -0 actool --output-format human-readable-text --notices --warnings --platform "${PLATFORM_NAME}" --minimum-deployment-target "${IPHONEOS_DEPLOYMENT_TARGET}" ${TARGET_DEVICE_ARGS} --compress-pngs --compile "${BUILT_PRODUCTS_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
fi
|
EugeneNguyen/XBTransition
|
Example/Pods/Target Support Files/Pods-XBTransition/Pods-XBTransition-resources.sh
|
Shell
|
mit
| 3,707 |
#!/bin/bash
#
##########################################################################
#Red Hat Enterprise Linux 6 - DISA STIG Compliance Remediation Content
#Copyright (C) 2013
#Vincent C. Passaro ([email protected])
#
##########################################################################
#
###################### Buddha Labs LLC ################################
# By Vincent C. Passaro #
# Buddha Labs LLC. #
# vince[@]buddhalabs[.]com #
# www.buddhalabs.com #
###################### Buddha Labs LLC ################################
#_________________________________________________________________________
# Version | Change Information | Author | Date
#-------------------------------------------------------------------------
# 1.0 | Initial Script Creation | Vincent Passaro | 1-JUNE-2013
#
#
#######################DISA INFORMATION##################################
# Group ID (Vulid): RHEL-06-000126
# Group Title: SRG-OS-000096
#
# Rule ID: RHEL-06-000126_rule
# Severity: low
# Rule Version (STIG-ID): RHEL-06-000126
# Rule Title: The Reliable Datagram Sockets (RDS) protocol must be
# disabled unless required.
#
# Vulnerability Discussion: Disabling RDS protects the system against
# exploitation of any flaws in its implementation.
#
# Responsibility:
# IAControls:
#
# Check Content:
#
# If the system is configured to prevent the loading of the "rds" kernel
# module, it will contain lines inside any file in "/etc/modprobe.d" or the
# deprecated "/etc/modprobe.conf". These lines instruct the module loading
# system to run another program (such as "/bin/true") upon a module
# "install" event. Run the following command to search for such lines in
# all files in "/etc/modprobe.d" and the deprecated "/etc/modprobe.conf":
# $ grep -r rds /etc/modprobe.conf /etc/modprobe.d
# If no line is returned, this is a finding.
#
# Fix Text:
#
# The Reliable Datagram Sockets (RDS) protocol is a transport layer
# protocol designed to provide reliable, high-bandwidth, low-latency
# communications between nodes in a cluster. To configure the system to
# prevent the "rds" kernel module from being loaded, add the following line
# to a file in the directory "/etc/modprobe.d":
# install rds /bin/true
#######################DISA INFORMATION##################################
#
# Global Variables
PDI=RHEL-06-000126
#
#BEGIN_CHECK
#END_CHECK
#BEGIN_REMEDY
#END_REMEDY
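# Illustrative sketch of the check and remedy described above (not part of the
# original STIG content; the drop-in file name "rds.conf" is an assumption):
if ! grep -qrs 'install rds /bin/true' /etc/modprobe.conf /etc/modprobe.d; then
    echo "install rds /bin/true" >> /etc/modprobe.d/rds.conf
fi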
|
atomicturtle/t-stig
|
aqueduct-0.4/compliance/Bash/STIG/rhel-6/prod/RHEL-06-000126.sh
|
Shell
|
mit
| 2,724 |
// Copyright 2015 XLGAMES Inc.
//
// Distributed under the MIT License (See
// accompanying file "LICENSE" or the website
// http://www.opensource.org/licenses/mit-license.php)
#include "../Utility/perlinnoise.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
cbuffer Parameters
{
// common control parameters
float2 Center;
float Radius;
float Adjustment;
uint2 SurfaceMins; // Minimum coord of the "CachedSurface", in terrain uber-surface coords
uint2 SurfaceMaxs; // Max coord of the "CachedSurface", in terrain uber-surface coords
uint2 DispatchOffset; // Terrain uber-surface coord when dispatchThreadId = uint3(0,0,0)
}
float LengthSquared(float2 input) { return dot(input, input); }
cbuffer PaintParameters
{
uint paintValue;
}
RWTexture2D<uint> OutputSurface : register(u0);
Texture2D<uint> InputSurface;
[numthreads(16, 16, 1)]
void Paint(uint3 dispatchThreadId : SV_DispatchThreadID)
{
uint2 surfaceSpaceCoord = DispatchOffset + dispatchThreadId.xy;
float rsq = LengthSquared(float2(surfaceSpaceCoord) - Center);
if (surfaceSpaceCoord.x <= SurfaceMaxs.x && surfaceSpaceCoord.y <= SurfaceMaxs.y && rsq < (Radius*Radius)) {
OutputSurface[surfaceSpaceCoord - SurfaceMins] = paintValue;
}
}
|
xlgames-inc/XLE
|
Working/Game/xleres/ui/terrainmodification_int.sh
|
Shell
|
mit
| 1,281 |
#network interface on which to limit traffic
IF="eth0"
#limit of the network interface in question
LINKCEIL="1gbit"
#limit outbound Dobbscoin protocol traffic to this rate
LIMIT="160kbit"
#defines the address space for which you wish to disable rate limiting
LOCALNET="192.168.0.0/16"
#delete existing rules
tc qdisc del dev ${IF} root
#add root class
tc qdisc add dev ${IF} root handle 1: htb default 10
#add parent class
tc class add dev ${IF} parent 1: classid 1:1 htb rate ${LINKCEIL} ceil ${LINKCEIL}
#add our two classes. one unlimited, another limited
tc class add dev ${IF} parent 1:1 classid 1:10 htb rate ${LINKCEIL} ceil ${LINKCEIL} prio 0
tc class add dev ${IF} parent 1:1 classid 1:11 htb rate ${LIMIT} ceil ${LIMIT} prio 1
#add handles to our classes so packets marked with <x> go into the class with "... handle <x> fw ..."
tc filter add dev ${IF} parent 1: protocol ip prio 1 handle 1 fw classid 1:10
tc filter add dev ${IF} parent 1: protocol ip prio 2 handle 2 fw classid 1:11
#delete any existing rules
#disable for now
#ret=0
#while [ $ret -eq 0 ]; do
# iptables -t mangle -D OUTPUT 1
# ret=$?
#done
#limit outgoing traffic to and from port 19985, but not when dealing with a host on the local network
# (defined by $LOCALNET)
# --set-mark marks packets matching these criteria with the number "2"
# these packets are filtered by the tc filter with "handle 2"
# this filter sends the packets into the 1:11 class, and this class is limited to ${LIMIT}
iptables -t mangle -A OUTPUT -p tcp -m tcp --dport 19985 ! -d ${LOCALNET} -j MARK --set-mark 0x2
iptables -t mangle -A OUTPUT -p tcp -m tcp --sport 19985 ! -d ${LOCALNET} -j MARK --set-mark 0x2
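# Illustrative inspection commands (not part of the original script): show the
# htb classes with their byte counters and the mangle rules with packet counts,
# so the 1:11 limit and the --set-mark rules above can be verified.
tc -s class show dev ${IF}
iptables -t mangle -L OUTPUT -v -n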
|
dobbscoin/dobbscoin-source
|
contrib/qos/tc.sh
|
Shell
|
mit
| 1,675 |
#!/usr/bin/env sh
# generated from catkin/python/catkin/environment_cache.py
# based on a snapshot of the environment before and after calling the setup script
# it emulates the modifications of the setup script without recurring computations
# new environment variables
export CATKIN_TEST_RESULTS_DIR="/projects/ros/seniordesign/trackbot/build/test_results"
export ROS_TEST_RESULTS_DIR="/projects/ros/seniordesign/trackbot/build/test_results"
# modified environment variables
export CMAKE_PREFIX_PATH="/projects/ros/seniordesign/trackbot/build/devel:$CMAKE_PREFIX_PATH"
export CPATH="/projects/ros/seniordesign/trackbot/build/devel/include:$CPATH"
export LD_LIBRARY_PATH="/projects/ros/seniordesign/trackbot/build/devel/lib:$LD_LIBRARY_PATH"
export PATH="/projects/ros/seniordesign/trackbot/build/devel/bin:$PATH"
export PKG_CONFIG_PATH="/projects/ros/seniordesign/trackbot/build/devel/lib/pkgconfig:$PKG_CONFIG_PATH"
export PYTHONPATH="/projects/ros/seniordesign/trackbot/build/devel/lib/python2.7/dist-packages:$PYTHONPATH"
export ROSLISP_PACKAGE_DIRECTORIES="/projects/ros/seniordesign/trackbot/build/devel/share/common-lisp"
export ROS_PACKAGE_PATH="/projects/ros/seniordesign/trackbot:/opt/ros/groovy/share:/opt/ros/groovy/stacks"
|
cheezium/seniordesign
|
trackbot/build/catkin_generated/setup_cached.sh
|
Shell
|
mit
| 1,239 |
#!/bin/bash
# Wrapper script for bowtie that accepts the same arguments, except
# that the first one has to be the output file.
# Get path to output file and shift it.
OUT=$1
shift
echo "EXECUTING bowtie -S $* >${OUT}"
bowtie -S $* >${OUT}
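# Example invocation (illustrative only; the index and read file names below
# are made up, not part of this repository):
# ./bowtie-wrapper.sh out.sam -p 4 hg19_index reads.fq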
|
weese/docker-seqan
|
wrappers/bowtie-wrapper.sh
|
Shell
|
mit
| 246 |
#!/bin/bash
#
#
STORM_NAME=$1
WORKDIR=$2
#
cd $WORKDIR
sdat1=`awk '{printf "%s", $1} ' ${STORM_NAME}.datesin `
sdat2=`awk '{printf "%s", $2} ' ${STORM_NAME}.datesin `
edat1=`awk '{printf "%s", $3} ' ${STORM_NAME}.datesin `
edat2=`awk '{printf "%s", $4} ' ${STORM_NAME}.datesin `
str=`echo $sdat1 | cut -c1-6 `
stc=`echo $sdat1 | cut -c7-8 `
st1=`echo $sdat2 | cut -c1-2 `
ste=`echo $sdat2 | cut -c3-6 `
st2=`echo $((st1+1)) `
st3=`echo $((stc+1)) `
if [ $st2 -lt "10" ]
then
stc="0"$st2
else
stc=$st2
fi
if [ $st3 -lt "10" ]
then
sto="0"$st3
else
sto=$st3
fi
startf=$stc$ste
start1=$str$sto
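# Illustrative example of the arithmetic above (made-up values): with
# sdat1=20120820 and sdat2=120000, startf becomes 130000 (start hour + 1)
# and start1 becomes 20120821 (start day + 1).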
#
cat > ww3_multi.inp << EOF
$ -------------------------------------------------------------------- $
$ WAVEWATCH III multi-grid model driver input file $
$ -------------------------------------------------------------------- $
$
$ *******************************************************************
$ *** NOTE : This is an example file from the mww3_test_05 script ***
$ *** Unlike other input example files this one CANNOT ***
$ *** be run as an independent interactive run ***
$ *******************************************************************
$
$ The first input line sets up the general multi-grid model definition
$ by defining the following six parameters:
$
$ 1) Number of wave model grids. ( NRGRD )
$ 2) Number of grids defining input fields. ( NRINP )
$ 3) Flag for using unified point output file. ( UNIPTS )
$ 4) Output server type as in ww3_shel.inp
$ 5) Flag for dedicated process for unified point output.
$ 6) Flag for grids sharing dedicated output processes.
$
6 0 F 1 T T
$
$ -------------------------------------------------------------------- $
$ Now each actual wave model grid is defined using 13 parameters to be
$ read from a single line in the file. Each line contains the following
$ parameters
$ 1) Define the grid with the extension of the mod_def file.
$ 2-8) Define the inputs used by the grids with 8 keywords
$ corresponding to the 8 flags defining the input in the
$ input files. Valid keywords are:
$ 'no' : This input is not used.
$ 'native' : This grid has its own input files, e.g. grid
$ grdX (mod_def.grdX) uses ice.grdX.
$ 'MODID' : Take input from the grid identified by
$ MODID. In the example below, all grids get
$ their wind from wind.input (mod_def.input).
$ 9) Rank number of grid (internally sorted and reassigned).
$ 10) Group number (internally reassigned so that different
$ ranks result in different group numbers).
$ 11-12) Define fraction of communicator (processes) used for this
$ grid.
$ 13) Flag identifying dumping of boundary data used by this
$ grid. If true, the file nest.MODID is generated.
$
'grd1' 'no' 'no' 'native' 'no' 'no' 'no' 'no' 1 1 0.00 1.00 F
'grd2' 'no' 'no' 'native' 'no' 'no' 'no' 'no' 2 1 0.00 1.00 F
'grd3' 'no' 'no' 'native' 'no' 'no' 'no' 'no' 3 1 0.00 1.00 F
'grd4' 'no' 'no' 'native' 'no' 'no' 'no' 'no' 4 2 0.00 1.00 F
'grd5' 'no' 'no' 'native' 'no' 'no' 'no' 'no' 5 3 0.00 1.00 F
'grd6' 'no' 'no' 'native' 'no' 'no' 'no' 'no' 6 4 0.00 1.00 F
$ 'grd3' 'no' 'no' 'input' 'no' 'no' 'no' 'no' 3 1 0.50 1.00 F
$
$ In this example three grids are used requiring the files
$ mod_def.grdN. All files get their winds from the grid 'input'
$ defined by mod_def.input, and no other inputs are used. In the lines
$ that are commented out, each grid runs on a part of the pool of
$ processes assigned to the computation.
$
$ -------------------------------------------------------------------- $
$ Starting and ending times for the entire model run
$
$sdat1 $sdat2 $edat1 $edat2
$
$ -------------------------------------------------------------------- $
$ Specific multi-scale model settings (single line).
$ Flag for masking computation in two-way nesting (except at
$ output times).
$ Flag for masking at printout time.
$
F F
$
$ Define output data ------------------------------------------------- $
$
$ Five output types are available (see below). All output types share
$ a similar format for the first input line:
$ - first time in yyyymmdd hhmmss format, output interval (s), and
$ last time in yyyymmdd hhmmss format (all integers).
$ Output is disabled by setting the output interval to 0.
$
$ Type 1 : Fields of mean wave parameters
$ Standard line and line with flags to activate output fields
$ as defined in section 2.4 of the manual. The second line is
$ not supplied if no output is requested.
$ The raw data file is out_grd.ww3,
$ see w3iogo.ftn for additional doc.
$
$
$sdat1 $startf 3600 $edat1 $edat2
$
$ D C W D W I H L T T T C F D S D H T L T S W W P D F C C U C F T T W W W W A U B F T S T J F T U M M P U U
$ E U N T L C S 0 M G P I P P S P P H I S S N T C F F S H A A W C C C C B B E B B X W O U S S S 2 S S
$ P R D V E 2 1 E R R P P P P P P F R D X D T A W W A C F H M R R D B B Y O C S S S C S 1 2
F F T F F F T F F T F F T T T F T T F F F F F F F F F F F F F F F F F F F F F F F F F F F F F F F F F F F
$
$
$sdat1 $startf 0 $edat1 $edat2
$
$
$sdat1 $startf 0 $edat1 $edat2
$
$
$ Type 4 : Restart files (no additional data required).
$ The data file is restartN.ww3, see
$ w3iors.ftn for additional doc.
$
$edat1 $edat2 1 $edat1 $edat2
$
$ Type 5 : Boundary data (no additional data required).
$ The data file is nestN.ww3, see
$ w3iobp.ftn for additional doc.
$
$sdat1 $startf 0 $edat1 $edat2
$
$sdat1 $startf 0 $edat1 $edat2
$
$ -------------------------------------------------------------------- $
$ Output requests per grid and type to overwrite general setup
$ as defined above. First record per set is the grid name MODID
$ and the output type number. Then follows the standard time string,
$ and conventional data as per output type. In mww3_test_05 this is
$ not used. Below, one example generating partitioning output for
$ the inner grid is included but commented out.
$
'grd1' 2
$sdat1 $startf 3600 $edat1 $edat2
-72.9697 34.89667 '41001'
-75.2406 32.29500 '41002'
-79.0994 32.51000 '41004'
-77.3817 29.30190 '41006'
-81.0800 30.73000 '41008'
-80.1833 28.50190 '41009'
-78.5331 28.88000 '41010'
-80.5333 30.04140 '41012'
-77.7431 33.43640 '41013'
-76.5217 24.63667 '41016'
-75.4019 35.00610 '41025'
-77.2794 34.47610 '41035'
-76.9525 34.21080 '41036'
-70.8700 23.86690 '41046'
-71.4911 27.46940 '41047'
-69.6489 31.97833 '41048'
-63.0000 27.50000 '41049'
-73.6489 38.36833 '44001'
-70.6433 38.50000 '44004'
-68.5600 42.65033 '44005'
-70.0900 43.53000 '44007'
-69.4275 40.50000 '44008'
-74.7000 38.45000 '44009'
-66.5550 41.06000 '44011'
-74.5800 38.79000 '44012'
-74.8336 36.58310 '44014'
-73.6156 37.11280 '44015'
-72.0478 40.69220 '44017'
-69.2944 41.25860 '44018'
-70.1864 41.44310 '44020'
-74.3911 37.53533 '44023'
-73.1667 40.25033 '44025'
-67.3144 44.27250 '44027'
-73.7028 40.36940 '44065'
-72.6006 39.58310 '44066'
-71.0036 41.39250 '44070'
-53.0386 14.48033 '41040'
-45.9967 14.50667 '41041'
-65.0136 20.98890 '41043'
-58.6950 21.65190 '41044'
-70.9939 23.99866 '41046'
-71.4911 27.46940 '41047'
-69.6489 31.97833 '41048'
-63.0000 27.50000 '41049'
-51.0000 45.00000 '44131'
-61.1300 41.19000 '44137'
-53.6200 44.25000 '44138'
-57.3500 44.32000 '44139'
-50.6100 42.73000 '44140'
-56.1300 42.12000 '44141'
-49.9800 45.89000 '44143'
0.E3 0.E3 'STOPSTRING'
$
'grd2' 2
$sdat1 $startf 3600 $edat1 $edat2
-72.9697 34.89667 '41001'
-75.2406 32.29500 '41002'
-79.0994 32.51000 '41004'
-77.3817 29.30190 '41006'
-81.0800 30.73000 '41008'
-80.1833 28.50190 '41009'
-78.5331 28.88000 '41010'
-80.5333 30.04140 '41012'
-77.7431 33.43640 '41013'
-76.5217 24.63667 '41016'
-75.4019 35.00610 '41025'
-76.9525 34.21080 '41036'
-70.8700 23.86690 '41046'
-71.4911 27.46940 '41047'
-69.6489 31.97833 '41048'
-63.0000 27.50000 '41049'
-73.6489 38.36833 '44001'
-70.6433 38.50000 '44004'
-68.5600 42.65033 '44005'
-70.0900 43.53000 '44007'
-69.4275 40.50000 '44008'
-74.7000 38.45000 '44009'
-66.5550 41.06000 '44011'
-74.5800 38.79000 '44012'
-70.7800 42.38000 '44013'
-74.8336 36.58310 '44014'
-73.6156 37.11280 '44015'
-72.0478 40.69220 '44017'
-69.2944 41.25860 '44018'
-74.3911 37.53533 '44023'
-73.1667 40.25033 '44025'
-67.3144 44.27250 '44027'
-73.7028 40.36940 '44065'
-72.6006 39.58310 '44066'
-81.2925 30.70921 '41112'
-75.5914 36.25492 '44100'
-75.7200 36.91519 '44099'
-71.1260 40.96861 '44097'
-70.1688 42.79779 '44098'
-61.1300 41.19000 '44137'
-57.3500 44.32000 '44139'
-56.1300 42.12000 '44141'
0.E3 0.E3 'STOPSTRING'
$
'grd3' 2
$sdat1 $startf 3600 $edat1 $edat2
-67.2500 44.50000 '63001'
-67.3333 44.41667 '63002'
-67.4166 44.33333 '63003'
-67.5000 44.33333 '63004'
-67.5833 44.33333 '63005'
-67.6666 44.33333 '63006'
-67.7500 44.33333 '63007'
-67.8333 44.25000 '63008'
-67.9166 44.25000 '63009'
-68.0000 44.16667 '63010'
-68.0833 44.16667 '63011'
-68.1666 44.16667 '63012'
-68.1666 44.08333 '63013'
-68.2500 44.00000 '63014'
-68.3333 44.00000 '63015'
-68.4166 44.00000 '63016'
-68.5000 43.91667 '63017'
-68.5833 43.91667 '63018'
-68.6666 43.91667 '63019'
-68.7500 43.91667 '63020'
-68.8333 43.91667 '63021'
-68.9166 43.91667 '63022'
-69.0000 43.91667 '63023'
-69.0833 43.83333 '63024'
-69.1666 43.75000 '63025'
-69.2500 43.75000 '63026'
-69.3333 43.75000 '63027'
-69.4166 43.66667 '63028'
-69.5000 43.66667 '63029'
-69.5833 43.66667 '63030'
-69.6666 43.58333 '63031'
-69.7500 43.58333 '63032'
-69.8333 43.58333 '63033'
-69.9166 43.58333 '63034'
-70.0000 43.58333 '63035'
-70.0000 43.50000 '63036'
-70.0833 43.41667 '63037'
-70.1666 43.41667 '63038'
-70.2500 43.33333 '63039'
-70.3333 43.25000 '63040'
-70.4166 43.16667 '63041'
-70.4167 43.08333 '63042'
-70.5000 43.00000 '63043'
-70.5833 42.91667 '63044'
-70.5833 42.83333 '63045'
-70.5000 42.83333 '63046'
-70.4166 42.75000 '63047'
-70.4166 42.66667 '63048'
-70.4166 42.58333 '63049'
-70.5000 42.50000 '63050'
-70.5833 42.41667 '63051'
-70.6666 42.41667 '63052'
-70.5833 42.33333 '63053'
-70.0000 42.25000 '63054'
-70.0833 42.25000 '63055'
-70.1666 42.25000 '63056'
-70.2500 42.25000 '63057'
-70.3333 42.25000 '63058'
-70.4166 42.25000 '63059'
-70.5000 42.25000 '63060'
-69.9166 42.16667 '63061'
-69.8333 42.08333 '63062'
-69.8333 42.00000 '63063'
-69.7500 41.91667 '63064'
-69.7500 41.83333 '63065'
-69.7500 41.75000 '63066'
-69.7500 41.66667 '63067'
-69.7500 41.58333 '63068'
-69.6666 41.50000 '63069'
-69.7500 41.50000 '63070'
-69.6666 41.41667 '63071'
-69.6666 41.33333 '63072'
-69.6666 41.25000 '63073'
-71.0000 41.25000 '63074'
-71.0833 41.25000 '63075'
-71.1666 41.25000 '63076'
-71.2500 41.25000 '63077'
-71.3333 41.25000 '63078'
-71.4166 41.25000 '63079'
-69.6666 41.16667 '63080'
-70.3333 41.16667 '63081'
-70.4166 41.16667 '63082'
-70.5000 41.16667 '63083'
-70.5833 41.16667 '63084'
-70.6666 41.16667 '63085'
-70.7500 41.16667 '63086'
-70.8333 41.16667 '63087'
-70.9166 41.16667 '63088'
-71.4166 41.16667 '63089'
-69.6666 41.08333 '63090'
-70.0000 41.08333 '63091'
-70.0833 41.08333 '63092'
-70.1666 41.08333 '63093'
-70.2500 41.08333 '63094'
-71.4166 41.08333 '63095'
-69.6666 41.00000 '63096'
-69.7500 41.00000 '63097'
-69.8333 41.00000 '63098'
-69.9166 41.00000 '63099'
-71.5833 41.00000 '63100'
-71.6666 41.00000 '63101'
-71.8333 40.91667 '63102'
-71.9166 40.83333 '63103'
-72.0000 40.83333 '63104'
-72.0833 40.83333 '63105'
-72.1666 40.75000 '63106'
-72.2500 40.75000 '63107'
-72.3333 40.75000 '63108'
-72.4166 40.66667 '63109'
-72.5000 40.66667 '63110'
-72.5833 40.66667 '63111'
-72.6666 40.58333 '63112'
-72.7500 40.58333 '63113'
-72.8333 40.58333 '63114'
-72.9166 40.50000 '63115'
-73.0000 40.50000 '63116'
-73.0833 40.50000 '63117'
-73.1666 40.50000 '63118'
-73.2500 40.50000 '63119'
-73.3333 40.50000 '63120'
-73.4166 40.41667 '63121'
-73.5000 40.41667 '63122'
-73.5833 40.41667 '63123'
-73.6666 40.41667 '63124'
-73.7500 40.41667 '63125'
-73.8333 40.41667 '63126'
-73.8333 40.33333 '63127'
-73.8333 40.25000 '63128'
-73.8333 40.16667 '63129'
-73.8333 40.08333 '63130'
-73.8333 40.00000 '63131'
-68.5600 41.34967 '44005'
-70.0900 42.47000 '44007'
-69.4275 40.50000 '44008'
-66.5550 40.94000 '44011'
-70.8003 42.38000 '44013'
-72.0478 40.69220 '44017'
-69.2944 41.25860 '44018'
-73.1667 40.25033 '44025'
-67.3144 44.27250 '44027'
-73.7028 40.36940 '44065'
-71.0036 41.39250 '44070'
-71.1261 40.96862 '44097'
-70.1688 42.79780 '44098'
0.E3 0.E3 'STOPSTRING'
$
'grd4' 2
$sdat1 $startf 3600 $edat1 $edat2
-75.3333 35.16667 '63259'
-75.4166 35.08333 '63260'
-75.5000 35.08333 '63261'
-75.5833 35.08333 '63262'
-75.6666 35.00000 '63263'
-75.7500 35.00000 '63264'
-75.8333 35.00000 '63265'
-75.9166 35.00000 '63266'
-75.9166 34.91667 '63267'
-76.0000 34.83333 '63268'
-76.0833 34.83333 '63269'
-76.1666 34.75000 '63270'
-76.2500 34.66667 '63271'
-76.3333 34.58333 '63272'
-76.3333 34.50000 '63273'
-76.6666 34.50000 '63274'
-76.7500 34.50000 '63275'
-76.8333 34.50000 '63276'
-76.9166 34.50000 '63277'
-77.0000 34.50000 '63278'
-77.0833 34.50000 '63279'
-76.3333 34.41667 '63280'
-76.5833 34.41667 '63281'
-77.0833 34.41667 '63282'
-77.1666 34.41667 '63283'
-77.2500 34.41667 '63284'
-76.3333 34.33333 '63285'
-76.5000 34.33333 '63286'
-76.6666 34.33333 '63287'
-76.8333 34.33333 '63288'
-77.0000 34.33333 '63289'
-77.2500 34.33333 '63290'
-77.3333 34.33333 '63291'
-77.4166 34.33333 '63292'
-77.3333 34.25000 '63293'
-77.5000 34.25000 '63294'
-77.4166 34.16667 '63295'
-77.5833 34.16667 '63296'
-77.5000 34.08333 '63297'
-77.6666 34.08333 '63298'
-77.5833 34.00000 '63299'
-77.6666 34.00000 '63300'
-77.6666 33.91667 '63301'
-77.7500 33.91667 '63302'
-77.6666 33.83333 '63303'
-77.7500 33.83333 '63304'
-77.6666 33.75000 '63305'
-77.7500 33.75000 '63306'
-77.6666 33.66667 '63307'
-77.7500 33.66667 '63308'
-78.1666 33.66667 '63309'
-78.2500 33.66667 '63310'
-78.3333 33.66667 '63311'
-78.4166 33.66667 '63312'
-78.5000 33.66667 '63313'
-78.5833 33.66667 '63314'
-77.5833 33.58333 '63315'
-77.8333 33.58333 '63316'
-78.0000 33.58333 '63317'
-78.5833 33.58333 '63318'
-77.5833 33.50000 '63319'
-77.7500 33.50000 '63320'
-77.9166 33.50000 '63321'
-78.0833 33.50000 '63322'
-78.2500 33.50000 '63323'
-78.4166 33.50000 '63324'
-78.6666 33.50000 '63325'
-78.5000 33.41667 '63326'
-78.7500 33.41667 '63327'
-78.5833 33.33333 '63328'
-78.7500 33.33333 '63329'
-78.5833 33.25000 '63330'
-78.8333 33.25000 '63331'
-78.6666 33.16667 '63332'
-78.8333 33.16667 '63333'
-78.7500 33.08333 '63334'
-78.9166 33.00000 '63335'
-79.0833 33.00000 '63336'
-79.0833 32.91667 '63337'
-79.1666 32.91667 '63338'
-79.1666 32.83333 '63339'
-79.2500 32.83333 '63340'
-79.3333 32.83333 '63341'
-79.2500 32.75000 '63342'
-79.4166 32.75000 '63343'
-79.3333 32.66667 '63344'
-79.5000 32.66667 '63345'
-79.5833 32.66667 '63346'
-79.4166 32.58333 '63347'
-79.6666 32.58333 '63348'
-79.5833 32.50000 '63349'
-79.8333 32.50000 '63350'
-79.6666 32.41667 '63351'
-79.9166 32.41667 '63352'
-80.0000 32.41667 '63353'
-79.7500 32.33333 '63354'
-79.8333 32.33333 '63355'
-80.0833 32.33333 '63356'
-80.1666 32.33333 '63357'
-79.9166 32.25000 '63358'
-80.0000 32.25000 '63359'
-80.2500 32.25000 '63360'
-80.0833 32.16667 '63361'
-80.3333 32.16667 '63362'
-80.1666 32.08333 '63363'
-80.2500 32.08333 '63364'
-80.4166 32.08333 '63365'
-80.5000 32.08333 '63366'
-80.3333 32.00000 '63367'
-80.5833 32.00000 '63368'
-75.3333 35.00000 '63502'
-75.5833 34.75000 '63503'
-75.8333 34.50000 '63504'
-76.0000 34.25000 '63505'
-76.2500 34.00000 '63506'
-76.5833 33.75000 '63507'
-76.9166 33.50000 '63508'
-77.2500 33.16667 '63509'
-77.9166 33.00000 '63510'
-78.2500 32.75000 '63511'
-78.6666 32.50000 '63512'
-79.0833 32.16667 '63513'
-79.0994 32.51000 '41004'
-77.7431 33.43640 '41013'
-75.4019 35.00610 '41025'
-77.2794 34.47610 '41035'
-76.9525 34.21080 '41036'
-73.6489 37.63167 '44001'
-74.7000 37.55000 '44009'
-74.5800 38.78333 '44012'
-74.8336 35.41690 '44014'
-73.6156 36.88720 '44015'
-74.3911 36.46467 '44023'
-73.7028 40.36940 '44065'
-72.6006 39.58310 '44066'
-75.7140 36.29988 '44056'
-77.7087 34.14148 '41110'
-75.5914 36.25492 '44100'
-75.7201 36.91520 '44099'
0.0 0.0 'STOPSTRING'
$
'grd5' 2
$sdat1 $startf 3600 $edat1 $edat2
-79.5833 32.50000 '63349'
-79.8333 32.50000 '63350'
-79.6666 32.41667 '63351'
-79.9166 32.41667 '63352'
-80.0000 32.41667 '63353'
-79.7500 32.33333 '63354'
-79.8333 32.33333 '63355'
-80.0833 32.33333 '63356'
-80.1666 32.33333 '63357'
-79.9166 32.25000 '63358'
-80.0000 32.25000 '63359'
-80.2500 32.25000 '63360'
-80.0833 32.16667 '63361'
-80.3333 32.16667 '63362'
-80.1666 32.08333 '63363'
-80.2500 32.08333 '63364'
-80.4166 32.08333 '63365'
-80.5000 32.08333 '63366'
-80.3333 32.00000 '63367'
-80.5833 32.00000 '63368'
-80.4166 31.91667 '63369'
-80.5833 31.91667 '63370'
-80.4166 31.83333 '63371'
-80.6666 31.83333 '63372'
-80.4166 31.75000 '63373'
-80.7500 31.75000 '63374'
-80.5000 31.66667 '63375'
-80.5833 31.66667 '63376'
-80.8333 31.66667 '63377'
-80.5833 31.58333 '63378'
-80.9166 31.58333 '63379'
-80.6666 31.50000 '63380'
-80.9166 31.50000 '63381'
-80.7500 31.41667 '63382'
-80.9166 31.41667 '63383'
-80.8333 31.33333 '63384'
-80.9166 31.33333 '63385'
-80.8333 31.25000 '63386'
-81.0000 31.25000 '63387'
-80.8333 31.16667 '63388'
-81.0000 31.16667 '63389'
-80.8333 31.08333 '63390'
-81.0833 31.08333 '63391'
-80.9166 31.00000 '63392'
-81.0833 31.00000 '63393'
-80.9166 30.91667 '63394'
-81.0833 30.91667 '63395'
-80.9166 30.83333 '63396'
-81.1666 30.83333 '63397'
-81.0000 30.75000 '63398'
-81.1666 30.75000 '63399'
-81.0000 30.66667 '63400'
-81.2500 30.66667 '63401'
-81.0833 30.58333 '63402'
-81.2500 30.58333 '63403'
-81.0833 30.50000 '63404'
-81.2500 30.50000 '63405'
-81.0833 30.41667 '63406'
-81.2500 30.41667 '63407'
-81.0833 30.33333 '63408'
-81.2500 30.33333 '63409'
-81.0833 30.25000 '63410'
-81.1666 30.25000 '63411'
-81.0833 30.16667 '63412'
-81.1666 30.16667 '63413'
-81.0833 30.08333 '63414'
-81.1666 30.08333 '63415'
-81.0833 30.00000 '63416'
-81.0833 29.91667 '63417'
-81.0833 29.83333 '63418'
-81.0000 29.75000 '63419'
-80.9166 29.66667 '63420'
-81.0000 29.66667 '63421'
-81.0000 29.58333 '63422'
-80.9166 29.50000 '63423'
-80.8333 29.41667 '63424'
-80.9166 29.41667 '63425'
-80.8333 29.33333 '63426'
-80.8333 29.25000 '63427'
-80.7500 29.16667 '63428'
-80.6666 29.08333 '63429'
-80.5833 29.00000 '63430'
-80.5833 28.91667 '63431'
-80.5000 28.83333 '63432'
-80.5000 28.75000 '63433'
-80.4166 28.66667 '63434'
-80.3333 28.58333 '63435'
-80.3333 28.50000 '63436'
-80.3333 28.41667 '63437'
-80.3333 28.33333 '63438'
-80.4166 28.33333 '63439'
-80.3333 28.25000 '63440'
-80.4166 28.25000 '63441'
-80.4166 28.16667 '63442'
-80.3333 28.08333 '63443'
-80.3333 28.00000 '63444'
-78.6666 32.50000 '63512'
-79.0833 32.16667 '63513'
-79.4166 31.83333 '63514'
-79.6666 31.41667 '63515'
-79.9166 31.00000 '63516'
-80.1666 30.50000 '63517'
-80.2500 30.00000 '63518'
-80.1666 29.50000 '63519'
-80.0833 29.00000 '63520'
-80.0000 28.50000 '63521'
-79.9166 28.00000 '63522'
-75.2406 31.70500 '41002'
-77.3817 28.69810 '41006'
-81.0800 31.40200 '41008'
-80.2333 28.50000 '41009'
-78.5331 28.88000 '41010'
-80.5333 30.04140 '41012'
-80.5333 28.40002 '41113'
-81.2926 30.70922 '41112'
0.0 0.0 'STOPSTRING'
$
'grd6' 2
$sdat1 $startf 3600 $edat1 $edat2
-80.3333 28.50000 63436
-80.3333 28.41667 63437
-80.3333 28.33333 63438
-80.4166 28.33333 63439
-80.3333 28.25000 63440
-80.4166 28.25000 63441
-80.4166 28.16667 63442
-80.3333 28.08333 63443
-80.3333 28.00000 63444
-80.2500 27.91667 63445
-80.2500 27.83333 63446
-80.1666 27.75000 63447
-80.1666 27.66667 63448
-80.1666 27.58333 63449
-80.0833 27.50000 63450
-80.0833 27.41667 63451
-80.0000 27.33333 63452
-80.0000 27.25000 63453
-80.0000 27.16667 63454
-79.9166 27.08333 63455
-79.9166 27.00000 63456
-79.9166 26.91667 63457
-79.8333 26.83333 63458
-79.8333 26.75000 63459
-79.8333 26.66667 63460
-79.8333 26.58333 63461
-79.9166 26.50000 63462
-79.9166 26.41667 63463
-79.9166 26.33333 63464
-79.9166 26.25000 63465
-79.9166 26.16667 63466
-79.9166 26.08333 63467
-79.9166 26.00000 63468
-79.9166 25.91667 63469
-79.9166 25.83333 63470
-79.9166 25.75000 63471
-80.0000 25.66667 63472
-80.0000 25.58333 63473
-80.0000 25.50000 63474
-80.0000 25.41667 63475
-80.0833 25.33333 63476
-80.0833 25.25000 63477
-80.1666 25.16667 63478
-80.1666 25.08333 63479
-80.2500 25.00000 63480
-80.3333 24.91667 63481
-80.4166 24.83333 63482
-80.5000 24.75000 63483
-80.5833 24.75000 63484
-80.6666 24.66667 63485
-80.7500 24.66667 63486
-80.8333 24.58333 63487
-80.9166 24.58333 63488
-81.0000 24.58333 63489
-81.0833 24.50000 63490
-81.1666 24.50000 63491
-81.2500 24.50000 63492
-81.3333 24.50000 63493
-81.4166 24.50000 63494
-81.5000 24.41667 63495
-81.5833 24.41667 63496
-81.6666 24.41667 63497
-81.7500 24.41667 63498
-81.8333 24.41667 63499
-81.9166 24.41667 63500
-82.0000 24.41667 63501
-80.0000 28.50000 63521
-79.9166 28.00000 63522
-79.9166 27.50000 63523
-80.2333 28.50000 41009
-80.2196 27.55256 44114
-80.5333 28.40002 41113
0.0 0.0 'STOPSTRING'
$ -------------------------------------------------------------------- $
$ Mandatory end of output requests per grid, identified by output
$ type set to 0.
$
'the_end' 0
$
$ -------------------------------------------------------------------- $
$ Moving grid data as in ww3_shel.inp. All grids will use same data.
$
'STP'
$
$ -------------------------------------------------------------------- $
$ End of input file $
$ -------------------------------------------------------------------- $
EOF
|
CHL-WIS/WIS_ATL
|
shells/genscript_multi_full.sh
|
Shell
|
gpl-2.0
| 23,739 |
# ----------------------------------------------------------------------------
# Checks whether the date given as a parameter is a holiday or not.
# If no date is given, the current date is used.
# The ZZFERIADO variable can be set for regional holidays.
# The format is dd/mm:description, for example: 20/11:Consciência negra.
# Usage: zzferiado -l [year] | [date]
# Ex.: zzferiado 25/12/2008
# zzferiado -l
# zzferiado -l 2010
#
# Author: Marcell S. Martini <marcellmartini (a) gmail com>
# Since: 2008-11-21
# Version: 6
# License: GPLv2
# Requirements: zzcarnaval zzcorpuschristi zzdiadasemana zzsextapaixao zzsemacento
# Tags: data
# ----------------------------------------------------------------------------
zzferiado ()
{
zzzz -h feriado "$1" && return
local feriados carnaval corpuschristi
local hoje data sextapaixao ano listar
local dia diasemana descricao linha
hoje=$(date '+%d/%m/%Y')
# Check whether the -l parameter was given
if test "$1" = "-l"; then
# If $2 is not given, use the current year
ano=${2:-$(basename $hoje)}
# Set the listing flag
listar=1
# Validate the year
zztool -e testa_ano $ano || return 1
else
# If no date is given, default to today's date
data=${1:-$hoje}
# Check whether the date is valid
zztool -e testa_data "$data" || return 1
# A handy trick: since the date is written with / (20/11/2008),
# basename and dirname can be used to grab whichever part we want
# Ex.: dirname 25/12/2008 -> 25/12
# basename 25/12/2008 -> 2008
#
# Keep only the day and the month, in dd/mm format
data=$(dirname $data)
ano=$(basename ${1:-$hoje})
fi
# For state or regional holidays there is the ZZFERIADO environment
# variable, which can be set in $HOME/.bashrc with the dates in
# dd/mm:description format
carnaval=$(dirname $(zzcarnaval $ano ) )
sextapaixao=$(dirname $(zzsextapaixao $ano ) )
corpuschristi=$(dirname $(zzcorpuschristi $ano ) )
feriados="01/01:Confraternização Universal $carnaval:Carnaval $sextapaixao:Sexta-feira da Paixão 21/04:Tiradentes 01/05:Dia do Trabalho $corpuschristi:Corpus Christi 07/09:Independência do Brasil 12/10:Nossa Sra. Aparecida 02/11:Finados 15/11:Proclamação da República 25/12:Natal $ZZFERIADO"
# Either list the holidays or, otherwise, check whether the chosen date is a holiday
if test "$listar" = "1"; then
# Grab the data, put one entry per line, swap dd/mm to mm/dd,
# sort, then swap mm/dd back to dd/mm
echo $feriados |
sed 's# \([0-3]\)#~\1#g' |
tr '~' '\n' |
sed 's#^\(..\)/\(..\)#\2/\1#g' |
sort -n |
sed 's#^\(..\)/\(..\)#\2/\1#g' |
while read linha; do
dia=$(echo $linha | cut -d: -f1)
diasemana=$(zzdiadasemana $dia/$ano | zzsemacento)
descricao=$(echo $linha | cut -d: -f2)
printf "%s %-15s %s\n" "$dia" "$diasemana" "$descricao" |
sed 's/terca-feira/terça-feira/ ; s/ sabado / sábado /'
# ^ The accents are stripped from the weekday name and put back afterwards
# because printf does not handle accents properly. %-15s does not come out
# exactly 15 characters wide when there are accents.
done
else
# Check whether the date is in the holiday list
# and print the result
if zztool grep_var "$data" "$feriados"; then
echo "É feriado: $data/$ano"
else
echo "Não é feriado: $data/$ano"
fi
fi
return 0
}
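# Illustrative regional configuration (example only), using the dd/mm:description
# format documented in the header above:
# export ZZFERIADO="20/11:Consciência negra"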
|
gmgall/funcoeszz
|
zz/zzferiado.sh
|
Shell
|
gpl-2.0
| 3,345 |
#!/bin/bash
# This script prepares the CI build for running
echo "Configuring backend"
sed -i -e "s|my \$hostname = .*$|my \$hostname = 'localhost';|" \
-e "s|our \$bsuser = 'obsrun';|our \$bsuser = 'jenkins';|" \
-e "s|our \$bsgroup = 'obsrun';|our \$bsgroup = 'jenkins';|" src/backend/BSConfig.pm.template
cp src/backend/BSConfig.pm.template src/backend/BSConfig.pm
chmod a+x src/api/script/start_test_backend
pushd src/api
echo "Creating database"
mysql -e 'create database ci_api_test;'
echo "Configuring database"
cp config/database.yml.example config/database.yml
sed -e 's,password:.*,password:,' -i config/database.yml
sed -i "s|database: api|database: ci_api|" config/database.yml
echo "Configuring frontend"
cp config/options.yml.example config/options.yml
cp config/thinking_sphinx.yml.example config/thinking_sphinx.yml
echo "Initialize database"
bundle exec rake db:drop db:create db:setup --trace
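# Optional sanity check (illustrative, not part of the original script):
# confirm that the test database created above is visible.
mysql -e "show databases like 'ci_api_test';"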
# Stuff
# travis rvm can not deal with our extended executable names
sed -i 1,1s,\.ruby2\.3,, {script,bin}/*
# Clear temp data
rm -rf log/* tmp/cache tmp/sessions tmp/sockets
popd
echo "Build apidocs"
pushd docs/api
make
popd
|
shyukri/open-build-service
|
dist/ci/obs_testsuite_travis_before.sh
|
Shell
|
gpl-2.0
| 1,158 |
#! /bin/sh -e
# tup - A file-based build system
#
# Copyright (C) 2011-2017 Mike Shal <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# Tup should parse correctly if the last newline is missing.
. ./tup.sh
cat > ok.c << HERE
#include <stdio.h>
int main(void)
{
printf(": |> touch foo |> foo");
return 0;
}
HERE
gcc ok.c -o ok.exe
./ok.exe > Tupfile
tup touch Tupfile ok.c ok.exe
update
check_exist foo
cat > ok.c << HERE
#include <stdio.h>
int main(void)
{
/* The six backslashes here becomes 3 in the C program, 2 of which
* become a backslash in the Tupfile, and 1 of which is used with
* the newline.
*/
printf(": |> \\\\\\ntouch bar |> bar");
return 0;
}
HERE
gcc ok.c -o ok.exe
./ok.exe > Tupfile
tup touch Tupfile
update
check_exist bar
eotup
|
fasterthanlime/tup-fuseless
|
test/t2083-missing-newline.sh
|
Shell
|
gpl-2.0
| 1,383 |
#!/bin/sh
#
# Copyright (c) 2005, 2006 Junio C Hamano
SUBDIRECTORY_OK=Yes
OPTIONS_KEEPDASHDASH=
OPTIONS_STUCKLONG=t
OPTIONS_SPEC="\
git am [options] [(<mbox>|<Maildir>)...]
git am [options] (--continue | --skip | --abort)
--
i,interactive run interactively
b,binary* (historical option -- no-op)
3,3way allow fall back on 3way merging if needed
q,quiet be quiet
s,signoff add a Signed-off-by line to the commit message
u,utf8 recode into utf8 (default)
k,keep pass -k flag to git-mailinfo
keep-non-patch pass -b flag to git-mailinfo
m,message-id pass -m flag to git-mailinfo
keep-cr pass --keep-cr flag to git-mailsplit for mbox format
no-keep-cr do not pass --keep-cr flag to git-mailsplit independent of am.keepcr
c,scissors strip everything before a scissors line
whitespace= pass it through git-apply
ignore-space-change pass it through git-apply
ignore-whitespace pass it through git-apply
directory= pass it through git-apply
exclude= pass it through git-apply
include= pass it through git-apply
C= pass it through git-apply
p= pass it through git-apply
patch-format= format the patch(es) are in
reject pass it through git-apply
resolvemsg= override error message when patch failure occurs
continue continue applying patches after resolving a conflict
r,resolved synonyms for --continue
skip skip the current patch
abort restore the original branch and abort the patching operation.
committer-date-is-author-date lie about committer date
ignore-date use current timestamp for author date
rerere-autoupdate update the index with reused conflict resolution if possible
S,gpg-sign? GPG-sign commits
rebasing* (internal use for git-rebase)"
. git-sh-setup
. git-sh-i18n
prefix=$(git rev-parse --show-prefix)
set_reflog_action am
require_work_tree
cd_to_toplevel
git var GIT_COMMITTER_IDENT >/dev/null ||
die "$(gettext "You need to set your committer info first")"
if git rev-parse --verify -q HEAD >/dev/null
then
HAS_HEAD=yes
else
HAS_HEAD=
fi
cmdline="git am"
if test '' != "$interactive"
then
cmdline="$cmdline -i"
fi
if test '' != "$threeway"
then
cmdline="$cmdline -3"
fi
empty_tree=4b825dc642cb6eb9a060e54bf8d69288fbee4904
sq () {
git rev-parse --sq-quote "$@"
}
stop_here () {
echo "$1" >"$dotest/next"
git rev-parse --verify -q HEAD >"$dotest/abort-safety"
exit 1
}
safe_to_abort () {
if test -f "$dotest/dirtyindex"
then
return 1
fi
if ! test -f "$dotest/abort-safety"
then
return 0
fi
abort_safety=$(cat "$dotest/abort-safety")
if test "z$(git rev-parse --verify -q HEAD)" = "z$abort_safety"
then
return 0
fi
gettextln "You seem to have moved HEAD since the last 'am' failure.
Not rewinding to ORIG_HEAD" >&2
return 1
}
stop_here_user_resolve () {
if [ -n "$resolvemsg" ]; then
printf '%s\n' "$resolvemsg"
stop_here $1
fi
eval_gettextln "When you have resolved this problem, run \"\$cmdline --continue\".
If you prefer to skip this patch, run \"\$cmdline --skip\" instead.
To restore the original branch and stop patching, run \"\$cmdline --abort\"."
stop_here $1
}
go_next () {
rm -f "$dotest/$msgnum" "$dotest/msg" "$dotest/msg-clean" \
"$dotest/patch" "$dotest/info"
echo "$next" >"$dotest/next"
this=$next
}
cannot_fallback () {
echo "$1"
gettextln "Cannot fall back to three-way merge."
exit 1
}
fall_back_3way () {
O_OBJECT=$(cd "$GIT_OBJECT_DIRECTORY" && pwd)
rm -fr "$dotest"/patch-merge-*
mkdir "$dotest/patch-merge-tmp-dir"
# First see if the patch records the index info that we can use.
cmd="git apply $git_apply_opt --build-fake-ancestor" &&
cmd="$cmd "'"$dotest/patch-merge-tmp-index" "$dotest/patch"' &&
eval "$cmd" &&
GIT_INDEX_FILE="$dotest/patch-merge-tmp-index" \
git write-tree >"$dotest/patch-merge-base+" ||
cannot_fallback "$(gettext "Repository lacks necessary blobs to fall back on 3-way merge.")"
say "$(gettext "Using index info to reconstruct a base tree...")"
cmd='GIT_INDEX_FILE="$dotest/patch-merge-tmp-index"'
if test -z "$GIT_QUIET"
then
eval "$cmd git diff-index --cached --diff-filter=AM --name-status HEAD"
fi
cmd="$cmd git apply --cached $git_apply_opt"' <"$dotest/patch"'
if eval "$cmd"
then
mv "$dotest/patch-merge-base+" "$dotest/patch-merge-base"
mv "$dotest/patch-merge-tmp-index" "$dotest/patch-merge-index"
else
cannot_fallback "$(gettext "Did you hand edit your patch?
It does not apply to blobs recorded in its index.")"
fi
test -f "$dotest/patch-merge-index" &&
his_tree=$(GIT_INDEX_FILE="$dotest/patch-merge-index" git write-tree) &&
orig_tree=$(cat "$dotest/patch-merge-base") &&
rm -fr "$dotest"/patch-merge-* || exit 1
say "$(gettext "Falling back to patching base and 3-way merge...")"
# This is not so wrong. Depending on which base we picked,
# orig_tree may be wildly different from ours, but his_tree
# has the same set of wildly different changes in parts the
# patch did not touch, so recursive ends up canceling them,
# saying that we reverted all those changes.
eval GITHEAD_$his_tree='"$FIRSTLINE"'
export GITHEAD_$his_tree
if test -n "$GIT_QUIET"
then
GIT_MERGE_VERBOSITY=0 && export GIT_MERGE_VERBOSITY
fi
our_tree=$(git rev-parse --verify -q HEAD || echo $empty_tree)
git-merge-recursive $orig_tree -- $our_tree $his_tree || {
git rerere $allow_rerere_autoupdate
die "$(gettext "Failed to merge in the changes.")"
}
unset GITHEAD_$his_tree
}
clean_abort () {
test $# = 0 || cat >&2 <<EOF
$@
EOF
rm -fr "$dotest"
exit 1
}
patch_format=
check_patch_format () {
# early return if patch_format was set from the command line
if test -n "$patch_format"
then
return 0
fi
# we default to mbox format if input is from stdin and for
# directories
if test $# = 0 || test "x$1" = "x-" || test -d "$1"
then
patch_format=mbox
return 0
fi
# otherwise, check the first few non-blank lines of the first
# patch to try to detect its format
{
# Start from first line containing non-whitespace
l1=
while test -z "$l1"
do
read l1 || break
done
read l2
read l3
case "$l1" in
"From "* | "From: "*)
patch_format=mbox
;;
'# This series applies on GIT commit'*)
patch_format=stgit-series
;;
"# HG changeset patch")
patch_format=hg
;;
*)
# if the second line is empty and the third is
# a From, Author or Date entry, this is very
# likely an StGIT patch
case "$l2,$l3" in
,"From: "* | ,"Author: "* | ,"Date: "*)
patch_format=stgit
;;
*)
;;
esac
;;
esac
if test -z "$patch_format" &&
test -n "$l1" &&
test -n "$l2" &&
test -n "$l3"
then
# This begins with three non-empty lines. Is this a
# piece of e-mail a-la RFC2822? Grab all the headers,
# discarding the indented remainder of folded lines,
# and see if it looks like that they all begin with the
# header field names...
tr -d '\015' <"$1" |
sed -n -e '/^$/q' -e '/^[ ]/d' -e p |
sane_egrep -v '^[!-9;-~]+:' >/dev/null ||
patch_format=mbox
fi
} < "$1" || clean_abort
}
split_patches () {
case "$patch_format" in
mbox)
if test t = "$keepcr"
then
keep_cr=--keep-cr
else
keep_cr=
fi
git mailsplit -d"$prec" -o"$dotest" -b $keep_cr -- "$@" > "$dotest/last" ||
clean_abort
;;
stgit-series)
if test $# -ne 1
then
clean_abort "$(gettext "Only one StGIT patch series can be applied at once")"
fi
series_dir=$(dirname "$1")
series_file="$1"
shift
{
set x
while read filename
do
set "$@" "$series_dir/$filename"
done
# remove the safety x
shift
# remove the arg coming from the first-line comment
shift
} < "$series_file" || clean_abort
# set the patch format appropriately
patch_format=stgit
# now handle the actual StGIT patches
split_patches "$@"
;;
stgit)
this=0
test 0 -eq "$#" && set -- -
for stgit in "$@"
do
this=$(expr "$this" + 1)
msgnum=$(printf "%0${prec}d" $this)
# Perl version of StGIT parse_patch. The first non-empty line
# not starting with Author, From or Date is the
# subject, and the body starts with the next nonempty
# line not starting with Author, From or Date
@@PERL@@ -ne 'BEGIN { $subject = 0 }
if ($subject > 1) { print ; }
elsif (/^\s+$/) { next ; }
elsif (/^Author:/) { s/Author/From/ ; print ;}
elsif (/^(From|Date)/) { print ; }
elsif ($subject) {
$subject = 2 ;
print "\n" ;
print ;
} else {
print "Subject: ", $_ ;
$subject = 1;
}
' -- "$stgit" >"$dotest/$msgnum" || clean_abort
done
echo "$this" > "$dotest/last"
this=
msgnum=
;;
hg)
this=0
test 0 -eq "$#" && set -- -
for hg in "$@"
do
this=$(( $this + 1 ))
msgnum=$(printf "%0${prec}d" $this)
# hg stores changeset metadata in #-commented lines preceding
# the commit message and diff(s). The only metadata we care about
# are the User and Date (Node ID and Parent are hashes which are
# only relevant to the hg repository and thus not useful to us)
# Since we cannot guarantee that the commit message is in
# git-friendly format, we put no Subject: line and just consume
# all of the message as the body
LANG=C LC_ALL=C @@PERL@@ -M'POSIX qw(strftime)' -ne 'BEGIN { $subject = 0 }
if ($subject) { print ; }
elsif (/^\# User /) { s/\# User/From:/ ; print ; }
elsif (/^\# Date /) {
my ($hashsign, $str, $time, $tz) = split ;
$tz_str = sprintf "%+05d", (0-$tz)/36;
print "Date: " .
strftime("%a, %d %b %Y %H:%M:%S ",
gmtime($time-$tz))
. "$tz_str\n";
} elsif (/^\# /) { next ; }
else {
print "\n", $_ ;
$subject = 1;
}
' -- "$hg" >"$dotest/$msgnum" || clean_abort
done
echo "$this" >"$dotest/last"
this=
msgnum=
;;
*)
if test -n "$patch_format"
then
clean_abort "$(eval_gettext "Patch format \$patch_format is not supported.")"
else
clean_abort "$(gettext "Patch format detection failed.")"
fi
;;
esac
}
prec=4
dotest="$GIT_DIR/rebase-apply"
sign= utf8=t keep= keepcr= skip= interactive= resolved= rebasing= abort=
messageid= resolvemsg= resume= scissors= no_inbody_headers=
git_apply_opt=
committer_date_is_author_date=
ignore_date=
allow_rerere_autoupdate=
gpg_sign_opt=
threeway=
if test "$(git config --bool --get am.messageid)" = true
then
messageid=t
fi
if test "$(git config --bool --get am.keepcr)" = true
then
keepcr=t
fi
while test $# != 0
do
case "$1" in
-i|--interactive)
interactive=t ;;
-b|--binary)
gettextln >&2 "The -b/--binary option has been a no-op for a long time, and
it will be removed. Please do not use it anymore."
;;
-3|--3way)
threeway=t ;;
-s|--signoff)
sign=t ;;
-u|--utf8)
utf8=t ;; # this is now default
--no-utf8)
utf8= ;;
-m|--message-id)
messageid=t ;;
--no-message-id)
messageid=f ;;
-k|--keep)
keep=t ;;
--keep-non-patch)
keep=b ;;
-c|--scissors)
scissors=t ;;
--no-scissors)
scissors=f ;;
-r|--resolved|--continue)
resolved=t ;;
--skip)
skip=t ;;
--abort)
abort=t ;;
--rebasing)
rebasing=t threeway=t ;;
--resolvemsg=*)
resolvemsg="${1#--resolvemsg=}" ;;
--whitespace=*|--directory=*|--exclude=*|--include=*)
git_apply_opt="$git_apply_opt $(sq "$1")" ;;
-C*|-p*)
git_apply_opt="$git_apply_opt $(sq "$1")" ;;
--patch-format=*)
patch_format="${1#--patch-format=}" ;;
--reject|--ignore-whitespace|--ignore-space-change)
git_apply_opt="$git_apply_opt $1" ;;
--committer-date-is-author-date)
committer_date_is_author_date=t ;;
--ignore-date)
ignore_date=t ;;
--rerere-autoupdate|--no-rerere-autoupdate)
allow_rerere_autoupdate="$1" ;;
-q|--quiet)
GIT_QUIET=t ;;
--keep-cr)
keepcr=t ;;
--no-keep-cr)
keepcr=f ;;
--gpg-sign)
gpg_sign_opt=-S ;;
--gpg-sign=*)
gpg_sign_opt="-S${1#--gpg-sign=}" ;;
--)
shift; break ;;
*)
usage ;;
esac
shift
done
# If the dotest directory exists, but we have finished applying all the
# patches in them, clear it out.
if test -d "$dotest" &&
test -f "$dotest/last" &&
test -f "$dotest/next" &&
last=$(cat "$dotest/last") &&
next=$(cat "$dotest/next") &&
test $# != 0 &&
test "$next" -gt "$last"
then
rm -fr "$dotest"
fi
if test -d "$dotest" && test -f "$dotest/last" && test -f "$dotest/next"
then
case "$#,$skip$resolved$abort" in
0,*t*)
# Explicit resume command and we do not have file, so
# we are happy.
: ;;
0,)
# No file input but without resume parameters; catch
# user error to feed us a patch from standard input
# when there is already $dotest. This is somewhat
# unreliable -- stdin could be /dev/null for example
# and the caller did not intend to feed us a patch but
# wanted to continue unattended.
test -t 0
;;
*)
false
;;
esac ||
die "$(eval_gettext "previous rebase directory \$dotest still exists but mbox given.")"
resume=yes
case "$skip,$abort" in
t,t)
die "$(gettext "Please make up your mind. --skip or --abort?")"
;;
t,)
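# --skip: throw away whatever the failed patch left behind by
# resetting the index and working tree back to HEAD.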
git rerere clear
head_tree=$(git rev-parse --verify -q HEAD || echo $empty_tree) &&
git read-tree --reset -u $head_tree $head_tree &&
index_tree=$(git write-tree) &&
git read-tree -m -u $index_tree $head_tree
git read-tree -m $head_tree
;;
,t)
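# --abort: when rebasing, hand off to 'git rebase --abort'; otherwise
# restore the pre-am state (only when it is safe to do so) and remove
# the state directory.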
if test -f "$dotest/rebasing"
then
exec git rebase --abort
fi
git rerere clear
if safe_to_abort
then
head_tree=$(git rev-parse --verify -q HEAD || echo $empty_tree) &&
git read-tree --reset -u $head_tree $head_tree &&
index_tree=$(git write-tree) &&
orig_head=$(git rev-parse --verify -q ORIG_HEAD || echo $empty_tree) &&
git read-tree -m -u $index_tree $orig_head
if git rev-parse --verify -q ORIG_HEAD >/dev/null 2>&1
then
git reset ORIG_HEAD
else
git read-tree $empty_tree
curr_branch=$(git symbolic-ref HEAD 2>/dev/null) &&
git update-ref -d $curr_branch
fi
fi
rm -fr "$dotest"
exit ;;
esac
rm -f "$dotest/dirtyindex"
else
# Possible stray $dotest directory in the independent-run
# case; in the --rebasing case, it is up to the caller
# (git-rebase--am) to take care of stray directories.
if test -d "$dotest" && test -z "$rebasing"
then
case "$skip,$resolved,$abort" in
,,t)
rm -fr "$dotest"
exit 0
;;
*)
die "$(eval_gettext "Stray \$dotest directory found.
Use \"git am --abort\" to remove it.")"
;;
esac
fi
# Make sure we are not given --skip, --continue, or --abort
test "$skip$resolved$abort" = "" ||
die "$(gettext "Resolve operation not in progress, we are not resuming.")"
# Start afresh.
mkdir -p "$dotest" || exit
if test -n "$prefix" && test $# != 0
then
first=t
for arg
do
test -n "$first" && {
set x
first=
}
if is_absolute_path "$arg"
then
set "$@" "$arg"
else
set "$@" "$prefix$arg"
fi
done
shift
fi
check_patch_format "$@"
split_patches "$@"
# -i can and must be given when resuming; everything
# else is kept
echo " $git_apply_opt" >"$dotest/apply-opt"
echo "$threeway" >"$dotest/threeway"
echo "$sign" >"$dotest/sign"
echo "$utf8" >"$dotest/utf8"
echo "$keep" >"$dotest/keep"
echo "$messageid" >"$dotest/messageid"
echo "$scissors" >"$dotest/scissors"
echo "$no_inbody_headers" >"$dotest/no_inbody_headers"
echo "$GIT_QUIET" >"$dotest/quiet"
echo 1 >"$dotest/next"
if test -n "$rebasing"
then
: >"$dotest/rebasing"
else
: >"$dotest/applying"
if test -n "$HAS_HEAD"
then
git update-ref ORIG_HEAD HEAD
else
git update-ref -d ORIG_HEAD >/dev/null 2>&1
fi
fi
fi
git update-index -q --refresh
case "$resolved" in
'')
case "$HAS_HEAD" in
'')
files=$(git ls-files) ;;
?*)
files=$(git diff-index --cached --name-only HEAD --) ;;
esac || exit
if test "$files"
then
test -n "$HAS_HEAD" && : >"$dotest/dirtyindex"
die "$(eval_gettext "Dirty index: cannot apply patches (dirty: \$files)")"
fi
esac
# Now, decide what command line options we will give to the git
# commands we invoke, based on the result of parsing command line
# options and previous invocation state stored in $dotest/ files.
if test "$(cat "$dotest/utf8")" = t
then
utf8=-u
else
utf8=-n
fi
keep=$(cat "$dotest/keep")
case "$keep" in
t)
keep=-k ;;
b)
keep=-b ;;
*)
keep= ;;
esac
case "$(cat "$dotest/messageid")" in
t)
messageid=-m ;;
f)
messageid= ;;
esac
case "$(cat "$dotest/scissors")" in
t)
scissors=--scissors ;;
f)
scissors=--no-scissors ;;
esac
if test "$(cat "$dotest/no_inbody_headers")" = t
then
no_inbody_headers=--no-inbody-headers
else
no_inbody_headers=
fi
if test "$(cat "$dotest/quiet")" = t
then
GIT_QUIET=t
fi
if test "$(cat "$dotest/threeway")" = t
then
threeway=t
fi
git_apply_opt=$(cat "$dotest/apply-opt")
if test "$(cat "$dotest/sign")" = t
then
SIGNOFF=$(git var GIT_COMMITTER_IDENT | sed -e '
s/>.*/>/
s/^/Signed-off-by: /'
)
else
SIGNOFF=
fi
last=$(cat "$dotest/last")
this=$(cat "$dotest/next")
if test "$skip" = t
then
this=$(expr "$this" + 1)
resume=
fi
while test "$this" -le "$last"
do
msgnum=$(printf "%0${prec}d" $this)
next=$(expr "$this" + 1)
test -f "$dotest/$msgnum" || {
resume=
go_next
continue
}
# If we are not resuming, parse and extract the patch information
# into separate files:
# - info records the authorship and title
# - msg is the rest of commit log message
# - patch is the patch body.
#
# When we are resuming, these files are either already prepared
# by the user, or the user can tell us to do so with the --continue flag.
case "$resume" in
'')
if test -f "$dotest/rebasing"
then
commit=$(sed -e 's/^From \([0-9a-f]*\) .*/\1/' \
-e q "$dotest/$msgnum") &&
test "$(git cat-file -t "$commit")" = commit ||
stop_here $this
git cat-file commit "$commit" |
sed -e '1,/^$/d' >"$dotest/msg-clean"
echo "$commit" >"$dotest/original-commit"
get_author_ident_from_commit "$commit" >"$dotest/author-script"
git diff-tree --root --binary --full-index "$commit" >"$dotest/patch"
else
git mailinfo $keep $no_inbody_headers $messageid $scissors $utf8 "$dotest/msg" "$dotest/patch" \
<"$dotest/$msgnum" >"$dotest/info" ||
stop_here $this
# skip pine's internal folder data
sane_grep '^Author: Mail System Internal Data$' \
<"$dotest"/info >/dev/null &&
go_next && continue
test -s "$dotest/patch" || {
eval_gettextln "Patch is empty. Was it split wrong?
If you would prefer to skip this patch, instead run \"\$cmdline --skip\".
To restore the original branch and stop patching run \"\$cmdline --abort\"."
stop_here $this
}
rm -f "$dotest/original-commit" "$dotest/author-script"
{
sed -n '/^Subject/ s/Subject: //p' "$dotest/info"
echo
cat "$dotest/msg"
} |
git stripspace > "$dotest/msg-clean"
fi
;;
esac
if test -f "$dotest/author-script"
then
eval $(cat "$dotest/author-script")
else
GIT_AUTHOR_NAME="$(sed -n '/^Author/ s/Author: //p' "$dotest/info")"
GIT_AUTHOR_EMAIL="$(sed -n '/^Email/ s/Email: //p' "$dotest/info")"
GIT_AUTHOR_DATE="$(sed -n '/^Date/ s/Date: //p' "$dotest/info")"
fi
if test -z "$GIT_AUTHOR_EMAIL"
then
gettextln "Patch does not have a valid e-mail address."
stop_here $this
fi
export GIT_AUTHOR_NAME GIT_AUTHOR_EMAIL GIT_AUTHOR_DATE
case "$resume" in
'')
if test '' != "$SIGNOFF"
then
LAST_SIGNED_OFF_BY=$(
sed -ne '/^Signed-off-by: /p' \
"$dotest/msg-clean" |
sed -ne '$p'
)
ADD_SIGNOFF=$(
test "$LAST_SIGNED_OFF_BY" = "$SIGNOFF" || {
test '' = "$LAST_SIGNED_OFF_BY" && echo
echo "$SIGNOFF"
})
else
ADD_SIGNOFF=
fi
{
if test -s "$dotest/msg-clean"
then
cat "$dotest/msg-clean"
fi
if test '' != "$ADD_SIGNOFF"
then
echo "$ADD_SIGNOFF"
fi
} >"$dotest/final-commit"
;;
*)
case "$resolved$interactive" in
tt)
# This is used only for interactive view option.
git diff-index -p --cached HEAD -- >"$dotest/patch"
;;
esac
esac
resume=
if test "$interactive" = t
then
test -t 0 ||
die "$(gettext "cannot be interactive without stdin connected to a terminal.")"
action=again
while test "$action" = again
do
gettextln "Commit Body is:"
echo "--------------------------"
cat "$dotest/final-commit"
echo "--------------------------"
# TRANSLATORS: Make sure to include [y], [n], [e], [v] and [a]
# in your translation. The program will only accept English
# input at this point.
gettext "Apply? [y]es/[n]o/[e]dit/[v]iew patch/[a]ccept all "
read reply
case "$reply" in
[yY]*) action=yes ;;
[aA]*) action=yes interactive= ;;
[nN]*) action=skip ;;
[eE]*) git_editor "$dotest/final-commit"
action=again ;;
[vV]*) action=again
git_pager "$dotest/patch" ;;
*) action=again ;;
esac
done
else
action=yes
fi
if test $action = skip
then
go_next
continue
fi
hook="$(git rev-parse --git-path hooks/applypatch-msg)"
if test -x "$hook"
then
"$hook" "$dotest/final-commit" || stop_here $this
fi
if test -f "$dotest/final-commit"
then
FIRSTLINE=$(sed 1q "$dotest/final-commit")
else
FIRSTLINE=""
fi
say "$(eval_gettext "Applying: \$FIRSTLINE")"
case "$resolved" in
'')
# When we are allowed to fall back to 3-way later, don't give
# false errors during the initial attempt.
squelch=
if test "$threeway" = t
then
squelch='>/dev/null 2>&1 '
fi
eval "git apply $squelch$git_apply_opt"' --index "$dotest/patch"'
apply_status=$?
;;
t)
# Resolved means the user did all the hard work, and
# we do not have to do any patch application. Just
# trust what the user has in the index file and the
# working tree.
resolved=
git diff-index --quiet --cached HEAD -- && {
gettextln "No changes - did you forget to use 'git add'?
If there is nothing left to stage, chances are that something else
already introduced the same changes; you might want to skip this patch."
stop_here_user_resolve $this
}
unmerged=$(git ls-files -u)
if test -n "$unmerged"
then
gettextln "You still have unmerged paths in your index
did you forget to use 'git add'?"
stop_here_user_resolve $this
fi
apply_status=0
git rerere
;;
esac
if test $apply_status != 0 && test "$threeway" = t
then
if (fall_back_3way)
then
# Applying the patch to an earlier tree and merging the
# result may have produced the same tree as ours.
git diff-index --quiet --cached HEAD -- && {
say "$(gettext "No changes -- Patch already applied.")"
go_next
continue
}
# clear apply_status -- we have successfully merged.
apply_status=0
fi
fi
if test $apply_status != 0
then
eval_gettextln 'Patch failed at $msgnum $FIRSTLINE'
if test "$(git config --bool advice.amworkdir)" != false
then
eval_gettextln 'The copy of the patch that failed is found in:
$dotest/patch'
fi
stop_here_user_resolve $this
fi
hook="$(git rev-parse --git-path hooks/pre-applypatch)"
if test -x "$hook"
then
"$hook" || stop_here $this
fi
tree=$(git write-tree) &&
commit=$(
if test -n "$ignore_date"
then
GIT_AUTHOR_DATE=
fi
parent=$(git rev-parse --verify -q HEAD) ||
say >&2 "$(gettext "applying to an empty history")"
if test -n "$committer_date_is_author_date"
then
GIT_COMMITTER_DATE="$GIT_AUTHOR_DATE"
export GIT_COMMITTER_DATE
fi &&
git commit-tree ${parent:+-p} $parent ${gpg_sign_opt:+"$gpg_sign_opt"} $tree \
<"$dotest/final-commit"
) &&
git update-ref -m "$GIT_REFLOG_ACTION: $FIRSTLINE" HEAD $commit $parent ||
stop_here $this
if test -f "$dotest/original-commit"; then
echo "$(cat "$dotest/original-commit") $commit" >> "$dotest/rewritten"
fi
hook="$(git rev-parse --git-path hooks/post-applypatch)"
test -x "$hook" && "$hook"
go_next
done
if test -s "$dotest"/rewritten; then
git notes copy --for-rewrite=rebase < "$dotest"/rewritten
hook="$(git rev-parse --git-path hooks/post-rewrite)"
if test -x "$hook"; then
"$hook" rebase < "$dotest"/rewritten
fi
fi
# If am was called with --rebasing (from git-rebase--am), it's up to
# the caller to take care of housekeeping.
if ! test -f "$dotest/rebasing"
then
rm -fr "$dotest"
git gc --auto
fi
|
peanhuang/git
|
contrib/examples/git-am.sh
|
Shell
|
gpl-2.0
| 24,385 |
#!/bin/sh
#
# Copyright (c) 2017 - 2020 Eaton
#
# This file is part of the Eaton 42ity project.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#! \file 20-fty-compat.sh
# \brief Create compat symlinks
# \author Michal Vyskocil <[email protected]>
# \author Jim Klimov <[email protected]>
#
# Move an OLD file to NEW location (if it exists and is not a symlink)
# and link it back for legacy compatibility purposes; optionally
# set new ownership and access rights on the newly located file.
# If OLD filesystem object is a directory, recurse with mvln() for
# each object found inside it. See mvlndir() for wholesale relocation.
# Note: this assumes we are manipulating files in deployment-local
# data and config directories (not packaged files) - so if some target
# filenames already exist under FTY paths, we should not overwrite them
# with files from legacy BIOS paths.
#
# TODO: Make use of template paths from :
# @libexecdir@/@PACKAGE@ = /usr/libexec/{fty,bios}
# @datadir@/@PACKAGE@ = /usr/share/{fty,bios}
# Common base location for many components, so no @PACKAGE@ :
# @ftydatadir@ = /var/lib/{fty,bios}
#
mvln () {
OLD="${1-}"
NEW="${2-}"
OWN="${3-}"
MOD="${4-}"
RECURSE_FLAG=""
if [[ ! -e "${OLD}" ]] || [[ ! -s "${OLD}" ]] || [[ -L "${OLD}" ]] ; then
echo "Nothing to relocate: No '$OLD', or it is already a symlink" >&2
return 0
fi
OLD_DIR=$(dirname "${OLD}") && [[ -n "${OLD_DIR}" ]] || return
NEW_DIR=$(dirname "${NEW}") && [[ -n "${NEW_DIR}" ]] || return
mkdir -p "${OLD_DIR}" || return
mkdir -p "${NEW_DIR}" || return
if [[ -d "${OLD}" ]]; then
NUMOBJ="$(cd "${OLD}" && find . | wc -l)" || NUMOBJ=-1
if [[ "$NUMOBJ" -le 1 ]]; then
echo "Symlinking empty old directory: '$OLD' => '$NEW'" >&2
if [[ -d "${NEW}" ]] ; then
mv "${OLD}" "${OLD}.old-bios" && \
ln -srf "${NEW}" "${OLD}" && \
return $?
# Keep the NEW access rights
fi
if [[ -e "${NEW}" ]] ; then
echo "ERROR: '$NEW' already exists and is not a directory" >&2
return 1
fi
mv "${OLD}" "${NEW}" || \
mkdir -p "${NEW}" || return
ln -srf "${NEW}" "${OLD}"
# For empty NEW directory, fall through to optional resetting of access rights
else
# Create dirs, symlink files; chmod+chown later
echo "Recursing into directory: '$OLD'" >&2
( cd "${OLD}" && find . | while read LINE ; do
case "${LINE}" in
""|.|./) ;;
*) (mvln "${OLD}/${LINE}" "${NEW}/${LINE}" "" "" ) || exit ;;
esac
done )
fi
RECURSE_FLAG="-R"
else
if [[ -f "${OLD}" ]]; then
if [[ -e "${NEW}" ]]; then
# If new setup has a file in an unpackaged directory
# (so created by updated services), keep it in place.
echo "Relocated as backup: '${OLD}' => '${NEW}.old-bios' because NEW file exists" >&2
mv "${OLD}" "${NEW}.old-bios"
else
echo "Relocated: '${OLD}' => '${NEW}'" >&2
mv "${OLD}" "${NEW}"
fi
fi
# Make this symlink even if expected NEW file is currently missing
echo "Symlink back: '${NEW}' => '${OLD}'" >&2
ln -srf "${NEW}" "${OLD}"
fi
if [[ -n "${OWN}" ]] && [[ -e "${NEW}" ]] ; then
chown $RECURSE_FLAG "${OWN}" "${NEW}"
fi
if [[ -n "${MOD}" ]] && [[ -e "${NEW}" ]] ; then
chmod $RECURSE_FLAG "${MOD}" "${NEW}"
fi
}
# Simply move a whole existing OLD directory to a NEW name, if NEW does not
# yet exist, and add a legacy symlink with the OLD name pointing to the NEW
# location. Generally it is safer (but slower) to mvln() recursively, with
# existence checks done for each object along the way.
mvlndir() {
OLD="${1-}"
NEW="${2-}"
[[ -d "${NEW}" ]] && return 0
if [[ ! -d "${OLD}" ]] || [[ -e "${NEW}" ]] ; then
echo "Not relocating dir: '${OLD}' => '${NEW}' because LD does not exist or is not a dir" >&2
return 1
fi
echo "Relocating dir: '${OLD}' => '${NEW}'" >&2
NEW_DIR=$(dirname "${NEW}") && \
[[ -n "${NEW_DIR}" ]] && \
mkdir -p "${NEW_DIR}" && \
mv "${OLD}" "${NEW}" && \
ln -srf "${NEW}" "${OLD}"
}
# Handle certain config files
mvln /etc/agent-smtp/bios-agent-smtp.cfg /etc/fty-email/fty-email.cfg www-data: ""
if [ -s /etc/fty-email/fty-email.cfg.old-bios ]; then
cp -pf /etc/fty-email/fty-email.cfg /etc/fty-email/fty-email.cfg.new-default
cat /etc/fty-email/fty-email.cfg.old-bios | sed \
-e 's|/var/lib/bios|/var/lib/fty|g' \
-e 's|bios-agent-smtp|fty-email|' \
-e 's|agent-smtp|fty-email|' \
> /etc/fty-email/fty-email.cfg
fi
mvln /etc/agent-metric-store/bios-agent-ms.cfg /etc/fty-metric-store/fty-metric-store.cfg www-data: ""
mvln /etc/agent-nut/bios-agent-nut.cfg /etc/fty-nut/fty-nut.cfg www-data: ""
mvln /etc/default/bios.cfg /etc/default/fty.cfg www-data: ""
mvln /etc/default/bios /etc/default/fty www-data: ""
# Dirs with same content and access rights
mvlndir /var/lib/fty/nut /var/lib/fty/fty-nut
# 42ity renaming
mvln /etc/bios /etc/fty
mvln /etc/pam.d/bios /etc/pam.d/fty
mvln /usr/libexec/bios /usr/libexec/fty
mvln /var/lib/bios/license /var/lib/fty/fty-eula/license
mvln /var/lib/fty/license /var/lib/fty/fty-eula/license
chown www-data:sasl /var/lib/fty/fty-eula/license /var/lib/fty/fty-eula
chown 0:0 /var/lib/fty
# Note: currently we do want to keep tntnet@bios, which relies on old pathnames
#mvln /etc/tntnet/bios.d /etc/tntnet/fty.d
# Warning: order matters, somewhat
mvln /usr/share/bios /usr/share/fty
mvlndir /usr/share/bios/etc/default/bios /usr/share/fty/etc/default/fty
## Backward compatibility for new (renamed) paths
mvlndir /var/lib/bios/sql /var/lib/fty/sql
mvln /var/lib/bios/bios-agent-cm /var/lib/fty/fty-metric-compute
mvln /var/lib/bios/agent-outage /var/lib/fty/fty-outage
mvln /var/lib/bios/agent-smtp /var/lib/fty/fty-email
mvln /var/lib/bios/alert_agent /var/lib/fty/fty-alert-engine
mvln /var/lib/bios/bios-agent-rt /var/lib/fty/fty-metric-cache
mvln /var/lib/bios/composite-metrics /var/lib/fty/fty-metric-composite
mvln /var/lib/bios/nut /var/lib/fty/fty-nut
mvln /var/lib/bios/nut /var/lib/fty/nut
# The /var/lib/fty/fty-sensor-env should now be created via tmpfiles
# But a legacy system may have an agent file of its own...
mvln /var/lib/bios/composite-metrics/agent_th /var/lib/fty/fty-sensor-env/state
# alert list file must be converted, do it manually
if [[ -e /var/lib/bios/agent-alerts-list/state_file ]]; then
mkdir -p /var/lib/fty/fty-alert-list
chown bios:root /var/lib/fty/fty-alert-list
/usr/bin/fty-alert-list-convert \
state_file \
/var/lib/bios/agent-alerts-list \
/var/lib/fty/fty-alert-list
chown bios:bios-infra /var/lib/fty/fty-alert-list/state_file || :
rm -rf /var/lib/bios/agent-alerts-list
fi
# uptime file must be converted, do it manually
if [[ -e /var/lib/bios/uptime/state ]]; then
mkdir -p /var/lib/fty/fty-kpi-power-uptime
chown bios:root /var/lib/fty/fty-kpi-power-uptime
/usr/bin/fty-kpi-power-uptime-convert \
state \
/var/lib/bios/uptime \
/var/lib/fty/fty-kpi-power-uptime
chown bios:bios-infra /var/lib/fty/fty-kpi-power-uptime/state || :
rm -rf /var/lib/bios/uptime
fi
# Our web-server should be able to read these credentials,
# at least per current implementation of license_POST
# Note that on some systems these dirs are the same (so we
# reapply the settings twice to the same FS object), but
# on upgrades from legacy they can be different.
for CREDSQL_DIR in "/var/lib/fty/sql/mysql" "/var/lib/bios/sql/mysql" ; do
mkdir -p "${CREDSQL_DIR}"
chgrp www-data "${CREDSQL_DIR}"
BIOS_DB_RW="$CREDSQL_DIR/bios-db-rw"
if [[ -e "${BIOS_DB_RW}" ]] ; then
chgrp www-data "${BIOS_DB_RW}"
chmod g+r "${BIOS_DB_RW}"
fi
done
|
jimklimov/fty-core
|
setup/20-fty-compat.sh
|
Shell
|
gpl-2.0
| 8,945 |
#!/bin/bash
#PBS -l walltime=4:00:00
#PBS -l nodes=1:ppn=2
#PBS -l vmem=32G
#PBS -N Baltic_2_3_5_500_5_no_no_no_no
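# Single-instance launcher: run the maritime VRP solver with the shared
# parameter file and this instance's JSON data.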
cd /zhome/fc/e/102910/maritime-vrp/build
LD_LIBRARY_PATH=/zhome/fc/e/102910/gcc/lib64 ./maritime_vrp ../data/old_thesis_data/program_params.json ../data/new/Baltic_2_3_5_500_5_no_no_no_no.json
|
OR-Bologna/maritime-vrp
|
opt/launchers/Baltic_2_3_5_500_5_no_no_no_no.sh
|
Shell
|
gpl-3.0
| 308 |
#!/bin/bash
CLASSPATH='lib/*:build/'
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
DEFAULT_TO_DATABASE='false'
while getopts a:c:t:d: option
do
case "${option}"
in
c) CASESTUDIES=${OPTARG};;
t) TRIALS=${OPTARG};;
a) APPROACHES=${OPTARG};;
d) TODATABASE=${OPTARG:=$DEFAULT_TO_DATABASE};;
esac
done
if [ -z $CASESTUDIES ] || [ -z $TRIALS ] || [ -z $APPROACHES ] || [ -z $TODATABASE ] ; then
echo "Experiment failed - requires -c CASESTUDIES -t TRIALS -a APPROACHES -d TODATABASE* (* use '' to default)"
exit 1
fi
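# CASESTUDIES and APPROACHES are colon-separated lists; each entry is expanded
# to a fully qualified class name below. Hypothetical example invocation
# (the class names are illustrative only):
#   ./techniqueexperiment.sh -c 'BankAccount:Flights' -t 30 -a 'Original' -d ''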
IFS=':' read -ra CASESTUDY <<< "$CASESTUDIES"
IFS=':' read -ra APPROACH <<< "$APPROACHES"
ARGUMENTS='-mutationPipeline=Mutation2013'
for c in "${CASESTUDY[@]}"; do
for a in "${APPROACH[@]}"; do
$DIR'/'experiment.sh -c 'parsedcasestudy.'$c -a 'org.schemaanalyst.mutation.analysis.technique.'$a -t $TRIALS -p 'Mutation2013' -d $TODATABASE
done
done
|
schemaanalyst/schemaanalyst
|
scripts/techniqueexperiment.sh
|
Shell
|
gpl-3.0
| 901 |
# LICENCE : CloudUnit is available under the Affero Gnu Public License GPL V3 : https://www.gnu.org/licenses/agpl-3.0.html
# but CloudUnit is licensed too under a standard commercial license.
# Please contact our sales team if you would like to discuss the specifics of our Enterprise license.
# If you are not sure whether the GPL is right for you,
# you can always test our software under the GPL and inspect the source code before you contact us
# about purchasing a commercial license.
# LEGAL TERMS : "CloudUnit" is a registered trademark of Treeptik and can't be used to endorse
# or promote products derived from this project without prior written permission from Treeptik.
# Products or services derived from this software may not be called "CloudUnit"
# nor may "Treeptik" or similar confusing terms appear in their names without prior written permission.
# For any questions, contact us : [email protected]
#!/bin/bash
set -x
# Local execution for maven
cd ../cu-manager
mvn clean test "-Dtest=*IT"
|
cxxly/cstack
|
integration-tests/integration.sh
|
Shell
|
agpl-3.0
| 1,017 |
#!/bin/bash
# Make sure only root can run our script
if [[ $EUID -ne 0 ]]; then
echo "This script must be run as root" 1>&2
exit 1
fi
IP=192.168.201.202
USER=erp
# opencv
scp $USER@$IP:lib/libopencv/* /usr/local/lib/
./tools/makelink.sh
# python-opencv
scp $USER@$IP:lib/pythonopencv/* /usr/local/lib/python2.7/dist-packages/
# zxing
if [ -d "$HOME/.libZxing" ]; then
rm -rf $HOME/.libZxing
fi
mkdir $HOME/.libZxing
scp $USER@$IP:lib/zxing/* ~/.libZxing/
|
MickSandoz/compassion-modules
|
sbc_compassion/setup.sh
|
Shell
|
agpl-3.0
| 472 |
yum -y install gcc python-devel mysql-devel sqlite-devel MySQL-python freetype-devel openssh-clients python-sqlite python-setuptools || exit 1
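# Build and install each bundled Python source package with setup.py.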
tar xf setuptools-1.1.6.tar.gz
cd setuptools-1.1.6
python setup.py build
python setup.py install
cd ..
tar xf pexpect-2.3.tar.gz
cd pexpect-2.3
python setup.py build
python setup.py install
cd ..
tar xf pycrypto-2.6.tar.gz
cd pycrypto-2.6
python setup.py build
python setup.py install
cd ..
tar xf httplib2-0.8.tar.gz
cd httplib2-0.8
python setup.py build
python setup.py install
cd ..
tar xf Imaging-1.1.7.tar.gz
cd Imaging-1.1.7
if uname -a | grep x86_64;then
sed -i "s#^FREETYPE_ROOT.*#FREETYPE_ROOT=\'/usr/lib64\'#" setup.py
fi
python setup.py build
python setup.py install
cd ..
tar xf Django-1.2.7.tar.gz
cd Django-1.2.7
python setup.py build
python setup.py install
cd ..
tar xf MySQL-python-1.2.3.tar.gz
cd MySQL-python-1.2.3
python setup.py build
python setup.py install
cd ..
tar xf uwsgi-1.9.5.tar.gz
cd uwsgi-1.9.5
python setup.py build
python setup.py install
|
sdgdsffdsfff/yunwei
|
packets/installpackets.sh
|
Shell
|
lgpl-3.0
| 1,051 |
#!/bin/bash
set -e
. /opt/kolla/kolla-common.sh
. /opt/kolla/config-cinder.sh
fail_unless_db
check_required_vars MARIADB_SERVICE_HOST DB_ROOT_PASSWORD \
CINDER_DB_NAME CINDER_DB_USER CINDER_DB_PASSWORD \
INIT_CINDER_DB
cfg=/etc/cinder/cinder.conf
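# On first run, create the Cinder database, grant the Cinder DB user access,
# and sync the schema before starting the scheduler.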
if [ "${INIT_CINDER_DB}" == "true" ]; then
mysql -h ${MARIADB_SERVICE_HOST} -u root -p${DB_ROOT_PASSWORD} mysql <<EOF
CREATE DATABASE IF NOT EXISTS ${CINDER_DB_NAME};
GRANT ALL PRIVILEGES ON ${CINDER_DB_NAME}.* TO
'${CINDER_DB_USER}'@'%' IDENTIFIED BY '${CINDER_DB_PASSWORD}'
EOF
su -s /bin/sh -c "cinder-manage db sync" cinder
fi
crudini --set $cfg \
DEFAULT \
log_file \
"${CINDER_SCHEDULER_LOG_FILE}"
echo "Starting cinder-scheduler"
exec /usr/bin/cinder-scheduler --config-file $cfg
|
brk3/kolla
|
docker/common/cinder-app/cinder-scheduler/start.sh
|
Shell
|
apache-2.0
| 803 |
#! /usr/bin/env bash
set -eu
shopt -s nullglob
# Locate the script file. Cross symlinks if necessary.
loc="$0"
while [ -h "$loc" ]; do
ls=`ls -ld "$loc"`
link=`expr "$ls" : '.*-> \(.*\)$'`
if expr "$link" : '/.*' > /dev/null; then
loc="$link" # Absolute link
else
loc="`dirname "$loc"`/$link" # Relative link
fi
done
base_dir=$(cd "`dirname "$loc"`" && pwd)
temp_out="$base_dir/djinni-output-temp"
in="$base_dir/example.djinni"
cpp_out="$base_dir/generated-src/cpp"
jni_out="$base_dir/generated-src/jni"
objc_out="$base_dir/generated-src/objc"
java_out="$base_dir/generated-src/java/com/dropbox/textsort"
java_package="com.dropbox.textsort"
gen_stamp="$temp_out/gen.stamp"
if [ $# -eq 0 ]; then
# Normal build.
true
elif [ $# -eq 1 ]; then
command="$1"; shift
if [ "$command" != "clean" ]; then
echo "Unexpected argument: \"$command\"." 1>&2
exit 1
fi
for dir in "$temp_out" "$cpp_out" "$jni_out" "$java_out"; do
if [ -e "$dir" ]; then
echo "Deleting \"$dir\"..."
rm -r "$dir"
fi
done
exit
fi
# Build djinni
"$base_dir/../src/build"
[ ! -e "$temp_out" ] || rm -r "$temp_out"
"$base_dir/../src/run-assume-built" \
--java-out "$temp_out/java" \
--java-package $java_package \
--java-class-access-modifier "package" \
--java-nullable-annotation "javax.annotation.CheckForNull" \
--java-nonnull-annotation "javax.annotation.Nonnull" \
--ident-java-field mFooBar \
\
--cpp-out "$temp_out/cpp" \
--cpp-namespace textsort \
--ident-cpp-enum-type foo_bar \
\
--jni-out "$temp_out/jni" \
--ident-jni-class NativeFooBar \
--ident-jni-file NativeFooBar \
\
--objc-out "$temp_out/objc" \
--objcpp-out "$temp_out/objc" \
--objc-type-prefix TXS \
\
--idl "$in"
# Copy changes from "$temp_output" to final dir.
mirror() {
local prefix="$1" ; shift
local src="$1" ; shift
local dest="$1" ; shift
mkdir -p "$dest"
rsync -a --delete --checksum --itemize-changes "$src"/ "$dest" | grep -v '^\.' | sed "s/^/[$prefix]/"
}
echo "Copying generated code to final directories..."
mirror "cpp" "$temp_out/cpp" "$cpp_out"
mirror "java" "$temp_out/java" "$java_out"
mirror "jni" "$temp_out/jni" "$jni_out"
mirror "objc" "$temp_out/objc" "$objc_out"
date > "$gen_stamp"
echo "djinni completed."
|
iRobotCorporation/djinni
|
example/run_djinni.sh
|
Shell
|
apache-2.0
| 2,401 |
#!/bin/bash
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is an example script that creates a single shard vttablet deployment.
set -e
cell=${CELL:-'test'}
keyspace=${KEYSPACE:-'test_keyspace'}
shard=${SHARD:-'0'}
uid_base=${UID_BASE:-'100'}
port_base=$[15000 + $uid_base]
grpc_port_base=$[16000 + $uid_base]
mysql_port_base=$[17000 + $uid_base]
tablet_hostname=''
# Travis hostnames are too long for MySQL, so we use IP.
# Otherwise, blank hostname means the tablet auto-detects FQDN.
if [ "$TRAVIS" == true ]; then
tablet_hostname=`hostname -i`
fi
script_root=`dirname "${BASH_SOURCE}"`
source $script_root/env.sh
init_db_sql_file="$VTROOT/config/init_db.sql"
export EXTRA_MY_CNF=$VTROOT/config/mycnf/default-fast.cnf
case "$MYSQL_FLAVOR" in
"MySQL56")
export EXTRA_MY_CNF=$EXTRA_MY_CNF:$VTROOT/config/mycnf/master_mysql56.cnf
;;
"MariaDB")
export EXTRA_MY_CNF=$EXTRA_MY_CNF:$VTROOT/config/mycnf/master_mariadb.cnf
;;
*)
echo "Please set MYSQL_FLAVOR to MySQL56 or MariaDB."
exit 1
;;
esac
mkdir -p $VTDATAROOT/backups
# Start 3 vttablets by default.
# Pass TABLETS_UIDS indices as env variable to change
uids=${TABLETS_UIDS:-'0 1 2'}
# Start all mysqlds in background.
for uid_index in $uids; do
uid=$[$uid_base + $uid_index]
mysql_port=$[$mysql_port_base + $uid_index]
printf -v alias '%s-%010d' $cell $uid
printf -v tablet_dir 'vt_%010d' $uid
export KEYSPACE=$keyspace
export SHARD=$shard
export TABLET_ID=$alias
export TABLET_DIR=$tablet_dir
export MYSQL_PORT=$mysql_port
tablet_type=replica
if [[ $uid_index -gt 1 ]]; then
tablet_type=rdonly
fi
export TABLET_TYPE=$tablet_type
echo "Starting MySQL for tablet $alias..."
action="init -init_db_sql_file $init_db_sql_file"
if [ -d $VTDATAROOT/$tablet_dir ]; then
echo "Resuming from existing vttablet dir:"
echo " $VTDATAROOT/$tablet_dir"
action='start'
fi
$VTROOT/bin/mysqlctl \
-log_dir $VTDATAROOT/tmp \
-tablet_uid $uid \
-mysql_port $mysql_port \
$action &
done
# Wait for all mysqld to start up.
wait
optional_auth_args=''
if [ "$1" = "--enable-grpc-static-auth" ];
then
echo "Enabling Auth with static authentication in grpc"
optional_auth_args='-grpc_auth_mode static -grpc_auth_static_password_file ./grpc_static_auth.json'
fi
# Start all vttablets in background.
for uid_index in $uids; do
uid=$[$uid_base + $uid_index]
port=$[$port_base + $uid_index]
grpc_port=$[$grpc_port_base + $uid_index]
printf -v alias '%s-%010d' $cell $uid
printf -v tablet_dir 'vt_%010d' $uid
printf -v tablet_logfile 'vttablet_%010d_querylog.txt' $uid
tablet_type=replica
if [[ $uid_index -gt 1 ]]; then
tablet_type=rdonly
fi
echo "Starting vttablet for $alias..."
# shellcheck disable=SC2086
$VTROOT/bin/vttablet \
$TOPOLOGY_FLAGS \
-log_dir $VTDATAROOT/tmp \
-log_queries_to_file $VTDATAROOT/tmp/$tablet_logfile \
-tablet-path $alias \
-tablet_hostname "$tablet_hostname" \
-init_keyspace $keyspace \
-init_shard $shard \
-init_tablet_type $tablet_type \
-health_check_interval 5s \
-enable_semi_sync \
-enable_replication_reporter \
-backup_storage_implementation file \
-file_backup_storage_root $VTDATAROOT/backups \
-restore_from_backup \
-port $port \
-grpc_port $grpc_port \
-service_map 'grpc-queryservice,grpc-tabletmanager,grpc-updatestream' \
-pid_file $VTDATAROOT/$tablet_dir/vttablet.pid \
-vtctld_addr http://$hostname:$vtctld_web_port/ \
$optional_auth_args \
> $VTDATAROOT/$tablet_dir/vttablet.out 2>&1 &
echo "Access tablet $alias at http://$hostname:$port/debug/status"
done
disown -a
|
HubSpot/vitess
|
examples/local/vttablet-up.sh
|
Shell
|
apache-2.0
| 4,235 |
#!/usr/bin/env bash
# Copyright 2018 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Exit on non-true return value
set -e
# Exit on reference to uninitialized variable
set -u
set -o pipefail
source $SOURCE_DIR/functions.sh
THIS_DIR="$( cd "$( dirname "$0" )" && pwd )"
prepare $THIS_DIR
if needs_build_package ; then
# Download the dependency from S3
download_dependency $PACKAGE "${PACKAGE_STRING}.tar.gz" $THIS_DIR
setup_package_build $PACKAGE $PACKAGE_VERSION
wrap ./configure --prefix=${LOCAL_INSTALL}
wrap make -j${BUILD_THREADS} install
finalize_package_build $PACKAGE $PACKAGE_VERSION
fi
|
timarmstrong/native-toolchain
|
source/bison/build.sh
|
Shell
|
apache-2.0
| 1,123 |
#!/bin/bash
#
# Creates a list of commands for running experiments
#
CMD_NAME="$1"
SSIZES="$2"
EPS1S="$3"
EPS2S="$4"
CMD_FILE="cmd_${CMD_NAME##*/}"
# Make a command
if [[ ! "${CMD_NAME}" =~ '/' ]]; then
CMD_NAME="./${CMD_NAME}"
fi
# Check that the binary exists and is executable
if [[ ! -x "${CMD_NAME}" ]]; then
echo "Not an executable: ${CMD_NAME}"
exit 1
fi
# Remove the commands file (if any)
rm -f "${CMD_FILE}.txt"
# Create a command for sample size, etc.
for ssz in ${SSIZES}; do
for eps1 in ${EPS1S}; do
for eps2 in ${EPS2S}; do
echo "${CMD_NAME} ${ssz} ${eps1} ${eps2}" >> "${CMD_FILE}.txt"
done
done
done
|
mlapin/cvpr14mtl
|
usps/make_cmd_experiments.sh
|
Shell
|
bsd-2-clause
| 646 |
NODE_ADD=0
NODE_CHANGE=1
NODE_DEL=2
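# qconf_event and qconf_path are expected to be set by the QConf agent that
# invokes this template; on add/change events the current value is fetched
# and nginx.conf is rewritten (and nginx restarted) only if it differs.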
if [ "$qconf_event" != "$NODE_DEL" ]; then
value=`./qconf get_conf $qconf_path`
# TODO: Nginx directory; you may change this to your own directory
nginx=/usr/local/nginx
nginx_conf_path=$nginx/conf/nginx.conf
old_conf_value=`cat $nginx_conf_path`
#If the value changed, rewrite the config file
if [ "$value" != "$old_conf_value" ]; then
cp $nginx_conf_path ${nginx_conf_path}.bak
echo $value > ${nginx_conf_path}.tmp
mv ${nginx_conf_path}.tmp $nginx_conf_path
# TODO: Restart nginx; you may change this to your own nginx start command
service nginx restart
fi
fi
|
fengshao0907/QConf
|
agent/script/nginx_template.sh
|
Shell
|
bsd-2-clause
| 667 |
#!/bin/sh
ROOT_PATH=`pwd`
INSTALL_PATH=/home/haiwen/nginx
SRC_ROOT_PATH=$ROOT_PATH
cd $SRC_ROOT_PATH;
#rm -rf nginx-0.8.54
#tar zxvf nginx-0.8.54.tar.gz
#rm -rf pcre-8.01
#tar zxvf pcre-8.01.tar.gz
#tar third modules
#cd $SRC_ROOT_PATH"/third_modules"
#rm -rf headers-more-nginx-module
#tar zxvf headers-more-nginx-module.tar.gz
cd $SRC_ROOT_PATH
make clean
./configure \
--prefix=$INSTALL_PATH \
--with-pcre="/home/haiwen/myself/nginx_install/src/pcre-8.01" \
--http-log-path=$INSTALL_PATH"/logs/access_log" \
--error-log-path=$INSTALL_PATH"/logs/error_log" \
--with-http_realip_module \
--with-http_stub_status_module \
--without-http_userid_module \
--http-client-body-temp-path=$INSTALL_PATH"/cache/client_body" \
--http-proxy-temp-path=$INSTALL_PATH"/cache/proxy" \
--http-fastcgi-temp-path=$INSTALL_PATH"/cache/fastcgi" \
--http-uwsgi-temp-path=$INSTALL_PATH"/cache/uwsgi" \
--http-scgi-temp-path=$INSTALL_PATH"/cache/scgi" \
--add-module=$SRC_ROOT_PATH"/third_modules/headers-more-nginx-module"
make
make install
echo "make done!";
cd $SRC_ROOT_PATH"/../"
#mkdir & cp conf
#rm -rf $INSTALL_PATH"/cache/"
mkdir $INSTALL_PATH"/cache/"
cp -rpf $SRC_ROOT_PATH"/conf/"* $INSTALL_PATH"/conf/"
#clean
#rm -rf $SRC_ROOT_PATH"/nginx-0.8.54"
#rm -rf $SRC_ROOT_PATH"/pcre-8.01"
#rm -rf $SRC_ROOT_PATH"/third_modules/headers-more-nginx-module"
|
wangfakang/ReadNginxSrc
|
install.sh
|
Shell
|
bsd-2-clause
| 1,401 |
#!/usr/bin/env bash
# Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
set -x
ninja=$(which ninja)
depot_tools=$(dirname $ninja)
cmd="sed -i /jessie-updates/d /etc/apt/sources.list\
&& apt-get update && apt-get -y install build-essential debhelper git python\
&& PATH=\"$depot_tools:\$PATH\"\
python tools/bots/linux_distribution_support.py"
image="launcher.gcr.io/google/debian8:latest"
docker run -e BUILDBOT_BUILDERNAME -v $depot_tools:$depot_tools\
-v `pwd`:`pwd` -w `pwd` -i --rm $image bash -c "$cmd"
|
dartino/dart-sdk
|
tools/run_debian_build.sh
|
Shell
|
bsd-3-clause
| 686 |
#!/bin/env bash
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" # http://stackoverflow.com/questions/59895
export PYTHONPATH=~/python-packages/lib/python2.6/site-packages
export PATH=~/python-packages/bin:$PATH
alias monit="monit -c $DIR/servers/monitrc"
|
snac-pilot/eac-graph-load
|
setenv.bash
|
Shell
|
bsd-3-clause
| 263 |
#!/bin/sh
set -e
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
# This protects against multiple targets copying the same framework dependency at the same time. The solution
# was originally proposed here: https://lists.samba.org/archive/rsync/2008-February/020158.html
RSYNC_PROTECT_TMP_FILES=(--filter "P .*.??????")
install_framework()
{
if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
local source="${BUILT_PRODUCTS_DIR}/$1"
elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
elif [ -r "$1" ]; then
local source="$1"
fi
local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
if [ -L "${source}" ]; then
echo "Symlinked..."
source="$(readlink "${source}")"
fi
# Use filter instead of exclude so missing patterns don't throw errors.
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
local basename
basename="$(basename -s .framework "$1")"
binary="${destination}/${basename}.framework/${basename}"
if ! [ -r "$binary" ]; then
binary="${destination}/${basename}"
fi
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
strip_invalid_archs "$binary"
fi
# Resign the code if required by the build settings to avoid unstable apps
code_sign_if_enabled "${destination}/$(basename "$1")"
# Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]})
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
code_sign_if_enabled "${destination}/${lib}"
done
fi
}
# Copies the dSYM of a vendored framework
install_dsym() {
local source="$1"
if [ -r "$source" ]; then
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${DWARF_DSYM_FOLDER_PATH}\""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${DWARF_DSYM_FOLDER_PATH}"
fi
}
# Signs a framework with the provided identity
code_sign_if_enabled() {
if [ -n "${EXPANDED_CODE_SIGN_IDENTITY}" -a "${CODE_SIGNING_REQUIRED}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
# Use the current code_sign_identitiy
echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
local code_sign_cmd="/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS} --preserve-metadata=identifier,entitlements '$1'"
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
code_sign_cmd="$code_sign_cmd &"
fi
echo "$code_sign_cmd"
eval "$code_sign_cmd"
fi
}
# Strip invalid architectures
strip_invalid_archs() {
binary="$1"
# Get architectures for current file
archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | rev)"
stripped=""
for arch in $archs; do
if ! [[ "${ARCHS}" == *"$arch"* ]]; then
# Strip non-valid architectures in-place
lipo -remove "$arch" -output "$binary" "$binary" || exit 1
stripped="$stripped $arch"
fi
done
if [[ "$stripped" ]]; then
echo "Stripped $binary of architectures:$stripped"
fi
}
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_framework "${BUILT_PRODUCTS_DIR}/Alamofire/Alamofire.framework"
install_framework "${BUILT_PRODUCTS_DIR}/Mockingjay/Mockingjay.framework"
install_framework "${BUILT_PRODUCTS_DIR}/URITemplate/URITemplate.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_framework "${BUILT_PRODUCTS_DIR}/Alamofire/Alamofire.framework"
install_framework "${BUILT_PRODUCTS_DIR}/Mockingjay/Mockingjay.framework"
install_framework "${BUILT_PRODUCTS_DIR}/URITemplate/URITemplate.framework"
fi
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
wait
fi
|
feliperuzg/CleanExample
|
Pods/Target Support Files/Pods-CleanExampleUITests/Pods-CleanExampleUITests-frameworks.sh
|
Shell
|
mit
| 4,974 |
#! /bin/sh
# Copyright (C) 2010-2014 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Test support for AC_CONFIG_LIBOBJ_DIR.
required=cc
. test-init.sh
cat >> configure.ac << 'END'
AC_CONFIG_LIBOBJ_DIR([libobj-dir])
AC_PROG_CC
AM_PROG_AR
AC_PROG_RANLIB
AC_LIBOBJ([foobar])
AC_OUTPUT
END
cat > Makefile.am << 'END'
AUTOMAKE_OPTIONS = subdir-objects
noinst_LIBRARIES = libtu.a
libtu_a_SOURCES =
libtu_a_LIBADD = $(LIBOBJS)
## The hack with this variable and the extra make recursion in the
## check-local rule are required for when we move this Makefile into a
## subdir, later.
my_distdir = $(top_builddir)/$(PACKAGE)-$(VERSION)
check-local:
(cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) distdir)
ls -l $(top_srcdir)/* $(top_builddir)/* $(my_distdir)/*
test -f libtu.a
test ! -r $(top_srcdir)/foobar.c
test -f $(top_srcdir)/libobj-dir/foobar.c
test ! -r $(my_distdir)/foobar.c
test -f $(my_distdir)/libobj-dir/foobar.c
$(AR) t libtu.a
$(AR) t libtu.a | grep foobar
END
mkdir libobj-dir
cat > libobj-dir/foobar.c << 'END'
extern int dummy;
END
cp "$am_scriptdir/ar-lib" . || fatal_ "fetching auxiliary script 'ar-lib'"
$ACLOCAL
$AUTOCONF
$AUTOMAKE
./configure
$MAKE
$MAKE check
$MAKE distcheck
# Same check, but with the Makefile.am using $(LIBOBJS) not being
# the top-level one.
$MAKE distclean
rm -rf autom4te*.cache aclocal.m4 configure
mkdir sub
mv -f Makefile.am sub
echo SUBDIRS = sub > Makefile.am
sed '/^AC_OUTPUT/i\
AC_CONFIG_FILES([sub/Makefile])
' configure.ac > t
mv -f t configure.ac
cat configure.ac # For debugging.
$ACLOCAL
$AUTOCONF
$AUTOMAKE
./configure
$MAKE
$MAKE check
$MAKE distcheck
:
|
kuym/openocd
|
tools/automake-1.15/t/libobj19.sh
|
Shell
|
gpl-2.0
| 2,231 |
# Copyright (C) 2008 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# tests lvm initialization, and especially negative tests of error paths
#
. ./test-utils.sh
aux prepare_devs 5
# invalid units
not pvs --config 'global { units = "<" }'
|
ystk/debian-lvm2
|
test/t-lvm-init.sh
|
Shell
|
gpl-2.0
| 627 |
#!/bin/bash
set -e
# TODO - We (Joomla) need to expose an alternate API to checking the latest version other than our XML files
current="$(curl -A 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.89 Safari/537.36' -sSL 'http://developer.joomla.org/latest_version.json' | sed -r 's/^.*"current":"([^"]+)".*$/\1/')"
# We're putting a lot of trust in this process, once Joomla has an exposed API to query the SHA1 use that instead
wget -O joomla.zip https://github.com/joomla/joomla-cms/releases/download/$current/Joomla_$current-Stable-Full_Package.zip
sha1="$(sha1sum joomla.zip | sed -r 's/ .*//')"
for variant in apache fpm; do
(
set -x
sed -ri '
s/^(ENV JOOMLA_VERSION) .*/\1 '"$current"'/;
s/^(ENV JOOMLA_SHA1) .*/\1 '"$sha1"'/;
' "$variant/Dockerfile"
# To make management easier, we use these files for all variants
cp docker-entrypoint.sh "$variant/docker-entrypoint.sh"
cp makedb.php "$variant/makedb.php"
)
done
rm joomla.zip
|
OSTraining/docker-joomla
|
update.sh
|
Shell
|
gpl-2.0
| 1,022 |
#!/bin/bash
############################################################################################
## Copyright 2003, 2015 IBM Corp ##
## ##
## Redistribution and use in source and binary forms, with or without modification, ##
## are permitted provided that the following conditions are met: ##
## 1.Redistributions of source code must retain the above copyright notice, ##
## this list of conditions and the following disclaimer. ##
## 2.Redistributions in binary form must reproduce the above copyright notice, this ##
## list of conditions and the following disclaimer in the documentation and/or ##
## other materials provided with the distribution. ##
## ##
## THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS AND ANY EXPRESS ##
## OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF ##
## MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ##
## THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, ##
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ##
## SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) ##
## HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, ##
## OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ##
## SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ##
############################################################################################
### File : tuned ##
##
### Description: This testcase tests tuned package ##
##
### Author: Ravindran Arani <[email protected]> ##
###########################################################################################
#cd $(dirname $0)
#LTPBIN=${LTPBIN%/shared}/tuned
source $LTPBIN/tc_utils.source
TESTDIR="${LTPBIN%/shared}/tuned/"
function tc_local_setup()
{
tc_check_package tuned
tc_break_if_bad $? "tuned is not installed"
}
function runtests()
{
pushd $TESTDIR >$stdout 2>$stderr
TST_TOTAL=8
#Start tuned service if not already started:
tc_register "tuned Service Start"
tc_service_status tuned
servicerc=$?
if [ $servicerc -ne 0 ]; then
tc_service_start_and_wait tuned
fi
#List all available profiles:
tc_register "List all available profiles"
tuned-adm list |grep -i "balanced\|desktop\|latency-performance\|network-latency\|network-throughput\|powersave\|sap\|throughput-performance\|virtual-guest\|virtual-host" >$stdout 2>$stderr
tc_pass_or_fail $? "profiles are not listed"
#Let tuned recommend the best profile for your system:
tc_register "tuned-adm recommend command check"
tuned-adm recommend >$stdout 2>$stderr
tc_pass_or_fail $? "tuned-adm has failed to recommend profile"
#check the current set profile:
tc_register "tuned-adm active command check"
profile=`tuned-adm active|cut -d' ' -f4` >$stdout 2>$stderr
if [ $? -eq 0 ] && [ ! -z "$profile" ]
then RC=0
else RC=1
fi
tc_pass_or_fail $RC "tuned-adm active has issues"
tc_service_restart_and_wait tuned
#Try to switch between profiles:
tc_register "test changing tuned profiles"
if [ $profile != balanced ]
then profile2set="balanced"
else profile2set="powersave"
fi
tuned-adm profile $profile2set >$stdout 2>$stderr
tc_pass_or_fail $? "set profile failed"
#Check if the profile was really set
tc_register "test tuned profile change instruction"
newprofile=`tuned-adm active|cut -d' ' -f4` >$stdout 2>$stderr
if [ $newprofile != $profile2set ]
then RC=1
else RC=0
fi
tc_pass_or_fail $RC "new profile is not set properly"
#Stop tuned service
tc_register "Stop tuned service if not already stopped"
tc_service_status tuned
if [ $? -eq 0 ]; then
tc_service_stop_and_wait tuned
fi
#Restore tuned to its original configuration
tc_register "Restore tuned to its original configuration"
if [ $servicerc -ne 0 ]; then
tc_service_stop_and_wait tuned
else
tc_service_start_and_wait tuned
fi
tuned-adm profile $profile >$stdout 2>$stderr
RC=$?
#The warnings below show up when we try to set a profile while the service is down, so we ignore them.
tc_ignore_warnings "Cannot talk to Tuned daemon via DBus"
tc_ignore_warnings "You need to (re)start the tuned daemon by hand for changes to apply"
tc_pass_or_fail $RC "Restore of original config failed"
popd >$stdout 2>$stderr
}
#
#MAIN
#
tc_setup
runtests
|
rajashreer7/autotest-client-tests
|
linux-tools/tuned/tuned.sh
|
Shell
|
gpl-2.0
| 4,920 |
#!/bin/sh
# $1 - action
# $2 - type of file
action=$1
filetype=$2
[ -n "${MC_XDG_OPEN}" ] || MC_XDG_OPEN="xdg-open"
do_view_action() {
filetype=$1
case "${filetype}" in
trpm)
rpm -qivl --scripts `basename "${MC_EXT_BASENAME}" .trpm`
;;
src.rpm|rpm)
if rpm --nosignature --version >/dev/null 2>&1; then
RPM="rpm --nosignature"
else
RPM="rpm"
fi
$RPM -qivlp --scripts "${MC_EXT_FILENAME}"
;;
deb)
dpkg-deb -I "${MC_EXT_FILENAME}" && echo && dpkg-deb -c "${MC_EXT_FILENAME}"
;;
debd)
dpkg -s `echo "${MC_EXT_BASENAME}" | sed 's/\([0-9a-z.-]*\).*/\1/'`
;;
deba)
apt-cache show `echo "${MC_EXT_BASENAME}" | sed 's/\([0-9a-z.-]*\).*/\1/'`
;;
*)
;;
esac
}
do_open_action() {
filetype=$1
case "${filetype}" in
*)
;;
esac
}
case "${action}" in
view)
do_view_action "${filetype}"
;;
open)
"${MC_XDG_OPEN}" "${MC_EXT_FILENAME}" 2>/dev/null || \
do_open_action "${filetype}"
;;
*)
;;
esac
|
nilsonmorales/Puppyes-nightrc
|
usr/local/libexec/mc/ext.d/package.sh
|
Shell
|
gpl-3.0
| 1,110 |
#!/bin/sh
EXEC=/usr/bin/python
MyServer=http://localhost:7080
#MyServer=http://server2:9673
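# Pick the Cuon server URL from the second argument (plain/SSL local, QEMU
# guest, or one of the remote hosts); anything else falls back to the local
# server.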
case $2 in
n) Server=$MyServer/Cuon
;;
ssl) Server=https://localhost:7580/Cuon
;;
qemu) Server=http://192.168.17.2:4080/Cuon
;;
qemussl) Server=https://192.168.17.2:4580/Cuon
;;
die) Server=http://dietzel-normteile.dyndns.org:7080/Cuon
;;
diessl) Server=https://dietzel-normteile.dyndns.org:8443/Cuon
;;
cuweb) Server=http://84.244.7.139:7080
;;
cuwebssl) Server=https://84.244.7.139:8443
;;
cyweb) Server=http://84.244.4.80:7080/Cuon
;;
cywebssl) Server=https://84.244.4.80:8443/Cuon
;;
*) Server=$MyServer/Cuon
;;
esac
case $1 in
ai)
make all
sudo make ai
cp ../cuon_server/src/ai_main.py cuon/AI
cd cuon/AI
$EXEC tki1.py
;;
mini)
make all
sudo make ai
cd cuon/AI
$EXEC miniClient.py
;;
gtkmini)
make all
sudo make ai
rm -R gtkMiniClient
make gtkMiniClient
cd gtkMiniClient
$EXEC gtkMiniClient.py
;;
server)
sudo make install_server
cd CUON/
#make iClient
sudo cp ../cuon_server.py .
$EXEC cuon_server.py $Server server $3
;;
client)
echo " Server = $Server"
#sudo make install_server
cd Client
cp -R CUON/* ~/cuon/bin
cp -R locale ~/cuon
cd ~/cuon/bin
$EXEC Cuon.py $Server client NO ~/cuon/locale
;;
profile)
sudo make install_server
cd CUON/
$EXEC -m profile -o cuonprofile Cuon.py $Server client $3
;;
esac
cd ../
|
BackupTheBerlios/cuon-svn
|
cuon_client/pycuon.sh
|
Shell
|
gpl-3.0
| 1,338 |
#!/bin/bash
set -m
# Send SIGTERM to child processes of PID 1.
function signal_handler()
{
kill "$pid"
}
function init_systemd()
{
GREEN='\033[0;32m'
echo -e "${GREEN}Systemd init system enabled."
for var in $(compgen -e); do
printf '%q=%q\n' "$var" "${!var}"
done > /etc/docker.env
echo 'source /etc/docker.env' >> ~/.bashrc
printf '#!/bin/bash\n exec ' > /etc/balenaApp.sh
printf '%q ' "$@" >> /etc/balenaApp.sh
chmod +x /etc/balenaApp.sh
mkdir -p /etc/systemd/system/balena.service.d
cat <<-EOF > /etc/systemd/system/balena.service.d/override.conf
[Service]
WorkingDirectory=$(pwd)
EOF
sleep infinity &
exec env DBUS_SYSTEM_BUS_ADDRESS=unix:path=/run/dbus/system_bus_socket /sbin/init quiet systemd.show_status=0
}
function init_non_systemd()
{
# trap the stop signal then send SIGTERM to user processes
trap signal_handler SIGRTMIN+3 SIGTERM
# echo error message, when executable file doesn't exist.
if CMD=$(command -v "$1" 2>/dev/null); then
shift
"$CMD" "$@" &
pid=$!
wait "$pid"
exit_code=$?
fg &> /dev/null || exit "$exit_code"
else
echo "Command not found: $1"
exit 1
fi
}
INITSYSTEM=$(echo "$INITSYSTEM" | awk '{print tolower($0)}')
case "$INITSYSTEM" in
'1' | 'true')
INITSYSTEM='on'
;;
esac
if [ "$INITSYSTEM" = "on" ]; then
init_systemd "$@"
else
init_non_systemd "$@"
fi
|
resin-io-projects/boombeastic
|
spotify/systemd/entry.sh
|
Shell
|
apache-2.0
| 1,345 |
#!/usr/bin/env bash
#
# Copyright (c) 2019-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
export LC_ALL=C.UTF-8
export DOCKER_NAME_TAG="ubuntu:20.04"
export CONTAINER_NAME=ci_native_fuzz_valgrind
export PACKAGES="clang llvm python3 libevent-dev bsdmainutils libboost-dev libboost-system-dev libboost-filesystem-dev libboost-test-dev libsqlite3-dev valgrind"
export NO_DEPENDS=1
export RUN_UNIT_TESTS=false
export RUN_FUNCTIONAL_TESTS=false
export RUN_FUZZ_TESTS=true
export FUZZ_TESTS_CONFIG="--valgrind"
export GOAL="install"
export BITCOIN_CONFIG="--enable-fuzz --with-sanitizers=fuzzer CC=clang CXX=clang++"
export CCACHE_SIZE=200M
|
MarcoFalke/bitcoin
|
ci/test/00_setup_env_native_fuzz_with_valgrind.sh
|
Shell
|
mit
| 760 |
#!/bin/sh
. /scripts/A-config.sh
echo Restarting USB mixer device...
killall dvsource-alsa
killall -9 dvsource-alsa
dvsource-alsa hw:1
|
xfxf/veyepar
|
setup/dvs_launchers/scripts/2-audiomixer.sh
|
Shell
|
mit
| 139 |
#
# Git branch
#
# Show current git branch
# ------------------------------------------------------------------------------
# Configuration
# ------------------------------------------------------------------------------
SPACESHIP_GIT_BRANCH_SHOW="${SPACESHIP_GIT_BRANCH_SHOW=true}"
SPACESHIP_GIT_BRANCH_PREFIX="${SPACESHIP_GIT_BRANCH_PREFIX="$SPACESHIP_GIT_SYMBOL"}"
SPACESHIP_GIT_BRANCH_SUFFIX="${SPACESHIP_GIT_BRANCH_SUFFIX=""}"
SPACESHIP_GIT_BRANCH_COLOR="${SPACESHIP_GIT_BRANCH_COLOR="magenta"}"
# ------------------------------------------------------------------------------
# Section
# ------------------------------------------------------------------------------
spaceship_git_branch() {
[[ $SPACESHIP_GIT_BRANCH_SHOW == false ]] && return
local git_current_branch="$vcs_info_msg_0_"
[[ -z "$git_current_branch" ]] && return
git_current_branch="${git_current_branch#heads/}"
git_current_branch="${git_current_branch/.../}"
spaceship::section \
"$SPACESHIP_GIT_BRANCH_COLOR" \
"$SPACESHIP_GIT_BRANCH_PREFIX${git_current_branch}$SPACESHIP_GIT_BRANCH_SUFFIX"
}
|
matthewberryman/oh-my-zsh
|
themes/sections/git_branch.zsh
|
Shell
|
mit
| 1,095 |
#!/usr/bin/env bash
function createRootDirectory() {
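# Recreate the given root directory from scratch, removing any existing contents.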
log "$0" "createRootDirectory";
_root_path=$1;
if [[ -d "${_root_path}" ]]
then
rm -rf "${_root_path}";
fi
mkdir "${_root_path}";
}
|
bradyhouse/house
|
fiddles/bash/fiddle-0069-NodeProjectSetup/bin/_add_directory.sh
|
Shell
|
mit
| 218 |
#!/usr/bin/expect -f
#!/bin/sh
set arguments [lindex $argv 0]
set password [lindex $argv 1]
eval spawn $arguments
match_max 100000
set timeout 1
#expect "*yes/no*" {send "yes\r"; exp_continue};
set timeout -1
expect {
#connection error
"?sh: Error*" {puts "CONNECTION_ERROR"; exit};
#connection refused
"*onnection refused*" {puts "CONNECTION_REFUSED"; exit};
#host error
"*o route to host*" {puts "NO_ROUTE_TO_HOST"; exit};
#host key verification failed
"*ey verification failed*" {puts "HOST_KEY_VERIFICATION_FAILED"; exit};
#forwarding port error
"*ad dynamic forwarding specification*" {puts "BAD_DYNAMIC_FORWARDING_SPECIFICATION"; exit};
"*rivileged ports can only be forwarded by root*" {puts "PRIVILEGED_DYNAMIC_PORTS_UNAVAILABLE"; exit};
"*annot listen to port*" {puts "DYNAMIC_PORTS_USED"; exit};
#remote port error
"*ad port*" {puts "BAD_REMOTE_PORT"; exit};
"*onnection closed by remote host*" {puts "REMOTE_PORT_SHUT_DOWN"; exit};
#syntax error
"*sage*" {puts "SSH_SYNTAX_ERROR"; exit};
#broken pipe
"*roken pipe*" {puts "BROKEN_PIPE"; exit};
#bot answers
"*yes/no*" {send "yes\r"; exp_continue};
"*?assword:*" { send "$password\r"; set timeout 4;
expect "*?assword:*" {puts "WRONG_PASSWORD"; exit;}
};
}
puts "CONNECTED";
set timeout -1
expect eof;
|
OpenFibers/SSHMole
|
SSHMole/SSHTask/SSHCommand.sh
|
Shell
|
gpl-2.0
| 1,352 |
#! /bin/sh
## Quit immediately on error
set -e
## Want to run this as 'vagrant', so rerun if root
if [ "$(id -u)" = "0" ]; then
sudo -u vagrant bash $0 $@
exit 0
fi
## Check arguments
if [ ! "x"$# = "x1" ]; then exit 2; fi
version=$@
majorversion=`echo $version | cut -f1 -d.`
markerfile=~vagrant/vagrant/provisions/installR-${version}
echo -n "Installing R version $version.... "
## Check if we have anything to do
if [ -e ${markerfile} ]; then echo "Already installed" ; exit 0; fi
## Target directory
rdir=~vagrant/R/R-$version
mkdir -p ${rdir}
## Temporary build directory
tmp=`mktemp -d`
cd ${tmp}
## Download, extract and build
wget http://cran.rstudio.com/src/base/R-${majorversion}/R-${version}.tar.gz
tar xzf R-${version}.tar.gz
cd R-${version}
./configure --prefix=${rdir}
make
make install
## Clean up
cd
rm -rf ${tmp}
## Mark this as done
echo DONE.
touch ${markerfile}
| igraph/xdata-igraph | tools/virtual/vagrant/provisioners/installR.sh | Shell | gpl-2.0 | 900 |
#!/bin/sh
# Test use of compression subprocesses by sort
# Copyright (C) 2010-2015 Free Software Foundation, Inc.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
. "${srcdir=.}/tests/init.sh"; path_prepend_ ./src
print_ver_ sort
expensive_
SORT_FAILURE=2
seq -w 2000 > exp || fail=1
tac exp > in || fail=1
insize=$(stat -c %s - <in) || fail=1
# This compressor's behavior is adjustable via environment variables.
export PRE_COMPRESS=
export POST_COMPRESS=
cat <<\EOF >compress || framework_failure_
#!/bin/sh
eval "$PRE_COMPRESS"
tr 41 14 || exit
eval "$POST_COMPRESS"
EOF
chmod +x compress
# "Impatient exit" tests
#
# In these test cases, the biggest compressor (or decompressor) exits
# with nonzero status, after sleeping a bit. Until coreutils 8.7
# 'sort' impatiently exited without waiting for its decompression
# subprocesses in these cases. Check compression too, while we're here.
#
for compress_arg in '' '-d'
do
POST_COMPRESS='
test "X$1" != "X'$compress_arg'" || {
test "X$1" = "X" && exec <&1
size=$(stat -c %s -)
exec >/dev/null 2>&1 <&1 || exit
expr $size "<" '"$insize"' / 2 || { sleep 1; exit 1; }
}
' sort --compress-program=./compress -S 1k --batch-size=2 in > out
test $? -eq $SORT_FAILURE || fail=1
done
# "Pre-exec child" test
#
# Ignore a random child process created before 'sort' was exec'ed.
# This bug was also present in coreutils 8.7.
#
( (sleep 1; exec false) &
PRE_COMPRESS='test -f ok || sleep 2'
POST_COMPRESS='touch ok'
exec sort --compress-program=./compress -S 1k in >out
) || fail=1
compare exp out || fail=1
test -f ok || fail=1
rm -f ok
rm -f compress
# If $TMPDIR is relative, give subprocesses time to react when 'sort' exits.
# Otherwise, under NFS, when 'sort' unlinks the temp files and they
# are renamed to .nfsXXXX instead of being removed, the parent cleanup
# of this directory will fail because the files are still open.
case $TMPDIR in
/*) ;;
*) sleep 1;;
esac
Exit $fail
| davisqi/coreutils | tests/misc/sort-compress-proc.sh | Shell | gpl-3.0 | 2,566 |
#!/bin/bash -e
#
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This script is used to generate .gni files and files in the
# config/platform directories needed to build libvpx.
# Every time libvpx source code is updated just run this script.
#
# Usage:
# $ ./generate_gni.sh [--disable-avx] [--only-configs]
#
# The following optional flags are supported:
# --disable-avx : AVX+AVX2 support is disabled.
# --only-configs : Excludes generation of GN and GYP files (i.e. only
# configuration headers are generated).
# --enable-vp9-highbitdepth : Allow for high bit depth internal, 10 and 12 bit
# vp9 encode and decode. Only applied to x86[_64].
export LC_ALL=C
BASE_DIR=$(pwd)
LIBVPX_SRC_DIR="source/libvpx"
LIBVPX_CONFIG_DIR="source/config"
unset DISABLE_AVX
for i in "$@"
do
case $i in
--disable-avx)
DISABLE_AVX="--disable-avx --disable-avx2"
shift
;;
--only-configs)
ONLY_CONFIGS=true
shift
;;
--enable-vp9-highbitdepth)
HIGHBD="--enable-vp9-highbitdepth"
shift
;;
*)
echo "Unknown option: $i"
exit 1
;;
esac
done
# Print license header.
# $1 - Output base name
function write_license {
echo "# This file is generated. Do not edit." >> $1
echo "# Copyright (c) 2014 The Chromium Authors. All rights reserved." >> $1
echo "# Use of this source code is governed by a BSD-style license that can be" >> $1
echo "# found in the LICENSE file." >> $1
echo "" >> $1
}
# Search for source files with the same basename in vp8, vp9, and vpx_dsp. The
# build can support such files but only when they are built into disparate
modules. Configuring such modules for both gyp and gn is tricky so avoid the
# issue at least until gyp is removed.
function find_duplicates {
local readonly duplicate_file_names=$(find \
$BASE_DIR/$LIBVPX_SRC_DIR/vp8 \
$BASE_DIR/$LIBVPX_SRC_DIR/vp9 \
$BASE_DIR/$LIBVPX_SRC_DIR/vpx_dsp \
-type f -name \*.c | xargs -I {} basename {} | sort | uniq -d \
)
if [ -n "${duplicate_file_names}" ]; then
echo "WARNING: DUPLICATE FILES FOUND"
for file in ${duplicate_file_names}; do
find \
$BASE_DIR/$LIBVPX_SRC_DIR/vp8 \
$BASE_DIR/$LIBVPX_SRC_DIR/vp9 \
$BASE_DIR/$LIBVPX_SRC_DIR/vpx_dsp \
-name $file
done
exit 1
fi
}
# Generate a gni with a list of source files.
# $1 - Array name for file list. This is processed with 'declare' below to
# regenerate the array locally.
# $2 - GN variable name.
# $3 - Output file.
function write_gni {
# Convert the first argument back in to an array.
declare -a file_list=("${!1}")
echo "$2 = [" >> "$3"
for f in $file_list
do
echo " \"//third_party/libvpx/source/libvpx/$f\"," >> "$3"
done
echo "]" >> "$3"
}
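# A usage sketch for write_gni (hypothetical list and variable names): the first
# argument is the *name* of a variable holding a whitespace-separated file list, e.g.
#   example_sources="vp8/common/alloccommon.c
#   vp8/common/blockd.c"
#   write_gni example_sources libvpx_srcs_example "$BASE_DIR/libvpx_srcs.gni"
# which appends a 'libvpx_srcs_example = [ ... ]' list to the .gni file.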
# Convert a list of source files into gni files.
# $1 - Input file.
function convert_srcs_to_project_files {
# Do the following here:
# 1. Filter .c, .h, .s, .S and .asm files.
# 2. Move certain files to separate lists to allow applying different
# compiler options.
# 3. Replace .asm.s with .asm because gn will do the conversion.
local source_list=$(grep -E '(\.c|\.h|\.S|\.s|\.asm)$' $1)
# Not sure why vpx_config.c is not included.
source_list=$(echo "$source_list" | grep -v 'vpx_config\.c')
# The actual ARM files end in .asm. We have rules to translate them to .S
source_list=$(echo "$source_list" | sed s/\.asm\.s$/.asm/)
# Select all x86 files ending with .c
local intrinsic_list=$(echo "$source_list" | \
egrep '(mmx|sse2|sse3|ssse3|sse4|avx|avx2).c$')
# Select all neon files ending in C but only when building in RTCD mode
if [ "libvpx_srcs_arm_neon_cpu_detect" == "$2" ]; then
# Select all arm neon files ending in _neon.c and all asm files.
# The asm files need to be included in the intrinsics target because
# they need the -mfpu=neon flag.
# the pattern may need to be updated if vpx_scale gets intrinsics
local intrinsic_list=$(echo "$source_list" | \
egrep 'neon.*(\.c|\.asm)$')
fi
# Remove these files from the main list.
source_list=$(comm -23 <(echo "$source_list") <(echo "$intrinsic_list"))
local x86_list=$(echo "$source_list" | egrep '/x86/')
# Write a single .gni file that includes all source files for all archs.
if [ 0 -ne ${#x86_list} ]; then
local c_sources=$(echo "$source_list" | egrep '.(c|h)$')
local assembly_sources=$(echo "$source_list" | egrep '.asm$')
local mmx_sources=$(echo "$intrinsic_list" | grep '_mmx\.c$')
local sse2_sources=$(echo "$intrinsic_list" | grep '_sse2\.c$')
local sse3_sources=$(echo "$intrinsic_list" | grep '_sse3\.c$')
local ssse3_sources=$(echo "$intrinsic_list" | grep '_ssse3\.c$')
local sse4_1_sources=$(echo "$intrinsic_list" | grep '_sse4\.c$')
local avx_sources=$(echo "$intrinsic_list" | grep '_avx\.c$')
local avx2_sources=$(echo "$intrinsic_list" | grep '_avx2\.c$')
write_gni c_sources $2 "$BASE_DIR/libvpx_srcs.gni"
write_gni assembly_sources $2_assembly "$BASE_DIR/libvpx_srcs.gni"
write_gni mmx_sources $2_mmx "$BASE_DIR/libvpx_srcs.gni"
write_gni sse2_sources $2_sse2 "$BASE_DIR/libvpx_srcs.gni"
write_gni sse3_sources $2_sse3 "$BASE_DIR/libvpx_srcs.gni"
write_gni ssse3_sources $2_ssse3 "$BASE_DIR/libvpx_srcs.gni"
write_gni sse4_1_sources $2_sse4_1 "$BASE_DIR/libvpx_srcs.gni"
if [ -z "$DISABLE_AVX" ]; then
write_gni avx_sources $2_avx "$BASE_DIR/libvpx_srcs.gni"
write_gni avx2_sources $2_avx2 "$BASE_DIR/libvpx_srcs.gni"
fi
else
local c_sources=$(echo "$source_list" | egrep '.(c|h)$')
local assembly_sources=$(echo -e "$source_list\n$intrinsic_list" | \
egrep '.asm$')
local neon_sources=$(echo "$intrinsic_list" | grep '_neon\.c$')
write_gni c_sources $2 "$BASE_DIR/libvpx_srcs.gni"
write_gni assembly_sources $2_assembly "$BASE_DIR/libvpx_srcs.gni"
if [ 0 -ne ${#neon_sources} ]; then
write_gni neon_sources $2_neon "$BASE_DIR/libvpx_srcs.gni"
fi
fi
}
# Clean files from previous make.
function make_clean {
make clean > /dev/null
rm -f libvpx_srcs.txt
}
# Lint a pair of vpx_config.h and vpx_config.asm to make sure they match.
# $1 - Header file directory.
function lint_config {
# mips and native client do not contain any assembly so the headers do not
# need to be compared to the asm.
if [[ "$1" != *mipsel && "$1" != *mips64el && "$1" != nacl ]]; then
$BASE_DIR/lint_config.sh \
-h $BASE_DIR/$LIBVPX_CONFIG_DIR/$1/vpx_config.h \
-a $BASE_DIR/$LIBVPX_CONFIG_DIR/$1/vpx_config.asm
fi
}
# Print the configuration.
# $1 - Header file directory.
function print_config {
$BASE_DIR/lint_config.sh -p \
-h $BASE_DIR/$LIBVPX_CONFIG_DIR/$1/vpx_config.h \
-a $BASE_DIR/$LIBVPX_CONFIG_DIR/$1/vpx_config.asm
}
# Print the configuration from the header file.
# This function is an abridged version of print_config which does not use
# lint_config and does not require the existence of vpx_config.asm.
# $1 - Header file directory.
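# Example transformation of one header line (hypothetical config value):
#   "#define CONFIG_VP9_ENCODER 1"  ->  "CONFIG_VP9_ENCODER=yes"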
function print_config_basic {
combined_config="$(cat $BASE_DIR/$LIBVPX_CONFIG_DIR/$1/vpx_config.h \
| grep -E ' +[01] *$')"
combined_config="$(echo "$combined_config" | grep -v DO1STROUNDING)"
combined_config="$(echo "$combined_config" | sed 's/[ \t]//g')"
combined_config="$(echo "$combined_config" | sed 's/.*define//')"
combined_config="$(echo "$combined_config" | sed 's/0$/=no/')"
combined_config="$(echo "$combined_config" | sed 's/1$/=yes/')"
echo "$combined_config" | sort | uniq
}
# Generate *_rtcd.h files.
# $1 - Header file directory.
# $2 - Architecture.
# $3 - Optional - any additional arguments to pass through.
function gen_rtcd_header {
echo "Generate $LIBVPX_CONFIG_DIR/$1/*_rtcd.h files."
rm -rf $BASE_DIR/$TEMP_DIR/libvpx.config
if [[ "$2" == "mipsel" || "$2" == "mips64el" || "$2" == nacl ]]; then
print_config_basic $1 > $BASE_DIR/$TEMP_DIR/libvpx.config
else
$BASE_DIR/lint_config.sh -p \
-h $BASE_DIR/$LIBVPX_CONFIG_DIR/$1/vpx_config.h \
-a $BASE_DIR/$LIBVPX_CONFIG_DIR/$1/vpx_config.asm \
-o $BASE_DIR/$TEMP_DIR/libvpx.config
fi
$BASE_DIR/$LIBVPX_SRC_DIR/build/make/rtcd.pl \
--arch=$2 \
--sym=vp8_rtcd $DISABLE_AVX $3 \
--config=$BASE_DIR/$TEMP_DIR/libvpx.config \
$BASE_DIR/$LIBVPX_SRC_DIR/vp8/common/rtcd_defs.pl \
> $BASE_DIR/$LIBVPX_CONFIG_DIR/$1/vp8_rtcd.h
$BASE_DIR/$LIBVPX_SRC_DIR/build/make/rtcd.pl \
--arch=$2 \
--sym=vp9_rtcd $DISABLE_AVX $3 \
--config=$BASE_DIR/$TEMP_DIR/libvpx.config \
$BASE_DIR/$LIBVPX_SRC_DIR/vp9/common/vp9_rtcd_defs.pl \
> $BASE_DIR/$LIBVPX_CONFIG_DIR/$1/vp9_rtcd.h
$BASE_DIR/$LIBVPX_SRC_DIR/build/make/rtcd.pl \
--arch=$2 \
--sym=vpx_scale_rtcd $DISABLE_AVX $3 \
--config=$BASE_DIR/$TEMP_DIR/libvpx.config \
$BASE_DIR/$LIBVPX_SRC_DIR/vpx_scale/vpx_scale_rtcd.pl \
> $BASE_DIR/$LIBVPX_CONFIG_DIR/$1/vpx_scale_rtcd.h
$BASE_DIR/$LIBVPX_SRC_DIR/build/make/rtcd.pl \
--arch=$2 \
--sym=vpx_dsp_rtcd $DISABLE_AVX $3 \
--config=$BASE_DIR/$TEMP_DIR/libvpx.config \
$BASE_DIR/$LIBVPX_SRC_DIR/vpx_dsp/vpx_dsp_rtcd_defs.pl \
> $BASE_DIR/$LIBVPX_CONFIG_DIR/$1/vpx_dsp_rtcd.h
rm -rf $BASE_DIR/$TEMP_DIR/libvpx.config
}
# Generate Config files. "--enable-external-build" must be set to skip
# detection of capabilities on specific targets.
# $1 - Header file directory.
# $2 - Config command line.
function gen_config_files {
./configure $2 > /dev/null
# Disable HAVE_UNISTD_H as it causes vp8 to try to detect how many cpus are
# available, which doesn't work from inside a sandbox on linux.
( echo '/HAVE_UNISTD_H/s/[01]/0/' ; echo 'w' ; echo 'q' ) | ed -s vpx_config.h
# Use the correct ads2gas script.
if [[ "$1" == linux* ]]; then
local ASM_CONV=ads2gas.pl
else
local ASM_CONV=ads2gas_apple.pl
fi
# Generate vpx_config.asm. Do not create one for mips or native client.
if [[ "$1" != *mipsel && "$1" != *mips64el && "$1" != nacl ]]; then
if [[ "$1" == *x64* ]] || [[ "$1" == *ia32* ]]; then
egrep "#define [A-Z0-9_]+ [01]" vpx_config.h | awk '{print "%define " $2 " " $3}' > vpx_config.asm
else
egrep "#define [A-Z0-9_]+ [01]" vpx_config.h | awk '{print $2 " EQU " $3}' | perl $BASE_DIR/$LIBVPX_SRC_DIR/build/make/$ASM_CONV > vpx_config.asm
fi
fi
cp vpx_config.* $BASE_DIR/$LIBVPX_CONFIG_DIR/$1
make_clean
rm -rf vpx_config.*
}
function update_readme {
local IFS=$'\n'
# Split git log output '<date>\n<commit hash>' on the newline to produce 2
# array entries.
local vals=($(git --no-pager log -1 --format="%cd%n%H" \
--date=format:"%A %B %d %Y"))
sed -E -i.bak \
-e "s/^(Date:)[[:space:]]+.*$/\1 ${vals[0]}/" \
-e "s/^(Commit:)[[:space:]]+[a-f0-9]{40}/\1 ${vals[1]}/" \
${BASE_DIR}/README.chromium
rm ${BASE_DIR}/README.chromium.bak
cat <<EOF
README.chromium updated with:
Date: ${vals[0]}
Commit: ${vals[1]}
EOF
}
find_duplicates
echo "Create temporary directory."
TEMP_DIR="$LIBVPX_SRC_DIR.temp"
rm -rf $TEMP_DIR
cp -R $LIBVPX_SRC_DIR $TEMP_DIR
cd $TEMP_DIR
echo "Generate config files."
all_platforms="--enable-external-build --enable-postproc --enable-multi-res-encoding --enable-temporal-denoising"
all_platforms="${all_platforms} --enable-vp9-temporal-denoising --enable-vp9-postproc --size-limit=16384x16384"
all_platforms="${all_platforms} --enable-realtime-only --disable-install-docs"
x86_platforms="--enable-pic --as=yasm $DISABLE_AVX $HIGHBD"
gen_config_files linux/ia32 "--target=x86-linux-gcc ${all_platforms} ${x86_platforms}"
gen_config_files linux/x64 "--target=x86_64-linux-gcc ${all_platforms} ${x86_platforms}"
gen_config_files linux/arm "--target=armv7-linux-gcc --disable-neon ${all_platforms}"
gen_config_files linux/arm-neon "--target=armv7-linux-gcc ${all_platforms}"
gen_config_files linux/arm-neon-cpu-detect "--target=armv7-linux-gcc --enable-runtime-cpu-detect ${all_platforms}"
gen_config_files linux/arm64 "--target=armv8-linux-gcc ${all_platforms}"
gen_config_files linux/mipsel "--target=mips32-linux-gcc ${all_platforms}"
gen_config_files linux/mips64el "--target=mips64-linux-gcc ${all_platforms}"
gen_config_files linux/generic "--target=generic-gnu $HIGHBD ${all_platforms}"
gen_config_files win/ia32 "--target=x86-win32-vs12 ${all_platforms} ${x86_platforms}"
gen_config_files win/x64 "--target=x86_64-win64-vs12 ${all_platforms} ${x86_platforms}"
gen_config_files mac/ia32 "--target=x86-darwin9-gcc ${all_platforms} ${x86_platforms}"
gen_config_files mac/x64 "--target=x86_64-darwin9-gcc ${all_platforms} ${x86_platforms}"
gen_config_files ios/arm-neon "--target=armv7-linux-gcc ${all_platforms}"
gen_config_files ios/arm64 "--target=armv8-linux-gcc ${all_platforms}"
gen_config_files nacl "--target=generic-gnu $HIGHBD ${all_platforms}"
echo "Remove temporary directory."
cd $BASE_DIR
rm -rf $TEMP_DIR
echo "Lint libvpx configuration."
lint_config linux/ia32
lint_config linux/x64
lint_config linux/arm
lint_config linux/arm-neon
lint_config linux/arm-neon-cpu-detect
lint_config linux/arm64
lint_config linux/mipsel
lint_config linux/mips64el
lint_config linux/generic
lint_config win/ia32
lint_config win/x64
lint_config mac/ia32
lint_config mac/x64
lint_config ios/arm-neon
lint_config ios/arm64
lint_config nacl
echo "Create temporary directory."
TEMP_DIR="$LIBVPX_SRC_DIR.temp"
rm -rf $TEMP_DIR
cp -R $LIBVPX_SRC_DIR $TEMP_DIR
cd $TEMP_DIR
gen_rtcd_header linux/ia32 x86
gen_rtcd_header linux/x64 x86_64
gen_rtcd_header linux/arm armv7 "--disable-neon --disable-neon_asm"
gen_rtcd_header linux/arm-neon armv7
gen_rtcd_header linux/arm-neon-cpu-detect armv7
gen_rtcd_header linux/arm64 armv8
gen_rtcd_header linux/mipsel mipsel
gen_rtcd_header linux/mips64el mips64el
gen_rtcd_header linux/generic generic
gen_rtcd_header win/ia32 x86
gen_rtcd_header win/x64 x86_64
gen_rtcd_header mac/ia32 x86
gen_rtcd_header mac/x64 x86_64
gen_rtcd_header ios/arm-neon armv7
gen_rtcd_header ios/arm64 armv8
gen_rtcd_header nacl nacl
echo "Prepare Makefile."
./configure --target=generic-gnu > /dev/null
make_clean
if [ -z $ONLY_CONFIGS ]; then
# Remove existing .gni file.
rm -rf $BASE_DIR/libvpx_srcs.gni
write_license $BASE_DIR/libvpx_srcs.gni
echo "Generate X86 source list."
config=$(print_config linux/ia32)
make_clean
make libvpx_srcs.txt target=libs $config > /dev/null
convert_srcs_to_project_files libvpx_srcs.txt libvpx_srcs_x86
# Copy vpx_version.h. The file should be the same for all platforms.
cp vpx_version.h $BASE_DIR/$LIBVPX_CONFIG_DIR
echo "Generate X86_64 source list."
config=$(print_config linux/x64)
make_clean
make libvpx_srcs.txt target=libs $config > /dev/null
convert_srcs_to_project_files libvpx_srcs.txt libvpx_srcs_x86_64
echo "Generate ARM source list."
config=$(print_config linux/arm)
make_clean
make libvpx_srcs.txt target=libs $config > /dev/null
convert_srcs_to_project_files libvpx_srcs.txt libvpx_srcs_arm
echo "Generate ARM NEON source list."
config=$(print_config linux/arm-neon)
make_clean
make libvpx_srcs.txt target=libs $config > /dev/null
convert_srcs_to_project_files libvpx_srcs.txt libvpx_srcs_arm_neon
echo "Generate ARM NEON CPU DETECT source list."
config=$(print_config linux/arm-neon-cpu-detect)
make_clean
make libvpx_srcs.txt target=libs $config > /dev/null
convert_srcs_to_project_files libvpx_srcs.txt libvpx_srcs_arm_neon_cpu_detect
echo "Generate ARM64 source list."
config=$(print_config linux/arm64)
make_clean
make libvpx_srcs.txt target=libs $config > /dev/null
convert_srcs_to_project_files libvpx_srcs.txt libvpx_srcs_arm64
echo "Generate MIPS source list."
config=$(print_config_basic linux/mipsel)
make_clean
make libvpx_srcs.txt target=libs $config > /dev/null
convert_srcs_to_project_files libvpx_srcs.txt libvpx_srcs_mips
echo "MIPS64 source list is identical to MIPS source list. No need to generate it."
echo "Generate NaCl source list."
config=$(print_config_basic nacl)
make_clean
make libvpx_srcs.txt target=libs $config > /dev/null
convert_srcs_to_project_files libvpx_srcs.txt libvpx_srcs_nacl
echo "Generate GENERIC source list."
config=$(print_config_basic linux/generic)
make_clean
make libvpx_srcs.txt target=libs $config > /dev/null
convert_srcs_to_project_files libvpx_srcs.txt libvpx_srcs_generic
fi
echo "Remove temporary directory."
cd $BASE_DIR
rm -rf $TEMP_DIR
gn format --in-place $BASE_DIR/BUILD.gn
gn format --in-place $BASE_DIR/libvpx_srcs.gni
cd $BASE_DIR/$LIBVPX_SRC_DIR
update_readme
cd $BASE_DIR
| geminy/aidear | oss/qt/qt-everywhere-opensource-src-5.9.0/qtwebengine/src/3rdparty/chromium/third_party/libvpx/generate_gni.sh | Shell | gpl-3.0 | 16,697 |
#!/bin/sh
# **************************************
# Running Navitia in the blink of an eye
# **************************************
#
# You just need to blink slowly
#
# Here is a step by step guide for running navitia on a Ubuntu 14.04
#
#
# It's more of an install guide but it can also serve as an out-of-the-box build script
# the prerequisites to run that script are:
# - to have git and sudo installed
# - to have cloned the sources repository
#
# git clone https://github.com/CanalTP/navitia
#
# - and to be in the cloned repository:
# cd navitia
# /!\ WARNING /!\
# the script needs sudo privileges (sudo is installed with the dependencies) to install dependencies and handle the databases
# If used as an out-of-the-box script, be sure to read it beforehand
echo "!WARNING!"
echo "The script needs to install dependencies and update databases so it needs some privileges"
echo "It will thus prompt for your password"
echo "Make sure to review what the script is doing and check if you are ok with it"
#stop on errors
set -e
kraken_pid=
jormun_pid=
clean_exit()
{
#kill the background job at the end
echo "killing kraken (pid=$kraken_pid) and jormungandr (pid=$jormun_pid)";
kill $kraken_pid
kill -TERM $jormun_pid
exit 0
}
kraken_db_user_password=
navitia_dir="$(dirname $(readlink -f $0))"
gtfs_data_dir=
osm_file=
install_dependencies=1
clean_apt=
usage()
{
cat << EOF
usage: $0 options
This script sets up a running navitia
only the password is mandatory:
- if no datasets are given, a default Paris dataset will be used
- by default all dependencies are installed
Note that you must have sudo installed
OPTIONS:
-h Show this message
-p kraken database password
-g gtfs directory
-o osm file
-n do not install dependencies
-c if OS is Debian, clean the APT configuration (repository)
EOF
}
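# Example invocation (hypothetical password and data paths):
#   ./build_navitia.sh -p mysecretpassword -g /tmp/gtfs -o /tmp/paris.osm.pbf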
while getopts "hp:g:o:nc" OPTION
do
case "$OPTION" in
h)
usage
exit 1
;;
p)
kraken_db_user_password="$OPTARG"
;;
g)
gtfs_data_dir="$OPTARG"
;;
o)
osm_file="$OPTARG"
;;
n)
install_dependencies=
;;
c)
clean_apt=true
;;
?)
usage
exit 1
;;
esac
done
if [ -z "$kraken_db_user_password" ]
then
echo "no database password given, abort"
exit 1
fi
#Be sure that basic dependencies are installed
sudo apt-get install -y unzip wget
if [ -z "$gtfs_data_dir" ] || [ -z "$osm_file" ]
then
echo "no gtfs or osm file given, we'll take a default data set, Paris"
echo "getting gtfs paris data from data.navitia.io"
wget -P /tmp http://data.navitia.io/gtfs_paris_20140502.zip
unzip -d /tmp/gtfs /tmp/gtfs_paris_20140502.zip
gtfs_data_dir=/tmp/gtfs
echo "getting paris osm data from metro.teczno.com"
wget -P /tmp http://osm-extracted-metros.s3.amazonaws.com/paris.osm.pbf
osm_file=/tmp/paris.osm.pbf
fi
run_dir="$navitia_dir"/run
mkdir -p "$run_dir"
#Hack
#for convenience reasons, some submodule links are in ssh (easier to push)
#this however requires the user to have github access
#for this script we therefore change the ssh links to https
sed -i 's,git\@github.com:\([^/]*\)/\(.*\).git,https://github.com/\1/\2,' .gitmodules
#we need to get the submodules
git submodule update --init
#========
#Building
#========
#
#First you need to install all dependencies.
#
#first the system and the c++ dependencies:
if [ -n "$install_dependencies" ]
then
echo "** installing all dependencies"
sudo apt-get install -y g++ cmake liblog4cplus-dev libzmq-dev libosmpbf-dev libboost-all-dev libpqxx3-dev libgoogle-perftools-dev libprotobuf-dev python-pip libproj-dev protobuf-compiler libgeos-c1
postgresql_package='postgresql-9.3'
postgresql_postgis_package='postgis postgresql-9.3-postgis-2.1 postgresql-9.3-postgis-scripts'
distrib=`lsb_release -si`
version=`lsb_release -sr`
# Fix Ubuntu 15.04 package
if [ "$distrib" = "Ubuntu" -a "$version" = "15.04" ]; then
postgresql_package='postgresql-9.4'
postgresql_postgis_package='postgis postgresql-9.4-postgis-2.1 postgresql-9.4-postgis-scripts'
fi
if [ "$distrib" = "Debian" ] && grep -q '^7\.' /etc/debian_version; then
# on Debian, we must add the APT repository of PostgreSQL project
# to have the right version of postgis
# no magic stuff : https://wiki.postgresql.org/wiki/Apt#PostgreSQL_packages_for_Debian_and_Ubuntu
apt_file='/etc/apt/sources.list.d/postgresql.list'
sudo /bin/sh -c "echo 'deb http://apt.postgresql.org/pub/repos/apt/ wheezy-pgdg main' > $apt_file"
sudo apt-get -y install wget ca-certificates
wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add -
sudo apt-get update
fi
sudo apt-get install -y $postgresql_package $postgresql_postgis_package #Note: postgres 9.1 and postgis 2.0 would be enough, but postgis 2.1 is easier to set up
# then you need to install all python dependencies: ::
sudo pip install -r "$navitia_dir"/source/jormungandr/requirements.txt
sudo pip install -r "$navitia_dir"/source/tyr/requirements.txt
fi
#the build procedure is explained in the install documentation
echo "** building navitia"
navitia_build_dir="$navitia_dir"/release
mkdir -p "$navitia_build_dir" && cd "$navitia_build_dir"
cmake -DCMAKE_BUILD_TYPE=Release ../source
make -j$(($(grep -c '^processor' /proc/cpuinfo)+1))
#=======================
#Setting up the database
#=======================
echo "** setting up the database"
#
#Each kraken is backed by a postgres database
#
#You need to set up the database
kraken_db_name='navitia'
db_owner='navitia'
# for the default build we give ownership of the database to a 'navitia' user, but you can do whatever you want here
encap=$(sudo -i -u postgres psql postgres -tAc "SELECT 1 FROM pg_roles WHERE rolname='$db_owner'") # we check if there is already a user
if [ -z "$encap" ]; then
sudo -i -u postgres psql -c "create user $db_owner;alter user $db_owner password '$kraken_db_user_password';"
else
echo "user $db_owner already exists"
fi
if ! sudo -i -u postgres psql -l | grep -q "^ ${kraken_db_name}"; then
sudo -i -u postgres createdb "$kraken_db_name" -O "$db_owner"
sudo -i -u postgres psql -c "create extension postgis; " "$kraken_db_name"
else
echo "db $kraken_db_name already exists"
fi
# Then you need to update its schema
# The database migration is handled by alembic
# You can edit the alembic.ini file if you want a custom behaviour (or give your own with the alembic -c option)
# you can give the database url either by setting the sqlalchemy.url parameter in the config file or by giving
# a -x dbname option
cd "$navitia_dir"/source/sql
PYTHONPATH=. alembic -x dbname="postgresql://$db_owner:$kraken_db_user_password@localhost/$kraken_db_name" upgrade head
cd
# Install the jormungandr database and upgrade its schema
# WARNING : default name is "jormungandr", so it should be the same in your SQLALCHEMY_DATABASE_URI on default_settings.py
if ! sudo -i -u postgres psql -l | grep -q "^ jormungandr"; then
sudo -i -u postgres createdb jormungandr -O "$db_owner"
sudo -i -u postgres psql -c "create extension postgis; " jormungandr
else
echo "db jormungandr already exists"
fi
cd "$navitia_dir"/source/tyr
PYTHONPATH=.:../navitiacommon/ TYR_CONFIG_FILE=default_settings.py ./manage_tyr.py db upgrade
cd
#====================
# Filling up the data
#====================
# ** filling up the database **
## we need to import the gtfs data
"$navitia_build_dir"/ed/gtfs2ed -i "$gtfs_data_dir" --connection-string="host=localhost user=$db_owner dbname=$kraken_db_name password=$kraken_db_user_password"
## we need to import the osm data
"$navitia_build_dir"/ed/osm2ed -i "$osm_file" --connection-string="host=localhost user=$db_owner dbname=$kraken_db_name password=$kraken_db_user_password"
## then we export the database into kraken's custom file format
"$navitia_build_dir"/ed/ed2nav -o "$run_dir"/data.nav.lz4 --connection-string="host=localhost user=$db_owner dbname=$kraken_db_name password=$kraken_db_user_password"
#========
# Running
#========
# * Kraken *
echo "** running kraken"
# we now need to pop the kraken
# Note we run Jormungandr and kraken in the same shell so the output might be messy
# We have to create the kraken configuration file
cat << EOF > "$run_dir"/kraken.ini
[GENERAL]
#file to load
database = data.nav.lz4
#ipc socket in default.ini file in the jormungandr instances dir
zmq_socket = ipc:///tmp/default_kraken
#number of threads
nb_threads = 1
#name of the instance
instance_name=default
[LOG]
log4cplus.rootLogger=DEBUG, ALL_MSGS
log4cplus.appender.ALL_MSGS=log4cplus::FileAppender
log4cplus.appender.ALL_MSGS.File=kraken.log
log4cplus.appender.ALL_MSGS.layout=log4cplus::PatternLayout
log4cplus.appender.ALL_MSGS.layout.ConversionPattern=[%D{%y-%m-%d %H:%M:%S,%q}] %b:%L [%-5p] - %m %n
EOF
# WARNING, for the moment you have to run it in the kraken.ini directory
cd "$run_dir"
"$navitia_build_dir"/kraken/kraken &
kraken_pid=$!
# * Jormungandr *
echo "** running jormungandr"
# it's almost done, we now need to pop Jormungandr (the python front end)
# Jormungandr need to know how to call the kraken
# The configuration for that is handled by a directory where every kraken is referenced by a .ini file
mkdir -p "$run_dir"/jormungandr
# For our test we only need one kraken
cat << EOFJ > "$run_dir"/jormungandr/default.ini
[instance]
# name of the kraken
key = default
# zmq socket used to talk to the kraken, should be the same as the one defined by the zmq_socket param in kraken
socket = ipc:///tmp/default_kraken
EOFJ
# the Jormungandr configuration is in the source/jormungandr/jormungandr/default_settings.py file
# should be almost enough for the moment, we just need to change the location of the krakens configuration
sed "s,^INSTANCES_DIR.*,INSTANCES_DIR = '$run_dir/jormungandr'," "$navitia_dir"/source/jormungandr/jormungandr/default_settings.py > "$run_dir"/jormungandr_settings.py
#we also don't want to depend on the jormungandr database for this test
sed -i 's/DISABLE_DATABASE.*/DISABLE_DATABASE=True/' "$run_dir"/jormungandr_settings.py
JORMUNGANDR_CONFIG_FILE="$run_dir"/jormungandr_settings.py PYTHONPATH="$navitia_dir/source/navitiacommon:$navitia_dir/source/jormungandr" python "$navitia_dir"/source/jormungandr/jormungandr/manage.py runserver -d -r &
jormun_pid=$!
echo "That's it!"
echo "you can now play with the api"
echo "Note: you might have to wait a bit for the service to load the data"
echo "open another shell and try for example:"
echo "curl localhost:5000/v1/coverage/default/stop_areas"
#we block the script for the user to test the api
read -p "when you are finished, hit a key to close kraken and jormungandr" n
# cleaning APT repository if -c option was specified (must run before clean_exit, which terminates the script)
test -n "$clean_apt" && rm -f "$apt_file" && printf "Option -c was specified, removing %s \n" "$apt_file"
clean_exit
| TeXitoi/navitia | build_navitia.sh | Shell | agpl-3.0 | 11,284 |
#!/bin/sh
# List source files whose second line does not contain a Copyright notice.
for root in compiler core interpreter
do
for i in $(find $root -name "*.scala")
do
head -2 $i | tail -1 | grep -v Copyright && echo $i
done
done
| haro-freezd/noop | findMissingCopyright.sh | Shell | apache-2.0 | 174 |
#!/bin/bash
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Grid cluster control.
#
#
# Import common functions.
#
if [ "${IGNITE_HOME}" = "" ];
then IGNITE_HOME_TMP="$(dirname "$(cd "$(dirname "$0")"; "pwd")")";
else IGNITE_HOME_TMP=${IGNITE_HOME};
fi
#
# Set SCRIPTS_HOME - base path to scripts.
#
SCRIPTS_HOME="${IGNITE_HOME_TMP}/bin"
source "${SCRIPTS_HOME}"/include/functions.sh
#
# Discover path to Java executable and check it's version.
#
checkJava
#
# Discover IGNITE_HOME environment variable.
#
setIgniteHome
if [ "${DEFAULT_CONFIG}" == "" ]; then
DEFAULT_CONFIG=config/default-config.xml
fi
#
# Set IGNITE_LIBS.
#
. "${SCRIPTS_HOME}"/include/setenv.sh
. "${SCRIPTS_HOME}"/include/build-classpath.sh # Will be removed in the binary release.
CP="${IGNITE_LIBS}:${IGNITE_HOME}/libs/optional/ignite-zookeeper/*"
RANDOM_NUMBER=$("$JAVA" -cp "${CP}" org.apache.ignite.startup.cmdline.CommandLineRandomNumberGenerator)
RESTART_SUCCESS_FILE="${IGNITE_HOME}/work/ignite_success_${RANDOM_NUMBER}"
RESTART_SUCCESS_OPT="-DIGNITE_SUCCESS_FILE=${RESTART_SUCCESS_FILE}"
#
# Find available port for JMX
#
# You can specify IGNITE_JMX_PORT environment variable for overriding automatically found JMX port
#
# This is executed when -nojmx is not specified
#
if [ "${NOJMX}" == "0" ] ; then
findAvailableJmxPort
fi
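#
# Example override (hypothetical port): IGNITE_JMX_PORT=49112 ./control.sh ...
#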
# Mac OS specific support to display correct name in the dock.
osname=`uname`
if [ "${DOCK_OPTS}" == "" ]; then
DOCK_OPTS="-Xdock:name=Ignite Node"
fi
#
# JVM options. See http://java.sun.com/javase/technologies/hotspot/vmoptions.jsp for more details.
#
# ADD YOUR/CHANGE ADDITIONAL OPTIONS HERE
#
if [ -z "$JVM_OPTS" ] ; then
if [[ `"$JAVA" -version 2>&1 | egrep "1\.[7]\."` ]]; then
JVM_OPTS="-Xms256m -Xmx1g"
else
JVM_OPTS="-Xms256m -Xmx1g"
fi
fi
#
# Uncomment to enable experimental commands [--wal]
#
# JVM_OPTS="${JVM_OPTS} -DIGNITE_ENABLE_EXPERIMENTAL_COMMAND=true"
#
# Uncomment the following GC settings if you see spikes in your throughput due to Garbage Collection.
#
# JVM_OPTS="$JVM_OPTS -XX:+UseG1GC"
#
# Uncomment if you get StackOverflowError.
# On 64 bit systems this value can be larger, e.g. -Xss16m
#
# JVM_OPTS="${JVM_OPTS} -Xss4m"
#
# Uncomment to set preference for IPv4 stack.
#
# JVM_OPTS="${JVM_OPTS} -Djava.net.preferIPv4Stack=true"
#
# Assertions are disabled by default since version 3.5.
# If you want to enable them - set 'ENABLE_ASSERTIONS' flag to '1'.
#
ENABLE_ASSERTIONS="1"
#
# Set '-ea' options if assertions are enabled.
#
if [ "${ENABLE_ASSERTIONS}" = "1" ]; then
JVM_OPTS="${JVM_OPTS} -ea"
fi
#
# Set main class to start service (grid node by default).
#
if [ "${MAIN_CLASS}" = "" ]; then
MAIN_CLASS=org.apache.ignite.internal.commandline.CommandHandler
fi
#
# Remote debugging (JPDA).
# Uncomment and change if remote debugging is required.
#
# JVM_OPTS="-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=8787 ${JVM_OPTS}"
#
# Final JVM_OPTS for Java 9 compatibility
#
${JAVA_HOME}/bin/java -version 2>&1 | grep -qE 'java version "9.*"' && {
JVM_OPTS="--add-exports java.base/jdk.internal.misc=ALL-UNNAMED \
--add-exports java.base/sun.nio.ch=ALL-UNNAMED \
--add-exports java.management/com.sun.jmx.mbeanserver=ALL-UNNAMED \
--add-exports jdk.internal.jvmstat/sun.jvmstat.monitor=ALL-UNNAMED \
--add-modules java.xml.bind \
${JVM_OPTS}"
} || true
ERRORCODE="-1"
while [ "${ERRORCODE}" -ne "130" ]
do
if [ "${INTERACTIVE}" == "1" ] ; then
case $osname in
Darwin*)
"$JAVA" ${JVM_OPTS} ${QUIET} "${DOCK_OPTS}" "${RESTART_SUCCESS_OPT}" ${JMX_MON} \
-DIGNITE_UPDATE_NOTIFIER=false -DIGNITE_HOME="${IGNITE_HOME}" \
-DIGNITE_PROG_NAME="$0" ${JVM_XOPTS} -cp "${CP}" ${MAIN_CLASS} $@
;;
*)
"$JAVA" ${JVM_OPTS} ${QUIET} "${RESTART_SUCCESS_OPT}" ${JMX_MON} \
-DIGNITE_UPDATE_NOTIFIER=false -DIGNITE_HOME="${IGNITE_HOME}" \
-DIGNITE_PROG_NAME="$0" ${JVM_XOPTS} -cp "${CP}" ${MAIN_CLASS} $@
;;
esac
else
case $osname in
Darwin*)
"$JAVA" ${JVM_OPTS} ${QUIET} "${DOCK_OPTS}" "${RESTART_SUCCESS_OPT}" ${JMX_MON} \
-DIGNITE_UPDATE_NOTIFIER=false -DIGNITE_HOME="${IGNITE_HOME}" \
-DIGNITE_PROG_NAME="$0" ${JVM_XOPTS} -cp "${CP}" ${MAIN_CLASS} $@
;;
*)
"$JAVA" ${JVM_OPTS} ${QUIET} "${RESTART_SUCCESS_OPT}" ${JMX_MON} \
-DIGNITE_UPDATE_NOTIFIER=false -DIGNITE_HOME="${IGNITE_HOME}" \
-DIGNITE_PROG_NAME="$0" ${JVM_XOPTS} -cp "${CP}" ${MAIN_CLASS} $@
;;
esac
fi
ERRORCODE="$?"
if [ ! -f "${RESTART_SUCCESS_FILE}" ] ; then
break
else
rm -f "${RESTART_SUCCESS_FILE}"
fi
done
if [ -f "${RESTART_SUCCESS_FILE}" ] ; then
rm -f "${RESTART_SUCCESS_FILE}"
fi
| irudyak/ignite | bin/control.sh | Shell | apache-2.0 | 5,736 |
#!/bin/bash
FN="u133aaofav2cdf_2.18.0.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.13/data/annotation/src/contrib/u133aaofav2cdf_2.18.0.tar.gz"
"https://bioarchive.galaxyproject.org/u133aaofav2cdf_2.18.0.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-u133aaofav2cdf/bioconductor-u133aaofav2cdf_2.18.0_src_all.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-u133aaofav2cdf/bioconductor-u133aaofav2cdf_2.18.0_src_all.tar.gz"
)
MD5="716483ddb6664b8b7f0c58cd21136e8b"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
curl $URL > $TARBALL
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks.
if [[ $(uname -s) == "Linux" ]]; then
if md5sum -c <<<"$MD5 $TARBALL"; then
SUCCESS=1
break
fi
else if [[ $(uname -s) == "Darwin" ]]; then
if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
SUCCESS=1
break
fi
fi
fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
| phac-nml/bioconda-recipes | recipes/bioconductor-u133aaofav2cdf/post-link.sh | Shell | mit | 1,445 |
#!/bin/bash
# MAKE ALL
# make_release.sh
#
# MAKE JUST ONE
# make_release.sh electron
# or
# make_release.sh 10
if [ -z $1 ]; then
MAKE_RELEASE_PLATFORM="all"
else
MAKE_RELEASE_PLATFORM="$1"
fi
echo attempting build for PLATFORM: $MAKE_RELEASE_PLATFORM
if [ $MAKE_RELEASE_PLATFORM == "photon" ] || [ $MAKE_RELEASE_PLATFORM == "6" ]; then
./release.sh 6 photon
else if [ $MAKE_RELEASE_PLATFORM == "p1" ] || [ $MAKE_RELEASE_PLATFORM == "8" ]; then
./release.sh 8 p1
else if [ $MAKE_RELEASE_PLATFORM == "electron" ] || [ $MAKE_RELEASE_PLATFORM == "10" ]; then
./release.sh 10 electron
else if [ $MAKE_RELEASE_PLATFORM == "core" ] || [ $MAKE_RELEASE_PLATFORM == "0" ]; then
./release.sh 0 core
else if [ $MAKE_RELEASE_PLATFORM == "all" ]; then
./release.sh 6 photon
./release.sh 8 p1
./release.sh 10 electron
./release.sh 0 core
else
echo ERROR, $MAKE_RELEASE_PLATFORM not valid!!
fi
fi
fi
fi
fi
| glibersat/firmware | platform/spark/firmware/build/make_release.sh | Shell | agpl-3.0 | 894 |
#!/usr/bin/env roundup
#
: ${RUNDECK_USER?"environment variable not set."}
: ${RUNDECK_PROJECT?"environment variable not set."}
# Let's get started
# -----------------
# Helpers
# ------------
#. ./include.sh
# The Plan
# --------
describe "project: rundeck project should exist: $RUNDECK_PROJECT"
it_should_list_project_nodes() {
bash -c "rd nodes list -p $RUNDECK_PROJECT"
}
it_should_get_system_info() {
out=( $(bash -c "RD_FORMAT=json rd system info | jq -r .executions.executionMode") )
test "$out" = 'active'
}
| rophy/rundeck | test/docker/dockers/rundeck/tests/ssltests/basic-client-test.sh | Shell | apache-2.0 | 539 |
#!/bin/bash
go test github.com/openshift/service-serving-cert-signer/test/e2e/ -v -count=1
| childsb/origin | vendor/github.com/openshift/service-serving-cert-signer/test/e2e.sh | Shell | apache-2.0 | 91 |
#!/usr/bin/env bash
# Make sure the package information is up-to-date
apt-get update || exit 1
# Compilers
apt-get install -y g++-4.7 || exit 1
apt-get install -y gfortran-4.7 || exit 1
apt-get install -y clang-3.4 || exit 1
# Configuration
apt-get install -y cmake || exit 1
# Source control
apt-get install -y git || exit 1
# Anaconda Python (miniconda) with Python dependencies
echo Downloading Miniconda...
curl -O http://repo.continuum.io/miniconda/Miniconda-3.0.0-Linux-x86_64.sh || exit 1
su -c 'bash Miniconda-*.sh -b -p ~/anaconda' vagrant || exit 1
# Install dependencies
su -c '~/anaconda/bin/conda install --yes ipython llvmpy cython numba numpy scipy llvmmath ply pycparser pyparsing pyyaml flask nose pytables' vagrant || exit 1
# Add anaconda to the PATH
printf '\nexport PATH=~/anaconda/bin:$PATH\n' >> .bashrc
chown vagrant .bashrc
export PATH=~/anaconda/bin:$PATH
# Clone and install dynd-python
git clone https://github.com/ContinuumIO/dynd-python.git || exit 1
mkdir dynd-python/libraries
pushd dynd-python/libraries
git clone https://github.com/ContinuumIO/libdynd.git || exit 1
popd
mkdir dynd-python/build
chown -R vagrant dynd-python
pushd dynd-python/build
su -c 'cmake -DPYTHON_EXECUTABLE=~/anaconda/bin/python -DCYTHON_EXECUTABLE=~/anaconda/bin/cython ..' vagrant || exit 1
su -c 'make' vagrant || exit 1
make install || exit 1
ldconfig
popd
# Clone and install various projects
for PROJ in datashape blz blaze
do
git clone https://github.com/ContinuumIO/${PROJ}.git || exit 1
chown -R vagrant ${PROJ}
pushd ${PROJ}
su -c '~/anaconda/bin/python setup.py install' vagrant || exit 1
popd
done
# Clone and install pykit
git clone https://github.com/pykit/pykit.git || exit 1
chown -R vagrant pykit
pushd pykit
su -c '~/anaconda/bin/python setup.py install' vagrant || exit 1
popd
| xsixing/blaze | vagrant/saucy64-py27/bootstrap.sh | Shell | bsd-3-clause | 1,833 |
#!/bin/bash
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A library of helper functions that each provider hosting Kubernetes must implement to use cluster/kube-*.sh scripts.
set -e
SSH_OPTS="-oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null -oLogLevel=ERROR"
MASTER=""
MASTER_IP=""
NODE_IPS=""
# Assumed Vars:
# KUBE_ROOT
function test-build-release {
# Make a release
"${KUBE_ROOT}/build/release.sh"
}
# From user input set the necessary k8s and etcd configuration information
function setClusterInfo() {
# Initialize NODE_IPS in setClusterInfo function
# NODE_IPS is defined as a global variable, and each nodeIP is appended to it
# When setClusterInfo is called many times, this could cause problems
# Such as, you would end up with NODE_IPS=192.168.0.2,192.168.0.3,192.168.0.2,192.168.0.3 which is obviously wrong
NODE_IPS=""
ii=0
for i in $nodes; do
nodeIP=${i#*@}
if [[ "${roles[${ii}]}" == "ai" ]]; then
MASTER_IP=$nodeIP
MASTER=$i
NODE_IPS="$nodeIP"
elif [[ "${roles[${ii}]}" == "a" ]]; then
MASTER_IP=$nodeIP
MASTER=$i
elif [[ "${roles[${ii}]}" == "i" ]]; then
if [[ -z "${NODE_IPS}" ]];then
NODE_IPS="$nodeIP"
else
NODE_IPS="$NODE_IPS,$nodeIP"
fi
else
echo "unsupported role for ${i}. please check"
exit 1
fi
((ii=ii+1))
done
}
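# Example of the expected inputs (hypothetical values, normally defined in
# cluster/ubuntu/config-default.sh):
#   nodes="vcap@192.168.0.2 vcap@192.168.0.3"
#   roles=("ai" "i")
# would yield MASTER=vcap@192.168.0.2, MASTER_IP=192.168.0.2 and NODE_IPS=192.168.0.2,192.168.0.3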
# Verify ssh prereqs
function verify-prereqs {
local rc
rc=0
ssh-add -L 1> /dev/null 2> /dev/null || rc="$?"
# "Could not open a connection to your authentication agent."
if [[ "${rc}" -eq 2 ]]; then
eval "$(ssh-agent)" > /dev/null
trap-add "kill ${SSH_AGENT_PID}" EXIT
fi
rc=0
ssh-add -L 1> /dev/null 2> /dev/null || rc="$?"
# "The agent has no identities."
if [[ "${rc}" -eq 1 ]]; then
# Try adding one of the default identities, with or without passphrase.
ssh-add || true
fi
# Expect at least one identity to be available.
if ! ssh-add -L 1> /dev/null 2> /dev/null; then
echo "Could not find or add an SSH identity."
echo "Please start ssh-agent, add your identity, and retry."
exit 1
fi
}
# Install handler for signal trap
function trap-add {
local handler="$1"
local signal="${2-EXIT}"
local cur
cur="$(eval "sh -c 'echo \$3' -- $(trap -p ${signal})")"
if [[ -n "${cur}" ]]; then
handler="${cur}; ${handler}"
fi
trap "${handler}" ${signal}
}
function verify-cluster {
ii=0
for i in ${nodes}
do
if [ "${roles[${ii}]}" == "a" ]; then
verify-master
elif [ "${roles[${ii}]}" == "i" ]; then
verify-node $i
elif [ "${roles[${ii}]}" == "ai" ]; then
verify-master
verify-node $i
else
echo "unsupported role for ${i}. please check"
exit 1
fi
((ii=ii+1))
done
echo
echo "Kubernetes cluster is running. The master is running at:"
echo
echo " http://${MASTER_IP}:8080"
echo
}
function verify-master(){
# verify master has all required daemons
printf "Validating master"
local -a required_daemon=("kube-apiserver" "kube-controller-manager" "kube-scheduler")
local validated="1"
local try_count=1
local max_try_count=30
until [[ "$validated" == "0" ]]; do
validated="0"
local daemon
for daemon in "${required_daemon[@]}"; do
ssh $SSH_OPTS "$MASTER" "pgrep -f ${daemon}" >/dev/null 2>&1 || {
printf "."
validated="1"
((try_count=try_count+1))
if [[ ${try_count} -gt ${max_try_count} ]]; then
printf "\nWarning: Process \"${daemon}\" failed to run on ${MASTER}, please check.\n"
exit 1
fi
sleep 2
}
done
done
printf "\n"
}
function verify-node(){
# verify node has all required daemons
printf "Validating ${1}"
local -a required_daemon=("kube-proxy" "kubelet" "docker")
local validated="1"
local try_count=1
local max_try_count=30
until [[ "$validated" == "0" ]]; do
validated="0"
local daemon
for daemon in "${required_daemon[@]}"; do
ssh $SSH_OPTS "$1" "pgrep -f $daemon" >/dev/null 2>&1 || {
printf "."
validated="1"
((try_count=try_count+1))
if [[ ${try_count} -gt ${max_try_count} ]]; then
printf "\nWarning: Process \"${daemon}\" failed to run on ${1}, please check.\n"
exit 1
fi
sleep 2
}
done
done
printf "\n"
}
function create-etcd-opts(){
cat <<EOF > ~/kube/default/etcd
ETCD_OPTS="-name infra
-listen-client-urls http://0.0.0.0:4001 \
-advertise-client-urls http://127.0.0.1:4001"
EOF
}
function create-kube-apiserver-opts(){
cat <<EOF > ~/kube/default/kube-apiserver
KUBE_APISERVER_OPTS="--insecure-bind-address=0.0.0.0 \
--insecure-port=8080 \
--etcd-servers=http://127.0.0.1:4001 \
--logtostderr=true \
--service-cluster-ip-range=${1} \
--admission-control=${2} \
--service-node-port-range=${3} \
--client-ca-file=/srv/kubernetes/ca.crt \
--tls-cert-file=/srv/kubernetes/server.cert \
--tls-private-key-file=/srv/kubernetes/server.key"
EOF
}
function create-kube-controller-manager-opts(){
cat <<EOF > ~/kube/default/kube-controller-manager
KUBE_CONTROLLER_MANAGER_OPTS="--master=127.0.0.1:8080 \
--root-ca-file=/srv/kubernetes/ca.crt \
--service-account-private-key-file=/srv/kubernetes/server.key \
--logtostderr=true"
EOF
}
function create-kube-scheduler-opts(){
cat <<EOF > ~/kube/default/kube-scheduler
KUBE_SCHEDULER_OPTS="--logtostderr=true \
--master=127.0.0.1:8080"
EOF
}
function create-kubelet-opts(){
cat <<EOF > ~/kube/default/kubelet
KUBELET_OPTS="--address=0.0.0.0 \
--port=10250 \
--hostname-override=$1 \
--api-servers=http://$2:8080 \
--logtostderr=true \
--cluster-dns=$3 \
--cluster-domain=$4"
EOF
}
function create-kube-proxy-opts(){
cat <<EOF > ~/kube/default/kube-proxy
KUBE_PROXY_OPTS="--master=http://${1}:8080 \
--logtostderr=true"
EOF
}
function create-flanneld-opts(){
cat <<EOF > ~/kube/default/flanneld
FLANNEL_OPTS="--etcd-endpoints=http://${1}:4001"
EOF
}
# Detect the IP for the master
#
# Assumed vars:
# MASTER_NAME
# Vars set:
# KUBE_MASTER
# KUBE_MASTER_IP
function detect-master {
source "${KUBE_ROOT}/cluster/ubuntu/${KUBE_CONFIG_FILE-"config-default.sh"}"
setClusterInfo
KUBE_MASTER=$MASTER
KUBE_MASTER_IP=$MASTER_IP
echo "Using master $MASTER_IP"
}
# Detect the information about the nodes
#
# Assumed vars:
# nodes
# Vars set:
# KUBE_NODE_IP_ADDRESS (array)
function detect-nodes {
source "${KUBE_ROOT}/cluster/ubuntu/${KUBE_CONFIG_FILE-"config-default.sh"}"
KUBE_NODE_IP_ADDRESSES=()
setClusterInfo
ii=0
for i in ${nodes}
do
if [ "${roles[${ii}]}" == "i" ] || [ "${roles[${ii}]}" == "ai" ]; then
KUBE_NODE_IP_ADDRESSES+=("${i#*@}")
fi
((ii=ii+1))
done
if [[ -z "${KUBE_NODE_IP_ADDRESSES[@]}" ]]; then
echo "Could not detect Kubernetes node nodes. Make sure you've launched a cluster with 'kube-up.sh'" >&2
exit 1
fi
}
# Instantiate a kubernetes cluster on ubuntu
function kube-up() {
source "${KUBE_ROOT}/cluster/ubuntu/${KUBE_CONFIG_FILE-"config-default.sh"}"
# ensure the binaries are well prepared
if [ ! -f "ubuntu/binaries/master/kube-apiserver" ]; then
echo "No local binaries for kube-up, downloading... "
"${KUBE_ROOT}/cluster/ubuntu/build.sh"
fi
setClusterInfo
ii=0
for i in ${nodes}
do
{
if [ "${roles[${ii}]}" == "a" ]; then
provision-master
elif [ "${roles[${ii}]}" == "ai" ]; then
provision-masterandnode
elif [ "${roles[${ii}]}" == "i" ]; then
provision-node $i
else
echo "unsupported role for ${i}. please check"
exit 1
fi
}
((ii=ii+1))
done
wait
verify-cluster
detect-master
export CONTEXT="ubuntu"
export KUBE_SERVER="http://${KUBE_MASTER_IP}:8080"
source "${KUBE_ROOT}/cluster/common.sh"
# set kubernetes user and password
load-or-gen-kube-basicauth
create-kubeconfig
}
function provision-master() {
# copy the binaries and scripts to the ~/kube directory on the master
echo "Deploying master on machine ${MASTER_IP}"
echo
ssh $SSH_OPTS $MASTER "mkdir -p ~/kube/default"
scp -r $SSH_OPTS saltbase/salt/generate-cert/make-ca-cert.sh ubuntu/reconfDocker.sh ubuntu/config-default.sh ubuntu/util.sh ubuntu/master/* ubuntu/binaries/master/ "${MASTER}:~/kube"
# remote login to MASTER and use sudo to configure k8s master
ssh $SSH_OPTS -t $MASTER "source ~/kube/util.sh; \
setClusterInfo; \
create-etcd-opts; \
create-kube-apiserver-opts "${SERVICE_CLUSTER_IP_RANGE}" "${ADMISSION_CONTROL}" "${SERVICE_NODE_PORT_RANGE}"; \
create-kube-controller-manager-opts "${NODE_IPS}"; \
create-kube-scheduler-opts; \
create-flanneld-opts "127.0.0.1"; \
sudo -p '[sudo] password to start master: ' cp ~/kube/default/* /etc/default/ && sudo cp ~/kube/init_conf/* /etc/init/ && sudo cp ~/kube/init_scripts/* /etc/init.d/ ;\
sudo groupadd -f -r kube-cert; \
${PROXY_SETTING} sudo -E ~/kube/make-ca-cert.sh ${MASTER_IP} IP:${MASTER_IP},IP:${SERVICE_CLUSTER_IP_RANGE%.*}.1,DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.cluster.local; \
sudo mkdir -p /opt/bin/ && sudo cp ~/kube/master/* /opt/bin/; \
sudo service etcd start; \
sudo FLANNEL_NET=${FLANNEL_NET} -b ~/kube/reconfDocker.sh "a";"
}
function provision-node() {
# copy the binaries and scripts to the ~/kube directory on the node
echo "Deploying node on machine ${1#*@}"
echo
ssh $SSH_OPTS $1 "mkdir -p ~/kube/default"
scp -r $SSH_OPTS ubuntu/config-default.sh ubuntu/util.sh ubuntu/reconfDocker.sh ubuntu/minion/* ubuntu/binaries/minion "${1}:~/kube"
# remote login to the node and use sudo to configure the k8s node
ssh $SSH_OPTS -t $1 "source ~/kube/util.sh; \
setClusterInfo; \
create-kubelet-opts "${1#*@}" "${MASTER_IP}" "${DNS_SERVER_IP}" "${DNS_DOMAIN}"; \
create-kube-proxy-opts "${MASTER_IP}"; \
create-flanneld-opts "${MASTER_IP}"; \
sudo -p '[sudo] password to start node: ' cp ~/kube/default/* /etc/default/ && sudo cp ~/kube/init_conf/* /etc/init/ && sudo cp ~/kube/init_scripts/* /etc/init.d/ \
&& sudo mkdir -p /opt/bin/ && sudo cp ~/kube/minion/* /opt/bin; \
sudo service flanneld start; \
sudo -b ~/kube/reconfDocker.sh "i";"
}
function provision-masterandnode() {
# copy the binaries and scripts to the ~/kube directory on the master
echo "Deploying master and node on machine ${MASTER_IP}"
echo
ssh $SSH_OPTS $MASTER "mkdir -p ~/kube/default"
# scp order matters
scp -r $SSH_OPTS saltbase/salt/generate-cert/make-ca-cert.sh ubuntu/config-default.sh ubuntu/util.sh ubuntu/minion/* ubuntu/master/* ubuntu/reconfDocker.sh ubuntu/binaries/master/ ubuntu/binaries/minion "${MASTER}:~/kube"
# remote login to the node and use sudo to configure k8s
ssh $SSH_OPTS -t $MASTER "source ~/kube/util.sh; \
setClusterInfo; \
create-etcd-opts; \
create-kube-apiserver-opts "${SERVICE_CLUSTER_IP_RANGE}" "${ADMISSION_CONTROL}" "${SERVICE_NODE_PORT_RANGE}"; \
create-kube-controller-manager-opts "${NODE_IPS}"; \
create-kube-scheduler-opts; \
create-kubelet-opts "${MASTER_IP}" "${MASTER_IP}" "${DNS_SERVER_IP}" "${DNS_DOMAIN}";
create-kube-proxy-opts "${MASTER_IP}";\
create-flanneld-opts "127.0.0.1"; \
sudo -p '[sudo] password to start master: ' cp ~/kube/default/* /etc/default/ && sudo cp ~/kube/init_conf/* /etc/init/ && sudo cp ~/kube/init_scripts/* /etc/init.d/ ; \
sudo groupadd -f -r kube-cert; \
${PROXY_SETTING} sudo -E ~/kube/make-ca-cert.sh ${MASTER_IP} IP:${MASTER_IP},IP:${SERVICE_CLUSTER_IP_RANGE%.*}.1,DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.cluster.local; \
sudo mkdir -p /opt/bin/ && sudo cp ~/kube/master/* /opt/bin/ && sudo cp ~/kube/minion/* /opt/bin/; \
sudo service etcd start; \
sudo FLANNEL_NET=${FLANNEL_NET} -b ~/kube/reconfDocker.sh "ai";"
}
# Delete a kubernetes cluster
function kube-down {
source "${KUBE_ROOT}/cluster/ubuntu/${KUBE_CONFIG_FILE-"config-default.sh"}"
source "${KUBE_ROOT}/cluster/common.sh"
tear_down_alive_resources
ii=0
for i in ${nodes}; do
{
echo "Cleaning on node ${i#*@}"
if [[ "${roles[${ii}]}" == "ai" || "${roles[${ii}]}" == "a" ]]; then
ssh -t $i 'pgrep etcd && sudo -p "[sudo] password to stop master: " service etcd stop && sudo rm -rf /infra*;
sudo rm -rf /opt/bin/etcd* /etc/init/etcd.conf /etc/init.d/etcd /etc/default/etcd'
elif [[ "${roles[${ii}]}" == "i" ]]; then
ssh -t $i 'pgrep flanneld && sudo -p "[sudo] password to stop node: " service flanneld stop'
else
echo "unsupported role for ${i}"
fi
# Delete the files in order to generate a clean environment, so you can change each node's role at next deployment.
ssh -t $i 'sudo rm -f /opt/bin/kube* /opt/bin/flanneld;
sudo rm -rf /etc/init/kube* /etc/init/flanneld.conf /etc/init.d/kube* /etc/init.d/flanneld;
sudo rm -rf /etc/default/kube* /etc/default/flanneld;
sudo rm -rf ~/kube /var/lib/kubelet;
sudo rm -rf /run/flannel/subnet.env' || true
}
((ii=ii+1))
done
}
# Perform common upgrade setup tasks
function prepare-push() {
# Use local binaries for kube-push
if [[ "${KUBE_VERSION}" == "" ]]; then
if [[ ! -d "${KUBE_ROOT}/cluster/ubuntu/binaries" ]]; then
echo "No local binaries.Please check"
exit 1
else
echo "Please make sure all the required local binaries are prepared ahead"
sleep 3
fi
else
# Run build.sh to get the required release
export KUBE_VERSION
"${KUBE_ROOT}/cluster/ubuntu/build.sh"
fi
}
# Update a kubernetes master with expected release
function push-master {
source "${KUBE_ROOT}/cluster/ubuntu/${KUBE_CONFIG_FILE-"config-default.sh"}"
if [[ ! -f "${KUBE_ROOT}/cluster/ubuntu/binaries/master/kube-apiserver" ]]; then
echo "There is no required release of kubernetes, please check first"
exit 1
fi
setClusterInfo
ii=0
for i in ${nodes}; do
if [[ "${roles[${ii}]}" == "a" ]]; then
echo "Cleaning master ${i#*@}"
ssh -t $i 'sudo -p "[sudo] stop the all process: " service etcd stop;
sudo rm -rf /opt/bin/etcd* /etc/init/etcd.conf /etc/init.d/etcd /etc/default/etcd;
sudo rm -f /opt/bin/kube* /opt/bin/flanneld;
sudo rm -rf /etc/init/kube* /etc/init/flanneld.conf /etc/init.d/kube* /etc/init.d/flanneld;
sudo rm -rf /etc/default/kube* /etc/default/flanneld;
sudo rm -rf ~/kube' || true
provision-master
elif [[ "${roles[${ii}]}" == "ai" ]]; then
echo "Cleaning master ${i#*@}"
ssh -t $i 'sudo -p "[sudo] stop the all process: " service etcd stop;
sudo rm -rf /opt/bin/etcd* /etc/init/etcd.conf /etc/init.d/etcd /etc/default/etcd;
sudo rm -f /opt/bin/kube* /opt/bin/flanneld;
sudo rm -rf /etc/init/kube* /etc/init/flanneld.conf /etc/init.d/kube* /etc/init.d/flanneld;
sudo rm -rf /etc/default/kube* /etc/default/flanneld;
sudo rm -rf ~/kube' || true
provision-masterandnode
elif [[ "${roles[${ii}]}" == "i" ]]; then
((ii=ii+1))
continue
else
echo "unsupported role for ${i}, please check"
exit 1
fi
((ii=ii+1))
done
verify-cluster
}
# Update a kubernetes node with expected release
function push-node() {
source "${KUBE_ROOT}/cluster/ubuntu/${KUBE_CONFIG_FILE-"config-default.sh"}"
if [[ ! -f "${KUBE_ROOT}/cluster/ubuntu/binaries/minion/kubelet" ]]; then
echo "There is no required release of kubernetes, please check first"
exit 1
fi
node_ip=${1}
setClusterInfo
ii=0
existing=false
for i in ${nodes}; do
if [[ "${roles[${ii}]}" == "i" && ${i#*@} == $node_ip ]]; then
echo "Cleaning node ${i#*@}"
ssh -t $i 'sudo -p "[sudo] stop the all process: " service flanneld stop;
sudo rm -f /opt/bin/kube* /opt/bin/flanneld;
sudo rm -rf /etc/init/kube* /etc/init/flanneld.conf /etc/init.d/kube* /etc/init.d/flanneld;
sudo rm -rf /etc/default/kube* /etc/default/flanneld;
sudo rm -rf ~/kube' || true
provision-node $i
existing=true
elif [[ "${roles[${ii}]}" == "a" || "${roles[${ii}]}" == "ai" ]] && [[ ${i#*@} == $node_ip ]]; then
echo "${i} is master node, please try ./kube-push -m instead"
existing=true
elif [[ "${roles[${ii}]}" == "i" || "${roles[${ii}]}" == "a" || "${roles[${ii}]}" == "ai" ]]; then
((ii=ii+1))
continue
else
echo "unsupported role for ${i}, please check"
exit 1
fi
((ii=ii+1))
done
if [[ "${existing}" == false ]]; then
echo "node ${node_ip} does not exist"
else
verify-cluster
fi
}
# Update a kubernetes cluster with expected source
function kube-push {
prepare-push
source "${KUBE_ROOT}/cluster/ubuntu/${KUBE_CONFIG_FILE-"config-default.sh"}"
if [[ ! -f "${KUBE_ROOT}/cluster/ubuntu/binaries/master/kube-apiserver" ]]; then
echo "There is no required release of kubernetes, please check first"
exit 1
fi
# stop all the kube processes & etcd
ii=0
for i in ${nodes}; do
{
echo "Cleaning on node ${i#*@}"
if [[ "${roles[${ii}]}" == "ai" || "${roles[${ii}]}" == "a" ]]; then
ssh -t $i 'pgrep etcd && sudo -p "[sudo] password to stop master: " service etcd stop;
sudo rm -rf /opt/bin/etcd* /etc/init/etcd.conf /etc/init.d/etcd /etc/default/etcd' || true
elif [[ "${roles[${ii}]}" == "i" ]]; then
ssh -t $i 'pgrep flanneld && sudo -p "[sudo] password to stop node: " service flanneld stop' || true
else
echo "unsupported role for ${i}"
fi
ssh -t $i 'sudo rm -f /opt/bin/kube* /opt/bin/flanneld;
sudo rm -rf /etc/init/kube* /etc/init/flanneld.conf /etc/init.d/kube* /etc/init.d/flanneld;
sudo rm -rf /etc/default/kube* /etc/default/flanneld;
sudo rm -rf ~/kube' || true
}
((ii=ii+1))
done
# provision all nodes, including master & nodes
setClusterInfo
ii=0
for i in ${nodes}; do
if [[ "${roles[${ii}]}" == "a" ]]; then
provision-master
elif [[ "${roles[${ii}]}" == "i" ]]; then
provision-node $i
elif [[ "${roles[${ii}]}" == "ai" ]]; then
provision-masterandnode
else
echo "unsupported role for ${i}. please check"
exit 1
fi
((ii=ii+1))
done
verify-cluster
}
# Perform preparations required to run e2e tests
function prepare-e2e() {
echo "Ubuntu doesn't need special preparations for e2e tests" 1>&2
}
| wulonghui/kubernetes | cluster/ubuntu/util.sh | Shell | apache-2.0 | 19,930 |