code (stringlengths 2 to 1.05M) | repo_name (stringlengths 5 to 110) | path (stringlengths 3 to 922) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 2 to 1.05M) |
---|---|---|---|---|---|
#!/bin/bash
set -e -E -u -o pipefail; shopt -s failglob;
# howto: http://xavinux.blogspot.com.ar/2013/06/update-nemo-after-instalation.html
# change dir to /
cd /
# refresh all repositories
zypper ref
# update packages
zypper up
# upgrade
zypper dup
# `zypper -h` #for more info
|
wilas/vbkick-templates-forge
|
PlasmaActive/postinstall/update.sh
|
Shell
|
mpl-2.0
| 280 |
#!/usr/bin/env bash
set -e
###############################################################################
#
# edx-all-tests.sh
#
# Execute all tests for edx-platform.
#
# This script can be called from a Jenkins
# multiconfiguration job that defines these environment
# variables:
#
# `TEST_SUITE` defines which kind of test to run.
# Possible values are:
#
# - "quality": Run the quality (pep8/pylint) checks
# - "unit": Run the JavaScript and Python unit tests
# (also tests building the Sphinx documentation,
# because we couldn't think of a better place to put it)
# - "lms-acceptance": Run the acceptance (Selenium) tests for the LMS
# - "cms-acceptance": Run the acceptance (Selenium) tests for Studio
# - "bok-choy": Run acceptance tests that use the bok-choy framework
#
# `SHARD` is a number (1, 2, or 3) indicating which subset of the tests
# to build. Currently, "lms-acceptance" and "bok-choy" each have two
# shards (1 and 2), "cms-acceptance" has three shards (1, 2, and 3),
# and all the other test suites have one shard.
#
# For the "bok-choy", the tests are put into shard groups using the nose
# 'attr' decorator (e.g. "@attr('shard_1')"). Currently, anything with
# the 'shard_1' attribute will run in the first shard. All other bok-choy
# tests will run in shard 2.
#
# For the lettuce acceptance tests, ("lms-" and "cms-acceptance") they
# are decorated with "@shard_{}" (e.g. @shard_1 for the first shard).
# The lettuce tests must have a shard specified to be run in jenkins,
# as there is no shard that runs unspecified tests.
#
#
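# Example invocations (illustrative only; the exact axis names and values are
# defined by the Jenkins multiconfiguration job, not by this script):
#
#   TEST_SUITE=quality ./scripts/all-tests.sh
#   TEST_SUITE=bok-choy SHARD=1 ./scripts/all-tests.sh
#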
# Jenkins configuration:
#
# - The edx-platform git repository is checked out by the Jenkins git plugin.
#
# - Jenkins logs in as user "jenkins"
#
# - The Jenkins file system root is "/home/jenkins"
#
# - An init script creates a virtualenv at "/home/jenkins/edx-venv"
# with some requirements pre-installed (such as scipy)
#
# Jenkins worker setup:
# See the edx/configuration repo for Jenkins worker provisioning scripts.
# The provisioning scripts install requirements that this script depends on!
#
###############################################################################
# Violations thresholds for failing the build
PYLINT_THRESHOLD=4725
PEP8_THRESHOLD=200
source $HOME/jenkins_env
# Clean up previous builds
git clean -qxfd
# Clear the mongo database
# Note that this prevents us from running jobs in parallel on a single worker.
mongo --quiet --eval 'db.getMongo().getDBNames().forEach(function(i){db.getSiblingDB(i).dropDatabase()})'
# Ensure we have fetched origin/master
# Some of the reporting tools compare the checked out branch to origin/master;
# depending on how the GitHub plugin refspec is configured, this may
# not already be fetched.
git fetch origin master:refs/remotes/origin/master
# Reset the jenkins worker's ruby environment back to
# the state it was in when the instance was spun up.
if [ -e $HOME/edx-rbenv_clean.tar.gz ]; then
rm -rf $HOME/.rbenv
tar -C $HOME -xf $HOME/edx-rbenv_clean.tar.gz
fi
# Bootstrap Ruby requirements so we can run the tests
bundle install
# Ensure the Ruby environment contains no stray gems
bundle clean --force
# Reset the jenkins worker's virtualenv back to the
# state it was in when the instance was spun up.
if [ -e $HOME/edx-venv_clean.tar.gz ]; then
rm -rf $HOME/edx-venv
tar -C $HOME -xf $HOME/edx-venv_clean.tar.gz
fi
# Activate the Python virtualenv
source $HOME/edx-venv/bin/activate
# If the environment variable 'SHARD' is not set, default to 'all'.
# This could happen if you are trying to use this script from
# jenkins and do not define 'SHARD' in your multi-config project.
# Note that you will still need to pass a value for 'TEST_SUITE'
# or else no tests will be executed.
SHARD=${SHARD:="all"}
case "$TEST_SUITE" in
"quality")
EXIT=0
paver run_pep8 -l $PEP8_THRESHOLD > pep8.log || { cat pep8.log; EXIT=1; }
paver run_pylint -l $PYLINT_THRESHOLD > pylint.log || { cat pylint.log; EXIT=1; }
# Run quality task. Pass in the 'fail-under' percentage to diff-quality
paver run_quality -p 100
# Need to create an empty test result so the post-build
# action doesn't fail the build.
mkdir -p reports
cat > reports/quality.xml <<END
<?xml version="1.0" encoding="UTF-8"?>
<testsuite name="quality" tests="1" errors="0" failures="0" skip="0">
<testcase classname="quality" name="quality" time="0.604"></testcase>
</testsuite>
END
exit $EXIT
;;
"unit")
paver test
paver coverage
;;
"lms-acceptance")
case "$SHARD" in
"all")
paver test_acceptance -s lms --extra_args="-v 3"
;;
*)
paver test_acceptance -s lms --extra_args="-v 3 --tag shard_${SHARD}"
;;
esac
;;
"cms-acceptance")
case "$SHARD" in
"all")
paver test_acceptance -s cms --extra_args="-v 3"
;;
*)
paver test_acceptance -s cms --extra_args="-v 3 --tag shard_${SHARD}"
;;
esac
;;
"bok-choy")
case "$SHARD" in
"all")
paver test_bokchoy
paver bokchoy_coverage
;;
"1")
paver test_bokchoy --extra_args="-a shard_1"
paver bokchoy_coverage
;;
"2")
paver test_bokchoy --extra_args="-a 'shard_2'"
paver bokchoy_coverage
;;
"3")
paver test_bokchoy --extra_args="-a shard_1=False,shard_2=False"
paver bokchoy_coverage
;;
# Default case because if we later define another bok-choy shard on Jenkins
# (e.g. Shard 4) in the multi-config project and expand this file
# with an additional case condition, old branches without that commit
# would not execute any tests on the worker assigned to that shard
# and thus their build would fail.
# This way they will just report 1 test executed and passed.
*)
# Need to create an empty test result so the post-build
# action doesn't fail the build.
# May be unnecessary if we changed the "Skip if there are no test files"
# option to True in the jenkins job definitions.
mkdir -p reports/bok_choy
cat > reports/bok_choy/xunit.xml <<END
<?xml version="1.0" encoding="UTF-8"?>
<testsuite name="nosetests" tests="1" errors="0" failures="0" skip="0">
<testcase classname="acceptance.tests" name="shard_placeholder" time="0.001"></testcase>
</testsuite>
END
;;
esac
;;
esac
|
dsajkl/123
|
scripts/all-tests.sh
|
Shell
|
agpl-3.0
| 6,981 |
#!/bin/bash
docker build --rm -t memex/mongo:7.0 .
|
jreeme/firmament
|
docker/memex/mongo/_build.sh
|
Shell
|
unlicense
| 52 |
#! /bin/bash
function assertions {
if [ -z "$1" ]; then
echo Usage: $0 "<version>"
exit 1
fi
if ! git diff --quiet --exit-code; then
echo Current branch "isn't" clean. Use '`git status` for more details.'
echo Quitting...
exit 1
fi
if git rev-parse --verify --quiet $1 > /dev/null; then
echo 'Target tag `'$1'` already exists.'
echo Quitting...
exit 1
fi
if [ ! -z `git branch --list $TARGET_BRANCH` ]; then
echo 'Target branch `'$TARGET_BRANCH'` already exists.'
echo Quitting...
exit 1
fi
CURRENT_BRANCH=`git name-rev --name-only HEAD`
if [ :$CURRENT_BRANCH != :master ]; then
echo 'Current branch `'$CURRENT_BRANCH'`' "isn't" '`master`.'
echo Quitting...
exit 1
fi
echo Preparing release for '`'$1'`'
echo -n Proceed? "[Y|n] "
read input
test :$input = :N -o :$input = :n && exit 1
h1 Test
grunt
}
function h1 {
echo
echo '## '$*
}
function error {
echo 'ERROR: '$*
exit 2
}
function update_authors {
h1 Update AUTHORS file
grunt update-authors > /dev/null
if [ -z "$(git diff)" ]; then
echo No updates for AUTHORS file needed...
else
git commit -a -m 'AUTHORS: Update' > /dev/null &&
git show --stat
fi
}
function update_version {
h1 Update package.json '`version`' attribute
sed -i.orig 's/"version": "[^"]\+"/"version": "'$1'"/' package.json &&
git commit -a -m $1 &&
git show
}
function build {
h1 Include distribution files
# Yeap, again. Now including the new version in the dist files.
grunt > /dev/null || error Build failed
git add dist/* > /dev/null &&
git commit -a -m "Build: Include distribution files" > /dev/null &&
git show --stat ||
error Failed including distribution files
}
function tag {
h1 'Tag `'$1'` (detached)'
git tag -a -m $1 $1 > /dev/null
}
function checkout_back_to_master {
git checkout master > /dev/null
}
function final_message {
h1 Done
echo
echo Now you need to:
echo git push --tags origin
echo npm publish
echo git checkout master
echo git branch -D $TARGET_BRANCH
}
TARGET_BRANCH=b$1
assertions $1 &&
update_authors &&
update_version $1 &&
git checkout -b $TARGET_BRANCH &&
build &&
tag $1 &&
final_message
|
akaver/AspNetCore-BaseWebApp
|
WebApp/wwwroot/lib/globalize/chore/release.sh
|
Shell
|
unlicense
| 2,158 |
#!/bin/sh
# shellcheck disable=SC2039
#
# Copyright (c) 2010-2016 Chef Software, Inc. and/or applicable contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Fails on unset variables & whenever a command returns a non-zero exit code.
set -eu
# If the variable `$DEBUG` is set, then print the shell commands as we execute.
if [ -n "${DEBUG:-}" ]; then set -x; fi
BT_ROOT="https://api.bintray.com/content/habitat"
BT_SEARCH="https://api.bintray.com/packages/habitat"
main() {
# Use stable Bintray channel by default
channel="stable"
# Set an empty version variable, signaling we want the latest release
version=""
# Parse command line flags and options.
while getopts "c:hv:" opt; do
case "${opt}" in
c)
channel="${OPTARG}"
;;
h)
print_help
exit 0
;;
v)
version="${OPTARG}"
;;
\?)
echo "" >&2
print_help >&2
exit_with "Invalid option" 1
;;
esac
done
info "Installing Habitat 'hab' program"
create_workdir
get_platform
get_version
download_archive
verify_archive
extract_archive
install_hab
print_hab_version
info "Installation of Habitat 'hab' program complete."
}
print_help() {
need_cmd cat
need_cmd basename
local _cmd
_cmd="$(basename "${0}")"
cat <<USAGE
${_cmd}
Authors: The Habitat Maintainers <[email protected]>
Installs the Habitat 'hab' program.
USAGE:
${_cmd} [FLAGS]
FLAGS:
-c Specifies a channel [values: stable, unstable] [default: stable]
-h Prints help information
-v Specifies a version (ex: 0.15.0, 0.15.0/20161222215311)
ENVIRONMENT VARIABLES:
SSL_CERT_FILE allows you to verify against a custom cert such as one
generated from a corporate firewall
USAGE
}
create_workdir() {
need_cmd mktemp
need_cmd rm
need_cmd mkdir
if [ -n "${TMPDIR:-}" ]; then
local _tmp="${TMPDIR}"
elif [ -d /var/tmp ]; then
local _tmp=/var/tmp
else
local _tmp=/tmp
fi
workdir="$(mktemp -d -p "$_tmp" 2> /dev/null || mktemp -d "${_tmp}/hab.XXXX")"
# Add a trap to clean up any interrupted file downloads
# shellcheck disable=SC2154
trap 'code=$?; rm -rf $workdir; exit $code' INT TERM EXIT
cd "${workdir}"
}
get_platform() {
need_cmd uname
need_cmd tr
local _ostype
_ostype="$(uname -s)"
case "${_ostype}" in
Darwin|Linux)
sys="$(uname -s | tr '[:upper:]' '[:lower:]')"
arch="$(uname -m | tr '[:upper:]' '[:lower:]')"
;;
*)
exit_with "Unrecognized OS type when determining platform: ${_ostype}" 2
;;
esac
case "${sys}" in
darwin)
need_cmd shasum
ext=zip
shasum_cmd="shasum -a 256"
;;
linux)
need_cmd sha256sum
ext=tar.gz
shasum_cmd="sha256sum"
;;
*)
exit_with "Unrecognized sys type when determining platform: ${sys}" 3
;;
esac
}
get_version() {
need_cmd grep
need_cmd head
need_cmd sed
need_cmd tr
local _btv
local _j="${workdir}/version.json"
_btv="$(echo "${version:-%24latest}" | tr '/' '-')"
if [ -z "${_btv##*%24latest*}" ]; then
btv=$_btv
else
info "Determining fully qualified version of package for \`$version'"
dl_file "${BT_SEARCH}/${channel}/hab-${arch}-${sys}" "${_j}"
# This is nasty and we know it. Clap your hands. If the install.sh stops
# working, it's likely related to this here sed command. We have to pull
# versions out of minified json. So if this ever stops working, it's likely
# the BT api output is no longer minified.
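# Illustrative example only (the real Bintray payload may differ): given a
# minified response such as
#   {"name":"hab-x86_64-linux","versions":["0.56.0-20180530234036","0.55.0-20180321220925"]}
# and version="0.55.0/20180321220925", the pipeline below yields
# "0.55.0-20180321220925".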
_rev="$(sed -e 's/^.*"versions":\[\([^]]*\)\].*$/\1/' -e 's/"//g' "${_j}" \
| tr ',' '\n' \
| grep "^${_btv}" \
| head -1)"
if [ -z "${_rev}" ]; then
_e="Version \`${version}' could not used or version doesn't exist."
_e="$_e Please provide a simple version like: \"0.15.0\""
_e="$_e or a fully qualified version like: \"0.15.0/20161222203215\"."
exit_with "$_e" 6
else
btv=$_rev
info "Using fully qualified Bintray version string of: $btv"
fi
fi
}
download_archive() {
need_cmd cut
need_cmd mv
url="${BT_ROOT}/${channel}/${sys}/${arch}/hab-${btv}-${arch}-${sys}.${ext}"
query="?bt_package=hab-${arch}-${sys}"
local _hab_url="${url}${query}"
local _sha_url="${url}.sha256sum${query}"
dl_file "${_hab_url}" "${workdir}/hab-latest.${ext}"
dl_file "${_sha_url}" "${workdir}/hab-latest.${ext}.sha256sum"
archive="${workdir}/$(cut -d ' ' -f 3 hab-latest.${ext}.sha256sum)"
sha_file="${archive}.sha256sum"
info "Renaming downloaded archive files"
mv -v "${workdir}/hab-latest.${ext}" "${archive}"
mv -v "${workdir}/hab-latest.${ext}.sha256sum" "${archive}.sha256sum"
}
verify_archive() {
if command -v gpg >/dev/null; then
info "GnuPG tooling found, verifying the shasum digest is properly signed"
local _sha_sig_url="${url}.sha256sum.asc${query}"
local _sha_sig_file="${archive}.sha256sum.asc"
local _key_url="https://bintray.com/user/downloadSubjectPublicKey?username=habitat"
local _key_file="${workdir}/habitat.asc"
dl_file "${_sha_sig_url}" "${_sha_sig_file}"
dl_file "${_key_url}" "${_key_file}"
gpg --no-permission-warning --dearmor "${_key_file}"
gpg --no-permission-warning \
--keyring "${_key_file}.gpg" --verify "${_sha_sig_file}"
fi
info "Verifying the shasum digest matches the downloaded archive"
${shasum_cmd} -c "${sha_file}"
}
extract_archive() {
need_cmd sed
info "Extracting ${archive}"
case "${ext}" in
tar.gz)
need_cmd zcat
need_cmd tar
zcat "${archive}" | tar x -C "${workdir}"
archive_dir="$(echo "${archive}" | sed 's/.tar.gz$//')"
;;
zip)
need_cmd unzip
unzip "${archive}" -d "${workdir}"
archive_dir="$(echo "${archive}" | sed 's/.zip$//')"
;;
*)
exit_with "Unrecognized file extension when extracting: ${ext}" 4
;;
esac
}
install_hab() {
case "${sys}" in
darwin)
need_cmd mkdir
need_cmd install
info "Installing hab into /usr/local/bin"
mkdir -pv /usr/local/bin
install -v "${archive_dir}"/hab /usr/local/bin/hab
;;
linux)
local _ident="core/hab"
if [ ! -z "${version-}" ]; then _ident="${_ident}/$version"; fi
info "Installing Habitat package using temporarily downloaded hab"
# Install hab release using the extracted version and add/update symlink
"${archive_dir}/hab" install "$_ident"
# TODO fn: The updated binlink behavior is to skip targets that already
# exist so we want to use the `--force` flag. Unfortunately, old versions
# of `hab` don't have this flag. For now, we'll run with the new flag and
# fall back to the older behavior. This can be removed at a future date
# when we are no longer worrying about Habitat versions 0.33.2 and older.
# (2017-09-29)
"${archive_dir}/hab" pkg binlink "$_ident" hab --force \
|| "${archive_dir}/hab" pkg binlink "$_ident" hab
;;
*)
exit_with "Unrecognized sys when installing: ${sys}" 5
;;
esac
}
print_hab_version() {
need_cmd hab
info "Checking installed hab version"
hab --version
}
need_cmd() {
if ! command -v "$1" > /dev/null 2>&1; then
exit_with "Required command '$1' not found on PATH" 127
fi
}
info() {
echo "--> hab-install: $1"
}
warn() {
echo "xxx hab-install: $1" >&2
}
exit_with() {
warn "$1"
exit "${2:-10}"
}
dl_file() {
local _url="${1}"
local _dst="${2}"
local _code
local _wget_extra_args=""
local _curl_extra_args=""
# Attempt to download with wget, if found. If successful, quick return
if command -v wget > /dev/null; then
info "Downloading via wget: ${_url}"
if [ -n "${SSL_CERT_FILE:-}" ]; then
wget ${_wget_extra_args:+"--ca-certificate=${SSL_CERT_FILE}"} -q -O "${_dst}" "${_url}"
else
wget -q -O "${_dst}" "${_url}"
fi
_code="$?"
if [ $_code -eq 0 ]; then
return 0
else
local _e="wget failed to download file, perhaps wget doesn't have"
_e="$_e SSL support and/or no CA certificates are present?"
warn "$_e"
fi
fi
# Attempt to download with curl, if found. If successful, quick return
if command -v curl > /dev/null; then
info "Downloading via curl: ${_url}"
if [ -n "${SSL_CERT_FILE:-}" ]; then
curl ${_curl_extra_args:+"--cacert ${SSL_CERT_FILE}"} -sSfL "${_url}" -o "${_dst}"
else
curl -sSfL "${_url}" -o "${_dst}"
fi
_code="$?"
if [ $_code -eq 0 ]; then
return 0
else
local _e="curl failed to download file, perhaps curl doesn't have"
_e="$_e SSL support and/or no CA certificates are present?"
warn "$_e"
fi
fi
# If we reach this point, wget and curl have failed and we're out of options
exit_with "Required: SSL-enabled 'curl' or 'wget' on PATH with" 6
}
main "$@" || exit 99
|
georgemarshall/habitat
|
components/hab/install.sh
|
Shell
|
apache-2.0
| 9,471 |
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Stop hadoop map reduce daemons. Run this on master node.
bin=`dirname "${BASH_SOURCE-$0}"`
bin=`cd "$bin"; pwd`
. $bin/../libexec/mapred-config.sh
"$HADOOP_PREFIX"/bin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script "$bin"/mapred stop jobtracker
"$HADOOP_PREFIX"/bin/hadoop-daemons.sh --config $HADOOP_CONF_DIR --script "$bin"/mapred stop tasktracker
|
rekhajoshm/mapreduce-fork
|
bin/stop-mapred.sh
|
Shell
|
apache-2.0
| 1,165 |
magenta_midi \
--input_port="IAC Driver IAC Bus 3" \
--output_port="IAC Driver IAC Bus 4" \
--passthrough=false \
--qpm=120 \
--allow_overlap=true \
--enable_metronome=false \
--log=DEBUG \
--clock_control_number=1 \
--end_call_control_number=2 \
--min_listen_ticks_control_number=3 \
--max_listen_ticks_control_number=4 \
--response_ticks_control_number=5 \
--temperature_control_number=6 \
--tempo_control_number=7 \
--generator_select_control_number=8 \
--state_control_number=9 \
--loop_control_number=10 \
--panic_control_number=11 \
--mutate_control_number=12 \
--bundle_files=./drum_kit_rnn.mag \
--playback_offset=-0.035 \
--playback_channel=2
|
bda2017-shallowermind/MusTGAN
|
magenta/magenta/demos/NIPS_2016/magenta_drums.sh
|
Shell
|
apache-2.0
| 740 |
#!/bin/bash
#
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
start_test Statistics logging works.
check ls $MOD_PAGESPEED_STATS_LOG
check [ $(grep "timestamp: " $MOD_PAGESPEED_STATS_LOG | wc -l) -ge 1 ]
# An array of all the timestamps that were logged.
TIMESTAMPS=($(sed -n '/timestamp: /s/[^0-9]*//gp' $MOD_PAGESPEED_STATS_LOG))
check [ ${#TIMESTAMPS[@]} -ge 1 ]
for T in ${TIMESTAMPS[@]}; do
check [ $T -ge $START_TIME ]
done
# Check a few arbitrary statistics to make sure logging is taking place.
check [ $(grep "num_flushes: " $MOD_PAGESPEED_STATS_LOG | wc -l) -ge 1 ]
# We are not outputting histograms.
check [ $(grep "histogram#" $MOD_PAGESPEED_STATS_LOG | wc -l) -eq 0 ]
check [ $(grep "image_ongoing_rewrites: " $MOD_PAGESPEED_STATS_LOG | wc -l) \
-ge 1 ]
start_test Statistics logging JSON handler works.
JSON=$OUTDIR/console_json.json
STATS_JSON_URL="$CONSOLE_URL?json&granularity=0&var_titles=num_\
flushes,image_ongoing_rewrites"
echo "$WGET_DUMP $STATS_JSON_URL > $JSON"
$WGET_DUMP $STATS_JSON_URL > $JSON
# Each variable we ask for should show up once.
check [ $(grep "\"num_flushes\": " $JSON | wc -l) -eq 1 ]
check [ $(grep "\"image_ongoing_rewrites\": " $JSON | wc -l) -eq 1 ]
check [ $(grep "\"timestamps\": " $JSON | wc -l) -eq 1 ]
# An array of all the timestamps that the JSON handler returned.
JSON_TIMESTAMPS=($(sed -rn 's/^\{"timestamps": \[(([0-9]+, )*[0-9]*)\].*}$/\1/;/^[0-9]+/s/,//gp' $JSON))
# Check that we see the same timestamps that are in TIMESTAMPS.
# We might have generated extra timestamps in the time between TIMESTAMPS
# and JSON_TIMESTAMPS, so only loop through TIMESTAMPS.
check [ ${#JSON_TIMESTAMPS[@]} -ge ${#TIMESTAMPS[@]} ]
t=0
while [ $t -lt ${#TIMESTAMPS[@]} ]; do
check [ ${TIMESTAMPS[$t]} -eq ${JSON_TIMESTAMPS[$t]} ]
t=$(($t+1))
done
start_test JSON handler does not mirror HTML
JSON_URL="$CONSOLE_URL?json&granularity=0&var_titles=<boo>"
OUT=$($CURL --silent $JSON_URL)
check_not_from "$OUT" fgrep -q "<boo>"
# The title should come back HTML-escaped rather than mirrored verbatim.
check_from "$OUT" fgrep -q "&lt;boo&gt;"
start_test Statistics console is available.
CONSOLE_URL=$PRIMARY_SERVER/pagespeed_console
CONSOLE_HTML=$OUTDIR/console.html
$WGET_DUMP $CONSOLE_URL > $CONSOLE_HTML
check grep -q "console" $CONSOLE_HTML
|
VersoBit/mod_pagespeed
|
pagespeed/apache/system_tests/statistics_logging.sh
|
Shell
|
apache-2.0
| 2,747 |
#!/bin/bash
echo "Cleaning k8s resource..."
# Back to the root of the project
cd $(dirname $0)
cd ../..
DEFAULT_NAMESPACE=openpitrix-system
NAMESPACE=${DEFAULT_NAMESPACE}
usage() {
echo "Usage:"
echo " clean.sh [-n NAMESPACE]"
echo "Description:"
echo " -n NAMESPACE: the namespace of kubernetes."
exit -1
}
while getopts n:h option
do
case "${option}"
in
n) NAMESPACE=${OPTARG};;
h) usage ;;
*) usage ;;
esac
done
kubectl delete namespace ${NAMESPACE}
echo "Cleaning docker resource..."
docker rmi openpitrix/openpitrix-dev:latest
docker rmi openpitrix/openpitrix-dev:metadata
docker rmi openpitrix/openpitrix-dev:flyway
docker rmi openpitrix
echo "Cleaned successfully"
|
openpitrix/openpitrix
|
deploy/kubernetes/scripts/clean.sh
|
Shell
|
apache-2.0
| 700 |
#!/bin/sh
export LD_LIBRARY_PATH=bin/SkyBlue/LCP/EDLoggingControl/LogDecompressor:${LD_LIBRARY_PATH}
# If no argument is passed.
if [ ! "$1" ]; then
#java -cp .:lib/sqlite-jdbc-3.7.2.jar -jar Httpd.jar
java -cp .:lib/sqlite-jdbc-3.7.2.jar:lib/java_websocket.jar -classpath Httpd.jar:lib/sqlite-jdbc-3.7.2.jar:lib/java_websocket.jar SkyBlue.LCP.Httpd.core.Httpd
else
# If the argument is not a file path.
if [ ! -f "$1" ]; then
#java -cp .:lib/sqlite-jdbc-3.7.2.jar -jar Httpd.jar
java -cp .:lib/sqlite-jdbc-3.7.2.jar:lib/java_websocket.jar -classpath Httpd.jar:lib/sqlite-jdbc-3.7.2.jar:lib/java_websocket.jar SkyBlue.LCP.Httpd.core.Httpd
else
#java -cp .:lib/sqlite-jdbc-3.7.2.jar -jar Httpd.jar -conf $1
java -cp .:lib/sqlite-jdbc-3.7.2.jar:lib/java_websocket.jar -classpath Httpd.jar:lib/sqlite-jdbc-3.7.2.jar:lib/java_websocket.jar SkyBlue.LCP.Httpd.core.Httpd -conf $1
fi
fi
|
not001praween001/BlueSkyLoggerCloudBINResearchVer1.0
|
bin/Httpd.sh
|
Shell
|
apache-2.0
| 896 |
#!/bin/bash
set -x
PID=$(ctx instance runtime_properties pid)
kill -9 ${PID}
ctx logger info "Successfully stopped Nodecellar (${PID})"
|
denismakogon/aria-examples
|
scripts/nodecellar/stop-nodecellar-app.sh
|
Shell
|
apache-2.0
| 138 |
#!/bin/bash
set -e
#
# Test cpp compilation (debug and release)
#
./cake --quiet tests/helloworld_cpp.cpp
result=$(bin/helloworld_cpp)
if [[ $result != "debug 1" ]]; then
echo test 1: Incorrect variant: $result
exit 1
fi
./cake --quiet tests/helloworld_cpp.cpp --variant=release
result=$(bin/helloworld_cpp)
if [[ $result != "release 1" ]]; then
echo test 2: Incorrect variant: $result
exit 1
fi
result=$(bin/helloworld_cpp extra args)
if [[ $result != "release 3" ]]; then
echo test 3: Incorrect args: $result
exit 1
fi
#
# Test c compilation (debug and release)
#
./cake --quiet tests/helloworld_c.c
result=$(bin/helloworld_c)
if [[ $result != "debug 1" ]]; then
echo test 4: Incorrect variant: $result
exit 1
fi
./cake --quiet tests/helloworld_c.c --variant=release
result=$(bin/helloworld_c)
if [[ $result != "release 1" ]]; then
echo test 5: Incorrect variant: $result
exit 1
fi
result=$(bin/helloworld_c extra args)
if [[ $result != "release 3" ]]; then
echo test 6: Incorrect args: $result
exit 1
fi
#
# Test that c compilation picks up //#CFLAGS
#
./cake --quiet tests/test_cflags.c
if [[ $? != 0 ]]; then
echo test 7: cake does not detect the //#CFLAGS in a c file
exit 1
fi
#
# Test static library compilation
#
rm -rf bin/*
./cake --static-library tests/get_numbers.cpp
./cake tests/test_library.cpp
result=$(bin/test_library)
if [[ $result != "1 2" ]]; then
echo test 8: Incorrect result from static library test: $result
exit 1
fi
#
# Test dynamic library compilation
#
rm -rf bin/*
./cake --dynamic-library tests/get_numbers.cpp
LD_LIBRARY_PATH=bin
export LD_LIBRARY_PATH
./cake tests/test_library.cpp
result=$(bin/test_library)
unset LD_LIBRARY_PATH
if [[ $result != "1 2" ]]; then
echo test 9: Incorrect result from dynamic library test: $result
exit 1
fi
|
mgrosvenor/q2pc
|
build/cake/test.sh
|
Shell
|
bsd-3-clause
| 1,862 |
# Copyright (c) 2014 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
NACLPORTS_CPPFLAGS+=" -I${START_DIR}"
EXECUTABLES="test/unittests"
if [ "${NACL_LIBC}" = "glibc" ]; then
EXECUTABLES+=" test/elf_reader test/library_dependencies"
fi
if [ "${NACL_SHARED}" = "1" ]; then
NACLPORTS_CFLAGS+=" -fPIC"
NACLPORTS_CXXFLAGS+=" -fPIC"
EXECUTABLES+=
fi
if [ "${NACL_LIBC}" = "newlib" ]; then
NACLPORTS_CPPFLAGS+=" -I${NACLPORTS_INCLUDE}/glibc-compat"
fi
ConfigureStep() {
MakeDir ${BUILD_DIR}
cp -rf ${START_DIR}/* ${BUILD_DIR}
}
BuildStep() {
SetupCrossEnvironment
export TOOLCHAIN
export NACL_SHARED
DefaultBuildStep
}
TestStep() {
if [ "${NACL_LIBC}" = "glibc" ]; then
SetupCrossEnvironment
export TOOLCHAIN
export NACL_SHARED
LogExecute make test
fi
if [ "${TOOLCHAIN}" = "pnacl" ]; then
RunSelLdrCommand test/unittests
else
LogExecute test/unittests.sh
fi
}
InstallStep() {
MakeDir ${DESTDIR_LIB}
LogExecute cp libnacl_spawn.a ${DESTDIR_LIB}
if [ "${NACL_SHARED}" = "1" ]; then
LogExecute cp libnacl_spawn.so ${DESTDIR_LIB}
fi
LogExecute cp libcli_main.a ${DESTDIR_LIB}
MakeDir ${DESTDIR_INCLUDE}
LogExecute cp -f ${START_DIR}/include/spawn.h ${DESTDIR_INCLUDE}/
LogExecute cp -f ${START_DIR}/include/nacl_main.h ${DESTDIR_INCLUDE}/
if [ "${TOOLCHAIN}" = "bionic" ]; then
LogExecute cp -f ${START_DIR}/include/bsd_spawn.h ${DESTDIR_INCLUDE}/
fi
}
|
yeyus/naclports
|
ports/nacl-spawn/build.sh
|
Shell
|
bsd-3-clause
| 1,537 |
#!/bin/bash
fw_depends java8 maven
mvn clean package
cd target/dist
unzip *.zip
APP_ENTRY=com.techempower.act.AppEntry
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
BASE=$DIR/target/dist
if [[ $DIR == *"dist" ]]; then
BASE=$DIR
fi
CP=$BASE/classes:$BASE/lib/*
echo
echo CLASSPATH: $CP
echo
JAVA_OPTS="-Djava.security.egd=file:/dev/./urandom -Xms1G -Xmx1G -Xss320k -XX:+UseNUMA -XX:+UseParallelGC -XX:+AggressiveOpts"
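# NOTE: $GROUP below is assumed to be provided by the surrounding benchmark
# environment; it is not defined in this script.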
java -server $JAVA_OPTS -Dapp.mode=prod -Dapp.nodeGroup=$GROUP -Dprofile=json_plaintext -Dxio.worker_threads.max=256 -cp "$CP" $APP_ENTRY
|
saturday06/FrameworkBenchmarks
|
frameworks/Java/act/setup-json-plaintext.sh
|
Shell
|
bsd-3-clause
| 572 |
#!/bin/bash
# warning: this test is useful to check if training fails, and what speed you can achieve
# the toy datasets are too small to obtain useful translation results,
# and hyperparameters are chosen for speed, not for quality.
# For a setup that preprocesses and trains a larger data set,
# check https://github.com/rsennrich/wmt16-scripts/tree/master/sample
mkdir -p models
export THEANO_FLAGS=$THEANO_FLAGS,floatX=float16
../nematus/nmt.py \
--model models/model.npz \
--datasets data/corpus.en data/corpus.de \
--dictionaries data/vocab.en.json data/vocab.de.json \
--dim_word 512 \
--dim 1024 \
--n_words_src 30000 \
--n_words 30000 \
--maxlen 50 \
--optimizer adam \
--lrate 0.00001 \
--batch_size 80 \
--no_shuffle \
--dispFreq 500 \
--finish_after 500
|
Proyag/nematus
|
test/test_train_float16_midbatch_largemodel.sh
|
Shell
|
bsd-3-clause
| 798 |
#!/usr/bin/env bash
THE_BASE_DIR_PATH=$(cd -P -- "$(dirname -- "$0")" && pwd -P)
source $THE_BASE_DIR_PATH/_init.sh
rm $THE_SELECT_LIST_FILE_PATH -f
echo 'rm' $THE_SELECT_LIST_FILE_PATH
|
samwhelp/fix-ubuntu-1404
|
misc/bin/select-remove.sh
|
Shell
|
mit
| 188 |
#
# Copyright (C) 2010 OpenWrt.org
#
. /lib/ramips.sh
PART_NAME=firmware
RAMFS_COPY_DATA=/lib/ramips.sh
platform_check_image() {
local board=$(ramips_board_name)
local magic="$(get_magic_long "$1")"
[ "$#" -gt 1 ] && return 1
case "$board" in
3g150b|\
3g300m|\
a5-v11|\
ac1200pro|\
ai-br100|\
air3gii|\
all0239-3g|\
all0256n|\
all5002|\
all5003|\
ar725w|\
asl26555|\
awapn2403|\
awm002-evb|\
awm003-evb|\
bc2|\
broadway|\
carambola|\
cf-wr800n|\
cs-qr10|\
d105|\
d240|\
dap-1350|\
db-wrt01|\
dcs-930|\
dcs-930l-b1|\
dir-300-b1|\
dir-300-b7|\
dir-320-b1|\
dir-600-b1|\
dir-615-d|\
dir-615-h1|\
dir-620-a1|\
dir-620-d1|\
dir-810l|\
duzun-dm06|\
dwr-512-b|\
e1700|\
esr-9753|\
ex2700|\
f7c027|\
firewrt|\
fonera20n|\
freestation5|\
gl-mt300a|\
gl-mt300n|\
gl-mt750|\
hc5*61|\
hg255d|\
hlk-rm04|\
hpm|\
ht-tm02|\
hw550-3g|\
ip2202|\
jhr-n805r|\
jhr-n825r|\
jhr-n926r|\
kn_rc|\
kn_rf|\
kng_rc|\
linkits7688|\
linkits7688d|\
m2m|\
m3|\
m4|\
mac1200rv2|\
microwrt|\
miniembplug|\
miniembwifi|\
miwifi-mini|\
miwifi-nano|\
mlw221|\
mlwg2|\
mofi3500-3gn|\
mpr-a1|\
mpr-a2|\
mr-102n|\
mt7628|\
mzk-750dhp|\
mzk-dp150n|\
mzk-ex300np|\
mzk-ex750np|\
mzk-w300nh2|\
mzk-wdpr|\
nbg-419n|\
nbg-419n2|\
newifi-d1|\
nixcore|\
nw718|\
omega2|\
omega2p|\
oy-0001|\
pbr-d1|\
pbr-m1|\
psg1208|\
psg1218|\
psr-680w|\
px-4885|\
rb750gr3|\
re6500|\
rp-n53|\
rt5350f-olinuxino|\
rt5350f-olinuxino-evb|\
rt-g32-b1|\
rt-n10-plus|\
rt-n13u|\
rt-n14u|\
rt-n15|\
rt-n56u|\
rut5xx|\
sap-g3200u3|\
sk-wb8|\
sl-r7205|\
tew-691gr|\
tew-692gr|\
tew-714tru|\
timecloud|\
tiny-ac|\
ur-326n4g|\
ur-336un|\
v22rw-2x2|\
vocore|\
vocore2|\
vr500|\
w150m|\
w2914nsv2|\
w306r-v20|\
w502u|\
wf-2881|\
whr-1166d|\
whr-300hp2|\
whr-600d|\
whr-g300n|\
widora-neo|\
witi|\
wizfi630a|\
wl-330n|\
wl-330n3g|\
wl-341v3|\
wl-351|\
wl-wn575a3|\
wli-tx4-ag300n|\
wlr-6000|\
wmr-300|\
wn3000rpv3|\
wnce2001|\
wndr3700v5|\
wr512-3gn|\
wr6202|\
wrh-300cr|\
wrtnode|\
wrtnode2r |\
wrtnode2p |\
wsr-600|\
wt1520|\
wt3020|\
wzr-agl300nh|\
x5|\
x8|\
y1|\
y1s|\
zbt-ape522ii|\
zbt-cpe102|\
zbt-wa05|\
zbt-we826|\
zbt-wg2626|\
zbt-wg3526|\
zbt-wr8305rt|\
zte-q7|\
youku-yk1)
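# 27051956 is the standard U-Boot uImage header magic number (0x27051956).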
[ "$magic" != "27051956" ] && {
echo "Invalid image type."
return 1
}
return 0
;;
3g-6200n|\
3g-6200nl|\
br-6475nd)
[ "$magic" != "43535953" ] && {
echo "Invalid image type."
return 1
}
return 0
;;
ar670w)
[ "$magic" != "6d000080" ] && {
echo "Invalid image type."
return 1
}
return 0
;;
c20i|\
c50|\
mr200)
[ "$magic" != "03000000" ] && {
echo "Invalid image type."
return 1
}
return 0
;;
cy-swr1100|\
dch-m225|\
dir-610-a1|\
dir-645|\
dir-860l-b1)
[ "$magic" != "5ea3a417" ] && {
echo "Invalid image type."
return 1
}
return 0
;;
ubnt-erx)
nand_do_platform_check "$board" "$1"
return $?;
;;
wcr-1166ds|\
wsr-1166)
[ "$magic" != "48445230" ] && {
echo "Invalid image type."
return 1
}
return 0
;;
esac
echo "Sysupgrade is not yet supported on $board."
return 1
}
platform_nand_pre_upgrade() {
local board=$(ramips_board_name)
case "$board" in
ubnt-erx)
platform_upgrade_ubnt_erx "$ARGV"
;;
esac
}
platform_pre_upgrade() {
local board=$(ramips_board_name)
case "$board" in
ubnt-erx)
nand_do_upgrade "$ARGV"
;;
esac
}
platform_do_upgrade() {
local board=$(ramips_board_name)
case "$board" in
*)
default_do_upgrade "$ARGV"
;;
esac
}
disable_watchdog() {
killall watchdog
( ps | grep -v 'grep' | grep '/dev/watchdog' ) && {
echo 'Could not disable watchdog'
return 1
}
}
blink_led() {
. /etc/diag.sh; set_state upgrade
}
append sysupgrade_pre_upgrade disable_watchdog
append sysupgrade_pre_upgrade blink_led
|
feckert/source
|
target/linux/ramips/base-files/lib/upgrade/platform.sh
|
Shell
|
gpl-2.0
| 3,850 |
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0-or-later
# Copyright (c) 2021 Petr Vorel <[email protected]>
TST_TESTFUNC=do_test
. tst_test.sh
do_test()
{
tst_res TINFO "expecting SUCCESS string passed from stdin"
read line
EXPECT_PASS [ "$line" = "SUCCESS" ]
}
tst_run
|
pevik/ltp
|
testcases/commands/shell/shell_pipe01.sh
|
Shell
|
gpl-2.0
| 273 |
#!/bin/sh
# /usr/lib/ddns/dynamic_dns_lucihelper.sh
#
#.Distributed under the terms of the GNU General Public License (GPL) version 2.0
#.2014-2016 Christian Schoenebeck <christian dot schoenebeck at gmail dot com>
# This script is used by luci-app-ddns
#
# variables in lowercase are read from /etc/config/ddns or passed in here as parameters
# variables in uppercase are defined inside these scripts as global vars
# variables in uppercase beginning with "__" are defined locally inside functions only
# set -vx #script debugger
. /usr/lib/ddns/dynamic_dns_functions.sh # global vars are also defined here
usage() {
cat << EOF
Usage:
$MYPROG [options] -- command
Commands:
get_local_ip using given INTERFACE or NETWORK or SCRIPT or URL
get_registered_ip for given FQDN
verify_dns given DNS-SERVER
verify_proxy given PROXY
start start given SECTION
reload force running ddns processes to reload changed configuration
restart restart all ddns processes
Parameters:
-6 => use_ipv6=1 (default 0)
-d DNS-SERVER => dns_server=SERVER[:PORT]
-f => force_ipversion=1 (default 0)
-g => is_glue=1 (default 0)
-i INTERFACE => ip_interface=INTERFACE; ip_source="interface"
-l FQDN => lookup_host=FQDN
-n NETWORK => ip_network=NETWORK; ip_source="network"
-p PROXY => proxy=[USER:PASS@]PROXY:PORT
-s SCRIPT => ip_script=SCRIPT; ip_source="script"
-t => force_dnstcp=1 (default 0)
-u URL => ip_url=URL; ip_source="web"
-S SECTION SECTION to start
-h => show this help and exit
-L => use_logfile=1 (default 0)
-v LEVEL => VERBOSE=LEVEL (default 0)
-V => show version and exit
EOF
}
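# Example invocation (hypothetical values, shown for illustration only):
#   ./dynamic_dns_lucihelper.sh -l myhost.example.com -d 8.8.8.8 -- get_registered_ip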
usage_err() {
printf %s\\n "$MYPROG: $@" >&2
usage >&2
exit 255
}
# preset some variables, wrong or not set in ddns-functions.sh
SECTION_ID="lucihelper"
LOGFILE="$ddns_logdir/$SECTION_ID.log"
DATFILE="$ddns_rundir/$SECTION_ID.$$.dat" # save stdout data of WGet and other extern programs called
ERRFILE="$ddns_rundir/$SECTION_ID.$$.err" # save stderr output of WGet and other extern programs called
DDNSPRG="/usr/lib/ddns/dynamic_dns_updater.sh"
VERBOSE=0 # no console logging
# global variables normally set by reading DDNS UCI configuration
use_syslog=0 # no syslog
use_logfile=0 # no logfile
use_ipv6=0 # Use IPv6 - default IPv4
force_ipversion=0 # Force IP Version - default 0 - No
force_dnstcp=0 # Force TCP on DNS - default 0 - No
is_glue=0 # Is glue record - default 0 - No
use_https=0 # not needed but must be set
while getopts ":6d:fghi:l:n:p:s:S:tu:Lv:V" OPT; do
case "$OPT" in
6) use_ipv6=1;;
d) dns_server="$OPTARG";;
f) force_ipversion=1;;
g) is_glue=1;;
i) ip_interface="$OPTARG"; ip_source="interface";;
l) lookup_host="$OPTARG";;
n) ip_network="$OPTARG"; ip_source="network";;
p) proxy="$OPTARG";;
s) ip_script="$OPTARG"; ip_source="script";;
t) force_dnstcp=1;;
u) ip_url="$OPTARG"; ip_source="web";;
h) usage; exit 255;;
L) use_logfile=1;;
v) VERBOSE=$OPTARG;;
S) SECTION=$OPTARG;;
V) printf %s\\n "ddns-scripts $VERSION"; exit 255;;
:) usage_err "option -$OPTARG missing argument";;
\?) usage_err "invalid option -$OPTARG";;
*) usage_err "unhandled option -$OPT $OPTARG";;
esac
done
shift $((OPTIND - 1 )) # OPTIND is 1 based
[ $# -eq 0 ] && usage_err "missing command"
__RET=0
case "$1" in
get_registered_ip)
[ -z "$lookup_host" ] && usage_err "command 'get_registered_ip': 'lookup_host' not set"
write_log 7 "-----> get_registered_ip IP"
IP=""
get_registered_ip IP
__RET=$?
[ $__RET -ne 0 ] && IP=""
printf "%s" "$IP"
;;
verify_dns)
[ -z "$dns_server" ] && usage_err "command 'verify_dns': 'dns_server' not set"
write_log 7 "-----> verify_dns '$dns_server'"
verify_dns "$dns_server"
__RET=$?
;;
verify_proxy)
[ -z "$proxy" ] && usage_err "command 'verify_proxy': 'proxy' not set"
write_log 7 "-----> verify_proxy '$proxy'"
verify_proxy "$proxy"
__RET=$?
;;
get_local_ip)
[ -z "$ip_source" ] && usage_err "command 'get_local_ip': 'ip_source' not set"
[ -n "$proxy" -a "$ip_source" = "web" ] && {
# proxy defined, used for ip_source=web
export HTTP_PROXY="http://$proxy"
export HTTPS_PROXY="http://$proxy"
export http_proxy="http://$proxy"
export https_proxy="http://$proxy"
}
# don't need IP only the return code
IP=""
if [ "$ip_source" = "web" -o "$ip_source" = "script" ]; then
# we wait only 3 seconds for an
# answer from "web" or "script"
write_log 7 "-----> timeout 3 -- get_local_ip IP"
timeout 3 -- get_local_ip IP
else
write_log 7 "-----> get_local_ip IP"
get_local_ip IP
fi
__RET=$?
;;
start)
[ -z "$SECTION" ] && usage_err "command 'start': 'SECTION' not set"
if [ $VERBOSE -eq 0 ]; then # start in background
$DDNSPRG -v 0 -S $SECTION -- start &
else
$DDNSPRG -v $VERBOSE -S $SECTION -- start
fi
;;
reload)
$DDNSPRG -- reload
;;
restart)
$DDNSPRG -- stop
sleep 1
$DDNSPRG -- start
;;
*)
__RET=255
;;
esac
# remove out and err file
[ -f $DATFILE ] && rm -f $DATFILE
[ -f $ERRFILE ] && rm -f $ERRFILE
return $__RET
|
KurdyMalloy/packages
|
net/ddns-scripts/files/dynamic_dns_lucihelper.sh
|
Shell
|
gpl-2.0
| 5,333 |
#!/bin/sh
# Copyright (C) 2010 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
. lib/test
aux prepare_vg 3
pvchange --metadataignore y $dev1
lvcreate -m 1 -l 1 -n mirror $vg
lvchange -a n $vg/mirror
lvcreate -l 1 -n lv1 $vg "$dev1"
# try to just change metadata; we expect the new version (with MISSING_PV set
# on the reappeared volume) to be written out to the previously missing PV
aux disable_dev "$dev1"
lvremove $vg/mirror
not vgck $vg 2>&1 | tee log
grep "missing 1 physical volume" log
not lvcreate -m 1 -l 1 -n mirror $vg # write operations fail
aux enable_dev "$dev1"
lvcreate -m 1 -l 1 -n mirror $vg # no MDA => automatically restored
vgck $vg
|
Jajcus/lvm2
|
test/shell/nomda-restoremissing.sh
|
Shell
|
gpl-2.0
| 1,041 |
#!/usr/bin/env bash
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# !!!EXPERIMENTAL!!! Upgrade a K8s cluster from routes to IP aliases for
# node connectivity on GCE. This is only for migration.
set -o errexit
set -o nounset
set -o pipefail
if [[ "${KUBERNETES_PROVIDER:-gce}" != "gce" ]]; then
echo "ERR: KUBERNETES_PROVIDER must be gce" >&2
exit 1
fi
KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../..
source "${KUBE_ROOT}/hack/lib/util.sh"
source "${KUBE_ROOT}/cluster/kube-util.sh"
# Print the number of routes used for K8s cluster node connectivity.
#
# Assumed vars:
# PROJECT
function get-k8s-node-routes-count() {
local k8s_node_routes_count
k8s_node_routes_count=$(gcloud compute routes list \
--project="${PROJECT}" --filter='description=k8s-node-route' \
--format='value(name)' | wc -l)
echo -n "${k8s_node_routes_count}"
}
# Detect the subnetwork where the K8s cluster resides.
#
# Assumed vars:
# KUBE_MASTER
# PROJECT
# ZONE
# Vars set:
# IP_ALIAS_SUBNETWORK
function detect-k8s-subnetwork() {
local subnetwork_url
subnetwork_url=$(gcloud compute instances describe \
"${KUBE_MASTER}" --project="${PROJECT}" --zone="${ZONE}" \
--format='value(networkInterfaces[0].subnetwork)')
if [[ -n ${subnetwork_url} ]]; then
IP_ALIAS_SUBNETWORK=${subnetwork_url##*/}
fi
}
# Set IP_ALIAS_SUBNETWORK's allowSubnetCidrRoutesOverlap to a boolean value.
# $1: true or false for the desired allowSubnetCidrRoutesOverlap.
#
# Assumed vars:
# IP_ALIAS_SUBNETWORK
# GCE_API_ENDPOINT
# PROJECT
# REGION
function set-allow-subnet-cidr-routes-overlap() {
local allow_subnet_cidr_routes_overlap
allow_subnet_cidr_routes_overlap=$(gcloud compute networks subnets \
describe "${IP_ALIAS_SUBNETWORK}" --project="${PROJECT}" --region="${REGION}" \
--format='value(allowSubnetCidrRoutesOverlap)')
local allow_overlap=$1
if [ "${allow_subnet_cidr_routes_overlap,,}" = "${allow_overlap}" ]; then
echo "Subnet ${IP_ALIAS_SUBNETWORK}'s allowSubnetCidrRoutesOverlap is already set as $1"
return
fi
echo "Setting subnet \"${IP_ALIAS_SUBNETWORK}\" allowSubnetCidrRoutesOverlap to $1"
local fingerprint
fingerprint=$(gcloud compute networks subnets describe \
"${IP_ALIAS_SUBNETWORK}" --project="${PROJECT}" --region="${REGION}" \
--format='value(fingerprint)')
local access_token
access_token=$(gcloud auth print-access-token)
local request="{\"allowSubnetCidrRoutesOverlap\":$1, \"fingerprint\":\"${fingerprint}\"}"
local subnetwork_url
subnetwork_url="${GCE_API_ENDPOINT}projects/${PROJECT}/regions/${REGION}/subnetworks/${IP_ALIAS_SUBNETWORK}"
until curl -s --header "Content-Type: application/json" --header "Authorization: Bearer ${access_token}" \
-X PATCH -d "${request}" "${subnetwork_url}" --output /dev/null; do
printf "."
sleep 1
done
}
# Add secondary ranges to K8s subnet.
#
# Assumed vars:
# IP_ALIAS_SUBNETWORK
# PROJECT
# REGION
# CLUSTER_IP_RANGE
# SERVICE_CLUSTER_IP_RANGE
function add-k8s-subnet-secondary-ranges() {
local secondary_ranges
secondary_ranges=$(gcloud compute networks subnets describe "${IP_ALIAS_SUBNETWORK}" \
--project="${PROJECT}" --region="${REGION}" \
--format='value(secondaryIpRanges)')
if [[ "${secondary_ranges}" =~ "pods-default" && "${secondary_ranges}" =~ "services-default" ]]; then
echo "${secondary_ranges} already contains both pods-default and services-default secondary ranges"
return
fi
echo "Adding secondary ranges: pods-default (${CLUSTER_IP_RANGE}), services-default (${SERVICE_CLUSTER_IP_RANGE})"
until gcloud compute networks subnets update "${IP_ALIAS_SUBNETWORK}" \
--project="${PROJECT}" --region="${REGION}" \
--add-secondary-ranges="pods-default=${CLUSTER_IP_RANGE},services-default=${SERVICE_CLUSTER_IP_RANGE}"; do
printf "."
sleep 1
done
}
# Delete all K8s node routes.
#
# Assumed vars:
# PROJECT
function delete-k8s-node-routes() {
local -a routes
local -r batch=200
routes=()
while IFS=$'\n' read -r route; do
routes+=( "${route}" )
done < <(gcloud compute routes list \
--project="${PROJECT}" --filter='description=k8s-node-route' \
--format='value(name)')
while (( "${#routes[@]}" > 0 )); do
echo Deleting k8s node routes "${routes[*]::${batch}}"
gcloud compute routes delete --project "${PROJECT}" --quiet "${routes[@]::${batch}}"
routes=( "${routes[@]:${batch}}" )
done
}
detect-project
detect-master
k8s_node_routes_count=$(get-k8s-node-routes-count)
if [[ "${k8s_node_routes_count}" -eq 0 ]]; then
echo "No k8s node routes found and IP alias should already be enabled. Exiting..."
exit 0
fi
echo "Found ${k8s_node_routes_count} K8s node routes. Proceeding to upgrade them to IP aliases based connectivity..."
detect-k8s-subnetwork
if [ -z "${IP_ALIAS_SUBNETWORK}" ]; then
echo "No k8s cluster subnetwork found. Exiting..."
exit 1
fi
echo "k8s cluster sits on subnetwork \"${IP_ALIAS_SUBNETWORK}\""
set-allow-subnet-cidr-routes-overlap true
add-k8s-subnet-secondary-ranges
echo "Changing K8s master envs and restarting..."
export KUBE_GCE_IP_ALIAS_SUBNETWORK=${IP_ALIAS_SUBNETWORK}
export KUBE_GCE_NODE_IPAM_MODE="IPAMFromCluster"
export KUBE_GCE_ENABLE_IP_ALIASES=true
export SECONDARY_RANGE_NAME="pods-default"
export STORAGE_BACKEND="etcd3"
export STORAGE_MEDIA_TYPE="application/vnd.kubernetes.protobuf"
export ETCD_IMAGE=3.4.13-3
export ETCD_VERSION=3.4.13
# Upgrade master with updated kube envs
"${KUBE_ROOT}/cluster/gce/upgrade.sh" -M -l
delete-k8s-node-routes
set-allow-subnet-cidr-routes-overlap false
|
rnaveiras/kubernetes
|
cluster/gce/upgrade-aliases.sh
|
Shell
|
apache-2.0
| 6,149 |
#!/bin/bash
cd /convert/
python convert.py $1
|
gongweibao/cloud
|
docker/convert/run.sh
|
Shell
|
apache-2.0
| 46 |
#!/usr/bin/env bash
# Description:
# Script to extract the most security relevant details from a
# target SSL/TLS implementation by using ssl-cipher-check.
#
# Requires:
# - ssl-cipher-check.pl
# http://unspecific.com/ssl/
VERSION=0.1
echo ------------------------------------------------------
echo " $0 - ($VERSION) based on ssl-cipher-check.pl"
echo " Author: Abraham Aranguren @7a_ http://7-a.org"
echo ------------------------------------------------------
echo
if [ $# -ne 3 ]; then
echo "Usage: $0 <full path to ssl-cipher-check.pl> IP PORT"
exit
fi
SSL_CIPHER_CHECK=$1
HOST=$2
PORT=$3
RENEG_FILE='reneg.log'
RENEG_FILE_ERRORS='reneg_errors.log'
#echo "Before handshake.."
# Check if the target service speaks SSL/TLS (& check renegotiation)
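# (the "R" sent on stdin asks openssl s_client to attempt a renegotiation)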
(echo R; sleep 5) | openssl s_client -connect $HOST:$PORT > $RENEG_FILE 2> $RENEG_FILE_ERRORS &
pid=$!
sleep 5
#echo "After handshake.."
SSL_HANDSHAKE_LINES=$(cat $RENEG_FILE | wc -l)
if [ $SSL_HANDSHAKE_LINES -lt 15 ] ; then
# SSL handshake failed - Non SSL/TLS service
# If the target service does not speak SSL/TLS, openssl does not terminate
# Note: When bash is invoked as sh,
# it uses a sh compatibility mode where most modern features are
# turned off. sh doesn't recognize SIGKILL, but the bash
# invocation is using its builtin, and that builtin does.
# SIGINT = 2 (signal number)
kill -2 ${pid}
echo
echo "[*] SSL Checks skipped!: The host $HOST does not appear to speak SSL/TLS on port: $PORT"
echo
exit
else # SSL Handshake successful, proceed with check
echo
echo "[*] SSL Handshake Check OK: The host $HOST appears to speak SSL/TLS on port: $PORT"
echo
fi
echo [*] Analyzing SSL/TLS on $HOST:$PORT ...
echo [*] Step 1 - sslscan-based analysis
echo
DATE=$(date +%F_%R:%S)
echo "[*] ssl-cipher-check-based analysis (for comparison/assurance purposes)"
echo '[*] NOTE: If you get errors below, try running: "apt-get install gnutls-bin"'
OUTFILE=ssl_cipher_check_$DATE
LOGFILE=$OUTFILE.log
ERRFILE=$OUTFILE.err
echo
echo [*] Running ssl-cipher-check.pl on $HOST:$PORT...
#ssl-cipher-check.pl -va $HOST $PORT >> $LOGFILE 2>> $ERRFILE
$SSL_CIPHER_CHECK -va $HOST $PORT >> $LOGFILE 2>> $ERRFILE
echo
echo [*] Testing for SSLv2 ...
grep SSLv2 $LOGFILE | grep ENABLED
echo
echo [*] Testing for NULL cipher ...
grep NULL $LOGFILE | grep ENABLED
echo
echo [*] Testing weak ciphers ...
grep ENABLED $LOGFILE | grep WEAK
echo
echo [*] Testing strong ciphers ...
grep ENABLED $LOGFILE | grep STRONG
echo
echo [*] Default cipher: ...
grep -A 1 Default $LOGFILE | grep -v Default| sed 's/ *//'
echo
echo [*] New files created:
find . -size 0 -name '*.err' -delete # Delete empty error files
ls -l $OUTFILE.* # List new files
echo
echo
echo [*] done
echo
|
DarKnight--/owtf
|
scripts/ssl/verify_ssl_cipher_check.sh
|
Shell
|
bsd-3-clause
| 2,796 |
#!/usr/bin/env bash
#
# Copyright (c) 2019-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
export LC_ALL=C.UTF-8
export CONTAINER_NAME=ci_native_qt5
export DOCKER_NAME_TAG=ubuntu:18.04 # Check that bionic gcc-7 can compile our c++17 and run our functional tests in python3, see doc/dependencies.md
export PACKAGES="python3-zmq qtbase5-dev qttools5-dev-tools libdbus-1-dev libharfbuzz-dev"
export DEP_OPTS="NO_QT=1 NO_UPNP=1 NO_NATPMP=1 DEBUG=1 ALLOW_HOST_PACKAGES=1"
export TEST_RUNNER_EXTRA="--previous-releases --coverage --extended --exclude feature_dbcrash" # Run extended tests so that coverage does not fail, but exclude the very slow dbcrash
export RUN_UNIT_TESTS_SEQUENTIAL="true"
export RUN_UNIT_TESTS="false"
export GOAL="install"
export PREVIOUS_RELEASES_TO_DOWNLOAD="v0.15.2 v0.16.3 v0.17.2 v0.18.1 v0.19.1"
export BITCOIN_CONFIG="--enable-zmq --with-libs=no --with-gui=qt5 --enable-glibc-back-compat --enable-reduce-exports
--enable-debug --disable-fuzz-binary CFLAGS=\"-g0 -O2 -funsigned-char\" CXXFLAGS=\"-g0 -O2 -funsigned-char\" --with-boost-process"
|
Sjors/bitcoin
|
ci/test/00_setup_env_native_qt5.sh
|
Shell
|
mit
| 1,195 |
#! /bin/sh -e
# tup - A file-based build system
#
# Copyright (C) 2009-2015 Mike Shal <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# Like t5038, but with multiple wrong files.
. ./tup.sh
cat > Tupfile << HERE
: |> touch foo; touch bar; touch baz |> foo
HERE
tup touch Tupfile
update_fail
cat > Tupfile << HERE
: |> touch foo |> foo
HERE
tup touch Tupfile
update
check_exist foo
check_not_exist bar baz
eotup
|
p2rkw/tup
|
test/t5039-wrong-target5.sh
|
Shell
|
gpl-2.0
| 1,030 |
#!/bin/sh
set -e
if [ ! $VERSION ]; then
VERSION=`git describe --tags`
fi
echo $VERSION | tr -d "\n"
|
aeppert/massive-octo-spice
|
version.sh
|
Shell
|
lgpl-3.0
| 108 |
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
help() {
printf " -c use customized key/cert\n"
printf " -k path of private key\n"
printf " -p path of certificate of public key\n"
printf " -t path of certificate chain\n"
printf " -u path of root ca certificate \n"
}
config_apache2_conf() {
local ip=$1
local srvr=$2
sed -i 's/ssl-cert-snakeoil.key/cert_apache.key/' /etc/apache2/sites-enabled/vhost*
sed -i 's/ssl-cert-snakeoil.pem/cert_apache.crt/' /etc/apache2/sites-enabled/vhost*
if [ -f /etc/ssl/certs/cert_apache_chain.crt ]
then
sed -i -e "s/#SSLCertificateChainFile.*/SSLCertificateChainFile \/etc\/ssl\/certs\/cert_apache_chain.crt/" /etc/apache2/sites-enabled/vhost*
fi
}
copy_certs_apache2() {
local certdir=$(dirname $0)/certs
local mydir=$(dirname $0)
if [ -f $customPrivKey ] && [ -f $customPrivCert ] ; then
cp $customPrivKey /etc/ssl/private/cert_apache.key && cp $customPrivCert /etc/ssl/certs/cert_apache.crt
fi
if [ ! -z "$customCertChain" ] && [ -f "$customCertChain" ] ; then
cp $customCertChain /etc/ssl/certs/cert_apache_chain.crt
fi
return 0
}
cflag=
cpkflag=
cpcflag=
cccflag=
customPrivKey=$(dirname $0)/certs/realhostip.key
customPrivCert=$(dirname $0)/certs/realhostip.crt
customCertChain=
customCACert=
publicIp=
hostName=
keyStore=$(dirname $0)/certs/realhostip.keystore
defaultJavaKeyStoreFile=/etc/ssl/certs/java/cacerts
defaultJavaKeyStorePass="changeit"
aliasName="CPVMCertificate"
storepass="vmops.com"
while getopts 'i:h:k:p:t:u:c' OPTION
do
case $OPTION in
c) cflag=1
;;
k) cpkflag=1
customPrivKey="$OPTARG"
;;
p) cpcflag=1
customPrivCert="$OPTARG"
;;
t) cccflag=1
customCertChain="$OPTARG"
;;
u) ccacflag=1
customCACert="$OPTARG"
;;
i) publicIp="$OPTARG"
;;
h) hostName="$OPTARG"
;;
?) help
;;
esac
done
if [ -z "$publicIp" ] || [ -z "$hostName" ]
then
help
exit 1
fi
if [ "$cflag" == "1" ]
then
if [ "$cpkflag$cpcflag" != "11" ]
then
help
exit 1
fi
if [ ! -f "$customPrivKey" ]
then
printf "private key file does not exist\n"
exit 2
fi
if [ ! -f "$customPrivCert" ]
then
printf "public certificate does not exist\n"
exit 3
fi
if [ "$cccflag" == "1" ]
then
if [ ! -f "$customCertChain" ]
then
printf "certificate chain does not exist\n"
exit 4
fi
fi
fi
copy_certs_apache2
if [ $? -ne 0 ]
then
echo "Failed to copy certificates"
exit 2
fi
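# If a custom root CA certificate was supplied, replace any previous entry for
# it in the keystore and merge in the default Java CA bundle.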
if [ -f "$customCACert" ]
then
keytool -delete -alias $aliasName -keystore $keyStore -storepass $storepass -noprompt || true
keytool -import -alias $aliasName -keystore $keyStore -storepass $storepass -noprompt -file $customCACert
keytool -importkeystore -srckeystore $defaultJavaKeyStoreFile -destkeystore $keyStore -srcstorepass $defaultJavaKeyStorePass -deststorepass $storepass -noprompt
fi
config_apache2_conf $publicIp $hostName
systemctl restart apache2
|
DaanHoogland/cloudstack
|
systemvm/agent/scripts/config_ssl.sh
|
Shell
|
apache-2.0
| 3,814 |
jspm bundle-sfx main + voxel-demo -mi
http-server
|
pniederlag/jspm-cli
|
testlibs/bundle-sfx.sh
|
Shell
|
apache-2.0
| 50 |
#!/bin/bash
#Aqueduct - Compliance Remediation Content
#Copyright (C) 2011,2012 Vincent C. Passaro ([email protected])
#
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; either version 2
#of the License, or (at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor,
#Boston, MA 02110-1301, USA.
######################################################################
#By Tummy a.k.a Vincent C. Passaro #
#Vincent[.]Passaro[@]gmail[.]com #
#www.vincentpassaro.com #
######################################################################
#_____________________________________________________________________
#| Version | Change Information | Author | Date |
#|__________|_______________________|____________________|____________|
#| 1.0 | Initial Script | Vincent C. Passaro | 20-oct-2011|
#| | Creation | | |
#|__________|_______________________|____________________|____________|
#
#
# - updated on Shannon Mitchell([email protected])
# on 15-jan-2012 to fix a typo and added .bash_logout and .bash_login to
# the list. Fixed some logic with the find command.
#######################DISA INFORMATION###############################
#Group ID (Vulid): V-22361
#Group Title: GEN001870
#Rule ID: SV-26481r1_rule
#Severity: CAT II
#Rule Version (STIG-ID): GEN001870
#Rule Title: Local initialization files must be group-owned by the user's
#primary group or root.
#
#Vulnerability Discussion: Local initialization files are used to configure
#the user's shell environment upon login. Malicious modification of these
#files could compromise accounts upon logon.
#
#Responsibility: System Administrator
#IAControls: ECLP-1
#
#Check Content:
#Check user home directories for local initialization files group-owned by a
#group other than the user's primary group or root.
#
#Procedure:
# FILES=" .login .cschrc .logout .profile .bash_profile .bashrc .bash_logout
#.env .dtprofile .dispatch .emacs .exrc";
# for PWLINE in `cut -d: -f4,6 /etc/passwd`; do HOMEDIR=$(echo ${PWLINE}|cut
#-d: -f2);GROUP=$(echo ${PWLINE} | cut -d: -f1);for INIFILE in $FILES;do stat
#-c %g/%G:%n ${HOMEDIR}/${INIFILE} 2>null|egrep -v "${GROUP}";done;done
#
#If any file is not group-owned by root or the user's primary GID, this is
#a finding.
#
#Fix Text: Change the group-owner of the local initialization file to the
#user's primary group, or root.
# chgrp <user's primary GID> <user's local initialization file>
#
#Procedure:
# FILES=" .login .cschrc .logout .profile .bash_profile .bashrc .bash_logout
#.env .dtprofile .dispatch .emacs .exrc";
# for PWLINE in `cut -d: -f4,6 /etc/passwd`; do HOMEDIR=$(echo ${PWLINE}|
#cut -d: -f2);GROUP=$(echo ${PWLINE} | cut -d: -f1);for INIFILE in $FILES;do
#MATCH=$(stat -c %g/%G:%n ${HOMEDIR}/${INIFILE} 2>null|egrep -c -v "${GROUP}")
#;if [ $MATCH != 0 ] ; then chgrp ${GROUP} ${HOMEDIR}/${INIFILE};fi;done;done
#######################DISA INFORMATION###############################
#Global Variables#
PDI=GEN001870
#Start-Lockdown
USERACCT=`egrep -v "^\+|^#|^root:|^daemon:|^ident:|^bin:|^sys:|^adm:|^smtp:|^uucp:|^nuucp:|^listen:|^lpd:|^lp:|^ingres:|^apache:|^oracle:|^oracle7:|^oracle8:|^oracle9:|^informix:|^news:|^nobody:|^nobody4:|^noaccess:|^sybase:|^tivoli:|^mqm:|^www:|^ftp:|^tftp:|^hpdb:|^sshd:|^invscout:|^gccs:|^secman:|^sysadmin:|^install:|^staff:|^COE:|^predmail:|^snmp:|^smmsp:|^sm:|^share:|^BIF:|^GCCS:|^JDISS:|^SA:|^SSO:|^SM:|^ftp:|^gccsrv:|^gtnsmint:|^irc:|^Imadmin:|^netadmin:|^halt:|^mail:|^games:|^rpm:|^vcsa:|^nscd:|^rpc:|^rpcuser:|^mailnull:|^pcap:|^xfs:|^ntp:|^gdm:|^sync:|^shutdown:|^halt:|^operator:|^gopher:|^nfsnobody:|^dbus:|^haldaemon:|^netdump:|^webalizer:|^pvm:|^mysql:|^mailman:|^dovecot:|^cyrus:|^amanda:|^pegasus:|^HPSMH:|^hpsmh:|^webadmind:|^webadmin:|^webservd:|^avahi:|^beagleidx:|^hsqldb:|^postfix:|^svctag:|^postgres:|^ids:|^IDS:|^distcache:|^DISTCACHE:|^named:|^canna:|^wnn:|^fax:|^quagga:|^htt" /etc/passwd | cut -d":" -f1`
ALREADY=0
Answer=4
DotFiles='( -name .cshrc
-o -name .login
-o -name .logout
-o -name .bash_logout
-o -name .bash_login
-o -name .profile
-o -name .bash_profile
-o -name .bashrc
-o -name .env
-o -name .dtprofile
-o -name .dispatch
-o -name .emacs
-o -name .exrc )'
for UserName in ${USERACCT}
do
if [ `echo $UserName | cut -c1` != '+' ]
then
PwTest=`grep "^${UserName}:" /etc/passwd | cut -d: -f6`
PwHomeDir=${PwTest:-NOVALUE}
if [ "${PwHomeDir}" != "NOVALUE" -a "${PwHomeDir}" != " " ]
then
if [ -d ${PwHomeDir} ]
then
if [ ${PwHomeDir} = '/' ]
then
echo 'WARNING: Home directory for "'${UserName}'"' \
'("'${PwHomeDir}'") excluded from check.'
else
#Here is where we fix
PGID=`egrep "^${UserName}:" /etc/passwd | awk -F':' '{print $4}'`
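# Walk the home directory (staying on the local filesystem) for the dot files
# listed above that are group-owned neither by the user's primary group nor by
# root, and hand each one to chgrp below.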
for filename in `find ${PwHomeDir} \
-xdev \
-type f \
! -fstype nfs \
${DotFiles} \
! -gid ${PGID} \
! -gid 0 \
-exec ls -adlL {} \; \
| tr -s " " | awk '{print $9}'`
do
chgrp $PGID $filename
done
fi
fi
fi
fi
done
|
quark-pat/CLIP
|
packages/aqueduct/aqueduct/compliance/Bash/STIG/rhel-5-beta/prod/GEN001870.sh
|
Shell
|
apache-2.0
| 6,126 |
test=01
sub_test=0
description="no linebreak at end of file."
output=`echo -n "hello${DELIMITER}world" | $bin 2>&1`
if [ "$output" != '"hello","world"' ]; then
test_status $test $sub_test "$description" FAIL
else
test_status $test $sub_test "$description" PASS
fi
|
dbushong/crush-tools
|
src/csvformat/tests/test_01.sh
|
Shell
|
apache-2.0
| 268 |
#!/bin/bash
main()
{
script_name=${0##*/}
echo " "
echo " "
echo " "${script_name}
echo " "
echo " Objective:"
echo " The script removes ftnlen-related arguments which are not necessary in lapack2flame."
echo " "
files="$(find . -maxdepth 1 -name "*.c")"
for file in ${files}; do
echo -ne " Removing ftnlen from ... ${file} "\\r
tmp_file=$(echo "${file}.back")
# tr -s '\t\n' ' ' < ${file} \ # remove return and tab leading a single line source
# sed 's/;/;\'$'\n/g' \ # add return after ;
# sed 's/{/{\'$'\n/g' \ # add return after {
# sed 's/}/}\'$'\n/g' \ # add return after }
# sed 's/\*\//\*\/\'$'\n/g' \ # add return after */
# sed 's/ */\ /g' \ # remove multiple spaces into a single space
# sed 's/, ftnlen [0-9a-zA-Z_]*//g' \ # remove ftnlen in the function definition
# sed 's/, (ftnlen)[0-9]*//g' \ # remove ftnlen in function arguments
# > ${tmp_file} # wrote it to file
#
# | sed 's/\(\#define *[a--zA-Z] *[()a-zA-Z_0-9]*\)/\1\'$'\n/g' \
#
# int s_cat(char *, char **, integer *, integer *, ftnlen);
# s_cat(ch__1, a__1, i__3, &c__2, (ftnlen)2);
( tr -s '\t\n' ' ' < ${file} \
| sed 's/;/;\'$'\n/g' \
| sed 's/\((equiv_[0-9])\)/\1\'$'\n/g' \
| sed 's/\(\#undef *[a-zA-Z]*\)/\1\'$'\n/g' \
| sed 's/{/{\'$'\n/g' \
| sed 's/}/}\'$'\n/g' \
| sed 's/\*\//\*\/\'$'\n/g' \
| sed 's/ */\ /g' \
| sed 's/, ftnlen *[0-9a-zA-Z_]*//g' \
| sed 's/, ( *ftnlen) *[0-9]*//g' \
> ${tmp_file} ;
rm -f ${file} ;
sed 's/, ftnlen//g' < ${tmp_file} > ${file} ; # remove remainder of ftnlen used alone
rm -f ${tmp_file} )
done
return 0
}
main "$@"
|
alishakiba/libflame
|
src/map/lapack2flamec/f2c/c/remove_ftnlen.sh
|
Shell
|
bsd-3-clause
| 1,992 |
#!/bin/bash
FN="pd.margene.1.0.st_3.12.0.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.13/data/annotation/src/contrib/pd.margene.1.0.st_3.12.0.tar.gz"
"https://bioarchive.galaxyproject.org/pd.margene.1.0.st_3.12.0.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-pd.margene.1.0.st/bioconductor-pd.margene.1.0.st_3.12.0_src_all.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-pd.margene.1.0.st/bioconductor-pd.margene.1.0.st_3.12.0_src_all.tar.gz"
)
MD5="f670b192e4b453f13bb7cae154b5de42"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
curl $URL > $TARBALL
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks.
if [[ $(uname -s) == "Linux" ]]; then
if md5sum -c <<<"$MD5 $TARBALL"; then
SUCCESS=1
break
fi
else if [[ $(uname -s) == "Darwin" ]]; then
if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
SUCCESS=1
break
fi
fi
fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
|
phac-nml/bioconda-recipes
|
recipes/bioconductor-pd.margene.1.0.st/post-link.sh
|
Shell
|
mit
| 1,466 |
# Rebuild the initrd:
# Skip if it is explicitly wanted to not rebuild the initrd:
if is_false $REBUILD_INITRAMFS ; then
Log "Skip recreating initrd (REBUILD_INITRAMFS is false)"
return 0
fi
# Skip if not needed but only when it is not explicitly wanted to rebuild the initrd in any case:
if ! is_true $REBUILD_INITRAMFS ; then
# During "rear recover" 260_recovery_storage_drivers.sh creates $TMP_DIR/storage_drivers
if ! test -s $TMP_DIR/storage_drivers ; then
Log "Skip recreating initrd: No needed storage drivers ('$TMP_DIR/storage_drivers' is empty)"
return 0
fi
# During "rear mkbackup/mkrescue" 260_storage_drivers.sh creates $VAR_DIR/recovery/storage_drivers
if cmp -s $TMP_DIR/storage_drivers $VAR_DIR/recovery/storage_drivers ; then
Log "Skip recreating initrd: '$TMP_DIR/storage_drivers' and '$VAR_DIR/recovery/storage_drivers' are the same"
return 0
fi
fi
# A longer time ago udev was optional on some distros.
# This changed and nowadays udev is not optional any more.
# See https://github.com/rear/rear/pull/1171#issuecomment-274442700
# But it is not necessarily an error if initrd cannot be re-created here
# because usually it works with the unchanged initrd from the backup restore.
if ! have_udev ; then
LogPrint "WARNING:
Cannot recreate initrd (no udev found).
It may work with the initrd 'as is' from the backup restore.
Check the recreated system (mounted at $TARGET_FS_ROOT)
and decide yourself, whether the system will boot or not.
"
return 0
fi
# Merge new drivers with previous initrd modules.
# We only add modules to the initrd, we don't take old ones out.
# This might be done better, but is not worth the risk.
# Set INITRD_MODULES from recovered system
if test -r $TARGET_FS_ROOT/etc/sysconfig/kernel ; then
    # In SLE12 RC2 /etc/sysconfig/kernel is a useless stub that contains only one line
# INITRD_MODULES=""
# Since SLE12 RC3 /etc/sysconfig/kernel does no longer exist, see bnc#895084 where
# in particular https://bugzilla.novell.com/show_bug.cgi?id=895084#c7 reads
# Best would be to add something like that:
# # This replaces old INITRD_MODULES= variable from /etc/sysconfig/kernel
# # force_drivers+="kernel_module1 kernel_module2 ..."
# in our /etc/dracut.conf.d/01-dist.conf file.
# And a similar comment to /etc/sysconfig/kernel
# # DO NOT USE THIS FILE ANYMORE. IF YOU WANT TO ENFORCE LOADING
# # SPECIFIC KERNEL MODULES SEE /etc/dracut.conf.d/01-dist.conf
    # # and the dracut (--force-drivers parameter) manpage.
# Because the comment above reads "probably not required" at least for now
# there is no support for force_drivers in /etc/dracut.conf.d/01-dist.conf.
source $TARGET_FS_ROOT/etc/sysconfig/kernel || Error "Could not source '$TARGET_FS_ROOT/etc/sysconfig/kernel'"
Log "Original INITRD_MODULES='$INITRD_MODULES'"
# Using array to split into words:
OLD_INITRD_MODULES=( $INITRD_MODULES )
    # To see what has been added by the migration process, the new modules are added to the
    # end of the list. To achieve this, we list the old modules twice in the variable
    # NEW_INITRD_MODULES and then add the new modules. Then we use "uniq -u" to keep only
    # the modules which appear exactly once in the list, so the resulting array contains
    # only the new modules.
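    # A minimal illustration of the trick with hypothetical module names:
    #   old="ahci ext4"; candidates="ext4 xfs"
    #   tr " " "\n" <<< "$old $old $candidates" | sort | uniq -u   # prints only "xfs"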
NEW_INITRD_MODULES=( $INITRD_MODULES $INITRD_MODULES $( cat $TMP_DIR/storage_drivers ) )
# uniq INITRD_MODULES
NEW_INITRD_MODULES=( $( tr " " "\n" <<< "${NEW_INITRD_MODULES[*]}" | sort | uniq -u ) )
Log "New INITRD_MODULES='${OLD_INITRD_MODULES[@]} ${NEW_INITRD_MODULES[@]}'"
sed -i -e '/^INITRD_MODULES/s/^.*$/#&\nINITRD_MODULES="'"${OLD_INITRD_MODULES[*]} ${NEW_INITRD_MODULES[*]}"'"/' $TARGET_FS_ROOT/etc/sysconfig/kernel
fi
my_udevtrigger
sleep 5
LogPrint "Running mkinitrd..."
# Run mkinitrd directly in chroot without a login shell in between (see https://github.com/rear/rear/issues/862).
# We need the mkinitrd binary in the chroot environment i.e. the mkinitrd binary in the recreated system.
# Normally we would use a login shell like: chroot $TARGET_FS_ROOT /bin/bash --login -c 'type -P mkinitrd'
# because otherwise there is no useful PATH (PATH is only /bin) so that 'type -P' won't find it
# but we cannot use a login shell because that contradicts https://github.com/rear/rear/issues/862
# so that we use a plain (non-login) shell and set a (hopefully) reasonable PATH:
local mkinitrd_binary=$( chroot $TARGET_FS_ROOT /bin/bash -c 'PATH=/sbin:/usr/sbin:/usr/bin:/bin type -P mkinitrd' )
# If there is no mkinitrd in the chroot environment plain 'chroot $TARGET_FS_ROOT' will hang up endlessly
# and then "rear recover" cannot be aborted with the usual [Ctrl]+[C] keys.
# Use plain $var because when var contains only blanks, test "$var" evaluates to true (test " " is true):
if test $mkinitrd_binary ; then
# mkinitrd calls other tools like find, xargs, wc ... without a full PATH.
# $PATH MUST BE SET so that mkinitrd can run successfully.
if chroot $TARGET_FS_ROOT /bin/bash -c "PATH=/sbin:/usr/sbin:/usr/bin:/bin $mkinitrd_binary" >&2 ; then
LogPrint "Recreated initrd ($mkinitrd_binary)."
else
LogPrint "WARNING:
Failed to create initrd ($mkinitrd_binary).
Check '$RUNTIME_LOGFILE' to see the error messages in detail
and decide yourself, whether the system will boot or not.
"
fi
else
LogPrint "WARNING:
Cannot create initrd (found no mkinitrd in the recreated system).
Check the recreated system (mounted at $TARGET_FS_ROOT)
and decide yourself, whether the system will boot or not.
"
fi
|
phracek/rear
|
usr/share/rear/finalize/SUSE_LINUX/i386/550_rebuild_initramfs.sh
|
Shell
|
gpl-3.0
| 5,660 |
#ipsec setup stop
#umount /var/tmp; mount /var/tmp
#umount /usr/local; mount /usr/local
: ==== start ====
TESTNAME=psk-pluto-02
source /testing/pluto/bin/eastlocal.sh
ipsec setup start
ipsec auto --add road--eastnet-psk
|
y-trudeau/openswan-patch-meraki
|
testing/pluto/psk-pluto-02/eastinit.sh
|
Shell
|
gpl-2.0
| 221 |
# $OpenBSD: sftp-cmds.sh,v 1.14 2013/06/21 02:26:26 djm Exp $
# Placed in the Public Domain.
# XXX - TODO:
# - chmod / chown / chgrp
# - -p flag for get & put
tid="sftp commands"
# test that these files are readable!
for i in `(cd /bin;echo l*)`
do
if [ -r $i ]; then
GLOBFILES="$GLOBFILES $i"
fi
done
# Path with embedded quote
QUOTECOPY=${COPY}".\"blah\""
QUOTECOPY_ARG=${COPY}'.\"blah\"'
# File with spaces
SPACECOPY="${COPY} this has spaces.txt"
SPACECOPY_ARG="${COPY}\ this\ has\ spaces.txt"
# File with glob metacharacters
GLOBMETACOPY="${COPY} [metachar].txt"
rm -rf ${COPY} ${COPY}.1 ${COPY}.2 ${COPY}.dd ${COPY}.dd2
mkdir ${COPY}.dd
verbose "$tid: lls"
(echo "lcd ${OBJ}" ; echo "lls") | ${SFTP} -D ${SFTPSERVER} 2>&1 | \
grep copy.dd >/dev/null 2>&1 || fail "lls failed"
verbose "$tid: lls w/path"
echo "lls ${OBJ}" | ${SFTP} -D ${SFTPSERVER} 2>&1 | \
grep copy.dd >/dev/null 2>&1 || fail "lls w/path failed"
verbose "$tid: ls"
echo "ls ${OBJ}" | ${SFTP} -D ${SFTPSERVER} >/dev/null 2>&1 \
|| fail "ls failed"
# XXX always successful
verbose "$tid: shell"
echo "!echo hi there" | ${SFTP} -D ${SFTPSERVER} >/dev/null 2>&1 \
|| fail "shell failed"
# XXX always successful
verbose "$tid: pwd"
echo "pwd" | ${SFTP} -D ${SFTPSERVER} >/dev/null 2>&1 \
|| fail "pwd failed"
# XXX always successful
verbose "$tid: lpwd"
echo "lpwd" | ${SFTP} -D ${SFTPSERVER} >/dev/null 2>&1 \
|| fail "lpwd failed"
# XXX always successful
verbose "$tid: quit"
echo "quit" | ${SFTP} -D ${SFTPSERVER} >/dev/null 2>&1 \
|| fail "quit failed"
# XXX always successful
verbose "$tid: help"
echo "help" | ${SFTP} -D ${SFTPSERVER} >/dev/null 2>&1 \
|| fail "help failed"
# XXX always successful
rm -f ${COPY}
verbose "$tid: get"
echo "get $DATA $COPY" | ${SFTP} -D ${SFTPSERVER} >/dev/null 2>&1 \
|| fail "get failed"
cmp $DATA ${COPY} || fail "corrupted copy after get"
rm -f ${COPY}
verbose "$tid: get quoted"
echo "get \"$DATA\" $COPY" | ${SFTP} -D ${SFTPSERVER} >/dev/null 2>&1 \
|| fail "get failed"
cmp $DATA ${COPY} || fail "corrupted copy after get"
if [ "$os" != "cygwin" ]; then
rm -f ${QUOTECOPY}
cp $DATA ${QUOTECOPY}
verbose "$tid: get filename with quotes"
echo "get \"$QUOTECOPY_ARG\" ${COPY}" | ${SFTP} -D ${SFTPSERVER} >/dev/null 2>&1 \
|| fail "get failed"
cmp ${COPY} ${QUOTECOPY} || fail "corrupted copy after get with quotes"
rm -f ${QUOTECOPY} ${COPY}
fi
rm -f "$SPACECOPY" ${COPY}
cp $DATA "$SPACECOPY"
verbose "$tid: get filename with spaces"
echo "get ${SPACECOPY_ARG} ${COPY}" | ${SFTP} -D ${SFTPSERVER} >/dev/null 2>&1 \
|| fail "get failed"
cmp ${COPY} "$SPACECOPY" || fail "corrupted copy after get with spaces"
rm -f "$GLOBMETACOPY" ${COPY}
cp $DATA "$GLOBMETACOPY"
verbose "$tid: get filename with glob metacharacters"
echo "get \"${GLOBMETACOPY}\" ${COPY}" | \
${SFTP} -D ${SFTPSERVER} >/dev/null 2>&1 || fail "get failed"
cmp ${COPY} "$GLOBMETACOPY" || \
fail "corrupted copy after get with glob metacharacters"
rm -f ${COPY}.dd/*
verbose "$tid: get to directory"
echo "get $DATA ${COPY}.dd" | ${SFTP} -D ${SFTPSERVER} >/dev/null 2>&1 \
|| fail "get failed"
cmp $DATA ${COPY}.dd/$DATANAME || fail "corrupted copy after get"
rm -f ${COPY}.dd/*
verbose "$tid: glob get to directory"
echo "get /bin/l* ${COPY}.dd" | ${SFTP} -D ${SFTPSERVER} >/dev/null 2>&1 \
|| fail "get failed"
for x in $GLOBFILES; do
cmp /bin/$x ${COPY}.dd/$x || fail "corrupted copy after get"
done
rm -f ${COPY}.dd/*
verbose "$tid: get to local dir"
(echo "lcd ${COPY}.dd"; echo "get $DATA" ) | ${SFTP} -D ${SFTPSERVER} >/dev/null 2>&1 \
|| fail "get failed"
cmp $DATA ${COPY}.dd/$DATANAME || fail "corrupted copy after get"
rm -f ${COPY}.dd/*
verbose "$tid: glob get to local dir"
(echo "lcd ${COPY}.dd"; echo "get /bin/l*") | ${SFTP} -D ${SFTPSERVER} >/dev/null 2>&1 \
|| fail "get failed"
for x in $GLOBFILES; do
cmp /bin/$x ${COPY}.dd/$x || fail "corrupted copy after get"
done
rm -f ${COPY}
verbose "$tid: put"
echo "put $DATA $COPY" | \
${SFTP} -D ${SFTPSERVER} >/dev/null 2>&1 || fail "put failed"
cmp $DATA ${COPY} || fail "corrupted copy after put"
if [ "$os" != "cygwin" ]; then
rm -f ${QUOTECOPY}
verbose "$tid: put filename with quotes"
echo "put $DATA \"$QUOTECOPY_ARG\"" | \
${SFTP} -D ${SFTPSERVER} >/dev/null 2>&1 || fail "put failed"
cmp $DATA ${QUOTECOPY} || fail "corrupted copy after put with quotes"
fi
rm -f "$SPACECOPY"
verbose "$tid: put filename with spaces"
echo "put $DATA ${SPACECOPY_ARG}" | \
${SFTP} -D ${SFTPSERVER} >/dev/null 2>&1 || fail "put failed"
cmp $DATA "$SPACECOPY" || fail "corrupted copy after put with spaces"
rm -f ${COPY}.dd/*
verbose "$tid: put to directory"
echo "put $DATA ${COPY}.dd" | ${SFTP} -D ${SFTPSERVER} >/dev/null 2>&1 \
|| fail "put failed"
cmp $DATA ${COPY}.dd/$DATANAME || fail "corrupted copy after put"
rm -f ${COPY}.dd/*
verbose "$tid: glob put to directory"
echo "put /bin/l? ${COPY}.dd" | ${SFTP} -D ${SFTPSERVER} >/dev/null 2>&1 \
|| fail "put failed"
for x in $GLOBFILES; do
cmp /bin/$x ${COPY}.dd/$x || fail "corrupted copy after put"
done
rm -f ${COPY}.dd/*
verbose "$tid: put to local dir"
(echo "cd ${COPY}.dd"; echo "put $DATA") | ${SFTP} -D ${SFTPSERVER} >/dev/null 2>&1 \
|| fail "put failed"
cmp $DATA ${COPY}.dd/$DATANAME || fail "corrupted copy after put"
rm -f ${COPY}.dd/*
verbose "$tid: glob put to local dir"
(echo "cd ${COPY}.dd"; echo "put /bin/l?") | ${SFTP} -D ${SFTPSERVER} >/dev/null 2>&1 \
|| fail "put failed"
for x in $GLOBFILES; do
cmp /bin/$x ${COPY}.dd/$x || fail "corrupted copy after put"
done
verbose "$tid: rename"
echo "rename $COPY ${COPY}.1" | ${SFTP} -D ${SFTPSERVER} >/dev/null 2>&1 \
|| fail "rename failed"
test -f ${COPY}.1 || fail "missing file after rename"
cmp $DATA ${COPY}.1 >/dev/null 2>&1 || fail "corrupted copy after rename"
verbose "$tid: rename directory"
echo "rename ${COPY}.dd ${COPY}.dd2" | \
${SFTP} -D ${SFTPSERVER} >/dev/null 2>&1 || \
fail "rename directory failed"
test -d ${COPY}.dd && fail "oldname exists after rename directory"
test -d ${COPY}.dd2 || fail "missing newname after rename directory"
verbose "$tid: ln"
echo "ln ${COPY}.1 ${COPY}.2" | ${SFTP} -D ${SFTPSERVER} >/dev/null 2>&1 || fail "ln failed"
test -f ${COPY}.2 || fail "missing file after ln"
cmp ${COPY}.1 ${COPY}.2 || fail "created file is not equal after ln"
verbose "$tid: ln -s"
rm -f ${COPY}.2
echo "ln -s ${COPY}.1 ${COPY}.2" | ${SFTP} -D ${SFTPSERVER} >/dev/null 2>&1 || fail "ln -s failed"
test -h ${COPY}.2 || fail "missing file after ln -s"
verbose "$tid: mkdir"
echo "mkdir ${COPY}.dd" | ${SFTP} -D ${SFTPSERVER} >/dev/null 2>&1 \
|| fail "mkdir failed"
test -d ${COPY}.dd || fail "missing directory after mkdir"
# XXX do more here
verbose "$tid: chdir"
echo "chdir ${COPY}.dd" | ${SFTP} -D ${SFTPSERVER} >/dev/null 2>&1 \
|| fail "chdir failed"
verbose "$tid: rmdir"
echo "rmdir ${COPY}.dd" | ${SFTP} -D ${SFTPSERVER} >/dev/null 2>&1 \
|| fail "rmdir failed"
test -d ${COPY}.1 && fail "present directory after rmdir"
verbose "$tid: lmkdir"
echo "lmkdir ${COPY}.dd" | ${SFTP} -D ${SFTPSERVER} >/dev/null 2>&1 \
|| fail "lmkdir failed"
test -d ${COPY}.dd || fail "missing directory after lmkdir"
# XXX do more here
verbose "$tid: lchdir"
echo "lchdir ${COPY}.dd" | ${SFTP} -D ${SFTPSERVER} >/dev/null 2>&1 \
|| fail "lchdir failed"
rm -rf ${COPY} ${COPY}.1 ${COPY}.2 ${COPY}.dd ${COPY}.dd2
rm -rf ${QUOTECOPY} "$SPACECOPY" "$GLOBMETACOPY"
|
eunsungc/gt6-RAMSES_8_5
|
gsi_openssh/source/regress/sftp-cmds.sh
|
Shell
|
apache-2.0
| 7,465 |
#!/bin/bash
# This is part of the rsyslog testbench, licensed under ASL 2.0
echo ======================================================================
# Check if inotify header exist
if [ -n "$(find /usr/include -name 'inotify.h' -print -quit)" ]; then
echo [imfile-readmode2.sh]
else
exit 77 # no inotify available, skip this test
fi
. $srcdir/diag.sh init
. $srcdir/diag.sh startup imfile-readmode2.conf
# write the beginning of the file
echo 'msgnum:0
msgnum:1' > rsyslog.input
echo 'msgnum:2' >> rsyslog.input
# sleep a little to give rsyslog a chance to begin processing
sleep 1
# write some more lines (see https://github.com/rsyslog/rsyslog/issues/144)
echo 'msgnum:3
msgnum:4' >> rsyslog.input
echo 'msgnum:5' >> rsyslog.input # this one shouldn't be written to the output file because of ReadMode 2
# give it time to finish
sleep 1
. $srcdir/diag.sh shutdown-when-empty # shut down rsyslogd when done processing messages
. $srcdir/diag.sh wait-shutdown # we need to wait until rsyslogd is finished!
# give it time to write the output file
sleep 1
## check if we have the correct number of messages
NUMLINES=$(grep -c HEADER ./rsyslog.out.log 2>/dev/null)
if [ -z $NUMLINES ]; then
echo "ERROR: expecting at least a match for HEADER, maybe rsyslog.out.log wasn't even written?"
cat ./rsyslog.out.log
exit 1
else
if [ ! $NUMLINES -eq 3 ]; then
echo "ERROR: expecting 3 headers, got $NUMLINES"
cat ./rsyslog.out.log
exit 1
fi
fi
## check if all the data we expect to get in the file is there
for i in {1..4}; do
grep msgnum:$i ./rsyslog.out.log > /dev/null 2>&1
if [ ! $? -eq 0 ]; then
echo "ERROR: expecting the string 'msgnum:$i', it's not there"
cat ./rsyslog.out.log
exit 1
fi
done
## if we got here, all is good :)
. $srcdir/diag.sh exit
|
RomeroMalaquias/rsyslog
|
tests/imfile-readmode2.sh
|
Shell
|
gpl-3.0
| 1,810 |
#!/bin/bash
## Set of SM tests for verifying iSCSI attach/detach refcounting
## Source the general system function file
. ./XE_api_library.sh
## source performance tests
. ./performance_functions.sh
# XXX The free and in use lists do not have to be stored in files. However,
# storing them in a file eases post-mortem debugging.
LOGFILE="/tmp/`date +%s-%N`"
FREEFILE="${LOGFILE}-Freelist"
INUSEFILE="${LOGFILE}-Usedlist"
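# File format (as used by the helpers below): ${FREEFILE} holds one free LUN id
# per line; ${INUSEFILE} holds one "LUNid SRid" pair per line for LUNs that
# currently back an SR.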
# Initializes the free list (writes one number per line in a file).
init_freelist()
{
# FIXME no need for a "for" loop, just do "seq 0 9 > ${FREEFILE}".
for i in `seq 0 9` ; do
echo ${i} >> ${FREEFILE}
done
touch ${INUSEFILE}
echo "LOGFILES: ${FREEFILE} and ${INUSEFILE}"
}
# Deletes the free list and in use list files.
cleanup_freelist()
{
rm ${INUSEFILE} ${FREEFILE}
}
# Returns a random free LUN. The LUN is neither removed from the free list nor
# added to the used list.
# Arguments:
# 1: the size of the free list
# (TODO Couldn't we get the size of the free list by doing "wc -l ${FREEFILE}"?)
getFreeLUN()
{
rnd=0
while [ $rnd -eq 0 ]; do
rnd=$RANDOM
let "rnd %= ${1}+1"
VAL=`awk "NR==$rnd" ${FREEFILE}`
if [ -z $VAL ]; then
rnd=0
fi
done
echo $VAL
}
# Returns a random SR that is currently using a LUN. The SR is not removed from
# the used list nor added to the free list.
# Arguments:
# 1: size of the in use list
# (TODO Couldn't we get the size of the in use list by doing "wc -l ${INUSEFILE}"?)
getUsedSR()
{
rnd=0
while [ $rnd -eq 0 ]; do
rnd=$RANDOM
let "rnd %= ${1}+1"
VAL=`awk "NR==$rnd" ${INUSEFILE} | cut -d" " -f2`
if [ -z $VAL ]; then
rnd=0
fi
done
echo $VAL
}
# Removes a "LUN SR" mapping from the in use list and returns the LUN to the
# free list.
# Arguments:
# 1: the SR
del_from_usedlist()
{
SR_ID=${1}
LUN=`get_LUN_fromSRID ${SR_ID}`
# Remove the line that contains the specified SR.
# FIXME replace with "grep -Ev '...' > ${INUSEFILE}"
line=`grep -n ${1} ${INUSEFILE} | cut -d: -f1`
awk "NR!=$line" ${INUSEFILE} > ${INUSEFILE}-tmp
mv ${INUSEFILE}-tmp ${INUSEFILE}
echo $LUN >> ${FREEFILE}
}
# Allocates the specified LUN by removing it from the free list and adding it to
# the used list.
# Args: LUNid SR_ID
# Arguments:
# 1: the LUN to allocate
# 2: the SR that will be using the LUN
del_from_freelist()
{
LUN=${1}
SRID=${2}
# Remove the line that contains the specified LUN.
line=`grep -n ^${LUN} ${FREEFILE} | cut -d: -f1`
awk "NR!=$line" ${FREEFILE} > ${FREEFILE}-tmp
mv ${FREEFILE}-tmp ${FREEFILE}
# Write to the in use file that the specified LUN is in use by the specified
# SR.
echo $LUN $SRID >> ${INUSEFILE}
}
# Retrieves the SR using the specified LUN.
# Args: LUNid
get_SRID_fromLUN()
{
#echo "Calling: awk '/^${1}/' ${INUSEFILE}"
grep ^${1} ${INUSEFILE} | cut -d' ' -f 2
}
# Retrieves the LUN the specified SR is using.
# Arguments:
# 1: The SR.
get_LUN_fromSRID()
{
#echo "Calling: awk '/^${1}/' ${INUSEFILE}"
grep ${1} ${INUSEFILE} | cut -d' ' -f 1
}
# Retrieves the SCSI ID of the specified LUN.
# Args: LUNid
get_SCSIid_fromLUN()
{
subject="Querying SCSIid for LUN${1}"
debug_test "$subject"
ADDR=`echo ${ISCSI_RECID} | cut -d',' -f1`
# XXX /etc/xensource/SCSIutil.smrt does not exist in a fresh XS
# installation, how is it created?
cmd="$REM_CMD python /etc/xensource/SCSIutil.smrt /dev/iscsi/${LISCSI_TARGET_ID}/${ADDR}/LUN${1}"
run $cmd
if [ $RUN_RC -ne 0 ]; then
debug_result 1
incr_exitval
else
debug_result 0
fi
test_exit 1
GLOBAL_RET=$RUN_OUTPUT
}
# Initializes the SCSIidcache array with the IDs of the iSCSI LUNs.
# Arguments: none.
init_SCSIid_list()
{
GLOBAL_DEBUG=0
verify_device
ret=$?
GLOBAL_DEBUG=1
# Initialize the LUNs if not already done, so we can get their IDs. Undo
# this operation before returning from this function (by setting the RESET
# flag).
if [ $ret -gt 0 ]; then
RESET=1
iqn_initialise
open_iscsi_start
discover_target
if [ $? -gt 0 ]; then
debug "discover_target failed"
test_exit 1
fi
attach_target ${ISCSI_RECID} ${ISCSI_RECIQN}
# Wait for devices to appear
sleep 5
else
RESET=0
fi
test_exit 0
verify_LUNcount
for i in `seq 0 9`; do
get_SCSIid_fromLUN ${i}
SCSIidcache[$i]=$GLOBAL_RET
debug "Retrieved SCSIid $GLOBAL_RET for LUN $i"
done
if [ $RESET == 1 ]; then
detach_target ${ISCSI_RECID} ${ISCSI_RECIQN}
fi
}
# Initializes the SR on the specified LUN.
# Arguments:
# 1: the LUN to use
setup_sr()
{
SCSIid=${SCSIidcache[${1}]}
smCreate "${SUBSTRATE_TYPE}" "${CONTENT_TYPE}" "${DEVSTRING}" \
"NULL" "NULL" "${IQN_INITIATOR_ID}" "${LISCSI_TARGET_IP}" \
"${LISCSI_TARGET_ID}" "${SCSIid}"
}
# FIXME This doesn't seem to work.
discover_LUNs()
{
# Call sr_create with no LUNid arg
GLOBAL_DEBUG=0
setup_sr
test_exit 0
GLOBAL_DEBUG=1
}
# Tells whether the iSCSI target exists under /dev.
# Arguments: none.
testDevPath()
{
cmd="$REM_CMD test -e /dev/iscsi/${LISCSI_TARGET_ID}"
run $cmd
GLOBAL_RET=$RUN_RC
}
# Ensures that all 10 LUNs are present.
# Arguments: none.
verify_LUNcount()
{
RC=0
for i in `seq 0 9`; do
DEVPATH="/dev/iscsi/${LISCSI_TARGET_ID}/LUN${i}"
cmd="$REM_CMD test -e ${DEVPATH}"
run $cmd
RC=`expr ${RC} + ${RUN_RC}`
done
if [ $RC -ne 0 ]; then
debug "Not all LUNs present, this test requires 10 LUNs."
debug "Unable to continue tests, exiting quietly."
cleanup_SRs
cleanup_freelist
exit 0
fi
}
# Tells whether the iSCSI reference path exists.
# Arguments: none.
# TODO What's an iSCSI reference path?
testIscsiRefPath()
{
cmd="$REM_CMD test -e /var/run/sr-ref/${LISCSI_TARGET_ID}"
run $cmd
GLOBAL_RET=$RUN_RC
}
# Prints the number of references currently held on the iSCSI target, i.e. the
# number of lines in /var/run/sr-ref/${LISCSI_TARGET_ID} (0 if the file does not exist).
# Arguments: none.
getRefCount()
{
testIscsiRefPath
if [ $GLOBAL_RET -ne 0 ]; then
echo 0
return
fi
cmd="$REM_CMD wc -l /var/run/sr-ref/${LISCSI_TARGET_ID}"
run $cmd
VAL=`echo ${RUN_OUTPUT} | cut -d" " -f1`
echo $VAL
}
# XXX incomplete
# Args: SRid
debug_SRDelete()
{
# Debug disabled
return
debug "Deleting SR [$1]"
LUN=`get_LUN_fromSRID ${1}`
debug " LUN: $LUN"
debug " SCSIid: ${SCSIidcache[${LUN}]}"
}
# Destroys all SRs by checking the in use list.
# Arguments: none.
cleanup_SRs()
{
debug "Cleaning up the SRs"
USEDSIZE=`wc -l ${INUSEFILE} | cut -d" " -f1`
if [ $USEDSIZE == 0 ]; then
return
fi
for i in `seq 1 ${USEDSIZE}`; do
UsedSR=`getUsedSR $USEDSIZE`
SR_unplug ${UsedSR}
debug_SRDelete ${UsedSR}
sm_SRDelete ${UsedSR}
test_exit 1
del_from_usedlist ${UsedSR}
done
}
# Performs a verification test: create some SRs and check the LUN reference
# counter.
run_manual_verify_test()
{
debug ""
debug "Running manual verification tests"
debug "================================="
debug ""
debug && debug "TEST 1: Add SR, verify refcount"
setup_sr 0
test_exit 1
del_from_freelist 0 ${SR_ID}
echo "Created SR [$SR_ID]"
debug_test "Verify refcount for LUN 0"
testDevPath
if [ $GLOBAL_RET -ne 0 ]; then
debug_result 1
return 1
fi
REF=`getRefCount`
if [ $REF -ne 1 ]; then
debug_result 1
return 1
fi
debug_result 0
verify_LUNcount
SR_unplug ${SR_ID}
debug_test "Verify refcount for LUN 0"
testDevPath
if [ $GLOBAL_RET -eq 0 ]; then
debug_result 1
return 1
fi
REF=`getRefCount`
if [ $REF -ne 0 ]; then
debug_result 1
return 1
fi
debug_result 0
#discover_LUNs
debug_test "Discover LUN verify refcount"
testDevPath
if [ $GLOBAL_RET -eq 0 ]; then
debug_result 1
return 1
fi
REF=`getRefCount`
if [ $REF -ne 0 ]; then
debug_result 1
return 1
fi
debug_result 0
SR_ID=`get_SRID_fromLUN 0`
debug_SRDelete ${SR_ID}
sm_SRDelete ${SR_ID}
del_from_usedlist ${SR_ID}
debug && debug "TEST 2: Add 2 SRs, verify refcount, unplug one and reverify"
for i in `seq 0 1`; do
setup_sr $i
test_exit 1
del_from_freelist $i ${SR_ID}
done
debug_test "Verify refcount for LUNs 0 and 1"
testDevPath
if [ $GLOBAL_RET -ne 0 ]; then
debug_result 1
return 1
fi
REF=`getRefCount`
if [ $REF -ne 2 ]; then
debug_result 1
return 1
fi
debug_result 0
SR_ID=`get_SRID_fromLUN 1`
SR_unplug ${SR_ID}
debug_test "Verify refcount for LUN 0"
testDevPath
if [ $GLOBAL_RET -ne 0 ]; then
debug_result 1
return 1
fi
REF=`getRefCount`
if [ $REF -ne 1 ]; then
debug_result 1
return 1
fi
debug_result 0
#discover_LUNs
debug_test "Discover LUN verify refcount"
testDevPath
if [ $GLOBAL_RET -ne 0 ]; then
debug_result 1
return 1
fi
REF=`getRefCount`
if [ $REF -ne 1 ]; then
debug_result 1
return 1
fi
debug_result 0
SR_ID=`get_SRID_fromLUN 0`
SR_unplug ${SR_ID}
for i in `seq 0 1`; do
SR_ID=`get_SRID_fromLUN $i`
debug_SRDelete ${SR_ID}
sm_SRDelete ${SR_ID}
del_from_usedlist ${SR_ID}
done
debug && debug "TEST 3: Add 10 SRs, verify refcount, unplug 9 and reverify"
for i in `seq 0 9`; do
setup_sr $i
test_exit 1
del_from_freelist $i ${SR_ID}
done
debug_test "Verify refcount for LUNs 0 - 9"
testDevPath
if [ $GLOBAL_RET -ne 0 ]; then
debug_result 1
return 1
fi
REF=`getRefCount`
if [ $REF -ne 10 ]; then
debug_result 1
return 1
fi
debug_result 0
REFCOUNT=9
for i in `seq 0 9`; do
if [ $i == 6 ]; then
continue
fi
SR_ID=`get_SRID_fromLUN $i`
SR_unplug ${SR_ID}
debug_test "Verify refcount for ${REFCOUNT} LUNs"
testDevPath
if [ $GLOBAL_RET -ne 0 ]; then
debug_result 1
return 1
fi
REF=`getRefCount`
if [ $REF -ne $REFCOUNT ]; then
debug_result 1
return 1
fi
debug_result 0
REFCOUNT=`expr ${REFCOUNT} - 1`
done
debug_test "Verify refcount for single LUN"
testDevPath
if [ $GLOBAL_RET -ne 0 ]; then
debug_result 1
return 1
fi
REF=`getRefCount`
if [ $REF -ne 1 ]; then
debug_result 1
return 1
fi
debug_result 0
SR_ID=`get_SRID_fromLUN 6`
SR_unplug ${SR_ID}
for i in `seq 0 9`; do
SR_ID=`get_SRID_fromLUN $i`
debug_SRDelete ${SR_ID}
sm_SRDelete ${SR_ID}
del_from_usedlist ${SR_ID}
done
return 0
}
# Performs the probability test: randomly create and destroy SRs.
run_probability_test()
{
debug ""
debug "Running probability test"
debug "========================"
debug ""
MAXSEQ=10
if [ ! -z ${FAST} ]; then
MAXLOOP=1
else
MAXLOOP=100
fi
loop=0
TEST=1
# Repeat MAXLOOP times. In the 1st iteration TEST is 1 so the inner "if"
# statement will be executed with 90% probability (x10). In the 2nd
# iteration TEST will be 6 so the "if" will be executed with 40%
# probability. From the 3rd iteration and on, TEST is 1 so the "if" will be
# executed with 80% probability.
while [ $loop -lt $MAXLOOP ]; do
# repeat for all LUNs
for i in `seq 1 $MAXSEQ` ; do
# pick a random number from 0 to 9
let "NUM = $RANDOM % 9"
# if NUM > TEST, create an SR on a random LUN
if [ $NUM -gt $TEST ]; then
FREESIZE=`wc -l ${FREEFILE} | cut -d" " -f1`
if [ $FREESIZE == 0 ]; then
continue
fi
FreeLUN=`getFreeLUN $FREESIZE`
setup_sr ${FreeLUN}
test_exit 1
del_from_freelist ${FreeLUN} ${SR_ID}
# if NUM <= TEST, randomly destroy an SR
else
USEDSIZE=`wc -l ${INUSEFILE} | cut -d" " -f1`
if [ $USEDSIZE == 0 ]; then
continue
fi
UsedSR=`getUsedSR $USEDSIZE`
SR_unplug ${UsedSR}
debug_SRDelete ${UsedSR}
sm_SRDelete ${UsedSR}
test_exit 1
del_from_usedlist ${UsedSR}
fi
#discover_LUNs
done
# Verify refcount matches usedlist
debug_test "Verifying refcount"
REF=`getRefCount`
USEDSIZE=`wc -l ${INUSEFILE} | cut -d" " -f1`
if [ $REF -ne ${USEDSIZE} ]; then
debug_result 1
return 1
fi
debug_result 0
# Flip probability
if [ $TEST == 0 ]; then
TEST=6
else
TEST=1
fi
loop=`expr $loop + 1`
done
}
run_tests()
{
gen_hostlist
DRIVER_TYPE=lvmoiscsi
SUBSTRATE_TYPE=lvmoiscsi
CONTENT_TYPE=user
init_freelist
init_SCSIid_list
run_manual_verify_test
test_exit 1
cleanup_SRs
run_probability_test
cleanup_SRs
cleanup_freelist
debug_test "Verifying refcount post stress loop"
testDevPath
if [ $GLOBAL_RET -eq 0 ]; then
debug_result 1
return 1
fi
REF=`getRefCount`
if [ $REF -ne 0 ]; then
debug_result 1
return 1
fi
debug_result 0
}
TEMPLATE_ALIAS=windows
process_arguments $@
post_process
check_req_args
check_req_sw
install_ssh_certificate ${REMHOSTNAME} ${SSH_PRIVATE_KEY} ${PASSWD}
install_scsiID_helper ${REMHOSTNAME}
print_version_info
if [[ -z ${IQN_INITIATOR_ID} || -z ${LISCSI_TARGET_IP} || -z ${LISCSI_TARGET_ID} ]]; then
debug "iSCSI configuration information missing. Skipping test"
exit
fi
if [ ! -z ${IQN_INITIATOR_ID_CHAP} ] ; then
debug "Not ready to run these tests with CHAP credentials, Exiting quietly."
exit
fi
run_tests
print_exit_info
|
pritha-srivastava/sm
|
tests/test_iscsi_refcount.sh
|
Shell
|
lgpl-2.1
| 14,577 |
#!/bin/sh
printf "#!/bin/sh\n" > "$usrcfd/tmp/session.tell.$nametmp.sh"
#!/bin/sh
echo
#!/bin/sh
echo
#!
#!/bin/bash
#!/bin/bash
|
siosio/intellij-community
|
plugins/sh/testData/oldLexer/v3/shebang.sh
|
Shell
|
apache-2.0
| 141 |
#!/bin/sh
#
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
pushd "$(dirname $0)" >/dev/null
npm install @types/flatbuffers
../flatc --ts --no-fb-import --gen-mutable -o ts -I include_test monster_test.fbs
../flatc -b -I include_test monster_test.fbs unicode_test.json
tsc --strict --noUnusedParameters --noUnusedLocals --noImplicitReturns --strictNullChecks ts/monster_test_generated.ts
node JavaScriptTest ./ts/monster_test_generated
../flatc --ts --js --no-fb-import -o ts union_vector/union_vector.fbs
# test JS version first, then transpile and rerun for TS
node JavaScriptUnionVectorTest ./ts/union_vector_generated
tsc --strict --noUnusedParameters --noUnusedLocals --noImplicitReturns --strictNullChecks ts/union_vector_generated.ts
node JavaScriptUnionVectorTest ./ts/union_vector_generated
npm uninstall @types/flatbuffers
|
bjtaylor1/osrm-backend
|
third_party/flatbuffers/tests/TypeScriptTest.sh
|
Shell
|
bsd-2-clause
| 1,387 |
#!/bin/sh
cat mklist.txt |
while read line; do
ws=""
list=""
for f in $line; do
echo "Processing $f"
f="../../$f"
test -f $f || {
echo "$f doesn't exist"
exit 1
}
ws="$(dirname $f)/wscript_build"
if [ -f $ws ]; then
if test -s $ws && ! grep "AUTOGENERATED.by.mktowscript" $ws > /dev/null; then
echo "Skipping manually edited file $ws"
continue
fi
fi
list="$list $f"
done
if [ "$list" = "" ]; then
continue
fi
./mktowscript.pl $list > wscript_build.$$ || {
echo "Failed on $f"
rm -f wscript_build.$$
exit 1
}
if cmp wscript_build.$$ $ws > /dev/null 2>&1; then
rm -f wscript_build.$$
else
mv wscript_build.$$ $ws || exit 1
fi
#exit 1
done
|
zarboz/XBMC-PVR-mac
|
tools/darwin/depends/samba/samba-3.6.6/buildtools/mktowscript/rebuild_all.sh
|
Shell
|
gpl-2.0
| 724 |
#!/usr/bin/env bash
bash --version 2>&1 | head -n 1
set -eo pipefail
SCRIPT_DIR=$(cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd)
###########################################################################
# CONFIGURATION
###########################################################################
BUILD_PROJECT_FILE="$SCRIPT_DIR/build/_build.csproj"
TEMP_DIRECTORY="$SCRIPT_DIR//.nuke/temp"
DOTNET_GLOBAL_FILE="$SCRIPT_DIR//global.json"
DOTNET_INSTALL_URL="https://dot.net/v1/dotnet-install.sh"
DOTNET_CHANNEL="Current"
export DOTNET_CLI_TELEMETRY_OPTOUT=1
export DOTNET_SKIP_FIRST_TIME_EXPERIENCE=1
export DOTNET_MULTILEVEL_LOOKUP=0
###########################################################################
# EXECUTION
###########################################################################
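# FirstJsonValue <key> <json...> prints the first value found for the given key,
# e.g. (assuming a typical global.json layout):
#   FirstJsonValue "version" '{ "sdk": { "version": "6.0.100" } }'   # -> 6.0.100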
function FirstJsonValue {
perl -nle 'print $1 if m{"'"$1"'": "([^"]+)",?}' <<< "${@:2}"
}
# If dotnet CLI is installed globally and it matches requested version, use for execution
if [ -x "$(command -v dotnet)" ] && dotnet --version &>/dev/null; then
export DOTNET_EXE="$(command -v dotnet)"
else
# Download install script
DOTNET_INSTALL_FILE="$TEMP_DIRECTORY/dotnet-install.sh"
mkdir -p "$TEMP_DIRECTORY"
curl -Lsfo "$DOTNET_INSTALL_FILE" "$DOTNET_INSTALL_URL"
chmod +x "$DOTNET_INSTALL_FILE"
# If global.json exists, load expected version
if [[ -f "$DOTNET_GLOBAL_FILE" ]]; then
DOTNET_VERSION=$(FirstJsonValue "version" "$(cat "$DOTNET_GLOBAL_FILE")")
if [[ "$DOTNET_VERSION" == "" ]]; then
unset DOTNET_VERSION
fi
fi
# Install by channel or version
DOTNET_DIRECTORY="$TEMP_DIRECTORY/dotnet-unix"
if [[ -z ${DOTNET_VERSION+x} ]]; then
"$DOTNET_INSTALL_FILE" --install-dir "$DOTNET_DIRECTORY" --channel "$DOTNET_CHANNEL" --no-path
else
"$DOTNET_INSTALL_FILE" --install-dir "$DOTNET_DIRECTORY" --version "$DOTNET_VERSION" --no-path
fi
export DOTNET_EXE="$DOTNET_DIRECTORY/dotnet"
fi
echo "Microsoft (R) .NET Core SDK version $("$DOTNET_EXE" --version)"
"$DOTNET_EXE" build "$BUILD_PROJECT_FILE" /nodeReuse:false /p:UseSharedCompilation=false -nologo -clp:NoSummary --verbosity quiet
"$DOTNET_EXE" run --project "$BUILD_PROJECT_FILE" --no-build -- "$@"
|
olsh/resharper-bootstrap-templates
|
build.sh
|
Shell
|
mit
| 2,280 |
#!/bin/sh
cd `dirname $0`
AC_SEARCH_OPTS=""
# For those of us with pkg-config and other tools in /usr/local
PATH=$PATH:/usr/local/bin
# This is to make life easier for people who installed pkg-config in /usr/local
# but have autoconf/make/etc in /usr/. AKA most mac users
if [ -d "/usr/local/share/aclocal" ]
then
AC_SEARCH_OPTS="-I /usr/local/share/aclocal"
fi
aclocal $AC_SEARCH_OPTS && \
autoconf && \
autoheader && \
automake --add-missing && \
./configure "$@" && \
make -j4
|
bc-jaymendoza/the_silver_searcher
|
build.sh
|
Shell
|
apache-2.0
| 488 |
R CMD REMOVE --library=$PREFIX/lib/R/library/ GWASdata
|
joachimwolff/bioconda-recipes
|
recipes/bioconductor-gwasdata/pre-unlink.sh
|
Shell
|
mit
| 55 |
#!/bin/sh
# See LICENSE file for copyright and license details.
out=/tmp/$$.out
err=/tmp/$$.err
trap "rm -f $out $err" EXIT INT QUIT HUP
case $# in
0)
echo "usage: update.sh test ..." >&2
exit 1
;;
*)
for i
do
../cc1-z80 -I./ -w $i >$out 2>$err
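		# Rewrite the test file in place with ed: the lines between the "error"
		# and "output" markers are replaced by the captured stderr, and the lines
		# between "output" and the closing "*/" by the captured stdout.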
(echo '/^error/+;/^output/-c'
cat $err
printf ".\n"
echo '/^output/+;/^\*\//-c'
cat $out
printf ".\nw\n") | ed -s $i
done
;;
esac
|
8l/scc
|
cc1/tests/update.sh
|
Shell
|
isc
| 400 |
#!/usr/bin/env bash
set -e # halt script on error
# Configure git name, email, and remote
git config --global user.email "[email protected]"
git config --global user.name "Automatic travis merge"
git remote add pb https://hyatt03:[email protected]/hyatt03/hyatt.space.git
# Create a temporary branch name
export TEMP_BRANCH=temp_`date +%s`
# Fetch first to avoid ref disputes
git fetch
# Create the branch and check it out
git branch "$TEMP_BRANCH"
git checkout "$TEMP_BRANCH"
# Add the built site
git add -f _site/
git commit -m "Automated commit from travis"
# Point the gh-pages head to this branch
git checkout -B gh-pages "$TEMP_BRANCH"
# Delete the temporary branch
git branch -d "$TEMP_BRANCH"
# Push the changes
echo "Pushing"
git push pb gh-pages --quiet -f
echo "Done pushing"
|
hyatt03/hyatt.space
|
script/PushToGH.sh
|
Shell
|
mit
| 806 |
wget https://zenodo.org/record/5648048/files/seecer.tar.gz
tar -zxvf seecer.tar.gz
mv seecer third_party
export LD_LIBRARY_PATH=third_party/seecer/SEECER/lib:$LD_LIBRARY_PATH
|
SchulzLab/SOS
|
initial_setup.sh
|
Shell
|
mit
| 177 |
#!/bin/bash
source power/config/config;
reps=$1;
shift;
keep="$1";
shift;
keep_n=3;
rep_script=./scripts/repeat_and_keep.sh
pfiles=( /tmp/Tot /tmp/Pac /tmp/PP0 );
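# Keep re-running the measurement until the standard deviation reported by
# ./vstats falls below $stdmax; after each failed attempt the threshold is
# relaxed by $stdstep (both presumably set in power/config/config).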
while [ 1 ];
do
out=$($rep_script $reps $keep $keep_n $@);
vstat=$(./vstats $out);
# echo ${vstat[@]};
vstata=($vstat);
std=${vstata[4]};
stdhigh=$(echo "$std>=$stdmax" | bc -l);
if [ $stdhigh -eq 1 ];
then
echo "--high std ($std), repeat";
stdmax=$(echo "$stdmax+$stdstep" | bc -l);
else
# calc all averages
for f in ${pfiles[@]};
do
data=$(cat $f);
vstatf=$(./vstats $data);
echo $vstatf > $f;
done;
break;
fi;
done;
echo "$vstat ("$out")";
|
LPD-EPFL/lockin
|
scripts/repeat_avg_std.sh
|
Shell
|
mit
| 673 |
arcdir=$HOME/zhihu/ph/arcanist
export PATH=${arcdir}/bin:$PATH
if [ -f ${arcdir}/resources/shell/bash-completion ]; then
source ${arcdir}/resources/shell/bash-completion
fi
|
xiaogaozi/oh-my-zsh
|
custom/plugins/arcanist/arcanist.plugin.zsh
|
Shell
|
mit
| 177 |
i="0"
while [ $i -lt 100 ]
do
insmod tasklet.ko
rmmod tasklet.ko
i=$[$i+1]
done
|
DimaWittmann/monte_cassino
|
time/script_tasklet.sh
|
Shell
|
mit
| 85 |
#!/bin/bash
export LC_ALL=C
sudo apt-get install unzip
unzip train.zip
unzip test.zip
mkdir results
mkdir valid
mkdir valid/cats
mkdir valid/dogs
mkdir test/unknown
mkdir train/cats
mkdir train/dogs
mkdir sample
mkdir sample/results
mkdir sample/valid
mkdir sample/valid/cats
mkdir sample/valid/dogs
mkdir sample/test
mkdir sample/test/unknown
mkdir sample/train
mkdir sample/train/dogs
mkdir sample/train/cats
cp sample_submission.csv results
cp sample_submission.csv sample/results
sudo mv test/*.jpg test/unknown/ -f
sudo mv train/cat.* train/cats -f
sudo mv train/dog.* train/dogs -f
sudo mv train/dogs/dog.1????.* valid/dogs -f
sudo mv train/cats/cat.1????.* valid/cats -f
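# The "1????" globs above move images numbered 10000-19999 into the validation
# set; the two-digit and "121??" globs below copy small numbered subsets into
# the sample tree.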
sudo cp train/cats/cat.??.jpg sample/train/cats
sudo cp train/dogs/dog.??.jpg sample/train/dogs
sudo cp valid/cats/cat.121??.jpg sample/valid/cats
sudo cp valid/dogs/dog.121??.jpg sample/valid/dogs
sudo cp test/unknown/??.jpg sample/test/unknown
sudo pip install -r requirements.txt
|
laviavigdor/deep-learning-toolbelt
|
init_dogs_and_cats.sh
|
Shell
|
mit
| 970 |
#!/bin/bash
photo_dir='.'
if test $# -gt 0; then
photo_dir=$1
fi
thumbnail_dir='_thumbnail'
find $photo_dir -type d -name $thumbnail_dir |xargs rm -rf
|
huzhifeng/express-gallary
|
tools/thumbnail_cleanup.sh
|
Shell
|
mit
| 152 |
#!/usr/bin/env bash
paste -d ' '\
<( cat $1 | grep Utterance | sed 's/.*(\(.*\))/\1/' )\
<( cat $1 | grep Recognized: | sed 's/Recognized: \(.*\)/\1/' )
|
rizar/actor-critic-public
|
bin/extract_for_kaldi.sh
|
Shell
|
mit
| 157 |
#!/usr/bin/env sh
# To enable deployment with Git you need to add a new SSH key.
# 1. Create a key
# - run `ssh-keygen -t rsa -b 4096 -C "[email protected]"`
# - leave the password empty
# - chose a new location for the key. Maybe your desktop
# 2. Add the public key to Github
# - Copy the content of yourkey.pub
# - Go to github.com/your-organisation/your-repo/settings/keys
# - Add key with a meaningful title here
# - Check "Allow write access"
# 3. Edit private key file
# - Open the private key in your text editor
# - Replace each new-line with "\n"
# 4. Add private key to Travis
# - travis-ci.org/your-organisation/your-repo/settings
# - Add a new environment variable "DEPLOY_KEY"
# - Make sure "Display value in build log" is "OFF"
# - Add content of private key file as value
echo "Setup SSH\n"
eval "$(ssh-agent -s)" # Start the ssh agent
# Read key, replace \n with actual new lines and write to file
echo "$DEPLOY_KEY" | sed 's/\\n/\n/g' > deploy_key.pem
chmod 600 deploy_key.pem # This key should have push access
ssh-add deploy_key.pem
./deploy.sh
|
trybash/game
|
scripts/deploy-travis.sh
|
Shell
|
mit
| 1,093 |
#!/usr/bin/env bash
#
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
export LC_ALL=C
travis_retry pip install codespell==1.13.0
travis_retry pip install flake8==3.5.0
|
GlobalBoost/GlobalBoost
|
.travis/lint_04_install.sh
|
Shell
|
mit
| 308 |
#!/bin/bash
python /usr/local/bin/process_kill.py
|
alevar/Work
|
OSX_logout_unmount/initiation_logout.sh
|
Shell
|
mit
| 51 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for USN-2630-1
#
# Security announcement date: 2015-06-10 00:00:00 UTC
# Script generation date: 2017-01-01 21:04:36 UTC
#
# Operating System: Ubuntu 12.04 LTS
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - qemu-kvm:1.0+noroms-0ubuntu14.23
#
# Last versions recommended by security team:
# - qemu-kvm:1.0+noroms-0ubuntu14.31
#
# CVE List:
# - CVE-2015-3209
# - CVE-2015-4037
# - CVE-2015-4103
# - CVE-2015-4104
# - CVE-2015-4105
# - CVE-2015-4106
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade qemu-kvm=1.0+noroms-0ubuntu14.31 -y
|
Cyberwatch/cbw-security-fixes
|
Ubuntu_12.04_LTS/x86_64/2015/USN-2630-1.sh
|
Shell
|
mit
| 743 |
# This is a shell script to transform the PRODUCTNAME directory into a cookie-cutter template
# Delete files that we don't want to include in the template
rm -rf PRODUCTNAME/app/Pods
rm -rf PRODUCTNAME/app/PRODUCTNAME.xcworkspace
#This is the only lookup that is done on filenames
LOOKUP="PRODUCTNAME"
EXPANDED="{{ cookiecutter.project_name | replace(' ', '') }}"
# Make the tree
find ./PRODUCTNAME -type d | while read FILE
do
NEWFILE=`echo $FILE | sed -e "s/${LOOKUP}/${EXPANDED}/g"`
echo "mkdir -p \"$NEWFILE\""
done
# Copy the files over
find ./PRODUCTNAME -type f | while read FILE
do
NEWFILE=`echo $FILE | sed -e "s/${LOOKUP}/${EXPANDED}/g"`
echo "cp \"$FILE\" \"$NEWFILE\""
done
# Do replacements
function replace {
grep -rl $1 ./PRODUCTNAME | while read FILE
do
NEWFILE=`echo $FILE | sed -e "s/${LOOKUP}/${EXPANDED}/g"`
    # Copy over in case the sed fails due to encoding
# echo "echo \"$FILE\""
echo "sed -e \"s/$1/$2/g\" \"$NEWFILE\" > t1 && mv t1 \"$NEWFILE\""
done
}
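# Note: like the mkdir/cp loops above, replace() only echoes the sed commands;
# the script's output is presumably reviewed and piped to sh to actually apply
# the transformation.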
replace "PRODUCTNAME" "{{ cookiecutter.project_name | replace(' ', '') }}"
replace "ORGANIZATION" "{{ cookiecutter.company_name }}"
replace "LEADDEVELOPER" "{{ cookiecutter.lead_dev }}"
replace "LEADEMAIL" "{{ cookiecutter.lead_email }}"
replace "com.company.PRODUCTNAME" "{{ cookiecutter.bundle_identifier }}"
replace "CRASHLYTICS_API_KEY_VALUE" "{{ cookiecutter.fabric_api_key }}"
replace "CRASHLYTICS_API_SECRET_VALUE" "{{ cookiecutter.fabric_build_secret_key }}"
|
pmlbrito/cookiecutter-ios-template
|
generate_template.sh
|
Shell
|
mit
| 1,505 |
#! /bin/sh
# Created Time: 2016-04-23 14:42:34
#
# check build dir
if [ -d build ]; then
echo "dir build exist"
else
mkdir build
echo "mkdir build"
fi
cd build
rm -rf *
c_compiler=
cxx_compiler=
system_type=
cmake_para=
if [ "`getconf LONG_BIT`" = "32" ]; then
echo "32 bit system"
system_type="i686"
else
echo "64 bit system"
system_type="x86_64"
fi
for eachline in `cat /etc/issue`; do
if [ "`echo $eachline | awk -F "." '{print $1}'`" = "14" ]; then
echo "system version : 14"
c_compiler="x86_64-unknown-linux-gnu-gcc-4.9.2"
cxx_compiler="x86_64-unknown-linux-gnu-g++"
if [ "`which $c_compiler`" ]; then
echo "$c_compiler is exist"
else
c_compiler="/opt/gcc-4.9.2/bin/x86_64-unknown-linux-gnu-gcc-4.9.2"
cxx_compiler="/opt/gcc-4.9.2/bin/x86_64-unknown-linux-gnu-g++"
fi
break
fi
done
if [ "$c_compiler" = "" ]; then
echo "######################################################"
echo ""
echo "gcc -version"
echo `gcc -v`
echo ""
echo "######################################################"
else
echo "######################################################"
echo ""
echo "gcc -version"
echo `$c_compiler -v`
echo "######################################################"
cmake_para="-DCMAKE_C_COMPILER=$c_compiler -DCMAKE_CXX_COMPILER=$cxx_compiler -DSYSTEM_TYPE=$system_type"
fi
cmake ../ $cmake_para
make
|
linails/NLP-correlation
|
build.sh
|
Shell
|
mit
| 1,494 |
#!/bin/bash
amixer cset numid=3 1
python3 ~/src/python/shotclock/shotclock.py
|
unsignedbytes/shotclock
|
run.sh
|
Shell
|
mit
| 78 |
#!/bin/sh
kdiffpath=`type -P kdiff3`
if [ -z "$kdiffpath" ] ; then
echo >&2 "ERROR: Can't find KDiff3"
exit
fi
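# git calls GIT_EXTERNAL_DIFF with: path old-file old-hex old-mode new-file new-hex new-mode,
# so $2 is the pre-image and $5 is the post-image; piping through cat presumably keeps
# kdiff3 from taking over the terminal.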
"$kdiffpath" "$2" "$5" | cat
|
itsananderson/git-better
|
git-scripts/diff-wrapper.sh
|
Shell
|
mit
| 151 |
#!/bin/bash
searchServerURL="http://localhost:9095"
indexName="myindex"
echo "Creating an index: $indexName into the searchServer ..."
sleep 1
curl -X PUT $searchServerURL/api/search/v2/index/$indexName
for doc in abc.json xyz.json
do
echo "Adding document: $doc into index: $indexName"
sleep 1
curl -X PUT $searchServerURL/api/search/v2/index/$indexName/document/$doc -d @$doc
done
echo "Searching word: ekstep in the index : $indexName"
sleep 1
curl -X POST $searchServerURL/api/search/v2/index/$indexName/_search -d '{"query":{"query": "ekstep"}}'
docId="abc.json"
echo "Get documentById for doc: $docId"
sleep 1
curl -X GET $searchServerURL/api/search/v2/index/$indexName/document/$docId
echo "Searching word: game in the index : $indexName"
sleep 1
curl -X POST $searchServerURL/api/search/v2/index/$indexName/_search -d '{"query":{"query": "game"}}'
|
projectOpenRAP/OpenRAP
|
searchServer/test/test_script.sh
|
Shell
|
mit
| 879 |
sudo apt-get install bmon
|
UedaTakeyuki/gc_setups
|
networkmonitor/bmon.setup.sh
|
Shell
|
mit
| 26 |
#!/bin/bash
readonly KEYWORDS_ACTOR_DANIELRADCLIFFE="Daniel(| )Radcliffe"
if [ "$1" == "" ]; #Normal operation
then
debug_start "Daniel Radcliffe"
RADCLIFFE=$(egrep -i "$KEYWORDS_ACTOR_DANIELRADCLIFFE" "$NEWPAGES")
categorize "RADCLIFFE" "Daniel Radcliffe"
debug_end "Daniel Radcliffe"
fi
|
MW-autocat-script/MW-autocat-script
|
catscripts/Entertainment/Actors_and_actresses/Daniel_Radcliffe/DanielRadcliffe.sh
|
Shell
|
mit
| 303 |
#!/bin/sh
pip2 install --upgrade --user pillow
|
tylerjohnhaden/penguin-website-colors
|
localpillow.sh
|
Shell
|
mit
| 47 |
#!/bin/bash
echo "Cleaning Build-OpenSSL-cURL"
rm -fr curl/curl-* curl/include curl/lib openssl/openssl-1* openssl/openssl-ios* openssl/Mac openssl/iOS* openssl/tvOS* openssl/Catalyst nghttp2/nghttp2-1* nghttp2/Mac nghttp2/iOS* nghttp2/tvOS* nghttp2/lib nghttp2/Catalyst example/iOS\ Test\ App/build/* *.tgz *.pkg nghttp2/pkg-config* /tmp/curl /tmp/openssl /tmp/pkg_config
|
jasonacox/Build-OpenSSL-cURL
|
clean.sh
|
Shell
|
mit
| 373 |
#!/bin/bash
# --------------------------------------------------------------------
# Testing Contracts At https://github.com/bokkypoobah/TokenTrader
# for https://cryptoderivatives.market/
#
# Testing Other Methods And Conditions
#
# (c) BokkyPooBah 2017. The MIT licence.
# --------------------------------------------------------------------
GETHATTACHPOINT=`grep IPCFILE settings.txt | sed "s/^.*=//"`
PASSWORD=`grep PASSWORD settings.txt | sed "s/^.*=//"`
OTHEROUTPUTFILE=`grep OTHEROUTPUTFILE settings.txt | sed "s/^.*=//"`
OTHERRESULTFILE=`grep OTHERRESULTFILE settings.txt | sed "s/^.*=//"`
TOKENDATAFILE=`grep TOKENDATA settings.txt | sed "s/^.*=//"`
TOKENADDRESSA=`grep tokenAddressA $TOKENDATAFILE | tail -n 1 | sed "s/^.*=//"`
TOKENADDRESSB=`grep tokenAddressB $TOKENDATAFILE | tail -n 1 | sed "s/^.*=//"`
TOKENADDRESSC=`grep tokenAddressC $TOKENDATAFILE | tail -n 1 | sed "s/^.*=//"`
TOKENABI=`grep tokenABI $TOKENDATAFILE | tail -n 1 | sed "s/^.*=//"`
FACTORYDATAFILE=`grep FACTORYDATAFILE settings.txt | sed "s/^.*=//"`
TOKENTRADERFACTORYADDRESS=`grep ^tokenTraderFactoryAddress $FACTORYDATAFILE | tail -n 1 | sed "s/^.*=//"`
TOKENTRADERFACTORYABI=`grep ^tokenTraderFactoryABI $FACTORYDATAFILE | tail -n 1 | sed "s/^.*=//"`
TOKENTRADERADDRESS=`grep ^tokenTraderAddress $FACTORYDATAFILE | tail -n 1 | sed "s/^.*=//"`
TOKENTRADERABI=`grep ^tokenTraderABI $FACTORYDATAFILE | tail -n 1 | sed "s/^.*=//"`
TOKENSELLERFACTORYADDRESS=`grep tokenSellerFactoryAddress $FACTORYDATAFILE | tail -n 1 | sed "s/^.*=//"`
TOKENSELLERFACTORYABI=`grep tokenSellerFactoryABI $FACTORYDATAFILE | tail -n 1 | sed "s/^.*=//"`
TOKENSELLERADDRESS=`grep tokenSellerAddress $FACTORYDATAFILE | tail -n 1 | sed "s/^.*=//"`
TOKENSELLERABI=`grep tokenSellerABI $FACTORYDATAFILE | tail -n 1 | sed "s/^.*=//"`
GNTTOKENTRADERFACTORYADDRESS=`grep gntTokenTraderFactoryAddress $FACTORYDATAFILE | tail -n 1 | sed "s/^.*=//"`
GNTTOKENTRADERFACTORYABI=`grep gntTokenTraderFactoryABI $FACTORYDATAFILE | tail -n 1 | sed "s/^.*=//"`
GNTTOKENTRADERADDRESS=`grep gntTokenTraderAddress $FACTORYDATAFILE | tail -n 1 | sed "s/^.*=//"`
GNTTOKENTRADERABI=`grep gntTokenTraderABI $FACTORYDATAFILE | tail -n 1 | sed "s/^.*=//"`
printf "Connecting to geth on endpoint '$GETHATTACHPOINT'\n" | tee $OTHEROUTPUTFILE
printf "Token address '$TOKENADDRESS'\n" | tee -a $OTHEROUTPUTFILE
printf "Token ABI '$TOKENABI'\n" | tee -a $OTHEROUTPUTFILE
printf "TokenTraderFactory address '$TOKENTRADERFACTORYADDRESS'\n" | tee -a $OTHEROUTPUTFILE
printf "TokenTraderFactory ABI '$TOKENTRADERFACTORYABI'\n" | tee -a $OTHEROUTPUTFILE
printf "TokenTrader address '$TOKENTRADERADDRESS'\n" | tee -a $OTHEROUTPUTFILE
printf "TokenTrader ABI '$TOKENTRADERABI'\n" | tee -a $OTHEROUTPUTFILE
printf "TokenSellerFactory address '$TOKENSELLERFACTORYADDRESS'\n" | tee -a $OTHEROUTPUTFILE
printf "TokenSellerFactory ABI '$TOKENSELLERFACTORYABI'\n" | tee -a $OTHEROUTPUTFILE
printf "TokenSeller address '$TOKENSELLERADDRESS'\n" | tee -a $OTHEROUTPUTFILE
printf "TokenSeller ABI '$TOKENSELLERABI'\n" | tee -a $OTHEROUTPUTFILE
printf "GNTTokenTraderFactory address '$GNTTOKENTRADERFACTORYADDRESS'\n" | tee -a $OTHEROUTPUTFILE
printf "GNTTokenTraderFactory ABI '$GNTTOKENTRADERFACTORYABI'\n" | tee -a $OTHEROUTPUTFILE
printf "GNTTokenTrader address '$GNTTOKENTRADERADDRESS'\n" | tee -a $OTHEROUTPUTFILE
printf "GNTTokenTrader ABI '$GNTTOKENTRADERABI'\n" | tee -a $OTHEROUTPUTFILE
geth --verbosity 3 attach $GETHATTACHPOINT << EOF | tee -a $OTHEROUTPUTFILE
var tokenA = web3.eth.contract($TOKENABI).at("$TOKENADDRESSA");
var tokenB = web3.eth.contract($TOKENABI).at("$TOKENADDRESSB");
var tokenC = web3.eth.contract($TOKENABI).at("$TOKENADDRESSC");
var tokenTraderFactory = web3.eth.contract($TOKENTRADERFACTORYABI).at("$TOKENTRADERFACTORYADDRESS");
var tokenTrader = web3.eth.contract($TOKENTRADERABI).at("$TOKENTRADERADDRESS");
var tokenSellerFactory = web3.eth.contract($TOKENSELLERFACTORYABI).at("$TOKENSELLERFACTORYADDRESS");
var tokenSeller = web3.eth.contract($TOKENSELLERABI).at("$TOKENSELLERADDRESS");
var gntTokenTraderFactory = web3.eth.contract($GNTTOKENTRADERFACTORYABI).at("$GNTTOKENTRADERFACTORYADDRESS");
var gntTokenTrader = web3.eth.contract($GNTTOKENTRADERABI).at("$GNTTOKENTRADERADDRESS");
var ACCOUNTS = 3;
var EPSILON = 0.01;
function pad(s) {
var o = s.toFixed(18);
while (o.length < 27) {
o = " " + o;
}
return o;
}
var accounts = [eth.accounts[0], eth.accounts[1], eth.accounts[2], "$TOKENADDRESSA", "$TOKENADDRESSB", "$TOKENADDRESSC", "$TOKENTRADERFACTORYADDRESS", "$TOKENSELLERFACTORYADDRESS", "$GNTTOKENTRADERFACTORYADDRESS", "$TOKENTRADERADDRESS", "$TOKENSELLERADDRESS", "$GNTTOKENTRADERADDRESS"];
var accountName = {};
accountName[eth.accounts[0]] = "Account #0";
accountName[eth.accounts[1]] = "Account #1";
accountName[eth.accounts[2]] = "Account #2";
accountName["$TOKENADDRESSA"] = "ERC20A";
accountName["$TOKENADDRESSB"] = "ERC20B";
accountName["$TOKENADDRESSC"] = "ERC20C";
accountName["$TOKENTRADERFACTORYADDRESS"] = "TokenTraderFactory";
accountName["$TOKENTRADERADDRESS"] = "TokenTrader b1.1,s1.2";
accountName["$TOKENSELLERFACTORYADDRESS"] = "TokenSellerFactory";
accountName["$TOKENSELLERADDRESS"] = "TokenSeller s1.2";
accountName["$GNTTOKENTRADERFACTORYADDRESS"] = "GNTTokenTraderFactory";
accountName["$GNTTOKENTRADERADDRESS"] = "GNTTokenTrader s1.3";
function printBalances(accounts) {
var i = 0;
console.log("RESULT: # Account EtherBalance TokenABalance TokenBBalance TokenCBalance Name");
accounts.forEach(function(e) {
var etherBalance = web3.fromWei(eth.getBalance(e), "ether");
var tokenABalance = web3.fromWei(tokenA.balanceOf(e), "ether");
var tokenBBalance = web3.fromWei(tokenB.balanceOf(e), "ether");
var tokenCBalance = web3.fromWei(tokenC.balanceOf(e), "ether");
console.log("RESULT: " + i + " " + e + " " + pad(etherBalance) + " " + pad(tokenABalance) + " " + pad(tokenBBalance) + " " + pad(tokenCBalance) + " " + accountName[e]);
i++;
});
}
function printTxData(name, txId) {
var tx = eth.getTransaction(txId);
var txReceipt = eth.getTransactionReceipt(txId);
console.log("RESULT: " + name + " gas=" + tx.gas + " gasUsed=" + txReceipt.gasUsed + " cost=" + tx.gasPrice.mul(txReceipt.gasUsed).div(1e18) +
" block=" + txReceipt.blockNumber + " txId=" + txId);
}
function assertEtherBalance(account, testBalance) {
var etherBalance = web3.fromWei(eth.getBalance(account), "ether");
var diff = etherBalance.minus(testBalance);
if (diff >= -EPSILON && diff <= EPSILON) {
console.log("RESULT: OK " + account + " has expected balance " + testBalance);
} else {
console.log("RESULT: FAILURE " + account + " has balance " + etherBalance + " <> expected " + testBalance);
}
}
function assertTokenBalance(account, token, testBalance) {
var tokenBalance = token.balanceOf(account).div(1e18);
var diff = tokenBalance.minus(testBalance);
if (diff >= -EPSILON && diff <= EPSILON) {
console.log("RESULT: OK " + account + " has expected " + accountName[token.address] + " token balance " + testBalance);
} else {
console.log("RESULT: FAILURE " + account + " has " + accountName[token.address] + " token balance " + tokenBalance + " <> expected " + testBalance);
}
}
printBalances(accounts);
assertEtherBalance(eth.accounts[1], 96812.8623);
assertTokenBalance(eth.accounts[1], tokenA, 686.0769);
assertEtherBalance(eth.accounts[2], 96620.8970);
assertTokenBalance(eth.accounts[2], tokenA, 1300);
assertEtherBalance(tokenTrader.address, 258.0909);
assertTokenBalance(tokenTrader.address, tokenA, 4);
assertEtherBalance(tokenSeller.address, 149);
assertTokenBalance(tokenSeller.address, tokenA, 4);
assertEtherBalance(gntTokenTrader.address, 159);
assertTokenBalance(gntTokenTrader.address, tokenA, 5.9231);
for (var i = 0; i < ACCOUNTS; i++) {
personal.unlockAccount(eth.accounts[i], "$PASSWORD", 100000);
}
console.log("RESULT: Owner transferring 10 tokenB to tokenTraderFactory and tokenSellerFactory");
var ownerTraderFactoryTransferTokenTxId = tokenB.transfer("$TOKENTRADERFACTORYADDRESS", 10e18, {from: eth.accounts[1], gas: 100000});
var ownerSellerFactoryTransferTokenTxId = tokenB.transfer("$TOKENSELLERFACTORYADDRESS", 10e18, {from: eth.accounts[1], gas: 100000});
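// Busy-wait until the pending transactions above have been mined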
while (txpool.status.pending > 0) {
}
printTxData("ownerTraderFactoryTransferTokenTxId", ownerTraderFactoryTransferTokenTxId);
printTxData("ownerSellerFactoryTransferTokenTxId", ownerSellerFactoryTransferTokenTxId);
console.log("RESULT: Expecting change in eth.accounts[1] tokens plus 10 tokens in TokenTraderFactory and TokenSellerFactory");
printBalances(accounts);
assertEtherBalance(eth.accounts[1], 96812.8623);
assertTokenBalance(eth.accounts[1], tokenA, 686.0769);
assertTokenBalance(eth.accounts[1], tokenB, 980);
assertEtherBalance(eth.accounts[2], 96620.8970);
assertTokenBalance(eth.accounts[2], tokenA, 1300);
assertTokenBalance(tokenTraderFactory.address, tokenB, 10);
assertTokenBalance(tokenSellerFactory.address, tokenB, 10);
assertEtherBalance(tokenTrader.address, 258.0909);
assertTokenBalance(tokenTrader.address, tokenA, 4);
assertEtherBalance(tokenSeller.address, 149);
assertTokenBalance(tokenSeller.address, tokenA, 4);
assertEtherBalance(gntTokenTrader.address, 159);
assertTokenBalance(gntTokenTrader.address, tokenA, 5.9231);
console.log("RESULT: Owner withdrawing 1 tokenB from tokenTraderFactory and tokenSellerFactory");
var ownerTraderFactoryWithdrawERC20TokenTxId = tokenTraderFactory.ownerWithdrawERC20Token(tokenB.address, 1e18, {from: eth.accounts[1], gas: 100000});
var ownerSellerFactoryWithdrawERC20TokenTxId = tokenSellerFactory.ownerWithdrawERC20Token(tokenB.address, 1e18, {from: eth.accounts[1], gas: 100000});
while (txpool.status.pending > 0) {
}
printTxData("ownerTraderFactoryWithdrawERC20TokenTxId", ownerTraderFactoryWithdrawERC20TokenTxId);
printTxData("ownerSellerFactoryWithdrawERC20TokenTxId", ownerSellerFactoryWithdrawERC20TokenTxId);
console.log("RESULT: Expecting change in eth.accounts[1] tokens minus 1 token in TokenTraderFactory and TokenSellerFactory");
printBalances(accounts);
assertEtherBalance(eth.accounts[1], 96812.8580);
assertTokenBalance(eth.accounts[1], tokenA, 686.0769);
assertTokenBalance(eth.accounts[1], tokenB, 982);
assertEtherBalance(eth.accounts[2], 96620.8970);
assertTokenBalance(eth.accounts[2], tokenA, 1300);
assertTokenBalance(tokenTraderFactory.address, tokenB, 9);
assertTokenBalance(tokenSellerFactory.address, tokenB, 9);
assertEtherBalance(tokenTrader.address, 258.0909);
assertTokenBalance(tokenTrader.address, tokenA, 4);
assertEtherBalance(tokenSeller.address, 149);
assertTokenBalance(tokenSeller.address, tokenA, 4);
assertEtherBalance(gntTokenTrader.address, 159);
assertTokenBalance(gntTokenTrader.address, tokenA, 5.9231);
console.log("RESULT: Owner transferring 10 tokenB to tokenTrader, tokenSeller and gntTokenTrader");
var ownerTraderTransferTokenTxId = tokenB.transfer(tokenTrader.address, 10e18, {from: eth.accounts[1], gas: 100000});
var ownerSellerTransferTokenTxId = tokenB.transfer(tokenSeller.address, 10e18, {from: eth.accounts[1], gas: 100000});
var ownerGntTraderTransferTokenTxId = tokenB.transfer(gntTokenTrader.address, 10e18, {from: eth.accounts[1], gas: 100000});
while (txpool.status.pending > 0) {
}
printTxData("ownerTraderTransferTokenTxId", ownerTraderTransferTokenTxId);
printTxData("ownerSellerTransferTokenTxId", ownerSellerTransferTokenTxId);
printTxData("ownerGntTraderTransferTokenTxId", ownerGntTraderTransferTokenTxId);
console.log("RESULT: Expecting change in eth.accounts[1] tokens plus 10 tokens in TokenTrader, TokenSeller and GNTTokenTrader");
printBalances(accounts);
assertEtherBalance(eth.accounts[1], 96812.8550);
assertTokenBalance(eth.accounts[1], tokenA, 686.0769);
assertTokenBalance(eth.accounts[1], tokenB, 952);
assertEtherBalance(eth.accounts[2], 96620.8970);
assertTokenBalance(eth.accounts[2], tokenA, 1300);
assertTokenBalance(tokenTraderFactory.address, tokenB, 9);
assertTokenBalance(tokenSellerFactory.address, tokenB, 9);
assertEtherBalance(tokenTrader.address, 258.0909);
assertTokenBalance(tokenTrader.address, tokenA, 4);
assertTokenBalance(tokenTrader.address, tokenB, 10);
assertEtherBalance(tokenSeller.address, 149);
assertTokenBalance(tokenSeller.address, tokenA, 4);
assertTokenBalance(tokenSeller.address, tokenB, 10);
assertEtherBalance(gntTokenTrader.address, 159);
assertTokenBalance(gntTokenTrader.address, tokenA, 5.9231);
assertTokenBalance(gntTokenTrader.address, tokenB, 10);
console.log("RESULT: Owner withdrawing 1 tokenB from tokenTrader, tokenSeller and gntTokenTrader");
var ownerTraderWithdrawERC20TokenTxId = tokenTrader.makerWithdrawERC20Token(tokenB.address, 1e18, {from: eth.accounts[1], gas: 100000});
var ownerSellerWithdrawERC20TokenTxId = tokenSeller.makerWithdrawERC20Token(tokenB.address, 1e18, {from: eth.accounts[1], gas: 100000});
var ownerGntWithdrawERC20TokenTxId = gntTokenTrader.withdrawToken(tokenB.address, 1e18, {from: eth.accounts[1], gas: 100000});
while (txpool.status.pending > 0) {
}
printTxData("ownerTraderWithdrawERC20TokenTxId", ownerTraderWithdrawERC20TokenTxId);
printTxData("ownerSellerWithdrawERC20TokenTxId", ownerSellerWithdrawERC20TokenTxId);
printTxData("ownerGntWithdrawERC20TokenTxId", ownerGntWithdrawERC20TokenTxId);
console.log("RESULT: Expecting change in eth.accounts[1] tokens minus 1 token in tokenTrader, tokenSeller and gntTokenTrader");
printBalances(accounts);
assertEtherBalance(eth.accounts[1], 96812.8527);
assertTokenBalance(eth.accounts[1], tokenA, 686.0769);
assertTokenBalance(eth.accounts[1], tokenB, 955);
assertEtherBalance(eth.accounts[2], 96620.8970);
assertTokenBalance(eth.accounts[2], tokenA, 1300);
assertTokenBalance(tokenTraderFactory.address, tokenB, 9);
assertTokenBalance(tokenSellerFactory.address, tokenB, 9);
assertEtherBalance(tokenTrader.address, 258.0909);
assertTokenBalance(tokenTrader.address, tokenA, 4);
assertTokenBalance(tokenTrader.address, tokenB, 9);
assertEtherBalance(tokenSeller.address, 149);
assertTokenBalance(tokenSeller.address, tokenA, 4);
assertTokenBalance(tokenSeller.address, tokenB, 9);
assertEtherBalance(gntTokenTrader.address, 159);
assertTokenBalance(gntTokenTrader.address, tokenA, 5.9231);
assertTokenBalance(gntTokenTrader.address, tokenB, 9);
console.log("RESULT: Testing failure in createTradeContract for Factories. gas==gasUsed");
var createTradeContract1TxId = tokenTraderFactory.createTradeContract("$TOKENADDRESSB", 0, 120000, 100000, true, true, {from: eth.accounts[1], gas: 1000000});
var createTradeContract2TxId = tokenTraderFactory.createTradeContract("$TOKENADDRESSB", 110000, 0, 100000, true, true, {from: eth.accounts[1], gas: 1000000});
var createTradeContract3TxId = tokenTraderFactory.createTradeContract("$TOKENADDRESSB", 120000, 110000, 100000, true, true, {from: eth.accounts[1], gas: 1000000});
var createTradeContract4TxId = tokenTraderFactory.createTradeContract("$TOKENADDRESSB", 110000, 120000, 0, true, true, {from: eth.accounts[1], gas: 1000000});
var createTradeContract5TxId = tokenTraderFactory.createTradeContract("0x0", 110000, 120000, 100000, true, true, {from: eth.accounts[1], gas: 1000000});
var createSaleContract1TxId = tokenSellerFactory.createSaleContract("$TOKENADDRESSB", 0, 100000, true, {from: eth.accounts[1], gas: 1000000});
var createSaleContract2TxId = tokenSellerFactory.createSaleContract("$TOKENADDRESSB", 120000, 0, true, {from: eth.accounts[1], gas: 1000000});
var createSaleContract3TxId = tokenSellerFactory.createSaleContract("0x0", 120000, 100000, true, {from: eth.accounts[1], gas: 1000000});
var createGNTTradeContract1TxId = gntTokenTraderFactory.createTradeContract("$TOKENADDRESSB", 130000, 0, true, {from: eth.accounts[1], gas: 1000000});
while (txpool.status.pending > 0) {
}
printTxData("createTradeContract1TxId", createTradeContract1TxId);
printTxData("createTradeContract2TxId", createTradeContract2TxId);
printTxData("createTradeContract3TxId", createTradeContract3TxId);
printTxData("createTradeContract4TxId", createTradeContract4TxId);
printTxData("createTradeContract5TxId", createTradeContract5TxId);
printTxData("createSaleContract1TxId", createSaleContract1TxId);
printTxData("createSaleContract2TxId", createSaleContract2TxId);
printTxData("createSaleContract3TxId", createSaleContract3TxId);
printTxData("createGNTTradeContract1TxId", createGNTTradeContract1TxId);
console.log("RESULT: Expecting small change in eth.accounts[1]");
printBalances(accounts);
assertEtherBalance(eth.accounts[1], 96812.6723);
assertTokenBalance(eth.accounts[1], tokenA, 686.0769);
assertTokenBalance(eth.accounts[1], tokenB, 955);
assertEtherBalance(eth.accounts[2], 96620.8970);
assertTokenBalance(eth.accounts[2], tokenA, 1300);
assertTokenBalance(tokenTraderFactory.address, tokenB, 9);
assertTokenBalance(tokenSellerFactory.address, tokenB, 9);
assertEtherBalance(tokenTrader.address, 258.0909);
assertTokenBalance(tokenTrader.address, tokenA, 4);
assertTokenBalance(tokenTrader.address, tokenB, 9);
assertEtherBalance(tokenSeller.address, 149);
assertTokenBalance(tokenSeller.address, tokenA, 4);
assertTokenBalance(tokenSeller.address, tokenB, 9);
assertEtherBalance(gntTokenTrader.address, 159);
assertTokenBalance(gntTokenTrader.address, tokenA, 5.9231);
assertTokenBalance(gntTokenTrader.address, tokenB, 9);
exit;
EOF
grep "RESULT: " $OTHEROUTPUTFILE | sed "s/RESULT: //" > $OTHERRESULTFILE
cat $OTHERRESULTFILE
|
bokkypoobah/TokenTrader
|
testing/test_20170115_1619/04_testOther.sh
|
Shell
|
mit
| 17,531 |
# only works on mac
[[ `uname -s` = "Darwin" ]] || exit
# install brew if it is not already installed
if ! command -v brew >/dev/null; then
    /usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
fi
# capture the installed package list once; update_module installs a module only if it is missing
brew_list=`brew list`
function update_module {
for name in $brew_list; do
if [ $name = "$1" ]; then
echo "[INFO] - `date +%T` - already exists brew module [$1]."
return
fi
done
brew install $1
}
update_module git
update_module macvim
update_module tree
update_module telnet
update_module tmux
update_module autojump
update_module htop
update_module fzf
update_module thefuck
update_module mysql
update_module maven
update_module most
update_module wget
update_module node
update_module fd
update_module tldr
update_module fpp
update_module urlview
update_module hub
update_module direnv
update_module npm
|
whiledoing/dotfiles
|
script/system_startup_for_software.sh
|
Shell
|
mit
| 933 |
#! /bin/bash -eu
#########################
#
# Name: esacci2cmip5-format.bash
#
# Purpose: Rewrite ESA-CCI variable files to CMIP5:ish format
#
# Usage: ./esacci2cmip5-format.bash [-h] -i input-directory -o output-directory
#
# -h displays help text
# -i input directory
# -o output directory
#
# Extracts, renames and concatenates variables from ESA-CCI files
# into a single CMIP5:ish file.
#
# Revision history: 2015-11-20 -- Script created, Martin Evaldsson, Rossby Centre
#
# Contact persons: [email protected]
#
########################
cmip_variable=clwvi # clivi, clt
cci_variable=lwp # iwp , cc_total
cmip_filename=${cmip_variable}_Amon_ESACCI-L3C_CLOUD-CLD_PRODUCTS-AVHRR-fv1.4_observation_r1i1p1
# data folder as of Nov 2015: /nobackup/rossby17/rossby/joint_exp/esacci/Clouds/phase2/
function usage {
echo "
Usage: ./esacci2cmip5-format.bash [-h] -i input-directory -o output-directory
-h displays help text
-i input directory
-o output directory
Extracts, renames and concatenates variables from ESA-CCI files
into a single CMIP5:ish file.
" 1>&2
}
function usage_and_exit {
exit_code=${1:-0}
usage
exit $exit_code
}
if [ $# -eq 0 ]; then
usage_and_exit 0
fi
while getopts "hi:o:" opt; do
case $opt in
h)
usage_and_exit 0
;;
i)
input_directory=$OPTARG
;;
o)
output_directory=$OPTARG
;;
\?)
echo "Invalid option: -$OPTARG" >&2
usage_and_exit 1
;;
esac
done
all_input=$(ls -1 ${input_directory}/*nc)
first_date=$(basename $(ls -1 ${input_directory}/*nc | awk -F - '{print $1}' | sort | head -1))
last_date=$(basename $(ls -1 ${input_directory}/*nc | awk -F - '{print $1}' | sort | tail -1))
output_tmp=${output_directory}/tmp
mkdir -p "$output_tmp"
cmip5_file=${output_directory}/${cmip_filename}_${first_date}-${last_date}.nc
for line in ${all_input}
do
case ${cmip_variable} in
clt)
# Extract cci_variable and scale variable (fraction -> %)
cdo mulc,100 -selvar,${cci_variable} $line ${output_tmp}/$(basename ${line%*.nc})-${cmip_variable}.nc
;;
clwvi)
# Extract lwp
cdo selvar,${cci_variable} $line ${output_tmp}/$(basename ${line%*.nc})-${cmip_variable}.nc
;;
clivi)
# Extract iwp
cdo selvar,${cci_variable} $line ${output_tmp}/$(basename ${line%*.nc})-${cmip_variable}.nc
;;
esac
done
# Concatenate to single file
ncrcat $(ls -1 ${output_tmp}/* | tr '\n' ' ') ${output_tmp}/tmp1.nc
# Update unit attribute
case ${cmip_variable} in
clt)
ncatted -a units,${cci_variable},c,c,"%" ${output_tmp}/tmp1.nc
;;
clwvi)
# No unit attribute change needed for lwp
;;
clivi)
# No unit attribute change needed for iwp
;;
esac
# Rename variable
ncrename -v ${cci_variable},${cmip_variable} ${output_tmp}/tmp1.nc ${cmip5_file}
|
marev711/scripts
|
process-obsdata/esacci2cmip5-format-v1.4.bash
|
Shell
|
mit
| 3,130 |
disk="sda2"
admin="[email protected]"
s=$(df | grep "$disk" | awk '{print $5}' )
s="${s%%%}"
notify_admin () {
  echo "disk usage on $disk is greater than 95%" | mail -s "df notify" "$admin"
}
[ "$s" -gt 95 ] && notify_admin
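# Example (hypothetical path) cron entry to run this check hourly:
#   0 * * * * /path/to/dfmail.sh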
|
liuyang1/dotfiles
|
gist/dfmail.sh
|
Shell
|
mit
| 198 |
#!/bin/bash
#set -d
# Start supervisord
exec /usr/bin/supervisord -c /etc/supervisor/supervisord.conf
|
frankyrumple/ope
|
docker_build_files/ope-smc/start.sh
|
Shell
|
mit
| 104 |
#!/bin/bash
#-------------------------------------------------------------------------------------------------------
# Copyright (C) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
#-------------------------------------------------------------------------------------------------------
CH_DIR=$1
BUILD_TYPE=$2
RES=
CC=0
CXX=0
FIND_CLANG() {
for i in 7 8 9
do
if [[ -f "/usr/bin/clang-3.${i}" ]]; then
CC="/usr/bin/clang-3.${i}"
CXX="/usr/bin/clang++-3.${i}"
fi
done
if [[ $CC == 0 ]]; then
echo "Error: Couldn't find Clang"
exit 1
else
echo "Using CC [${CC}]"
echo "Using CXX [${CXX}]"
fi
}
SAFE_RUN() {
    # declare and assign separately so $? reflects the command, not 'local'
    local SF_RETURN_VALUE
    SF_RETURN_VALUE=$($1 2>&1)
    if [[ $? != 0 ]]; then
>&2 echo $SF_RETURN_VALUE
exit 1
fi
}
TEST () {
if [[ $RES =~ $1 ]]; then
echo "${TEST_PATH} : PASS"
else
echo "${TEST_PATH} FAILED"
echo -e "$RES"
exit 1
fi
}
RES=$(c++ --version)
if [[ ! $RES =~ "Apple LLVM" ]]; then
FIND_CLANG
else
CC="cc"
CXX="c++"
fi
RUN () {
TEST_PATH=$1
echo "Testing $TEST_PATH"
SAFE_RUN `cd $TEST_PATH; ${CH_DIR} Platform.js > Makefile`
RES=$(cd $TEST_PATH; cat Makefile)
if [[ $RES =~ "# IGNORE_THIS_TEST" ]]; then
echo "Ignoring $TEST_PATH"
else
SAFE_RUN `cd $TEST_PATH; make CC=${CC} CXX=${CXX}`
RES=$(cd $TEST_PATH; ./sample.o)
TEST "SUCCESS"
SAFE_RUN `cd $TEST_PATH; rm -rf ./sample.o`
fi
}
RUN_CMD () {
TEST_PATH=$1
CMD=$2
echo "Testing $TEST_PATH"
SAFE_RUN `cd $TEST_PATH; $CMD`
}
# static lib tests
tests=$(ls -w | tr "\t" " ")
for item in ${tests[*]}
do
if [[ $item =~ "test-static-" ]]; then
RUN $item
fi
done
# shared lib tests
LIB_DIR="$(dirname ${CH_DIR})"
if [[ `uname -a` =~ "Darwin" ]]; then
export DYLD_LIBRARY_PATH=${LIB_DIR}/:$DYLD_LIBRARY_PATH
else
export LD_LIBRARY_PATH=${LIB_DIR}/:$LD_LIBRARY_PATH
fi
RUN "test-shared-basic"
# test python
RUN_CMD "test-python" "python helloWorld.py ${BUILD_TYPE}"
SAFE_RUN `rm -rf Makefile`
|
mrkmarron/ChakraCore
|
test/native-tests/test_native.sh
|
Shell
|
mit
| 2,218 |
#!/bin/sh
# Set sed command based on OS.
# BSD sed doesn't support \U or \L for case changes, so we use gnu-sed (gsed)
# on macOS; on Linux the system sed is already GNU sed.
if [ "$(uname -s)" = "Darwin" ]; then
SED=gsed;
else
SED=sed;
fi
# Load the Joshua Project database into postgres, and make it accessible
# from sql soup.
MDB_LOCATION=$1;
#MDB_LOCATION="~/Downloads/JPHarvestFieldDataOnly.mdb";
# Create an empty database
psql -c "create database jpharvest"
# UNIQUE constraints cause violations of integrity constraints, and are
# quite possibly due to a bug in mdbtools (perhaps because I don't understand
# the mdb schema too, so change them so that the uniqueness constraint
# is removed
# - MSysNavPaneGroup tables are removed as they're not necessary
# - There are a few errors for missing unique constraints on three tables,
# tblLnkPEOtoGEO, tblLnkPEOtoGEOReligions and tblProgressStatusValues
# - There are 2 errors for duplicate constraint declarations for constraints,
# tblLnkPEOtoGEOProgressStatus_StatusType_fk and tblLNG6LanguageAlternateNames_ROL3_fk
#
# Otherwise it should be error free
mdb-schema $MDB_LOCATION postgres |
$SED 's/^CREATE UNIQUE INDEX/CREATE INDEX/' |
psql jpharvest |
awk '/^(NOTICE|ERROR|WARN)/' | grep -v 'MSysNavPaneGroup'
# - Change to date format (mdb-tools exports this db in MDY format)
# - Fix some case sensitivity issues (mixed case to upper case)
# (these changes will land from upstream by march or april 2014)
for i in $(grep -v "^#" jpharvest-table-insertion-order.txt); do
#echo "Ready to process $i. Enter to start"; read;
echo -n "Processing $i... ";
mdb-export -I postgres -q \' $MDB_LOCATION $i | $SED '1i\
set DateStyle="MDY";
s/\(RPz[a-z]\)/\U\1/;
s/\(UG[yz][a-z]\)/\U\1/;
' |
psql jpharvest |
awk '/INSERT 0 1/ {c++;}; /^[^IS]/ {print $0;}; /^$/ {}; END {print c " inserts";}'
done
# Need to update rows in tblLNG6LanguageAlternateNames where ROG3 is NULL
# then add a primary key on (probably) ROG3+ROL3+LangAltName (to allow SqlSoup to work)
psql jpharvest -c 'delete from "tblLNG6LanguageAlternateNames" where "ROG3" is NULL;'
psql jpharvest -c 'alter table "tblLNG6LanguageAlternateNames" add constraint "tblLNG6LanguageAlternateNames_pkey" PRIMARY KEY ("ROG3","ROL3", "LangAltName");'
# Need to add primary key on tblLNG7DialectAlternateNames (ROL4+AlternateDialectName)
# to allow SQLSoup to work
psql jpharvest -c 'alter table "tblLNG7DialectAlternateNames" add constraint "tblLNG7DialectAlternateNames_pkey" PRIMARY KEY ("ROL4", "AlternateDialectName");'
|
edwinsteele/language_explorer
|
data/load_jpharvest.sh
|
Shell
|
mit
| 2,558 |
#!/bin/bash
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
source "$DIR/redist-tools"
source "$DIR/redist-config"
if [ ! -d "$DIR/target/upstream" ] ; then
rm -rf "$DIR/target/upstream"
mkdir -p "$DIR/target/upstream"
git clone $UPSTREAM_GIT_URL "$DIR/target/upstream"
else
cd "$DIR/target/upstream"
git fetch origin
git checkout master
git reset --hard origin/master
cd -
fi
JSONVER=$(parse-json "$DIR/target/upstream/package.json")
if [ $? -ne 0 ] ; then
echo "Could not parse $DIR/target/upstream/package.json"
exit 255
fi
POMVER=$(parse-pom "$DIR/pom.xml")
if [ $? -ne 0 ] ; then
echo "Could not parse $DIR/pom.xml"
exit 255
fi
vercomp $POMVER $JSONVER
if [ $? -eq 1 ] ; then
echo "Waiting for a newer release"
else
echo "Searching for update..."
GOOD_REV=$(xpath "$DIR/pom.xml" "/project/properties/$POM_PROPERTY/text()")
if [ $? -ne 0 ] ; then
echo "Cannot parse pom.xml, check the scripts are still valid"
exit 255
fi
cd "$DIR/target/upstream"
git bisect start origin/master $GOOD_REV
BAD_REV=$(git bisect run "$DIR/version-check.sh" "$DIR/target/upstream/package.json" "$DIR/pom.xml" | sed -ne "s/ is the first bad commit//p")
if [ "A$BAD_REV" == "A" ] ; then
echo "Could not find revision, check the scripts are still valid"
cd -
exit 255
fi
git checkout $BAD_REV
cd -
EXACT=$(xpath "$DIR/pom.xml" '/project/version/text()' 2>/dev/null)
REPL=$(parse-json "$DIR/target/upstream/package.json")
sed -e "s:<version>$EXACT</version>:<version>$REPL-SNAPSHOT</version>:;s:<$POM_PROPERTY>$GOOD_REV</$POM_PROPERTY>:<$POM_PROPERTY>$BAD_REV</$POM_PROPERTY>:" < "$DIR/pom.xml" > "$DIR/pom.xml.new"
mvn clean verify -f "$DIR/pom.xml.new"
if [ $? -eq 0 ] ; then
mv -f "$DIR/pom.xml.new" "$DIR/pom.xml"
fi
fi
|
jszip/analytics-redist
|
update-prep.sh
|
Shell
|
mit
| 1,861 |
#!/bin/bash
cd $(dirname $0)
export EXTERNAL_1="Value 1 taken from external environment"
export EXTERNAL_2="Value 2 taken from external environment"
scuba -e CMDLINE="This comes from the cmdline" example
|
JonathonReinhart/scuba
|
example/env_vars/run_example.sh
|
Shell
|
mit
| 206 |
#!/bin/bash
mkdir build
cd build
if [ "$(expr substr $(uname -s) 1 5)" == "Linux" ]; then
DYNAMIC_EXT="so"
OPENMP="-DWITH_OPENMP=1"
fi
if [ "$(uname -s)" == "Darwin" ]; then
DYNAMIC_EXT="dylib"
OPENMP=""
fi
cmake -LAH .. \
$OPENMP \
-DCMAKE_SKIP_RPATH=1 \
-DWITH_EIGEN=1 \
-DBUILD_opencv_apps=0 \
-DBUILD_TESTS=0 \
-DBUILD_DOCS=0 \
-DBUILD_PERF_TESTS=0 \
-DBUILD_ZLIB=0 \
-DZLIB_LIBRARY=$PREFIX/lib/libz.$DYNAMIC_EXT \
-DBUILD_TIFF=0 \
-DBUILD_PNG=0 \
-DBUILD_OPENEXR=1 \
-DBUILD_JASPER=0 \
-DBUILD_JPEG=0 \
-DPYTHON_EXECUTABLE=$PREFIX/bin/python${PY_VER} \
-DPYTHON_INCLUDE_PATH=$PREFIX/include/python${PY_VER} \
-DPYTHON_LIBRARY=$PREFIX/lib/libpython${PY_VER}.$DYNAMIC_EXT \
-DPYTHON_PACKAGES_PATH=$SP_DIR \
-DWITH_CUDA=0 \
-DWITH_OPENCL=0 \
-DWITH_OPENNI=0 \
-DWITH_FFMPEG=0 \
-DCMAKE_INSTALL_PREFIX=$PREFIX
make
make install
|
willyd/conda-recipes
|
opencv/recipe/build.sh
|
Shell
|
mit
| 1,997 |
#!/usr/bin/env bash
USER_ID=${LOCAL_USER_ID:-9001}
echo "Create user with UID : $USER_ID"
useradd --shell /bin/bash -u ${USER_ID} -o -c "" -m user
/usr/sbin/php-fpm7.0
|
v2p/pub
|
dockers/php/fpm-7.0-dev/entrypoint.sh
|
Shell
|
mit
| 170 |
#!/usr/bin/env bash
#
# Copyright (c) 2018 The Fujicoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Make sure all shell scripts:
# a.) explicitly opt out of locale dependence using
# "export LC_ALL=C" or "export LC_ALL=C.UTF-8", or
# b.) explicitly opt in to locale dependence using the annotation below.
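#
# For example (illustrative), a compliant script under (a) starts with:
#   #!/usr/bin/env bash
#   export LC_ALL=C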
export LC_ALL=C
EXIT_CODE=0
for SHELL_SCRIPT in $(git ls-files -- "*.sh" | grep -vE "src/(secp256k1|univalue)/"); do
if grep -q "# This script is intentionally locale dependent by not setting \"export LC_ALL=C\"" "${SHELL_SCRIPT}"; then
continue
fi
FIRST_NON_COMMENT_LINE=$(grep -vE '^(#.*)?$' "${SHELL_SCRIPT}" | head -1)
if [[ ${FIRST_NON_COMMENT_LINE} != "export LC_ALL=C" && ${FIRST_NON_COMMENT_LINE} != "export LC_ALL=C.UTF-8" ]]; then
echo "Missing \"export LC_ALL=C\" (to avoid locale dependence) as first non-comment non-empty line in ${SHELL_SCRIPT}"
EXIT_CODE=1
fi
done
exit ${EXIT_CODE}
|
fujicoin/fujicoin
|
test/lint/lint-shell-locale.sh
|
Shell
|
mit
| 1,066 |
#!/bin/bash
# Options / Usage
# put this script in the same directory as your *.cabal file
# it will use the first line of "cabal info ." to determine the package name
# custom options for "cabal haddock" (cabal haddock --help,
# http://www.haskell.org/haddock/doc/html/invoking.html)
CUSTOM_OPTIONS=(--haddock-options='-q aliased')
# hackage server to upload to (and to search uploaded versions for)
HACKAGESERVER=hackage.haskell.org
# whether to use cabal install (1) or copy docs directly from cabal haddock (0)
# some user had troubles installing their package (or dependencies)
CABAL_INSTALL=0
# put your credentials into ~/.netrc: (see man netrc)
# machine $HACKAGESERVER
# login $USERNAME
# password $PASSWORD
# nothing to configure below this line
# How it works
#
# It tries to find your package on the given hackage server, and
# uploads the generated -doc.tar.gz.
# It first tries the released version, then the candidate.
#
# To generate the docs it uses "cabal install" to install into a temporary directory,
# with a temporary ghc package db in it.
set -e
status_code() {
local code=$(curl "http://${HACKAGESERVER}$1" --silent -o /dev/null --write-out %{http_code})
echo "http://${HACKAGESERVER}$1 $code" >&2
echo $code
}
self=$(readlink -f "$0")
base=$(dirname "${self}")
cd "${base}"
tmpdir=$(mktemp --tmpdir -d doc-package-XXXXXXX)
trap 'rm -rf "${tmpdir}"' EXIT
name=$(cabal info . 2>/dev/null | awk '{print $2;exit}')
plain_name="${name%-*}" # strip version number (must not contain a '-', the name itself can)
if [ "200" = "$(status_code /package/${name})" ]; then
echo "Found released version ${name}"
targeturl="/package/${name}/docs"
elif [ "200" = "$(status_code /package/${name}/candidate)" ]; then
echo "Found candidate version ${name}"
targeturl="/package/${name}/candidate/docs"
else
echo "Found no uploaded version"
targeturl=""
fi
prefix="${tmpdir}/prefix"
docdir="${prefix}/share/doc/${name}"
if [ "${CABAL_INSTALL}" = 1 ]; then
# after cabal install:
htmldir="${docdir}/html"
else
# without cabal install:
htmldir="${tmpdir}/dist/doc/html/${plain_name}"
fi
packagedb="${tmpdir}/package.conf.d"
mkdir -p "${packagedb}"
pkgdocdir="${tmpdir}/${name}-docs"
pkgdocarchive="${tmpdir}/${name}-doc.tar.gz"
cabal configure \
--builddir="${tmpdir}/dist" \
--disable-optimization --ghc-option -O0 \
--docdir="${docdir}" \
--prefix="${prefix}"
# need separate haddock step, as install doesn't forward --builddir to haddock with
# cabal install --enable-documentation
# otherwise configure+haddock could be merged into install
# (prefix cabal haddock options with --haddock- for cabal install)
cabal haddock \
--builddir="${tmpdir}/dist" \
--html-location='/package/$pkg-$version/docs' \
--haddock-option='--built-in-themes' \
--hoogle --html \
"${CUSTOM_OPTIONS[@]}" \
--contents-location='/package/$pkg-$version' \
--hyperlink-source
if [ "${CABAL_INSTALL}" = 1 ]; then
cabal install \
--builddir="${tmpdir}/dist" \
--docdir="${docdir}" \
--prefix="${prefix}" \
--ghc-pkg-option --no-user-package-conf \
--ghc-pkg-option --package-db="${packagedb}"
fi
cp -ar "${htmldir}" "${pkgdocdir}"
(cd "$(dirname ${pkgdocdir})"; tar --format=ustar -caf "${pkgdocarchive}" "$(basename ${pkgdocdir})")
mkdir -p dist/
echo "Copying $(basename ${pkgdocdir}) to dist/"
cp -ar "${pkgdocarchive}" dist/
if [ "${targeturl}" != "" ]; then
echo -n "Upload to http://${HACKAGESERVER}${targeturl} (y/N)? "
read ack
if [ "${ack}" = "y" -o "${ack}" = "Y" ]; then
echo "Uploading..."
curl \
-X PUT \
-H "Content-Type: application/x-tar" \
-H "Content-Encoding: gzip" \
--data-binary @"${pkgdocarchive}" \
--digest --netrc \
"http://${HACKAGESERVER}${targeturl}"
else
echo "Not uploading."
fi
fi
echo Done.
|
asilvestre/haskell-neo4j-rest-client
|
hackage-upload-docs.sh
|
Shell
|
mit
| 3,796 |
#!/bin/sh
set -e
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
install_framework()
{
if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
local source="${BUILT_PRODUCTS_DIR}/$1"
elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
elif [ -r "$1" ]; then
local source="$1"
fi
local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
if [ -L "${source}" ]; then
echo "Symlinked..."
source="$(readlink "${source}")"
fi
  # use filter instead of exclude so missing patterns don't throw errors
echo "rsync -av --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
rsync -av --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
local basename
basename="$(basename -s .framework "$1")"
binary="${destination}/${basename}.framework/${basename}"
if ! [ -r "$binary" ]; then
binary="${destination}/${basename}"
fi
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
strip_invalid_archs "$binary"
fi
# Resign the code if required by the build settings to avoid unstable apps
code_sign_if_enabled "${destination}/$(basename "$1")"
# Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]})
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
code_sign_if_enabled "${destination}/${lib}"
done
fi
}
# Signs a framework with the provided identity
code_sign_if_enabled() {
if [ -n "${EXPANDED_CODE_SIGN_IDENTITY}" -a "${CODE_SIGNING_REQUIRED}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
    # Use the current code_sign_identity
echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
echo "/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS} --preserve-metadata=identifier,entitlements \"$1\""
/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS} --preserve-metadata=identifier,entitlements "$1"
fi
}
# Strip invalid architectures
strip_invalid_archs() {
binary="$1"
# Get architectures for current file
archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | rev)"
stripped=""
for arch in $archs; do
if ! [[ "${VALID_ARCHS}" == *"$arch"* ]]; then
# Strip non-valid architectures in-place
lipo -remove "$arch" -output "$binary" "$binary" || exit 1
stripped="$stripped $arch"
fi
done
if [[ "$stripped" ]]; then
echo "Stripped $binary of architectures:$stripped"
fi
}
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_framework "$BUILT_PRODUCTS_DIR/Alamofire/Alamofire.framework"
install_framework "$BUILT_PRODUCTS_DIR/FaveButton/FaveButton.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_framework "$BUILT_PRODUCTS_DIR/Alamofire/Alamofire.framework"
install_framework "$BUILT_PRODUCTS_DIR/FaveButton/FaveButton.framework"
fi
|
fthomasmorel/insapp-iOS
|
Pods/Target Support Files/Pods-Insapp/Pods-Insapp-frameworks.sh
|
Shell
|
mit
| 3,761 |
#!/bin/sh
export CRONKITE_AURURL=""
OUTPUT=$(${1} -search example 2>&1)
rval=$?
# echo it out once just for ctest -V output
echo $OUTPUT
echo $OUTPUT | egrep -qi 'illegal format'
nval=$?
if [ $rval -eq 1 ] && [ $nval -eq 0 ]; then
exit 0;
else
exit 1;
fi
|
cactus/cronkite
|
src/cli/tests/cli-nourl.sh
|
Shell
|
mit
| 263 |
#!/usr/bin/env bash
shopt -s -o pipefail
set -e # Exit on error
PKG_NAME="gperf"
PKG_VERSION="3.0.4"
TARBALL="${PKG_NAME}-${PKG_VERSION}.tar.gz"
SRC_DIR="${PKG_NAME}-${PKG_VERSION}"
function showHelp() {
echo -e "--------------------------------------------------------------------------------------------------------------"
echo -e "Description: Gperf generates a perfect hash function from a key set."
echo -e "--------------------------------------------------------------------------------------------------------------"
echo -e ""
}
function prepare() {
ln -sv /sources/${TARBALL} ${TARBALL}
}
function unpack() {
tar xf ${TARBALL}
}
function build() {
./configure --prefix=/usr
make ${MAKE_PARALLEL}
}
function runTest() {
make -j1 check
}
function instal() {
make ${MAKE_PARALLEL} install
}
function clean() {
rm -rf ${SRC_DIR} ${TARBALL}
}
# Run the installation procedure
time { showHelp;clean;prepare;unpack;pushd ${SRC_DIR};build;[[ ${MAKE_TESTS} = TRUE ]] && runTest;instal;popd;clean; }
# Verify installation
if [ -f /usr/bin/gperf ]; then
touch ${DONE_DIR_BUILD_SYSTEM}/$(basename $(pwd))
fi
|
PandaLinux/base-64
|
build-system/gperf/build.sh
|
Shell
|
mit
| 1,167 |
connected_to_internet() {
test_urls="\
https://www.google.com/ \
https://www.microsoft.com/ \
https://www.cloudflare.com/ \
"
processes="0"
pids=""
for test_url in $test_urls; do
curl --silent --head "$test_url" > /dev/null &
pids="$pids $!"
processes=$(($processes + 1))
done
while [ $processes -gt 0 ]; do
for pid in $pids; do
if ! ps | grep "^[[:blank:]]*$pid[[:blank:]]" > /dev/null; then
# Process no longer running
processes=$(($processes - 1))
pids=$(echo "$pids" | sed -E "s/(^| )$pid($| )/ /g")
if wait $pid; then
# Success! We have a connection to at least one public site, so the
# internet is up. Ignore other exit statuses.
kill -TERM $pids > /dev/null 2>&1 || true
wait $pids > /dev/null 2>&1
return 0
fi
fi
done
# wait -n $pids # Better than sleep, but not supported on all systems
sleep 0.1
done
return 1
}
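# Usage sketch (assuming this helper is sourced from another script):
#   source scripts/helpers/connected_to_internet.sh
#   if connected_to_internet; then
#     echo "online"
#   else
#     echo "offline"
#   fi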
|
patrikkernke/dotfiles
|
scripts/helpers/connected_to_internet.sh
|
Shell
|
mit
| 981 |
#!/bin/ksh
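# Usage (sketch): ./psalm.sh 91    # prints Psalm 91; defaults to Psalm 23 if no argument is given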
Number=${1:-23}
print "Psalm $Number:"
curl -s http://www.usccb.org/bible/psalms/$Number | grep 'class=.po' | sed -e "s/<span.*span>//" -e "s/<sup.*sup>//" -e "s/<[^>]*>//g" | recode -f html..ascii
|
glmck13/Askpi
|
psalm.sh
|
Shell
|
mit
| 212 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for DSA-3107-1
#
# Security announcement date: 2014-12-20 00:00:00 UTC
# Script generation date: 2017-01-01 21:07:09 UTC
#
# Operating System: Debian 7 (Wheezy)
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - subversion:1.6.17dfsg-4+deb7u7
#
# Last versions recommended by security team:
# - subversion:1.6.17dfsg-4+deb7u11
#
# CVE List:
# - CVE-2014-3580
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade subversion=1.6.17dfsg-4+deb7u11 -y
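# Optional follow-up (illustrative): confirm the installed version with
#   dpkg -s subversion | grep '^Version:'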
|
Cyberwatch/cbw-security-fixes
|
Debian_7_(Wheezy)/x86_64/2014/DSA-3107-1.sh
|
Shell
|
mit
| 640 |
#!/bin/bash
pushd $(dirname $0) > /dev/null
BASEDIR=$(dirname $(pwd))
popd > /dev/null
# Info
VERSION='0.1'
# Formatting
T_BOLD=$(tput bold)
T_NORMAL=$(tput sgr0)
T_UNDERSCORE=$(tput smul)
T_GREEN=$(tput setf 2)
# Check .env file
ENV_FILE="${BASEDIR}/.env"
if [ ! -f $ENV_FILE ]; then
echo "${T_UNDERSCORE}.env${T_NORMAL} file not found. Please create it first by running ${T_GREEN}composer install${T_NORMAL}"
exit 1
fi
source $ENV_FILE
# Check WordPress config file
WP_CONFIG_FILE="${BASEDIR}/config/application.php"
if [ ! -f $WP_CONFIG_FILE ]; then
echo "${T_UNDERSCORE}config/application.php${T_NORMAL} file not found, exiting"
exit 1
fi
# Replace ms-config
sed "s:#MULTISITE_CONFIGS#:define('WP_ALLOW_MULTISITE', true);:" -i $WP_CONFIG_FILE
echo "Please open the following URL in your browser: ${T_GREEN}${WP_SITEURL}/wp-admin/network.php${T_NORMAL}"
echo "Note that you need to select ${T_GREEN}Sub-domains${T_NORMAL}. Sub-directories install is ${T_UNDERSCORE}NOT${T_NORMAL} supported by this stack."
read -p "After clicking the ${T_GREEN}Install${T_NORMAL} button, come back here and press [Enter]... "
read -r -d '' WP_MS_CONFIG << EOC || true < /dev/tty
define('MULTISITE', true);\\
define('SUBDOMAIN_INSTALL', false);\\
define('DOMAIN_CURRENT_SITE', getenv('DOMAIN_CURRENT_SITE') );\\
define('PATH_CURRENT_SITE', '/');\\
define('SITE_ID_CURRENT_SITE', 1);\\
define('BLOG_ID_CURRENT_SITE', 1);\\
define('ADMIN_COOKIE_PATH', '/');\\
define('COOKIE_DOMAIN', '');\\
define('COOKIEPATH', '');\\
define('SITECOOKIEPATH', '');
EOC
sed "s:define('WP_ALLOW_MULTISITE', true);:$WP_MS_CONFIG:" -i $WP_CONFIG_FILE
echo "You can now hit the ${T_GREEN}Login${T_NORMAL} link, or go to this URL: ${T_GREEN}${WP_SITEURL}/wp-login.php${T_NORMAL}"
echo -e "${T_GREEN}Enjoy!${T_NORMAL}\n"
|
kucrut/wp-stack
|
bin/setup-multisite.sh
|
Shell
|
mit
| 1,839 |
#!/bin/bash
echo "*******************************************"
echo "AddressBase Basic"
BATCH_IMPORT_NUM_ROWS=100000 ./manage.py download_and_import_addressbase_basic
echo "*******************************************"
echo "Local Authorities"
./manage.py download_and_import_local_authorities
echo "*******************************************"
echo "Postcode / GSS Codes"
./manage.py download_and_import_postcode_gss_codes
echo "*******************************************"
echo "writing new cache version"
unset -v latest
for file in "/tmp/addressbase_basic"/*; do
[[ $file -nt $latest ]] && latest=$file
done
./manage.py write_new_cache_version ${latest}
|
ministryofjustice/postcodeinfo
|
scripts/download_and_import_all.sh
|
Shell
|
mit
| 663 |
#!/bin/bash
# Create and use temp directory
rm -rf /tmp/ceph-osd
mkdir -p /tmp/ceph-osd
cd /tmp/ceph-osd
# Create "run" dir for services
mkdir -p /var/run/ceph
# Create and register OSD
mkdir -p /var/lib/ceph/osd/ceph-${osd_id}
ceph-osd -i ${osd_id} --mkfs --mkkey
# Setup service
chkconfig ceph on
|
comodit/demos
|
ceph-cluster/osd/files/install.sh
|
Shell
|
mit
| 302 |
pod repo push egg Door.podspec --verbose --allow-warnings
|
Limon-O-O/Lego
|
Modules/Door/upload.sh
|
Shell
|
mit
| 58 |
#!/bin/bash
source `dirname $0`/globals.sh
starcluster sshmaster $CLUSTER -u $CLUSTER_USER "cd $PYVOTUNE_DIR && git pull"
echo "Killing master"
starcluster sshmaster $CLUSTER "tmux kill-session -t master"
echo "Flushing master redis"
starcluster sshmaster $CLUSTER "redis-cli flushall"
echo "Starting master..."
starcluster sshmaster $CLUSTER -u $CLUSTER_USER "tmux new -d -s master $PYVOTUNE_DIR/$START_MASTER"
#echo "Pausing"
#sleep 5
#`dirname $0`/start_nodes.sh
|
aelaguiz/pyvotune
|
samples/mnist/start_cluster.sh
|
Shell
|
mit
| 473 |
#!/bin/sh
#ACTION=$1
ACTION="start_nat"
echo start `date` > /tmp/ks_nat_log.txt
ks_nat=`nvram get ks_nat`
[ "$ks_nat" == "1" ] && echo exit `date` >> /tmp/ks_nat_log.txt && exit
for i in $(find /jffs/koolshare/init.d/ -name 'N*' | sort) ;
do
case "$i" in
*.sh )
# Source shell script for speed.
trap "" INT QUIT TSTP EXIT
#set $1
logger "nat_log_1 $i"
if [ -r "$i" ]; then
. $i $ACTION
fi
;;
*)
# No sh extension, so fork subprocess.
logger "nat_log_2 $i"
$i $ACTION
;;
esac
done
echo finish `date` >> /tmp/ks_nat_log.txt
|
koolshare/ttsoft
|
softcenter/softcenter/bin/ks-nat-start.sh
|
Shell
|
mit
| 695 |
#!/bin/sh
# Simple script to manager mongo replica sets
# Environment variables
SERVICE_NAME=${SERVICE_NAME:-mongo}
NETWORK_NAME=${NETWORK_NAME:-mongo}
REPLICA_SETS=${REPLICA_SETS:-rs}
MONGODB_PORT=${MONGODB_PORT:-27017}
services= master= i=
#docker_api() { curl -sN --unix-socket /run/docker.sock http:/v1.26/$*; }
get_primary() {
services=$(nslookup tasks.$NETWORK_NAME 2>/dev/null | awk "/Addr/ {print \$4\":$MONGODB_PORT\"}")
for i in $services; do
[ -n "$(mongo $i --quiet --eval 'rs.isMaster().setName' 2>&1)" ] \
&& master=$(mongo $i --quiet --eval "rs.status().members.find(r=>r.state===1).name") \
&& return
done || mongo $i --quiet --eval "rs.initiate()" >/dev/null && master=$i \
|| { echo Database is broken; exit; }
}
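# sets <add|remove> <host:port> - run rs.add()/rs.remove() against the current
# primary, re-resolving the primary first if it is no longer the master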
sets() {
mongo $master --eval "rs.isMaster().ismaster" | grep -q true || get_primary
mongo $master --eval "rs.$1(\"$2\")" >/dev/null && echo $1 $2
}
echo -n .. Service $SERVICE_NAME is\ && docker service ps $SERVICE_NAME >/dev/null \
&& echo UP || { echo DOWN; exit 1; }
echo -n .. Master -\ && get_primary && echo $master
echo .. Remove down replica sets
for i in $(mongo $master --quiet --eval 'rs.status().members.filter(r=>r.state===8).map(r=>r.name).join(" ")'); do
sets remove $i
done
echo .. Add uninitialized services
for i in $services; do
mongo $i --eval 'rs.status().members.find(r=>r.state===1).self' &>/dev/null || sets add $i
done
echo .. Listen for docker container events
docker events -f type=container -f event=start -f event=die \
-f service=$SERVICE_NAME -f network=$NETWORK_NAME |
{
while read -r l; do
case $l in
*start*) sets add $(echo $l | sed 's/.* name=\(.*\))$/\1/').$NETWORK_NAME;;
*die*) sets remove $(echo $l | sed 's/.* name=\(.*\))$/\1/').$NETWORK_NAME;;
esac
done
}
|
vasetech/mongo-rs-ctrl
|
ctrl.sh
|
Shell
|
mit
| 1,779 |
#!/usr/bin/env bats
# tests against get.sh
BASHRC_TMP=~/.bashrc.msu
cp ~/.bashrc ~/.bashrc~ # backup
function setup() {
mv ~/.bashrc "${BASHRC_TMP}"
touch ~/.bashrc
}
function teardown() {
mv "${BASHRC_TMP}" ~/.bashrc
rm -rf /tmp/msu
}
@test "test-run get.sh" {
cat get.sh | bash
}
@test "clones to /tmp/msu" {
cat get.sh | bash
[ -d /tmp/msu ]
}
@test "removes existing directory at /tmp/msu before cloning" {
mkdir -p /tmp/msu
cat get.sh | bash
[ -d /tmp/msu ]
}
@test "clones to a depth of 1 by default" {
cat get.sh | bash
cd /tmp/msu
[ "$(git rev-list HEAD --count)" -eq 1 ]
cd ..
}
@test "clone for a certain build" {
local hash="9bc50798b321b134a0d471a8584fba4fc0c15b06"
cat get.sh | BUILD="${hash}" bash
cd /tmp/msu
[ "$(git rev-parse HEAD)" == "${hash}" ]
cd ..
}
@test "download url resolves successfully" {
wget https://git.io/vTE0s -O _test_get.sh
real="$(cat get.sh)"
downloaded="$(cat _test_get.sh)"
[ "${real}" == "${downloaded}" ]
}
|
GochoMugo/msu
|
test/test.get.sh
|
Shell
|
mit
| 1,015 |
#!/usr/bin/env bash
# This is a small script to stitch panorama images produced by Samsung Gear360
# Could be adopted to use with other cameras after creating pto file
# (Hugin template)
#
# https://github.com/ultramango/gear360pano
# http://stackoverflow.com/questions/59895/can-a-bash-script-tell-which-directory-it-is-stored-in
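# Example invocation (illustrative): ./gear360pano.sh -g -o html/data 360_010.JPG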
WHICH=`which $0`
DIR=$(dirname `readlink -f $WHICH`)
SCRIPTNAME=$0
GALLERYDIR="html"
OUTDIR="$DIR/$GALLERYDIR/data"
OUTTMPNAME="out"
PTOTMPL_SM_C200="$DIR/gear360sm-c200.pto"
PTOTMPL_SM_R210="$DIR/gear360sm-r210.pto"
JPGQUALITY=97
PTOJPGFILENAME="dummy.jpg"
# Note, this file is inside GALLERYDIR
GALLERYFILELIST="filelist.txt"
# By default we will ignore files that have been processed
IGNOREPROCESSED="yes"
# Default blending program
BLENDPROG="enblend"
# Default - we use GPU
EXTRANONAOPTIONS="-g"
EXTRAENBLENDOPTIONS="--gpu"
# Debug, yes - print debug, empty - no debug
DEBUG="no"
# Debug, arguments:
# 1. Text to print
print_debug() {
if [ "$DEBUG" == "yes" ]; then
echo "DEBUG: $@"
fi
}
# Clean-up function
clean_up() {
rm -rf "$TEMPDIR"
}
# Function to check if a command fails, arguments:
# - command to execute
# Source:
# http://stackoverflow.com/questions/5195607/checking-bash-exit-status-of-several-commands-efficiently
run_command() {
print_debug "run_command()"
# Remove empty arguments (it will confuse the executed command)
cmd=("$@")
local i
for i in "${!cmd[@]}"; do
[ -n "${cmd[$i]}" ] || unset "cmd[$i]"
done
print_debug "Running command: " "${cmd[@]}"
"${cmd[@]}"
local status=$?
if [ $status -ne 0 ]; then
echo "Error while running $1" >&2
if [ $1 != "notify-send" ]; then
# Display error in a nice graphical popup if available
run_command notify-send -a $SCRIPTNAME "Error while running $1"
fi
clean_up
exit 1
fi
return $status
}
# Function that processes panorama, arguments:
# 1. input filename
# 2. output filename
# 3. template filename
process_panorama() {
print_debug "process_panorama()"
print_debug "Args: $@"
# Create temporary directory
if [ -n "$TEMPDIRPREFIX" ]; then
TEMPDIR=`mktemp -d -p $TEMPDIRPREFIX`
else
TEMPDIR=`mktemp -d`
fi
print_debug "process_panorama: args: in: $1, out: $2, tmpl: $3, tempdir: ${TEMPDIR}"
# Stitch panorama (same file twice as input)
echo "Processing input images (nona)"
# We need to use run_command with many parameters, or $1 doesn't get
# quoted correctly and we cannot use filenames with spaces
run_command "nona" \
"$EXTRANONAOPTIONS" \
"-o" "$TEMPDIR/$OUTTMPNAME" \
"-m" "TIFF_m" \
"-z" "LZW" \
"$3" \
"$1" \
"$1"
echo "Stitching input images"
# TODO: possibly some clean up in extra arguments handling
if [ "$BLENDPROG" == "multiblend" ]; then
    # Note: there's a weird bug where multiblend uses
    # one space character to separate arguments
EXTRABLENDOPTS="--quiet"
fi
# Add extra options for enblend (ex. gpu)
if [ "$BLENDPROG" == "enblend" ]; then
EXTRABLENDOPTS="$EXTRAENBLENDOPTIONS"
fi
run_command "$BLENDPROG" \
"$EXTRABLENDOPTS" \
"--compression=$JPGQUALITY" \
"-o" "$2" \
"$TEMPDIR/${OUTTMPNAME}0000.tif" \
"$TEMPDIR/${OUTTMPNAME}0001.tif"
# TODO: not sure about the tag exclusion list...
# Note: there's no check for exiftool as it is included with Hugin
IMG_WIDTH=$(exiftool -s -s -s -ImageWidth $1)
IMG_HEIGHT=$(exiftool -s -s -s -ImageHeight $1)
echo "Setting EXIF data (exiftool)"
run_command "exiftool" "-ProjectionType=equirectangular" \
"-q" \
"-m" \
"-TagsFromFile" "$1" \
"-exif:all" \
"-ExifByteOrder=II" \
"-FullPanoWidthPixels=$IMG_WIDTH" \
"-FullPanoHeightPixels=$IMG_HEIGHT" \
"-CroppedAreaImageWidthPixels=$IMG_WIDTH" \
"-CroppedAreaImageHeightPixels=$IMG_HEIGHT" \
"-CroppedAreaLeftPixels=0" \
"-CroppedAreaTopPixels=0" \
"--FocalLength" \
"--FieldOfView" \
"--ThumbnailImage" \
"--PreviewImage" \
"--EncodingProcess" \
"--YCbCrSubSampling" \
"--Compression" \
"$2"
# Problems with "-delete_original!", manually remove the file
rm ${2}_original
# Clean up any files/directories we created on the way
clean_up
}
print_help() {
echo -e "\nSmall script to stitch raw panorama files."
echo "Raw meaning two fisheye images side by side."
  echo -e "Script originally written for Samsung Gear 360.\n"
echo -e "Usage:\n$0 [options] infile [hugintemplate]\n"
echo "Where infile is a panorama file from camera, it can"
echo -e "be a wildcard (ex. *.JPG). hugintemplate is optional.\n"
echo "Panorama file will be written to a file with appended _pano,"
echo -e "example: 360_010.JPG -> 360_010_pano.JPG\n"
echo "-a|--process-all force panorama processing, by default processed"
  echo "                     panoramas are skipped (in output directory)"
echo "-g|--gallery update gallery file list"
echo "-m|--multiblend use multiblend (http://horman.net/multiblend/)"
echo " instead of enblend for final stitching"
echo "-n|--no-gpu do not use GPU (safer but slower)"
echo "-o|--output DIR will set the output directory of panoramas"
echo " default: html/data"
echo "-q|--quality QUALITY will set the JPEG quality to quality"
echo "-r|--remove remove source file after processing (use with care)"
echo "-t|--temp DIR set temporary directory (default: use system's"
echo " temporary directory)"
echo "-h|--help prints this help"
}
create_gallery() {
GALLERYFILELISTFULL="${GALLERYDIR}/${GALLERYFILELIST}"
echo "Updating gallery file list in ${GALLERYFILELISTFULL}"
ls -l *.mp4 *_pano.jpg > ${GALLERYFILELISTFULL}
}
# Process arguments. Source (modified):
# https://stackoverflow.com/questions/192249/how-do-i-parse-command-line-arguments-in-bash
while [[ $# -gt 0 ]]
do
key="$1"
case $key in
-a|--process-all)
IGNOREPROCESSED="no"
shift
;;
-g|--gallery)
CREATEGALLERY="yes"
shift
;;
-h|--help)
print_help
shift
exit 0
;;
-m|--multiblend)
BLENDPROG="multiblend"
shift
;;
-n|--no-gpu)
# Clear use GPU options
EXTRANONAOPTIONS=""
EXTRAENBLENDOPTIONS=""
shift
;;
-o|--output)
OUTDIR="$2"
if [ ! -d "$2" ]; then
echo "Given output ($2) is not a directory, cannot continue"
exit 1
fi
shift
shift
;;
-q|--quality)
JPGQUALITY="$2"
# Two shifts because there's no shift in the loop
# otherwise we can't handle just "-h" option
shift
shift
;;
-r|--remove)
# Remove source file after processing
print_debug "Will remove source file after processing"
REMOVESOURCE=1
shift
;;
-t|--temp)
if [ -d "$2" ]; then
TEMPDIRPREFIX="$2"
else
echo "Given temporary ($2) is not a directory, using system default"
fi
shift
shift
;;
*)
break
;;
esac
done
# Check argument(s)
if [ -z "${1+x}" ]; then
print_help
exit 1
fi
# Check if we have the software to do it (Hugin, ImageMagick)
# http://stackoverflow.com/questions/592620/check-if-a-program-exists-from-a-bash-script
type nona >/dev/null 2>&1 || { echo >&2 "Hugin required but it is not installed. Aborting."; exit 1; }
STARTTS=`date +%s`
# Warn early about the gallery if the output directory is somewhere else
if [ "$CREATEGALLERY" == "yes" ] && [ "$OUTDIR" != "html/data" ] && [ "$OUTDIR" != "./html/data" ]; then
echo -e "\nGallery file list will be updated but output directory not set to html/data\n"
fi
# TODO: add option for parallel
for panofile in $1
do
OUTNAMEPROTO=`dirname "$panofile"`/`basename "${panofile%.*}"`_pano.jpg
OUTNAME=`basename $OUTNAMEPROTO`
OUTNAMEFULL=$OUTDIR/$OUTNAME
# Skip if this is already processed panorama
# https://stackoverflow.com/questions/229551/string-contains-in-bash
if [ $IGNOREPROCESSED == "yes" ] && [ -e "$OUTNAMEFULL" ]; then
echo "$panofile already processed, skipping... (override with -a)"
continue
fi
# Is there a pto override (second argument)?
if [ -n "$2" ]; then
PTOTMPL="$2"
else
# Detect camera model for each image
CAMERAMODEL=`exiftool -s -s -s -Model $panofile`
print_debug "Camera model: $CAMERAMODEL"
case $CAMERAMODEL in
SM-C200)
PTOTMPL=$PTOTMPL_SM_C200
;;
SM-R210)
PTOTMPL=$PTOTMPL_SM_R210
;;
*)
PTOTMPL=$PTOTMPL_SM_C200
;;
esac
print_debug "PTO file: $PTOTMPL"
fi
echo "Processing panofile: $panofile"
  process_panorama "$panofile" "$OUTNAMEFULL" "$PTOTMPL"
if [ ! -z "${REMOVESOURCE+x}" ]; then
echo "Removing: $panofile"
rm $panofile
fi
done
if [ "$CREATEGALLERY" == "yes" ]; then
# This could be a bit more elegant, but this is the easiest
cd $GALLERYDIR
COUNT=`cat $GALLERYFILELIST | wc -l`
echo "Updating gallery file list, old file count: $COUNT"
find data -type f -iname "*.jpg" -o -iname "*.jpeg" -o -iname "*.mp4" > $GALLERYFILELIST
COUNT=`cat $GALLERYFILELIST | wc -l`
echo "New file count: $COUNT"
cd ..
fi
# Inform user about the result
ENDTS=`date +%s`
RUNTIME=$((ENDTS-STARTTS))
echo "Processing took: $RUNTIME s"
echo "Processed file(s) are in $OUTDIR"
# Uncomment this if you don't do videos; otherwise, it is quite annoying
#notify-send "Panorama written to $OUTNAME, took: $RUNTIME s"
exit 0
|
ultramango/gear360pano
|
gear360pano.sh
|
Shell
|
mit
| 9,709 |
#!/usr/bin/env bash
export PYTHONPATH=`pwd`
export PYTHONIOENCODING=UTF-8
virtualenv -p ${1} poline_venv
source poline_venv/bin/activate
${1} setup.py install
#Run smoke tests
echo 'Test: repr(x) for x in _'
ls -lah | pol -s 'repr(x) for x in _'
echo "Test: '{}\t{}'.format(x,c) for x, c in counter(l[1] for l in _ if l[1])"
ls -lah | pol -s "'{}\t{}'.format(x,c) for x, c in counter(l[1] for l in _ if l[1])"
echo "Test: ls()"
pol "i[5] for i in ls('-lah',s=True)"
echo "Test: ls() with old syntax"
pol "i[5] for i in ls(['-lah'],s=True)"
echo "Test: df as bar graph"
df -B1 | pol -s "'{:10.10}\t{:10.10}\t{:10.10}\t{:10.10}\t{:5.5}\t{}{}\t{:10.10}'.format(i[0],bytesize(i[1]),bytesize(i[2]),bytesize(i[3]),i[4],'#'*int(10*int(i[2])/int(i[1])+0.5) if i[1].isdigit() else ' '*5, '_'*(10-int(10*int(i[2])/int(i[1])+0.5)) if i[1].isdigit() else ' '*5,i[5]) for i in _"
echo "Test: barchart function"
pol "'{:20.20}\t{:10.10}\t{:10.10}\t{:10.10}\t{:5.5}\t{}\t{:10.10}'.format(i[0],bytesize(i[1]),bytesize(i[2]),bytesize(i[3]),i[4], barchart(int(i[2])/float(i[1]),p=True) if i[1].isdigit() else ' '*10,i[5]) for i in df('-B1', s=T)"
echo "Test: columns function"
pol "columns(20,10,10,10,5,None,10).format(i[0],bytesize(i[1]),bytesize(i[2]),bytesize(i[3]),i[4], barchart(int(i[2])/float(i[1]),p=True) if i[1].isdigit() else ' '*10,i[5]) for i in df('-B1',s=T)"
echo "Test: chained expressions"
pol "df('-B1')" ":columns(20,10,10,10,5,None,10).format(_0,bytesize(_1),bytesize(_2),bytesize(_3),_4, barchart(int(_2)/float(_1),p=True) if _1.isdigit() else ' '*10,_5)"
#Run unit tests
$1 tests/unittests.py
|
riolet/poline
|
tests/common_tests.sh
|
Shell
|
mit
| 1,610 |
#!/bin/bash
/usr/local/bin/puppet module install zack-r10k
/usr/local/bin/puppet module install abrader-gms
/usr/local/bin/puppet apply /vagrant/r10k-installation.pp
/usr/local/bin/r10k deploy environment -pv
/bin/echo '==> puppet config set hiera_config /etc/puppetlabs/code/environments/production/hiera.yaml'
/usr/local/bin/puppet config set hiera_config /etc/puppetlabs/code/environments/production/hiera.yaml
/bin/echo '==> /bin/systemctl restart pe-puppetserver'
/bin/systemctl restart pe-puppetserver
/bin/echo '==> /usr/local/bin/puppet agent -t'
/usr/local/bin/puppet agent -t || true
/sbin/service puppet stop
|
goutham27/puppet-control
|
config/setup.sh
|
Shell
|
mit
| 620 |
#!/bin/sh
#
# Alternatively you can do something like this in the project root dir:
# >mkdir build/make_debug
# >cd build/make_debug
# >cmake -DCMAKE_BUILD_TYPE:STRING=Debug ../..
# >make
#
current_dir=$(cd -P -- "$(dirname -- "$0")" && pwd -P) || exit
source_dir="${current_dir}/.."
build_dir="${current_dir}/../build/make"
# Build debug version
cmake_options_debug="-DCMAKE_BUILD_TYPE:STRING=Debug"
cmake -E make_directory "${build_dir}_debug" || exit
cmake -E chdir "${build_dir}_debug" cmake $cmake_options_debug "$source_dir" || exit
cmake -E chdir "${build_dir}_debug" cmake --build . || exit
# Build Release version
cmake_options="-DCMAKE_BUILD_TYPE:STRING=Release"
cmake -E make_directory "${build_dir}" || exit
cmake -E chdir "${build_dir}" cmake $cmake_options "$source_dir" || exit
cmake -E chdir "${build_dir}" cmake --build . || exit
|
suikki/simpleSDL
|
platforms/build.sh
|
Shell
|
mit
| 860 |
#!/bin/bash
wget https://github.com/samjabrahams/tensorflow-on-raspberry-pi/releases/download/v1.1.0/tensorflow-1.1.0-cp34-cp34m-linux_armv7l.whl
sudo pip3 install tensorflow-1.1.0-cp34-cp34m-linux_armv7l.whl
|
TUM-AERIUS/Aerius
|
Raspberry/install_tf.sh
|
Shell
|
mit
| 210 |
sudo npm -g install babel-cli
sudo npm -g install babel-preset-env
|
roseengineering/vitamin-d
|
install.sh
|
Shell
|
mit
| 67 |
#!/bin/bash
sudo vlc-wrapper -I telnet &> /var/log/vlc/server.log &
|
AVGP/vodca
|
vlc-runner.sh
|
Shell
|
mit
| 68 |
#!/bin/bash
set -e
# Takes three args -
# -o the quoted name of the organism
# -n the number of strains
# -f the path to an existing prokaryotes.txt file
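# Example invocation (hypothetical values):
#   ./get_n_random_complete_genomes.sh -o "Escherichia coli" -n 5 -f ./prokaryotes.txt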
PROKFILE=""
NSTRAINS=""
while getopts "o:n:f:" opt; do
case $opt in
o)
ORGNAME=$OPTARG
echo "Organism name: $OPTARG" >&2
;;
n)
NSTRAINS=$OPTARG
echo "number of strains: $NSTRAINS" >&2
;;
f)
PROKFILE=$OPTARG
echo "file path to ncbi flatfile: $PROKFILE" >&2
;;
\?)
echo "Invalid option: -$OPTARG" >&2
echo "USAGE: -o 'Organism name' -n 5 -f ./path/to/prokaryotes"
;;
:)
echo "Option -$OPTARG requires an argument." >&2
exit 1
esac
done
if [ ! -f "$PROKFILE" ]
then
if [ ! -f "prokaryotes.txt" ]
then
wget ftp://ftp.ncbi.nlm.nih.gov/genomes/GENOME_REPORTS/prokaryotes.txt >&2
fi
PROKFILE=./prokaryotes.txt
if [ ! -s "$PROKFILE" ]
then
echo "The file '$PROKFILE' is empty/corrupted; please delete and try again"
exit 1
fi
fi
# column 9 has the nucc accession if it is a complete genome, or a "-" if empty;
# it starts with "chromosome"
# here we select the lines for all the complete genomes with awk,
# find the lines matching our query
# and save a temp file with the results
if [ -f /tmp/prok_subset_raw_outfile ]
then
rm /tmp/prok_subset_raw_outfile
fi
cat $PROKFILE | \
awk -F "\t" '$9 ~ /chrom*/ { print $0 }' | \
grep "$ORGNAME" > \
/tmp/prok_subset_raw_outfile
# if file is empty, raise an error
if [ ! -s /tmp/prok_subset_raw_outfile ]
then
echo "grepping for '$ORGNAME' returned no results"
exit 1
fi
# now we shuffle the file, get the top n lines, and use some sed to split apart the
# chromosome:NZ_CP013218.1/CP013218.1; plasmid paadD:NZ_CP014695.1/CP014695.1; plasmid plinE154:NZ_CP014694.1/CP014694.1
# to
# NZ_CP013218.1
# Note that we only get the first chromosome for a given entry. Sorry vibrioists
# shuf ./tmp_raw_outfile | head -n $NSTRAINS | cut -d "\t" -f 9
if [ $(command -v shuf) ]
then
echo "using shuf" >&2
SHUF=shuf
else
echo "using gshuf" >&2
SHUF=gshuf
fi
echo "selecting $NSTRAINS random strains" >&2
if [ $NSTRAINS != "" ]
then
$SHUF /tmp/prok_subset_raw_outfile | \
head -n $NSTRAINS | \
cut -f 9 | \
# sed "s/chro.*://" | \
sed "s/^chro[^:]*://" | \
# sed "s/\/.*//"
sed "s/[;\/].*//"
else
$SHUF /tmp/prok_subset_raw_outfile | \
cut -f 9 | \
# sed "s/chro.*://" | \
sed "s/^chro[^:]*://" | \
# sed "s/\/.*//" # handle instances lacking both genbank and refseq accs
sed "s/[;\/].*//"
fi
echo "done" >&2
|
nickp60/riboSeed
|
scripts/select_ref_by_ANI/get_n_random_complete_genomes.sh
|
Shell
|
mit
| 2,591 |