code | repo_name | path | language | license | size
---|---|---|---|---|---
#!/usr/bin/env bash
#
# Variables
#
D=$PWD
#
# Functions
#
usage()
{
cat << EOF
usage: $0 options
This script is designed to produce a new attributes json file with the Chef Analytics
changes required for the tf_chef_analytics Terraform plan.
OPTIONS:
-h This help message
-v Verbose output
EOF
}
while getopts "hv" OPTION; do
case "$OPTION" in
h)
usage && exit 0 ;;
v)
set -x ;;
?)
usage && exit 1 ;;
esac
done
#
# Main
#
# If there's a local cookbooks directory, purge
[ -d cookbooks ] && rm -rf cookbooks
# Get community Chef Analytics cookbook from GitHub (not available via Supermarket)
git clone https://github.com/chef-cookbooks/chef-analytics cookbooks/chef-analytics
# Remove conflicting .chef directory
rm -rf cookbooks/chef-analytics/.chef
# Upload to Chef Server
cd cookbooks/chef-analytics && berks install && berks upload
# Clean up
cd "$D" && rm -rf cookbooks
exit 0
| mengesb/tf_chef_analytics | files/chef-cookbooks.sh | Shell | apache-2.0 | 955 |
#!/bin/bash
set -e
export MAVENW_BIN
MAVENW_BIN="${MAVENW_BIN:-./mvnw}"
# It takes ages on Docker to run the app without this
if [[ ${BUILD_OPTIONS} != *"java.security.egd"* ]]; then
if [[ ! -z ${BUILD_OPTIONS} && ${BUILD_OPTIONS} != "null" ]]; then
export BUILD_OPTIONS="${BUILD_OPTIONS} -Djava.security.egd=file:///dev/urandom"
else
export BUILD_OPTIONS="-Djava.security.egd=file:///dev/urandom"
fi
fi
function build() {
local pipelineVersion="${PASSED_PIPELINE_VERSION:-${PIPELINE_VERSION:-}}"
# Required by settings.xml
BUILD_OPTIONS="${BUILD_OPTIONS} -DM2_SETTINGS_REPO_ID=${M2_SETTINGS_REPO_ID} -DM2_SETTINGS_REPO_USERNAME=${M2_SETTINGS_REPO_USERNAME} -DM2_SETTINGS_REPO_PASSWORD=${M2_SETTINGS_REPO_PASSWORD}"
# shellcheck disable=SC2086
"${MAVENW_BIN}" org.codehaus.mojo:versions-maven-plugin:2.3:set -DnewVersion="${pipelineVersion}" ${BUILD_OPTIONS} || (echo "Build failed!!!" && return 1)
if [[ "${CI}" == "CONCOURSE" ]]; then
# shellcheck disable=SC2086
"${MAVENW_BIN}" clean verify deploy -Ddistribution.management.release.id="${M2_SETTINGS_REPO_ID}" -Ddistribution.management.release.url="${REPO_WITH_BINARIES_FOR_UPLOAD}" -Drepo.with.binaries="${REPO_WITH_BINARIES}" ${BUILD_OPTIONS} || (printTestResults && return 1)
else
# shellcheck disable=SC2086
"${MAVENW_BIN}" clean verify deploy -Ddistribution.management.release.id="${M2_SETTINGS_REPO_ID}" -Ddistribution.management.release.url="${REPO_WITH_BINARIES_FOR_UPLOAD}" -Drepo.with.binaries="${REPO_WITH_BINARIES}" ${BUILD_OPTIONS}
fi
}
function apiCompatibilityCheck() {
local prodTag="${PASSED_LATEST_PROD_TAG:-${LATEST_PROD_TAG:-}}"
[[ -z "${prodTag}" ]] && prodTag="$(findLatestProdTag)"
echo "Last prod tag equals [${prodTag}]"
if [[ -z "${prodTag}" ]]; then
echo "No prod release took place - skipping this step"
else
# Putting env vars to output properties file for parameter passing
export PASSED_LATEST_PROD_TAG="${prodTag}"
local fileLocation="${OUTPUT_FOLDER}/test.properties"
echo "PASSED_LATEST_PROD_TAG=${prodTag}" >>"${fileLocation}"
# Downloading latest jar
LATEST_PROD_VERSION=${prodTag#prod/}
echo "Last prod version equals [${LATEST_PROD_VERSION}]"
if [[ "${CI}" == "CONCOURSE" ]]; then
# shellcheck disable=SC2086
"${MAVENW_BIN}" clean verify -Papicompatibility -Dlatest.production.version="${LATEST_PROD_VERSION}" -Drepo.with.binaries="${REPO_WITH_BINARIES}" ${BUILD_OPTIONS} || (printTestResults && return 1)
else
# shellcheck disable=SC2086
"${MAVENW_BIN}" clean verify -Papicompatibility -Dlatest.production.version="${LATEST_PROD_VERSION}" -Drepo.with.binaries="${REPO_WITH_BINARIES}" ${BUILD_OPTIONS}
fi
fi
}
# The function uses Maven Wrapper - if you're using Maven you have to have it on your classpath
# and change this function
function extractMavenProperty() {
local prop="${1}"
MAVEN_PROPERTY=$("${MAVENW_BIN}" -q \
-Dexec.executable="echo" \
-Dexec.args="\${${prop}}" \
--non-recursive \
org.codehaus.mojo:exec-maven-plugin:1.3.1:exec)
# In some spring cloud projects there is info about deactivating some stuff
MAVEN_PROPERTY=$(echo "${MAVEN_PROPERTY}" | tail -1)
# In Maven if there is no property it prints out ${propname}
if [[ "${MAVEN_PROPERTY}" == "\${${prop}}" ]]; then
echo ""
else
echo "${MAVEN_PROPERTY}"
fi
}
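# A minimal usage sketch (illustrative only; 'stubrunner.ids' is a property some
# projects define in their pom.xml, and is the one retrieveStubRunnerIds uses below):
#   ids="$(extractMavenProperty 'stubrunner.ids')"
#   [[ -z "${ids}" ]] && echo "property not set in pom.xml"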
function retrieveGroupId() {
{
ruby -r rexml/document \
-e 'puts REXML::Document.new(File.new(ARGV.shift)).elements["/project/groupId"].text' pom.xml \
|| "${MAVENW_BIN}" org.apache.maven.plugins:maven-help-plugin:2.2:evaluate \
-Dexpression=project.groupId | grep -Ev '(^\[|Download\w+:)'
} | tail -1
}
function retrieveAppName() {
{
ruby -r rexml/document \
-e 'puts REXML::Document.new(File.new(ARGV.shift)).elements["/project/artifactId"].text' pom.xml \
|| "${MAVENW_BIN}" org.apache.maven.plugins:maven-help-plugin:2.2:evaluate \
-Dexpression=project.artifactId | grep -Ev '(^\[|Download\w+:)'
} | tail -1
}
function printTestResults() {
# shellcheck disable=SC1117,SC2046
echo -e "\n\nBuild failed!!! - will print all test results to the console (it's the easiest way to debug anything later)\n\n" && tail -n +1 $(testResultsAntPattern)
}
function retrieveStubRunnerIds() {
extractMavenProperty 'stubrunner.ids'
}
function runSmokeTests() {
local applicationUrl="${APPLICATION_URL}"
local stubrunnerUrl="${STUBRUNNER_URL}"
echo "Running smoke tests. Application url [${applicationUrl}], Stubrunner Url [${stubrunnerUrl}]"
if [[ "${CI}" == "CONCOURSE" ]]; then
# shellcheck disable=SC2086
"${MAVENW_BIN}" clean install -Psmoke -Dapplication.url="${applicationUrl}" -Dstubrunner.url="${stubrunnerUrl}" ${BUILD_OPTIONS} || (printTestResults && return 1)
else
# shellcheck disable=SC2086
"${MAVENW_BIN}" clean install -Psmoke -Dapplication.url="${applicationUrl}" -Dstubrunner.url="${stubrunnerUrl}" ${BUILD_OPTIONS}
fi
}
function runE2eTests() {
local applicationUrl="${APPLICATION_URL}"
echo "Running e2e tests for application with url [${applicationUrl}]"
if [[ "${CI}" == "CONCOURSE" ]]; then
# shellcheck disable=SC2086
"${MAVENW_BIN}" clean install -Pe2e -Dapplication.url="${applicationUrl}" ${BUILD_OPTIONS} || (printTestResults && return 1)
else
# shellcheck disable=SC2086
"${MAVENW_BIN}" clean install -Pe2e -Dapplication.url="${applicationUrl}" ${BUILD_OPTIONS}
fi
}
function outputFolder() {
echo "target"
}
function testResultsAntPattern() {
echo "**/surefire-reports/*"
}
export -f build
export -f apiCompatibilityCheck
export -f runSmokeTests
export -f runE2eTests
export -f outputFolder
export -f testResultsAntPattern
| spring-cloud/spring-cloud-pipelines | common/src/main/bash/projectType/pipeline-maven.sh | Shell | apache-2.0 | 5,607 |
wget -O glyphicons-halflings-regular.eot http://maxcdn.bootstrapcdn.com/bootstrap/3.3.2/fonts/glyphicons-halflings-regular.eot
wget -O glyphicons-halflings-regular.svg http://maxcdn.bootstrapcdn.com/bootstrap/3.3.2/fonts/glyphicons-halflings-regular.svg
wget -O glyphicons-halflings-regular.ttf http://maxcdn.bootstrapcdn.com/bootstrap/3.3.2/fonts/glyphicons-halflings-regular.ttf
wget -O glyphicons-halflings-regular.woff http://maxcdn.bootstrapcdn.com/bootstrap/3.3.2/fonts/glyphicons-halflings-regular.woff
wget -O glyphicons-halflings-regular.woff2 http://maxcdn.bootstrapcdn.com/bootstrap/3.3.2/fonts/glyphicons-halflings-regular.woff2
| ronaldbradford/prototype | fonts/sync.sh | Shell | apache-2.0 | 642 |
#!/bin/bash
set -e
INSTALL_DIR=/home/icecp/"<%= version %>"-"<%= iteration %>"
SOFT_LINK=/home/icecp/icecp-node
after_remove()
{
echo "Performing post-removal steps for <%= name %> version=<%= version %> iteration=<%= iteration %>"
# Remove soft link and icecp-node directory contents
if [ -L "$SOFT_LINK" ] && [ "$(readlink "$SOFT_LINK")" = "$INSTALL_DIR" ]; then
rm "$SOFT_LINK"
echo "Removed soft link $SOFT_LINK"
fi
if [ -d $INSTALL_DIR ]; then
rm -rf $INSTALL_DIR
echo "Removed $INSTALL_DIR"
fi
}
after_remove
| icecp/icecp | icecp-node/packaging/scripts/after-remove.sh | Shell | apache-2.0 | 544 |
#!/usr/bin/env bash
#
# The Alluxio Open Foundation licenses this work under the Apache License, version 2.0
# (the "License"). You may not use this work except in compliance with the License, which is
# available at www.apache.org/licenses/LICENSE-2.0
#
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied, as more fully set forth in the License.
#
# See the NOTICE file distributed with this work for information regarding copyright ownership.
#
# This file is included in all Alluxio scripts via the source command. It should
# not be executed directly, nor passed any arguments, since we need the original $*.
# resolve links - $0 may be a softlink
this="${BASH_SOURCE-$0}"
common_bin=$(cd -P -- "$(dirname -- "${this}")" && pwd -P)
script="$(basename -- "${this}")"
this="${common_bin}/${script}"
# convert relative path to absolute path
config_bin=$(dirname "${this}")
script=$(basename "${this}")
config_bin=$(cd "${config_bin}"; pwd)
this="${config_bin}/${script}"
# Allow for a script which overrides the default settings for system integration folks.
[[ -f "${common_bin}/alluxio-layout.sh" ]] && . "${common_bin}/alluxio-layout.sh"
# This will set the default installation for a tarball installation while os distributors can create
# their own alluxio-layout.sh file to set system installation locations.
if [[ -z "${ALLUXIO_SYSTEM_INSTALLATION}" ]]; then
VERSION=1.5.0-SNAPSHOT
ALLUXIO_HOME=$(dirname $(dirname "${this}"))
ALLUXIO_JARS="${ALLUXIO_HOME}/assembly/target/alluxio-assemblies-${VERSION}-jar-with-dependencies.jar"
ALLUXIO_CONF_DIR="${ALLUXIO_CONF_DIR:-${ALLUXIO_HOME}/conf}"
ALLUXIO_LOGS_DIR="${ALLUXIO_LOGS_DIR:-${ALLUXIO_HOME}/logs}"
fi
if [[ -z "$(which java)" ]]; then
echo "Cannot find the 'java' command."
exit 1
fi
JAVA_HOME=${JAVA_HOME:-"$(dirname $(which java))/.."}
JAVA=${JAVA:-"${JAVA_HOME}/bin/java"}
if [[ -e "${ALLUXIO_CONF_DIR}/alluxio-env.sh" ]]; then
. "${ALLUXIO_CONF_DIR}/alluxio-env.sh"
fi
if [[ -n "${ALLUXIO_MASTER_ADDRESS}" ]]; then
echo "ALLUXIO_MASTER_ADDRESS is deprecated since version 1.1 and will be remove in version 2.0."
echo "Please use \"ALLUXIO_MASTER_HOSTNAME\" instead."
ALLUXIO_MASTER_HOSTNAME=${ALLUXIO_MASTER_ADDRESS}
fi
if [[ -n "${ALLUXIO_HOME}" ]]; then
ALLUXIO_JAVA_OPTS+=" -Dalluxio.home=${ALLUXIO_HOME}"
fi
if [[ -n "${ALLUXIO_LOGS_DIR}" ]]; then
ALLUXIO_JAVA_OPTS+=" -Dalluxio.logs.dir=${ALLUXIO_LOGS_DIR}"
fi
if [[ -n "${ALLUXIO_RAM_FOLDER}" ]]; then
ALLUXIO_JAVA_OPTS+=" -Dalluxio.worker.tieredstore.level0.alias=MEM"
ALLUXIO_JAVA_OPTS+=" -Dalluxio.worker.tieredstore.level0.dirs.path=${ALLUXIO_RAM_FOLDER}"
fi
if [[ -n "${ALLUXIO_MASTER_HOSTNAME}" ]]; then
ALLUXIO_JAVA_OPTS+=" -Dalluxio.master.hostname=${ALLUXIO_MASTER_HOSTNAME}"
fi
if [[ -n "${ALLUXIO_UNDERFS_ADDRESS}" ]]; then
ALLUXIO_JAVA_OPTS+=" -Dalluxio.underfs.address=${ALLUXIO_UNDERFS_ADDRESS}"
fi
if [[ -n "${ALLUXIO_WORKER_MEMORY_SIZE}" ]]; then
ALLUXIO_JAVA_OPTS+=" -Dalluxio.worker.memory.size=${ALLUXIO_WORKER_MEMORY_SIZE}"
fi
ALLUXIO_JAVA_OPTS+=" -Dlog4j.configuration=file:${ALLUXIO_CONF_DIR}/log4j.properties"
ALLUXIO_JAVA_OPTS+=" -Dorg.apache.jasper.compiler.disablejsr199=true"
ALLUXIO_JAVA_OPTS+=" -Djava.net.preferIPv4Stack=true"
# Master specific parameters based on ALLUXIO_JAVA_OPTS.
ALLUXIO_MASTER_JAVA_OPTS+=${ALLUXIO_JAVA_OPTS}
ALLUXIO_MASTER_JAVA_OPTS+=" -Dalluxio.logger.type=${ALLUXIO_MASTER_LOGGER:-MASTER_LOGGER}"
# Proxy specific parameters that will be shared to all workers based on ALLUXIO_JAVA_OPTS.
ALLUXIO_PROXY_JAVA_OPTS+=${ALLUXIO_JAVA_OPTS}
ALLUXIO_PROXY_JAVA_OPTS+=" -Dalluxio.logger.type=${ALLUXIO_PROXY_LOGGER:-PROXY_LOGGER}"
# Worker specific parameters that will be shared to all workers based on ALLUXIO_JAVA_OPTS.
ALLUXIO_WORKER_JAVA_OPTS+=${ALLUXIO_JAVA_OPTS}
ALLUXIO_WORKER_JAVA_OPTS+=" -Dalluxio.logger.type=${ALLUXIO_WORKER_LOGGER:-WORKER_LOGGER}"
# Client specific parameters based on ALLUXIO_JAVA_OPTS.
ALLUXIO_USER_JAVA_OPTS+=${ALLUXIO_JAVA_OPTS}
ALLUXIO_USER_JAVA_OPTS+=" -Dalluxio.logger.type=USER_LOGGER"
# A developer option to prepend Alluxio jars before ALLUXIO_CLASSPATH jars
if [[ -n "${ALLUXIO_PREPEND_ALLUXIO_CLASSES}" ]]; then
export CLASSPATH="${ALLUXIO_CONF_DIR}/:${ALLUXIO_JARS}:${ALLUXIO_CLASSPATH}"
else
export CLASSPATH="${ALLUXIO_CONF_DIR}/:${ALLUXIO_CLASSPATH}:${ALLUXIO_JARS}"
fi
| yuluo-ding/alluxio | libexec/alluxio-config.sh | Shell | apache-2.0 | 4,415 |
#
# This file must be used by invoking "source activate.sh" from the command line.
# You cannot run it directly.
# To exit from the environment this creates, execute the 'deactivate' function.
_RED="\033[0;31m"
_MAGENTA="\033[0;95m"
_YELLOW="\033[0;33m"
_RESET="\033[0m"
# This detects if a script was sourced or invoked directly
# See https://stackoverflow.com/a/28776166/2526265
sourced=0
if [ -n "$ZSH_EVAL_CONTEXT" ]; then
case $ZSH_EVAL_CONTEXT in *:file) sourced=1;; esac
THIS_SCRIPT="${0:-}"
elif [ -n "$KSH_VERSION" ]; then
[ "$(cd $(dirname -- $0) && pwd -P)/$(basename -- $0)" != "$(cd $(dirname -- ${.sh.file}) && pwd -P)/$(basename -- ${.sh.file})" ] && sourced=1
THIS_SCRIPT="${0:-}"
elif [ -n "$BASH_VERSION" ]; then
(return 2>/dev/null) && sourced=1
THIS_SCRIPT="$BASH_SOURCE"
else # All other shells: examine $0 for known shell binary filenames
# Detects `sh` and `dash`; add additional shell filenames as needed.
case ${0##*/} in sh|dash) sourced=1;; esac
THIS_SCRIPT="${0:-}"
fi
if [ $sourced -eq 0 ]; then
printf "${_RED}This script cannot be invoked directly.${_RESET}\n"
printf "${_RED}To function correctly, this script file must be sourced by calling \"source $0\".${_RESET}\n"
exit 1
fi
deactivate () {
# reset old environment variables
if [ ! -z "${_OLD_PATH:-}" ] ; then
export PATH="$_OLD_PATH"
unset _OLD_PATH
fi
if [ ! -z "${_OLD_PS1:-}" ] ; then
export PS1="$_OLD_PS1"
unset _OLD_PS1
fi
# This should detect bash and zsh, which have a hash command that must
# be called to get it to forget past commands. Without forgetting
# past commands the $PATH changes we made may not be respected
if [ -n "${BASH:-}" ] || [ -n "${ZSH_VERSION:-}" ] ; then
hash -r 2>/dev/null
fi
unset DOTNET_ROOT
unset DOTNET_MULTILEVEL_LOOKUP
if [ ! "${1:-}" = "init" ] ; then
# Remove the deactivate function
unset -f deactivate
fi
}
# Cleanup the environment
deactivate init
DIR="$( cd "$( dirname "$THIS_SCRIPT" )" && pwd )"
_OLD_PATH="$PATH"
# Tell dotnet where to find itself
export DOTNET_ROOT="$DIR/.dotnet"
# Tell dotnet not to look beyond the DOTNET_ROOT folder for more dotnet things
export DOTNET_MULTILEVEL_LOOKUP=0
# Put dotnet first on PATH
export PATH="$DOTNET_ROOT:$PATH"
# Set the shell prompt
if [ -z "${DISABLE_CUSTOM_PROMPT:-}" ] ; then
_OLD_PS1="$PS1"
export PS1="(`basename \"$DIR\"`) $PS1"
fi
# This should detect bash and zsh, which have a hash command that must
# be called to get it to forget past commands. Without forgetting
# past commands the $PATH changes we made may not be respected
if [ -n "${BASH:-}" ] || [ -n "${ZSH_VERSION:-}" ] ; then
hash -r 2>/dev/null
fi
printf "${_MAGENTA}Enabled the .NET Core environment. Execute 'deactivate' to exit.${_RESET}\n"
if [ ! -f "$DOTNET_ROOT/dotnet" ]; then
printf "${_YELLOW}.NET Core has not been installed yet. Run $DIR/build/get-dotnet.sh to install it.${_RESET}\n"
else
printf "dotnet = $DOTNET_ROOT/dotnet\n"
fi
| grpc/grpc-dotnet | activate.sh | Shell | apache-2.0 | 3,075 |
#!/bin/bash
docker run --rm \
-e PGPASSWORD="${PGPASSWORD}" \
-e PGHOST=ims-postgres \
-e PGUSER="${PGUSER}" \
-e PGDATABASE="${PGDATABASE}" \
--cap-drop=all \
--net=ebisc \
-v /tmp/db_dump:/tmp/db_dump:Z \
ebisc/postgres:latest \
sh -c "pg_dump | gzip -c > /tmp/db_dump/ebisc.sql.gz"
| EBiSC/ebisc_deployment | roles/ims/files/scripts/dump_db.bash | Shell | apache-2.0 | 298 |
csscombine -m style.css > style-min.css
| devsar/ae-people | static/css/build-css.sh | Shell | apache-2.0 | 40 |
#!/bin/bash
#
# Copyright (C) 2010-2016 dtk contributors
#
# This file is part of the dtk project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#set -x
# Output repo directory:
output_dir=$1
dtk_server=$2
dtk_repo_manager=$3
dtk_provisioning=$4
dtk_repoman_url=$5
dtk_service_module_url="internal--sm--dtk"
dtk_component_module_url_prefix="internal--cm--"
apt_url="apt"
common_user_url="common_user"
dtk_url="dtk"
dtk_activemq_url="dtk_activemq"
dtk_addons_url="dtk_addons"
dtk_client_url="dtk_client"
dtk_java_url="dtk_java"
dtk_nginx_url="dtk_nginx"
dtk_postgresql_url="dtk_postgresql"
dtk_server_url="dtk_server"
dtk_thin_url="dtk_thin"
dtk_user_url="dtk_user"
gitolite_url="gitolite"
logrotate_url="logrotate"
nginx_url="nginx"
rvm_url="rvm"
stdlib_url="stdlib"
sysctl_url="sysctl"
passenger_url="dtk_passenger"
vcsrepo_url="vcsrepo"
docker_url="docker"
dtk_repo_manager_url="dtk_repo_manager"
dtk_modules=()
dtk_modules+=($apt_url)
dtk_modules+=($common_user_url)
dtk_modules+=($dtk_url)
dtk_modules+=($dtk_activemq_url)
dtk_modules+=($dtk_addons_url)
dtk_modules+=($dtk_client_url)
dtk_modules+=($dtk_java_url)
dtk_modules+=($dtk_nginx_url)
dtk_modules+=($dtk_postgresql_url)
dtk_modules+=($dtk_server_url)
dtk_modules+=($dtk_thin_url)
dtk_modules+=($dtk_user_url)
dtk_modules+=($gitolite_url)
dtk_modules+=($logrotate_url)
dtk_modules+=($nginx_url)
dtk_modules+=($rvm_url)
dtk_modules+=($stdlib_url)
dtk_modules+=($sysctl_url)
dtk_modules+=($passenger_url)
dtk_modules+=($vcsrepo_url)
dtk_modules+=($docker_url)
cd $output_dir
# Add dtk modules as subtrees to dtk provisioning repo
git clone $dtk_provisioning && cd dtk-provisioning
for module in ${dtk_modules[@]}; do
git subtree pull --prefix modules/${module} ${dtk_repoman_url}:${dtk_component_module_url_prefix}${module} master --squash -m "Updated module ${module}"
done
git subtree pull --prefix modules/${dtk_service_module_url} ${dtk_repoman_url}:${dtk_service_module_url} master --squash -m "Updated dtk service module"
git add .; git commit -m "Adding latest updates for dtk modules"; git push origin master
cd ..
# Add server related dtk modules
git clone $dtk_server && cd dtk-server && git submodule init && git submodule update
for module in ${dtk_modules[@]}; do
cd dtk-provisioning/modules/$module
git checkout master && git pull && cd ../../..
done
git add .; git commit -m "Adding latest updates for dtk modules"; git push origin master
cd ..
# Add repoman related dtk modules
git clone $dtk_repo_manager && cd dtk-repo-manager && git submodule init && git submodule update
cd dtk_modules/$dtk_repo_manager_url
git checkout master && git pull && cd ../..
git add .; git commit -m "Adding latest updates for dtk modules"; git push origin master
| dtk/dtk-server | scripts/git_repo_tagging/add_dtk_modules.sh | Shell | apache-2.0 | 3,231 |
#!/usr/bin/env bash
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
REPO_ROOT=$(cd $(dirname "${BASH_SOURCE[0]}") && cd "$(git rev-parse --show-toplevel)" && pwd)
source ${REPO_ROOT}/build-support/common.sh
PANTS_EXE="${REPO_ROOT}/pants"
function usage() {
echo "Publishes the http://pantsbuild.github.io/ docs locally or remotely."
echo
echo "Usage: $0 (-h|-opd)"
echo " -h print out this help message"
echo " -o open the doc site locally"
echo " -p publish the doc site remotely"
echo " -d publish the site to a subdir at this path (useful for public previews)"
if (( $# > 0 )); then
die "$@"
else
exit 0
fi
}
publish_path=""
while getopts "hopd:" opt; do
case ${opt} in
h) usage ;;
o) preview="true" ;;
p) publish="true" ;;
d) publish_path="${OPTARG}" ;;
*) usage "Invalid option: -${OPTARG}" ;;
esac
done
${PANTS_EXE} builddict --print-exception-stacktrace \
--omit-impl-re='internal_backend.*' || \
die "Failed to generate the 'BUILD Dictionary' and/or 'Options Reference'."
function do_open() {
if [[ "${preview}" = "true" ]]; then
if which xdg-open &>/dev/null; then
xdg-open $1
elif which open &>/dev/null; then
open $1
else
die "Failed to find an opener on your system for $1"
fi
fi
}
# generate html from markdown pages.
${PANTS_EXE} markdown --print-exception-stacktrace \
--markdown-fragment src:: examples:: src/docs:: //:readme \
testprojects/src/java/com/pants/testproject/page:readme || \
die "Failed to generate HTML from markdown'."
# invoke doc site generator.
${PANTS_EXE} sitegen --print-exception-stacktrace \
--sitegen-config-path=src/python/pants/docs/docsite.json || \
die "Failed to generate doc site'."
do_open "${REPO_ROOT}/dist/docsite/index.html"
if [[ "${publish}" = "true" ]]; then
url="http://pantsbuild.github.io/${publish_path}"
read -ep "To abort publishing these docs to ${url} press CTRL-C, otherwise press enter to \
continue."
(
${REPO_ROOT}/src/python/pants/docs/publish_via_git.sh \
[email protected]:pantsbuild/pantsbuild.github.io.git \
${publish_path} && \
do_open ${url}/index.html
) || die "Publish to ${url} failed."
fi
| tejal29/pants | build-support/bin/publish_docs.sh | Shell | apache-2.0 | 2,355 |
#!/bin/bash
JAVA_VERSION=`java -version 2>&1 | awk 'NR==1{ gsub(/"/,""); print $3 }'`
if [[ $JAVA_VERSION == 11* ]]; then
echo "Java version ok: $JAVA_VERSION"
else
echo "Java version is expected as 11 but $JAVA_VERSION. try:"
echo "export JAVA_HOME=\`/usr/libexec/java_home -v 11\`"
exit 1
fi
| michaelliao/warpdb | check_java_version.sh | Shell | apache-2.0 | 304 |
source ../admin-openrc.sh
mkdir -p /tmp/images
wget -P /tmp/images http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img
glance image-create --name "cirros-0.3.4-x86_64" --file /tmp/images/cirros-0.3.4-x86_64-disk.img \
--disk-format qcow2 --container-format bare --is-public True --progress
glance image-list
#rm -r /tmp/images
| sxh615/kilo-install | glance/script/5.verify.sh | Shell | apache-2.0 | 343 |
#!/bin/sh
COMMAND=`cat getdata.sh`
# Run getdata.sh to fetch info from remote hosts
pscp.pssh -h hosts getdata.sh /tmp/tmp.sh >>/dev/null
pssh -h hosts -o ./data "sh /tmp/tmp.sh"
pssh -h hosts "rm /tmp/tmp.sh" >>/dev/null
# analyze data in directory 'data'
sh ana.sh
| matrixj/scmonitor | main.sh | Shell | apache-2.0 | 267 |
#!/bin/sh
set -e
: ${KUBE_MASTER_VCPUS:=2}
: ${KUBE_MASTER_MEM:=1024}
: ${KUBE_MASTER_DISK:=4G}
: ${KUBE_MASTER_UNTAINT:=n}
: ${KUBE_NODE_VCPUS:=2}
: ${KUBE_NODE_MEM:=4096}
: ${KUBE_NODE_DISK:=8G}
: ${KUBE_NETWORKING:=default}
: ${KUBE_RUN_ARGS:=}
: ${KUBE_EFI:=}
: ${KUBE_MAC:=}
: ${KUBE_CLEAR_STATE:=}
[ "$(uname -s)" = "Darwin" ] && KUBE_EFI=1
suffix=".iso"
[ -n "${KUBE_EFI}" ] && suffix="-efi.iso" && uefi="--uefi"
if [ $# -eq 0 ] ; then
img="kube-master"
# If $KUBE_MASTER_AUTOINIT is set, including if it is set to ""
# then we configure for auto init. If it is completely unset then
# we do not.
if [ -n "${KUBE_MASTER_AUTOINIT+x}" ] ; then
kubeadm_data="${kubeadm_data+$kubeadm_data, }\"init\": \"${KUBE_MASTER_AUTOINIT}\""
fi
if [ "${KUBE_MASTER_UNTAINT}" = "y" ] ; then
kubeadm_data="${kubeadm_data+$kubeadm_data, }\"untaint-master\": \"\""
fi
if [ -n "${kubeadm_data}" ] ; then
data="{ \"kubeadm\": { ${kubeadm_data} } }"
fi
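# For illustration: with KUBE_MASTER_AUTOINIT="" and KUBE_MASTER_UNTAINT=y, the
# metadata built above would be:
#   { "kubeadm": { "init": "", "untaint-master": "" } }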
state="kube-master-state"
: ${KUBE_VCPUS:=$KUBE_MASTER_VCPUS}
: ${KUBE_MEM:=$KUBE_MASTER_MEM}
: ${KUBE_DISK:=$KUBE_MASTER_DISK}
elif [ $# -ge 1 ] ; then
case $1 in
''|*[!0-9]*)
echo "Node number must be a number"
exit 1
;;
0)
echo "Node number must be greater than 0"
exit 1
;;
*) ;;
esac
img="kube-node"
name="node-${1}"
shift
if [ $# -ge 1 ] ; then
data="{\"kubeadm\": {\"join\": \"${*}\"} }"
fi
state="kube-${name}-state"
: ${KUBE_VCPUS:=$KUBE_NODE_VCPUS}
: ${KUBE_MEM:=$KUBE_NODE_MEM}
: ${KUBE_DISK:=$KUBE_NODE_DISK}
else
echo "Usage:"
echo " - Boot master:"
echo " ${0}"
echo " - Boot node:"
echo " ${0} <node> <join_args>"
exit 1
fi
set -x
if [ -n "${KUBE_CLEAR_STATE}" ] ; then
rm -rf "${state}"
mkdir "${state}"
if [ -n "${KUBE_MAC}" ] ; then
echo -n "${KUBE_MAC}" > "${state}"/mac-addr
fi
fi
linuxkit run ${KUBE_RUN_ARGS} -networking ${KUBE_NETWORKING} -cpus ${KUBE_VCPUS} -mem ${KUBE_MEM} -state "${state}" -disk size=${KUBE_DISK} -data "${data}" ${uefi} "${img}${suffix}"
| mor1/linuxkit | projects/kubernetes/boot.sh | Shell | apache-2.0 | 2,119 |
#!/bin/bash
# This script sets up a go workspace locally and builds all go components.
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
"${OS_ROOT}/hack/build-go.sh" tools/gendocs tools/genman
# Find binary
gendocs="$(os::build::find-binary gendocs)"
genman="$(os::build::find-binary genman)"
if [[ -z "$gendocs" ]]; then
{
echo "It looks as if you don't have a compiled gendocs binary"
echo
echo "If you are running from a clone of the git repo, please run"
echo "'./hack/build-go.sh tools/gendocs'."
} >&2
exit 1
fi
if [[ -z "$genman" ]]; then
{
echo "It looks as if you don't have a compiled genman binary"
echo
echo "If you are running from a clone of the git repo, please run"
echo "'./hack/build-go.sh tools/genman'"
} >&2
exit 1
fi
OUTPUT_DIR_REL=${1:-""}
OUTPUT_DIR="${OS_ROOT}/${OUTPUT_DIR_REL}/docs/generated"
MAN_OUTPUT_DIR="${OS_ROOT}/${OUTPUT_DIR_REL}/docs/man/man1"
mkdir -p "${OUTPUT_DIR}" || echo $? > /dev/null
mkdir -p "${MAN_OUTPUT_DIR}" || echo $? > /dev/null
os::build::gen-docs "${gendocs}" "${OUTPUT_DIR}"
os::build::gen-man "${genman}" "${MAN_OUTPUT_DIR}" "oc"
os::build::gen-man "${genman}" "${MAN_OUTPUT_DIR}" "openshift"
os::build::gen-man "${genman}" "${MAN_OUTPUT_DIR}" "oadm"
| gashcrumb/origin | hack/update-generated-docs.sh | Shell | apache-2.0 | 1,257 |
#!/bin/bash -
###############################################################################
##
## ___________ _____________ ___ .____ .___ _________ _____
## \_ _____// _____/\ \/ / | | | |/ _____/ / _ \
## | __)_ \_____ \ \ / ______ | | | |\_____ \ / /_\ \
## | \/ \ / \ /_____/ | |___| |/ \/ | \
## /_______ /_______ //___/\ \ |_______ \___/_______ /\____|__ /
## \/ \/ \_/ \/ \/ \/
##
###############################################################################
##
## ESX-LISA is an automation testing framework based on the github.com/LIS/lis-test
## project. In order to support ESX, ESX-LISA uses PowerCLI to automate all
## aspects of vSphere management, including network, storage, VM, guest OS and
## more. This framework automates the tasks required to test
## Red Hat Enterprise Linux Server on VMware ESX Server.
##
###############################################################################
##
## Revision:
## v1.0 - xiaofwan - 12/29/2016 - Fork from github.com/LIS/lis-test.
## v1.1 - xiaofwan - 1/12/2017 - Comment header upgrade
## v1.2 - xiaofwan - 1/25/2016 - Add a new result status - Skipped, which marks
## test case not applicable in current scenario.
## v1.3 - xiaofwan - 1/26/2016 - Remove TC_COVERED param as it is no longer useful
## v1.4 - xuli - 02/10/2017 - add call trace check function CheckCallTrace()
###############################################################################
###############################################################################
##
## Description:
## This script contains all distro-specific functions, as well as other common
## functions used in the LIS test scripts.
## Private variables used in scripts should use the __VAR_NAME notation. Using
## the bash built-in `declare' statement also restricts the variable's scope.
## Same for "private" functions.
##
###############################################################################
# Set IFS to space\t\n
IFS=$' \t\n'
# Include guard
[ -n "$__LIS_UTILS_SH_INCLUDE_GUARD" ] && exit 200 || readonly __LIS_UTILS_SH_INCLUDE_GUARD=1
##################################### Global variables #####################################
# Because functions can only return a status code, global vars will be used for communicating with the caller
# All vars are first defined here
# Directory containing all files pushed by LIS framework
declare LIS_HOME="$HOME"
# LIS state file used by powershell to get the test's state
declare __LIS_STATE_FILE="$LIS_HOME/state.txt"
# LIS possible states recorded in state file
declare __LIS_TESTRUNNING="TestRunning" # The test is running
declare __LIS_TESTCOMPLETED="TestCompleted" # The test completed successfully
declare __LIS_TESTSKIPPED="TestSkipped" # The test is not supported by this scenario
declare __LIS_TESTABORTED="TestAborted" # Error during setup of test
declare __LIS_TESTFAILED="TestFailed" # Error during execution of test
# LIS constants file which contains the parameters passed to the test
declare __LIS_CONSTANTS_FILE="$LIS_HOME/constants.sh"
# LIS summary file. Should be less verbose than the separate log file
declare __LIS_SUMMARY_FILE="$LIS_HOME/summary.log"
# DISTRO used for setting the distro used to run the script
declare DISTRO=''
# SYNTH_NET_INTERFACES is an array containing all synthetic network interfaces found
declare -a SYNTH_NET_INTERFACES
# LEGACY_NET_INTERFACES is an array containing all legacy network interfaces found
declare -a LEGACY_NET_INTERFACES
######################################## Functions ########################################
# Convenience function used to set-up most common variables
UtilsInit()
{
if [ -d "$LIS_HOME" ]; then
cd "$LIS_HOME"
else
LogMsg "Warning: LIS_HOME $LIS_HOME directory missing. Unable to initialize testscript"
return 1
fi
# clean-up any remaining files
if [ -e "$__LIS_STATE_FILE" ]; then
if [ -d "$__LIS_STATE_FILE" ]; then
rm -rf "$__LIS_STATE_FILE"
LogMsg "Warning: Found $__LIS_STATE_FILE directory"
else
rm -f "$__LIS_STATE_FILE"
fi
fi
if [ -e "$__LIS_SUMMARY_FILE" ]; then
if [ -d "$__LIS_SUMMARY_FILE" ]; then
rm -rf "$__LIS_SUMMARY_FILE"
LogMsg "Warning: Found $__LIS_SUMMARY_FILE directory"
else
rm -f "$__LIS_SUMMARY_FILE"
fi
fi
# Set standard umask for root
umask 022
# sync data
sync
# Create state file and update test state
touch "$__LIS_STATE_FILE"
SetTestStateRunning || {
LogMsg "Warning: unable to update test state-file. Cannot continue initializing testscript"
return 2
}
# sync data
sync
touch "$__LIS_SUMMARY_FILE"
if [ -f "$__LIS_CONSTANTS_FILE" ]; then
. "$__LIS_CONSTANTS_FILE"
else
LogMsg "Error: constants file $__LIS_CONSTANTS_FILE missing or not a regular file. Cannot source it!"
SetTestStateAborted
UpdateSummary "Error: constants file $__LIS_CONSTANTS_FILE missing or not a regular file. Cannot source it!"
return 3
fi
GetDistro && LogMsg "Testscript running on $DISTRO" || LogMsg "Warning: test running on unknown distro!"
LogMsg "Successfully initialized testscript!"
return 0
}
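# A rough sketch of a test script built on these utilities (illustrative only;
# assumes this file is pushed to the VM alongside constants.sh as utils.sh):
#   . utils.sh
#   UtilsInit || exit 1
#   # ... test steps, logging via LogMsg and UpdateSummary ...
#   SetTestStateCompleted
#   exit 0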
# Functions used to update the current test state
# Should not be used directly. $1 should be one of __LIS_TESTRUNNING __LIS_TESTCOMPLETED __LIS_TESTSKIPPED __LIS_TESTABORTED __LIS_TESTFAILED
__SetTestState()
{
if [ -f "$__LIS_STATE_FILE" ]; then
if [ -w "$__LIS_STATE_FILE" ]; then
echo "$1" > "$__LIS_STATE_FILE"
else
LogMsg "Warning: state file $__LIS_STATE_FILE exists and is a normal file, but is not writable"
chmod u+w "$__LIS_STATE_FILE" && { echo "$1" > "$__LIS_STATE_FILE" && return 0 ; } || LogMsg "Warning: unable to make $__LIS_STATE_FILE writeable"
return 1
fi
else
LogMsg "Warning: state file $__LIS_STATE_FILE either does not exist or is not a regular file. Trying to create it..."
echo "$1" > "$__LIS_STATE_FILE" || return 2
fi
return 0
}
SetTestStateFailed()
{
__SetTestState "$__LIS_TESTFAILED"
return $?
}
SetTestStateAborted()
{
__SetTestState "$__LIS_TESTABORTED"
return $?
}
SetTestStateSkipped()
{
__SetTestState "$__LIS_TESTSKIPPED"
return $?
}
SetTestStateCompleted()
{
__SetTestState "$__LIS_TESTCOMPLETED"
return $?
}
SetTestStateRunning()
{
__SetTestState "$__LIS_TESTRUNNING"
return $?
}
# Logging function. The way LIS currently runs scripts and collects log files, just echo the message
# $1 == Message
LogMsg()
{
echo $(date "+%a %b %d %T %Y") : "${1}"
}
# Update summary file with message $1
# Summary should contain only a few lines
UpdateSummary()
{
if [ -f "$__LIS_SUMMARY_FILE" ]; then
if [ -w "$__LIS_SUMMARY_FILE" ]; then
echo "$1" >> "$__LIS_SUMMARY_FILE"
else
LogMsg "Warning: summary file $__LIS_SUMMARY_FILE exists and is a normal file, but is not writable"
chmod u+w "$__LIS_SUMMARY_FILE" && echo "$1" >> "$__LIS_SUMMARY_FILE" || LogMsg "Warning: unable to make $__LIS_SUMMARY_FILE writeable"
return 1
fi
else
LogMsg "Warning: summary file $__LIS_SUMMARY_FILE either does not exist or is not a regular file. Trying to create it..."
echo "$1" >> "$__LIS_SUMMARY_FILE" || return 2
fi
return 0
}
# Function to get current distro
# Sets the $DISTRO variable to one of the following: suse, centos_{5, 6, 7}, redhat_{5, 6, 7}, fedora, ubuntu
# The naming scheme will be distroname_version
# Takes no arguments
GetDistro()
{
# Make sure we don't inherit anything
declare __DISTRO
# Get distro (snippet taken from alsa-info.sh)
__DISTRO=$(grep -ihs "Ubuntu\|SUSE\|Fedora\|Debian\|CentOS\|Red Hat Enterprise Linux" /etc/{issue,*release,*version})
case $__DISTRO in
*Ubuntu*12*)
DISTRO=ubuntu_12
;;
*Ubuntu*13*)
DISTRO=ubuntu_13
;;
*Ubuntu*14*)
DISTRO=ubuntu_14
;;
# ubuntu 14 in current beta state does not use the number 14 in its description
*Ubuntu*Trusty*)
DISTRO=ubuntu_14
;;
*Ubuntu*)
DISTRO=ubuntu_x
;;
*Debian*7*)
DISTRO=debian_7
;;
*Debian*)
DISTRO=debian_x
;;
*SUSE*12*)
DISTRO=suse_12
;;
*SUSE*11*)
DISTRO=suse_11
;;
*SUSE*)
DISTRO=suse_x
;;
*CentOS*5.*)
DISTRO=centos_5
;;
*CentOS*6.*)
DISTRO=centos_6
;;
*CentOS*7*)
DISTRO=centos_7
;;
*CentOS*)
DISTRO=centos_x
;;
*Fedora*18*)
DISTRO=fedora_18
;;
*Fedora*19*)
DISTRO=fedora_19
;;
*Fedora*20*)
DISTRO=fedora_20
;;
*Fedora*)
DISTRO=fedora_x
;;
*Red*5.*)
DISTRO=redhat_5
;;
*Red*6.*)
DISTRO=redhat_6
;;
*Red*7*)
DISTRO=redhat_7
;;
*Red*8*)
DISTRO=redhat_8
;;
*Red*)
DISTRO=redhat_x
;;
*)
DISTRO=unknown
return 1
;;
esac
return 0
}
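# Usage sketch: branch on the detected distro
#   GetDistro || LogMsg "Warning: unknown distro"
#   case $DISTRO in
#   redhat_7|centos_7) LogMsg "RHEL/CentOS 7 family" ;;
#   *) LogMsg "other: $DISTRO" ;;
#   esac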
# Function that checks whether a "Call Trace" message appears in the system logs
# Returns 1 if a call trace is found, 0 otherwise
CheckCallTrace()
{
[[ -f "/var/log/syslog" ]] && logfile="/var/log/syslog" || logfile="/var/log/messages"
content=$(grep -i "Call Trace" $logfile)
if [[ -n $content ]]; then
LogMsg "Error: System get Call Trace in $logfile"
return 1
else
LogMsg "No Call Trace in $logfile"
return 0
fi
}
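# Usage sketch: fail the test if the guest logged a call trace
#   CheckCallTrace || { SetTestStateFailed; exit 1; }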
# Function to get all synthetic network interfaces
# Sets the $SYNTH_NET_INTERFACES array elements to an interface name suitable for ifconfig etc.
# Takes no arguments
GetSynthNetInterfaces()
{
# Check the distribution version to determine how interface names are exposed
# (the actual name extraction is done by the extraction() helper below)
case $DISTRO in
redhat_5)
check="net:*"
;;
*)
check="net"
;;
esac
extraction() {
case $DISTRO in
redhat_5)
SYNTH_NET_INTERFACES[$1]=`echo "${__SYNTH_NET_ADAPTERS_PATHS[$1]}" | awk -F: '{print $2}'`
;;
*)
SYNTH_NET_INTERFACES[$1]=$(ls "${__SYNTH_NET_ADAPTERS_PATHS[$1]}" | head -n 1)
;;
esac
}
# declare array
declare -a __SYNTH_NET_ADAPTERS_PATHS
# Add synthetic netadapter paths into __SYNTH_NET_ADAPTERS_PATHS array
if [ -d '/sys/devices' ]; then
while IFS= read -d $'\0' -r path ; do
__SYNTH_NET_ADAPTERS_PATHS=("${__SYNTH_NET_ADAPTERS_PATHS[@]}" "$path")
done < <(find /sys/devices -name $check -a -path '*vmbus*' -print0)
else
LogMsg "Cannot find Synthetic network interfaces. No /sys/devices directory."
return 1
fi
# Check if we found anything
if [ 0 -eq ${#__SYNTH_NET_ADAPTERS_PATHS[@]} ]; then
LogMsg "No synthetic network adapters found."
return 2
fi
# Loop __SYNTH_NET_ADAPTERS_PATHS and get interfaces
declare -i __index
for __index in "${!__SYNTH_NET_ADAPTERS_PATHS[@]}"; do
if [ ! -d "${__SYNTH_NET_ADAPTERS_PATHS[$__index]}" ]; then
LogMsg "Synthetic netadapter dir ${__SYNTH_NET_ADAPTERS_PATHS[$__index]} disappeared during processing!"
return 3
fi
# extract the interface names
extraction $__index
if [ -z "${SYNTH_NET_INTERFACES[$__index]}" ]; then
LogMsg "No network interface found in ${__SYNTH_NET_ADAPTERS_PATHS[$__index]}"
return 4
fi
done
unset __SYNTH_NET_ADAPTERS_PATHS
# Everything OK
return 0
}
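# Usage sketch: enumerate the synthetic interfaces that were found
#   GetSynthNetInterfaces || exit 1
#   for __iface in "${SYNTH_NET_INTERFACES[@]}"; do
#   LogMsg "Found synthetic interface: $__iface"
#   done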
# Function to get all legacy network interfaces
# Sets the $LEGACY_NET_INTERFACES array elements to an interface name suitable for ifconfig/ip commands.
# Takes no arguments
GetLegacyNetInterfaces()
{
# declare array
declare -a __LEGACY_NET_ADAPTERS_PATHS
# Add legacy netadapter paths into __LEGACY_NET_ADAPTERS_PATHS array
if [ -d '/sys/devices' ]; then
while IFS= read -d $'\0' -r path ; do
__LEGACY_NET_ADAPTERS_PATHS=("${__LEGACY_NET_ADAPTERS_PATHS[@]}" "$path")
done < <(find /sys/devices -name net -a ! -path '*vmbus*' -print0)
else
LogMsg "Cannot find Legacy network interfaces. No /sys/devices directory."
return 1
fi
# Check if we found anything
if [ 0 -eq ${#__LEGACY_NET_ADAPTERS_PATHS[@]} ]; then
LogMsg "No synthetic network adapters found."
return 2
fi
# Loop __LEGACY_NET_ADAPTERS_PATHS and get interfaces
declare -i __index
for __index in "${!__LEGACY_NET_ADAPTERS_PATHS[@]}"; do
if [ ! -d "${__LEGACY_NET_ADAPTERS_PATHS[$__index]}" ]; then
LogMsg "Legacy netadapter dir ${__LEGACY_NET_ADAPTERS_PATHS[$__index]} disappeared during processing!"
return 3
fi
# ls should not yield more than one interface, but doesn't hurt to be sure
LEGACY_NET_INTERFACES[$__index]=$(ls ${__LEGACY_NET_ADAPTERS_PATHS[$__index]} | head -n 1)
if [ -z "${LEGACY_NET_INTERFACES[$__index]}" ]; then
LogMsg "No network interface found in ${__LEGACY_NET_ADAPTERS_PATHS[$__index]}"
return 4
fi
done
# Everything OK
return 0
}
# Validate that $1 is an IPv4 address
CheckIP()
{
if [ 1 -ne $# ]; then
LogMsg "CheckIP accepts 1 arguments: IP address"
return 100
fi
declare ip
declare stat
ip=$1
stat=1
if [[ $ip =~ ^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then
OIFS="$IFS"
IFS='.'
ip=($ip)
IFS="$OIFS"
[[ ${ip[0]} -le 255 && ${ip[1]} -le 255 \
&& ${ip[2]} -le 255 && ${ip[3]} -le 255 ]]
stat=$?
fi
return $stat
}
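# Usage sketch:
#   CheckIP "10.0.0.1" && echo "valid IPv4"
#   CheckIP "300.1.2.3" || echo "invalid IPv4"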
# Validate that $1 is an IPv6 address
CheckIPV6()
{
if [ 1 -ne $# ]; then
LogMsg "CheckIPV6 accepts 1 arguments: IPV6 address"
return 100
fi
declare ip
declare stat
ip=$1
stat=1
if [[ $ip =~ ^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))$ ]]; then
stat=$?
fi
return $stat
}
# Check that $1 is a MAC address
CheckMAC()
{
if [ 1 -ne $# ]; then
LogMsg "CheckIP accepts 1 arguments: IP address"
return 100
fi
# allow lower and upper-case, as well as : (colon) or - (hyphen) as separators
echo "$1" | grep -E '^([0-9A-Fa-f]{2}[:-]){5}([0-9A-Fa-f]{2})$' >/dev/null 2>&1
return $?
}
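# Usage sketch (both separator styles are accepted):
#   CheckMAC "00:15:5D:01:02:03" && echo "valid MAC"
#   CheckMAC "00-15-5d-01-02-03" && echo "valid MAC"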
# Function to set interface $1 to whatever the dhcp server assigns
SetIPfromDHCP()
{
if [ 1 -ne $# ]; then
LogMsg "SetIPfromDHCP accepts 1 argument: network interface to assign the ip to"
return 100
fi
# Check first argument
ip link show "$1" >/dev/null 2>&1
if [ 0 -ne $? ]; then
LogMsg "Network adapter $1 is not working."
return 1
fi
ip -4 addr flush "$1"
GetDistro
case $DISTRO in
redhat*)
dhclient -r "$1" ; dhclient "$1"
if [ 0 -ne $? ]; then
LogMsg "Unable to get dhcpd address for interface $1"
return 2
fi
;;
centos*)
dhclient -r "$1" ; dhclient "$1"
if [ 0 -ne $? ]; then
LogMsg "Unable to get dhcpd address for interface $1"
return 2
fi
;;
debian*|ubuntu*)
dhclient -r "$1" ; dhclient "$1"
if [ 0 -ne $? ]; then
LogMsg "Unable to get dhcpd address for interface $1"
return 2
fi
;;
suse*)
dhcpcd -k "$1" ; dhcpcd "$1"
if [ 0 -ne $? ]; then
LogMsg "Unable to get dhcpd address for interface $1"
return 2
fi
;;
*)
LogMsg "Platform not supported yet!"
return 3
;;
esac
declare __IP_ADDRESS
# Get IP-Address
__IP_ADDRESS=$(ip -o addr show "$1" | grep -vi inet6 | cut -d '/' -f1 | awk '{print $NF}')
if [ -z "$__IP_ADDRESS" ]; then
LogMsg "IP address did not get assigned to $1"
return 3
fi
# OK
return 0
}
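# Usage sketch ('eth1' is a hypothetical interface name):
#   SetIPfromDHCP "eth1" || LogMsg "DHCP assignment failed for eth1"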
# Set static IP $1 on interface $2
# It's up to the caller to make sure the interface is shut down in case this function fails
# Parameters:
# $1 == static ip
# $2 == interface
# $3 == netmask optional
SetIPstatic()
{
if [ 2 -gt $# ]; then
LogMsg "SetIPstatic accepts 3 arguments: 1. static IP, 2. network interface, 3. (optional) netmask"
return 100
fi
CheckIP "$1"
if [ 0 -ne $? ]; then
LogMsg "Parameter $1 is not a valid IPv4 Address"
return 1
fi
ip link show "$2" > /dev/null 2>&1
if [ 0 -ne $? ]; then
LogMsg "Network adapter $2 is not working."
return 2
fi
declare __netmask
declare __interface
declare __ip
__netmask=${3:-255.255.255.0}
__interface="$2"
__ip="$1"
echo "$__netmask" | grep '.' >/dev/null 2>&1
if [ 0 -eq $? ]; then
__netmask=$(NetmaskToCidr "$__netmask")
if [ 0 -ne $? ]; then
LogMsg "SetIPstatic: $__netmask is not a valid netmask"
return 3
fi
fi
if [ "$__netmask" -ge 32 -o "$__netmask" -le 0 ]; then
LogMsg "SetIPstatic: $__netmask is not a valid cidr netmask"
return 4
fi
ip link set "$__interface" down
ip addr flush "$__interface"
ip addr add "$__ip"/"$__netmask" dev "$__interface"
ip link set "$__interface" up
if [ 0 -ne $? ]; then
LogMsg "Unable to assign address $__ip/$__netmask to $__interface."
return 5
fi
# Get IP-Address
declare __IP_ADDRESS
__IP_ADDRESS=$(ip -o addr show "$__interface" | grep -vi inet6 | cut -d '/' -f1 | awk '{print $NF}' | grep -vi '[a-z]')
if [ -z "$__IP_ADDRESS" ]; then
LogMsg "IP address $__ip did not get assigned to $__interface"
return 3
fi
# Check that addresses match
if [ "$__IP_ADDRESS" != "$__ip" ]; then
LogMsg "New address $__IP_ADDRESS differs from static ip $__ip on interface $__interface"
return 6
fi
# OK
return 0
}
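# Usage sketch ('eth1' and the addresses are hypothetical; the netmask may be
# dotted-decimal or CIDR bits):
#   SetIPstatic "192.168.10.5" "eth1" "255.255.255.0"
#   SetIPstatic "192.168.10.5" "eth1" "24"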
# translate network mask to CIDR notation
# Parameters:
# $1 == valid network mask
NetmaskToCidr()
{
if [ 1 -ne $# ]; then
LogMsg "NetmaskToCidr accepts 1 argument: a valid network mask"
return 100
fi
declare -i netbits=0
oldifs="$IFS"
IFS=.
for dec in $1; do
case $dec in
255)
netbits=$((netbits+8))
;;
254)
netbits=$((netbits+7))
;;
252)
netbits=$((netbits+6))
;;
248)
netbits=$((netbits+5))
;;
240)
netbits=$((netbits+4))
;;
224)
netbits=$((netbits+3))
;;
192)
netbits=$((netbits+2))
;;
128)
netbits=$((netbits+1))
;;
0) #nothing to add
;;
*)
LogMsg "NetmaskToCidr: $1 is not a valid netmask"
return 1
;;
esac
done
echo $netbits
return 0
}
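# Usage sketch:
#   bits=$(NetmaskToCidr "255.255.255.0")   # bits=24
#   bits=$(NetmaskToCidr "255.255.240.0")   # bits=20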
# Remove all default gateways
RemoveDefaultGateway()
{
while ip route del default >/dev/null 2>&1
do : #nothing
done
return 0
}
# Create default gateway
# Parameters:
# $1 == gateway ip
# $2 == interface
CreateDefaultGateway()
{
if [ 2 -ne $# ]; then
LogMsg "CreateDefaultGateway expects 2 argument"
return 100
fi
# check that $1 is an IP address
CheckIP "$1"
if [ 0 -ne $? ]; then
LogMsg "CreateDefaultGateway: $1 is not a valid IP Address"
return 1
fi
# check interface exists
ip link show "$2" >/dev/null 2>&1
if [ 0 -ne $? ]; then
LogMsg "CreateDefaultGateway: no interface $2 found."
return 2
fi
declare __interface
declare __ipv4
__ipv4="$1"
__interface="$2"
# before creating the new default route, delete any old route
RemoveDefaultGateway
# create new default gateway
ip route add default via "$__ipv4" dev "$__interface"
if [ 0 -ne $? ]; then
LogMsg "CreateDefaultGateway: unable to set $__ipv4 as a default gateway for interface $__interface"
return 3
fi
# check to make sure default gateway actually was created
ip route show | grep -i "default via $__ipv4 dev $__interface" >/dev/null 2>&1
if [ 0 -ne $? ]; then
LogMsg "CreateDefaultGateway: Route command succeded, but gateway does not appear to have been set."
return 4
fi
return 0
}
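# Usage sketch (gateway address and interface are hypothetical):
#   CreateDefaultGateway "192.168.10.1" "eth1" || LogMsg "failed to set default gateway"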
# Create Vlan Config
# Parameters:
# $1 == interface for which to create the vlan config file
# $2 == static IP to set for vlan interface
# $3 == netmask for that interface
# $4 == vlan ID
CreateVlanConfig()
{
if [ 4 -ne $# ]; then
LogMsg "CreateVlanConfig expects 4 argument"
return 100
fi
# check interface exists
ip link show "$1" >/dev/null 2>&1
if [ 0 -ne $? ]; then
LogMsg "CreateVlanConfig: no interface $1 found."
return 1
fi
# check that $2 is an IP address
CheckIP "$2"
if [[ $? -eq 0 ]]; then
netmaskConf="NETMASK"
ifaceConf="inet"
ipAddress="IPADDR"
else
CheckIPV6 "$2"
if [[ $? -eq 0 ]]; then
netmaskConf="PREFIX"
ifaceConf="inet6"
ipAddress="IPV6ADDR"
else
LogMsg "CreateVlanConfig: $2 is not a valid IP Address"
return 2
fi
fi
# VLAN IDs are numeric (valid range 0-4095)
declare __noreg='^[0-9]{1,4}$'
# check $4 for valid vlan range
if ! [[ $4 =~ $__noreg ]] ; then
LogMsg "CreateVlanConfig: invalid vlan ID $4 received."
return 3
fi
# check that vlan driver is loaded
lsmod | grep 8021q
if [ 0 -ne $? ]; then
modprobe 8021q
fi
declare __interface
declare __ip
declare __netmask
declare __vlanID
declare __file_path
declare __vlan_file_path
__interface="$1"
__ip="$2"
__netmask="$3"
__vlanID="$4"
GetDistro
case $DISTRO in
redhat*)
__file_path="/etc/sysconfig/network-scripts/ifcfg-$__interface"
if [ -e "$__file_path" ]; then
LogMsg "CreateVlanConfig: warning, $__file_path already exists."
if [ -d "$__file_path" ]; then
rm -rf "$__file_path"
else
rm -f "$__file_path"
fi
fi
__vlan_file_path="/etc/sysconfig/network-scripts/ifcfg-$__interface.$__vlanID"
if [ -e "$__vlan_file_path" ]; then
LogMsg "CreateVlanConfig: warning, $__vlan_file_path already exists."
if [ -d "$__vlan_file_path" ]; then
rm -rf "$__vlan_file_path"
else
rm -f "$__vlan_file_path"
fi
fi
cat <<-EOF > "$__file_path"
DEVICE=$__interface
TYPE=Ethernet
BOOTPROTO=none
ONBOOT=yes
EOF
cat <<-EOF > "$__vlan_file_path"
DEVICE=$__interface.$__vlanID
BOOTPROTO=none
$ipAddress=$__ip
$netmaskConf=$__netmask
ONBOOT=yes
VLAN=yes
EOF
ifdown "$__interface"
ifup "$__interface"
ifup "$__interface.$__vlanID"
;;
suse_12*)
__file_path="/etc/sysconfig/network/ifcfg-$__interface"
if [ -e "$__file_path" ]; then
LogMsg "CreateVlanConfig: warning, $__file_path already exists."
if [ -d "$__file_path" ]; then
rm -rf "$__file_path"
else
rm -f "$__file_path"
fi
fi
__vlan_file_path="/etc/sysconfig/network/ifcfg-$__interface.$__vlanID"
if [ -e "$__vlan_file_path" ]; then
LogMsg "CreateVlanConfig: warning, $__vlan_file_path already exists."
if [ -d "$__vlan_file_path" ]; then
rm -rf "$__vlan_file_path"
else
rm -f "$__vlan_file_path"
fi
fi
cat <<-EOF > "$__file_path"
TYPE=Ethernet
BOOTPROTO=none
STARTMODE=auto
EOF
if [[ $netmaskConf == "NETMASK" ]]; then
cat <<-EOF > "$__vlan_file_path"
ETHERDEVICE=$__interface
BOOTPROTO=static
IPADDR=$__ip
$netmaskConf=$__netmask
STARTMODE=auto
VLAN=yes
EOF
else
cat <<-EOF > "$__vlan_file_path"
ETHERDEVICE=$__interface
BOOTPROTO=static
IPADDR=$__ip/$__netmask
STARTMODE=auto
VLAN=yes
EOF
fi
# bring real interface down and up again
wicked ifdown "$__interface"
wicked ifup "$__interface"
# bring also vlan interface up
wicked ifup "$__interface.$__vlanID"
;;
suse*)
__file_path="/etc/sysconfig/network/ifcfg-$__interface"
if [ -e "$__file_path" ]; then
LogMsg "CreateVlanConfig: warning, $__file_path already exists."
if [ -d "$__file_path" ]; then
rm -rf "$__file_path"
else
rm -f "$__file_path"
fi
fi
__vlan_file_path="/etc/sysconfig/network/ifcfg-$__interface.$__vlanID"
if [ -e "$__vlan_file_path" ]; then
LogMsg "CreateVlanConfig: warning, $__vlan_file_path already exists."
if [ -d "$__vlan_file_path" ]; then
rm -rf "$__vlan_file_path"
else
rm -f "$__vlan_file_path"
fi
fi
cat <<-EOF > "$__file_path"
BOOTPROTO=static
IPADDR=0.0.0.0
STARTMODE=auto
EOF
if [[ $netmaskConf == "NETMASK" ]]; then
cat <<-EOF > "$__vlan_file_path"
BOOTPROTO=static
IPADDR=$__ip
$netmaskConf=$__netmask
STARTMODE=auto
VLAN=yes
ETHERDEVICE=$__interface
EOF
else
cat <<-EOF > "$__vlan_file_path"
BOOTPROTO=static
IPADDR=$__ip/$__netmask
STARTMODE=auto
VLAN=yes
ETHERDEVICE=$__interface
EOF
fi
ifdown "$__interface"
ifup "$__interface"
ifup "$__interface.$__vlanID"
;;
debian*|ubuntu*)
#Check for vlan package and install it in case of absence
dpkg -s vlan
if [ 0 -ne $? ]; then
apt-get -y install vlan
if [ 0 -ne $? ]; then
LogMsg "Failed to install VLAN package. Please try manually."
return 90
fi
fi
__file_path="/etc/network/interfaces"
if [ ! -e "$__file_path" ]; then
LogMsg "CreateVlanConfig: warning, $__file_path does not exist. Creating it..."
if [ -d "$(dirname $__file_path)" ]; then
touch "$__file_path"
else
rm -f "$(dirname $__file_path)"
LogMsg "CreateVlanConfig: Warning $(dirname $__file_path) is not a directory"
mkdir -p "$(dirname $__file_path)"
touch "$__file_path"
fi
fi
declare __first_iface
declare __last_line
declare __second_iface
# delete any previously existing lines containing the desired vlan interface
# get first line number containing our interested interface
__first_iface=$(awk "/iface $__interface/ { print NR; exit }" "$__file_path")
# if there was any such line found, delete it and any related config lines
if [ -n "$__first_iface" ]; then
# get the last line of the file
__last_line=$(wc -l $__file_path | cut -d ' ' -f 1)
# sanity check
if [ "$__first_iface" -gt "$__last_line" ]; then
LogMsg "CreateVlanConfig: error while parsing $__file_path . First iface line is gt last line in file"
return 100
fi
# get the last x lines after __first_iface
__second_iface=$((__last_line-__first_iface))
# if the first_iface was also the last line in the file
if [ "$__second_iface" -eq 0 ]; then
__second_iface=$__last_line
else
# get the line number of the second iface line
__second_iface=$(tail -n $__second_iface $__file_path | awk "/iface/ { print NR; exit }")
if [ -z $__second_iface ]; then
__second_iface="$__last_line"
else
__second_iface=$((__first_iface+__second_iface-1))
fi
if [ "$__second_iface" -gt "$__last_line" ]; then
LogMsg "CreateVlanConfig: error while parsing $__file_path . Second iface line is gt last line in file"
return 100
fi
if [ "$__second_iface" -le "$__first_iface" ]; then
LogMsg "CreateVlanConfig: error while parsing $__file_path . Second iface line is gt last line in file"
return 100
fi
fi
# now delete all lines between the first iface and the second iface
sed -i "$__first_iface,${__second_iface}d" "$__file_path"
fi
sed -i "/auto $__interface/d" "$__file_path"
# now append our config to the end of the file
cat << EOF >> "$__file_path"
auto $__interface
iface $__interface inet static
address 0.0.0.0
auto $__interface.$__vlanID
iface $__interface.$__vlanID $ifaceConf static
address $__ip
netmask $__netmask
EOF
ifdown "$__interface"
ifup $__interface
ifup $__interface.$__vlanID
;;
*)
LogMsg "Platform not supported yet!"
return 4
;;
esac
sleep 5
# verify change took place
cat /proc/net/vlan/config | grep " $__vlanID "
if [ 0 -ne $? ]; then
LogMsg "/proc/net/vlan/config has no vlanID of $__vlanID"
return 5
fi
return 0
}
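# Usage sketch ('eth1', the address and VLAN ID 100 are hypothetical):
#   CreateVlanConfig "eth1" "192.168.100.5" "255.255.255.0" "100"
# The matching cleanup call is RemoveVlanConfig (defined below):
#   RemoveVlanConfig "eth1" "100"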
# Remove Vlan Config
# Parameters:
# $1 == interface from which to remove the vlan config file
# $2 == vlan ID
RemoveVlanConfig()
{
if [ 2 -ne $# ]; then
LogMsg "RemoveVlanConfig expects 2 argument"
return 100
fi
# check interface exists
ip link show "$1" >/dev/null 2>&1
if [ 0 -ne $? ]; then
LogMsg "RemoveVlanConfig: no interface $1 found."
return 1
fi
# VLAN IDs are numeric (valid range 0-4095)
declare __noreg='^[0-9]{1,4}$'
# check $2 for valid vlan range
if ! [[ $2 =~ $__noreg ]] ; then
LogMsg "RemoveVlanConfig: invalid vlan ID $2 received."
return 2
fi
declare __interface
declare __ip
declare __netmask
declare __vlanID
declare __file_path
__interface="$1"
__vlanID="$2"
GetDistro
case $DISTRO in
redhat*)
__file_path="/etc/sysconfig/network-scripts/ifcfg-$__interface.$__vlanID"
if [ -e "$__file_path" ]; then
LogMsg "RemoveVlanConfig: found $__file_path ."
if [ -d "$__file_path" ]; then
rm -rf "$__file_path"
else
rm -f "$__file_path"
fi
fi
service network restart 2>&1
# make sure the interface is down
ip link set "$__interface.$__vlanID" down
;;
suse_12*)
__file_path="/etc/sysconfig/network/ifcfg-$__interface.$__vlanID"
if [ -e "$__file_path" ]; then
LogMsg "RemoveVlanConfig: found $__file_path ."
if [ -d "$__file_path" ]; then
rm -rf "$__file_path"
else
rm -f "$__file_path"
fi
fi
wicked ifdown "$__interface.$__vlanID"
# make sure the interface is down
ip link set "$__interface.$__vlanID" down
;;
suse*)
__file_path="/etc/sysconfig/network/ifcfg-$__interface.$__vlanID"
if [ -e "$__file_path" ]; then
LogMsg "RemoveVlanConfig: found $__file_path ."
if [ -d "$__file_path" ]; then
rm -rf "$__file_path"
else
rm -f "$__file_path"
fi
fi
ifdown $__interface.$__vlanID
ifdown $__interface
ifup $__interface
# make sure the interface is down
ip link set "$__interface.$__vlanID" down
;;
debian*|ubuntu*)
__file_path="/etc/network/interfaces"
if [ ! -e "$__file_path" ]; then
LogMsg "RemoveVlanConfig: warning, $__file_path does not exist."
return 0
fi
if [ ! -d "$(dirname $__file_path)" ]; then
LogMsg "RemoveVlanConfig: warning, $(dirname $__file_path) does not exist."
return 0
else
rm -f "$(dirname $__file_path)"
LogMsg "CreateVlanConfig: Warning $(dirname $__file_path) is not a directory"
mkdir -p "$(dirname $__file_path)"
touch "$__file_path"
fi
declare __first_iface
declare __last_line
declare __second_iface
# delete any previously existing lines containing the desired vlan interface
# get first line number containing our interested interface
__first_iface=$(awk "/iface $__interface.$__vlanID/ { print NR; exit }" "$__file_path")
# if there was any such line found, delete it and any related config lines
if [ -n "$__first_iface" ]; then
# get the last line of the file
__last_line=$(wc -l $__file_path | cut -d ' ' -f 1)
# sanity check
if [ "$__first_iface" -gt "$__last_line" ]; then
LogMsg "CreateVlanConfig: error while parsing $__file_path . First iface line is gt last line in file"
return 100
fi
# get the last x lines after __first_iface
__second_iface=$((__last_line-__first_iface))
# if the first_iface was also the last line in the file
if [ "$__second_iface" -eq 0 ]; then
__second_iface=$__last_line
else
# get the line number of the second iface line
__second_iface=$(tail -n $__second_iface $__file_path | awk "/iface/ { print NR; exit }")
if [ -z $__second_iface ]; then
__second_iface="$__last_line"
else
__second_iface=$((__first_iface+__second_iface-1))
fi
if [ "$__second_iface" -gt "$__last_line" ]; then
LogMsg "CreateVlanConfig: error while parsing $__file_path . Second iface line is gt last line in file"
return 100
fi
if [ "$__second_iface" -le "$__first_iface" ]; then
LogMsg "CreateVlanConfig: error while parsing $__file_path . Second iface line is gt last line in file"
return 100
fi
fi
# now delete all lines between the first iface and the second iface
sed -i "$__first_iface,${__second_iface}d" "$__file_path"
fi
sed -i "/auto $__interface.$__vlanID/d" "$__file_path"
;;
*)
LogMsg "Platform not supported yet!"
return 3
;;
esac
return 0
}
# Create ifup config file
# Parameters:
# $1 == interface name
# $2 == static | dhcp
# $3 == IP Address
# $4 == Subnet Mask
# if $2 is set to dhcp, $3 and $4 are ignored
CreateIfupConfigFile()
{
if [ 2 -gt $# -o 4 -lt $# ]; then
LogMsg "CreateIfupConfigFile accepts between 2 and 4 arguments"
return 100
fi
# check interface exists
ip link show "$1" >/dev/null 2>&1
if [ 0 -ne $? ]; then
LogMsg "CreateIfupConfigFile: no interface $1 found."
return 1
fi
declare __interface_name="$1"
declare __create_static=0
declare __ip
declare __netmask
declare __file_path
ipv6=false
case "$2" in
static)
__create_static=1
;;
dhcp)
__create_static=0
;;
*)
LogMsg "CreateIfupConfigFile: \$2 needs to be either static or dhcp (received $2)"
return 2
;;
esac
if [ "$__create_static" -eq 0 ]; then
# create config file for dhcp
GetDistro
case $DISTRO in
suse_12*)
__file_path="/etc/sysconfig/network/ifcfg-$__interface_name"
if [ ! -d "$(dirname $__file_path)" ]; then
LogMsg "CreateIfupConfigFile: $(dirname $__file_path) does not exist! Something is wrong with the network config!"
return 3
fi
if [ -e "$__file_path" ]; then
LogMsg "CreateIfupConfigFile: Warning will overwrite $__file_path ."
fi
cat <<-EOF > "$__file_path"
STARTMODE='auto'
BOOTPROTO='dhcp'
EOF
wicked ifdown "$__interface_name"
wicked ifup "$__interface_name"
;;
suse*)
__file_path="/etc/sysconfig/network/ifcfg-$__interface_name"
if [ ! -d "$(dirname $__file_path)" ]; then
LogMsg "CreateIfupConfigFile: $(dirname $__file_path) does not exist! Something is wrong with the network config!"
return 3
fi
if [ -e "$__file_path" ]; then
LogMsg "CreateIfupConfigFile: Warning will overwrite $__file_path ."
fi
cat <<-EOF > "$__file_path"
STARTMODE=manual
BOOTPROTO=dhcp
EOF
ifdown "$__interface_name"
ifup "$__interface_name"
;;
redhat_7|centos_7)
__file_path="/etc/sysconfig/network-scripts/ifcfg-$__interface_name"
if [ ! -d "$(dirname $__file_path)" ]; then
LogMsg "CreateIfupConfigFile: $(dirname $__file_path) does not exist! Something is wrong with the network config!"
return 3
fi
if [ -e "$__file_path" ]; then
LogMsg "CreateIfupConfigFile: Warning will overwrite $__file_path ."
fi
cat <<-EOF > "$__file_path"
DEVICE="$__interface_name"
BOOTPROTO=dhcp
EOF
ifdown "$__interface_name"
ifup "$__interface_name"
;;
redhat_6|centos_6)
__file_path="/etc/sysconfig/network-scripts/ifcfg-$__interface_name"
if [ ! -d "$(dirname $__file_path)" ]; then
LogMsg "CreateIfupConfigFile: $(dirname $__file_path) does not exist! Something is wrong with the network config!"
return 3
fi
if [ -e "$__file_path" ]; then
LogMsg "CreateIfupConfigFile: Warning will overwrite $__file_path ."
fi
cat <<-EOF > "$__file_path"
DEVICE="$__interface_name"
BOOTPROTO=dhcp
IPV6INIT=yes
EOF
ifdown "$__interface_name"
ifup "$__interface_name"
;;
redhat_5|centos_5)
__file_path="/etc/sysconfig/network-scripts/ifcfg-$__interface_name"
if [ ! -d "$(dirname $__file_path)" ]; then
LogMsg "CreateIfupConfigFile: $(dirname $__file_path) does not exist! Something is wrong with the network config!"
return 3
fi
if [ -e "$__file_path" ]; then
LogMsg "CreateIfupConfigFile: Warning will overwrite $__file_path ."
fi
cat <<-EOF > "$__file_path"
DEVICE="$__interface_name"
BOOTPROTO=dhcp
IPV6INIT=yes
EOF
cat <<-EOF >> "/etc/sysconfig/network"
NETWORKING_IPV6=yes
EOF
ifdown "$__interface_name"
ifup "$__interface_name"
;;
debian*|ubuntu*)
__file_path="/etc/network/interfaces"
if [ ! -d "$(dirname $__file_path)" ]; then
LogMsg "CreateIfupConfigFile: $(dirname $__file_path) does not exist! Something is wrong with the network config!"
return 3
fi
if [ -e "$__file_path" ]; then
LogMsg "CreateIfupConfigFile: Warning will overwrite $__file_path ."
fi
#Check if interface is already configured. If so, delete old config
if grep -q "$__interface_name" $__file_path
then
LogMsg "CreateIfupConfigFile: Warning will delete older configuration of interface $__interface_name"
sed -i "/$__interface_name/d" $__file_path
fi
cat <<-EOF >> "$__file_path"
auto $__interface_name
iface $__interface_name inet dhcp
EOF
service network-manager restart
ifdown "$__interface_name"
ifup "$__interface_name"
;;
*)
LogMsg "CreateIfupConfigFile: Platform not supported yet!"
return 3
;;
esac
else
# create config file for static
if [ $# -ne 4 ]; then
LogMsg "CreateIfupConfigFile: if static config is selected, please provide 4 arguments"
return 100
fi
if [[ $3 == *":"* ]]; then
CheckIPV6 "$3"
if [ 0 -ne $? ]; then
LogMsg "CreateIfupConfigFile: $3 is not a valid IPV6 Address"
return 2
fi
ipv6=true
else
CheckIP "$3"
if [ 0 -ne $? ]; then
LogMsg "CreateIfupConfigFile: $3 is not a valid IP Address"
return 2
fi
fi
__ip="$3"
__netmask="$4"
declare -i lineNumber
GetDistro
case $DISTRO in
suse_12*)
__file_path="/etc/sysconfig/network/ifcfg-$__interface_name"
if [ ! -d "$(dirname $__file_path)" ]; then
LogMsg "CreateIfupConfigFile: $(dirname $__file_path) does not exist! Something is wrong with the network config!"
return 3
fi
if [ -e "$__file_path" ]; then
LogMsg "CreateIfupConfigFile: Warning will overwrite $__file_path ."
fi
if [[ $ipv6 == false ]]; then
cat <<-EOF > "$__file_path"
STARTMODE=manual
BOOTPROTO=static
IPADDR="$__ip"
NETMASK="$__netmask"
EOF
else
cat <<-EOF > "$__file_path"
STARTMODE=manual
BOOTPROTO=static
IPADDR="$__ip/$__netmask"
EOF
fi
wicked ifdown "$__interface_name"
wicked ifup "$__interface_name"
;;
suse*)
__file_path="/etc/sysconfig/network/ifcfg-$__interface_name"
if [ ! -d "$(dirname $__file_path)" ]; then
LogMsg "CreateIfupConfigFile: $(dirname $__file_path) does not exist! Something is wrong with the network config!"
return 3
fi
if [ -e "$__file_path" ]; then
LogMsg "CreateIfupConfigFile: Warning will overwrite $__file_path ."
fi
cat <<-EOF > "$__file_path"
STARTMODE=manual
BOOTPROTO=static
IPADDR="$__ip"
NETMASK="$__netmask"
EOF
ifdown "$__interface_name"
ifup "$__interface_name"
;;
redhat*|centos*)
__file_path="/etc/sysconfig/network-scripts/ifcfg-$__interface_name"
if [ ! -d "$(dirname $__file_path)" ]; then
LogMsg "CreateIfupConfigFile: $(dirname $__file_path) does not exist! Something is wrong with the network config!"
return 3
fi
if [ -e "$__file_path" ]; then
LogMsg "CreateIfupConfigFile: Warning will overwrite $__file_path ."
fi
if [[ $ipv6 == false ]]; then
cat <<-EOF > "$__file_path"
DEVICE="$__interface_name"
BOOTPROTO=none
IPADDR="$__ip"
NETMASK="$__netmask"
NM_CONTROLLED=no
EOF
else
cat <<-EOF > "$__file_path"
DEVICE="$__interface_name"
BOOTPROTO=none
IPV6ADDR="$__ip"
IPV6INIT=yes
PREFIX="$__netmask"
NM_CONTROLLED=no
EOF
fi
ifdown "$__interface_name"
ifup "$__interface_name"
;;
debian*|ubuntu*)
__file_path="/etc/network/interfaces"
if [ ! -d "$(dirname $__file_path)" ]; then
LogMsg "CreateIfupConfigFile: $(dirname $__file_path) does not exist! Something is wrong with the network config!"
return 3
fi
if [ -e "$__file_path" ]; then
LogMsg "CreateIfupConfigFile: Warning will overwrite $__file_path ."
fi
#Check if interface is already configured. If so, delete old config
if grep -q "$__interface_name" $__file_path
then
LogMsg "CreateIfupConfigFile: Warning will delete older configuration of interface $__interface_name"
lineNumber=$(cat -n $__file_path | grep "iface $__interface_name" | awk '{print $1;}')
if [ -n "$lineNumber" ]; then
lineNumber=$lineNumber+1
sed -i "${lineNumber},+1 d" $__file_path
fi
sed -i "/$__interface_name/d" $__file_path
fi
if [[ $ipv6 == false ]]; then
cat <<-EOF >> "$__file_path"
auto $__interface_name
iface $__interface_name inet static
address $__ip
netmask $__netmask
EOF
else
cat <<-EOF >> "$__file_path"
auto $__interface_name
iface $__interface_name inet6 static
address $__ip
netmask $__netmask
EOF
fi
service network-manager restart
ifdown "$__interface_name"
ifup "$__interface_name"
;;
*)
LogMsg "CreateIfupConfigFile: Platform not supported yet!"
return 3
;;
esac
fi
sysctl -w net.ipv4.conf.all.rp_filter=0
sysctl -w net.ipv4.conf.default.rp_filter=0
sysctl -w net.ipv4.conf.eth0.rp_filter=0
sysctl -w net.ipv4.conf.$__interface_name.rp_filter=0
sleep 5
return 0
}
# Control Network Manager
# Parameters:
# $1 == start | stop
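#
# Example (illustrative): ControlNetworkManager stop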
ControlNetworkManager()
{
if [ 1 -ne $# ]; then
LogMsg "ControlNetworkManager accepts 1 argument: start | stop"
return 100
fi
# Check first argument
if [ x"$1" != xstop ]; then
if [ x"$1" != xstart ]; then
LogMsg "ControlNetworkManager accepts 1 argument: start | stop."
return 100
fi
fi
GetDistro
case $DISTRO in
redhat*)
# check that we have a NetworkManager service running
service NetworkManager status
if [ 0 -ne $? ]; then
LogMsg "NetworkManager does not appear to be running."
return 0
fi
# now try to start|stop the service
service NetworkManager $1
if [ 0 -ne $? ]; then
LogMsg "Unable to $1 NetworkManager."
return 1
else
LogMsg "Successfully ${1}ed NetworkManager."
fi
;;
centos*)
# check that we have a NetworkManager service running
service NetworkManager status
if [ 0 -ne $? ]; then
LogMsg "NetworkManager does not appear to be running."
return 0
fi
# now try to start|stop the service
service NetworkManager $1
if [ 0 -ne $? ]; then
LogMsg "Unable to $1 NetworkManager."
return 1
else
LogMsg "Successfully ${1}ed NetworkManager."
fi
;;
debian*)
# check that we have a NetworkManager service running
service network-manager status
if [ 0 -ne $? ]; then
LogMsg "NetworkManager does not appear to be running."
return 0
fi
# now try to start|stop the service
service network-manager $1
if [ 0 -ne $? ]; then
LogMsg "Unable to $1 NetworkManager."
return 1
else
LogMsg "Successfully ${1}ed NetworkManager."
fi
;;
suse*)
# no service file
# edit /etc/sysconfig/network/config and set NETWORKMANAGER=no
declare __nm_activated
if [ x"$1" = xstart ]; then
__nm_activated=yes
else
__nm_activated=no
fi
if [ -f /etc/sysconfig/network/config ]; then
grep '^NETWORKMANAGER=' /etc/sysconfig/network/config
if [ 0 -eq $? ]; then
sed -i "s/^NETWORKMANAGER=.*/NETWORKMANAGER=$__nm_activated/g" /etc/sysconfig/network/config
else
echo "NETWORKMANAGER=$__nm_activated" >> /etc/sysconfig/network/config
fi
# before restarting service, save the LIS network interface details and restore them after restarting. (or at least try)
# this needs to be done in the caller, as this function cannot be expected to read the constants file and know which interface to reconfigure.
service network restart
else
LogMsg "No network config file found at /etc/sysconfig/network/config"
return 1
fi
LogMsg "Successfully ${1}ed NetworkManager."
;;
debian*|ubuntu*)
# check that we have a NetworkManager service running
service network-manager status
if [ 0 -ne $? ]; then
LogMsg "NetworkManager does not appear to be running."
return 0
fi
# now try to start|stop the service
service network-manager $1
if [ 0 -ne $? ]; then
LogMsg "Unable to $1 NetworkManager."
return 1
else
LogMsg "Successfully ${1}ed NetworkManager."
fi
;;
*)
LogMsg "Platform not supported yet!"
return 3
;;
esac
return 0
}
# Convenience Function to disable NetworkManager
DisableNetworkManager()
{
ControlNetworkManager stop
# propagate return value from ControlNetworkManager
return $?
}
# Convenience Function to enable NetworkManager
EnableNetworkManager()
{
ControlNetworkManager start
# propagate return value from ControlNetworkManager
return $?
}
# Setup a bridge named br0
# $1 == Bridge IP Address
# $2 == Bridge netmask
# $3 - $# == Interfaces to attach to bridge
# if no parameter is given outside of IP and Netmask, all interfaces will be added (except lo)
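#
# Example invocations (illustrative placeholder values):
#
#   SetupBridge 192.168.0.1 255.255.255.0        # attach all interfaces found
#   SetupBridge 192.168.0.1 24 eth1 eth2         # attach only eth1 and eth2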
SetupBridge()
{
if [ $# -lt 2 ]; then
LogMsg "SetupBridge needs at least 2 parameters"
return 1
fi
declare -a __bridge_interfaces
declare __bridge_ip
declare __bridge_netmask
CheckIP "$1"
if [ 0 -ne $? ]; then
LogMsg "SetupBridge: $1 is not a valid IP Address"
return 2
fi
__bridge_ip="$1"
__bridge_netmask="$2"
echo "$__bridge_netmask" | grep '.' >/dev/null 2>&1
if [ 0 -eq $? ]; then
__bridge_netmask=$(NetmaskToCidr "$__bridge_netmask")
if [ 0 -ne $? ]; then
LogMsg "SetupBridge: $__bridge_netmask is not a valid netmask"
return 3
fi
fi
if [ "$__bridge_netmask" -ge 32 -o "$__bridge_netmask" -le 0 ]; then
LogMsg "SetupBridge: $__bridge_netmask is not a valid cidr netmask"
return 4
fi
if [ 2 -eq $# ]; then
LogMsg "SetupBridge received no interface argument. All network interfaces found will be attached to the bridge."
# Get all synthetic interfaces
GetSynthNetInterfaces
# Remove the loopback interface
SYNTH_NET_INTERFACES=(${SYNTH_NET_INTERFACES[@]/lo/})
# Get the legacy interfaces
GetLegacyNetInterfaces
# Remove the loopback interface
LEGACY_NET_INTERFACES=(${LEGACY_NET_INTERFACES[@]/lo/})
# Remove the bridge itself
LEGACY_NET_INTERFACES=(${LEGACY_NET_INTERFACES[@]/br0/})
# concat both arrays and use this new variable from now on.
__bridge_interfaces=("${SYNTH_NET_INTERFACES[@]}" "${LEGACY_NET_INTERFACES[@]}")
if [ ${#__bridge_interfaces[@]} -eq 0 ]; then
LogMsg "SetupBridge: No interfaces found"
return 3
fi
else
# get rid of the first two parameters
shift
shift
# and loop through the remaining ones
declare __iterator
for __iterator in "$@"; do
ip link show "$__iterator" >/dev/null 2>&1
if [ 0 -ne $? ]; then
LogMsg "SetupBridge: Interface $__iterator not working or not present"
return 4
fi
__bridge_interfaces=("${__bridge_interfaces[@]}" "$__iterator")
done
fi
# create bridge br0
brctl addbr br0
if [ 0 -ne $? ]; then
LogMsg "SetupBridge: unable to create bridge br0"
return 5
fi
# turn off stp
brctl stp br0 off
declare __iface
# set all interfaces to 0.0.0.0 and then add them to the bridge
for __iface in ${__bridge_interfaces[@]}; do
ip link set "$__iface" down
ip addr flush dev "$__iface"
ip link set "$__iface" up
ip link set dev "$__iface" promisc on
#add interface to bridge
brctl addif br0 "$__iface"
if [ 0 -ne $? ]; then
LogMsg "SetupBridge: unable to add interface $__iface to bridge br0"
return 6
fi
LogMsg "SetupBridge: Added $__iface to bridge"
echo "1" > /proc/sys/net/ipv4/conf/"$__iface"/proxy_arp
echo "1" > /proc/sys/net/ipv4/conf/"$__iface"/forwarding
done
#setup forwarding on bridge
echo "1" > /proc/sys/net/ipv4/conf/br0/forwarding
echo "1" > /proc/sys/net/ipv4/conf/br0/proxy_arp
echo "1" > /proc/sys/net/ipv4/ip_forward
ip link set br0 down
ip addr add "$__bridge_ip"/"$__bridge_netmask" dev br0
ip link set br0 up
LogMsg "$(brctl show br0)"
LogMsg "SetupBridge: Successfull"
# done
return 0
}
# TearDown Bridge br0
TearDownBridge()
{
ip link show br0 >/dev/null 2>&1
if [ 0 -ne $? ]; then
LogMsg "TearDownBridge: No interface br0 found"
return 1
fi
brctl show br0
if [ 0 -ne $? ]; then
LogMsg "TearDownBridge: No bridge br0 found"
return 2
fi
# get Mac Addresses of interfaces attached to the bridge
declare __bridge_macs
__bridge_macs=$(brctl showmacs br0 | grep -i "yes" | cut -f 2)
# get the interfaces associated with those macs
declare __mac
declare __bridge_interfaces
for __mac in $__bridge_macs; do
__bridge_interfaces=$(grep -il "$__mac" /sys/class/net/*/address)
if [ 0 -ne $? ]; then
msg="TearDownBridge: MAC Address $__mac does not belong to any interface."
LogMsg "$msg"
UpdateSummary "$msg"
SetTestStateFailed
return 3
fi
# get just the interface name from the path
__bridge_interfaces=$(basename "$(dirname "$__bridge_interfaces")")
ip link show "$__bridge_interfaces" >/dev/null 2>&1
if [ 0 -ne $? ]; then
LogMsg "TearDownBridge: Could not find interface $__bridge_interfaces"
return 4
fi
brctl delif br0 "$__bridge_interfaces"
done
# remove the bridge itself
ip link set br0 down
brctl delbr br0
return 0
}
# Check free space
# $1 path to directory to check for free space
# $2 number of bytes to compare
# return == 0 if total free space is greater than $2
# return 1 otherwise
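#
# Example (illustrative): require at least 1 GiB free under /tmp
#
#   IsFreeSpace /tmp $((1024*1024*1024)) || LogMsg "Not enough free space"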
IsFreeSpace()
{
if [ 2 -ne $# ]; then
LogMsg "IsFreeSpace takes 2 arguments: path/to/dir to check for free space and number of bytes needed free"
return 100
fi
declare -i __total_free_bytes=0
__total_free_bytes=$(($(df "$1" | awk '/[0-9]%/{print $(NF-2)}')*1024)) # df returns size in 1K blocks
if [ "$2" -gt "$__total_free_bytes" ]; then
return 1
fi
return 0
}
declare os_VENDOR os_RELEASE os_UPDATE os_PACKAGE os_CODENAME
########################################################################
# Determine what OS is running
########################################################################
# GetOSVersion
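# Example (illustrative): GetOSVersion; echo "$os_VENDOR $os_RELEASE ($os_CODENAME)"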
function GetOSVersion {
# Figure out which vendor we are
if [[ -x "`which sw_vers 2>/dev/null`" ]]; then
# OS/X
os_VENDOR=`sw_vers -productName`
os_RELEASE=`sw_vers -productVersion`
os_UPDATE=${os_RELEASE##*.}
os_RELEASE=${os_RELEASE%.*}
os_PACKAGE=""
if [[ "$os_RELEASE" =~ "10.7" ]]; then
os_CODENAME="lion"
elif [[ "$os_RELEASE" =~ "10.6" ]]; then
os_CODENAME="snow leopard"
elif [[ "$os_RELEASE" =~ "10.5" ]]; then
os_CODENAME="leopard"
elif [[ "$os_RELEASE" =~ "10.4" ]]; then
os_CODENAME="tiger"
elif [[ "$os_RELEASE" =~ "10.3" ]]; then
os_CODENAME="panther"
else
os_CODENAME=""
fi
elif [[ -x $(which lsb_release 2>/dev/null) ]]; then
os_VENDOR=$(lsb_release -i -s)
os_RELEASE=$(lsb_release -r -s)
os_UPDATE=""
os_PACKAGE="rpm"
if [[ "Debian,Ubuntu,LinuxMint" =~ $os_VENDOR ]]; then
os_PACKAGE="deb"
elif [[ "SUSE LINUX" =~ $os_VENDOR ]]; then
lsb_release -d -s | grep -q openSUSE
if [[ $? -eq 0 ]]; then
os_VENDOR="openSUSE"
fi
elif [[ $os_VENDOR == "openSUSE project" ]]; then
os_VENDOR="openSUSE"
elif [[ $os_VENDOR =~ Red.*Hat ]]; then
os_VENDOR="Red Hat"
fi
os_CODENAME=$(lsb_release -c -s)
elif [[ -r /etc/redhat-release ]]; then
# Red Hat Enterprise Linux Server release 5.5 (Tikanga)
# Red Hat Enterprise Linux Server release 7.0 Beta (Maipo)
# CentOS release 5.5 (Final)
# CentOS Linux release 6.0 (Final)
# Fedora release 16 (Verne)
# XenServer release 6.2.0-70446c (xenenterprise)
os_CODENAME=""
for r in "Red Hat" CentOS Fedora XenServer; do
os_VENDOR=$r
if [[ -n "`grep \"$r\" /etc/redhat-release`" ]]; then
ver=`sed -e 's/^.* \([0-9].*\) (\(.*\)).*$/\1\|\2/' /etc/redhat-release`
os_CODENAME=${ver#*|}
os_RELEASE=${ver%|*}
os_UPDATE=${os_RELEASE##*.}
os_RELEASE=${os_RELEASE%.*}
break
fi
os_VENDOR=""
done
os_PACKAGE="rpm"
elif [[ -r /etc/SuSE-release ]]; then
for r in openSUSE "SUSE Linux"; do
if [[ "$r" = "SUSE Linux" ]]; then
os_VENDOR="SUSE LINUX"
else
os_VENDOR=$r
fi
if [[ -n "`grep \"$r\" /etc/SuSE-release`" ]]; then
os_CODENAME=`grep "CODENAME = " /etc/SuSE-release | sed 's:.* = ::g'`
os_RELEASE=`grep "VERSION = " /etc/SuSE-release | sed 's:.* = ::g'`
os_UPDATE=`grep "PATCHLEVEL = " /etc/SuSE-release | sed 's:.* = ::g'`
break
fi
os_VENDOR=""
done
os_PACKAGE="rpm"
# If lsb_release is not installed, we should be able to detect Debian OS
elif [[ -f /etc/debian_version ]] && [[ $(cat /proc/version) =~ "Debian" ]]; then
os_VENDOR="Debian"
os_PACKAGE="deb"
os_CODENAME=$(awk '/VERSION=/' /etc/os-release | sed 's/VERSION=//' | sed -r 's/\"|\(|\)//g' | awk '{print $2}')
os_RELEASE=$(awk '/VERSION_ID=/' /etc/os-release | sed 's/VERSION_ID=//' | sed 's/\"//g')
fi
export os_VENDOR os_RELEASE os_UPDATE os_PACKAGE os_CODENAME
}
#######################################################################
# Determine if current distribution is a Fedora-based distribution
# (Fedora, RHEL, CentOS, etc).
#######################################################################
function is_fedora {
if [[ -z "$os_VENDOR" ]]; then
GetOSVersion
fi
[ "$os_VENDOR" = "Fedora" ] || [ "$os_VENDOR" = "Red Hat" ] || \
[ "$os_VENDOR" = "CentOS" ] || [ "$os_VENDOR" = "OracleServer" ]
}
#######################################################################
# Determine if current distribution is a Rhel/CentOS 7 distribution
#######################################################################
function is_rhel7 {
if [[ -z "$os_RELEASE" ]]; then
GetOSVersion
fi
[ "$os_VENDOR" = "Red Hat" ] || \
[ "$os_VENDOR" = "CentOS" ] || [ "$os_VENDOR" = "OracleServer" ] && \
[[ $os_RELEASE =~ 7.* ]]
}
#######################################################################
# Determine if current distribution is a SUSE-based distribution
# (openSUSE, SLE).
#######################################################################
function is_suse {
if [[ -z "$os_VENDOR" ]]; then
GetOSVersion
fi
[ "$os_VENDOR" = "openSUSE" ] || [ "$os_VENDOR" = "SUSE LINUX" ]
}
#######################################################################
# Determine if current distribution is an Ubuntu-based distribution
# It will also detect non-Ubuntu but Debian-based distros
#######################################################################
function is_ubuntu {
if [[ -z "$os_PACKAGE" ]]; then
GetOSVersion
fi
[ "$os_PACKAGE" = "deb" ]
}
|
HiRoySoft/ESX-LISA
|
remote-scripts/utils.sh
|
Shell
|
apache-2.0
| 55,551 |
#!/bin/bash
# Copyright 2017 - 2022 Crunchy Data Solutions, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
CRUNCHY_DIR=${CRUNCHY_DIR:-'/opt/crunchy'}
source "${CRUNCHY_DIR}/bin/restore_common_lib.sh"
enable_debugging
# Warn the user of any deprecated env vars due to full transition to env vars for configuring
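# e.g. (illustrative) a legacy STANZA=db setting is mapped to PGBACKREST_STANZA=db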
check_for_deprecated_env_vars() {
if [[ -v STANZA ]] && [[ ! -v PGBACKREST_STANZA ]]
then
echo_warn "STANZA has been deprecated and will be removed in a future release - please use PGBACKREST_STANZA instead."
echo_warn "PGBACKREST_STANZA will be set to '${STANZA}' for this restore."
export PGBACKREST_STANZA="${STANZA}"
fi
if [[ -v DELTA ]] && [[ ! -v PGBACKREST_DELTA ]]
then
echo_warn "DELTA has been deprecated and will be removed in a future release - please use PGBACKREST_DELTA instead."
echo_warn "PGBACKREST_DELTA will be set to 'y' as a result of setting DELTA for this restore."
export PGBACKREST_DELTA="y"
fi
if [[ -v PITR_TARGET ]] && [[ ! -v PGBACKREST_TARGET ]]
then
echo_warn "PITR_TARGET has been deprecated and will be removed in a future release - please use PGBACKREST_TARGET instead."
echo_warn "PGBACKREST_TARGET will be set to the value specified for PITR_TARGET for this restore."
export PGBACKREST_TARGET="${PITR_TARGET}"
fi
if [[ -v BACKREST_CUSTOM_OPTS ]]
then
echo_warn "BACKREST_CUSTOM_OPTS has been deprecated and will be removed in a future release."
echo_warn "Please use applicable pgbackrest env vars to customize your pgbackrest restore instead."
fi
if [[ -v PG_HOSTNAME && \
( ! -v PGBACKREST_PG1_PATH && ! -v PGBACKREST_DB1_PATH && ! -v PGBACKREST_DB_PATH ) ]]
then
echo_warn "PG_HOSTNAME has been deprecated and will be removed in a future release - please use PGBACKREST_PG1_PATH instead."
echo_warn "PGBACKREST_PG1_PATH will be set to '/pgdata/${PG_HOSTNAME}' for this restore."
export PGBACKREST_PG1_PATH="/pgdata/${PG_HOSTNAME}"
fi
}
display_config_details() {
if [[ -v PGBACKREST_DELTA ]]
then
echo_info "Delta restore detected."
fi
if [[ -v PGBACKREST_TYPE ]]
then
echo_info "The following type of recovery will be attempted: ${PGBACKREST_TYPE:-default}"
if [[ -v PGBACKREST_TARGET ]]
then
echo_info "The target for the restore is: ${PGBACKREST_TARGET}"
fi
fi
}
# create an empty pgdata directory for a full restore if it does not already exist
create_restore_pgdata_dir() {
if [[ -v PGBACKREST_PG1_PATH ]]
then
pgdata_dir="${PGBACKREST_PG1_PATH}"
elif [[ -v PGBACKREST_DB1_PATH ]]
then
pgdata_dir="${PGBACKREST_DB1_PATH}"
elif [[ -v PGBACKREST_DB_PATH ]]
then
pgdata_dir="${PGBACKREST_DB_PATH}"
fi
if [[ ! -d "${pgdata_dir}" ]]
then
mkdir -p "${pgdata_dir}"
echo_info "Created new pgdata directory ${pgdata_dir}"
fi
}
check_for_deprecated_env_vars
display_config_details
create_restore_pgdata_dir
echo_info "Starting restore.."
echo_info "The following pgbackrest env vars have been set:"
( set -o posix ; set | grep -oP "^PGBACKREST.*" )
echo_info "Initiating pgbackrest restore.."
pgbackrest restore ${BACKREST_CUSTOM_OPTS:-}
echo_info "Restore completed. Exiting.."
exit 0
|
CrunchyData/crunchy-containers
|
bin/pgbackrest-restore/pgbackrest-restore.sh
|
Shell
|
apache-2.0
| 3,905 |
#!/bin/bash
set -e
TEST_DIR=$PWD
echo "Generating the dax..."
export PYTHONPATH=`pegasus-config --python`
python daxgen.py dax.xml
cat > sites.yml <<END
pegasus: "5.0"
sites:
-
name: "local"
arch: "x86_64"
os.type: "linux"
directories:
-
type: "sharedStorage"
path: "$TEST_DIR/outputs"
fileServers:
-
operation: "all"
url: "file://$TEST_DIR/outputs"
-
type: "sharedScratch"
path: "$TEST_DIR/work"
fileServers:
-
operation: "all"
url: "file://$TEST_DIR/work"
END
echo "Planning the workflow..."
pegasus-plan \
--conf pegasusrc \
--dir submit \
--dax dax.xml \
--sites local \
--output-sites local \
--cleanup leaf
exit $?
|
pegasus-isi/pegasus
|
test/core/020-pmc-only/plan.sh
|
Shell
|
apache-2.0
| 724 |
#!/bin/bash
# Script to update application variables for continuous deployments
# Just replace you github details here and modify the paths as needed
github_id=
github_secret=
sed -i.bak "s/your-github-app-client-id/$github_id/g" backend/src/main/resources/application.yml
sed -i.bak "s/your-github-app-client-secret/$github_secret/g" backend/src/main/resources/application.yml
|
orionhealth/XBDD
|
deploy-scripts/updateProperties.sh
|
Shell
|
apache-2.0
| 381 |
find -iname "* .*"
find -iname "* *"
find -iname " *"
find -iname "* "
find -iname "*."
rename -v -n "s/  / /g" *
rename -v "s/  / /g" *
|
bernardladenthin/BroomCabinet
|
LinuxHelper/NTFSNamingCheck.sh
|
Shell
|
apache-2.0
| 148 |
#!/bin/bash
# The IP for the server you wish to ping (8.8.8.8 is a public Google DNS server)
SERVER=8.8.8.8
LOOP_TIME=600
LAST_CHECK=0
while [ "1" == "1" ]; do
CUR_TIME=$(date +%s)
DELTA_TIME=$(( $CUR_TIME - $LAST_CHECK ))
if [ "$DELTA_TIME" -gt "$LOOP_TIME" ]; then
# Only send two pings, sending output to /dev/null
ping -c2 ${SERVER} > /dev/null
# If the return code from ping ($?) is not 0 (meaning there was an error)
if [ $? != 0 ]; then
# Restart the wireless interface
ifdown --force wlan0
ifup wlan0
fi
LAST_CHECK="$CUR_TIME"
fi
sleep 30
done
|
JohnOmernik/pimeup
|
wifi_checker/wifi_checker.sh
|
Shell
|
apache-2.0
| 650 |
#!/bin/bash
set -e
DEPLOY=`dirname $0`/..
#
# EDIT HERE
# Variables prepended with ### are suitable for replacement
# by autogenerated scripts
#
###MAINJAR=
export MAINCLASS=dk.statsbiblioteket.summa.search.tools.SummaSearcherRunner
export CODEBASE_BASEURL="file://$DEPLOY/lib"
export PRINT_CONFIG=
export NAME=summa-searcher
###LIBDIRS=
###JAVA_HOME=
###JVM_OPTS="$JVM_OPTS -Dsumma.configuration=$1"
###SECURITY_POLICY=
###ENABLE_JMX=
###JMX_PORT=
###JMX_SSL=
###JMX_ACCESS=
###JMX_PASS=
# Custom code
export CONFIGURATION=$1
if [ ! -f "$1" ]; then
echo "You must specify a configuration as first parameter" 1>&2
exit 1
fi
# All is ready, execute!
exec $DEPLOY/bin/generic_start.sh
|
statsbiblioteket/summa
|
Summix/bin/summa-searcher.sh
|
Shell
|
apache-2.0
| 706 |
#!/bin/bash
set -euxo pipefail
docker image build -t lhci-server .
docker volume create lhci-data-test
docker run --name lhci-server-container --mount='source=lhci-data-test,target=/data' --detach lhci-server
set +e
docker exec -i lhci-server-container bash < test-in-container.sh
EXIT_CODE=$?
set -e
docker logs lhci-server-container
docker stop lhci-server-container
docker rm lhci-server-container
docker volume rm lhci-data-test
exit $EXIT_CODE
|
GoogleChrome/lighthouse-ci
|
docs/recipes/docker-server/test.sh
|
Shell
|
apache-2.0
| 453 |
#!/bin/bash
supervisorctl -c /usr/local/supervisord/nimbus.cfg stop storm-ui
supervisorctl -c /usr/local/supervisord/nimbus.cfg stop storm-nimbus
|
roboconf/roboconf-examples
|
storm-bash/src/main/model/graph/storm_nimbus/scripts/stop.sh
|
Shell
|
apache-2.0
| 148 |
#!/bin/sh
set -x
cat > /etc/systemd/system/build-harbor.service <<EOF
[Unit]
Description=Builds HarborOS
[Service]
Type=simple
ExecStart=/usr/local/bin/build-harbor
EOF
cat > /etc/systemd/system/build-harbor.timer <<EOF
[Unit]
Description=Builds HarborOS Every 60 minutes
[Timer]
OnCalendar=*-*-* *:00:00
EOF
cat > /usr/local/bin/build-harbor <<EOF
#!/bin/bash
docker rm -f freeipa-repo || true
docker rm -f mandracchio-repo || true
docker rm -f ipsilon-repo || true
docker rm -f openvswitch-repo || true
docker rm \$(docker ps -a -q) || true
docker rmi \$(docker images -q -f dangling=true) || true
docker run -d --name openvswitch-repo -p 172.17.0.1:80:80/tcp port/openvswitch-rpm:latest
docker run -d --name mandracchio-repo -p 172.17.0.1:81:80/tcp port/mandracchio-rpms:latest
docker run -d --name freeipa-repo -p 172.17.0.1:83:80/tcp port/freeipa-rpm:latest
docker run -d --name ipsilon-repo -p 172.17.0.1:82:80/tcp port/ipsilon-rpm:latest
#docker run -d --name cinder-docker-repo -p 172.17.0.1:83:80/tcp port/openstack-cinder-docker-rpm:latest
cd /home/harbor/Documents/Builder/github/harbor
./tools/update-build-links
./tools/make-scripts-exec.sh
./tools/build-all-docker-images --release --push #--squash
EOF
chmod +x /usr/local/bin/build-harbor
systemctl daemon-reload
systemctl enable build-harbor.timer
|
portdirect/harbor
|
tools/setup-release-build.sh
|
Shell
|
apache-2.0
| 1,320 |
#!/bin/bash
# Copyright 2014 Paul R. Dixon ([email protected])
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
# WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
# MERCHANTABLITY OR NON-INFRINGEMENT.
# See the Apache 2 License for the specific language governing permissions and
# limitations under the License.
# Simple script to take a wav specified in the first
# argument and run it through the feature extraction
# and decoding pipeline
# Example assumes the Librispeech small models have been
# downloaded and
# ./librispeech-decode-wav.sh tmp.wav
../src/kaldibin/online2-wav-nnet2-am-compute2 \
--online=true \
--apply-log=true \
--config=online_nnet2_decoding.conf \
nnet_a/final.mdl \
$1 "ark:-" 2> /dev/null| \
../src/bin/dcd-recog \
--word_symbols_table=words.txt \
--decoder_type=hmm_lattice \
--beam=15 \
--acoustic_scale=0.1 \
--gen_lattice=true \
graph_test_tgsmall/arcs.far \
graph_test_tgsmall/C.det.L.G.fst \
ark:- /dev/null
|
opendcd/opendcd
|
egs/librispeech-decode-wav.sh
|
Shell
|
apache-2.0
| 1,322 |
#!/bin/sh
sudo -n /opt/engines/system/scripts/ssh/sudo/_install_cert.sh $1 $2
exit $?
|
EnginesOS/System
|
system/scripts/ssh/install_cert.sh
|
Shell
|
apache-2.0
| 87 |
#!/bin/sh
# install blas libraries
echo 'compiling blas ...'
cd Blas
gfortran -O2 -c *.f
ar cr libblas.a *.o
mv libblas.a ../
cd ..
# install lapack libraries
echo 'install lapack ...'
cd Lapack
make all
mv *.a ../
cd ..
# install lapack95 interface
echo 'install lapack95 interface ...'
cd Lapack95/SRC
make single_double_complex_dcomplex
cd ../..
#mv *.a ../
# install netcdf fortran
echo 'install netcdf fortran...'
cd Netcdf_fortran
export NCDIR=/usr/lib64
./configure --prefix=/usr
make check
sudo make install
cd ..
# compile CASTf90
echo 'compile CASTf90...'
make -f Makefile.home
|
sradanov/flyingpigeon
|
flyingpigeon/Fsrc/install.sh
|
Shell
|
apache-2.0
| 613 |
#!/bin/bash
clear
g++ -Wall -o bin/fe src/main.cxx src/*.cpp -lX11 -lGL -lGLU -lm -I/usr/include/freetype2 -lfreetype
|
Dotrar/Fountain
|
oldEngine/build.sh
|
Shell
|
apache-2.0
| 119 |
sudo rpm -Uvh http://repos.mesosphere.com/el/7/noarch/RPMS/mesosphere-el-repo-7-1.noarch.rpm
sudo yum -y install mesos marathon wget git java-1.7.0-openjdk maven
sudo systemctl stop firewalld
sudo sed -i '1s/^/nameserver 10.111.111.10\n/' /etc/resolv.conf
mkdir -p /var/zookeeper
wget http://www-eu.apache.org/dist/zookeeper/zookeeper-3.4.9/zookeeper-3.4.9.tar.gz
tar -xvf zookeeper-3.4.9.tar.gz
cd zookeeper-3.4.9
sudo mv conf/zoo_sample.cfg conf/zoo.cfg
sudo bin/zkServer.sh start
sudo nohup mesos master --work_dir=/var/lib/mesos/ --log_dir=/var/log/mesos/ --zk=zk://localhost:2181/mesos --quorum=1 --ip=10.111.111.10 > mesos.log 2>&1 &
sudo nohup sudo marathon > marathon.log 2>&1 &
cd /home/vagrant/
wget http://www-eu.apache.org/dist/kafka/0.10.1.0/kafka_2.10-0.10.1.0.tgz
wget https://github.com/mesos/kafka/releases/download/v0.9.5.1/kafka-mesos-0.9.5.1.jar
export MESOS_NATIVE_JAVA_LIBRARY=/usr/local/lib/libmesos.so
export LIBPROCESS_IP=10.111.111.10
sudo nohup sudo java -jar kafka-mesos-0.9.5.1.jar scheduler --master=10.111.111.10:5050 --api=http://10.111.111.10:7000 --zk=10.111.111.10:2181 --bind-address=10.111.111.10 > kafka-mesos.log 2>&1 &
cd /home/vagrant/
wget https://github.com/mesosphere/mesos-dns/releases/download/v0.6.0/mesos-dns-v0.6.0-linux-amd64
chmod +x mesos-dns-v0.6.0-linux-amd64
sudo nohup sudo ./mesos-dns-v0.6.0-linux-amd64 > mesos-dns.log 2>&1 &
# add nginx to marathon to test dns
#curl -X POST -H "Content-Type: application/json" http://10.111.111.10:8080/v2/apps [email protected]
#curl -v nginx.marathon.mesos
# another dns test from outside of vm
# dig @10.111.111.10 nginx.marathon.mesos
|
lastcosmonaut/mesos-playground
|
vagrant/master.sh
|
Shell
|
apache-2.0
| 1,635 |
#!/bin/bash -xe
core_vpn="default"
edge_vpn="default"
edge_name=`hostname -s`
edge_dns="localhost"
semp_port="8080"
while getopts "c:d:e:f:n:p:s:u:" opt; do
case "$opt" in
c) core_vpn=$OPTARG
;;
d) core_dns=$OPTARG
;;
e) edge_vpn=$OPTARG
;;
f) edge_dns=$OPTARG
;;
n) edge_name=$OPTARG
;;
p) password=$OPTARG
;;
s) semp_port=$OPTARG
;;
u) username=$OPTARG
;;
esac
done
shift $((OPTIND-1))
[ "$1" = "--" ] && shift
url="http://${core_dns}:${semp_port}/SEMP/v2/config/msgVpns/${edge_vpn}/queues"
body="{\"queueName\":\"CoreQueue-${edge_name}\",\"egressEnabled\":true,\"ingressEnabled\":true,\"permission\":\"delete\"}"
/tmp/sempv2_config_command.sh -n ${username} -p ${password} -u ${url} -d "${body}"
url="http://${core_dns}:${semp_port}/SEMP/v2/config/msgVpns/${edge_vpn}/queues/CoreQueue-${edge_name}/subscriptions"
body='{"subscriptionTopic":"in/*/svc1/>"}'
/tmp/sempv2_config_command.sh -n ${username} -p ${password} -u ${url} -d "${body}"
|
KenBarr/Solace_testing_in_AWS
|
scripts/core-queue-configure.sh
|
Shell
|
apache-2.0
| 1,059 |
# Install chisel for debugging in lldb
brew install chisel
if [[ ! -e ~/.lldbinit ]]; then
cp ~/.macbootstrap/config/.lldbinit ~/.lldbinit
else
if grep -q "/usr/local/opt/chisel/libexec/fblldb.py" "$HOME/.lldbinit"; then
echo "Chisel is installed"
else
echo "command script import /usr/local/opt/chisel/libexec/fblldb.py" >> ~/.lldbinit
fi
# import my own lldb configuration
echo "" >> ~/.lldbinit
echo "# load lldb commmand alias and configuration provided by bestswifter" >> ~/.lldbinit
echo "command source ~/.macbootstrap/zsh-config/bs_lldb_extension" >> ~/.lldbinit
fi
# CodeRunner
# ---------------
brew cask install coderunner
|
bestswifter/macbootstrap
|
install-steps/dependencies.before.sh
|
Shell
|
apache-2.0
| 684 |
#!/bin/bash
# this is not needed as I disable NM
HOSTS=$(ruby -rjson -e 'nodes=JSON.parse(File.read("nodes.json"))["nodes"];str=[];nodes.each do |n| str<< n[0] end;puts str.join(" ")')
for i in ${HOSTS}
do
#if [[ ${i} =~ slave* ]]
#then
echo vagrant ssh ${i} -c \"sudo systemctl restart network.service\"
#fi
done
|
ashayh/playa-mesos-centos
|
restart-network.sh
|
Shell
|
apache-2.0
| 328 |
# -----------------------------------------------------------------------------
#
# Package : webmozart/assert
# Version : 1.10.0
# Source repo : https://github.com/webmozarts/assert
# Tested on : RHEL 8.3
# Script License: Apache License, Version 2 or later
# Maintainer : BulkPackageSearch Automation <[email protected]>
#
# Disclaimer: This script has been tested in root mode on given
# ========== platform using the mentioned version of the package.
# It may not work as expected with newer versions of the
# package and/or distribution. In such case, please
# contact "Maintainer" of this script.
#
# ----------------------------------------------------------------------------
PACKAGE_NAME=webmozart/assert
PACKAGE_VERSION=1.10.0
PACKAGE_URL=https://github.com/webmozarts/assert
yum -y update && yum install -y nodejs nodejs-devel nodejs-packaging npm python38 python38-devel ncurses git jq curl php php-curl php-dom php-mbstring php-json nodejs make gcc-c++ patch diffutils php-gd php-pecl-zip
php -r "copy('https://getcomposer.org/installer', 'composer-setup.php');" && php composer-setup.php --install-dir=/bin --filename=composer
composer require --dev phpunit/phpunit --with-all-dependencies ^7
mkdir output
OS_NAME=`python3 -c "os_file_data=open('/etc/os-release').readlines();os_info = [i.replace('PRETTY_NAME=','').strip() for i in os_file_data if i.startswith('PRETTY_NAME')];print(os_info[0])"`
HOME_DIR=`pwd`
if ! git clone $PACKAGE_URL $PACKAGE_NAME; then
echo "------------------$PACKAGE_NAME:clone_fails---------------------------------------"
echo "$PACKAGE_URL $PACKAGE_NAME"
echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | GitHub | Fail | Clone_Fails"
exit 0
fi
cd $HOME_DIR/$PACKAGE_NAME
git checkout $PACKAGE_VERSION
if ! composer install; then
echo "------------------$PACKAGE_NAME:install_fails-------------------------------------"
echo "$PACKAGE_URL $PACKAGE_NAME"
echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | GitHub | Fail | Install_Fails"
exit 0
fi
cd $HOME_DIR/$PACKAGE_NAME
if ! /home/tester/vendor/bin/phpunit; then
echo "------------------$PACKAGE_NAME:install_success_but_test_fails---------------------"
echo "$PACKAGE_URL $PACKAGE_NAME"
echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | GitHub | Fail | Install_success_but_test_Fails"
exit 0
else
echo "------------------$PACKAGE_NAME:install_&_test_both_success-------------------------"
echo "$PACKAGE_URL $PACKAGE_NAME"
echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | GitHub | Pass | Both_Install_and_Test_Success"
exit 0
fi
|
ppc64le/build-scripts
|
w/webmozart__assert/webmozart__assert_rhel_8.3.sh
|
Shell
|
apache-2.0
| 2,686 |
#! /bin/bash
# Install latest docker
sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9
sudo sh -c "echo deb https://get.docker.io/ubuntu docker main > /etc/apt/sources.list.d/docker.list"
sudo apt-get update
sudo apt-get install lxc-docker
# Docker without sudo
sudo groupadd docker
sudo gpasswd -a ${USER} docker
sudo service docker restart
# log out after this change
# Upgrade
sudo apt-get purge lxc-docker
sudo apt-get autoremove --purge
sudo apt-get install lxc-docker
# pull some images
docker pull ubuntu
docker pull tpires/neo4j
docker pull redis
docker pull mongo
docker pull postgres
docker pull ipedrazas/elasticsearch
docker pull poklet/cassandra
docker pull python:2
docker pull python:3
docker pull nornagon/postgres
docker pull clue/ttrss
docker pull ipedrazas/taiga-front
docker pull ipedrazas/taiga-back
docker pull php:5.6-apache
docker pull node:0.10-onbuild
|
ipedrazas/pycones-docker
|
setup.sh
|
Shell
|
apache-2.0
| 952 |
#!/bin/bash
START=$(date +"%s")
echo -en "\nTo release all of the resources, inside the directory execute: ./reposeDemo-7-Release.sh\n"
sudo shutdown -h now
STOP=$(date +"%s")
DIFF=$(($STOP-$START))
echo -en "\nTime to complete: $(($DIFF / 60)) minutes and $(($DIFF % 60)) seconds\n\n"
|
wdschei/ReposeQuickStart
|
reposeDemo-6-Shutdown.sh
|
Shell
|
apache-2.0
| 290 |
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
set -e
CPP_BUILD_DIR=$TRAVIS_BUILD_DIR/dist/
pushd java
if [ $TRAVIS_OS_NAME == "linux" ]; then
SO_DEP=ldd
GANDIVA_LIB="$CPP_BUILD_DIR"libgandiva_jni.so
WHITELIST=(linux-vdso libz librt libdl libpthread libstdc++ libm libgcc_s libc ld-linux-x86-64)
else
SO_DEP="otool -L"
GANDIVA_LIB="$CPP_BUILD_DIR"libgandiva_jni.dylib
WHITELIST=(libgandiva_jni libz libncurses libSystem libc++)
fi
# print the shared library dependencies
eval "$SO_DEP" "$GANDIVA_LIB"
if [[ $CHECK_SHARED_DEPENDENCIES ]] ; then
# exit if any shared library not in whitelisted set is found
echo "Checking shared dependencies"
while read -r line
do
found=false
for item in "${WHITELIST[@]}"
do
if [[ "$line" == *"$item"* ]] ; then
found=true
fi
done
if [[ "$found" == false ]] ; then
echo "Unexpected shared dependency found"
exit 1
fi
done < <(eval "$SO_DEP" "$GANDIVA_LIB" | awk '{print $1}')
fi
# build the entire project
mvn clean install -q -DskipTests -P arrow-jni -Darrow.cpp.build.dir=$CPP_BUILD_DIR
# test only gandiva
mvn test -q -P arrow-jni -pl gandiva -Dgandiva.cpp.build.dir=$CPP_BUILD_DIR
# copy the jars to distribution folder
find gandiva/target/ -name "*.jar" -not -name "*tests*" -exec cp {} $CPP_BUILD_DIR \;
popd
|
xhochy/arrow
|
dev/tasks/gandiva-jars/build-java.sh
|
Shell
|
apache-2.0
| 2,157 |
#!/bin/bash
# Automatic build script for simple-share
# for iOS and iOSSimulator
#
# Created by Felix Schulze on 01.06.12.
# Copyright 2012 Felix Schulze. All rights reserved.
###########################################################################
#
SDKVERSION="6.1"
#
###########################################################################
#
# Don't change anything here
DEVICESDK="iphoneos${SDKVERSION}"
SIMSDK="iphonesimulator${SDKVERSION}"
echo "Building simple-share for iPhoneSimulator and iPhoneOS ${SDKVERSION}"
# Clean the targets
if ! xcodebuild -project "simple-share.xcodeproj" -target simple-share -configuration "Release" -sdk "$DEVICESDK" clean ; then
exit 1
fi
if ! xcodebuild -project "simple-share.xcodeproj" -target simple-share -configuration "Release" -sdk "$SIMSDK" clean ; then
exit 1
fi
# Build the targets
if ! xcodebuild -project "simple-share.xcodeproj" -target simple-share -configuration "Release" -sdk "$DEVICESDK" -arch "armv7 armv7s" build ; then
exit 1
fi
if ! xcodebuild -project "simple-share.xcodeproj" -target simple-share -configuration "Release" -sdk "$SIMSDK" build ; then
exit 1
fi
echo "Build library..."
lipo "build/Release-iphoneos/libsimple-share.a" "build/Release-iphonesimulator/libsimple-share.a" -create -output "libsimple-share.a"
cp -R build/Release-iphoneos/include .
echo "Building done."
|
x2on/simple-share-ios
|
build.sh
|
Shell
|
apache-2.0
| 1,362 |
#!/usr/bin/env bash
# exit immediately when a command fails
set -e
# only exit with zero if all commands of the pipeline exit successfully
set -o pipefail
# error on unset variables
set -u
kubectl -n prometheus-operator-e2e-tests exec -it prometheus-test-0 -c prometheus -- /bin/sh -c "cat /etc/prometheus/rules/rules-0/test.rules"
|
jescarri/prometheus-operator
|
hack/show-rules.sh
|
Shell
|
apache-2.0
| 332 |
sudo apt-get install -y gcc-mingw32 #- The GNU Compiler Collection (cross compiler for MingW32)
sudo apt-get install -y libconfig++8 #- parsing and manipulation of structured configuration files(C++ binding)
sudo apt-get install -y libconfig++8-dev #- parsing and manipulation of structured config files(C++ development)
sudo apt-get install -y libconfig8 #- parsing and manipulation of structured configuration files
sudo apt-get install -y libconfig8-dev #- parsing and manipulation of structured config files(development)
sudo apt-get install -y mingw-w64 #- Minimalist GNU w64 (cross) runtime
sudo apt-get install -y mingw32 #- Minimalist GNU win32 (cross) compiler
sudo apt-get install -y mingw32-binutils #- Minimalist GNU win32 (cross) binutils
sudo apt-get install -y mingw32-runtime #- Minimalist GNU win32 (cross) runtime
|
bayvictor/distributed-polling-system
|
bin/install_mingw_for_win32_target_cross-compiler.sh
|
Shell
|
apache-2.0
| 834 |
#!/bin/bash
# Make sure only root can run our script
if [[ $EUID -ne 0 ]]; then
echo "This script must be run as root" 1>&2
exit 1
fi
# dc = delete-cache
# up = upgrade
# it = install
eopkg dc
eopkg up -y
eopkg it -y -c system.devel
eopkg it -y cmus curl emacs git htop lsof neofetch nmap strace tcpdump vim wget wine wine-32bit
# intel-microcode nvidia-glx-driver-current nvidia-glx-driver-32bit
eopkg it -y audacity bleachbit gimp guvcview lutris pavucontrol putty redshift steam virtualbox-current mpv wireshark
# Codecs.
eopkg it -y libbluray libdvdcss libdvdnav libdvdread
# Google Chrome.
eopkg bi --ignore-safety https://raw.githubusercontent.com/solus-project/3rd-party/master/network/web/browser/google-chrome-stable/pspec.xml
eopkg it -y google-chrome-*.eopkg
# Skype.
eopkg bi --ignore-safety https://raw.githubusercontent.com/solus-project/3rd-party/master/network/im/skype/pspec.xml
eopkg it -y skype-*.eopkg
eopkg dc
|
Bean6754/unixscripts
|
setup-scripts/Solus-Desktop.sh
|
Shell
|
bsd-2-clause
| 941 |
#!/bin/bash
################################################################################
#
# Script to download and extact the trec queries
#
# [1] http://www.cim.mcgill.ca/~dudek/206/Logs/AOL-user-ct-collection/
#
################################################################################
echo "Starting downloading the queries"
wget http://trec.nist.gov/data/million.query/07/07-million-query-topics.1-10000.gz
wget http://trec.nist.gov/data/million.query/08/08.million-query-topics.10001-20000.gz
wget http://trec.nist.gov/data/million.query/09/09.mq.topics.20001-60000.gz
echo "Downloading complete, extracting"
gzip -d 07-million-query-topics.1-10000.gz
gzip -d 08.million-query-topics.10001-20000.gz
gzip -d 09.mq.topics.20001-60000.gz
cat 07-million-query-topics.1-10000 08.million-query-topics.10001-20000 09.mq.topics.20001-60000 >> trec_queries.txt
rm 07-million-query-topics.1-10000 08.million-query-topics.10001-20000 09.mq.topics.20001-60000
echo "Removing line numbers"
sed -i 's/[0-9]*://g' trec_queries.txt
echo "Finished"
|
kurpicz/dpt
|
data/trec_queries/dl_and_prepare_trec_queries.sh
|
Shell
|
bsd-2-clause
| 1,059 |
#fourweeks
cp /ramcache/www/3weeksago.png /ramcache/www/4weeksago.png
#threeweeks
cp /ramcache/www/lastweek.png /ramcache/www/3weeksago.png
#lastweek
cp /ramcache/www/thisweek.png /ramcache/www/lastweek.png
#weekly graph
rrdtool graph '/ramcache/www/thisweek.png' \
--title "Week `date -d "last week" +%U` Sensor Data" \
--width '1440' \
--height '400' \
--start end-1w \
'DEF:Sunlight=/ramcache/sensors.rrd:Sunlight:AVERAGE' \
'DEF:Temperature=/ramcache/sensors.rrd:Temperature:AVERAGE' \
'DEF:SumpWater=/ramcache/sensors.rrd:SumpWater:AVERAGE' \
'DEF:Pump=/ramcache/sensors.rrd:Pump:AVERAGE' \
'DEF:CPU=/ramcache/sensors.rrd:CPU:AVERAGE' \
'CDEF:Pumpmag=Pump,1000,*' \
'LINE1:Sunlight#FFCC00:Sunlight' \
'LINE1:Temperature#FF0000:Temperature' \
'LINE1:SumpWater#006600:Sump Water' \
'AREA:Pumpmag#0033FF:Pump on/off' \
'LINE1:CPU#6633CC:CPU MHz'
|
trelane/aquaponics
|
weeklyrotate.sh
|
Shell
|
bsd-2-clause
| 863 |
#!/bin/bash
CONFIG_PATH="$1"
ACCESS_KEY="$(jq -r .credential.accessKey $CONFIG_PATH)"
SECRET_KEY="$(jq -r .credential.secretKey $CONFIG_PATH)"
ARN="arn:minio:sqs:us-east-1:1:webhook"
URL="http://localhost:9000"
ALIAS="local-minio"
BUCKET="layers"
function check_configuration() {
local s=$(mc config host list | grep $ALIAS | wc -l)
test $s -eq 1
}
function configure() {
mc config host add $ALIAS $URL $ACCESS_KEY $SECRET_KEY
}
function check_bucket() {
mc ls $ALIAS/$BUCKET
}
function create_bucket() {
mc mb $ALIAS/$BUCKET
}
function event_handler_ok() {
local s=$(mc events list $ALIAS/$BUCKET $ARN | wc -l)
test $s -eq 1
}
function add_event_handler() {
mc events add $ALIAS/$BUCKET "$ARN" --events put
}
check_configuration || configure
check_bucket || create_bucket
event_handler_ok || add_event_handler
|
cvmfs/docker-graphdriver
|
provision/setup_minio_webhook.sh
|
Shell
|
bsd-3-clause
| 866 |
#!/bin/bash
# Copyright (c) 2013 The libmumble Developers
# The use of this source code is governed by a BSD-style
# license that can be found in the LICENSE-file.
# This script re-generates the Xcode project files for iOS.
# Check for bad files in testdata.
./testdata/vet.bash
if [ $? -ne 0 ]; then
echo "There seems to be non-unique files in the testdata directory."
echo "This is not supported by the iOS test runner, so generate.bash"
echo "will not be able to continue."
echo
echo "Unable to generate Xcode project files."
exit 1
fi
GYP=./gyp
GYPFLAGS="-I common.gypi"
${GYP} libmumble.gyp ${GYPFLAGS} -f xcode --depth . -Dlibrary=static_library -Dopenssl_asm=gnuas-arm -DOS=ios --generator-out=build/iphoneos/xcode
|
metredigm/libmumble
|
build/iphoneos/generate.bash
|
Shell
|
bsd-3-clause
| 732 |
#!/bin/bash
# deploy sxcmd
# requires... php, git, composer, box https://github.com/box-project/box2
set -e
# Check tag parameter
if [ $# -ne 1 ]; then
echo "Usage: `basename $0` <tag>"
exit 65 # EX_DATAERR
fi
TAG=$1
php -r "if(preg_match('/^\d+\.\d+\.\d+\$/',\$argv[1])) exit(0); else { echo 'tag is invalid' . PHP_EOL ; exit(65); }" $TAG
# Clean vendor of req-dev
composer install --no-dev
# Tag latest commit
git tag ${TAG}
# Remove previous build
if [ -f ./build/sxcmd-${TAG}.phar ]; then
rm -f ./build/sxcmd-${TAG}.phar
fi
# Build phar
time box build
# Re-Install req-dev vendor stuff
composer install
# Manifest
php build/manifest.php ${TAG}
# Upload phar
bin/sxcmd file:upload sxcmd-deploy ./build/sxcmd-${TAG}.phar download:/sxcmd/release/sxcmd-${TAG}.phar --time
bin/sxcmd file:upload sxcmd-deploy ./build/sxcmd-${TAG}.phar download:/sxcmd/release/sxcmd-latest.phar --time
bin/sxcmd file:upload sxcmd-deploy ./manifest.json download:/sxcmd/release/manifest.json --time
# Commit new version
git commit -m "Version ${TAG}" ./manifest.json
git push --tags --progress "mplx-gitlab" master
|
mplx/sxcmd
|
deploy.sh
|
Shell
|
bsd-3-clause
| 1,114 |
#!/bin/bash
#
# If this script is being run by Travis, the Travis install script will
# already be in the correct directory.
# If this script is being run by desiInstall, then we need to make sure
# we are running this in ${INSTALL_DIR}.
#
[[ -n "${INSTALL_DIR}" ]] && /bin/mkdir -p ${INSTALL_DIR} && cd ${INSTALL_DIR}
#
# Make sure DESIMODEL_VERSION is set.
#
if [[ -z "${DESIMODEL_VERSION}" ]]; then
echo "DESIMODEL_VERSION is not set!"
exit 1
fi
if [[ "${DESIMODEL_VERSION}" == "tags/master" ]]; then
svn checkout https://desi.lbl.gov/svn/code/desimodel/trunk/data
else
svn export https://desi.lbl.gov/svn/code/desimodel/${DESIMODEL_VERSION}/data
fi
#
# Set this for subsequent Travis tests. For desiInstall, this environment
# variable should already be set when the desimodel Module file is
# processed.
#
[[ -z "${DESIMODEL}" ]] && export DESIMODEL=$(pwd)
|
desihub/desimodel
|
etc/desimodel_data.sh
|
Shell
|
bsd-3-clause
| 879 |
#! /bin/bash
set -e
./configure --prefix=$PREFIX || { cat config.log ; exit 1 ; }
make -j$(getconf _NPROCESSORS_ONLN)
make install
|
klauer/conda-prescriptions
|
gsl-1.16/build.sh
|
Shell
|
bsd-3-clause
| 134 |
export UHWMTEST_CI=yes
export UHWMTEST_TIMEOUT=8
|
tjouan/uh-wm
|
bin/ci/travis/build-before.sh
|
Shell
|
bsd-3-clause
| 49 |
#!/bin/bash
. /etc/environment
. /etc/grassroot
CURR=$PWD
cd /var/grassroot
./scripts/startmqtt.sh
nohup java -Dspring.profiles.active=$PROFILE -jar grassroot-webapp/build/libs/grassroot-webapp-1.0.0.M5.jar `cat /home/ubuntu/cmd_line_arguments` > grassroot-app.log 2>&1 &
echo $! > .pid
sleep 1
chgrp sudo /var/grassroot/grassroot-app.log
chmod 640 /var/grassroot/grassroot-app.log
cd $CURR
|
mokoka/grassroot-platform
|
scripts/startgrassroot.sh
|
Shell
|
bsd-3-clause
| 394 |
#!/bin/bash
seeds="42 132 243 314 4562"
dbs="mushroom pumsb_star"
epss="0.1 0.2 0.3 0.4 0.5 0.6 0.7 0.8 0.9 1.0"
rmaxs="3 5"
nis="50 60"
bfs="2 4 8"
for db in ${dbs}; do
for eps in ${epss}; do
for rmax in ${rmaxs}; do
for ni in ${nis}; do
for bf in ${bfs}; do
for seed in ${seeds}; do
echo ${db} ${eps} ${rmax} ${ni} ${bf} ${seed}
done
done
done
done
done
done | xargs -n6 -P7 ./do_run_tree.sh
|
mihaimaruseac/dphcar
|
tools/run_scripts/run_tree.sh
|
Shell
|
bsd-3-clause
| 540 |
#!/usr/bin/env bash
phpdoc -d ./lib -t ./docs -ti 'phpScenario Documentation' -o HTML:default:default -s on -p on
|
jsylvanus/phpScenario
|
docgen.sh
|
Shell
|
bsd-3-clause
| 114 |
#!/bin/bash
echo "Installing SASS 3.4.10"
echo "Ensuring Ruby (dependency) is installed and up to date"
yum install -y ruby ruby-devel rubygems
echo "Ruby check complete. Version: `ruby --version`"
echo "#!/bin/bash" > /etc/profile.d/local-bin.sh
echo "pathmunge /usr/local/bin after" >> /etc/profile.d/local-bin.sh
gem install -N sass -v 3.4.10
echo "Installed SASS version: `/usr/local/bin/sass -v`"
echo "Installing Compass"
gem install -N compass -v 1.0.3
echo "Compass installed `/usr/local/bin/compass -v`"
|
Firesphere/vagrant-skeleton
|
scripts/sass.sh
|
Shell
|
bsd-3-clause
| 518 |
setup() {
GIT_DUET_TEST_DIR="${BATS_TMPDIR}/git-duet"
mkdir "$GIT_DUET_TEST_DIR"
unset GIT_DUET_GLOBAL
unset GIT_DUET_ROTATE_AUTHOR
export GIT_DUET_CONFIG_NAMESPACE='foo.bar'
export GIT_DUET_AUTHORS_FILE="${GIT_DUET_TEST_DIR}/.git-authors"
export GIT_DUET_TEST_LOOKUP="${GIT_DUET_TEST_DIR}/email-lookup"
export GIT_DUET_TEST_REPO="${GIT_DUET_TEST_DIR}/repo"
cat > "$GIT_DUET_AUTHORS_FILE" <<EOF
---
pairs:
jd: Jane Doe
fb: Frances Bar
al: Abraham Lincoln; abe
on: Oscar
zp: Zubaz Pants
email:
domain: hamster.info.local
email_addresses:
jd: [email protected]
fb: [email protected]
EOF
cat > "$GIT_DUET_TEST_LOOKUP" <<EOF
#!/usr/bin/env ruby
addr = {
'jd' => '[email protected]',
'fb' => '[email protected]'
}[ARGV.first]
puts addr
EOF
chmod +x "$GIT_DUET_TEST_LOOKUP"
git init -q "$GIT_DUET_TEST_REPO"
cd "$GIT_DUET_TEST_REPO"
touch foo
git add foo
git config user.name 'Test User'
git config user.email '[email protected]'
git commit -m 'test commit for reverting'
}
teardown() {
git config --global --remove-section $GIT_DUET_CONFIG_NAMESPACE || true
rm -rf "$GIT_DUET_TEST_DIR"
}
add_file() {
if [ $# -eq 0 ]; then
touch file.txt
git add file.txt
else
touch $1
git add $1
fi
}
create_branch_commit() {
if [ $# -eq 0 ]; then
git checkout -b new_branch
else
git checkout -b $1
fi
if [ $# -lt 2 ]; then
add_file branch_file.txt
else
add_file $2
fi
git commit -q -m 'Adding a branch commit'
git checkout master
}
assert_head_is_merge () {
msha=$(git rev-list --merges HEAD~1..HEAD)
[ -z "$msha" ] && return 1
return 0
}
set_custom_email_template() {
clear_custom_email_template
echo "email_template: '$1'" >> "$GIT_DUET_AUTHORS_FILE"
}
clear_custom_email_template() {
cat "$GIT_DUET_AUTHORS_FILE" | grep -v email_template > "$GIT_DUET_AUTHORS_FILE.bak"
mv "$GIT_DUET_AUTHORS_FILE.bak" "$GIT_DUET_AUTHORS_FILE"
}
flunk() {
{ if [ "$#" -eq 0 ]; then cat -
else echo "$@"
fi
} >&2
return 1
}
assert_success() {
if [ "$status" -ne 0 ]; then
flunk "command failed with exit status $status"
elif [ "$#" -gt 0 ]; then
assert_output "$1"
fi
}
assert_failure() {
if [ "$status" -eq 0 ]; then
flunk "expected failed exit status"
elif [ "$#" -gt 0 ]; then
assert_output "$1"
fi
}
assert_equal() {
if [ "$1" != "$2" ]; then
{ echo "expected: $1"
echo "actual: $2"
} | flunk
fi
}
assert_output() {
local expected
if [ $# -eq 0 ]; then expected="$(cat -)"
else expected="$1"
fi
assert_equal "$expected" "$output"
}
assert_line() {
if [ "$1" -ge 0 ] 2>/dev/null; then
assert_equal "$2" "${lines[$1]}"
else
local line
for line in "${lines[@]}"; do
if [ "$line" = "$1" ]; then return 0; fi
done
flunk "expected line \`$1'"
fi
}
refute_line() {
if [ "$1" -ge 0 ] 2>/dev/null; then
local num_lines="${#lines[@]}"
if [ "$1" -lt "$num_lines" ]; then
flunk "output has $num_lines lines"
fi
else
local line
for line in "${lines[@]}"; do
if [ "$line" = "$1" ]; then
flunk "expected to not find line \`$line'"
fi
done
fi
}
|
svett/git-duet
|
test/test_helper.bash
|
Shell
|
mit
| 3,238 |
#!/bin/bash
# power controller
CTL="${BASEURL}index.php?/module/power/"
# Get the scripts in the proper directories
"${CURL[@]}" "${CTL}get_script/power.sh" -o "${MUNKIPATH}preflight.d/power.sh"
# Check exit status of curl
if [ $? = 0 ]; then
# Make executable
chmod a+x "${MUNKIPATH}preflight.d/power.sh"
# Set preference to include this file in the preflight check
setreportpref "power" "${CACHEPATH}powerinfo.plist"
# Delete the older style cached file
if [[ -f "${MUNKIPATH}preflight.d/cache/powerinfo.txt" ]] ; then
rm -f "${MUNKIPATH}preflight.d/cache/powerinfo.txt"
fi
else
echo "Failed to download all required components!"
rm -f "${MUNKIPATH}preflight.d/power.sh"
# Signal that we had an error
ERR=1
fi
|
poundbangbash/munkireport-php
|
app/modules/power/scripts/install.sh
|
Shell
|
mit
| 752 |
#!/bin/sh -e
docker exec -it php-skeleton_mysql-service_1 mysql --user=root --password=root php_skeleton <tmp/php_skeleton.sql
|
FunTimeCoding/php-skeleton
|
script/docker-compose/mysql-restore.sh
|
Shell
|
mit
| 128 |
## smart urls
autoload -U url-quote-magic
zle -N self-insert url-quote-magic
## file rename magick
bindkey "^[m" copy-prev-shell-word
## jobs
setopt long_list_jobs
## pager
export PAGER=less
export LC_CTYPE=en_US.UTF-8
|
nacengineer/dotfiles
|
oh-my-zsh/lib/misc.zsh
|
Shell
|
mit
| 222 |
#!/usr/bin/env bash
# Sends out marks for students based on the folder structure used by Submit,
# the coursework submission system used in the School of Informatics at the
# University of Edinburgh.
#
# This script assumes the following folder structure:
#
# $DIR/sXXXXXXX/$CW/$FILE
#
# The variable DIR refers to the directory passed in as an argument to the
# script. The variable XXXXXXX refers to the student ID, and it is assumed
# that
#
# [email protected]
#
# is a valid email address. The variable $CW refers to the coursework
# of which the students are meant to be notified (e.g., cw1). The directory
# DIR/sXXXXXXX/$CW/ should only contain a single file, which should be
# specified using the FILE parameter.
#
# Usage:
#
# ./send_marks.sh [DIR] [CW] [FILE]
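#
# Example (hypothetical values):
#
#   ./send_marks.sh submissions/ cw1 feedback.pdf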
DIR="$1"
shift
CW="$1"
shift
FILE="$1"
shift
for ATTACHMENT in "${DIR%/}/s"*"/$CW/$FILE"; do
SUBJ="Mark for coursework $CW"
BODY=""
SID=$(echo "$ATTACHMENT" | sed 's|.*/\(s[0-9]\{7\}\)/.*|\1|')
ADDR="[email protected]"
CMD="echo \"$BODY\" | mail -c [email protected] -s \"$SUBJ\" -a \"$ATTACHMENT\" \"$ADDR\""
echo "You are about to run the following command:"
echo -e "\n$CMD\n"
read -p "Are you sure? " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]
then
eval "$CMD"
fi
done
|
wenkokke/sf
|
versions/20.07/courses/tspl/send_marks.sh
|
Shell
|
mit
| 1,313 |
#!/bin/bash
set -euxo pipefail
IFS=$'\n\t'
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
cd "$DIR"/..
source .env
if [ -z "$SERVER_NAME" ]; then
exit 0
fi
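# Query the health endpoint: --resolve pins $SERVER_NAME:443 to localhost
# so TLS is still validated against the real hostname.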
curl \
--fail \
--resolve "$SERVER_NAME:443:localhost" \
"https://$SERVER_NAME/health"
|
albertyw/base-flask
|
bin/healthcheck.sh
|
Shell
|
mit
| 285 |
#!/bin/bash
GDAL_VERSION=$1
echo "Building GDAL $GDAL_VERSION"
wget http://download.osgeo.org/gdal/$GDAL_VERSION/gdal-$GDAL_VERSION.tar.xz
tar xJf gdal-$GDAL_VERSION.tar.xz && cd gdal-$GDAL_VERSION
./configure --prefix=/usr --enable-debug --without-libtool
make -j4
sudo make install
cd ..
gdalinfo --version
|
mysidewalk/pgsql-ogr-fdw
|
ci/gdal_build.sh
|
Shell
|
mit
| 310 |
#!/bin/bash
set -e
echo "" > coverage.txt
for d in $(go list ./... | grep -v vendor); do
race=""
# The race detector is broken on Alpine. That is #14481 (and #9918).
# So disable it for now.
if [ "${GOARCH}" = "amd64" ] && [ ! -f /etc/alpine-release ]; then
race="-race"
fi
go test $race -coverprofile=profile.out -covermode=atomic "$d"
if [ -f profile.out ]; then
cat profile.out >> coverage.txt
rm profile.out
fi
done
exit 0
|
palourde/uchiwa
|
build/tests.sh
|
Shell
|
mit
| 460 |
#!/bin/bash
# create multiresolution windows icon
ICON_SRC=../../src/qt/res/icons/hempcoin.png
ICON_DST=../../src/qt/res/icons/hempcoin.ico
convert ${ICON_SRC} -resize 16x16 hempcoin-16.png
convert ${ICON_SRC} -resize 32x32 hempcoin-32.png
convert ${ICON_SRC} -resize 48x48 hempcoin-48.png
convert hempcoin-16.png hempcoin-32.png hempcoin-48.png ${ICON_DST}
|
taimpal/hempcoin
|
share/qt/make_windows_icon.sh
|
Shell
|
mit
| 359 |
#!/bin/bash
##########################################################################
# This script configures Transmission daemon to be ready to use.
# @author César Rodríguez González
# @version 1.3, 2016-09-21
# @license MIT
##########################################################################
# Check if the script is being running by a root or sudoer user
if [ "$(id -u)" != "0" ]; then echo ""; echo "This script must be executed by a root or sudoer user"; echo ""; exit 1; fi
# Parameters
if [ -n "$1" ]; then scriptRootFolder="$1"; else scriptRootFolder="`pwd`/../.."; fi
if [ -n "$2" ]; then username="$2"; else username="`whoami`"; fi
if [ -n "$3" ]; then homeFolder="$3"; else homeFolder="$HOME"; fi
# Add common variables
. $scriptRootFolder/common/commonVariables.properties
# Add credentials for authentication
. $credentialFolder/Transmission_server.properties
service transmission-daemon stop 2>/dev/null
### VARIABLES ############################################################
TRANSMISSION_DAEMON_DOWNLOAD_FOLDER="$homeDownloadFolder/Transmission"
TRANSMISSION_DAEMON_TEMP_FOLDER="$homeFolder/.Temporal/Transmission"
TRANSMISSION_DAEMON_TORRENT_FOLDER="$homeDownloadFolder/torrents"
TRANSMISSION_DAEMON_CLIENT_AND_WEB_PORT="9091"
TRANSMISSION_DAEMON_TCP_PORT="51413"
TRANSMISSION_DAEMON_FILE="/etc/systemd/system/transmission-daemon.service"
### COPY SYSTEMD SERVICE SCRIPT ##########################################
yes | cp -f $scriptRootFolder/etc/systemd.service $TRANSMISSION_DAEMON_FILE
### CREATE FOLDERS #######################################################
sudo -u $username mkdir -p $TRANSMISSION_DAEMON_DOWNLOAD_FOLDER $TRANSMISSION_DAEMON_TEMP_FOLDER $TRANSMISSION_DAEMON_TORRENT_FOLDER $homeFolder/.config/transmission-daemon
### SETUP APPLICATION CONFIG FILES #######################################
cp /var/lib/transmission-daemon/info/settings.json /var/lib/transmission-daemon/info/settings.json.backup
# Suppress variables to modify from transmission config file
transmissionVariablesToModify="\"download-dir\"\|\"incomplete-dir\"\|\"peer-port\"\|\"rpc-password\"\|\"rpc-username\"\|\"rpc-whitelist\"\|\"umask\""
cat /var/lib/transmission-daemon/info/settings.json | grep -v "$transmissionVariablesToModify" | tr -d '}' > /tmp/transmission.json
# Add comma character to last line
lastLine="`awk '/./{line=$0} END{print line}' /tmp/transmission.json`"
sed -i "s/$lastLine/$lastLine,/g" /tmp/transmission.json
# Add modified variables
echo "\"download-dir\": \"$TRANSMISSION_DAEMON_DOWNLOAD_FOLDER\",
\"incomplete-dir\": \"$TRANSMISSION_DAEMON_TEMP_FOLDER\",
\"incomplete-dir-enabled\": true,
\"peer-port\": $TRANSMISSION_DAEMON_TCP_PORT,
\"rpc-password\": \"$appPassword\",
\"rpc-username\": \"$appUsername\",
\"rpc-whitelist\": \"*\",
\"rpc-port\": $TRANSMISSION_DAEMON_CLIENT_AND_WEB_PORT,
\"umask\": 7,
\"watch-dir\": \"$TRANSMISSION_DAEMON_TORRENT_FOLDER\",
\"watch-dir-enabled\": true
}" >> /tmp/transmission.json
# Move temp file to transmission config file
mv /tmp/transmission.json /var/lib/transmission-daemon/info/settings.json
### SETUP SYSTEMD SERVICE ################################################
sed -i "s/=DESCRIPTION.*/=Transmission Daemon/g" $TRANSMISSION_DAEMON_FILE
sed -i "s/=man:PACKAGE.*/=man:transmission-daemon/g" $TRANSMISSION_DAEMON_FILE
sed -i "s/=SYSTEMD_TYPE.*/=simple/g" $TRANSMISSION_DAEMON_FILE
sed -i "s/=USERNAME.*/=$username/g" $TRANSMISSION_DAEMON_FILE
sed -i "s/=GROUP.*/=debian-transmission/g" $TRANSMISSION_DAEMON_FILE
sed -i "s/=COMMAND_AND_PARAMETERS_TO_START_SERVICE.*/=\/usr\/bin\/transmission-daemon -f --log-error --config-dir=\/var\/lib\/transmission-daemon\/info/g" $TRANSMISSION_DAEMON_FILE
### CREATE DIRECT LINKS IN STARTUP MENU ##################################
# Create menu launcher for transmission-daemon's web client.
echo "[Desktop Entry]
Name=Transmission Web
Exec=xdg-open http://localhost:$TRANSMISSION_DAEMON_CLIENT_AND_WEB_PORT
Icon=transmission
Terminal=false
Type=Application
Categories=Network;P2P;
Comment=Transmission Web" > /usr/share/applications/transmission-web.desktop
# Create menu launcher to start transmission-daemon.
echo "[Desktop Entry]
Name=Transmission daemon start
Exec=gksudo systemctl start transmission-daemon
Icon=transmission
Terminal=false
Type=Application
Categories=Network;P2P;
Comment=Start Transmission server" > /usr/share/applications/transmission-start.desktop
# Create menu launcher to stop transmission-daemon.
echo "[Desktop Entry]
Name=Transmission daemon stop
Exec=gksudo systemctl stop transmission-daemon
Icon=transmission
Terminal=false
Type=Application
Categories=Network;P2P;
Comment=Stop Transmission server" > /usr/share/applications/transmission-stop.desktop
### OTHERS ###############################################################
# Remove upstart script installed automatically by transmission daemon. Not used by systemd service manager
rm /etc/init.d/transmission-daemon 2>/dev/null
# Add user to debian-transmission group
usermod -a -G debian-transmission $username
# Set ownership of config files and/or folders
chown -R $username:debian-transmission $TRANSMISSION_DAEMON_DOWNLOAD_FOLDER $TRANSMISSION_DAEMON_TEMP_FOLDER $TRANSMISSION_DAEMON_TORRENT_FOLDER /var/lib/transmission-daemon
# Set permissions
chmod -R 770 $TRANSMISSION_DAEMON_DOWNLOAD_FOLDER $TRANSMISSION_DAEMON_TEMP_FOLDER $TRANSMISSION_DAEMON_TORRENT_FOLDER
find $homeFolder/.config/transmission-daemon/* -type f -print0 2>/dev/null | xargs -0 chmod 660 2>/dev/null
find $homeFolder/.config/transmission-daemon/* -type d -print0 2>/dev/null | xargs -0 chmod 770 2>/dev/null
### PREPARE DAEMON TO START ON SYSTEM BOOT AND START DAEMON NOW ##########
systemctl enable /etc/systemd/system/transmission-daemon.service
systemctl daemon-reload
systemctl start transmission-daemon
|
cesar-rgon/desktop-app-installer
|
post-installation/ubuntu/Transmission_server.sh
|
Shell
|
mit
| 5,826 |
#!/bin/bash
set -e
rm -rf build.paho
mkdir build.paho
cd build.paho
echo "travis build dir $TRAVIS_BUILD_DIR pwd $PWD"
cmake ..
make
python ../test/mqttsas2.py localhost 1883 1885 &
ctest -VV --timeout 600
kill %1
killall mosquitto
|
eclipse/paho.mqtt.embedded-c
|
travis-build.sh
|
Shell
|
epl-1.0
| 234 |
MACHINE=
SCRIPT_NAME=elf
OUTPUT_FORMAT="elf32-littlearm"
BIG_OUTPUT_FORMAT="elf32-bigarm"
LITTLE_OUTPUT_FORMAT="elf32-littlearm"
TEXT_START_ADDR=0x00100000
TEMPLATE_NAME=elf32
EXTRA_EM_FILE=armelf
OTHER_TEXT_SECTIONS='*(.glue_7t) *(.glue_7)'
OTHER_BSS_SYMBOLS='__bss_start__ = .;'
OTHER_BSS_END_SYMBOLS='_bss_end__ = . ; __bss_end__ = . ; __end__ = . ;'
DATA_START_SYMBOLS='__data_start = . ;';
GENERATE_SHLIB_SCRIPT=yes
ARCH=arm
MACHINE=
MAXPAGESIZE=0x1000
ENTRY=_start
# This sets the stack to the top of the simulator memory (2^19 bytes).
STACK_ADDR=0x80000
|
ryo-on/binutils-2.14-SCO-OpenServer5
|
ld/emulparams/armnto.sh
|
Shell
|
gpl-2.0
| 566 |
#!/bin/bash
OUTPUT=`lava-tool submit-job http://admin:l7y5cz0ttbzedtg7ai1okum11eic3n49igs4t6uiraou0amk3bbs2uqh0impai0y7u9a6mue0ep3m081qjwnw0xtoskocz2xnyina3edkkdjooblh94110e41fl66uq1@10.192.242.176/RPC2 \
/home/r64343/workspace/lava-test/test/imx6sx_sabresd/imx6sx_sdb_vte_regression.json`
if [ $? -eq 0 ]
then
JOB_ID=$(grep -Po -- 'id: \K\w*' <<< "$OUTPUT")
echo $JOB_ID
while true; do
OUTPUT=$(lava-tool job-status http://admin:l7y5cz0ttbzedtg7ai1okum11eic3n49igs4t6uiraou0amk3bbs2uqh0impai0y7u9a6mue0ep3m081qjwnw0xtoskocz2xnyina3edkkdjooblh94110e41fl66uq1@10.192.242.176/RPC2 $JOB_ID)
echo $OUTPUT
JOB_STATUS=$(grep -Po -- 'Status: \K\w*' <<< "$OUTPUT")
if [ "$JOB_STATUS" = "Complete" ] || [ "$JOB_STATUS" = "Incomplete" ] || [ "$JOB_STATUS" = "Canceled" ]
then
echo "Job: $JOB_ID finished....., continue to do the next test"
break
else
echo "job not finished: $JOB_STATUS ..."
sleep 60
fi
done
else
echo "job submit failed...."
fi
|
eslover/lava-test
|
test/imx6sx_sabresd/start_ci_regression.sh
|
Shell
|
gpl-2.0
| 977 |
#! /bin/sh
# Copyright (C) 2013 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Tests error messages when '%reldir%' and '%canon_reldir%' substitutions
# (and their shorthands '%D%' and '%C%') are involved.
. test-init.sh
cat >> configure.ac <<'END'
AC_PROG_CC
AC_PROG_RANLIB
AM_PROG_AR
END
: > ar-lib
mkdir sub sub/sub2
cat > Makefile.am <<'END'
%canon_reldir%_x1_SOURCES = bar.c
include sub/local.mk
END
cat > sub/local.mk <<'END'
AUTOMAKE_OPTIONS = -Wno-extra-portability
include %D%/sub2/more.mk
noinst_LIBRARIES = %reldir%-one.a %D%-two.a
%C%_x2_SOURCES = foo.c
END
cat > sub/sub2/more.mk <<'END'
%C%_UNDEFINED +=
END
$ACLOCAL
AUTOMAKE_fails
cat > expected << 'END'
sub/sub2/more.mk:1: sub_sub2_UNDEFINED must be set with '=' before using '+='
Makefile.am:2: 'sub/local.mk' included from here
sub/local.mk:2: 'sub/sub2/more.mk' included from here
sub/local.mk:3: 'sub-one.a' is not a standard library name
sub/local.mk:3: did you mean 'libsub-one.a'?
Makefile.am:2: 'sub/local.mk' included from here
sub/local.mk:3: 'sub-two.a' is not a standard library name
sub/local.mk:3: did you mean 'libsub-two.a'?
Makefile.am:2: 'sub/local.mk' included from here
Makefile.am:1: variable 'x1_SOURCES' is defined but no program or
Makefile.am:1: library has 'x1' as canonical name (possible typo)
sub/local.mk:4: variable 'sub_x2_SOURCES' is defined but no program or
sub/local.mk:4: library has 'sub_x2' as canonical name (possible typo)
Makefile.am:2: 'sub/local.mk' included from here
END
# We need to break these substitutions into multiple sed invocations
# to avoid spuriously triggering the 'sc_tests_logs_duplicate_prefixes'
# maintainer check.
sed -e '/warnings are treated as errors/d' stderr > t1
sed -e 's/: warning:/:/' t1 > t2
sed -e 's/: error:/:/' t2 > t3
sed -e 's/ */ /g' t3 > obtained
diff expected obtained
:
|
DDTChen/CookieVLC
|
vlc/extras/tools/automake/t/preproc-errmsg.sh
|
Shell
|
gpl-2.0
| 2,442 |
#!/bin/sh
# TopGit - A different patch queue manager
# (c) Petr Baudis <[email protected]> 2008
# GPLv2
force= # Whether to delete non-empty branch
name=
## Parse options
while [ -n "$1" ]; do
arg="$1"; shift
case "$arg" in
-f)
force=1;;
-*)
echo "Usage: tg [...] delete [-f] NAME" >&2
exit 1;;
*)
[ -z "$name" ] || die "name already specified ($name)"
name="$arg";;
esac
done
## Sanity checks
[ -n "$name" ] || die "no branch name specified"
branchrev="$(git rev-parse --verify "$name" 2>/dev/null)" ||
die "invalid branch name: $name"
baserev="$(git rev-parse --verify "refs/top-bases/$name" 2>/dev/null)" ||
die "not a TopGit topic branch: $name"
! git symbolic-ref HEAD >/dev/null || [ "$(git symbolic-ref HEAD)" != "refs/heads/$name" ] ||
die "cannot delete your current branch"
nonempty=
branch_empty "$name" || nonempty=1
[ -z "$nonempty" ] || [ -n "$force" ] || die "branch is non-empty: $name"
## Wipe out
git update-ref -d "refs/top-bases/$name" "$baserev"
git update-ref -d "refs/heads/$name" "$branchrev"
# vim:noet
|
jwhitley/topgit
|
tg-delete.sh
|
Shell
|
gpl-2.0
| 1,057 |
#internet access
sudo route del default gw 10.2.47.254 && sudo route add default gw 10.2.47.253
sudo route add -net 10.11.0.0 netmask 255.255.0.0 gw 10.2.47.254
sudo route add -net 10.2.0.0 netmask 255.255.240.0 gw 10.2.47.254
if [ ! -d "/usr/local/src/802.11ah-ns3" ]; then
# install everything first
bash /proj/wall2-ilabt-iminds-be/ns3ah/setup/installscript.sh
else
echo "802.11ah Folder exists, no installation necessary"
fi
cd /usr/local/src/802.11ah-ns3
git pull
cd ns-3
CXXFLAGS="-std=c++0x" ./waf configure --build-profile=optimized --disable-examples --disable-tests
./waf
cd ../simulations
# start slave
mono SimulationBuilder.exe --slave http://ns3ah.ns3ah.wall2-ilabt-iminds-be.wall2.ilabt.iminds.be:12345/SimulationHost/ >> "/proj/wall2-ilabt-iminds-be/ns3ah/logs/$HOSTNAME.slave.log" &
#mono SimulationBuilder.exe --slave http://pi.dragonbyte.info:8086/SimulationHost/ >> "/proj/wall2-ilabt-iminds-be/ns3ah/logs/$HOSTNAME.slave.log" &
|
drake7707/802.11ah-ns3
|
setup/bootscript.sh
|
Shell
|
gpl-2.0
| 959 |
#!/bin/sh
. ../../dttools/test/test_runner_common.sh
test_dir=`basename $0 .sh`.dir
test_output=`basename $0 .sh`.output
prepare()
{
mkdir $test_dir
cd $test_dir
ln -sf ../../src/makeflow .
echo "hello" > file.1
cat > test.jx << EOF
{
"rules" :
[
{
"command" : format("cp file.%d file.%d",i,i+1),
"inputs" : [ "file."+i ],
"outputs" : [ "file."+(i+1) ]
} for i in range(1,10)
]
}
EOF
exit 0
}
run()
{
cd $test_dir
echo "+++++ first run: should make 10 files +++++"
./makeflow --jx test.jx | tee output.1
echo "+++++ deleting file.5 manually +++++"
rm file.5
echo "+++++ second run: should rebuild 6 files +++++"
./makeflow --jx test.jx | tee output.2
count=`grep "deleted" output.2 | wc -l`
echo "+++++ $count files deleted +++++"
if [ $count -ne 6 ]
then
exit 1
fi
# Note: sleep to ensure different timestamp
echo "+++++ changing file.2 manually +++++"
sleep 2
touch file.2
echo "+++++ third run: should rebuild 8 files +++++"
./makeflow --jx test.jx | tee output.3
count=`grep "deleted" output.3 | wc -l`
echo "+++++ $count files deleted +++++"
if [ $count -ne 8 ]
then
exit 1
fi
exit 0
}
clean()
{
rm -fr $test_dir $test_output
exit 0
}
dispatch "$@"
# vim: set noexpandtab tabstop=4:
|
nkremerh/cctools
|
makeflow/test/TR_makeflow_restart.sh
|
Shell
|
gpl-2.0
| 1,260 |
# ----------------------------------------------------------------------------
# http://www.infomoney.com.br
# Fetches the day's quotes of several currencies against the Brazilian Real
# (buy and sell).
# Usage: zzcotacao
# Example: zzcotacao
#
# Author: Itamar <itamarnet (a) yahoo com br>
# Since: 2013-03-19
# Version: 3
# License: GPL
# Requirements: zzsemacento
# ----------------------------------------------------------------------------
zzcotacao ()
{
zzzz -h cotacao "$1" && return
$ZZWWWDUMP "http://www.infomoney.com.br/mercados/cambio" |
sed -n '/^Real vs. Moedas/,/^Cota/p' |
sed -n '3p;/^ [DLPFIE]/p' |
sed 's/Venda *Var/Venda Var/;s/\[//g;s/\]//g' |
zzsemacento |
awk '{
if ( NR == 1 ) printf "%18s %6s %6s %6s\n", "", $2, $3, $4
if ( NR > 1 ) {
if (NF == 4) printf "%-18s %6s %6s %6s\n", $1, $2, $3, $4
if (NF == 5) printf "%-18s %6s %6s %6s\n", $1 " " $2, $3, $4, $5
}
}'
}
|
jgentina/funcoeszz
|
zz/zzcotacao.sh
|
Shell
|
gpl-2.0
| 908 |
#!/bin/bash
# Try to autodetect OOFFICE and OOOPYTHON.
OOFFICE=`ls /usr/bin/openoffice /usr/bin/ooffice /usr/lib/openoffice/program/soffice 2> /dev/null | head -n 1`
OOOPYTHON=`ls /usr/lib/openoffice/program/python /usr/bin/python 2> /dev/null | head -n 1`
if [ ! -x "$OOFFICE" ]
then echo "Could not auto-detect OpenOffice.org binary"
exit
fi
if [ ! -x "$OOOPYTHON" ]
then echo "Could not auto-detect OpenOffice.org Python"
exit
fi
echo "Detected OpenOffice.org binary: $OOFFICE"
echo "Detected OpenOffice.org python: $OOOPYTHON"
# Reference: http://wiki.services.openoffice.org/wiki/Using_Python_on_Linux
# If you use the OpenOffice.org that comes with Fedora or Ubuntu, uncomment the following line:
export PYTHONPATH="/usr/lib/openoffice/program"
# If you want to simulate for testing that there is no X server, uncomment the next line.
#unset DISPLAY
# Kill any running OpenOffice.org processes.
killall -u `whoami` -q soffice
# Start OpenOffice.org in listening mode on TCP port 8100.
$OOFFICE "-accept=socket,host=localhost,port=8100;urp;StarOffice.ServiceManager" -norestore -nofirststartwizard -nologo -headless &
# Wait a few seconds to be sure it has started.
sleep 5s
|
chisimba/modules
|
webpresent/startooo.sh
|
Shell
|
gpl-2.0
| 1,188 |
#!/bin/sh
# Run this to generate all the initial makefiles, etc.
srcdir=`dirname $0`
test -z "$srcdir" && srcdir=.
DIE=0
if [ -n "$GNOME2_DIR" ]; then
ACLOCAL_FLAGS="-I $GNOME2_DIR/share/aclocal $ACLOCAL_FLAGS"
LD_LIBRARY_PATH="$GNOME2_DIR/lib:$LD_LIBRARY_PATH"
PATH="$GNOME2_DIR/bin:$PATH"
export PATH
export LD_LIBRARY_PATH
fi
(test -f $srcdir/configure.ac) || {
echo -n "**Error**: Directory "\`$srcdir\'" does not look like the"
echo " top-level package directory"
exit 1
}
(autoconf --version) < /dev/null > /dev/null 2>&1 || {
echo
echo "**Error**: You must have \`autoconf' installed."
echo "Download the appropriate package for your distribution,"
echo "or get the source tarball at ftp://ftp.gnu.org/pub/gnu/"
DIE=1
}
(grep "^IT_PROG_INTLTOOL" $srcdir/configure.ac >/dev/null) && {
(intltoolize --version) < /dev/null > /dev/null 2>&1 || {
echo
echo "**Error**: You must have \`intltool' installed."
echo "You can get it from:"
echo " ftp://ftp.gnome.org/pub/GNOME/"
DIE=1
}
}
(grep "^AM_PROG_XML_I18N_TOOLS" $srcdir/configure.ac >/dev/null) && {
(xml-i18n-toolize --version) < /dev/null > /dev/null 2>&1 || {
echo
echo "**Error**: You must have \`xml-i18n-toolize' installed."
echo "You can get it from:"
echo " ftp://ftp.gnome.org/pub/GNOME/"
DIE=1
}
}
LIBTOOL_BIN="libtool"
(grep "^AM_PROG_LIBTOOL" $srcdir/configure.ac >/dev/null) && {
("$LIBTOOL_BIN" --version) < /dev/null > /dev/null 2>&1 || {
echo
echo "**Error**: You must have \`libtool' installed."
echo "You can get it from: ftp://ftp.gnu.org/pub/gnu/"
DIE=1
}
}
(grep "^AM_GLIB_GNU_GETTEXT" $srcdir/configure.ac >/dev/null) && {
(grep "sed.*POTFILES" $srcdir/configure.ac) > /dev/null || \
(glib-gettextize --version) < /dev/null > /dev/null 2>&1 || {
echo
echo "**Error**: You must have \`glib' installed."
echo "You can get it from: ftp://ftp.gtk.org/pub/gtk"
DIE=1
}
}
(automake --version) < /dev/null > /dev/null 2>&1 || {
echo
echo "**Error**: You must have \`automake' installed."
echo "You can get it from: ftp://ftp.gnu.org/pub/gnu/"
DIE=1
NO_AUTOMAKE=yes
}
# if no automake, don't bother testing for aclocal
test -n "$NO_AUTOMAKE" || (aclocal --version) < /dev/null > /dev/null 2>&1 || {
echo
echo "**Error**: Missing \`aclocal'. The version of \`automake'"
echo "installed doesn't appear recent enough."
echo "You can get automake from ftp://ftp.gnu.org/pub/gnu/"
DIE=1
}
if test "$DIE" -eq 1; then
exit 1
fi
if test -z "$*"; then
echo "**Warning**: I am going to run \`configure' with no arguments."
echo "If you wish to pass any to it, please specify them on the"
echo \`$0\'" command line."
echo
fi
case $CC in
xlc )
am_opt=--include-deps;;
esac
for coin in `find $srcdir -path $srcdir/CVS -prune -o -name configure.ac -print`
do
dr=`dirname $coin`
if test -f $dr/NO-AUTO-GEN; then
echo skipping $dr -- flagged as no auto-gen
else
echo processing $dr
( cd $dr
aclocalinclude="$ACLOCAL_FLAGS"
if grep "^AM_GLIB_GNU_GETTEXT" configure.ac >/dev/null; then
echo "Creating $dr/aclocal.m4 ..."
test -r $dr/aclocal.m4 || touch $dr/aclocal.m4
echo "Running glib-gettextize... Ignore non-fatal messages."
echo "no" | glib-gettextize --force --copy
echo "Making $dr/aclocal.m4 writable ..."
test -r $dr/aclocal.m4 && chmod u+w $dr/aclocal.m4
fi
if grep "^IT_PROG_INTLTOOL" configure.ac >/dev/null; then
echo "Running intltoolize..."
intltoolize --copy --force --automake
fi
if grep "^AM_PROG_XML_I18N_TOOLS" configure.ac >/dev/null; then
echo "Running xml-i18n-toolize..."
xml-i18n-toolize --copy --force --automake
fi
if grep "^AM_PROG_LIBTOOL" configure.ac >/dev/null; then
if test -z "$NO_LIBTOOLIZE" ; then
echo "Running libtoolize..."
"$LIBTOOL_BIN"ize --force --copy
fi
fi
echo "Running aclocal $aclocalinclude ..."
aclocal $aclocalinclude
if grep "^A[CM]_CONFIG_HEADER" configure.ac >/dev/null; then
echo "Running autoheader..."
autoheader
fi
echo "Running automake --gnu $am_opt ..."
automake --add-missing --gnu $am_opt
echo "Running autoconf ..."
autoconf
)
fi
done
conf_flags="--enable-maintainer-mode"
if test x$NOCONFIGURE = x; then
echo Running $srcdir/configure $conf_flags "$@" ...
$srcdir/configure $conf_flags "$@" \
&& echo Now type \`make\' to compile. || exit 1
else
echo Skipping configure process.
fi
|
acirmandello/ardesia
|
autogen.sh
|
Shell
|
gpl-3.0
| 4,544 |
# common.sh
#
# Common functions
#
# This file is part of Sosaria Rebourne. See authors.txt for copyright
# details.
#
# Sosaria Rebourne is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sosaria Rebourne is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sosaria Rebourne. If not, see <http://www.gnu.org/licenses/>.
# Global configuration
declare -r _common_screen_width=97
# Globals
declare -a _common_at_exit_handlers
declare _common_ms_sleep_factor
# Feature detection for the sleep function jiffy_sleep. This function sleeps
# for an amount of time represented in "jiffies", here one millisecond.
#
# Note that if the version of BASH is prior to 4 we will use a calibrated
# busy-wait.
#
# $1 The number of jiffies to sleep
#
# In bash 4+ we use read with a timeout because it's a built-in and fairly
# accurate.
if (( BASH_VERSINFO[0] >= 4 )); then
function jiffy_sleep
{
local ms=$(( $1 ))
printf -v sleep_format "%d.%03d" $(( ms / 1000 )) $(( ms % 1000 ))
IFS= read -st $sleep_format buffer
}
# Otherwise use a calibrated busy-wait
else
# Figure out how long it takes to invoke sleep
declare _common_sleep_invoke_time=0
function jiffy_sleep_calibrate
{
local idx tmpfile tmpfile_contents seconds ms itters=10000
tmpfile=$(mktemp)
echo "Calibrating Sleep Routine"
(time for((idx=0;idx<itters;idx++));do :;done) 2> $tmpfile
tmpfile_contents=($(cat $tmpfile) )
rm -f $tmpfile 2>/dev/null
seconds=${tmpfile_contents[1]#*m}
ms=${seconds#*.}
ms=${ms%s}
ms=${ms#0}
seconds=${seconds%.*}
(( ms += seconds * 1000 ))
(( _common_ms_sleep_factor = 10000000 / ms ))
}
function jiffy_sleep
{
local idx itters
(( itters = (_common_ms_sleep_factor * $1) / 1000 ))
for((idx=0;idx<itters;idx++));do :;done
}
jiffy_sleep_calibrate
fi
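# Example: jiffy_sleep 250 pauses for roughly 250 milliseconds on either path.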
# Set up at_exit trap
trap "do_at_exit" 0
# Execute all at_exit handlers
function do_at_exit
{
local idx
for (( idx=0; idx < ${#_common_at_exit_handlers[@]}; idx++ )); do
eval "${_common_at_exit_handlers[$idx]}"
done
}
# Register an at_exit handler
#
# $1 Command string to execute
function at_exit
{
	_common_at_exit_handlers=("${_common_at_exit_handlers[@]}" "$1")
}
# Raise an error
#
# $1 Message text of the error
# $2 Exit code, if non-zero we will exit
function error
{
vt100_home
vt100_high
vt100_fg $COLOR_WHITE
vt100_bg $COLOR_RED
echo "ERROR: $1"
echo -n "Press Enter to Continue"
read
if [ "$2" -ne 0 ]; then
exit $2
fi
}
# Convert the first character of a string to uppercase, and place the new
# string in g_return.
#
# $1 String to uppercase
function uppercase_first_character
{
	local octal_code first_code
local first="${1:0:1}"
local rest="${1:1}"
printf -v first_code "%d" "'$first"
if (( first_code >= 97 && first_code <= 122 )); then
(( first_code -= 32 ))
fi
printf -v octal_code "%o" $first_code
printf -v first "\\$octal_code"
g_return="$first$rest"
}
# Place the class string for a class letter in g_return
#
# $1 Class letter
function get_class_string
{
case $1 in
A) g_return="Adventurer" ;;
F) g_return="Fighter" ;;
R) g_return="Rouge" ;;
S) g_return="Sorcerer" ;;
P) g_return="Paladin" ;;
T) g_return="Thief" ;;
M) g_return="Mage" ;;
*) g_return="Monster" ;;
esac
}
# Create a new save data directory. This sets g_save_data_path to the new save
# path.
function create_new_save
{
local i=0
while :; do
if [ -d "$g_dynamic_data_path/$i" ]; then
(( i++ ))
continue
fi
g_save_data_path="$g_dynamic_data_path/$i"
mkdir -p "$g_save_data_path/maps"
cp "$g_static_data_path/party.tab" "$g_save_data_path"
cp "$g_static_data_path/equipment.tab" "$g_save_data_path"
cp "$g_static_data_path/inventory.tab" "$g_save_data_path"
break
done
}
# Sets g_save_data_path to the newest save path.
#
# Returns non-zero if no save exists
function load_last_save
{
local dirname
for dirname in $(ls -drt $g_dynamic_data_path/* 2>/dev/null); do
if [ -d "$dirname" ]; then
g_save_data_path="$dirname"
combat_load_from_save
item_load_from_save
return 0
fi
done
return 1
}
# Calls all save routines for the current save.
function save_game
{
combat_save
}
# Debug proceedure. Put whatever you need to test here.
function debug_proc
{
save_game
}
|
brandon5052/sosaria-rebourne
|
src/common.sh
|
Shell
|
gpl-3.0
| 4,677 |
#!/bin/bash
npm install -g lintspaces-cli
sudo add-apt-repository --yes ppa:ubuntu-toolchain-r/test
sudo add-apt-repository --yes ppa:beineri/opt-qt591-trusty
sudo apt-get update
sudo apt-get install --yes build-essential gcc-4.9 g++-4.9 cmake pkg-config libjack-jackd2-dev libsndfile1-dev libasound2-dev libavahi-client-dev libreadline6-dev libfftw3-dev libicu-dev libxt-dev libudev-dev
if [[ -n "$1" && "$1" == "--qt=true" ]]; then
sudo apt-get install --yes libgl1-mesa-dev qt59base qt59location qt59declarative qt59tools qt59webengine qt59webchannel qt59xmlpatterns qt59svg qt59websockets
fi
sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-4.9 60 --slave /usr/bin/g++ g++ /usr/bin/g++-4.9
sudo update-alternatives --auto gcc
|
gusano/supercollider
|
.travis/before-install-linux.sh
|
Shell
|
gpl-3.0
| 748 |
#!/bin/sh
autoreconf --force --install -v
|
stephaneguindon/phyml
|
autogen.sh
|
Shell
|
gpl-3.0
| 42 |
#!/bin/sh
MY_DIR=`dirname $0`
LOGS_DIR=/mnt/glow/log
next_file=`ls -t ${LOGS_DIR}/glow.*.log | head -1`
if [ -z "${next_file}" ]; then
exit 0
fi
${MY_DIR}/bart_process_log.sh ${next_file}
gzip ${next_file}
|
almossawi/mrburns
|
smithers/bart_dev.sh
|
Shell
|
mpl-2.0
| 215 |
#!/bin/bash
# Installs fbthrift's dependencies to /usr/local on a clean Ubuntu 12.04 x64
# system. Primarily intended for Travis CI, since most engineers don't run
# distributions this stale.
#
# WARNING: Uses 'sudo' to upgrade your system with impunity:
# - Adds several PPAs for missing/outdated dependencies
# - Installs several from-source dependencies in /usr/local
#
# Library sources & build files end up in fbthrift/thrift/deps.
. "$(dirname "$0")/deps_common.sh"
sudo apt-get install git cmake
# Folly sets up a bunch of other dependencies, so install it right after git.
install_folly ubuntu_12.04 # needs git
install_mstch ubuntu_12.04 # needs git, cmake
install_zstd ubuntu_12.04 # needs git
install_wangle ubuntu_12.04
# Uses PPAs set up by folly. TODO: According to the fbthrift docs,
# pkg-config is missing. However, things seem to build fine...
sudo apt-get install -y libboost-python1.54-dev libsasl2-dev python-dev \
libkrb5-dev libnuma-dev
# Install all the automake packages:
# - bison: The base system version is too old, does not make 'thrify.hh'
# - flex: Just in case -- the base system version probably does suffice...
# - automake: default 1.11.1 has bad bison support, does not make 'thrifty.hh'
# - autoconf, libtool: newer so as to be compatible with the new automake
for url in \
http://ftp.gnu.org/gnu/bison/bison-3.0.4.tar.gz \
http://downloads.sourceforge.net/project/flex/flex-2.5.37.tar.gz \
http://ftp.gnu.org/gnu/autoconf/autoconf-2.69.tar.gz \
http://ftp.gnu.org/gnu/automake/automake-1.15.tar.gz \
http://ftp.gnu.org/gnu/libtool/libtool-2.4.6.tar.gz \
; do
pkg=$(basename "$url")
wget "$url" -O "$pkg"
tar xzf "$pkg"
pushd "${pkg%.tar.gz}"
./configure
make
sudo make install
popd
done
|
getyourguide/fbthrift
|
thrift/build/deps_ubuntu_12.04.sh
|
Shell
|
apache-2.0
| 1,787 |
#!/usr/bin/env bash
#http://www.apache.org/licenses/LICENSE-2.0.txt
#
#
#Copyright 2015 Intel Corporation
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
# Support travis.ci environment matrix:
SNAP_TEST_TYPE="${SNAP_TEST_TYPE:-$1}"
if [[ "${SNAP_TEST_TYPE}" == small ]]; then
UNIT_TEST="${UNIT_TEST:-"gofmt goimports go_test go_cover"}"
else
UNIT_TEST="${UNIT_TEST:-"go_test go_cover"}"
fi
set -e
set -u
set -o pipefail
__dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
__proj_dir="$(dirname "$__dir")"
SNAP_PATH="${SNAP_PATH:-"${__proj_dir}/build"}"
export SNAP_PATH
# shellcheck source=scripts/common.sh
. "${__dir}/common.sh"
_debug "script directory ${__dir}"
_debug "project directory ${__proj_dir}"
[[ "$SNAP_TEST_TYPE" =~ ^(small|medium|large|legacy|build)$ ]] || _error "invalid/missing SNAP_TEST_TYPE (value must be 'small', 'medium', 'large', 'legacy', or 'build' received:${SNAP_TEST_TYPE}"
if [[ "${SNAP_TEST_TYPE}" == "build" ]]; then
_info "s3 builds are generated by travis.ci and only triggered by merges on master branch."
exit 0
fi
_go_path
# If the following tools don't exist, get them
_go_get github.com/smartystreets/goconvey
# Run test coverage on each subdirectories and merge the coverage profile.
echo "mode: count (${SNAP_TEST_TYPE})" > "profile-${SNAP_TEST_TYPE}.cov"
TEST_TYPE=$SNAP_TEST_TYPE
export TEST_TYPE
go_tests=(gofmt goimports golint go_vet go_race go_test go_cover)
_debug "available unit tests: ${go_tests[*]}"
_debug "user specified tests: ${UNIT_TEST}"
((n_elements=${#go_tests[@]}, max=n_elements - 1))
for ((i = 0; i <= max; i++)); do
if [[ "${UNIT_TEST}" =~ (^| )"${go_tests[i]}"( |$) ]]; then
_info "running ${go_tests[i]}"
_"${go_tests[i]}"
else
_debug "skipping ${go_tests[i]}"
fi
done
_info "test complete: ${SNAP_TEST_TYPE}"
|
mjbrender/snap
|
scripts/test.sh
|
Shell
|
apache-2.0
| 2,304 |
#! /bin/bash
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# copy upstart-conf to init
echo 'configuring upstart...'
sudo cp $DIR/upstart-conf/faceswap.conf /etc/init/ &&
sudo initctl reload-configuration &&
sudo service faceswap restart &&
# copy cron job
echo 'adding cron email reminder...'
sudo cp $DIR/cron-bihourly-email /etc/cron.hourly/ &&
echo 'removing log file...'
sudo rm -rf /var/log/FaceSwap.log
echo 'server setup succeeded.'
|
Jamesjue/FaceSwap-server
|
server/script/server-setup.sh
|
Shell
|
apache-2.0
| 454 |
export SPARK_HOME=/usr/local/spark
export HADOOP_HOME=/usr/local/hadoop
export HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop
export MASTER=yarn-client
|
met-office-lab/docker-hadoop-spark
|
zeppelin/zeppelin-env.sh
|
Shell
|
apache-2.0
| 145 |
#!/bin/bash
#
# Copyright (C) 2015 eNovance SAS <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
set -x
PREFIX="I12_I13"
source common/infra-virt.function
if snapshot_exists RH7.0-I.1.2.1; then
snapshot_restore RH7.0-I.1.2.1
else
drop_hosts os-ci-test4 router os-ci-test10 os-ci-test11 os-ci-test12
deploy ~/data/sps-snippets/RH7.0-I.1.2.1-qcow
call_jenkins_job "puppet"
snapshot_create RH7.0-I.1.2.1
fi
drop_hosts os-ci-test4 router
deploy ~/data/sps-snippets/RH7.0-I.1.3-qcow
call_jenkins_job "upgrade"
|
enovance/infra-virt
|
scenarios-samples/RH7.0-I.1.2.1-to-RH7.0-I.1.3.0.sh
|
Shell
|
apache-2.0
| 1,051 |
#!/bin/sh
# the IP:port our fteproxy client listens on
CLIENT_IP=127.0.0.1
CLIENT_PORT=8079
# the IP:port our fteproxy server listens on
SERVER_IP=127.0.0.1
SERVER_PORT=8080
# the IP:port where our fteproxy forwards all connections
# in this test, it's the IP:port the server-side netcat will bind to
PROXY_IP=127.0.0.1
PROXY_PORT=8081
# start fteproxy client
./bin/fteproxy --mode client --quiet \
--client_ip $CLIENT_IP --client_port $CLIENT_PORT \
--server_ip $SERVER_IP --server_port $SERVER_PORT &
# start fteproxy server
./bin/fteproxy --mode server --quiet \
--server_ip $SERVER_IP --server_port $SERVER_PORT \
--proxy_ip $PROXY_IP --proxy_port $PROXY_PORT &
# start server-side netcat listener
netcat -k -l -p $PROXY_PORT
# start client-side netcat pusher in another window
# nc $CLIENT_IP $CLIENT_PORT
|
irdan/fteproxy
|
examples/netcat/netcat_verbose.sh
|
Shell
|
apache-2.0
| 879 |
#!/bin/bash
# --- begin runfiles.bash initialization v2 ---
# Copy-pasted from the Bazel Bash runfiles library v2.
set -uo pipefail; f=build_bazel_rules_nodejs/third_party/github.com/bazelbuild/bazel/tools/bash/runfiles/runfiles.bash
source "${RUNFILES_DIR:-/dev/null}/$f" 2>/dev/null || \
source "$(grep -sm1 "^$f " "${RUNFILES_MANIFEST_FILE:-/dev/null}" | cut -f2- -d' ')" 2>/dev/null || \
source "$0.runfiles/$f" 2>/dev/null || \
source "$(grep -sm1 "^$f " "$0.runfiles_manifest" | cut -f2- -d' ')" 2>/dev/null || \
source "$(grep -sm1 "^$f " "$0.exe.runfiles_manifest" | cut -f2- -d' ')" 2>/dev/null || \
{ echo>&2 "ERROR: cannot find $f"; exit 1; }; f=; set -e
# --- end runfiles.bash initialization v2 ---
readonly OUT=$($(rlocation "npm/testy/bin/testy.sh"))
readonly EXPECTED="Hello some_value && some_other_value"
if [ "${OUT}" != "${EXPECTED}" ]; then
echo "Expected output '${EXPECTED}' but was '${OUT}'"
exit 1
fi
|
alexeagle/rules_nodejs
|
internal/npm_install/test/bazel_bin_test.sh
|
Shell
|
apache-2.0
| 943 |
export CVMFS_PLATFORM_NAME="debian$(. /etc/os-release && echo "$VERSION_ID")-$(uname -m)"
export CVMFS_TIMESTAMP=$(date -u +'%Y-%m-%dT%H:%M:%SZ')
# source the common platform independent functionality and option parsing
script_location=$(cd "$(dirname "$0")"; pwd)
. ${script_location}/common_test.sh
retval=0
echo "running unittests"
run_unittests --gtest_shuffle \
--gtest_death_test_use_fork || retval=1
debian_release="$(lsb_release -cs)"
CVMFS_EXCLUDE=
# Kernel sources too old for gcc, TODO
CVMFS_EXCLUDE="$CVMFS_EXCLUDE src/006-buildkernel"
# Expected failure, see test case
CVMFS_EXCLUDE="$CVMFS_EXCLUDE src/628-pythonwrappedcvmfsserver"
# Hardlinks do not work with overlayfs
CVMFS_EXCLUDE="$CVMFS_EXCLUDE src/672-publish_stats_hardlinks"
export CVMFS_TEST_UNIONFS=overlayfs
cd ${SOURCE_DIRECTORY}/test
echo "running CernVM-FS client test cases..."
CVMFS_TEST_CLASS_NAME=ClientIntegrationTests \
./run.sh $CLIENT_TEST_LOGFILE -o ${CLIENT_TEST_LOGFILE}${XUNIT_OUTPUT_SUFFIX} \
-x src/004-davinci \
src/005-asetup \
src/007-testjobs \
src/024-reload-during-asetup \
src/094-attachmount \
src/095-fuser \
$CVMFS_EXCLUDE \
-- \
src/0* \
|| retval=1
if [ x"$(uname -m)" = x"x86_64" ]; then
echo "running CernVM-FS server test cases..."
CVMFS_TEST_CLASS_NAME=ServerIntegrationTests \
./run.sh $SERVER_TEST_LOGFILE -o ${SERVER_TEST_LOGFILE}${XUNIT_OUTPUT_SUFFIX} \
-x src/518-hardlinkstresstest \
src/600-securecvmfs \
src/647-bearercvmfs \
src/673-acl \
src/684-https_s3 \
src/686-azureblob_s3 \
src/687-import_s3 \
$CVMFS_EXCLUDE \
-- \
src/5* \
src/6* \
src/7* \
src/8* \
src/9* \
|| retval=1
fi
echo "running CernVM-FS client migration test cases..."
CVMFS_TEST_CLASS_NAME=ClientMigrationTests \
./run.sh $MIGRATIONTEST_CLIENT_LOGFILE \
-o ${MIGRATIONTEST_CLIENT_LOGFILE}${XUNIT_OUTPUT_SUFFIX} \
migration_tests/0* \
|| retval=1
if [ x"$(uname -m)" = x"x86_64" ]; then
echo "running CernVM-FS server migration test cases..."
CVMFS_TEST_CLASS_NAME=ServerMigrationTests \
./run.sh $MIGRATIONTEST_SERVER_LOGFILE \
-o ${MIGRATIONTEST_SERVER_LOGFILE}${XUNIT_OUTPUT_SUFFIX} \
migration_tests/5* \
|| retval=1
fi
exit $retval
|
DrDaveD/cvmfs
|
test/cloud_testing/platforms/debian_test.sh
|
Shell
|
bsd-3-clause
| 4,034 |
#!/bin/sh
# Because of a long-running npm issue (https://github.com/npm/npm/issues/3059)
# prepublish runs after `npm install` and `npm pack`.
# In order to only run prepublish before `npm publish`, we have to check argv.
if node -e "process.exit(($npm_config_argv).original[0].indexOf('pu') === 0)"; then
exit 0;
fi
# Publishing to NPM is currently supported by Travis CI, which ensures that all
# tests pass first and the deployed module contains the correct file structure.
# In order to prevent inadvertently circumventing this, we ensure that a CI
# environment exists before continuing.
if [ "$CI" != true ]; then
#echo "\n\n\n \033[101;30m Only CI can publish to NPM. \033[0m" 1>&2;
echo " Ensure git is left is a good state by backing out any commits and deleting any tags." 1>&2;
echo " Then read CONTRIBUTING.md to learn how to publish to NPM.\n\n\n" 1>&2;
#exit 1;
fi;
# Build before publishing
npm run build;
|
zerkalica/regexp-input
|
conf/resources/prepublish.sh
|
Shell
|
mit
| 938 |
#! /bin/bash
#this script should be sourced by all provisioning scripts
# it is NOT idempotent and will result in duplicate keys in authorized_keys if run a 2nd time
# in this lower security approach there is just one key pair used across every node
# we don't want a permanent set for whole cluster so it can't go in BakeCalavera.sh
# (tempting but we need at least adequate security)
# nor do we want keys in github, so both .ssh and rsa* are in gitignore.
# Ok here is the business rule
# ssh.sh is run out of calavera::default chef recipe
#
# and checks that the key pair exists in /mnt/shared/keys (this directory must be shared across the cluster)
# generates if it does not
# force copies keys to ~/.ssh
# (to reset all keys delete /mnt/shared/keys and kitchen/vagrant destroy/up)
#
echo "*** CALAVERA ssh.sh run on $HOSTNAME on "$(date +%F)" "$(date +%T)" UTC ***" >> /mnt/shared/keys/ssh.log
## so, if we ONLY run this on base, and someone reset Calavera, the keys would go away except they are in .gitignore.
mkdir -p /mnt/shared/keys # -p = no error if it exists (this part is idempotent b/c we don't know if another node has already keygen'd)
chown vagrant /mnt/shared/keys
if [[ $(ls /mnt/shared/keys) != *id_rsa[^\.]* ]] && \
[[ $(ls /mnt/shared/keys) != *id_rsa.pub ]] # either shared key or private key are missing in /mnt/shared/keys
then
echo missing key, regenerating both shared and private >> /mnt/shared/keys/ssh.log
ssh-keygen -t rsa -f "/mnt/shared/keys/id_rsa" -P "" \
-C "*** Host key auto-generated on Vagrant provisioning by node $HOSTNAME on "$(date +%F)" "$(date +%T)" UTC ***" >> /mnt/shared/keys/ssh.log
#regenerate both
chown vagrant /mnt/shared/keys/*
else
echo both keys appear to be there >> /mnt/shared/keys/ssh.log
fi
cp -f /mnt/shared/keys/id_rsa* ~/.ssh # copy both to user (vagrant for now) .ssh
echo "# CALAVERA: This file was updated by the $HOSTNAME provisioning process" >> /home/vagrant/.ssh/authorized_keys
cat /mnt/shared/keys/id_rsa.pub >> ~/.ssh/authorized_keys # not idempotent; script intended only to be run on initial vagrant up
chown vagrant /home/vagrant/.ssh/*
chmod 600 /home/vagrant/.ssh/id_rsa
echo $HOSTNAME done with ssh script >> /mnt/shared/keys/ssh.log
echo "***" >> /mnt/shared/keys/ssh.log
echo "" >> /mnt/shared/keys/ssh.log
|
dm-academy/Calavera
|
cookbooks/base/files/ssh.sh
|
Shell
|
mit
| 2,355 |
#!/bin/bash
camelcaseId=$1
[ $# -eq 0 ] && { echo "Usage: $0 <BindingIdInCamelCase>"; exit 1; }
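# Example (hypothetical binding id): ./create_openhab_binding_test_skeleton.sh YahooWeather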
id=`echo $camelcaseId | tr '[:upper:]' '[:lower:]'`
[ -d org.openhab.binding.$id ] || { echo "The binding with the id must exist: org.openhab.binding.$id"; exit 1; }
mvn -s ../archetype-settings.xml archetype:generate -N \
-DarchetypeGroupId=org.eclipse.smarthome.archetype \
-DarchetypeArtifactId=org.eclipse.smarthome.archetype.binding.test \
-DarchetypeVersion=0.9.0-SNAPSHOT \
-DgroupId=org.openhab.binding \
-DartifactId=org.openhab.binding.$id.test \
-Dpackage=org.openhab.binding.$id.test \
-Dversion=2.2.0-SNAPSHOT \
-DbindingId=$id.test \
-DbindingIdCamelCase=$camelcaseId.test \
-DvendorName=openHAB \
-Dnamespace=org.openhab
directory=`echo "org.openhab.binding."$id".test"/`
cp ../../src/etc/about.html "$directory"
|
AndyXMB/openhab2-addons
|
addons/binding/create_openhab_binding_test_skeleton.sh
|
Shell
|
epl-1.0
| 852 |
#!/bin/bash
# Find the top-level Rekall dir and change to it:
# Change working dir to one containing this script.
cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# Recurse up until we get to the top-level.
while [ ! -e "setup.py" ]
do
cd ..
if [[ "$(pwd)" == "/" ]]
then
echo "Cannot find rekall directory."
    exit 1
fi
done
echo "Working directory is $(pwd)"
for f in $( git diff master --name-only -- rekall | grep ".py"); do
if [ -e $f ]; then
autopep8 --ignore E309,E711 -i -r --max-line-length 80 $f
pylint --rcfile tools/devel/pylintrc $f
fi
done
# Run the unit test suite.
./tools/testing/test_suite.py -c ../test/unit/tests.config
|
palaniyappanBala/rekall
|
tools/devel/presubmit.sh
|
Shell
|
gpl-2.0
| 682 |
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0-or-later
# Copyright (c) Köry Maincent <[email protected]> 2020
# Copyright (c) 2015 Red Hat, Inc.
#
# SYNOPSIS:
# netns_comm.sh <NS_EXEC_PROGRAM> <IP_VERSION> <COMM_TYPE>
#
# OPTIONS:
# * NS_EXEC_PROGRAM (ns_exec|ip)
# Program which will be used to enter and run other commands
# inside a network namespace.
# * IP_VERSION (ipv4|ipv6)
# Version of IP. (ipv4|ipv6)
# * COMM_TYPE (netlink|ioctl)
# Communication type between kernel and user space
# for basic setup: enabling and assigning IP addresses
# to the virtual ethernet devices. (Uses 'ip' command for netlink
# and 'ifconfig' for ioctl.)
#
# Tests that a separate network namespace can configure and communicate
# over the devices it sees. Tests are done using netlink(7) ('ip' command)
# or ioctl(2) ('ifconfig' command) for controlling devices.
#
# There are three test cases:
# 1,2. communication over paired veth (virtual ethernet) devices
# from two different network namespaces (each namespace has
# one device)
# 3. communication over the lo (localhost) device in a separate
# network namespace
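#
# EXAMPLE:
# netns_comm.sh ip ipv4 netlink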
TST_POS_ARGS=3
TST_SETUP=do_setup
TST_TESTFUNC=do_test
. netns_helper.sh
PROG=$1
IP_VER=$2
COM_TYPE=$3
do_setup()
{
netns_setup $PROG $IP_VER $COM_TYPE "192.168.0.2" "192.168.0.3" "fd00::2" "fd00::3"
tst_res TINFO "NS interaction: $PROG | devices setup: $COM_TYPE"
}
do_test()
{
EXPECT_PASS $NS_EXEC $NS_HANDLE0 $NS_TYPE $tping -q -c2 -I veth0 $IP1 1>/dev/null
EXPECT_PASS $NS_EXEC $NS_HANDLE1 $NS_TYPE $tping -q -c2 -I veth1 $IP0 1>/dev/null
case "$IP_VER" in
ipv4) ip_lo="127.0.0.1" ;;
ipv6) ip_lo="::1" ;;
esac
case "$COM_TYPE" in
netlink)
$NS_EXEC $NS_HANDLE0 $NS_TYPE ip link set dev lo up || \
tst_brk TBROK "enabling lo device failed"
;;
ioctl)
$NS_EXEC $NS_HANDLE0 $NS_TYPE ifconfig lo up || \
tst_brk TBROK "enabling lo device failed"
;;
esac
EXPECT_PASS $NS_EXEC $NS_HANDLE0 $NS_TYPE $tping -q -c2 -I lo $ip_lo 1>/dev/null
}
tst_run
|
pevik/ltp
|
testcases/kernel/containers/netns/netns_comm.sh
|
Shell
|
gpl-2.0
| 2,066 |
#!/bin/bash
# Nginx proxy
docker run -d -p 80:80 -p 443:443 \
--name nginx-proxy \
--restart always \
-v /opt/certs:/etc/nginx/certs:ro \
-v /etc/nginx/vhost.d \
-v /usr/share/nginx/html \
-v /var/run/docker.sock:/tmp/docker.sock:ro \
jwilder/nginx-proxy:latest
# Let's Encrypt agent
docker run -d \
--restart always \
-v /opt/certs:/etc/nginx/certs:rw \
-v /var/run/docker.sock:/var/run/docker.sock:ro \
--volumes-from nginx-proxy \
jrcs/letsencrypt-nginx-proxy-companion:latest
|
pstuart/Open-Dash
|
.scripts/ssl-setup.sh
|
Shell
|
gpl-3.0
| 506 |
#!/bin/sh
#
# Copyright (C) 2015, 2016 Internet Systems Consortium, Inc. ("ISC")
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
SYSTEMTESTTOP=..
. $SYSTEMTESTTOP/conf.sh
status=0
n=0
DIGOPTS="@10.53.0.1 -p 5300"
newtest() {
n=`expr $n + 1`
echo "${1} (${n})"
ret=0
}
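# test_add HOST TYPE IP: add the record via nsupdate, then check both the
# forward lookup and the matching reverse (PTR) lookup with dig.
# test_del HOST TYPE: delete the record and check that both lookups are gone.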
test_add() {
host="$1"
type="$2"
ip="$3"
cat <<EOF > ns1/update.txt
server 10.53.0.1 5300
ttl 86400
update add $host $type $ip
send
EOF
newtest "I:adding $host $type $ip"
$NSUPDATE ns1/update.txt > /dev/null 2>&1 || {
[ "$should_fail" ] || \
echo "I:update failed for $host $type $ip"
return 1
}
out=`$DIG $DIGOPTS +noall +answer -t $type -q $host`
echo $out > added.a.out.$n
lines=`echo "$out" | grep "$ip" | wc -l`
[ $lines -eq 1 ] || {
[ "$should_fail" ] || \
echo "I:dig output incorrect for $host $type $cmd: $out"
return 1
}
out=`$DIG $DIGOPTS +noall +answer -x $ip`
echo $out > added.ptr.out.$n
lines=`echo "$out" | grep "$host" | wc -l`
[ $lines -eq 1 ] || {
[ "$should_fail" ] || \
echo "I:dig reverse output incorrect for $host $type $cmd: $out"
return 1
}
return 0
}
test_del() {
host="$1"
type="$2"
ip=`$DIG $DIGOPTS +short $host $type`
cat <<EOF > ns1/update.txt
server 10.53.0.1 5300
update del $host $type
send
EOF
newtest "I:deleting $host $type (was $ip)"
$NSUPDATE ns1/update.txt > /dev/null 2>&1 || {
[ "$should_fail" ] || \
echo "I:update failed deleting $host $type"
return 1
}
out=`$DIG $DIGOPTS +noall +answer -t $type -q $host`
echo $out > deleted.a.out.$n
lines=`echo "$out" | grep "$ip" | wc -l`
[ $lines -eq 0 ] || {
[ "$should_fail" ] || \
echo "I:dig output incorrect for $host $type $cmd: $out"
return 1
}
out=`$DIG $DIGOPTS +noall +answer -x $ip`
echo $out > deleted.ptr.out.$n
lines=`echo "$out" | grep "$host" | wc -l`
[ $lines -eq 0 ] || {
[ "$should_fail" ] || \
echo "I:dig reverse output incorrect for $host $type $cmd: $out"
return 1
}
return 0
}
test_add test1.ipv4.example.nil. A "10.53.0.10" || ret=1
status=`expr $status + $ret`
test_add test2.ipv4.example.nil. A "10.53.0.11" || ret=1
status=`expr $status + $ret`
test_add test3.ipv4.example.nil. A "10.53.0.12" || ret=1
status=`expr $status + $ret`
test_add test4.ipv6.example.nil. AAAA "2001:db8::1" || ret=1
status=`expr $status + $ret`
test_del test1.ipv4.example.nil. A || ret=1
status=`expr $status + $ret`
test_del test2.ipv4.example.nil. A || ret=1
status=`expr $status + $ret`
test_del test3.ipv4.example.nil. A || ret=1
status=`expr $status + $ret`
test_del test4.ipv6.example.nil. AAAA || ret=1
status=`expr $status + $ret`
newtest "I:checking parameter logging"
grep "loading params for dyndb 'sample' from .*named.conf:33" ns1/named.run > /dev/null || ret=1
grep "loading params for dyndb 'sample2' from .*named.conf:34" ns1/named.run > /dev/null || ret=1
status=`expr $status + $ret`
echo "I:checking dyndb still works after reload"
$RNDC -c ../common/rndc.conf -s 10.53.0.1 -p 9953 reload 2>&1 | sed 's/^/I:ns1 /'
test_add test5.ipv4.example.nil. A "10.53.0.10" || ret=1
status=`expr $status + $ret`
test_add test6.ipv6.example.nil. AAAA "2001:db8::1" || ret=1
status=`expr $status + $ret`
test_del test5.ipv4.example.nil. A || ret=1
status=`expr $status + $ret`
test_del test6.ipv6.example.nil. AAAA || ret=1
status=`expr $status + $ret`
echo "I:exit status: $status"
[ $status -eq 0 ] || exit 1
|
each/bind9-collab
|
bin/tests/system/dyndb/tests.sh
|
Shell
|
mpl-2.0
| 3,692 |
#!/bin/sh
if [ $# -ne 2 ]; then
echo "You need to supply two arguments, e.g.:"
echo "$0 mimetypes/text-plain mimetypes/text-x-generic"
	exit 1
fi
# Split the two arguments into their category and icon name parts.
src="$1"
src_category=${src%/*}
src_icon=${src#*/}
dest="$2"
dest_category=${dest%/*}
dest_icon=${dest#*/}
# Copy the scalable icon.
if [ -f scalable/$src.svgz ]; then
echo "Copying scalable/$src.svgz to scalable/$dest.svgz..."
svn cp scalable/$src.svgz scalable/$dest.svgz
echo
fi
# Copy the optimized small versions of the icon.
for dir in 8x8 16x16 22x22 32x32 48x48 64x64 128x128 256x256; do
if [ -f scalable/$src_category/small/$dir/$src_icon.svgz ]; then
echo "Copying scalable/$src_category/small/$dir/$src_icon.svgz"
echo " to scalable/$dest_category/small/$dir/$dest_icon.svgz..."
svn cp scalable/$src_category/small/$dir/$src_icon.svgz scalable/$dest_category/small/$dir/$dest_icon.svgz
echo
fi
done
# Copy the rendered PNGs.
for dir in 8x8 16x16 22x22 32x32 48x48 64x64 128x128 256x256; do
if [ -f $dir/$src.png ]; then
echo "Copying $dir/$src.png to $dir/$dest.png..."
svn cp $dir/$src.png $dir/$dest.png
echo
fi
done
|
scandyna/multidiagtools
|
icons/oxygen/icon-copy.sh
|
Shell
|
lgpl-3.0
| 1,198 |
#
# /**------- <| --------------------------------------------------------**
# ** A Clan **
# **--- /.\ -----------------------------------------------------**
# ** <| [""M# change_clan_version_in_refs.sh **
# **- A | # -----------------------------------------------------**
# ** /.\ [""M# First version: 30/01/2009 **
# **- [""M# | # U"U#U -----------------------------------------------**
# | # | # \ .:/
# | # | #___| #
# ****** | "--' .-" *****************************************************
# * |"-"-"-"-"-#-#-## Clan : the Chunky Loop Analyser (experimental) *
# **** | # ## ###### ****************************************************
# * \ .::::'/ *
# * \ ::::'/ Copyright (C) 2008 Cedric Bastoul *
# * :8a| # # ## *
# * ::88a ### This is free software; you can redistribute it *
# * ::::888a 8a ##::. and/or modify it under the terms of the GNU *
# * ::::::::888a88a[]::: Lesser General Public License as published by *
# *::8:::::::::SUNDOGa8a::. the Free Software Foundation, either version 3 of *
# *::::::::8::::888:Y8888:: the License, or (at your option) *
# *::::':::88::::888::Y88a::::::::::::... any later version. *
# *::'::.. . ..... .. ... . *
# * This software is distributed in the hope that it will be useful, but *
# * WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General *
# * Public License for more details. *
# * *
# * You should have received a copy of the GNU Lesser General Public *
# * License along with software; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA *
# * *
# * Clan, the Chunky Loop Analyser *
# * Written by Cedric Bastoul, [email protected] *
# * *
# *****************************************************************************/
if [ $# -ne 1 ]; then
echo "Usage: change_clan_version_in_refs.sh clan_version";
echo "clan_version: for instance, 0.5.0";
exit 1;
fi;
for i in `find . -name "*.scop"`; do
echo "Rename in $i";
cat "$i" | sed -e "s/\(.*generated by Clan \)\([0-9.]*\)\( .*].*\)/\1$1\3/g" > "$i.tmp";
mv "$i.tmp" "$i";
done;
|
ftynse/clan
|
tests/change_clan_version_in_refs.sh
|
Shell
|
lgpl-3.0
| 3,008 |
#!/bin/sh
set -e # Fail fast
# PEP8 is not ignored in ACME
pep8 --config=acme/.pep8 acme
pep8 \
setup.py \
letsencrypt \
letsencrypt-apache \
letsencrypt-nginx \
letsencrypt-compatibility-test \
letshelp-letsencrypt \
|| echo "PEP8 checking failed, but it's ignored in Travis"
# echo exits with 0
|
thanatos/lets-encrypt-preview
|
pep8.travis.sh
|
Shell
|
apache-2.0
| 316 |
#!/bin/bash
TARGET_ARCH=$1
TARGET_API=$2
CROSS_COMPILE=$3
GIT_INSTALL=${4:-master}
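# Usage: build.sh TARGET_ARCH TARGET_API CROSS_COMPILE [GIT_INSTALL] [OPENSSL_DIR] [SODIUM_DIR] [LIBZMQ_DIR] [LIBINDY_DIR]
# Example (hypothetical values): ./build.sh arm 21 arm-linux-androideabi tags/v1.4.0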
if [ -z "${TARGET_ARCH}" ]; then
echo STDERR "Missing TARGET_ARCH argument"
echo STDERR "e.g. x86 or arm"
exit 1
fi
if [ -z "${TARGET_API}" ]; then
echo STDERR "Missing TARGET_API argument"
echo STDERR "e.g. 21"
exit 1
fi
if [ -z "${CROSS_COMPILE}" ]; then
echo STDERR "Missing CROSS_COMPILE argument"
echo STDERR "e.g. i686-linux-android"
exit 1
fi
if [ -z "${GIT_INSTALL}" ] ; then
echo STDERR "Missing GIT_INSTALL argument"
echo STDERR "e.g. master or rc or tags/v1.4.0"
exit 1
fi
if [ -z "${OPENSSL_DIR}" ]; then
OPENSSL_DIR="openssl_${TARGET_ARCH}"
if [ -d "${OPENSSL_DIR}" ] ; then
echo "Found ${OPENSSL_DIR}"
elif [ -z "$5" ]; then
echo STDERR "Missing OPENSSL_DIR argument and environment variable"
echo STDERR "e.g. set OPENSSL_DIR=<path> for environment or openssl_${TARGET_ARCH}"
exit 1
else
OPENSSL_DIR=$5
fi
fi
if [ -z "${SODIUM_DIR}" ] ; then
SODIUM_DIR="libsodium_${TARGET_ARCH}"
if [ -d "${SODIUM_DIR}" ] ; then
echo "Found ${SODIUM_DIR}"
elif [ -z "$6" ]; then
echo STDERR "Missing SODIUM_DIR argument and environment variable"
echo STDERR "e.g. set SODIUM_DIR=<path> for environment or libsodium_${TARGET_ARCH}"
exit 1
else
SODIUM_DIR=$6
fi
fi
if [ -z "${LIBZMQ_DIR}" ] ; then
LIBZMQ_DIR="libzmq_${TARGET_ARCH}"
if [ -d "${LIBZMQ_DIR}" ] ; then
echo "Found ${LIBZMQ_DIR}"
elif [ -z "$7" ]; then
echo STDERR "Missing LIBZMQ_DIR argument and environment variable"
echo STDERR "e.g. set LIBZMQ_DIR=<path> for environment or libzmq_${TARGET_ARCH}"
exit 1
else
LIBZMQ_DIR=$7
fi
fi
if [ -z "${LIBINDY_DIR}" ] ; then
LIBINDY_DIR="libindy_${TARGET_ARCH}"
if [ -d "${LIBINDY_DIR}" ] ; then
echo "Found ${LIBINDY_DIR}"
elif [ -z "$8" ] ; then
echo STDERR "Missing LIBINDY_DIR argument and environment variable"
echo STDERR "e.g. set LIBINDY_DIR=<path> for environment or libindy_${TARGET_ARCH}"
exit 1
else
LIBINDY_DIR=$8
fi
fi
if [ ! -f "android-ndk-r16b-linux-x86_64.zip" ] ; then
echo "Downloading android-ndk-r16b-linux-x86_64.zip"
wget -q https://dl.google.com/android/repository/android-ndk-r16b-linux-x86_64.zip
else
echo "Skipping download android-ndk-r16b-linux-x86_64.zip"
fi
_SDK_REPO="[email protected]:evernym/sdk.git"
if [ ! -d "sdk" ] ; then
echo "git cloning sdk"
git clone --branch ${GIT_INSTALL} ${_SDK_REPO}
else
echo "Skipping git clone of sdk"
_GIT_BRANCH=$(git --git-dir sdk/.git branch | head -n 1 | sed -e 's/^..//g')
echo "Current branch set to ${_GIT_BRANCH}"
GIT_INSTALL="${GIT_INSTALL//\//\/\/}"
echo "GIT_INSTALL set to ${GIT_INSTALL}"
_MATCH=$(echo "${_GIT_BRANCH}" | egrep "${GIT_INSTALL}")
if [ -z "${_MATCH}" ] ; then
echo STDERR "Branch is not set properly in sdk/.git"
exit 1
fi
fi
rm -f "sdk/vcx/libvcx/Cargo.lock"
docker build -t libvcx-android:latest . --build-arg target_arch=${TARGET_ARCH} --build-arg target_api=${TARGET_API} --build-arg cross_compile=${CROSS_COMPILE} --build-arg openssl_dir=${OPENSSL_DIR} --build-arg sodium_dir=${SODIUM_DIR} --build-arg libzmq_dir=${LIBZMQ_DIR} --build-arg libindy_dir=${LIBINDY_DIR} &&
docker run libvcx-android:latest && \
docker_id=$(docker ps -a | grep libvcx-android:latest | grep Exited | tail -n 1 | cut -d ' ' -f 1) && \
docker_image_id=$(docker image ls | grep libvcx-android | perl -pe 's/\s+/ /g' | cut -d ' ' -f 3) && \
docker cp ${docker_id}:/home/vcx_user/libvcx.so . && \
docker cp ${docker_id}:/home/vcx_user/libvcx.a . && \
docker rm ${docker_id} > /dev/null && \
docker rmi ${docker_image_id} > /dev/null
|
srottem/indy-sdk
|
vcx/libvcx/build_scripts/android/vcx/build.sh
|
Shell
|
apache-2.0
| 3,863 |
#!/bin/sh
if [ -z "$APP_PATH" ]; then
# TODO: set APP_PATH to the installed path of your application
APP_PATH='/usr/local/bin/tests_script_gen_highlevel_empty'
fi
if ! [ -f "$APP_PATH" ]; then
echo "ERROR: APP_PATH points to non-existent file" 1>&2
exit 1
fi
error_other_mp() {
echo "ERROR: another mountpoint already exists on spec:/tests/script/gen/highlevel/empty. Please umount first." 1>&2
exit 1
}
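# If something is already mounted at the spec mountpoint, accept it only if
# it is exactly our specload overlay (same file, same app, no extra args);
# anything else is treated as a foreign mountpoint.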
if kdb mount -13 | grep -Fxq 'spec:/tests/script/gen/highlevel/empty'; then
if ! kdb mount | grep -Fxq 'tests_script_gen_highlevel_empty.overlay.spec.eqd on spec:/tests/script/gen/highlevel/empty with name spec:/tests/script/gen/highlevel/empty'; then
error_other_mp
fi
MP=$(echo "spec:/tests/script/gen/highlevel/empty" | sed 's:\\:\\\\:g' | sed 's:/:\\/:g')
if [ -n "$(kdb get "system:/elektra/mountpoints/$MP/getplugins/#5#specload#specload#/config/file")" ]; then
error_other_mp
fi
if [ "$(kdb get "system:/elektra/mountpoints/$MP/getplugins/#5#specload#specload#/config/app")" != "$APP_PATH" ]; then
error_other_mp
fi
if [ -n "$(kdb ls "system:/elektra/mountpoints/$MP/getplugins/#5#specload#specload#/config/app/args")" ]; then
error_other_mp
fi
else
sudo kdb mount -R noresolver "tests_script_gen_highlevel_empty.overlay.spec.eqd" "spec:/tests/script/gen/highlevel/empty" specload "app=$APP_PATH"
fi
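# Same idea for the regular mountpoint: accept only our own overlay file,
# otherwise ask the user to umount first.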
if kdb mount -13 | grep -Fxq '/tests/script/gen/highlevel/empty'; then
if ! kdb mount | grep -Fxq 'tests_gen_elektra_empty.ini on /tests/script/gen/highlevel/empty with name /tests/script/gen/highlevel/empty'; then
echo "ERROR: another mountpoint already exists on /tests/script/gen/highlevel/empty. Please umount first." 1>&2
exit 1
fi
else
sudo kdb spec-mount '/tests/script/gen/highlevel/empty'
fi
|
mpranj/libelektra
|
tests/shell/gen/highlevel/empty.expected.mount.sh
|
Shell
|
bsd-3-clause
| 1,749 |
#!/bin/bash
FN="MeSH.Laf.eg.db_1.13.0.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.12/data/annotation/src/contrib/MeSH.Laf.eg.db_1.13.0.tar.gz"
"https://bioarchive.galaxyproject.org/MeSH.Laf.eg.db_1.13.0.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-mesh.laf.eg.db/bioconductor-mesh.laf.eg.db_1.13.0_src_all.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-mesh.laf.eg.db/bioconductor-mesh.laf.eg.db_1.13.0_src_all.tar.gz"
)
MD5="522864613616481501ebb43a9d3d8913"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING="$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM"
mkdir -p "$STAGING"
TARBALL="$STAGING/$FN"
SUCCESS=0
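# Try each mirror in turn; the first download whose checksum verifies wins.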
for URL in "${URLS[@]}"; do
curl "$URL" > "$TARBALL"
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks; GNU md5sum -c expects two spaces
# between the checksum and the file name.
if [[ $(uname -s) == "Linux" ]]; then
if md5sum -c <<<"$MD5  $TARBALL"; then
SUCCESS=1
break
fi
elif [[ $(uname -s) == "Darwin" ]]; then
if [[ $(md5 "$TARBALL" | cut -f4 -d " ") == "$MD5" ]]; then
SUCCESS=1
break
fi
fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library="$PREFIX/lib/R/library" "$TARBALL"
rm "$TARBALL"
rmdir "$STAGING"
|
bebatut/bioconda-recipes
|
recipes/bioconductor-mesh.laf.eg.db/post-link.sh
|
Shell
|
mit
| 1,445 |
#!/bin/bash
FN="hapmap370k_1.0.1.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.12/data/annotation/src/contrib/hapmap370k_1.0.1.tar.gz"
"https://bioarchive.galaxyproject.org/hapmap370k_1.0.1.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-hapmap370k/bioconductor-hapmap370k_1.0.1_src_all.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-hapmap370k/bioconductor-hapmap370k_1.0.1_src_all.tar.gz"
)
MD5="569556341a45da799372aaf1012be72e"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING="$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM"
mkdir -p "$STAGING"
TARBALL="$STAGING/$FN"
SUCCESS=0
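# Try each mirror in turn; the first download whose checksum verifies wins.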
for URL in "${URLS[@]}"; do
curl "$URL" > "$TARBALL"
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks; GNU md5sum -c expects two spaces
# between the checksum and the file name.
if [[ $(uname -s) == "Linux" ]]; then
if md5sum -c <<<"$MD5  $TARBALL"; then
SUCCESS=1
break
fi
elif [[ $(uname -s) == "Darwin" ]]; then
if [[ $(md5 "$TARBALL" | cut -f4 -d " ") == "$MD5" ]]; then
SUCCESS=1
break
fi
fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library="$PREFIX/lib/R/library" "$TARBALL"
rm "$TARBALL"
rmdir "$STAGING"
|
bebatut/bioconda-recipes
|
recipes/bioconductor-hapmap370k/post-link.sh
|
Shell
|
mit
| 1,412 |
#!/bin/bash
# bash (not plain sh) is required: the script uses arrays and C-style for loops.
targetdir="$1"
notice() {
echo "$@" 1>&2
}
list_files() {
find . -type f \
! -name "Root" \
! -name "Repository" \
! -name "Entries.Old" \
! -name "Entries.Extra.Old" \
! -name "Template" \
! -name "Entries" \
! -name "Entries.Extra" \
| sed 's/\.\/\(.*\)/"\1"/'
}
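# list_files emits one double-quoted path per line; eval turns that output
# into a bash array so paths containing spaces survive word splitting.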
list=$(cd "$targetdir" && list_files)
eval "files=($list)"
num_files=${#files[*]}
for ((i = 0; i < num_files; i++)); do
f=${files[$i]}
notice "$f"
done
|
ifreecarve/nightingale-hacking
|
tools/scripts/list-files.sh
|
Shell
|
gpl-2.0
| 470 |