code (string, 2-1.05M chars) | repo_name (string, 5-110 chars) | path (string, 3-922 chars) | language (1 class) | license (15 classes) | size (int64, 2-1.05M)
---|---|---|---|---|---
#!/bin/sh
#
# Copyright (c) 2016 Caoimhe Chaos <[email protected]>,
# Ancient Solutions. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY ANCIENT SOLUTIONS AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# FOUNDATION OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
#
set -e
die () {
echo "$@" 1>&2
exit 1
}
test -f /secrets/tls.crt || die "Error: No client certificate found at /secrets/tls.crt"
test -f /secrets/tls.key || die "Error: No client key found at /secrets/tls.key"
tmppath="$(mktemp -t quasselCert.pem.XXXXXX)"
cat /secrets/tls.crt /secrets/tls.key > "$tmppath"
rm -f /var/lib/quassel/quasselCert.pem
install -o quasselcore -g quassel -m 0440 "$tmppath" \
/var/lib/quassel/quasselCert.pem
rm -f "$tmppath"
exec quasselcore -c /var/lib/quassel --oidentd --loglevel=Info \
--port=4242 --logfile=/var/log/quassel/core.log
| tonnerre/dockerfiles | apps/quassel/run-quasselcore.sh | Shell | bsd-3-clause | 2,065 |
#!/bin/bash
# Fast fail the script on failures.
set -e
# Verify that the libraries are error free.
dartanalyzer --fatal-warnings \
lib/md_proc.dart \
test/library_test.dart \
tool/build.dart
# Linter
pub run linter:linter `find lib test tool -type f \( -iname "*.dart" ! -iname "*.g.dart" \)`
# TODO Use .analysis_config when Dart 1.12 is released
# Run the tests.
pub run test:test -p "vm,phantomjs"
# If the COVERALLS_TOKEN token is set on travis
# Install dart_coveralls
# Rerun tests with coverage and send to coveralls
if [ "$COVERALLS_TOKEN" ]; then
pub run dart_coveralls:dart_coveralls report \
--token "$COVERALLS_TOKEN" \
--retry 2 \
--exclude-test-files \
test/library_test.dart
fi
| kevmoo/md_proc | tool/travis.sh | Shell | bsd-3-clause | 721 |
#!/bin/bash
# (bash is required: this script uses the "function" keyword, ${var/...}
# substitution, ${!param} indirection, and C-style for loops)
#
# Copyright (c) 2014 Final Level
# Author: Denys Misko <[email protected]>
# Distributed under BSD (3-Clause) License (See
# accompanying file LICENSE)
#
# chkconfig: 2345 61 39
# description: Metis Storage is a CDN and high available http server
# processname: metis_storage
# pidfile: /var/run/metis_storage.pid
# Source function library.
. /etc/rc.d/init.d/functions
. /etc/sysconfig/metis_storage
NAME=metis_storage
DESC="metis_storage"
PATH=/sbin:/bin:/usr/sbin:/usr/bin
# config
#BIN_PATH="/usr/bin/"
BIN_PATH="/home/draal/projects/fl/metis/"
WRAPPER_NAME="metis_storage_wrapper.sh"
WRAPPER="$BIN_PATH$WRAPPER_NAME"
PIDFILE="/var/run/metis_storage"
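# check_pid_names PID PARAMS NAME: succeeds when the command line of process
# PID matches both PARAMS and NAME (a sanity check that a recorded pid still
# belongs to our wrapper or daemon).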
function check_pid_names {
local pid="$1"
local params="${2/-/\\-}"
local name="$3"
local PN=$( ps -o args -p $pid 2>/dev/null | grep "$params" | grep "$name" )
if [ -z "$PN" ]; then
return 1
else
return 0
fi
}
function start_server() {
local num="$1"
local params="$2"
local pidfile="$PIDFILE$num.pid"
local wrapper="$WRAPPER _${num}_ $params"
echo "... $NAME ($num - $params)"
if [ -f $pidfile ]; then
local dpid=$( tail -1 "$pidfile" )
local wpid=$( head -1 "$pidfile" )
if check_pid_names $wpid "_${num}_" $WRAPPER_NAME; then
echo " ... wrapper $wrapper is already running"
return 0
fi
if check_pid_names $dpid "$params" $NAME; then
echo " ... $NAME $dpid from $wrapper is already running"
return 0
fi
$wrapper &
else
$wrapper &
fi
return 1
}
function stop_server() {
local num="$1"
local params="$2"
local pidfile="$PIDFILE$num.pid"
if [ -f $pidfile ]; then
local dpid=$( tail -1 "$pidfile" )
local wpid=$( head -1 "$pidfile" )
if check_pid_names $wpid "_${num}_" $WRAPPER_NAME; then
kill -9 "$wpid"
else
echo " ... wrapper $WRAPPER_NAME _${num}_ hasn't run"
return 0
fi
if check_pid_names $dpid "$params" $NAME; then
kill -15 "$dpid" && sleep 5 && kill -9 "$dpid" 2>/dev/null
else
echo " ... $NAME $dpid from $WRAPPER_NAME _${num}_ hasn't run"
return 0
fi
echo -n > "$pidfile"
else
echo "... $NAME $num hasn't run"
fi
}
start() {
echo "."
for (( i = 1; i <= 36; i++ ))
do
local param=SERVER$i
if [ ! -z "${!param}" ]; then
echo "Starting $NAME $i (${!param})"
start_server $i "${!param}"
fi
done
}
stop() {
for (( i = 1; i <= 36; i++ ))
do
local param=SERVER$i
if [ ! -z "${!param}" ]; then
echo "Stopping $NAME $i (${!param})"
stop_server $i "${!param}"
fi
done
}
case "$1" in
start)
echo -n "Starting $DESC: $NAME"
start
echo "." ; sleep 2
;;
stop)
echo -n "Stopping $DESC: $NAME "
stop
exit $?
;;
restart|force-reload)
echo -n "Restarting $DESC: $NAME"
stop
sleep 1
start
exit $?
;;
status)
echo "Status $DESC: $NAME"
true
exit $?
;;
*)
N=/etc/init.d/${NAME}.sh
echo "Usage: $N {start|stop|restart|force-reload|status}" >&2
exit 1
;;
esac
exit 0
| FinalLevel/metis | etc/init.d/metis_storage.sh | Shell | bsd-3-clause | 2,932 |
#!/bin/bash
#------------------------------------------------------------------------------
PACKAGE_BEEEON="com.rehivetech.beeeon.debug"
# force stop
adb shell am force-stop $PACKAGE_BEEEON
adb shell ps | awk '/com\.android\.commands\.monkey/ { system("adb shell kill " $2) }'
| BeeeOn/android | tests/monkey/kill-test.sh | Shell | bsd-3-clause | 376 |
#!/bin/bash
#Redpitaya INSTALL SDK install script
NAME=SDK
GREET_MSG="\nSTARTING REDPITAYA SDK INSTALLATION..."
LIB_MSG="\nINSTALLING REDPITAYA LIBRARIES...\n"
GCC_LINARO_MSG="INSTALLING GCC LINARO...\n"
RP_INCLUDE=./include
#ECLIPSE_DL=.
GCC_LINARO_DL=gcc-linaro-arm-linux-gnueabi-2012.03-20120326_linux
GCC_LINARO_DIR=gcc_linaro/bin
API_BLINK_EXAMPLE_DIR=pitaya_example/
echo -e $GREET_MSG
echo -e "DOWNLOADING CURL...\n"
sudo apt-get install curl
echo -e "DOWNLOADING PLINK...\n"
sudo apt-get install putty-tools
echo -e "\nINSTALLING DEPENDENCIES...\n"
#sudo apt-get install default-jre
# (sudo does not apply to a shell redirection, so write the file via tee)
echo "deb http://old-releases.ubuntu.com/ubuntu/ raring main restricted universe multiverse" | sudo tee /etc/apt/sources.list.d/ia32-libs-raring.list
sudo apt-get update
sudo apt-get install ia32-libs
sudo apt-get install lib32z1 lib32ncurses5 lib32bz2-1.0 lib32stdc++6
#Install sshpass application
sudo apt-get install sshpass
#echo -e "\nDOWNLOADING ECLIPSE..."
#Determine machine type
#MACHINE_TYPE=`uname -m`
#if [ ${MACHINE_TYPE} == 'x86_64' ]; then
#echo -e "DETECTED 64 BIT OS ARCHITECTURE. DOWNLOADING APPROPRIATE ECLIPSE VERSION...\n"
#ECLIPSE_DL=eclipse-cpp-luna-SR1a-linux-gtk-x86_64.tar.gz
#else
#echo -e "DETECTED 32 BIT OS ARCHITECTURE. DOWNLOADING APPROPRIATE ECLIPSE VERSION...\n"
#ECLIPSE_DL=eclipse-cpp-luna-SR1a-linux-gtk.tar.gz
#fi
#Download eclipse
#curl --remote-name http://mirrors.linux-bg.org/eclipse/technology/epp/downloads/release/luna/SR1a/$ECLIPSE_DL
#tar xvf $ECLIPSE_DL eclipse
#rm $ECLIPSE_DL
#echo -e $LIB_MSG
#sudo cp $RP_INCLUDE/* /usr/lib
#sudo cp $RP_INCLUDE/* /usr/include
echo -e $GCC_LINARO_MSG
curl -L --remote-name https://launchpad.net/linaro-toolchain-binaries/trunk/2012.03/+download/$GCC_LINARO_DL.tar.bz2
tar xvf $GCC_LINARO_DL.tar.bz2
sudo mv $GCC_LINARO_DL gcc_linaro
rm -rf $GCC_LINARO_DL.tar.bz2
echo "PATH=$PATH:$PWD/$GCC_LINARO_DIR" | sudo tee -a /etc/bash.bashrc
bash --login
#If everything went well, create a run.sh script for starting eclipse with target workspace
#touch run_eclipse.sh
#chmod +x run_eclipse.sh
#echo '#!/bin/bash' > run_eclipse.sh
#echo 'echo -e "STARTING ECLIPSE...\n"' >> run_eclipse.sh
#echo './eclipse/eclipse -data pitaya_example' >> run_eclipse.sh
#Create remote scp and execture script
#touch run.sh
chmod +x run.sh
| ydre/kit-soft | RedPitaya/install.sh | Shell | bsd-3-clause | 2,391 |
#!/bin/bash
INPUT="$1"
OUTPUT_NAME="$2"
OUTPUT='/tmp/movie'
mkdir -p "$OUTPUT"
rm -f "$OUTPUT"/*
ffmpeg -i "$INPUT" -an -f image2 -r 4 -s 64x27 "$OUTPUT/image_%05d.jpg"
TexturePacker --algorithm MaxRects --padding 0 --disable-rotation --format libgdx "$OUTPUT" --data "$OUTPUT_NAME.txt" --sheet "$OUTPUT_NAME.png"
| 0x90sled/droidtowers | convert-movie.sh | Shell | mit | 297 |
#!/bin/sh
set -e
mkdir -p "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
RESOURCES_TO_COPY=${PODS_ROOT}/resources-to-copy-${TARGETNAME}.txt
> "$RESOURCES_TO_COPY"
XCASSET_FILES=()
realpath() {
DIRECTORY="$(cd "${1%/*}" && pwd)"
FILENAME="${1##*/}"
echo "$DIRECTORY/$FILENAME"
}
install_resource()
{
case $1 in
*.storyboard)
echo "ibtool --reference-external-strings-file --errors --warnings --notices --output-format human-readable-text --compile ${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$1\" .storyboard`.storyboardc ${PODS_ROOT}/$1 --sdk ${SDKROOT}"
ibtool --reference-external-strings-file --errors --warnings --notices --output-format human-readable-text --compile "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$1\" .storyboard`.storyboardc" "${PODS_ROOT}/$1" --sdk "${SDKROOT}"
;;
*.xib)
echo "ibtool --reference-external-strings-file --errors --warnings --notices --output-format human-readable-text --compile ${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$1\" .xib`.nib ${PODS_ROOT}/$1 --sdk ${SDKROOT}"
ibtool --reference-external-strings-file --errors --warnings --notices --output-format human-readable-text --compile "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$1\" .xib`.nib" "${PODS_ROOT}/$1" --sdk "${SDKROOT}"
;;
*.framework)
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
echo "rsync -av ${PODS_ROOT}/$1 ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
rsync -av "${PODS_ROOT}/$1" "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
;;
*.xcdatamodel)
echo "xcrun momc \"${PODS_ROOT}/$1\" \"${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1"`.mom\""
xcrun momc "${PODS_ROOT}/$1" "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1" .xcdatamodel`.mom"
;;
*.xcdatamodeld)
echo "xcrun momc \"${PODS_ROOT}/$1\" \"${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1" .xcdatamodeld`.momd\""
xcrun momc "${PODS_ROOT}/$1" "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1" .xcdatamodeld`.momd"
;;
*.xcmappingmodel)
echo "xcrun mapc \"${PODS_ROOT}/$1\" \"${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1" .xcmappingmodel`.cdm\""
xcrun mapc "${PODS_ROOT}/$1" "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1" .xcmappingmodel`.cdm"
;;
*.xcassets)
ABSOLUTE_XCASSET_FILE=$(realpath "${PODS_ROOT}/$1")
XCASSET_FILES+=("$ABSOLUTE_XCASSET_FILE")
;;
/*)
echo "$1"
echo "$1" >> "$RESOURCES_TO_COPY"
;;
*)
echo "${PODS_ROOT}/$1"
echo "${PODS_ROOT}/$1" >> "$RESOURCES_TO_COPY"
;;
esac
}
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_resource "YwenKit/YwenKit/YwenKit/Resources/gesture_node_normal.png"
install_resource "YwenKit/YwenKit/YwenKit/Resources/[email protected]"
install_resource "YwenKit/YwenKit/YwenKit/Resources/gesture_node_selected.png"
install_resource "YwenKit/YwenKit/YwenKit/Resources/[email protected]"
install_resource "YwenKit/YwenKit/YwenKit/Resources/toast_err.png"
install_resource "YwenKit/YwenKit/YwenKit/Resources/[email protected]"
install_resource "YwenKit/YwenKit/YwenKit/Resources/[email protected]"
install_resource "YwenKit/YwenKit/YwenKit/Resources/toast_success.png"
install_resource "YwenKit/YwenKit/YwenKit/Resources/[email protected]"
install_resource "YwenKit/YwenKit/YwenKit/Resources/[email protected]"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_resource "YwenKit/YwenKit/YwenKit/Resources/gesture_node_normal.png"
install_resource "YwenKit/YwenKit/YwenKit/Resources/[email protected]"
install_resource "YwenKit/YwenKit/YwenKit/Resources/gesture_node_selected.png"
install_resource "YwenKit/YwenKit/YwenKit/Resources/[email protected]"
install_resource "YwenKit/YwenKit/YwenKit/Resources/toast_err.png"
install_resource "YwenKit/YwenKit/YwenKit/Resources/[email protected]"
install_resource "YwenKit/YwenKit/YwenKit/Resources/[email protected]"
install_resource "YwenKit/YwenKit/YwenKit/Resources/toast_success.png"
install_resource "YwenKit/YwenKit/YwenKit/Resources/[email protected]"
install_resource "YwenKit/YwenKit/YwenKit/Resources/[email protected]"
fi
mkdir -p "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
rsync -avr --copy-links --no-relative --exclude '*/.svn/*' --files-from="$RESOURCES_TO_COPY" / "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
if [[ "${ACTION}" == "install" ]]; then
mkdir -p "${INSTALL_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
rsync -avr --copy-links --no-relative --exclude '*/.svn/*' --files-from="$RESOURCES_TO_COPY" / "${INSTALL_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
fi
rm -f "$RESOURCES_TO_COPY"
if [[ -n "${WRAPPER_EXTENSION}" ]] && [ "`xcrun --find actool`" ] && [ -n "$XCASSET_FILES" ]
then
case "${TARGETED_DEVICE_FAMILY}" in
1,2)
TARGET_DEVICE_ARGS="--target-device ipad --target-device iphone"
;;
1)
TARGET_DEVICE_ARGS="--target-device iphone"
;;
2)
TARGET_DEVICE_ARGS="--target-device ipad"
;;
*)
TARGET_DEVICE_ARGS="--target-device mac"
;;
esac
# Find all other xcassets (this unfortunately includes those of path pods and other targets).
OTHER_XCASSETS=$(find "$PWD" -iname "*.xcassets" -type d)
while read line; do
if [[ $line != $(realpath "$PODS_ROOT")* ]]; then
XCASSET_FILES+=("$line")
fi
done <<<"$OTHER_XCASSETS"
printf "%s\0" "${XCASSET_FILES[@]}" | xargs -0 xcrun actool --output-format human-readable-text --notices --warnings --platform "${PLATFORM_NAME}" --minimum-deployment-target "${IPHONEOS_DEPLOYMENT_TARGET}" ${TARGET_DEVICE_ARGS} --compress-pngs --compile "${BUILT_PRODUCTS_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
fi
| yaliyingwy/YwenCrypto | ios/Pods/Target Support Files/Pods/Pods-resources.sh | Shell | mit | 6,209 |
#!/bin/bash
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project root for full license information.
build_root=$(cd "$(dirname "$0")/.." && pwd)
cd $build_root/node
# Set up links in the npm cache to ensure we're exercising all the code in
# the repo, rather than downloading released versions of our packages from
# npm.
build/dev-setup.sh || exit $?
# Lint all JavaScript code and run unit + integration tests
build/build.sh --min --integration-tests --e2e-tests || exit $?
# The 'npm link' commands in this script create symlinks to tracked repo
# files from ignored locations (under ./node_modules). This means a call to
# 'git clean -xdf' will delete tracked files from the repo's working
# directory. To avoid any complications, we'll unlink everything before
# exiting.
build/dev-teardown.sh || exit $?
| kevinledinh/azure-iot-sdks | jenkins/linux_node.sh | Shell | mit | 914 |
#!/bin/bash
set -e
set -x
JENKINS_VERSION=$(curl -sq https://api.github.com/repos/jenkinsci/jenkins/tags | grep '"name":' | egrep -o '[0-9]+(\.[0-9]+)+' | sort --version-sort | uniq | tail -1)
echo "$JENKINS_VERSION"
JENKINS_SHA=$(curl "http://repo.jenkins-ci.org/simple/releases/org/jenkins-ci/main/jenkins-war/${JENKINS_VERSION}/jenkins-war-${JENKINS_VERSION}.war.sha1")
echo "$JENKINS_SHA"
docker build --build-arg "JENKINS_VERSION=$JENKINS_VERSION" \
--build-arg "JENKINS_SHA=$JENKINS_SHA" \
--no-cache --pull \
--tag "jenkinsci/jenkins:$JENKINS_VERSION" .
docker tag -f "jenkinsci/jenkins:$JENKINS_VERSION" jenkinsci/jenkins:latest
docker push "jenkinsci/jenkins:$JENKINS_VERSION"
docker push jenkinsci/jenkins:latest
| WeDevBrasil/jenkins-php7 | weekly.sh | Shell | mit | 770 |
#!/usr/bin/env bash
#
# Generate release notes for Meza
#
# SET VARIABLES FOR COLORIZING BASH OUTPUT
#
RED='\033[0;31m'
GREEN='\033[0;32m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
#
# SETUP KNOWN VARS PRIOR TO USER INPUT
#
PREVIOUS_RELEASES=$(git tag -l | sed '/^v0/ d' | sed '/^v1/ d')
LATEST="${PREVIOUS_RELEASES##*$'\n'}"
GIT_HASH=$(git rev-parse HEAD | cut -c1-8)
#
# WELCOME MESSAGE
#
echo
echo "* * * * * * * * * * * * * * * * * * * * * * * *"
echo "* *"
echo "* Meza Release Generator *"
echo "* *"
echo "* * * * * * * * * * * * * * * * * * * * * * * *"
# Set current branch as base branch
BASE_BRANCH=$(git branch | grep \* | cut -d ' ' -f2)
if [ "${BASE_BRANCH}" != "master" ]; then
echo
echo -e "${RED}You are not on the 'master' branch, and you probably want to be.${NC}"
printf "^"
for ((i=1;i<=64;i++)); do
sleep 0.05
printf "\b ^"
done
printf "\b"
echo
echo
echo -e "If you want to be on 'master', press ${GREEN}ctrl+c${NC} to cancel this script then"
echo -e "do ${GREEN}git checkout master && git pull origin master --ff-only${NC} to switch."
printf "^"
for ((i=1;i<=70;i++)); do
sleep 0.05
printf "\b ^"
done
printf "\b"
echo
echo
fi
echo "Checking for any changes on GitHub..."
git fetch
#
# USER INPUT: CHOOSE OLD VERSION NUMBER TO BASE FROM
#
echo -e "${GREEN}"
echo "${PREVIOUS_RELEASES}"
echo -e "${NC}"
while [ -z "$OLD_VERSION" ]; do
read -p "Enter previous release number (options in green above): " -i "$LATEST" -e OLD_VERSION
done;
#
# SETUP LIST OF COMMITS FOR DISPLAY NOW AND INCLUSION IN RELEASE-NOTES.MD
#
COMMITS=$(git log --oneline --no-merges "${OLD_VERSION}..HEAD" | while read line; do echo "* $line"; done)
echo
echo -e "From ${GREEN}${OLD_VERSION}${NC} to ${GREEN}HEAD${NC}, these are the non-merge commits:"
echo -e "${GREEN}"
echo "${COMMITS}"
echo -e "${NC}"
#
# USER INPUT: CHOOSE NEW VERSION NUMBER
#
while [ -z "$NEW_VERSION" ]; do
read -p "Enter new version number in form X.Y.Z: " NEW_VERSION
done;
#
# USER INPUT: OVERVIEW TEXT
#
read -p "Based upon commits above, choose optional 1-line overview: " OVERVIEW
#
# SETUP VARS BASED UPON USER INPUT
#
MAJOR_VERSION=$(echo "$NEW_VERSION" | cut -f1 -d".")
VERSION_BRANCH="${MAJOR_VERSION}.x"
CONTRIBUTORS=$(git shortlog -sn "${OLD_VERSION}..HEAD" | while read line; do echo "* $line"; done)
#
# GENERATE RELEASE NOTES INTO TEMP FILE
#
RELEASE_NOTES_FILE=./.release-notes.tmp
cat > ${RELEASE_NOTES_FILE} <<- EOM
${OVERVIEW}
### Commits since $OLD_VERSION
${COMMITS}
### Contributors
${CONTRIBUTORS}
### How to upgrade
\`\`\`bash
sudo meza update ${NEW_VERSION}
sudo meza deploy <insert-your-environment-name>
\`\`\`
EOM
#
# OUTPUT RELEASE NOTES IN GREEN ON COMMAND LINE
#
# I think preferable not to output this here
# echo -e "${GREEN}"
# cat "${RELEASE_NOTES_FILE}"
# echo -e "${NC}"
#
# TO-DO: Automate edit of release notes
#
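# The first sed splices the generated notes in after the "=============" line
# in RELEASE-NOTES.md; the second inserts a "## Meza $NEW_VERSION" heading
# between that line and the new notes.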
sed -i -e '/=============/r.release-notes.tmp' ./RELEASE-NOTES.md
sed -i "s/=============/\0\n\n## Meza $NEW_VERSION/" ./RELEASE-NOTES.md
#
# COMMIT CHANGE
#
git add RELEASE-NOTES.md
RELEASE_BRANCH="${NEW_VERSION}-release"
git checkout -b "${RELEASE_BRANCH}"
git commit -m "${NEW_VERSION} release"
# git push origin "$BASE_BRANCH"
#
# OUTPUT DIRECTIONS FOR COMPLETING RELEASE
#
echo
echo "* * * * * * * * * * * * * * * * * * * * * * * *"
echo "* *"
echo "* Release process started *"
echo "* *"
echo "* * * * * * * * * * * * * * * * * * * * * * * *"
echo
echo "Release notes generated, committed, and pushed. "
echo
echo -e "1. Check what you committed with ${RED}git diff HEAD~1..HEAD${NC}"
echo -e " If you want to alter anything, make your changes, then do:"
echo -e " ${RED}git add ."
echo -e " git commit --amend --no-edit${NC}"
echo -e "2. Push the change: ${GREEN}git push origin ${RELEASE_BRANCH}${NC}"
echo -e "3. Open a pull request at ${GREEN}https://github.com/enterprisemediawiki/meza/compare/${BASE_BRANCH}...${RELEASE_BRANCH}?expand=1${NC}"
echo "4. After the PR is merged create a new release of Meza with these details:"
echo " * Tag: $NEW_VERSION"
echo " * Title: Meza $NEW_VERSION"
echo -e " * Description: the ${GREEN}Meza $NEW_VERSION${NC} section from RELEASE-NOTES.md"
echo -e " (create a release here: ${GREEN}https://github.com/enterprisemediawiki/meza/releases/new${NC})"
echo -e "5. Move the ${GREEN}${VERSION_BRANCH}${NC} branch to the same point as the ${GREEN}${BASE_BRANCH}${NC} branch:"
echo -e " ${RED}git fetch"
echo -e " git checkout ${VERSION_BRANCH}"
echo " git merge origin/${BASE_BRANCH} --ff-only"
echo -e " git push origin ${VERSION_BRANCH}${NC}"
echo -e "6. Update ${GREEN}https://www.mediawiki.org/wiki/Meza/Version_history${NC}"
echo -e "7. Announce on ${GREEN}https://riot.im/app/#/room/#mwstake-MEZA:matrix.org${NC}"
echo -e "8. Update pages on ${GREEN}https://mediawiki.org/wiki/Meza${NC}"
echo
rm ${RELEASE_NOTES_FILE}
| enterprisemediawiki/meza | src/scripts/do-release.sh | Shell | mit | 5,100 |
#!/bin/bash
source ./envvars.sh
docker-compose up --build -d
| cforlando/PetAdoption-API | start_services.sh | Shell | mit | 61 |
#!/bin/sh
FileList="\
../source/src/highslide-full.packed.js\
../source/src/highslide.config.js\
../source/src/simi.js\
../source/src/spin.js\
../source/src/load-image.js\
../source/src/utils.js\
../source/src/js.cookie.js\
../source/src/thumbsContainer.js\
../source/src/vkApiWrapper.js\
../source/src/vkAppUtils.js\
../source/src/albumManager.js"
echo Generating production folder
echo &> log.txt
#clean
rm -Rf ../prod/* >> log.txt 2>&1
#resources
cp -R ../source/graphics ../prod/graphics >> log.txt 2>&1
cp -R ../source/images ../prod/images >> log.txt 2>&1
cp -R ../source/src/*.css ../prod/ >> log.txt 2>&1
cp -R ../source/src/*.png ../prod/ >> log.txt 2>&1
cp -R ../source/src/*.jpg ../prod/ >> log.txt 2>&1
cp -R ../source/src/*.html ../prod/ >> log.txt 2>&1
cp -R ../source/src/HackTimer*.js ../prod/ >> log.txt 2>&1
#replace links separate scripts to a single minified script
JSFILES="<!--JSFILES-->(.*)<!--EOF-JSFILES-->"
MINJSFILE='<script src=\"albumManager_min.js\" type=\"text/javascript\" charset=\"utf-8\"></script>'
awk -i inplace -v RS='' "{gsub(/$JSFILES/,\"$MINJSFILE\")}; { print }" ../prod/albumManager3.html >> log.txt 2>&1
#produce minified script
uglifyjs $FileList --compress --mangle --verbose --output ../prod/albumManager_min.js >> log.txt 2>&1
if [ $? -ne 0 ]; then
echo "Error, see log.txt for details"
exit 1
fi
echo Done!
exit 0
| Leonid-SPB/MovePhotosVk | scripts/genprod.sh | Shell | mit | 1,385 |
vim /etc/apt/sources.list
# Ubuntu Sources List Generator: http://repogen.simplylinux.ch/
apt-get update
apt-get upgrade
apt-get dist-upgrade
apt-get autoremove
apt-get clean
dpkg-reconfigure tzdata
passwd
adduser cs
usermod -a -G sudo cs
visudo
# User privilege specification:
# root ALL=(ALL:ALL) ALL
# cs ALL=(ALL:ALL) ALL
nano /etc/ssh/sshd_config
# Port 53434
service ssh restart
# https://www.digitalocean.com/community/tutorials/initial-server-setup-with-ubuntu-14-04
# https://www.digitalocean.com/community/tutorials/initial-server-setup-with-debian-8
sudo apt-get install curl
curl -sL https://deb.nodesource.com/setup | sudo bash -
sudo apt-get install nodejs
sudo npm install -g gulp bower ionic cordova phonegap sails@beta
sudo apt-get install nginx git
sudo nginx -s reload
sudo service nginx restart
# https://www.digitalocean.com/community/tutorials/how-to-set-up-nginx-server-blocks-virtual-hosts-on-ubuntu-14-04-lts
# https://help.ubuntu.com/community/Nginx
| devotg/dev-deb | Ubuntu.sh | Shell | gpl-2.0 | 968 |
#!/bin/sh
# (c) Copyright 2009 - 2010 Xilinx, Inc. All rights reserved.
#
# This file contains confidential and proprietary information
# of Xilinx, Inc. and is protected under U.S. and
# international copyright and other intellectual property
# laws.
#
# DISCLAIMER
# This disclaimer is not a license and does not grant any
# rights to the materials distributed herewith. Except as
# otherwise provided in a valid license issued to you by
# Xilinx, and to the maximum extent permitted by applicable
# law: (1) THESE MATERIALS ARE MADE AVAILABLE "AS IS" AND
# WITH ALL FAULTS, AND XILINX HEREBY DISCLAIMS ALL WARRANTIES
# AND CONDITIONS, EXPRESS, IMPLIED, OR STATUTORY, INCLUDING
# BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, NON-
# INFRINGEMENT, OR FITNESS FOR ANY PARTICULAR PURPOSE; and
# (2) Xilinx shall not be liable (whether in contract or tort,
# including negligence, or under any other theory of
# liability) for any loss or damage of any kind or nature
# related to, arising under or in connection with these
# materials, including for any direct, or any indirect,
# special, incidental, or consequential loss or damage
# (including loss of data, profits, goodwill, or any type of
# loss or damage suffered as a result of any action brought
# by a third party) even if such damage or loss was
# reasonably foreseeable or Xilinx had been advised of the
# possibility of the same.
#
# CRITICAL APPLICATIONS
# Xilinx products are not designed or intended to be fail-
# safe, or for use in any application requiring fail-safe
# performance, such as life-support or safety devices or
# systems, Class III medical devices, nuclear facilities,
# applications related to the deployment of airbags, or any
# other applications that could lead to death, personal
# injury, or severe property or environmental damage
# (individually and collectively, "Critical
# Applications"). Customer assumes the sole risk and
# liability of any use of Xilinx products in Critical
# Applications, subject only to applicable laws and
# regulations governing limitations on product liability.
#
# THIS COPYRIGHT NOTICE AND DISCLAIMER MUST BE RETAINED AS
# PART OF THIS FILE AT ALL TIMES.
#--------------------------------------------------------------------------------
cp ../../../blk_mem_gen_v7_3.mif .
rm -rf simv* csrc DVEfiles AN.DB
echo "Compiling Core VHDL UNISIM/Behavioral model"
vhdlan ../../implement/results/routed.vhd
echo "Compiling Test Bench Files"
vhdlan ../bmg_tb_pkg.vhd
vhdlan ../addr_gen.vhd
vhdlan ../bmg_stim_gen.vhd
vhdlan ../blk_mem_gen_v7_3_synth.vhd
vhdlan ../blk_mem_gen_v7_3_tb.vhd
echo "Elaborating Design"
vcs +neg_tchk +vcs+lic+wait -debug blk_mem_gen_v7_3_tb
echo "Simulating Design"
./simv -ucli -i ucli_commands.key
dve -session vcs_session.tcl
| dimitdim/pineapple | strawberry/fpga/blk_mem_gen_v7_3/simulation/timing/simulate_vcs.sh | Shell | gpl-2.0 | 2,798 |
#! /bin/sh
#
# This script prints all configure-sub.in in src/backends
# directory to standard output. This is meant to be used
# from m4_esyscmd inside configure.ac.
#
# The motivation for this non-standard approach was that
# it allows adding new backends without touching core
# files, which should have simplified the development of
# out-of-tree backends. Now git pretty much removes
# the need for such tricks, but it's still around.
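# A typical (hypothetical) use from configure.ac would look like:
#   m4_esyscmd([build/gen-backends.sh])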
tmpfile="configure.in.$$"
rm -f "$tmpfile"
for sub in src/backends/*/configure-sub.in
do
echo "# vvvvvvvvvvvvvv $sub vvvvvvvvvvvvvv" >>"$tmpfile"
cat "$sub" >>"$tmpfile"
echo "# ^^^^^^^^^^^^^^ $sub ^^^^^^^^^^^^^^" >>"$tmpfile"
echo >>"$tmpfile"
done
cat "$tmpfile"
rm -f "$tmpfile"
| shadeslayer/SyncEvolution | build/gen-backends.sh | Shell | gpl-2.0 | 732 |
#!/bin/sh
. /lib/functions.sh
. ../netifd-proto.sh
init_proto "$@"
proto_dhcp_init_config() {
proto_config_add_string 'ipaddr:ipaddr'
proto_config_add_string 'netmask:ipaddr'
proto_config_add_string 'hostname:hostname'
proto_config_add_string clientid
proto_config_add_string vendorid
proto_config_add_boolean 'broadcast:ipaddr'
proto_config_add_string 'reqopts:list(string)'
proto_config_add_string iface6rd
proto_config_add_string sendopts
proto_config_add_boolean delegate
}
proto_dhcp_setup() {
local config="$1"
local iface="$2"
local ipaddr hostname clientid vendorid broadcast reqopts iface6rd sendopts delegate
json_get_vars ipaddr hostname clientid vendorid broadcast reqopts iface6rd sendopts delegate
local opt dhcpopts
for opt in $reqopts; do
append dhcpopts "-O $opt"
done
for opt in $sendopts; do
append dhcpopts "-x $opt"
done
[ "$broadcast" = 1 ] && broadcast="-B" || broadcast=
[ -n "$clientid" ] && clientid="-x 0x3d:${clientid//:/}" || clientid="-C"
[ -n "$iface6rd" ] && proto_export "IFACE6RD=$iface6rd"
[ "$delegate" = "0" ] && proto_export "IFACE6RD_DELEGATE=0"
proto_export "INTERFACE=$config"
proto_run_command "$config" udhcpc \
-p /var/run/udhcpc-$iface.pid \
-s /lib/netifd/dhcp.script \
-f -t 0 -i "$iface" \
${ipaddr:+-r $ipaddr} \
${hostname:+-H $hostname} \
${vendorid:+-V $vendorid} \
$clientid $broadcast $dhcpopts
}
proto_dhcp_teardown() {
local interface="$1"
proto_kill_command "$interface"
}
add_protocol dhcp
| sprinkler/rainmachine-openwrt-os | package/network/config/netifd/files/lib/netifd/proto/dhcp.sh | Shell | gpl-2.0 | 1,504 |
# git-mergetool--lib is a shell library for common merge tool functions
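# Callers are expected to set TOOL_MODE (diff or merge) and then source this
# library, roughly:
#   TOOL_MODE=diff
#   . git-mergetool--lib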
: ${MERGE_TOOLS_DIR=$(git --exec-path)/mergetools}
IFS='
'
mode_ok () {
if diff_mode
then
can_diff
elif merge_mode
then
can_merge
else
false
fi
}
is_available () {
merge_tool_path=$(translate_merge_tool_path "$1") &&
type "$merge_tool_path" >/dev/null 2>&1
}
list_config_tools () {
section=$1
line_prefix=${2:-}
git config --get-regexp $section'\..*\.cmd' |
while read -r key value
do
toolname=${key#$section.}
toolname=${toolname%.cmd}
printf "%s%s\n" "$line_prefix" "$toolname"
done
}
show_tool_names () {
condition=${1:-true} per_line_prefix=${2:-} preamble=${3:-}
not_found_msg=${4:-}
extra_content=${5:-}
shown_any=
( cd "$MERGE_TOOLS_DIR" && ls ) | {
while read scriptname
do
setup_tool "$scriptname" 2>/dev/null
# We need an actual line feed here
variants="$variants
$(list_tool_variants)"
done
variants="$(echo "$variants" | sort -u)"
for toolname in $variants
do
if setup_tool "$toolname" 2>/dev/null &&
(eval "$condition" "$toolname")
then
if test -n "$preamble"
then
printf "%s\n" "$preamble"
preamble=
fi
shown_any=yes
printf "%s%s\n" "$per_line_prefix" "$toolname"
fi
done
if test -n "$extra_content"
then
if test -n "$preamble"
then
# Note: no '\n' here since we don't want a
# blank line if there is no initial content.
printf "%s" "$preamble"
preamble=
fi
shown_any=yes
printf "\n%s\n" "$extra_content"
fi
if test -n "$preamble" && test -n "$not_found_msg"
then
printf "%s\n" "$not_found_msg"
fi
test -n "$shown_any"
}
}
diff_mode () {
test "$TOOL_MODE" = diff
}
merge_mode () {
test "$TOOL_MODE" = merge
}
gui_mode () {
test "$GIT_MERGETOOL_GUI" = true
}
translate_merge_tool_path () {
echo "$1"
}
check_unchanged () {
if test "$MERGED" -nt "$BACKUP"
then
return 0
else
while true
do
echo "$MERGED seems unchanged."
printf "Was the merge successful [y/n]? "
read answer || return 1
case "$answer" in
y*|Y*) return 0 ;;
n*|N*) return 1 ;;
esac
done
fi
}
valid_tool () {
setup_tool "$1" && return 0
cmd=$(get_merge_tool_cmd "$1")
test -n "$cmd"
}
setup_user_tool () {
merge_tool_cmd=$(get_merge_tool_cmd "$tool")
test -n "$merge_tool_cmd" || return 1
diff_cmd () {
( eval $merge_tool_cmd )
}
merge_cmd () {
( eval $merge_tool_cmd )
}
list_tool_variants () {
echo "$tool"
}
}
setup_tool () {
tool="$1"
# Fallback definitions, to be overridden by tools.
can_merge () {
return 0
}
can_diff () {
return 0
}
diff_cmd () {
return 1
}
merge_cmd () {
return 1
}
translate_merge_tool_path () {
echo "$1"
}
list_tool_variants () {
echo "$tool"
}
# Most tools' exit codes cannot be trusted, so by default we ignore
# their exit code and check the merged file's modification time in
# check_unchanged() to determine whether or not the merge was
# successful. The return value from run_merge_cmd, by default, is
# determined by check_unchanged().
#
# When a tool's exit code can be trusted then the return value from
# run_merge_cmd is simply the tool's exit code, and check_unchanged()
# is not called.
#
# The return value of exit_code_trustable() tells us whether or not we
# can trust the tool's exit code.
#
# User-defined and built-in tools default to false.
# Built-in tools advertise that their exit code is trustable by
# redefining exit_code_trustable() to true.
exit_code_trustable () {
false
}
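# A built-in tool file can opt in by redefining it, e.g. (sketch):
# exit_code_trustable () {
# true
# }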
if test -f "$MERGE_TOOLS_DIR/$tool"
then
. "$MERGE_TOOLS_DIR/$tool"
elif test -f "$MERGE_TOOLS_DIR/${tool%[0-9]}"
then
. "$MERGE_TOOLS_DIR/${tool%[0-9]}"
else
setup_user_tool
return $?
fi
# Now let the user override the default command for the tool. If
# they have not done so then this will return 1 which we ignore.
setup_user_tool
if ! list_tool_variants | grep -q "^$tool$"
then
return 1
fi
if merge_mode && ! can_merge
then
echo "error: '$tool' can not be used to resolve merges" >&2
return 1
elif diff_mode && ! can_diff
then
echo "error: '$tool' can only be used to resolve merges" >&2
return 1
fi
return 0
}
get_merge_tool_cmd () {
merge_tool="$1"
if diff_mode
then
git config "difftool.$merge_tool.cmd" ||
git config "mergetool.$merge_tool.cmd"
else
git config "mergetool.$merge_tool.cmd"
fi
}
trust_exit_code () {
if git config --bool "mergetool.$1.trustExitCode"
then
:; # OK
elif exit_code_trustable
then
echo true
else
echo false
fi
}
# Entry point for running tools
run_merge_tool () {
# If GIT_PREFIX is empty then we cannot use it in tools
# that expect to be able to chdir() to its value.
GIT_PREFIX=${GIT_PREFIX:-.}
export GIT_PREFIX
merge_tool_path=$(get_merge_tool_path "$1") || exit
base_present="$2"
# Bring tool-specific functions into scope
setup_tool "$1" || return 1
if merge_mode
then
run_merge_cmd "$1"
else
run_diff_cmd "$1"
fi
}
# Run either a configured or built-in diff tool
run_diff_cmd () {
diff_cmd "$1"
}
# Run either a configured or built-in merge tool
run_merge_cmd () {
mergetool_trust_exit_code=$(trust_exit_code "$1")
if test "$mergetool_trust_exit_code" = "true"
then
merge_cmd "$1"
else
touch "$BACKUP"
merge_cmd "$1"
check_unchanged
fi
}
list_merge_tool_candidates () {
if merge_mode
then
tools="tortoisemerge"
else
tools="kompare"
fi
if test -n "$DISPLAY"
then
if test -n "$GNOME_DESKTOP_SESSION_ID"
then
tools="meld opendiff kdiff3 tkdiff xxdiff $tools"
else
tools="opendiff kdiff3 tkdiff xxdiff meld $tools"
fi
tools="$tools gvimdiff diffuse diffmerge ecmerge"
tools="$tools p4merge araxis bc codecompare"
tools="$tools smerge"
fi
case "${VISUAL:-$EDITOR}" in
*nvim*)
tools="$tools nvimdiff vimdiff emerge"
;;
*vim*)
tools="$tools vimdiff nvimdiff emerge"
;;
*)
tools="$tools emerge vimdiff nvimdiff"
;;
esac
}
show_tool_help () {
tool_opt="'git ${TOOL_MODE}tool --tool=<tool>'"
tab=' '
LF='
'
any_shown=no
cmd_name=${TOOL_MODE}tool
config_tools=$({
diff_mode && list_config_tools difftool "$tab$tab"
list_config_tools mergetool "$tab$tab"
} | sort)
extra_content=
if test -n "$config_tools"
then
extra_content="${tab}user-defined:${LF}$config_tools"
fi
show_tool_names 'mode_ok && is_available' "$tab$tab" \
"$tool_opt may be set to one of the following:" \
"No suitable tool for 'git $cmd_name --tool=<tool>' found." \
"$extra_content" &&
any_shown=yes
show_tool_names 'mode_ok && ! is_available' "$tab$tab" \
"${LF}The following tools are valid, but not currently available:" &&
any_shown=yes
if test "$any_shown" = yes
then
echo
echo "Some of the tools listed above only work in a windowed"
echo "environment. If run in a terminal-only session, they will fail."
fi
exit 0
}
guess_merge_tool () {
list_merge_tool_candidates
cat >&2 <<-EOF
This message is displayed because '$TOOL_MODE.tool' is not configured.
See 'git ${TOOL_MODE}tool --tool-help' or 'git help config' for more details.
'git ${TOOL_MODE}tool' will now attempt to use one of the following tools:
$tools
EOF
# Loop over each candidate and stop when a valid merge tool is found.
IFS=' '
for tool in $tools
do
is_available "$tool" && echo "$tool" && return 0
done
echo >&2 "No known ${TOOL_MODE} tool is available."
return 1
}
get_configured_merge_tool () {
keys=
if diff_mode
then
if gui_mode
then
keys="diff.guitool merge.guitool diff.tool merge.tool"
else
keys="diff.tool merge.tool"
fi
else
if gui_mode
then
keys="merge.guitool merge.tool"
else
keys="merge.tool"
fi
fi
merge_tool=$(
IFS=' '
for key in $keys
do
selected=$(git config $key)
if test -n "$selected"
then
echo "$selected"
return
fi
done)
if test -n "$merge_tool" && ! valid_tool "$merge_tool"
then
echo >&2 "git config option $TOOL_MODE.${gui_prefix}tool set to unknown tool: $merge_tool"
echo >&2 "Resetting to default..."
return 1
fi
echo "$merge_tool"
}
get_merge_tool_path () {
# A merge tool has been set, so verify that it's valid.
merge_tool="$1"
if ! valid_tool "$merge_tool"
then
echo >&2 "Unknown merge tool $merge_tool"
exit 1
fi
if diff_mode
then
merge_tool_path=$(git config difftool."$merge_tool".path ||
git config mergetool."$merge_tool".path)
else
merge_tool_path=$(git config mergetool."$merge_tool".path)
fi
if test -z "$merge_tool_path"
then
merge_tool_path=$(translate_merge_tool_path "$merge_tool")
fi
if test -z "$(get_merge_tool_cmd "$merge_tool")" &&
! type "$merge_tool_path" >/dev/null 2>&1
then
echo >&2 "The $TOOL_MODE tool $merge_tool is not available as"\
"'$merge_tool_path'"
exit 1
fi
echo "$merge_tool_path"
}
get_merge_tool () {
is_guessed=false
# Check if a merge tool has been configured
merge_tool=$(get_configured_merge_tool)
# Try to guess an appropriate merge tool if no tool has been set.
if test -z "$merge_tool"
then
merge_tool=$(guess_merge_tool) || exit
is_guessed=true
fi
echo "$merge_tool"
test "$is_guessed" = false
}
mergetool_find_win32_cmd () {
executable=$1
sub_directory=$2
# Use $executable if it exists in $PATH
if type -p "$executable" >/dev/null 2>&1
then
printf '%s' "$executable"
return
fi
# Look for executable in the typical locations
for directory in $(env | grep -Ei '^PROGRAM(FILES(\(X86\))?|W6432)=' |
cut -d '=' -f 2- | sort -u)
do
if test -n "$directory" && test -x "$directory/$sub_directory/$executable"
then
printf '%s' "$directory/$sub_directory/$executable"
return
fi
done
printf '%s' "$executable"
}
| tacker66/git | git-mergetool--lib.sh | Shell | gpl-2.0 | 9,701 |
#!/bin/sh
# Calls gnome-autogen to build Makefiles and run configure
srcdir=`dirname $0`
test -z "$srcdir" && srcdir=.
PKG_NAME="ocrfeeder"
(test -f $srcdir/configure.ac) || {
echo -n "**Error**: Directory "\`$srcdir\'" does not look like the"
echo " top-level $PKG_NAME directory"
exit 1
}
which gnome-autogen.sh || {
echo "You need to install gnome-common module and make"
echo "sure the gnome-autogen.sh script is in your \$PATH."
exit 1
}
USE_GNOME2_MACROS=1 . gnome-autogen.sh
| thuydang/ocrfeeder | autogen.sh | Shell | gpl-3.0 | 511 |
#!/bin/bash
# flags
set -e
#.tmux.conf
cp tmux/.tmux.conf ~
exit 0
| randradas/myenv | tmux/install.sh | Shell | gpl-3.0 | 69 |
#!/bin/sh
#
# Test bandwidth of webcam stream and that it is working
# Press ^C just once and wait to exit
#
sudo mjpg_streamer -b -i "/usr/local/lib/input_uvc.so -r 640x360 -f 10" -o "/usr/local/lib/output_http.so -p 8081"
wget -O /dev/null http://localhost:8081/?action=stream
sudo killall mjpg_streamer
| jsalin/catrover | test_webcam.sh | Shell | gpl-3.0 | 296 |
#!/bin/sh
F=$1
YAY="Success!"
echo "$0: grepping for $YAY in file $F";
R=`grep "$YAY" "$F"`
if [ "$R" = "" ]; then
exit 1;
else
exit 0;
fi
| batmancn/MyLife | works/SipWrapper/sipstack/resiprocate-1.7/nICEr/src/test/success_check.sh | Shell | gpl-3.0 | 141 |
#!/bin/sh
test -n "$srcdir" || srcdir=`dirname "$0"`
test -n "$srcdir" || srcdir=.
olddir=`pwd`
cd $srcdir
AUTORECONF=`which autoreconf`
if test -z $AUTORECONF; then
echo "*** No autoreconf found, please install it ***"
exit 1
fi
set -e
mkdir -p m4
GTKDOCIZE=$(which gtkdocize 2>/dev/null || true)
if test -z "$GTKDOCIZE"; then
echo "You don't have gtk-doc installed, and thus won't be able to generate the documentation."
rm -f gtk-doc.make
cat > gtk-doc.make <<EOF
EXTRA_DIST =
CLEANFILES =
EOF
else
gtkdocize
fi
cd $olddir
if ! test -f libglnx/README.md || ! test -f bsdiff/README.md; then
git submodule update --init
fi
# Workaround automake bug with subdir-objects and computed paths
sed -e 's,$(libglnx_srcpath),'${srcdir}/libglnx,g < libglnx/Makefile-libglnx.am >libglnx/Makefile-libglnx.am.inc
sed -e 's,$(libbsdiff_srcpath),'${srcdir}/bsdiff,g < bsdiff/Makefile-bsdiff.am >bsdiff/Makefile-bsdiff.am.inc
autoreconf --force --install --verbose
test -n "$NOCONFIGURE" || "$srcdir/configure" "$@"
| sujitfulse/ostree | autogen.sh | Shell | lgpl-2.1 | 1,063 |
#!/bin/bash
set -euxo pipefail
: ${ANSIBLE_MAJOR_VERSION:=2.10}
/usr/bin/python -m pip uninstall -y ansible ansible-base ansible-core
/usr/bin/python -m pip install -r tests/requirements-${ANSIBLE_MAJOR_VERSION}.txt
mkdir -p /.ssh
mkdir -p cluster-dump
mkdir -p $HOME/.ssh
ansible-playbook --version
# in some cases we may need to bring in collections or roles from ansible-galaxy
# to compensate for missing functionality in older ansible versions
if [ -f requirements-${ANSIBLE_MAJOR_VERSION}.yml ] ; then
ansible-galaxy role install -r requirements-${ANSIBLE_MAJOR_VERSION}.yml
ansible-galaxy collection install -r requirements-${ANSIBLE_MAJOR_VERSION}.yml
fi
| kubernetes-sigs/kubespray | tests/scripts/testcases_prepare.sh | Shell | apache-2.0 | 670 |
#!/bin/bash
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Note: There is a similar test in system_test.sh
# Test /mod_pagespeed_message exists.
start_test Check if /mod_pagespeed_message page exists.
OUT=$($WGET --save-headers -q -O - $MESSAGE_URL | head -1)
check_200_http_response "$OUT"
| pagespeed/mod_pagespeed | pagespeed/apache/system_tests/mod_pagespeed_message.sh | Shell | apache-2.0 | 795 |
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
echo "=============== Web APP Building Start ==============="
echo "Environment Check..."
# Pre-build check
if [ -z "$(command -v git)" ]
then
echo "git not installed!"
exit 1
fi
if [ -z "$(command -v npm)" ]
then
echo "npm not installed!"
exit 1
fi
echo "Environment Check...Pass"
# npm install
cd src/main/webapp
echo "npm install..."
npm install
# grunt build
echo "grunt building..."
npm run grunt
echo "================ Web APP Building End ================"
| sunlibin/incubator-eagle | eagle-webservice/ui-build.sh | Shell | apache-2.0 | 1,264 |
#!/bin/bash
# (bash is required: this script uses += string appends and "type -t")
#
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Checks that an nginx release builds and passes tests. This ensures that the
# PSOL tarball is good, and that it's compatible with the nginx code we intend
# to release.
#
# Usage:
#
# verify_nginx_release.sh [version] [binary tarball]
# verify_nginx_release.sh 1.10.33.6 /path/to/1.10.33.6.tar.gz
#
# To get the binary tarball, run build_psol_tarball.sh
set -e # exit script if any command returns an error
set -u # exit the script if any variable is uninitialized
if [ $# != 2 ]; then
echo "Usage: $0 version /path/to/psol-binary-tarball"
exit 1
fi
VERSION="$1"
TARBALL="$2"
# Absoluteify $TARBALL if it does not start with /
if [ -n "$TARBALL" -a "${TARBALL#/}" = "$TARBALL" ]; then
TARBALL="$PWD/$TARBALL"
fi
if [ ! -f "$TARBALL" ]; then
echo "$TARBALL should be a file"
exit 1
fi
die() {
echo "verify_nginx_release.sh: $@"
cd
rm -rf "$WORKDIR"
exit 1
}
WORKDIR=$(mktemp -d)
cd "$WORKDIR"
mkdir mod_pagespeed
cd mod_pagespeed
git clone https://github.com/apache/incubator-pagespeed-mod.git src/
cd src/
git checkout $VERSION
cd $WORKDIR
git clone https://github.com/apache/incubator-pagespeed-ngx.git ngx_pagespeed
cd ngx_pagespeed
git checkout release-$VERSION-beta
# We now include the url for the PSOL binary that goes with a release in a
# separate file.
if [ ! -e PSOL_BINARY_URL ]; then
echo "$PWD/PSOL_BINARY_URL is missing"
exit 1
else
predicted_psol_binary_url="https://dl.google.com/dl/page-speed/psol/"
predicted_psol_binary_url+="$VERSION.tar.gz"
psol_binary_url=$(cat PSOL_BINARY_URL)
if [ "$predicted_psol_binary_url" != "$psol_binary_url" ]; then
echo "PSOL_BINARY_URL is wrong; did you forget to update it? Got:"
echo "$psol_binary_url"
exit 1
fi
fi
tar -xzf "$TARBALL"
cd $WORKDIR
git clone https://github.com/FRiCKLE/ngx_cache_purge.git
# If ldconfig is not found, add /sbin to the path. ldconfig is required
# for openresty with luajit.
if ! type -t ldconfig >/dev/null && [ -e /sbin/ldconfig ]; then
PATH=$PATH:/sbin
fi
wget https://openresty.org/download/openresty-1.9.7.3.tar.gz
tar xzvf openresty-*.tar.gz
cd openresty-*/
./configure --with-luajit
make
cd $WORKDIR
wget http://nginx.org/download/nginx-1.9.12.tar.gz
for is_debug in debug release; do
cd $WORKDIR
if [ -d nginx ]; then
rm -rf nginx/
fi
tar -xzf nginx-1.9.12.tar.gz
mv nginx-1.9.12 nginx
cd nginx/
nginx_root="$WORKDIR/$is_debug/"
extra_args=""
if [ "$is_debug" = "debug" ]; then
extra_args+=" --with-debug"
fi
if [ -x /usr/lib/gcc-mozilla/bin/gcc ]; then
PATH=/usr/lib/gcc-mozilla/bin:$PATH
extra_args+=" --with-cc=/usr/lib/gcc-mozilla/bin/gcc --with-ld-opt=-static-libstdc++"
fi
./configure \
--prefix="$nginx_root" \
--add-module="$WORKDIR/ngx_pagespeed" \
--add-module="$WORKDIR/ngx_cache_purge" \
--add-module="$WORKDIR/openresty-*/build/ngx_devel_kit-*/" \
--add-module="$WORKDIR/openresty-*/build/set-misc-nginx-*/" \
--add-module="$WORKDIR/openresty-*/build/headers-more-nginx-module-*/" \
--with-ipv6 \
--with-http_v2_module \
$extra_args
make install
cd "$WORKDIR"
USE_VALGRIND=false \
TEST_NATIVE_FETCHER=false \
TEST_SERF_FETCHER=true \
ngx_pagespeed/test/run_tests.sh 8060 8061 \
$WORKDIR/mod_pagespeed \
$nginx_root/sbin/nginx \
modpagespeed.com
cd
done
rm -rf "$WORKDIR"
echo "builds and tests completed successfully for both debug and release"
| pagespeed/mod_pagespeed | install/verify_nginx_release.sh | Shell | apache-2.0 | 4,017 |
#!/bin/bash
# Bootstrap and exit if KOLLA_BOOTSTRAP variable is set. This catches all cases
# of the KOLLA_BOOTSTRAP variable being set, including empty.
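# (For a plain variable, "${!VAR[@]}" expands to its array index "0" when VAR
# is set, even to the empty string, and to nothing when VAR is unset.)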
if [[ "${!KOLLA_BOOTSTRAP[@]}" ]]; then
# NOTE(SamYaple): Static gpt partcodes
CEPH_JOURNAL_TYPE_CODE="45B0969E-9B03-4F30-B4C6-B4B80CEFF106"
CEPH_OSD_TYPE_CODE="4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D"
# Wait for ceph quorum before proceeding
ceph quorum_status
if [[ "${USE_EXTERNAL_JOURNAL}" == "False" ]]; then
# Formatting disk for ceph
sgdisk --zap-all -- "${OSD_DEV}"
sgdisk --new=2:1M:5G -- "${JOURNAL_DEV}"
sgdisk --largest-new=1 -- "${OSD_DEV}"
# NOTE(SamYaple): This command may throw errors that we can safely ignore
partprobe || true
fi
OSD_ID=$(ceph osd create)
OSD_DIR="/var/lib/ceph/osd/ceph-${OSD_ID}"
mkdir -p "${OSD_DIR}"
if [[ "${OSD_FILESYSTEM}" == "btrfs" ]]; then
mkfs.btrfs -f "${OSD_PARTITION}"
elif [[ "${OSD_FILESYSTEM}" == "ext4" ]]; then
mkfs.ext4 "${OSD_PARTITION}"
else
mkfs.xfs -f "${OSD_PARTITION}"
fi
mount "${OSD_PARTITION}" "${OSD_DIR}"
# This will throw an error about no key existing. That is normal. It then
# creates the key in the next step.
ceph-osd -i "${OSD_ID}" --mkfs --osd-journal="${JOURNAL_PARTITION}" --mkkey
ceph auth add "osd.${OSD_ID}" osd 'allow *' mon 'allow profile osd' -i "${OSD_DIR}/keyring"
umount "${OSD_PARTITION}"
if [[ "${!CEPH_CACHE[@]}" ]]; then
CEPH_ROOT_NAME=cache
fi
# These commands only need to be run once per host but are safe to run
# repeatedly. This can be improved later or if any problems arise.
ceph osd crush add-bucket "${HOSTNAME}${CEPH_ROOT_NAME:+-${CEPH_ROOT_NAME}}" host
ceph osd crush move "${HOSTNAME}${CEPH_ROOT_NAME:+-${CEPH_ROOT_NAME}}" root=${CEPH_ROOT_NAME:-default}
# Adding osd to crush map
ceph osd crush add "${OSD_ID}" "${OSD_INITIAL_WEIGHT}" host="${HOSTNAME}${CEPH_ROOT_NAME:+-${CEPH_ROOT_NAME}}"
# Setting partition name based on ${OSD_ID}
sgdisk "--change-name=${OSD_PARTITION_NUM}:KOLLA_CEPH_DATA_${OSD_ID}" "--typecode=${OSD_PARTITION_NUM}:${CEPH_OSD_TYPE_CODE}" -- "${OSD_DEV}"
sgdisk "--change-name=${JOURNAL_PARTITION_NUM}:KOLLA_CEPH_DATA_${OSD_ID}_J" "--typecode=${JOURNAL_PARTITION_NUM}:${CEPH_JOURNAL_TYPE_CODE}" -- "${JOURNAL_DEV}"
exit 0
fi
OSD_DIR="/var/lib/ceph/osd/ceph-${OSD_ID}"
ARGS="-i ${OSD_ID} --osd-journal ${JOURNAL_PARTITION} -k ${OSD_DIR}/keyring"
| rthallisey/kolla-kubernetes-personal | kolla/docker/ceph/ceph-osd/extend_start.sh | Shell | apache-2.0 | 2,545 |
DEBUG=false
info () { echo >&2 " * $*" ; }
debug () { ! ${DEBUG} || echo >&2 " ~ $*" ; }
error () { echo >&2 "ERROR: $*" ; false ; }
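# get_image_id ARG: ARG has the form "<path>:local" or "<version>:remote";
# prints the local image id/name, or pulls the remote image and prints its name.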
get_image_id ()
{
local old_IFS=$IFS
local result
# split "$1" into "$1 $2 .." on colons
IFS=:
set -- $1
IFS=$old_IFS
case $2 in
local)
# Default to $IMAGE_NAME if it is set since .image-id might not exist
echo "${IMAGE_NAME-$(cat "$1"/.image-id)}"
;;
remote)
local version=${1//\./}
case $OS in
rhel7)
ns=rhscl
if test "$version" -eq 92; then
ns=openshift3
fi
image=registry.redhat.io/$ns/postgresql-${version}-rhel7
;;
centos7)
ns=centos7
if test "$version" -eq 92; then
ns=openshift
fi
local image=quay.io/$ns/postgresql-${1//\./}-centos7
;;
rhel8)
ns=rhel8
local image=registry.redhat.io/$ns/postgresql-${version}
;;
esac
docker pull "$image" >/dev/null
echo "$image"
;;
esac
}
data_pagila_create ()
{
debug "initializing pagila database"
CID="$CID" ./test/pagila.sh
}
data_pagila_check ()
{
debug "doing pagila check"
local exp_output='28
16
2'
# Deliberately moving heredoc into the container, otherwise it does not work
# in podman 1.6.x due to https://bugzilla.redhat.com/show_bug.cgi?id=1827324
local output=$(docker exec -i "$CID" bash -c "psql -tA <<EOF
select count(*) from information_schema.tables where table_schema = 'public';
select count(*) from information_schema.triggers;
select count(*) from staff;
EOF"
)
test "$exp_output" = "$output" \
|| error "Unexpected output: '$output', expected: '$exp_output'"
}
data_empty_create ()
{
# Deliberately moving heredoc into the container, otherwise it does not work
# in podman 1.6.x due to https://bugzilla.redhat.com/show_bug.cgi?id=1827324
docker exec -i "$CID" bash -c "psql &>/dev/null <<EOF
create table blah (id int);
insert into blah values (1), (2), (3);
EOF"
}
data_empty_check ()
{
debug "doing empty check"
local exp_output='1
2
3'
# Deliberately moving heredoc into the container, otherwise it does not work
# in podman 1.6.x due to https://bugzilla.redhat.com/show_bug.cgi?id=1827324
local output=$(docker exec -i "$CID" bash -c "psql -tA <<EOF
select * from blah order by id;
EOF"
)
test "$exp_output" = "$output" || error "Unexpected output '$output'"
}
# wait_for_postgres CID
wait_for_postgres ()
{
local cid=$1
local stop_after=${2-30}
local counter=0
debug "Waiting for PG server to come up in $cid container"
while test $counter -lt "$stop_after"
do
# the "-h localhost" is crucial here as the container runs postgresql
# server twice and we don't want to connect to the first process (see
# run-postgresql script)
output=$(docker exec -i "$cid" bash -c \
"psql -h localhost -tA -c 'select 1;' 2>/dev/null || :")
case $output in
1*) return ;;
"") ;;
*) echo "$output" ; false ;;
esac
sleep 1
counter=$(( counter + 1 ))
done
}
# version2number VERSION [DEPTH] [WIDTH]
# --------------------------------------
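# Prints VERSION as a zero-padded number, e.g. version2number 9.6 prints
# 090600 with the default DEPTH=3 and WIDTH=2.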
version2number ()
{
local old_IFS=$IFS
local depth=${2-3} width=${3-2} part
IFS='.'
set -- $1
while test $depth -ge 1; do
depth=$(( depth - 1 ))
part=${1-0} ; shift || :
printf "%0${width}d" "$part"
done
IFS=$old_IFS
}
# container_ip CONTAINER_ID
# -------------------------
container_ip()
{
docker inspect --format='{{.NetworkSettings.IPAddress}}' "$1"
}
# vi: set ft=sh
| openshift/postgresql | test/pg-test-lib.sh | Shell | apache-2.0 | 3,928 |
#!/bin/bash
if [ "$JAVA_HOME" = "" ] ; then
echo "ERROR: JAVA_HOME not found in your environment."
echo
echo "Please, set the JAVA_HOME variable in your environment to match the"
echo "location of the Java Virtual Machine you want to use."
exit 1
fi
if [ -z "$EXIST_HOME" ]; then
P=$(dirname $0)
if test "$P" = "."
then
EXIST_HOME="`pwd`"
else
EXIST_HOME="$P"
fi
fi
ANT_HOME="$EXIST_HOME/tools/ant"
LOCALCLASSPATH=$CLASSPATH:$ANT_HOME/lib/ant-launcher.jar:$ANT_HOME/lib/junit-4.4.jar:.
JAVA_OPTS="-Dant.home=$ANT_HOME -Dexist.home=$EXIST_HOME"
echo Starting Ant...
echo
"$JAVA_HOME/bin/java" -Xms64000K -Xmx512000K $JAVA_OPTS -classpath "$LOCALCLASSPATH" org.apache.tools.ant.launch.Launcher "$@"
| NCIP/cadsr-cgmdr-nci-uk | tools/XFormsFilter/build.sh | Shell | bsd-3-clause | 746 |
#!/usr/bin/env bash
# Runs test for each package
exitcode=0
for PACKAGE in $(cat .scripts/RELEASABLE_PACKAGES) ; do
make test $PACKAGE || exitcode=$?
done
exit $exitcode
| usm4n/cyclejs | .scripts/test-all.sh | Shell | mit | 176 |
#!/usr/bin/env bash
set -x
set -e
tag=2017-GA
docker pull metaskills/mssql-server-linux-tinytds:$tag
container=$(docker ps -a -q --filter ancestor=metaskills/mssql-server-linux-tinytds:$tag)
if [[ -z $container ]]; then
docker run -p 1433:1433 -d metaskills/mssql-server-linux-tinytds:$tag && sleep 10
exit
fi
running=$(docker ps -q --filter ancestor=metaskills/mssql-server-linux-tinytds:$tag)
if [[ -z $running ]]; then
# Container exists (found above via "docker ps -a") but is stopped: start it.
docker start $container && sleep 10
fi
| aharpervc/tiny_tds | test/bin/setup.sh | Shell | mit | 476 |
#!/bin/bash
# (bash is required: this script defines functions with the "function" keyword)
set -x
function system {
"$@"
if [ $? -ne 0 ]; then
echo "make.sh: unsuccessful command $@"
echo "abort!"
exit 1
fi
}
if [ $# -eq 0 ]; then
echo 'bash make.sh slides1|slides2'
exit 1
fi
name=$1
rm -f *.tar.gz
opt="--encoding=utf-8"
opt=
rm -f *.aux
# Plain HTML documents
html=${name}
system doconce format html $name --pygments_html_style=default --html_style=bloodish --html_links_in_new_window --html_output=$html $opt
system doconce split_html $html.html --method=space10
# Bootstrap style
html=${name}-bs
system doconce format html $name --html_style=bootstrap --pygments_html_style=default --html_admon=bootstrap_panel --html_output=$html $opt
#system doconce split_html $html.html --method=split --pagination --nav_button=bottom
# IPython notebook
system doconce format ipynb $name $opt
# Ordinary plain LaTeX document
system doconce format pdflatex $name --print_latex_style=trac --latex_admon=paragraph $opt
system doconce ptex2tex $name envir=print
# Add special packages
doconce subst "% Add user's preamble" "\g<1>\n\\usepackage{simplewick}" $name.tex
doconce replace 'section{' 'section*{' $name.tex
pdflatex -shell-escape $name
pdflatex -shell-escape $name
mv -f $name.pdf ${name}.pdf
cp $name.tex ${name}.tex
# Publish
dest=../../../../Projects/2018
if [ ! -d $dest/$name ]; then
mkdir $dest/$name
mkdir $dest/$name/pdf
mkdir $dest/$name/html
mkdir $dest/$name/ipynb
fi
cp ${name}*.tex $dest/$name/pdf
cp ${name}*.pdf $dest/$name/pdf
cp -r ${name}*.html ._${name}*.html $dest/$name/html
# Figures: cannot just copy link, need to physically copy the files
if [ -d fig-${name} ]; then
if [ ! -d $dest/$name/html/fig-$name ]; then
mkdir $dest/$name/html/fig-$name
fi
cp -r fig-${name}/* $dest/$name/html/fig-$name
fi
cp ${name}.ipynb $dest/$name/ipynb
ipynb_tarfile=ipynb-${name}-src.tar.gz
if [ ! -f ${ipynb_tarfile} ]; then
cat > README.txt <<EOF
This IPython notebook ${name}.ipynb does not require any additional
programs.
EOF
tar czf ${ipynb_tarfile} README.txt
fi
cp ${ipynb_tarfile} $dest/$name/ipynb
|
CompPhysics/ComputationalPhysics2
|
doc/src/Projects/2018/Project2/make.sh
|
Shell
|
cc0-1.0
| 2,062 |
#!/bin/bash
#set -x
set -e
. $HOME/freeswan-regress-env.sh
recipients=`echo $NIGHTLY_WATCHERS | sed -e 's/,/ /g'`
#recipients='[email protected] [email protected]'
tmpfile=/tmp/msg$$
cat - >$tmpfile
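# Split the message read from stdin at the first blank line:
# '1,/^$/p' prints from line 1 through the first blank line (the headers),
# '/^$/,$p' prints from that blank line to EOF (the body to be encrypted).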
sed -n -e '1,/^$/p' $tmpfile >$tmpfile.headers
sed -n -e '/^$/,$p' $tmpfile >$tmpfile.body
# encrypt body
#gpg --encrypt --armor -r [email protected] --batch --yes $tmpfile.body
# reset home just in case.
HOME=/freeswan/users/build export HOME
PGPPATH=$HOME/.pgp export PGPPATH
pgp -eat $tmpfile.body $recipients
(
cat $tmpfile.headers
echo
cat $tmpfile.body.asc
) | cat | /usr/sbin/sendmail -t
rm -f $tmpfile $tmpfile.headers $tmpfile.body $tmpfile.body.asc
|
y-trudeau/openswan-patch-meraki
|
testing/utils/teammail-sample.sh
|
Shell
|
gpl-2.0
| 678 |
# ditz bash completion
#
# author: Christian Garbs
#
# based on bzr.simple by Martin Pool
_ditz()
{
local cur=${COMP_WORDS[COMP_CWORD]}
if [ $COMP_CWORD -eq 1 ]; then
# no command yet, show all commands
COMPREPLY=( $( compgen -W "$(ditz --commands)" -- $cur ) )
else
unset COMP_WORDS[COMP_CWORD] # remove last
unset COMP_WORDS[0] # remove first
# add options if applicable...
local options
if [ "${cur:0:1}" = '-' ]; then
# ...but only if at least a dash is given
case "${COMP_WORDS[1]}" in
add|add_reference|add_release|assign|close|comment|release|set_component|start|stop|unassign)
options="--comment --no-comment"
;;
edit)
options="--comment --no-comment --silent"
;;
esac
fi
        # let ditz parse the command line and print available completions, then append the options from above
COMPREPLY=( $( compgen -W "$(ditz "${COMP_WORDS[@]}" '<options>' 2>/dev/null) $options" -- $cur ) )
fi
}
complete -F _ditz -o default ditz
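# To try the completion in the current shell (assumes ditz is on PATH):
#   source contrib/completion/ditz.bash
#   ditz <TAB><TAB>    # lists all ditz commands
#   ditz close -<TAB>  # offers --comment / --no-comment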
|
moro/ditz
|
contrib/completion/ditz.bash
|
Shell
|
gpl-3.0
| 1,010 |
#!/usr/bin/env bash
#set -o nounset #exit if an unset variable is used
set -o errexit #exit on any single command fail
# find voltdb binaries in either installation or distribution directory.
if [ -n "$(which voltdb 2> /dev/null)" ]; then
VOLTDB_BIN=$(dirname "$(which voltdb)")
else
VOLTDB_BIN="$(dirname $(dirname $(pwd)))/bin"
echo "The VoltDB scripts are not in your PATH."
echo "For ease of use, add the VoltDB bin directory: "
echo
echo $VOLTDB_BIN
echo
echo "to your PATH."
echo
fi
# move voltdb commands into path for this script
PATH=$VOLTDB_BIN:$PATH
# installation layout has all libraries in $VOLTDB_ROOT/lib/voltdb
if [ -d "$VOLTDB_BIN/../lib/voltdb" ]; then
VOLTDB_BASE=$(dirname "$VOLTDB_BIN")
VOLTDB_LIB="$VOLTDB_BASE/lib/voltdb"
VOLTDB_VOLTDB="$VOLTDB_LIB"
# distribution layout has libraries in separate lib and voltdb directories
else
VOLTDB_BASE=$(dirname "$VOLTDB_BIN")
VOLTDB_LIB="$VOLTDB_BASE/lib"
VOLTDB_VOLTDB="$VOLTDB_BASE/voltdb"
fi
APPCLASSPATH=$CLASSPATH:$({ \
\ls -1 "$VOLTDB_VOLTDB"/voltdb-*.jar; \
\ls -1 "$VOLTDB_LIB"/*.jar; \
\ls -1 "$VOLTDB_LIB"/kafka*.jar; \
\ls -1 "$VOLTDB_LIB"/extension/*.jar; \
} 2> /dev/null | paste -sd ':' - )
CLIENTCLASSPATH=client.jar:$CLASSPATH:$({ \
\ls -1 "$VOLTDB_VOLTDB"/voltdbclient-*.jar; \
\ls -1 "$VOLTDB_LIB"/kafka*.jar; \
\ls -1 "$VOLTDB_LIB"/slf4j-api-1.6.2.jar; \
} 2> /dev/null | paste -sd ':' - )
# LOG4J="$VOLTDB_VOLTDB/log4j.xml"
LICENSE="$VOLTDB_VOLTDB/license.xml"
HOST="localhost"
# remove binaries, logs, runtime artifacts, etc... but keep the jars
function clean() {
rm -rf debugoutput voltdbroot log catalog-report.html \
statement-plans build/*.class clientbuild/*.class
}
# remove everything from "clean" as well as the jarfiles
function cleanall() {
ant clean
}
# compile the source code for procedures and the client into jarfiles
function jars() {
ant all
cp formatter.jar $VOLTDB_BASE/bundles
}
# compile the procedure and client jarfiles if they don't exist
function jars-ifneeded() {
rm -rf felix-cache
if [ ! -e sp.jar ] || [ ! -e client.jar ]; then
jars;
fi
}
# run the voltdb server locally
# note -- use something like this to create the Kafka topic, name
# matching the name used in the deployment file:
# /home/opt/kafka/bin/kafka-topics.sh --zookeeper kafka2:2181 --topic A7_KAFKAEXPORTTABLE2 --partitions 2 --replication-factor 1 --create
function server() {
jars-ifneeded
echo "Starting the VoltDB server."
echo "Remember -- the Kafka topic must exist before launching this test."
echo "To perform this action manually, use the command line: "
echo
echo "voltdb create -d deployment.xml -l $LICENSE -H $HOST"
echo
voltdb create -d deployment.xml -l $LICENSE -H $HOST
}
#kafka importer
function kafka() {
jars-ifneeded
echo "Starting the VoltDB server."
echo "To perform this action manually, use the command line: "
echo
echo "voltdb create -d deployment-kafka.xml -l $LICENSE -H $HOST"
echo
voltdb create -d deployment-kafka.xml -l $LICENSE -H $HOST
}
# load schema and procedures
function init() {
jars-ifneeded
sqlcmd < ddl.sql
}
# wait for backgrounded server to start up
function wait_for_startup() {
until sqlcmd --query=' exec @SystemInformation, OVERVIEW;' > /dev/null 2>&1
do
sleep 2
echo " ... Waiting for VoltDB to start"
if [[ $SECONDS -gt 60 ]]
then
echo "Exiting. VoltDB did not startup within 60 seconds" 1>&2; exit 1;
fi
done
}
# startup server in background and load schema
function background_server_andload() {
jars-ifneeded
# run the server in the background
voltdb create -B -d deployment.xml -l $LICENSE -H $HOST > nohup.log 2>&1 &
wait_for_startup
init
}
# run the client that drives the example
function client() {
async-benchmark
}
# Asynchronous benchmark sample
# Use this target for argument help
function async-benchmark-help() {
jars-ifneeded
java -classpath $CLIENTCLASSPATH kafkaimporter.client.kafkaimporter.KafkaImportBenchmark --help
}
# latencyreport: default is OFF
# ratelimit: must be a reasonable value if latencyreport is ON
# Enable those client options below to get a latency report
function async-benchmark() {
jars-ifneeded
java -classpath $CLIENTCLASSPATH \
client.kafkaimporter.KafkaImportBenchmark \
--displayinterval=5 \
--duration=180 \
--alltypes=false \
--useexport=false \
--expected_rows=6000000 \
--servers=localhost
}
# The following two demo functions are used by the Docker package. Don't remove.
# compile the jars for procs and client code
function demo-compile() {
jars
}
function demo() {
echo "starting server in background..."
background_server_andload
echo "starting client..."
client
echo
echo When you are done with the demo database, \
remember to use \"voltadmin shutdown\" to stop \
the server process.
}
function help() {
echo "Usage: ./run.sh {clean|server|init|demo|client|async-benchmark|aysnc-benchmark-help}"
}
# Run the target passed as the first arg on the command line
# If no first arg, run server
if [ $# -gt 1 ]; then help; exit; fi
if [ $# = 1 ]; then $1; else server; fi
|
simonzhangsm/voltdb
|
tests/test_apps/kafkaimporter/run.sh
|
Shell
|
agpl-3.0
| 5,377 |
#!/bin/bash
PROG="${GRINS_BUILDSRC_DIR}/grins"
INPUT="${GRINS_TEST_INPUT_DIR}/simple_ode.in"
# FIXME: In theory we should be able to solve a scalar problem on
# multiple processors, where ranks 1+ just twiddle their thumbs.
# In practice we get libMesh errors.
#${LIBMESH_RUN:-} $PROG $INPUT
$PROG $INPUT
|
nicholasmalaya/grins
|
test/regression/simple_ode.sh
|
Shell
|
lgpl-2.1
| 308 |
#!/bin/bash
set -euo pipefail
source $(dirname $0)/../utils.sh
TEST_ID=$(generate_test_id)
echo "TEST_ID = $TEST_ID"
ROOT=$(dirname $0)/../..
env=nodejs-$TEST_ID
fn=nodejs-hello-$TEST_ID
cleanup() {
log "Cleaning up..."
clean_resource_by_id $TEST_ID
}
if [ -z "${TEST_NOCLEANUP:-}" ]; then
trap cleanup EXIT
else
log "TEST_NOCLEANUP is set; not cleaning up test artifacts afterwards."
fi
# Create a hello world function in nodejs, test it with an http trigger
log "Creating nodejs env"
fission env create --name $env --image $NODE_RUNTIME_IMAGE --mincpu 20 --maxcpu 100 --minmemory 128 --maxmemory 256
log "Creating function"
fission fn create --name $fn --env $env --code $ROOT/examples/nodejs/hello.js --executortype poolmgr
log "Creating route"
fission route create --function $fn --url /$fn --method GET
log "Waiting for router to catch up"
sleep 5
log "Doing an HTTP GET on the function's route"
response=$(curl http://$FISSION_ROUTER/$fn)
log "Checking for valid response"
echo $response | grep -i hello
log "Poolmgr ExecutorType: All done."
|
fission/fission
|
test/tests/test_backend_poolmgr.sh
|
Shell
|
apache-2.0
| 1,079 |
#!/bin/bash
set -e
trap ctrl_c INT
function ctrl_c() {
exit 1
}
function usage() {
me=$(basename "$0")
echo
echo "Usage: $me build|clean [sample_name]"
echo
exit 2
}
function buildProject() {
dir=$1
echo "*********************** Building $dir"
pushd "$mydir/samples/$dir"
npm link generator-jhipster
yo jhipster --force
if [ -f pom.xml ]; then
./mvnw verify
else
./gradlew test
fi
popd
}
function cleanProject() {
dir=$1
echo "*********************** Cleaning $dir"
pushd "$mydir/samples/$dir"
    ls -a | grep -v '\.yo-rc\.json' | xargs rm -rf || true
popd
}
mydir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
if [ "$1" = "build" ]; then
if [ "$2" != "" ]; then
buildProject "$2"
else
for dir in $(ls -1 "$mydir/samples"); do
buildProject "$dir"
done
fi
elif [ "$1" = "clean" ]; then
if [ "$2" != "" ]; then
cleanProject "$2"
else
for dir in $(ls -1 "$mydir/samples"); do
cleanProject "$dir"
done
fi
else
usage
fi
|
lrkwz/generator-jhipster
|
travis/build-samples.sh
|
Shell
|
apache-2.0
| 1,117 |
#!/bin/bash
# Copyright 2014 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
total_cpus=`nproc`
config_nvme()
{
current_cpu=0
for dev in /sys/bus/pci/drivers/nvme/*
do
if [ ! -d $dev ]
then
continue
fi
for irq_info in $dev/msi_irqs/*
do
if [ ! -f $irq_info ]
then
continue
fi
current_cpu=$((current_cpu % total_cpus))
cpu_mask=`printf "%x" $((1<<current_cpu))`
      irq=$(basename $irq_info)
echo Setting IRQ $irq smp_affinity to $cpu_mask
echo $cpu_mask > /proc/irq/$irq/smp_affinity
current_cpu=$((current_cpu+1))
done
done
}
config_scsi()
{
irqs=()
for device in /sys/bus/virtio/drivers/virtio_scsi/virtio*
do
ssd=0
for target_path in $device/host*/target*/*
do
if [ ! -f $target_path/model ]
then
continue
fi
model=$(cat $target_path/model)
if [[ $model =~ .*EphemeralDisk.* ]]
then
ssd=1
for queue_path in $target_path/block/sd*/queue
do
echo noop > $queue_path/scheduler
echo 0 > $queue_path/add_random
echo 512 > $queue_path/nr_requests
echo 0 > $queue_path/rotational
echo 0 > $queue_path/rq_affinity
echo 1 > $queue_path/nomerges
done
fi
done
if [[ $ssd == 1 ]]
then
request_queue=$(basename $device)-request
irq=$(cat /proc/interrupts |grep $request_queue| awk '{print $1}'| sed 's/://')
irqs+=($irq)
fi
done
irq_count=${#irqs[@]}
if [ $irq_count != 0 ]
then
stride=$((total_cpus / irq_count))
stride=$((stride < 1 ? 1 : stride))
current_cpu=0
for irq in "${irqs[@]}"
do
current_cpu=$(($current_cpu % $total_cpus))
cpu_mask=`printf "%x" $((1<<$current_cpu))`
echo Setting IRQ $irq smp_affinity to $cpu_mask
echo $cpu_mask > /proc/irq/$irq/smp_affinity
current_cpu=$((current_cpu+stride))
done
fi
}
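# Worked example of the affinity mask math used in both functions above:
# /proc/irq/*/smp_affinity takes a hex CPU bitmask, so CPU n maps to 1<<n.
#   printf "%x" $((1<<0))  # -> 1  (CPU 0)
#   printf "%x" $((1<<3))  # -> 8  (CPU 3)
#   printf "%x" $((1<<5))  # -> 20 (CPU 5)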
config_nvme
config_scsi
|
syed/PerfKitBenchmarker
|
perfkitbenchmarker/data/set-interrupts.sh
|
Shell
|
apache-2.0
| 2,490 |
#!/bin/sh
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
curl -u admin:admin http://localhost:4502/libs/granite/packaging/rcp
echo ""
|
tripodsan/jackrabbit-filevault
|
vault-rcp/src/test/resources/list_tasks.sh
|
Shell
|
apache-2.0
| 885 |
#!/bin/bash
# DESCRIPTION
# Defines general utility functions.
# Answers the file name.
# Parameters:
# $1 = The file path.
get_file_name() {
printf "${1##*/}" # Answers file or directory name.
}
export -f get_file_name
# Answers the file extension.
# Parameters:
# $1 = The file name.
get_file_extension() {
local name=$(get_file_name "$1")
local extension="${1##*.}" # Excludes dot.
if [[ "$name" == "$extension" ]]; then
printf ''
else
printf "$extension"
fi
}
export -f get_file_extension
# Answers the root install path for file name.
# Parameters:
# $1 = The file name.
get_install_root() {
local file_name="$1"
local file_extension=$(get_file_extension "$file_name")
# Dynamically build the install path based on file extension.
case $file_extension in
'')
printf "/usr/local/bin";;
'app')
printf "/Applications";;
'prefPane')
printf "/Library/PreferencePanes";;
'qlgenerator')
printf "/Library/QuickLook";;
*)
printf "/tmp/unknown";;
esac
}
export -f get_install_root
# Answers the full install path (including file name) for file name.
# Parameters:
# $1 = The file name.
get_install_path() {
local file_name="$1"
local install_path=$(get_install_root "$file_name")
printf "$install_path/$file_name"
}
export -f get_install_path
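# Illustrative results, following the extension mapping above (names are
# hypothetical):
#   get_install_path "Foo.app"        # -> /Applications/Foo.app
#   get_install_path "Bar.prefPane"   # -> /Library/PreferencePanes/Bar.prefPane
#   get_install_path "mytool"         # -> /usr/local/bin/mytool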
# Cleans work path for temporary processing of installs.
clean_work_path() {
printf "Cleaning: $WORK_PATH...\n"
rm -rf "$WORK_PATH"
}
export -f clean_work_path
# Configures and launches process.
# Parameters:
# $1 = The process config source path.
launch_process() {
local config_file="$1"
local config_name="$(get_file_name $config_file)"
ln -sfv "$config_file" "$HOME/Library/LaunchAgents/$config_name"
launchctl load "$HOME/Library/LaunchAgents/$config_name"
}
export -f launch_process
# Caffeinate machine.
caffeinate_machine() {
local pid=$(ps aux | grep caffeinate | grep -v grep | awk '{print $2}')
if [[ -n "$pid" ]]; then
printf "Whoa, tweaker, machine is already caffeinated!\n"
else
caffeinate -sudit 9999999999 &
printf "Machine caffeinated and energy saver settings disabled.\n"
fi
}
export -f caffeinate_machine
|
masterots/osx_setup
|
lib/utilities.sh
|
Shell
|
mit
| 2,194 |
#!/bin/sh
# Copyright (C) 1999-2006 ImageMagick Studio LLC
#
# This program is covered by multiple licenses, which are described in
# LICENSE. You should have received a copy of LICENSE with this
# package; otherwise see http://www.imagemagick.org/script/license.php.
. ${srcdir}/tests/common.shi
${RUNENV} ${MEMCHECK} ./rwfile ${SRCDIR}/input_bilevel.miff PAM
|
ipwndev/DSLinux-Mirror
|
user/imagemagick/src/tests/rwfile_PAM_bilevel.sh
|
Shell
|
gpl-2.0
| 362 |
#
# Disable kdump.service for all systemd targets
#
systemctl disable kdump.service
#
# Stop kdump.service if currently running
#
systemctl stop kdump.service
|
mpreisler/scap-security-guide-debian
|
scap-security-guide-0.1.21/RHEL/7/input/fixes/bash/service_kdump_disabled.sh
|
Shell
|
gpl-2.0
| 160 |
#!/bin/sh
# Copyright (C) 1999-2006 ImageMagick Studio LLC
#
# This program is covered by multiple licenses, which are described in
# LICENSE. You should have received a copy of LICENSE with this
# package; otherwise see http://www.imagemagick.org/script/license.php.
. ${srcdir}/tests/common.shi
${RUNENV} ${MEMCHECK} ./rwblob ${SRCDIR}/input_truecolor.miff G3
|
ipwndev/DSLinux-Mirror
|
user/imagemagick/src/tests/rwblob_G3_truecolor.sh
|
Shell
|
gpl-2.0
| 363 |
#!/bin/sh
#
# Copyright (c) 2009, 2010 David Aguilar
#
test_description='git-difftool
Testing basic diff tool invocation
'
. ./test-lib.sh
if ! test_have_prereq PERL; then
skip_all='skipping difftool tests, perl not available'
test_done
fi
LF='
'
remove_config_vars()
{
# Unset all config variables used by git-difftool
git config --unset diff.tool
git config --unset diff.guitool
git config --unset difftool.test-tool.cmd
git config --unset difftool.prompt
git config --unset merge.tool
git config --unset mergetool.test-tool.cmd
git config --unset mergetool.prompt
return 0
}
restore_test_defaults()
{
# Restores the test defaults used by several tests
remove_config_vars
unset GIT_DIFF_TOOL
unset GIT_DIFFTOOL_PROMPT
unset GIT_DIFFTOOL_NO_PROMPT
git config diff.tool test-tool &&
git config difftool.test-tool.cmd 'cat $LOCAL'
git config difftool.bogus-tool.cmd false
}
prompt_given()
{
prompt="$1"
test "$prompt" = "Hit return to launch 'test-tool': branch"
}
# Create a file on master and change it on branch
test_expect_success 'setup' '
echo master >file &&
git add file &&
git commit -m "added file" &&
git checkout -b branch master &&
echo branch >file &&
git commit -a -m "branch changed file" &&
git checkout master
'
# Configure a custom difftool.<tool>.cmd and use it
test_expect_success 'custom commands' '
restore_test_defaults &&
git config difftool.test-tool.cmd "cat \$REMOTE" &&
diff=$(git difftool --no-prompt branch) &&
test "$diff" = "master" &&
restore_test_defaults &&
diff=$(git difftool --no-prompt branch) &&
test "$diff" = "branch"
'
# Ensures that git-difftool ignores bogus --tool values
test_expect_success 'difftool ignores bad --tool values' '
diff=$(git difftool --no-prompt --tool=bad-tool branch)
test "$?" = 1 &&
test "$diff" = ""
'
test_expect_success 'difftool honors --gui' '
git config merge.tool bogus-tool &&
git config diff.tool bogus-tool &&
git config diff.guitool test-tool &&
diff=$(git difftool --no-prompt --gui branch) &&
test "$diff" = "branch" &&
restore_test_defaults
'
test_expect_success 'difftool --gui works without configured diff.guitool' '
git config diff.tool test-tool &&
diff=$(git difftool --no-prompt --gui branch) &&
test "$diff" = "branch" &&
restore_test_defaults
'
# Specify the diff tool using $GIT_DIFF_TOOL
test_expect_success 'GIT_DIFF_TOOL variable' '
git config --unset diff.tool
GIT_DIFF_TOOL=test-tool &&
export GIT_DIFF_TOOL &&
diff=$(git difftool --no-prompt branch) &&
test "$diff" = "branch" &&
restore_test_defaults
'
# Test the $GIT_*_TOOL variables and ensure
# that $GIT_DIFF_TOOL always wins unless --tool is specified
test_expect_success 'GIT_DIFF_TOOL overrides' '
git config diff.tool bogus-tool &&
git config merge.tool bogus-tool &&
GIT_DIFF_TOOL=test-tool &&
export GIT_DIFF_TOOL &&
diff=$(git difftool --no-prompt branch) &&
test "$diff" = "branch" &&
GIT_DIFF_TOOL=bogus-tool &&
export GIT_DIFF_TOOL &&
diff=$(git difftool --no-prompt --tool=test-tool branch) &&
test "$diff" = "branch" &&
restore_test_defaults
'
# Test that we don't have to pass --no-prompt to difftool
# when $GIT_DIFFTOOL_NO_PROMPT is true
test_expect_success 'GIT_DIFFTOOL_NO_PROMPT variable' '
GIT_DIFFTOOL_NO_PROMPT=true &&
export GIT_DIFFTOOL_NO_PROMPT &&
diff=$(git difftool branch) &&
test "$diff" = "branch" &&
restore_test_defaults
'
# git-difftool supports the difftool.prompt variable.
# Test that GIT_DIFFTOOL_PROMPT can override difftool.prompt = false
test_expect_success 'GIT_DIFFTOOL_PROMPT variable' '
git config difftool.prompt false &&
GIT_DIFFTOOL_PROMPT=true &&
export GIT_DIFFTOOL_PROMPT &&
prompt=$(echo | git difftool branch | tail -1) &&
prompt_given "$prompt" &&
restore_test_defaults
'
# Test that we don't have to pass --no-prompt when difftool.prompt is false
test_expect_success 'difftool.prompt config variable is false' '
git config difftool.prompt false &&
diff=$(git difftool branch) &&
test "$diff" = "branch" &&
restore_test_defaults
'
# Test that we don't have to pass --no-prompt when mergetool.prompt is false
test_expect_success 'difftool merge.prompt = false' '
git config --unset difftool.prompt
git config mergetool.prompt false &&
diff=$(git difftool branch) &&
test "$diff" = "branch" &&
restore_test_defaults
'
# Test that the -y flag can override difftool.prompt = true
test_expect_success 'difftool.prompt can be overridden with -y' '
git config difftool.prompt true &&
diff=$(git difftool -y branch) &&
test "$diff" = "branch" &&
restore_test_defaults
'
# Test that the --prompt flag can override difftool.prompt = false
test_expect_success 'difftool.prompt can be overridden with --prompt' '
git config difftool.prompt false &&
prompt=$(echo | git difftool --prompt branch | tail -1) &&
prompt_given "$prompt" &&
restore_test_defaults
'
# Test that the last flag passed on the command-line wins
test_expect_success 'difftool last flag wins' '
diff=$(git difftool --prompt --no-prompt branch) &&
test "$diff" = "branch" &&
restore_test_defaults &&
prompt=$(echo | git difftool --no-prompt --prompt branch | tail -1) &&
prompt_given "$prompt" &&
restore_test_defaults
'
# git-difftool falls back to git-mergetool config variables
# so test that behavior here
test_expect_success 'difftool + mergetool config variables' '
remove_config_vars
git config merge.tool test-tool &&
git config mergetool.test-tool.cmd "cat \$LOCAL" &&
diff=$(git difftool --no-prompt branch) &&
test "$diff" = "branch" &&
# set merge.tool to something bogus, diff.tool to test-tool
git config merge.tool bogus-tool &&
git config diff.tool test-tool &&
diff=$(git difftool --no-prompt branch) &&
test "$diff" = "branch" &&
restore_test_defaults
'
test_expect_success 'difftool.<tool>.path' '
git config difftool.tkdiff.path echo &&
diff=$(git difftool --tool=tkdiff --no-prompt branch) &&
git config --unset difftool.tkdiff.path &&
lines=$(echo "$diff" | grep file | wc -l) &&
test "$lines" -eq 1 &&
restore_test_defaults
'
test_expect_success 'difftool --extcmd=cat' '
diff=$(git difftool --no-prompt --extcmd=cat branch) &&
test "$diff" = branch"$LF"master
'
test_expect_success 'difftool --extcmd cat' '
diff=$(git difftool --no-prompt --extcmd cat branch) &&
test "$diff" = branch"$LF"master
'
test_expect_success 'difftool -x cat' '
diff=$(git difftool --no-prompt -x cat branch) &&
test "$diff" = branch"$LF"master
'
test_expect_success 'difftool --extcmd echo arg1' '
diff=$(git difftool --no-prompt --extcmd sh\ -c\ \"echo\ \$1\" branch)
test "$diff" = file
'
test_expect_success 'difftool --extcmd cat arg1' '
diff=$(git difftool --no-prompt --extcmd sh\ -c\ \"cat\ \$1\" branch)
test "$diff" = master
'
test_expect_success 'difftool --extcmd cat arg2' '
diff=$(git difftool --no-prompt --extcmd sh\ -c\ \"cat\ \$2\" branch)
test "$diff" = branch
'
test_done
|
gdb/git
|
t/t7800-difftool.sh
|
Shell
|
gpl-2.0
| 6,942 |
#!/bin/sh
export ZIPNAME=nubdist/iceball-indev-0.2a-6.zip
zip -r $ZIPNAME *.dll *.exe *.txt opencmd.bat docs/ \
clsave/config.json \
clsave/pub/user.json \
clsave/pub/controls.json \
clsave/vol/dummy \
svsave/pub/dummy \
svsave/vol/dummy \
pkg/iceball/halp/ \
pkg/iceball/launch/ \
pkg/iceball/config/ \
pkg/iceball/lib/ \
pkg/iceball/gfx/ \
#
|
10se1ucgo/iceball
|
zipdist.sh
|
Shell
|
gpl-3.0
| 359 |
#!/bin/sh
sudo chown -R privoxy: /etc/privoxy
sudo chmod -R go-rwx /etc/privoxy
|
vitvegl/AppArmor-profiles
|
ubuntu/x86_64/privoxy.sh
|
Shell
|
gpl-3.0
| 81 |
#!/bin/bash
#Aqueduct - Compliance Remediation Content
#Copyright (C) 2011,2012 Vincent C. Passaro ([email protected])
#
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; either version 2
#of the License, or (at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor,
#Boston, MA 02110-1301, USA.
######################################################################
#By Tummy a.k.a Vincent C. Passaro #
#Vincent[.]Passaro[@]gmail[.]com #
#www.vincentpassaro.com #
######################################################################
#_____________________________________________________________________
#| Version | Change Information | Author | Date |
#|__________|_______________________|____________________|____________|
#| 1.0 | Initial Script | Vincent C. Passaro | 29-Feb-2012|
#| | Creation | | |
#|__________|_______________________|____________________|____________|
#######################PCI INFORMATION###############################
#Risk =
#######################PCI INFORMATION###############################
#Global Variables#
KUDSERVICE=$( service kudzu status | grep "running..." | wc -l )
#Start-Lockdown
if [ $KUDSERVICE -ne 0 ]
then
service kudzu stop
chkconfig --level 2345 kudzu off
fi
|
quark-pat/CLIP
|
packages/aqueduct/aqueduct/compliance/Bash/PCI/rhel6/dev/pci-dss-2-Disable_Kudzu.sh
|
Shell
|
apache-2.0
| 1,935 |
#!/bin/sh
#-
# Copyright (c) 2010 iXsystems, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# $FreeBSD$
# Functions which runs commands on the system
. ${BACKEND}/functions.sh
. ${BACKEND}/functions-parse.sh
# Function which localizes a FreeBSD install
localize_freebsd()
{
sed -i.bak "s/lang=en_US/lang=${LOCALE}/g" ${FSMNT}/etc/login.conf
rm ${FSMNT}/etc/login.conf.bak
};
localize_x_desktops() {
# Check for and customize KDE lang
##########################################################################
# Check if we can localize KDE via skel
if [ -e "${FSMNT}/usr/share/skel/.kde4/share/config/kdeglobals" ] ; then
sed -i '' "s/Country=us/Country=${COUNTRY}/g" ${FSMNT}/usr/share/skel/.kde4/share/config/kdeglobals
sed -i '' "s/Country=us/Country=${COUNTRY}/g" ${FSMNT}/root/.kde4/share/config/kdeglobals
sed -i '' "s/Language=en_US/Language=${SETLANG}:${LOCALE}/g" ${FSMNT}/usr/share/skel/.kde4/share/config/kdeglobals
fi
# Check if we have a KDE root config
if [ -e "${FSMNT}/root/.kde4/share/config/kdeglobals" ] ; then
sed -i '' "s/Language=en_US/Language=${SETLANG}:${LOCALE}/g" ${FSMNT}/root/.kde4/share/config/kdeglobals
fi
# Check for KDM
if [ -e "${FSMNT}/usr/local/kde4/share/config/kdm/kdmrc" ] ; then
sed -i '' "s/Language=en_US/Language=${LOCALE}.UTF-8/g" ${FSMNT}/usr/local/kde4/share/config/kdm/kdmrc
fi
# Check for and customize GNOME / GDM lang
##########################################################################
# See if GDM is enabled and customize its lang
cat ${FSMNT}/etc/rc.conf 2>/dev/null | grep -q "gdm_enable=\"YES\"" 2>/dev/null
if [ "$?" = "0" ] ; then
echo "gdm_lang=\"${LOCALE}.UTF-8\"" >> ${FSMNT}/etc/rc.conf
fi
};
# Function which localizes a PC-BSD install
localize_pcbsd()
{
# Check if we have a localized splash screen and copy it
if [ -e "${FSMNT}/usr/local/share/pcbsd/splash-screens/loading-screen-${SETLANG}.pcx" ]
then
cp ${FSMNT}/usr/local/share/pcbsd/splash-screens/loading-screen-${SETLANG}.pcx ${FSMNT}/boot/loading-screen.pcx
fi
};
localize_x_keyboard()
{
KEYMOD="$1"
KEYLAY="$2"
KEYVAR="$3"
COUNTRY="$4"
OPTION="grp:alt_shift_toggle"
SETXKBMAP=""
if [ "${COUNTRY}" = "NONE" -o "${COUNTRY}" = "us" -o "${COUNTRY}" = "C" ] ; then
#In this case we don't need any additional language
COUNTRY=""
OPTION=""
else
COUNTRY=",${COUNTRY}"
fi
if [ "${KEYMOD}" != "NONE" ]
then
SETXKBMAP="-model ${KEYMOD}"
KXMODEL="${KEYMOD}"
else
KXMODEL="pc104"
fi
if [ "${KEYLAY}" != "NONE" ]
then
localize_key_layout "$KEYLAY"
SETXKBMAP="${SETXKBMAP} -layout ${KEYLAY}"
KXLAYOUT="${KEYLAY}"
else
KXLAYOUT="us"
fi
if [ "${KEYVAR}" != "NONE" ]
then
SETXKBMAP="${SETXKBMAP} -variant ${KEYVAR}"
KXVAR="(${KEYVAR})"
else
KXVAR=""
fi
# Setup .xprofile with our setxkbmap call now
if [ ! -z "${SETXKBMAP}" ]
then
if [ ! -e "${FSMNT}/usr/share/skel/.xprofile" ]
then
echo "#!/bin/sh" >${FSMNT}/usr/share/skel/.xprofile
fi
# Save the keyboard layout for user / root X logins
echo "setxkbmap ${SETXKBMAP}" >>${FSMNT}/usr/share/skel/.xprofile
chmod 755 ${FSMNT}/usr/share/skel/.xprofile
cp ${FSMNT}/usr/share/skel/.xprofile ${FSMNT}/root/.xprofile
# Save it for KDM
if [ -e "${FSMNT}/usr/local/kde4/share/config/kdm/Xsetup" ] ; then
echo "setxkbmap ${SETXKBMAP}" >>${FSMNT}/usr/local/kde4/share/config/kdm/Xsetup
fi
fi
# Create the kxkbrc configuration using these options
if [ -d "${FSMNT}/usr/share/skel/.kde4/share/config" ] ; then
echo "[Layout]
DisplayNames=${KXLAYOUT}${COUNTRY}
IndicatorOnly=false
LayoutList=${KXLAYOUT}${KXVAR}${COUNTRY}
Model=${KXMODEL}
Options=${OPTION}
ResetOldOptions=true
ShowFlag=true
ShowSingle=false
SwitchMode=WinClass
Use=true " >${FSMNT}/usr/share/skel/.kde4/share/config/kxkbrc
fi
};
localize_key_layout()
{
KEYLAYOUT="$1"
# Set the keylayout in rc.conf
case ${KEYLAYOUT} in
am) KEYLAYOUT_CONSOLE="hy.armscii-8" ;;
ca) KEYLAYOUT_CONSOLE="fr_CA.acc.iso" ;;
ch) KEYLAYOUT_CONSOLE="swissgerman.iso" ;;
cz) KEYLAYOUT_CONSOLE="cz.iso2" ;;
de) KEYLAYOUT_CONSOLE="german.iso" ;;
dk) KEYLAYOUT_CONSOLE="danish.iso" ;;
ee) KEYLAYOUT_CONSOLE="estonian.iso" ;;
es) KEYLAYOUT_CONSOLE="spanish.iso" ;;
fi) KEYLAYOUT_CONSOLE="finnish.iso" ;;
is) KEYLAYOUT_CONSOLE="icelandic.iso" ;;
jp) KEYLAYOUT_CONSOLE="jp.106" ;;
nl) KEYLAYOUT_CONSOLE="dutch.iso.acc" ;;
no) KEYLAYOUT_CONSOLE="norwegian.iso" ;;
pl) KEYLAYOUT_CONSOLE="pl_PL.ISO8859-2" ;;
ru) KEYLAYOUT_CONSOLE="ru.koi8-r" ;;
sk) KEYLAYOUT_CONSOLE="sk.iso2" ;;
se) KEYLAYOUT_CONSOLE="swedish.iso" ;;
tr) KEYLAYOUT_CONSOLE="tr.iso9.q" ;;
gb) KEYLAYOUT_CONSOLE="uk.iso" ;;
*) if [ ! -z "${KEYLAYOUT}" ]
then
KEYLAYOUT_CONSOLE="${KEYLAYOUT}.iso"
fi
;;
esac
if [ -n "${KEYLAYOUT_CONSOLE}" ]
then
echo "keymap=\"${KEYLAYOUT_CONSOLE}\"" >>${FSMNT}/etc/rc.conf
fi
};
# Function which prunes other l10n files from the KDE install
localize_prune_langs()
{
get_value_from_cfg localizeLang
KEEPLANG="$VAL"
if [ -z "$KEEPLANG" ] ; then
KEEPLANG="en"
fi
export KEEPLANG
echo_log "Pruning other l10n files, keeping ${KEEPLANG}"
# Create the script to do uninstalls
echo '#!/bin/sh
for i in `pkg_info -xEI kde-l10n`
do
echo "$i" | grep "${KEEPLANG}-kde"
if [ $? -ne 0 ] ; then
pkg_delete ${i}
fi
done
' > ${FSMNT}/.pruneLangs.sh
chmod 755 ${FSMNT}/.pruneLangs.sh
chroot ${FSMNT} /.pruneLangs.sh >/dev/null 2>/dev/null
rm ${FSMNT}/.pruneLangs.sh
};
# Function which sets COUNTRY SETLANG and LOCALE based upon $1
localize_get_codes()
{
TARGETLANG="${1}"
# Setup the presets for the specific lang
case $TARGETLANG in
af)
COUNTRY="C"
SETLANG="af"
LOCALE="af_ZA"
;;
ar)
COUNTRY="C"
SETLANG="ar"
LOCALE="en_US"
;;
az)
COUNTRY="C"
SETLANG="az"
LOCALE="en_US"
;;
ca)
COUNTRY="es"
SETLANG="es:ca"
LOCALE="ca_ES"
;;
be)
COUNTRY="be"
SETLANG="be"
LOCALE="be_BY"
;;
bn)
COUNTRY="bn"
SETLANG="bn"
LOCALE="en_US"
;;
bg)
COUNTRY="bg"
SETLANG="bg"
LOCALE="bg_BG"
;;
cs)
COUNTRY="cz"
SETLANG="cs"
LOCALE="cs_CZ"
;;
da)
COUNTRY="dk"
SETLANG="da"
LOCALE="da_DK"
;;
de)
COUNTRY="de"
SETLANG="de"
LOCALE="de_DE"
;;
en_GB)
COUNTRY="gb"
SETLANG="en_GB:cy"
LOCALE="en_GB"
;;
el)
COUNTRY="gr"
SETLANG="el:gr"
LOCALE="el_GR"
;;
es)
COUNTRY="es"
SETLANG="es"
LOCALE="es_ES"
;;
es_LA)
COUNTRY="us"
SETLANG="es:en_US"
LOCALE="es_ES"
;;
et)
COUNTRY="ee"
SETLANG="et"
LOCALE="et_EE"
;;
fr)
COUNTRY="fr"
SETLANG="fr"
LOCALE="fr_FR"
;;
he)
COUNTRY="il"
SETLANG="he:ar"
LOCALE="he_IL"
;;
hr)
COUNTRY="hr"
SETLANG="hr"
LOCALE="hr_HR"
;;
hu)
COUNTRY="hu"
SETLANG="hu"
LOCALE="hu_HU"
;;
it)
COUNTRY="it"
SETLANG="it"
LOCALE="it_IT"
;;
ja)
COUNTRY="jp"
SETLANG="ja"
LOCALE="ja_JP"
;;
ko)
COUNTRY="kr"
SETLANG="ko"
LOCALE="ko_KR"
;;
nl)
COUNTRY="nl"
SETLANG="nl"
LOCALE="nl_NL"
;;
nn)
COUNTRY="no"
SETLANG="nn"
LOCALE="en_US"
;;
pa)
COUNTRY="pa"
SETLANG="pa"
LOCALE="en_US"
;;
pl)
COUNTRY="pl"
SETLANG="pl"
LOCALE="pl_PL"
;;
pt)
COUNTRY="pt"
SETLANG="pt"
LOCALE="pt_PT"
;;
pt_BR)
COUNTRY="br"
SETLANG="pt_BR"
LOCALE="pt_BR"
;;
ru)
COUNTRY="ru"
SETLANG="ru"
LOCALE="ru_RU"
;;
sl)
COUNTRY="si"
SETLANG="sl"
LOCALE="sl_SI"
;;
sk)
COUNTRY="sk"
SETLANG="sk"
LOCALE="sk_SK"
;;
sv)
COUNTRY="se"
SETLANG="sv"
LOCALE="sv_SE"
;;
uk)
COUNTRY="ua"
SETLANG="uk"
LOCALE="uk_UA"
;;
vi)
COUNTRY="vn"
SETLANG="vi"
LOCALE="en_US"
;;
zh_CN)
COUNTRY="cn"
SETLANG="zh_CN"
LOCALE="zh_CN"
;;
zh_TW)
COUNTRY="tw"
SETLANG="zh_TW"
LOCALE="zh_TW"
;;
*)
COUNTRY="C"
SETLANG="${TARGETLANG}"
LOCALE="en_US"
;;
esac
export COUNTRY SETLANG LOCALE
};
# Function which sets the timezone on the system
set_timezone()
{
TZONE="$1"
cp ${FSMNT}/usr/share/zoneinfo/${TZONE} ${FSMNT}/etc/localtime
};
# Function which enables / disables NTP
set_ntp()
{
ENABLED="$1"
if [ "$ENABLED" = "yes" -o "${ENABLED}" = "YES" ]
then
cat ${FSMNT}/etc/rc.conf 2>/dev/null | grep -q 'ntpd_enable="YES"' 2>/dev/null
if [ $? -ne 0 ]
then
echo 'ntpd_enable="YES"' >>${FSMNT}/etc/rc.conf
echo 'ntpd_sync_on_start="YES"' >>${FSMNT}/etc/rc.conf
fi
else
cat ${FSMNT}/etc/rc.conf 2>/dev/null | grep -q 'ntpd_enable="YES"' 2>/dev/null
		if [ $? -eq 0 ]
then
sed -i.bak 's|ntpd_enable="YES"||g' ${FSMNT}/etc/rc.conf
fi
fi
};
# Starts checking for localization directives
run_localize()
{
KEYLAYOUT="NONE"
KEYMOD="NONE"
KEYVAR="NONE"
while read line
do
# Check if we need to do any localization
echo $line | grep -q "^localizeLang=" 2>/dev/null
if [ $? -eq 0 ]
then
# Set our country / lang / locale variables
get_value_from_string "$line"
localize_get_codes ${VAL}
get_value_from_string "$line"
# If we are doing PC-BSD install, localize it as well as FreeBSD base
if [ "${INSTALLTYPE}" != "FreeBSD" ]
then
localize_pcbsd "$VAL"
fi
# Localize FreeBSD
localize_freebsd "$VAL"
# Localize any X pkgs
localize_x_desktops "$VAL"
fi
# Check if we need to do any keylayouts
echo $line | grep -q "^localizeKeyLayout=" 2>/dev/null
if [ $? -eq 0 ] ; then
get_value_from_string "$line"
KEYLAYOUT="$VAL"
fi
# Check if we need to do any key models
echo $line | grep -q "^localizeKeyModel=" 2>/dev/null
if [ $? -eq 0 ] ; then
get_value_from_string "$line"
KEYMOD="$VAL"
fi
# Check if we need to do any key variant
echo $line | grep -q "^localizeKeyVariant=" 2>/dev/null
if [ $? -eq 0 ] ; then
get_value_from_string "$line"
KEYVAR="$VAL"
fi
# Check if we need to set a timezone
echo $line | grep -q "^timeZone=" 2>/dev/null
if [ $? -eq 0 ] ; then
get_value_from_string "$line"
set_timezone "$VAL"
fi
# Check if we need to set a timezone
echo $line | grep -q "^enableNTP=" 2>/dev/null
if [ $? -eq 0 ] ; then
get_value_from_string "$line"
set_ntp "$VAL"
fi
done <${CFGF}
if [ "${INSTALLTYPE}" != "FreeBSD" ] ; then
# Do our X keyboard localization
localize_x_keyboard "${KEYMOD}" "${KEYLAYOUT}" "${KEYVAR}" "${COUNTRY}"
fi
  # Check if we want to prune any other KDE lang files to save some disk space
get_value_from_cfg localizePrune
if [ "${VAL}" = "yes" -o "${VAL}" = "YES" ] ; then
localize_prune_langs
fi
  # Update the login.conf db; even if we didn't localize, it's a good idea to make sure it's up to date
run_chroot_cmd "/usr/bin/cap_mkdb /etc/login.conf" >/dev/null 2>/dev/null
};
|
jhbsz/OSI-OS
|
usr.sbin/pc-sysinstall/backend/functions-localize.sh
|
Shell
|
bsd-3-clause
| 12,871 |
#! /bin/sh
set -x
true \
&& rm -f aclocal.m4 \
&& rm -f -r autom4te.cache \
&& rm -f collectd-*.tar.bz2 \
&& rm -f collectd-*.tar.gz \
&& rm -f compile \
&& rm -f config.guess \
&& rm -f config.log \
&& rm -f config.status \
&& rm -f config.sub \
&& rm -f configure \
&& rm -f depcomp \
&& rm -f install-sh \
&& rm -f -r libltdl \
&& rm -f libtool \
&& rm -f ltmain.sh \
&& rm -f Makefile \
&& rm -f Makefile.in \
&& rm -f missing \
&& rm -f -r src/.deps \
&& rm -f -r src/.libs \
&& rm -f src/*.o \
&& rm -f src/*.la \
&& rm -f src/*.lo \
&& rm -f src/collectd \
&& rm -f src/collectd.1 \
&& rm -f src/config.h \
&& rm -f src/config.h.in \
&& rm -f src/config.h.in~ \
&& rm -f src/Makefile \
&& rm -f src/Makefile.in \
&& rm -f src/stamp-h1 \
&& rm -f src/stamp-h1.in \
&& rm -f -r src/libping/.libs \
&& rm -f src/libping/*.o \
&& rm -f src/libping/*.la \
&& rm -f src/libping/*.lo \
&& rm -f src/libping/config.h \
&& rm -f src/libping/config.h.in \
&& rm -f src/libping/Makefile \
&& rm -f src/libping/Makefile.in \
&& rm -f src/libping/stamp-h2 \
&& rm -f -r src/libcollectdclient/.libs \
&& rm -f src/libcollectdclient/*.o \
&& rm -f src/libcollectdclient/*.la \
&& rm -f src/libcollectdclient/*.lo \
&& rm -f bindings/.perl-directory-stamp \
&& rm -f -r bindings/buildperl
|
perfwatcher/collectd-pw
|
clean.sh
|
Shell
|
gpl-2.0
| 1,282 |
#!/bin/sh
test_description='git merge
Testing octopus merge with more than 25 refs.'
. ./test-lib.sh
test_expect_success 'setup' '
echo c0 > c0.c &&
git add c0.c &&
git commit -m c0 &&
git tag c0 &&
i=1 &&
while test $i -le 30
do
git reset --hard c0 &&
echo c$i > c$i.c &&
git add c$i.c &&
git commit -m c$i &&
git tag c$i &&
i=$(expr $i + 1) || return 1
done
'
test_expect_success 'merge c1 with c2, c3, c4, ... c29' '
git reset --hard c1 &&
i=2 &&
refs="" &&
while test $i -le 30
do
refs="$refs c$i"
i=$(expr $i + 1)
done &&
git merge $refs &&
test "$(git rev-parse c1)" != "$(git rev-parse HEAD)" &&
i=1 &&
while test $i -le 30
do
test "$(git rev-parse c$i)" = "$(git rev-parse HEAD^$i)" &&
i=$(expr $i + 1) || return 1
done &&
git diff --exit-code &&
i=1 &&
while test $i -le 30
do
test -f c$i.c &&
i=$(expr $i + 1) || return 1
done
'
cat >expected <<\EOF
Trying simple merge with c2
Trying simple merge with c3
Trying simple merge with c4
Merge made by the 'octopus' strategy.
c2.c | 1 +
c3.c | 1 +
c4.c | 1 +
3 files changed, 3 insertions(+)
create mode 100644 c2.c
create mode 100644 c3.c
create mode 100644 c4.c
EOF
test_expect_success 'merge output uses pretty names' '
git reset --hard c1 &&
git merge c2 c3 c4 >actual &&
test_i18ncmp expected actual
'
cat >expected <<\EOF
Merge made by the 'recursive' strategy.
c5.c | 1 +
1 file changed, 1 insertion(+)
create mode 100644 c5.c
EOF
test_expect_success 'merge reduces irrelevant remote heads' '
if test "$GIT_TEST_MERGE_ALGORITHM" = ort
then
mv expected expected.tmp &&
sed s/recursive/ort/ expected.tmp >expected &&
rm expected.tmp
fi &&
GIT_MERGE_VERBOSITY=0 git merge c4 c5 >actual &&
test_i18ncmp expected actual
'
cat >expected <<\EOF
Fast-forwarding to: c1
Trying simple merge with c2
Merge made by the 'octopus' strategy.
c1.c | 1 +
c2.c | 1 +
2 files changed, 2 insertions(+)
create mode 100644 c1.c
create mode 100644 c2.c
EOF
test_expect_success 'merge fast-forward output uses pretty names' '
git reset --hard c0 &&
git merge c1 c2 >actual &&
test_i18ncmp expected actual
'
test_done
|
tacker66/git
|
t/t7602-merge-octopus-many.sh
|
Shell
|
gpl-2.0
| 2,149 |
#!/usr/bin/env bash
HT_HOME=${INSTALL_DIR:-"$HOME/hypertable/current"}
SCRIPT_DIR=`dirname $0`
$HT_HOME/bin/ht-start-test-servers.sh --clear --no-thriftbroker
echo "CREATE NAMESPACE MATCH_VERSION; quit;" | $HT_HOME/bin/ht shell --batch
echo "USE MATCH_VERSION; CREATE TABLE VERSION('M' MAX_VERSIONS 10); quit;" | $HT_HOME/bin/ht shell --batch
echo "USE MATCH_VERSION; INSERT INTO VERSION VALUES('rowname', 'M', 'VALUE'); quit;" | $HT_HOME/bin/ht shell --batch
echo "USE MATCH_VERSION; INSERT INTO VERSION VALUES('rowname', 'M', 'VALUE'); quit;" | $HT_HOME/bin/ht shell --batch
sleep 2
LAST_TS=`date +"%F %T"`
echo "USE MATCH_VERSION; INSERT INTO VERSION VALUES('$LAST_TS', 'rowname', 'M', 'VALUE'); quit;" | $HT_HOME/bin/ht shell --batch
echo "[first dump]"
echo "USE MATCH_VERSION; SELECT M FROM VERSION DISPLAY_TIMESTAMPS;" | $HT_HOME/bin/ht shell --batch | tee "dump1.tsv"
# Delete last insert
echo "USE MATCH_VERSION; DELETE M FROM VERSION WHERE ROW='rowname' VERSION '$LAST_TS';"
echo "USE MATCH_VERSION; DELETE M FROM VERSION WHERE ROW='rowname' VERSION '$LAST_TS';" | $HT_HOME/bin/ht shell --batch
echo "[second dump]"
echo "USE MATCH_VERSION; SELECT M FROM VERSION DISPLAY_TIMESTAMPS;" | $HT_HOME/bin/ht shell --batch | tee "dump2.tsv"
COUNT=`wc -l dump2.tsv | cut -f1 -d' '`
if [ $COUNT -ne 2 ]; then
echo "dump2.tsv does not contain exactly two lines!"
exit 1
fi
exit 0
|
hypertable/hypertable
|
tests/integration/general/hql-delete-test.sh
|
Shell
|
gpl-3.0
| 1,399 |
#!/usr/bin/env bash
wget "https://valtman.name/files/telegram-cli-1222"
sudo apt-get -y install libreadline6 libreadline-dev libreadline6-dev libconfig-dev libssl-dev tmux lua5.2 liblua5.2-dev lua-socket lua-sec lua-expat libevent-dev make unzip redis-server autoconf git g++ libjansson-dev libpython-dev expat libexpat1-dev ppa-purge python3-pip python3-dev software-properties-common python-software-properties gcc-6
sudo add-apt-repository ppa:ubuntu-toolchain-r/test
sudo apt-get update
sudo apt-get upgrade
sudo apt-get dist-upgrade
sudo ppa-purge
sudo service redis-server restart
chmod 777 telegram-cli-1222
chmod 777 anticrash.sh
RED='\033[0;31m'
NC='\033[0m'
CYAN='\033[0;36m'
echo -e "${CYAN}Installation Completed! Create a bot with creator.lua (lua creator.lua)${NC}"
exit
|
DarKTeaMoNe/TabChi
|
install.sh
|
Shell
|
gpl-3.0
| 801 |
# 300_create_dp_restore_fs_list.sh
# Purpose: Generate a file system list of objects to restore
# $ /opt/omni/bin/omnidb -filesystem | grep $(hostname)
# test.internal.it3.be:/ '/' FileSystem
[ -f $TMP_DIR/DP_GUI_RESTORE ] && return # GUI restore explicitly requested
/opt/omni/bin/omnidb -session $(cat $TMP_DIR/dp_recovery_session) | cut -d"'" -f -2 > $TMP_DIR/list_of_fs_objects
[ -s $TMP_DIR/list_of_fs_objects ]
StopIfError "Data Protector did not find any file system objects for $(hostname)"
# check if we need to exclude a file system - exclude fs list = $VAR_DIR/recovery/exclude_mountpoints
if [ -f $VAR_DIR/recovery/exclude_mountpoints ]; then
HostObj=`tail -n 1 $TMP_DIR/list_of_fs_objects | cut -d: -f 1`
Log "Info: $VAR_DIR/recovery/exclude_mountpoints found. Remove from restore file system list."
sed -e 's;^/;'${HostObj}':/;' $VAR_DIR/recovery/exclude_mountpoints > $TMP_DIR/exclude_mountpoints
# $TMP_DIR/exclude_mountpoints contains e.g. test.internal.it3.be:/usr/sap
# use join to remove excluded file systems to restore
join -v 1 $TMP_DIR/list_of_fs_objects $TMP_DIR/exclude_mountpoints
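    # Worked example of the join above (hostname is illustrative):
    #   list_of_fs_objects:   host:/  host:/usr/sap
    #   exclude_mountpoints:  host:/usr/sap
    #   join -v 1 keeps only host:/; i.e. the excluded mount points are dropped.
    # Note both files must be lexically sorted for join to pair lines reliably.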
fi
|
terreActive/rear
|
usr/share/rear/restore/DP/default/300_create_dp_restore_fs_list.sh
|
Shell
|
gpl-3.0
| 1,160 |
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
OS_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${OS_ROOT}/hack/util.sh"
source "${OS_ROOT}/hack/cmd_util.sh"
os::log::install_errexit
# This test validates template commands
os::cmd::expect_success 'oc get templates'
os::cmd::expect_success 'oc create -f examples/sample-app/application-template-dockerbuild.json'
os::cmd::expect_success 'oc get templates'
os::cmd::expect_success 'oc get templates ruby-helloworld-sample'
os::cmd::expect_success 'oc get template ruby-helloworld-sample -o json | oc process -f -'
os::cmd::expect_success 'oc process ruby-helloworld-sample'
os::cmd::expect_success_and_text 'oc describe templates ruby-helloworld-sample' "BuildConfig.*ruby-sample-build"
os::cmd::expect_success 'oc delete templates ruby-helloworld-sample'
os::cmd::expect_success 'oc get templates'
# TODO: create directly from template
echo "templates: ok"
os::cmd::expect_success 'oc process -f test/templates/fixtures/guestbook.json -l app=guestbook | oc create -f -'
os::cmd::expect_success_and_text 'oc status' 'frontend-service'
echo "template+config: ok"
# Joined parameter values are honored
os::cmd::expect_success_and_text 'oc process -f test/templates/fixtures/guestbook.json -v ADMIN_USERNAME=myuser,ADMIN_PASSWORD=mypassword' '"myuser"'
os::cmd::expect_success_and_text 'oc process -f test/templates/fixtures/guestbook.json -v ADMIN_USERNAME=myuser,ADMIN_PASSWORD=mypassword' '"mypassword"'
# Individually specified parameter values are honored
os::cmd::expect_success_and_text 'oc process -f test/templates/fixtures/guestbook.json -v ADMIN_USERNAME=myuser -v ADMIN_PASSWORD=mypassword' '"myuser"'
os::cmd::expect_success_and_text 'oc process -f test/templates/fixtures/guestbook.json -v ADMIN_USERNAME=myuser -v ADMIN_PASSWORD=mypassword' '"mypassword"'
echo "template+parameters: ok"
# Run as cluster-admin to allow choosing any supplemental groups we want
# Ensure large integers survive unstructured JSON creation
os::cmd::expect_success 'oc create -f test/fixtures/template-type-precision.json'
# ... and processing
os::cmd::expect_success_and_text 'oc process template-type-precision' '1000030003'
os::cmd::expect_success_and_text 'oc process template-type-precision' '2147483647'
os::cmd::expect_success_and_text 'oc process template-type-precision' '9223372036854775807'
# ... and re-encoding as structured resources
os::cmd::expect_success 'oc process template-type-precision | oc create -f -'
# ... and persisting
os::cmd::expect_success_and_text 'oc get pod/template-type-precision -o json' '1000030003'
os::cmd::expect_success_and_text 'oc get pod/template-type-precision -o json' '2147483647'
os::cmd::expect_success_and_text 'oc get pod/template-type-precision -o json' '9223372036854775807'
# Ensure patch computation preserves data
patch='{"metadata":{"annotations":{"comment":"patch comment"}}}'
os::cmd::expect_success "oc patch pod template-type-precision -p '${patch}'"
os::cmd::expect_success_and_text 'oc get pod/template-type-precision -o json' '9223372036854775807'
os::cmd::expect_success_and_text 'oc get pod/template-type-precision -o json' 'patch comment'
os::cmd::expect_success 'oc delete template/template-type-precision'
os::cmd::expect_success 'oc delete pod/template-type-precision'
echo "template data precision: ok"
os::cmd::expect_success 'oc create -f examples/sample-app/application-template-dockerbuild.json -n openshift'
os::cmd::expect_success 'oc policy add-role-to-user admin test-user'
os::cmd::expect_success 'oc login -u test-user -p password'
os::cmd::expect_success 'oc new-project test-template-project'
# make sure the permissions on the new project are set up
os::cmd::try_until_success 'oc get templates'
os::cmd::expect_success 'oc create -f examples/sample-app/application-template-dockerbuild.json'
os::cmd::expect_success 'oc process template/ruby-helloworld-sample'
os::cmd::expect_success 'oc process templates/ruby-helloworld-sample'
os::cmd::expect_success 'oc process openshift//ruby-helloworld-sample'
os::cmd::expect_success 'oc process openshift/template/ruby-helloworld-sample'
echo "processing templates in different namespace: ok"
|
yepengxj/df_st_origin1
|
test/cmd/templates.sh
|
Shell
|
apache-2.0
| 4,188 |
java -cp ../../classes/production/:../../lib/jline-0.9.9.jar:../../lib/java-cup-11a.jar:../../lib/lpsolve55j.jar:../../lib/ thebeast.util.alchemy.AlchemyWeights2TheBeast \
model.pml bibserv-weight-order.txt \
< corpora/cora/folds/cora-all.trained-for-fold0of10.mln > /tmp/test.weights
|
52nlp/thebeast
|
applications/entityresolution/alchemy2pml-weight-convert.sh
|
Shell
|
lgpl-3.0
| 288 |
# $1 = file path, $2 = (optional) expected file content
if [ ! -e "$1" ]
then
    echo "[error] $1 does not exist"
    exit 1
fi
if [ -n "$2" ]
then
    content="`cat "$1"`"
    if [ "$2" != "$content" ]
    then
        echo "[error] $1 content \"$content\" != \"$2\""
        exit 2
    fi
fi
echo "[success] file $1 = $2 exists"
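# Usage sketch (paths and content are illustrative):
#   echo -n hello > /tmp/f
#   ./file_exist.sh /tmp/f hello   # exits 0
#   ./file_exist.sh /tmp/f world   # prints [error] ... and exits 2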
|
jcnelson/syndicate
|
tools/irods/fuse/test/file_exist.sh
|
Shell
|
apache-2.0
| 276 |
pylint --rcfile=scripts/pylint.rc apps scripts *.py
|
justintweaver/mtchi-cert-game
|
makahiki/scripts/run_pylint.sh
|
Shell
|
gpl-3.0
| 52 |
# {{{ - Install
# install mime-types, bells and whistles for the desktop
# see http://developers.sun.com/solaris/articles/integrating_gnome.html
# and freedesktop specs
install_tomb() {
# TODO: distro package deps (for binary)
# debian: zsh, cryptsetup, sudo
_message "updating mimetypes..."
cat <<EOF > /tmp/dyne-tomb.xml
<?xml version="1.0"?>
<mime-info xmlns='http://www.freedesktop.org/standards/shared-mime-info'>
<mime-type type="application/x-tomb-volume">
<comment>Tomb crypto volume</comment>
<glob pattern="*.tomb"/>
</mime-type>
<mime-type type="application/x-tomb-key">
<comment>Tomb crypto key</comment>
<glob pattern="*.tomb.key"/>
</mime-type>
</mime-info>
EOF
xdg-mime install /tmp/dyne-tomb.xml
xdg-icon-resource install --context mimetypes --size 32 monmort.xpm monmort
xdg-icon-resource install --size 32 monmort.xpm dyne-monmort
rm /tmp/dyne-tomb.xml
_message "updating desktop..."
cat <<EOF > /usr/share/applications/tomb.desktop
[Desktop Entry]
Version=1.0
Type=Application
Name=Tomb crypto undertaker
GenericName=Crypto undertaker
Comment=Keep your bones safe
Exec="${TOMBOPENEXEC}" %U
TryExec=tomb-open
Icon=monmort.xpm
Terminal=true
Categories=Utility;Security;Archiving;Filesystem;
MimeType=application/x-tomb-volume;
X-AppInstall-Package=tomb
EOF
update-desktop-database
_message "updating menus..."
cat <<EOF > /etc/menu/tomb
?package(tomb):command="tomb" icon="/usr/share/pixmaps/monmort.xpm" needs="text" \
section="Applications/Accessories" title="Tomb" hints="Crypto" \
hotkey="Tomb"
EOF
update-menus
_message "updating mime info..."
cat <<EOF > /usr/share/mime-info/tomb.keys
# actions for encrypted tomb storage
application/x-tomb-volume:
open="${TOMBOPENEXEC}" %f
view=tomb-open %f
icon-filename=monmort.xpm
short_list_application_ids_for_novice_user_level=tomb
EOF
cat <<EOF > /usr/share/mime-info/tomb.mime
# mime type for encrypted tomb storage
application/x-tomb-volume
ext: tomb
application/x-tomb-key
ext: tomb.key
EOF
cat <<EOF > /usr/lib/mime/packages/tomb
application/x-tomb-volume; tomb-open '%s'; priority=8
EOF
update-mime
_message "updating application entry..."
cat <<EOF > /usr/share/application-registry/tomb.applications
tomb
command=tomb-open
name=Tomb - Crypto Undertaker
can_open_multiple_files=false
expects_uris=false
requires_terminal=true
mime-types=application/x-tomb-volume,application/x-tomb-key
EOF
_message "Tomb is now installed."
}
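# Hedged sanity check after install (assumes xdg-utils is present):
#   xdg-mime query filetype secret.tomb      # expect application/x-tomb-volume
#   xdg-mime query filetype secret.tomb.key  # expect application/x-tomb-key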
# }}}
|
Narrat/Tomb
|
extras/desktop/install.zsh
|
Shell
|
gpl-3.0
| 2,536 |
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script executes all hive metastore upgrade scripts on a specific
# database server in order to verify that upgrade scripts are working
# properly.
# Cleanup existing installation + configuration.
echo "####################################################"
echo "Detecting any existing Oracle XE installation:"
echo "####################################################"
apt-get clean
HTTPS_INFO=($(dpkg -l apt-transport-https | grep ^i | tr -s ' '))
if [[ ${HTTPS_INFO[1]} == "apt-transport-https" ]]
then
echo "apt-transport-https package installed"
else
echo "apt-transport-https package not installed"
apt-get install -y --force-yes apt-transport-https
fi
INSTALL_INFO=($(dpkg -l oracle\* | grep ^i | tr -s ' '))
if [[ ${INSTALL_INFO[1]} == "oracle-xe" ]] && [[ ${INSTALL_INFO[2]} == 10.2* ]]
then
echo "Oracle XE already installed...Skipping"
else
echo "Oracle XE not installed or is of a different version"
apt-get purge -y --force-yes oracle-xe || /bin/true
echo "####################################################"
echo "Installing Oracle XE dependencies:"
echo "####################################################"
if grep -q "deb http://oss.oracle.com/debian unstable main non-free" /etc/apt/sources.list.d/oracle-xe.list
then
echo "Sources already listed"
else
echo "deb http://oss.oracle.com/debian unstable main non-free" > /etc/apt/sources.list.d/oracle-xe.list
fi
wget http://oss.oracle.com/el4/RPM-GPG-KEY-oracle -O- | sudo apt-key add - || /bin/true
apt-get update || /bin/true
ls -al /var/cache/apt/archives
apt-get install -y --force-yes oracle-xe:i386
fi
echo "####################################################"
echo "Configuring Oracle XE Environment:"
echo "####################################################"
echo "8080" > /tmp/silent.properties
echo "1521" >> /tmp/silent.properties
echo "hivepw" >> /tmp/silent.properties
echo "hivepw" >> /tmp/silent.properties
echo "y" >> /tmp/silent.properties
/etc/init.d/oracle-xe configure < /tmp/silent.properties > /tmp/silentInstall.log || /bin/true
export ORACLE_HOME=/usr/lib/oracle/xe/app/oracle/product/10.2.0/server
export PATH=$PATH:$ORACLE_HOME/bin
echo "####################################################"
echo "Setting up user account and Table space:"
echo "####################################################"
echo "drop user hiveuser cascade;" > /tmp/oraInit.sql
echo "alter database default tablespace SYSTEM;" >> /tmp/oraInit.sql
echo "drop tablespace hive_tbspace including contents and datafiles;" >> /tmp/oraInit.sql
echo "create user hiveuser identified by hivepw;" >> /tmp/oraInit.sql
echo "grant connect to hiveuser;" >> /tmp/oraInit.sql
echo "grant create table to hiveuser;" >> /tmp/oraInit.sql
echo "create smallfile tablespace hive_tbspace datafile 'hive.dbf' size 100m;" >> /tmp/oraInit.sql
echo "alter database default tablespace hive_tbspace;" >> /tmp/oraInit.sql
echo "alter user hiveuser quota 100m on hive_tbspace;" >> /tmp/oraInit.sql
echo "exit;" >> /tmp/oraInit.sql
sqlplus -L SYSTEM/hivepw@XE @/tmp/oraInit.sql
echo "DONE!!!"
|
vineetgarg02/hive
|
testutils/metastore/dbs/oracle/prepare.sh
|
Shell
|
apache-2.0
| 3,896 |
#!/bin/bash
sed -i 's|localhost|'"${DBHOST}"'|g' app.pl
sed -i 's|user .*;|user '"$(id -u -n)"';|g' nginx.conf
sed -i 's|server unix.*frameworks-benchmark.sock;|server unix:'"${TROOT}"'/frameworks-benchmark.sock;|g' nginx.conf
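# The three seds above template the checked-in configs in place: point the app
# at the real database host, run nginx workers as the current user, and bind
# the Starman upstream to a unix socket under $TROOT.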
fw_depends perl nginx
cpanm --notest --no-man-page \
[email protected] \
Dancer::Plugin::[email protected] \
[email protected] \
DBD::[email protected] \
JSON::[email protected] \
[email protected] \
[email protected]
nginx -c ${TROOT}/nginx.conf
plackup -E production -s Starman --workers=${MAX_THREADS} -l ${TROOT}/frameworks-benchmark.sock -a ./app.pl &
|
PermeAgility/FrameworkBenchmarks
|
frameworks/Perl/dancer/setup.sh
|
Shell
|
bsd-3-clause
| 581 |
#!/bin/sh
# SUMMARY: Test the node_exporter example
# LABELS:
set -e
# Source libraries. Uncomment if needed/defined
#. "${RT_LIB}"
. "${RT_PROJECT_ROOT}/_lib/lib.sh"
NAME=node_exporter
clean_up() {
rm -f ${NAME}*
}
trap clean_up EXIT
# Test code goes here
linuxkit build "${LINUXKIT_EXAMPLES_DIR}/${NAME}.yml"
exit 0
|
deitch/linuxkit
|
test/cases/000_build/100_examples/050_node_exporter/test.sh
|
Shell
|
apache-2.0
| 328 |
# no tunnels left, Ma!
: ==== cut ====
ipsec auto --status
cat /tmp/pluto.log
: ==== tuc ====
echo end
: ==== end ====
|
y-trudeau/openswan-patch-meraki
|
testing/pluto/dpd-07/final.sh
|
Shell
|
gpl-2.0
| 120 |
#! /bin/bash
echo "**************************"
echo "* PUSH CHANGES TO GITHUB *"
echo "**************************"
git diff | grep ^+++
git diff | grep ^---
read -p "You want to continue? [y|*N*]: " OPTION
if [ "$OPTION" == "y" ]; then
read -p "Write the commit message: " MESSAGE
git add . && \
git commit -m "$MESSAGE" && \
git push
fi
|
asanzdiego/curso-groovy-grails-2013
|
git-push.sh
|
Shell
|
agpl-3.0
| 360 |
#!/bin/bash
# usage xacro2urdf file1.xacro file2.urdf
echo "Converting file $1 in $2"
rosrun xacro xacro.py $1 > $2
|
mwimble/ArduinoConroller
|
summit-xl-ros-stack-read-only/trunk/trunk/summit_xl_sim_hydro/xl_terabot_description/urdf/xacro2urdf.sh
|
Shell
|
gpl-2.0
| 119 |
#!/bin/bash
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
bin=`dirname "$0"`
bin=`cd "$bin"; pwd`
. "$bin"/find-flink-home.sh
_FLINK_HOME_DETERMINED=1
. "$FLINK_HOME"/bin/config.sh
FLINK_CLASSPATH=`constructFlinkClassPath`
PYTHON_JAR_PATH=`echo "$FLINK_OPT_DIR"/flink-python*.jar`
PYFLINK_PYTHON="${PYFLINK_PYTHON:-"python"}"
# So that python can find out Flink's Jars
export FLINK_BIN_DIR=$FLINK_BIN_DIR
export FLINK_HOME
# Add pyflink & py4j & cloudpickle to PYTHONPATH
export PYTHONPATH="$FLINK_OPT_DIR/python/pyflink.zip:$PYTHONPATH"
PY4J_ZIP=`echo "$FLINK_OPT_DIR"/python/py4j-*-src.zip`
CLOUDPICKLE_ZIP=`echo "$FLINK_OPT_DIR"/python/cloudpickle-*-src.zip`
export PYTHONPATH="$PY4J_ZIP:$CLOUDPICKLE_ZIP:$PYTHONPATH"
PARSER="org.apache.flink.client.python.PythonShellParser"
function parse_options() {
"${JAVA_RUN}" ${JVM_ARGS} -cp ${FLINK_CLASSPATH}:${PYTHON_JAR_PATH} ${PARSER} "$@"
printf "%d\0" $?
}
# Turn off posix mode since it does not allow process substitution
set +o posix
# If the command has option --help | -h, the script will directly
# run the PythonShellParser program to stdout the help message.
if [[ "$@" =~ '--help' ]] || [[ "$@" =~ '-h' ]]; then
"${JAVA_RUN}" ${JVM_ARGS} -cp ${FLINK_CLASSPATH}:${PYTHON_JAR_PATH} ${PARSER} "$@"
exit 0
fi
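# Collect the NUL-separated options into an array; the last element is the
# parser's exit code.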
OPTIONS=()
while IFS= read -d '' -r ARG; do
OPTIONS+=("$ARG")
done < <(parse_options "$@")
COUNT=${#OPTIONS[@]}
LAST=$((COUNT - 1))
LAUNCHER_EXIT_CODE=${OPTIONS[$LAST]}
# Certain JVM failures result in errors being printed to stdout (instead of stderr), which causes
# the code that parses the output of the launcher to get confused. In those cases, check if the
# exit code is an integer, and if it's not, handle it as a special error case.
if ! [[ ${LAUNCHER_EXIT_CODE} =~ ^[0-9]+$ ]]; then
echo "${OPTIONS[@]}" | head -n-1 1>&2
exit 1
fi
if [[ ${LAUNCHER_EXIT_CODE} != 0 ]]; then
exit ${LAUNCHER_EXIT_CODE}
fi
OPTIONS=("${OPTIONS[@]:0:$LAST}")
export SUBMIT_ARGS=${OPTIONS[@]}
# -i: interactive
# -m: execute shell.py in the zip package
${PYFLINK_PYTHON} -i -m pyflink.shell
|
StephanEwen/incubator-flink
|
flink-python/bin/pyflink-shell.sh
|
Shell
|
apache-2.0
| 2,994 |
#!/usr/bin/env bash
CURRENT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
source $CURRENT_DIR/helpers/helpers.sh
source $CURRENT_DIR/helpers/resurrect_helpers.sh
create_tmux_test_environment_and_save() {
set_screen_dimensions_helper
$CURRENT_DIR/helpers/create_and_save_tmux_test_environment.exp
}
main() {
install_tmux_plugin_under_test_helper
mkdir -p /tmp/bar # setup required dirs
create_tmux_test_environment_and_save
if last_save_file_differs_helper "tests/fixtures/save_file.txt"; then
fail_helper "Saved file not correct (initial save)"
fi
exit_helper
}
main
|
ibizaman/conffiles
|
tmux/resurrect/tests/test_resurrect_save.sh
|
Shell
|
mit
| 592 |
#!/bin/bash
# Hudson Tomcat control script
#
# description: Provides easy control of Hudson-deployed tomcat instance(s)
# Variables for use within this script
DEPLOY_ROOT=$HOME/deploys/mifos-$JOB_NAME-deploy
JVM_TMPDIR=/tmp/hudson-$JOB_NAME-tomcat-tmp
# Variables for use by children/successors of this script
export CATALINA_HOME=$DEPLOY_ROOT/tomcat6
export CATALINA_OPTS="-Xmx512m -XX:MaxPermSize=128m -Djava.io.tmpdir=$JVM_TMPDIR -Djava.awt.headless=true"
export CATALINA_PID=$DEPLOY_ROOT/tomcat.pid
export MIFOS_CONF=$DEPLOY_ROOT/mifos_conf
[ -f $CATALINA_HOME/bin/catalina.sh ] || exit 0
[ -d $JVM_TMPDIR ] || mkdir -p $JVM_TMPDIR || exit 1
start_tomcat() {
$CATALINA_HOME/bin/startup.sh
}
stop_tomcat() {
if [ -e $CATALINA_PID ]
then
$CATALINA_HOME/bin/shutdown.sh -force
rm -f $CATALINA_PID
else
$CATALINA_HOME/bin/shutdown.sh
fi
}
case $1 in
start)
start_tomcat
;;
stop)
stop_tomcat
;;
status)
if [ -e $CATALINA_PID ]
then
echo "Tomcat appears to be running as process id `cat $CATALINA_PID`"
else
echo "$CATALINA_PID does not exist, Tomcat probably is not running."
fi
;;
restart)
stop_tomcat
sleep 1
start_tomcat
;;
*)
echo "Usage: $0 {start|stop|status}"
exit 1
;;
esac
exit 0
|
AArhin/head
|
resources/continuous-integration/salesDemo-deploy/tomcat/control.sh
|
Shell
|
apache-2.0
| 1,429 |
#!/bin/sh
#
# This test is for checking rtnetlink callpaths, and get as much coverage as possible.
#
# set -e
devdummy="test-dummy0"
ret=0
# set global exit status, but never reset nonzero one.
check_err()
{
if [ $ret -eq 0 ]; then
ret=$1
fi
}
# same but inverted -- used when command must fail for test to pass
check_fail()
{
if [ $1 -eq 0 ]; then
ret=1
fi
}
kci_add_dummy()
{
ip link add name "$devdummy" type dummy
check_err $?
ip link set "$devdummy" up
check_err $?
}
kci_del_dummy()
{
ip link del dev "$devdummy"
check_err $?
}
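# Dump netconf state for a device over both address families; a failure here
# is reported but does not flip the global exit status on its own.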
kci_test_netconf()
{
dev="$1"
r=$ret
ip netconf show dev "$dev" > /dev/null
check_err $?
for f in 4 6; do
ip -$f netconf show dev "$dev" > /dev/null
check_err $?
done
if [ $ret -ne 0 ] ;then
echo "FAIL: ip netconf show $dev"
test $r -eq 0 && ret=0
return 1
fi
}
# add a bridge with vlans on top
kci_test_bridge()
{
devbr="test-br0"
vlandev="testbr-vlan1"
ret=0
ip link add name "$devbr" type bridge
check_err $?
ip link set dev "$devdummy" master "$devbr"
check_err $?
ip link set "$devbr" up
check_err $?
ip link add link "$devbr" name "$vlandev" type vlan id 1
check_err $?
ip addr add dev "$vlandev" 10.200.7.23/30
check_err $?
ip -6 addr add dev "$vlandev" dead:42::1234/64
check_err $?
ip -d link > /dev/null
check_err $?
ip r s t all > /dev/null
check_err $?
for name in "$devbr" "$vlandev" "$devdummy" ; do
kci_test_netconf "$name"
done
ip -6 addr del dev "$vlandev" dead:42::1234/64
check_err $?
ip link del dev "$vlandev"
check_err $?
ip link del dev "$devbr"
check_err $?
if [ $ret -ne 0 ];then
echo "FAIL: bridge setup"
return 1
fi
echo "PASS: bridge setup"
}
kci_test_gre()
{
gredev=neta
rem=10.42.42.1
loc=10.0.0.1
ret=0
ip tunnel add $gredev mode gre remote $rem local $loc ttl 1
check_err $?
ip link set $gredev up
check_err $?
ip addr add 10.23.7.10 dev $gredev
check_err $?
ip route add 10.23.8.0/30 dev $gredev
check_err $?
ip addr add dev "$devdummy" 10.23.7.11/24
check_err $?
ip link > /dev/null
check_err $?
ip addr > /dev/null
check_err $?
kci_test_netconf "$gredev"
ip addr del dev "$devdummy" 10.23.7.11/24
check_err $?
ip link del $gredev
check_err $?
if [ $ret -ne 0 ];then
echo "FAIL: gre tunnel endpoint"
return 1
fi
echo "PASS: gre tunnel endpoint"
}
# tc uses rtnetlink too, for full tc testing
# please see tools/testing/selftests/tc-testing.
kci_test_tc()
{
dev=lo
ret=0
tc qdisc add dev "$dev" root handle 1: htb
check_err $?
tc class add dev "$dev" parent 1: classid 1:10 htb rate 1mbit
check_err $?
tc filter add dev "$dev" parent 1:0 prio 5 handle ffe: protocol ip u32 divisor 256
check_err $?
tc filter add dev "$dev" parent 1:0 prio 5 handle ffd: protocol ip u32 divisor 256
check_err $?
tc filter add dev "$dev" parent 1:0 prio 5 handle ffc: protocol ip u32 divisor 256
check_err $?
tc filter add dev "$dev" protocol ip parent 1: prio 5 handle ffe:2:3 u32 ht ffe:2: match ip src 10.0.0.3 flowid 1:10
check_err $?
tc filter add dev "$dev" protocol ip parent 1: prio 5 handle ffe:2:2 u32 ht ffe:2: match ip src 10.0.0.2 flowid 1:10
check_err $?
tc filter show dev "$dev" parent 1:0 > /dev/null
check_err $?
tc filter del dev "$dev" protocol ip parent 1: prio 5 handle ffe:2:3 u32
check_err $?
tc filter show dev "$dev" parent 1:0 > /dev/null
check_err $?
tc qdisc del dev "$dev" root handle 1: htb
check_err $?
if [ $ret -ne 0 ];then
echo "FAIL: tc htb hierarchy"
return 1
fi
echo "PASS: tc htb hierarchy"
}
kci_test_polrouting()
{
ret=0
ip rule add fwmark 1 lookup 100
check_err $?
ip route add local 0.0.0.0/0 dev lo table 100
check_err $?
ip r s t all > /dev/null
check_err $?
ip rule del fwmark 1 lookup 100
check_err $?
ip route del local 0.0.0.0/0 dev lo table 100
check_err $?
if [ $ret -ne 0 ];then
echo "FAIL: policy route test"
return 1
fi
echo "PASS: policy routing"
}
kci_test_route_get()
{
ret=0
ip route get 127.0.0.1 > /dev/null
check_err $?
ip route get 127.0.0.1 dev "$devdummy" > /dev/null
check_err $?
ip route get ::1 > /dev/null
check_err $?
ip route get fe80::1 dev "$devdummy" > /dev/null
check_err $?
ip route get 127.0.0.1 from 127.0.0.1 oif lo tos 0x1 mark 0x1 > /dev/null
check_err $?
ip route get ::1 from ::1 iif lo oif lo tos 0x1 mark 0x1 > /dev/null
check_err $?
ip addr add dev "$devdummy" 10.23.7.11/24
check_err $?
ip route get 10.23.7.11 from 10.23.7.12 iif "$devdummy" > /dev/null
check_err $?
ip addr del dev "$devdummy" 10.23.7.11/24
check_err $?
if [ $ret -ne 0 ];then
echo "FAIL: route get"
return 1
fi
echo "PASS: route get"
}
kci_test_addrlabel()
{
ret=0
ip addrlabel add prefix dead::/64 dev lo label 1
check_err $?
ip addrlabel list |grep -q "prefix dead::/64 dev lo label 1"
check_err $?
ip addrlabel del prefix dead::/64 dev lo label 1 2> /dev/null
check_err $?
ip addrlabel add prefix dead::/64 label 1 2> /dev/null
check_err $?
ip addrlabel del prefix dead::/64 label 1 2> /dev/null
check_err $?
# concurrent add/delete
for i in $(seq 1 1000); do
ip addrlabel add prefix 1c3::/64 label 12345 2>/dev/null
done &
for i in $(seq 1 1000); do
ip addrlabel del prefix 1c3::/64 label 12345 2>/dev/null
done
wait
ip addrlabel del prefix 1c3::/64 label 12345 2>/dev/null
if [ $ret -ne 0 ];then
echo "FAIL: ipv6 addrlabel"
return 1
fi
echo "PASS: ipv6 addrlabel"
}
kci_test_ifalias()
{
ret=0
namewant=$(uuidgen)
syspathname="/sys/class/net/$devdummy/ifalias"
ip link set dev "$devdummy" alias "$namewant"
check_err $?
if [ $ret -ne 0 ]; then
echo "FAIL: cannot set interface alias of $devdummy to $namewant"
return 1
fi
ip link show "$devdummy" | grep -q "alias $namewant"
check_err $?
if [ -r "$syspathname" ] ; then
read namehave < "$syspathname"
if [ "$namewant" != "$namehave" ]; then
echo "FAIL: did set ifalias $namewant but got $namehave"
return 1
fi
namewant=$(uuidgen)
echo "$namewant" > "$syspathname"
ip link show "$devdummy" | grep -q "alias $namewant"
check_err $?
# sysfs interface allows to delete alias again
echo "" > "$syspathname"
ip link show "$devdummy" | grep -q "alias $namewant"
check_fail $?
for i in $(seq 1 100); do
uuidgen > "$syspathname" &
done
wait
# re-add the alias -- kernel should free mem when dummy dev is removed
ip link set dev "$devdummy" alias "$namewant"
check_err $?
fi
if [ $ret -ne 0 ]; then
echo "FAIL: set interface alias $devdummy to $namewant"
return 1
fi
echo "PASS: set ifalias $namewant for $devdummy"
}
kci_test_vrf()
{
vrfname="test-vrf"
ret=0
ip link show type vrf 2>/dev/null
if [ $? -ne 0 ]; then
echo "SKIP: vrf: iproute2 too old"
return 0
fi
ip link add "$vrfname" type vrf table 10
check_err $?
if [ $ret -ne 0 ];then
echo "FAIL: can't add vrf interface, skipping test"
return 0
fi
ip -br link show type vrf | grep -q "$vrfname"
check_err $?
if [ $ret -ne 0 ];then
echo "FAIL: created vrf device not found"
return 1
fi
ip link set dev "$vrfname" up
check_err $?
ip link set dev "$devdummy" master "$vrfname"
check_err $?
ip link del dev "$vrfname"
check_err $?
if [ $ret -ne 0 ];then
echo "FAIL: vrf"
return 1
fi
echo "PASS: vrf"
}
kci_test_encap_vxlan()
{
ret=0
vxlan="test-vxlan0"
vlan="test-vlan0"
testns="$1"
ip netns exec "$testns" ip link add "$vxlan" type vxlan id 42 group 239.1.1.1 \
dev "$devdummy" dstport 4789 2>/dev/null
if [ $? -ne 0 ]; then
echo "FAIL: can't add vxlan interface, skipping test"
return 0
fi
check_err $?
ip netns exec "$testns" ip addr add 10.2.11.49/24 dev "$vxlan"
check_err $?
ip netns exec "$testns" ip link set up dev "$vxlan"
check_err $?
ip netns exec "$testns" ip link add link "$vxlan" name "$vlan" type vlan id 1
check_err $?
ip netns exec "$testns" ip link del "$vxlan"
check_err $?
if [ $ret -ne 0 ]; then
echo "FAIL: vxlan"
return 1
fi
echo "PASS: vxlan"
}
kci_test_encap_fou()
{
ret=0
name="test-fou"
testns="$1"
ip fou help 2>&1 |grep -q 'Usage: ip fou'
if [ $? -ne 0 ];then
echo "SKIP: fou: iproute2 too old"
return 1
fi
ip netns exec "$testns" ip fou add port 7777 ipproto 47 2>/dev/null
if [ $? -ne 0 ];then
echo "FAIL: can't add fou port 7777, skipping test"
return 1
fi
ip netns exec "$testns" ip fou add port 8888 ipproto 4
check_err $?
ip netns exec "$testns" ip fou del port 9999 2>/dev/null
check_fail $?
ip netns exec "$testns" ip fou del port 7777
check_err $?
if [ $ret -ne 0 ]; then
echo "FAIL: fou"
return 1
fi
echo "PASS: fou"
}
# test various encap methods, use netns to avoid unwanted interference
kci_test_encap()
{
testns="testns"
ret=0
ip netns add "$testns"
if [ $? -ne 0 ]; then
echo "SKIP encap tests: cannot add net namespace $testns"
return 1
fi
ip netns exec "$testns" ip link set lo up
check_err $?
ip netns exec "$testns" ip link add name "$devdummy" type dummy
check_err $?
ip netns exec "$testns" ip link set "$devdummy" up
check_err $?
kci_test_encap_vxlan "$testns"
kci_test_encap_fou "$testns"
ip netns del "$testns"
}
kci_test_macsec()
{
msname="test_macsec0"
ret=0
ip macsec help 2>&1 | grep -q "^Usage: ip macsec"
if [ $? -ne 0 ]; then
echo "SKIP: macsec: iproute2 too old"
return 0
fi
ip link add link "$devdummy" "$msname" type macsec port 42 encrypt on
check_err $?
if [ $ret -ne 0 ];then
echo "FAIL: can't add macsec interface, skipping test"
return 1
fi
ip macsec add "$msname" tx sa 0 pn 1024 on key 01 12345678901234567890123456789012
check_err $?
ip macsec add "$msname" rx port 1234 address "1c:ed:de:ad:be:ef"
check_err $?
ip macsec add "$msname" rx port 1234 address "1c:ed:de:ad:be:ef" sa 0 pn 1 on key 00 0123456789abcdef0123456789abcdef
check_err $?
ip macsec show > /dev/null
check_err $?
ip link del dev "$msname"
check_err $?
if [ $ret -ne 0 ];then
echo "FAIL: macsec"
return 1
fi
echo "PASS: macsec"
}
kci_test_gretap()
{
testns="testns"
DEV_NS=gretap00
ret=0
ip netns add "$testns"
if [ $? -ne 0 ]; then
echo "SKIP gretap tests: cannot add net namespace $testns"
return 1
fi
ip link help gretap 2>&1 | grep -q "^Usage:"
if [ $? -ne 0 ];then
echo "SKIP: gretap: iproute2 too old"
ip netns del "$testns"
return 1
fi
# test native tunnel
ip netns exec "$testns" ip link add dev "$DEV_NS" type gretap seq \
key 102 local 172.16.1.100 remote 172.16.1.200
check_err $?
ip netns exec "$testns" ip addr add dev "$DEV_NS" 10.1.1.100/24
check_err $?
ip netns exec "$testns" ip link set dev $DEV_NS up
check_err $?
ip netns exec "$testns" ip link del "$DEV_NS"
check_err $?
# test external mode
ip netns exec "$testns" ip link add dev "$DEV_NS" type gretap external
check_err $?
ip netns exec "$testns" ip link del "$DEV_NS"
check_err $?
if [ $ret -ne 0 ]; then
echo "FAIL: gretap"
ip netns del "$testns"
return 1
fi
echo "PASS: gretap"
ip netns del "$testns"
}
kci_test_ip6gretap()
{
testns="testns"
DEV_NS=ip6gretap00
ret=0
ip netns add "$testns"
if [ $? -ne 0 ]; then
echo "SKIP ip6gretap tests: cannot add net namespace $testns"
return 1
fi
ip link help ip6gretap 2>&1 | grep -q "^Usage:"
if [ $? -ne 0 ];then
echo "SKIP: ip6gretap: iproute2 too old"
ip netns del "$testns"
return 1
fi
# test native tunnel
ip netns exec "$testns" ip link add dev "$DEV_NS" type ip6gretap seq \
key 102 local fc00:100::1 remote fc00:100::2
check_err $?
ip netns exec "$testns" ip addr add dev "$DEV_NS" fc00:200::1/96
check_err $?
ip netns exec "$testns" ip link set dev $DEV_NS up
check_err $?
ip netns exec "$testns" ip link del "$DEV_NS"
check_err $?
# test external mode
ip netns exec "$testns" ip link add dev "$DEV_NS" type ip6gretap external
check_err $?
ip netns exec "$testns" ip link del "$DEV_NS"
check_err $?
if [ $ret -ne 0 ]; then
echo "FAIL: ip6gretap"
ip netns del "$testns"
return 1
fi
echo "PASS: ip6gretap"
ip netns del "$testns"
}
kci_test_erspan()
{
testns="testns"
DEV_NS=erspan00
ret=0
ip link help erspan 2>&1 | grep -q "^Usage:"
if [ $? -ne 0 ];then
echo "SKIP: erspan: iproute2 too old"
return 1
fi
ip netns add "$testns"
if [ $? -ne 0 ]; then
echo "SKIP erspan tests: cannot add net namespace $testns"
return 1
fi
# test native tunnel erspan v1
ip netns exec "$testns" ip link add dev "$DEV_NS" type erspan seq \
key 102 local 172.16.1.100 remote 172.16.1.200 \
erspan_ver 1 erspan 488
check_err $?
ip netns exec "$testns" ip addr add dev "$DEV_NS" 10.1.1.100/24
check_err $?
ip netns exec "$testns" ip link set dev $DEV_NS up
check_err $?
ip netns exec "$testns" ip link del "$DEV_NS"
check_err $?
# test native tunnel erspan v2
ip netns exec "$testns" ip link add dev "$DEV_NS" type erspan seq \
key 102 local 172.16.1.100 remote 172.16.1.200 \
erspan_ver 2 erspan_dir ingress erspan_hwid 7
check_err $?
ip netns exec "$testns" ip addr add dev "$DEV_NS" 10.1.1.100/24
check_err $?
ip netns exec "$testns" ip link set dev $DEV_NS up
check_err $?
ip netns exec "$testns" ip link del "$DEV_NS"
check_err $?
# test external mode
ip netns exec "$testns" ip link add dev "$DEV_NS" type erspan external
check_err $?
ip netns exec "$testns" ip link del "$DEV_NS"
check_err $?
if [ $ret -ne 0 ]; then
echo "FAIL: erspan"
ip netns del "$testns"
return 1
fi
echo "PASS: erspan"
ip netns del "$testns"
}
kci_test_ip6erspan()
{
testns="testns"
DEV_NS=ip6erspan00
ret=0
ip link help ip6erspan 2>&1 | grep -q "^Usage:"
if [ $? -ne 0 ];then
echo "SKIP: ip6erspan: iproute2 too old"
return 1
fi
ip netns add "$testns"
if [ $? -ne 0 ]; then
echo "SKIP ip6erspan tests: cannot add net namespace $testns"
return 1
fi
# test native tunnel ip6erspan v1
ip netns exec "$testns" ip link add dev "$DEV_NS" type ip6erspan seq \
key 102 local fc00:100::1 remote fc00:100::2 \
erspan_ver 1 erspan 488
check_err $?
ip netns exec "$testns" ip addr add dev "$DEV_NS" 10.1.1.100/24
check_err $?
ip netns exec "$testns" ip link set dev $DEV_NS up
check_err $?
ip netns exec "$testns" ip link del "$DEV_NS"
check_err $?
# test native tunnel ip6erspan v2
ip netns exec "$testns" ip link add dev "$DEV_NS" type ip6erspan seq \
key 102 local fc00:100::1 remote fc00:100::2 \
erspan_ver 2 erspan_dir ingress erspan_hwid 7
check_err $?
ip netns exec "$testns" ip addr add dev "$DEV_NS" 10.1.1.100/24
check_err $?
ip netns exec "$testns" ip link set dev $DEV_NS up
check_err $?
ip netns exec "$testns" ip link del "$DEV_NS"
check_err $?
# test external mode
ip netns exec "$testns" ip link add dev "$DEV_NS" \
type ip6erspan external
check_err $?
ip netns exec "$testns" ip link del "$DEV_NS"
check_err $?
if [ $ret -ne 0 ]; then
echo "FAIL: ip6erspan"
ip netns del "$testns"
return 1
fi
echo "PASS: ip6erspan"
ip netns del "$testns"
}
kci_test_rtnl()
{
kci_add_dummy
if [ $ret -ne 0 ];then
echo "FAIL: cannot add dummy interface"
return 1
fi
kci_test_polrouting
kci_test_route_get
kci_test_tc
kci_test_gre
kci_test_gretap
kci_test_ip6gretap
kci_test_erspan
kci_test_ip6erspan
kci_test_bridge
kci_test_addrlabel
kci_test_ifalias
kci_test_vrf
kci_test_encap
kci_test_macsec
kci_del_dummy
}
# check for needed privileges
if [ "$(id -u)" -ne 0 ];then
echo "SKIP: Need root privileges"
exit 0
fi
for x in ip tc;do
$x -Version 2>/dev/null >/dev/null
if [ $? -ne 0 ];then
echo "SKIP: Could not run test without the $x tool"
exit 0
fi
done
kci_test_rtnl
exit $ret
|
raumfeld/linux-am33xx
|
tools/testing/selftests/net/rtnetlink.sh
|
Shell
|
gpl-2.0
| 15,621 |
R CMD REMOVE --library=$PREFIX/lib/R/library/ PhyloProfileData
|
blankenberg/bioconda-recipes
|
recipes/bioconductor-phyloprofiledata/pre-unlink.sh
|
Shell
|
mit
| 63 |
#
# Copyright (C) 2010 OpenWrt.org
#
. /lib/ramips.sh
PART_NAME=firmware
RAMFS_COPY_DATA=/lib/ramips.sh
platform_check_image() {
local board=$(ramips_board_name)
local magic="$(get_magic_long "$1")"
[ "$#" -gt 1 ] && return 1
case "$board" in
3g-6200n | \
3g-6200nl | \
3g150b | \
3g300m | \
a5-v11 | \
air3gii | \
ai-br100 |\
all0239-3g | \
all0256n | \
all5002 | \
all5003 | \
ar725w | \
asl26555 | \
awapn2403 | \
awm002-evb | \
awm003-evb | \
bc2 | \
broadway | \
carambola | \
cf-wr800n | \
d105 | \
dap-1350 | \
dcs-930 | \
dcs-930l-b1 | \
dir-300-b1 | \
dir-300-b7 | \
dir-320-b1 | \
dir-600-b1 | \
dir-600-b2 | \
dir-615-d | \
dir-615-h1 | \
dir-620-a1 | \
dir-620-d1 | \
dir-810l | \
e1700 | \
ex2700 |\
esr-9753 | \
f7c027 | \
fonera20n | \
freestation5 | \
firewrt |\
pbr-m1 |\
hg255d | \
hlk-rm04 | \
ht-tm02 | \
hw550-3g | \
ip2202 | \
linkits7688 | \
linkits7688d | \
m2m | \
m3 | \
m4 | \
microwrt | \
mlw221 | \
mlwg2 | \
mofi3500-3gn | \
mpr-a1 | \
mpr-a2 | \
mr-102n | \
mzk-w300nh2 | \
nbg-419n | \
nw718 | \
omni-emb | \
omni-emb-hpm | \
omni-plug | \
olinuxino-rt5350f | \
olinuxino-rt5350f-evb | \
psr-680w | \
px4885 | \
re6500 | \
rp-n53 | \
rt-g32-b1 | \
rt-n10-plus | \
rt-n13u | \
rt-n14u | \
rt-n15 | \
rt-n56u | \
rut5xx | \
sl-r7205 | \
tew-691gr | \
tew-692gr | \
ur-326n4g |\
ur-336un |\
v22rw-2x2 | \
vocore | \
w150m | \
w306r-v20 |\
w502u |\
whr-g300n |\
whr-300hp2 |\
whr-600d |\
whr-1166d |\
wizfi630a |\
wsr-600 |\
wl-330n | \
wl-330n3g | \
wl-351 | \
wl341v3 | \
wli-tx4-ag300n | \
wzr-agl300nh | \
wmr300 |\
wnce2001 | \
wr512-3gn |\
wr6202 |\
wr8305rt |\
wrtnode |\
wt1520 |\
wt3020 |\
x5 |\
x8 |\
xiaomi-miwifi-mini |\
y1 |\
y1s |\
zbt-wa05 |\
zbt-wg2626 |\
zte-q7)
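# 27051956 is the U-Boot uImage magic number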
[ "$magic" != "27051956" ] && {
echo "Invalid image type."
return 1
}
return 0
;;
wsr-1166)
[ "$magic" != "48445230" ] && {
echo "Invalid image type."
return 1
}
return 0
;;
ar670w)
[ "$magic" != "6d000080" ] && {
echo "Invalid image type."
return 1
}
return 0
;;
cy-swr1100 |\
dir-610-a1 |\
dir-645 |\
dir-860l-b1)
[ "$magic" != "5ea3a417" ] && {
echo "Invalid image type."
return 1
}
return 0
;;
br-6475nd)
[ "$magic" != "43535953" ] && {
echo "Invalid image type."
return 1
}
return 0
;;
c20i)
[ "$magic" != "03000000" ] && {
echo "Invalid image type."
return 1
}
return 0
;;
esac
echo "Sysupgrade is not yet supported on $board."
return 1
}
platform_do_upgrade() {
local board=$(ramips_board_name)
case "$board" in
*)
default_do_upgrade "$ARGV"
;;
esac
}
disable_watchdog() {
killall watchdog
( ps | grep -v 'grep' | grep '/dev/watchdog' ) && {
echo 'Could not disable watchdog'
return 1
}
}
blink_led() {
. /etc/diag.sh; set_state upgrade
}
append sysupgrade_pre_upgrade disable_watchdog
append sysupgrade_pre_upgrade blink_led
|
m2mselect/owrt
|
target/linux/ramips/base-files/lib/upgrade/platform.sh
|
Shell
|
gpl-2.0
| 2,983 |
#!/bin/bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
# NOTE: All output from this script needs to be copied back to the calling
# source tree. This is managed in kube::build::copy_output in build-tools/common.sh.
# If the output set is changed update that function.
"${KUBE_ROOT}/build-tools/run.sh" hack/update-generated-protobuf-dockerized.sh "$@"
# ex: ts=2 sw=2 et filetype=sh
|
jkhelil/kubernetes
|
hack/update-generated-protobuf.sh
|
Shell
|
apache-2.0
| 1,021 |
#!/bin/bash
if [ -f .veewee_params ]
then
. .veewee_params
fi
# postinstall.sh created from Mitchell's official lucid32/64 baseboxes
date > /etc/vagrant_box_build_time
# Installing the virtualbox guest additions
apt-get -y install dkms
VBOX_VERSION=$(cat /home/vagrant/.vbox_version)
cd /tmp
wget http://download.virtualbox.org/virtualbox/$VBOX_VERSION/VBoxGuestAdditions_$VBOX_VERSION.iso
mount -o loop VBoxGuestAdditions_$VBOX_VERSION.iso /mnt
sh /mnt/VBoxLinuxAdditions.run
umount /mnt
rm VBoxGuestAdditions_$VBOX_VERSION.iso
# Apt-install various things necessary for Ruby, guest additions,
# etc., and remove optional things to trim down the machine.
apt-get -y update
apt-get -y upgrade
apt-get -y install linux-headers-$(uname -r) build-essential
apt-get -y install zlib1g-dev libssl-dev libreadline5
apt-get clean
# Setup sudo to allow no-password sudo for "admin"
cp /etc/sudoers /etc/sudoers.orig
sed -i -e '/Defaults\s\+env_reset/a Defaults\texempt_group=admin' /etc/sudoers
sed -i -e 's/%admin ALL=(ALL) ALL/%admin ALL=NOPASSWD:ALL/g' /etc/sudoers
# Install NFS client
apt-get -y install nfs-common
# Install Ruby from source in /opt so that users of Vagrant
# can install their own Rubies using packages or however.
# We must install the 1.8.x series since Puppet doesn't support
# Ruby 1.9 yet.
wget http://ftp.ruby-lang.org/pub/ruby/1.8/ruby-1.8.7-p352.tar.gz
tar xvzf ruby-1.8.7-p352.tar.gz
cd ruby-1.8.7-p352
./configure --prefix=/opt/ruby
make
make install
cd ..
rm -rf ruby-1.8.7-p352*
# Install RubyGems 1.7.2
wget http://production.cf.rubygems.org/rubygems/rubygems-1.7.2.tgz
tar xzf rubygems-1.7.2.tgz
cd rubygems-1.7.2
/opt/ruby/bin/ruby setup.rb
cd ..
rm -rf rubygems-1.7.2*
# Installing chef & Puppet
/opt/ruby/bin/gem install chef --no-ri --no-rdoc
/opt/ruby/bin/gem install puppet --no-ri --no-rdoc
# Add /opt/ruby/bin to the global path as the last resort so
# Ruby, RubyGems, and Chef/Puppet are visible
echo 'PATH=$PATH:/opt/ruby/bin/'> /etc/profile.d/vagrantruby.sh
# Installing vagrant keys
mkdir /home/vagrant/.ssh
chmod 700 /home/vagrant/.ssh
cd /home/vagrant/.ssh
wget --no-check-certificate 'http://github.com/mitchellh/vagrant/raw/master/keys/vagrant.pub' -O authorized_keys
chmod 600 /home/vagrant/.ssh/authorized_keys
chown -R vagrant /home/vagrant/.ssh
# Remove items used for building, since they aren't needed anymore
apt-get -y remove linux-headers-$(uname -r) build-essential
apt-get -y autoremove
# Zero out the free space to save space in the final image:
dd if=/dev/zero of=/EMPTY bs=1M
rm -f /EMPTY
# Removing leftover leases and persistent rules
echo "cleaning up dhcp leases"
rm /var/lib/dhcp3/*
# Make sure Udev doesn't block our network
# http://6.ptmc.org/?p=164
echo "cleaning up udev rules"
rm /etc/udev/rules.d/70-persistent-net.rules
mkdir /etc/udev/rules.d/70-persistent-net.rules
rm -rf /dev/.udev/
rm /lib/udev/rules.d/75-persistent-net-generator.rules
echo "Adding a 2 sec delay to the interface up, to make the dhclient happy"
echo "pre-up sleep 2" >> /etc/network/interfaces
exit
|
jedi4ever/veewee
|
templates/ubuntu-11.10-server-amd64-ishaya/postinstall.sh
|
Shell
|
mit
| 3,063 |
#! /bin/sh
# Copyright (C) 2007 Red Hat, Inc.
# This file is part of Red Hat elfutils.
#
# Red Hat elfutils is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by the
# Free Software Foundation; version 2 of the License.
#
# Red Hat elfutils is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with Red Hat elfutils; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA.
#
# Red Hat elfutils is an included package of the Open Invention Network.
# An included package of the Open Invention Network is a package for which
# Open Invention Network licensees cross-license their patents. No patent
# license is granted, either expressly or impliedly, by designation as an
# included package. Should you wish to participate in the Open Invention
# Network licensing program, please visit www.openinventionnetwork.com
# <http://www.openinventionnetwork.com>.
. $srcdir/test-subr.sh
original=${original:-testfile12}
stripped=${stripped:-testfile17}
debugfile=${debugfile:-${stripped}.debug}
testfiles $original $stripped $debugfile
tempfiles testfile.unstrip
# This is old reference output from run-test-strip6.sh, from when
# strip left the .debug file with unchanged sh_size in
# stripped sections that shrank in the stripped file. strip
# no longer does that, but unstrip must still handle it.
testrun ../src/unstrip -o testfile.unstrip $stripped $debugfile
testrun ../src/elfcmp --hash-inexact $original testfile.unstrip
|
VanirAOSP/external_elfutils
|
tests/run-unstrip-test.sh
|
Shell
|
gpl-2.0
| 1,810 |
#!/bin/bash
set -e
ABSOLUTE_BUILD=`pwd`/build
POSTGRES_DATA="$ABSOLUTE_BUILD/postgres"
NODE_DATA="$ABSOLUTE_BUILD/node"
# Create Postgres instance
mkdir -p $POSTGRES_DATA
# TODO: Handle non-empty directories correctly?
TZ=UTC initdb --encoding="UTF8" --locale="C" $POSTGRES_DATA || true
# TODO: Start postgres and create the DB
# createdb mitro
|
adeubank/mitro
|
mitro-core/build.sh
|
Shell
|
gpl-3.0
| 350 |
#!/bin/bash
# check that a Sling staged release matches the corresponding svn tags
#
# usage:
# sh check_release_matches_tag.sh 004 /tmp/sling-staging
#
# Note that differences in line endings are not ignored by default.
# doing "EXPORT DIFFOPT=-b" before calling this ignores them.
BASE=$2/$1/org/apache/sling
TAGBASE=http://svn.apache.org/repos/asf/sling/tags/
function fail() {
echo $* >&2
exit 1
}
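# check TAG ZIP: unzip the staged artifact, export the matching svn tag,
# and diff the two trees.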
function check() {
TAG=$TAGBASE/$1
ZIP=$PWD/$2
WORKDIR=workdir/$1/$(date +%s)
CUR=$PWD
echo
echo "Checking $ZIP against $TAG"
mkdir -p $WORKDIR
cd $WORKDIR > /dev/null
unzip $ZIP > /dev/null
ZIPDIR=$PWD/$(ls)
svn export $TAG svnexport > /dev/null
cd svnexport > /dev/null
diff $DIFFOPT -r . $ZIPDIR
cd $CUR
}
CURDIR=`pwd`
cd $BASE || fail "Cannot cd to $BASE"
find . -name '*.zip' | cut -c 3- | sed 's/\// /g' | while read line
do
set $line
TAG=${1}-${2}
ZIP=${1}/${2}/${3}
check $TAG $ZIP
done
openssl sha1 $(find . -name '*.zip')
cd $CURDIR
|
roele/sling
|
check_release_matches_tag.sh
|
Shell
|
apache-2.0
| 1,075 |
#!/usr/bin/env bash
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../..
source "${KUBE_ROOT}/hack/lib/util.sh"
CLEAN_PATTERNS=(
"_tmp"
"doc_tmp"
"((?!staging\/src\/k8s\.io\/apiextensions-apiserver\/pkg\/generated\/openapi).)*/zz_generated.openapi.go"
"test/e2e/generated/bindata.go"
# TODO(bentheelder): remove this pattern after bazel is not in any supported releases
# see: https://github.com/kubernetes/enhancements/issues/2420
"bazel-.*"
)
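# Each pattern is matched as a full-path regex anchored at the repository root;
# matches are removed.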
for pattern in "${CLEAN_PATTERNS[@]}"; do
while IFS=$'\n' read -r match; do
echo "Removing ${match#${KUBE_ROOT}\/} .."
rm -rf "${match#${KUBE_ROOT}\/}"
done < <(find "${KUBE_ROOT}" -iregex "^${KUBE_ROOT}/${pattern}$")
done
# ex: ts=2 sw=2 et filetype=sh
|
krmayankk/kubernetes
|
hack/make-rules/clean.sh
|
Shell
|
apache-2.0
| 1,360 |
#!/bin/bash
# If the clang-format-diff.py command is not specified, we assume we are able to
# access it directly without any path.
if [ -z $CLANG_FORMAT_DIFF ]
then
CLANG_FORMAT_DIFF="clang-format-diff.py"
fi
# Check clang-format-diff.py
if ! which $CLANG_FORMAT_DIFF &> /dev/null
then
echo "You didn't have clang-format-diff.py available in your computer!"
echo "You can download it by running: "
echo " curl http://goo.gl/iUW1u2"
exit 128
fi
# Check argparse, a library that clang-format-diff.py requires.
python 2>/dev/null << EOF
import argparse
EOF
if [ "$?" != 0 ]
then
echo "To run clang-format-diff.py, we'll need the library "argparse" to be"
echo "installed. You can try either of the follow ways to install it:"
echo " 1. Manually download argparse: https://pypi.python.org/pypi/argparse"
echo " 2. easy_install argparse (if you have easy_install)"
echo " 3. pip install argparse (if you have pip)"
exit 129
fi
# TODO(kailiu) following work is not complete since we still need to figure
# out how to add the modified files done pre-commit hook to git's commit index.
#
# Check if this script has already been added to pre-commit hook.
# Will suggest user to add this script to pre-commit hook if their pre-commit
# is empty.
# PRE_COMMIT_SCRIPT_PATH="`git rev-parse --show-toplevel`/.git/hooks/pre-commit"
# if ! ls $PRE_COMMIT_SCRIPT_PATH &> /dev/null
# then
# echo "Would you like to add this script to pre-commit hook, which will do "
# echo -n "the format check for all the affected lines before you check in (y/n):"
# read add_to_hook
# if [ "$add_to_hook" == "y" ]
# then
# ln -s `git rev-parse --show-toplevel`/build_tools/format-diff.sh $PRE_COMMIT_SCRIPT_PATH
# fi
# fi
set -e
uncommitted_code=`git diff HEAD`
# If there's no uncommitted changes, we assume user are doing post-commit
# format check, in which case we'll check the modified lines from latest commit.
# Otherwise, we'll check format of the uncommitted code only.
if [ -z "$uncommitted_code" ]
then
# Check the format of last commit
diffs=$(git diff -U0 HEAD^ | $CLANG_FORMAT_DIFF -p 1)
else
# Check the format of the uncommitted lines only.
diffs=$(git diff -U0 HEAD | $CLANG_FORMAT_DIFF -p 1)
fi
if [ -z "$diffs" ]
then
echo "Nothing needs to be reformatted!"
exit 0
fi
# Highlight the insertion/deletion from the clang-format-diff.py's output
COLOR_END="\033[0m"
COLOR_RED="\033[0;31m"
COLOR_GREEN="\033[0;32m"
echo -e "Detect lines that doesn't follow the format rules:\r"
# Add the color to the diff. lines added will be green; lines removed will be red.
echo "$diffs" |
sed -e "s/\(^-.*$\)/`echo -e \"$COLOR_RED\1$COLOR_END\"`/" |
sed -e "s/\(^+.*$\)/`echo -e \"$COLOR_GREEN\1$COLOR_END\"`/"
echo -e "Would you like to fix the format automatically (y/n): \c"
# Make sure under any mode, we can read user input.
exec < /dev/tty
read to_fix
if [ "$to_fix" != "y" ]
then
exit 1
fi
# Do in-place format adjustment on the same range that was checked above.
if [ -z "$uncommitted_code" ]
then
  git diff -U0 HEAD^ | $CLANG_FORMAT_DIFF -i -p 1
else
  git diff -U0 HEAD | $CLANG_FORMAT_DIFF -i -p 1
fi
echo "Files reformatted!"
# Amend to the last commit if the user does the post-commit format check
if [ -z "$uncommitted_code" ]; then
echo -e "Would you like to amend the changes to last commit (`git log HEAD --oneline | head -1`)? (y/n): \c"
read to_amend
if [ "$to_amend" == "y" ]
then
git commit -a --amend --reuse-message HEAD
echo "Amended to last commit"
fi
fi
|
aishnogah/uplift_db
|
rocksdb-rocksdb-3.6.2/build_tools/format-diff.sh
|
Shell
|
bsd-3-clause
| 3,385 |
#compdef frontend
zstyle ':completion:*:descriptions' format '%B%d%b'
zstyle ':completion::complete:frontend:*:commands' group-name commands
zstyle ':completion::complete:frontend:*:frontend_points' group-name frontend_points
zstyle ':completion::complete:frontend::' list-grouped
zmodload zsh/mapfile
function _frontend() {
local CONFIG=$HOME/.frontend-search
local ret=1
local -a commands
local -a frontend_points
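# Split the config file into lines (the (f) flag) and abbreviate $HOME to ~ in each entry.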
frontend_points=( "${(f)mapfile[$CONFIG]//$HOME/~}" )
commands=(
'jquery: Search in jQuery website'
'mdn: Search in MDN website'
'compass: Search in COMPASS website'
'html5please: Search in HTML5 Please website'
'caniuse: Search in Can I Use website'
'aurajs: Search in AuraJs website'
'dartlang: Search in Dart website'
'lodash: Search in Lo-Dash website'
'qunit: Search in Qunit website'
'fontello: Search in fontello website'
'bootsnipp: Search in bootsnipp website'
'cssflow: Search in cssflow website'
'codepen: Search in codepen website'
'unheap: Search in unheap website'
'bem: Search in BEM website'
'smacss: Search in SMACSS website'
'angularjs: Search in Angular website'
'reactjs: Search in React website'
'emberjs: Search in Ember website'
'stackoverflow: Search in StackOverflow website'
)
_arguments -C \
'1: :->first_arg' \
'2: :->second_arg' && ret=0
case $state in
first_arg)
_describe -t frontend_points "Warp points" frontend_points && ret=0
_describe -t commands "Commands" commands && ret=0
;;
second_arg)
case $words[2] in
jquery|mdn|compass|html5please|caniuse|aurajs|dartlang|lodash|qunit|fontello|bootsnipp|cssflow|codepen|unheap|bem|smacss|angularjs|reactjs|emberjs|stackoverflow)
_describe -t points "Warp points" frontend_points && ret=0
;;
esac
;;
esac
return $ret
}
_frontend "$@"
# Local Variables:
# mode: Shell-Script
# sh-indentation: 2
# indent-tabs-mode: nil
# sh-basic-offset: 2
# End:
# vim: ft=zsh sw=2 ts=2 et
|
jesseke55/oh-my-zsh
|
plugins/frontend-search/_frontend-search.sh
|
Shell
|
mit
| 3,788 |
#!/bin/sh
fatal()
{
echo "fatal: $1" 1>&2
exit 1
}
info()
{
echo "info: $1" 1>&2
}
CURRENT_DIR=`pwd` ||
fatal "could not retrieve current directory"
rm -rf doc-out ||
fatal "could not remove doc-out"
xmllint \
--noout \
--xinclude \
--schema ext/structural-0.1.0/structural-01.xsd \
doc/documentation.xml ||
fatal "could not validate document"
mkdir doc-out ||
fatal "could not create output directory"
cd doc-out ||
fatal "could not switch to output directory"
saxon \
-xi:on \
-xsl:../ext/structural-0.1.0/structural-01-standalone-x20.xsl \
-s:../doc/documentation.xml ||
fatal "could not generate documentation"
cp ../ext/structural-0.1.0/*.css . || fatal "could not copy CSS"
cp ../doc/*.css . || fatal "could not copy CSS"
cd "${CURRENT_DIR}" ||
fatal "could not restore directory"
VERSION=`head -n 1 README-VERSION.txt | sed 's/ /-doc-/g'` ||
fatal "could not retrieve version"
mv doc-out "${VERSION}" ||
fatal "could not rename output directory"
|
io7m/jfppg
|
make-xmldoc.sh
|
Shell
|
isc
| 1,020 |
#!/bin/bash
source "names.sh";
source "comments.sh";
(
declare -A regexes;
echo "package ics";
echo;
echo "// File automatically generated with ./genParams.sh";
echo;
echo "import (";
echo " \"errors\"";
echo " \"fmt\"";
echo " \"regexp\"";
echo " \"strings\"";
echo " \"unicode/utf8\"";
echo;
echo " \"vimagination.zapto.org/parser\"";
echo ")";
echo;
{
while read line; do
keyword="$(echo "$line" | cut -d'=' -f1)";
type="$(getName "$keyword")";
values="$(echo "$line" | cut -d'=' -f2)";
getComment "$type";
echo -n "type Param$type ";
declare multiple=false;
declare freeChoice=false;
declare doubleQuote=false;
declare regex="";
declare vType="";
declare string=false;
declare -a choices=();
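# Leading markers on the value spec drive the generated Go type:
#   *  multiple values ([]T)      ?  free choice (UNKNOWN fallback)
#   "  double-quoted string       '  plain string
#   ~  regex-validated string     !  delegate to a named value type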
fc="${values:0:1}";
if [ "$fc" = "*" ]; then
echo -n "[]";
multiple=true
values="${values:1}";
fc="${values:0:1}";
fi;
if [ "$fc" = "?" ]; then
freeChoice=true
values="${values:1}";
fc="${values:0:1}";
fi;
if [ "$fc" = '"' ]; then
doubleQuote=true;
values="${values:1}";
fc="${values:0:1}";
string=true;
elif [ "$fc" = "'" ]; then
values="${values:1}";
string=true;
fc="${values:0:1}";
elif [ "$fc" = "~" ]; then
regex="${values:1}";
string=true;
values="${values:1}";
fc="${values:0:1}";
fi;
if [ "$fc" = "!" ]; then
values="${values:1}";
echo "$values";
vType="$values";
if [ "$vType" = "Boolean" ]; then
echo;
echo "// New$type returns a *Param$type for ease of use with optional values";
echo "func New$type(v Param$type) *Param$type {";
echo " return &v";
echo "}";
fi;
elif $string; then
echo "string";
echo;
echo "// New$type returns a *Param$type for ease of use with optional values";
echo "func New$type(v Param$type) *Param$type {";
echo " return &v";
echo "}";
if [ ! -z "$regex" ]; then
echo;
echo "var regex$type *regexp.Regexp";
regexes[$type]="$values";
fi;
else
if $freeChoice; then
choices=( $(echo "Unknown|$values" | tr "|" " ") );
else
choices=( $(echo "$values" | tr "|" " ") );
fi;
case ${#choices[@]} in
1)
echo "struct{}";;
*)
echo "uint8";
echo;
echo "// $type constant values";
echo "const (";
declare first=true;
for choice in ${choices[@]};do
echo -n " $type$(getName "$choice")";
if $first; then
echo -n " Param$type = iota";
first=false;
fi;
echo;
done;
echo ")";
echo;
echo "// New returns a pointer to the type (used with constants for ease of use with";
echo "// optional values)";
echo "func (t Param$type) New() *Param$type {";
echo " return &t";
echo "}";
esac;
choices=( $(echo "$values" | tr "|" " ") );
fi;
echo;
# decoder
echo "func (t *Param$type) decode(vs []parser.Token) error {";
declare indent="";
declare vName="vs[0]";
if $multiple; then
echo " for _, v := range vs {";
indent=" ";
vName="v";
else
echo " if len(vs) != 1 {";
echo " return fmt.Errorf(errDecodingType, c$type, ErrInvalidParam)";
echo " }";
fi;
if $doubleQuote; then
echo "$indent if ${vName}.Type != tokenParamQuotedValue {";
echo "$indent return fmt.Errorf(errDecodingType, c$type, ErrInvalidParam)";
echo "$indent }";
fi;
if [ ! -z "$vType" ]; then
echo "$indent var q $vType";
echo "$indent if err := q.decode(nil, ${vName}.Data); err != nil {";
echo "$indent return fmt.Errorf(errDecodingType, c$type, err)";
echo "$indent }";
if $multiple; then
echo " *t = append(*t, q)";
else
echo " *t = Param$type(q)";
fi;
elif [ ${#choices[@]} -eq 1 ]; then
echo " if strings.ToUpper(${vName}.Data) != \"${choices[0]}\" {";
echo " return fmt.Errorf(errDecodingType, c$type, ErrInvalidParam)";
echo " }";
elif [ ${#choices[@]} -gt 1 ]; then
echo "$indent switch strings.ToUpper(${vName}.Data) {";
for choice in ${choices[@]}; do
echo "$indent case \"$choice\":";
if $multiple; then
echo " *t = append(*t, $type$(getName "$choice")";
else
echo " *t = $type$(getName "$choice")";
fi;
done;
echo "$indent default:";
if $freeChoice; then
if $multiple; then
echo " *t = append(*t, {$type}Unknown)";
else
echo " *t = ${type}Unknown";
fi;
else
echo "$indent return fmt.Errorf(errDecodingType, c$type, ErrInvalidParam)";
fi;
echo "$indent }";
else
if [ -z "$regex" ]; then
if $multiple; then
echo " *t = append(*t, decode6868(${vName}.Data))";
else
echo " *t = Param$type(decode6868(${vName}.Data))";
fi;
else
echo "$indent if !regex${type}.MatchString(${vName}.Data) {";
echo "$indent return fmt.Errorf(errDecodingType, c$type, ErrInvalidParam)";
echo "$indent }";
echo "$indent *t = Param$type(${vName}.Data)";
fi;
fi;
if $multiple; then
echo " }";
fi;
echo " return nil";
echo "}";
echo;
#encoder
echo "func (t Param$type) encode(w writer) {";
if [ ${#choices[@]} -eq 0 ] || $multiple; then
if [ "$vType" = "CALADDRESS" -o "$vType" = "URI" ]; then
echo " if len(t.String()) == 0 {";
echo " return";
echo " }";
elif [ "$vType" = "Boolean" ]; then
echo " if !t {";
echo " return";
echo " }";
else
echo " if len(t) == 0 {";
echo " return";
echo " }";
fi;
fi;
echo " w.WriteString(\";${keyword}=\")";
if $multiple; then
echo " for n, v := range t {";
echo " if n > 0 {";
echo " w.WriteString(\",\")";
echo " }";
else
vName="t";
fi;
if [ ! -z "$vType" ]; then
echo "$indent q := $vType($vName)";
echo "$indent q.encode(w)";
elif [ ${#choices[@]} -eq 1 ]; then
echo "$indent w.WriteString(\"${choices[0]}\")";
freeChoice=true;
elif [ ${#choices[@]} -gt 1 ]; then
echo "$indent switch $vName {";
for choice in ${choices[@]}; do
echo "$indent case $type$(getName "$choice"):";
echo "$indent w.WriteString(\"$choice\")";
done;
if $freeChoice; then
echo "$indent default:";
echo "$indent w.WriteString(\"UNKNOWN\")";
fi;
echo "$indent }";
else
if $doubleQuote; then
echo "$indent w.WriteString(\"\\\"\")";
echo "$indent w.Write(encode6868(string($vName)))";
echo "$indent w.WriteString(\"\\\"\")";
else
echo "$indent if strings.ContainsAny(string($vName), nonsafeChars[32:]) {";
echo "$indent w.WriteString(\"\\\"\")";
echo "$indent w.Write(encode6868(string($vName)))";
echo "$indent w.WriteString(\"\\\"\")";
echo "$indent } else {";
echo "$indent w.Write(encode6868(string($vName)))";
echo "$indent }";
fi;
fi;
if $multiple; then
echo " }";
fi;
echo "}";
echo;
#validator
echo "func (t Param$type) valid() error {";
if [ "$vType" = "Boolean" ]; then
echo " return nil";
elif [ ${#choices[@]} -eq 0 ] || ! $freeChoice; then
if $multiple; then
echo " for _, v := range t {";
fi;
if [ ! -z "$vType" ]; then
if $multiple; then
echo " if err := v.valid(); err != nil {"
echo " return fmt.Errorf(errValidatingType, c$type, err)";
echo " }";
else
echo " q := $vType(t)";
echo " if err := q.valid(); err != nil {";
echo " return fmt.Errorf(errValidatingType, c$type, err)";
echo " }";
echo " return nil";
fi;
elif [ ${#choices[@]} -gt 0 ]; then
echo "$indent switch $vName {";
echo -n "$indent case ";
first=false;
for choice in ${choices[@]}; do
if $first; then
echo -n ", ";
fi;
first=true;
echo -n "$type$(getName "$choice")";
done;
echo ":";
echo "$indent default:";
echo "$indent return fmt.Errorf(errValidatingType, c$type, ErrInvalidValue)";
echo "$indent }";
elif [ ! -z "$regex" ]; then
echo "$indent if !regex${type}.Match([]byte($vName)) {";
echo "$indent return fmt.Errorf(errValidatingType, c$type, ErrInvalidValue)";
echo "$indent }";
else
echo "$indent if strings.ContainsAny(string($vName), nonsafeChars[:31]) {";
echo "$indent return fmt.Errorf(errValidatingType, c$type, ErrInvalidText)";
echo "$indent }";
fi;
if $multiple; then
echo " }";
fi;
if [ -z "$vType" ] || $multiple; then
echo " return nil";
fi;
else
echo " return nil";
fi;
echo "}";
echo;
done;
} < params.gen
cat <<HEREDOC
func decode6868(s string) string {
t := parser.NewStringTokeniser(s)
d := make([]byte, 0, len(s))
var ru [4]byte
Loop:
for {
c := t.ExceptRun("^")
d = append(d, t.Get()...)
switch c {
case -1:
break Loop
case '^':
t.Accept("^")
switch t.Peek() {
case -1:
d = append(d, '^')
break Loop
case 'n':
d = append(d, '\n')
case '\'':
d = append(d, '"')
case '^':
d = append(d, '^')
default:
d = append(d, '^')
l := utf8.EncodeRune(ru[:], c)
d = append(d, ru[:l]...)
}
t.Except("")
}
}
return string(d)
}
func encode6868(s string) []byte {
t := parser.NewStringTokeniser(s)
d := make([]byte, 0, len(s))
Loop:
for {
c := t.ExceptRun("\n^\"")
d = append(d, t.Get()...)
switch c {
case -1:
break Loop
case '\n':
d = append(d, '^', 'n')
case '^':
d = append(d, '^', '^')
case '"':
d = append(d, '^', '\'')
}
}
return d
}
HEREDOC
echo "func init() {";
for key in ${!regexes[@]}; do
echo " regex$key = regexp.MustCompile(\"${regexes[$key]}\")";
done;
echo "}";
echo;
echo "// Errors";
echo "var (";
echo " ErrInvalidParam = errors.New(\"invalid param value\")";
echo " ErrInvalidValue = errors.New(\"invalid value\")";
echo ")";
echo;
echo "const ("
echo " errDecodingType = \"error decoding %s: %w\"";
echo " errValidatingType = \"error decoding %s: %w\"";
{
while read line; do
keyword="$(echo "$line" | cut -d'=' -f1)";
type="$(getName "$keyword")";
echo -n " c$type";
for i in $(seq $(( 26 - ${#type} ))); do
echo -n " ";
done;
echo "= \"$type\"";
done;
} < params.gen
echo ")";
) > params.go
|
MJKWoolnough/ics
|
genParams.sh
|
Shell
|
isc
| 10,330 |
#!/bin/bash
set -e
vivado -mode batch -source runme.tcl
echo "=========================================================="
md5sum wires_{INT,CLBLL,CLBLM}_[LR]_*.txt | sed -re 's,X[0-9]+Y[0-9]+,XY,' | sort | uniq -c | sort -k3
echo "=========================================================="
md5sum pips_{INT,CLBLL,CLBLM}_[LR]_*.txt | sed -re 's,X[0-9]+Y[0-9]+,XY,' | sort | uniq -c | sort -k3
|
SymbiFlow/prjxray-experiments-archive-2017
|
tiles_wires_pips/runme.sh
|
Shell
|
isc
| 394 |
#!/bin/bash
# vim: filetype=sh:tabstop=2:shiftwidth=2:expandtab
gcloud compute zones list --format='value(NAME)'
|
pinterb/devtoolbox
|
binfiles/gcp-zones.sh
|
Shell
|
mit
| 115 |
spec="consensus"
palette="/tmp/palette.png"
filters="fps=15,scale=320:-1:flags=lanczos"
$HOME/opt/ffmpeg/bin/ffmpeg -v warning -i final.$spec.%05d.tga -vf "$filters,palettegen" -y $palette
$HOME/opt/ffmpeg/bin/ffmpeg -v warning -i final.$spec.%05d.tga -i $palette -lavfi "$filters [x]; [x][1:v] paletteuse" -y $spec.gif
|
mdtraj/tftraj
|
examples/rmsd-consensus/render.sh
|
Shell
|
mit
| 321 |
~/dotfiles/shells/thinkpad_x13/disable_c6_state.sh
~/dotfiles/shells/thinkpad_x13/gdrive_sync.sh
|
dooooooooinggggg/dotfiles
|
shells/thinkpad_x13/startup_init.sh
|
Shell
|
mit
| 97 |
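# Copy the production database down from mLab and restore it over the dev database, then clean up.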
mongodump -h ds119873-a0.mlab.com:19873 -d hah-prod -u hah_prod_backup -p $HAH_PROD_BACKUP_PASSWORD -o tmp
mongorestore -h ds143559.mlab.com:43559 -d hah-dev -u hah_dev_restore -p $HAH_DEV_RESTORE_PASSWORD --drop tmp/hah-prod
rm -rf tmp/hah-prod
rm -rf tmp
|
HelpAssistHer/help-assist-her
|
server/data-import/import-from-prod/prod-to-dev.sh
|
Shell
|
mit
| 258 |
while [[ -e foo ]]; do sleep 1; done
|
grncdr/js-shell-parse
|
tests/fixtures/shellcheck-tests/while-clause/source.sh
|
Shell
|
mit
| 36 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for DLA-120-2
#
# Security announcement date: 2015-05-04 00:00:00 UTC
# Script generation date: 2017-01-01 21:08:55 UTC
#
# Operating System: Debian 6 (Squeeze)
# Architecture: i386
#
# Vulnerable packages fix on version:
# - xorg-server:2:1.7.7-18+deb6u3
#
# Last versions recommended by the security team:
# - xorg-server:2:1.7.7-18+deb6u3
#
# CVE List:
# - CVE-2015-3418
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade xorg-server=2:1.7.7-18+deb6u3 -y
|
Cyberwatch/cbw-security-fixes
|
Debian_6_(Squeeze)/i386/2015/DLA-120-2.sh
|
Shell
|
mit
| 633 |
#!/usr/bin/env bash
if [ -z "$1" ]; then
echo 'Invalid invocation'
exit 24
fi
DIR=/tmp/marina-ngrok
mkdir -p $DIR
pushd $DIR
curl -L 'https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-darwin-amd64.zip' > ngrok.zip
unzip ngrok.zip
mv ngrok "$1"
popd
rm -fr $DIR
|
shnhrrsn/marina-cli
|
scripts/install-ngrok.sh
|
Shell
|
mit
| 267 |
# Falkor Bootstrap to install missing packages etc...
# Install some key missed packages
apt-get -y install curl figlet vim bash-completion
# Puppet customization
apt-get -y install facter
# # Adding sid repository to install Lmod (Current version on Debian is too old for EasyBuild, so we use the one from the sid (rolling release) repo)
# printf "\n# sid repository - main, contrib and non-free branches\ndeb http://http.us.debian.org/debian sid main non-free contrib\ndeb-src http://http.us.debian.org/debian sid main non-free contrib\n" >> /etc/apt/sources.list
# printf "\n\n# Testing security updates repository\ndeb http://security.debian.org/ sid/updates main contrib non-free\ndeb-src http://security.debian.org/ sid/updates main contrib non-free\n" >> /etc/apt/sources.list
# # We set the priorities of the repositories, so that by default apt will install packages from the stable repository
# printf "\n\nPackage: *\nPin: release a=sid\nPin-Priority: -1\n" >> /etc/apt/preferences.d/priority_preferences
# # To change: move to jessie when jessie is frozen (or released)
# printf "APT::Default-Release "wheezy";" >> /etc/apt/apt.conf.d/99defaultrelease
apt-get update
# Create the alias
sed -i "s/^#alias/alias/g" ~vagrant/.bashrc
|
Falkor/vagrant-vms
|
scripts/Debian/bootstrap.sh
|
Shell
|
mit
| 1,258 |
#!/bin/sh
#
# Copyright (c) 2015 daemotron
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# set up the yajam environment
SCRIPTPATH=$(/bin/realpath $0)
SCRIPTPREFIX=${SCRIPTPATH%/*}
. ${SCRIPTPREFIX}/common.sh
usage() {
umsg="${COLOR_BOLD}usage: ${COLOR_RED}${COLOR_BOLD}yajam provide"
umsg="${umsg} ${COLOR_WHITE}[${COLOR_RED}${COLOR_BOLD}-f${COLOR_WHITE}]"
umsg="${umsg} [${COLOR_RED}${COLOR_BOLD}-s${COLOR_WHITE}]"
umsg="${umsg} [${COLOR_GREEN}${COLOR_BOLD}version${COLOR_WHITE}]"
echo -e "${umsg}
${COLOR_RED}${COLOR_BOLD}Options:
${COLOR_RED}${COLOR_BOLD} -f${COLOR_RESET} -- Force re-creation; i. e. delete any pre-
existing datasets for the specified version
${COLOR_RED}${COLOR_BOLD} -s${COLOR_RESET} -- Simulation mode. Do not apply any change,
just print out the to be performed operations
If ${COLOR_GREEN}${COLOR_BOLD}version${COLOR_RESET} is not specified, all existing version sets
are updated. The ${COLOR_RED}${COLOR_BOLD}-f${COLOR_RESET} option will then be ignored.
If ${COLOR_GREEN}${COLOR_BOLD}version${COLOR_RESET} is specified as ${COLOR_CYAN}major.minor${COLOR_RESET}, it is taken as RELENG version.
If ${COLOR_GREEN}${COLOR_BOLD}version${COLOR_RESET} is specified as ${COLOR_CYAN}major${COLOR_RESET}, it is taken as STABLE version.
If ${COLOR_GREEN}${COLOR_BOLD}version${COLOR_RESET} is specified as ${COLOR_CYAN}CURRENT${COLOR_RESET}, it is taken as CURRENT version.
"
}
# setup internal flags
PROVIDE_FORCE="no"
SIMULATE="no"
delete_version() {
zfs_destroy "${ZPOOL}/${ZROOTFS}/${YJ_WRK}/${1}" "-r -f" "${SIMULATE}" "yes"
return $?
}
build_version() {
if [ $# -ne 1 ]; then
die 1 "build_version() expects 1 argument: version"
fi
prog_msg "Cleansing obj directory for version ${1}"
clean_obj ${1}
[ "$?" -ne "0" ] && prog_fail && return 1
prog_success
prog_msg "Building userland for version ${1}"
make_buildworld ${1}
[ "$?" -ne "0" ] && prog_fail && return 1
prog_success
prog_msg "Preparing templates for version ${1}"
make_template ${1}
[ "$?" -ne "0" ] && prog_fail && return 1
prog_success
log_info "Version ${1} updated to $(version_detailed ${1})"
return 0
}
create_version() {
if [ $# -ne 1 ]; then
die 1 "create_version() expects 1 argument: version"
fi
prog_msg "Creating structure for new FreeBSD version ${1}"
local branch=$(get_branch $1)
[ "$?" -ne "0" ] && prog_fail && return 1
zfs_exists "${ZPOOL}/${ZROOTFS}/${YJ_WRK}/${1}"
if [ "$?" -eq "0" ]; then
if [ "${PROVIDE_FORCE}" = "yes" ]; then
zfs_destroy "${ZPOOL}/${ZROOTFS}/${YJ_WRK}/${1}" "-r -f" \
"${SIMULATE}" "yes"
[ "$?" -ne "0" ] && prog_fail && return 1
else
prog_fail && return 1
fi
fi
local flags="-o atime=off -o exec=off -o setuid=off"
local zfs_list_parts="${YJ_SRC} ${YJ_TPL} ${YJ_TPL}/${YJ_MROOT}"
zfs_list_parts="${zfs_list_parts} ${YJ_TPL}/${YJ_SKEL}"
local zfs_list="${ZPOOL}/${ZROOTFS}/${YJ_WRK}/${1}"
for i in ${zfs_list_parts}; do
zfs_list="${zfs_list} ${ZPOOL}/${ZROOTFS}/${YJ_WRK}/${1}/${i}"
done
for i in ${zfs_list}; do
zfs_create "$i" "${flags}" "${SIMULATE}" "yes"
[ "$?" -ne "0" ] && prog_fail && return 1
done
flags="-o atime=off -o setuid=off"
zfs_create "${ZPOOL}/${ZROOTFS}/${YJ_WRK}/${1}/${YJ_OBJ}" "${flags}" \
"${SIMULATE}" "yes"
[ "$?" -ne "0" ] && prog_fail && return 1
prog_success
prog_msg "Checking out sources for version ${1}"
svn_checkout "${1}" "${ZMOUNT}/${YJ_WRK}/${1}/${YJ_SRC}"
[ "$?" -ne "0" ] && prog_fail && return 1
prog_success
prog_msg "Adding ${1} version entries to ${YJ_SYS_SRCCONF}"
insert_src_conf ${1}
[ "$?" -ne "0" ] && prog_fail && return 1
prog_success
build_version ${1}
return $?
}
update_version() {
if [ $# -ne 1 ]; then
die 1 "update_version() expects 1 argument: version"
fi
prog_msg "Updating source tree for FreeBSD version ${1}"
local revision_original=$(version_detailed "${1}")
svn_update "${ZMOUNT}/${YJ_WRK}/${1}/${YJ_SRC}"
[ "$?" -ne "0" ] && prog_fail && return 1
prog_success
local revision_updated=$(version_detailed "${1}")
[ "${revision_original}" != "${revision_updated}" ] && build_version ${1}
return $?
}
# evaluate command line options
while getopts "fsh" FLAG; do
case "${FLAG}" in
f)
PROVIDE_FORCE="yes"
;;
s)
SIMULATE="yes"
;;
h)
usage
exit 0
;;
*)
usage
exit 1
;;
esac
done
shift $((OPTIND-1))
[ $# -ge 1 ] && VERSIONS=$@
[ $# -lt 1 ] && VERSIONS=$(versions_detect)
[ -z "${VERSIONS}" ] && VERSIONS=$(version_sys)
[ -z "${VERSIONS}" ] && die 1 "No versions found to be updated."
FAIL_FLAG=0
for version in ${VERSIONS}; do # intentionally unquoted: split into one word per version
version_exists ${version}
if [ "$?" -eq "0" ]; then
if [ "${PROVIDE_FORCE}" = "yes" ]; then
delete_version ${version} && create_version ${version}
FAIL_FLAG=$((${FAIL_FLAG}+${?}))
else
update_version ${version}
FAIL_FLAG=$((${FAIL_FLAG}+${?}))
fi
else
create_version ${version}
FAIL_FLAG=$((${FAIL_FLAG}+${?}))
fi
done
# try ensuring tmp is mounted noexec even if some operations failed
tmp_noexec_on
[ "${FAIL_FLAG}" -gt "0" ] && exit 1
exit 0
|
daemotron/yajam
|
src/share/yajam/provide.sh
|
Shell
|
mit
| 6,590 |
#!/bin/bash
OUT_DIR="release/tmp"
DATE=$(date +"%m/%d/%Y")
SERVERVERSION="3.3.1"
TERRAFORMVERSION="0.15.4"
KUBECTLVERSION="1.19"
HELMVERSION="3.4.0"
KOTSVERSION="1.64.0"
COMMENT="FINAL"
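# These values are passed to asciidoctor-pdf as document attributes below;
# SERVERVERSION is also used to name the generated PDF files.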
# Build Overview
echo "Building Server Overview..."
asciidoctor-pdf -v
asciidoctor-pdf -a pdf-style=jekyll/_cci2/_release-pdf/circle-theme.yml -a pdf-fontsdir=jekyll/_cci2/_release-pdf/fonts -a skip-front-matter -a pdf=true -a revdate=${DATE} -a revnumber=${SERVERVERSION} -a revremark=${COMMENT} -a serverversion=${SERVERVERSION} -a terraformversion=${TERRAFORMVERSION} -a kubectlversion=${KUBECTLVERSION} -a helmversion=${HELMVERSION} -a kotsversion=${KOTSVERSION} -D ${OUT_DIR} -o CircleCI-Server-${SERVERVERSION}-Overview.pdf jekyll/_cci2/_server-3-overview.adoc
echo "Done!"
# Build Ops Guide
echo "Building Server Operations Guide..."
asciidoctor-pdf -a pdf-style=jekyll/_cci2/_release-pdf/circle-theme.yml -a pdf-fontsdir=jekyll/_cci2/_release-pdf/fonts -a skip-front-matter -a pdf=true -a revdate=${DATE} -a revnumber=${SERVERVERSION} -a revremark=${COMMENT} -a serverversion=${SERVERVERSION} -a terraformversion=${TERRAFORMVERSION} -a kubectlversion=${KUBECTLVERSION} -a helmversion=${HELMVERSION} -a kotsversion=${KOTSVERSION} -D ${OUT_DIR} -o CircleCI-Server-${SERVERVERSION}-Operations-Guide.pdf jekyll/_cci2/_server-3-ops-guide.adoc
echo "Done!"
# Build Install Guide for GCP
echo "Building Server Installation Guide for GCP..."
asciidoctor-pdf -a env-gcp=true -a pdf-style=jekyll/_cci2/_release-pdf/circle-theme-install.yml -a pdf-fontsdir=jekyll/_cci2/_release-pdf/fonts -a skip-front-matter -a pdf=true -a revdate=${DATE} -a revnumber=${SERVERVERSION} -a revremark=${COMMENT} -a serverversion=${SERVERVERSION} -a terraformversion=${TERRAFORMVERSION} -a kubectlversion=${KUBECTLVERSION} -a helmversion=${HELMVERSION} -a kotsversion=${KOTSVERSION} -D ${OUT_DIR} -o CircleCI-Server-${SERVERVERSION}-GCP-Installation-Guide.pdf jekyll/_cci2/_server-3-install-guide-gcp.adoc
echo "Done!"
# Build Install Guide for AWS
echo "Building Server Installation Guide for AWS..."
asciidoctor-pdf -a env-aws=true -a pdf-style=jekyll/_cci2/_release-pdf/circle-theme-install.yml -a pdf-fontsdir=jekyll/_cci2/_release-pdf/fonts -a skip-front-matter -a pdf=true -a revdate=${DATE} -a revnumber=${SERVERVERSION} -a revremark=${COMMENT} -a serverversion=${SERVERVERSION} -a terraformversion=${TERRAFORMVERSION} -a kubectlversion=${KUBECTLVERSION} -a helmversion=${HELMVERSION} -a kotsversion=${KOTSVERSION} -D ${OUT_DIR} -o CircleCI-Server-${SERVERVERSION}-AWS-Installation-Guide.pdf jekyll/_cci2/_server-3-install-guide-aws.adoc
echo "Done!"
|
circleci/circleci-docs
|
scripts/build_pdfs_asciidoc.sh
|
Shell
|
mit
| 2,607 |
#!/bin/bash
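# List the supported resource types by mapping resource template paths under
# lib/cfnlego/resources/ to Name::Spaced::Types. The cut field offsets assume
# a fixed nesting depth below the resources/ directory.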
find ./lib/cfnlego/resources/ -iname '*.yaml' | cut -f6,7,8 -d/ | sed 's/\.yaml$//' | sed 's/\//::/g' | sort -u
|
allinwonder/cfnlego
|
utils/list-supported-resources.sh
|
Shell
|
mit
| 126 |
#!/bin/bash
# Script that restores a previous snapshot by name to all $storageNodes.
# Requires argument: Snapshot name.
cwd="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "$cwd/settings.sh"
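# settings.sh is expected to define $storageNodes, $hostsystem, $sshTime,
# $sshTimeout and $output, all of which are used below.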
# Require the snapshot name as the first argument.
if [[ -z $1 ]]; then
    echo "$(date +%x_%r) No snapshotName passed for argument 1, exiting." >> $output
    exit 1
else
snapshotName=$1
fi
for i in "${storageNodes[@]}"
do
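    # Throwaway ssh probes: poke the host (and discard the output) so it is
    # awake before the revert; the timeout keeps a dead host from blocking.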
nonsense=$(timeout $sshTime ssh -o ConnectTimeout=$sshTimeout $hostsystem "echo wakeup")
nonsense=$(timeout $sshTime ssh -o ConnectTimeout=$sshTimeout $hostsystem "echo get ready")
sleep 5
echo "$(date +%x_%r) Restoring snapshot $snapshotName to $i" >> $output
ssh -o ConnectTimeout=$sshTimeout $hostsystem "virsh snapshot-revert $i $snapshotName > /dev/null 2>&1"
done
|
FOGProject/fog-community-scripts
|
fog-libvirtd-testing/restoreSnapshots.sh
|
Shell
|
mit
| 819 |
#!/usr/bin/env bash
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
source ${DIR}/../src/bashop/modules/globals.sh
source ${DIR}/../src/bashop/modules/printer.sh
source ${DIR}/../src/bashop/modules/config.sh
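# The assertion__* and mock__* helpers are assumed to be provided by the test
# runner that executes these check_* functions; they are not defined here.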
function check_parse() {
bashop::config::parse "${DIR}/fixtures/config"
assertion__equal "string" "${BASHOP_CONFIG['var1']}"
assertion__equal "1" "${BASHOP_CONFIG['VaR2']}"
bashop::config::parse "${DIR}/fixtures/custom_config" "CUSTOM_CONFIG"
assertion__equal "cust_string" "${CUSTOM_CONFIG['cust_var1']}"
assertion__equal "1" "${CUSTOM_CONFIG['Cust_VaR2']}"
}
function check_write() {
bashop::config::parse "${DIR}/fixtures/config"
BASHOP_CONFIG["var1"]="changed"
bashop::config::write "${DIR}/fixtures/tmp_config_new"
local file_content="$(< ${DIR}/fixtures/config_new)"
local tmp_file_content="$(< ${DIR}/fixtures/tmp_config_new)"
rm "${DIR}/fixtures/tmp_config_new"
assertion__equal "${file_content}" "${tmp_file_content}"
bashop::config::parse "${DIR}/fixtures/custom_config" "CUSTOM_CONFIG"
CUSTOM_CONFIG["cust_var1"]="cust_changed"
bashop::config::write "${DIR}/fixtures/tmp_custom_config_new" "CUSTOM_CONFIG"
local file_custom_content="$(< ${DIR}/fixtures/custom_config_new)"
local tmp_file_custom_content="$(< ${DIR}/fixtures/tmp_custom_config_new)"
rm "${DIR}/fixtures/tmp_custom_config_new"
assertion__equal "${file_custom_content}" "${tmp_file_custom_content}"
}
function check_read_var_from_user_value() {
mock__make_function_do_nothing "bashop::printer::info"
# Default config tests
mock__make_function_call "read" "value='value'"
bashop::config::read_var_from_user "test_var"
assertion__equal "value" "${BASHOP_CONFIG['test_var']}"
mock__make_function_call "read" "value='value2'"
bashop::config::read_var_from_user "test_var" "default_value"
assertion__equal "value2" "${BASHOP_CONFIG['test_var']}"
mock__make_function_do_nothing "read"
bashop::config::read_var_from_user "test_var" "default_value"
assertion__equal "value2" "${BASHOP_CONFIG['test_var']}"
bashop::config::read_var_from_user "test_var2" "default_value"
assertion__equal "default_value" "${BASHOP_CONFIG['test_var2']}"
# Custom config tests
mock__make_function_call "read" "value='custom_value'"
bashop::config::read_var_from_user "custom_var" "custom_default_value" "" "CUSTOM_CONFIG"
assertion__equal "custom_value" "${CUSTOM_CONFIG['custom_var']}"
mock__make_function_do_nothing "read"
bashop::config::read_var_from_user "custom_var" "custom_default_value" "" "CUSTOM_CONFIG"
assertion__equal "custom_value" "${CUSTOM_CONFIG['custom_var']}"
bashop::config::read_var_from_user "custom_var2" "custom_default_value" "" "CUSTOM_CONFIG"
assertion__equal "custom_default_value" "${CUSTOM_CONFIG['custom_var2']}"
}
function check_read_var_from_user_output() {
mock__make_function_do_nothing "read"
fnc_text=$(bashop::config::read_var_from_user "test_var2" "default_value")
test_string=$'\033[00;34mSet test_var2 [default_value]: \033[0m'
assertion__equal "${test_string}" "${fnc_text}"
fnc_text=$(bashop::config::read_var_from_user "test_var2" "default_value" "New text")
test_string=$'\033[00;34mNew text [default_value]: \033[0m'
assertion__equal "${test_string}" "${fnc_text}"
}
|
GM-Alex/bashop
|
tests/config_test.sh
|
Shell
|
mit
| 3,294 |
values="1 5 10 50 100 500 1000"
#wiGenRunner="\basura\wigen-0.0.1\wigen-0.0.1\bin\wigen.bat"
wiGenRunner=./wigen
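# Sweep the --dataSets count over $values, appending the timed output of the
# normal and the deliberately bad runs to the same results file.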
for value in $values
do
outName="outDataSetsNR"
params="--countries 1 --dataSets $value --slices 1 --observations 1 --computations 1 --Indicators 1 --Organizations 1 --shex schemas/webindex_NonRecursive.shex --allTypes --time"
paramsSingle="$params --no-scopeNodes"
paramsBad="$params --badDataSets 1"
$wiGenRunner $params >> $outName
# $wiGenRunner $paramsSingle >> $outName
$wiGenRunner $paramsBad >> $outName
done
|
labra/wiGen
|
stats/testDataSetsNR.sh
|
Shell
|
mit
| 535 |
#!/bin/bash -ev
# Linux From Scratch
# Chapter 5: Constructing a Temporary System
# Section 15: ncurses-6.0
#####################################################################
source ../lfs_profile
tar -xvf ncurses-6.0.tar.gz
cd ncurses-6.0
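# --without-ada skips the Ada bindings (no Ada compiler in the temporary
# system), --enable-widec builds the wide-character libraries, and
# --enable-overwrite installs the headers directly into /tools/include.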
./configure --prefix=/tools \
--with-shared \
--without-debug \
--without-ada \
--enable-widec \
--enable-overwrite
make
make install
cd ..
rm -rf ncurses-6.0
#####################################################################
#
# Proceed to Section 16: Bash-4.3
#
#####################################################################
|
pajamapants3000/LFS_scripts
|
scripts/5.150_ncurses.sh
|
Shell
|
mit
| 645 |
#!/bin/sh
set -e
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
install_framework()
{
if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
local source="${BUILT_PRODUCTS_DIR}/$1"
elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
elif [ -r "$1" ]; then
local source="$1"
fi
local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
if [ -L "${source}" ]; then
echo "Symlinked..."
source="$(readlink "${source}")"
fi
# use filter instead of exclude so missing patterns don't throw errors
echo "rsync -av --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
rsync -av --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
local basename
basename="$(basename -s .framework "$1")"
binary="${destination}/${basename}.framework/${basename}"
if ! [ -r "$binary" ]; then
binary="${destination}/${basename}"
fi
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
strip_invalid_archs "$binary"
fi
# Resign the code if required by the build settings to avoid unstable apps
code_sign_if_enabled "${destination}/$(basename "$1")"
# Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]})
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
code_sign_if_enabled "${destination}/${lib}"
done
fi
}
# Signs a framework with the provided identity
code_sign_if_enabled() {
if [ -n "${EXPANDED_CODE_SIGN_IDENTITY}" -a "${CODE_SIGNING_REQUIRED}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
# Use the current code_sign_identity
echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
local code_sign_cmd="/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS} --preserve-metadata=identifier,entitlements '$1'"
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
code_sign_cmd="$code_sign_cmd &"
fi
echo "$code_sign_cmd"
eval "$code_sign_cmd"
fi
}
# Strip invalid architectures
strip_invalid_archs() {
binary="$1"
# Get architectures for current file
archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | rev)"
stripped=""
for arch in $archs; do
if ! [[ "${VALID_ARCHS}" == *"$arch"* ]]; then
# Strip non-valid architectures in-place
lipo -remove "$arch" -output "$binary" "$binary" || exit 1
stripped="$stripped $arch"
fi
done
if [[ "$stripped" ]]; then
echo "Stripped $binary of architectures:$stripped"
fi
}
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_framework "$BUILT_PRODUCTS_DIR/AsyncSwift-tvOS/Async.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_framework "$BUILT_PRODUCTS_DIR/AsyncSwift-tvOS/Async.framework"
fi
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
wait
fi
|
2h4u/Async
|
AsyncPodsExample/Pods/Target Support Files/Pods-AsyncExample tvOS/Pods-AsyncExample tvOS-frameworks.sh
|
Shell
|
mit
| 3,715 |
#!/usr/bin/env bash
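# Recreate the empty PostgreSQL runtime directories that version control does
# not track, so a checked-out data directory can be started cleanly.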
mkdir -p ../postgres/{pg_tblspc,pg_twophase,pg_stat_tmp,pg_replslot,pg_stat,pg_snapshots,pg_logical/snapshots,pg_logical/mappings,pg_commit_ts}/
|
unsupo/ogame
|
ogamebotapp/src/main/resources/databases/postgres/fixdb.sh
|
Shell
|
mit
| 164 |
#!/bin/bash
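# Ensure the two docker networks (global, int) exist, then start the stack
# and attach the server container to the internal network.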
echo "## Networks"
echo "[global] > docker network inspect global >/dev/null > exists?"
docker network inspect global >/dev/null
if [ $? -ne 0 ];then
echo "[global] > docker network create -d bridge --subnet=10.0.0.0/24 global > create"
docker network create -d bridge --subnet=10.0.0.0/24 global
else
SUB=$(docker network inspect global |jq ".[0].IPAM.Config[0].Subnet")
echo "[global] > already there (SUB: ${SUB})"
fi
echo "[int] > docker network inspect int >/dev/null > exists?"
docker network inspect int >/dev/null
if [ $? -ne 0 ];then
echo "[int] > docker network create -d bridge --subnet=192.168.1.0/24 int > create"
docker network create -d bridge --subnet=192.168.1.0/24 int
else
SUB=$(docker network inspect int |jq ".[0].IPAM.Config[0].Subnet")
echo "[int] > already there (SUB: ${SUB})"
fi
echo "#### Start stack"
echo "[server] > docker-compose up -d server > Start"
docker-compose up -d server
echo "[server] > docker network connect int server > Connect to int network"
docker network connect int server
sleep 2
echo "[server] > docker exec -ti server ip -o -4 addr > Display ip addresses"
docker exec -ti server ip -o -4 addr
echo "[ext0,int{1..3}] > docker-compose up -d ext0 int1 int2 int3 > Start"
docker-compose up -d ext0 int1 int2 int3
|
ChristianKniep/orchestra
|
consul-ambassador/start.sh
|
Shell
|
mit
| 1,428 |