code (string, 2-1.05M) | repo_name (string, 5-110) | path (string, 3-922) | language (1 class) | license (15 classes) | size (int64, 2-1.05M)
---|---|---|---|---|---
#!/bin/bash
CWD=$(pwd)
LOGFILE=/home/vkoshura/spelbuild.log
TESTSFILE=/home/vkoshura/spelbuildtests.log
USRFILE=/home/vkoshura/spelbuildbot.usr
LOCKFILE=/home/vkoshura/spelbuild.lock
TESTSSUMMARYFILE=/home/vkoshura/testssummary.log
SEPARATETESTFILE=/home/vkoshura/separatetest.log
LOGS=/home/vkoshura/linuxlogs.7z
ERRORFLAG=0
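# ERRORFLAG values map to the STATUS strings assigned below:
# 1 = repository update failed, 2 = make clean failed, 3 = cmake failed, 4 = build failed.
# mkdir is atomic, so creating $LOCKFILE doubles as a mutex against concurrent runs.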
if mkdir "$LOCKFILE"; then
echo "Start SPEL Build"
else
exit 1
fi
cd /home/vkoshura/pose/
echo -e "Start build:\n\n" > "$LOGFILE"
hg pull &>> "$LOGFILE"
if [ $? -eq 0 ]; then
hg up &>> "$LOGFILE"
if [ $? -eq 0 ]; then
cd build
make clean &>> "$LOGFILE"
if [ $? -eq 0 ]; then
cmake ../src/ &>> "$LOGFILE"
if [ $? -eq 0 ]; then
make -j2 &>> "$LOGFILE"
if [ $? -eq 0 ]; then
cd tests
./speltests &> "$TESTSFILE"
if [ -f "$TESTSFILE" ]; then
echo -e "Tests Summary:\n\n" > $TESTSSUMMARYFILE
while IFS= read -r LINE || [[ -n "$LINE" ]]; do
[[ "$LINE" =~ (\[[-=[:alpha:][:space:]]{10}\].+) ]]
if [ -n "${BASH_REMATCH[1]}" ]; then
echo -e "${BASH_REMATCH[1]}" >> $TESTSSUMMARYFILE
fi
if [ -f "$SEPARATETESTFILE" ]; then
rm "$SEPARATETESTFILE"
fi
[[ "$LINE" =~ \[[[:space:]]RUN[[:space:]]{6}\][[:space:]](.+) ]]
if [ -n "${BASH_REMATCH[1]}" ]; then
mysql -u root -pXXXXXXXX -e "insert into pose_mediawiki.unittests (testname) select * from (select '"${BASH_REMATCH[1]}"') as t where not exists (select 1 from pose_mediawiki.unittests where testname = '"${BASH_REMATCH[1]}"');"
./speltests --gtest_filter="${BASH_REMATCH[1]}" &> "$SEPARATETESTFILE"
if [ -f "$SEPARATETESTFILE" ]; then
while IFS= read -r LINE || [[ -n "$LINE" ]]; do
[[ "$LINE" =~ \[[[:space:]]{7}OK[[:space:]]\][[:space:]](.+)[[:space:]]\(.+\) ]]
if [ -n "${BASH_REMATCH[1]}" ]; then
mysql -u root -pXXXXXXXX -e "update pose_mediawiki.unittests set linux=1 where testname = '"${BASH_REMATCH[1]}"';"
break
fi
[[ "$LINE" =~ \[[[:space:]]{2}FAILED[[:space:]]{2}\][[:space:]](.+)[[:space:]]\(.+\) ]]
if [ -n "${BASH_REMATCH[1]}" ]; then
mysql -u root -pXXXXXXXX -e "update pose_mediawiki.unittests set linux=0 where testname = '"${BASH_REMATCH[1]}"';"
break
fi
done < "$SEPARATETESTFILE"
fi
fi
done < "$TESTSFILE"
fi
else
ERRORFLAG=4
fi
else
ERRORFLAG=3
fi
else
ERRORFLAG=2
fi
else
ERRORFLAG=1
fi
else
ERRORFLAG=1
fi
if [ "$ERRORFLAG" -eq 0 ]; then
STATUS="SUCCESSFULL"
elif [ "$ERRORFLAG" -eq 1 ]; then
STATUS="FAILED: Repository update failed"
elif [ "$ERRORFLAG" -eq 2 ]; then
STATUS="FAILED: Clean failed"
elif [ "$ERRORFLAG" -eq 3 ]; then
STATUS="FAILED: CMake rebuild failed"
elif [ "$ERRORFLAG" -eq 4 ]; then
STATUS="FAILED: SPEL rebuild failed"
else
STATUS="FAILED: Unknown reason"
fi
if [ -f "$LOGS" ]; then
rm "$LOGS"
fi
if [ "$ERRORFLAG" -eq 0 ]; then
if [ -f "$TESTSFILE" ]; then
7z a "$LOGS" "$LOGFILE" "$TESTSFILE"
if [ -f "$LOGS" ]; then
if [ -f "$USRFILE" ]; then
while IFS='' read -r LINE || [[ -n "$LINE" ]]; do
if [ -f "$TESTSSUMMARYFILE" ]; then
mutt -a "$LOGS" -s "SPEL Build Bot: Build Report: $STATUS" -- "$LINE" < "$TESTSSUMMARYFILE"
else
echo -e "Build complete\n" | mutt -a "$LOGS" -s "SPEL Build Bot: Build Report: $STATUS" -- "$LINE"
fi
done < "$USRFILE"
fi
fi
else
7z a "$LOGS" "$LOGFILE"
if [ -f "$LOGS" ]; then
if [ -f "$USRFILE" ]; then
while IFS='' read -r LINE || [[ -n "$LINE" ]]; do
echo -e "Tests log is not present" | mutt -a "$LOGS" -s "SPEL Build Bot: Build Report: $STATUS" -- "$LINE"
done < "$USRFILE"
fi
fi
fi
else
7z a "$LOGS" "$LOGFILE"
if [ -f "$LOGS" ]; then
if [ -f "$USRFILE" ]; then
while IFS='' read -r LINE || [[ -n "$LINE" ]]; do
echo -e "Tests log is not present" | mutt -a "$LOGS" -s "SPEL Build Bot: Build Report: $STATUS" -- "$LINE"
done < "$USRFILE"
fi
fi
fi
if [ -f "$LOGS" ]; then
rm "$LOGS"
fi
if [ -f "$LOGFILE" ]; then
rm "$LOGFILE"
fi
if [ -f "$TESTSFILE" ]; then
rm "$TESTSFILE"
fi
if [ -f "$TESTSSUMMARYFILE" ]; then
rm "$TESTSSUMMARYFILE"
fi
if [ -f "$EPARATETESTFILE" ]; then
rm "$SEPARATETESTFILE"
fi
rm -r "$LOCKFILE"
cd "$CWD"
| spelteam/spel-scripts | spelbuild.sh | Shell | gpl-3.0 | 4,748 |
#!/bin/bash
SENSORS=$(which sensors)
if [ $# -ne 0 ]
then
$SENSORS | grep "$1" | awk '{print $3}' | cut -d'+' -f2
else
$SENSORS | grep '\(Physical\|Core\)' | sed 's/Physical id \([0-9]\)*:/PCPU \1:/' | tr -d "+" | awk '{print " " $1 " " $2 " " $3}'
fi
| the-paulus/conky | scripts/cputemp.sh | Shell | gpl-3.0 | 256 |
###
#Code to make the runner work on both OSX and Linux. It resolves the path of this
#script ($0) and saves the script's directory as 'scriptdir'. On Darwin (Mac),
#greadlink is used in place of readlink, because BSD readlink does not behave the
#same way; on Linux, plain readlink -f works. Resolving the script's real location
#means that if this pipeline is copied onto another computer, its companion files
#can still be found.
if
[[ $OSTYPE == darwin* ]]
then
readlink=$(which greadlink)
scriptdir="$(dirname $($readlink -f $0))"
else
scriptdir="$(dirname $(readlink -f $0))"
fi
###
usage="USAGE:
01-runner.sh <number of threads> <reads folder>"
######### Setup ################
threads=$1
reads=$2
# keyfile format: (tab separated)
#Ordinal Sample <factor1_name> [<factor2_name>]
if [ "$#" -lt "2" ]
then
echo "$usage"
exit 1
else
echo "initiating $1 parallel fastQC jobs on raw reads"
fi
########## Run #################
#script directory is set to the directory where this script is stored on network/computer/github. This will obviously change between runners. By not hardcoding this variable, the script can be used and opened on any computer by any person with access.
script=$scriptdir/01-fastqc.sh
#output directory is set as reads_fastqc. This changes between runners.
outdir="${reads}_fastqc"
###
#Defines the function 'findSamples' but does not run it. The function looks in the reads directory and lists the sample directory names.
function findSamples () {
find $reads/ -mindepth 1 -maxdepth 1 -type d -exec basename {} \;| tr ' ' '\n'
}
#make a directory called 'reads_fastqc' in the currently open working directory.
mkdir ${outdir}
#Create a timestamp including the date, with the order year, month, day, hrs, mins, seconds.
timestamp=$(date +%Y%m%d-%H%M%S)
#Within the current working directory, make a directory called 'logs'
mkdir -p logs
#Define the variable logdir: a timestamped path under ./logs, named after the output directory.
logdir="./logs/${outdir}.${timestamp}"
#Create a log directory (located in the logdir path). This log folder will contain the logs for the fastqc run.
mkdir $logdir
#Copy the fastqc script (as script.log) and this runner (as runner.log) into the logs directory, so you know what version of the scripts you used during your run.
cat $script > "$logdir/script.log"
cat $0 > "$logdir/runner.log"
#Prints the fastqc script on the screen.
cat $script
#Run the findSamples function, then run the bash script 01-fastqc.sh on each sample in parallel (i.e. using multiple cores), writing each sample's log to the timestamped logs directory.
findSamples | parallel -j $threads bash $script {} $reads \>logs/${outdir}.${timestamp}/{}.log 2\>\&1
#To run, go to the reads directory and call:
#bash ~/path_to/01-runner.sh
| pedrocrisp/NGS-pipelines | PARE_pipe1/01-runner.sh | Shell | gpl-3.0 | 3,277 |
#!/bin/bash
set -ex
shopt -s extglob
CDN_REPLACE_FILES=(
build/index.html
build/js/download-data.js
)
function copySrcToBuild {
rm -rf build/
mkdir build/
cp -r src/ build/
}
# replaceInFile(file, findText, replaceText)
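# NOTE: "sed -i .tmp" (backup suffix as a separate word) is BSD/macOS syntax;
# GNU sed expects "-i.tmp" with no space.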
function replaceInFile {
sed -i .tmp "s/$2/$3/" $1
rm $1.tmp
}
function getCdnVersions {
CDN_VERSION_1_2=$(./get-cdn-version.sh 1.2)
CDN_VERSION_1_3=$(./get-cdn-version.sh 1.3)
}
function replaceCdnVersionInFiles {
for FILE in "${CDN_REPLACE_FILES[@]}"
do
replaceInFile $FILE '${CDN_VERSION_1_2}' $CDN_VERSION_1_2
replaceInFile $FILE '${CDN_VERSION_1_3}' $CDN_VERSION_1_3
done
}
function testBuildResult {
export ANGULAR_HOME_HOST='http://localhost:8100';
export ANGULAR_DOWNLOAD_VERSIONS="$CDN_VERSION_1_2:1.2.x $CDN_VERSION_1_3:1.3.x"
export ANGULAR_VERSION="$CDN_VERSION_1_3"
export CHECK_SCRIPT_TAG="true"
function killServer () {
kill $serverPid
}
npm install .
./node_modules/.bin/webdriver-manager update
# Start basic webserver to serve the app
./node_modules/.bin/http-server -p 8100 build/ &
serverPid=$!
trap killServer EXIT
./node_modules/.bin/protractor protractorConf.js
}
function moveBuildToDist {
branch=$(git rev-parse --abbrev-ref HEAD)
git checkout dist
rm -rf !(build)
cp -rf build/* .
rm -rf build
git add . -A
git commit --allow-empty -m "update site from src"
git checkout $branch
}
function parseArgs {
# defaults if no args are given
if (( $# == 0 )); then
DO_COPY=1
DO_TEST=1
fi
# parse args
while (( $# > 0 )); do
case "$1" in
(copy) DO_COPY=1 ;;
(test) DO_TEST=1 ;;
(dist) DO_DIST=1 ;;
(*) echo "$0: error - unrecognized option $1" 1>&2; exit 1;;
esac
shift
done
}
# --------------
# main
parseArgs "$@"
if [[ "$DO_COPY" ]]; then
copySrcToBuild
getCdnVersions
replaceCdnVersionInFiles
fi
if [[ "$DO_TEST" ]]; then
testBuildResult
fi
if [[ "$DO_DIST" ]]; then
moveBuildToDist
fi
| omouse/gcal-invoice | bower_components/angularjs/build.sh | Shell | gpl-3.0 | 1,990 |
mkdir -p bin dist
zip dist/satellite-server-win.zip *.exe *.dll
zip -r dist/satellite-server-mac.zip *.app
python setup.py sdist
| hauckwill/satellite-server | build.sh | Shell | gpl-3.0 | 123 |
#!/bin/bash
echo "Enabling Aliases..."
rm -r $HOME/.bash_it/aliases/enabled 2> /dev/null
mkdir -p $HOME/.bash_it/aliases/enabled
ln -s $HOME/.bash_it/aliases/available/general.aliases.bash $HOME/.bash_it/aliases/enabled/general.aliases.bash
ln -s $HOME/.bash_it/aliases/available/git.aliases.bash $HOME/.bash_it/aliases/enabled/git.aliases.bash
ln -s $HOME/.bash_it/aliases/available/vagrant.aliases.bash $HOME/.bash_it/aliases/enabled/vagrant.aliases.bash
echo "Enabling Completion..."
rm -r $HOME/.bash_it/completion/enabled 2> /dev/null
mkdir -p $HOME/.bash_it/completion/enabled
ln -s $HOME/.bash_it/completion/available/bash-it.completion.bash $HOME/.bash_it/completion/enabled/bash-it.completion.bash
ln -s $HOME/.bash_it/completion/available/capistrano.completion.bash $HOME/.bash_it/completion/enabled/capistrano.completion.bash
ln -s $HOME/.bash_it/completion/available/defaults.completion.bash $HOME/.bash_it/completion/enabled/defaults.completion.bash
ln -s $HOME/.bash_it/completion/available/gem.completion.bash $HOME/.bash_it/completion/enabled/gem.completion.bash
ln -s $HOME/.bash_it/completion/available/git.completion.bash $HOME/.bash_it/completion/enabled/git.completion.bash
ln -s $HOME/.bash_it/completion/available/git_flow.completion.bash $HOME/.bash_it/completion/enabled/git_flow.completion.bash
ln -s $HOME/.bash_it/completion/available/grunt.completion.bash $HOME/.bash_it/completion/enabled/grunt.completion.bash
ln -s $HOME/.bash_it/completion/available/ssh.completion.bash $HOME/.bash_it/completion/enabled/ssh.completion.bash
ln -s $HOME/.bash_it/completion/available/vagrant.completion.bash $HOME/.bash_it/completion/enabled/vagrant.completion.bash
ln -s $HOME/.bash_it/completion/available/composer.completion.bash $HOME/.bash_it/completion/enabled/composer.completion.bash
ln -s $HOME/.bash_it/completion/available/dirs.completion.bash $HOME/.bash_it/completion/enabled/dirs.completion.bash
ln -s $HOME/.bash_it/completion/available/pip.completion.bash $HOME/.bash_it/completion/enabled/pip.completion.bash
ln -s $HOME/.bash_it/completion/available/system.completion.bash $HOME/.bash_it/completion/enabled/system.completion.bash
echo "Enabling Plugins..."
rm -r $HOME/.bash_it/plugins/enabled 2> /dev/null
mkdir -p $HOME/.bash_it/plugins/enabled
ln -s $HOME/.bash_it/plugins/available/base.plugin.bash $HOME/.bash_it/plugins/enabled/base.plugin.bash
ln -s $HOME/.bash_it/plugins/available/browser.plugin.bash $HOME/.bash_it/plugins/enabled/browser.plugin.bash
ln -s $HOME/.bash_it/plugins/available/dirs.plugin.bash $HOME/.bash_it/plugins/enabled/dirs.plugin.bash
ln -s $HOME/.bash_it/plugins/available/extract.plugin.bash $HOME/.bash_it/plugins/enabled/extract.plugin.bash
ln -s $HOME/.bash_it/plugins/available/git.plugin.bash $HOME/.bash_it/plugins/enabled/git.plugin.bash
ln -s $HOME/.bash_it/plugins/available/ssh.plugin.bash $HOME/.bash_it/plugins/enabled/ssh.plugin.bash
ln -s $HOME/.bash_it/plugins/available/vagrant.plugin.bash $HOME/.bash_it/plugins/enabled/vagrant.plugin.bash
ln -s $HOME/.bash_it/plugins/available/alias-completion.plugin.bash $HOME/.bash_it/plugins/enabled/alias-completion.plugin.bash
echo "Removing previous bash configurations"...
rm $HOME/.bash_aliases $HOME/.bash_exports $HOME/.bash_logout $HOME/.bash_profile $HOME/.bashrc $HOME/.bash_symfony_autocomplete 2> /dev/null
echo "Creating new links..."
ln -s $HOME/.bash/bash_aliases $HOME/.bash_aliases
ln -s $HOME/.bash/bash_exports $HOME/.bash_exports
ln -s $HOME/.bash/bash_logout $HOME/.bash_logout
ln -s $HOME/.bash/bash_profile $HOME/.bash_profile
ln -s $HOME/.bash_profile $HOME/.bashrc
ln -s $HOME/.bash/bash_symfony_autocomplete $HOME/.bash_symfony_autocomplete
echo "Re-sourcing..."
source $HOME/.bashrc
echo "Done."
| vpassapera/BashConfigs | install.sh | Shell | gpl-3.0 | 3,751 |
#!/bin/bash
home=`dirname $0`
$home/../../bin/join.sh $home/input1 $home/input2 /tmp/job-output 0 2
diff -s $home/expected_output /tmp/job-output
| stupaq/hadoop-relational | inputs/join/test.sh | Shell | gpl-3.0 | 147 |
#!/bin/sh -x
INSTALL_PATH=
[ -z "$INSTALL_PATH" ] && INSTALL_PATH=/usr/local
rm $INSTALL_PATH/bin/mdpreview
rm -rf $INSTALL_PATH/lib/mdpreview
| fboender/mdpreview | uninstall.sh | Shell | gpl-3.0 | 145 |
#./bin/bzrflag --world=maps/four_ls.bzw --red-port=50100 --green-port=50101 --purple-port=50102 --blue-port=50103 $@ &
./bin/bzrflag --world=maps/rotated_box_world.bzw --red-port=50100 --green-port=50101 --purple-port=50102 --blue-port=50103 "$@" &
sleep 2
python bzagents/agent1_spencer.py localhost 50100 &
# python bzagents/dumb_agent.py localhost 50101 &
python bzagents/dumb_agent.py localhost 50102 &
python bzagents/dumb_agent.py localhost 50103 &
| edwardekstrom/BZRflag | rungame-vs2dumb.sh | Shell | gpl-3.0 | 454 |
java -jar /opt/Cryptogen/bin/CryptogenV1.0.jar
| Zander-Labuschagne/Cryptogen | deployment/Cryptogen-1.0.1-mt-linux/bin/CryptogenV1.0.sh | Shell | gpl-3.0 | 47 |
#!/bin/bash
IMAGE=mahno
IMAGE_L=registry.gitlab.com/kdudkov/$IMAGE
TAG=latest
tar czf files.tar.gz actors config core static requirements.txt *.py
docker build . -t $IMAGE_L:$TAG -t $IMAGE:$TAG
rm files.tar.gz
docker push $IMAGE_L:$TAG
| kdudkov/tiny-home-automation | build.sh | Shell | gpl-3.0 | 240 |
#!/bin/bash
#
# imagine a file with commented, uncommented and blank lines
#
# $ cat victim.txt
# # comment
# 0 1 2 * go hard
# 99 * this and that
# # another comment
#
# # There is a blank line above
# 12 34 56 /tmp
# hello
# a tab char contained in this line
#
# this oneliner adds a mark to all non-commented and non-blank lines:
#
sed -i -e 's/^[^#]/### MARK ###&/' victim.txt
#
# hint: for your convenience, choose a ### MARK ### not already present in data
# use the following command to undo changes:
#
sed -i -e 's/### MARK ###//' victim.txt
# that's all folks
| javierpeces/my-shell-collection | comments.sh | Shell | gpl-3.0 | 619 |
#!/bin/bash
###################################################################################
################this program will install environment automatically##################
###################################################################################
PATH=$PATH
export PATH
# Path of the current script
CURRENT_DIR="$(cd $(dirname $0);pwd)"
release=""
#Log_File="${CURRENT_DIR}/inEnv.log"
#print usage message
print_msg()
{
echo "Usage: $0 [-s] [-q]"
echo " -s : set environment."
echo " -q : querry environment."
}
# Operations before initialization,
# e.g. check the current server's release version
check_kernel_version()
{
linux_release=`cat /etc/redhat-release | grep release | grep -v "grep" | awk -F"." '{print $1}' | awk '{print $NF}'`
if [ x"${linux_release}" == x"7" ]
then
echo -e "\033[0;31;1m \n linux release is release 7 \033[0m"
release="7"
elif [ x"${linux_release}" == x"6" ]
then
echo -e "\033[0;31;1m linux release is release 6 \033[0m"
release="6"
else
release="unknown"
fi
}
# Modify the hostname
modify_hostname()
{
############################### modify the hostname ####################################
chmod +x /etc/rc.d/rc.local
echo -e "====================step 1 of 9 =============================== \n"
read -p "start to modify servername ====> please input the hostname of this server :" hostn
if [ x"${release}" == x"6" ]
then
cat /etc/sysconfig/network|grep HOSTNAME > /dev/null
if [ $? -eq 0 ]
then
sed -i s#`cat /etc/sysconfig/network|grep HOSTNAME`#HOSTNAME=${hostn}#g /etc/sysconfig/network
else
echo "HOSTNAME=${hostn}" >> /etc/sysconfig/network
fi
elif [ x"${release}" == x"7" ]
then
echo "${hostn}" > /etc/hostname
fi
}
# Modify /etc/hosts
modify_hosts()
{
############################### modify the hosts file ###############################
echo -e "====================step 2 of 9 =============================== \n"
echo -e "modify hosts \n"
echo "127.0.0.1 ${hostn}" >> /etc/hosts
}
# Modify DNS
modify_dns()
{
############################### configure the DNS servers ###################################
echo -e "====================step 3 of 9 =============================== \n"
echo -e "modify resolv.conf \n"
echo "nameserver 202.101.172.35" > /etc/resolv.conf
echo "nameserver 172.16.12.66" >> /etc/resolv.conf
echo "nameserver 8.8.8.8" >> /etc/resolv.conf
####beijing dns
#219.141.140.10 219.141.136.10
}
# Mount the NFS share
mount_nfs()
{
######################################### mount the NFS share ############################
echo -e "====================step 4 of 9 =============================== \n"
echo -e "mount nfs \n"
yum install -y nfs-utils > /dev/null
nfsstat=`rpm -qa|grep nfs-utils`
mount_flag=`df -h | grep backlog`
if [ x"${nfsstat}" != x"" -a x"${mount_flag}" == x"" ]; then
sleep 5s
mount -t nfs 172.16.13.197:/app/backlog /mnt -o nolock
cat /etc/rc.local | grep -v "backlog" > /etc/rc.local.tmp
mv /etc/rc.local /etc/rc.local.bak
mv /etc/rc.local.tmp /etc/rc.local
echo 'mount -t nfs 172.16.13.197:/app/backlog /mnt -o nolock' >> /etc/rc.local
else
echo -e "[error] unable to mount the nfs of 13.197,please check the nfs \n "
fi
}
# Copy the SSH key
create_ssh_key()
{
############################## create ssh-key login #######################################
echo -e "====================step 5 of 9 =============================== \n"
echo -e "create ssh-key \n"
cd /root
mkdir .ssh
cp -rf /mnt/admin/authorized_keys .ssh/
}
# Disable iptables
disable_iptables()
{
######################################### disable iptables ##############################
echo -e "====================step 6 of 9 =============================== \n"
echo -e "disable iptables \n"
if [ x"${release}" == x"6" ]
then
service iptables stop
chkconfig iptables off
elif [ x"${release}" == x"7" ]
then
systemctl stop firewalld.service
systemctl disable firewalld.service
fi
}
# Install the JDK
install_jdk()
{
################################# install the JDK environment #######################################
echo -e "====================step 7 of 9 =============================== \n"
echo -e "install jdk_1.6.0_26\n"
if [ -f "/mnt/admin/yunwei/pack/jdk_1.6.0_26.tar.gz" ]
then
cp -rf /mnt/admin/yunwei/pack/jdk_1.6.0_26.tar.gz /usr/local/
sleep 5s;
cd /usr/local/
if [ -d "/usr/local/jdk" ]
then
mv /usr/local/jdk /usr/local/jdk_bak
fi
tar xf jdk_1.6.0_26.tar.gz
if [ $? -eq 0 ]
then
rm -rf jdk_1.6.0_26.tar.gz
fi
#if [ -f "/mnt/admin/yunwei/pack/java.sh" ]
#then
# if [ -f "/etc/profile.d/java.sh" ]
# then
# mv /etc/profile.d/java.sh /etc/profile.d/java.sh.bak
# fi
# cp -rf /mnt/admin/yunwei/pack/java.sh /etc/profile.d/
#else
# echo -e "[error] the file java.sh not found. \n"
#fi
cd "${CURRENT_DIR}"
else
echo -e "[error] install jdk failed,not found /mnt/admin/yunwei/pack/jdk_1.6.0_26.tar.gz \n"
fi
#touch /etc/profile.d/java.sh
echo -e "export JRE_HOME=/usr/local/jdk/jre\nexport JAVA_HOME=/usr/local/jdk\nexport PATH=\$JAVA_HOME/bin:\$PATH\nexport CLASSPATH=.:\$JAVA_HOME/lib/tools.jar:\$JAVA_HOME/lib/dt.jar:\$CLASSPATH" > /etc/profile.d/java.sh
source /etc/profile
}
# Raise the maximum open file limit
modify_limits()
{
#################################### raise the maximum open file limit #############################
echo -e "====================step 8 of 9 =============================== \n"
echo -e "modify limits \n"
cat /etc/security/limits.conf | grep -v "1000000" > /etc/security/limits.conf.tmp
mv /etc/security/limits.conf /etc/security/limits.conf.bak
mv /etc/security/limits.conf.tmp /etc/security/limits.conf
echo '* - nofile 1000000' >> /etc/security/limits.conf
}
# Disable SELinux
disable_selinux()
{
#################################### disable SELINUX ####################################
echo -e "====================step 9 of 9 =============================== \n"
echo -e "disable selinux \n"
sed -i s/SELINUX=enforcing/SELINUX=disabled/g /etc/selinux/config
setenforce 0
}
# Add crontab jobs
create_crontab()
{
############################# add cron scheduled jobs #######################################
echo -e "add crontab. \n"
if [ -f "/var/spool/cron/root" ]
then
cat /var/spool/cron/root | grep -v "ntpdate" | grep -v "system_admin.sh" > /var/spool/cron/root.tmp
mv /var/spool/cron/root /var/spool/cron/root.bak
mv /var/spool/cron/root.tmp /var/spool/cron/root
fi
echo "*/5 * * * * /usr/sbin/ntpdate asia.pool.ntp.org " >> /var/spool/cron/root
echo "21 21 * * * cd /mnt/admin;sh system_admin.sh > /dev/null 2>&1" >> /var/spool/cron/root
/usr/sbin/ntpdate asia.pool.ntp.org
}
#modify sshd_config
modify_sshd_config()
{
if [ -f "/etc/ssh/sshd_config" ]
then
sed -i '/UseDNS/d' /etc/ssh/sshd_config
sed -i '/GSSAPIAuthentication/d' /etc/ssh/sshd_config
echo "GSSAPIAuthentication no" >> /etc/ssh/sshd_config
echo "UseDNS no" >> /etc/ssh/sshd_config
else
echo "[error] modify sshd_config failed."
fi
}
# Install and deploy Nagios
install_nagios()
{
############################# install the Nagios monitoring client #######################################
echo -e "install nagios. \n"
if [ ! -d "/usr/local/nagios" ]
then
sh /mnt/admin/nagios.sh >/dev/null 2>&1
if [ $? -eq 0 ]
then
echo "[info] install nagios success."
else
echo "[error] install nagios failed."
fi
else
echo "nagios was installed."
fi
}
# Final step after initialization: verify all settings
after_check()
{
echo -e "\033[0;33;1m \n#####################hostname#######################\033[0m"
if [ x"${release}" == x"6" ]
then
cat /etc/sysconfig/network|grep HOSTNAME
elif [ x"${release}" == x"7" ]
then
cat /etc/hostname
fi
echo -e "\033[0;33;1m \n#####################rc.local#######################\033[0m"
ls -l /etc/rc.d/rc.local | awk '{print $1}'
echo -e "\033[0;33;1m \n#####################hosts##########################\033[0m"
cat /etc/hosts | grep "127.0.0.1"
echo -e "\033[0;33;1m \n#####################resolv.conf####################\033[0m"
cat /etc/resolv.conf | grep "nameserver"
echo -e "\033[0;33;1m \n#####################nfs############################\033[0m"
df -h | grep backlog
echo -e "\033[0;33;1m \n#####################rc.local#######################\033[0m"
cat /etc/rc.local | grep backlog
echo -e "\033[0;33;1m \n#####################authorized_keys################\033[0m"
ls -l /root/.ssh/authorized_keys
echo -e "\033[0;33;1m \n#####################iptables#######################\033[0m"
if [ x"${release}" == x"6" ]
then
service iptables status
chkconfig --list | grep iptables
elif [ x"${release}" == x"7" ]
then
systemctl status firewalld.service | grep dead
fi
echo -e "\033[0;33;1m \n#####################jdk#############################\033[0m"
ls -l /etc/profile.d/java.sh
source /etc/profile 2>/dev/null
java -version | grep version
echo -e "\033[0;33;1m \n#####################limits##########################\033[0m"
cat /etc/security/limits.conf | grep nofile
echo -e "\033[0;33;1m \n#####################selinux#########################\033[0m"
cat /etc/selinux/config | grep "SELINUX=" | grep -v "^#"
echo -e "\033[0;33;1m \n#####################crontab#########################\033[0m"
crontab -l
echo -e "\033[0;33;1m \n#####################sshd_config################\033[0m"
echo "UseDNS|GSSAPIAuthentication"
cat /etc/ssh/sshd_config | egrep "UseDNS|GSSAPIAuthentication"
echo -e "\033[0;33;1m \n#####################nagios##########################\033[0m"
if [ -d "/usr/local/nagios" ]
then
echo "nagios was found."
else
echo "nagios not found."
fi
echo ""
}
# Reboot the server after initialization
restart_system()
{
######################################## reboot the server to apply changes ###############################
read -p "The server will reboot make the setup effectly,please input [y|n] :" rb
if [ "$rb" == "yes" -o "$rb" == "YES" -o "$rb" == "Y" -o "$rb" == "y" ]; then
echo "the server will reboot after 5s"
sleep 5s
reboot
elif [ "$rb" == "no" -o "$rb" == "NO" -o "$rb" == "N" -o "$rb" == "n" ]; then
echo "You give up reboot the server, but please remember to reboot the server manually "
else
echo "you input the wrong parameter, please reboot the server manually"
fi
}
main()
{
if [ x"${Opt_type}" == x"-s" ]
then
# Pre-initialization checks
check_kernel_version
# Modify the hostname
modify_hostname
# Modify /etc/hosts
modify_hosts
# Modify DNS
modify_dns
# Mount the NFS share
mount_nfs
# Copy the SSH key
create_ssh_key
# Disable iptables
disable_iptables
# Install the JDK
install_jdk
# Raise the maximum open file limit
modify_limits
# Disable SELinux
disable_selinux
# Add crontab jobs
create_crontab
# Modify sshd_config
modify_sshd_config
# Install and deploy Nagios
install_nagios
# Final step: verify all settings
after_check
# Reboot the server after initialization
restart_system
elif [ x"${Opt_type}" == x"-q" ]
then
# Pre-initialization checks
check_kernel_version
# Verify all settings
after_check
fi
}
if [ $# -ne 1 ]
then
print_msg
exit 1
fi
if [ x"$1" != x"-s" -a x"$1" != x"-q" ]
then
print_msg
exit 2
else
Opt_type="$1"
fi
main "$@"
| dishitequ/test_demo | work/work-scripts/初始化服务器/inEnvironment.sh | Shell | gpl-3.0 | 12,385 |
#!/bin/sh
# upgrade-ux postremove.sh
echo " * postremove.sh starts"
# remove man page
if [[ -f /usr/share/man/man8.Z/upgrade-ux.8 ]]; then
rm -f /usr/share/man/man8.Z/upgrade-ux.8
echo " * Removed /usr/share/man/man8.Z/upgrade-ux.8 man page"
fi
| gdha/upgrade-ux | packaging/HP-UX/postremove.sh | Shell | gpl-3.0 | 267 |
#!/bin/bash
git clone --depth 1 https://travis-ci:${GITHUB_TOKEN}@github.com/guedouari/desktop.xstr.me.git
cd desktop.xstr.me/
# git remote rm origin
# git remote add origin https://travis-ci:${GITHUB_TOKEN}@github.com/guedouari/desktop.xstr.me.git
rm -rf www
cp -r ../www www
git add -f www/*
git commit -m "update package from xstr.me via script "$TRAVIS_TAG
git tag $TRAVIS_TAG
git push
git push --tags
| guedouari/xstr.me | scripts/deploy-desktop.sh | Shell | gpl-3.0 | 418 |
#!/bin/bash
#
# FTP Bash Backup.
# Copyright (C) 2015 Johnnywoof
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# FTP server settings
USERNAME=""
PASSWORD=""
SERVER=""
PORT=21
# Remote server directory to upload backup
BACKUPDIR="/"
# Backups older than ndays will be removed
ndays=7
# The absolute local directory to pickup *.tar.gz file
# Please do not end the path with a forward slash.
LOCAL_DIRECTORY="/home"
# The absolute directory path to store temporary files in.
# This must be granted write access for this script.
# Please do not end the path with a forward slash.
TEMP_BACKUP_STORE="/tmp"
# Please note that if you want to support encryption for backups, openssl must be installed.
# Should backups be encrypted before uploading?
ENCRYPT_BACKUP=false
# The absolute file path to the AES password.
# You can generate a random password using the following command:
# openssl rand -base64 256 > aes.key
# The number 256 is the amount of bytes to generate.
AES_PASSWORD_FILE=""
# END CONFIGURATION
# Script below, no need to modify it
timestamp=$(date --iso)
backup_remote_file_name="$timestamp.tar.gz"
backup_file="$TEMP_BACKUP_STORE/$backup_remote_file_name"
# work out our cutoff date
MM=`date --date="$ndays days ago" +%b`
DD=`date --date="$ndays days ago" +%d`
echo "Removing files older than $MM $DD"
# get directory listing from remote source
listing=`ftp -i -n $SERVER $PORT <<EOMYF
user $USERNAME $PASSWORD
binary
cd $BACKUPDIR
ls
quit
EOMYF
`
lista=( $listing )
# loop over our files
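# Each entry in the long-format FTP listing spans 9 whitespace-separated fields,
# so step through the flattened word list 9 fields at a time.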
for ((FNO=0; FNO<${#lista[@]}; FNO+=9));do
# month (element 5), day (element 6) and filename (element 8)
#echo Date ${lista[`expr $FNO+5`]} ${lista[`expr $FNO+6`]} File: ${lista[`expr $FNO+8`]}
# check the date stamp
if [ "${lista[`expr $FNO + 5`]}" = "$MM" ];
then
if [[ ${lista[`expr $FNO+6`]} -lt $DD ]];
then
# Remove this file
echo "Removing ${lista[`expr $FNO+8`]}"
ftp -i -n $SERVER $PORT <<EOMYF2
user $USERNAME $PASSWORD
binary
cd $BACKUPDIR
delete ${lista[`expr $FNO+8`]}
quit
EOMYF2
fi
fi
done
echo "Creating backup..."
tar -czf $backup_file $LOCAL_DIRECTORY
if [ "$ENCRYPT_BACKUP" == "true" ]
then
echo "Encrypting backup using OpenSSL..."
output_encrypted_file="$backup_file.enc"
openssl enc -aes-256-cbc -salt -in $backup_file -out $output_encrypted_file -pass file:$AES_PASSWORD_FILE
rm $backup_file
backup_file=$output_encrypted_file
fi
echo "Uploading backup $backup_file ..."
ftp -n -i $SERVER $PORT <<EOF
user $USERNAME $PASSWORD
cd $BACKUPDIR
put $backup_file $backup_remote_file_name
quit
EOF
echo "Deleting temporary files..."
rm $backup_file
echo "Backup complete."
| johnnywoof/FTP-Bash-Backup | backupScript.bash | Shell | gpl-3.0 | 3,308 |
#!/usr/bin/env bash
# This file is part of The RetroPie Project
#
# The RetroPie Project is the legal property of its developers, whose names are
# too numerous to list here. Please refer to the COPYRIGHT.md file distributed with this source.
#
# See the LICENSE.md file at the top-level directory of this distribution and
# at https://raw.githubusercontent.com/RetroPie/RetroPie-Setup/master/LICENSE.md
#
rp_module_id="lr-parallel-n64"
rp_module_desc="N64 emu - Highly modified Mupen64Plus port for libretro"
rp_module_help="ROM Extensions: .z64 .n64 .v64\n\nCopy your N64 roms to $romdir/n64"
rp_module_licence="GPL2 https://raw.githubusercontent.com/libretro/parallel-n64/master/mupen64plus-core/LICENSES"
rp_module_section="opt"
function sources_lr-parallel-n64() {
gitPullOrClone "$md_build" https://github.com/libretro/parallel-n64.git
}
function build_lr-parallel-n64() {
rpSwap on 750
make clean
if isPlatform "rpi" || isPlatform "odroid-c1"; then
make platform="$__platform"
else
make
fi
rpSwap off
md_ret_require="$md_build/parallel_n64_libretro.so"
}
function install_lr-parallel-n64() {
md_ret_files=(
'parallel_n64_libretro.so'
'README.md'
)
}
function configure_lr-parallel-n64() {
mkRomDir "n64"
ensureSystemretroconfig "n64"
# Set core options
setRetroArchCoreOption "mupen64-gfxplugin" "rice"
setRetroArchCoreOption "mupen64-gfxplugin-accuracy" "low"
setRetroArchCoreOption "mupen64-screensize" "640x480"
# Copy config files
cat > $home/RetroPie/BIOS/gles2n64rom.conf << _EOF_
#rom specific settings
rom name=SUPER MARIO 64
target FPS=25
rom name=Kirby64
target FPS=25
rom name=Banjo-Kazooie
framebuffer enable=1
update mode=4
target FPS=25
rom name=BANJO TOOIE
hack banjo tooie=1
ignore offscreen rendering=1
framebuffer enable=1
update mode=4
rom name=STARFOX64
window width=864
window height=520
target FPS=27
rom name=MARIOKART64
target FPS=27
rom name=THE LEGEND OF ZELDA
texture use IA=0
hack zelda=1
target FPS=17
rom name=ZELDA MAJORA'S MASK
texture use IA=0
hack zelda=1
rom name=F-ZERO X
window width=864
window height=520
target FPS=55
rom name=WAVE RACE 64
window width=864
window height=520
target FPS=27
rom name=SMASH BROTHERS
framebuffer enable=1
window width=864
window height=520
target FPS=27
rom name=1080 SNOWBOARDING
update mode=2
target FPS=27
rom name=PAPER MARIO
update mode=4
rom name=STAR WARS EP1 RACER
video force=1
video width=320
video height=480
rom name=JET FORCE GEMINI
framebuffer enable=1
update mode=2
ignore offscreen rendering=1
target FPS=27
rom name=RIDGE RACER 64
window width=864
window height=520
enable lighting=0
target FPS=27
rom name=Diddy Kong Racing
target FPS=27
rom name=MarioParty
update mode=4
rom name=MarioParty3
update mode=4
rom name=Beetle Adventure Rac
window width=864
window height=520
target FPS=27
rom name=EARTHWORM JIM 3D
rom name=LEGORacers
rom name=GOEMONS GREAT ADV
window width=864
window height=520
rom name=Buck Bumble
window width=864
window height=520
rom name=BOMBERMAN64U2
window width=864
window height=520
rom name=ROCKETROBOTONWHEELS
window width=864
window height=520
rom name=GOLDENEYE
force screen clear=1
framebuffer enable=1
window width=864
window height=520
target FPS=25
rom name=Mega Man 64
framebuffer enable=1
target FPS=25
_EOF_
chown $user:$user "$biosdir/gles2n64rom.conf"
addEmulator 0 "$md_id" "n64" "$md_inst/parallel_n64_libretro.so"
addSystem "n64"
}
| superjamie/RetroPie-Setup | scriptmodules/libretrocores/lr-parallel-n64.sh | Shell | gpl-3.0 | 3,499 |
#!/bin/bash
#SBATCH --job-name=pl.lamb.11.prod.1
#SBATCH --output=pl.lamb.11.prod.1.err
#SBATCH --time=48:00:00
#SBATCH --nodes=1
#SBATCH --exclusive
#SBATCH --mail-type=END # Type of email notification- Available Options (BEGIN,END,FAIL,ALL)
#SBATCH [email protected] # Email to which notifications will be sent
export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/software/usr/gcc-4.9.2/lib64"
export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/software/usr/hpcx-v1.2.0-292-gcc-MLNX\
_OFED_LINUX-2.4-1.0.0-redhat6.6/ompi-mellanox-v1.8/lib"
export AMBERHOME="/mnt/lustre_fs/users/mjmcc/apps/amber14"
export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$AMBERHOME/lib"
srun -N1 -n20 $AMBERHOME/bin/pmemd.MPI -O -i pl.lamb.11.prod.1.in -o pl.lamb.11.prod.1.log -p ../../../1F40_ligand_solv.prmtop -c ../../lambda.10/production.1/pl.lamb.10.prod.1.rst -ref ../../lambda.10/production.1/pl.lamb.10.prod.1.rst -inf pl.lamb.11.prod.1.mdinfo -e pl.lamb.11.prod.1.en -x pl.lamb.11.prod.1.ncdf -r pl.lamb.11.prod.1.rst
| dupontke/Thermodynamic_Integration | lambda/lambda.11/production.1/submit_pl.lamb.11.prod.1.sh | Shell | gpl-3.0 | 1,001 |
histoDistro Data32/full_meoh.{1,2,3,14,17}.32ps.traj 50 hist.dat
| jeffcomer/DiffusionFusion | Examples/Smoluchowski/doHist.sh | Shell | gpl-3.0 | 65 |
#!/bin/bash
#if synclient -l | grep "TouchpadOff .*=.*0" ; then
#synclient TouchpadOff=1 ;
#else
#synclient TouchpadOff=0 ;
#fi
id="$(xinput | grep "Synaptics"| grep -oP 'id=([0-9]*)' | sed 's/id=//')"
if xinput list-props $id | grep "Device Enabled (153):.*1" >/dev/null
then
xinput disable $id
notify-send "Trackpad disabled"
else
xinput enable $id
notify-send "Trackpad enabled"
fi
| luffy1012/Scripts | toggletouchpad.sh | Shell | gpl-3.0 | 401 |
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
#
# Copyright (c) 2014, Joyent, Inc.
#
git ls-tree -r --name-only HEAD | PATH=/usr/node/bin:$PATH xargs license-file -f
| joyent/sdc-cnapi | tools/update-license.sh | Shell | mpl-2.0 | 325 |
#!/bin/bash
. ./util_functions.sh
# Default values
OTRS_BACKUP_DIR="/var/otrs/backups"
OTRS_CONFIG_DIR="${OTRS_ROOT}Kernel/"
OTRS_CONFIG_FILE="${OTRS_CONFIG_DIR}Config.pm"
OTRS_CONFIG_MOUNT_DIR="/Kernel"
mysqlcmd="mysql -uroot -h$OTRS_DB_SERVER -p$MYSQL_ROOT_PASSWORD "
function wait_for_db(){
while true; do
out="`$mysqlcmd -e "SELECT COUNT(*) FROM mysql.user;" 2>&1`"
print_info $out
echo "$out" | grep -E "COUNT|Enter" 2>&1 > /dev/null
if [ $? -eq 0 ]; then
print_info "MySQL server is up !"
break
fi
print_warning "DB server still isn't up, sleeping a little bit ..."
sleep 2
done
}
function create_db(){
print_info "Creating OTRS database..."
$mysqlcmd -e "CREATE DATABASE IF NOT EXISTS $OTRS_DATABASE DEFAULT CHARACTER SET = utf8;"
[ $? -gt 0 ] && print_error "Couldn't create OTRS database !!" && exit 1
$mysqlcmd -e "GRANT ALL ON $OTRS_DATABASE.* to '$OTRS_DB_USER'@'%' identified by '$OTRS_DB_PASSWORD';"
[ $? -gt 0 ] && print_error "Couldn't create database user !!" && exit 1
$mysqlcmd -e "FLUSH PRIVILEGES;"
}
function restore_backup(){
[ -z "$1" ] && print_error "\n\e[1;31mERROR:\e[0m OTRS_BACKUP_DATE not set.\n" && exit 1
# Check if a host-mounted volume for configuration storage was added to this container
check_host_mount_dir
# Check if OTRS database exists
$mysqlcmd -e "USE $OTRS_DATABASE" 2>/dev/null
if [ $? -eq 0 ]; then
print_info "Dropping existing database...\n"
$mysqlcmd -e "DROP DATABASE $OTRS_DATABASE"
fi
create_db
# import database
print_info "decompress SQL-file ..."
gunzip $OTRS_BACKUP_DIR/$1/DatabaseBackup.sql.gz
print_info "cat SQL-file into database"
mysql -f -u${OTRS_DB_USER} -p${OTRS_DB_PASSWORD} -h${OTRS_DB_SERVER} ${OTRS_DATABASE} < $OTRS_BACKUP_DIR/$1/DatabaseBackup.sql
[ $? -gt 0 ] && failed=yes
print_info "compress SQL-file..."
gzip $OTRS_BACKUP_DIR/$1/DatabaseBackup.sql
if [ "$failed" == "yes" ]; then
print_error "Couldn't restore database from OTRS backup !!"
exit 1
fi
# Clear existing OTRS files
cd ${OTRS_ROOT}
find -maxdepth 1 -type d -not -name Kernel -not -name "." -not -name "var" -exec rm -r {} \;
rm -rf ${OTRS_ROOT}Kernel/*
cd var
find -maxdepth 1 -type d -not -name article -not -name "." -exec rm -r {} \;
rm -rf ${OTRS_ROOT}var/article/*
# restore OTRS files
print_info "Restoring ${OTRS_BACKUP_DIR}/${1}/Application.tar.gz ..."
print_info "This may take a while..."
cd ${OTRS_ROOT}
tar -xzf $OTRS_BACKUP_DIR/$1/Application.tar.gz
[ $? -gt 0 ] && print_error "Couldn't restore OTRS files from backup !!" && exit 1
update_database_settings
# Update file permissions
${OTRS_ROOT}bin/otrs.SetPermissions.pl --otrs-user=otrs --web-group=www-data ${OTRS_ROOT}
# Get hostname and admin email settings
OTRS_HOSTNAME=`su -c "${OTRS_ROOT}bin/otrs.Console.pl Maint::Config::Dump FQDN" -s /bin/bash otrs`
OTRS_ADMIN_EMAIL=`su -c "${OTRS_ROOT}bin/otrs.Console.pl Maint::Config::Dump AdminEmail" -s /bin/bash otrs`
}
function update_database_settings() {
#Update database settings in Config.pm
update_config_value "DatabaseHost" $OTRS_DB_SERVER
update_config_value "Database" $OTRS_DATABASE
update_config_value "DatabaseUser" $OTRS_DB_USER
update_config_value "DatabasePw" $OTRS_DB_PASSWORD
}
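# Set or append a key in Config.pm. Note that $Self sits inside double quotes,
# so the shell expands it to an empty string and the sed patterns effectively
# anchor on "->{Key}", which still matches the intended $Self->{Key} lines.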
function update_config_value(){
if grep -v ".*#.*$1" ${OTRS_CONFIG_FILE} | grep -q "[{\"']$1[}\"']"
then
sed -i -r "s/($Self->\{['\"]?$1['\"]?\} *= *).*/\1'$2';/" ${OTRS_CONFIG_FILE}
else
sed -i "/$Self->{Home} = '\/opt\/otrs';/a \
\$Self->{'$1'} = '$2';" ${OTRS_CONFIG_FILE}
fi
}
function update_otrs_settings(){
/otrs-update-setting.pl SecureMode option-mod-item -value 1
/otrs-update-setting.pl ScriptAlias string-mod-item -value ""
/otrs-update-setting.pl LogModule option-mod-item -value "Kernel::System::Log::SysLog"
test -f /update-otrs-custom-settings.sh && . $_
}
function set_default_language(){
if [ ! -z $OTRS_LANGUAGE ]; then
print_info "Setting default language to: \e[92m'$OTRS_LANGUAGE'\e[0m"
/otrs-update-setting.pl DefaultLanguage string-mod-item -value $OTRS_LANGUAGE
fi
}
function set_ticket_counter() {
if [ ! -z "${OTRS_TICKET_COUNTER}" ]; then
print_info "Setting the start of the ticket counter to: \e[92m'$OTRS_TICKET_COUNTER'\e[0m"
echo "$OTRS_TICKET_COUNTER" > ${OTRS_ROOT}var/log/TicketCounter.log
fi
if [ ! -z $OTRS_NUMBER_GENERATOR ]; then
print_info "Setting ticket number generator to: \e[92m'$OTRS_NUMBER_GENERATOR'\e[0m"
/otrs-update-setting.pl "Ticket::NumberGenerator" option-mod-item -value "Kernel::System::Ticket::Number::${OTRS_NUMBER_GENERATOR}"
fi
}
function set_fetch_email_time(){
if [ ! -z $OTRS_POSTMASTER_FETCH_TIME ]; then
if [ $OTRS_POSTMASTER_FETCH_TIME -eq 0 ]; then
print_info "Disabling fetching of postmaster emails."
/otrs-update-setting.pl "Daemon::SchedulerCronTaskManager::Task###MailAccountFetch" disable
else
print_info "Setting Postmaster fetch emails time to \e[92m$OTRS_POSTMASTER_FETCH_TIME\e[0m minutes"
/otrs-update-setting.pl "Daemon::SchedulerCronTaskManager::Task###MailAccountFetch" hash-mod-key-value -key Schedule -value "*/${OTRS_POSTMASTER_FETCH_TIME} * * * *"
fi
fi
}
function check_host_mount_dir(){
# Copy the configuration from /Kernel (put there by the Dockerfile) to $OTRS_CONFIG_DIR
# to be able to use host-mounted volumes. Copy only if ${OTRS_CONFIG_DIR} is empty.
if [ "$(ls -A ${OTRS_CONFIG_MOUNT_DIR})" ] && [ ! "$(ls -A ${OTRS_CONFIG_DIR})" ];
then
print_info "Found empty \e[92m${OTRS_CONFIG_DIR}\e[0m, copying default configuration to it..."
mkdir -p ${OTRS_CONFIG_DIR}
cp -rp ${OTRS_CONFIG_MOUNT_DIR}/* ${OTRS_CONFIG_DIR}
if [ $? -eq 0 ];
then
print_info "Done."
else
print_error "Can't move OTRS configuration directory to ${OTRS_CONFIG_DIR}" && exit 1
fi
else
print_info "Found existing configuration directory, Ok."
fi
rm -rf ${OTRS_CONFIG_MOUNT_DIR}
}
function reinstall_modules () {
print_info "Reinstalling OTRS modules..."
su -c "$OTRS_ROOT/bin/otrs.Console.pl Admin::Package::ReinstallAll > /dev/null 2>&1> /dev/null 2>&1" -s /bin/bash otrs
if [ $? -gt 0 ]; then
print_error "Could not reinstall OTRS modules, try to do it manually with the Package Manager at the admin section."
else
print_info "Done."
fi
}
function disable_phone_home_features() {
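# Set the Required/Valid/ReadOnly attributes to 0 for the phone-home daemon
# tasks in Daemon.xml (enable_phone_home_features below sets them back to 1).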
sed -i -e '/Task###OTRSBusinessEntitlementCheck/ s/\(.*Required="\).\(" Valid="\).\(" ReadOnly="\).\(".*\)/\10\20\30\4/' -i ${OTRS_ROOT}Kernel/Config/Files/Daemon.xml
sed -i -e '/Task###OTRSBusinessAvailabilityCheck"/ s/\(.*Required="\).\(" Valid="\).\(" ReadOnly="\).\(".*\)/\10\20\30\4/' -i ${OTRS_ROOT}Kernel/Config/Files/Daemon.xml
sed -i -e '/Task###SupportDataCollectAsynchronous"/ s/\(.*Required="\).\(" Valid="\).\(" ReadOnly="\).\(".*\)/\10\20\30\4/' -i ${OTRS_ROOT}Kernel/Config/Files/Daemon.xml
sed -i -e '/Task###RegistrationUpdateSend"/ s/\(.*Required="\).\(" Valid="\).\(" ReadOnly="\).\(".*\)/\10\20\30\4/' -i ${OTRS_ROOT}Kernel/Config/Files/Daemon.xml
}
function enable_phone_home_features() {
sed -i -e '/Task###OTRSBusinessEntitlementCheck/ s/\(.*Required="\).\(" Valid="\).\(" ReadOnly="\).\(".*\)/\11\21\31\4/' -i ${OTRS_ROOT}Kernel/Config/Files/Daemon.xml
sed -i -e '/Task###OTRSBusinessAvailabilityCheck"/ s/\(.*Required="\).\(" Valid="\).\(" ReadOnly="\).\(".*\)/\11\21\31\4/' -i ${OTRS_ROOT}Kernel/Config/Files/Daemon.xml
sed -i -e '/Task###SupportDataCollectAsynchronous"/ s/\(.*Required="\).\(" Valid="\).\(" ReadOnly="\).\(".*\)/\11\21\31\4/' -i ${OTRS_ROOT}Kernel/Config/Files/Daemon.xml
sed -i -e '/Task###RegistrationUpdateSend"/ s/\(.*Required="\).\(" Valid="\).\(" ReadOnly="\).\(".*\)/\11\21\31\4/' -i ${OTRS_ROOT}Kernel/Config/Files/Daemon.xml
}
| wdouglascampbell/docker-otrs | scripts/functions.sh | Shell | agpl-3.0 | 8,098 |
nosetests --with-coverage --cover-package=ease
| TsinghuaX/ease | run_tests.sh | Shell | agpl-3.0 | 47 |
#!/bin/bash -e
# mock runner is not setting up the system correctly
# https://issues.redhat.com/browse/CPDEVOPS-242
if [[ "$(rpm --eval "%dist")" == ".el8" ]]; then
readarray -t pkgs < automation/check-merged.packages.el8stream
else
readarray -t pkgs < automation/check-merged.packages
fi
dnf install -y "${pkgs[@]}"
autopoint
autoreconf -ivf
./configure --disable-ansible-syntax-check
# make distcheck skipped due to bug afflicting automake.
# fc29: https://bugzilla.redhat.com/1716384
# fc30: https://bugzilla.redhat.com/1757854
# el8: https://bugzilla.redhat.com/1759942
# make distcheck
./automation/build-artifacts.sh
| oVirt/ovirt-hosted-engine-setup | automation/check-merged.sh | Shell | lgpl-2.1 | 630 |
#!/bin/sh
# Sapwood server startup/shutdown script
if [ "x$AF_PIDDIR" = "x" ]; then
echo "$0: Error, AF_PIDDIR is not defined"
exit 2
fi
if [ ! -w $AF_PIDDIR ]; then
echo "$0: Error, directory $AF_PIDDIR is not writable"
exit 2
fi
PROG=/usr/lib/sapwood/sapwood-server
SVC="Sapwood image server"
case "$1" in
start) START=TRUE
;;
stop) START=FALSE
;;
*) echo "Usage: $0 {start|stop}"
exit 1
;;
esac
if [ $START = TRUE ]; then
# check that required environment is defined
if [ "x$DISPLAY" = "x" ]; then
echo "$0: Error, DISPLAY is not defined"
exit 2
fi
$LAUNCHWRAPPER_NICE start "$SVC" $PROG
else
$LAUNCHWRAPPER_NICE stop "$SVC" $PROG
fi
| GNOME/sapwood | debian/sapwood-server.sh | Shell | lgpl-2.1 | 708 |
#!/bin/bash
[ -d ./tmp ] && rm -rf ./tmp
DEVKIT_ROOT=/sifteam/jackal/builds/opensif/gbquad/tmp/
CROSS=${DEVKIT_ROOT}/sysroots/x86_64-linux/usr/bin/mips32el-oe-linux/mipsel-oe-linux-
export CFLAGS+="-I${DEVKIT_ROOT}/sysroots/gbquad/usr/include \
-I${DEVKIT_ROOT}/sysroots/gbquad/usr/include/libxml2 \
-I${DEVKIT_ROOT}/sysroots/gbquad/usr/include/python3.8"
export CC=${CROSS}gcc
export STRIP=${CROSS}strip
export SWIG=${DEVKIT_ROOT}/sysroots/x86_64-linux/usr/bin/swig
export D=./tmp
make && make install
if [ $? != 0 ]; then
echo compile error
exit 1
fi
mkdir -p tmp/CONTROL
cp contrib/control tmp/CONTROL/
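# Build the package version from the RELEASE string in src/version.h:
# strip quotes and parentheses and turn spaces into dashes.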
VERSION=`cat src/version.h | grep RELEASE | sed "s/.*RELEASE \"//" | sed "s/\"//" | sed "s/\ /-/" | sed "s/\ /-/" | sed "s/(//" | sed "s/)//"`
echo "Package: enigma2-plugin-systemplugins-crossepg" >> tmp/CONTROL/control
echo "Version: $VERSION-r0" >> tmp/CONTROL/control
echo "Architecture: mipsel" >> tmp/CONTROL/control
sh ipkg-build -o root -g root tmp/
[ ! -d out ] && mkdir out
mv *.ipk out
echo "Package moved in `pwd`/out folder"
| oe-alliance/e2openplugin-CrossEPG | make_e2_oealliance.sh | Shell | lgpl-2.1 | 1,053 |
#!/bin/bash
rel=../..
if [ x"$TOP" == x ]; then TOP=`pwd`/$rel; fi
. $rel/linux-ow.sh
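# do_clean, make_msdos_data_disk, make_buildlist, do_wmake and the *_bat
# helpers used below come from the sourced linux-ow.sh.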
dos=1
doshuge=1
dostiny=1 # MS-DOS tiny model
if [ "$1" == "clean" ]; then
do_clean
rm -fv test.dsk test2.dsk nul.err tmp.cmd tmp1.cmd tmp2.cmd
exit 0
fi
if [ "$1" == "disk" ]; then
make_msdos_data_disk test.dsk || exit 1
mcopy -i test.dsk dos86s/test.exe ::test86.exe
mcopy -i test.dsk dos386f/test.exe ::test386.exe
mcopy -i test.dsk dos386f/dos4gw.exe ::dos4gw.exe
fi
if [[ "$1" == "build" || "$1" == "" ]]; then
make_buildlist
begin_bat
what=all
if [ x"$2" != x ]; then what="$2"; fi
if [ x"$3" != x ]; then build_list="$3"; fi
for name in $build_list; do
do_wmake $name "$what" || exit 1
bat_wmake $name "$what" || exit 1
done
end_bat
fi
| joncampbell123/doslib | hw/pcie/make.sh | Shell | lgpl-2.1 | 813 |
#!/usr/bin/env bash
set -Eeuo pipefail
IFS=$'\n\t'
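# With IFS restricted to newline and tab, unquoted expansions below do not
# word-split on spaces in file or directory names.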
current="$(pwd)"
root="$(
cd "$(dirname "$0")"
pwd -P
)"
outDir="${root}/generated_media"
# Create example files.
echo "Not a valid video file" > "${root}/library/Demo.mov"
echo "Not a valid mp3 file" > "${root}/library/Demo.mp3"
# Delete old data
rm -rf "${outDir}"
###########################################################
# Movies
###########################################################
movieInput="${root}/library/movies_en.txt"
movieOutDir="${outDir}/movies"
printf "Creating fake movies... "
while IFS= read -r line; do
if [ "$line" != "" ]; then
mkdir -p "${movieOutDir}/${line}"
cp "${root}/library/Demo.mov" "${movieOutDir}/${line}/movie.mov"
fi
done < "$movieInput"
printf "[Done]\n"
echo "Creating a fake BluRay structure"
printf " for 'The Simpsons Movie'... "
mkdir -p "${outDir}/movies/The_Simpsons_Movie_2007"
cd "${outDir}/movies/The_Simpsons_Movie_2007"
mkdir BDMV
cd BDMV
mkdir PLAYLIST
mkdir CLIPINF
mkdir -p STREAM/SSIF
mkdir AUXDATA
mkdir BACKUP
touch PLAYLIST/12345.mpls
touch CLIPINF/12345.clpi
touch STREAM/12345.m2ts
touch STREAM/SSIF/12345.ssif
touch AUXDATA/sound.bdmv
touch AUXDATA/12345.otf
touch BACKUP/index.bdmv
touch BACKUP/MovieObject.bdmv
touch BACKUP/12345.mpls
touch BACKUP/12345.clpi
touch index.bdmv
touch MovieObject.bdmv
cd "${current}"
printf "[Done]\n"
###########################################################
# TV Shows
###########################################################
showBaseOutDir="${outDir}/tvshows_en"
# $1: TV Show directory
create_fake_show() {
local showInputDir=${1}
local showName
local showOutDir
showName=$(basename $1)
showOutDir="${showBaseOutDir}/${showName}"
for season in "${showInputDir}"/*.txt; do
local seasonFile
local seasonDir # Name without file extension
seasonFile=$(basename "${season}")
seasonDir="${showOutDir}/${seasonFile%.*}"
mkdir -p "${seasonDir}"
while IFS= read -r episode; do
if [ "$episode" != "" ]; then
cp "${root}/library/Demo.mov" "${seasonDir}/${episode}.mov"
fi
done < "$season"
done
}
printf "Creating fake tv shows... "
for show in "${root}/library/tvshows_en/"*; do
create_fake_show $show
done
printf "[Done]\n"
###########################################################
# Concerts
###########################################################
concertInput="${root}/library/concerts.txt"
concertOutDir="${outDir}/concerts"
printf "Creating fake concerts... "
while IFS= read -r line; do
if [ "$line" != "" ]; then
mkdir -p "${concertOutDir}/${line}"
cp "${root}/library/Demo.mov" "${concertOutDir}/${line}/concert.mov"
fi
done < "$concertInput"
printf " [Done]\n"
###########################################################
# Music
###########################################################
musicBaseOutDir="${outDir}/music"
# $1: Music directory
create_fake_music() {
local aristInputDir=${1}
local artistName
local artistOutDir
artistName=$(basename $1)
artistOutDir="${musicBaseOutDir}/${artistName}"
for album in "${aristInputDir}"/*.txt; do
local albumFile
local albumDir # Name without file extension
albumFile=$(basename "${album}")
albumDir="${artistOutDir}/${albumFile%.*}"
mkdir -p "${albumDir}"
while IFS= read -r song; do
if [ "$song" != "" ]; then
cp "${root}/library/Demo.mp3" "${albumDir}/${song}.mp3"
fi
done < "$album"
done
}
printf "Creating fake music albums... "
for artist in "${root}/library/music/"*; do
create_fake_music $artist
done
printf "[Done]\n"
###########################################################
# Downloads
###########################################################
downloadInput="${root}/library/downloads.txt"
downloadOutDir="${outDir}/downloads"
mkdir -p "${downloadOutDir}"
printf "Creating fake downloads... "
while IFS= read -r line; do
if [ "$line" != "" ]; then
cp "${root}/library/Demo.mov" "${downloadOutDir}/${line}"
fi
done < "$downloadInput"
printf " [Done]\n"
| Komet/MediaElch | scripts/create_fakes.sh | Shell | lgpl-3.0 | 4,024 |
#! /bin/bash
filenames[0]="default.cow"
filenames[1]="neko.cow"
filenames[2]="tux.cow"
filenames[3]="penguin.cow"
DFORMAT="+%A %B %d, %Y %l:%M:%S %P"
SLEEPSECS=120
while true; do
for ii in 0 1 2 3; do
fname=${filenames[ii]}
./cowsay.scm --file ${fname} $(date "$DFORMAT")
sleep ${SLEEPSECS}
done
done
| robertharamoto/Basics | Cowsay/say-time.sh | Shell | unlicense | 338 |
#!/bin/bash
# Install rJava on Mac
export JAVA_HOME=/Library/Java/JavaVirtualMachines/1.6.0.jdk/Contents/Home
export JAVA_CPPFLAGS=-I/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.11.sdk/System/Library/Frameworks/JavaVM.framework/Versions/Current/Headers/
R CMD javareconf
echo "install.package('rJava')" | R --no-save
# Install rgdal on Mac
install.packages('rgeos', type="source")
install.packages('rgdal', type = "source", configure.args=c('--with-proj-include=/usr/local/include','--with-proj-lib=/usr/local/lib'))
| caesar0301/warp-drive | utilities/insall_rjava_rgdal_mac.sh | Shell | apache-2.0 | 568 |
#!/usr/bin/env bash
rm -rf node_modules/cerebral
ln -s ../cerebral/packages/cerebral node_modules
rm -rf node_modules/cerebral-provider-firebase
ln -s ../cerebral/packages/cerebral-provider-firebase node_modules
rm -rf node_modules/cerebral-router
ln -s ../cerebral/packages/cerebral-router node_modules
| saitodisse/scrap-cerebral-2 | scripts/link-cerebral-source.sh | Shell | apache-2.0 | 307 |
#!/bin/bash
: ${SECURE_RANDOM:=true}
: ${EXPOSE_JMX_METRICS:=false}
: ${EXPOSE_JMX_METRICS_PORT:=20105}
: ${EXPOSE_JMX_METRICS_CONFIG:=config.yaml}
: ${TRUSTED_CERT_DIR:=/certs/trusted}
: ${EXPOSE_JMX_BIND_ADDRESS:=0.0.0.0}
echo "Importing certificates to the default Java certificate trust store."
if [ -d "$TRUSTED_CERT_DIR" ]; then
for cert in $(ls -A "$TRUSTED_CERT_DIR"); do
if [ -f "$TRUSTED_CERT_DIR/$cert" ]; then
if keytool -import -alias "$cert" -noprompt -file "$TRUSTED_CERT_DIR/$cert" -keystore /usr/local/openjdk-11/lib/security/cacerts -storepass changeit; then
echo -e "Certificate added to default Java trust store with alias $cert."
else
echo -e "WARNING: Failed to add $cert to trust store.\n"
fi
fi
done
fi
echo "Starting the Cloudbreak application..."
set -x
if [ "$SECURE_RANDOM" == "false" ]; then
CB_JAVA_OPTS="$CB_JAVA_OPTS -Djava.security.egd=file:/dev/./urandom"
fi
if [ "$EXPOSE_JMX_METRICS" == "true" ]; then
CB_JAVA_OPTS="$CB_JAVA_OPTS -javaagent:/jmx_prometheus_javaagent.jar=$EXPOSE_JMX_BIND_ADDRESS:$EXPOSE_JMX_METRICS_PORT:$EXPOSE_JMX_METRICS_CONFIG"
fi
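# Launch the JVM in the background, trap SIGINT/SIGTERM to forward them to the
# Java process, and wait on it so the script exits together with the JVM.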
eval "(java $CB_JAVA_OPTS -jar /cloudbreak.jar) & JAVAPID=\$!; trap \"kill \$JAVAPID; wait \$JAVAPID\" SIGINT SIGTERM; wait \$JAVAPID"
| hortonworks/cloudbreak | docker-cloudbreak/bootstrap/start_cloudbreak_app.sh | Shell | apache-2.0 | 1,326 |
#!/bin/bash
#############################################################################
#
# (c) Copyright 2014 IBM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#############################################################################
#############################################################################
#
# Authored: Tanya Wolff ([email protected])
# Modified:
# Xue Po Wang ([email protected])
#
# Script Inputs
# <WAS_PROFILE_ROOT> - /opt/IBM/WebSphere/Profiles/DefaultAppSrv01
# <WAS_SYS_USERNAME> - clmadmin
# <JAZZ_HOME> - /opt/IBM/JazzTeamServer
# <COMPONENT> - {"jts", "ccm", "qm", "rm"}
# <CONTEXT> - {"jts", "ccm", "ccm01", "ccm02", "qm", "rm"}
#
# Inputs - taken from 'environment'
# <WAS_PROFILE_ROOT> - /opt/IBM/WebSphere/Profiles/DefaultAppSrv01
# <WAS_SYS_USERNAME> - clmadmin
#
# Outputs - save to 'environment'
# <DEPLOY_CLMAPP_SCRIPTS> - The home directory of this script
#
#############################################################################
envProp=/etc/env.properties
touch $envProp
. $envProp
WAS_PROFILE_ROOT=$1
if [ -z "${WAS_PROFILE_ROOT}" ]; then
echo "WAS_PROFILE_ROOT must be provided."
exit 1
fi
if [ ! -d ${WAS_PROFILE_ROOT} ]; then
echo "The ${WAS_PROFILE_ROOT} does not exist."
exit 1
fi
WAS_SYS_USERNAME=$2
if [ -z "${WAS_SYS_USERNAME}" ]; then
echo "WAS_SYS_USERNAME must be provided."
exit 1
fi
JAZZ_HOME=$3
if [ -z "${JAZZ_HOME}" ]; then
echo "JAZZ_HOME must be provided."
exit 1
fi
if [ ! -d ${JAZZ_HOME} ]; then
echo "The ${JAZZ_HOME} does not exist."
exit 1
fi
JAZZ_LDAP_PROP_FILE=$4
if [ -z "${JAZZ_LDAP_PROP_FILE}" ]; then
echo "JAZZ_LDAP_PROP_FILE must be provided."
exit 1
fi
if [ ! -f ${JAZZ_LDAP_PROP_FILE} ]; then
echo "The file ${JAZZ_LDAP_PROP_FILE} does not exist."
exit 1
fi
COMPONENT=$5
if [ -z "${COMPONENT}" ]; then
echo "COMPONENT must be provided."
exit 1
fi
CONTEXT=$6
if [ -z "${CONTEXT}" ]; then
echo "CONTEXT must be provided."
exit 1
fi
DEPLOY_CLMAPP_SCRIPTS=`pwd -P`
echo "DEPLOY_CLMAPP_SCRIPTS=${DEPLOY_CLMAPP_SCRIPTS}" >> $envProp
echo "------Server Info------"
echo "JAZZ_HOME is: ${JAZZ_HOME}"
echo "COMPONENT is: ${COMPONENT}"
echo "CONTEXT is: ${CONTEXT}"
echo "Hostname is: `hostname`"
echo "Deploying CLM application(s) to WAS ..."
su - ${WAS_SYS_USERNAME} -c "${WAS_PROFILE_ROOT}/bin/wsadmin.sh -lang jython -f ${DEPLOY_CLMAPP_SCRIPTS}/InstallCLMapps.py ${JAZZ_HOME} ${JAZZ_LDAP_PROP_FILE} ${COMPONENT} ${CONTEXT}"
if [ $? -ne 0 ]; then
echo "CLM Application Deployment failed"
exit 1
fi
echo "CLM Application deployment succeeded"
echo "-----------------------"
echo " "
exit 0
| CLMDev/leanJazz | automations/clm-e1/src/OpenSource-CLM/20140807001/DeployCLMApp/InstallCLMApp.sh | Shell | apache-2.0 | 3,117 |
#!/bin/bash
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
. $(dirname ${BASH_SOURCE})/../util.sh
desc "Resize the RC and watch the service backends change"
run "kubectl --namespace=demos scale rc hostnames --replicas=1"
run "kubectl --namespace=demos scale rc hostnames --replicas=2"
run "kubectl --namespace=demos scale rc hostnames --replicas=5"
desc "Fire up a cloud load-balancer"
run "kubectl --namespace=demos get svc hostnames -o yaml \\
| sed 's/ClusterIP/LoadBalancer/' \\
| kubectl replace -f -"
while true; do
run "kubectl --namespace=demos get svc hostnames -o yaml | grep loadBalancer -A 4"
if kubectl --namespace=demos get svc hostnames \
-o go-template='{{index (index .status.loadBalancer.ingress 0) "ip"}}' \
>/dev/null 2>&1; then
break
fi
done
|
jasonbishop/contrib
|
micro-demos/services/split1_rhs.sh
|
Shell
|
apache-2.0
| 1,370 |
#!/bin/bash
# Save trace setting
XTRACE=$(set +o | grep xtrace)
set +o xtrace
if [[ "$1" == "source" ]]; then
# no-op
:
elif [[ "$1" == "stack" && "$2" == "install" ]]; then
echo_summary "Installing networking-bgpvpn"
setup_develop $NETWORKING_BGPVPN_DIR
elif [[ "$1" == "stack" && "$2" == "pre-install" ]]; then
echo_summary "Enabling networking-bgpvpn service plugin"
_neutron_service_plugin_class_add $BGPVPN_PLUGIN_CLASS
elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
if is_service_enabled q-svc; then
echo_summary "Configuring networking-bgpvpn"
mkdir -v -p $NEUTRON_CONF_DIR/policy.d && cp -v $NETWORKING_BGPVPN_DIR/etc/neutron/policy.d/bgpvpn.conf $NEUTRON_CONF_DIR/policy.d
mkdir -v -p $(dirname $NETWORKING_BGPVPN_CONF) && cp -v $NETWORKING_BGPVPN_DIR/etc/neutron/networking_bgpvpn.conf $NETWORKING_BGPVPN_CONF
inicomment $NETWORKING_BGPVPN_CONF service_providers service_provider
iniadd $NETWORKING_BGPVPN_CONF service_providers service_provider $NETWORKING_BGPVPN_DRIVER
fi
fi
if [[ "$1" == "unstack" ]]; then
#no-op
:
fi
if [[ "$1" == "clean" ]]; then
#no-op
:
fi
# Restore saved trace setting
$XTRACE
|
nikesh-mahalka/networking-bgpvpn
|
devstack/plugin.sh
|
Shell
|
apache-2.0
| 1,212 |
#!/usr/bin/env bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO(jbeda): Provide a way to override project
# gcloud multiplexing for shared GCE/GKE tests.
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/cluster/gce/config-common.sh"
# Specifying KUBE_GCE_API_ENDPOINT will override the default GCE Compute API endpoint (https://www.googleapis.com/compute/v1/).
# The endpoint must point to the v1 API, for example https://www.googleapis.com/compute/staging_v1/.
GCE_API_ENDPOINT=${KUBE_GCE_API_ENDPOINT:-}
GCLOUD=gcloud
ZONE=${KUBE_GCE_ZONE:-us-central1-b}
REGION=${ZONE%-*}
RELEASE_REGION_FALLBACK=${RELEASE_REGION_FALLBACK:-false}
REGIONAL_KUBE_ADDONS=${REGIONAL_KUBE_ADDONS:-true}
NODE_SIZE=${NODE_SIZE:-n1-standard-2}
NUM_NODES=${NUM_NODES:-3}
NUM_WINDOWS_NODES=${NUM_WINDOWS_NODES:-0}
MASTER_SIZE=${MASTER_SIZE:-n1-standard-$(get-master-size)}
MASTER_MIN_CPU_ARCHITECTURE=${MASTER_MIN_CPU_ARCHITECTURE:-} # To allow choosing better architectures.
MASTER_DISK_TYPE=pd-ssd
MASTER_DISK_SIZE=${MASTER_DISK_SIZE:-$(get-master-disk-size)}
MASTER_ROOT_DISK_SIZE=${MASTER_ROOT_DISK_SIZE:-$(get-master-root-disk-size)}
NODE_DISK_TYPE=${NODE_DISK_TYPE:-pd-standard}
NODE_DISK_SIZE=${NODE_DISK_SIZE:-100GB}
NODE_LOCAL_SSDS=${NODE_LOCAL_SSDS:-0}
NODE_LABELS="${KUBE_NODE_LABELS:-}"
WINDOWS_NODE_LABELS="${WINDOWS_NODE_LABELS:-}"
# KUBE_CREATE_NODES can be used to avoid creating nodes, while master will be sized for NUM_NODES nodes.
# Firewalls and node templates are still created.
KUBE_CREATE_NODES="${KUBE_CREATE_NODES:-true}"
# An extension to local SSDs that lets users request block/fs devices over SCSI/NVMe.
# The format of this variable is "#,scsi/nvme,block/fs"; multiple configurations can
# be specified by separating them with semicolons, e.g. "2,scsi,fs;1,nvme,block"
# requests 2 SCSI SSDs formatted and mounted as filesystems plus 1 NVMe block device SSD.
NODE_LOCAL_SSDS_EXT=${NODE_LOCAL_SSDS_EXT:-}
# Accelerators to be attached to each node. Format "type=<accelerator-type>,count=<accelerator-count>"
# More information on available GPUs here - https://cloud.google.com/compute/docs/gpus/
NODE_ACCELERATORS=${NODE_ACCELERATORS:-""}
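# Example (hypothetical value, following the format documented above):
#   NODE_ACCELERATORS="type=nvidia-tesla-k80,count=2" attaches two K80 GPUs per node.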
REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-true}
PREEMPTIBLE_NODE=${PREEMPTIBLE_NODE:-false}
PREEMPTIBLE_MASTER=${PREEMPTIBLE_MASTER:-false}
KUBE_DELETE_NODES=${KUBE_DELETE_NODES:-true}
KUBE_DELETE_NETWORK=${KUBE_DELETE_NETWORK:-} # default value calculated below
CREATE_CUSTOM_NETWORK=${CREATE_CUSTOM_NETWORK:-false}
MIG_WAIT_UNTIL_STABLE_TIMEOUT=${MIG_WAIT_UNTIL_STABLE_TIMEOUT:-1800}
MASTER_OS_DISTRIBUTION=${KUBE_MASTER_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-gci}}
NODE_OS_DISTRIBUTION=${KUBE_NODE_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-gci}}
WINDOWS_NODE_OS_DISTRIBUTION=${WINDOWS_NODE_OS_DISTRIBUTION:-win2019}
if [[ "${MASTER_OS_DISTRIBUTION}" == "cos" ]]; then
MASTER_OS_DISTRIBUTION="gci"
fi
if [[ "${NODE_OS_DISTRIBUTION}" == "cos" ]]; then
NODE_OS_DISTRIBUTION="gci"
fi
# GPUs supported in GCE do not have compatible drivers in Debian 7.
if [[ "${NODE_OS_DISTRIBUTION}" == "debian" ]]; then
NODE_ACCELERATORS=""
fi
# By default a cluster will be started with the master and nodes
# on Container-optimized OS (cos, previously known as gci). If
# you are updating the os image versions, update this variable.
# Also please update corresponding image for node e2e at:
# https://github.com/kubernetes/kubernetes/blob/master/test/e2e_node/jenkins/image-config.yaml
GCI_VERSION=${KUBE_GCI_VERSION:-cos-81-12871-59-0}
MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-}
MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-cos-cloud}
NODE_IMAGE=${KUBE_GCE_NODE_IMAGE:-${GCI_VERSION}}
NODE_IMAGE_PROJECT=${KUBE_GCE_NODE_PROJECT:-cos-cloud}
NODE_SERVICE_ACCOUNT=${KUBE_GCE_NODE_SERVICE_ACCOUNT:-default}
# KUBELET_TEST_ARGS are extra arguments passed to kubelet.
KUBELET_TEST_ARGS=${KUBE_KUBELET_EXTRA_ARGS:-}
CONTAINER_RUNTIME=${KUBE_CONTAINER_RUNTIME:-docker}
CONTAINER_RUNTIME_ENDPOINT=${KUBE_CONTAINER_RUNTIME_ENDPOINT:-}
CONTAINER_RUNTIME_NAME=${KUBE_CONTAINER_RUNTIME_NAME:-}
LOAD_IMAGE_COMMAND=${KUBE_LOAD_IMAGE_COMMAND:-}
if [[ "${CONTAINER_RUNTIME}" == "containerd" ]]; then
CONTAINER_RUNTIME_NAME=${KUBE_CONTAINER_RUNTIME_NAME:-containerd}
LOAD_IMAGE_COMMAND=${KUBE_LOAD_IMAGE_COMMAND:-ctr -n=k8s.io images import}
fi
# Optional containerd/runc version overrides (Ubuntu OS images ONLY).
# If KUBE_UBUNTU_INSTALL_CONTAINERD_VERSION or KUBE_UBUNTU_INSTALL_RUNC_VERSION
# is set to empty, the corresponding version is not overridden and whatever ships
# with the default installation of the containerd package is used.
UBUNTU_INSTALL_CONTAINERD_VERSION=${KUBE_UBUNTU_INSTALL_CONTAINERD_VERSION:-}
UBUNTU_INSTALL_RUNC_VERSION=${KUBE_UBUNTU_INSTALL_RUNC_VERSION:-}
# MASTER_EXTRA_METADATA is the extra instance metadata on the master instance, separated by commas.
MASTER_EXTRA_METADATA=${KUBE_MASTER_EXTRA_METADATA:-${KUBE_EXTRA_METADATA:-}}
# NODE_EXTRA_METADATA is the extra instance metadata on node instances, separated by commas.
NODE_EXTRA_METADATA=${KUBE_NODE_EXTRA_METADATA:-${KUBE_EXTRA_METADATA:-}}
NETWORK=${KUBE_GCE_NETWORK:-default}
# Enable network deletion by default (for kube-down), unless we're using 'default' network.
if [[ "${NETWORK}" == "default" ]]; then
KUBE_DELETE_NETWORK=${KUBE_DELETE_NETWORK:-false}
else
KUBE_DELETE_NETWORK=${KUBE_DELETE_NETWORK:-true}
fi
if [[ "${CREATE_CUSTOM_NETWORK}" == true ]]; then
SUBNETWORK="${SUBNETWORK:-${NETWORK}-custom-subnet}"
fi
INSTANCE_PREFIX="${KUBE_GCE_INSTANCE_PREFIX:-kubernetes}"
CLUSTER_NAME="${CLUSTER_NAME:-${INSTANCE_PREFIX}}"
MASTER_NAME="${INSTANCE_PREFIX}-master"
AGGREGATOR_MASTER_NAME="${INSTANCE_PREFIX}-aggregator"
INITIAL_ETCD_CLUSTER="${MASTER_NAME}"
MASTER_TAG="${INSTANCE_PREFIX}-master"
NODE_TAG="${INSTANCE_PREFIX}-minion"
CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-$(get-cluster-ip-range)}"
MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}"
# NODE_IP_RANGE is used when ENABLE_IP_ALIASES=true or CREATE_CUSTOM_NETWORK=true.
# It is the primary range in the subnet and is the range used for node instance IPs.
NODE_IP_RANGE="$(get-node-ip-range)"
# NOTE: Avoid giving nodes empty scopes, because kubelet needs a service account
# in order to initialize properly.
NODE_SCOPES="${NODE_SCOPES:-monitoring,logging-write,storage-ro}"
# Extra docker options for nodes.
EXTRA_DOCKER_OPTS="${EXTRA_DOCKER_OPTS:-}"
VOLUME_PLUGIN_DIR="${VOLUME_PLUGIN_DIR:-/home/kubernetes/flexvolume}"
SERVICE_CLUSTER_IP_RANGE="${SERVICE_CLUSTER_IP_RANGE:-10.0.0.0/16}" # formerly PORTAL_NET
ALLOCATE_NODE_CIDRS=true
# When set to true, Docker Cache is enabled by default as part of the cluster bring up.
ENABLE_DOCKER_REGISTRY_CACHE=true
# Optional: Deploy an L7 load balancer controller to fulfill Ingress requests:
# glbc - GCE L7 Load Balancer Controller
ENABLE_L7_LOADBALANCING="${KUBE_ENABLE_L7_LOADBALANCING:-glbc}"
# Optional: Enable Metrics Server. Metrics Server should be enabled everywhere,
# since it's a critical component, but in the first release we need a way to
# disable it in case of stability issues.
# TODO(piosz) remove this option once Metrics Server becomes stable.
ENABLE_METRICS_SERVER="${KUBE_ENABLE_METRICS_SERVER:-true}"
# Optional: Metadata agent to set up as part of the cluster bring-up:
# none - No metadata agent
# stackdriver - Stackdriver metadata agent
# Metadata agent is a daemon set that provides metadata of kubernetes objects
# running on the same node for exporting metrics and logs.
ENABLE_METADATA_AGENT="${KUBE_ENABLE_METADATA_AGENT:-none}"
# If specified, one special node out of NUM_NODES will be created with this machine type.
# Useful for scheduling heapster in large clusters with nodes of small size.
HEAPSTER_MACHINE_TYPE="${HEAPSTER_MACHINE_TYPE:-}"
# Optional: Additional nodes will be created if their type and number are specified,
# and NUM_NODES will be lowered accordingly.
# Useful for running cluster-level addons that need more resources than would fit
# on small nodes, like network plugins.
NUM_ADDITIONAL_NODES="${NUM_ADDITIONAL_NODES:-}"
ADDITIONAL_MACHINE_TYPE="${ADDITIONAL_MACHINE_TYPE:-}"
MASTER_NODE_LABELS="${KUBE_MASTER_NODE_LABELS:-}"
# NON_MASTER_NODE_LABELS are labels that will only be applied to non-master nodes.
NON_MASTER_NODE_LABELS="${KUBE_NON_MASTER_NODE_LABELS:-}"
WINDOWS_NON_MASTER_NODE_LABELS="${WINDOWS_NON_MASTER_NODE_LABELS:-}"
if [[ "${PREEMPTIBLE_MASTER}" == "true" ]]; then
NODE_LABELS="${NODE_LABELS},cloud.google.com/gke-preemptible=true"
WINDOWS_NODE_LABELS="${WINDOWS_NODE_LABELS},cloud.google.com/gke-preemptible=true"
elif [[ "${PREEMPTIBLE_NODE}" == "true" ]]; then
NON_MASTER_NODE_LABELS="${NON_MASTER_NODE_LABELS},cloud.google.com/gke-preemptible=true"
WINDOWS_NON_MASTER_NODE_LABELS="${WINDOWS_NON_MASTER_NODE_LABELS},cloud.google.com/gke-preemptible=true"
fi
# To avoid running Calico on a node that is not configured appropriately,
# label each Node so that the DaemonSet can run the Pods only on ready Nodes.
# Windows nodes do not support Calico.
if [[ ${NETWORK_POLICY_PROVIDER:-} == "calico" ]]; then
NON_MASTER_NODE_LABELS="${NON_MASTER_NODE_LABELS:+${NON_MASTER_NODE_LABELS},}projectcalico.org/ds-ready=true"
fi
# Optional: Enable netd.
ENABLE_NETD="${KUBE_ENABLE_NETD:-false}"
CUSTOM_NETD_YAML="${KUBE_CUSTOM_NETD_YAML:-}"
CUSTOM_CALICO_NODE_DAEMONSET_YAML="${KUBE_CUSTOM_CALICO_NODE_DAEMONSET_YAML:-}"
CUSTOM_TYPHA_DEPLOYMENT_YAML="${KUBE_CUSTOM_TYPHA_DEPLOYMENT_YAML:-}"
# To avoid running netd on a node that is not configured appropriately,
# label each Node so that the DaemonSet can run the Pods only on ready Nodes.
# Windows nodes do not support netd.
if [[ ${ENABLE_NETD:-} == "true" ]]; then
NON_MASTER_NODE_LABELS="${NON_MASTER_NODE_LABELS:+${NON_MASTER_NODE_LABELS},}cloud.google.com/gke-netd-ready=true"
fi
ENABLE_NODELOCAL_DNS="${KUBE_ENABLE_NODELOCAL_DNS:-false}"
LOCAL_DNS_IP="${KUBE_LOCAL_DNS_IP:-169.254.20.10}"
# Enable metadata concealment by firewalling pod traffic to the metadata server
# and run a proxy daemonset on nodes.
#
# TODO(#8867) Enable by default.
ENABLE_METADATA_CONCEALMENT="${ENABLE_METADATA_CONCEALMENT:-false}" # true, false
METADATA_CONCEALMENT_NO_FIREWALL="${METADATA_CONCEALMENT_NO_FIREWALL:-false}" # true, false
if [[ ${ENABLE_METADATA_CONCEALMENT:-} == "true" ]]; then
# Put the necessary label on the node so the daemonset gets scheduled.
NODE_LABELS="${NODE_LABELS},cloud.google.com/metadata-proxy-ready=true"
# Add to the provider custom variables.
PROVIDER_VARS="${PROVIDER_VARS:-} ENABLE_METADATA_CONCEALMENT METADATA_CONCEALMENT_NO_FIREWALL"
fi
# Optional: Enable node logging.
ENABLE_NODE_LOGGING="${KUBE_ENABLE_NODE_LOGGING:-true}"
LOGGING_DESTINATION="${KUBE_LOGGING_DESTINATION:-gcp}" # options: elasticsearch, gcp
# Optional: When set to true, Elasticsearch and Kibana will be set up as part of the cluster bring-up.
ENABLE_CLUSTER_LOGGING="${KUBE_ENABLE_CLUSTER_LOGGING:-true}"
ELASTICSEARCH_LOGGING_REPLICAS=1
# Optional: Don't require https for registries in our local RFC1918 network
if [[ ${KUBE_ENABLE_INSECURE_REGISTRY:-false} == "true" ]]; then
EXTRA_DOCKER_OPTS="${EXTRA_DOCKER_OPTS} --insecure-registry 10.0.0.0/8"
fi
# Optional: customize runtime config
RUNTIME_CONFIG="${KUBE_RUNTIME_CONFIG:-}"
if [[ "${KUBE_FEATURE_GATES:-}" == "AllAlpha=true" ]]; then
RUNTIME_CONFIG="${KUBE_RUNTIME_CONFIG:-api/all=true}"
fi
# If feature gates includes AllAlpha or EndpointSlice, and EndpointSlice has not been disabled, add EndpointSlice controller to list of controllers to run.
if [[ (( "${KUBE_FEATURE_GATES:-}" == *"AllAlpha=true"* ) || ( "${KUBE_FEATURE_GATES:-}" == *"EndpointSlice=true"* )) && "${KUBE_FEATURE_GATES:-}" != *"EndpointSlice=false"* ]]; then
RUN_CONTROLLERS="${RUN_CONTROLLERS:-*,endpointslice}"
fi
# Optional: set feature gates
FEATURE_GATES="${KUBE_FEATURE_GATES:-}"
if [[ ! -z "${NODE_ACCELERATORS}" ]]; then
if [[ -z "${FEATURE_GATES:-}" ]]; then
FEATURE_GATES="DevicePlugins=true"
else
FEATURE_GATES="${FEATURE_GATES},DevicePlugins=true"
fi
if [[ "${NODE_ACCELERATORS}" =~ .*type=([a-zA-Z0-9-]+).* ]]; then
NON_MASTER_NODE_LABELS="${NON_MASTER_NODE_LABELS},cloud.google.com/gke-accelerator=${BASH_REMATCH[1]}"
fi
fi
# Optional: Install cluster DNS.
# Set CLUSTER_DNS_CORE_DNS to 'false' to install kube-dns instead of CoreDNS.
CLUSTER_DNS_CORE_DNS="${CLUSTER_DNS_CORE_DNS:-true}"
ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}"
DNS_SERVER_IP="${KUBE_DNS_SERVER_IP:-10.0.0.10}"
DNS_DOMAIN="${KUBE_DNS_DOMAIN:-cluster.local}"
DNS_MEMORY_LIMIT="${KUBE_DNS_MEMORY_LIMIT:-170Mi}"
# Optional: Enable DNS horizontal autoscaler
ENABLE_DNS_HORIZONTAL_AUTOSCALER="${KUBE_ENABLE_DNS_HORIZONTAL_AUTOSCALER:-true}"
# Optional: Install Kubernetes UI
ENABLE_CLUSTER_UI="${KUBE_ENABLE_CLUSTER_UI:-true}"
# Optional: Install node problem detector.
# none - Do not run the node problem detector.
# daemonset - Run the node problem detector as a daemonset.
# standalone - Run the node problem detector as a standalone system daemon.
if [[ "${NODE_OS_DISTRIBUTION}" == "gci" ]]; then
# Enable standalone mode by default for gci.
ENABLE_NODE_PROBLEM_DETECTOR="${KUBE_ENABLE_NODE_PROBLEM_DETECTOR:-standalone}"
else
ENABLE_NODE_PROBLEM_DETECTOR="${KUBE_ENABLE_NODE_PROBLEM_DETECTOR:-daemonset}"
fi
NODE_PROBLEM_DETECTOR_VERSION="${NODE_PROBLEM_DETECTOR_VERSION:-}"
NODE_PROBLEM_DETECTOR_TAR_HASH="${NODE_PROBLEM_DETECTOR_TAR_HASH:-}"
NODE_PROBLEM_DETECTOR_RELEASE_PATH="${NODE_PROBLEM_DETECTOR_RELEASE_PATH:-}"
NODE_PROBLEM_DETECTOR_CUSTOM_FLAGS="${NODE_PROBLEM_DETECTOR_CUSTOM_FLAGS:-}"
CNI_SHA1="${CNI_SHA1:-}"
CNI_TAR_PREFIX="${CNI_TAR_PREFIX:-cni-plugins-linux-amd64-}"
CNI_STORAGE_URL_BASE="${CNI_STORAGE_URL_BASE:-https://storage.googleapis.com/k8s-artifacts-cni/release}"
# Optional: Create autoscaler for cluster's nodes.
ENABLE_CLUSTER_AUTOSCALER="${KUBE_ENABLE_CLUSTER_AUTOSCALER:-false}"
if [[ "${ENABLE_CLUSTER_AUTOSCALER}" == "true" ]]; then
AUTOSCALER_MIN_NODES="${KUBE_AUTOSCALER_MIN_NODES:-}"
AUTOSCALER_MAX_NODES="${KUBE_AUTOSCALER_MAX_NODES:-}"
AUTOSCALER_ENABLE_SCALE_DOWN="${KUBE_AUTOSCALER_ENABLE_SCALE_DOWN:-true}"
AUTOSCALER_EXPANDER_CONFIG="${KUBE_AUTOSCALER_EXPANDER_CONFIG:---expander=price}"
fi
# Optional: Enable allocation of pod IPs using IP aliases.
#
# BETA FEATURE.
#
# IP_ALIAS_SIZE is the size of the podCIDR allocated to a node.
# IP_ALIAS_SUBNETWORK is the subnetwork to allocate from. If empty, a
# new subnetwork will be created for the cluster.
ENABLE_IP_ALIASES=${KUBE_GCE_ENABLE_IP_ALIASES:-false}
NODE_IPAM_MODE=${KUBE_GCE_NODE_IPAM_MODE:-RangeAllocator}
if [[ "${ENABLE_IP_ALIASES}" == true ]]; then
# Number of Pods that can run on this node.
MAX_PODS_PER_NODE=${MAX_PODS_PER_NODE:-110}
# Size of ranges allocated to each node.
IP_ALIAS_SIZE="/$(get-alias-range-size ${MAX_PODS_PER_NODE})"
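# For example, with the default MAX_PODS_PER_NODE of 110 the per-node alias
# range typically works out to a /24 (the smallest range covering 2*110 addresses).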
IP_ALIAS_SUBNETWORK=${KUBE_GCE_IP_ALIAS_SUBNETWORK:-${INSTANCE_PREFIX}-subnet-default}
# If we're using custom network, use the subnet we already create for it as the one for ip-alias.
# Note that this means SUBNETWORK would override KUBE_GCE_IP_ALIAS_SUBNETWORK in case of custom network.
if [[ "${CREATE_CUSTOM_NETWORK}" == true ]]; then
IP_ALIAS_SUBNETWORK="${SUBNETWORK}"
fi
# Reserve the services IP space to avoid being allocated for other GCP resources.
SERVICE_CLUSTER_IP_SUBNETWORK=${KUBE_GCE_SERVICE_CLUSTER_IP_SUBNETWORK:-${INSTANCE_PREFIX}-subnet-services}
NODE_IPAM_MODE=${KUBE_GCE_NODE_IPAM_MODE:-CloudAllocator}
SECONDARY_RANGE_NAME=${SECONDARY_RANGE_NAME:-}
# Add to the provider custom variables.
PROVIDER_VARS="${PROVIDER_VARS:-} ENABLE_IP_ALIASES"
PROVIDER_VARS="${PROVIDER_VARS:-} NODE_IPAM_MODE"
PROVIDER_VARS="${PROVIDER_VARS:-} SECONDARY_RANGE_NAME"
elif [[ -n "${MAX_PODS_PER_NODE:-}" ]]; then
# Should not have MAX_PODS_PER_NODE set for route-based clusters.
echo -e "${color_red}Cannot set MAX_PODS_PER_NODE for route-based projects for ${PROJECT}." >&2
exit 1
fi
# Enable GCE Alpha features.
if [[ -n "${GCE_ALPHA_FEATURES:-}" ]]; then
PROVIDER_VARS="${PROVIDER_VARS:-} GCE_ALPHA_FEATURES"
fi
# Disable Docker live-restore.
if [[ -n "${DISABLE_DOCKER_LIVE_RESTORE:-}" ]]; then
PROVIDER_VARS="${PROVIDER_VARS:-} DISABLE_DOCKER_LIVE_RESTORE"
fi
# Override default GLBC image
if [[ -n "${GCE_GLBC_IMAGE:-}" ]]; then
PROVIDER_VARS="${PROVIDER_VARS:-} GCE_GLBC_IMAGE"
fi
CUSTOM_INGRESS_YAML="${CUSTOM_INGRESS_YAML:-}"
# Admission Controllers to invoke prior to persisting objects in cluster
ADMISSION_CONTROL=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,PersistentVolumeClaimResize,DefaultTolerationSeconds,NodeRestriction,Priority,StorageObjectInUseProtection,RuntimeClass
if [[ "${ENABLE_POD_SECURITY_POLICY:-}" == "true" ]]; then
ADMISSION_CONTROL="${ADMISSION_CONTROL},PodSecurityPolicy"
fi
# MutatingAdmissionWebhook should be the last controller that modifies the
# request object, otherwise users will be confused if the mutating webhooks'
# modification is overwritten.
ADMISSION_CONTROL="${ADMISSION_CONTROL},MutatingAdmissionWebhook,ValidatingAdmissionWebhook"
# ResourceQuota must come last; otherwise a creation can be recorded even though the pod was forbidden.
ADMISSION_CONTROL="${ADMISSION_CONTROL},ResourceQuota"
# Optional: if set to true kube-up will automatically check for existing resources and clean them up.
KUBE_UP_AUTOMATIC_CLEANUP=${KUBE_UP_AUTOMATIC_CLEANUP:-false}
# Storage backend. 'etcd2' supported, 'etcd3' experimental.
STORAGE_BACKEND=${STORAGE_BACKEND:-}
# Networking plugin specific settings.
NETWORK_PROVIDER="${NETWORK_PROVIDER:-kubenet}" # none, kubenet
# Network Policy plugin specific settings.
NETWORK_POLICY_PROVIDER="${NETWORK_POLICY_PROVIDER:-none}" # calico
NON_MASQUERADE_CIDR="0.0.0.0/0"
# How should the kubelet configure hairpin mode?
HAIRPIN_MODE="${HAIRPIN_MODE:-hairpin-veth}" # promiscuous-bridge, hairpin-veth, none
# Optional: if set to true, kube-up will configure the cluster to run e2e tests.
E2E_STORAGE_TEST_ENVIRONMENT="${KUBE_E2E_STORAGE_TEST_ENVIRONMENT:-false}"
# Evict pods whenever compute resource availability on the nodes gets below a threshold.
EVICTION_HARD="${EVICTION_HARD:-memory.available<250Mi,nodefs.available<10%,nodefs.inodesFree<5%}"
# Optional: custom scheduling algorithm
SCHEDULING_ALGORITHM_PROVIDER="${SCHEDULING_ALGORITHM_PROVIDER:-}"
# Optional: install a default StorageClass
ENABLE_DEFAULT_STORAGE_CLASS="${ENABLE_DEFAULT_STORAGE_CLASS:-true}"
# Optional: install volume snapshot CRDs
ENABLE_VOLUME_SNAPSHOTS="${ENABLE_VOLUME_SNAPSHOTS:-true}"
# Optional: Enable legacy ABAC policy that makes all service accounts superusers.
ENABLE_LEGACY_ABAC="${ENABLE_LEGACY_ABAC:-false}" # true, false
# Indicates if the values (i.e. KUBE_USER and KUBE_PASSWORD for basic
# authentication) in metadata should be treated as canonical, and therefore disk
# copies ought to be recreated/clobbered.
METADATA_CLOBBERS_CONFIG="${METADATA_CLOBBERS_CONFIG:-false}"
ENABLE_BIG_CLUSTER_SUBNETS="${ENABLE_BIG_CLUSTER_SUBNETS:-false}"
if [[ -n "${LOGROTATE_FILES_MAX_COUNT:-}" ]]; then
PROVIDER_VARS="${PROVIDER_VARS:-} LOGROTATE_FILES_MAX_COUNT"
fi
if [[ -n "${LOGROTATE_MAX_SIZE:-}" ]]; then
PROVIDER_VARS="${PROVIDER_VARS:-} LOGROTATE_MAX_SIZE"
fi
if [[ -n "${POD_LOG_MAX_FILE:-}" ]]; then
PROVIDER_VARS="${PROVIDER_VARS:-} POD_LOG_MAX_FILE"
fi
if [[ -n "${POD_LOG_MAX_SIZE:-}" ]]; then
PROVIDER_VARS="${PROVIDER_VARS:-} POD_LOG_MAX_SIZE"
fi
# Fluentd requirements
# The YAML version exists to trigger a configuration refresh when changes are made.
FLUENTD_GCP_YAML_VERSION="v3.2.0"
FLUENTD_GCP_VERSION="${FLUENTD_GCP_VERSION:-1.6.17}"
FLUENTD_GCP_MEMORY_LIMIT="${FLUENTD_GCP_MEMORY_LIMIT:-}"
FLUENTD_GCP_CPU_REQUEST="${FLUENTD_GCP_CPU_REQUEST:-}"
FLUENTD_GCP_MEMORY_REQUEST="${FLUENTD_GCP_MEMORY_REQUEST:-}"
# Heapster requirements
HEAPSTER_GCP_BASE_MEMORY="${HEAPSTER_GCP_BASE_MEMORY:-140Mi}"
HEAPSTER_GCP_MEMORY_PER_NODE="${HEAPSTER_GCP_MEMORY_PER_NODE:-4}"
HEAPSTER_GCP_BASE_CPU="${HEAPSTER_GCP_BASE_CPU:-80m}"
HEAPSTER_GCP_CPU_PER_NODE="${HEAPSTER_GCP_CPU_PER_NODE:-0.5}"
# Optional: custom system banner for dashboard addon
CUSTOM_KUBE_DASHBOARD_BANNER="${CUSTOM_KUBE_DASHBOARD_BANNER:-}"
# Default Stackdriver resources version exported by Fluentd-gcp addon
LOGGING_STACKDRIVER_RESOURCE_TYPES="${LOGGING_STACKDRIVER_RESOURCE_TYPES:-old}"
# Adding to PROVIDER_VARS, since this is GCP-specific.
PROVIDER_VARS="${PROVIDER_VARS:-} FLUENTD_GCP_YAML_VERSION FLUENTD_GCP_VERSION FLUENTD_GCP_MEMORY_LIMIT FLUENTD_GCP_CPU_REQUEST FLUENTD_GCP_MEMORY_REQUEST HEAPSTER_GCP_BASE_MEMORY HEAPSTER_GCP_MEMORY_PER_NODE HEAPSTER_GCP_BASE_CPU HEAPSTER_GCP_CPU_PER_NODE CUSTOM_KUBE_DASHBOARD_BANNER LOGGING_STACKDRIVER_RESOURCE_TYPES"
# Fluentd configuration for node-journal
ENABLE_NODE_JOURNAL="${ENABLE_NODE_JOURNAL:-false}"
# prometheus-to-sd configuration
PROMETHEUS_TO_SD_ENDPOINT="${PROMETHEUS_TO_SD_ENDPOINT:-https://monitoring.googleapis.com/}"
PROMETHEUS_TO_SD_PREFIX="${PROMETHEUS_TO_SD_PREFIX:-custom.googleapis.com}"
ENABLE_PROMETHEUS_TO_SD="${ENABLE_PROMETHEUS_TO_SD:-false}"
# TODO(#51292): Make kube-proxy Daemonset default and remove the configuration here.
# Optional: [Experiment Only] Run kube-proxy as a DaemonSet if set to true, run as static pods otherwise.
KUBE_PROXY_DAEMONSET="${KUBE_PROXY_DAEMONSET:-false}" # true, false
# Optional: duration of cluster signed certificates.
CLUSTER_SIGNING_DURATION="${CLUSTER_SIGNING_DURATION:-}"
# Optional: enable certificate rotation of the kubelet certificates.
ROTATE_CERTIFICATES="${ROTATE_CERTIFICATES:-}"
# The number of services that are allowed to sync concurrently. Will be passed
# into kube-controller-manager via `--concurrent-service-syncs`
CONCURRENT_SERVICE_SYNCS="${CONCURRENT_SERVICE_SYNCS:-}"
SERVICEACCOUNT_ISSUER="https://kubernetes.io/${CLUSTER_NAME}"
# Optional: Enable Node termination Handler for Preemptible and GPU VMs.
# https://github.com/GoogleCloudPlatform/k8s-node-termination-handler
ENABLE_NODE_TERMINATION_HANDLER="${ENABLE_NODE_TERMINATION_HANDLER:-false}"
# Override default Node Termination Handler Image
if [[ "${NODE_TERMINATION_HANDLER_IMAGE:-}" ]]; then
PROVIDER_VARS="${PROVIDER_VARS:-} NODE_TERMINATION_HANDLER_IMAGE"
fi
# Taint Windows nodes by default to prevent Linux workloads from being
# scheduled onto them.
WINDOWS_NODE_TAINTS="${WINDOWS_NODE_TAINTS:-node.kubernetes.io/os=win1809:NoSchedule}"
# Whether to set up a private GCE cluster, i.e. a cluster where nodes have only private IPs.
GCE_PRIVATE_CLUSTER="${KUBE_GCE_PRIVATE_CLUSTER:-false}"
GCE_PRIVATE_CLUSTER_PORTS_PER_VM="${KUBE_GCE_PRIVATE_CLUSTER_PORTS_PER_VM:-}"
# Optional: Create apiserver konnectivity server and agent.
ENABLE_EGRESS_VIA_KONNECTIVITY_SERVICE="${KUBE_ENABLE_EGRESS_VIA_KONNECTIVITY_SERVICE:-false}"
KONNECTIVITY_SERVICE_PROXY_PROTOCOL_MODE="${KUBE_KONNECTIVITY_SERVICE_PROXY_PROTOCOL_MODE:-grpc}"
|
k82/kubernetes
|
cluster/gce/config-default.sh
|
Shell
|
apache-2.0
| 23,503 |
#!/bin/bash
# Docs at: https://github.com/docker-library/docs/tree/master/mysql
docker run -p 3306:3306 -e MYSQL_ROOT_PASSWORD=beatIt -e MYSQL_DATABASE=LOCAL -e MYSQL_USER=DEV -e MYSQL_PASSWORD=BEATIT -v /home/dev/mysqldata:/var/lib/mysql -d mysql:5.7
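# To verify the container is reachable (hypothetical client invocation; assumes
# a mysql client is installed locally, credentials as in the run command above):
#   mysql -h 127.0.0.1 -P 3306 -u DEV -pBEATIT LOCAL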
|
peeejeee/cinnamon-tools
|
database/runMySql.sh
|
Shell
|
apache-2.0
| 254 |
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Runs a Hadoop command as a daemon.
#
# Environment Variables
#
# HADOOP_CONF_DIR Alternate conf dir. Default is ${HADOOP_HOME}/conf.
# HADOOP_LOG_DIR Where log files are stored. PWD by default.
# HADOOP_MASTER host:path where hadoop code should be rsync'd from
# HADOOP_PID_DIR Where the pid files are stored. /tmp by default.
# HADOOP_IDENT_STRING A string representing this instance of hadoop. $USER by default
# HADOOP_NICENESS The scheduling priority for daemons. Defaults to 0.
##
usage="Usage: hadoop-daemon.sh [--config <conf-dir>] [--hosts hostlistfile] (start|stop) <hadoop-command> <args...>"
# if no args specified, show usage
if [ $# -le 1 ]; then
echo "$usage"
exit 1
fi
bin=`dirname "$0"`
bin=`cd "$bin"; pwd`
. "$bin"/hadoop-config.sh
# get arguments
startStop=$1
shift
command=$1
shift
hadoop_rotate_log ()
{
log=$1;
num=5;
if [ -n "$2" ]; then
num=$2
fi
if [ -f "$log" ]; then # rotate logs
while [ $num -gt 1 ]; do
prev=`expr $num - 1`
[ -f "$log.$prev" ] && mv "$log.$prev" "$log.$num"
num=$prev
done
mv "$log" "$log.$num";
fi
}
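# e.g. hadoop_rotate_log "$log" shifts log.4 -> log.5, ..., log.1 -> log.2,
# then log -> log.1, keeping at most 5 rotated generations by default.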
if [ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]; then
. "${HADOOP_CONF_DIR}/hadoop-env.sh"
fi
# get log directory
if [ "$HADOOP_LOG_DIR" = "" ]; then
export HADOOP_LOG_DIR="$HADOOP_HOME/logs"
fi
mkdir -p "$HADOOP_LOG_DIR"
if [ "$HADOOP_PID_DIR" = "" ]; then
HADOOP_PID_DIR=/tmp
fi
if [ "$HADOOP_IDENT_STRING" = "" ]; then
export HADOOP_IDENT_STRING="$USER"
fi
# some variables
export HADOOP_LOGFILE=hadoop-$HADOOP_IDENT_STRING-$command-$HOSTNAME.log
export HADOOP_ROOT_LOGGER="INFO,RFA"
log=$HADOOP_LOG_DIR/hadoop-$HADOOP_IDENT_STRING-$command-$HOSTNAME.out
pid=$HADOOP_PID_DIR/hadoop-$HADOOP_IDENT_STRING-$command.pid
# Set default scheduling priority
if [ "$HADOOP_NICENESS" = "" ]; then
export HADOOP_NICENESS=0
fi
case $startStop in
(start)
mkdir -p "$HADOOP_PID_DIR"
if [ -f "$pid" ]; then
if kill -0 `cat $pid` > /dev/null 2>&1; then
echo $command running as process `cat $pid`. Stop it first.
exit 1
fi
fi
if [ "$HADOOP_MASTER" != "" ]; then
echo rsync from $HADOOP_MASTER
rsync -a -e ssh --delete --exclude=.svn --exclude='logs/*' --exclude='contrib/hod/logs/*' $HADOOP_MASTER/ "$HADOOP_HOME"
fi
hadoop_rotate_log $log
echo starting $command, logging to $log
cd "$HADOOP_HOME"
nohup nice -n $HADOOP_NICENESS "$HADOOP_HOME"/bin/hadoop --config $HADOOP_CONF_DIR $command "$@" > "$log" 2>&1 < /dev/null &
echo $! > $pid
sleep 1; head "$log"
;;
(stop)
if [ -f "$pid" ]; then
if kill -0 `cat $pid` > /dev/null 2>&1; then
echo stopping $command
kill `cat $pid`
else
echo no $command to stop
fi
else
echo no $command to stop
fi
;;
(*)
echo "$usage"
exit 1
;;
esac
|
yuanke/hadoop-hbase
|
bin/hadoop-daemon.sh
|
Shell
|
apache-2.0
| 3,689 |
#!/bin/bash
# Copyright 2013-2014 Sebastian Kreft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The filename is the last argument; everything before it is passed to tidy5.
ARGV=("$@");
ARGC=$((${#ARGV[@]}-1));
FILENAME=${ARGV[$ARGC]};
unset ARGV[$ARGC];
remove_template.py "$FILENAME" |
tidy5 "${ARGV[@]}" 2>&1 |
egrep -v "Warning: (.* proprietary attribute \"(itemtype|itemid|itemscope|itemprop)\"|missing <!DOCTYPE> declaration|inserting implicit <body>|inserting missing 'title' element)$" |
uniq
|
stfp/git-lint
|
scripts/custom_linters/tidy-wrapper.sh
|
Shell
|
apache-2.0
| 935 |
# Change DEVICE2 to some device detected by "sensors"
sensors DEVICE2 | cut -f2 -d"+" -s | cut -c1-2
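# Example with a hypothetical device name reported by "sensors":
#   sensors coretemp-isa-0000 | cut -f2 -d"+" -s | cut -c1-2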
|
romelperez/conky-command
|
conky/temperature2.sh
|
Shell
|
apache-2.0
| 101 |
#!/bin/bash
./frproxy.sh stop
echo "==============start stop;"
./frproxy.sh start
echo "$? expect 0"
./frproxy.sh stop
echo "$? expect 0"
echo "==============start restart stop"
./frproxy.sh start
echo "$? expect 0"
./frproxy.sh restart
echo "$? expect 0"
./frproxy.sh stop
echo "$? expect 0"
echo "==============restart start stop"
./frproxy.sh restart
echo "$? expect 0"
./frproxy.sh start
echo "$? expect 1"
./frproxy.sh stop
echo "$? expect 0"
echo "==============restart stop "
./frproxy.sh restart
echo "$? expect 0"
./frproxy.sh stop
echo "$? expect 0"
echo "==============start start stop"
./frproxy.sh start
echo "$? expect 0"
./frproxy.sh start
echo "$? expect 1"
./frproxy.sh stop
echo "$? expect 0"
echo "==============stop stop"
./frproxy.sh stop
echo "$? expect 1"
./frproxy.sh stop
echo "$? expect 1"
echo "==============stop start stop"
./frproxy.sh stop
echo "$? expect 1"
./frproxy.sh start
echo "$? expect 0"
./frproxy.sh stop
echo "$? expect 1"
|
cychenyin/sfproxy
|
releases/frproxytester.sh
|
Shell
|
apache-2.0
| 973 |
#!/bin/bash
export LC_ALL=C
set -o pipefail
set -o nounset
set -o errexit
awk -F"\t" '{print $1" "$2}' # multi-column-output
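# Example: printf 'key\tvalue\textra\n' | ./get_result_b.sh   # prints "key value"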
|
hxdone/cheatsheets
|
hadoop-stuff/multi-output/bin/get_result_b.sh
|
Shell
|
apache-2.0
| 127 |
#!/bin/bash
echo "Compressing Javascript..."
java -jar yui.jar ../js/route-calculator.js -o ../js/route-calculator.min.js
java -jar yui.jar ../js/station-calculator.js -o ../js/station-calculator.min.js
java -jar yui.jar ../js/init.js -o ../js/init.min.js
java -jar yui.jar ../js/station_ids.js -o ../js/station_ids.min.js
echo "Compressing Stylesheets..."
java -jar yui.jar ../css/styles.css -o ../css/styles.min.css
|
awhipp/RealEVEMarket
|
compressor/minimize.sh
|
Shell
|
apache-2.0
| 420 |
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
bin=`dirname "$0"`
bin=`cd "$bin"; pwd`
echo "========== Running dfsioe-read bench =========="
# configure
DIR=`cd $bin/../; pwd`
. "${DIR}/../bin/hibench-config.sh"
. "${DIR}/conf/configure.sh"
# path check
$HADOOP_HOME/bin/hadoop dfs -rmr ${INPUT_HDFS}/io_read
$HADOOP_HOME/bin/hadoop dfs -rmr ${INPUT_HDFS}/_*
# pre-running
SIZE=`$HADOOP_HOME/bin/hadoop fs -dus ${INPUT_HDFS} | awk '{ print $1 }'`
#OPTION="-read -skipAnalyze -nrFiles ${RD_NUM_OF_FILES} -fileSize ${RD_FILE_SIZE} -bufferSize 131072 -plotInteval 1000 -sampleUnit m -sampleInteval 200 -sumThreshold 0.5"
OPTION="-read -nrFiles ${RD_NUM_OF_FILES} -fileSize ${RD_FILE_SIZE} -bufferSize 131072 -plotInteval 1000 -sampleUnit m -sampleInteval 200 -sumThreshold 0.5 -tputReportTotal"
START_TIME=`timestamp`
# run bench
${HADOOP_HOME}/bin/hadoop jar ${DATATOOLS} org.apache.hadoop.fs.dfsioe.TestDFSIOEnh ${OPTION} -resFile ${DIR}/result_read.txt -tputFile ${DIR}/throughput_read.csv
# post-running
END_TIME=`timestamp`
gen_report "DFSIOE-READ" ${START_TIME} ${END_TIME} ${SIZE} >> ${HIBENCH_REPORT}
|
seraphin/hibench
|
dfsioe/bin/run-read.sh
|
Shell
|
apache-2.0
| 1,859 |
#!/usr/bin/env bash
#===============================================================================
#
# FILE: download.sh
#
# USAGE: ./download.sh
#
# DESCRIPTION: This script allows the user to download a database backup
# from AWS S3 storage. You can get a list of the backups by
# providing the --list option; it will display a list of the
# backups with the most recent one last. There are four columns
# in the list output: date, time, size of backup, and the name.
# You can copy the name and add it as the --s3backupfile option,
# or you can leave off that option and just provide the s3bucket
# and database name. The script will then go through that list,
# automatically pick the latest backup, and download it.
#
# OPTIONS: ---
# REQUIREMENTS: ---
# BUGS: ---
# NOTES: ---
# AUTHOR: Gregg Jensen (), [email protected]
# Bob Lozano (), [email protected]
# ORGANIZATION: devops.center
# CREATED: 04/20/2017 11:33:18
# REVISION: ---
#
# Copyright 2014-2017 devops.center llc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#===============================================================================
#set -o nounset # Treat unset variables as an error
#set -o errexit # exit immediately if command exits with a non-zero status
#set -x # essentially debug mode
BACKUP_DIR='.'
#--- FUNCTION ----------------------------------------------------------------
# NAME: usage
# DESCRIPTION: prints out the options for the script
# PARAMETERS:
# RETURNS:
#-------------------------------------------------------------------------------
function usage
{
echo "usage: ./download.sh [--s3backupfile s3-backup-file] [--list] [-n] [--profile theProfile] s3bucket database"
echo
echo "you might need to use the --profile option if you do not have the AWS credentials set up in either your"
echo "environment or the standard aws config and credentials files."
echo
echo "By running with the --list option it is easier to see the existing backup files along with their size and date"
echo "and then selecting from the list."
}
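# Example invocations (hypothetical bucket/database names):
#   ./download.sh --list my-backups-bucket customerdb
#   ./download.sh --profile prod my-backups-bucket customerdb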
if [[ -z $1 ]]; then
usage
exit 1
fi
while [[ $# -gt 0 ]]; do
case $1 in
--s3backupfile ) shift
S3_BACKUP_FILE=$1
;;
--list ) LIST=1
;;
--profile|-p ) shift
PROFILE=$1
;;
-n ) NO_OVERWRITE=1
;;
[!-]* ) if [[ $# -eq 2 ]]; then
S3_BUCKET=$1
DB_NAME=$2
shift;
else
echo "Too many/few of the 2 required parameters."
usage
exit 1
fi
;;
* ) usage
exit 1
esac
shift
done
#-------------------------------------------------------------------------------
# store list of backups from s3 bucket
#-------------------------------------------------------------------------------
if [[ -z ${PROFILE} ]]; then
S3_AS_STRING=$(aws s3 ls --recursive s3://"${S3_BUCKET}"/|grep "${DB_NAME}".sql.gz)
else
S3_AS_STRING=$(aws --profile ${PROFILE} s3 ls --recursive s3://"${S3_BUCKET}"/|grep "${DB_NAME}".sql.gz)
fi
S3_SORTED_AS_STRING=$(echo "${S3_AS_STRING}" | sort)
IFS=$'\n'; S3_LIST=($S3_SORTED_AS_STRING); unset IFS;
# make sure there is something found, otherwise exit
if [[ ${#S3_LIST[@]} -eq 0 ]]; then
echo "NOTE: There were no backups found...exiting"
exit
fi
#--- FUNCTION ----------------------------------------------------------------
# NAME: convertSize
# DESCRIPTION: takes a raw file size and gives a more human readable output
# PARAMETERS:
# RETURNS:
#-------------------------------------------------------------------------------
convertSize() {
b=${1:-0}; d=''; s=0; S=(Bytes {K,M,G,T,P,E,Z,Y}iB)
while ((b > 1024)); do
d="$(printf ".%02d" $((b % 1024 * 100 / 1024)))"
b=$((b / 1024))
let s++
done
echo "$b$d ${S[$s]}"
}
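# e.g. convertSize 1536 prints "1.50 KiB"; convertSize 500 prints "500 Bytes".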
#--- FUNCTION ----------------------------------------------------------------
# NAME: showNumberedList
# DESCRIPTION: shows the list as a numbered list and asks the user if they
# want to choose one from the list to download
# PARAMETERS:
# RETURNS:
#-------------------------------------------------------------------------------
showNumberedList() {
echo "S3 backups, with most recent listed last"
i=1
for line in "${S3_LIST[@]}"
do
dbLine=${line##* }
aSize=$(echo $line | cut -d " " -f 3)
theSize=$(convertSize "$aSize")
echo "${i}. [$theSize] ${dbLine}"
i=$((i+1))
done
}
#-------------------------------------------------------------------------------
# if --list is specified, print list and exit
#-------------------------------------------------------------------------------
if ! [[ -z "$LIST" ]]; then
showNumberedList
echo -n "Enter the number of the backup you want to download or press return to quit "
read -r number
if [[ -z ${number} ]]; then
exit
else
indexNum=$((number-1))
selectedLine=${S3_LIST[$indexNum]}
S3_BACKUP_FILE=${selectedLine##* }
fi
fi
#-------------------------------------------------------------------------------
# if the backup file name is given, download the specified file otherwise get it
# from the list as the latest one
#-------------------------------------------------------------------------------
if [[ -z "$S3_BACKUP_FILE" ]]; then
# download the most recent
selectedLine=${S3_LIST[-1]}
S3_BACKUP_FILE=${selectedLine##* }
fi
#-------------------------------------------------------------------------------
# save off, from the end, everything up the last slash to get just the database backup name
#-------------------------------------------------------------------------------
JUST_THE_BACKUP_NAME=${S3_BACKUP_FILE##*/}
LOCAL_BACKUP_FILE="${BACKUP_DIR}/${JUST_THE_BACKUP_NAME}"
if [[ -f "$LOCAL_BACKUP_FILE" ]] && ! [[ -z "$NO_OVERWRITE" ]]; then
echo -e "\nFile $LOCAL_BACKUP_FILE already exists and -n option was given. Skipping."
else
echo "Getting the backup file: ${S3_BACKUP_FILE} from the s3bucket: ${S3_BUCKET}"
aws s3 cp "s3://${S3_BUCKET}/${S3_BACKUP_FILE}" "$LOCAL_BACKUP_FILE"
if [[ $USER == "ubuntu" ]]; then
sudo chown postgres:postgres "$LOCAL_BACKUP_FILE"
fi
fi
export LOCAL_BACKUP_FILE
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
|
devopscenter/dcUtils
|
db/download.sh
|
Shell
|
apache-2.0
| 7,342 |
#!/bin/bash
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
CLUSTER_NAME=${1-imager}
ZONE=us-central1-f
# Delete services
kubectl delete -f service_jenkins.yaml
kubectl delete -f service_ssl_proxy.yaml
# Delete firewall rules
gcloud compute firewall-rules delete --quiet ${CLUSTER_NAME}-jenkins-swarm-internal
gcloud compute firewall-rules delete --quiet ${CLUSTER_NAME}-jenkins-web-public
# Delete cluster
gcloud container clusters delete --quiet ${CLUSTER_NAME} --zone ${ZONE}
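# Example: ./cluster_down.sh my-imager-cluster
# (the cluster name defaults to "imager" when no argument is given)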
|
evandbrown/kube-jenkins-imager
|
cluster_down.sh
|
Shell
|
apache-2.0
| 997 |
#!/bin/bash
# Copyright 2016 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This init script installs Google Cloud Datalab on the master node of a
# Dataproc cluster.
set -exo pipefail
readonly ROLE="$(/usr/share/google/get_metadata_value attributes/dataproc-role)"
readonly PROJECT="$(/usr/share/google/get_metadata_value ../project/project-id)"
readonly SPARK_PACKAGES="$(/usr/share/google/get_metadata_value attributes/spark-packages || true)"
readonly SPARK_CONF='/etc/spark/conf/spark-defaults.conf'
readonly DATALAB_DIR="${HOME}/datalab"
readonly PYTHONPATH="/env/python:$(find /usr/lib/spark/python/lib -name '*.zip' | paste -sd:)"
readonly DOCKER_IMAGE="$(/usr/share/google/get_metadata_value attributes/docker-image || \
echo 'gcr.io/cloud-datalab/datalab:local')"
# Expose every possible spark configuration to the container.
readonly VOLUMES="$(echo /etc/{hadoop*,hive*,*spark*} /usr/lib/hadoop/lib/{gcs,bigquery}*)"
readonly VOLUME_FLAGS="$(echo "${VOLUMES}" | sed 's/\S*/-v &:&/g')"
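# The sed above turns each path into a bind-mount flag,
# e.g. "/etc/spark/conf" becomes "-v /etc/spark/conf:/etc/spark/conf".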
function update_apt_get() {
for ((i = 0; i < 10; i++)); do
if apt-get update; then
return 0
fi
sleep 5
done
return 1
}
function docker_pull() {
for ((i = 0; i < 10; i++)); do
if (gcloud docker -- pull $1); then
return 0
fi
sleep 5
done
return 1
}
function err() {
echo "[$(date +'%Y-%m-%dT%H:%M:%S%z')]: $@" >&2
return 1
}
function configure_master(){
update_apt_get || err 'Failed to update apt-get'
mkdir -p "${DATALAB_DIR}"
apt-get install -y -q docker.io || err 'Failed to install Docker'
docker_pull ${DOCKER_IMAGE} || err "Failed to pull ${DOCKER_IMAGE}"
# For some reason Spark has issues resolving the user's directory inside of
# Datalab.
# TODO(pmkc) consider fixing in Dataproc proper.
if ! grep -q '^spark\.sql\.warehouse\.dir=' "${SPARK_CONF}"; then
echo 'spark.sql.warehouse.dir=/root/spark-warehouse' >> "${SPARK_CONF}"
fi
# Docker gives a "too many symlinks" error if volumes are not yet automounted.
# Ensure that the volumes are mounted to avoid the error.
touch ${VOLUMES}
# Build PySpark Submit Arguments
pyspark_submit_args=''
for package in ${SPARK_PACKAGES//','/' '}; do
pyspark_submit_args+="--packages ${package} "
done
pyspark_submit_args+='pyspark-shell'
# Java is too complicated to simply volume mount into the image, so we need
# to install it in a child image.
mkdir -p datalab-pyspark
pushd datalab-pyspark
cp /etc/apt/trusted.gpg .
cp /etc/apt/sources.list.d/backports.list .
cp /etc/apt/sources.list.d/dataproc.list .
cat << EOF > Dockerfile
FROM ${DOCKER_IMAGE}
ADD backports.list /etc/apt/sources.list.d/
ADD dataproc.list /etc/apt/sources.list.d/
ADD trusted.gpg /tmp/vm_trusted.gpg
RUN apt-key add /tmp/vm_trusted.gpg
RUN apt-get update
RUN apt-get install -y hive spark-python openjdk-8-jre-headless
ENV SPARK_HOME='/usr/lib/spark'
ENV JAVA_HOME='${JAVA_HOME}'
ENV PYTHONPATH='${PYTHONPATH}'
ENV PYTHONSTARTUP='/usr/lib/spark/python/pyspark/shell.py'
ENV PYSPARK_SUBMIT_ARGS='${pyspark_submit_args}'
ENV DATALAB_ENV='GCE'
EOF
docker build -t datalab-pyspark .
popd
}
function run_datalab(){
if docker run -d --restart always --net=host \
-v "${DATALAB_DIR}:/content/datalab" ${VOLUME_FLAGS} datalab-pyspark; then
echo 'Cloud Datalab Jupyter server successfully deployed.'
else
err 'Failed to run Cloud Datalab'
fi
}
function main(){
if [[ "${ROLE}" == 'Master' ]]; then
configure_master
run_datalab
fi
}
main
|
lukeFalsina/dataproc-initialization-actions
|
datalab/datalab.sh
|
Shell
|
apache-2.0
| 4,022 |
#!/bin/bash
source ${CLOUDIFY_LOGGING}
webui_port=$(ctx node properties webui_port)
XAPDIR=`cat /tmp/gsdir` # left by install script
interfacename=$(ctx node properties interfacename)
IP_ADDR=$(ip addr | grep inet | grep ${interfacename} | awk -F" " '{print $2}'| sed -e 's/\/.*$//')
export LOOKUPLOCATORS=$IP_ADDR
if [ -f "/tmp/locators" ]; then
LOOKUPLOCATORS=""
for line in $(cat /tmp/locators); do
LOOKUPLOCATORS="${LOOKUPLOCATORS}${line},"
done
LOOKUPLOCATORS=${LOOKUPLOCATORS%%,} #trim trailing comma
export LOOKUPLOCATORS
fi
export LOOKUPLOCATORS
export NIC_ADDR=${IP_ADDR}
export EXT_JAVA_OPTIONS="-Dcom.gs.multicast.enabled=false -Dcom.gs.transport_protocol.lrmi.bind-port=7122-7222 -Dcom.gigaspaces.start.httpPort=7104 -Dcom.gigaspaces.system.registryPort=7102"
ctx logger info "locators=$LOOKUPLOCATORS"
export WEBUI_PORT=$webui_port
nohup $XAPDIR/bin/gs-webui.sh >/tmp/webui.nohup.out 2>&1 &
echo $! > /tmp/webui.pid
ctx logger info "webui started"
|
kemiz/xap-4-cloudify-3
|
xap-scripts/start-ui.sh
|
Shell
|
apache-2.0
| 977 |
#!/bin/bash
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
ROOT_DIR=$(git rev-parse --show-toplevel)
JDK_COMMON_PKGS=java.lang:java.util:java.util.concurrent:java.nio:java.net:java.io
(
cd $ROOT_DIR
# Java client
javadoc \
-quiet \
-windowtitle "Pulsar Client Java API" \
-doctitle "Pulsar Client Java API" \
-overview site/javadoc/client.html \
-d site/api/client \
-subpackages org.apache.pulsar.client.api \
-noqualifier $JDK_COMMON_PKGS \
-notimestamp \
-Xdoclint:none \
`find pulsar-client/src/main/java/org/apache/pulsar/client/api -name '*.java'` \
`find pulsar-client-schema/src/main/java -name '*.java' | grep -v /impl/`
# Java admin
javadoc \
-quiet \
-windowtitle "Pulsar Admin Java API" \
-doctitle "Pulsar Admin Java API" \
-overview site/javadoc/admin.html \
-d site/api/admin \
-noqualifier $JDK_COMMON_PKGS \
-notimestamp \
-Xdoclint:none \
`find pulsar-client-admin -name '*.java' | grep -v /internal/` \
`find pulsar-common/src/main/java/org/apache/pulsar/common/policies -name '*.java'`
# Pulsar Functions Java SDK
javadoc \
-quiet \
-windowtitle "Pulsar Functions Java SDK" \
-doctitle "Pulsar Functions Java SDK" \
-overview site/javadoc/pulsar-functions.html \
-d site/api/pulsar-functions \
-noqualifier $JDK_COMMON_PKGS \
-notimestamp \
-Xdoclint:none \
-exclude lombok.extern.slf4j.Slf4j \
`find pulsar-functions/api-java/src/main/java/org/apache/pulsar/functions/api -name '*.java'`
# Broker
#javadoc \
# -quiet \
# -windowtitle "Pulsar Broker Java API" \
# -doctitle "Pulsar Broker Java API" \
# -overview site/scripts/javadoc-broker.html \
# -d site/api/broker \
# -noqualifier $JDK_COMMON_PKGS \
# -notimestamp \
# -Xdoclint:none \
# `find pulsar-broker -name *.java`
) || true
# The "|| true" is present here to keep this script from failing due to
# Javadoc errors
|
ArvinDevel/incubator-pulsar
|
site/scripts/javadoc-gen.sh
|
Shell
|
apache-2.0
| 2,705 |
export OS_PROJECT_DOMAIN_ID=default
export OS_USER_DOMAIN_ID=default
export OS_PROJECT_NAME=admin
export OS_TENANT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD={{admin_password}}
export OS_AUTH_URL=http://{{controller_addr}}:35357/v3
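# Usage (illustrative): source this file into the current shell before running
# OpenStack CLI commands, e.g. ". admin-openrc.sh && openstack token issue"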
|
sandvine/os-ansible-deployment-lite
|
ansible/roles/keystone/templates/admin-openrc.sh
|
Shell
|
apache-2.0
| 244 |
#!/bin/bash
if [ -z "$RED5_HOME" ]; then
export RED5_HOME=.;
fi
export JAVA_OPTS="-Xdebug -Xrunjdwp:transport=dt_socket,address=0.0.0.0:8787,server=y,suspend=n $JAVA_OPTS"
# Start Red5
exec $RED5_HOME/start.sh "$@"
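# A debugger can then attach to the JDWP socket configured above, e.g. (illustrative):
#   jdb -attach localhost:8787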
|
ant-media/Ant-Media-Server
|
src/main/server/start-debug.sh
|
Shell
|
apache-2.0
| 222 |
#!/bin/bash
source "$(dirname "${BASH_SOURCE}")/../../hack/lib/init.sh"
trap os::test::junit::reconcile_output EXIT
os::test::junit::declare_suite_start "cmd/quota"
os::cmd::expect_success 'oc new-project foo --as=deads'
os::cmd::expect_success 'oc label namespace/foo owner=deads'
os::cmd::expect_success 'oc create clusterquota for-deads --project-label-selector=owner=deads --hard=secrets=10'
os::cmd::try_until_text 'oc get appliedclusterresourcequota -n foo --as deads -o name' "for-deads"
os::cmd::try_until_text 'oc describe appliedclusterresourcequota/for-deads -n foo --as deads' "secrets.*9"
os::cmd::expect_success 'oc create clusterquota for-deads-by-annotation --project-annotation-selector=openshift.io/requester=deads --hard=secrets=50'
os::cmd::expect_success 'oc new-project bar --as=deads'
os::cmd::try_until_text 'oc get appliedclusterresourcequota -n bar --as deads -o name' "for-deads-by-annotation"
os::cmd::try_until_text 'oc get appliedclusterresourcequota -n foo --as deads -o name' "for-deads-by-annotation"
os::cmd::try_until_text 'oc describe appliedclusterresourcequota/for-deads-by-annotation -n bar --as deads' "secrets.*18"
os::cmd::expect_success 'oc delete project foo'
os::cmd::expect_success 'oc delete project bar'
echo "quota: ok"
os::test::junit::declare_suite_end
|
gashcrumb/origin
|
test/cmd/quota.sh
|
Shell
|
apache-2.0
| 1,310 |
#!/bin/sh -e
_SCRIPT_DIR="$( cd -P -- "$(dirname -- "$(command -v -- "$0")")" && pwd -P )"
CONFIGURATION=Debug
RUNTESTS=true
while [ $# -gt 0 ]; do
case "$1" in
--configuration|-c)
CONFIGURATION=$2
shift
;;
--notest)
RUNTESTS=false
;;
*)
echo "Invalid argument: $1"
exit 1
;;
esac
shift
done
dotnet restore
dotnet build --configuration $CONFIGURATION
# run tests
if [ "$RUNTESTS" = "true" ]; then
dotnet test --configuration $CONFIGURATION --no-restore --no-build
fi
# create packages
dotnet pack --no-restore --no-build --configuration $CONFIGURATION
PACKAGE_DIR="$_SCRIPT_DIR/artifacts/packages/$CONFIGURATION"
PACKAGE_COUNT=$(ls "$PACKAGE_DIR"/*.nupkg | wc -l)
if [ "$PACKAGE_COUNT" -ne "1" ]; then
echo "Expected a single NuGet package but found $PACKAGE_COUNT at '$PACKAGE_DIR'"
exit 1
fi
|
IxMilia/Dxf
|
build-and-test.sh
|
Shell
|
apache-2.0
| 872 |
#!/bin/bash
FAUCETHOME=`dirname $0`"/.."
for i in clib faucet tests ; do ls -1 $FAUCETHOME/$i/[a-z]*.py ; done | xargs realpath | sort
|
wackerly/faucet
|
tests/src_files.sh
|
Shell
|
apache-2.0
| 136 |
#!/bin/sh
java -jar agent-1.0.jar
|
Kerbores/ONEKEY-AGENT
|
Agent/src/main/scripts/start.sh
|
Shell
|
apache-2.0
| 35 |
#!/bin/bash
# Generates client-side items, including building ember and copying CSS
cp -R ${DJANGO_ADMIN_STATIC} ${EDCTF_ADMIN_STATIC} \
&& cp -R ${REST_FRAMEWORK_CSS_DIR} ${EDCTF_REST_STATIC}
|
IAryan/edCTF
|
scripts/build_frontend-prod.bash
|
Shell
|
apache-2.0
| 196 |
#!/bin/bash
# set default values for variables if they are not already defined
ARCH=${ARCH-dpdk}
CTRL_PLANE_DIR=${CTRL_PLANE_DIR-./src/hardware_dep/shared/ctrl_plane}
# controllers for the examples
declare -A controller_for_example
controller_for_example["acceptor"]="netpaxos_controller"
controller_for_example["coordinator"]="netpaxos_controller"
controller_for_example["test"]="dpdk_controller"
controller_for_example["l2_switch_test"]="dpdk_controller"
controller_for_example["l3_routing_test"]="dpdk_l3_controller"
controller_for_example["l3_routing-full"]="dpdk_l3-full_controller"
controller_for_example["l2_l3-fixed"]="dpdk_l2_l3_controller"
print_usage_and_exit() {
(>&2 echo "Usage: $0 <switch executable> [controller name] [controller params file] -- <options for compiled switch>")
exit 1
}
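# Example invocation (hypothetical switch binary, params file, and DPDK EAL options):
#   ./run.sh ./build/l2_switch_test default switch.params -- -c 0x3 -n 2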
if [ $# -lt 1 ]; then
(>&2 echo "Error: missing switch executable")
print_usage_and_exit
fi
P4_SWITCH=$1
shift
if [ ! -f "$P4_SWITCH" ]; then
(>&2 echo "Error: executable $P4_SWITCH does not exist")
print_usage_and_exit
fi
P4_SWITCH_BASENAME=${P4_SWITCH##*/}
CONTROLLER=${controller_for_example["$P4_SWITCH_BASENAME"]}
if [ $# -gt 0 ]; then
if [ "$1" = "default" ]; then
shift
elif [ "$1" != "--" ]; then
CONTROLLER=$1
shift
fi
fi
if [ "$CONTROLLER" = "" -o "$CONTROLLER" = "--" ]; then
(>&2 echo "Error: no default controller found for ${P4_SWITCH_BASENAME}")
print_usage_and_exit
fi
CONTROLLER_PARAMS_FILE=""
if [ $# -gt 0 -a "$1" != "--" ]; then
CONTROLLER_PARAMS_FILE="$1"
shift
fi
# Getting arguments for switch from command line if needed
if [ -z ${P4DPDK_OPTS+x} ]; then
while [ $# -gt 0 ]; do
if [ "$1" = "--" ]; then
shift
P4DPDK_EXEC_OPTS=$*
break
fi
shift
if [ $# -eq 0 ]; then
(>&2 echo "Error: no options for compiled switch given and \$P4DPDK_OPTS not set")
print_usage_and_exit
fi
done
else
echo "Using DPDK options set in \$P4DPDK_OPTS"
P4DPDK_EXEC_OPTS=${P4DPDK_OPTS}
fi
# Compile the controller
cd $CTRL_PLANE_DIR
make -j $CONTROLLER
cd - >/dev/null
# Stop all running controllers
for controller in "${controller_for_example[@]}"
do
sudo killall -q "$controller"
done
# Run controller
CONTROLLER_LOG=$(dirname $(dirname ${P4_SWITCH}))/controller.log
$CTRL_PLANE_DIR/$CONTROLLER $CONTROLLER_PARAMS_FILE > "${CONTROLLER_LOG}" &
sleep 0.05
echo "-------------------- Running switch"
echo "Parameters for DPDK: ${P4DPDK_EXEC_OPTS}"
echo "Controller : ${CONTROLLER}"
echo "Controller log file: ${CONTROLLER_LOG}"
# Start the program
sudo -E ${P4_SWITCH} ${P4DPDK_EXEC_OPTS}
|
usi-systems/p4paxos
|
dpdk_p4@elte/netpaxos_controller/run.sh
|
Shell
|
apache-2.0
| 2,721 |
# Avoid clobbering $HOME; use a dedicated variable for the script directory.
SCRIPT_DIR=$(dirname "$0")
cd "$SCRIPT_DIR/.."
destination=src/main/resources/com/graphhopper/util/
translations="en_US SKIP ar ast bg ca cs_CZ da_DK de_DE el es fa fil fi fr_FR fr_CH gl he hsb hu_HU it ja lt_LT ne nl pl_PL pt_BR pt_PT ro ru si sk sv_SE tr uk vi_VI zh_CN"
file=$1
# You can execute the following
# curl 'https://docs.google.com/spreadsheets/d/10HKSFmxGVEIO92loVQetVmjXT0qpf3EA2jxuQSSYTdU/export?format=tsv&id=10HKSFmxGVEIO92loVQetVmjXT0qpf3EA2jxuQSSYTdU&gid=0' > tmp.tsv
# ./files/update-translations.sh tmp.tsv && rm tmp.tsv
INDEX=1
for tr in $translations; do
INDEX=$(($INDEX + 1))
if [[ "x$tr" = "xSKIP" ]]; then
continue
fi
echo -e '# do not edit manually, instead use spreadsheet https://t.co/f086oJXAEI and script ./core/files/update-translations.sh\n' > $destination/$tr.txt
tail -n+5 "$file" | cut -s -f1,$INDEX --output-delimiter='=' >> $destination/$tr.txt
done
|
bendavidson/graphhopper
|
core/files/update-translations.sh
|
Shell
|
apache-2.0
| 891 |
#!/bin/bash
#
# Copyright (C) 2017 Dan Iverson
#
# You may distribute under the terms of either the GNU General Public
# License or the Apache License, as specified in the LICENSE file.
#
# For more information, see the LICENSE file.
#
#Uncomment next line for debugging output
#set -x
SCRIPT=$0
ovj_usage() {
cat <<EOF
usage:
$SCRIPT will install VirtualBox into the Linux environment.
If your system has internet access, then entering $SCRIPT
will do the installation.
$SCRIPT tests internet access by "pinging" google.com.
The ping command may also fail due to a firewall blocking it.
If you are sure the system is connected to the internet
and want to bypass this "ping" test, use
$SCRIPT noPing
EOF
exit 1
}
noPing=0
# process flag args
while [ $# -gt 0 ]; do
key="$1"
case $key in
-h|--help) ovj_usage ;;
noPing) noPing=1; shift ;;
-vv|--debug) set -x ;;
*)
# unknown option
echo "unrecognized argument: $key"
ovj_usage
;;
esac
shift
done
userId=$(/usr/bin/id | awk 'BEGIN { FS = " " } { print $1 }')
if [ $userId != "uid=0(root)" ]; then
echo
echo "Installing VirtualBox"
echo
s=1
t=2
while [[ $s = 1 ]] && [[ ! $t = 0 ]]; do
if [ -x /usr/bin/dpkg ]; then
echo "If requested, enter the admin (sudo) password"
sudo $0 $* ;
else
echo "Please enter this system's root user password"
su root -c "$0 $*";
fi
s=$?
t=$((t-1))
echo " "
done
if [ $t = 0 ]; then
echo "Access denied. Type cntrl-C to exit this window."
echo "Type $0 to start the installation program again"
echo ""
fi
exit
fi
checkNetwork() {
# The previous test host appears to have disabled ping. Use google.com instead
local URL="google.com"
local pingRecv=0
pingRecv=`ping -c 1 -q -W 1 $URL | grep received | awk '{ print $4 }'`
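# ping flags: -c 1 send a single packet, -q quiet summary only, -W 1 wait at most 1s for a reply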
if [[ ${pingRecv} -eq 1 ]] || [[ $noPing -eq 1 ]] ; then
echo "Test for internet access passed"
return 0
else
echo "Internet access failed"
echo "This is tested by doing \"ping $URL\". The ping"
echo "command may also fail due to a firewall blocking it."
echo "If you are sure the system is connected to the internet"
echo "and want to bypass this \"ping\" test, use"
echo "$SCRIPT noPing"
echo ""
return 1
fi
}
#
# Main program starts here
#
checkNetwork
if [[ $? -ne 0 ]]; then
exit 1
fi
if [ -x /usr/bin/dpkg ]; then
grep -v virtualbox /etc/apt/sources.list > /etc/apt/tmp
source /etc/os-release
echo "deb https://download.virtualbox.org/virtualbox/debian $UBUNTU_CODENAME contrib" >> /etc/apt/tmp
mv /etc/apt/tmp /etc/apt/sources.list
wget -q https://www.virtualbox.org/download/oracle_vbox_2016.asc -O- | apt-key add -
wget -q https://www.virtualbox.org/download/oracle_vbox.asc -O- | apt-key add -
apt-get update
apt-get -y install virtualbox-6.1
else
if [ -f /etc/centos-release ]; then
rel=centos-release
# remove all characters up to the first digit
version=$(cat /etc/$rel | sed -E 's/[^0-9]+//')
elif [ -f /etc/redhat-release ]; then
rel=redhat-release
# remove all characters up to the first digit
version=$(cat /etc/$rel | sed -E 's/[^0-9]+//')
else
# Assume Linux variant is like CentOS 8
version="8.0"
fi
# strip everything from the first dot to the end, keeping only the major version
releasever=${version%%.*}
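# e.g. version="8.3.2011 (Core)" -> releasever="8"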
cat <<EOF > /etc/yum.repos.d/Oracle.repo
[virtualbox]
name=Oracle Linux / RHEL / CentOS-$releasever / \$basearch - VirtualBox
baseurl=http://download.virtualbox.org/virtualbox/rpm/el/$releasever/x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://www.virtualbox.org/download/oracle_vbox.asc
EOF
yum clean all
yum makecache
yum -y install gcc make perl VirtualBox-6.1
fi
# If the install fails, may need to do the following
# sudo apt-get remove VirtualBox-6.1
# sudo apt-get autoremove 'virtualbox*'
# sudo apt-get install linux-headers-5.3.0-1044-gke
# The actual headers may be different. The error message
# should identify the correct ones.
# Then rerun ovjGetVB
echo ""
echo "VirtualBox installation complete"
echo "You can start VirtualBox by running"
echo "virtualbox"
echo "from a terminal."
if [ -x /usr/bin/dpkg ]; then
echo "You can add it to your favorites list"
echo "by entering virtualbox in the Activities search box and then"
echo "right-clicking the VirtualBox Icon and selecting \"Add to favorites\""
fi
echo ""
exit
|
OpenVnmrJ/OpenVnmrJ
|
src/scripts/ovjGetVB.sh
|
Shell
|
apache-2.0
| 4,670 |
#!/bin/bash
#
# Summary: Initialize dockerized PostgreSQL
# Author: Daniel Peña <[email protected]>
# Version: 9.6-0.2
#
set -eo pipefail
source ${PG_APP_HOME}/functions
[[ $DEBUG == true ]] && set -x
# Check if there are arguments to be passed to postgres
if [[ ${1:0:1} == "-" ]]; then
EXTRA_ARGS="$@"
set --
fi
#
# TODO: set up replication automatically in the future
#
echo "Starting set up"
configure_postgresql
echo "Starting PostgreSQL ${PG_VERSION}..."
exec sudo -HEu ${PG_USER} -i ${PG_BINDIR}/postgres -D ${PG_HOME} $EXTRA_ARGS
|
dppascual/docker-postgresql
|
entrypoint.sh
|
Shell
|
apache-2.0
| 548 |
#!/bin/bash
#
# GWTriFold/tools/karst.sh
# simple organizational/build system for tertiary package resources
# (documentation, non-primary-programming-language assets)
# Copyright 2013-2014 Nathan Ross ([email protected])
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# karst is a build-system independent concept for concertedly preparing
# relatively static tertiary resources, which are later used in a build.
# Its domain of use and good ROI is exclusively small projects.
# On small projects, the complexity cost of one additional small script,
# plus the performance cost of having no dynamic updates in automation,
# is less than the complexity cost of adapting what would otherwise be
# simple scripts into build directives.
# Important note:
# re-running every script is a necessary consequence of a tool like this
# keeping no cache file and assuming nothing about the products of your build
# scripts. E.g. your script's mtime may be unchanged, but it may read input
# from the filesystem that has changed since the tool was last called.
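# Illustrative layout (assumed from the loop below, not prescribed by the tool):
#   src/<pkg>/copy/*  -> copied verbatim to out/<pkg>/
#   src/<pkg>/*.md    -> rendered to out/<pkg>/<name>.html
#   src/<pkg>/*.sh    -> executed with a tmp dir argument; tmp contents moved to out/<pkg>/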
D="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
for i in `ls src`
do
mkdir -p "${D}/out/${i}"
# a bare glob test (e.g. [ -e dir/* ]) doesn't work reliably here, hence the explicit ls | wc -l check
if [ -d "${D}/src/${i}/copy/" ] && [ `ls -1 "${D}/src/${i}/copy/" | wc -l` -gt 0 ]
then cp -r ${D}/src/${i}/copy/* ${D}/out/${i}/
fi
# [ -e glob ] breaks when the glob matches more than one file; test via ls instead
if ls ${D}/src/${i}/*.md >/dev/null 2>&1
then
for j in `ls ${D}/src/${i}/*.md`
do
of=${D}/out/$i/`basename $j .md`.html
cat ${D}/src/shared/markdownHeader.html > $of
markdown $j >> $of
echo "</body></html>" >> $of
done
fi
if ls ${D}/src/${i}/*.sh >/dev/null 2>&1
then
mkdir -p "${D}/src/${i}/tmp/"
for j in `ls ${D}/src/${i}/*.sh`
do
${D}/src/${i}/./`basename $j` "${D}/src/${i}/tmp"
done
cp -r ${D}/src/${i}/tmp/* "${D}/out/${i}/" && rm -r "${D}/src/${i}/tmp/"
fi
done
|
nathanross/GWTriFold
|
GWTriFold/tools/karst.sh
|
Shell
|
apache-2.0
| 2,344 |
#! /bin/bash
# GRUB uses roughly 4.6 MB of disk
GRUB_KB=5000
BASE_KB=$GRUB_KB
usage() {
echo "Usage: $0 [OPTIONS] BINARY" 1>&2
echo ""
echo "Create a bootable grub image for a multiboot compatible BINARY"
echo ""
echo "OPTIONS:"
echo "-c Unmount and clean previous image, for consecutive development runs"
echo "-e Exit after cleaning. Only makes sense in combination with -c"
echo "-k keep mountpoint open for inspection - don't unmount"
echo "-u update image with new kernel"
echo "-s base image size, to which BINARY size will be added. Default $GRUB_KB Kb"
exit 1
}
while getopts "cekus:" o; do
case "${o}" in
c)
clean=1
;;
e)
exit_on_clean=1
;;
k)
keep_open=1
;;
u)
update=1
;;
s)
BASE_KB=$OPTARG
;;
*)
usage
;;
esac
done
shift $((OPTIND-1))
[[ $1 ]] || usage
KERNEL=$1
DISK=${DISK-$KERNEL.grub.img}
MOUNTDIR=${MOUNTDIR-/mnt}
MOUNT_OPTS="rw"
set -e
function unmount {
# NOTE: Mounted Loopback devices won't get detached with losetup -d
# Instead we use umount -d to have mount detach the loopback device
#
# NOTE:
# If the script is interrupted before getting to this step you'll end up with
# lots of half-mounted loopback-devices after a while.
# Unmount by consecutive calls to command below.
echo -e ">>> Unmounting and detaching $LOOP"
sudo umount -vd $MOUNTDIR || :
}
function clean {
echo ">>> Removing previous $DISK"
rm -f $DISK
}
function create_disk {
if [ -f $DISK ]
then
echo -e ">>> $DISK allready exists. Preserving existing image as $DISK.bak"
mv $DISK $DISK.bak
fi
# Kernel size in Kb
KERN_KB=$(( ($(stat -c%s "$KERNEL") / 1024) ))
# Estimate some overhead for the FAT
FAT_KB=$(( ($KERN_KB + $BASE_KB) / 10 ))
DISK_KB=$(( $KERN_KB + $BASE_KB + $FAT_KB ))
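# e.g. a 2048 Kb kernel with the default 5000 Kb base (integer arithmetic):
# FAT_KB = (2048 + 5000) / 10 = 704, DISK_KB = 2048 + 5000 + 704 = 7752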
echo ">>> Estimated disk size: $BASE_KB Kb Base size + $KERN_KB Kb kernel + $FAT_KB Kb FAT = $DISK_KB Kb"
echo ">>> Creating FAT file system on $DISK"
mkfs.fat -C $DISK $DISK_KB
}
function mount_loopback {
# Find first available loopback device
LOOP=$(sudo losetup -f)
echo -e ">>> Associating $LOOP with $DISK"
# NOTE: To list used loopback devices do
# losetup -a
# Associate loopback with disk file
sudo losetup $LOOP $DISK
echo -e ">>> Mounting ($MOUNT_OPTS) $DISK in $MOUNTDIR"
mkdir -p $MOUNTDIR
sudo mount -o $MOUNT_OPTS $LOOP $MOUNTDIR
}
function copy_kernel {
echo ">>> Copying kernel '$KERNEL' to $MOUNTDIR/boot/includeos_service"
sudo cp $KERNEL $MOUNTDIR/boot/includeos_service
sync
}
function build {
echo ">>> Building service"
pushd build
make
popd
}
# Unmount and Clean previous image
if [[ $clean ]]
then
echo ">>> Cleaning "
unmount
clean
if [[ $exit_on_clean ]]
then
echo "Exit option set. Exiting."
exit 0
fi
fi
# Update image and exit
if [[ $update ]]
then
mount_loopback
copy_kernel
unmount
exit
fi
# Exit on first error
set -e
# Default behavior
create_disk
mount_loopback
echo -e ">>> Creating boot dir"
sudo mkdir -p $MOUNTDIR/boot
echo -e ">>> Populating boot dir with grub config"
sudo mkdir -p $MOUNTDIR/boot/grub
GRUB_CFG='
set default="0"
set timeout=0
serial --unit=0 --speed=9600
terminal_input serial; terminal_output serial
menuentry IncludeOS {
multiboot /boot/includeos_service
}
'
if [[ ! -e grub.cfg ]]
then
echo -e ">>> Creating grub config file"
# NOTE: sudo would not apply to the redirection anyway; write locally, then sudo mv
echo "$GRUB_CFG" > grub.cfg
sudo mv grub.cfg $MOUNTDIR/boot/grub/grub.cfg
else
echo -e ">>> Copying grub config file"
sudo cp grub.cfg $MOUNTDIR/boot/grub/grub.cfg
fi
copy_kernel
# EFI?
sudo mkdir -p $MOUNTDIR/boot/efi
ARCH=${ARCH-i386}
TARGET=i386-pc
echo -e ">>> Running grub install for target $TARGET"
sudo grub-install --target=$TARGET --force --boot-directory $MOUNTDIR/boot/ $LOOP
echo -e ">>> Synchronize file cache"
sync
if [[ -z $keep_open ]]
then
unmount
else
echo -e ">>> NOTE: Keeping mountpoint open"
fi
echo -e ">>> Done. You can now boot $DISK"
|
ingve/IncludeOS
|
etc/scripts/grubify.sh
|
Shell
|
apache-2.0
| 4,035 |
#!/bin/bash
EXCLUDEFILE="/opt/pysk/etc/serverconfig/exclude/`hostname`"
EXCLUDES=""
if [ -s $EXCLUDEFILE ] ; then
for i in `cat $EXCLUDEFILE`; do
EXCLUDES="$EXCLUDES -x `basename $i`"
done
fi
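# e.g. an exclude file listing /etc/php-fpm.d and /etc/motd yields
# EXCLUDES=" -x php-fpm.d -x motd" (basenames only, consumed by diff -x below)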
diff -wur -x php-fpm.conf -x known_hosts $EXCLUDES /opt/pysk/serverconfig/ / | grep -v 'Only in /' | less
|
philwo/pysk
|
serverconfig/diff.sh
|
Shell
|
apache-2.0
| 318 |
#!/bin/sh
#
# builder_common.sh
#
# part of pfSense (https://www.pfsense.org)
# Copyright (c) 2004-2016 Rubicon Communications, LLC (Netgate)
# All rights reserved.
#
# FreeSBIE portions of the code
# Copyright (c) 2005 Dario Freni
# and copied from FreeSBIE project
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
if [ -z "${IMAGES_FINAL_DIR}" -o "${IMAGES_FINAL_DIR}" = "/" ]; then
echo "IMAGES_FINAL_DIR is not defined"
print_error_pfS
fi
kldload filemon >/dev/null 2>&1
lc() {
echo "${1}" | tr '[[:upper:]]' '[[:lower:]]'
}
git_last_commit() {
export CURRENT_COMMIT=$(git -C ${BUILDER_ROOT} log -1 --format='%H')
export CURRENT_AUTHOR=$(git -C ${BUILDER_ROOT} log -1 --format='%an')
echo ">>> Last known commit $CURRENT_AUTHOR - $CURRENT_COMMIT"
echo "$CURRENT_COMMIT" > $SCRATCHDIR/build_commit_info.txt
}
# Create core pkg repository
core_pkg_create_repo() {
if [ ! -d "${CORE_PKG_REAL_PATH}/All" ]; then
return
fi
############ ATTENTION ##############
#
# For some reason pkg repo fails without a trailing / at the end of the
# directory name, so removing it will break the command
#
# https://github.com/freebsd/pkg/issues/1364
#
echo -n ">>> Creating core packages repository... "
if pkg repo -q "${CORE_PKG_REAL_PATH}/"; then
echo "Done!"
else
echo "Failed!"
print_error_pfS
fi
# Use the same directory structure as poudriere does to avoid
# breaking snapshot repositories during rsync
ln -sf $(basename ${CORE_PKG_REAL_PATH}) ${CORE_PKG_PATH}/.latest
ln -sf .latest/All ${CORE_PKG_ALL_PATH}
ln -sf .latest/digests.txz ${CORE_PKG_PATH}/digests.txz
ln -sf .latest/meta.txz ${CORE_PKG_PATH}/meta.txz
ln -sf .latest/packagesite.txz ${CORE_PKG_PATH}/packagesite.txz
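# Resulting layout (illustrative):
#   ${CORE_PKG_PATH}/.latest     -> <versioned core repo dir>
#   ${CORE_PKG_ALL_PATH}         -> .latest/All
#   ${CORE_PKG_PATH}/digests.txz -> .latest/digests.txz (same for meta/packagesite)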
}
# Create core pkg (base, kernel)
core_pkg_create() {
local _template="${1}"
local _flavor="${2}"
local _version="${3}"
local _root="${4}"
local _findroot="${5}"
local _filter="${6}"
local _template_path=${BUILDER_TOOLS}/templates/core_pkg/${_template}
${BUILDER_SCRIPTS}/create_core_pkg.sh \
-t "${_template_path}" \
-f "${_flavor}" \
-v "${_version}" \
-r "${_root}" \
-s "${_findroot}" \
-F "${_filter}" \
-d "${CORE_PKG_REAL_PATH}/All" \
|| print_error_pfS
}
# This routine will output that something went wrong
print_error_pfS() {
echo
echo "####################################"
echo "Something went wrong, check errors!" >&2
echo "####################################"
echo
echo "NOTE: a lot of times you can run './build.sh --clean-builder' to resolve."
echo
[ -n "${LOGFILE}" -a -f "${LOGFILE}" ] && \
echo "Log saved on ${LOGFILE}" && \
echo
kill $$
exit 1
}
# This routine will verify that the kernel has been
# installed OK to the staging area.
ensure_kernel_exists() {
if [ ! -f "$1/boot/kernel/kernel.gz" ]; then
echo ">>> ERROR: Could not locate $1/boot/kernel.gz"
print_error_pfS
fi
KERNEL_SIZE=$(stat -f "%z" $1/boot/kernel/kernel.gz)
if [ "$KERNEL_SIZE" -lt 3500 ]; then
echo ">>> ERROR: Kernel $1/boot/kernel.gz appears to be smaller than it should be: $KERNEL_SIZE"
print_error_pfS
fi
}
get_pkg_name() {
echo "${PRODUCT_NAME}-${1}-${CORE_PKG_VERSION}"
}
# This routine builds all related kernels
build_all_kernels() {
# Set KERNEL_BUILD_PATH if it has not been set
if [ -z "${KERNEL_BUILD_PATH}" ]; then
KERNEL_BUILD_PATH=$SCRATCHDIR/kernels
echo ">>> KERNEL_BUILD_PATH has not been set. Setting to ${KERNEL_BUILD_PATH}!"
fi
[ -d "${KERNEL_BUILD_PATH}" ] \
&& rm -rf ${KERNEL_BUILD_PATH}
# Build each kernel listed in BUILD_KERNELS
for BUILD_KERNEL in $BUILD_KERNELS; do
unset KERNCONF
unset KERNEL_DESTDIR
unset KERNEL_NAME
export KERNCONF=$BUILD_KERNEL
export KERNEL_DESTDIR="$KERNEL_BUILD_PATH/$BUILD_KERNEL"
export KERNEL_NAME=${BUILD_KERNEL}
LOGFILE="${BUILDER_LOGS}/kernel.${KERNCONF}.${TARGET}.log"
echo ">>> Building $BUILD_KERNEL kernel." | tee -a ${LOGFILE}
if [ -n "${NO_BUILDKERNEL}" -a -f "${CORE_PKG_ALL_PATH}/$(get_pkg_name kernel-${KERNEL_NAME}).txz" ]; then
echo ">>> NO_BUILDKERNEL set, skipping build" | tee -a ${LOGFILE}
continue
fi
buildkernel
echo ">>> Staging $BUILD_KERNEL kernel..." | tee -a ${LOGFILE}
installkernel
ensure_kernel_exists $KERNEL_DESTDIR
echo ">>> Creating pkg of $KERNEL_NAME-debug kernel to staging area..." | tee -a ${LOGFILE}
core_pkg_create kernel-debug ${KERNEL_NAME} ${CORE_PKG_VERSION} ${KERNEL_DESTDIR} \
"./usr/lib/debug/boot" \*.debug
rm -rf ${KERNEL_DESTDIR}/usr
echo ">>> Creating pkg of $KERNEL_NAME kernel to staging area..." | tee -a ${LOGFILE}
core_pkg_create kernel ${KERNEL_NAME} ${CORE_PKG_VERSION} ${KERNEL_DESTDIR} "./boot/kernel ./boot/modules"
rm -rf $KERNEL_DESTDIR 2>&1 1>/dev/null
done
}
install_default_kernel() {
if [ -z "${1}" ]; then
echo ">>> ERROR: install_default_kernel called without a kernel config name"| tee -a ${LOGFILE}
print_error_pfS
fi
export KERNEL_NAME="${1}"
echo -n ">>> Installing kernel to be used by image ${KERNEL_NAME}..." | tee -a ${LOGFILE}
# Copy kernel package to chroot, otherwise pkg won't find it to install
if ! pkg_chroot_add ${FINAL_CHROOT_DIR} kernel-${KERNEL_NAME}; then
echo ">>> ERROR: Error installing kernel package $(get_pkg_name kernel-${KERNEL_NAME}).txz" | tee -a ${LOGFILE}
print_error_pfS
fi
# Mark the kernel pkg as vital so the user cannot casually remove it
pkg_chroot ${FINAL_CHROOT_DIR} set -v 1 -y $(get_pkg_name kernel-${KERNEL_NAME})
if [ ! -f $FINAL_CHROOT_DIR/boot/kernel/kernel.gz ]; then
echo ">>> ERROR: No kernel installed on $FINAL_CHROOT_DIR and the resulting image will be unusable. STOPPING!" | tee -a ${LOGFILE}
print_error_pfS
fi
mkdir -p $FINAL_CHROOT_DIR/pkgs
if [ -z "${2}" -o -n "${INSTALL_EXTRA_KERNELS}" ]; then
cp ${CORE_PKG_ALL_PATH}/$(get_pkg_name kernel-${KERNEL_NAME}).txz $FINAL_CHROOT_DIR/pkgs
if [ -n "${INSTALL_EXTRA_KERNELS}" ]; then
for _EXTRA_KERNEL in $INSTALL_EXTRA_KERNELS; do
_EXTRA_KERNEL_PATH=${CORE_PKG_ALL_PATH}/$(get_pkg_name kernel-${_EXTRA_KERNEL}).txz
if [ -f "${_EXTRA_KERNEL_PATH}" ]; then
echo -n ". adding ${_EXTRA_KERNEL_PATH} on image /pkgs folder"
cp ${_EXTRA_KERNEL_PATH} $FINAL_CHROOT_DIR/pkgs
else
echo ">>> ERROR: Requested kernel $(get_pkg_name kernel-${_EXTRA_KERNEL}).txz was not found to be put on image /pkgs folder!"
print_error_pfS
fi
done
fi
fi
echo "Done." | tee -a ${LOGFILE}
unset KERNEL_NAME
}
# This builds FreeBSD (make buildworld)
# Imported from FreeSBIE
make_world() {
LOGFILE=${BUILDER_LOGS}/buildworld.${TARGET}
echo ">>> LOGFILE set to $LOGFILE." | tee -a ${LOGFILE}
if [ -n "${NO_BUILDWORLD}" ]; then
echo ">>> NO_BUILDWORLD set, skipping build" | tee -a ${LOGFILE}
return
fi
echo ">>> $(LC_ALL=C date) - Starting build world for ${TARGET} architecture..." | tee -a ${LOGFILE}
script -aq $LOGFILE ${BUILDER_SCRIPTS}/build_freebsd.sh -K -s ${FREEBSD_SRC_DIR} \
|| print_error_pfS
echo ">>> $(LC_ALL=C date) - Finished build world for ${TARGET} architecture..." | tee -a ${LOGFILE}
LOGFILE=${BUILDER_LOGS}/installworld.${TARGET}
echo ">>> LOGFILE set to $LOGFILE." | tee -a ${LOGFILE}
[ -d "${INSTALLER_CHROOT_DIR}" ] \
|| mkdir -p ${INSTALLER_CHROOT_DIR}
echo ">>> Installing world with bsdinstall for ${TARGET} architecture..." | tee -a ${LOGFILE}
script -aq $LOGFILE ${BUILDER_SCRIPTS}/install_freebsd.sh -i -K \
-s ${FREEBSD_SRC_DIR} \
-d ${INSTALLER_CHROOT_DIR} \
|| print_error_pfS
# Copy additional installer scripts
install -o root -g wheel -m 0755 ${BUILDER_TOOLS}/installer/*.sh \
${INSTALLER_CHROOT_DIR}/root
# XXX set root password since we don't have nullok enabled
pw -R ${INSTALLER_CHROOT_DIR} usermod root -w yes
echo ">>> Installing world without bsdinstall for ${TARGET} architecture..." | tee -a ${LOGFILE}
script -aq $LOGFILE ${BUILDER_SCRIPTS}/install_freebsd.sh -K \
-s ${FREEBSD_SRC_DIR} \
-d ${STAGE_CHROOT_DIR} \
|| print_error_pfS
# XXX This should move into the scripts
[ -d "${STAGE_CHROOT_DIR}/usr/local/bin" ] \
|| mkdir -p ${STAGE_CHROOT_DIR}/usr/local/bin
makeargs="DESTDIR=${STAGE_CHROOT_DIR}"
echo ">>> Building and installing crypto tools and athstats for ${TARGET} architecture... (Starting - $(LC_ALL=C date))" | tee -a ${LOGFILE}
(script -aq $LOGFILE make -C ${FREEBSD_SRC_DIR}/tools/tools/crypto ${makeargs} clean all install || print_error_pfS;) | egrep '^>>>' | tee -a ${LOGFILE}
# XXX FIX IT
# (script -aq $LOGFILE make -C ${FREEBSD_SRC_DIR}/tools/tools/ath/athstats ${makeargs} clean all install || print_error_pfS;) | egrep '^>>>' | tee -a ${LOGFILE}
echo ">>> Building and installing crypto tools and athstats for ${TARGET} architecture... (Finished - $(LC_ALL=C date))" | tee -a ${LOGFILE}
if [ "${PRODUCT_NAME}" = "pfSense" -a -n "${GNID_REPO_BASE}" ]; then
echo ">>> Building gnid... " | tee -a ${LOGFILE}
(\
cd ${GNID_SRC_DIR} && \
make INCLUDE_DIR=${GNID_INCLUDE_DIR} \
LIBCRYPTO_DIR=${GNID_LIBCRYPTO_DIR} clean gnid \
) || print_error_pfS
install -o root -g wheel -m 0700 ${GNID_SRC_DIR}/gnid \
${STAGE_CHROOT_DIR}/usr/sbin \
|| print_error_pfS
install -o root -g wheel -m 0700 ${GNID_SRC_DIR}/gnid \
${INSTALLER_CHROOT_DIR}/usr/sbin \
|| print_error_pfS
fi
unset makeargs
}
# This routine creates a ova image that contains
# a ovf and vmdk file. These files can be imported
# right into vmware or virtual box.
# (and many other emulation platforms)
# http://www.vmware.com/pdf/ovf_whitepaper_specification.pdf
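# Note: an .ova is a plain tar archive, so e.g. `tar -tf <name>.ova` lists the
# .ovf descriptor and .vmdk disk that this routine produces.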
create_ova_image() {
# XXX create a .ovf php creator that you can pass:
# 1. populatedSize
# 2. license
# 3. product name
# 4. version
# 5. number of network interface cards
# 6. allocationUnits
# 7. capacity
# 8. capacityAllocationUnits
LOGFILE=${BUILDER_LOGS}/ova.${TARGET}.log
local _mntdir=${OVA_TMP}/mnt
if [ -d "${_mntdir}" ]; then
local _dev
# XXX Root cause not yet found, but it doesn't unmount
# properly on looped builds, hence this extra check
while true; do
_dev=$(mount -p ${_mntdir} 2>/dev/null | awk '{print $1}')
[ $? -ne 0 -o -z "${_dev}" ] \
&& break
umount -f ${_mntdir}
mdconfig -d -u ${_dev#/dev/}
done
chflags -R noschg ${OVA_TMP}
rm -rf ${OVA_TMP}
fi
mkdir -p $(dirname ${OVAPATH})
mkdir -p ${_mntdir}
if [ -z "${OVA_SWAP_PART_SIZE_IN_GB}" -o "${OVA_SWAP_PART_SIZE_IN_GB}" = "0" ]; then
# first partition size (freebsd-ufs)
local OVA_FIRST_PART_SIZE_IN_GB=${VMDK_DISK_CAPACITY_IN_GB}
# Calculate the real first partition size, reserving 256 blocks (256 * 512 = 131072 bytes) at the beginning of the disk for the loader
local OVA_FIRST_PART_SIZE=$((${OVA_FIRST_PART_SIZE_IN_GB}*1024*1024*1024-131072))
# Unset swap partition size variable
unset OVA_SWAP_PART_SIZE
# Parameter used by mkimg
unset OVA_SWAP_PART_PARAM
else
# first partition size (freebsd-ufs)
local OVA_FIRST_PART_SIZE_IN_GB=$((VMDK_DISK_CAPACITY_IN_GB-OVA_SWAP_PART_SIZE_IN_GB))
# Use the first partition size in gigabytes (mkimg 'g' suffix)
local OVA_FIRST_PART_SIZE="${OVA_FIRST_PART_SIZE_IN_GB}g"
# Calculate the real swap size, reserving 256 blocks (131072 bytes) at the beginning of the disk for the loader
local OVA_SWAP_PART_SIZE=$((${OVA_SWAP_PART_SIZE_IN_GB}*1024*1024*1024-131072))
# Parameter used by mkimg
local OVA_SWAP_PART_PARAM="-p freebsd-swap/swap0::${OVA_SWAP_PART_SIZE}"
fi
# Prepare folder to be put in image
customize_stagearea_for_image "ova"
install_default_kernel ${DEFAULT_KERNEL} "no"
# Fill fstab
echo ">>> Installing platform specific items..." | tee -a ${LOGFILE}
echo "/dev/gpt/${PRODUCT_NAME} / ufs rw 1 1" > ${FINAL_CHROOT_DIR}/etc/fstab
if [ -n "${OVA_SWAP_PART_SIZE}" ]; then
echo "/dev/gpt/swap0 none swap sw 0 0" >> ${FINAL_CHROOT_DIR}/etc/fstab
fi
# Create / partition
echo -n ">>> Creating / partition... " | tee -a ${LOGFILE}
truncate -s ${OVA_FIRST_PART_SIZE} ${OVA_TMP}/${OVFUFS}
local _md=$(mdconfig -a -f ${OVA_TMP}/${OVFUFS})
trap "mdconfig -d -u ${_md}; return" 1 2 15 EXIT
newfs -L ${PRODUCT_NAME} -j /dev/${_md} 2>&1 >>${LOGFILE}
if ! mount /dev/${_md} ${_mntdir} 2>&1 >>${LOGFILE}; then
echo "Failed!" | tee -a ${LOGFILE}
echo ">>> ERROR: Error mounting temporary vmdk image. STOPPING!" | tee -a ${LOGFILE}
print_error_pfS
fi
trap "sync; sleep 3; umount ${_mntdir} || umount -f ${_mntdir}; mdconfig -d -u ${_md}; return" 1 2 15 EXIT
echo "Done!" | tee -a ${LOGFILE}
clone_directory_contents ${FINAL_CHROOT_DIR} ${_mntdir}
sync
sleep 3
umount ${_mntdir} || umount -f ${_mntdir} >>${LOGFILE} 2>&1
mdconfig -d -u ${_md}
trap "-" 1 2 15 EXIT
# Create raw disk
echo -n ">>> Creating raw disk... " | tee -a ${LOGFILE}
mkimg \
-s gpt \
-f raw \
-b ${FINAL_CHROOT_DIR}/boot/pmbr \
-p freebsd-boot:=${FINAL_CHROOT_DIR}/boot/gptboot \
-p freebsd-ufs/${PRODUCT_NAME}:=${OVA_TMP}/${OVFUFS} \
${OVA_SWAP_PART_PARAM} \
-o ${OVA_TMP}/${OVFRAW} 2>&1 >> ${LOGFILE}
if [ $? -ne 0 -o ! -f ${OVA_TMP}/${OVFRAW} ]; then
if [ -f ${OVA_TMP}/${OVFUFS} ]; then
rm -f ${OVA_TMP}/${OVFUFS}
fi
if [ -f ${OVA_TMP}/${OVFRAW} ]; then
rm -f ${OVA_TMP}/${OVFRAW}
fi
echo "Failed!" | tee -a ${LOGFILE}
echo ">>> ERROR: Error creating temporary vmdk image. STOPPING!" | tee -a ${LOGFILE}
print_error_pfS
fi
echo "Done!" | tee -a ${LOGFILE}
# We don't need it anymore
rm -f ${OVA_TMP}/${OVFUFS} >/dev/null 2>&1
# Convert raw to vmdk
echo -n ">>> Creating vmdk disk... " | tee -a ${LOGFILE}
vmdktool -z9 -v ${OVA_TMP}/${OVFVMDK} ${OVA_TMP}/${OVFRAW}
if [ $? -ne 0 -o ! -f ${OVA_TMP}/${OVFVMDK} ]; then
if [ -f ${OVA_TMP}/${OVFRAW} ]; then
rm -f ${OVA_TMP}/${OVFRAW}
fi
if [ -f ${OVA_TMP}/${OVFVMDK} ]; then
rm -f ${OVA_TMP}/${OVFVMDK}
fi
echo "Failed!" | tee -a ${LOGFILE}
echo ">>> ERROR: Error creating vmdk image. STOPPING!" | tee -a ${LOGFILE}
print_error_pfS
fi
echo "Done!" | tee -a ${LOGFILE}
rm -f ${OVA_TMP}/${OVFRAW}
ova_setup_ovf_template
echo -n ">>> Writing final ova image... " | tee -a ${LOGFILE}
# Create OVA file for vmware
gtar -C ${OVA_TMP} -cpf ${OVAPATH} ${PRODUCT_NAME}.ovf ${OVFVMDK}
echo "Done!" | tee -a ${LOGFILE}
rm -f ${OVA_TMP}/${OVFVMDK} >/dev/null 2>&1
echo ">>> OVA created: $(LC_ALL=C date)" | tee -a ${LOGFILE}
}
# called from create_ova_image
ova_setup_ovf_template() {
if [ ! -f ${OVFTEMPLATE} ]; then
echo ">>> ERROR: OVF template file (${OVFTEMPLATE}) not found."
print_error_pfS
fi
# OperatingSystemSection (${PRODUCT_NAME}.ovf)
# 42 FreeBSD 32-Bit
# 78 FreeBSD 64-Bit
if [ "${TARGET}" = "amd64" ]; then
local _os_id="78"
local _os_type="freebsd64Guest"
local _os_descr="FreeBSD 64-Bit"
else
echo ">>> ERROR: Platform not supported for OVA (${TARGET})"
print_error_pfS
fi
local POPULATED_SIZE=$(du -d0 -k $FINAL_CHROOT_DIR | cut -f1)
local POPULATED_SIZE_IN_BYTES=$((${POPULATED_SIZE}*1024))
local VMDK_FILE_SIZE=$(stat -f "%z" ${OVA_TMP}/${OVFVMDK})
sed \
-e "s,%%VMDK_FILE_SIZE%%,${VMDK_FILE_SIZE},g" \
-e "s,%%VMDK_DISK_CAPACITY_IN_GB%%,${VMDK_DISK_CAPACITY_IN_GB},g" \
-e "s,%%POPULATED_SIZE_IN_BYTES%%,${POPULATED_SIZE_IN_BYTES},g" \
-e "s,%%OS_ID%%,${_os_id},g" \
-e "s,%%OS_TYPE%%,${_os_type},g" \
-e "s,%%OS_DESCR%%,${_os_descr},g" \
-e "s,%%PRODUCT_NAME%%,${PRODUCT_NAME},g" \
-e "s,%%PRODUCT_NAME_SUFFIX%%,${PRODUCT_NAME_SUFFIX},g" \
-e "s,%%PRODUCT_VERSION%%,${PRODUCT_VERSION},g" \
-e "s,%%PRODUCT_URL%%,${PRODUCT_URL},g" \
-e "s#%%VENDOR_NAME%%#${VENDOR_NAME}#g" \
-e "s#%%OVF_INFO%%#${OVF_INFO}#g" \
-e "/^%%PRODUCT_LICENSE%%/r ${BUILDER_ROOT}/LICENSE" \
-e "/^%%PRODUCT_LICENSE%%/d" \
${OVFTEMPLATE} > ${OVA_TMP}/${PRODUCT_NAME}.ovf
}
# Cleans up previous builds
clean_builder() {
# Clean out directories
echo ">>> Cleaning up previous build environment...Please wait!"
staginareas_clean_each_run
if [ -d "${STAGE_CHROOT_DIR}" ]; then
echo -n ">>> Cleaning ${STAGE_CHROOT_DIR}... "
chflags -R noschg ${STAGE_CHROOT_DIR} 2>&1 >/dev/null
rm -rf ${STAGE_CHROOT_DIR}/* 2>/dev/null
echo "Done."
fi
if [ -d "${INSTALLER_CHROOT_DIR}" ]; then
echo -n ">>> Cleaning ${INSTALLER_CHROOT_DIR}... "
chflags -R noschg ${INSTALLER_CHROOT_DIR} 2>&1 >/dev/null
rm -rf ${INSTALLER_CHROOT_DIR}/* 2>/dev/null
echo "Done."
fi
if [ -z "${NO_CLEAN_FREEBSD_OBJ}" -a -d "${FREEBSD_SRC_DIR}" ]; then
OBJTREE=$(make -C ${FREEBSD_SRC_DIR} -V OBJTREE)
if [ -d "${OBJTREE}" ]; then
echo -n ">>> Cleaning FreeBSD objects dir staging..."
echo -n "."
chflags -R noschg ${OBJTREE} 2>&1 >/dev/null
echo -n "."
rm -rf ${OBJTREE}/*
echo "Done!"
fi
if [ -d "${KERNEL_BUILD_PATH}" ]; then
echo -n ">>> Cleaning previously built kernel stage area..."
rm -rf $KERNEL_BUILD_PATH/*
echo "Done!"
fi
fi
mkdir -p $KERNEL_BUILD_PATH
echo -n ">>> Cleaning previously built images..."
rm -rf $IMAGES_FINAL_DIR/*
echo "Done!"
echo -n ">>> Cleaning previous builder logs..."
if [ -d "$BUILDER_LOGS" ]; then
rm -rf ${BUILDER_LOGS}
fi
mkdir -p ${BUILDER_LOGS}
echo "Done!"
echo ">>> Cleaning of builder environment has finished."
}
clone_directory_contents() {
if [ ! -e "$2" ]; then
mkdir -p "$2"
fi
if [ ! -d "$1" -o ! -d "$2" ]; then
if [ -z "${LOGFILE}" ]; then
echo ">>> ERROR: Argument $1 supplied is not a directory!"
else
echo ">>> ERROR: Argument $1 supplied is not a directory!" | tee -a ${LOGFILE}
fi
print_error_pfS
fi
echo -n ">>> Using TAR to clone $1 to $2 ..."
tar -C ${1} -c -f - . | tar -C ${2} -x -p -f -
echo "Done!"
}
clone_to_staging_area() {
# Clone everything to the final staging area
echo -n ">>> Cloning everything to ${STAGE_CHROOT_DIR} staging area..."
LOGFILE=${BUILDER_LOGS}/cloning.${TARGET}.log
tar -C ${PRODUCT_SRC} -c -f - . | \
tar -C ${STAGE_CHROOT_DIR} -x -p -f -
if [ "${PRODUCT_NAME}" != "pfSense" ]; then
mv ${STAGE_CHROOT_DIR}/usr/local/sbin/pfSense-upgrade \
${STAGE_CHROOT_DIR}/usr/local/sbin/${PRODUCT_NAME}-upgrade
fi
mkdir -p ${STAGE_CHROOT_DIR}/etc/mtree
mtree -Pcp ${STAGE_CHROOT_DIR}/var > ${STAGE_CHROOT_DIR}/etc/mtree/var.dist
mtree -Pcp ${STAGE_CHROOT_DIR}/etc > ${STAGE_CHROOT_DIR}/etc/mtree/etc.dist
if [ -d ${STAGE_CHROOT_DIR}/usr/local/etc ]; then
mtree -Pcp ${STAGE_CHROOT_DIR}/usr/local/etc > ${STAGE_CHROOT_DIR}/etc/mtree/localetc.dist
fi
## Add buildtime and lastcommit information
# This is used for detecting updates.
echo "$BUILTDATESTRING" > $STAGE_CHROOT_DIR/etc/version.buildtime
# Record last commit info if it is available.
if [ -f $SCRATCHDIR/build_commit_info.txt ]; then
cp $SCRATCHDIR/build_commit_info.txt $STAGE_CHROOT_DIR/etc/version.lastcommit
fi
local _exclude_files="${SCRATCHDIR}/base_exclude_files"
sed \
-e "s,%%PRODUCT_NAME%%,${PRODUCT_NAME},g" \
-e "s,%%VERSION%%,${_version},g" \
${BUILDER_TOOLS}/templates/core_pkg/base/exclude_files \
> ${_exclude_files}
mkdir -p ${STAGE_CHROOT_DIR}${PRODUCT_SHARE_DIR} >/dev/null 2>&1
# Include a sample pkg stable conf to base
setup_pkg_repo \
${PKG_REPO_DEFAULT} \
${STAGE_CHROOT_DIR}${PRODUCT_SHARE_DIR}/${PRODUCT_NAME}-repo.conf \
${TARGET} \
${TARGET_ARCH}
mtree \
-c \
-k uid,gid,mode,size,flags,sha256digest \
-p ${STAGE_CHROOT_DIR} \
-X ${_exclude_files} \
> ${STAGE_CHROOT_DIR}${PRODUCT_SHARE_DIR}/base.mtree
tar \
-C ${STAGE_CHROOT_DIR} \
-cJf ${STAGE_CHROOT_DIR}${PRODUCT_SHARE_DIR}/base.txz \
-X ${_exclude_files} \
.
core_pkg_create rc "" ${CORE_PKG_VERSION} ${STAGE_CHROOT_DIR}
core_pkg_create base "" ${CORE_PKG_VERSION} ${STAGE_CHROOT_DIR}
core_pkg_create default-config "" ${CORE_PKG_VERSION} ${STAGE_CHROOT_DIR}
local DEFAULTCONF=${STAGE_CHROOT_DIR}/conf.default/config.xml
# Save the current WAN and LAN interface (if) values
local _old_wan_if=$(xml sel -t -v "${XML_ROOTOBJ}/interfaces/wan/if" ${DEFAULTCONF})
local _old_lan_if=$(xml sel -t -v "${XML_ROOTOBJ}/interfaces/lan/if" ${DEFAULTCONF})
# Change default interface names to match vmware driver
xml ed -P -L -u "${XML_ROOTOBJ}/interfaces/wan/if" -v "vmx0" ${DEFAULTCONF}
xml ed -P -L -u "${XML_ROOTOBJ}/interfaces/lan/if" -v "vmx1" ${DEFAULTCONF}
core_pkg_create default-config "vmware" ${CORE_PKG_VERSION} ${STAGE_CHROOT_DIR}
# Restore default values to be used by serial package
xml ed -P -L -u "${XML_ROOTOBJ}/interfaces/wan/if" -v "${_old_wan_if}" ${DEFAULTCONF}
xml ed -P -L -u "${XML_ROOTOBJ}/interfaces/lan/if" -v "${_old_lan_if}" ${DEFAULTCONF}
# Activate serial console in config.xml
xml ed -L -P -d "${XML_ROOTOBJ}/system/enableserial" ${DEFAULTCONF}
xml ed -P -s "${XML_ROOTOBJ}/system" -t elem -n "enableserial" \
${DEFAULTCONF} > ${DEFAULTCONF}.tmp
xml fo -t ${DEFAULTCONF}.tmp > ${DEFAULTCONF}
rm -f ${DEFAULTCONF}.tmp
echo force > ${STAGE_CHROOT_DIR}/cf/conf/enableserial_force
core_pkg_create default-config-serial "" ${CORE_PKG_VERSION} ${STAGE_CHROOT_DIR}
rm -f ${STAGE_CHROOT_DIR}/cf/conf/enableserial_force
rm -f ${STAGE_CHROOT_DIR}/cf/conf/config.xml
# Make sure pkg is present
pkg_bootstrap ${STAGE_CHROOT_DIR}
# Make sure correct repo is available on tmp dir
mkdir -p ${STAGE_CHROOT_DIR}/tmp/pkg-repos
setup_pkg_repo \
${PKG_REPO_DEFAULT} \
${STAGE_CHROOT_DIR}/tmp/pkg-repos/repo.conf \
${TARGET} \
${TARGET_ARCH} \
staging
echo "Done!"
}
create_final_staging_area() {
if [ -z "${FINAL_CHROOT_DIR}" ]; then
echo ">>> ERROR: FINAL_CHROOT_DIR is not set, cannot continue!" | tee -a ${LOGFILE}
print_error_pfS
fi
if [ -d "${FINAL_CHROOT_DIR}" ]; then
echo -n ">>> Previous ${FINAL_CHROOT_DIR} detected cleaning up..." | tee -a ${LOGFILE}
chflags -R noschg ${FINAL_CHROOT_DIR} 2>&1 1>/dev/null
rm -rf ${FINAL_CHROOT_DIR}/* 2>&1 1>/dev/null
echo "Done." | tee -a ${LOGFILE}
fi
echo ">>> Preparing Final image staging area: $(LC_ALL=C date)" 2>&1 | tee -a ${LOGFILE}
echo ">>> Cloning ${STAGE_CHROOT_DIR} to ${FINAL_CHROOT_DIR}" 2>&1 | tee -a ${LOGFILE}
clone_directory_contents ${STAGE_CHROOT_DIR} ${FINAL_CHROOT_DIR}
if [ ! -f $FINAL_CHROOT_DIR/sbin/init ]; then
echo ">>> ERROR: Something went wrong during cloning -- Please verify!" 2>&1 | tee -a ${LOGFILE}
print_error_pfS
fi
}
customize_stagearea_for_image() {
local _image_type="$1"
local _default_config="" # filled with $2 below
local _image_variant="$3"
if [ -n "$2" ]; then
_default_config="$2"
elif [ "${_image_type}" = "memstickserial" -o \
"${_image_type}" = "memstickadi" ]; then
_default_config="default-config-serial"
elif [ "${_image_type}" = "ova" ]; then
_default_config="default-config-vmware"
else
_default_config="default-config"
fi
# Prepare final stage area
create_final_staging_area
pkg_chroot_add ${FINAL_CHROOT_DIR} rc
pkg_chroot_add ${FINAL_CHROOT_DIR} base
# Mark the base/rc pkgs as vital so the user cannot casually remove them
pkg_chroot ${FINAL_CHROOT_DIR} set -v 1 -y $(get_pkg_name rc)
pkg_chroot ${FINAL_CHROOT_DIR} set -v 1 -y $(get_pkg_name base)
if [ "${_image_type}" = "iso" -o \
"${_image_type}" = "memstick" -o \
"${_image_type}" = "memstickserial" -o \
"${_image_type}" = "memstickadi" ]; then
mkdir -p ${FINAL_CHROOT_DIR}/pkgs
cp ${CORE_PKG_ALL_PATH}/*default-config*.txz ${FINAL_CHROOT_DIR}/pkgs
fi
pkg_chroot_add ${FINAL_CHROOT_DIR} ${_default_config}
# XXX: Workaround to keep pkg from complaining about the release
# repo on first boot, since packages are installed from the
# staging server during the build phase
if [ -n "${USE_PKG_REPO_STAGING}" ]; then
_read_cmd="select value from repodata where key='packagesite'"
if [ -n "${_IS_RELEASE}" -o -n "${_IS_RC}" ]; then
local _tgt_server="${PKG_REPO_SERVER_RELEASE}"
else
local _tgt_server="${PKG_REPO_SERVER_DEVEL}"
fi
for _db in ${FINAL_CHROOT_DIR}/var/db/pkg/repo-*sqlite; do
_cur=$(/usr/local/bin/sqlite3 ${_db} "${_read_cmd}")
_new=$(echo "${_cur}" | sed -e "s,^${PKG_REPO_SERVER_STAGING},${_tgt_server},")
/usr/local/bin/sqlite3 ${_db} "update repodata set value='${_new}' where key='packagesite'"
done
fi
if [ -n "$_image_variant" -a \
-d ${BUILDER_TOOLS}/templates/custom_logos/${_image_variant} ]; then
mkdir -p ${FINAL_CHROOT_DIR}/usr/local/share/${PRODUCT_NAME}/custom_logos
cp -f \
${BUILDER_TOOLS}/templates/custom_logos/${_image_variant}/*.svg \
${FINAL_CHROOT_DIR}/usr/local/share/${PRODUCT_NAME}/custom_logos
cp -f \
${BUILDER_TOOLS}/templates/custom_logos/${_image_variant}/*.css \
${FINAL_CHROOT_DIR}/usr/local/share/${PRODUCT_NAME}/custom_logos
fi
# Remove temporary repo conf
rm -rf ${FINAL_CHROOT_DIR}/tmp/pkg-repos
}
create_distribution_tarball() {
mkdir -p ${INSTALLER_CHROOT_DIR}/usr/freebsd-dist
echo -n ">>> Creating distribution tarball... " | tee -a ${LOGFILE}
tar -C ${FINAL_CHROOT_DIR} --exclude ./pkgs \
-cJf ${INSTALLER_CHROOT_DIR}/usr/freebsd-dist/base.txz .
echo "Done!" | tee -a ${LOGFILE}
echo -n ">>> Creating manifest... " | tee -a ${LOGFILE}
(cd ${INSTALLER_CHROOT_DIR}/usr/freebsd-dist && \
sh ${FREEBSD_SRC_DIR}/release/scripts/make-manifest.sh base.txz) \
> ${INSTALLER_CHROOT_DIR}/usr/freebsd-dist/MANIFEST
echo "Done!" | tee -a ${LOGFILE}
}
create_iso_image() {
local _variant="$1"
LOGFILE=${BUILDER_LOGS}/isoimage.${TARGET}
if [ -z "${ISOPATH}" ]; then
echo ">>> ISOPATH is empty skipping generation of ISO image!" | tee -a ${LOGFILE}
return
fi
echo ">>> Building bootable ISO image for ${TARGET}" | tee -a ${LOGFILE}
mkdir -p $(dirname ${ISOPATH})
local _image_path=${ISOPATH}
if [ -n "${_variant}" ]; then
_image_path=$(echo "$_image_path" | \
sed "s/${PRODUCT_NAME_SUFFIX}-/&${_variant}-/")
VARIANTIMAGES="${VARIANTIMAGES}${VARIANTIMAGES:+ }${_image_path}"
fi
customize_stagearea_for_image "iso" "" $_variant
install_default_kernel ${DEFAULT_KERNEL}
BOOTCONF=${INSTALLER_CHROOT_DIR}/boot.config
LOADERCONF=${INSTALLER_CHROOT_DIR}/boot/loader.conf
rm -f ${LOADERCONF} ${BOOTCONF} >/dev/null 2>&1
echo 'autoboot_delay="3"' > ${LOADERCONF}
echo 'kern.cam.boot_delay=10000' >> ${LOADERCONF}
cat ${LOADERCONF} > ${FINAL_CHROOT_DIR}/boot/loader.conf
create_distribution_tarball
FSLABEL=$(echo ${PRODUCT_NAME} | tr '[:lower:]' '[:upper:]')
sh ${FREEBSD_SRC_DIR}/release/${TARGET}/mkisoimages.sh -b \
${FSLABEL} \
${_image_path} \
${INSTALLER_CHROOT_DIR}
if [ ! -f "${_image_path}" ]; then
echo "ERROR! ISO image was not built"
print_error_pfS
fi
gzip -qf $_image_path &
_bg_pids="${_bg_pids}${_bg_pids:+ }$!"
echo ">>> ISO created: $(LC_ALL=C date)" | tee -a ${LOGFILE}
}
create_memstick_image() {
local _variant="$1"
LOGFILE=${BUILDER_LOGS}/memstick.${TARGET}
if [ "${MEMSTICKPATH}" = "" ]; then
echo ">>> MEMSTICKPATH is empty skipping generation of memstick image!" | tee -a ${LOGFILE}
return
fi
mkdir -p $(dirname ${MEMSTICKPATH})
local _image_path=${MEMSTICKPATH}
if [ -n "${_variant}" ]; then
_image_path=$(echo "$_image_path" | \
sed "s/-memstick-/-memstick-${_variant}-/")
VARIANTIMAGES="${VARIANTIMAGES}${VARIANTIMAGES:+ }${_image_path}"
fi
customize_stagearea_for_image "memstick" "" $_variant
install_default_kernel ${DEFAULT_KERNEL}
echo ">>> Creating memstick to ${_image_path}." 2>&1 | tee -a ${LOGFILE}
BOOTCONF=${INSTALLER_CHROOT_DIR}/boot.config
LOADERCONF=${INSTALLER_CHROOT_DIR}/boot/loader.conf
rm -f ${LOADERCONF} ${BOOTCONF} >/dev/null 2>&1
echo 'autoboot_delay="3"' > ${LOADERCONF}
echo 'kern.cam.boot_delay=10000' >> ${LOADERCONF}
cat ${LOADERCONF} > ${FINAL_CHROOT_DIR}/boot/loader.conf
create_distribution_tarball
sh ${FREEBSD_SRC_DIR}/release/${TARGET}/make-memstick.sh \
${INSTALLER_CHROOT_DIR} \
${_image_path}
if [ ! -f "${_image_path}" ]; then
echo "ERROR! memstick image was not built"
print_error_pfS
fi
gzip -qf $_image_path &
_bg_pids="${_bg_pids}${_bg_pids:+ }$!"
echo ">>> MEMSTICK created: $(LC_ALL=C date)" | tee -a ${LOGFILE}
}
create_memstick_serial_image() {
LOGFILE=${BUILDER_LOGS}/memstickserial.${TARGET}
if [ "${MEMSTICKSERIALPATH}" = "" ]; then
echo ">>> MEMSTICKSERIALPATH is empty skipping generation of memstick image!" | tee -a ${LOGFILE}
return
fi
mkdir -p $(dirname ${MEMSTICKSERIALPATH})
customize_stagearea_for_image "memstickserial"
install_default_kernel ${DEFAULT_KERNEL}
echo ">>> Creating serial memstick to ${MEMSTICKSERIALPATH}." 2>&1 | tee -a ${LOGFILE}
BOOTCONF=${INSTALLER_CHROOT_DIR}/boot.config
LOADERCONF=${INSTALLER_CHROOT_DIR}/boot/loader.conf
echo ">>> Activating serial console..." 2>&1 | tee -a ${LOGFILE}
echo "-S115200 -D" > ${BOOTCONF}
# Activate serial console+video console in loader.conf
echo 'autoboot_delay="3"' > ${LOADERCONF}
echo 'kern.cam.boot_delay=10000' >> ${LOADERCONF}
echo 'boot_multicons="YES"' >> ${LOADERCONF}
echo 'boot_serial="YES"' >> ${LOADERCONF}
echo 'console="comconsole,vidconsole"' >> ${LOADERCONF}
echo 'comconsole_speed="115200"' >> ${LOADERCONF}
cat ${BOOTCONF} >> ${FINAL_CHROOT_DIR}/boot.config
cat ${LOADERCONF} >> ${FINAL_CHROOT_DIR}/boot/loader.conf
create_distribution_tarball
sh ${FREEBSD_SRC_DIR}/release/${TARGET}/make-memstick.sh \
${INSTALLER_CHROOT_DIR} \
${MEMSTICKSERIALPATH}
if [ ! -f "${MEMSTICKSERIALPATH}" ]; then
echo "ERROR! memstick serial image was not built"
print_error_pfS
fi
gzip -qf $MEMSTICKSERIALPATH &
_bg_pids="${_bg_pids}${_bg_pids:+ }$!"
echo ">>> MEMSTICKSERIAL created: $(LC_ALL=C date)" | tee -a ${LOGFILE}
}
create_memstick_adi_image() {
LOGFILE=${BUILDER_LOGS}/memstickadi.${TARGET}
if [ "${MEMSTICKADIPATH}" = "" ]; then
echo ">>> MEMSTICKADIPATH is empty skipping generation of memstick image!" | tee -a ${LOGFILE}
return
fi
mkdir -p $(dirname ${MEMSTICKADIPATH})
customize_stagearea_for_image "memstickadi"
install_default_kernel ${DEFAULT_KERNEL}
echo ">>> Creating serial memstick to ${MEMSTICKADIPATH}." 2>&1 | tee -a ${LOGFILE}
BOOTCONF=${INSTALLER_CHROOT_DIR}/boot.config
LOADERCONF=${INSTALLER_CHROOT_DIR}/boot/loader.conf
echo ">>> Activating serial console..." 2>&1 | tee -a ${LOGFILE}
echo "-S115200 -h" > ${BOOTCONF}
# Activate serial console+video console in loader.conf
echo 'autoboot_delay="3"' > ${LOADERCONF}
echo 'kern.cam.boot_delay=10000' >> ${LOADERCONF}
echo 'boot_serial="YES"' >> ${LOADERCONF}
echo 'console="comconsole"' >> ${LOADERCONF}
echo 'comconsole_speed="115200"' >> ${LOADERCONF}
echo 'comconsole_port="0x2F8"' >> ${LOADERCONF}
echo 'hint.uart.0.flags="0x00"' >> ${LOADERCONF}
echo 'hint.uart.1.flags="0x10"' >> ${LOADERCONF}
cat ${BOOTCONF} >> ${FINAL_CHROOT_DIR}/boot.config
cat ${LOADERCONF} >> ${FINAL_CHROOT_DIR}/boot/loader.conf
create_distribution_tarball
sh ${FREEBSD_SRC_DIR}/release/${TARGET}/make-memstick.sh \
${INSTALLER_CHROOT_DIR} \
${MEMSTICKADIPATH}
if [ ! -f "${MEMSTICKADIPATH}" ]; then
echo "ERROR! memstick ADI image was not built"
print_error_pfS
fi
gzip -qf $MEMSTICKADIPATH &
_bg_pids="${_bg_pids}${_bg_pids:+ }$!"
echo ">>> MEMSTICKADI created: $(LC_ALL=C date)" | tee -a ${LOGFILE}
}
# Create pkg conf on desired place with desired arch/branch
setup_pkg_repo() {
if [ -z "${4}" ]; then
return
fi
local _template="${1}"
local _target="${2}"
local _arch="${3}"
local _target_arch="${4}"
local _staging="${5}"
if [ -z "${_template}" -o ! -f "${_template}" ]; then
echo ">>> ERROR: It was not possible to find pkg conf template ${_template}"
print_error_pfS
fi
if [ -n "${_staging}" -a -n "${USE_PKG_REPO_STAGING}" ]; then
local _pkg_repo_server_devel=${PKG_REPO_SERVER_STAGING}
local _pkg_repo_branch_devel=${PKG_REPO_BRANCH_STAGING}
local _pkg_repo_server_release=${PKG_REPO_SERVER_STAGING}
local _pkg_repo_branch_release=${PKG_REPO_BRANCH_STAGING}
else
local _pkg_repo_server_devel=${PKG_REPO_SERVER_DEVEL}
local _pkg_repo_branch_devel=${PKG_REPO_BRANCH_DEVEL}
local _pkg_repo_server_release=${PKG_REPO_SERVER_RELEASE}
local _pkg_repo_branch_release=${PKG_REPO_BRANCH_RELEASE}
fi
mkdir -p $(dirname ${_target}) >/dev/null 2>&1
sed \
-e "s/%%ARCH%%/${_target_arch}/" \
-e "s/%%PKG_REPO_BRANCH_DEVEL%%/${_pkg_repo_branch_devel}/g" \
-e "s/%%PKG_REPO_BRANCH_RELEASE%%/${_pkg_repo_branch_release}/g" \
-e "s,%%PKG_REPO_SERVER_DEVEL%%,${_pkg_repo_server_devel},g" \
-e "s,%%PKG_REPO_SERVER_RELEASE%%,${_pkg_repo_server_release},g" \
-e "s,%%POUDRIERE_PORTS_NAME%%,${POUDRIERE_PORTS_NAME},g" \
-e "s/%%PRODUCT_NAME%%/${PRODUCT_NAME}/g" \
${_template} \
> ${_target}
}
# This routine ensures any ports / binaries that the builder
# system needs are on disk and ready for execution.
builder_setup() {
# If Product-builder is already installed, just leave
if pkg info -e -q ${PRODUCT_NAME}-builder; then
return
fi
if [ ! -f ${PKG_REPO_PATH} ]; then
[ -d $(dirname ${PKG_REPO_PATH}) ] \
|| mkdir -p $(dirname ${PKG_REPO_PATH})
update_freebsd_sources
local _arch=$(uname -m)
setup_pkg_repo \
${PKG_REPO_DEFAULT} \
${PKG_REPO_PATH} \
${_arch} \
${_arch} \
"staging"
# Use fingerprint keys from repo
sed -i '' -e "/fingerprints:/ s,\"/,\"${BUILDER_ROOT}/src/," \
${PKG_REPO_PATH}
fi
pkg install ${PRODUCT_NAME}-builder
}
# Updates FreeBSD sources
update_freebsd_sources() {
if [ "${1}" = "full" ]; then
local _full=1
local _clone_params=""
else
local _full=0
local _clone_params="--depth 1 --single-branch"
fi
if [ -n "${NO_BUILDWORLD}" -a -n "${NO_BUILDKERNEL}" ]; then
echo ">>> NO_BUILDWORLD and NO_BUILDKERNEL set, skipping update of freebsd sources" | tee -a ${LOGFILE}
return
fi
echo ">>> Obtaining FreeBSD sources (${FREEBSD_BRANCH})..."
${BUILDER_SCRIPTS}/git_checkout.sh \
-r ${FREEBSD_REPO_BASE} \
-d ${FREEBSD_SRC_DIR} \
-b ${FREEBSD_BRANCH}
if [ $? -ne 0 -o ! -d "${FREEBSD_SRC_DIR}/.git" ]; then
echo ">>> ERROR: It was not possible to clone FreeBSD src repo"
print_error_pfS
fi
if [ -n "${GIT_FREEBSD_COSHA1}" ]; then
echo -n ">>> Checking out desired commit (${GIT_FREEBSD_COSHA1})... "
( git -C ${FREEBSD_SRC_DIR} checkout ${GIT_FREEBSD_COSHA1} ) 2>&1 | \
grep -C3 -i -E 'error|fatal'
echo "Done!"
fi
if [ "${PRODUCT_NAME}" = "pfSense" -a -n "${GNID_REPO_BASE}" ]; then
echo ">>> Obtaining gnid sources..."
${BUILDER_SCRIPTS}/git_checkout.sh \
-r ${GNID_REPO_BASE} \
-d ${GNID_SRC_DIR} \
-b ${GNID_BRANCH}
fi
}
pkg_chroot() {
local _root="${1}"
shift
if [ $# -eq 0 ]; then
return 1
fi
if [ -z "${_root}" -o "${_root}" = "/" -o ! -d "${_root}" ]; then
return 1
fi
mkdir -p \
${SCRATCHDIR}/pkg_cache \
${_root}/var/cache/pkg \
${_root}/dev
/sbin/mount -t nullfs ${SCRATCHDIR}/pkg_cache ${_root}/var/cache/pkg
/sbin/mount -t devfs devfs ${_root}/dev
cp -f /etc/resolv.conf ${_root}/etc/resolv.conf
touch ${BUILDER_LOGS}/install_pkg_install_ports.txt
local _params=""
if [ -f "${_root}/tmp/pkg-repos/repo.conf" ]; then
_params="--repo-conf-dir /tmp/pkg-repos "
fi
script -aq ${BUILDER_LOGS}/install_pkg_install_ports.txt \
chroot ${_root} pkg ${_params}$@ >/dev/null 2>&1
local result=$?
rm -f ${_root}/etc/resolv.conf
/sbin/umount -f ${_root}/dev
/sbin/umount -f ${_root}/var/cache/pkg
return $result
}
pkg_chroot_add() {
if [ -z "${1}" -o -z "${2}" ]; then
return 1
fi
local _target="${1}"
local _pkg="$(get_pkg_name ${2}).txz"
if [ ! -d "${_target}" ]; then
echo ">>> ERROR: Target dir ${_target} not found"
print_error_pfS
fi
if [ ! -f ${CORE_PKG_ALL_PATH}/${_pkg} ]; then
echo ">>> ERROR: Package ${_pkg} not found"
print_error_pfS
fi
cp ${CORE_PKG_ALL_PATH}/${_pkg} ${_target}
pkg_chroot ${_target} add /${_pkg}
rm -f ${_target}/${_pkg}
}
pkg_bootstrap() {
local _root=${1:-"${STAGE_CHROOT_DIR}"}
setup_pkg_repo \
${PKG_REPO_DEFAULT} \
${_root}${PKG_REPO_PATH} \
${TARGET} \
${TARGET_ARCH} \
"staging"
pkg_chroot ${_root} bootstrap -f
}
# This routine assists with installing various
# freebsd ports files into the pfsense-fs staging
# area.
install_pkg_install_ports() {
local MAIN_PKG="${1}"
if [ -z "${MAIN_PKG}" ]; then
MAIN_PKG=${PRODUCT_NAME}
fi
echo ">>> Installing pkg repository in chroot (${STAGE_CHROOT_DIR})..."
[ -d ${STAGE_CHROOT_DIR}/var/cache/pkg ] || \
mkdir -p ${STAGE_CHROOT_DIR}/var/cache/pkg
[ -d ${SCRATCHDIR}/pkg_cache ] || \
mkdir -p ${SCRATCHDIR}/pkg_cache
echo -n ">>> Installing built ports (packages) in chroot (${STAGE_CHROOT_DIR})... "
# First mark all packages as automatically installed
pkg_chroot ${STAGE_CHROOT_DIR} set -A 1 -a
# Install all necessary packages
if ! pkg_chroot ${STAGE_CHROOT_DIR} install ${MAIN_PKG} ${custom_package_list}; then
echo "Failed!"
print_error_pfS
fi
# Make sure required packages are set as non-automatic
pkg_chroot ${STAGE_CHROOT_DIR} set -A 0 pkg ${MAIN_PKG} ${custom_package_list}
# pkg and MAIN_PKG are vital
pkg_chroot ${STAGE_CHROOT_DIR} set -y -v 1 pkg ${MAIN_PKG}
# Remove unnecessary packages
pkg_chroot ${STAGE_CHROOT_DIR} autoremove
echo "Done!"
}
staginareas_clean_each_run() {
echo -n ">>> Cleaning build directories: "
if [ -d "${FINAL_CHROOT_DIR}" ]; then
BASENAME=$(basename ${FINAL_CHROOT_DIR})
echo -n "$BASENAME "
chflags -R noschg ${FINAL_CHROOT_DIR} 2>&1 >/dev/null
rm -rf ${FINAL_CHROOT_DIR}/* 2>/dev/null
fi
echo "Done!"
}
# Imported from FreeSBIE
buildkernel() {
local _kernconf=${1:-${KERNCONF}}
if [ -n "${NO_BUILDKERNEL}" ]; then
echo ">>> NO_BUILDKERNEL set, skipping build" | tee -a ${LOGFILE}
return
fi
if [ -z "${_kernconf}" ]; then
echo ">>> ERROR: No kernel configuration defined probably this is not what you want! STOPPING!" | tee -a ${LOGFILE}
print_error_pfS
fi
local _old_kernconf=${KERNCONF}
export KERNCONF=${_kernconf}
echo ">>> $(LC_ALL=C date) - Starting build kernel for ${TARGET} architecture..." | tee -a ${LOGFILE}
script -aq $LOGFILE ${BUILDER_SCRIPTS}/build_freebsd.sh -W -s ${FREEBSD_SRC_DIR} \
|| print_error_pfS
echo ">>> $(LC_ALL=C date) - Finished build kernel for ${TARGET} architecture..." | tee -a ${LOGFILE}
export KERNCONF=${_old_kernconf}
}
# Imported from FreeSBIE
installkernel() {
local _destdir=${1:-${KERNEL_DESTDIR}}
local _kernconf=${2:-${KERNCONF}}
if [ -z "${_kernconf}" ]; then
echo ">>> ERROR: No kernel configuration defined probably this is not what you want! STOPPING!" | tee -a ${LOGFILE}
print_error_pfS
fi
local _old_kernconf=${KERNCONF}
export KERNCONF=${_kernconf}
mkdir -p ${STAGE_CHROOT_DIR}/boot
echo ">>> Installing kernel (${_kernconf}) for ${TARGET} architecture..." | tee -a ${LOGFILE}
script -aq $LOGFILE ${BUILDER_SCRIPTS}/install_freebsd.sh -W -D -z \
-s ${FREEBSD_SRC_DIR} \
-d ${_destdir} \
|| print_error_pfS
export KERNCONF=${_old_kernconf}
}
# launch is run first to set up a few variables that we need
# Imported from FreeSBIE
launch() {
if [ "$(id -u)" != "0" ]; then
echo "Sorry, this must be done as root."
fi
echo ">>> Operation $0 has started at $(date)"
}
finish() {
echo ">>> Operation $0 has ended at $(date)"
}
pkg_repo_rsync() {
local _repo_path_param="${1}"
local _ignore_final_rsync="${2}"
if [ -z "${_repo_path_param}" -o ! -d "${_repo_path_param}" ]; then
return
fi
if [ -n "${SKIP_FINAL_RSYNC}" ]; then
_ignore_final_rsync="1"
fi
# Sanitize path
_repo_path=$(realpath ${_repo_path_param})
local _repo_dir=$(dirname ${_repo_path})
local _repo_base=$(basename ${_repo_path})
# Add /./: an rsync trick that makes rsync chdir to that directory before sending it
_repo_path="${_repo_dir}/./${_repo_base}"
if [ -z "${LOGFILE}" ]; then
local _logfile="/dev/null"
else
local _logfile="${LOGFILE}"
fi
if [ -n "${PKG_REPO_SIGNING_COMMAND}" -a -z "${DO_NOT_SIGN_PKG_REPO}" ]; then
# Detect poudriere directory structure
if [ -L "${_repo_path}/.latest" ]; then
local _real_repo_path=$(readlink -f ${_repo_path}/.latest)
else
local _real_repo_path=${_repo_path}
fi
echo -n ">>> Signing repository... " | tee -a ${_logfile}
############ ATTENTION ##############
#
# For some reason pkg repo fails without a trailing / at the end of the
# directory name, so removing it will break the command
#
# https://github.com/freebsd/pkg/issues/1364
#
if script -aq ${_logfile} pkg repo ${_real_repo_path}/ \
signing_command: ${PKG_REPO_SIGNING_COMMAND} >/dev/null 2>&1; then
echo "Done!" | tee -a ${_logfile}
else
echo "Failed!" | tee -a ${_logfile}
echo ">>> ERROR: An error occurred trying to sign repo"
print_error_pfS
fi
local _pkgfile="${_repo_path}/Latest/pkg.txz"
if [ -e ${_pkgfile} ]; then
echo -n ">>> Signing Latest/pkg.txz for bootstraping... " | tee -a ${_logfile}
if sha256 -q ${_pkgfile} | ${PKG_REPO_SIGNING_COMMAND} \
> ${_pkgfile}.sig 2>/dev/null; then
echo "Done!" | tee -a ${_logfile}
else
echo "Failed!" | tee -a ${_logfile}
echo ">>> ERROR: An error occurred trying to sign Latest/pkg.txz"
print_error_pfS
fi
fi
fi
if [ -z "${UPLOAD}" ]; then
return
fi
for _pkg_rsync_hostname in ${PKG_RSYNC_HOSTNAME}; do
# Make sure destination directory exist
ssh -p ${PKG_RSYNC_SSH_PORT} \
${PKG_RSYNC_USERNAME}@${_pkg_rsync_hostname} \
"mkdir -p ${PKG_RSYNC_DESTDIR}"
echo -n ">>> Sending updated repository to ${_pkg_rsync_hostname}... " | tee -a ${_logfile}
if script -aq ${_logfile} rsync -Have "ssh -p ${PKG_RSYNC_SSH_PORT}" \
--timeout=60 --delete-delay ${_repo_path} \
${PKG_RSYNC_USERNAME}@${_pkg_rsync_hostname}:${PKG_RSYNC_DESTDIR} >/dev/null 2>&1
then
echo "Done!" | tee -a ${_logfile}
else
echo "Failed!" | tee -a ${_logfile}
echo ">>> ERROR: An error occurred sending repo to remote hostname"
print_error_pfS
fi
if [ -z "${USE_PKG_REPO_STAGING}" -o -n "${_ignore_final_rsync}" ]; then
return
fi
if [ -n "${_IS_RELEASE}" -o "${_repo_path_param}" = "${CORE_PKG_PATH}" ]; then
for _pkg_final_rsync_hostname in ${PKG_FINAL_RSYNC_HOSTNAME}; do
# Send .real* directories first to prevent having a broken repo while transfer happens
local _cmd="rsync -Have \"ssh -p ${PKG_FINAL_RSYNC_SSH_PORT}\" \
--timeout=60 ${PKG_RSYNC_DESTDIR}/./${_repo_base%%-core}* \
--include=\"/*\" --include=\"*/.real*\" --include=\"*/.real*/***\" \
--exclude=\"*\" \
${PKG_FINAL_RSYNC_USERNAME}@${_pkg_final_rsync_hostname}:${PKG_FINAL_RSYNC_DESTDIR}"
echo -n ">>> Sending updated packages to ${_pkg_final_rsync_hostname}... " | tee -a ${_logfile}
if script -aq ${_logfile} ssh -p ${PKG_RSYNC_SSH_PORT} \
${PKG_RSYNC_USERNAME}@${_pkg_rsync_hostname} ${_cmd} >/dev/null 2>&1; then
echo "Done!" | tee -a ${_logfile}
else
echo "Failed!" | tee -a ${_logfile}
echo ">>> ERROR: An error occurred sending repo to final hostname"
print_error_pfS
fi
_cmd="rsync -Have \"ssh -p ${PKG_FINAL_RSYNC_SSH_PORT}\" \
--timeout=60 --delete-delay ${PKG_RSYNC_DESTDIR}/./${_repo_base%%-core}* \
${PKG_FINAL_RSYNC_USERNAME}@${_pkg_final_rsync_hostname}:${PKG_FINAL_RSYNC_DESTDIR}"
echo -n ">>> Sending updated repositories metadata to ${_pkg_final_rsync_hostname}... " | tee -a ${_logfile}
if script -aq ${_logfile} ssh -p ${PKG_RSYNC_SSH_PORT} \
${PKG_RSYNC_USERNAME}@${_pkg_rsync_hostname} ${_cmd} >/dev/null 2>&1; then
echo "Done!" | tee -a ${_logfile}
else
echo "Failed!" | tee -a ${_logfile}
echo ">>> ERROR: An error occurred sending repo to final hostname"
print_error_pfS
fi
done
fi
done
}
poudriere_possible_archs() {
local _arch=$(uname -m)
local _archs=""
# If the host is amd64 we build for amd64, and additionally for armv6 when possible
if [ "${_arch}" = "amd64" ]; then
_archs="amd64.amd64"
if [ -f /usr/local/bin/qemu-arm-static ]; then
# Make sure binmiscctl is ok
/usr/local/etc/rc.d/qemu_user_static forcestart >/dev/null 2>&1
if binmiscctl lookup armv6 >/dev/null 2>&1; then
_archs="${_archs} arm.armv6"
fi
fi
fi
if [ -n "${ARCH_LIST}" ]; then
local _found=0
for _desired_arch in ${ARCH_LIST}; do
_found=0
for _possible_arch in ${_archs}; do
if [ "${_desired_arch}" = "${_possible_arch}" ]; then
_found=1
break
fi
done
if [ ${_found} -eq 0 ]; then
echo ">>> ERROR: Impossible to build for arch: ${_desired_arch}"
print_error_pfS
fi
done
_archs="${ARCH_LIST}"
fi
echo ${_archs}
}
poudriere_jail_name() {
local _jail_arch="${1}"
if [ -z "${_jail_arch}" ]; then
return 1
fi
# Keep only the arch suffix (strip the '<family>.' prefix)
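# e.g. jail_arch "arm.armv6" -> "${PRODUCT_NAME}_${POUDRIERE_BRANCH}_armv6"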
echo "${PRODUCT_NAME}_${POUDRIERE_BRANCH}_${_jail_arch##*.}"
}
poudriere_rename_ports() {
if [ "${PRODUCT_NAME}" = "pfSense" ]; then
return;
fi
LOGFILE=${BUILDER_LOGS}/poudriere.log
local _ports_dir="/usr/local/poudriere/ports/${POUDRIERE_PORTS_NAME}"
echo -n ">>> Renaming product ports on ${POUDRIERE_PORTS_NAME}... " | tee -a ${LOGFILE}
for d in $(find ${_ports_dir} -depth 2 -type d -name '*pfSense*'); do
local _pdir=$(dirname ${d})
local _pname=$(echo $(basename ${d}) | sed "s,pfSense,${PRODUCT_NAME},")
local _plist=""
if [ -e ${_pdir}/${_pname} ]; then
rm -rf ${_pdir}/${_pname}
fi
cp -r ${d} ${_pdir}/${_pname}
if [ -f ${_pdir}/${_pname}/pkg-plist ]; then
_plist=${_pdir}/${_pname}/pkg-plist
fi
sed -i '' -e "s,pfSense,${PRODUCT_NAME},g" \
-e "s,https://www.pfsense.org,${PRODUCT_URL},g" \
-e "/^MAINTAINER=/ s,^.*$,MAINTAINER= ${PRODUCT_EMAIL}," \
${_pdir}/${_pname}/Makefile \
${_pdir}/${_pname}/pkg-descr ${_plist}
# PHP module is special
if echo "${_pname}" | grep -q "^php[0-9]*-${PRODUCT_NAME}-module"; then
local _product_capital=$(echo ${PRODUCT_NAME} | tr '[a-z]' '[A-Z]')
sed -i '' -e "s,PHP_PFSENSE,PHP_${_product_capital},g" \
-e "s,PFSENSE_SHARED_LIBADD,${_product_capital}_SHARED_LIBADD,g" \
-e "s,pfSense,${PRODUCT_NAME},g" \
-e "s,${PRODUCT_NAME}\.c,pfSense.c,g" \
${_pdir}/${_pname}/files/config.m4
sed -i '' -e "s,COMPILE_DL_PFSENSE,COMPILE_DL_${_product_capital}," \
-e "s,pfSense_module_entry,${PRODUCT_NAME}_module_entry,g" \
-e "/ZEND_GET_MODULE/ s,pfSense,${PRODUCT_NAME}," \
-e "/PHP_PFSENSE_WORLD_EXTNAME/ s,pfSense,${PRODUCT_NAME}," \
${_pdir}/${_pname}/files/pfSense.c \
${_pdir}/${_pname}/files/php_pfSense.h
fi
if [ -d ${_pdir}/${_pname}/files ]; then
for fd in $(find ${_pdir}/${_pname}/files -type d -name '*pfSense*'); do
local _fddir=$(dirname ${fd})
local _fdname=$(echo $(basename ${fd}) | sed "s,pfSense,${PRODUCT_NAME},")
mv ${fd} ${_fddir}/${_fdname}
done
fi
done
echo "Done!" | tee -a ${LOGFILE}
}
poudriere_create_ports_tree() {
LOGFILE=${BUILDER_LOGS}/poudriere.log
if ! poudriere ports -l | grep -q -E "^${POUDRIERE_PORTS_NAME}[[:blank:]]"; then
local _branch=""
if [ -z "${POUDRIERE_PORTS_GIT_URL}" ]; then
echo ">>> ERROR: POUDRIERE_PORTS_GIT_URL is not defined"
print_error_pfS
fi
if [ -n "${POUDRIERE_PORTS_GIT_BRANCH}" ]; then
_branch="-B ${POUDRIERE_PORTS_GIT_BRANCH}"
fi
echo -n ">>> Creating poudriere ports tree, it may take some time... " | tee -a ${LOGFILE}
if ! script -aq ${LOGFILE} poudriere ports -c -p "${POUDRIERE_PORTS_NAME}" -m git -U ${POUDRIERE_PORTS_GIT_URL} ${_branch} >/dev/null 2>&1; then
echo "" | tee -a ${LOGFILE}
echo ">>> ERROR: Error creating poudriere ports tree, aborting..." | tee -a ${LOGFILE}
print_error_pfS
fi
echo "Done!" | tee -a ${LOGFILE}
poudriere_rename_ports
fi
}
poudriere_init() {
local _error=0
local _archs=$(poudriere_possible_archs)
LOGFILE=${BUILDER_LOGS}/poudriere.log
# Sanity checks
if [ -z "${ZFS_TANK}" ]; then
echo ">>> ERROR: \$ZFS_TANK is empty" | tee -a ${LOGFILE}
_error=1
fi
if [ -z "${ZFS_ROOT}" ]; then
echo ">>> ERROR: \$ZFS_ROOT is empty" | tee -a ${LOGFILE}
_error=1
fi
if [ -z "${POUDRIERE_PORTS_NAME}" ]; then
echo ">>> ERROR: \$POUDRIERE_PORTS_NAME is empty" | tee -a ${LOGFILE}
_error=1
fi
if [ ${_error} -eq 1 ]; then
print_error_pfS
fi
# Check if zpool exists
if ! zpool list ${ZFS_TANK} >/dev/null 2>&1; then
echo ">>> ERROR: ZFS tank ${ZFS_TANK} not found, please create it and try again..." | tee -a ${LOGFILE}
print_error_pfS
fi
# Check if zfs rootfs exists
if ! zfs list ${ZFS_TANK}${ZFS_ROOT} >/dev/null 2>&1; then
echo -n ">>> Creating ZFS filesystem ${ZFS_TANK}${ZFS_ROOT}... "
if zfs create -o atime=off -o mountpoint=/usr/local${ZFS_ROOT} \
${ZFS_TANK}${ZFS_ROOT} >/dev/null 2>&1; then
echo "Done!"
else
echo "Failed!"
print_error_pfS
fi
fi
# Make sure poudriere is installed
if ! pkg info --quiet poudriere-devel; then
echo ">>> Installing poudriere-devel..." | tee -a ${LOGFILE}
if ! pkg install poudriere-devel >/dev/null 2>&1; then
echo ">>> ERROR: poudriere-devel was not installed, aborting..." | tee -a ${LOGFILE}
print_error_pfS
fi
fi
# Create poudriere.conf
if [ -z "${POUDRIERE_PORTS_GIT_URL}" ]; then
echo ">>> ERROR: POUDRIERE_PORTS_GIT_URL is not defined"
print_error_pfS
fi
echo ">>> Creating poudriere.conf" | tee -a ${LOGFILE}
cat <<EOF >/usr/local/etc/poudriere.conf
ZPOOL=${ZFS_TANK}
ZROOTFS=${ZFS_ROOT}
RESOLV_CONF=/etc/resolv.conf
BASEFS=/usr/local/poudriere
USE_PORTLINT=no
USE_TMPFS=yes
NOLINUX=yes
DISTFILES_CACHE=/usr/ports/distfiles
CHECK_CHANGED_OPTIONS=yes
CHECK_CHANGED_DEPS=yes
ATOMIC_PACKAGE_REPOSITORY=yes
COMMIT_PACKAGES_ON_FAILURE=no
KEEP_OLD_PACKAGES=yes
KEEP_OLD_PACKAGES_COUNT=5
EOF
# Create specific items conf
[ ! -d /usr/local/etc/poudriere.d ] \
&& mkdir -p /usr/local/etc/poudriere.d
# Create DISTFILES_CACHE if it doesn't exist
if [ ! -d /usr/ports/distfiles ]; then
mkdir -p /usr/ports/distfiles
fi
# Remove old jails
for jail_arch in ${_archs}; do
jail_name=$(poudriere_jail_name ${jail_arch})
if poudriere jail -i -j "${jail_name}" >/dev/null 2>&1; then
echo ">>> Poudriere jail ${jail_name} already exists, deleting it..." | tee -a ${LOGFILE}
poudriere jail -d -j "${jail_name}" >/dev/null 2>&1
fi
done
# Remove old ports tree
if poudriere ports -l | grep -q -E "^${POUDRIERE_PORTS_NAME}[[:blank:]]"; then
echo ">>> Poudriere ports tree ${POUDRIERE_PORTS_NAME} already exists, deleting it..." | tee -a ${LOGFILE}
poudriere ports -d -p "${POUDRIERE_PORTS_NAME}"
fi
local native_xtools=""
# Now we are ready to create jails
for jail_arch in ${_archs}; do
jail_name=$(poudriere_jail_name ${jail_arch})
if [ "${jail_arch}" = "arm.armv6" ]; then
native_xtools="-x"
else
native_xtools=""
fi
echo -n ">>> Creating jail ${jail_name}, it may take some time... " | tee -a ${LOGFILE}
if ! script -aq ${LOGFILE} poudriere jail -c -j "${jail_name}" -v ${FREEBSD_BRANCH} \
-a ${jail_arch} -m git -U ${FREEBSD_REPO_BASE_POUDRIERE} ${native_xtools} >/dev/null 2>&1; then
echo "" | tee -a ${LOGFILE}
echo ">>> ERROR: Error creating jail ${jail_name}, aborting..." | tee -a ${LOGFILE}
print_error_pfS
fi
echo "Done!" | tee -a ${LOGFILE}
done
poudriere_create_ports_tree
echo ">>> Poudriere is now configured!" | tee -a ${LOGFILE}
}
poudriere_update_jails() {
local _archs=$(poudriere_possible_archs)
LOGFILE=${BUILDER_LOGS}/poudriere.log
local native_xtools=""
for jail_arch in ${_archs}; do
jail_name=$(poudriere_jail_name ${jail_arch})
local _create_or_update="-u"
local _create_or_update_text="Updating"
if ! poudriere jail -i -j "${jail_name}" >/dev/null 2>&1; then
echo ">>> Poudriere jail ${jail_name} not found, creating..." | tee -a ${LOGFILE}
_create_or_update="-c -v ${FREEBSD_BRANCH} -a ${jail_arch} -m git -U ${FREEBSD_REPO_BASE_POUDRIERE}"
_create_or_update_text="Creating"
fi
if [ "${jail_arch}" = "arm.armv6" ]; then
native_xtools="-x"
else
native_xtools=""
fi
echo -n ">>> ${_create_or_update_text} jail ${jail_name}, it may take some time... " | tee -a ${LOGFILE}
if ! script -aq ${LOGFILE} poudriere jail ${_create_or_update} -j "${jail_name}" ${native_xtools} >/dev/null 2>&1; then
echo "" | tee -a ${LOGFILE}
echo ">>> ERROR: Error ${_create_or_update_text} jail ${jail_name}, aborting..." | tee -a ${LOGFILE}
print_error_pfS
fi
echo "Done!" | tee -a ${LOGFILE}
done
}
poudriere_update_ports() {
LOGFILE=${BUILDER_LOGS}/poudriere.log
# Create ports tree if necessary
if ! poudriere ports -l | grep -q -E "^${POUDRIERE_PORTS_NAME}[[:blank:]]"; then
poudriere_create_ports_tree
else
echo -n ">>> Resetting local changes on ports tree ${POUDRIERE_PORTS_NAME}... " | tee -a ${LOGFILE}
script -aq ${LOGFILE} git -C "/usr/local/poudriere/ports/${POUDRIERE_PORTS_NAME}" reset --hard >/dev/null 2>&1
script -aq ${LOGFILE} git -C "/usr/local/poudriere/ports/${POUDRIERE_PORTS_NAME}" clean -fd >/dev/null 2>&1
echo "Done!" | tee -a ${LOGFILE}
echo -n ">>> Updating ports tree ${POUDRIERE_PORTS_NAME}... " | tee -a ${LOGFILE}
script -aq ${LOGFILE} poudriere ports -u -p "${POUDRIERE_PORTS_NAME}" >/dev/null 2>&1
echo "Done!" | tee -a ${LOGFILE}
poudriere_rename_ports
fi
}
poudriere_bulk() {
local _archs=$(poudriere_possible_archs)
LOGFILE=${BUILDER_LOGS}/poudriere.log
if [ -n "${UPLOAD}" -a -z "${PKG_RSYNC_HOSTNAME}" ]; then
echo ">>> ERROR: PKG_RSYNC_HOSTNAME is not set"
print_error_pfS
fi
rm -f ${LOGFILE}
poudriere_create_ports_tree
[ -d /usr/local/etc/poudriere.d ] || \
mkdir -p /usr/local/etc/poudriere.d
if [ -f "${BUILDER_TOOLS}/conf/pfPorts/make.conf" ]; then
cp -f "${BUILDER_TOOLS}/conf/pfPorts/make.conf" \
/usr/local/etc/poudriere.d/${POUDRIERE_PORTS_NAME}-make.conf
fi
cat <<EOF >>/usr/local/etc/poudriere.d/${POUDRIERE_PORTS_NAME}-make.conf
PKG_REPO_BRANCH_DEVEL=${PKG_REPO_BRANCH_DEVEL}
PKG_REPO_BRANCH_RELEASE=${PKG_REPO_BRANCH_RELEASE}
PKG_REPO_SERVER_DEVEL=${PKG_REPO_SERVER_DEVEL}
PKG_REPO_SERVER_RELEASE=${PKG_REPO_SERVER_RELEASE}
POUDRIERE_PORTS_NAME=${POUDRIERE_PORTS_NAME}
PRODUCT_NAME=${PRODUCT_NAME}
EOF
# Change version of pfSense meta ports for snapshots
if [ -z "${_IS_RELEASE}" ]; then
local _meta_pkg_version="$(echo "${PRODUCT_VERSION}" | sed 's,DEVELOPMENT,ALPHA,')-${DATESTRING}"
sed -i '' \
-e "/^DISTVERSION/ s,^.*,DISTVERSION= ${_meta_pkg_version}," \
-e "/^PORTREVISION=/d" \
/usr/local/poudriere/ports/${POUDRIERE_PORTS_NAME}/security/${PRODUCT_NAME}/Makefile \
/usr/local/poudriere/ports/${POUDRIERE_PORTS_NAME}/sysutils/${PRODUCT_NAME}-repo/Makefile
fi
# Copy over pkg repo templates to pfSense-repo
mkdir -p /usr/local/poudriere/ports/${POUDRIERE_PORTS_NAME}/sysutils/${PRODUCT_NAME}-repo/files
cp -f ${PKG_REPO_BASE}/* \
/usr/local/poudriere/ports/${POUDRIERE_PORTS_NAME}/sysutils/${PRODUCT_NAME}-repo/files
for jail_arch in ${_archs}; do
jail_name=$(poudriere_jail_name ${jail_arch})
if ! poudriere jail -i -j "${jail_name}" >/dev/null 2>&1; then
echo ">>> Poudriere jail ${jail_name} not found, skipping..." | tee -a ${LOGFILE}
continue
fi
if [ -f "${POUDRIERE_BULK}.${jail_arch}" ]; then
_ref_bulk="${POUDRIERE_BULK}.${jail_arch}"
else
_ref_bulk="${POUDRIERE_BULK}"
fi
_bulk=${SCRATCHDIR}/poudriere_bulk.${POUDRIERE_BRANCH}
sed -e "s,%%PRODUCT_NAME%%,${PRODUCT_NAME},g" ${_ref_bulk} > ${_bulk}
local _exclude_bulk="${POUDRIERE_BULK}.exclude.${jail_arch}"
if [ -f "${_exclude_bulk}" ]; then
mv ${_bulk} ${_bulk}.tmp
sed -e "s,%%PRODUCT_NAME%%,${PRODUCT_NAME},g" ${_exclude_bulk} > ${_bulk}.exclude
cat ${_bulk}.tmp ${_bulk}.exclude | sort | uniq -u > ${_bulk}
rm -f ${_bulk}.tmp ${_bulk}.exclude
fi
if ! poudriere bulk -f ${_bulk} -j ${jail_name} -p ${POUDRIERE_PORTS_NAME}; then
echo ">>> ERROR: Something went wrong..."
print_error_pfS
fi
echo ">>> Cleaning up old packages from repo..."
if ! poudriere pkgclean -f ${_bulk} -j ${jail_name} -p ${POUDRIERE_PORTS_NAME} -y; then
echo ">>> ERROR: Something went wrong..."
print_error_pfS
fi
pkg_repo_rsync "/usr/local/poudriere/data/packages/${jail_name}-${POUDRIERE_PORTS_NAME}"
done
}
# Echo a status string to stdout and append it, with a timestamp,
# to $SNAPSHOTSLOGFILE
snapshots_update_status() {
if [ -z "$1" ]; then
return
fi
if [ -z "${SNAPSHOTS}" -a -z "${POUDRIERE_SNAPSHOTS}" ]; then
return
fi
echo "$*"
echo "`date` -|- $*" >> $SNAPSHOTSLOGFILE
}
create_sha256() {
local _file="${1}"
if [ ! -f "${_file}" ]; then
return 1
fi
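# Run in a subshell so the cd stays local to this function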
( \
cd $(dirname ${_file}) && \
sha256 $(basename ${_file}) > $(basename ${_file}).sha256 \
)
}
snapshots_create_latest_symlink() {
local _image="${1}"
if [ -z "${_image}" ]; then
return
fi
if [ -z "${TIMESTAMP_SUFFIX}" ]; then
return
fi
if [ ! -f "${_image}" ]; then
return
fi
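# e.g. (hypothetical name) product-2.4.0-20170101-0000.iso.gz becomes
# product-2.4.0-latest.iso.gz, assuming TIMESTAMP_SUFFIX=-20170101-0000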
local _symlink=$(echo ${_image} | sed "s,${TIMESTAMP_SUFFIX},-latest,")
ln -sf $(basename ${_image}) ${_symlink}
ln -sf $(basename ${_image}).sha256 ${_symlink}.sha256
}
snapshots_create_sha256() {
local _img=""
for _img in ${ISOPATH} ${MEMSTICKPATH} ${MEMSTICKSERIALPATH} ${MEMSTICKADIPATH} ${OVAPATH} ${VARIANTIMAGES}; do
if [ -f "${_img}.gz" ]; then
_img="${_img}.gz"
fi
if [ ! -f "${_img}" ]; then
continue
fi
create_sha256 ${_img}
snapshots_create_latest_symlink ${_img}
done
}
snapshots_scp_files() {
if [ -z "${RSYNC_COPY_ARGUMENTS}" ]; then
RSYNC_COPY_ARGUMENTS="-ave ssh --timeout=60"
fi
snapshots_update_status ">>> Copying core pkg repo to ${PKG_RSYNC_HOSTNAME}"
pkg_repo_rsync "${CORE_PKG_PATH}"
snapshots_update_status ">>> Finished copying core pkg repo"
for _rsyncip in ${RSYNCIP}; do
snapshots_update_status ">>> Copying files to ${_rsyncip}"
# Ensure directory(s) are available
ssh ${RSYNCUSER}@${_rsyncip} "mkdir -p ${RSYNCPATH}/installer"
if [ -d $IMAGES_FINAL_DIR/virtualization ]; then
ssh ${RSYNCUSER}@${_rsyncip} "mkdir -p ${RSYNCPATH}/virtualization"
fi
# ensure permissions are correct for r+w
ssh ${RSYNCUSER}@${_rsyncip} "chmod -R ug+rw ${RSYNCPATH}/."
rsync $RSYNC_COPY_ARGUMENTS $IMAGES_FINAL_DIR/installer/* \
${RSYNCUSER}@${_rsyncip}:${RSYNCPATH}/installer/
if [ -d $IMAGES_FINAL_DIR/virtualization ]; then
rsync $RSYNC_COPY_ARGUMENTS $IMAGES_FINAL_DIR/virtualization/* \
${RSYNCUSER}@${_rsyncip}:${RSYNCPATH}/virtualization/
fi
snapshots_update_status ">>> Finished copying files."
done
}
|
NOYB/pfsense
|
tools/builder_common.sh
|
Shell
|
apache-2.0
| 58,525 |
#!/bin/bash -e
echo "Not to be run in shell, view file instead"
exit 0
sudo su -
apt-get install backupninja hwinfo
cp /usr/share/doc/backupninja/examples/example.sys /etc/backup.d/10-sysinfo.sys
# if mysql is installed
#cp /usr/share/doc/backupninja/examples/example.mysql /etc/backup.d/50-databases.mysql
cp /usr/share/doc/backupninja/examples/example.rdiff /etc/backup.d/90-to_auron.rdiff
cd /etc/backup.d
chown root:root *
chmod 600 *
# disable it for now
mv 90-to_auron.rdiff 90-to_auron.rdiff.disabled
vi 10-sysinfo.sys
# set partitions = no on openvz containers
vi 90-to_auron.rdiff.disabled
# may want to turn off version check
# set hostname
# add any other folders needed. usually at least /srv but for mumble you'll need
# /var/lib/mumble-server as well
# for destination you can leave the defaults
ssh-keygen -t rsa -b 2048
vi ~/.ssh/config
Host backhost
HostName auron.vrillusions.com
User backupuser
# copy the contents of ~/.ssh/id_rsa.pub to ~/.ssh/authorized_keys for the backupuser on auron
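# a sketch of the same step using ssh-copy-id instead (assumes password auth
# is still enabled for backupuser on auron at this point):
#ssh-copy-id -i ~/.ssh/id_rsa.pub backupuser@auron.vrillusions.com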
# can reenable it now
mv 90-to_auron.rdiff.disabled 90-to_auron.rdiff
vi /etc/backupninja.conf
# leave reportsuccess on for a day or two
# can change the backup time if you want
|
rezapx/private
|
nano_nano/bash-scripts/install-scripts/backupninja.sh
|
Shell
|
apache-2.0
| 1,206 |
#!/bin/bash
# Installation script for the tools required by othello
# Install MongoDB
sudo apt-get install mongodb-server
# Install Python3
sudo apt-get install python3
# Install Pip
sudo apt-get install python3-pip
# Install dependencies (pip3, matching the python3-pip package installed above)
sudo pip3 install -r requirements.txt
|
dandelea/othello_tais
|
install.sh
|
Shell
|
apache-2.0
| 292 |
#!/bin/bash
set -o errexit
CMD="/usr/bin/ceph-osd"
ARGS="-f -d -i ${OSD_ID} --osd-journal ${OSD_DIR}/journal -k ${OSD_DIR}/keyring"
# Loading common functions.
source /opt/kolla/kolla-common.sh
# Execute config strategy
set_configs
# Bootstrap and exit if KOLLA_BOOTSTRAP variable is set. This catches all cases
# of the KOLLA_BOOTSTRAP variable being set, including empty.
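# (in bash, ${!KOLLA_BOOTSTRAP[@]} expands to the variable's key list: "0" for
# a set scalar, even an empty one, and to nothing when it is unset)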
if [[ "${!KOLLA_BOOTSTRAP[@]}" ]]; then
# Creating a new label for the disk
parted "${OSD_DEV}" -s -- mklabel gpt
# Preparing the OSD for use with Ceph
ceph-disk prepare "${OSD_DEV}"
OSD_ID="$(ceph osd create)"
OSD_DIR="/var/lib/ceph/osd/ceph-${OSD_ID}"
mkdir -p "${OSD_DIR}"
mount "${OSD_DEV}1" "${OSD_DIR}"
ceph-osd -i "${OSD_ID}" --mkfs --mkkey
ceph auth add "osd.${OSD_ID}" osd 'allow *' mon 'allow profile osd' -i "${OSD_DIR}/keyring"
# Adding osd to crush map
ceph osd crush add-bucket "$(hostname)" host
ceph osd crush move "$(hostname)" root=default
ceph osd crush add "${OSD_ID}" "${OSD_INITIAL_WEIGHT}" host="$(hostname)"
exit 0
fi
exec $CMD $ARGS
|
chenzhiwei/kolla
|
docker/ceph/ceph-osd/start.sh
|
Shell
|
apache-2.0
| 1,099 |
#!/bin/bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Script to fetch latest swagger spec.
# Puts the updated spec at api/swagger-spec/
set -o errexit
set -o nounset
set -o pipefail
if ! which mvn > /dev/null 2>&1; then
echo "Maven is not installed."
exit
fi
SCRIPT_ROOT=$(dirname "${BASH_SOURCE}")
CLIENT_ROOT="${SCRIPT_ROOT}/../kubernetes"
CLIENT_VERSION=$(python "${SCRIPT_ROOT}/constants.py" CLIENT_VERSION)
PACKAGE_NAME=$(python "${SCRIPT_ROOT}/constants.py" PACKAGE_NAME)
DEVELOPMENT_STATUS=$(python "${SCRIPT_ROOT}/constants.py" DEVELOPMENT_STATUS)
pushd "${SCRIPT_ROOT}" > /dev/null
SCRIPT_ROOT=`pwd`
popd > /dev/null
pushd "${CLIENT_ROOT}" > /dev/null
CLIENT_ROOT=`pwd`
popd > /dev/null
TEMP_FOLDER=$(mktemp -d)
trap "rm -rf ${TEMP_FOLDER}" EXIT SIGINT
SETTING_FILE="${TEMP_FOLDER}/settings"
echo "export KUBERNETES_BRANCH=\"$(python ${SCRIPT_ROOT}/constants.py KUBERNETES_BRANCH)\"" > $SETTING_FILE
echo "export CLIENT_VERSION=\"$(python ${SCRIPT_ROOT}/constants.py CLIENT_VERSION)\"" >> $SETTING_FILE
echo "export PACKAGE_NAME=\"client\"" >> $SETTING_FILE
if [[ -z ${GEN_ROOT:-} ]]; then
GEN_ROOT="${TEMP_FOLDER}/gen"
echo ">>> Cloning gen repo"
git clone --recursive https://github.com/kubernetes-client/gen.git "${GEN_ROOT}"
else
echo ">>> Reusing gen repo at ${GEN_ROOT}"
fi
echo ">>> Running python generator from the gen repo"
"${GEN_ROOT}/openapi/python.sh" "${CLIENT_ROOT}" "${SETTING_FILE}"
mv "${CLIENT_ROOT}/swagger.json" "${SCRIPT_ROOT}/swagger.json"
echo ">>> updating version information..."
sed -i'' "s/^CLIENT_VERSION = .*/CLIENT_VERSION = \\\"${CLIENT_VERSION}\\\"/" "${SCRIPT_ROOT}/../setup.py"
sed -i'' "s/^__version__ = .*/__version__ = \\\"${CLIENT_VERSION}\\\"/" "${CLIENT_ROOT}/__init__.py"
sed -i'' "s/^PACKAGE_NAME = .*/PACKAGE_NAME = \\\"${PACKAGE_NAME}\\\"/" "${SCRIPT_ROOT}/../setup.py"
sed -i'' "s,^DEVELOPMENT_STATUS = .*,DEVELOPMENT_STATUS = \\\"${DEVELOPMENT_STATUS}\\\"," "${SCRIPT_ROOT}/../setup.py"
# This is a terrible hack:
# first, this must be in gen repo not here
# second, this should be ported to swagger-codegen
echo ">>> patching client..."
git apply "${SCRIPT_ROOT}/rest_client_patch.diff"
echo ">>> Done."
|
mbohlool/client-python
|
scripts/update-client.sh
|
Shell
|
apache-2.0
| 2,744 |
#!/bin/bash
coffee --watch --compile *.coffee&
coffee --watch --compile wwwfiles/js/*.coffee&
jade -w -P wwwfiles/*.jade&
echo "The development environment has ready!"
|
Preffer/iportal
|
development.sh
|
Shell
|
apache-2.0
| 169 |
echo "Testing wdc --get"
RES="$(wdc --get)"
test_result $? "$RES" "TestCommit1"
|
dolik-rce/thewatchdog
|
tests/integration/test_13_wdc_get_nonempty.sh
|
Shell
|
bsd-2-clause
| 80 |
#!/bin/sh
# ~/.macos — https://mths.be/macos
# Close any open System Preferences panes, to prevent them from overriding
# settings we’re about to change
osascript -e 'tell application "System Preferences" to quit'
# Ask for the administrator password upfront
sudo -v
# Keep-alive: update existing `sudo` time stamp until `.macos` has finished
while true; do sudo -n true; sleep 60; kill -0 "$$" || exit; done 2>/dev/null &
###############################################################################
# General UI/UX #
###############################################################################
# Disable popup with special characters (accent marks) while holding keys
defaults write -g ApplePressAndHoldEnabled -bool false
# Colored iMac Accent color
# 3 - yellow, 4 - teal, ... 8
defaults write -g NSColorSimulateHardwareAccent -bool YES
defaults write -g NSColorSimulatedHardwareEnclosureNumber -int 4
# Don't close windows when quitting an app (use System Restoration)
# Useful for iTerm to restore previous opened tabs
# defaults write NSGlobalDomain NSQuitAlwaysKeepsWindows 1
# Reduce transparency
defaults write com.apple.universalaccess reduceTransparency -bool true
# Set sidebar icon size to medium
defaults write NSGlobalDomain NSTableViewDefaultSizeMode -int 3
# Increase window resize speed for Cocoa applications
# defaults write NSGlobalDomain NSWindowResizeTime -float 0.001
# Expand save panel by default
defaults write NSGlobalDomain NSNavPanelExpandedStateForSaveMode -bool true
defaults write NSGlobalDomain NSNavPanelExpandedStateForSaveMode2 -bool true
# Expand print panel by default
defaults write NSGlobalDomain PMPrintingExpandedStateForPrint -bool true
defaults write NSGlobalDomain PMPrintingExpandedStateForPrint2 -bool true
# Save to disk (not to iCloud) by default
defaults write NSGlobalDomain NSDocumentSaveNewDocumentsToCloud -bool false
# Automatically quit printer app once the print jobs complete
defaults write com.apple.print.PrintingPrefs "Quit When Finished" -bool true
# Disable the “Are you sure you want to open this application?” dialog
defaults write com.apple.LaunchServices LSQuarantine -bool false
# Remove duplicates in the “Open With” menu (also see `lscleanup` alias)
/System/Library/Frameworks/CoreServices.framework/Frameworks/LaunchServices.framework/Support/lsregister -kill -r -domain local -domain system -domain user
# Set Help Viewer windows to non-floating mode
# defaults write com.apple.helpviewer DevMode -bool true
# Restart automatically if the computer freezes
sudo systemsetup -setrestartfreeze on
# Set language and text formats
# defaults write NSGlobalDomain AppleLanguages -array "en-RU" "ru-RU"
# defaults write NSGlobalDomain AppleLocale -string "en_RU"
# defaults write NSGlobalDomain AppleMeasurementUnits -string "Centimeters"
# defaults write NSGlobalDomain AppleMetricUnits -bool true
# # Disable automatic capitalization as it’s annoying when typing code
# defaults write NSGlobalDomain NSAutomaticCapitalizationEnabled -bool false
# # Disable smart dashes as they’re annoying when typing code
# defaults write NSGlobalDomain NSAutomaticDashSubstitutionEnabled -bool false
# # Disable automatic period substitution as it’s annoying when typing code
# defaults write NSGlobalDomain NSAutomaticPeriodSubstitutionEnabled -bool false
# # Disable smart quotes as they’re annoying when typing code
# defaults write NSGlobalDomain NSAutomaticQuoteSubstitutionEnabled -bool false
###############################################################################
# Trackpad, mouse, keyboard, Bluetooth accessories, and input #
###############################################################################
# Trackpad: enable tap to click for this user and for the login screen
defaults write com.apple.driver.AppleBluetoothMultitouch.trackpad Clicking -bool true
defaults -currentHost write NSGlobalDomain com.apple.mouse.tapBehavior -int 1
defaults write NSGlobalDomain com.apple.mouse.tapBehavior -int 1
# Enable full keyboard access for all controls
# (e.g. enable Tab in modal dialogs)
defaults write NSGlobalDomain AppleKeyboardUIMode -int 3
###############################################################################
# Finder #
###############################################################################
# Finder: show status bar
defaults write com.apple.finder ShowStatusBar -bool true
# Finder: show path bar
defaults write com.apple.finder ShowPathbar -bool true
# Keep folders on top when sorting by name
defaults write com.apple.finder _FXSortFoldersFirst -bool true
# When performing a search, search the current folder by default
defaults write com.apple.finder FXDefaultSearchScope -string "SCcf"
# Disable the warning when changing a file extension
defaults write com.apple.finder FXEnableExtensionChangeWarning -bool false
# Avoid creating .DS_Store files on network or USB volumes
defaults write com.apple.desktopservices DSDontWriteNetworkStores -bool true
defaults write com.apple.desktopservices DSDontWriteUSBStores -bool true
# Disable disk image verification
defaults write com.apple.frameworks.diskimages skip-verify -bool true
defaults write com.apple.frameworks.diskimages skip-verify-locked -bool true
defaults write com.apple.frameworks.diskimages skip-verify-remote -bool true
# Automatically open a new Finder window when a volume is mounted
defaults write com.apple.frameworks.diskimages auto-open-ro-root -bool true
defaults write com.apple.frameworks.diskimages auto-open-rw-root -bool true
defaults write com.apple.finder OpenWindowForNewRemovableDisk -bool true
# Use column view in all Finder windows by default
# Four-letter codes for the other view modes: `icnv`, `Nlsv`, `Flwv`
defaults write com.apple.finder FXPreferredViewStyle -string "clmv"
# Show the ~/Library folder
chflags nohidden ~/Library
# Show the /Volumes folder
sudo chflags nohidden /Volumes
# Expand the following File Info panes:
# “General”, “Open with”, and “Sharing & Permissions”
defaults write com.apple.finder FXInfoPanesExpanded -dict \
General -bool true \
OpenWith -bool true \
Privileges -bool true
###############################################################################
# Dock, Dashboard, and hot corners #
###############################################################################
# Speed up Mission Control animations
defaults write com.apple.dock expose-animation-duration -float 0.1
# Group windows by application in Mission Control
defaults write com.apple.dock expose-group-apps -bool true
# Don’t automatically rearrange Spaces based on most recent use
defaults write com.apple.dock mru-spaces -bool false
# Automatically hide and show the Dock
defaults write com.apple.dock autohide -bool true
# Don’t show recent applications in Dock
defaults write com.apple.dock show-recents -bool false
# Set the icon size of Dock items to 44 pixels
defaults write com.apple.dock tilesize -int 44
# Add a spacer to the left side of the Dock (where the applications are)
#defaults write com.apple.dock persistent-apps -array-add '{tile-data={}; tile-type="spacer-tile";}'
# Add a spacer to the right side of the Dock (where the Trash is)
#defaults write com.apple.dock persistent-others -array-add '{tile-data={}; tile-type="spacer-tile";}'
###############################################################################
# Safari & WebKit #
###############################################################################
# Show the full URL in the address bar (note: this still hides the scheme)
defaults write com.apple.Safari ShowFullURLInSmartSearchField -bool true
# Make Safari’s search banners default to Contains instead of Starts With
defaults write com.apple.Safari FindOnPageMatchesWordStartsOnly -bool false
# Enable the Develop menu and the Web Inspector in Safari
defaults write com.apple.Safari IncludeDevelopMenu -bool true
defaults write com.apple.Safari WebKitDeveloperExtrasEnabledPreferenceKey -bool true
# Warn about fraudulent websites
defaults write com.apple.Safari WarnAboutFraudulentWebsites -bool true
# Enable “Do Not Track”
defaults write com.apple.Safari SendDoNotTrackHTTPHeader -bool true
# Update extensions automatically
defaults write com.apple.Safari InstallExtensionUpdatesAutomatically -bool true
# Disable notification popups
defaults write com.apple.Safari CanPromptForPushNotifications -bool false
###############################################################################
# Mail #
###############################################################################
# Copy email addresses as `[email protected]` instead of `Foo Bar <[email protected]>` in Mail.app
defaults write com.apple.mail AddressesIncludeNameOnPasteboard -bool false
###############################################################################
# Time Machine #
###############################################################################
# Prevent Time Machine from prompting to use new hard drives as backup volume
defaults write com.apple.TimeMachine DoNotOfferNewDisksForBackup -bool true
###############################################################################
# Activity Monitor #
###############################################################################
# Show the main window when launching Activity Monitor
defaults write com.apple.ActivityMonitor OpenMainWindow -bool true
# Visualize CPU usage in the Activity Monitor Dock icon
defaults write com.apple.ActivityMonitor IconType -int 5
# Show all processes in Activity Monitor
defaults write com.apple.ActivityMonitor ShowCategory -int 0
# Sort Activity Monitor results by CPU usage
defaults write com.apple.ActivityMonitor SortColumn -string "CPUUsage"
defaults write com.apple.ActivityMonitor SortDirection -int 0
###############################################################################
# Address Book, Dashboard, iCal, TextEdit, and Disk Utility #
###############################################################################
# Use plain text mode for new TextEdit documents
defaults write com.apple.TextEdit RichText -int 0
# Open and save files as UTF-8 in TextEdit
defaults write com.apple.TextEdit PlainTextEncoding -int 4
defaults write com.apple.TextEdit PlainTextEncodingForWrite -int 4
# Auto-play videos when opened with QuickTime Player
defaults write com.apple.QuickTimePlayerX MGPlayMovieOnOpen -bool true
###############################################################################
# Mac App Store #
###############################################################################
# Enable Debug Menu in the Mac App Store
# defaults write com.apple.appstore ShowDebugMenu -bool true
# Enable the automatic update check
defaults write com.apple.SoftwareUpdate AutomaticCheckEnabled -bool true
# Check for software updates daily, not just once per week
defaults write com.apple.SoftwareUpdate ScheduleFrequency -int 1
# Download newly available updates in background
defaults write com.apple.SoftwareUpdate AutomaticDownload -int 1
# Install System data files & security updates
defaults write com.apple.SoftwareUpdate CriticalUpdateInstall -int 1
# Automatically download apps purchased on other Macs
# defaults write com.apple.SoftwareUpdate ConfigDataInstall -int 1
# Turn on app auto-update
defaults write com.apple.commerce AutoUpdate -bool true
# Allow the App Store to reboot machine on macOS updates
defaults write com.apple.commerce AutoUpdateRestartRequired -bool true
###############################################################################
# Photos #
###############################################################################
# Prevent Photos from opening automatically when devices are plugged in
defaults -currentHost write com.apple.ImageCapture disableHotPlug -bool true
###############################################################################
# Google Chrome & Google Chrome Canary #
###############################################################################
# Use the system-native print preview dialog
defaults write com.google.Chrome DisablePrintPreview -bool true
defaults write com.google.Chrome.canary DisablePrintPreview -bool true
# Expand the print dialog by default
defaults write com.google.Chrome PMPrintingExpandedStateForPrint2 -bool true
defaults write com.google.Chrome.canary PMPrintingExpandedStateForPrint2 -bool true
###############################################################################
# Magnet #
###############################################################################
# Disable ads
defaults write com.crowdcafe.windowmagnet alreadyClickedDynamoItem -bool true
defaults write com.crowdcafe.windowmagnet seenDynamo -bool true
# Disable all shortcuts
defaults write com.crowdcafe.windowmagnet centerWindowComboKey -dict
defaults write com.crowdcafe.windowmagnet expandWindowCenterThirdComboKey -dict
defaults write com.crowdcafe.windowmagnet expandWindowEastComboKey -dict
defaults write com.crowdcafe.windowmagnet expandWindowLeftThirdComboKey -dict
defaults write com.crowdcafe.windowmagnet expandWindowLeftTwoThirdsComboKey -dict
defaults write com.crowdcafe.windowmagnet expandWindowNorthComboKey -dict
defaults write com.crowdcafe.windowmagnet expandWindowNorthEastComboKey -dict
defaults write com.crowdcafe.windowmagnet expandWindowNorthWestComboKey -dict
defaults write com.crowdcafe.windowmagnet expandWindowRightThirdComboKey -dict
defaults write com.crowdcafe.windowmagnet expandWindowRightTwoThirdsComboKey -dict
defaults write com.crowdcafe.windowmagnet expandWindowSouthComboKey -dict
defaults write com.crowdcafe.windowmagnet expandWindowSouthEastComboKey -dict
defaults write com.crowdcafe.windowmagnet expandWindowSouthWestComboKey -dict
defaults write com.crowdcafe.windowmagnet expandWindowWestComboKey -dict
defaults write com.crowdcafe.windowmagnet maximizeWindowComboKey -dict
defaults write com.crowdcafe.windowmagnet moveWindowToNextDisplay -dict
defaults write com.crowdcafe.windowmagnet moveWindowToPreviousDisplay -dict
defaults write com.crowdcafe.windowmagnet restoreWindowComboKey -dict
###############################################################################
# Transmission.app #
###############################################################################
# Use `~/Downloads` to store completed downloads
defaults write org.m0k.transmission DownloadLocationConstant -bool true
# Trash original torrent files
defaults write org.m0k.transmission DeleteOriginalTorrent -bool true
# Hide the donate message
defaults write org.m0k.transmission WarningDonate -bool false
# Hide the legal disclaimer
defaults write org.m0k.transmission WarningLegal -bool false
###############################################################################
# Additional Hacks #
###############################################################################
# Enable Mac's startup chime
# sudo nvram StartupMute=%00
###############################################################################
# Xcode #
###############################################################################
# see https://xcode-tips.github.io
# Faster Xcode Rename Refactoring
defaults write com.apple.dt.Xcode CodeFoldingAnimationSpeed -int 0
# Show project build times in the activity viewer
defaults write com.apple.dt.Xcode ShowBuildOperationDuration -bool YES
###############################################################################
# Kill affected applications #
###############################################################################
for app in "Activity Monitor" \
"Address Book" \
"Calendar" \
"cfprefsd" \
"Contacts" \
"Dock" \
"Finder" \
"Google Chrome" \
"Mail" \
"Messages" \
"Photos" \
"Safari" \
"SystemUIServer" \
"Transmission" \
"iCal"; do
killall "${app}" &> /dev/null
done
echo "Done. Note that some of these changes require a logout/restart to take effect."
|
podkovyrin/dotfiles
|
macos.sh
|
Shell
|
bsd-2-clause
| 16,926 |
#!/bin/sh
X=0
git status --porcelain > _.p
while read FILE; do
# ...
X=1
done < _.p
|
wkoszek/sh_advanced
|
debugging/status2.sh
|
Shell
|
bsd-2-clause
| 88 |
#!/bin/bash
MAJOR="0"
MINOR="2"
BUILD=`./build.version`
GITID=`git log | grep commit | head -n 1 | cut -d " " -f 2`
DATE=`date +%Y%m%d`
HOST=`hostname`
echo "(* This file is generated by version.sh *)"
echo "structure Version ="
echo "struct"
echo " val major = \"${MAJOR}\""
echo " val minor = \"${MINOR}\""
echo " val build = \"${BUILD}\""
echo " val gitId = \"${GITID}\""
echo " val date = \"${DATE}\""
echo " val hostname = \"${HOST}\""
echo " fun getVersionString() = major ^ \".\" ^ minor ^ \".\" ^ build"
echo "end"
|
gian/kuruc
|
src/kuruc/version.sh
|
Shell
|
bsd-2-clause
| 541 |
#!/bin/sh
#
# Convert a file into a #defined string using bourne shell and awk.
#
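# Example invocation (hypothetical file names):
#   ./make-config-from-file.sh Default default.cfg > ConfigDefault.cpp
#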
# Check if the awk interpreter is available.
awk -- '' </dev/null >/dev/null 2>&1
if [ $? -ne 0 ]
then
echo "This script depends on awk, which I cannot find" 1>&2
exit 1
fi
# Check that the SYMBOL name to use was specified.
if [ "$#" -lt 1 ]
then
echo "usage: $0 SYMBOL [file(s)]" 1>&2
exit 1
fi
# Extract the SYMBOL name from the command line.
SYMBOL=$1
shift
# Process each specified file, or standard input.
awk -v "SYMBOL=$SYMBOL" -- '
BEGIN {
printf("// This file is automatically generated - dont edit it\n");
printf("#include \"Config.hpp\"\n");
printf("class Config%s\n", SYMBOL);
printf("{\n");
printf("public:\n");
printf(" Config%s() { Config::instance().registerDefaultConfig(\"%s\", config); }\n", SYMBOL, SYMBOL);
printf(" static Config%s instance;\n", SYMBOL);
printf(" static const char *config;\n");
printf("};\n");
printf("Config%s Config%s::instance;\n", SYMBOL, SYMBOL);
printf("const char *Config%s::config =\n", SYMBOL);
last = "who-there";
seenFirst = 0;
}
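# The empty pattern // matches every input line; printing lags one line
# behind so that END can terminate the final line with a semicolon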
// {
gsub("\"", "\\\"");
if (seenFirst != 0)
{
printf("\"%s\\n\"\\\n", last);
}
last = $0;
seenFirst = 1;
}
END {
printf("\"%s\\n\";\n", last);
}' "$@"
exit $?
|
wjohnsaunders/tvguidefetch
|
make-config-from-file.sh
|
Shell
|
bsd-2-clause
| 1,342 |
#!/bin/sh
make clean
./configure --cc=`xcrun -f --sdk iphoneos clang` \
--arch=armv7 \
--cpu=cortex-a8 \
--sysroot=`xcrun --sdk iphoneos --show-sdk-path` \
--target-os=darwin \
--extra-cflags='-arch armv7 -Wno-asm-operand-widths -integrated-as' \
--extra-ldflags='-arch armv7 -miphoneos-version-min=8.0' \
--enable-cross-compile --enable-pic \
--disable-programs --incdir=/ --libdir=/ --prefix=/
make
make install DESTDIR=tmpdir
mkdir tmpdir/arm
mv tmpdir/lib* tmpdir/arm/
rm -R tmpdir/pkgconfig
|
insidegui/libav-ios-build
|
build_armv7.sh
|
Shell
|
bsd-2-clause
| 507 |
#!/bin/bash
docker exec -it $(docker-compose ps -q mysql | head -1) /bin/bash -c " \
export MYSQL_PWD=root;
echo 'DROP DATABASE IF EXISTS reference' | mysql -uroot; \
echo 'CREATE DATABASE reference' | mysql -uroot; \
"
docker-compose run liquibase \
--username=root \
--password=root \
--url=jdbc:mysql://mysql/reference \
--changeLogFile=/var/www/html/changelog/master.xml \
update
TS=$(date -u +%Y%m%d-%H%M%S)
docker-compose run liquibase \
--username=root \
--password=root \
--url=jdbc:mysql://mysql/reference?tinyInt1isBit=false \
--referenceUrl=jdbc:mysql://mysql/ajasta?tinyInt1isBit=false \
--referenceUsername=root \
--referencePassword=root \
--changeLogFile=/var/www/html/changelog/changelog-$TS.xml \
diffChangeLog
sed -i "
/<\/databaseChangeLog>/ \
<include relativeToChangelogFile=\"true\" file=\"/changelog-$TS.xml\"/>" changelog/master.xml
docker exec -it $(docker-compose ps -q application | head -1) chown -R $(id -u):$(id -g) /var/www/html/changelog
docker exec -it $(docker-compose ps -q mysql | head -1) /bin/bash -c " \
export MYSQL_PWD=root;
echo 'DROP DATABASE reference' | mysql -uroot; \
"
|
DASPRiD/Ajasta
|
docker/dev/create-changelog-diff.sh
|
Shell
|
bsd-2-clause
| 1,201 |
#!/bin/bash
# Setup miniconda environment that is compatible with manylinux2014 docker image
conda create -n buildenv -y conda conda-build anaconda-client
source /root/miniconda3/bin/activate buildenv
conda env list
conda install -y conda-build anaconda-client
|
numba/llvmlite
|
buildscripts/manylinux2014/configure_conda.sh
|
Shell
|
bsd-2-clause
| 261 |
#!/bin/bash
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Check that we're able to submit from a directory that doesn't exist on the
# trunk. This tests for a previous bug where we ended up with an invalid CWD
# after switching to the merge branch.
set -e
. ./test-lib.sh
setup_initsvn
setup_gitsvn
(
set -e
cd git-svn
git config rietveld.server localhost:8080
# Create a branch and give it an issue.
git checkout -q -b new
mkdir dir
cd dir
echo "some work done on a branch" >> test
git add test; git commit -q -m "branch work"
export GIT_EDITOR=$(which true)
test_expect_success "upload succeeds" \
"$GIT_CL upload -m test master | grep -q 'Issue created'"
test_expect_success "git-cl dcommits ok" \
"$GIT_CL dcommit -f"
)
SUCCESS=$?
cleanup
if [ $SUCCESS == 0 ]; then
echo PASS
fi
|
coreos/depot_tools
|
tests/submit-from-new-dir.sh
|
Shell
|
bsd-3-clause
| 950 |
#!/usr/bin/env bash
# Prerequisites:
# - [Git version control system](https://git-scm.com/)
# Check if it is installed by running:
#
# bash -c 'command -v git'
#
# ... if that returns a path, then this script should work fine.
# Presumably, if you are using git pre-commit hooks, it is safe to say that
# you probably have git installed.
# This is based upon another script which had the following license:
#
# Copyright (c) 2010, Benjamin C. Meyer <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the project nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# shellcheck disable=SC2034
GREP_OPTIONS=""
function test_file {
file="${1}"
if [ ! -f "${file}" ] ; then
return
fi
echo "Running whitespace lint..."
# Set -e before and +e after for _required_ linters (i.e.: that will prevent
# commit, e.g.: syntax linters).
# Set +e before and -e after for _optional_ linters (i.e.: that will only
# output messages upon commit, e.g.: style linters).
set +e
if git rev-parse --verify HEAD >/dev/null 2>&1 ; then
head="HEAD"
else
# First commit, use an empty tree
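# (this is git's well-known empty-tree object id, valid in any repository)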
head="4b825dc642cb6eb9a060e54bf8d69288fbee4904"
fi
git diff-index --check --cached "${head}" -- "$file"
set -e
}
case "${1}" in
--about )
echo "Check for introduced trailing whitespace or an indent that uses a space before a tab."
;;
* )
for file in $(git diff-index --cached --name-only HEAD | grep -v -E '\.(gif|gz|ico|jpeg|jpg|png|phar|exe|svgz|ttf)') ; do
test_file "${file}"
done
;;
esac
|
mparker17/simple-linter-git-hooks
|
pre-commit/2-whitespace.sh
|
Shell
|
bsd-3-clause
| 2,990 |
#!/bin/sh
sysdir=`/var/disks/front_tray/identify-sys.sh $1`
if [ x"$sysdir" != "x" ]; then
cat $sysdir/model
fi
|
jncronin/monitor
|
identify-scripts/identify-model.sh
|
Shell
|
bsd-3-clause
| 116 |
#!/usr/bin/env bash
set -e
CWD="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd $CWD
python3 py.py
|
Dekken/maiken
|
test/test.sh
|
Shell
|
bsd-3-clause
| 105 |
#!/bin/sh
if [ "$1" == "" ]; then
echo "Usage: ./run.sh <times> <max>"
exit 1
fi
times=$1
max=$2
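# GNU time(1) takes its output format from the TIME environment variable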
export TIME="user/system/elapsed: %U %S %e"
TIMER="/usr/bin/time --quiet"
# vanilla version
count=0
deadlock=0
rm -f OUTPUT.vanilla
touch OUTPUT.vanilla
while [ $count -lt $times ]; do
printf "vanilla:\tgood=$count, deadlocks=$deadlock\r"
if $TIMER ./boudol $max $max >> OUTPUT.vanilla 2>&1; then
count=$(( $count + 1 ))
else
deadlock=$(( $deadlock + 1 ))
fi
echo "" >> OUTPUT.vanilla
done
printf "vanilla:\tgood=$count, deadlocks=$deadlock\n"
echo "$times complete runs, $deadlock runs with deadlock." >> OUTPUT.vanilla
# modified version
count=0
deadlock=0
rm -f OUTPUT.modified
touch OUTPUT.modified
while [ $count -lt $times ]; do
printf "modified:\tgood=$count, deadlocks=$deadlock\r"
if $TIMER ./boudol_mod $max $max >> OUTPUT.modified 2>&1; then
count=$(( $count + 1 ))
else
deadlock=$(( $deadlock + 1 ))
fi
echo "" >> OUTPUT.modified
done
printf "modified:\tgood=$count, deadlocks=$deadlock\n"
echo "$times complete runs, $deadlock runs with deadlock." >> OUTPUT.modified
|
pgerakios/Deadlock-avoidance-for-C-pthreads
|
nulltests/bank_transactions/run.sh
|
Shell
|
bsd-3-clause
| 1,170 |
# set required variables
WORKLOAD_FILE=${TOP_DIR}/workloads/workload_SIMPLE_MULTICOLUMN_QUERIES_p500_w0_1234.sql
ALGORITHM=bc
|
dbgroup-at-ucsc/dbtune
|
resources/workloads/postgres/.unsupported/convergence/bc_queries/config.sh
|
Shell
|
bsd-3-clause
| 127 |
#!/bin/bash
set -u
. ~/.tikradiorc
while true; do
:> "$PUTKI"
rm -f -- "$KILLFILE"
./savepid $TAILPID tail -f $PUTKI | \
{ while [ ! -e $KILLFILE ]; do read file || exit 0; \
ln -sf "$file" "$HOME/music/now-playing.musaa"; \
$SOITA $SOITA_OPTS "$file" >/dev/null 2>$SOITA_OUT & echo $! > $SOITAPID; \
wait >/dev/null 2>/dev/null; \
rm -f -- "$file" "$TMP/tikplaystreaming"; \
"$HOME/tikradio/util/resethw.sh" ;done }
done
|
tietokilta-saato/tikplay
|
legacy/tikradio/server/tikradio.sh
|
Shell
|
mit
| 444 |
# Switch between python2 and python3 easily. (particularly useful for depot_tools on Arch)
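# Usage: ./set-python.sh python2   (or: ./set-python.sh python3)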
ls -al /usr/bin/python
sudo ln -s -f /usr/bin/$@ /usr/bin/python
ls -al /usr/bin/python
|
jdashg/snippets
|
set-python.sh
|
Shell
|
mit
| 180 |
#!/bin/bash
REPEAT=4
for i in $(seq 1 1 $REPEAT)
do
echo "Run $i"
echo "******************************************"
./runner.sh
echo "******************************************"
done
|
radical-experiments/AIMES-Swift
|
Swift_Experiments/strategy_4/experiment.sh
|
Shell
|
mit
| 200 |
#!/usr/bin/env bash
# get the current path
CURRENT_PATH=$(pwd)
# find the script path
ROOT_PATH=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# change to the root path
cd $ROOT_PATH
# determine if condo.sh already exists
if [ -f condo-local.sh ]; then
# delete it
rm -f condo-local.sh
fi
# copy the template to the local path
cp template/condo.sh condo-local.sh
# run condo using local build
CONDO_SHELL="$ROOT_PATH/condo-local.sh"
$CONDO_SHELL --reset --local $@
# capture the exit code
EXIT_CODE=$?
# remove the local condo file
rm -f condo-local.sh
# change back to the current directory
cd $CURRENT_PATH
# exit using the exit code
exit $EXIT_CODE
|
automotiveMastermind/condo
|
condo.sh
|
Shell
|
mit
| 669 |
#!/bin/bash
set -e
if [[ ! "$TESTSPACE" = /* ]] ||
[[ ! "$PATH_TO_REDMINE" = /* ]] ||
[[ ! "$REDMINE_VER" = * ]] ||
[[ ! "$NAME_OF_PLUGIN" = * ]] ||
[[ ! "$PATH_TO_PLUGIN" = /* ]];
then
echo "You should set"\
" TESTSPACE, PATH_TO_REDMINE, REDMINE_VER"\
" NAME_OF_PLUGIN, PATH_TO_PLUGIN"\
" environment variables"
echo "You set:"\
"$TESTSPACE"\
"$PATH_TO_REDMINE"\
"$REDMINE_VER"\
"$NAME_OF_PLUGIN"\
"$PATH_TO_PLUGIN"
exit 1;
fi
export RAILS_ENV=test
export REDMINE_GIT_REPO=git://github.com/redmine/redmine.git
export REDMINE_GIT_TAG=$REDMINE_VER
export BUNDLE_GEMFILE=$PATH_TO_REDMINE/Gemfile
# checkout redmine
git clone $REDMINE_GIT_REPO $PATH_TO_REDMINE
cd $PATH_TO_REDMINE
if [ ! "$REDMINE_GIT_TAG" = "master" ];
then
git checkout -b $REDMINE_GIT_TAG origin/$REDMINE_GIT_TAG
fi
mv $TESTSPACE/database.yml.travis config/database.yml
mv $TESTSPACE/additional_environment.rb config/
# add telegram_common plugin
git clone git://github.com/centosadmin/redmine_bots.git $PATH_TO_REDMINE/plugins/redmine_bots
# create a link to the backlogs plugin
ln -sf $PATH_TO_PLUGIN plugins/$NAME_OF_PLUGIN
# install gems
bundle install
# run redmine database migrations
bundle exec rake db:migrate
# run plugin database migrations
bundle exec rake redmine:plugins:migrate
# install redmine database
#bundle exec rake redmine:load_default_data REDMINE_LANG=en
bundle exec rake db:structure:dump
# run tests
# bundle exec rake TEST=test/unit/role_test.rb
bundle exec rake redmine:plugins:test NAME=$NAME_OF_PLUGIN
|
olemskoi/redmine_intouch
|
travis.sh
|
Shell
|
mit
| 1,590 |
#!/bin/sh
set -e
mkdir -p "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
RESOURCES_TO_COPY=${PODS_ROOT}/resources-to-copy-${TARGETNAME}.txt
> "$RESOURCES_TO_COPY"
install_resource()
{
case $1 in
*.storyboard)
echo "ibtool --reference-external-strings-file --errors --warnings --notices --output-format human-readable-text --compile ${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$1\" .storyboard`.storyboardc ${PODS_ROOT}/$1 --sdk ${SDKROOT}"
ibtool --reference-external-strings-file --errors --warnings --notices --output-format human-readable-text --compile "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$1\" .storyboard`.storyboardc" "${PODS_ROOT}/$1" --sdk "${SDKROOT}"
;;
*.xib)
echo "ibtool --reference-external-strings-file --errors --warnings --notices --output-format human-readable-text --compile ${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$1\" .xib`.nib ${PODS_ROOT}/$1 --sdk ${SDKROOT}"
ibtool --reference-external-strings-file --errors --warnings --notices --output-format human-readable-text --compile "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$1\" .xib`.nib" "${PODS_ROOT}/$1" --sdk "${SDKROOT}"
;;
*.framework)
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
echo "rsync -av ${PODS_ROOT}/$1 ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
rsync -av "${PODS_ROOT}/$1" "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
;;
*.xcdatamodel)
echo "xcrun momc \"${PODS_ROOT}/$1\" \"${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1"`.mom\""
xcrun momc "${PODS_ROOT}/$1" "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1" .xcdatamodel`.mom"
;;
*.xcdatamodeld)
echo "xcrun momc \"${PODS_ROOT}/$1\" \"${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1" .xcdatamodeld`.momd\""
xcrun momc "${PODS_ROOT}/$1" "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1" .xcdatamodeld`.momd"
;;
*.xcmappingmodel)
echo "xcrun mapc \"${PODS_ROOT}/$1\" \"${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1" .xcmappingmodel`.cdm\""
xcrun mapc "${PODS_ROOT}/$1" "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1" .xcmappingmodel`.cdm"
;;
*.xcassets)
;;
/*)
echo "$1"
echo "$1" >> "$RESOURCES_TO_COPY"
;;
*)
echo "${PODS_ROOT}/$1"
echo "${PODS_ROOT}/$1" >> "$RESOURCES_TO_COPY"
;;
esac
}
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_resource "${BUILT_PRODUCTS_DIR}/podTestLibrary.bundle"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_resource "${BUILT_PRODUCTS_DIR}/podTestLibrary.bundle"
fi
rsync -avr --copy-links --no-relative --exclude '*/.svn/*' --files-from="$RESOURCES_TO_COPY" / "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
if [[ "${ACTION}" == "install" ]]; then
rsync -avr --copy-links --no-relative --exclude '*/.svn/*' --files-from="$RESOURCES_TO_COPY" / "${INSTALL_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
fi
rm -f "$RESOURCES_TO_COPY"
if [[ -n "${WRAPPER_EXTENSION}" ]] && [ "`xcrun --find actool`" ] && [ `find . -name '*.xcassets' | wc -l` -ne 0 ]
then
case "${TARGETED_DEVICE_FAMILY}" in
1,2)
TARGET_DEVICE_ARGS="--target-device ipad --target-device iphone"
;;
1)
TARGET_DEVICE_ARGS="--target-device iphone"
;;
2)
TARGET_DEVICE_ARGS="--target-device ipad"
;;
*)
TARGET_DEVICE_ARGS="--target-device mac"
;;
esac
find "${PWD}" -name "*.xcassets" -print0 | xargs -0 actool --output-format human-readable-text --notices --warnings --platform "${PLATFORM_NAME}" --minimum-deployment-target "${IPHONEOS_DEPLOYMENT_TARGET}" ${TARGET_DEVICE_ARGS} --compress-pngs --compile "${BUILT_PRODUCTS_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
fi
|
rickykaka1899/podTestLibrary
|
Example/Pods/Target Support Files/Pods-podTestLibrary/Pods-podTestLibrary-resources.sh
|
Shell
|
mit
| 4,166 |
#! @shell@ -e
showSyntax() {
# !!! more or less cut&paste from
# system/switch-to-configuration.sh (which we call, of course).
cat <<EOF
Usage: $0 [OPTIONS...] OPERATION
The operation is one of the following:
switch: make the configuration the boot default and activate now
boot: make the configuration the boot default
test: activate the configuration, but don't make it the boot default
build: build the configuration, but don't make it the default or
activate it
build-vm: build a virtual machine containing the configuration
(useful for testing)
build-vm-with-bootloader:
like build-vm, but include a boot loader in the VM
dry-run: just show what store paths would be built/downloaded
Options:
--upgrade fetch the latest version of NixOS before rebuilding
--install-grub (re-)install the Grub bootloader
--no-build-nix don't build the latest Nix from Nixpkgs before
building NixOS
--rollback restore the previous NixOS configuration (only
with switch, boot, test, build)
--profile-name / -p install in the specified system profile
--fast same as --no-build-nix --show-trace
Various nix-build options are also accepted, in particular:
--show-trace show a detailed stack trace for evaluation errors
Environment variables affecting nixos-rebuild:
\$NIX_PATH Nix expression search path
\$NIXOS_CONFIG path to the NixOS system configuration specification
EOF
exit 1
}
# Parse the command line.
extraBuildFlags=()
action=
buildNix=1
rollback=
upgrade=
repair=
profile=/nix/var/nix/profiles/system
while [ "$#" -gt 0 ]; do
i="$1"; shift 1
case "$i" in
--help)
showSyntax
;;
switch|boot|test|build|dry-run|build-vm|build-vm-with-bootloader)
action="$i"
;;
--install-grub)
export NIXOS_INSTALL_GRUB=1
;;
--no-build-nix)
buildNix=
;;
--rollback)
rollback=1
;;
--upgrade)
upgrade=1
;;
--repair)
repair=1
extraBuildFlags+=("$i")
;;
--show-trace|--no-build-hook|--keep-failed|-K|--keep-going|-k|--verbose|-v|-vv|-vvv|-vvvv|-vvvvv|--fallback|--repair)
extraBuildFlags+=("$i")
;;
--max-jobs|-j|--cores|-I)
j="$1"; shift 1
extraBuildFlags+=("$i" "$j")
;;
--option)
j="$1"; shift 1
k="$1"; shift 1
extraBuildFlags+=("$i" "$j" "$k")
;;
--fast)
buildNix=
extraBuildFlags+=(--show-trace)
;;
--profile-name|-p)
if [ -z "$1" ]; then
echo "$0: ‘--profile-name’ requires an argument"
exit 1
fi
if [ "$1" != system ]; then
profile="/nix/var/nix/profiles/system-profiles/$1"
mkdir -p -m 0755 "$(dirname "$profile")"
fi
shift 1
;;
*)
echo "$0: unknown option \`$i'"
exit 1
;;
esac
done
if [ -z "$action" ]; then showSyntax; fi
if [ -n "$rollback" ]; then
buildNix=
fi
tmpDir=$(mktemp -t -d nixos-rebuild.XXXXXX)
trap 'rm -rf "$tmpDir"' EXIT
# If the Nix daemon is running, then use it. This allows us to use
# the latest Nix from Nixpkgs (below) for expression evaluation, while
# still using the old Nix (via the daemon) for actual store access.
# This matters if the new Nix in Nixpkgs has a schema change. It
# would upgrade the schema, which should only happen once we actually
# switch to the new configuration.
# If --repair is given, don't try to use the Nix daemon, because the
# flag can only be used directly.
if [ -z "$repair" ] && systemctl show nix-daemon.socket nix-daemon.service | grep -q ActiveState=active; then
export NIX_REMOTE=${NIX_REMOTE:-daemon}
fi
# If ‘--upgrade’ is given, run ‘nix-channel --update nixos’.
if [ -n "$upgrade" ]; then
nix-channel --update nixos
fi
# First build Nix, since NixOS may require a newer version than the
# current one. Of course, the same goes for Nixpkgs, but Nixpkgs is
# more conservative.
if [ "$action" != dry-run -a -n "$buildNix" ]; then
echo "building Nix..." >&2
if ! nix-build '<nixos>' -A config.environment.nix -o $tmpDir/nix "${extraBuildFlags[@]}" > /dev/null; then
if ! nix-build '<nixos>' -A nixFallback -o $tmpDir/nix "${extraBuildFlags[@]}" > /dev/null; then
nix-build '<nixpkgs>' -A nixUnstable -o $tmpDir/nix "${extraBuildFlags[@]}" > /dev/null
fi
fi
PATH=$tmpDir/nix/bin:$PATH
fi
# Update the version suffix if we're building from Git (so that
# nixos-version shows something useful).
if nixos=$(nix-instantiate --find-file nixos "${extraBuildFlags[@]}"); then
suffix=$(@shell@ $nixos/modules/installer/tools/get-version-suffix "${extraBuildFlags[@]}")
if [ -n "$suffix" ]; then
echo -n "$suffix" > "$nixos/.version-suffix" || true
fi
fi
if [ "$action" = dry-run ]; then
extraBuildFlags+=(--dry-run)
fi
# Either upgrade the configuration in the system profile (for "switch"
# or "boot"), or just build it and create a symlink "result" in the
# current directory (for "build" and "test").
if [ -z "$rollback" ]; then
echo "building the system configuration..." >&2
if [ "$action" = switch -o "$action" = boot ]; then
nix-env "${extraBuildFlags[@]}" -p "$profile" -f '<nixos>' --set -A system
pathToConfig="$profile"
elif [ "$action" = test -o "$action" = build -o "$action" = dry-run ]; then
nix-build '<nixos>' -A system -K -k "${extraBuildFlags[@]}" > /dev/null
pathToConfig=./result
elif [ "$action" = build-vm ]; then
nix-build '<nixos>' -A vm -K -k "${extraBuildFlags[@]}" > /dev/null
pathToConfig=./result
elif [ "$action" = build-vm-with-bootloader ]; then
nix-build '<nixos>' -A vmWithBootLoader -K -k "${extraBuildFlags[@]}" > /dev/null
pathToConfig=./result
else
showSyntax
fi
else # [ -n "$rollback" ]
if [ "$action" = switch -o "$action" = boot ]; then
nix-env --rollback -p "$profile"
pathToConfig="$profile"
elif [ "$action" = test -o "$action" = build ]; then
systemNumber=$(
nix-env -p "$profile" --list-generations |
sed -n '/current/ {g; p;}; s/ *\([0-9]*\).*/\1/; h'
)
ln -sT "$profile"-${systemNumber}-link ./result
pathToConfig=./result
else
showSyntax
fi
fi
# If we're not just building, then make the new configuration the boot
# default and/or activate it now.
if [ "$action" = switch -o "$action" = boot -o "$action" = test ]; then
$pathToConfig/bin/switch-to-configuration "$action"
fi
if [ "$action" = build-vm ]; then
cat >&2 <<EOF
Done. The virtual machine can be started by running $(echo $pathToConfig/bin/run-*-vm).
EOF
fi
|
andreassalomonsson/nixos
|
modules/installer/tools/nixos-rebuild.sh
|
Shell
|
mit
| 7,010 |
#!/bin/bash
FN="pd.celegans_3.12.0.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.11/data/annotation/src/contrib/pd.celegans_3.12.0.tar.gz"
"https://bioarchive.galaxyproject.org/pd.celegans_3.12.0.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-pd.celegans/bioconductor-pd.celegans_3.12.0_src_all.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-pd.celegans/bioconductor-pd.celegans_3.12.0_src_all.tar.gz"
)
MD5="b90ea2e071522bb340c103a1c8270205"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
curl $URL > $TARBALL
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks.
if [[ $(uname -s) == "Linux" ]]; then
if md5sum -c <<<"$MD5 $TARBALL"; then
SUCCESS=1
break
fi
elif [[ $(uname -s) == "Darwin" ]]; then
if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
SUCCESS=1
break
fi
fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
|
roryk/recipes
|
recipes/bioconductor-pd.celegans/post-link.sh
|
Shell
|
mit
| 1,424 |
#!/bin/bash
apt-get -y remove --purge mysql-server
|
vincent-zurczak/roboconf-docker-compliant-lamp
|
src/main/model/graph/Mysql/scripts/undeploy.sh
|
Shell
|
mit
| 53 |
#!/bin/bash
psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" -a --dbname=VOTER < /sql/create_database.sql
psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" -a --dbname=VOTER < /sql/create_tables.sql
psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" -a --dbname=VOTER < /sql/populate_static_data.sql
|
national-voter-file/national-voter-file
|
src/main/sql/dockerResources/z-init-db.sh
|
Shell
|
mit
| 308 |
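Since -v ON_ERROR_STOP=1 makes psql exit non-zero at the first failing statement, the three calls above can be folded into a loop that aborts the whole initialization on the first error. A minimal sketch under set -e, using the same files and flags as the script above:

set -e
for f in create_database.sql create_tables.sql populate_static_data.sql; do
  psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" -a --dbname=VOTER < "/sql/$f"
done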
#!/bin/bash
FN="XtraSNPlocs.Hsapiens.dbSNP144.GRCh38_0.99.12.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.11/data/annotation/src/contrib/XtraSNPlocs.Hsapiens.dbSNP144.GRCh38_0.99.12.tar.gz"
"https://bioarchive.galaxyproject.org/XtraSNPlocs.Hsapiens.dbSNP144.GRCh38_0.99.12.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-xtrasnplocs.hsapiens.dbsnp144.grch38/bioconductor-xtrasnplocs.hsapiens.dbsnp144.grch38_0.99.12_src_all.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-xtrasnplocs.hsapiens.dbsnp144.grch38/bioconductor-xtrasnplocs.hsapiens.dbsnp144.grch38_0.99.12_src_all.tar.gz"
)
MD5="c617aa805c7ecf60ee9283eb3c51b1c7"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
curl $URL > $TARBALL
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks.
if [[ $(uname -s) == "Linux" ]]; then
if md5sum -c <<<"$MD5 $TARBALL"; then
SUCCESS=1
break
fi
else if [[ $(uname -s) == "Darwin" ]]; then
if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
SUCCESS=1
break
fi
fi
fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
|
roryk/recipes
|
recipes/bioconductor-xtrasnplocs.hsapiens.dbsnp144.grch38/post-link.sh
|
Shell
|
mit
| 1,604 |
#! /bin/sh
#
# cvs.sh
#
# This file contains support code from Makefile.common
# It defines a shell function for each known target
# and then does a case to call the correct function.
unset MAKEFLAGS
call_and_fix_autoconf()
{
$AUTOCONF || exit 1
}
strip_makefile()
{
if test ! -f $makefile_wo; then
perl -e '$in=0; while ( <> ) { $in = 1 if ($_ =~ m/^if / ); print $_ unless ($in || $_ =~ m/^include /); $in = 0 if ($_ =~ m/^endif/); }' < $makefile_am > $makefile_wo
fi
}
check_autotool_versions()
{
required_autoconf_version="2.53 or newer"
AUTOCONF_VERSION=`$AUTOCONF --version | head -n 1`
case $AUTOCONF_VERSION in
Autoconf*2.5* | autoconf*2.5* | autoconf*2.6* ) : ;;
"" )
echo "*** AUTOCONF NOT FOUND!."
echo "*** TDE requires autoconf $required_autoconf_version"
exit 1
;;
* )
echo "*** YOU'RE USING $AUTOCONF_VERSION."
echo "*** TDE requires autoconf $required_autoconf_version"
exit 1
;;
esac
AUTOHEADER_VERSION=`$AUTOHEADER --version | head -n 1`
case $AUTOHEADER_VERSION in
Autoconf*2.5* | autoheader*2.5* | autoheader*2.6* ) : ;;
"" )
echo "*** AUTOHEADER NOT FOUND!."
echo "*** TDE requires autoheader $required_autoconf_version"
exit 1
;;
* )
echo "*** YOU'RE USING $AUTOHEADER_VERSION."
echo "*** TDE requires autoheader $required_autoconf_version"
exit 1
;;
esac
AUTOMAKE_STRING=`$AUTOMAKE --version | head -n 1`
required_automake_version="1.6.1 or newer"
case $AUTOMAKE_STRING in
automake*1.6.* | automake*1.7* | automake*1.8* | automake*1.9* | automake*1.10* | automake*1.11* | automake*1.12* | automake*1.13* | automake*1.14* | automake*1.15* )
echo "*** $AUTOMAKE_STRING found."
UNSERMAKE=no
;;
"" )
echo "*** AUTOMAKE NOT FOUND!."
echo "*** TDE requires automake $required_automake_version"
exit 1
;;
*unsermake* ) :
echo "*** YOU'RE USING UNSERMAKE."
echo "*** GOOD LUCK!! :)"
UNSERMAKE=unsermake
;;
* )
echo "*** YOU'RE USING $AUTOMAKE_STRING."
echo "*** TDE requires automake $required_automake_version"
exit 1
;;
esac
unset required_automake_version
}
cvs()
{
check_autotool_versions
acinclude_m4
### Make new subdirs and configure.in.
### The make calls could be optimized away here,
### with a little thought.
if test -r configure.in.in; then
rm -f configure.in
echo "*** Creating list of subdirectories"
create_subdirs
if test -r Makefile.am.in; then
echo "*** Creating Makefile.am"
if grep '\$(top_srcdir)/Makefile.am:' $makefile_am >/dev/null; then
strip_makefile
$MAKE -f $makefile_wo top_srcdir=. ./Makefile.am || exit 1
else
Makefile_am
fi
fi
configure_files
echo "*** Creating configure.in"
if grep '\$(top_srcdir)/configure.in:' $makefile_am >/dev/null; then
strip_makefile
$MAKE -f $makefile_wo top_srcdir=. ./configure.in || exit 1
else
configure_in
fi
fi
echo "*** Creating aclocal.m4"
$ACLOCAL $ACLOCALFLAGS || exit 1
echo "*** Creating configure"
call_and_fix_autoconf
if egrep "^A[MC]_CONFIG_HEADER" configure.in >/dev/null 2>&1; then
echo "*** Creating config.h template"
$AUTOHEADER || exit 1
touch config.h.in
fi
echo "*** Creating Makefile templates"
$AUTOMAKE --add-missing || exit 1
if test "$UNSERMAKE" = no; then
echo "*** Postprocessing Makefile templates"
perl -w admin/am_edit || exit 1
fi
if egrep "^cvs-local:" $makefile_am >/dev/null; then \
strip_makefile
$MAKE -f $makefile_wo cvs-local top_srcdir=. || exit 1
fi
echo "*** Creating date/time stamp"
touch stamp-h.in
echo "*** Finished"
echo " Don't forget to run ./configure"
echo " If you haven't done so in a while, run ./configure --help"
}
dist()
{
check_autotool_versions
###
### First build all of the files necessary to do just "make"
###
acinclude_m4
if test -r configure.in.in; then
rm -f configure.in
create_subdirs
if test -r Makefile.am.in; then
if grep '\$(top_srcdir)/Makefile.am:' $makefile_am >/dev/null; then
strip_makefile
$MAKE -f $makefile_wo top_srcdir=. ./Makefile.am || exit 1
else
Makefile_am
fi
fi
configure_files
if grep '\$(top_srcdir)/configure.in:' $makefile_am >/dev/null; then
strip_makefile
$MAKE -f $makefile_wo top_srcdir=. ./configure.in || exit 1
else
configure_in
fi
fi
$ACLOCAL $ACLOCALFLAGS
if egrep "^A[MC]_CONFIG_HEADER" configure.in >/dev/null 2>&1; then
echo "*** Creating config.h template"
$AUTOHEADER || exit 1
touch config.h.in
fi
$AUTOMAKE --add-missing --foreign || exit 1
if test "$UNSERMAKE" = no; then
echo "*** Postprocessing Makefile templates"
perl -w admin/am_edit || exit 1
fi
call_and_fix_autoconf
touch stamp-h.in
if grep "^cvs-local:" $makefile_am >/dev/null; then
strip_makefile
$MAKE -f $makefile_wo cvs-local top_srcdir=.
fi
###
### Then make messages
###
if test -d po; then
LIST=`find ./po -name "*.po"`
for i in $LIST; do
file2=`echo $i | sed -e "s#\.po#\.gmo#"`
msgfmt -o $file2 $i || touch $file2
done
fi
if grep "^cvs-dist-local:" $makefile_am >/dev/null; then
strip_makefile
$MAKE -f $makefile_wo cvs-dist-local top_srcdir=.
fi
}
subdir_dist()
{
$ACLOCAL $ACLOCALFLAGS
$AUTOHEADER
touch config.h.in
$AUTOMAKE
AUTOMAKE_STRING=`$AUTOMAKE --version | head -n 1`
case $AUTOMAKE_STRING in
*unsermake* ) :
;;
*)
perl -w ../admin/am_edit --path=../admin
esac
call_and_fix_autoconf
touch stamp-h.in
}
configure_in()
{
rm -f configure.in configure.in.new
kde_use_qt_param=
test -f configure.files || { echo "need configure.files for configure.in"; exit 1; }
list=`fgrep -v "configure.in.bot" < configure.files | fgrep -v "configure.in.mid"`
: > configure.in.new
for file in $list; do
echo "dnl =======================================================" >> configure.in.new
echo "dnl FILE: $file" >> configure.in.new
echo "dnl =======================================================" >> configure.in.new
echo "" >> configure.in.new
cat $file >> configure.in.new
done
echo "KDE_CREATE_SUBDIRSLIST" >> configure.in.new
if test -f Makefile.am.in; then
subdirs=`cat subdirs`
for dir in $subdirs; do
vdir=`echo $dir | sed -e 's,[-+.@],_,g'`
echo "AM_CONDITIONAL($vdir""_SUBDIR_included, test \"x\$$vdir""_SUBDIR_included\" = xyes)" >> configure.in.new
if test -f "$dir/configure.in"; then
echo "if test \"x\$$vdir""_SUBDIR_included\" = xyes; then " >> configure.in.new
echo " AC_CONFIG_SUBDIRS($dir)" >> configure.in.new
echo "fi" >> configure.in.new
fi
done
fi
echo "AC_CONFIG_FILES([ Makefile ])" >> configure.in.new
if test -f inst-apps; then
topleveldirs=`cat inst-apps`
else
topleveldirs=
for dir in `ls -1d * | sort`; do
if test "$dir" != "debian" && test -d $dir; then
topleveldirs="$topleveldirs $dir"
fi
done
fi
for topleveldir in $topleveldirs; do
if test -f $topleveldir/configure.in; then
continue
fi
if test -f $topleveldir/Makefile.am; then :; else
continue
fi
mfs=`find $topleveldir -follow -name Makefile.am -print | fgrep -v "/." | \
sed -e 's#\./##; s#/Makefile.am$##' | sort | sed -e 's#$#/Makefile#'`
for i in $mfs; do
echo "AC_CONFIG_FILES([ $i ])" >> configure.in.new
done
done
files=`cat configure.files`
list=`egrep '^dnl AC_OUTPUT\(.*\)' $files | sed -e "s#^.*dnl AC_OUTPUT(\(.*\))#\1#"`
for file in $list; do
echo "AC_CONFIG_FILES([ $file ])" >> configure.in.new
done
midfiles=`cat configure.files | fgrep "configure.in.mid"`
test -n "$midfiles" && cat $midfiles >> configure.in.new
echo "AC_OUTPUT" >> configure.in.new
modulename=
if test -f configure.in.in; then
if head -n 2 configure.in.in | egrep "^#MIN_CONFIG\(.*\)$" > /dev/null; then
kde_use_qt_param=`cat configure.in.in | sed -n -e "s/#MIN_CONFIG(\(.*\))/\1/p"`
fi
if head -n 2 configure.in.in | egrep "^#MIN_CONFIG" > /dev/null; then
line=`grep "^AM_INIT_AUTOMAKE(" configure.in.in`
if test -n "$line"; then
modulename=`echo $line | sed -e "s#AM_INIT_AUTOMAKE(\([^,]*\),.*#\1#"`
VERSION=`echo $line | sed -e "s#AM_INIT_AUTOMAKE([^,]*, *\([^)]*\)).*#\1#"`
fi
sed -e "s#AM_INIT_AUTOMAKE([^@].*#dnl PACKAGE set before#" \
configure.in.new > configure.in && mv configure.in configure.in.new
fi
fi
if test -z "$VERSION" || test "$VERSION" = "@VERSION@"; then
VERSION="\"3.5.10\""
fi
if test -z "$modulename" || test "$modulename" = "@MODULENAME@"; then
modulename=`pwd`;
modulename=`basename $modulename`
esc_VERSION=`echo $VERSION | sed -e "s#[^.0-9a-zA-Z]##g"`
modulename=`echo $modulename | sed -e "s#-$esc_VERSION##"`
fi
if test -n "$kde_use_qt_param"; then
sed -e "s#^dnl KDE_USE_QT#KDE_USE_QT($kde_use_qt_param)#" \
configure.in.new > configure.in && mv configure.in configure.in.new
fi
sed -e "s#@MODULENAME@#$modulename#" configure.in.new |
sed -e "s#@VERSION@#$VERSION#" > configure.in
botfiles=`cat configure.files | egrep "configure.in.bot"`
test -n "$botfiles" && cat $botfiles >> configure.in
cat $admindir/configure.in.bot.end >> configure.in
rm -f configure.in.new
}
configure_files()
{
echo "*** Creating configure.files"
admindir=NO
for i in . .. ../.. ../../..; do
if test -x $i/admin; then admindir=$i/admin; break; fi
done
rm -f configure.files
touch configure.files
if test -f configure.in.in && head -n 2 configure.in.in | grep "^#MIN_CONFIG" > /dev/null; then
echo $admindir/configure.in.min >> configure.files
fi
test -f configure.in.in && echo configure.in.in >> configure.files
# we collect files in the subdirs and do some sorting tricks, so subsubdirs come after subdirs
if test -f inst-apps; then
inst=`cat inst-apps`
list=""
for i in $inst; do
list="$list `find $i/ -follow -name "configure.in.in" -o -name "configure.in.bot" -o -name "configure.in.mid" | \
sed -e "s,/configure,/aaaconfigure," | sort | sed -e "s,/aaaconfigure,/configure,"`"
done
else
list=`find . -path "./.pc" -prune -o -follow -name "configure.in.in" -o -name "configure.in.bot" -o -name "configure.in.mid" | \
sed -e "s,/configure,/aaaconfigure," | sort | sed -e "s,/aaaconfigure,/configure,"`
fi
for i in $list; do if test -f $i && test `dirname $i` != "." ; then
echo $i >> configure.files
fi; done
test -f configure.in.mid && echo configure.in.mid >> configure.files
test -f configure.in.bot && echo configure.in.bot >> configure.files
if test ! -s configure.files; then
echo "There are no files to build a configure. Please check your checkout."
exit 1
fi
}
create_subdirs()
{
if grep '\$(top_srcdir)/subdirs:' $makefile_am >/dev/null; then
# as many modules contain rules to create subdirs without any
# dependencies make won't create it unless there is no file.
# so we check if that's a dummy rule or one that works
rm -f subdirs.cvs.sh.$$
if test -f subdirs; then
mv subdirs subdirs.cvs.sh.$$
fi
strip_makefile
$MAKE -f $makefile_wo top_srcdir=. ./subdirs || exit 1
if test -f subdirs.cvs.sh.$$; then
if test -s subdirs; then
rm subdirs.cvs.sh.$$
else
mv subdirs.cvs.sh.$$ subdirs
fi
fi
else
subdirs
fi
}
subdirs()
{
dirs=
idirs=
if test -f inst-apps; then
idirs=`cat inst-apps`
else
idirs=`ls -1 | sort`
fi
compilefirst=`sed -ne 's#^COMPILE_FIRST[ ]*=[ ]*##p' $makefile_am | head -n 1`
compilelast=`sed -ne 's#^COMPILE_LAST[ ]*=[ ]*##p' $makefile_am | head -n 1`
for i in $idirs; do
if test -f $i/Makefile.am; then
case " $compilefirst $compilelast " in
*" $i "*) ;;
*) dirs="$dirs $i"
esac
fi
done
: > ./_SUBDIRS
for d in $compilefirst; do
echo $d >> ./_SUBDIRS
done
(for d in $dirs; do
list=`sed -ne "s#^COMPILE_BEFORE_$d""[ ]*=[ ]*##p" $makefile_am | head -n 1`
for s in $list; do
echo $s $d
done
list=`sed -ne "s#^COMPILE_AFTER_$d""[ ]*=[ ]*##p" $makefile_am | head -n 1`
for s in $list; do
echo $d $s
done
echo $d $d
done ) | tsort >> ./_SUBDIRS
for d in $compilelast; do
echo $d >> ./_SUBDIRS
done
if test -r subdirs && cmp -s subdirs _SUBDIRS; then
rm -f _SUBDIRS
fi
test -r _SUBDIRS && mv _SUBDIRS subdirs || true
}
Makefile_am()
{
if test -f Makefile.am.in; then
compilefirst=`sed -ne 's#^COMPILE_FIRST[ ]*=[ ]*##p' $makefile_am | head -n 1`
compilelast=`sed -ne 's#^COMPILE_LAST[ ]*=[ ]*##p' $makefile_am | head -n 1`
idirs=
dirs=
if test -f inst-apps; then
idirs=`cat inst-apps`
else
idirs=`cat subdirs`
fi
for i in $idirs; do
case " $compilefirst $compilelast " in
*" $i "*) ;;
*) dirs="$dirs $i"
esac
done
adds=`fgrep '$(top_srcdir)/acinclude.m4:' Makefile.am.in | sed -e 's,^[^:]*: *,,; s,\$(top_srcdir)/,,g'`
if echo "$adds" | fgrep "*" >/dev/null ; then
adds=`ls -d -1 $adds 2>/dev/null`
fgrep -v '$(top_srcdir)/acinclude.m4:' Makefile.am.in > Makefile.am.in.adds
str='$(top_srcdir)/acinclude.m4:'
for add in $adds; do
str="$str \$(top_srcdir)/$add"
done
echo $str >> Makefile.am.in.adds
else
cat Makefile.am.in > Makefile.am.in.adds
fi
cat Makefile.am.in.adds | \
sed -e 's,^\s*\(COMPILE_BEFORE.*\),# \1,' | \
sed -e 's,^\s*\(COMPILE_AFTER.*\),# \1,' > Makefile.am
echo "SUBDIRS="'$(TOPSUBDIRS)' >> Makefile.am
rm Makefile.am.in.adds
fi
}
acinclude_m4()
{
echo "*** Creating acinclude.m4"
adds=
if grep '\$(top_srcdir)/acinclude.m4:' $makefile_am >/dev/null; then
strip_makefile
rm -f acinclude.m4
adds=`grep '\$(top_srcdir)/acinclude.m4:' $makefile_wo | sed -e 's,^[^:]*: *,,; s,\$(top_srcdir),.,g'`
if echo $adds | fgrep "*" >/dev/null ; then
adds=`ls -d -1 $adds 2>/dev/null`
else
$MAKE -f $makefile_wo top_srcdir=. ./acinclude.m4 || exit 1
fi
else
rm -f acinclude.m4
fi
# if it wasn't created up to now, then we do it better
if test ! -f acinclude.m4; then
cat admin/acinclude.m4.in admin/libtool.m4.in admin/pkg.m4.in $adds > acinclude.m4
fi
}
package_merge()
{
catalogs=$POFILES
for cat in $catalogs; do
msgmerge -o $cat.new $cat $PACKAGE.pot
if test -s $cat.new; then
grep -v "\"POT-Creation" $cat.new > $cat.new.2
grep -v "\"POT-Creation" $cat >> $cat.new.1
if diff $cat.new.1 $cat.new.2; then
rm $cat.new
else
mv $cat.new $cat
fi
rm -f $cat.new.1 $cat.new.2
fi
done
}
extract_messages()
{
podir=${podir:-$PWD/po}
files=`find . -name Makefile.am | xargs egrep -l '^messages:' `
dirs=`for i in $files; do echo \`dirname $i\`; done`
tmpname="$PWD/messages.log"
export PATH=$PATH:/opt/trinity/bin
if test -z "$EXTRACTRC"; then EXTRACTRC=extractrc ; fi
if test -z "$PREPARETIPS"; then PREPARETIPS=preparetips ; fi
export EXTRACTRC PREPARETIPS
for subdir in $dirs; do
test -z "$VERBOSE" || echo "Making messages in $subdir"
(cd $subdir
if test -n "`grep -e '^messages:.*rc.cpp' Makefile.am`"; then
$EXTRACTRC *.rc *.ui *.kcfg > rc.cpp
else
candidates=`ls -1 *.rc *.ui *.kcfg 2>/dev/null`
if test -n "$candidates"; then
echo "$subdir has *.rc, *.ui or *.kcfg files, but not correct messages line"
fi
fi
if find . -name \*.c\* -o -name \*.h\* | fgrep -v ".svn" | xargs fgrep -s -q TDEAboutData ; then
echo -e 'i18n("_: NAME OF TRANSLATORS\\n"\n"Your names")\ni18n("_: EMAIL OF TRANSLATORS\\n"\n"Your emails")' > _translatorinfo.cpp
else echo " " > _translatorinfo.cpp
fi
perl -e '$mes=0; while (<STDIN>) { next if (/^(if\s|else\s|endif)/); if (/^messages:/) { $mes=1; print $_; next; } if ($mes) { if (/$\\(XGETTEXT\)/ && / -o/) { s/ -o \$\(podir\)/ _translatorinfo.cpp -o \$\(podir\)/ } print $_; } else { print $_; } }' < Makefile.am | egrep -v '^include ' > _transMakefile
kdepotpath=${includedir:-`tde-config --expandvars --install include`}/tde.pot
if ! test -f $kdepotpath; then
kdepotpath=`tde-config --expandvars --prefix`/include/tde.pot
fi
$MAKE -s -f _transMakefile podir=$podir EXTRACTRC="$EXTRACTRC" PREPARETIPS="$PREPARETIPS" srcdir=. \
XGETTEXT="${XGETTEXT:-xgettext} --foreign-user -C -ci18n -ki18n -ktr2i18n -kI18N_NOOP -kI18N_NOOP2 -kaliasLocale -x $kdepotpath" messages
exit_code=$?
if test "$exit_code" != 0; then
echo "make exit code: $exit_code"
fi
) 2>&1 | grep -v '^make\[1\]' > $tmpname
test -s $tmpname && { echo $subdir ; cat "$tmpname"; }
test -f $subdir/rc.cpp && rm -f $subdir/rc.cpp
rm -f $subdir/_translatorinfo.cpp
rm -f $subdir/_transMakefile
done
rm -f $tmpname
}
package_messages()
{
rm -rf po.backup
mkdir po.backup
for i in `ls -1 po/*.pot 2>/dev/null | sed -e "s#po/##"`; do
egrep -v '^#[^,]' po/$i | egrep '^.*[^ ]+.*$' | grep -v "\"POT-Creation" > po.backup/$i
cat po/$i > po.backup/backup_$i
touch -r po/$i po.backup/backup_$i
rm po/$i
done
extract_messages
for i in `ls -1 po.backup/*.pot 2>/dev/null | sed -e "s#po.backup/##" | egrep -v '^backup_'`; do
test -f po/$i || echo "disappeared: $i"
done
for i in `ls -1 po/*.pot 2>/dev/null | sed -e "s#po/##"`; do
sed -e 's,^"Content-Type: text/plain; charset=CHARSET\\n"$,"Content-Type: text/plain; charset=UTF-8\\n",' po/$i > po/$i.new && mv po/$i.new po/$i
#msgmerge -q -o po/$i po/$i po/$i
egrep -v '^#[^,]' po/$i | egrep '^.*[^ ]+.*$' | grep -v "\"POT-Creation" > temp.pot
if test -f po.backup/$i && ! cmp -s temp.pot po.backup/$i; then
echo "will update $i"
else
if test -f po.backup/backup_$i; then
test -z "$VERBOSE" || echo "I'm restoring $i"
mv po.backup/backup_$i po/$i
rm po.backup/$i
else
echo "will add $i"
fi
fi
done
rm -f temp.pot
rm -rf po.backup
}
# Make sure that sorting is always done the same way
LC_ALL=C
export LC_ALL
unset LANG || :
unset LC_CTYPE || :
unset LANGUAGE || :
unset CDPATH || :
admindir=`echo "$0" | sed 's%[\\/][^\\/][^\\/]*$%%'`
test "x$admindir" = "x$0" && admindir=.
test "x$MAKE" = x && MAKE=make
makefile_am=Makefile.am
makefile_wo=Makefile.am.wo
if test -f Makefile.am.in; then
makefile_am=Makefile.am.in
makefile_wo=Makefile.am.in.wo
rm -f $makefile_wo
fi
# Call script to find autoconf and friends. Uses eval since the script outputs
# sh-compatible code.
eval `$admindir/detect-autoconf.pl`
###
### Main
###
arg=`echo $1 | tr .- __`
case $arg in
cvs | dist | subdir_dist | configure_in | configure_files | subdirs | \
cvs_clean | package_merge | package_messages | Makefile_am | acinclude_m4 | extract_messages ) $arg ;;
configure ) call_and_fix_autoconf ;;
* ) echo "Usage: cvs.sh <target>"
echo "Target can be one of:"
echo " cvs svn dist"
echo " configure.in configure.files"
echo " package-merge package-messages"
echo ""
echo "Usage: anything but $1"
exit 1 ;;
esac
if test -f $makefile_wo; then
rm $makefile_wo
fi
exit 0
|
MagicGroup/eva
|
admin/cvs.sh
|
Shell
|
gpl-2.0
| 18,828 |
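One trick in configure_files above is worth spelling out: the paired sed substitutions around sort temporarily rename configure* to aaaconfigure* so that a directory's own configure fragment sorts before those of its subdirectories ("subsubdirs come after subdirs", as the comment puts it), then rename back. An isolated demonstration with invented paths:

printf '%s\n' \
  'app/bar/configure.in.in' \
  'app/configure.in.in' |
sed -e 's,/configure,/aaaconfigure,' | sort | sed -e 's,/aaaconfigure,/configure,'
# app/configure.in.in       <- the parent directory now precedes its subdirectory;
# app/bar/configure.in.in      a plain sort would have reversed these two lines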
#!/bin/bash
set -x
PREFIX="$1";shift
ARCHIVE="$1";shift
mkdir -p "$(dirname $ARCHIVE)"
# Gather variables from bitbake
bitbake_vars="$(mktemp)"
bitbake -e orion-headless-image > $bitbake_vars 2>&1
eval $(grep '^TOPDIR=' $bitbake_vars)
eval $(grep '^TMPDIR=' $bitbake_vars)
eval $(grep '^DEPLOY_DIR=' $bitbake_vars)
eval $(grep '^BUILDHISTORY_DIR=' $bitbake_vars)
# Gather files to archive into a text file to feed to tar
files_to_archive="$(mktemp)"
echo "${DEPLOY_DIR}" >> $files_to_archive
echo "${BUILDHISTORY_DIR}" >> $files_to_archive
find "${TMPDIR}" -type d -name "temp" >> $files_to_archive
echo "$*" >> $files_to_archive
# Remove leading and trailing slashes from TOPDIR
TOPDIR=${TOPDIR#/}; TOPDIR=${TOPDIR%/}
# Create the archive, substituting path prefixes of TOPDIR into rootdir
tar --verbose --create --auto-compress \
--transform "s!^${TOPDIR}!${PREFIX}!" \
--file "${ARCHIVE}" \
--files-from="${files_to_archive}"
# Clean up
rm $files_to_archive
rm $bitbake_vars
|
novatechweb/ansible-virtd-docker-mariadb-bacula-nginx
|
roles/buildbot-worker-ntel/files/ci-archive.sh
|
Shell
|
gpl-2.0
| 996 |
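The --transform expression above rewrites only the member names stored in the archive; nothing moves on disk, and everything under $TOPDIR lands in the archive under $PREFIX. A self-contained sketch assuming GNU tar, with invented directory names:

mkdir -p demo/build/tmp/deploy
touch demo/build/tmp/deploy/image.bin
tar --create --file demo/out.tar -C demo \
  --transform 's!^build!ci-artifacts!' build
tar --list --file demo/out.tar
# ci-artifacts/
# ci-artifacts/tmp/
# ci-artifacts/tmp/deploy/
# ci-artifacts/tmp/deploy/image.bin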
#!/bin/bash
tar -xzf python.tar.gz
export PATH=miniconda2/bin:$PATH
python process_NCEI_03_tmin_15d.py NCEI_WIS_$1 $1 /mnt/gluster/megarcia/WIS_Climatology/grids
|
megarcia/GT16_JGRA
|
htcondor/process_NCEI_03_tmin_15d.sh
|
Shell
|
gpl-3.0
| 163 |
#!/bin/bash
set -eux
CURRENTDIR=$(pwd)
if [ ${CURRENTDIR} == "/" ] ; then
cd /home
CURRENTDIR=/home
fi
export TEST_ARTIFACTS=${CURRENTDIR}/logs
if [ -z "${TEST_SUBJECTS:-}" ]; then
export TEST_SUBJECTS=${CURRENTDIR}/untested-atomic.qcow2
fi
# The test artifacts must be an empty directory
rm -rf ${TEST_ARTIFACTS}
mkdir -p ${TEST_ARTIFACTS}
# Invoke tests according to section 1.7.2 here:
# https://fedoraproject.org/wiki/Changes/InvokingTests
if [ -z "${package:-}" ]; then
if [ $# -lt 1 ]; then
echo "No package defined"
exit 1
else
package="$1"
fi
fi
# Make sure we have or have downloaded the test subject
if [ -z "${TEST_SUBJECTS:-}" ]; then
echo "No subject defined"
exit 1
elif ! file ${TEST_SUBJECTS:-}; then
wget -q -O testimage.qcow2 ${TEST_SUBJECTS}
export TEST_SUBJECTS=${PWD}/testimage.qcow2
fi
# Check out the upstreamfirst repository for this package
rm -rf ${package}
if ! git clone https://upstreamfirst.fedorainfracloud.org/${package}; then
echo "No upstreamfirst repo for this package! Exiting..."
exit 0
fi
# The specification requires us to invoke the tests in the checkout directory
pushd ${package}
function clean_up {
rm -rf tests/package
mkdir -p tests/package
cp ${TEST_ARTIFACTS}/* tests/package/
}
trap clean_up EXIT SIGHUP SIGINT SIGTERM
# The inventory must be from the test if present (file or directory) or defaults
if [ -e inventory ] ; then
ANSIBLE_INVENTORY=$(pwd)/inventory
export ANSIBLE_INVENTORY
fi
# Link test doesn't work on rawhide https://bugzilla.redhat.com/show_bug.cgi?id=1526615
# Loginctl test doesn't work on rawhide https://bugzilla.redhat.com/show_bug.cgi?id=1526621
if [ "$package" == "systemd" ]; then
sed -i '/link/c\' tests.yml
sed -i '/loginctl/c\' tests.yml
fi
set +u
PYTHON_INTERPRETER=""
if [[ ! -z "${python3}" && "${python3}" == "yes" ]] ; then
PYTHON_INTERPRETER='--extra-vars ansible_python_interpreter=/usr/bin/python3'
fi
set -u
# Invoke each playbook according to the specification
for playbook in tests*.yml; do
if [ -f ${playbook} ]; then
ansible-playbook --inventory=$ANSIBLE_INVENTORY $PYTHON_INTERPRETER \
--extra-vars "subjects=$TEST_SUBJECTS" \
--extra-vars "artifacts=$TEST_ARTIFACTS" \
--tags classic ${playbook}
fi
done
popd
|
arilivigni/ci-pipeline
|
config/Dockerfiles/singlehost-test/upstreamfirst-test.sh
|
Shell
|
gpl-3.0
| 2,288 |
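The trap in the test runner above is what guarantees artifact collection: clean_up fires on a normal exit and is also wired to the common termination signals, so logs are copied out even when the run is interrupted. A minimal standalone sketch of the pattern, with placeholder paths:

WORKDIR=$(mktemp -d)
clean_up() {
  echo "saving artifacts from $WORKDIR before exiting"
  rm -rf "$WORKDIR"
}
trap clean_up EXIT SIGHUP SIGINT SIGTERM
echo "doing work in $WORKDIR..."
# from here on, both a normal exit and e.g. Ctrl-C run clean_up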
#!/bin/bash
#
# libido: code=bash(three)
#
# libido: expand code
three ici
|
Sylvain303/libido
|
test/python/in_with_dep.sh
|
Shell
|
gpl-3.0
| 77 |
#!/bin/bash
# Removes files in folders
rm data/*
rm res/*
rm res_lab/*
|
francocurotto/GraphSLAM
|
src/python-helpers/v2-unknown-correspondence/removeFilesInFolders.sh
|
Shell
|
gpl-3.0
| 71 |