code (stringlengths 2-1.05M) | repo_name (stringlengths 5-110) | path (stringlengths 3-922) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 2-1.05M)
---|---|---|---|---|---|
#!/bin/sh
BUILD_DIR="build"
if [ "$1" == "clean" ]; then
rm -rf $BUILD_DIR
fi
if [ ! -d $BUILD_DIR ]; then
mkdir $BUILD_DIR || exit 1
fi
cd build
cmake ../ #-DMTM_EMULATOR=ON
make
cd ..
exit 0
|
leftbrainstrain/tpm-emulator
|
build.sh
|
Shell
|
gpl-2.0
| 204 |
#!/bin/sh
#Copyright (C) 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.
#
#The code contained herein is licensed under the GNU General Public
#License. You may obtain a copy of the GNU General Public License
#Version 2 or later at the following locations:
#
#http://www.opensource.org/licenses/gpl-license.html
#http://www.gnu.org/copyleft/gpl.html
###################################################################################################
#
# @file scc_scr.sh
#
# @brief xxx
#
###################################################################################################
#Revision History:
# Modification Tracking
#Author/core ID Date Number Description of Changes
#------------------------- ------------ ---------- -------------------------------------------
#S.Zavjalov/zvjs001c 23/11/2004 TLSbo39738 Initial version
#A.Ozerov/b00320 23/11/2006 TLSbo80386 logical error was fixed.
#
###################################################################################################
LTPROOT=`cd \`dirname $0\` && echo $PWD`
cd $LTPROOT
TESTAPPSCC=./scc_test
TMP_RC=0
rc=0
res=0
anal_res()
{
TMP_RC=$?
echo""
if [ $TMP_RC -eq 0 ];
then
echo " testcase RESULT Exit value"
echo " ----------- --------- -----------"
echo " scc_test TPASS 0"
else
rc=1
res=1
echo " testcase RESULT Exit value"
echo " ----------- --------- -----------"
echo " scc_test TFAIL 1"
fi
echo""
}
anal_res_inv()
{
TMP_RC=$?
echo""
if [ $TMP_RC -eq 0 ];
then
echo " testcase RESULT Exit value"
echo " ----------- --------- -----------"
echo " scc_test TPASS 0"
else
rc=1
echo " testcase RESULT Exit value"
echo " ----------- --------- -----------"
echo " scc_test TFAIL 1"
fi
echo""
}
$TESTAPPSCC -La
anal_res
$TESTAPPSCC -Lr
anal_res_inv
$TESTAPPSCC -T -Lt
anal_res_inv
$TESTAPPSCC -Lzr
anal_res_inv
echo""
echo " final script RESULT "
echo " ------------ ----------------"
if [ $rc -eq 0 ];
then
echo " scc_scr.sh TPASS "
else
echo " scc_scr.sh TFAIL "
fi
if [ $res -eq 0 ];
then
echo""
echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!I!!"
echo "! It is nessesary to reboot the EVB before running other tests !"
echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!I!!"
fi
echo""
exit $rc
|
wanghao-xznu/vte
|
testcases/vte_tests_suite/security_tests/scc_testcases/scc_scr.sh
|
Shell
|
gpl-2.0
| 2,918 |
#!/bin/bash
#Implementation of what is described in:
#http://wiki.flightgear.org/Building_FlightGear_-_Debian
#Uses: git aria2
#More details at http://wiki.flightgear.org/Scripted_Compilation_on_Linux_Debian/Ubuntu
#Could be improved by reviewing:
#https://sourceforge.net/p/flightgear/fgmeta/ci/next/tree/download_and_compile.sh#l21
parametros(){
version=$(git ls-remote --heads git://git.code.sf.net/p/flightgear/flightgear|grep '\/release\/'|cut -f4 -d'/'|sort -t . -k 1,1n -k2,2n -k3,3n|tail -1)
#~ version=2016.1 && version2=2
#Minor version (where applicable):
version2=1
export FG_INSTALL_DIR=/usr/local/games/flightg
export FG_SRC_DIR=/var/tmp/FGsrc
mkdir -p $FG_SRC_DIR
nucleos=3
}
#Some alternative dependencies are mentioned at: http://wiki.flightgear.org/Talk:Scripted_Compilation_on_Linux_Debian/Ubuntu
#TODO: Use the icons in $FG_SRC_DIR/flightgear.git/icons for shortcuts and menu entries.
#IMPROVE: Create a boolean variable to determine whether this is a git build and make everything depend on it.
#IMPROVE: Add a yad menu at startup for the different steps. The data download could run in a separate independent terminal.
#IMPROVE: Check whether the dependencies are already installed before installing them.
#TODO: Use nproc --all to get the number of cores and use n-1 for compilation (-j); see the sketch below.
#IMPROVE: Review http://wiki.flightgear.org/FlightGear_configuration_via_XML, which covers the default configuration.
#IMPROVE: Package instead of a script. See https://community.linuxmint.com/tutorial/view/162 (basically use sudo checkinstall instead of make install). Follow https://www.debian.org/doc/manuals/maint-guide/index.es.html, reviewing https://www.debian.org/doc/manuals/maint-guide/build.es.html#git-buildpackage
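#Sketch for the nproc TODO above (an assumed approach, not yet wired into parametros()):
#  nucleos=$(( $(nproc --all) - 1 ))
#  [ "$nucleos" -lt 1 ] && nucleos=1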
lapausa(){
read -p "
Press Enter to continue with $1." a
}
0eliminaprevio(){
#The repository versions of the packages to be installed must be removed first
lapausa "removing the repository version of FG"
sudo aptitude remove fgo fgrun flightgear flightgear-data-{ai,aircrafts,all,base,models} libplib1
}
1dependencias(){
lapausa "instalar dependencias para compilar"
#MEJORA: Hacer metapaquete con dependenciasm choques y recomendaciones. En las dependencias debe ir paralelos los que pueden serlo.
#~ sudo apt-get update
#~ sudo rm /var/lib/apt/lists/*
#Tools:
paquetes="automake cmake g++ gcc git make sed subversion"
sudo aptitude install $paquetes
#Dependencies:
#After compiling, libopenscenegraph-dev must remain installed
paquetes="freeglut3-dev libboost-dev libcurl4-openssl-dev libdbus-1-dev libfltk1.3-dev libgtkglext1-dev libjpeg62-turbo-dev libopenal-dev libopenscenegraph-dev librsvg2-dev libxml2-dev"
#Additional packages:
#For Fgrun
paquetes="$paquetes fluid"
paquetes="$paquetes libudev-dev"
#For the experimental Qt5 launcher (fgfs --launcher, see http://wiki.flightgear.org/FlightGear_Qt_launcher)
paquetes="$paquetes qt5-default libqt5opengl5-dev"
sudo aptitude install $paquetes --visual-preview
#~ for i in $(echo $paquetes); do
#~ sudo aptitude install --visual-preview $i
#~ sleep 5
#~ done
}
Ainstaladatos() {
lapausa "descargar datos desde sourceforge ~1,5 Gb"
#Datos 2016.1(1.3 Gb):
# axel -an3
aria2c -c -k1M -x3 -d $FG_SRC_DIR https://sourceforge.net/projects/flightgear/files/release-$version/FlightGear-$version.$version2-data.tar.bz2/download && echo -e "\n\nStarting extraction" && \
tar vxjf $FG_SRC_DIR/FlightGear-$version.$version2-data.tar.bz2 -C $FG_SRC_DIR && \
sudo mkdir -p $FG_INSTALL_DIR && echo -e "\n\nStarting copy" && sudo rsync --remove-source-files -a -v $FG_SRC_DIR/fgdata $FG_INSTALL_DIR
find $FG_SRC_DIR -empty -delete
#Data (Git)
#~ cd $FG_INSTALL_DIR
#~ git clone git://git.code.sf.net/p/flightgear/fgdata fgdata
}
2instalaplib() {
lapausa "la compilación e instalación de plib"
#plib
cd $FG_SRC_DIR
svn co https://svn.code.sf.net/p/plib/code/trunk plib.svn
cd plib.svn
sed s/PLIB_TINY_VERSION\ \ 5/PLIB_TINY_VERSION\ \ 6/ -i src/util/ul.h
./autogen.sh
./configure --prefix=$FG_INSTALL_DIR
make -j $nucleos && sudo make install
}
3instalasimgear(){
lapausa "la compilación e instalación de SimGear"
#SimGear
cd $FG_SRC_DIR
git clone git://git.code.sf.net/p/flightgear/simgear simgear.git
#~ echo "cosa $?"
if [ "$?" .gt. "0" ]; then
echo "actualizando"
cd simgear.git
git pull
fi
#~ git checkout
echo "cosa $?"
#~
#Solo 2016.n
#~ cd simgear.git
#~ git checkout release/$version
#2016 y git
#~ mkdir $FG_SRC_DIR/build-sg; cd $FG_SRC_DIR/build-sg
#~ cmake -D CMAKE_INSTALL_PREFIX:PATH="$FG_INSTALL_DIR" $FG_SRC_DIR/simgear.git
#~ make -j $nucleos && sudo make install
}
4instalafligtgear(){
lapausa "la compilación e instalación de FlightGear"
#Flightgear
cd $FG_SRC_DIR
git clone git://git.code.sf.net/p/flightgear/flightgear flightgear.git
cd flightgear.git
git checkout release/$version
mkdir $FG_SRC_DIR/build-fg; cd $FG_SRC_DIR/build-fg
cmake -D CMAKE_INSTALL_PREFIX:PATH="$FG_INSTALL_DIR" $FG_SRC_DIR/flightgear.git
make -j $nucleos && sudo make install
}
5pruebaflightgear(){
lapausa "la prueba de FG"
#Prueba final:
export LD_LIBRARY_PATH=$FG_INSTALL_DIR/lib/:$LD_LIBRARY_PATH
$FG_INSTALL_DIR/bin/fgfs --fg-root=$FG_INSTALL_DIR/fgdata
#Si todo bien, se puede cambiar:
read -p "¿Se ejecutó bien? (s/n)" a
if [ "$a" = "s" ]; then
echo "Activando /bin/fgfs"
sudo ln -fs $FG_INSTALL_DIR/bin/fgfs /bin/fgfs
fi
}
6instalafgrun(){
lapausa "la compilación e instalación de fgrun"
#Fgrun
cd $FG_SRC_DIR
git clone git://git.code.sf.net/p/flightgear/fgrun fgrun.git
#2016.1
cd fgrun.git
git checkout release/$version
mkdir $FG_SRC_DIR/build-fgrun; cd $FG_SRC_DIR/build-fgrun
cmake -D CMAKE_INSTALL_PREFIX:PATH="$FG_INSTALL_DIR" $FG_SRC_DIR/fgrun.git
make -j $nucleos && sudo make install
}
6reconfigurafgrun() {
zcat $FG_INSTALL_DIR/fgdata/Airports/metar.dat.gz > ~/.fltk/flightgear.org/fgrun/airports.txt
cat << FDA > ${HOME}/.fltk/flightgear.org/fgrun.prefs
; FLTK preferences file format 1.0
; vendor: flightgear.org
; application: fgrun. Modified by Delldor
[.]
fg_exe_init:
fg_exe:$FG_INSTALL_DIR/bin/fgfs
fg_aircraft_init:
fg_aircraft:${HOME}/.fgfs/Aircraft/Aeronaves:${HOME}/.fgfs/Aircraft/org.
+flightgear.official/Aircraft
fg_root:$FG_INSTALL_DIR/fgdata
fg_scenery:$FG_INSTALL_DIR/fgdata/Scenery
fg_scenery:${HOME}/.fgfs/terraGIT:${FG_INSTALL_DIR}/fgdata/Scenery
show_3d_preview:0
runway:<por defecto>
horizon_effect:1
enhanced_lighting:1
clouds3d:1
specular_highlight:1
random_objects:1
time_of_day_value:noon
time_of_day:1
random_trees:1
ai_models:1
ai_traffic:1
terrasync:1
fetch_real_weather:1
show_cmd_line:1
show_console:1
FDA
#~ http://wiki.flightgear.org/FlightGear_Launch_Control
}
7pruebafgrun() {
lapausa "la prueba de Fgrun"
export LD_LIBRARY_PATH=$FG_INSTALL_DIR/lib/:$LD_LIBRARY_PATH
$FG_INSTALL_DIR/bin/fgrun
#If everything went well, this can be changed:
read -p "Did it run correctly? (s=yes/n=no) " a
if [ "$a" = "s" ]; then
echo "Enabling /bin/fgrun"
sudo ln -fs $FG_INSTALL_DIR/bin/fgrun /bin/fgrun
fi
}
8finales(){
if [ $(read -p "Pulse s para borrar las carpetas de compilación " a; echo $a) = "s" ]; then
sudo rm -rv $FG_SRC_DIR/build-*
fi
}
desinstala(){
sudo rm -vri /bin/fg{fs,run}
sudo rm -rv $FG_INSTALL_DIR
rm -rv $HOME/.{fgfs,fltk/flightgear.org,fgo}
}
9terragear(){
#http://wiki.flightgear.org/TerraGear
#http://wiki.flightgear.org/Building_TerraGear_in_Ubuntu_910_%2832-_or_64-bit%29#Automatic_Installation
wget -cP $FG_SRC_DIR http://clement.delhamaide.free.fr/download_and_compile_tg.sh
#~ mv -v $FG_SRC_DIR/download_and_compile.sh\?format\=raw $FG_SRC_DIR/download_and_compile.sh
chmod 755 $FG_SRC_DIR/download_and_compile.sh
$FG_SRC_DIR/download_and_compile.sh SIMGEAR TERRAGEAR
}
Baeronaves(){
#TODO: Unpack and place in the correct location.
#IMPROVE: Handle exit errors with a message. Example: 13 means the file already exists.
#TODO: A graphical installer with yad
aria2c -c -k1M -x3 -d $FG_SRC_DIR --allow-overwrite=false --auto-file-renaming=false https://github.com/FGMEMBERS/JPack/archive/master.zip
if [ $(read -p "¿Quiere instalar el A320Neo? (s para proceder) " a; echo $a) = "s" ]; then
aria2c -c -k1M -x3 -d $FG_SRC_DIR --allow-overwrite=false --auto-file-renaming=false https://codeload.github.com/FGMEMBERS/A320neo/zip/master
fi
if [ $(read -p "¿Quiere instalar la familia CRJ700/900/1000? (s para proceder) " a; echo $a) = "s" ]; then
aria2c -c -k1M -x3 -d $FG_SRC_DIR --allow-overwrite=false --auto-file-renaming=false https://codeload.github.com/FGMEMBERS-NONGPL/CRJ700-family/zip/master
fi
if [ $(read -p "¿Quiere instalar la familia Embrae rE? (s para proceder) " a; echo $a) = "s" ]; then
aria2c -c -k1M -x3 -d $FG_SRC_DIR --allow-overwrite=false --auto-file-renaming=false https://codeload.github.com/FGMEMBERS/E-jet-family/zip/master
fi
if [ $(read -p "¿Quiere instalar al 747-8? (s para proceder) " a; echo $a) = "s" ]; then
aria2c -c -k1M -x3 -d $FG_SRC_DIR --allow-overwrite=false --auto-file-renaming=false https://codeload.github.com/FGMEMBERS/747-8i/zip/master
fi
}
CterraGit(){
#Official forum http://thejabberwocky.net/viewforum.php?f=51
mkdir -p ~/.fgfs
cd ~/.fgfs
#Cloning creates the terraGIT directory automatically and downloads 400 MB of base objects
git config color.ui true
git clone https://github.com/FGMEMBERS-TERRAGIT/terraGIT || { cd terraGIT && git pull; }
#~ cd terraGIT
}
C1Caribe(){
~/.fgfs/terraGIT/install/tile w070n10
}
principal(){
parametros
#~ 0eliminaprevio
1dependencias
Ainstaladatos
2instalaplib
3instalasimgear
4instalafligtgear
5pruebaflightgear
6instalafgrun
6reconfigurafgrun
7pruebafgrun
8finales
#~ Baeronaves
#~ CterraGit
#~ C1Caribe
}
principal
#~ desinstala
|
DellDor/InstaladoresDebian
|
CompilaFlighGear.sh
|
Shell
|
gpl-2.0
| 9,586 |
#!/bin/bash
#
# A script to make building various images for the uWARP easier
#
# defines
LOGFILE_NAME="openuwarpbuild.log"
DEFAULT_BASE_DIR_PATH="./"
BASE_DIR_PATH=""
# text colours
NORMAL=`echo "\033[m"`
MENU=`echo "\033[36m"` #Cyan
NUMBER=`echo "\033[33m"` #Yellow
FOREGROUND_RED=`echo "\033[41m"`
RED_TEXT=`echo "\033[31m"`
GREEN_TEXT=`echo "\033[32m"`
ENTER_LINE=`echo "\033[33m"`
WHITE_TEXT=`echo "\033[00m"`
FAIL='\033[01;31m' # bold red
PASS='\033[01;32m' # bold green
RESET='\033[00;00m' # normal white
#############
# Functions
#############
get_run_directory() {
local SOURCE="${BASH_SOURCE[0]}"
# resolve SOURCE until file is not a symlink
while [ -h "$SOURCE" ]; do
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
SOURCE="$(readlink "$SOURCE")"
# if SOURCE was a relative symlink, resolve it relative to the path where symlink was located
[[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE"
done
BASE_DIR_PATH="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
}
set_logfile_path() {
get_run_directory
if [ -d $BASE_DIR_PATH ]; then
LOGFILE=$BASE_DIR_PATH/$LOGFILE_NAME
else
LOGFILE=$DEFAULT_BASE_DIR_PATH/$LOGFILE_NAME
fi
}
show_menu_first() {
echo -e "${MENU}Initial Configuration${RESET}"
echo -e "${MENU}**********************************************************${NORMAL}"
echo -e "${MENU}**${NUMBER} 1)${MENU} Check for Updates ${NORMAL}- Update the source code."
echo -e "${MENU}**${NUMBER} 2)${MENU} Choose Build Option ${NORMAL}- Proceed to build option menu."
echo -e "${MENU}**${NUMBER} 3)${MENU} Quit ${NORMAL}"
echo -e "${MENU}**********************************************************${NORMAL}"
echo -e "${ENTER_LINE}Please enter a menu option or ${RED_TEXT}q to exit. ${NORMAL}"
read -n 1 opt1
echo ""
}
show_menu_second(){
echo -e "${MENU}Build Options${RESET}"
echo -e "${MENU}**********************************************************${NORMAL}"
echo -e "${MENU}**${NUMBER} 1)${MENU} Default ${NORMAL}- Basic router functionality."
echo -e "${MENU}**${NUMBER} 2)${MENU} Custom ${NORMAL}- Select custom options."
echo -e "${MENU}**${NUMBER} 3)${MENU} Asterisk ${NORMAL}- Asterisk PBX."
echo -e "${MENU}**${NUMBER} 4)${MENU} VPN ${NORMAL}- Virtual Private Network."
echo -e "${MENU}**${NUMBER} 5)${MENU} Current ${NORMAL}- Keep current options."
echo -e "${MENU}**${NUMBER} 6)${MENU} Quit ${NORMAL}"
echo -e "${MENU}**********************************************************${NORMAL}"
echo -e "${ENTER_LINE}Please enter a menu option or ${RED_TEXT}q to exit. ${NORMAL}"
read -n 1 opt2
echo ""
}
show_menu_three() {
echo -e "${MENU}Board Type${RESET}"
echo -e "${MENU}**********************************************************${NORMAL}"
echo -e "${MENU}**${NUMBER} 1)${MENU} 8MB ${NORMAL}- Build for Open uWARP with 8MB flash."
echo -e "${MENU}**${NUMBER} 2)${MENU} 16MB ${NORMAL}- Build for Open uWARP with 16MB flash."
echo -e "${MENU}**${NUMBER} 3)${MENU} Quit ${NORMAL}"
echo -e "${MENU}**********************************************************${NORMAL}"
echo -e "${ENTER_LINE}Please enter a menu option or ${RED_TEXT}q to exit. ${NORMAL}"
read -n 1 opt3
echo ""
}
show_menu_fourth() {
echo -e "${MENU}Custom Configuration${RESET}"
echo -e "${MENU}**********************************************************${NORMAL}"
echo -e "${MENU}**${NUMBER} 1)${MENU} Yes ${NORMAL}- Customize build configuration."
echo -e "${MENU}**${NUMBER} 2)${MENU} No ${NORMAL}- Proceed to build."
echo -e "${MENU}**${NUMBER} 3)${MENU} Quit ${NORMAL}"
echo -e "${MENU}**********************************************************${NORMAL}"
echo -e "${ENTER_LINE}Please enter a menu option or ${RED_TEXT}q to exit. ${NORMAL}"
read -n 1 opt4
echo ""
}
show_menu_fifth() {
echo -e "${MENU}Ready to Build${RESET}"
echo -e "${MENU}**********************************************************${NORMAL}"
echo -e "${MENU}**${NUMBER} 1)${MENU} Yes ${NORMAL}- Start the build"
echo -e "${MENU}**${NUMBER} 2)${MENU} No ${NORMAL}- Stop and exit."
echo -e "${MENU}**${NUMBER} 3)${MENU} Quit ${NORMAL}"
echo -e "${MENU}**********************************************************${NORMAL}"
echo -e "${ENTER_LINE}Please enter a menu option or ${RED_TEXT}q to exit. ${NORMAL}"
read -n 1 opt5
echo ""
}
function option_picked() {
local MESSAGE=${@:-"${FAIL}ERROR: No or invalid option picked!${RESET}"}
local CLEANMESSAGE=${@:-"ERROR: No or invalid option picked!"}
echo -e "\n${PASS}${MESSAGE}${RESET}\n"
echo -e "\n$CLEANMESSAGE\n" >> $LOGFILE
}
function say_goodbye() {
echo -e "\n${PASS}Goodbye!${RESET}\n"
echo -e "\nGoodbye!\n" >> $LOGFILE
exit 0
}
function print_error() {
local MESSAGE=${@:-"${FAIL}ERROR: ${RESET}Unknown"}
local CLEANMESSAGE=${@:-"ERROR: Unknown"}
echo -e "\n${FAIL}ERROR: ${RESET}${MESSAGE}"
echo -e "${FAIL}ERROR: ${RESET}Consult the \"$LOGFILE\" for further details\n"
echo -e "\nERROR: ${CLEANMESSAGE}\n" >> $LOGFILE
}
function print_success() {
local MESSAGE=${@:-"${PASS}SUCCESS: ${RESET}Unknown"}
local CLEANMESSAGE=${@:-"SUCCESS: Unknown"}
echo -e "\n${PASS}SUCCESS: ${RESET}${MESSAGE}\n"
echo -e "\nSUCCESS: ${CLEANMESSAGE}\n" >> $LOGFILE
}
#######################
# Main
######################
# For initial release, support only 16M boards
SUPPORT_8MB_UWARP=1
# set paths and logfile name
set_logfile_path
# put system info header in logfile for debug purposes
echo -e "###########################################################\n" > $LOGFILE
echo -e "Build Start: `date`" >> $LOGFILE
echo -e "Kernel Info: `uname -a`" >> $LOGFILE
for versionfile in `ls /etc/*_version`; do
echo -e "Version Info: $versionfile" >> $LOGFILE
cat $versionfile >> $LOGFILE
done
echo -e "\n###########################################################\n" >> $LOGFILE
# change to source directory and make sure build system is there
CURDIR=`pwd`
cd $BASE_DIR_PATH
if [ ! -d uwarp_configs ]; then
cd $CURDIR
if [ ! -d uwarp_configs ]; then
print_error "Cannot find source code."
exit 1
fi
fi
#
# give an intro
#
clear
echo -e "${PASS}Welcome${RESET}"
echo -e "This is the simple menu system to create images for the PIKA uWARP embedded device."
echo -e "Additional information on this device can be found at:"
echo -e " http://www.pikatechnologies.com/english/View.asp?x=1296 \n"
echo -e "To create a custom image for your Open uWARP device, please select your options from"
echo -e "the following menus as appropriate.\n\n\n"
#
# First menu
#
clear
show_menu_first
while [ "$opt1" != '' ]
do
if [ "$opt1" = "" ]; then
clear;
option_picked;
show_menu_first;
else
case $opt1 in
1)
option_picked "Checking for updates ...";
echo -e "Updating source code ...\n" | tee -a $LOGFILE;
git pull 2>>$LOGFILE | tee -a $LOGFILE;
echo -e "Updating packages ...\n" | tee -a $LOGFILE;
./scripts/feeds update -a 2>>$LOGFILE | tee -a $LOGFILE;
./scripts/feeds install -a 2>>$LOGFILE | tee -a $LOGFILE;
if [ $? -eq 0 ]; then
print_success "Updated source code."
else
print_error "Failed to update source code."
fi
break;
;;
2)
option_picked "Choose build option";
break;
;;
3|q)
say_goodbye;
;;
*)
clear;
option_picked "Pick an option from the menu";
show_menu_first;
;;
esac
fi
done
#
# Second menu
#
sleep 1
clear
show_menu_second
while [ "$opt2" != '' ]
do
if [ "$opt2" = "" ]; then
clear;
option_picked;
show_menu_second;
else
case $opt2 in
1|2)
option_picked "Using default image.";
cp uwarp_configs/default .config;
if [ $? -eq 0 ]; then
print_success "Default image set."
if [ $opt2 -eq 2 ]; then
option_picked "Custom configuration ...\nStarting menu ...";
echo -e "\nStarting custom configuration ...\n" >> $LOGFILE
make menuconfig
fi
else
print_error "Failed to set up default image."
fi
break;
;;
3)
option_picked "Using Asterisk image.";
cp uwarp_configs/asterisk .config;
if [ $? -eq 0 ]; then
print_success "Asterisk image set."
else
print_error "Failed to set up Asterisk image."
fi
break;
;;
4)
option_picked "Using VPN image.";
cp uwarp_configs/vpn .config;
if [ $? -eq 0 ]; then
print_success "VPN image set."
else
print_error "Failed to set up VPN image."
fi
break;
;;
5)
option_picked "Keeping Current selections.";
break;
;;
6|q)
say_goodbye;
;;
*)
clear;
option_picked "Pick an option from the menu";
show_menu_second;
;;
esac
fi
done
if [ $SUPPORT_8MB_UWARP -eq 0 ]; then
#
# Third menu
#
# don't select board if custom config ran in menu 2
if [ $opt2 -ne 2 ]; then
sleep 1
clear
show_menu_three
while [ "$opt3" != '' ]
do
if [ "$opt3" = "" ]; then
clear;
option_picked;
show_menu_three;
else
case $opt3 in
1)
option_picked "Building for Open uWARP with 8MB flash ...";
echo -e "\nBuilding for Open uWARP with 8MB flash ...\n" >> $LOGFILE
fgrep -q "CONFIG_TARGET_ar71xx_generic_UWARP8MB=y" .config
if [ $? -ne 0 ]; then
sed -i "s?# CONFIG_TARGET_ar71xx_generic_UWARP8MB is not set?CONFIG_TARGET_ar71xx_generic_UWARP8MB=y?g" .config
sed -i "s?CONFIG_TARGET_ar71xx_generic_UWARP16MB=y?# CONFIG_TARGET_ar71xx_generic_UWARP16MB is not set?g" .config
fi
fgrep -q "CONFIG_ATH79_MACH_UWARP_SPI_8M=y" target/linux/ar71xx/config-3.10
if [ $? -ne 0 ]; then
sed -i "s?# CONFIG_ATH79_MACH_UWARP_SPI_8M is not set?CONFIG_ATH79_MACH_UWARP_SPI_8M=y?g" target/linux/ar71xx/config-3.10
sed -i "s?CONFIG_ATH79_MACH_UWARP_SPI_16M=y?# CONFIG_ATH79_MACH_UWARP_SPI_16M is not set?g" target/linux/ar71xx/config-3.10
touch target/linux/ar71xx/Makefile
# make clean before build new images
#echo -e "\nSwitching to 8MB flash, make clean so builds properly ...\n" >> $LOGFILE
#make clean 2>>$LOGFILE | tee -a $LOGFILE;
fi
break;
;;
2)
option_picked "Building for Open uWARP with 16MB flash ...";
echo -e "\nBuilding for Open uWARP with 16MB flash ...\n" >> $LOGFILE
fgrep -q "CONFIG_TARGET_ar71xx_generic_UWARP16MB=y" .config
if [ $? -ne 0 ]; then
sed -i "s?# CONFIG_TARGET_ar71xx_generic_UWARP16MB is not set?CONFIG_TARGET_ar71xx_generic_UWARP16MB=y?g" .config
sed -i "s?CONFIG_TARGET_ar71xx_generic_UWARP8MB=y?# CONFIG_TARGET_ar71xx_generic_UWARP8MB is not set?g" .config
fi
fgrep -q "CONFIG_ATH79_MACH_UWARP_SPI_16M=y" target/linux/ar71xx/config-3.10
if [ $? -ne 0 ]; then
sed -i "s?# CONFIG_ATH79_MACH_UWARP_SPI_16M is not set?CONFIG_ATH79_MACH_UWARP_SPI_16M=y?g" target/linux/ar71xx/config-3.10
sed -i "s?CONFIG_ATH79_MACH_UWARP_SPI_8M=y?# CONFIG_ATH79_MACH_UWARP_SPI_8M is not set?g" target/linux/ar71xx/config-3.10
touch target/linux/ar71xx/Makefile
# make clean before build new images
#echo -e "\nSwitching to 16MB flash, make clean so builds properly ...\n" >> $LOGFILE
#make clean 2>>$LOGFILE | tee -a $LOGFILE;
fi
break;
;;
3|q)
say_goodbye;
;;
*)
clear;
option_picked "Pick an option from the menu";
show_menu_three;
;;
esac
fi
done
fi
# SUPPORT_8MB_UWARP
fi
#
# Fourth menu
#
# don't run custom config if ran in menu 2
if [ $opt2 -ne 2 ]; then
sleep 1
clear
show_menu_fourth
while [ "$opt4" != '' ]
do
if [ "$opt4" = "" ]; then
clear;
option_picked;
show_menu_fourth;
else
case $opt4 in
1)
option_picked "Customize build configuration ...\nStarting menu ...";
echo -e "\nStarting custom configuration ...\n" >> $LOGFILE
make menuconfig
break;
;;
2)
option_picked "Proceeding to build selection ....";
break;
;;
3|q)
say_goodbye;
;;
*)
clear;
option_picked "Pick an option from the menu";
show_menu_fourth;
;;
esac
fi
done
fi
#
# Fifth menu
#
if [ $opt2 -ne 2 ]; then
sleep 1
fi
clear
show_menu_fifth
while [ "$opt5" != '' ]
do
if [ "$opt5" = "" ]; then
clear;
option_picked;
show_menu_fifth;
else
case $opt5 in
1)
option_picked "Starting the build ...\nThis may take a while ...";
echo -e "\nStarting the build ...\n" >> $LOGFILE
# remove old images first
echo -e "\nRemoving old build files...\n" >> $LOGFILE
rm bin/ar71xx/openwrt-ar71xx-generic-uwarp-ar7420-squashfs-* 2>>$LOGFILE
# build new images
make V=99 2>>$LOGFILE | tee -a $LOGFILE;
# check if images created
if [ -f bin/ar71xx/openwrt-ar71xx-generic-uwarp-ar7420-squashfs-factory.bin ] && [ -f bin/ar71xx/openwrt-ar71xx-generic-uwarp-ar7420-squashfs-sysupgrade.bin ]; then
print_success "Image files have been created. Find them in $BASE_DIR_PATH/bin/ar71xx/ directory."
else
print_error "Failed to create image files."
fi
break;
;;
2)
option_picked "Stopping to exit ....";
echo -e "\nStopping to exit ...\n" >> $LOGFILE
say_goodbye;
;;
3|q)
say_goodbye;
;;
*)
clear;
option_picked "Pick an option from the menu";
show_menu_fifth;
;;
esac
fi
done
say_goodbye
exit 0
|
pikatechnologies/openuwarp
|
open-uwarp-build.sh
|
Shell
|
gpl-2.0
| 12,942 |
#!/bin/sh
files=`find $1 -name "*.c" -o -name "*.h" \
| sort \
| grep -v intl \
| grep -v "Freeciv.h" \
| fgrep -v "_gen." \
| grep -v "config.h" \
| grep -v "config.mac.h" \
| grep -v amiga \
| grep -v gtkpixcomm \
| grep -v mmx.h \
| grep -v SDL_ttf \
| grep -v xaw/canvas \
| grep -v pixcomm`
echo "# No Freeciv Copyright:"
echo "# Excludes: generated files, amiga, various 3rd party sources"
for file in $files; do
# echo "testing $file..."
grep "Freeciv - Copyright" $file >/dev/null || echo $file
done
echo
echo "# No or other GPL:"
for file in $files; do
grep "GNU General Public License" $file >/dev/null || echo $file
done
echo
|
seggil/warciv
|
tests/copyright.sh
|
Shell
|
gpl-2.0
| 735 |
#!/bin/bash
cd test
runghc -i../src/ -i../lib/ EndToEndTest.hs
cd ..
|
ivanmoore/seedy
|
run-tests.sh
|
Shell
|
gpl-3.0
| 69 |
#!/bin/bash
rm -f seipp.zip
zip -r seipp.zip . -x ".git*" "scripts/*" "node_modules/*"
echo "Create seipp.zip"
|
jonatasrs/sei
|
scripts/make.sh
|
Shell
|
gpl-3.0
| 112 |
#!/bin/bash
test_info()
{
cat <<EOF
Verify that the CTDB_NFS_SKIP_SHARE_CHECK configuration option is respected.
We create a file in /etc/ctdb/rc.local.d/ that creates a function
called exportfs. This effectively hooks the exportfs command,
allowing us to provide a fake list of shares to check or not check.
We create another file in the same directory to set and unset the
CTDB_NFS_SKIP_SHARE_CHECK option, utilising the shell's "readonly"
built-in to ensure that our value for the option is used.
Prerequisites:
* An active CTDB cluster with at least 2 nodes with public addresses.
* Test must be run on a real or virtual cluster rather than against
local daemons. There is nothing intrinsic to this test that forces
this - it is because tests run against local daemons don't use the
regular eventscripts.
Steps:
1. Verify that the cluster is healthy.
2. Determine a timeout for state changes by adding MonitorInterval
and EventScriptTimeout.
3. Create a temporary directory on the test node using mktemp,
remember the name in $mydir.
4. On the test node create an executable file
/etc/ctdb/rc.local.d/fake-exportfs that contains a definition for
the function exportfs, which prints a share definition for a
directory $mydir/foo (which does not currently exist).
5. On the test node create an executable file
/etc/ctdb/rc.local.d/nfs-skip-share-check that replaces the
loadconfig() function by one with equivalent functionality, but
which also sets CTDB_NFS_SKIP_SHARE_CHECK="no" if loading
"ctdb" configuration.
6. Wait for the test node to become unhealthy.
7. Create the directory $mydir/foo.
8. Wait for the test node to become healthy.
9. Modify /etc/ctdb/rc.local.d/nfs-skip-share-check so that it sets
CTDB_NFS_SKIP_SHARE_CHECK to "yes".
10. Remove the directory $mydir/foo.
11. Wait for a monitor event and confirm that the node is still
healthy.
Expected results:
* When an NFS share directory is missing CTDB should only mark a node
as unhealthy if CTDB_NFS_SKIP_SHARE_CHECK is set to "no".
EOF
}
. ctdb_test_functions.bash
set -e
ctdb_test_init "$@"
ctdb_test_check_real_cluster
cluster_is_healthy
select_test_node_and_ips
# We need this for later, so we know how long to sleep.
try_command_on_node $test_node $CTDB getvar MonitorInterval
monitor_interval=${out#*= }
try_command_on_node $test_node $CTDB getvar EventScriptTimeout
event_script_timeout=${out#*= }
monitor_timeout=$(($monitor_interval + $event_script_timeout))
echo "Using timeout of ${monitor_timeout}s (MonitorInterval + EventScriptTimeout)..."
mydir=$(onnode -q $test_node mktemp -d)
rc_local_d="${CTDB_BASE:-/etc/ctdb}/rc.local.d"
my_exit_hook ()
{
ctdb_test_eventscript_uninstall
onnode -q $test_node "rm -f $mydir/*"
onnode -q $test_node "rmdir --ignore-fail-on-non-empty $mydir"
onnode -q $test_node "rm -f \"$rc_local_d/\"*"
onnode -q $test_node "rmdir --ignore-fail-on-non-empty \"$rc_local_d\""
}
ctdb_test_exit_hook_add my_exit_hook
ctdb_test_eventscript_install
foo_dir=$mydir/foo
try_command_on_node -v $test_node "mkdir -p \"$rc_local_d\""
f="$rc_local_d/fake-exportfs"
echo "Installing \"$f\"..."
try_command_on_node $test_node "echo \"function exportfs () { echo $foo_dir 127.0.0.1/32 ; }\" >\"$f\" ; chmod +x \"$f\""
n="$rc_local_d/nfs-skip-share-check"
n_contents='loadconfig() {
name="$1"
if [ -f /etc/sysconfig/$name ]; then
. /etc/sysconfig/$name
elif [ -f /etc/default/$name ]; then
. /etc/default/$name
elif [ -f $CTDB_BASE/sysconfig/$name ]; then
. $CTDB_BASE/sysconfig/$name
fi
if [ "$name" = "ctdb" ] ; then
CTDB_NFS_SKIP_SHARE_CHECK=no
fi
}
'
echo "Installing \"$n\" with CTDB_NSF_SKIP_SHARE_CHECK=no..."
try_command_on_node $test_node "echo '$n_contents' >\"$n\" ; chmod +x \"$n\""
wait_until_node_has_status $test_node unhealthy $monitor_timeout
try_command_on_node -v $test_node "mkdir $foo_dir"
wait_until_node_has_status $test_node healthy $monitor_timeout
echo "Re-installing \"$n\" with CTDB_NFS_SKIP_SHARE_CHECK=yes..."
try_command_on_node $test_node "echo '${n_contents/=no/=yes}' >\"$n\" ; chmod +x \"$n\""
try_command_on_node -v $test_node "rmdir $foo_dir"
wait_for_monitor_event $test_node
wait_until_node_has_status $test_node healthy 1
|
wolfmuel/ctdb
|
tests/complex/01_ctdb_nfs_skip_share_check.sh
|
Shell
|
gpl-3.0
| 4,320 |
#!/bin/bash
set -e
if [ -z "${CGAL_TEST_PLATFORM}" ]; then
export CGAL_TEST_PLATFORM="${HOSTNAME}"
echo "CGAL_TEST_PLATFORM not set. Using HOSTNAME:${HOSTNAME}"
fi
# HACK: We depend on this line to easily extract the platform name
# from the logs.
echo "CGAL_TEST_PLATFORM=${CGAL_TEST_PLATFORM}"
if [ -z "${CGAL_NUMBER_OF_JOBS}" ]; then
CGAL_NUMBER_OF_JOBS=1
echo "CGAL_NUMBER_OF_JOBS not set. Defaulting to 1."
else
echo "CGAL_NUMBER_OF_JOBS is ${CGAL_NUMBER_OF_JOBS}."
fi
declare -a "CGAL_CMAKE_FLAGS=${CGAL_CMAKE_FLAGS}"
echo "CGAL_CMAKE_FLAGS is ${CGAL_CMAKE_FLAGS[@]}."
# The directory where the release is stored.
CGAL_RELEASE_DIR="/mnt/testsuite/"
# Directory where CGAL sources are stored.
CGAL_SRC_DIR="${CGAL_RELEASE_DIR}src/"
# Directory where CGAL tests are stored.
CGAL_TEST_DIR="${CGAL_RELEASE_DIR}test/"
# The directory where testresults are stored.
CGAL_TESTRESULTS="/mnt/testresults/"
# The actual logfile.
CGAL_LOG_FILE="${CGAL_TESTRESULTS}${CGAL_TEST_PLATFORM}"
# The directory of the build tree. The layout is this convoluted to
# satisfy collect_cgal_testresults_from_cmake.
#
# It assumes the build directory containing CMakeCache.txt,
# include/CGAL/compiler_config.h, etc. to be the parent directory.
CGAL_DIR="$HOME/build/src/cmake/platforms/${CGAL_TEST_PLATFORM}/"
CGAL_SRC_BUILD_DIR="${CGAL_DIR}"
CGAL_TEST_BUILD_DIR="$HOME/build/src/cmake/platforms/${CGAL_TEST_PLATFORM}/test/"
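# Layout sketch implied by the paths above (assumed, for orientation only):
#   $HOME/build/src/cmake/platforms/$CGAL_TEST_PLATFORM/       <- CGAL build dir (CMakeCache.txt, installation.log)
#   $HOME/build/src/cmake/platforms/$CGAL_TEST_PLATFORM/test/  <- copy of the test tree, built and run there
#   $HOME/build/src/include/CGAL/version.h                     <- copied later so the collect script can find it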
export CGAL_DIR
export CGAL_TEST_PLATFORM
# Create the binary directories
if [ ! -d "${CGAL_SRC_BUILD_DIR}" ]; then
mkdir -p "${CGAL_SRC_BUILD_DIR}"
fi
if [ ! -d "${CGAL_TEST_BUILD_DIR}" ]; then
mkdir -p "${CGAL_TEST_BUILD_DIR}"
fi
# Build CGAL. The CGAL_CMAKE_FLAGS used here will affect all other
# builds using this binary directory.
cd "${CGAL_SRC_BUILD_DIR}"
cmake -DRUNNING_CGAL_AUTO_TEST=TRUE VERBOSE=1 \
${CGAL_CMAKE_FLAGS[@]} "${CGAL_RELEASE_DIR}" 2>&1 | tee "installation.log"
make VERBOSE=ON -k -fMakefile 2>&1 | tee -a "installation.log"
# collect_cgal_testresults_from_cmake expects installation.log in ../../
cp "installation.log" "../installation.log"
# Build and Execute the Tests
# We need to make a copy of the whole test dir because the current
# scripts don't allow out of source builds.
cp -r "${CGAL_TEST_DIR}/." "${CGAL_TEST_BUILD_DIR}"
cd "${CGAL_TEST_BUILD_DIR}"
make -j ${CGAL_NUMBER_OF_JOBS} -k -fmakefile2
# Copy version.h, so that collect_cgal_testresults_from_cmake can find it.
mkdir -p "$HOME/build/src/include/CGAL"
cp "${CGAL_RELEASE_DIR}/include/CGAL/version.h" "$HOME/build/src/include/CGAL"
./collect_cgal_testresults_from_cmake
# Those are the files generated by collect_cgal_testresults_from_cmake.
cp "results_${CGAL_TESTER}_${CGAL_TEST_PLATFORM}.tar.gz" "results_${CGAL_TESTER}_${CGAL_TEST_PLATFORM}.txt" \
"${CGAL_TESTRESULTS}/"
|
bo0ts/cgal-testsuite-dockerfiles
|
run-testsuite.sh
|
Shell
|
gpl-3.0
| 2,829 |
#!/usr/bin/env bash
# run scrapy to scrape links
# select the links containing similar tags/words
# run ner/sa
# print out results
SEARCH=$1
PWD=`pwd`
TIME=`date +"%s"`
CRAWL=./crawl/
echo "Scraping sites"
cd $CRAWL
NAME1="$TIME"-independet
# echo $NAME1
scrapy runspider ./crawl/spiders/independent.py --output ../scraped/"$NAME1".csv 2>&1 | tee -a ../scraped/log-"$TIME".log
NAME2="$TIME"-theguardian
# echo $NAME2
scrapy runspider ./crawl/spiders/theguardian.py --output ../scraped/"$NAME2".csv 2>&1 | tee -a ../scraped/log-"$TIME".log
cd ..
echo "Searching strings $SEARCH"
python ./search.py "$NAME1" "$NAME2" "$SEARCH"
echo "Which do you wish to compare from $NAME1?"
read WHICH1
echo "Which do you wish to compare it to, fom $NAME2?"
read WHICH2
# contains the guessing sequence by NN
|
v0idwalker/mgr-sources
|
scrape.sh
|
Shell
|
gpl-3.0
| 827 |
#!/bin/bash
#
# depositfiles.com module
# Copyright (c) 2010-2013 Plowshare team
#
# This file is part of Plowshare.
#
# Plowshare is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Plowshare is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Plowshare. If not, see <http://www.gnu.org/licenses/>.
MODULE_DEPOSITFILES_REGEXP_URL="https\?://\(www\.\)\?\(depositfiles\.\(com\|org\)\)\|\(dfiles\.\(eu\|ru\)\)/"
MODULE_DEPOSITFILES_DOWNLOAD_OPTIONS="
AUTH,a,auth,a=USER:PASSWORD,User account"
MODULE_DEPOSITFILES_DOWNLOAD_RESUME=yes
MODULE_DEPOSITFILES_DOWNLOAD_FINAL_LINK_NEEDS_COOKIE=unused
MODULE_DEPOSITFILES_DOWNLOAD_SUCCESSIVE_INTERVAL=
MODULE_DEPOSITFILES_UPLOAD_OPTIONS="
AUTH,a,auth,a=USER:PASSWORD,User account
API,,api,,Use new upload method/non-public API"
MODULE_DEPOSITFILES_UPLOAD_REMOTE_SUPPORT=no
MODULE_DEPOSITFILES_DELETE_OPTIONS=""
MODULE_DEPOSITFILES_LIST_OPTIONS=""
MODULE_DEPOSITFILES_PROBE_OPTIONS=""
# Static function. Proceed with login (free & gold account)
depositfiles_login() {
local AUTH=$1
local COOKIE_FILE=$2
local BASE_URL=$3
local LOGIN_DATA LOGIN_RESULT
LOGIN_DATA='go=1&login=$USER&password=$PASSWORD'
LOGIN_RESULT=$(post_login "$AUTH" "$COOKIE_FILE" "$LOGIN_DATA" \
"$BASE_URL/login.php" -b 'lang_current=en') || return
if match 'recaptcha' "$LOGIN_RESULT"; then
log_debug "recaptcha solving required for login"
local PUBKEY WCI CHALLENGE WORD ID
PUBKEY='6LdRTL8SAAAAAE9UOdWZ4d0Ky-aeA7XfSqyWDM2m'
WCI=$(recaptcha_process $PUBKEY) || return
{ read WORD; read CHALLENGE; read ID; } <<<"$WCI"
LOGIN_RESULT=$(post_login "$AUTH" "$COOKIE_FILE" "$LOGIN_DATA" \
"$BASE_URL/login.php" -b 'lang_current=en' \
-d "recaptcha_challenge_field=$CHALLENGE" \
-d "recaptcha_response_field=$WORD") || return
# <div class="error_message">Security code not valid.</div>
if match 'code not valid' "$LOGIN_RESULT"; then
captcha_nack $ID
log_debug "reCaptcha error"
return $ERR_CAPTCHA
fi
captcha_ack $ID
log_debug "correct captcha"
fi
# <div class="error_message">Your password or login is incorrect</div>
if match 'login is incorrect' "$LOGIN_RESULT"; then
return $ERR_LOGIN_FAILED
fi
}
# Output a depositfiles file download URL
# $1: cookie file
# $2: depositfiles.com url
# stdout: real file download link
depositfiles_download() {
local COOKIEFILE=$1
local URL=$2
local BASE_URL='http://depositfiles.com'
local START DLID WAITTIME DATA FID SLEEP FILE_URL
if [ -n "$AUTH" ]; then
depositfiles_login "$AUTH" "$COOKIEFILE" "$BASE_URL" || return
fi
if [ -s "$COOKIEFILE" ]; then
START=$(curl -L -b "$COOKIEFILE" -b 'lang_current=en' "$URL") || return
else
START=$(curl -L -b 'lang_current=en' "$URL") || return
fi
if match "no_download_msg" "$START"; then
# Please try again in 1 min until file processing is complete.
if match 'html_download_api-temporary_unavailable' "$START"; then
return $ERR_LINK_TEMP_UNAVAILABLE
# Attention! You have exceeded the 20 GB 24-hour limit.
elif match 'html_download_api-gold_traffic_limit' "$START"; then
log_error "Traffic limit exceeded (20 GB)"
return $ERR_LINK_TEMP_UNAVAILABLE
fi
return $ERR_LINK_DEAD
fi
test "$CHECK_LINK" && return 0
if match "download_started()" "$START"; then
FILE_URL=$(echo "$START" | parse_attr 'download_started()' 'href') || return
echo "$FILE_URL"
return 0
fi
DLID=$(echo "$START" | parse 'switch_lang' 'files%2F\([^"]*\)')
log_debug "download ID: $DLID"
if [ -z "$DLID" ]; then
log_error "Can't parse download id, site updated"
return $ERR_FATAL
fi
# 1. Check for error messages (first page)
# - You have reached your download time limit.<br>Try in 10 minutes or use GOLD account.
if match 'download time limit' "$START"; then
WAITTIME=$(echo "$START" | parse 'Try in' "in \([[:digit:]:]*\) minutes")
if [[ $WAITTIME -gt 0 ]]; then
echo $((WAITTIME * 60))
fi
return $ERR_LINK_TEMP_UNAVAILABLE
fi
DATA=$(curl --data "gateway_result=1" "$BASE_URL/en/files/$DLID") || return
# 2. Check if we have been redirected to initial page
if match '<input type="button" value="Gold downloading"' "$DATA"; then
log_error "FIXME"
return $ERR_FATAL
fi
# 3. Check for error messages (second page)
# - Attention! You used up your limit for file downloading!
# - Attention! Connection limit has been exhausted for your IP address!
if match 'limit for file\|exhausted for your IP' "$DATA"; then
WAITTIME=$(echo "$DATA" | \
parse 'class="html_download_api-limit_interval"' 'l">\([^<]*\)<')
log_debug "limit reached: waiting $WAITTIME seconds"
echo $((WAITTIME))
return $ERR_LINK_TEMP_UNAVAILABLE
# - Such file does not exist or it has been removed for infringement of copyrights.
elif match 'html_download_api-not_exists' "$DATA"; then
return $ERR_LINK_DEAD
# - We are sorry, but all downloading slots for your country are busy.
elif match 'html_download_api-limit_country' "$DATA"; then
return $ERR_LINK_TEMP_UNAVAILABLE
fi
FID=$(echo "$DATA" | parse 'var[[:space:]]fid[[:space:]]=' "[[:space:]]'\([^']*\)") ||
{ log_error "cannot find fid"; return $ERR_FATAL; }
SLEEP=$(echo "$DATA" | parse "download_waiter_remain" ">\([[:digit:]]\+\)<") ||
{ log_error "cannot get wait time"; return $ERR_FATAL; }
# Usual wait time is 60 seconds
wait $((SLEEP + 1)) seconds || return
DATA=$(curl --location "$BASE_URL/get_file.php?fid=$FID") || return
# reCaptcha page (challenge forced)
if match 'load_recaptcha();' "$DATA"; then
local PUBKEY WCI CHALLENGE WORD ID
PUBKEY='6LdRTL8SAAAAAE9UOdWZ4d0Ky-aeA7XfSqyWDM2m'
WCI=$(recaptcha_process $PUBKEY) || return
{ read WORD; read CHALLENGE; read ID; } <<<"$WCI"
DATA=$(curl --get --location -b 'lang_current=en' \
--data "fid=$FID&challenge=$CHALLENGE&response=$WORD" \
-H "X-Requested-With: XMLHttpRequest" --referer "$URL" \
"$BASE_URL/get_file.php") || return
if match 'Download the file' "$DATA"; then
captcha_ack $ID
log_debug "correct captcha"
echo "$DATA" | parse_form_action
return 0
fi
captcha_nack $ID
log_debug "reCaptcha error"
return $ERR_CAPTCHA
fi
echo "$DATA" | parse_form_action
}
# Upload a file to depositfiles
# $1: cookie file
# $2: input file (with full path)
# $3: remote filename
# stdout: depositfiles download link
depositfiles_upload() {
local COOKIEFILE=$1
local FILE=$2
local DESTFILE=$3
local -r BASE_URL='http://dfiles.eu'
local DATA DL_LINK DEL_LINK SIZE MAX_SIZE #used by both methods
local FORM_HTML FORM_URL FORM_UID FORM_GO FORM_AGREE # used by old method
local UP_URL STATUS MEMBER_KEY # used by new method
if [ -n "$AUTH" ]; then
depositfiles_login "$AUTH" "$COOKIEFILE" "$BASE_URL" || return
fi
if [ -n "$API" ]; then
if [ -n "$AUTH" ]; then
DATA=$(curl -b "$COOKIEFILE" "$BASE_URL") || return
MEMBER_KEY=$(echo "$DATA" | parse_attr 'upload index_upload' \
'sharedkey') || return
fi
DATA=$(curl -b "$COOKIEFILE" "$BASE_URL/api/upload/regular") || return
STATUS=$(echo "$DATA" | parse_json 'status') || return
if [ "$STATUS" != 'OK' ]; then
log_error "Unexpected remote error: $STATUS"
return $ERR_FATAL
fi
UP_URL=$(echo "$DATA" | parse_json 'upload_url') || return
MAX_SIZE=$(echo "$DATA" | parse_json 'max_file_size_mb') || return
MAX_SIZE=$(translate_size "${MAX_SIZE}MB") || return
log_debug "MEMBER_KEY: '$MEMBER_KEY'"
log_debug "UP_URL: $UP_URL"
else
DATA=$(curl -b "$COOKIEFILE" "$BASE_URL") || return
FORM_HTML=$(grep_form_by_id "$DATA" 'upload_form') || return
FORM_URL=$(echo "$FORM_HTML" | parse_form_action) || return
MAX_SIZE=$(echo "$FORM_HTML" | parse_form_input_by_name 'MAX_FILE_SIZE')
FORM_UID=$(echo "$FORM_HTML" | parse_form_input_by_name 'UPLOAD_IDENTIFIER')
FORM_GO=$(echo "$FORM_HTML" | parse_form_input_by_name_quiet 'go')
FORM_AGREE=$(echo "$FORM_HTML" | parse_form_input_by_name_quiet 'agree')
fi
# File size limit check
SIZE=$(get_filesize "$FILE") || return
if [ "$SIZE" -gt "$MAX_SIZE" ]; then
log_debug "File is bigger than $MAX_SIZE"
return $ERR_SIZE_LIMIT_EXCEEDED
fi
if [ -n "$API" ]; then
# Note: The website does an OPTIONS request to $UP_URL first, but
# curl cannot do this.
DATA=$(curl_with_log -b "$COOKIEFILE" \
-F "files=@$FILE;filename=$DESTFILE" -F 'format=html5' \
-F "member_passkey=$MEMBER_KEY" -F 'fm=_root' -F 'fmh=' \
"$UP_URL") || return
STATUS=$(echo "$DATA" | parse_json 'status') || return
if [ "$STATUS" != 'OK' ]; then
log_error "Unexpected remote error: $STATUS"
return $ERR_FATAL
fi
DL_LINK=$(echo "$DATA" | parse_json 'download_url') || return
DEL_LINK=$(echo "$DATA" | parse_json 'delete_url') || return
else
DATA=$(curl_with_log -b "$COOKIEFILE" \
-F "MAX_FILE_SIZE=$FORM_MAXFSIZE" \
-F "UPLOAD_IDENTIFIER=$FORM_UID" \
-F "go=$FORM_GO" \
-F "agree=$FORM_AGREE" \
-F "files=@$FILE;filename=$DESTFILE" \
-F "padding=$(add_padding)" \
"$FORM_URL") || return
# Invalid local or global uploads dirs configuration
if match 'Invalid local or global' "$DATA"; then
log_error "upload failure, rename file and/or extension and retry"
return $ERR_FATAL
fi
DL_LINK=$(echo "$DATA" | parse 'ud_download_url[[:space:]]' "'\([^']*\)'") || return
DEL_LINK=$(echo "$DATA" | parse 'ud_delete_url' "'\([^']*\)'") || return
fi
echo "$DL_LINK"
echo "$DEL_LINK"
}
# Delete a file on depositfiles
# (authentication not required, we can delete anybody's files)
# $1: cookie file (unused here)
# $2: delete link
depositfiles_delete() {
local URL=$2
local PAGE
PAGE=$(curl "$URL") || return
# File has been deleted and became inaccessible for download.
if matchi 'File has been deleted' "$PAGE"; then
return 0
# No such downloadable file or incorrect removal code.
else
log_error "bad deletion code"
return $ERR_FATAL
fi
}
# List a depositfiles shared file folder URL
# $1: depositfiles.com link
# $2: recurse subfolders (null string means not selected)
# stdout: list of links
depositfiles_list() {
local URL=$1
local PAGE LINKS NAMES
if ! match 'depositfiles\.com/\(../\)\?folders/' "$URL"; then
log_error "This is not a directory list"
return $ERR_FATAL
fi
test "$2" && log_debug "recursive folder does not exist in depositfiles"
PAGE=$(curl -L "$URL") || return
PAGE=$(echo "$PAGE" | parse_all 'target="_blank"' \
'\(<a href="http[^<]*</a>\)') || return $ERR_LINK_DEAD
NAMES=$(echo "$PAGE" | parse_all_attr '<a' title)
LINKS=$(echo "$PAGE" | parse_all_attr '<a' href)
list_submit "$LINKS" "$NAMES" || return
}
# http://img3.depositfiles.com/js/upload_utils.js
# check_form() > add_padding()
add_padding() {
local I STR
for ((I=0; I<3000; I++)); do
STR="$STR "
done
echo "$STR"
}
# Probe a download URL
# $1: cookie file (unused here)
# $2: Depositfiles url
# $3: requested capability list
# stdout: 1 capability per line
depositfiles_probe() {
local -r URL=$2
local -r REQ_IN=$3
local PAGE REQ_OUT
PAGE=$(curl --location -b 'lang_current=en' "$URL") || return
match 'This file does not exist' "$PAGE" && return $ERR_LINK_DEAD
REQ_OUT=c
if [[ $REQ_IN = *f* ]]; then
echo "$PAGE" | parse 'var filename' "'\([^']\+\)'" &&
REQ_OUT="${REQ_OUT}f"
fi
if [[ $REQ_IN = *s* ]]; then
echo "$PAGE" | parse 'var filesize' "'\([^']\+\)'" &&
REQ_OUT="${REQ_OUT}s"
fi
echo $REQ_OUT
}
|
mytskine/plowshare
|
src/modules/depositfiles.sh
|
Shell
|
gpl-3.0
| 13,070 |
#!/usr/bin/bash
RAM=`echo $2 | awk '{print $1}'`
CPU=`echo $3 | awk '{print $1}'`
VBoxManage createvm --name $1 --register
VBoxManage modifyvm $1 --memory $RAM --cpus $CPU
VBoxManage modifyvm $1 --nic1 bridged --bridgeadapter1 e1000g0
echo "Creada!!!"
|
Tsubasa1218/juandasc
|
scriptCrear.sh
|
Shell
|
gpl-3.0
| 253 |
#!/bin/sh
# Usage: apply-class-vlans.sh class 1 2 ...
# Apply the listed VLANs to the given switch classes
DEBUG=""
if [ "$#" -gt 0 ]; then
if [ "$1" = "--debug" ]; then
DEBUG=1
shift
fi
else
echo "Usage: $0 class/switch vlan ..." >&1
exit 1
fi
CLASS="$1"
shift
VLANS="$*"
. ./library.sh
if [ "$PROG" = "apply-switch-vlans.sh" ]; then
LIST=`get_routers $CLASS`
else
LIST=`get_routers_for_class $CLASS`
fi
for DEVICE in $LIST; do
echo "**** $DEVICE"
ID=`host_id $DEVICE`
FILE=tmp/vlans-$CLASS-$DEVICE
./gen-vlans.sh $DEVICE $VLANS > $FILE
if [ -n "$DEBUG" ]; then
cat $FILE
else
run_policy "`policy_filter < $FILE`" $DEVICE
fi
done
|
paulgear/procurve-rancid
|
apply-class-vlans.sh
|
Shell
|
gpl-3.0
| 659 |
#!/bin/sh
# echo roadnet unweighted
# build/src/bench/file_shortest_paths_seq_no_lazy_relax -n 1 -i roadNet-CA.txt -o seq fibheap
# build/src/bench/file_shortest_paths_seq_no_lazy_relax -n 1 -i roadNet-CA.txt -o seq fibheap
# build/src/bench/file_shortest_paths_seq_no_lazy_relax -n 1 -i roadNet-CA.txt -o seq fibheap
# echo roadnet weight 1000
# build/src/bench/file_shortest_paths_seq_no_lazy_relax -n 1 -i roadNet-CA.txt -w 1000 -o seq fibheap
# build/src/bench/file_shortest_paths_seq_no_lazy_relax -n 1 -i roadNet-CA.txt -w 1000 -o seq fibheap
# build/src/bench/file_shortest_paths_seq_no_lazy_relax -n 1 -i roadNet-CA.txt -w 1000 -o seq fibheap
# echo roadnet weight 1000000
# build/src/bench/file_shortest_paths_seq_no_lazy_relax -n 1 -i roadNet-CA.txt -w 1000000 -o seq fibheap
# build/src/bench/file_shortest_paths_seq_no_lazy_relax -n 1 -i roadNet-CA.txt -w 1000000 -o seq fibheap
# build/src/bench/file_shortest_paths_seq_no_lazy_relax -n 1 -i roadNet-CA.txt -w 1000000 -o seq fibheap
# echo live unweighted
# build/src/bench/file_shortest_paths_seq_no_lazy_relax -n 1 -i soc-LiveJournal1.txt -o seq fibheap
# build/src/bench/file_shortest_paths_seq_no_lazy_relax -n 1 -i soc-LiveJournal1.txt -o seq fibheap
# build/src/bench/file_shortest_paths_seq_no_lazy_relax -n 1 -i soc-LiveJournal1.txt -o seq fibheap
# echo live weight 1000
# build/src/bench/file_shortest_paths_seq_no_lazy_relax -n 1 -i soc-LiveJournal1.txt -w 1000 -o seq fibheap
# build/src/bench/file_shortest_paths_seq_no_lazy_relax -n 1 -i soc-LiveJournal1.txt -w 1000 -o seq fibheap
# build/src/bench/file_shortest_paths_seq_no_lazy_relax -n 1 -i soc-LiveJournal1.txt -w 1000 -o seq fibheap
# echo live weight 1000000
# build/src/bench/file_shortest_paths_seq_no_lazy_relax -n 1 -i soc-LiveJournal1.txt -w 1000000 -o seq fibheap
# build/src/bench/file_shortest_paths_seq_no_lazy_relax -n 1 -i soc-LiveJournal1.txt -w 1000000 -o seq fibheap
# build/src/bench/file_shortest_paths_seq_no_lazy_relax -n 1 -i soc-LiveJournal1.txt -w 1000000 -o seq fibheap
echo rand n 10000 p 0.5 unweighted
build/src/bench/file_shortest_paths_seq_no_lazy_relax -n 1 -i random_graph.txt -o seq fibheap
build/src/bench/file_shortest_paths_seq_no_lazy_relax -n 1 -i random_graph.txt -o seq fibheap
build/src/bench/file_shortest_paths_seq_no_lazy_relax -n 1 -i random_graph.txt -o seq fibheap
echo rand n 10000 p 0.5 weight 1000
build/src/bench/file_shortest_paths_seq_no_lazy_relax -n 1 -i random_graph.txt -w 1000 -o seq fibheap
build/src/bench/file_shortest_paths_seq_no_lazy_relax -n 1 -i random_graph.txt -w 1000 -o seq fibheap
build/src/bench/file_shortest_paths_seq_no_lazy_relax -n 1 -i random_graph.txt -w 1000 -o seq fibheap
echo rand n 10000 p 0.5 weight 1000000
build/src/bench/file_shortest_paths_seq_no_lazy_relax -n 1 -i random_graph.txt -w 1000000 -o seq fibheap
build/src/bench/file_shortest_paths_seq_no_lazy_relax -n 1 -i random_graph.txt -w 1000000 -o seq fibheap
build/src/bench/file_shortest_paths_seq_no_lazy_relax -n 1 -i random_graph.txt -w 1000000 -o seq fibheap
# echo rand n 1000000 p 0.0001 unweighted
# build/src/bench/file_shortest_paths_seq_no_lazy_relax -n 1 -i rand_graph3.txt -o seq fibheap
# build/src/bench/file_shortest_paths_seq_no_lazy_relax -n 1 -i rand_graph3.txt -o seq fibheap
# build/src/bench/file_shortest_paths_seq_no_lazy_relax -n 1 -i rand_graph3.txt -o seq fibheap
# echo rand n 1000000 p 0.0001 weight 1000
# build/src/bench/file_shortest_paths_seq_no_lazy_relax -n 1 -i rand_graph3.txt -w 1000 -o seq fibheap
# build/src/bench/file_shortest_paths_seq_no_lazy_relax -n 1 -i rand_graph3.txt -w 1000 -o seq fibheap
# build/src/bench/file_shortest_paths_seq_no_lazy_relax -n 1 -i rand_graph3.txt -w 1000 -o seq fibheap
# echo rand n 1000000 p 0.0001 weight 1000000
# build/src/bench/file_shortest_paths_seq_no_lazy_relax -n 1 -i rand_graph3.txt -w 1000000 -o seq fibheap
# build/src/bench/file_shortest_paths_seq_no_lazy_relax -n 1 -i rand_graph3.txt -w 1000000 -o seq fibheap
# build/src/bench/file_shortest_paths_seq_no_lazy_relax -n 1 -i rand_graph3.txt -w 1000000 -o seq fibheap
|
kjellwinblad/klsm
|
run_seq.sh
|
Shell
|
gpl-3.0
| 4,096 |
#!/bin/bash
. config.in
YAS_COMPANY_URL="https://raw.githubusercontent.com/company-mode/company-mode/master/company-yasnippet.el"
function pkg_install
{
get_url_with_name company-yasnippet.el $YAS_COMPANY_URL
copy_to_local company-yasnippet.el company-yasnippet
}
function pkg_update
{
:
}
. include.in
|
restaurant-ide/restaurant
|
scripts/yasnippet.sh
|
Shell
|
gpl-3.0
| 321 |
#!/bin/bash
#
# Unattended installer for OpenStack Juno on CentOS 7
# Reynaldo R. Martinez P.
# E-Mail: [email protected]
# October 2014
#
# OpenStack uninstall script for CentOS 7
#
PATH=$PATH:/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin
if [ -f ./configs/main-config.rc ]
then
source ./configs/main-config.rc
else
echo "No puedo acceder a mi archivo de configuración"
echo "Revise que esté ejecutando el instalador en su directorio"
echo "Abortando !!!!."
echo ""
exit 0
fi
clear
echo "Bajando y desactivando Servicios de OpenStack"
/usr/local/bin/openstack-control.sh stop
/usr/local/bin/openstack-control.sh disable
service mongod stop
chkconfig mongod off
killall -9 -u mongodb
killall -9 mongod
killall -9 dnsmasq
echo "Eliminando Paquetes de OpenStack"
yum -y erase openstack-glance \
openstack-utils \
openstack-selinux \
openstack-keystone \
python-psycopg2 \
qpid-cpp-server \
qpid-cpp-server-ssl \
qpid-cpp-client \
scsi-target-utils \
sg3_utils \
openstack-cinder \
openstack-neutron \
openstack-neutron-* \
openstack-nova-* \
openstack-swift-* \
openstack-ceilometer-* \
openstack-heat-* \
openstack-trove-* \
openstack-sahara* \
mongodb-server \
mongodb \
haproxy \
rabbitmq-server \
erlang-* \
openstack-dashboard \
openstack-packstack \
sysfsutils \
genisoimage \
libguestfs \
spice-html5 \
rabbitmq-server \
python-django-openstack-auth \
python-keystone* \
python-backports \
python-backports-ssl_match_hostname \
scsi-target-utils \
scsi-target-utils-gluster
yum -y erase openstack-puppet-modules openstack-packstack-puppet
yum -y erase qpid-cpp-server qpid-cpp-server-ssl qpid-cpp-client cyrus-sasl cyrus-sasl-md5 cyrus-sasl-plain
yum -y erase rabbitmq-server
if [ $cleanupdeviceatuninstall == "yes" ]
then
rm -rf /srv/node/$swiftdevice/accounts
rm -rf /srv/node/$swiftdevice/containers
rm -rf /srv/node/$swiftdevice/objects
rm -rf /srv/node/$swiftdevice/tmp
chown -R root:root /srv/node/
restorecon -R /srv
fi
echo "Eliminando Usuarios de Servicios de OpenStack"
userdel -f -r keystone
userdel -f -r glance
userdel -f -r cinder
userdel -f -r neutron
userdel -f -r nova
userdel -f -r mongodb
userdel -f -r ceilometer
userdel -f -r swift
userdel -f -r rabbitmq
userdel -f -r heat
userdel -f -r trove
userdel -f -r qpidd
echo "Eliminando Archivos remanentes"
rm -fr /etc/glance \
/etc/keystone \
/var/log/glance \
/var/log/keystone \
/var/lib/glance \
/var/lib/keystone \
/etc/cinder \
/var/lib/cinder \
/var/log/cinder \
/etc/sudoers.d/cinder \
/etc/tgt \
/etc/neutron \
/var/lib/neutron \
/var/log/neutron \
/etc/sudoers.d/neutron \
/etc/nova \
/etc/heat \
/etc/trove \
/var/log/trove \
/var/cache/trove \
/var/log/nova \
/var/lib/nova \
/etc/sudoers.d/nova \
/etc/openstack-dashboard \
/var/log/horizon \
/etc/sysconfig/mongod \
/var/lib/mongodb \
/etc/ceilometer \
/var/log/ceilometer \
/var/lib/ceilometer \
/etc/ceilometer-collector.conf \
/etc/swift/ \
/var/lib/swift \
/tmp/keystone-signing-swift \
/etc/openstack-control-script-config \
/var/lib/keystone-signing-swift \
/var/lib/rabbitmq \
/var/log/rabbitmq \
/etc/rabbitmq \
$dnsmasq_config_file \
/etc/dnsmasq-neutron.d \
/var/tmp/packstack \
/var/lib/keystone-signing-swift \
/var/lib/qpidd \
/etc/qpid
service crond restart
rm -f /root/keystonerc_admin
rm -f /root/ks_admin_token
rm -f /usr/local/bin/openstack-control.sh
rm -f /usr/local/bin/openstack-log-cleaner.sh
rm -f /usr/local/bin/openstack-keystone-tokenflush.sh
rm -f /usr/local/bin/openstack-vm-boot-start.sh
rm -f /etc/httpd/conf.d/openstack-dashboard.conf*
rm -f /etc/httpd/conf.d/rootredirect.conf*
rm -f /etc/cron.d/keystone-flush.crontab
if [ $snmpinstall == "yes" ]
then
if [ -f /etc/snmp/snmpd.conf.pre-openstack ]
then
rm -f /etc/snmp/snmpd.conf
mv /etc/snmp/snmpd.conf.pre-openstack /etc/snmp/snmpd.conf
service snmpd restart
else
service snmpd stop
yum -y erase net-snmp
rm -rf /etc/snmp
fi
rm -f /usr/local/bin/vm-number-by-states.sh \
/usr/local/bin/vm-total-cpu-and-ram-usage.sh \
/usr/local/bin/vm-total-disk-bytes-usage.sh \
/usr/local/bin/node-cpu.sh \
/usr/local/bin/node-memory.sh \
/etc/cron.d/openstack-monitor.crontab \
/var/tmp/node-cpu.txt \
/var/tmp/node-memory.txt \
/var/tmp/vm-cpu-ram.txt \
/var/tmp/vm-disk.txt \
/var/tmp/vm-number-by-states.txt
fi
echo "Reiniciando Apache sin archivos del Dashboard"
service httpd restart
service memcached restart
echo "Limpiando IPTABLES"
service iptables stop
echo "" > /etc/sysconfig/iptables
if [ $dbinstall == "yes" ]
then
echo ""
echo "Desinstalando software de Base de Datos"
echo ""
case $dbflavor in
"mysql")
service mysqld stop
sync
sleep 5
sync
yum -y erase mysql-server mysql mariadb-galera-server mariadb-galera-common mariadb-galera galera
userdel -r mysql
rm -f /root/.my.cnf /etc/my.cnf
;;
"postgres")
service postgresql stop
sync
sleep 5
sync
yum -y erase postgresql-server
userdel -r postgres
rm -f /root/.pgpass
;;
esac
fi
echo ""
echo "Desinstalación completada"
echo ""
|
tigerlinux/openstack-juno-installer-centos7
|
modules/uninstall.sh
|
Shell
|
gpl-3.0
| 5,118 |
check_pkg_python_pyopenssl() {
local br_version=$(grep -E "^$(echo ${1^^} | sed 's/-/_/g')_VERSION = " package/${1}/${1}.mk | awk '{print $3}')
local version=$(wget -q -O - https://pypi.python.org/pypi/pyopenssl | grep -o -E "pyOpenSSL [0-9]+\.[0-9]+\.[0-9]+" | head -1 | grep -o -E "[0-9]+\.[0-9]+\.[0-9]+")
if [[ "$br_version" != "$version" ]]; then
if [[ "$br_version" != "" ]] && [[ "$version" != "" ]]; then
packages="$packages $1"
br_versions="$br_versions $br_version"
versions="$versions $version"
else
echo "Warning: $1 code has a problem."
fi
fi
unset br_version
unset version
}
|
vriera/check-br-package-versions
|
packages/python-pyopenssl.sh
|
Shell
|
gpl-3.0
| 616 |
hash rsync 2>/dev/null || return
# rsync with my favourite options
alias jsync='rsync --archive --compress --itemize-changes --human-readable --partial --progress --verbose'
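# usage sketch (hypothetical paths): jsync ~/Documents/ user@host:backups/Documents/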
|
qjcg/dotfiles
|
.bash_profile.d/rsync.sh
|
Shell
|
gpl-3.0
| 175 |
#!/bin/bash
file=appdir/usr/local
root_path=$(pwd)
if [ ! -e "$file" ]
then
mkdir -p $file
mkdir -p $file/share/applications
mkdir -p $file/bin
fi
cp bin/electronpass $file/bin
cp data/electronpass.desktop $file/share/applications
cp data/electronpass.svg $file/share/applications
cd $file/bin
if [ ! -e "linuxdeployqt-continuous-x86_64.AppImage" ]
then
wget "https://github.com/probonopd/linuxdeployqt/releases/download/continuous/linuxdeployqt-continuous-x86_64.AppImage"
chmod a+x linuxdeployqt-continuous-x86_64.AppImage
fi
# unset QTDIR; unset QT_PLUGIN_PATH ; unset LD_LIBRARY_PATH
./linuxdeployqt-continuous-x86_64.AppImage ../share/applications/electronpass.desktop -bundle-non-qt-libs -qmldir=$root_path/app/qml/
./linuxdeployqt-continuous-x86_64.AppImage ../share/applications/electronpass.desktop -appimage -qmldir=$root_path/app/qml/
cp $root_path/$file/bin/ElectronPass-x86_64.AppImage $root_path
|
electronpass/electronpass-desktop
|
create-linux-appimage.sh
|
Shell
|
gpl-3.0
| 921 |
#!/bin/bash
DIST=./Dist
EXTRAS=./Extras
RESOURCES="$EXTRAS/Resources"
LPROJ="$RESOURCES/English.lproj";
SRCDIR=../../src
MANDIR=../../man
ROOT=./Root
PKG=./Pkg/Info
# Need to have absolute path so the 'default' utility sees them
INFO_PLIST=$(pwd)/Pkg/Info
DESC_PLIST=$(pwd)/Pkg/Description
myself=$(basename $0)
if [ -z "$1" ]; then
echo "Usage: $myself <version>"
exit
fi
echo "Checking version... $version"
version=$1
version_major=$(echo "$version" | cut -d. -f1);
version_minor=$(echo "$version" | cut -d. -f2);
version_full="$version_major.$version_minor.0";
DISTOUT="$DIST/gbsed v$version"
echo "Creating dist dir $DISTOUT..."
mkdir -p "$DISTOUT";
# Generate package information.
(
echo "Generating package extras..."
mkdir -p "$LPROJ"
for file in $EXTRAS/*.template;
do
bname=$(basename $file);
rtf=$(echo "$bname"|perl -ne's/\.template$//;print');
cp -f "$EXTRAS/$bname" "$LPROJ/$rtf";
perl -pi -e"s/%%VERSION%%/$version/g" "$LPROJ/$rtf";
done
)
# Copy dist files.
echo "Copying distribution files."
mkdir -p "$ROOT"
mkdir -p "$ROOT/usr/bin"
mkdir -p "$ROOT/usr/include"
mkdir -p "$ROOT/usr/lib"
mkdir -p "$ROOT/usr/man/man1"
mkdir -p "$ROOT/usr/man/man3"
cp -f "$SRCDIR/gbsed/gbsed" $ROOT/usr/bin/
cp -f "$SRCDIR/libgbsed/libgbsed.a" $ROOT/usr/lib/
cp -f "$SRCDIR/libgbsed/libgbsed.h" $ROOT/usr/include/
cp -f "$MANDIR/gbsed.1" $ROOT/usr/man/man1/
cp -f "$MANDIR/libgbsed.3" $ROOT/usr/man/man3/
echo "Removing DS_Store files from dist root..."
find "$ROOT" -name ".DS_Store" -exec rm -f \"{}\" \;
# Change version and such in the package's propertylist
echo "Setting up version and bundle information..."
cp -f "$INFO_PLIST.plist.template" "$INFO_PLIST.plist";
cp -f "$DESC_PLIST.plist.template" "$DESC_PLIST.plist";
defaults write "$INFO_PLIST" CFBundleGetInfoString -string "0x61736b.net"
defaults write "$INFO_PLIST" CFBundleIdentifier -string "net.0x61736b.gbsed"
defaults write "$INFO_PLIST" CFBundleShortVersionString -string "$version_full";
defaults write "$INFO_PLIST" IFMajorVersion -int "$version_major";
defaults write "$INFO_PLIST" IFMinorVersion -int "$version_minor";
defaults write "$DESC_PLIST" IFPkgDescriptionTitle -string "gbsed v$version";
# Build package
echo "Building package..."
PackageMaker -build -p "$DISTOUT/gbsed v$version Installer.pkg" \
-f Root -i Pkg/Info.plist -d Pkg/Description.plist \
-r "$RESOURCES"
echo "Copying DSstore to installer folder..."
cp -f ./DStore/DS_Store "$DISTOUT/.DS_Store"
dmgname="$DIST/gbsed.v$version.static.universal.osx.dmg"
echo "Creating disk image to $dmgname";
hdiutil create -fs HFS+ -volname "gbsed v$version" -scrub \
-imagekey zlib-level=9 -srcdir "$DISTOUT" -format UDZO \
-o "$dmgname"
echo "Setting the internet-enable flag..."
hdiutil internet-enable "$dmgname"
echo "Cleaning up..."
rm -rf "$RESOURCES"
rm -f "$INFO_PLIST.plist"
rm -f "$DESC_PLIST.plist"
rm -rf "$ROOT"
rm -rf "$DISTOUT"
echo "Done!"
|
ask/gbsed
|
osx/Package/Build.sh
|
Shell
|
gpl-3.0
| 3,076 |
if [[ $commands[fasd] ]]; then # check if fasd is installed
fasd_cache="$ZSH_CACHE_DIR/fasd-init-cache"
if [[ ! -e "$fasd_cache" ]]; then
mkdir -p "$fasd_cache"
fi
if test "$(command -v fasd)" -nt "$fasd_cache" -o ! -s "$fasd_cache"; then
fasd --init auto >| "$fasd_cache"
fi
source "$fasd_cache"
unset fasd_cache
alias v="f -e $EDITOR"
alias o='a -e open_command'
fi
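# Usage sketch (hedged; assumes fasd has already built up some history):
#   v notes    # open the most "frecent" file matching "notes" in $EDITOR
#   o report   # open the best match with the system opener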
|
alyptik/dotfiles
|
.zsh.d/plugins/fasd.plugin.zsh
|
Shell
|
gpl-3.0
| 394 |
for f in *.sql
do
echo -n "Importing $f into the database..."
mysql dspdb -u darkstar -pMariSeth79 < $f && echo "Success"
done
|
Kosmos82/kosmosdarkstar
|
sql/sql.sh
|
Shell
|
gpl-3.0
| 147 |
#!/bin/bash
echo "********"
echo "* CJMM *"
echo "********"
echo
echo "******************************************************"
echo "SELECT OST TENANT FOR JC1 ENVIRONMENTS"
echo "******************************************************"
OSTENVFILE="${HOME}/listostenvs.cnf"
LISTOSTENVS="$(cat ${OSTENVFILE} | egrep '^OSTENV=.*JC1.*')"
unset options i
while read -r myostenv
do
myostenv="$(echo "${myostenv}" | cut -d' ' -f1-2)"
options[i++]="${myostenv}"
done <<< "${LISTOSTENVS}"
echo
PS3='Please enter your choice: '
select opt in "${options[@]}"
do
echo "You chose choice <${opt}>"
break;
done
export PYTHONWARNINGS="ignore:Unverified HTTPS request"
alias nova="nova --insecure"
alias openstack="openstack --insecure"
alias cinder="cinder --insecure"
alias keystone="keystone --insecure"
alias neutron="neutron --insecure"
alias glance="glance --insecure"
for myostenv in $(echo "${LISTOSTENVS}" | grep "${opt} ")
do
export "${myostenv}"
done
export PS1="[\u@\h \W]\[\e[7;32m\][\${OSTENV}]-[\${OS_TENANT_NAME}]\[\e[m\]\$ "
env | grep ^OS
env | grep ^PS1
env | grep _INSTALL
|
Telefonica/iot-utils
|
myenvironments/bin/openstackenvJC1.sh
|
Shell
|
gpl-3.0
| 1,093 |
#!/bin/bash
# setMultiFinderMode.sh
# xbmclauncher
#
# Created by Stephan Diederich on 02/2009.
# Copyright 2009 University Heidelberg. All rights reserved.
PASSWD=frontrow
E_MISSING_MF=3
MULTIFINDER=/System/Library/CoreServices/Finder.app/Contents/Plugins/XBMCLauncher.frappliance/Contents/Resources/MultiFinder.app
case $1 in
OFF)
#should we delete MultiFinder.app here?
echo "Resetting loginwindow"
echo $PASSWD | sudo -S sh -c "/usr/bin/defaults delete /Library/Preferences/com.apple.loginwindow Finder"
# restart loginwindow
echo "Restarting loginwindow"
kill `ps awwx | grep [l]oginwindow | awk '{print $1}'`
kill `ps awwx | grep [F]inder | grep -v setMultFinderMode | awk '{print $1}'`
;;
ON)
# do some sanity checks here.
# does MultiFinder.app exist
if [ ! -d "$MULTIFINDER" ]; then
echo "MultiFinder.app not found in $MULTIFINDER. Bailing out..."
exit $E_MISSING_MF
fi
#set as default app on boot
echo "Setting loginwindow to $MULTIFINDER"
echo $PASSWD | sudo -S sh -c "/usr/bin/defaults write /Library/Preferences/com.apple.loginwindow Finder $MULTIFINDER"
# restart loginwindow
echo "Restarting loginwindow"
kill `ps awwx | grep [l]oginwindow | awk '{print $1}'`
kill `ps awwx | grep [F]inder | grep -v setMultFinderMode | awk '{print $1}'`
;;
*)
echo "USAGE: setMultiFinderMode.sh {ON|OFF}"
echo "ON: enables MultiFinder by setting loginwindow to $MULTIFINDER"
echo "OFF: deletes 'Finder' key in loginwindow's plist"
;;
esac
|
davilla/atv-xbmc-launcher
|
Launcher/setMultiFinderMode.sh
|
Shell
|
gpl-3.0
| 1,501 |
#!/bin/bash
echo 'Framing screenshots'
fastlane frameit
echo 'Moving framed'
mkdir -p framed
mv en-GB/*framed.png framed/
# No text
mv Framefile.json _Framefile.json
echo 'Framing screenshots (no-text)'
fastlane frameit
echo 'Moving framed (no-text)'
mkdir -p framed/no-text
mv en-GB/*framed.png framed/no-text
# Clean up
mv _Framefile.json Framefile.json
|
NERC-CEH/irecord-app
|
other/designs/framing/run.sh
|
Shell
|
gpl-3.0
| 363 |
#!/bin/bash
#SBATCH -J NGmerge
#SBATCH -n 4 # Use 4 cores for the job
#SBATCH -t 0-04:00 # Runtime in D-HH:MM
#SBATCH -p serial_requeue # Partition to submit to
#SBATCH --mem=100 # Memory pool for all cores (see also --mem-per-cpu)
#SBATCH -o NGmerge.%A.out # File to which STDOUT will be written
#SBATCH -e NGmerge.%A.err # File to which STDERR will be written
source new-modules.sh
module purge
module load ATAC-seq
NGmerge -a -n 4 -e 20 -v -1 $1.R1.fastq.gz -2 $1.R2.fastq.gz -o 20_$1
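# Example submission (hedged; "sample1" is a placeholder prefix, the script
# expects sample1.R1.fastq.gz and sample1.R2.fastq.gz to exist alongside it):
#   sbatch NGmerge_20.sh sample1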
|
tsackton/ratite-genomics
|
08_atacseq/NGmerge_20.sh
|
Shell
|
gpl-3.0
| 551 |
#!/usr/bin/env bash
BASE_VERSION="2.5.0"
INSTALL_MODULE="install -r"
MKDIR_IF_NOT_EXIST="mkdir -vp"
PYTHON=python
REQUIREMENTS=requirements.txt
# source file directory
SRC_HOME=$HOME/bin/rosycloud_src
# syncing directory
ROSYCLOUD_HOME=$HOME/RosyCloud
# system wide configure
CONFIGS=$HOME/.rosycloud
HOME_BIN=$HOME/bin
DEST_CMD=$HOME_BIN/rosycloud
VENV_HOME=$HOME/.virtualenv
SCRIPT_TMPL=templates/rosycloud.tmpl
PYTHON_MODULE_INSTALLER=$VENV_HOME/bin/pip
# python release should be later than 2.5
echo $PYTHON
version=`$PYTHON -V 2>&1 | sed -n 's/^Python \(\([0-9]\+\.\?\)\{3\}\)/\1/p'`
if [ "$version" \< $BASE_VERSION ]; then
echo -n "Python version "
echo -n $version
echo " is too old."
echo -n "To run this app, python release should be late than "
echo -n $BASE_VERSION
echo "."
exit 255
fi
# install virtual env first
$PYTHON virtualenv.py $VENV_HOME
source $VENV_HOME/bin/activate
# install dependencies
$PYTHON_MODULE_INSTALLER $INSTALL_MODULE $REQUIREMENTS 2>/dev/null
# install rosycloud source
$MKDIR_IF_NOT_EXIST $SRC_HOME
$MKDIR_IF_NOT_EXIST $ROSYCLOUD_HOME
# copy source files
cp -rv src/* $SRC_HOME
# copy config files
cp -v templates/exclude.tmpl $ROSYCLOUD_HOME/
cp -v templates/config.tmpl $SRC_HOME/
# generate executable from template
sed -e "s|VIRTUALENV_HOME|$VENV_HOME|g" -e "s|SRC_HOME|$SRC_HOME|g" $SCRIPT_TMPL > $DEST_CMD
chmod u+x $DEST_CMD
# initialize system configure directory
mkdir -pv $CONFIGS
mkdir -pv $CONFIGS/cache
mkdir -pv $CONFIGS/dir
mkdir -pv $CONFIGS/snapshots
# temporary directory
mkdir -pv $CONFIGS/tmp
# installation done
echo
echo "Installation DONE!"
|
jzhou77/RosyCloud
|
setup.sh
|
Shell
|
gpl-3.0
| 1,645 |
#!/bin/bash
#
# To DB first, wipe, use
#
# psql -U postgres -h sasdb --dbname=qpidinfra --file=$LOFARROOT/share/qpidinfrastructure/sql/qpidinfradb.sql
#
# -----------------------------------------
# Configuration
# -----------------------------------------
# Whether to modify production (true) or test (false)
if [ "$LOFARENV" == "PRODUCTION" ]; then
PROD=true
PREFIX=
elif [ "$LOFARENV" == "TEST" ]; then
PROD=false
PREFIX="test."
else
PROD=false
PREFIX="devel."
fi
# Host names to use
if $PROD; then
echo "----------------------------------------------"
echo "Populating database for PRODUCTION environment"
echo ""
echo "Press ENTER to continue, or ^C to abort"
echo "----------------------------------------------"
read
CCU=ccu001.control.lofar
MCU=mcu001.control.lofar
SCU=scu001.control.lofar
LEXAR=lexar003.offline.lofar
MOM_SYSTEM=lcs023.control.lofar
MOM_INGEST=lcs029.control.lofar
else
CCU=ccu199.control.lofar
MCU=mcu199.control.lofar
SCU=scu199.control.lofar
LEXAR=lexar004.offline.lofar
MOM_SYSTEM=lcs028.control.lofar
MOM_INGEST=lcs028.control.lofar
fi
# -----------------------------------------
# Cobalt DataTapping (piggy-backing)
# -----------------------------------------
addtoQPIDDB.py --broker $SCU --exchange ${PREFIX}lofar.datatap.command # COBALT piggy-backing request-reply
addtoQPIDDB.py --broker $SCU --exchange ${PREFIX}lofar.datatap.notification # notification who has been granted COBALT piggy-backing
# -----------------------------------------
# MessageRouter -> MoM
# -----------------------------------------
#addtoQPIDDB.py --broker $CCU --queue ${PREFIX}mom.task.feedback.dataproducts --federation $MOM_SYSTEM
#addtoQPIDDB.py --broker $CCU --queue ${PREFIX}mom.task.feedback.processing --federation $MOM_SYSTEM
#addtoQPIDDB.py --broker $CCU --queue ${PREFIX}mom.task.feedback.state --federation $MOM_SYSTEM
# -----------------------------------------
# Feedback COBALT/CEP4 -> MAC
# -----------------------------------------
addtoQPIDDB.py --broker $CCU --queue ${PREFIX}mac.task.feedback.state
addtoQPIDDB.py --broker $MCU --queue ${PREFIX}otdb.task.feedback.dataproducts
addtoQPIDDB.py --broker $MCU --queue ${PREFIX}otdb.task.feedback.processing
# -----------------------------------------
# MACScheduler -> MessageRouter -> MoM
# -----------------------------------------
addtoQPIDDB.py --broker $MCU --queue ${PREFIX}lofar.task.specification.system --federation $CCU
addtoQPIDDB.py --broker $CCU --queue ${PREFIX}mom.task.specification.system --federation $MOM_SYSTEM
# -----------------------------------------
# MoM <-> MoM-OTDB-Adapter
# -----------------------------------------
addtoQPIDDB.py --broker $CCU --queue mom.command --federation $MOM_SYSTEM
addtoQPIDDB.py --broker $CCU --queue mom.importxml --federation $MOM_SYSTEM
addtoQPIDDB.py --broker $MOM_SYSTEM --queue mom-otdb-adapter.importxml --federation $CCU
# -----------------------------------------
# MoM Services
# -----------------------------------------
addtoQPIDDB.py --broker $MOM_SYSTEM --exchange ${PREFIX}lofar.mom.bus
addtoQPIDDB.py --broker $MOM_INGEST --exchange ${PREFIX}lofar.mom.bus
addtoQPIDDB.py --broker $MOM_SYSTEM --exchange ${PREFIX}lofar.mom.command
addtoQPIDDB.py --broker $MOM_SYSTEM --exchange ${PREFIX}lofar.mom.notification
# MoM queues that are unused functionally but still opened
addtoQPIDDB.py --broker $MOM_SYSTEM --queue ${PREFIX}mom.task.feedback.dataproducts
addtoQPIDDB.py --broker $MOM_SYSTEM --queue ${PREFIX}mom.task.feedback.processing
# -----------------------------------------
# MoM Services <-> ResourceAssignment
# -----------------------------------------
addtoQPIDDB.py --broker $SCU --exchange ${PREFIX}lofar.mom.bus --federation $MOM_SYSTEM
addtoQPIDDB.py --broker $SCU --exchange ${PREFIX}lofar.mom.command --federation $MOM_SYSTEM
addtoQPIDDB.py --broker $MOM_SYSTEM --exchange ${PREFIX}lofar.mom.notification --federation $SCU
# -----------------------------------------
# Ingest
# -----------------------------------------
addtoQPIDDB.py --broker $LEXAR --exchange ${PREFIX}lofar.lta.ingest.command
addtoQPIDDB.py --broker $LEXAR --exchange ${PREFIX}lofar.lta.ingest.notification
addtoQPIDDB.py --broker $LEXAR --queue ${PREFIX}lofar.lta.ingest.jobs
addtoQPIDDB.py --broker $LEXAR --queue ${PREFIX}lofar.lta.ingest.jobs.for_transfer
addtoQPIDDB.py --broker $LEXAR --queue ${PREFIX}lofar.lta.ingest.notification.jobmanager
addtoQPIDDB.py --broker $LEXAR --bind --exchange ${PREFIX}lofar.lta.ingest.notification --queue ${PREFIX}lofar.lta.ingest.notification.momingestadapter --routingkey LTAIngest.#
addtoQPIDDB.py --broker $LEXAR --bind --exchange ${PREFIX}lofar.lta.ingest.notification --queue ${PREFIX}lofar.lta.ingest.notification.jobmanager --routingkey LTAIngest.#
# -----------------------------------------
# ResourceAssignment
# -----------------------------------------
addtoQPIDDB.py --broker $SCU --exchange ${PREFIX}lofar.ra.command
addtoQPIDDB.py --broker $SCU --exchange ${PREFIX}lofar.ra.notification
addtoQPIDDB.py --broker $SCU --exchange ${PREFIX}lofar.otdb.command
addtoQPIDDB.py --broker $SCU --exchange ${PREFIX}lofar.otdb.notification
addtoQPIDDB.py --broker $SCU --exchange ${PREFIX}lofar.dm.command
addtoQPIDDB.py --broker $SCU --exchange ${PREFIX}lofar.dm.notification
# -----------------------------------------
# QA
# -----------------------------------------
addtoQPIDDB.py --broker $SCU --exchange ${PREFIX}lofar.qa.notification
addtoQPIDDB.py --broker $SCU --queue ${PREFIX}lofar.otdb.notification.for.qa_service
# -----------------------------------------
# Specification & Trigger Services
# -----------------------------------------
addtoQPIDDB.py --broker $SCU --exchange ${PREFIX}lofar.spec.command
addtoQPIDDB.py --broker $SCU --exchange ${PREFIX}lofar.spec.notification
addtoQPIDDB.py --broker $SCU --exchange ${PREFIX}lofar.trigger.command
addtoQPIDDB.py --broker $SCU --exchange ${PREFIX}lofar.trigger.notification
addtoQPIDDB.py --broker $SCU --exchange ${PREFIX}lofar.mac.command
addtoQPIDDB.py --broker $SCU --exchange ${PREFIX}lofar.mac.notification
# -----------------------------------------
# Specification -> MoM
# -----------------------------------------
addtoQPIDDB.py --broker $SCU --queue mom.importxml --federation $MOM_SYSTEM
# -----------------------------------------
# Ingest -> SCU
# -----------------------------------------
addtoQPIDDB.py --broker $LEXAR --exchange ${PREFIX}lofar.lta.ingest.notification --federation $SCU
# -----------------------------------------
# Ingest -> ResourceAssignment @ SCU
# -----------------------------------------
addtoQPIDDB.py --broker $SCU --queue ${PREFIX}lofar.lta.ingest.notification.autocleanupservice
addtoQPIDDB.py --broker $SCU --bind --exchange ${PREFIX}lofar.lta.ingest.notification --queue ${PREFIX}lofar.lta.ingest.notification.autocleanupservice --routingkey LTAIngest.#
# -----------------------------------------
# Ingest -> LTA-storage-overview @ SCU
# -----------------------------------------
addtoQPIDDB.py --broker $LEXAR --queue ${PREFIX}lofar.lta.ingest.notification.for.ltastorageoverview
addtoQPIDDB.py --broker $LEXAR --bind --exchange ${PREFIX}lofar.lta.ingest.notification --queue ${PREFIX}lofar.lta.ingest.notification.for.ltastorageoverview --routingkey LTAIngest.#
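# Example run (hedged; LOFARENV selects whether the production, test or devel
# broker set is populated):
#   LOFARENV=TEST ./populateDB.sh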
|
kernsuite-debian/lofar
|
SAS/QPIDInfrastructure/bin/populateDB.sh
|
Shell
|
gpl-3.0
| 7,375 |
ARCH=i686
source distro/parabola/install.sh
|
mytbk/liveusb-builder
|
distro/parabola/32/install.sh
|
Shell
|
gpl-3.0
| 44 |
#!/bin/bash
function base-convert-newlines {
tr '\r' '\n' < $1 > $2
}
function convert-newlines {
base-convert-newlines source/marathon2/$1 unix/src/marathon2/$1
}
function convert-newlines2 {
mkdir -p unix/src/cseries-interfaces/
base-convert-newlines source/CSeriesInterfaces/$1 unix/src/cseries-interfaces/$1
}
function convert-newlines3 {
mkdir -p unix/src/cseries-libraries/
base-convert-newlines source/CSeriesLibraries/$1 unix/src/cseries-libraries/$1
}
function convert-newlines4 {
mkdir -p unix/src/cseries/
base-convert-newlines source/cseries/$1 unix/src/cseries/$1
}
function convert-newlines5 {
mkdir -p unix/src/cseries.lib/
base-convert-newlines source/cseries.lib/$1 unix/src/cseries.lib/$1
}
convert-newlines collection_definition.h
convert-newlines computer_interface.c
convert-newlines computer_interface.h
convert-newlines crc.c
convert-newlines crc.h
convert-newlines devices.c
convert-newlines editor.h
convert-newlines effect_definitions.h
convert-newlines effects.c
convert-newlines effects.h
convert-newlines environment.h
convert-newlines export_definitions.c
convert-newlines extensions.h
convert-newlines fades.c
convert-newlines fades.h
convert-newlines files_macintosh.c
convert-newlines find_files.c
convert-newlines find_files.h
convert-newlines flood_map.c
convert-newlines flood_map.h
convert-newlines game_dialogs.c
convert-newlines game_errors.c
convert-newlines game_errors.h
convert-newlines game_sound.c
convert-newlines game_sound.h
convert-newlines game_wad.c
convert-newlines game_wad.h
convert-newlines game_window.c
convert-newlines game_window.h
convert-newlines game_window_macintosh.c
convert-newlines images.c
convert-newlines images.h
convert-newlines import_definitions.c
convert-newlines input_sprocket_needs.h
convert-newlines interface.c
convert-newlines interface.h
convert-newlines interface_macintosh.c
convert-newlines interface_menus.h
convert-newlines item_definitions.h
convert-newlines items.c
convert-newlines items.h
convert-newlines keyboard_dialog.c
convert-newlines key_definitions.h
convert-newlines lightsource.c
convert-newlines lightsource.h
convert-newlines low_level_textures.c
convert-newlines macintosh_input.h
convert-newlines macintosh_network.h
convert-newlines map_accessors.c
convert-newlines map.c
convert-newlines map_constructors.c
convert-newlines map.h
convert-newlines marathon2.c
convert-newlines media.c
convert-newlines media_definitions.h
convert-newlines media.h
convert-newlines monster_definitions.h
convert-newlines monsters.c
convert-newlines monsters.h
convert-newlines motion_sensor.c
convert-newlines motion_sensor.h
convert-newlines mouse.c
convert-newlines mouse.h
convert-newlines music.c
convert-newlines music.h
convert-newlines network_adsp.c
convert-newlines network.c
convert-newlines network_ddp.c
convert-newlines network_dialogs.c
convert-newlines network_games.c
convert-newlines network_games.h
convert-newlines network.h
convert-newlines network_lookup.c
convert-newlines network_microphone.c
convert-newlines network_modem.c
convert-newlines network_modem.h
convert-newlines network_modem_protocol.c
convert-newlines network_modem_protocol.h
convert-newlines network_names.c
convert-newlines network_sound.h
convert-newlines network_speaker.c
convert-newlines network_stream.c
convert-newlines network_stream.h
convert-newlines overhead_map.c
convert-newlines overhead_map.h
convert-newlines overhead_map_macintosh.c
convert-newlines pathfinding.c
convert-newlines physics.c
convert-newlines physics_models.h
convert-newlines physics_patches.c
convert-newlines placement.c
convert-newlines platform_definitions.h
convert-newlines platforms.c
convert-newlines platforms.h
convert-newlines player.c
convert-newlines player.h
convert-newlines portable_files.h
convert-newlines preferences.c
convert-newlines preferences.h
convert-newlines preprocess_map_mac.c
convert-newlines progress.c
convert-newlines progress.h
convert-newlines projectile_definitions.h
convert-newlines projectiles.c
convert-newlines projectiles.h
convert-newlines render.c
convert-newlines render.h
convert-newlines scenery.c
convert-newlines scenery_definitions.h
convert-newlines scenery.h
convert-newlines scottish_textures.c
convert-newlines scottish_textures.h
convert-newlines screen.c
convert-newlines screen_definitions.h
convert-newlines screen_drawing.c
convert-newlines screen_drawing.h
convert-newlines screen.h
convert-newlines serial_numbers.c
convert-newlines shape_definitions.h
convert-newlines shape_descriptors.h
convert-newlines shapes.c
convert-newlines shapes_macintosh.c
convert-newlines shell.c
convert-newlines shell.h
convert-newlines song_definitions.h
convert-newlines sound_definitions.h
convert-newlines sound_macintosh.c
convert-newlines tags.h
convert-newlines textures.c
convert-newlines textures.h
convert-newlines valkyrie.c
convert-newlines valkyrie.h
convert-newlines vbl.c
convert-newlines vbl_definitions.h
convert-newlines vbl.h
convert-newlines vbl_macintosh.c
convert-newlines wad.c
convert-newlines wad.h
convert-newlines wad_macintosh.c
convert-newlines wad_prefs.c
convert-newlines wad_prefs.h
convert-newlines wad_prefs_macintosh.c
convert-newlines weapon_definitions.h
convert-newlines weapons.c
convert-newlines weapons.h
convert-newlines world.c
convert-newlines world.h
convert-newlines2 byte_swapping.h
convert-newlines2 checksum.h
convert-newlines2 cseries.h
convert-newlines2 InputSprocket.h
convert-newlines2 macintosh_cseries.h
convert-newlines2 macintosh_interfaces.c
convert-newlines2 my32bqd.h
convert-newlines2 mytm.h
convert-newlines2 preferences.h
convert-newlines2 proximity_strcmp.h
convert-newlines2 rle.h
convert-newlines3 DrawSprocketDebugLib
convert-newlines3 DrawSprocket.h
convert-newlines3 InputSprocket.h
convert-newlines3 InputSprocketStubLib
convert-newlines3 macintosh_interfaces881.d
convert-newlines3 macintosh_interfaces.d
convert-newlines3 SoundSprocket.h
convert-newlines4 DrawSprocket.h
convert-newlines4 InputSprocket.h
convert-newlines4 SoundSprocket.h
convert-newlines5 AfterDarkGestalt.h
convert-newlines5 beta.h
convert-newlines5 buildprogram
convert-newlines5 byte_swapping.c
convert-newlines5 byte_swapping.h
convert-newlines5 checksum.c
convert-newlines5 checksum.h
convert-newlines5 cseries.h
convert-newlines5 device_dialog.c
convert-newlines5 devices.c
convert-newlines5 dialogs.c
convert-newlines5 final.h
convert-newlines5 InputSprocket.h
convert-newlines5 macintosh_cseries.h
convert-newlines5 macintosh_interfaces.c
convert-newlines5 macintosh_interfaces.d.make
convert-newlines5 macintosh_interfaces.ppc
convert-newlines5 macintosh_utilities.c
convert-newlines5 makefile
convert-newlines5 my32bqd.c
convert-newlines5 my32bqd.h
convert-newlines5 mytm.c
convert-newlines5 mytm.h
convert-newlines5 preferences.c
convert-newlines5 preferences.h
convert-newlines5 proximity_strcmp.c
convert-newlines5 proximity_strcmp.h
convert-newlines5 RequestVideo.c
convert-newlines5 RequestVideo.h
convert-newlines5 rle.c
convert-newlines5 rle.h
convert-newlines5 serial_numbers.makeout
convert-newlines5 textures.h
convert-newlines5 Touch
|
DrItanium/moo
|
misc/INFINITY_SOURCE_CODE_LICENSE/convert-newlines.sh
|
Shell
|
gpl-3.0
| 7,120 |
killall -9 jackd > /dev/null
killall -9 itchy > /dev/null
killall -9 scratchy > /dev/null
killall -9 bbsync > /dev/null
killall -9 fluxa > /dev/null
|
nebogeo/scheme-bricks
|
stop-audio.sh
|
Shell
|
gpl-3.0
| 149 |
#!/bin/bash -e
. ../env/env
brief="application library for the Xfce desktop environment"
intro="
Extension library for Xfce, targeted at application development.
"
depends="gtk+3 (>= 3.24.10), libxfce4ui (>= 4.14.1), libxfce4util (>= 4.14.0),\
perl-module-uri (>= 1.76)"
version=0.12.8
srcfil=exo-$version.tar.bz2
srcdir=exo-$version
srcurl=http://archive.xfce.org/src/xfce/exo/0.12/$srcfil
srcmd5=e618ce760a12ac7427a48a44c69f3d31
build_src() {
tar -xf $srcfil && cd $srcdir
./configure --prefix=/usr \
--sysconfdir=/etc
make $JOBS
make DESTDIR=$OUTPUTDIR install
cleanup_src
}
build
|
fangxinmiao/projects
|
Architeture/OS/Linux/Distributions/LFS/build-scripts/blfs-9.0-systemd/scripts/e/exo.sh
|
Shell
|
gpl-3.0
| 622 |
#!/usr/bin/env bash
# -*- ENCODING: UTF-8 -*-
##
## @author Raúl Caro Pastorino
## @copyright Copyright © 2017 Raúl Caro Pastorino
## @license https://wwww.gnu.org/licenses/gpl.txt
## @email [email protected]
## @web https://fryntiz.es
## @github https://github.com/fryntiz
## @gitlab https://gitlab.com/fryntiz
## @twitter https://twitter.com/fryntiz
##
## Applied Style Guide:
## @style https://gitlab.com/fryntiz/bash-guide-style
############################
## INSTRUCTIONS ##
############################
## Downloads the GitKraken version control client
############################
##              FUNCTIONS               ##
############################
gitkraken_descargar() {
descargar "gitkraken.deb" "https://release.gitkraken.com/linux/gitkraken-amd64.deb"
}
gitkraken_preconfiguracion() {
echo -e "$VE Generando Pre-Configuraciones de$RO GitKraken$CL"
}
gitkraken_instalar() {
echo -e "$VE Instalando$RO GitKraken$CL"
sudo dpkg -i "$WORKSCRIPT/tmp/gitkraken.deb" && sudo apt install -f -y
}
gitkraken_postconfiguracion() {
echo -e "$VE Generando Post-Configuraciones$RO GitKraken$CL"
}
gitkraken_instalador() {
echo -e "$VE Comenzando instalación de$RO GitKraken$CL"
gitkraken_preconfiguracion
if [[ -f '/usr/bin/gitkraken' ]]; then
echo -e "$VE Ya esta$RO Gitkraken$VE instalado en el equipo, omitiendo paso$CL"
else
if [[ -f "$WORKSCRIPT/tmp/gitkraken.deb" ]]; then
gitkraken_instalar
else
gitkraken_descargar
gitkraken_instalar
fi
        ## If the installation fails, retry it after cleaning up the download
if [[ ! -f '/usr/bin/gitkraken' ]]; then
rm -f "$WORKSCRIPT/tmp/gitkraken.deb"
gitkraken_descargar
gitkraken_instalar
fi
fi
gitkraken_postconfiguracion
}
|
fryntiz/preparar_entorno
|
Apps/GitKraken.sh
|
Shell
|
gpl-3.0
| 1,901 |
#!/bin/bash
sudo mkdir -p /usr/local/src/
sudo chown $USER:$USER /usr/local/src
|
wdenton/conforguration
|
conforg/scripts/initialize.sh
|
Shell
|
gpl-3.0
| 80 |
#!/bin/bash
/usr/local/bin/node /nodeApp/queue/update.js
|
slidewiki/user-service
|
application/queue/updateQueue.sh
|
Shell
|
mpl-2.0
| 58 |
#!/bin/bash -ex
#
# Copyright 2015 Casey Marshall.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
if [ ! -d "$GOPATH" ]; then
echo "GOPATH not found"
exit 1
fi
for x in sf sfd basen; do
if [ ! -x "$GOPATH/bin/$x" ]; then
echo "missing binary: $x"
exit 1
fi
done
TESTDIR=$(mktemp -d)
TRAPEXIT="rm -rf $TESTDIR"
trap "$TRAPEXIT" EXIT
cd $TESTDIR
$GOPATH/bin/sfd &
SFD_PID=$!
TRAPEXIT="$TRAPEXIT;kill ${SFD_PID}" # shitty bash defer
trap "$TRAPEXIT" EXIT
sleep 1
SFD_KEY=$($GOPATH/bin/basen -encode 58 <(head -c 32 sfd.keypair))
if [ -z "$SFD_KEY" ]; then
echo "failed to read server key"
exit 1
fi
$GOPATH/bin/sf --homedir .alice --passphrase /dev/null addr create
ALICE_ADDR=$($GOPATH/bin/sf --homedir .alice --passphrase /dev/null addr default)
if [ -z "$ALICE_ADDR" ]; then
echo "failed to read alice address"
exit 1
fi
# Bob creates an address
$GOPATH/bin/sf --homedir .bob --passphrase /dev/null addr create
BOB_ADDR=$($GOPATH/bin/sf --homedir .bob --passphrase /dev/null addr default)
if [ -z "$BOB_ADDR" ]; then
echo "failed to read bob address"
exit 1
fi
# Exchange addresses
$GOPATH/bin/sf --homedir .alice name add bob $BOB_ADDR
$GOPATH/bin/sf --homedir .bob name add alice $ALICE_ADDR
# Alice sends Bob a message
ALICE_MSG=$(mktemp)
TRAPEXIT="$TRAPEXIT;rm -f $ALICE_MSG"
trap "$TRAPEXIT" EXIT
echo "hello" > $ALICE_MSG
$GOPATH/bin/sf --server-key ${SFD_KEY} --url http://localhost:8080 --homedir .alice --passphrase /dev/null msg push bob $ALICE_MSG
# Bob checks messages
$GOPATH/bin/sf --server-key ${SFD_KEY} --url http://localhost:8080 --homedir .bob --passphrase /dev/null msg pop
|
cmars/shadowfax
|
ftests/pushpop.bash
|
Shell
|
mpl-2.0
| 1,781 |
#!/bin/bash
DIR=$(dirname "${BASH_SOURCE[0]}")
FULL_DIR="$(cd "$DIR" && pwd)"
BASE_DIR=$(dirname "${FULL_DIR}")
source "${BASE_DIR}/python-env/bin/activate"
python "${BASE_DIR}/scrapy-webscanner/exchange_cron.py"
|
os2webscanner/os2webscanner
|
cron/run_exchange_cron_script.sh
|
Shell
|
mpl-2.0
| 215 |
#!/bin/bash
unset GIT_COMMIT_ID
unset EXPECTED_BRANCH
unset TARGET_BRANCH
unset GIT_REMOTE_URL
### Functions ###
usage() {
cat << USAGE_END
Usage: $0 options
Utility script for merging a branch to another branch, pushing the
result to a remote and doing checks along the way.
It's main purpose is to assist in propagating repo changes when using
CI environments, e.g., CircleCI.
OPTIONS:
-c The commit id you want to merge into the target branch. In case
it exists there already, we do nothing.
Example: <A Git commit id.>
-e The branch you expect to be tested in your CI/CD environment. If
set we exit with an error in case the current branch if the Git
repo *differs*.
Example: dev
-t The target branch you want to merge to.
Example: master
-r The remote you want to push to after a successful merge. We
don't push anything, if this is not set. The target branch you define
will be used as remote target branch as well.
Example: [email protected]:kayabendroth/cd-utils.git
-h Show this message.
-? Show this message.
USAGE_END
}
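# Illustrative invocation (all values are placeholders, not taken from a real
# project; CIRCLE_SHA1 is the commit id CircleCI exposes to the build):
#   ./merge-to.sh -c "$CIRCLE_SHA1" -e dev -t master -r [email protected]:example/repo.git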
### Flow ###
# Command-line arguments.
while getopts "c: e: t: r: h ?" option ; do
case $option in
c ) GIT_COMMIT_ID="${OPTARG}"
;;
e ) EXPECTED_BRANCH="${OPTARG}"
;;
t ) TARGET_BRANCH="${OPTARG}"
;;
r ) GIT_REMOTE_URL="${OPTARG}"
;;
h ) usage
exit 0;;
? ) usage
exit 0;;
esac
done
# Basic checks.
GIT=
which git > /dev/null 2>&1
if [[ $? -ne 0 ]] ; then
echo 'Cannot find executable git.'
exit 1
else
GIT=$(which git)
fi
if [[ -z "${GIT_COMMIT_ID}" ]] ; then
echo 'No commit id given.'
exit 1
fi
# Right now we support CircleCI only.
#
# Test, if we're running in CircleCI environment.
if [[ -z "${CIRCLECI}" || "${CIRCLECI}" != 'true' ]] ; then
echo 'Not running in CircelCI environment. We only support CircleCI currently.'
exit 1
fi
# If the branch being tested is not 'dev', we exit immediately.
if [[ -z "${CIRCLE_BRANCH}" || "${CIRCLE_BRANCH}" != "${EXPECTED_BRANCH}" ]] ; then
echo "Merge to ${TARGET_BRANCH} is allowed only, if running on ${EXPECTED_BRANCH}."
exit 1
fi
# Merge to target branch and push to remote in case it has been requested.
echo "Switching to target branch ${TARGET_BRANCH}..."
${GIT} checkout "${TARGET_BRANCH}" || exit 1
# We only need to merge and push, if the local target branch is not on the
# commit we are about to merge.
echo "Checking, if we can skip the merge (and push), b/c local target branch already has the commit..."
${GIT} log -n1 --pretty=oneline | awk '{ print $1 }' | grep -q "${GIT_COMMIT_ID}"
if [[ "$?" -eq 1 ]] ; then
echo "Merging commit ${GIT_COMMIT_ID} onto ${TARGET_BRANCH}..."
${GIT} merge "${GIT_COMMIT_ID}" || exit 1
if [[ -n "${GIT_REMOTE_URL}" ]] ; then
    echo "Pushing local branch to remote ${GIT_REMOTE_URL}..."
${GIT} push "${GIT_REMOTE_URL}" "${TARGET_BRANCH}:${TARGET_BRANCH}" || exit 1
fi
fi
|
kayabendroth/cd-utils
|
scripts/git/merge-to.sh
|
Shell
|
mpl-2.0
| 3,024 |
#!/bin/bash
set -xe
trap 'jobs -p | xargs --no-run-if-empty kill' INT TERM EXIT
export PATH=$PATH:/usr/local/bin
export PIP_DOWNLOAD_CACHE=~/.pip_cache
WD=`pwd`
DB_HOST_IP=${DB_HOST_IP:=127.0.0.1}
POSTGRES_PORT=${POSTGRES_PORT:=5432}
echo "Downloading CKAN..."
git clone https://github.com/ckan/ckan
cd ckan
git checkout release-v2.2.2
cd $WD
echo "Checking Solr..."
SOLR_ACTIVE=`nc -z localhost 8983; echo $?`
if [ $SOLR_ACTIVE -ne 0 ]
then
echo "Downloading Solr..."
CACHE_DIR=~/.cache
FILE=solr-4.8.1.tgz
SOLAR_UNZIP_FOLDER=solr-4.8.1
    # If the Solr folder does not exist, we have to build it
if [ ! -d "$CACHE_DIR/$SOLAR_UNZIP_FOLDER" ]
then
        # Download the Solr installation file if it does not exist
wget --no-verbose --timestamping --directory-prefix=$CACHE_DIR https://archive.apache.org/dist/lucene/solr/4.8.1/$FILE
# Unzip the folder
tar -xf "$CACHE_DIR/$FILE" --directory "$CACHE_DIR"
# Delete the downloaded tar.gz
rm "$CACHE_DIR/$FILE"
fi
echo "Configuring and starting Solr..."
ln -s "$CACHE_DIR/$SOLAR_UNZIP_FOLDER" .
mv "$SOLAR_UNZIP_FOLDER/example/solr/collection1/conf/schema.xml" "$SOLAR_UNZIP_FOLDER/example/solr/collection1/conf/schema.xml.bak"
ln -s $WD/ckan/ckan/config/solr/schema.xml "$SOLAR_UNZIP_FOLDER/example/solr/collection1/conf/schema.xml"
cd solr-4.8.1/example
java -jar start.jar 2>&1 > /dev/null &
cd $WD
else
echo "Solar is already installed..."
fi
echo "Setting up virtualenv..."
virtualenv --no-site-packages virtualenv
source virtualenv/bin/activate
pip install --upgrade pip
echo "Installing CKAN dependencies..."
cd ckan
python setup.py develop
pip install -r requirements.txt
pip install -r dev-requirements.txt
cd ..
echo "Removing databases from old executions..."
sudo -u postgres psql -c "DROP DATABASE IF EXISTS datastore_test;"
sudo -u postgres psql -c "DROP DATABASE IF EXISTS ckan_test;"
sudo -u postgres psql -c "DROP USER IF EXISTS ckan_default;"
echo "Creating the PostgreSQL user and database..."
sudo -u postgres psql -c "CREATE USER ckan_default WITH PASSWORD 'pass';"
sudo -u postgres psql -c 'CREATE DATABASE ckan_test WITH OWNER ckan_default;'
sudo -u postgres psql -c 'CREATE DATABASE datastore_test WITH OWNER ckan_default;'
echo "Modifying the configuration to setup properly the Postgres port..."
mkdir -p data/storage
echo "
sqlalchemy.url = postgresql://ckan_default:pass@$DB_HOST_IP:$POSTGRES_PORT/ckan_test
ckan.datastore.write_url = postgresql://ckan_default:pass@$DB_HOST_IP:$POSTGRES_PORT/datastore_test
ckan.datastore.read_url = postgresql://datastore_default:pass@$DB_HOST_IP:$POSTGRES_PORT/datastore_test
ckan.storage_path=data/storage" >> test.ini
echo "Initializing the database..."
sed -i "s/\(postgresql:\/\/.\+\)@localhost\(:[0-9]\+\)\?/\1@$DB_HOST_IP:$POSTGRES_PORT/g" ckan/test-core.ini
cd ckan
paster db init -c test-core.ini
cd ..
echo "Installing ckanext-datarequests and its requirements..."
python setup.py develop
echo "Running tests..."
python setup.py nosetests
|
keitaroinc/ckanext-datarequests
|
bin/setup_and_test.sh
|
Shell
|
agpl-3.0
| 3,100 |
#!/bin/sh
export ARCH=$(uname -m)
if [ "${ARCH}" == "aarch64" ]; then
# https://docs.cypress.io/guides/getting-started/installing-cypress#Download-URLs
echo "Skipping this step on aarch64 because we're not running Cypress tests there."
exit 0
fi
POSTGRES_PASSWORD="v3rys3cr3t"
SUBNET_GROUP_NAME="default-vpc-b5c428ce"
aws rds create-db-instance \
--db-name astrobin \
--db-instance-identifier astrobin-test-${CODEBUILD_BUILD_NUMBER} \
--allocated-storage 100 \
--engine postgres \
--db-instance-class db.r5.large \
--master-username astrobin \
--master-user-password ${POSTGRES_PASSWORD} \
--availability-zone us-east-1a \
--no-multi-az \
--publicly-accessible \
--no-deletion-protection \
--db-subnet-group-name ${SUBNET_GROUP_NAME}
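# Possible follow-up (assumption, not part of the original flow): block until
# the instance is actually reachable before running migrations or tests.
#   aws rds wait db-instance-available \
#     --db-instance-identifier astrobin-test-${CODEBUILD_BUILD_NUMBER}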
|
astrobin/astrobin
|
buildspecs/create-test-db.sh
|
Shell
|
agpl-3.0
| 798 |
cd "$(dirname "$0")"
gcc -I../../../../ -I../../include -c ../../src/luke_mods.cpp -o temp/luke_mods.o
gcc -I../../../../ -I../../include -c ../../src/steve_mods.cpp -o temp/steve_mods.o
ar rcs src.a temp/steve_mods.o temp/luke_mods.o
|
The-Green-Team/Visitor-Program
|
Tools/qb64/internal/c/parts/user_mods/os/osx/setup_build.command
|
Shell
|
agpl-3.0
| 236 |
#!/bin/bash -e
######################################################
# Snipe-It Install Script #
# Script created by Mike Tucker #
# [email protected] #
# This script is just to help streamline the #
# install process for Debian and CentOS #
# based distributions. I assume you will be #
# installing as a subdomain on a fresh OS install. #
# Right now I'm not going to worry about SMTP setup #
# #
# Feel free to modify, but please give #
# credit where it's due. Thanks! #
######################################################
#First things first, let's set some variables and find our distro.
clear
si="Snipe-IT"
hostname="$(hostname)"
hosts=/etc/hosts
distro="$(cat /proc/version)"
file=master.zip
dir=/var/www/snipe-it-master
ans=default
case $distro in
*Ubuntu*|*Debian*)
echo "Ubuntu/Debian detected. Carry forth."
distro=u
;;
*centos*)
echo "CentOS detected. Carry forth."
distro=c
;;
*)
echo "Not sure of this OS. Exiting for safety."
exit
;;
esac
#Get your FQDN.
echo ""
echo "$si install script - Installing $ans"
echo "Q. What is the FQDN of your server? (example: www.yourserver.com)"
read fqdn
echo ""
#Do you want to set your own passwords, or have me generate random ones?
ans=default
until [[ $ans == "yes" ]] || [[ $ans == "no" ]]; do
echo "Q. Do you want me to automatically create the MySQL root & user passwords? (y/n)"
read setpw
case $setpw in
[yY] | [yY][Ee][Ss] )
mysqlrootpw="$(echo `< /dev/urandom tr -dc _A-Za-z-0-9 | head -c8`)"
mysqluserpw="$(echo `< /dev/urandom tr -dc _A-Za-z-0-9 | head -c8`)"
echo "I'm putting this into /root/mysqlpasswords ... PLEASE REMOVE that file after you have recorded the passwords somewhere safe!"
ans="yes"
;;
[nN] | [n|N][O|o] )
echo "Q. What do you want your root PW to be?"
read mysqlrootpw
echo "Q. What do you want your snipeit user PW to be?"
read mysqluserpw
ans="no"
;;
*) echo "Invalid answer. Please type y or n"
;;
esac
done
#Snipe says we need a new 32bit key, so let's create one randomly and inject it into the file
random32="$(echo `< /dev/urandom tr -dc _A-Za-z-0-9 | head -c32`)"
#createstuff.sql will be injected to the database during install. mysqlpasswords.txt is a file that will contain the root and snipeit user passwords.
#Again, this file should be removed, which will be a prompt at the end of the script.
createstufffile=/root/createstuff.sql
passwordfile=/root/mysqlpasswords.txt
echo >> $createstufffile "CREATE DATABASE snipeit;"
echo >> $createstufffile "GRANT ALL PRIVILEGES ON snipeit.* TO snipeit@localhost IDENTIFIED BY '$mysqluserpw';"
echo >> $passwordfile "MySQL Passwords..."
echo >> $passwordfile "Root: $mysqlrootpw"
echo >> $passwordfile "User (snipeit): $mysqluserpw"
echo >> $passwordfile "32 bit random string: $random32"
echo "MySQL ROOT password: $mysqlrootpw"
echo "MySQL USER (snipeit) password: $mysqluserpw"
echo "32 bit random string: $random32"
echo "These passwords have been exported to /root/mysqlpasswords.txt...I recommend You delete this file for security purposes"
#Let us make it so only root can read the file. Again, this isn't best practice, so please remove these after the install.
chown root:root $passwordfile $createstufffile
chmod 700 $passwordfile $createstufffile
if [[ $distro == "u" ]]; then
#Update/upgrade Debian/Ubuntu repositories, get the latest version of git.
apachefile=/etc/apache2/sites-available/$fqdn.conf
sudo apt-get update ; sudo apt-get -y upgrade ; sudo apt-get install -y git unzip
wget https://github.com/snipe/snipe-it/archive/$file
sudo unzip $file -d /var/www/
#We already established MySQL root & user PWs, so we dont need to be prompted. Let's go ahead and install Apache, PHP and MySQL.
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y lamp-server^
sudo apt-get install -y php5 php5-mcrypt php5-curl php5-mysql php-gd
#Create MySQL accounts
echo "Create MySQL accounts"
sudo mysqladmin -u root password $mysqlrootpw
sudo mysql -u root -p$mysqlrootpw < /root/createstuff.sql
#Enable mcrypt and rewrite
sudo php5enmod mcrypt
sudo a2enmod rewrite
sudo ls -al /etc/apache2/mods-enabled/rewrite.load
#Create a new virtual host for Apache.
echo >> $apachefile ""
echo >> $apachefile ""
echo >> $apachefile "<VirtualHost *:80>"
echo >> $apachefile "ServerAdmin webmaster@localhost"
echo >> $apachefile " <Directory $dir/public>"
echo >> $apachefile " Require all granted"
echo >> $apachefile " AllowOverride All"
echo >> $apachefile " </Directory>"
echo >> $apachefile " DocumentRoot $dir/public"
echo >> $apachefile " ServerName $fqdn"
echo >> $apachefile " ErrorLog "\${APACHE_LOG_DIR}"/error.log"
echo >> $apachefile " CustomLog "\${APACHE_LOG_DIR}"/access.log combined"
echo >> $apachefile "</VirtualHost>"
echo >> $hosts "127.0.0.1 $hostname $fqdn"
a2ensite $fqdn.conf
#Change permissions on directories
sudo chmod -R 755 $dir/app/storage
sudo chmod -R 755 $dir/app/private_uploads
sudo chmod -R 755 $dir/public/uploads
sudo chown -R www-data:www-data /var/www/
echo "Finished permission changes."
#Modify the Snipe-It files necessary for a production environment.
replace "'www.yourserver.com'" "'$hostname'" -- $dir/bootstrap/start.php
cp $dir/app/config/production/database.example.php $dir/app/config/production/database.php
replace "'snipeit_laravel'," "'snipeit'," -- $dir/app/config/production/database.php
replace "'travis'," "'snipeit'," -- $dir/app/config/production/database.php
replace " 'password' => ''," " 'password' => '$mysqluserpw'," -- $dir/app/config/production/database.php
replace "'http://production.yourserver.com'," "'http://$fqdn'," -- $dir/app/config/production/database.php
cp $dir/app/config/production/app.example.php $dir/app/config/production/app.php
replace "'http://production.yourserver.com'," "'http://$fqdn'," -- $dir/app/config/production/app.php
replace "'Change_this_key_or_snipe_will_get_ya'," "'$random32'," -- $dir/app/config/production/app.php
replace "'false'," "true," -- $dir/app/config/production/app.php
cp $dir/app/config/production/mail.example.php $dir/app/config/production/mail.php
#Install / configure composer
curl -sS https://getcomposer.org/installer | php
mv composer.phar /usr/local/bin/composer
cd $dir/
composer install --no-dev --prefer-source
php artisan app:install --env=production
service apache2 restart
else
#Make directories so we can create a new apache vhost
sudo mkdir /etc/httpd/
sudo mkdir /etc/httpd/sites-available/
sudo mkdir /etc/httpd/sites-enabled/
apachefile=/etc/httpd/sites-available/$fqdn.conf
apachefileen=/etc/httpd/sites-enabled/$fqdn.conf
apachecfg=/etc/httpd/conf/httpd.conf
#Allow us to get the mysql engine
sudo rpm -Uvh http://dev.mysql.com/get/mysql-community-release-el7-5.noarch.rpm
sudo yum -y install httpd mysql-server wget git unzip
wget https://github.com/snipe/snipe-it/archive/$file
sudo unzip $file -d /var/www/
sudo /sbin/service mysqld start
#Create MySQL accounts
echo "Create MySQL accounts"
sudo mysqladmin -u root password $mysqlrootpw
echo ""
echo "***Your Current ROOT password is---> $mysqlrootpw"
echo "***Use $mysqlrootpw at the following prompt for root login***"
sudo /usr/bin/mysql_secure_installation
#Install PHP stuff.
sudo yum -y install php php-mysql php-bcmath.x86_64 php-cli.x86_64 php-common.x86_64 php-embedded.x86_64 php-gd.x86_64 php-mbstring
wget http://dl.fedoraproject.org/pub/epel/7/x86_64/e/epel-release-7-5.noarch.rpm
rpm -ivh epel-release-7-5.noarch.rpm
yum install -y --enablerepo="epel" php-mcrypt
#Create the new virtual host in Apache.
echo >> $apachefile ""
echo >> $apachefile ""
echo >> $apachefile "<VirtualHost *:80>"
echo >> $apachefile "ServerAdmin webmaster@localhost"
echo >> $apachefile " <Directory $dir/public>"
echo >> $apachefile " Require all granted"
echo >> $apachefile " AllowOverride All"
echo >> $apachefile " Options +Indexes"
echo >> $apachefile " </Directory>"
echo >> $apachefile " DocumentRoot $dir/public"
echo >> $apachefile " ServerName $fqdn"
echo >> $apachefile " ErrorLog /var/log/httpd/snipe.error.log"
echo >> $apachefile " CustomLog /var/log/access.log combined"
echo >> $apachefile "</VirtualHost>"
echo >> $hosts "127.0.0.1 $hostname $fqdn"
sudo ln -s $apachefile $apachefileen
#Enable rewrite and vhost
echo >> $apachecfg "LoadModule rewrite_module modules/mod_rewrite.so"
echo >> $apachecfg "IncludeOptional sites-enabled/*.conf"
#Change permissions on directories
sudo chmod -R 755 $dir/app/storage
sudo chmod -R 755 $dir/app/private_uploads
sudo chmod -R 755 $dir/public/uploads
sudo chown -R apache:apache /var/www/
service httpd restart
#Modify the Snipe-It files necessary for a production environment.
replace "'www.yourserver.com'" "'$hostname'" -- $dir/bootstrap/start.php
cp $dir/app/config/production/database.example.php $dir/app/config/production/database.php
replace "'snipeit_laravel'," "'snipeit'," -- $dir/app/config/production/database.php
replace "'travis'," "'snipeit'," -- $dir/app/config/production/database.php
replace " 'password' => ''," " 'password' => '$mysqluserpw'," -- $dir/app/config/production/database.php
replace "'http://production.yourserver.com'," "'http://$fqdn'," -- $dir/app/config/production/database.php
cp $dir/app/config/production/app.example.php $dir/app/config/production/app.php
replace "'http://production.yourserver.com'," "'http://$fqdn'," -- $dir/app/config/production/app.php
replace "'Change_this_key_or_snipe_will_get_ya'," "'$random32'," -- $dir/app/config/production/app.php
cp $dir/app/config/production/mail.example.php $dir/app/config/production/mail.php
#Install / configure composer
cd $dir
sudo mysql -u root -p$mysqlrootpw < /root/createstuff.sql
curl -sS https://getcomposer.org/installer | php
php composer.phar install --no-dev --prefer-source
php artisan app:install --env=production
#Add SELinux and firewall exception/rules. You'll have to allow 443 if you want ssl connectivity.
chcon -R -h -t httpd_sys_script_rw_t $dir/
firewall-cmd --zone=public --add-port=80/tcp --permanent
firewall-cmd --reload
service httpd restart
fi
echo ""; echo ""; echo ""
echo "***I have no idea about your mail environment, so if you want email capability, open up the following***"
echo "nano -w $dir/app/config/production/mail.php"
echo "And edit the attributes appropriately."
sleep 1
echo "";echo "";echo ""
ans=default
until [[ $ans == "yes" ]] || [[ $ans == "no" ]]; do
echo "Q. Shall I delete the password files I created? (Remember to record the passwords before deleting) (y/n)"
read setpw
case $setpw in
[yY] | [yY][Ee][Ss] )
rm $createstufffile
rm $passwordfile
echo "$createstufffile and $passwordfile files have been removed."
ans=yes
;;
[nN] | [n|N][O|o] )
echo "Ok, I won't remove the file. Please for the love of security, record the passwords and delete this file regardless."
echo "$si cannot be held responsible if this file is compromised!"
echo "From Snipe: I cannot encourage or even facilitate poor security practices, and still sleep the few, frantic hours I sleep at night."
ans=no
;;
*)
echo "Please select a valid option"
;;
esac
done
echo ""; echo ""
echo "***If you want mail capabilities, open $dir/app/config/production/mail.php and fill out the attributes***"
echo ""; echo ""
echo "***$si should now be installed. open up http://$fqdn in a web browser to verify.***"
sleep 1
|
csd-dev/snipe-it
|
install.sh
|
Shell
|
agpl-3.0
| 12,128 |
#!/bin/bash
exitstatus=0
for i in $(go list -f '{{.Dir}}' github.com/juju/core/...)
do
src=$i/*_test.go
# The -s flag is needed to suppress errors when
# the above pattern does not match any files.
if grep -s -q -l 'launchpad.net/gocheck' $src &&
! egrep -l -q 'gc\.TestingT|testing\.(\w*)MgoTestPackage' $src
then
# There are _test.go files that use gocheck but
# don't call gocheck.TestingT.
echo $i uses gocheck but never calls TestingT
exitstatus=1
fi
done
exit $exitstatus
|
jkary/core
|
scripts/checktesting.bash
|
Shell
|
agpl-3.0
| 494 |
#!/bin/bash
#
# FIXME: ImageMagick can map to a color palette but the PNG encoder rearranges the palette to whatever whim it fancies.
# Telling the PNG encoder to preserve the color palette makes it forget there is a color palette (what?).
# We're going to just have to write something to take the PNGs and rearrange the palette back into an order that
# matches the damn palette we wanted in common across the PNGs in the first place. >:(
mkdir -p __DONE_PALUNORD__
convert "$1" -channel A -threshold 50% -channel RGB -ordered-dither o8x8,32,64,32 -map palette.png -define png:compression-level=9 -define png:compression-strategy=0 "png8:$1.palunord.png" || exit 1
mv -vn "$1" __DONE_PALUNORD__/
|
joncampbell123/doslib
|
games/dgjamfall2020/codenamesunfish3d/devasset/png2palette.sh
|
Shell
|
lgpl-2.1
| 715 |
#!/bin/sh
gource \
--seconds-per-day .1 \
--auto-skip-seconds .1 \
--file-idle-time 0 \
--multi-sampling \
--highlight-users \
--stop-at-end \
--key \
--title "History of GPlugin" \
--font-size 26 \
-1280x720 \
--max-files 0 \
--hide filenames,mouse,progress \
--output-framerate 30 \
-o - \
| ffmpeg \
-y \
-r 30 \
-f image2pipe \
-vcodec ppm \
-i - \
-vcodec libx264 \
-preset ultrafast \
-pix_fmt yuv420p \
-crf 1 \
-threads 0 \
-bf 0 \
gplugin.mp4
|
tieto/gplugin-pkg
|
scripts/makeviz.sh
|
Shell
|
lgpl-2.1
| 480 |
#!/bin/bash
#
### BEGIN INIT INFO
# Provides: ipt
# Required-Start: $syslog $time
# Required-Stop: $syslog $time $networking
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Set iptables
# Description: Set iptables rules
### END INIT INFO
# Source function library.
. /lib/lsb/init-functions
SOURCE=/etc/ipt.conf
LOG=/var/log/ipt.log
IPT=/usr/local/bin/ipt
if [ ! -f $SOURCE ]
then
log_warning_msg "Not found: $SOURCE source file."
exit 1;
fi
if [ ! -x $IPT ]
then
log_warning_msg "Not found: $IPT executable file."
exit 1
fi
#
# See how we were called.
#
start() {
log_daemon_msg "Starting ipt(iptables)" "ipt"
$IPT start <$SOURCE >$LOG 2>&1
log_end_msg $?
}
stop() {
log_daemon_msg "Stopping ipt(iptables)" "ipt"
$IPT stop >/dev/null 2>&1
log_end_msg $?
}
restart() {
log_daemon_msg "Restarting ipt(iptables)" "ipt"
$IPT restart <$SOURCE >$LOG 2>&1
log_end_msg $?
}
check() {
echo -n "Check ipt source: "
$IPT check <$SOURCE >$LOG 2>&1
if [ $? -eq 0 ]; then
log_success_msg "$SOURCE file is good"
else
log_failure_msg "$SOURCE file is wrong"
fi
exit 0
}
case "$1" in
start)
start
;;
stop)
stop
;;
reload|restart)
restart
;;
check)
check
;;
*)
log_action_msg echo "Usage: $0 {start|stop|restart|reload|check}"
exit 2
esac
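# Typical calls once installed as an init script (names taken from the LSB
# header above; /etc/ipt.conf must be in whatever format /usr/local/bin/ipt expects):
#   service ipt check     # validate /etc/ipt.conf without applying it
#   service ipt restart   # re-apply the rule set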
|
csikfer/ipt
|
debipt.sh
|
Shell
|
lgpl-2.1
| 1,385 |
#!/usr/bin/env bash
# For debug, uncomment:
# set -x
########################################################################
# This script is responsible of handing over the build process, in a
# proper out of source build directory. It takes care of calling cmake
# first if needed, and then cd's into the proper sub-directory of the
# build tree, and runs make there. The intent is that this script is
# called from a Makefile within the source tree.
# In particular, the following tasks are done,
# - check if the calling path is correct?
# - if exists, parse the file ${up_path}/local.sh
# - check if cmake is installed, if not install it.
# - "cmake" to generate Makefile
# - "make"
#
# Type "make ?" for more options.
########################################################################
: ${MAKE=make}
export MAKE
if echo "${MAKEFLAGS}" | grep -q "jobserver-fds=0," ; then
echo "# You are calling the top-level cado makefile with file descriptor 0 closed.">&2
echo "# This is unsupported (at least for a parallel build), because in that">&2
echo "# case GNU Make opens and uses a pipe on file descriptor 0, and we">&2
echo "# suspect that cmake closes it right away, causing the compilation to">&2
echo "# fail.">&2
echo "#">&2
echo "# Simple fix: make -j \$number_of_cpus < /dev/null">&2
echo "#">&2
exit 42
fi
args=("$@")
if [ "$1" = "show" ] ; then
if [ "$#" != 1 ] ; then
echo "Argument 'show' must be alone on the command line of $0" >&2
fi
set -- --show
else
set --
fi
source "$(dirname $0)/build_environment.sh"
if [ "$1" ] ; then
# we've done our deeds, finish.
exit 0
fi
set -e
set "${args[@]}"
########################################################################
# "make ?" or "make help" (when the Makefile does not exist)
info=""
function make_usage {
echo "-------------------------------------------------------------- "
echo "[Options] (see $0 for details)"
echo "-------------------------------------------------------------- "
echo " $info \"make ?\" -- this help"
echo " $info \"make show\" -- only show env variables"
echo " $info \"make cmake\" -- only run cmake to generate Makefile"
echo " $info \"make\" -- run cmake first and then make"
echo " $info \"make tidy\" -- delete folder $build_tree (dangerous)"
echo " $info Any other options will be passed to the actual make followed."
echo "-------------------------------------------------------------- "
exit 0
}
if [ "$1" == "?" ] ; then
make_usage
fi
if [ "$1" == "help" ] && [ ! -f "$build_tree/Makefile" ] ; then
make_usage
fi
########################################################################
# make tidy (warn, this delete the whole build folder)
wn="[warning]"
if [ "$1" == "tidy" ] ; then
echo "$wn this deletes the whole folder $build_tree. Do you want to continue? (n/Y)"
if [ -e "`tty`" ] ; then
read TIDY_BUILD
else
echo "$wn no input terminal, assuming no"
TIDY_BUILD=n
fi
if [ "$TIDY_BUILD" == "Y" ]; then
echo "$wn wiping out $build_tree"
rm -rf "$build_tree"
else
echo "$wn no action and quit now"
fi
exit 0
fi
########################################################################
# Make sure we have cmake, by the way !
: ${cmake_path:="`which cmake 2>/dev/null`"}
cmake_companion_install_location="$absolute_path_of_source/cmake-installed"
if [ "$?" != "0" ] || ! [ -x "$cmake_path" ] ; then
echo "CMake not found" >&2
cmake_path=
# Recall that (some versions of) bash do not want quoting for regex patterns.
elif [[ "`"$cmake_path" --version`" =~ ^cmake\ version\ [012] ]] && ! ( [[ "`"$cmake_path" --version`" =~ ^cmake\ version\ 2.(9|8.11|8.12) ]]) ; then
echo "CMake found, but not with version 2.8.11 or newer" >&2
cmake_path=
fi
if ! [ "$cmake_path" ] ; then
cmake_path="$cmake_companion_install_location/bin/cmake"
if [ -x "$cmake_path" ] ; then
echo "Using custom cmake in $cmake_companion_install_location" >&2
else
echo "I am about to download and compile a compatible version of Cmake."
echo "Do you want to continue ? (y/n)"
if [ -e "`tty`" ] ; then
read INSTALL_CMAKE
else
echo "No input terminal, assuming yes"
INSTALL_CMAKE=y
fi
if [ ! "$INSTALL_CMAKE" = "y" ]; then
echo "Please install a compatible version of Cmake."
exit 1
fi
echo "Need to get cmake first -- this takes long !"
cd "$up_path"
if ! scripts/install-cmake.sh "$cmake_companion_install_location" ; then
echo "cmake install Failed, sorry" >&2
exit 1
fi
cd "$called_from"
fi
fi
########################################################################
# handle "make clean"
if [ "$1" == "clean" ] && [ ! -f "$build_tree/Makefile" ] ; then
echo "There is no $build_tree/Makefile. Nothing to clean."
exit 0
fi
########################################################################
# call cmake (if Makefile does not exist)
if [ "$1" = "cmake" ] || [ ! -f "$build_tree/Makefile" ] ; then
mkdir -p "$build_tree"
absolute_path_of_build_tree="`cd "$build_tree" ; $pwdP`"
if [ ! "x$CMAKE_GENERATOR" == "x" ] ; then
CMAKE_GENERATOR_OPT="-G$CMAKE_GENERATOR"
fi
(cd "$absolute_path_of_build_tree" ; "$cmake_path" "$CMAKE_GENERATOR_OPT" $CMAKE_EXTRA_ARGS "$absolute_path_of_source")
fi
if [ "$1" = "cmake" ] ; then
exit 0
fi
########################################################################
# Now cd into the target directory, and build everything required.
# Note that it's useful to kill MAKELEVEL, or otherwise we'll get scores
# and scores of ``Entering directory'' messages (sure, there's the
# --no-print-directory option -- but it's not the right cure here).
# env | grep -i make
unset MAKELEVEL
absolute_path_of_build_tree="`cd "$build_tree" ; $pwdP`"
(cd "$absolute_path_of_build_tree$relative_path_of_cwd" ; ${MAKE} "$@")
|
mancoast/cado-nfs
|
scripts/call_cmake.sh
|
Shell
|
lgpl-2.1
| 6,127 |
#!/bin/bash
#
# pffinfo tool testing script
#
# Copyright (C) 2008-2015, Joachim Metz <[email protected]>
#
# Refer to AUTHORS for acknowledgements.
#
# This software is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
#
EXIT_SUCCESS=0;
EXIT_FAILURE=1;
EXIT_IGNORE=77;
list_contains()
{
LIST=$1;
SEARCH=$2;
for LINE in $LIST;
do
if test $LINE = $SEARCH;
then
return ${EXIT_SUCCESS};
fi
done
return ${EXIT_FAILURE};
}
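# Example: `list_contains "dir1 dir2" "dir2"` returns ${EXIT_SUCCESS} (0),
# while a value not present in the list returns ${EXIT_FAILURE} (1).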
test_info()
{
DIRNAME=$1;
INPUT_FILE=$2;
BASENAME=`basename ${INPUT_FILE}`;
rm -rf tmp;
mkdir tmp;
${TEST_RUNNER} ${PFFINFO} ${INPUT_FILE} | sed '1,2d' > tmp/${BASENAME}.log;
RESULT=$?;
if test -f "input/.pffinfo/${DIRNAME}/${BASENAME}.log.gz";
then
zdiff "input/.pffinfo/${DIRNAME}/${BASENAME}.log.gz" "tmp/${BASENAME}.log";
RESULT=$?;
else
mv "tmp/${BASENAME}.log" "input/.pffinfo/${DIRNAME}";
gzip "input/.pffinfo/${DIRNAME}/${BASENAME}.log";
fi
rm -rf tmp;
echo -n "Testing pffinfo of input: ${INPUT_FILE} ";
if test ${RESULT} -ne ${EXIT_SUCCESS};
then
echo " (FAIL)";
else
echo " (PASS)";
fi
return ${RESULT};
}
PFFINFO="../pfftools/pffinfo";
if ! test -x ${PFFINFO};
then
PFFINFO="../pfftools/pffinfo.exe";
fi
if ! test -x ${PFFINFO};
then
echo "Missing executable: ${PFFINFO}";
exit ${EXIT_FAILURE};
fi
TEST_RUNNER="tests/test_runner.sh";
if ! test -x ${TEST_RUNNER};
then
TEST_RUNNER="./test_runner.sh";
fi
if ! test -x ${TEST_RUNNER};
then
echo "Missing test runner: ${TEST_RUNNER}";
exit ${EXIT_FAILURE};
fi
if ! test -d "input";
then
echo "No input directory found.";
exit ${EXIT_IGNORE};
fi
OLDIFS=${IFS};
IFS="
";
RESULT=`ls input/* | tr ' ' '\n' | wc -l`;
if test ${RESULT} -eq 0;
then
echo "No files or directories found in the input directory.";
EXIT_RESULT=${EXIT_IGNORE};
else
IGNORELIST="";
if ! test -d "input/.pffinfo";
then
mkdir "input/.pffinfo";
fi
if test -f "input/.pffinfo/ignore";
then
IGNORELIST=`cat input/.pffinfo/ignore | sed '/^#/d'`;
fi
for TESTDIR in input/*;
do
if test -d "${TESTDIR}";
then
DIRNAME=`basename ${TESTDIR}`;
if ! list_contains "${IGNORELIST}" "${DIRNAME}";
then
if ! test -d "input/.pffinfo/${DIRNAME}";
then
mkdir "input/.pffinfo/${DIRNAME}";
fi
if test -f "input/.pffinfo/${DIRNAME}/files";
then
TESTFILES=`cat input/.pffinfo/${DIRNAME}/files | sed "s?^?${TESTDIR}/?"`;
else
TESTFILES=`ls ${TESTDIR}/*`;
fi
for TESTFILE in ${TESTFILES};
do
if ! test_info "${DIRNAME}" "${TESTFILE}";
then
exit ${EXIT_FAILURE};
fi
done
fi
fi
done
EXIT_RESULT=${EXIT_SUCCESS};
fi
IFS=${OLDIFS};
exit ${EXIT_RESULT};
|
PythonForensics/libpff
|
tests/test_pffinfo.sh
|
Shell
|
lgpl-3.0
| 3,254 |
docker_psa() {
clear
ps_result=$(docker ps --all --format "table {{.Image}}\t{{.Names}}\t{{.Ports}}\t{{.Status}}")
ps_table_header=$(echo "${ps_result}" | head --lines=1)
ps_table_rows_up=$(
echo "${ps_result}" |
tail --lines=+2 |
\grep "Up" |
# List running instances first.
sort --ignore-leading-blanks --version-sort --key=4 --key=2 --key=1
)
ps_table_rows_exited=$(
echo "${ps_result}" |
tail --lines=+2 |
\grep "Exited" |
    # List exited instances after the running ones (sorted by status, then name, then image).
sort --ignore-leading-blanks --version-sort --key=4 --key=2 --key=1
)
i=0
echo "${ps_table_header}"
echo "${ps_table_rows_up}" | while read row; do
if [ $(( $i % 2 )) -eq 0 ]; then
echo -e "\e[48;5;235m${row}\e[0m"
else
echo -e "\e[48;5;232m${row}\e[0m"
fi
((i+=1))
done
echo "${ps_table_rows_exited}" | while read row; do
echo -e "\e[2;40;97m${row}\e[0m"
((i+=1))
done
echo
images_result="$(docker images)"
images_table_header=$(echo "${images_result}" | head --lines=1)
images_table_rows=$(
echo "${images_result}" |
tail --lines=+2
)
echo "${images_table_header}"
echo "${images_table_rows}" | while read row; do
if [ $(( $i % 2 )) -eq 0 ]; then
echo -e "\e[48;5;235m${row}\e[0m"
else
echo -e "\e[48;5;232m${row}\e[0m"
fi
((i+=1))
done
}
docker_watch_psa() {
while :; do
# Fetch result before clearing as the command can be slow. Without this,
# there will be a blank cleared screen while the command finishes.
docker_psa_result="$(docker_psa)"
clear
echo "${docker_psa_result}"
sleep 10
done
}
docker_image_prune() {
# Use --force option to skip confirmation prompt.
docker image prune --force
  # Use `docker image prune --all' for removing dangling and unused images
# (images not referenced by any container).
until="$(date --rfc-3339="date" --date="3 months ago")"
docker image prune --all --filter="until=${until}"
}
_docker() {
# Run docker only if is not already running.
if (! docker stats --no-stream &> /dev/null); then
if [[ "${OSTYPE}" == "darwin"* ]]; then
open /Applications/Docker.app
# Wait until docker daemon is running and has completed initialization.
echo -n "Waiting for docker."
while (! docker stats --no-stream &> /dev/null); do
echo -n "."
sleep 1
done
echo ""
else
echo "docker is not running"
return
fi
fi
docker "${@}"
}
alias attach="docker attach"
alias dc="_docker"
alias doc="_docker"
alias docker="_docker"
alias img="clear; docker images; echo; docker ps -a"
alias pause="docker pause"
alias prune="docker_image_prune"
alias psa="docker_psa"
alias psaw="docker_watch_psa"
alias rmi="clear; docker rmi"
alias stop="docker stop"
alias wpsa="docker_watch_psa"
|
dot-star/dot-star
|
docker/.aliases.sh
|
Shell
|
unlicense
| 3,120 |
make mrproper
make INSTALL_HDR_PATH=dest headers_install
find dest/include \( -name .install -o -name ..install.cmd \) -delete
mkdir -p /usr/include
cp -rv dest/include/* /usr/include
|
pampa/cyberdeck
|
try4/usr/src/core/linux-headers.sh
|
Shell
|
unlicense
| 186 |
#!/bin/bash
# This is an example of the request format that OpenBazaar-Server accepts
: "${OB_USERNAME:?OB_USERNAME required in env}"
: "${OB_PASSWORD:?OB_PASSWORD required in env}"
: "${OB_PROTO:?OB_PROTO required in env}"
: "${OB_PORT:?OB_PORT required in env}"
: "${OB_HOST:?OB_HOST required in env}"
sep='##'
function heading() {
echo -e "\n\n\n$sep\n$sep $1\n$sep"
}
heading "(POST) login"
curl \
--data "username=$OB_USERNAME&password=$OB_PASSWORD" \
--dump-header ../blobs/test.headers.login \
--trace ../blobs/test.trace.login \
$OB_PROTO://$OB_HOST:$OB_PORT/api/v1/login
heading "(GET) get_notifications"
curl \
-L \
-b ../blobs/test.headers.login \
--dump-header ../blobs/test.headers.get_notifications \
--trace ../blobs/test.trace.get_notifications \
$OB_PROTO://$OB_HOST:$OB_PORT/api/v1/get_notifications
heading "(GET) get_image"
curl \
-L \
-b ../blobs/test.headers.login \
--dump-header ../blobs/test.headers.get_image \
--trace ../blobs/test.trace.get_image \
-o ../blobs/test.image.png \
$OB_PROTO://$OB_HOST:$OB_PORT/api/v1/get_image?hash=a37f9f18b1d064debc5908f84153124fc220e0c
heading "(POST) follow"
curl \
-L \
-b ../blobs/test.headers.login \
-X POST \
--data "guid=a06aa22a38f0e62221ab74464c311bd88305f88c" \
--dump-header ../blobs/test.headers.follow \
--trace ../blobs/test.trace.follow \
$OB_PROTO://$OB_HOST:$OB_PORT/api/v1/follow
heading "(DELETE) social_accounts"
curl \
-L \
-b ../blobs/test.headers.login \
-X DELETE \
--data "account_type=twitter" \
--dump-header ../blobs/test.headers.social_accounts \
--trace ../blobs/test.trace.social_accounts \
$OB_PROTO://$OB_HOST:$OB_PORT/api/v1/social_accounts
|
insanity54/insane-openbazaar-api
|
utils/curls.sh
|
Shell
|
unlicense
| 1,773 |
#!/bin/bash
sed -i 's/\(127\.0\.0\.1\)\s*.*\s*\(localhost\)\s/\1\t\2 /' /etc/hosts
|
andreabattaglia/redhat-jboss-mw-arch-vagrant
|
hostname.sh
|
Shell
|
apache-2.0
| 84 |
#! /bin/sh
# Copyright 2016 Aino.io
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# AINO_HOME environment variable tells us where the aino configuration is
# If no AINO_HOME environment variable is defined, then assume it is at the same directory as this script
if [ "$AINO_HOME" = "" ]; then
AINO_HOME=.
fi
if [ -e "${AINO_HOME}/aino-library.sh" ]; then
. "${AINO_HOME}/aino-library.sh"
fi
print_help() {
echo "Usage: aino.sh --from \"<origin system>\" --to \"<target system>\" --status \"<success|failure>\" --message \"<message explaining transaction>\" \\"
echo " --operation \"<business process name>\" --payload \"<type of payload>\""
echo ""
echo "Optional flags:"
echo " --flowid \"[flowid]\" Send the specified value as flow ID"
echo " --verbose Do verbose output"
echo " --config <configuration file> Load aino.io configuration (e.g. API key) from this file"
echo " --no_gzip Disable gzipping of the payload"
echo ""
echo "Identifier handling:"
echo " Single field:"
echo " --id \"ID name\" \"ID value\""
echo " Or for multivalued IDs (arrays of values):"
echo " --id \"ID name\" \"ID value 1\" \"ID value 2\" \"ID value 3\""
echo ""
echo "Metadata (optional):"
echo " You can send any metadata related to the call by using the --metadata option:"
echo " --metadata \"Metadata field\" \"Value of field\""
echo " --send-hostname Sends the hostname where the script is running as metadata"
echo " --send-artifact Sends information about the script that is calling aino as metadata"
exit 1
}
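# Illustrative invocation (system and ID names below are placeholders, not part
# of this repository):
#   ./aino.sh --from "CRM" --to "Billing" --status "success" \
#     --message "Invoice created" --operation "Invoicing" --payload "invoice" \
#     --id "invoiceId" "12345" --send-hostname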
if [ "$#" -lt 10 ]; then
print_help
exit 1
fi
while [ $# -gt 0 ]
do
case "$1" in
--help)
print_help
exit 0
;;
--config)
. "$2"
;;
--to)
TO="$2"
;;
--from)
FROM="$2"
;;
--status)
STATUS="$2"
;;
--message)
MESSAGE="$2"
;;
--operation)
OPERATION="$2"
;;
--payload)
PAYLOAD="$2"
;;
--flowid)
FLOWID="$2"
;;
--id)
if [ "$#" -lt 3 ]; then
echo "Invalid parameters passed to aino.sh: $*"
exit 1
fi
shift # Shift the --id off stack
ID_TYPE="$1"
shift # Shift the type off the stack
add_aino_id "$ID_TYPE" $*
;;
--metadata)
if [ "$#" -lt 3 ]; then
echo "Invalid parameters passed to aino.sh: $*"
exit 1
fi
shift # Shift the --id off stack
METADATA_FIELD="$1"
shift # Shift the type off the stack
add_aino_metadata "$METADATA_FIELD" "$1"
;;
--send-artifact)
add_aino_metadata "artifactType" "shellScript"
add_aino_metadata "artifactName" "`ps -o args= $PPID 2> /dev/null`"
;;
--send-hostname)
add_aino_metadata "serverName" "`hostname`"
;;
--verbose)
export VERBOSE_AINO=true
;;
--no_gzip)
export AINO_DISABLE_GZIP=true
;;
esac
shift
done
if [ "${FLOWID}" = "" ]; then
FLOWID=`generate_flow_id`
fi
aino_log "${FROM}" "${TO}" "${STATUS}" "${MESSAGE}" "${OPERATION}" "${PAYLOAD}" "${FLOWID}" "`get_aino_ids`"
|
Aino-io/agent-shell
|
scripts/aino.sh
|
Shell
|
apache-2.0
| 3,871 |
#!/usr/bin/env bash
PLUGINS_DIR="/etc/logstash/plugins"
LOGSTASH_GEM="/opt/logstash/Gemfile"
PLUGINS=$(ls -1A ${PLUGINS_DIR} | grep -v "add_plugins.sh" )
# PLUGINS is a plain newline-separated string, so test for emptiness directly.
if [ -z "${PLUGINS}" ]; then
echo "INFO: No local plugins in ${PLUGINS_DIR}"
else
for i in ${PLUGINS}; do
echo "gem '${i}', :path => '${PLUGINS_DIR}/${i}'" >> ${LOGSTASH_GEM}
done
fi
|
Enteee/EtherFlows
|
flowworker/logstash/plugins/add_plugins.sh
|
Shell
|
apache-2.0
| 367 |
#!/bin/bash
# Cleaning up apt and bash history before packaging the box.
sudo mkdir -p /etc/systemd/system/apt-daily.timer.d/
cat <<EOF | sudo tee -a /etc/systemd/system/apt-daily.timer.d/apt-daily.timer.conf > /dev/null
[Timer]
Persistent=false
EOF
sudo apt-get clean
cat /dev/null > ~/.bash_history && history -c && exit
|
openebs/openebs
|
k8s/lib/vagrant/boxes/openebs/cleanup_openebs.sh
|
Shell
|
apache-2.0
| 324 |
#!/usr/bin/env bash
# Start script. Docker compose seems to not support parameters of line 13/14, so we stay at bash scripting.
docker run --name todomanagerdb \
           -v /$(pwd)/src/main/schema:/docker-entrypoint-initdb.d \
-e MYSQL_ROOT_PASSWORD=admin \
-d mysql:5.7.16
docker run --name todomanager \
--link todomanagerdb:mysql \
-p8080:8080 \
-d aliayhan/todomanager:1.0.0 \
-h"$MYSQL_PORT_3306_TCP_ADDR" \
-P"$MYSQL_PORT_3306_TCP_PORT"
|
aliayhan/todomanager
|
src/main/docker/docker.sh
|
Shell
|
apache-2.0
| 468 |
#!/bin/bash
cd /var/tmp
wget -O ec2id.tar.gz http://github.com/cread/ec2id/tarball/master
tar fxz ec2id.tar.gz
cd cread-ec2id-*
echo -n "Starting up ec2id in $(pwd)..."
nohup python ec2id.py > ec2id.log &
echo "done!"
|
cread/ec2id
|
ec2id.sh
|
Shell
|
apache-2.0
| 223 |
#!/usr/bin/env bash
set -e
directories=$(glide novendor)
for i in $directories
do
if [[ "$i" == "." ]]; then
continue
fi
go vet $i
golint $i
goimports -d $(dirname $i)
done
|
tangfeixiong/go-to-exercise
|
vendor/github.com/ory/graceful/ci-lint.sh
|
Shell
|
apache-2.0
| 189 |
#!/bin/sh
rm -rf ../pyharmony
cd ../fauxmo
git pull
cd ../PyISY
git pull
cd ../hue-upnp
git pull
cd ../ISYHelper
git pull
|
jimboca/ISYHelper
|
update.sh
|
Shell
|
apache-2.0
| 123 |
#!/bin/bash
# Copyright 2017 Chris Drake
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Execute test suite on Travis-CI
set -e # errexit
set -x # xtrace
# Print environment variables (for debug)
printenv | sort
# Generate build files
mkdir build
cd build
cmake \
-DCMAKE_BUILD_TYPE="$CONFIGURATION" \
-DENABLE_TESTING=ON \
-DENABLE_COVERAGE="$ENABLE_COVERAGE" \
..
# Build all targets
cmake --build . --target all
# Execute unit tests
ctest
|
cjdrake/cppftw
|
script/travis.sh
|
Shell
|
apache-2.0
| 967 |
#!/bin/bash
if [ ! -f /.redis_configured ]; then
if [ "$REDIS_MODE" == "LRU" ]; then
echo "=> Configuring redis as a LRU cache"
MAXMEMORY=${REDIS_MAXMEMORY:-"256mb"}
touch /etc/redis/redis_default.conf
echo "maxmemory $MAXMEMORY" >> /etc/redis/redis_default.conf
echo "maxmemory-policy allkeys-lru" >> /etc/redis/redis_default.conf
else
echo "=> Unknown $REDIS_MODE mode - ignoring"
fi
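    # REDIS_TIMEOUT is presumably supplied through the container environment
    # (for example "-e REDIS_TIMEOUT=0" to disable idle client timeouts); its
    # value is appended to the config verbatim below.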
echo "=> Setting timeout to ${REDIS_TIMEOUT}"
echo timeout ${REDIS_TIMEOUT} >> /etc/redis/redis_default.conf
touch /.redis_configured
fi
if [ ! -f /.redis_password_set ]; then
/set_redis_password.sh
fi
exec /usr/bin/redis-server /etc/redis/redis_default.conf
|
dell-cloud-marketplace/docker-redis
|
run.sh
|
Shell
|
apache-2.0
| 730 |
#!/bin/sh
xx="1 10 100 500 1000 1500"
for y in $xx; do
for x in $xx; do
./cp-benchmark $y $x || exit 1
done
done
|
ivantishchenko/OpenMP-CUDA
|
cp8/benchmark.sh
|
Shell
|
apache-2.0
| 130 |
#!/bin/bash
# Copyright 2016 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
. $(dirname $0)/../common.sh
set -x
rm -rf $CORPUS fuzz-*.log
mkdir $CORPUS
[ -e $EXECUTABLE_NAME_BASE ] && ./$EXECUTABLE_NAME_BASE -use_value_profile=1 -artifact_prefix=$CORPUS/ -jobs=$JOBS -workers=$JOBS $LIBFUZZER_FLAGS $CORPUS $SCRIPT_DIR/seeds
grep "AddressSanitizer: heap-buffer-overflow\|AddressSanitizer: SEGV on unknown address" fuzz-0.log || exit 1
|
google/fuzzer-test-suite
|
vorbis-2017-12-11/test-libfuzzer.sh
|
Shell
|
apache-2.0
| 488 |
#!/bin/bash
# Entrypoint script that starts a Docker daemon inside the Dock container
# for us so that it is always available.
set -euo pipefail
start_docker() {
# Don't do anything if daemon already running
if docker info >/dev/null 2>&1; then
return
fi
  sudo dockerd > /dev/null 2>&1 &
  dockerd_pid=$!
  local max_tries=5
  for (( i = 1; i <= max_tries; i++ )); do
if docker info >/dev/null 2>&1; then
break
fi
echo "Waiting for Docker daemon to start..." >&2
sleep 1
done
if ! docker info >/dev/null 2>&1; then
echo "Docker daemon failed to start!" >&2
return 1
fi
}
start_docker
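# Hand control to whatever command was passed to the container, for example
# `entrypoint.bash docker build .` (illustrative usage, not a fixed interface).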
exec "$@"
|
brigade/dock
|
script/entrypoint.bash
|
Shell
|
apache-2.0
| 625 |
#!/bin/sh
CMD="${1}"; shift; exec "${CMD}" "${@}"
|
kubernetes/cloud-provider-vsphere
|
test/vcsim/entrypoint.sh
|
Shell
|
apache-2.0
| 49 |
#!/bin/bash
# vim: set expandtab tabstop=4 shiftwidth=4 softtabstop=4 fileencoding=utf-8 :
#
# Copyright 2014 by James Burlingame
# Apache 2.0 License
#
FAMILY="mysql"
PACKAGE="mysql"
MAJOR_VERSION="5.5"
RELEASE="5.5.33"
#
#http://dev.mysql.com/get/Downloads/MySQL-5.6/mysql-5.6.13.tar.gz/from/http://cdn.mysql.com/
#http://cdn.mysql.com/Downloads/MySQL-5.6/mysql-5.6.13.tar.gz
TARBALL_URL="http://cdn.mysql.com/Downloads/MySQL-${MAJOR_VERSION}/mysql-${RELEASE}.tar.gz"
TARBALL_MD5="68e235f472718d0aaf2fc755f498a714"
TARBALL_FILE="mysql-${RELEASE}.tar.gz"
SRCDIR="mysql-${RELEASE}"
PREFIX="/opt/${FAMILY}/.release/${PACKAGE}-${RELEASE}"
BUILDDIR="build-${PACKAGE}-${RELEASE}"
#
LIBBITS=${LIBBITS:-"lib64"}
MYSQL_DATADIR=${MYSQL_DATADIR:-"/var/lib/mysql"}
MYSQL_SOCK=${MYSQL_SOCK:-"/var/lib/mysql/mysql.sock"}
CMAKE_ARGS="\
-DMYSQL_DATADIR:PATH=$MYSQL_DATADIR \
-DCMAKE_INSTALL_PREFIX:PATH=$PREFIX \
-DWITH_SSL:STRING=yes \
-DMYSQL_UNIX_ADDR:PATH=$MYSQL_SOCK \
-DINSTALL_LIBDIR:PATH=$LIBBITS \
-DINSTALL_PLUGINDIR:PATH=$LIBBITS/plugin \
-DSYSCONFDIR:PATH=/opt/mysql/${MAJOR_VERSION}/etc \
-DINSTALL_SYSCONFDIR:PATH=/opt/${PACKAGE}/${MAJOR_VERSION}/etc \
"
TOP_DIR=${INSTALL_MARINADB_TOP_DIR:=$(pwd)}
. installer-functions.sh
. mysql-common.sh
max_tries="1"
keep_existing="1"
echo "$0: INFO: MySQL ${RELEASE} started."
install_prereqs
makedir "$TARBALLS_DIR" 1
get_tarball "$TARBALL_URL" "$TARBALL_MD5" "$TARBALL_FILE"
cd "$TOP_DIR"
extract_tarball "$TARBALL_FILE" "$SRCDIR" z
makedir "$BUILDDIR"
rm -f "$TOP_DIR"/"$SRCDIR"/CMakeCache.txt
cd "$BUILDDIR"
cmake "$TOP_DIR"/"$SRCDIR" $CMAKE_ARGS
make_install "$SRCDIR" "$BUILDDIR"
link_default "$FAMILY" "$PREFIX"
link_major "$FAMILY" "$PREFIX" "$PACKAGE"-"$MAJOR_VERSION" etc
if [ ! -e "/opt/$FAMILY/.default" ]
then
(cd /opt/"$FAMILY"; rm -f .default; ln -sv "$MAJOR_VERSION" .default)
echo "$0: INFO: Set $PACKAGE $MAJOR_VERSION as the default."
fi
if [ ! -e "/opt/$FAMILY/$MAJOR_VERSION" ]
then
(cd /opt/"$FAMILY"; rm -f "$MAJOR_VERSION"; ln -sv "$PACKAGE"-"$MAJOR_VERSION" "$MAJOR_VERSION")
echo "$0: INFO: Set $PACKAGE-$MAJOR_VERSION as $MAJOR_VERSION."
fi
add_user 'mysql' '27' 'MySQL/MariaDB/Percona server' "$MYSQL_DATADIR"
chown -v 'mysql.' "$MYSQL_DATADIR"
echo "$0: INFO: MySQL ${RELEASE} completed."
|
samplx/installers
|
mysql55.sh
|
Shell
|
apache-2.0
| 2,334 |
#!/bin/bash
#There are two spark jobs in this demo
#A streaming job that listens to active twitter feed
#A batch job that aggregates inital sketch and creates a new perspectives
if [ `hostname` == 'node0' ]
then
echo "Building Steaming Job... output to streaming-job-build.log"
#Build Twitter Streaming Project
mvn package -f dse-sketching-demo/dse-sketching-streaming-job/pom.xml > streaming-job-build.log
#kickoff streaming job
#moved to demo application node0:8081/config based to make use of different user credentials
#nohup dse spark-submit --class com.se.stream.TwitterStreamSketchDemo --total-executor-cores 2 --executor-memory 1g /tmp/datastax-sketch-examples/dse-sketching-demo/dse-sketching-streaming-job/target/dse-sketching-streaming-job-jar-with-dependencies.jar > streaming.log &
#write out current crontab for batch
crontab -l > mycron
#echo new cron into cron file
echo "*/3 * * * * dse spark-submit --class com.se.rollup.DataSketchingRollup --total-executor-cores 2 --executor-memory 1g /tmp/datastax-sketch-examples/dse-sketching-demo/dse-sketching-streaming-job/target/dse-sketching-streaming-job-jar-with-dependencies.jar" >> mycron
#install new cron file
crontab mycron
echo "Finished cron job setup"
fi
|
michaelraney/datastax-sketch-examples
|
.startup/streaming.sh
|
Shell
|
apache-2.0
| 1,258 |
#!/bin/sh
# Go through nodes and display the public ip(s)
for id in $(dcos node --json | jq --raw-output '.[] | select(.attributes.public_ip == "true") | .id'); do dcos node ssh --option StrictHostKeyChecking=no --option LogLevel=quiet --master-proxy --mesos-id=$id "curl -s ifconfig.co" ; done 2>/dev/null
|
Zuehlke/SHMACK
|
scripts/get-public-ip.sh
|
Shell
|
apache-2.0
| 309 |
#!/usr/bin/env bash
chain=$(mktemp -d)
cd $chain
$burrow_bin spec -v1 -d2 | $burrow_bin configure -s- --curve-type secp256k1 > burrow.toml
$burrow_bin start &> /dev/null &
burrow_pid=$!
contracts=$(mktemp -d)
cd $contracts
function finish {
kill -TERM $burrow_pid
rm -rf "$chain"
rm -rf "$contracts"
}
trap finish EXIT
npm install -g truffle
truffle unbox metacoin
cat << EOF > truffle-config.js
module.exports = {
networks: {
burrow: {
host: "127.0.0.1",
port: 26660,
network_id: "*",
},
}
};
EOF
truffle test --network burrow
|
eris-ltd/eris-db
|
tests/web3/truffle.sh
|
Shell
|
apache-2.0
| 570 |
#!/bin/sh
DEMOSDIR=./demos
COUCHURL="http://localhost:5984"
COUCHDIR=`curl ${COUCHURL}/_config/couchdb/database_dir 2> /dev/null | sed s/\"//g`
echo "Found CouchDB at ${COUCHDIR}"
# fetch db snapshot
wget http://dev.medicmobile.org/downloads/temp/medic-demos-release-20151104.tar.xz
# set the expected output dir to DEMOSDIR
tar -xJf medic-demos-release-20151104.tar.xz
# turn off couchdb
sudo service couchdb stop
# copy couch databases over to the system
sudo cp ${DEMOSDIR}/* ${COUCHDIR}
# determine the couchdb unix user/group
COUCHOWNER=`ls -ld ${COUCHDIR} | awk '{print $3 ":" $4}'`
echo "Found CouchDB owner:group to be ${COUCHOWNER}"
# set the correct owner for the new files
sudo chown -R ${COUCHOWNER} ${COUCHDIR}
# start up couchdb
sudo service couchdb start
# this script will exit when couch is online
while ! curl -sf "${COUCHURL}" > /dev/null; do
echo "Waiting for system to come up..."
sleep 2
done
# Is the medic database there?
curl -X GET http://127.0.0.1:5984/medic/_all_docs > docs
wc -l docs
|
medic/medic-analytics
|
bin/replace_couch.sh
|
Shell
|
apache-2.0
| 1,012 |
#!/bin/sh
docker run --rm -it \
-e POSTGRES_DB=pachyderm \
-e POSTGRES_USER=pachyderm \
-e POSTGRES_HOST_AUTH_METHOD=trust \
-p 32228:5432 \
postgres:13.0-alpine
|
pachyderm/pfs
|
etc/contributing/run_postgres.sh
|
Shell
|
apache-2.0
| 162 |
#!/bin/bash
# ==========================================================
# ==========================================================
# THENOWFACTORY www.thenowfactory.com
#
# Author: Agustin Fernandez
# Creation date: 18/Aug/2011
#
# DESCRIPTION: This script fixes 'Failed' & 'UnConfigured'
# disks when it is possible.
#
#  INPUT:         No input required
#
# CRONTAB:
#
#  Check MegaSAS RAID status and fixes it when possible
# */7 * * * * /apps/midas/scripts/check_MegaSAS.sh > /dev/null 2>&1
#
#
# ==========================================================
# =========== DISKs STATES Explanation ===================
# ==========================================================
#
# :Failed:
# A physical disk that was originally configured as Online or
# Hot Spare, but on which the firmware detects an unrecoverable
# error.
#
# :Unconfigured(Bad):
# A physical disk on which the firmware detects an unrecoverable
# error; the physical disk was Unconfigured Good or the physical
# disk could not be initialized.
#
# :Rebuild:
# A physical disk to which data is being written to restore full
# redundancy for a virtual disk.
#
# :Missing:
# A physical disk that was Online but which has been removed from
# its location.
#
# :Offline:
# A physical disk that is part of a virtual disk but which has
# invalid data as far as the RAID configuration is concerned.
#
#
# ==========================================================
# ============= C H A N G E S L O G ===================
# ==========================================================
# Date Author Ver Description
# ==========================================================
#
# 18/Aug/2011 Agustin Fernandez 1.0 Fixs Failed disks
# 22/Aug/2011 Agustin Fernandez 1.1 Fixs UnConfigure disks
# 22/Aug/2011 Agustin Fernandez 1.2 Fixs UnConfigure(bad) disks
# 29/Aug/2011 Agustin Fernandez 1.3 Retrieves Rebuilding process rate when happens
# Abort script when RAID controler is not responding
# 29/Aug/2011 Agustin Fernandez 1.4 Removes Rebuilding temporary file
# 29/Aug/2011 Agustin Fernandez 1.5 Small bug fixed. Script writes into log file properly now.
# 30/Aug/2011  Agustin Fernandez   1.6  Beeping is silenced always instead of only when RAID is degraded.
# 30/Aug/2011 Agustin Fernandez 1.7 PID added to logs file.
# 31/Aug/2011 Agustin Fernandez 1.7 Added disk' states explanation.
#
# ==========================================================
VERSION="1.7 This script fixes disks & RAIDs when possible."
# ==========================================================
# === S E T T I N G S =====================================
# ==========================================================
MEGACLI="/usr/sbin/MegaCli"
AWK="/usr/bin/awk"
TODAY="`/bin/date +'%Y.%m.%d'`"
TEMP_DIR="/tmp/"
SCRIPT_NAME="`/usr/bin/basename $0`"
MegaSAS_VD="${TEMP_DIR}${SCRIPT_NAME}.VD.$$.tmp"
MegaSAS_FD="${TEMP_DIR}${SCRIPT_NAME}.FD.$$.tmp"
Failed_Disks="${TEMP_DIR}${SCRIPT_NAME}.Failed-Disks.$$.tmp"
Unconfigured_Disks="${TEMP_DIR}${SCRIPT_NAME}.Unconfigured-Disks.$$.tmp"
Rebuild_Disks="${TEMP_DIR}${SCRIPT_NAME}.Rebuild-Disks.$$.tmp"
LOGs_DIR="/apps/midas/scripts/output/MegaSAS/"
LOGs_PREFIX="MegaSAS-Status"
LOGs_EXT=".log"
LOGs_FILE="${LOGs_DIR}${LOGs_PREFIX}.${TODAY}${LOGs_EXT}"
REMOVE_AFTER_N_DAYS="90"
# ==========================================================
# === F U N C T I O N S ==================================
# ==========================================================
function log {
RightNow="`/bin/date +'%Y.%m.%d %H:%M:%S'`"
echo "${RightNow} ($$) $1" | /usr/bin/tee -a ${LOGs_FILE}
}
function validations {
[ $1 ] && check_Script_Release "$1"
[ ! -d ${LOGs_DIR} ] && /bin/mkdir -p ${LOGs_DIR}
[ -e ${LOGs_FILE} ] && /usr/bin/touch ${LOGs_FILE}
CheckInstances
if [ ! -x ${MEGACLI} ]; then
log "Not found binary: ${MEGACLI}, script aborted!"
exit 1
fi
if [ ! -x ${AWK} ]; then
log "Not found binary: ${AWK} script aborted!"
exit 1
fi
check_MegaCli_bin
}
function check_Script_Release {
([ "$1" == "-v" ] || [ "$1" == "-V" ]) && echo "${SCRIPT_NAME} ${VERSION}" && exit 0
}
function CheckInstances {
ScriptName="`/usr/bin/basename $0`"
Instances="${TEMP_DIR}${ScriptName}.Instances.$$.tmp"
myTimeStamp="`/bin/date +'%Y.%m.%d %H:%M:%S'`"
/usr/bin/lsof | /bin/grep -e "${ScriptName}$" | /bin/grep -v $$ > ${Instances}
if [ -s ${Instances} ]; then
log "Another instance of ${ScriptName} is still running, aborting current script!"
/bin/rm ${Instances}
exit 1
fi
/bin/rm ${Instances}
}
function check_MegaCli_bin {
${MEGACLI} -v | /bin/egrep -e '2009$' > /dev/null 2>&1
if [ $? -eq 1 ]; then
log "This script works with: MegaCli 'Ver 5.00.14 July 14, 2009' only. Script aborted!"
exit 1
fi
}
function get_RAID_Status {
${MEGACLI} -PDList -aAll > ${MegaSAS_FD}
if [ $? -eq 1 ]; then
log "RAID controler is not responding. Script aborted!"
exit 1
fi
${MEGACLI} -CfgDsply -aALL > ${MegaSAS_VD}
/bin/cat ${MegaSAS_VD} | ${AWK} ' \
($0 ~ /^DISK GROUPS:/) {printf "\n%s",$0} \
($0 ~ /^Number of PDs:/) {printf " %s %s",$3,$4} \
($0 ~ /^Number of VDs:/) {printf " %s %s",$3,$4} \
($0 ~ /^State:/) {printf " %s",$0} \
($0 ~ /^RAID Level:/) {printf " %s",$3} \
($0 ~ /^Enclosure Device ID:/) {printf "\n%s",$4} \
($0 ~ /^Slot Number:/) {printf " %s",$3} \
($0 ~ /^Firmware state:/) {printf " %s",$3} \
END {printf "\n"}' | /usr/bin/tee -a ${LOGs_FILE}
}
function check_Failed_disks {
/bin/cat ${MegaSAS_FD} | ${AWK} ' \
($0 ~ /^Enclosure Device ID:/) {ENC=$4} \
($0 ~ /^Slot Number:/) {SLT=$3} \
($0 ~ /^Firmware state: Failed/) {printf "%s %s %s\n",ENC,SLT,$3}' > ${Failed_Disks}
if [ -s ${Failed_Disks} ]; then
while read line; do
log "found: $line"
ENC="`echo $line | /usr/bin/cut -d' ' -f1`"
SLT="`echo $line | /usr/bin/cut -d' ' -f2`"
${MEGACLI} -PDOnline -PhysDrv[${ENC}:${SLT}] -a0 | /usr/bin/tee -a ${LOGs_FILE}
done < ${Failed_Disks}
else
log "Not found 'Failed' disks"
fi
}
function check_Unconfigured_disks {
/bin/cat ${MegaSAS_FD} | ${AWK} ' \
($0 ~ /^Enclosure Device ID:/) {ENC=$4} \
($0 ~ /^Slot Number:/) {SLT=$3} \
($0 ~ /^Firmware state: Unconfigured/) {printf "%s %s %s\n",ENC,SLT,$3}' > ${Unconfigured_Disks}
if [ -s ${Unconfigured_Disks} ]; then
while read line; do
log "found: $line"
ENC="`echo $line | /usr/bin/cut -d' ' -f1`"
SLT="`echo $line | /usr/bin/cut -d' ' -f2`"
STA="`echo $line | /usr/bin/cut -d' ' -f3`"
if [ "${STA}" == "Unconfigured(bad)" ]; then
${MEGACLI} -PDMakeGood -physDrv[${ENC}:${SLT}] -a0 | /usr/bin/tee -a ${LOGs_FILE}
fi
${MEGACLI} -CfgForeign -Scan -a0 | /usr/bin/tee -a ${LOGs_FILE}
${MEGACLI} -CfgForeign -Clear -a0 | /usr/bin/tee -a ${LOGs_FILE}
${MEGACLI} -PDHSP -Set -physDrv[${ENC}:${SLT}] -a0 | /usr/bin/tee -a ${LOGs_FILE}
done < ${Unconfigured_Disks}
else
log "Not found 'Unconfigured' disks"
fi
}
function check_Rebuild_disks {
/bin/cat ${MegaSAS_FD} | ${AWK} ' \
($0 ~ /^Enclosure Device ID:/) {ENC=$4} \
($0 ~ /^Slot Number:/) {SLT=$3} \
($0 ~ /^Firmware state: Rebuild/) {printf "%s %s %s\n",ENC,SLT,$3}' > ${Rebuild_Disks}
if [ -s ${Rebuild_Disks} ]; then
while read line; do
log "found: $line"
ENC="`echo $line | /usr/bin/cut -d' ' -f1`"
SLT="`echo $line | /usr/bin/cut -d' ' -f2`"
STA="`echo $line | /usr/bin/cut -d' ' -f3`"
${MEGACLI} -PDRbld -ShowProg -PhysDrv [${ENC}:${SLT}] -a0 | /usr/bin/tee -a ${LOGs_FILE}
done < ${Rebuild_Disks}
else
log "Not found 'Rebuild' disks"
fi
}
function Silence_Alarm {
${MEGACLI} -AdpSetProp AlarmSilence -aAll | /usr/bin/tee -a ${LOGs_FILE}
}
function remove_tempFiles {
[ -e ${MegaSAS_VD} ] && /bin/rm -f ${MegaSAS_VD}
[ -e ${MegaSAS_FD} ] && /bin/rm -f ${MegaSAS_FD}
[ -e ${Failed_Disks} ] && /bin/rm -f ${Failed_Disks}
[ -e ${Unconfigured_Disks} ] && /bin/rm -f ${Unconfigured_Disks}
[ -e ${Rebuild_Disks} ] && /bin/rm -f ${Rebuild_Disks}
}
function CleanUp_Old_Logs {
/usr/bin/find ${LOGs_DIR} -name "${LOGs_PREFIX}*${LOGs_EXT}" -mtime +$1 -exec /bin/rm {} \; -print | /usr/bin/tee -a "${LOGs_FILE}"
}
# ==========================================================
# === M A I N P R O G R A M ===============================
# ==========================================================
validations "$1"
log "Script starts ..."
get_RAID_Status
check_Failed_disks
check_Unconfigured_disks
check_Rebuild_disks
Silence_Alarm
remove_tempFiles
CleanUp_Old_Logs "${REMOVE_AFTER_N_DAYS}"
log "Script finished."
# ==========================================================
# === E N D ================================================
# ==========================================================
|
peter-watters/WebPortfolio
|
tnf_inventory/Sample_scripts/check_MegaSAS.sh
|
Shell
|
apache-2.0
| 9,454 |
#!/usr/bin/env bash
bundle exec jekyll serve --config _config.yml,_config_github.yml
|
freakonometrics/arthurcharpentier
|
src/run-jekyll-local-as-github.sh
|
Shell
|
apache-2.0
| 85 |
#!/bin/bash
# Copyright 2015 Insight Data Science
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
PEG_ROOT=$(dirname ${BASH_SOURCE})/../..
CLUSTER_NAME=davids-eventsim-cluster
peg up ${PEG_ROOT}/examples/eventsim/master.yml &
peg up ${PEG_ROOT}/examples/eventsim/workers.yml &
wait
peg fetch ${CLUSTER_NAME}
peg install ${CLUSTER_NAME} ssh
peg install ${CLUSTER_NAME} aws
peg install ${CLUSTER_NAME} hadoop
peg install ${CLUSTER_NAME} spark
wait
peg service ${CLUSTER_NAME} hadoop start
peg service ${CLUSTER_NAME} spark start
|
InsightDataScience/pegasus
|
examples/eventsim/spark_hadoop.sh
|
Shell
|
apache-2.0
| 1,033 |
#!/bin/bash
# A small script that submits a code for code review.
#
# Copyright 2012 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
EXIT_FAILURE=1;
EXIT_MISSING_ARGS=2;
EXIT_SUCCESS=0;
SCRIPTNAME=`basename $0`;
BROWSER_PARAM="";
CACHE_PARAM="";
CL_NUMBER="";
USE_CL_FILE=0;
while test $# -gt 0;
do
case $1 in
--nobrowser | --no-browser | --no_browser )
BROWSER_PARAM="--no_oauth2_webbrowser";
shift;
;;
*)
CL_NUMBER=$1;
shift
;;
esac
done
if test -z "${CL_NUMBER}";
then
if test -f ._code_review_number;
then
CL_NUMBER=`cat ._code_review_number`
RESULT=`echo ${CL_NUMBER} | sed -e 's/[0-9]//g'`;
if ! test -z "${RESULT}";
then
echo "File ._code_review_number exists but contains an incorrect CL number.";
exit ${EXIT_FAILURE};
fi
USE_CL_FILE=1;
fi
fi
if test -z "${CL_NUMBER}";
then
echo "Usage: ./${SCRIPTNAME} [--nobrowser] CL_NUMBER";
echo "";
echo " CL_NUMBER: optional change list (CL) number that is to be submitted.";
echo " If no CL number is provided the value is read from:";
echo " ._code_review_number";
echo "";
exit ${EXIT_MISSING_ARGS};
fi
if ! test -f "utils/common.sh";
then
echo "Unable to find common functions, are you in the wrong directory?";
exit ${EXIT_FAILURE};
fi
# Source the common library.
. utils/common.sh
# Check if we're on the master branch.
BRANCH=`git branch | grep -e "^[*]" | sed "s/^[*] //"`;
if test "${BRANCH}" != "master";
then
echo "Submit aborted - current branch is not master.";
exit ${EXIT_FAILURE};
fi
# Check for double status codes, upload.py cannot handle these correctly.
STATUS_CODES=`git status -s | cut -b1,2 | grep '\S\S' | grep -v '??' | sort | uniq`;
if ! test -z "${STATUS_CODES}";
then
echo "Submit aborted - detected double git status codes."
echo "Run: 'git stash && git stash pop'.";
exit ${EXIT_FAILURE};
fi
# Check if the local repo is in sync with the origin.
git fetch
if test $? -ne 0;
then
echo "Submit aborted - unable to fetch updates from origin repo";
exit ${EXIT_FAILURE};
fi
NUMBER_OF_CHANGES=`git log HEAD..origin/master --oneline | wc -l`;
if test $? -ne 0;
then
echo "Submit aborted - unable to determine if local repo is in sync with origin";
exit ${EXIT_FAILURE};
fi
if test ${NUMBER_OF_CHANGES} -ne 0;
then
echo "Submit aborted - local repo out of sync with origin."
echo "Run: 'git stash && git pull && git stash pop'.";
exit ${EXIT_FAILURE};
fi
# Check if the linting is correct.
if ! linter;
then
echo "Submit aborted - fix the issues reported by the linter.";
exit ${EXIT_FAILURE};
fi
# Check if all the tests pass.
if test -e run_tests.py;
then
echo "Running tests.";
python run_tests.py
if test $? -ne 0;
then
echo "Submit aborted - fix the issues reported by the failing test.";
exit ${EXIT_FAILURE};
fi
fi
URL_CODEREVIEW="https://codereview.appspot.com";
# Get the description of the change list.
RESULT=`which json_xs`;
# TODO: check if curl exists.
if ! test -z "${RESULT}";
then
DESCRIPTION=`curl -s ${URL_CODEREVIEW}/api/${CL_NUMBER} | json_xs | grep '"subject"' | awk -F '"' '{print $(NF-1)}'`;
else
DESCRIPTION=`curl ${URL_CODEREVIEW}/${CL_NUMBER}/ -s | grep "Issue ${CL_NUMBER}" | awk -F ':' '{print $2}' | tail -1`;
fi
if test -z "${DESCRIPTION}";
then
echo "Submit aborted - unable to find change list with number: ${CL_NUMBER}.";
exit ${EXIT_FAILURE};
fi
# Update the version information.
echo "Updating version information to match today's date."
DATE_NOW=`date +"%Y%m%d"`
sed -i -e "s/^VERSION_DATE.*$/VERSION_DATE = '${DATE_NOW}'/g" plaso/__init__.py
COMMIT_DESCRIPTION="Code review: ${CL_NUMBER}: ${DESCRIPTION}";
echo "Submitting ${COMMIT_DESCRIPTION}";
# Check if we need to set --cache.
STATUS_CODES=`git status -s | cut -b1,2 | sed 's/\s//g' | sort | uniq`;
for STATUS_CODE in ${STATUS_CODES};
do
if test "${STATUS_CODE}" = "A";
then
CACHE_PARAM="--cache";
fi
done
python utils/upload.py \
--oauth2 ${BROWSER_PARAM} -y -i ${CL_NUMBER} ${CACHE_PARAM} \
-t "Submitted." -m "Code Submitted." --send_mail
git commit -a -m "${COMMIT_DESCRIPTION}";
git push
if test -f "~/codereview_upload_cookies";
then
curl -b ~/.codereview_upload_cookies ${URL_CODEREVIEW}/${CL_NUMBER}/close -d ''
else
echo "Could not find an authenticated session to codereview. You need to"
echo "manually close the ticket on the code review site."
fi
if ! test -z "${USE_CL_FILE}" && test -f "._code_review_number";
then
rm -f ._code_review_number
fi
exit ${EXIT_SUCCESS};
|
cvandeplas/plaso
|
utils/submit.sh
|
Shell
|
apache-2.0
| 5,182 |
#!/usr/bin/bash
python3 manage.py loaddata db_backups/default/init_database.json
|
ceos-seo/data_cube_ui
|
scripts/load_default_fixture.sh
|
Shell
|
apache-2.0
| 81 |
#! /bin/bash
help()
{
echo "USAGE : "
echo ""
echo "config.sh migrate"
echo ""
echo "config.sh unmigrate"
echo ""
echo "config.sh preload"
}
migrate()
{
python manage.py makemigrations
python manage.py migrate
}
unmigrate()
{
find . -path "*/migrations/*.py" -not -name "__init__.py" -delete
find . -path "*/migrations/*.pyc" -delete
rm -f devel.sqlite3
}
preload()
{
python manage.py preload
}
if [[ $1 == "migrate" ]]
then
migrate
elif [[ $1 == "unmigrate" ]]
then
unmigrate
elif [[ $1 == "preload" ]]
then
preload
else
help
fi
|
amitdhiman000/MyOffers
|
config.sh
|
Shell
|
apache-2.0
| 553 |
#!/usr/bin/env bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script is for configuring kubernetes master and node instances. It is
# uploaded in the manifests tar ball.
# TODO: this script duplicates templating logic from cluster/saltbase/salt
# using sed. It should use an actual template parser on the manifest
# files.
set -o errexit
set -o nounset
set -o pipefail
function setup-os-params {
# Reset core_pattern. On GCI, the default core_pattern pipes the core dumps to
# /sbin/crash_reporter which is more restrictive in saving crash dumps. So for
# now, set a generic core_pattern that users can work with.
echo "core.%e.%p.%t" > /proc/sys/kernel/core_pattern
}
# secure_random generates a secure random string of bytes. This function accepts
# a number of secure bytes desired and returns a base64 encoded string with at
# least the requested entropy. Rather than directly reading from /dev/urandom,
# we use uuidgen which calls getrandom(2). getrandom(2) verifies that the
# entropy pool has been initialized sufficiently for the desired operation
# before reading from /dev/urandom.
#
# ARGS:
# #1: number of secure bytes to generate. We round up to the nearest factor of 32.
function secure_random {
local infobytes="${1}"
if ((infobytes <= 0)); then
echo "Invalid argument to secure_random: infobytes='${infobytes}'" 1>&2
return 1
fi
local out=""
for (( i = 0; i < "${infobytes}"; i += 32 )); do
# uuids have 122 random bits, sha256 sums have 256 bits, so concatenate
# three uuids and take their sum. The sum is encoded in ASCII hex, hence the
# 64 character cut.
out+="$(
(
uuidgen --random;
uuidgen --random;
uuidgen --random;
) | sha256sum \
| head -c 64
)";
done
# Finally, convert the ASCII hex to base64 to increase the density.
echo -n "${out}" | xxd -r -p | base64 -w 0
}
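# Example (illustrative): generate a token with at least 32 bytes of entropy.
#   token="$(secure_random 32)"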
function config-ip-firewall {
echo "Configuring IP firewall rules"
# Do not consider loopback addresses as martian source or destination while
# routing. This enables the use of 127/8 for local routing purposes.
sysctl -w net.ipv4.conf.all.route_localnet=1
# The GCI image has host firewall which drop most inbound/forwarded packets.
# We need to add rules to accept all TCP/UDP/ICMP/SCTP packets.
if iptables -w -L INPUT | grep "Chain INPUT (policy DROP)" > /dev/null; then
echo "Add rules to accept all inbound TCP/UDP/ICMP packets"
iptables -A INPUT -w -p TCP -j ACCEPT
iptables -A INPUT -w -p UDP -j ACCEPT
iptables -A INPUT -w -p ICMP -j ACCEPT
iptables -A INPUT -w -p SCTP -j ACCEPT
fi
if iptables -w -L FORWARD | grep "Chain FORWARD (policy DROP)" > /dev/null; then
echo "Add rules to accept all forwarded TCP/UDP/ICMP/SCTP packets"
iptables -A FORWARD -w -p TCP -j ACCEPT
iptables -A FORWARD -w -p UDP -j ACCEPT
iptables -A FORWARD -w -p ICMP -j ACCEPT
iptables -A FORWARD -w -p SCTP -j ACCEPT
fi
# Flush iptables nat table
iptables -w -t nat -F || true
if [[ "${NON_MASQUERADE_CIDR:-}" == "0.0.0.0/0" ]]; then
echo "Add rules for ip masquerade"
iptables -w -t nat -N IP-MASQ
iptables -w -t nat -A POSTROUTING -m comment --comment "ip-masq: ensure nat POSTROUTING directs all non-LOCAL destination traffic to our custom IP-MASQ chain" -m addrtype ! --dst-type LOCAL -j IP-MASQ
iptables -w -t nat -A IP-MASQ -d 169.254.0.0/16 -m comment --comment "ip-masq: local traffic is not subject to MASQUERADE" -j RETURN
iptables -w -t nat -A IP-MASQ -d 10.0.0.0/8 -m comment --comment "ip-masq: local traffic is not subject to MASQUERADE" -j RETURN
iptables -w -t nat -A IP-MASQ -d 172.16.0.0/12 -m comment --comment "ip-masq: local traffic is not subject to MASQUERADE" -j RETURN
iptables -w -t nat -A IP-MASQ -d 192.168.0.0/16 -m comment --comment "ip-masq: local traffic is not subject to MASQUERADE" -j RETURN
iptables -w -t nat -A IP-MASQ -m comment --comment "ip-masq: outbound traffic is subject to MASQUERADE (must be last in chain)" -j MASQUERADE
fi
# If METADATA_CONCEALMENT_NO_FIREWALL is set, don't create a firewall on this
# node because we don't expect the daemonset to run on this node.
if [[ "${ENABLE_METADATA_CONCEALMENT:-}" == "true" ]] && [[ ! "${METADATA_CONCEALMENT_NO_FIREWALL:-}" == "true" ]]; then
echo "Add rule for metadata concealment"
iptables -w -t nat -I PREROUTING -p tcp -d 169.254.169.254 --dport 80 -m comment --comment "metadata-concealment: bridge traffic to metadata server goes to metadata proxy" -j DNAT --to-destination 127.0.0.1:988
fi
}
function create-dirs {
echo "Creating required directories"
mkdir -p /var/lib/kubelet
mkdir -p /etc/kubernetes/manifests
if [[ "${KUBERNETES_MASTER:-}" == "false" ]]; then
mkdir -p /var/lib/kube-proxy
fi
}
# Gets the total number of $(1) and $(2) type disks specified
# by the user in ${NODE_LOCAL_SSDS_EXT}
function get-local-disk-num() {
local interface="${1}"
local format="${2}"
localdisknum=0
if [[ ! -z "${NODE_LOCAL_SSDS_EXT:-}" ]]; then
IFS=";" read -r -a ssdgroups <<< "${NODE_LOCAL_SSDS_EXT:-}"
for ssdgroup in "${ssdgroups[@]}"; do
IFS="," read -r -a ssdopts <<< "${ssdgroup}"
local opnum="${ssdopts[0]}"
local opinterface="${ssdopts[1]}"
local opformat="${ssdopts[2]}"
if [[ "${opformat,,}" == "${format,,}" && "${opinterface,,}" == "${interface,,}" ]]; then
localdisknum=$((localdisknum+opnum))
fi
done
fi
}
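# Example (illustrative): with NODE_LOCAL_SSDS_EXT="1,scsi,block;2,nvme,fs",
# `get-local-disk-num "scsi" "block"` sets localdisknum=1 and
# `get-local-disk-num "nvme" "fs"` sets localdisknum=2.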
# Creates a symlink for a ($1) so that it may be used as block storage
function safe-block-symlink(){
local device="${1}"
local symdir="${2}"
mkdir -p "${symdir}"
get-or-generate-uuid "${device}"
local myuuid="${retuuid}"
local sym="${symdir}/local-ssd-${myuuid}"
# Do not "mkdir -p ${sym}" as that will cause unintended symlink behavior
ln -s "${device}" "${sym}"
echo "Created a symlink for SSD $ssd at ${sym}"
chmod a+w "${sym}"
}
# Gets a pregenerated UUID from ${ssdmap} if it exists, otherwise generates a new
# UUID and places it inside ${ssdmap}
function get-or-generate-uuid(){
local device="${1}"
local ssdmap="/home/kubernetes/localssdmap.txt"
echo "Generating or getting UUID from ${ssdmap}"
if [[ ! -e "${ssdmap}" ]]; then
touch "${ssdmap}"
chmod +w "${ssdmap}"
fi
# each line of the ssdmap looks like "${device} persistent-uuid"
if [[ ! -z $(grep ${device} ${ssdmap}) ]]; then
#create symlink based on saved uuid
local myuuid=$(grep ${device} ${ssdmap} | cut -d ' ' -f 2)
else
# generate new uuid and add it to the map
local myuuid=$(uuidgen)
if [[ ! ${?} -eq 0 ]]; then
echo "Failed to generate valid UUID with uuidgen" >&2
exit 2
fi
echo "${device} ${myuuid}" >> "${ssdmap}"
fi
if [[ -z "${myuuid}" ]]; then
echo "Failed to get a uuid for device ${device} when symlinking." >&2
exit 2
fi
retuuid="${myuuid}"
}
#Formats the given device ($1) if needed and mounts it at given mount point
# ($2).
function safe-format-and-mount() {
local device="${1}"
local mountpoint="${2}"
# Format only if the disk is not already formatted.
if ! tune2fs -l "${device}" ; then
echo "Formatting '${device}'"
mkfs.ext4 -F "${device}"
fi
mkdir -p "${mountpoint}"
echo "Mounting '${device}' at '${mountpoint}'"
mount -o discard,defaults "${device}" "${mountpoint}"
chmod a+w "${mountpoint}"
}
# Gets a devices UUID and bind mounts the device to mount location in
# /mnt/disks/by-id/
function unique-uuid-bind-mount(){
local mountpoint="${1}"
local actual_device="${2}"
# Trigger udev refresh so that newly formatted devices are propagated in by-uuid
udevadm control --reload-rules
udevadm trigger
udevadm settle
# grep the exact match of actual device, prevents substring matching
local myuuid=$(ls -l /dev/disk/by-uuid/ | grep "/${actual_device}$" | tr -s ' ' | cut -d ' ' -f 9)
# myuuid should be the uuid of the device as found in /dev/disk/by-uuid/
if [[ -z "${myuuid}" ]]; then
echo "Failed to get a uuid for device ${actual_device} when mounting." >&2
exit 2
fi
# bindpoint should be the full path of the to-be-bound device
local bindpoint="${UUID_MNT_PREFIX}-${interface}-fs/local-ssd-${myuuid}"
safe-bind-mount "${mountpoint}" "${bindpoint}"
}
# Bind mounts device at mountpoint to bindpoint
function safe-bind-mount(){
local mountpoint="${1}"
local bindpoint="${2}"
# Mount device to the mountpoint
mkdir -p "${bindpoint}"
echo "Binding '${mountpoint}' at '${bindpoint}'"
mount --bind "${mountpoint}" "${bindpoint}"
chmod a+w "${bindpoint}"
}
# Mounts, bindmounts, or symlinks depending on the interface and format
# of the incoming device
function mount-ext(){
local ssd="${1}"
local devicenum="${2}"
local interface="${3}"
local format="${4}"
if [[ -z "${devicenum}" ]]; then
echo "Failed to get the local disk number for device ${ssd}" >&2
exit 2
fi
# TODO: Handle partitioned disks. Right now this code just ignores partitions
if [[ "${format}" == "fs" ]]; then
if [[ "${interface}" == "scsi" ]]; then
local actual_device=$(readlink -f "${ssd}" | cut -d '/' -f 3)
# Error checking
if [[ "${actual_device}" != sd* ]]; then
echo "'actual_device' is not of the correct format. It must be the kernel name of the device, got ${actual_device} instead" >&2
exit 1
fi
local mountpoint="/mnt/disks/ssd${devicenum}"
else
# This path is required because the existing Google images do not
# expose NVMe devices in /dev/disk/by-id so we are using the /dev/nvme instead
local actual_device=$(echo ${ssd} | cut -d '/' -f 3)
# Error checking
if [[ "${actual_device}" != nvme* ]]; then
echo "'actual_device' is not of the correct format. It must be the kernel name of the device, got ${actual_device} instead" >&2
exit 1
fi
local mountpoint="/mnt/disks/ssd-nvme${devicenum}"
fi
safe-format-and-mount "${ssd}" "${mountpoint}"
# We only do the bindmount if users are using the new local ssd request method
# see https://github.com/kubernetes/kubernetes/pull/53466#discussion_r146431894
if [[ ! -z "${NODE_LOCAL_SSDS_EXT:-}" ]]; then
unique-uuid-bind-mount "${mountpoint}" "${actual_device}"
fi
elif [[ "${format}" == "block" ]]; then
local symdir="${UUID_BLOCK_PREFIX}-${interface}-block"
safe-block-symlink "${ssd}" "${symdir}"
else
echo "Disk format must be either fs or block, got ${format}"
fi
}
# Local ssds, if present, are mounted or symlinked to their appropriate
# locations
function ensure-local-ssds() {
get-local-disk-num "scsi" "block"
local scsiblocknum="${localdisknum}"
local i=0
for ssd in /dev/disk/by-id/google-local-ssd-*; do
if [ -e "${ssd}" ]; then
local devicenum=`echo ${ssd} | sed -e 's/\/dev\/disk\/by-id\/google-local-ssd-\([0-9]*\)/\1/'`
if [[ "${i}" -lt "${scsiblocknum}" ]]; then
mount-ext "${ssd}" "${devicenum}" "scsi" "block"
else
# GKE does not set NODE_LOCAL_SSDS so all non-block devices
# are assumed to be filesystem devices
mount-ext "${ssd}" "${devicenum}" "scsi" "fs"
fi
i=$((i+1))
else
echo "No local SCSI SSD disks found."
fi
done
# The following mounts or symlinks NVMe devices
get-local-disk-num "nvme" "block"
local nvmeblocknum="${localdisknum}"
local i=0
for ssd in /dev/nvme*; do
if [ -e "${ssd}" ]; then
# This workaround to find if the NVMe device is a disk is required because
# the existing Google images does not expose NVMe devices in /dev/disk/by-id
if [[ `udevadm info --query=property --name=${ssd} | grep DEVTYPE | sed "s/DEVTYPE=//"` == "disk" ]]; then
local devicenum=`echo ${ssd} | sed -e 's/\/dev\/nvme0n\([0-9]*\)/\1/'`
if [[ "${i}" -lt "${nvmeblocknum}" ]]; then
mount-ext "${ssd}" "${devicenum}" "nvme" "block"
else
mount-ext "${ssd}" "${devicenum}" "nvme" "fs"
fi
i=$((i+1))
fi
else
echo "No local NVMe SSD disks found."
fi
done
}
# Installs logrotate configuration files
function setup-logrotate() {
mkdir -p /etc/logrotate.d/
# Configure log rotation for all logs in /var/log, which is where k8s services
# are configured to write their log files. Whenever logrotate is ran, this
# config will:
# * rotate the log file if its size is > 100Mb OR if one day has elapsed
# * save rotated logs into a gzipped timestamped backup
# * log file timestamp (controlled by 'dateformat') includes seconds too. This
# ensures that logrotate can generate unique logfiles during each rotation
# (otherwise it skips rotation if 'maxsize' is reached multiple times in a
# day).
# * keep only 5 old (rotated) logs, and will discard older logs.
cat > /etc/logrotate.d/allvarlogs <<EOF
/var/log/*.log {
rotate ${LOGROTATE_FILES_MAX_COUNT:-5}
copytruncate
missingok
notifempty
compress
maxsize ${LOGROTATE_MAX_SIZE:-100M}
daily
dateext
dateformat -%Y%m%d-%s
create 0644 root root
}
EOF
}
# Finds the master PD device; returns it in MASTER_PD_DEVICE
function find-master-pd {
MASTER_PD_DEVICE=""
if [[ ! -e /dev/disk/by-id/google-master-pd ]]; then
return
fi
device_info=$(ls -l /dev/disk/by-id/google-master-pd)
relative_path=${device_info##* }
MASTER_PD_DEVICE="/dev/disk/by-id/${relative_path}"
}
# Mounts a persistent disk (formatting if needed) to store the persistent data
# on the master -- etcd's data, a few settings, and security certs/keys/tokens.
# safe-format-and-mount only formats an unformatted disk, and mkdir -p will
# leave a directory be if it already exists.
function mount-master-pd {
find-master-pd
if [[ -z "${MASTER_PD_DEVICE:-}" ]]; then
return
fi
echo "Mounting master-pd"
local -r pd_path="/dev/disk/by-id/google-master-pd"
local -r mount_point="/mnt/disks/master-pd"
# Format and mount the disk, create directories on it for all of the master's
# persistent data, and link them to where they're used.
mkdir -p "${mount_point}"
safe-format-and-mount "${pd_path}" "${mount_point}"
echo "Mounted master-pd '${pd_path}' at '${mount_point}'"
# NOTE: These locations on the PD store persistent data, so to maintain
# upgradeability, these locations should not change. If they do, take care
# to maintain a migration path from these locations to whatever new
# locations.
# Contains all the data stored in etcd.
mkdir -m 700 -p "${mount_point}/var/etcd"
ln -s -f "${mount_point}/var/etcd" /var/etcd
mkdir -p /etc/srv
# Contains the dynamically generated apiserver auth certs and keys.
mkdir -p "${mount_point}/srv/kubernetes"
ln -s -f "${mount_point}/srv/kubernetes" /etc/srv/kubernetes
# Directory for kube-apiserver to store SSH key (if necessary).
mkdir -p "${mount_point}/srv/sshproxy"
ln -s -f "${mount_point}/srv/sshproxy" /etc/srv/sshproxy
if ! id etcd &>/dev/null; then
useradd -s /sbin/nologin -d /var/etcd etcd
fi
chown -R etcd "${mount_point}/var/etcd"
chgrp -R etcd "${mount_point}/var/etcd"
}
# append_or_replace_prefixed_line ensures:
# 1. the specified file exists
# 2. existing lines with the specified ${prefix} are removed
# 3. a new line with the specified ${prefix}${suffix} is appended
function append_or_replace_prefixed_line {
local -r file="${1:-}"
local -r prefix="${2:-}"
local -r suffix="${3:-}"
local -r dirname="$(dirname ${file})"
local -r tmpfile="$(mktemp -t filtered.XXXX --tmpdir=${dirname})"
touch "${file}"
awk "substr(\$0,0,length(\"${prefix}\")) != \"${prefix}\" { print }" "${file}" > "${tmpfile}"
echo "${prefix}${suffix}" >> "${tmpfile}"
mv "${tmpfile}" "${file}"
}
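# Example (as used in create-master-auth below):
#   append_or_replace_prefixed_line "${basic_auth_csv}" "${KUBE_PASSWORD},${KUBE_USER}," "admin,system:masters"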
function write-pki-data {
local data="${1}"
local path="${2}"
(umask 077; echo "${data}" | base64 --decode > "${path}")
}
function create-node-pki {
echo "Creating node pki files"
local -r pki_dir="/etc/srv/kubernetes/pki"
mkdir -p "${pki_dir}"
if [[ -z "${CA_CERT_BUNDLE:-}" ]]; then
CA_CERT_BUNDLE="${CA_CERT}"
fi
CA_CERT_BUNDLE_PATH="${pki_dir}/ca-certificates.crt"
write-pki-data "${CA_CERT_BUNDLE}" "${CA_CERT_BUNDLE_PATH}"
if [[ ! -z "${KUBELET_CERT:-}" && ! -z "${KUBELET_KEY:-}" ]]; then
KUBELET_CERT_PATH="${pki_dir}/kubelet.crt"
write-pki-data "${KUBELET_CERT}" "${KUBELET_CERT_PATH}"
KUBELET_KEY_PATH="${pki_dir}/kubelet.key"
write-pki-data "${KUBELET_KEY}" "${KUBELET_KEY_PATH}"
fi
}
function create-master-pki {
echo "Creating master pki files"
local -r pki_dir="/etc/srv/kubernetes/pki"
mkdir -p "${pki_dir}"
CA_CERT_PATH="${pki_dir}/ca.crt"
write-pki-data "${CA_CERT}" "${CA_CERT_PATH}"
# this is not true on GKE
if [[ ! -z "${CA_KEY:-}" ]]; then
CA_KEY_PATH="${pki_dir}/ca.key"
write-pki-data "${CA_KEY}" "${CA_KEY_PATH}"
fi
if [[ -z "${APISERVER_SERVER_CERT:-}" || -z "${APISERVER_SERVER_KEY:-}" ]]; then
APISERVER_SERVER_CERT="${MASTER_CERT}"
APISERVER_SERVER_KEY="${MASTER_KEY}"
fi
APISERVER_SERVER_CERT_PATH="${pki_dir}/apiserver.crt"
write-pki-data "${APISERVER_SERVER_CERT}" "${APISERVER_SERVER_CERT_PATH}"
APISERVER_SERVER_KEY_PATH="${pki_dir}/apiserver.key"
write-pki-data "${APISERVER_SERVER_KEY}" "${APISERVER_SERVER_KEY_PATH}"
if [[ -z "${APISERVER_CLIENT_CERT:-}" || -z "${APISERVER_CLIENT_KEY:-}" ]]; then
APISERVER_CLIENT_CERT="${KUBEAPISERVER_CERT}"
APISERVER_CLIENT_KEY="${KUBEAPISERVER_KEY}"
fi
APISERVER_CLIENT_CERT_PATH="${pki_dir}/apiserver-client.crt"
write-pki-data "${APISERVER_CLIENT_CERT}" "${APISERVER_CLIENT_CERT_PATH}"
APISERVER_CLIENT_KEY_PATH="${pki_dir}/apiserver-client.key"
write-pki-data "${APISERVER_CLIENT_KEY}" "${APISERVER_CLIENT_KEY_PATH}"
if [[ -z "${SERVICEACCOUNT_CERT:-}" || -z "${SERVICEACCOUNT_KEY:-}" ]]; then
SERVICEACCOUNT_CERT="${MASTER_CERT}"
SERVICEACCOUNT_KEY="${MASTER_KEY}"
fi
SERVICEACCOUNT_CERT_PATH="${pki_dir}/serviceaccount.crt"
write-pki-data "${SERVICEACCOUNT_CERT}" "${SERVICEACCOUNT_CERT_PATH}"
SERVICEACCOUNT_KEY_PATH="${pki_dir}/serviceaccount.key"
write-pki-data "${SERVICEACCOUNT_KEY}" "${SERVICEACCOUNT_KEY_PATH}"
if [[ ! -z "${REQUESTHEADER_CA_CERT:-}" ]]; then
AGGREGATOR_CA_KEY_PATH="${pki_dir}/aggr_ca.key"
write-pki-data "${AGGREGATOR_CA_KEY}" "${AGGREGATOR_CA_KEY_PATH}"
REQUESTHEADER_CA_CERT_PATH="${pki_dir}/aggr_ca.crt"
write-pki-data "${REQUESTHEADER_CA_CERT}" "${REQUESTHEADER_CA_CERT_PATH}"
PROXY_CLIENT_KEY_PATH="${pki_dir}/proxy_client.key"
write-pki-data "${PROXY_CLIENT_KEY}" "${PROXY_CLIENT_KEY_PATH}"
PROXY_CLIENT_CERT_PATH="${pki_dir}/proxy_client.crt"
write-pki-data "${PROXY_CLIENT_CERT}" "${PROXY_CLIENT_CERT_PATH}"
fi
}
# After the first boot and on upgrade, these files exist on the master-pd
# and should never be touched again (except perhaps an additional service
# account, see NB below.) One exception is if METADATA_CLOBBERS_CONFIG is
# enabled. In that case the basic_auth.csv file will be rewritten to make
# sure it matches the metadata source of truth.
function create-master-auth {
echo "Creating master auth files"
local -r auth_dir="/etc/srv/kubernetes"
local -r basic_auth_csv="${auth_dir}/basic_auth.csv"
if [[ -n "${KUBE_PASSWORD:-}" && -n "${KUBE_USER:-}" ]]; then
if [[ -e "${basic_auth_csv}" && "${METADATA_CLOBBERS_CONFIG:-false}" == "true" ]]; then
# If METADATA_CLOBBERS_CONFIG is true, we want to rewrite the file
# completely, because if we're changing KUBE_USER and KUBE_PASSWORD, we
# have nothing to match on. The file is replaced just below with
# append_or_replace_prefixed_line.
rm "${basic_auth_csv}"
fi
append_or_replace_prefixed_line "${basic_auth_csv}" "${KUBE_PASSWORD},${KUBE_USER}," "admin,system:masters"
fi
local -r known_tokens_csv="${auth_dir}/known_tokens.csv"
if [[ -e "${known_tokens_csv}" && "${METADATA_CLOBBERS_CONFIG:-false}" == "true" ]]; then
rm "${known_tokens_csv}"
fi
if [[ -n "${KUBE_BEARER_TOKEN:-}" ]]; then
append_or_replace_prefixed_line "${known_tokens_csv}" "${KUBE_BEARER_TOKEN}," "admin,admin,system:masters"
fi
if [[ -n "${KUBE_CONTROLLER_MANAGER_TOKEN:-}" ]]; then
append_or_replace_prefixed_line "${known_tokens_csv}" "${KUBE_CONTROLLER_MANAGER_TOKEN}," "system:kube-controller-manager,uid:system:kube-controller-manager"
fi
if [[ -n "${KUBE_SCHEDULER_TOKEN:-}" ]]; then
append_or_replace_prefixed_line "${known_tokens_csv}" "${KUBE_SCHEDULER_TOKEN}," "system:kube-scheduler,uid:system:kube-scheduler"
fi
if [[ -n "${KUBE_CLUSTER_AUTOSCALER_TOKEN:-}" ]]; then
append_or_replace_prefixed_line "${known_tokens_csv}" "${KUBE_CLUSTER_AUTOSCALER_TOKEN}," "cluster-autoscaler,uid:cluster-autoscaler"
fi
if [[ -n "${KUBE_PROXY_TOKEN:-}" ]]; then
append_or_replace_prefixed_line "${known_tokens_csv}" "${KUBE_PROXY_TOKEN}," "system:kube-proxy,uid:kube_proxy"
fi
if [[ -n "${NODE_PROBLEM_DETECTOR_TOKEN:-}" ]]; then
append_or_replace_prefixed_line "${known_tokens_csv}" "${NODE_PROBLEM_DETECTOR_TOKEN}," "system:node-problem-detector,uid:node-problem-detector"
fi
local use_cloud_config="false"
cat <<EOF >/etc/gce.conf
[global]
EOF
if [[ -n "${GCE_API_ENDPOINT:-}" ]]; then
cat <<EOF >>/etc/gce.conf
api-endpoint = ${GCE_API_ENDPOINT}
EOF
fi
if [[ -n "${TOKEN_URL:-}" && -n "${TOKEN_BODY:-}" ]]; then
use_cloud_config="true"
cat <<EOF >>/etc/gce.conf
token-url = ${TOKEN_URL}
token-body = ${TOKEN_BODY}
EOF
fi
if [[ -n "${CONTAINER_API_ENDPOINT:-}" ]]; then
use_cloud_config="true"
cat <<EOF >>/etc/gce.conf
container-api-endpoint = ${CONTAINER_API_ENDPOINT}
EOF
fi
if [[ -n "${PROJECT_ID:-}" ]]; then
use_cloud_config="true"
cat <<EOF >>/etc/gce.conf
project-id = ${PROJECT_ID}
EOF
fi
if [[ -n "${NETWORK_PROJECT_ID:-}" ]]; then
use_cloud_config="true"
cat <<EOF >>/etc/gce.conf
network-project-id = ${NETWORK_PROJECT_ID}
EOF
fi
if [[ -n "${NODE_NETWORK:-}" ]]; then
use_cloud_config="true"
cat <<EOF >>/etc/gce.conf
network-name = ${NODE_NETWORK}
EOF
fi
if [[ -n "${NODE_SUBNETWORK:-}" ]]; then
use_cloud_config="true"
cat <<EOF >>/etc/gce.conf
subnetwork-name = ${NODE_SUBNETWORK}
EOF
fi
if [[ -n "${NODE_INSTANCE_PREFIX:-}" ]]; then
use_cloud_config="true"
if [[ -n "${NODE_TAGS:-}" ]]; then
# split NODE_TAGS into an array by comma.
IFS=',' read -r -a node_tags <<< "${NODE_TAGS}"
else
local -r node_tags="${NODE_INSTANCE_PREFIX}"
fi
cat <<EOF >>/etc/gce.conf
node-instance-prefix = ${NODE_INSTANCE_PREFIX}
EOF
for tag in "${node_tags[@]}"; do
cat <<EOF >>/etc/gce.conf
node-tags = ${tag}
EOF
done
fi
if [[ -n "${MULTIZONE:-}" ]]; then
use_cloud_config="true"
cat <<EOF >>/etc/gce.conf
multizone = ${MULTIZONE}
EOF
fi
# Multimaster indicates that the cluster is HA.
# Currently the only HA clusters are regional.
# If we introduce zonal multimaster this will need to be revisited.
if [[ -n "${MULTIMASTER:-}" ]]; then
use_cloud_config="true"
cat <<EOF >>/etc/gce.conf
regional = ${MULTIMASTER}
EOF
fi
if [[ -n "${GCE_ALPHA_FEATURES:-}" ]]; then
use_cloud_config="true"
# split GCE_ALPHA_FEATURES into an array by comma.
IFS=',' read -r -a alpha_features <<< "${GCE_ALPHA_FEATURES}"
for feature in "${alpha_features[@]}"; do
cat <<EOF >>/etc/gce.conf
alpha-features = ${feature}
EOF
done
fi
if [[ -n "${SECONDARY_RANGE_NAME:-}" ]]; then
use_cloud_config="true"
cat <<EOF >> /etc/gce.conf
secondary-range-name = ${SECONDARY_RANGE_NAME}
EOF
fi
if [[ "${use_cloud_config}" != "true" ]]; then
rm -f /etc/gce.conf
fi
if [[ -n "${GCP_AUTHN_URL:-}" ]]; then
cat <<EOF >/etc/gcp_authn.config
clusters:
- name: gcp-authentication-server
cluster:
server: ${GCP_AUTHN_URL}
users:
- name: kube-apiserver
user:
auth-provider:
name: gcp
current-context: webhook
contexts:
- context:
cluster: gcp-authentication-server
user: kube-apiserver
name: webhook
EOF
fi
if [[ -n "${GCP_AUTHZ_URL:-}" ]]; then
cat <<EOF >/etc/gcp_authz.config
clusters:
- name: gcp-authorization-server
cluster:
server: ${GCP_AUTHZ_URL}
users:
- name: kube-apiserver
user:
auth-provider:
name: gcp
current-context: webhook
contexts:
- context:
cluster: gcp-authorization-server
user: kube-apiserver
name: webhook
EOF
fi
if [[ -n "${GCP_IMAGE_VERIFICATION_URL:-}" ]]; then
# This is the config file for the image review webhook.
cat <<EOF >/etc/gcp_image_review.config
clusters:
- name: gcp-image-review-server
cluster:
server: ${GCP_IMAGE_VERIFICATION_URL}
users:
- name: kube-apiserver
user:
auth-provider:
name: gcp
current-context: webhook
contexts:
- context:
cluster: gcp-image-review-server
user: kube-apiserver
name: webhook
EOF
# This is the config for the image review admission controller.
cat <<EOF >/etc/admission_controller.config
imagePolicy:
kubeConfigFile: /etc/gcp_image_review.config
allowTTL: 30
denyTTL: 30
retryBackoff: 500
defaultAllow: true
EOF
fi
}
# Write the config for the audit policy.
function create-master-audit-policy {
local -r path="${1}"
local -r policy="${2:-}"
if [[ -n "${policy}" ]]; then
echo "${policy}" > "${path}"
return
fi
# Known api groups
local -r known_apis='
- group: "" # core
- group: "admissionregistration.k8s.io"
- group: "apiextensions.k8s.io"
- group: "apiregistration.k8s.io"
- group: "apps"
- group: "authentication.k8s.io"
- group: "authorization.k8s.io"
- group: "autoscaling"
- group: "batch"
- group: "certificates.k8s.io"
- group: "extensions"
- group: "metrics.k8s.io"
- group: "networking.k8s.io"
- group: "policy"
- group: "rbac.authorization.k8s.io"
- group: "scheduling.k8s.io"
- group: "settings.k8s.io"
- group: "storage.k8s.io"'
cat <<EOF >"${path}"
apiVersion: audit.k8s.io/v1
kind: Policy
rules:
# The following requests were manually identified as high-volume and low-risk,
# so drop them.
- level: None
users: ["system:kube-proxy"]
verbs: ["watch"]
resources:
- group: "" # core
resources: ["endpoints", "services", "services/status"]
- level: None
# Ingress controller reads 'configmaps/ingress-uid' through the unsecured port.
# TODO(#46983): Change this to the ingress controller service account.
users: ["system:unsecured"]
namespaces: ["kube-system"]
verbs: ["get"]
resources:
- group: "" # core
resources: ["configmaps"]
- level: None
users: ["kubelet"] # legacy kubelet identity
verbs: ["get"]
resources:
- group: "" # core
resources: ["nodes", "nodes/status"]
- level: None
userGroups: ["system:nodes"]
verbs: ["get"]
resources:
- group: "" # core
resources: ["nodes", "nodes/status"]
- level: None
users:
- system:kube-controller-manager
- system:kube-scheduler
- system:serviceaccount:kube-system:endpoint-controller
verbs: ["get", "update"]
namespaces: ["kube-system"]
resources:
- group: "" # core
resources: ["endpoints"]
- level: None
users: ["system:apiserver"]
verbs: ["get"]
resources:
- group: "" # core
resources: ["namespaces", "namespaces/status", "namespaces/finalize"]
- level: None
users: ["cluster-autoscaler"]
verbs: ["get", "update"]
namespaces: ["kube-system"]
resources:
- group: "" # core
resources: ["configmaps", "endpoints"]
# Don't log HPA fetching metrics.
- level: None
users:
- system:kube-controller-manager
verbs: ["get", "list"]
resources:
- group: "metrics.k8s.io"
# Don't log these read-only URLs.
- level: None
nonResourceURLs:
- /healthz*
- /version
- /swagger*
# Don't log events requests.
- level: None
resources:
- group: "" # core
resources: ["events"]
# Node and pod status calls from nodes are high-volume and can be large; don't log responses for expected updates from nodes.
- level: Request
users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"]
verbs: ["update","patch"]
resources:
- group: "" # core
resources: ["nodes/status", "pods/status"]
omitStages:
- "RequestReceived"
- level: Request
userGroups: ["system:nodes"]
verbs: ["update","patch"]
resources:
- group: "" # core
resources: ["nodes/status", "pods/status"]
omitStages:
- "RequestReceived"
# deletecollection calls can be large, don't log responses for expected namespace deletions
- level: Request
users: ["system:serviceaccount:kube-system:namespace-controller"]
verbs: ["deletecollection"]
omitStages:
- "RequestReceived"
# Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data,
# so only log at the Metadata level.
- level: Metadata
resources:
- group: "" # core
resources: ["secrets", "configmaps"]
- group: authentication.k8s.io
resources: ["tokenreviews"]
omitStages:
- "RequestReceived"
# Get responses can be large; skip them.
- level: Request
verbs: ["get", "list", "watch"]
resources: ${known_apis}
omitStages:
- "RequestReceived"
# Default level for known APIs
- level: RequestResponse
resources: ${known_apis}
omitStages:
- "RequestReceived"
# Default level for all other requests.
- level: Metadata
omitStages:
- "RequestReceived"
EOF
}
# Writes the configuration file used by the webhook advanced auditing backend.
function create-master-audit-webhook-config {
local -r path="${1}"
if [[ -n "${GCP_AUDIT_URL:-}" ]]; then
# The webhook config file is a kubeconfig file describing the webhook endpoint.
cat <<EOF >"${path}"
clusters:
- name: gcp-audit-server
cluster:
server: ${GCP_AUDIT_URL}
users:
- name: kube-apiserver
user:
auth-provider:
name: gcp
current-context: webhook
contexts:
- context:
cluster: gcp-audit-server
user: kube-apiserver
name: webhook
EOF
fi
}
# Arg 1: the IP address of the API server
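# Example: create-kubelet-kubeconfig "${KUBELET_APISERVER}" (as done by create-master-kubelet-auth below).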
function create-kubelet-kubeconfig() {
local apiserver_address="${1}"
if [[ -z "${apiserver_address}" ]]; then
echo "Must provide API server address to create Kubelet kubeconfig file!"
exit 1
fi
if [[ "${CREATE_BOOTSTRAP_KUBECONFIG:-true}" == "true" ]]; then
echo "Creating kubelet bootstrap-kubeconfig file"
cat <<EOF >/var/lib/kubelet/bootstrap-kubeconfig
apiVersion: v1
kind: Config
users:
- name: kubelet
user:
client-certificate: ${KUBELET_CERT_PATH}
client-key: ${KUBELET_KEY_PATH}
clusters:
- name: local
cluster:
server: https://${apiserver_address}
certificate-authority: ${CA_CERT_BUNDLE_PATH}
contexts:
- context:
cluster: local
user: kubelet
name: service-account-context
current-context: service-account-context
EOF
elif [[ "${FETCH_BOOTSTRAP_KUBECONFIG:-false}" == "true" ]]; then
echo "Fetching kubelet bootstrap-kubeconfig file from metadata"
get-metadata-value "instance/attributes/bootstrap-kubeconfig" >/var/lib/kubelet/bootstrap-kubeconfig
else
echo "Fetching kubelet kubeconfig file from metadata"
get-metadata-value "instance/attributes/kubeconfig" >/var/lib/kubelet/kubeconfig
fi
}
# Uses KUBELET_CA_CERT (falling back to CA_CERT), KUBELET_CERT, and KUBELET_KEY
# to generate a kubeconfig file for the kubelet to securely connect to the apiserver.
# Set REGISTER_MASTER_KUBELET to true if kubelet on the master node
# should register to the apiserver.
function create-master-kubelet-auth {
# Only configure the kubelet on the master if the required variables are
# set in the environment.
if [[ -n "${KUBELET_APISERVER:-}" && -n "${KUBELET_CERT:-}" && -n "${KUBELET_KEY:-}" ]]; then
REGISTER_MASTER_KUBELET="true"
create-kubelet-kubeconfig ${KUBELET_APISERVER}
fi
}
function create-kubeproxy-user-kubeconfig {
echo "Creating kube-proxy user kubeconfig file"
cat <<EOF >/var/lib/kube-proxy/kubeconfig
apiVersion: v1
kind: Config
users:
- name: kube-proxy
user:
token: ${KUBE_PROXY_TOKEN}
clusters:
- name: local
cluster:
certificate-authority-data: ${CA_CERT_BUNDLE}
contexts:
- context:
cluster: local
user: kube-proxy
name: service-account-context
current-context: service-account-context
EOF
}
function create-kubecontrollermanager-kubeconfig {
echo "Creating kube-controller-manager kubeconfig file"
mkdir -p /etc/srv/kubernetes/kube-controller-manager
cat <<EOF >/etc/srv/kubernetes/kube-controller-manager/kubeconfig
apiVersion: v1
kind: Config
users:
- name: kube-controller-manager
user:
token: ${KUBE_CONTROLLER_MANAGER_TOKEN}
clusters:
- name: local
cluster:
insecure-skip-tls-verify: true
server: https://localhost:443
contexts:
- context:
cluster: local
user: kube-controller-manager
name: service-account-context
current-context: service-account-context
EOF
}
function create-kubescheduler-kubeconfig {
echo "Creating kube-scheduler kubeconfig file"
mkdir -p /etc/srv/kubernetes/kube-scheduler
cat <<EOF >/etc/srv/kubernetes/kube-scheduler/kubeconfig
apiVersion: v1
kind: Config
users:
- name: kube-scheduler
user:
token: ${KUBE_SCHEDULER_TOKEN}
clusters:
- name: local
cluster:
insecure-skip-tls-verify: true
server: https://localhost:443
contexts:
- context:
cluster: local
user: kube-scheduler
name: kube-scheduler
current-context: kube-scheduler
EOF
}
function create-clusterautoscaler-kubeconfig {
echo "Creating cluster-autoscaler kubeconfig file"
mkdir -p /etc/srv/kubernetes/cluster-autoscaler
cat <<EOF >/etc/srv/kubernetes/cluster-autoscaler/kubeconfig
apiVersion: v1
kind: Config
users:
- name: cluster-autoscaler
user:
token: ${KUBE_CLUSTER_AUTOSCALER_TOKEN}
clusters:
- name: local
cluster:
insecure-skip-tls-verify: true
server: https://localhost:443
contexts:
- context:
cluster: local
user: cluster-autoscaler
name: cluster-autoscaler
current-context: cluster-autoscaler
EOF
}
function create-kubescheduler-policy-config {
echo "Creating kube-scheduler policy config file"
mkdir -p /etc/srv/kubernetes/kube-scheduler
cat <<EOF >/etc/srv/kubernetes/kube-scheduler/policy-config
${SCHEDULER_POLICY_CONFIG}
EOF
}
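# Arg 1: the IP address of the API server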
function create-node-problem-detector-kubeconfig {
local apiserver_address="${1}"
if [[ -z "${apiserver_address}" ]]; then
echo "Must provide API server address to create node-problem-detector kubeconfig file!"
exit 1
fi
echo "Creating node-problem-detector kubeconfig file"
mkdir -p /var/lib/node-problem-detector
cat <<EOF >/var/lib/node-problem-detector/kubeconfig
apiVersion: v1
kind: Config
users:
- name: node-problem-detector
user:
token: ${NODE_PROBLEM_DETECTOR_TOKEN}
clusters:
- name: local
cluster:
server: https://${apiserver_address}
certificate-authority-data: ${CA_CERT}
contexts:
- context:
cluster: local
user: node-problem-detector
name: service-account-context
current-context: service-account-context
EOF
}
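# Decodes the etcd peer CA certificate and peer cert/key pair from kube-env (base64,
# with the certificates additionally gzip-compressed) into /etc/srv/kubernetes.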
function create-master-etcd-auth {
if [[ -n "${ETCD_CA_CERT:-}" && -n "${ETCD_PEER_KEY:-}" && -n "${ETCD_PEER_CERT:-}" ]]; then
local -r auth_dir="/etc/srv/kubernetes"
echo "${ETCD_CA_CERT}" | base64 --decode | gunzip > "${auth_dir}/etcd-ca.crt"
echo "${ETCD_PEER_KEY}" | base64 --decode > "${auth_dir}/etcd-peer.key"
echo "${ETCD_PEER_CERT}" | base64 --decode | gunzip > "${auth_dir}/etcd-peer.crt"
fi
}
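# Decodes the etcd-apiserver CA, server, and client credentials from kube-env into
# /etc/srv/kubernetes/pki and records their locations in ETCD_APISERVER_*_PATH variables.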
function create-master-etcd-apiserver-auth {
if [[ -n "${ETCD_APISERVER_CA_CERT:-}" && -n "${ETCD_APISERVER_SERVER_KEY:-}" && -n "${ETCD_APISERVER_SERVER_CERT:-}" && -n "${ETCD_APISERVER_CLIENT_KEY:-}" && -n "${ETCD_APISERVER_CLIENT_CERT:-}" ]]; then
local -r auth_dir="/etc/srv/kubernetes/pki"
ETCD_APISERVER_CA_KEY_PATH="${auth_dir}/etcd-apiserver-ca.key"
echo "${ETCD_APISERVER_CA_KEY}" | base64 --decode > "${ETCD_APISERVER_CA_KEY_PATH}"
ETCD_APISERVER_CA_CERT_PATH="${auth_dir}/etcd-apiserver-ca.crt"
echo "${ETCD_APISERVER_CA_CERT}" | base64 --decode | gunzip > "${auth_dir}/etcd-apiserver-ca.crt"
ETCD_APISERVER_SERVER_KEY_PATH="${auth_dir}/etcd-apiserver-server.key"
echo "${ETCD_APISERVER_SERVER_KEY}" | base64 --decode > "${ETCD_APISERVER_SERVER_KEY_PATH}"
ETCD_APISERVER_SERVER_CERT_PATH="${auth_dir}/etcd-apiserver-server.crt"
echo "${ETCD_APISERVER_SERVER_CERT}" | base64 --decode | gunzip > "${auth_dir}/etcd-apiserver-server.crt"
ETCD_APISERVER_CLIENT_KEY_PATH="${auth_dir}/etcd-apiserver-client.key"
echo "${ETCD_APISERVER_CLIENT_KEY}" | base64 --decode > "${auth_dir}/etcd-apiserver-client.key"
ETCD_APISERVER_CLIENT_CERT_PATH="${auth_dir}/etcd-apiserver-client.crt"
echo "${ETCD_APISERVER_CLIENT_CERT}" | base64 --decode | gunzip > "${auth_dir}/etcd-apiserver-client.crt"
fi
}
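# Assembles the docker daemon command line flags, writes them to /etc/default/docker,
# raises the docker.service TasksMax limit, and restarts docker to pick up the changes.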
function assemble-docker-flags {
echo "Assemble docker command line flags"
local docker_opts="-p /var/run/docker.pid --iptables=false --ip-masq=false"
if [[ "${TEST_CLUSTER:-}" == "true" ]]; then
docker_opts+=" --log-level=debug"
else
docker_opts+=" --log-level=warn"
fi
local use_net_plugin="true"
if [[ "${NETWORK_PROVIDER:-}" == "kubenet" || "${NETWORK_PROVIDER:-}" == "cni" ]]; then
# set docker0 cidr to private ip address range to avoid conflict with cbr0 cidr range
docker_opts+=" --bip=169.254.123.1/24"
else
use_net_plugin="false"
docker_opts+=" --bridge=cbr0"
fi
# Decide whether to enable a docker registry mirror. This is taken from
# the "kube-env" metadata value.
if [[ -n "${DOCKER_REGISTRY_MIRROR_URL:-}" ]]; then
echo "Enable docker registry mirror at: ${DOCKER_REGISTRY_MIRROR_URL}"
docker_opts+=" --registry-mirror=${DOCKER_REGISTRY_MIRROR_URL}"
fi
# Configure docker logging
docker_opts+=" --log-driver=${DOCKER_LOG_DRIVER:-json-file}"
docker_opts+=" --log-opt=max-size=${DOCKER_LOG_MAX_SIZE:-10m}"
docker_opts+=" --log-opt=max-file=${DOCKER_LOG_MAX_FILE:-5}"
# Disable live-restore if the environment variable is set.
if [[ "${DISABLE_DOCKER_LIVE_RESTORE:-false}" == "true" ]]; then
docker_opts+=" --live-restore=false"
fi
echo "DOCKER_OPTS=\"${docker_opts} ${EXTRA_DOCKER_OPTS:-}\"" > /etc/default/docker
# Ensure TasksMax is sufficient for docker.
# (https://github.com/kubernetes/kubernetes/issues/51977)
echo "Extend the docker.service configuration to set a higher pids limit"
mkdir -p /etc/systemd/system/docker.service.d
cat <<EOF >/etc/systemd/system/docker.service.d/01tasksmax.conf
[Service]
TasksMax=infinity
EOF
systemctl daemon-reload
echo "Docker command line is updated. Restart docker to pick it up"
systemctl restart docker
}
# This function assembles the kubelet systemd service file and starts it
# using systemctl.
function start-kubelet {
echo "Start kubelet"
# TODO(#60123): The kubelet should create the cert-dir directory if it doesn't exist
mkdir -p /var/lib/kubelet/pki/
local kubelet_bin="${KUBE_HOME}/bin/kubelet"
local -r version="$("${kubelet_bin}" --version=true | cut -f2 -d " ")"
local -r builtin_kubelet="/usr/bin/kubelet"
if [[ "${TEST_CLUSTER:-}" == "true" ]]; then
# Determine which binary to use on test clusters. We use the built-in
# version only if the downloaded version is the same as the built-in
# version. This allows GCI to run some of the e2e tests to qualify the
# built-in kubelet.
if [[ -x "${builtin_kubelet}" ]]; then
local -r builtin_version="$("${builtin_kubelet}" --version=true | cut -f2 -d " ")"
if [[ "${builtin_version}" == "${version}" ]]; then
kubelet_bin="${builtin_kubelet}"
fi
fi
fi
echo "Using kubelet binary at ${kubelet_bin}"
local -r kubelet_env_file="/etc/default/kubelet"
local kubelet_opts="${KUBELET_ARGS} ${KUBELET_CONFIG_FILE_ARG:-}"
echo "KUBELET_OPTS=\"${kubelet_opts}\"" > "${kubelet_env_file}"
echo "KUBE_COVERAGE_FILE=\"/var/log/kubelet.cov\"" >> "${kubelet_env_file}"
# Write the systemd service file for kubelet.
cat <<EOF >/etc/systemd/system/kubelet.service
[Unit]
Description=Kubernetes kubelet
Requires=network-online.target
After=network-online.target
[Service]
Restart=always
RestartSec=10
EnvironmentFile=${kubelet_env_file}
ExecStart=${kubelet_bin} \$KUBELET_OPTS
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl start kubelet.service
}
# This function assembles the node problem detector systemd service file and
# starts it using systemctl.
function start-node-problem-detector {
echo "Start node problem detector"
local -r npd_bin="${KUBE_HOME}/bin/node-problem-detector"
local -r km_config="${KUBE_HOME}/node-problem-detector/config/kernel-monitor.json"
# TODO(random-liu): Handle this for alternative container runtime.
local -r dm_config="${KUBE_HOME}/node-problem-detector/config/docker-monitor.json"
local -r custom_km_config="${KUBE_HOME}/node-problem-detector/config/kernel-monitor-counter.json,${KUBE_HOME}/node-problem-detector/config/systemd-monitor-counter.json,${KUBE_HOME}/node-problem-detector/config/docker-monitor-counter.json"
echo "Using node problem detector binary at ${npd_bin}"
local flags="${NPD_TEST_LOG_LEVEL:-"--v=2"} ${NPD_TEST_ARGS:-}"
flags+=" --logtostderr"
flags+=" --system-log-monitors=${km_config},${dm_config}"
flags+=" --custom-plugin-monitors=${custom_km_config}"
flags+=" --apiserver-override=https://${KUBERNETES_MASTER_NAME}?inClusterConfig=false&auth=/var/lib/node-problem-detector/kubeconfig"
local -r npd_port=${NODE_PROBLEM_DETECTOR_PORT:-20256}
flags+=" --port=${npd_port}"
if [[ -n "${EXTRA_NPD_ARGS:-}" ]]; then
flags+=" ${EXTRA_NPD_ARGS}"
fi
# Write the systemd service file for node problem detector.
cat <<EOF >/etc/systemd/system/node-problem-detector.service
[Unit]
Description=Kubernetes node problem detector
Requires=network-online.target
After=network-online.target
[Service]
Restart=always
RestartSec=10
ExecStart=${npd_bin} ${flags}
[Install]
WantedBy=multi-user.target
EOF
systemctl start node-problem-detector.service
}
# Create the log file and set its properties.
#
# $1: the file to create.
# $2: the log owner uid to set for the log file.
# $3: the log owner gid to set for the log file.
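# Example: prepare-log-file /var/log/kube-proxy.log (ownership defaults to root:root).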
function prepare-log-file {
touch "$1"
chmod 644 "$1"
chown "${2:-${LOG_OWNER_USER:-root}}":"${3:-${LOG_OWNER_GROUP:-root}}" "$1"
}
# Prepares parameters for kube-proxy manifest.
# $1: source path of kube-proxy manifest.
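# Example: prepare-kube-proxy-manifest-variables "${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/kube-proxy.manifest"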
function prepare-kube-proxy-manifest-variables {
local -r src_file=$1;
local -r kubeconfig="--kubeconfig=/var/lib/kube-proxy/kubeconfig"
local kube_docker_registry="k8s.gcr.io"
if [[ -n "${KUBE_DOCKER_REGISTRY:-}" ]]; then
kube_docker_registry=${KUBE_DOCKER_REGISTRY}
fi
local -r kube_proxy_docker_tag=$(cat /home/kubernetes/kube-docker-files/kube-proxy.docker_tag)
local api_servers="--master=https://${KUBERNETES_MASTER_NAME}"
local params="${KUBEPROXY_TEST_LOG_LEVEL:-"--v=2"}"
if [[ -n "${FEATURE_GATES:-}" ]]; then
params+=" --feature-gates=${FEATURE_GATES}"
fi
if [[ "${KUBE_PROXY_MODE:-}" == "ipvs" ]];then
sudo modprobe -a ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh nf_conntrack_ipv4
if [[ $? -eq 0 ]];
then
params+=" --proxy-mode=ipvs"
else
# If IPVS modules are not present, make sure the node does not come up as
# healthy.
exit 1
fi
fi
params+=" --iptables-sync-period=1m --iptables-min-sync-period=10s --ipvs-sync-period=1m --ipvs-min-sync-period=10s"
if [[ -n "${KUBEPROXY_TEST_ARGS:-}" ]]; then
params+=" ${KUBEPROXY_TEST_ARGS}"
fi
local container_env=""
local kube_cache_mutation_detector_env_name=""
local kube_cache_mutation_detector_env_value=""
if [[ -n "${ENABLE_CACHE_MUTATION_DETECTOR:-}" ]]; then
container_env="env:"
kube_cache_mutation_detector_env_name="- name: KUBE_CACHE_MUTATION_DETECTOR"
kube_cache_mutation_detector_env_value="value: \"${ENABLE_CACHE_MUTATION_DETECTOR}\""
fi
sed -i -e "s@{{kubeconfig}}@${kubeconfig}@g" ${src_file}
sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${kube_docker_registry}@g" ${src_file}
sed -i -e "s@{{pillar\['kube-proxy_docker_tag'\]}}@${kube_proxy_docker_tag}@g" ${src_file}
sed -i -e "s@{{params}}@${params}@g" ${src_file}
sed -i -e "s@{{container_env}}@${container_env}@g" ${src_file}
sed -i -e "s@{{kube_cache_mutation_detector_env_name}}@${kube_cache_mutation_detector_env_name}@g" ${src_file}
sed -i -e "s@{{kube_cache_mutation_detector_env_value}}@${kube_cache_mutation_detector_env_value}@g" ${src_file}
sed -i -e "s@{{ cpurequest }}@100m@g" ${src_file}
sed -i -e "s@{{api_servers_with_port}}@${api_servers}@g" ${src_file}
sed -i -e "s@{{kubernetes_service_host_env_value}}@${KUBERNETES_MASTER_NAME}@g" ${src_file}
if [[ -n "${CLUSTER_IP_RANGE:-}" ]]; then
sed -i -e "s@{{cluster_cidr}}@--cluster-cidr=${CLUSTER_IP_RANGE}@g" ${src_file}
fi
}
# Starts kube-proxy static pod.
function start-kube-proxy {
echo "Start kube-proxy static pod"
prepare-log-file /var/log/kube-proxy.log
local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/kube-proxy.manifest"
prepare-kube-proxy-manifest-variables "${src_file}"
cp "${src_file}" /etc/kubernetes/manifests
}
# Replaces the variables in the etcd manifest file with the real values, and then
# copy the file to the manifest dir
# $1: value for variable 'suffix'
# $2: value for variable 'port'
# $3: value for variable 'server_port'
# $4: value for variable 'cpulimit'
# $5: pod name, which should be either etcd or etcd-events
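# Example: prepare-etcd-manifest "" "2379" "2380" "200m" "etcd.manifest" (see start-etcd-servers below).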
function prepare-etcd-manifest {
local host_name=${ETCD_HOSTNAME:-$(hostname -s)}
local host_ip=$(python -c "import socket;print(socket.gethostbyname(\"${host_name}\"))")
local etcd_cluster=""
local cluster_state="new"
local etcd_protocol="http"
local etcd_creds=""
local etcd_apiserver_creds="${ETCD_APISERVER_CREDS:-}"
local etcd_extra_args="${ETCD_EXTRA_ARGS:-}"
if [[ -n "${INITIAL_ETCD_CLUSTER_STATE:-}" ]]; then
cluster_state="${INITIAL_ETCD_CLUSTER_STATE}"
fi
if [[ -n "${ETCD_CA_KEY:-}" && -n "${ETCD_CA_CERT:-}" && -n "${ETCD_PEER_KEY:-}" && -n "${ETCD_PEER_CERT:-}" ]]; then
etcd_creds=" --peer-trusted-ca-file /etc/srv/kubernetes/etcd-ca.crt --peer-cert-file /etc/srv/kubernetes/etcd-peer.crt --peer-key-file /etc/srv/kubernetes/etcd-peer.key -peer-client-cert-auth "
etcd_protocol="https"
fi
if [[ -n "${ETCD_APISERVER_CA_KEY:-}" && -n "${ETCD_APISERVER_CA_CERT:-}" && -n "${ETCD_APISERVER_SERVER_KEY:-}" && -n "${ETCD_APISERVER_SERVER_CERT:-}" ]]; then
etcd_apiserver_creds=" --client-cert-auth --trusted-ca-file ${ETCD_APISERVER_CA_CERT_PATH} --cert-file ${ETCD_APISERVER_SERVER_CERT_PATH} --key-file ${ETCD_APISERVER_SERVER_KEY_PATH} "
fi
for host in $(echo "${INITIAL_ETCD_CLUSTER:-${host_name}}" | tr "," "\n"); do
etcd_host="etcd-${host}=${etcd_protocol}://${host}:$3"
if [[ -n "${etcd_cluster}" ]]; then
etcd_cluster+=","
fi
etcd_cluster+="${etcd_host}"
done
local -r temp_file="/tmp/$5"
cp "${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/etcd.manifest" "${temp_file}"
sed -i -e "s@{{ *suffix *}}@$1@g" "${temp_file}"
sed -i -e "s@{{ *port *}}@$2@g" "${temp_file}"
sed -i -e "s@{{ *server_port *}}@$3@g" "${temp_file}"
sed -i -e "s@{{ *cpulimit *}}@\"$4\"@g" "${temp_file}"
sed -i -e "s@{{ *hostname *}}@$host_name@g" "${temp_file}"
sed -i -e "s@{{ *host_ip *}}@$host_ip@g" "${temp_file}"
sed -i -e "s@{{ *etcd_cluster *}}@$etcd_cluster@g" "${temp_file}"
sed -i -e "s@{{ *liveness_probe_initial_delay *}}@${ETCD_LIVENESS_PROBE_INITIAL_DELAY_SEC:-15}@g" "${temp_file}"
# Get default storage backend from manifest file.
local -r default_storage_backend=$(cat "${temp_file}" | \
grep -o "{{ *pillar\.get('storage_backend', '\(.*\)') *}}" | \
sed -e "s@{{ *pillar\.get('storage_backend', '\(.*\)') *}}@\1@g")
if [[ -n "${STORAGE_BACKEND:-}" ]]; then
sed -i -e "s@{{ *pillar\.get('storage_backend', '\(.*\)') *}}@${STORAGE_BACKEND}@g" "${temp_file}"
else
sed -i -e "s@{{ *pillar\.get('storage_backend', '\(.*\)') *}}@\1@g" "${temp_file}"
fi
if [[ "${STORAGE_BACKEND:-${default_storage_backend}}" == "etcd3" ]]; then
sed -i -e "s@{{ *quota_bytes *}}@--quota-backend-bytes=${ETCD_QUOTA_BACKEND_BYTES:-4294967296}@g" "${temp_file}"
else
sed -i -e "s@{{ *quota_bytes *}}@@g" "${temp_file}"
fi
sed -i -e "s@{{ *cluster_state *}}@$cluster_state@g" "${temp_file}"
if [[ -n "${ETCD_IMAGE:-}" ]]; then
sed -i -e "s@{{ *pillar\.get('etcd_docker_tag', '\(.*\)') *}}@${ETCD_IMAGE}@g" "${temp_file}"
else
sed -i -e "s@{{ *pillar\.get('etcd_docker_tag', '\(.*\)') *}}@\1@g" "${temp_file}"
fi
if [[ -n "${ETCD_DOCKER_REPOSITORY:-}" ]]; then
sed -i -e "s@{{ *pillar\.get('etcd_docker_repository', '\(.*\)') *}}@${ETCD_DOCKER_REPOSITORY}@g" "${temp_file}"
else
sed -i -e "s@{{ *pillar\.get('etcd_docker_repository', '\(.*\)') *}}@\1@g" "${temp_file}"
fi
sed -i -e "s@{{ *etcd_protocol *}}@$etcd_protocol@g" "${temp_file}"
sed -i -e "s@{{ *etcd_creds *}}@$etcd_creds@g" "${temp_file}"
sed -i -e "s@{{ *etcd_apiserver_creds *}}@$etcd_apiserver_creds@g" "${temp_file}"
sed -i -e "s@{{ *etcd_extra_args *}}@$etcd_extra_args@g" "${temp_file}"
if [[ -n "${ETCD_VERSION:-}" ]]; then
sed -i -e "s@{{ *pillar\.get('etcd_version', '\(.*\)') *}}@${ETCD_VERSION}@g" "${temp_file}"
else
sed -i -e "s@{{ *pillar\.get('etcd_version', '\(.*\)') *}}@\1@g" "${temp_file}"
fi
# Replace the volume host path.
sed -i -e "s@/mnt/master-pd/var/etcd@/mnt/disks/master-pd/var/etcd@g" "${temp_file}"
mv "${temp_file}" /etc/kubernetes/manifests
}
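# Copies the etcd-empty-dir-cleanup static pod manifest into /etc/kubernetes/manifests.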
function start-etcd-empty-dir-cleanup-pod {
local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/etcd-empty-dir-cleanup.yaml"
cp "${src_file}" "/etc/kubernetes/manifests"
}
# Starts etcd server pod (and etcd-events pod if needed).
# More specifically, it prepares dirs and files, sets the variable values
# in the manifests, and copies them to /etc/kubernetes/manifests.
function start-etcd-servers {
echo "Start etcd pods"
if [[ -d /etc/etcd ]]; then
rm -rf /etc/etcd
fi
if [[ -e /etc/default/etcd ]]; then
rm -f /etc/default/etcd
fi
if [[ -e /etc/systemd/system/etcd.service ]]; then
rm -f /etc/systemd/system/etcd.service
fi
if [[ -e /etc/init.d/etcd ]]; then
rm -f /etc/init.d/etcd
fi
prepare-log-file /var/log/etcd.log
prepare-etcd-manifest "" "2379" "2380" "200m" "etcd.manifest"
prepare-log-file /var/log/etcd-events.log
prepare-etcd-manifest "-events" "4002" "2381" "100m" "etcd-events.manifest"
}
# Calculates the following variables based on env variables, which will be used
# by the manifests of several kube-master components.
# CLOUD_CONFIG_OPT
# CLOUD_CONFIG_VOLUME
# CLOUD_CONFIG_MOUNT
# DOCKER_REGISTRY
# FLEXVOLUME_HOSTPATH_MOUNT
# FLEXVOLUME_HOSTPATH_VOLUME
function compute-master-manifest-variables {
CLOUD_CONFIG_OPT=""
CLOUD_CONFIG_VOLUME=""
CLOUD_CONFIG_MOUNT=""
if [[ -f /etc/gce.conf ]]; then
CLOUD_CONFIG_OPT="--cloud-config=/etc/gce.conf"
CLOUD_CONFIG_VOLUME="{\"name\": \"cloudconfigmount\",\"hostPath\": {\"path\": \"/etc/gce.conf\", \"type\": \"FileOrCreate\"}},"
CLOUD_CONFIG_MOUNT="{\"name\": \"cloudconfigmount\",\"mountPath\": \"/etc/gce.conf\", \"readOnly\": true},"
fi
DOCKER_REGISTRY="k8s.gcr.io"
if [[ -n "${KUBE_DOCKER_REGISTRY:-}" ]]; then
DOCKER_REGISTRY="${KUBE_DOCKER_REGISTRY}"
fi
FLEXVOLUME_HOSTPATH_MOUNT=""
FLEXVOLUME_HOSTPATH_VOLUME=""
if [[ -n "${VOLUME_PLUGIN_DIR:-}" ]]; then
FLEXVOLUME_HOSTPATH_MOUNT="{ \"name\": \"flexvolumedir\", \"mountPath\": \"${VOLUME_PLUGIN_DIR}\", \"readOnly\": true},"
FLEXVOLUME_HOSTPATH_VOLUME="{ \"name\": \"flexvolumedir\", \"hostPath\": {\"path\": \"${VOLUME_PLUGIN_DIR}\"}},"
fi
}
# A helper function that bind mounts kubelet dirs for running mount in a chroot
function prepare-mounter-rootfs {
echo "Prepare containerized mounter"
mount --bind "${CONTAINERIZED_MOUNTER_HOME}" "${CONTAINERIZED_MOUNTER_HOME}"
mount -o remount,exec "${CONTAINERIZED_MOUNTER_HOME}"
CONTAINERIZED_MOUNTER_ROOTFS="${CONTAINERIZED_MOUNTER_HOME}/rootfs"
mount --rbind /var/lib/kubelet/ "${CONTAINERIZED_MOUNTER_ROOTFS}/var/lib/kubelet"
mount --make-rshared "${CONTAINERIZED_MOUNTER_ROOTFS}/var/lib/kubelet"
mount --bind -o ro /proc "${CONTAINERIZED_MOUNTER_ROOTFS}/proc"
mount --bind -o ro /dev "${CONTAINERIZED_MOUNTER_ROOTFS}/dev"
cp /etc/resolv.conf "${CONTAINERIZED_MOUNTER_ROOTFS}/etc/"
}
# Starts kubernetes apiserver.
# It prepares the log file, loads the docker image, calculates variables, sets them
# in the manifest file, and then copies the manifest file to /etc/kubernetes/manifests.
#
# Assumed vars (which are calculated in function compute-master-manifest-variables)
# CLOUD_CONFIG_OPT
# CLOUD_CONFIG_VOLUME
# CLOUD_CONFIG_MOUNT
# DOCKER_REGISTRY
function start-kube-apiserver {
echo "Start kubernetes api-server"
prepare-log-file "${KUBE_API_SERVER_LOG_PATH:-/var/log/kube-apiserver.log}"
prepare-log-file "${KUBE_API_SERVER_AUDIT_LOG_PATH:-/var/log/kube-apiserver-audit.log}"
# Calculate variables and assemble the command line.
local params="${API_SERVER_TEST_LOG_LEVEL:-"--v=2"} ${APISERVER_TEST_ARGS:-} ${CLOUD_CONFIG_OPT}"
params+=" --address=127.0.0.1"
params+=" --allow-privileged=true"
params+=" --cloud-provider=gce"
params+=" --client-ca-file=${CA_CERT_BUNDLE_PATH}"
params+=" --etcd-servers=${ETCD_SERVERS:-http://127.0.0.1:2379}"
if [[ -z "${ETCD_SERVERS:-}" ]]; then
params+=" --etcd-servers-overrides=${ETCD_SERVERS_OVERRIDES:-/events#http://127.0.0.1:4002}"
elif [[ -n "${ETCD_SERVERS_OVERRIDES:-}" ]]; then
params+=" --etcd-servers-overrides=${ETCD_SERVERS_OVERRIDES:-}"
fi
if [[ -n "${ETCD_APISERVER_CA_KEY:-}" && -n "${ETCD_APISERVER_CA_CERT:-}" && -n "${ETCD_APISERVER_CLIENT_KEY:-}" && -n "${ETCD_APISERVER_CLIENT_CERT:-}" ]]; then
params+=" --etcd-cafile=${ETCD_APISERVER_CA_CERT_PATH}"
params+=" --etcd-certfile=${ETCD_APISERVER_CLIENT_CERT_PATH}"
params+=" --etcd-keyfile=${ETCD_APISERVER_CLIENT_KEY_PATH}"
fi
params+=" --secure-port=443"
params+=" --tls-cert-file=${APISERVER_SERVER_CERT_PATH}"
params+=" --tls-private-key-file=${APISERVER_SERVER_KEY_PATH}"
params+=" --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname"
if [[ -s "${REQUESTHEADER_CA_CERT_PATH:-}" ]]; then
params+=" --requestheader-client-ca-file=${REQUESTHEADER_CA_CERT_PATH}"
params+=" --requestheader-allowed-names=aggregator"
params+=" --requestheader-extra-headers-prefix=X-Remote-Extra-"
params+=" --requestheader-group-headers=X-Remote-Group"
params+=" --requestheader-username-headers=X-Remote-User"
params+=" --proxy-client-cert-file=${PROXY_CLIENT_CERT_PATH}"
params+=" --proxy-client-key-file=${PROXY_CLIENT_KEY_PATH}"
fi
params+=" --enable-aggregator-routing=true"
if [[ -e "${APISERVER_CLIENT_CERT_PATH}" ]] && [[ -e "${APISERVER_CLIENT_KEY_PATH}" ]]; then
params+=" --kubelet-client-certificate=${APISERVER_CLIENT_CERT_PATH}"
params+=" --kubelet-client-key=${APISERVER_CLIENT_KEY_PATH}"
fi
if [[ -n "${SERVICEACCOUNT_CERT_PATH:-}" ]]; then
params+=" --service-account-key-file=${SERVICEACCOUNT_CERT_PATH}"
fi
params+=" --token-auth-file=/etc/srv/kubernetes/known_tokens.csv"
if [[ -n "${KUBE_PASSWORD:-}" && -n "${KUBE_USER:-}" ]]; then
params+=" --basic-auth-file=/etc/srv/kubernetes/basic_auth.csv"
fi
if [[ -n "${STORAGE_BACKEND:-}" ]]; then
params+=" --storage-backend=${STORAGE_BACKEND}"
fi
if [[ -n "${STORAGE_MEDIA_TYPE:-}" ]]; then
params+=" --storage-media-type=${STORAGE_MEDIA_TYPE}"
fi
if [[ -n "${ETCD_COMPACTION_INTERVAL_SEC:-}" ]]; then
params+=" --etcd-compaction-interval=${ETCD_COMPACTION_INTERVAL_SEC}s"
fi
if [[ -n "${KUBE_APISERVER_REQUEST_TIMEOUT_SEC:-}" ]]; then
params+=" --request-timeout=${KUBE_APISERVER_REQUEST_TIMEOUT_SEC}s"
fi
if [[ -n "${ENABLE_GARBAGE_COLLECTOR:-}" ]]; then
params+=" --enable-garbage-collector=${ENABLE_GARBAGE_COLLECTOR}"
fi
if [[ -n "${NUM_NODES:-}" ]]; then
# If the cluster is large, increase max-requests-inflight limit in apiserver.
if [[ "${NUM_NODES}" -ge 3000 ]]; then
params+=" --max-requests-inflight=3000 --max-mutating-requests-inflight=1000"
elif [[ "${NUM_NODES}" -ge 1000 ]]; then
params+=" --max-requests-inflight=1500 --max-mutating-requests-inflight=500"
fi
# Set amount of memory available for apiserver based on number of nodes.
# TODO: Once we start setting proper requests and limits for apiserver
# we should reuse the same logic here instead of current heuristic.
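# For example, with the 60 MB-per-node heuristic below, a 1000-node cluster gets --target-ram-mb=60000.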
params+=" --target-ram-mb=$((${NUM_NODES} * 60))"
fi
if [[ -n "${SERVICE_CLUSTER_IP_RANGE:-}" ]]; then
params+=" --service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}"
fi
params+=" --service-account-issuer=${SERVICEACCOUNT_ISSUER}"
params+=" --service-account-api-audiences=${SERVICEACCOUNT_ISSUER}"
params+=" --service-account-signing-key-file=${SERVICEACCOUNT_KEY_PATH}"
local audit_policy_config_mount=""
local audit_policy_config_volume=""
local audit_webhook_config_mount=""
local audit_webhook_config_volume=""
if [[ "${ENABLE_APISERVER_ADVANCED_AUDIT:-}" == "true" ]]; then
local -r audit_policy_file="/etc/audit_policy.config"
params+=" --audit-policy-file=${audit_policy_file}"
# Create the audit policy file, and mount it into the apiserver pod.
create-master-audit-policy "${audit_policy_file}" "${ADVANCED_AUDIT_POLICY:-}"
audit_policy_config_mount="{\"name\": \"auditpolicyconfigmount\",\"mountPath\": \"${audit_policy_file}\", \"readOnly\": true},"
audit_policy_config_volume="{\"name\": \"auditpolicyconfigmount\",\"hostPath\": {\"path\": \"${audit_policy_file}\", \"type\": \"FileOrCreate\"}},"
if [[ "${ADVANCED_AUDIT_BACKEND:-log}" == *"log"* ]]; then
# The advanced audit log backend config matches the basic audit log config.
params+=" --audit-log-path=/var/log/kube-apiserver-audit.log"
params+=" --audit-log-maxage=0"
params+=" --audit-log-maxbackup=0"
# Lumberjack doesn't offer any way to disable size-based rotation. It also
# has an in-memory counter that doesn't notice if you truncate the file.
# 2000000000 (in MiB) is a large number that fits in 31 bits. If the log
# grows at 10MiB/s (~30K QPS), it will rotate after ~6 years if apiserver
# never restarts. Please manually restart apiserver before this time.
params+=" --audit-log-maxsize=2000000000"
# Batching parameters
if [[ -n "${ADVANCED_AUDIT_LOG_MODE:-}" ]]; then
params+=" --audit-log-mode=${ADVANCED_AUDIT_LOG_MODE}"
fi
if [[ -n "${ADVANCED_AUDIT_LOG_BUFFER_SIZE:-}" ]]; then
params+=" --audit-log-batch-buffer-size=${ADVANCED_AUDIT_LOG_BUFFER_SIZE}"
fi
if [[ -n "${ADVANCED_AUDIT_LOG_MAX_BATCH_SIZE:-}" ]]; then
params+=" --audit-log-batch-max-size=${ADVANCED_AUDIT_LOG_MAX_BATCH_SIZE}"
fi
if [[ -n "${ADVANCED_AUDIT_LOG_MAX_BATCH_WAIT:-}" ]]; then
params+=" --audit-log-batch-max-wait=${ADVANCED_AUDIT_LOG_MAX_BATCH_WAIT}"
fi
if [[ -n "${ADVANCED_AUDIT_LOG_THROTTLE_QPS:-}" ]]; then
params+=" --audit-log-batch-throttle-qps=${ADVANCED_AUDIT_LOG_THROTTLE_QPS}"
fi
if [[ -n "${ADVANCED_AUDIT_LOG_THROTTLE_BURST:-}" ]]; then
params+=" --audit-log-batch-throttle-burst=${ADVANCED_AUDIT_LOG_THROTTLE_BURST}"
fi
if [[ -n "${ADVANCED_AUDIT_LOG_INITIAL_BACKOFF:-}" ]]; then
params+=" --audit-log-initial-backoff=${ADVANCED_AUDIT_LOG_INITIAL_BACKOFF}"
fi
# Truncating backend parameters
if [[ -n "${ADVANCED_AUDIT_TRUNCATING_BACKEND:-}" ]]; then
params+=" --audit-log-truncate-enabled=${ADVANCED_AUDIT_TRUNCATING_BACKEND}"
fi
fi
if [[ "${ADVANCED_AUDIT_BACKEND:-}" == *"webhook"* ]]; then
# Create the audit webhook config file, and mount it into the apiserver pod.
local -r audit_webhook_config_file="/etc/audit_webhook.config"
params+=" --audit-webhook-config-file=${audit_webhook_config_file}"
create-master-audit-webhook-config "${audit_webhook_config_file}"
audit_webhook_config_mount="{\"name\": \"auditwebhookconfigmount\",\"mountPath\": \"${audit_webhook_config_file}\", \"readOnly\": true},"
audit_webhook_config_volume="{\"name\": \"auditwebhookconfigmount\",\"hostPath\": {\"path\": \"${audit_webhook_config_file}\", \"type\": \"FileOrCreate\"}},"
# Batching parameters
if [[ -n "${ADVANCED_AUDIT_WEBHOOK_MODE:-}" ]]; then
params+=" --audit-webhook-mode=${ADVANCED_AUDIT_WEBHOOK_MODE}"
else
params+=" --audit-webhook-mode=batch"
fi
if [[ -n "${ADVANCED_AUDIT_WEBHOOK_BUFFER_SIZE:-}" ]]; then
params+=" --audit-webhook-batch-buffer-size=${ADVANCED_AUDIT_WEBHOOK_BUFFER_SIZE}"
fi
if [[ -n "${ADVANCED_AUDIT_WEBHOOK_MAX_BATCH_SIZE:-}" ]]; then
params+=" --audit-webhook-batch-max-size=${ADVANCED_AUDIT_WEBHOOK_MAX_BATCH_SIZE}"
fi
if [[ -n "${ADVANCED_AUDIT_WEBHOOK_MAX_BATCH_WAIT:-}" ]]; then
params+=" --audit-webhook-batch-max-wait=${ADVANCED_AUDIT_WEBHOOK_MAX_BATCH_WAIT}"
fi
if [[ -n "${ADVANCED_AUDIT_WEBHOOK_THROTTLE_QPS:-}" ]]; then
params+=" --audit-webhook-batch-throttle-qps=${ADVANCED_AUDIT_WEBHOOK_THROTTLE_QPS}"
fi
if [[ -n "${ADVANCED_AUDIT_WEBHOOK_THROTTLE_BURST:-}" ]]; then
params+=" --audit-webhook-batch-throttle-burst=${ADVANCED_AUDIT_WEBHOOK_THROTTLE_BURST}"
fi
if [[ -n "${ADVANCED_AUDIT_WEBHOOK_INITIAL_BACKOFF:-}" ]]; then
params+=" --audit-webhook-initial-backoff=${ADVANCED_AUDIT_WEBHOOK_INITIAL_BACKOFF}"
fi
# Truncating backend parameters
if [[ -n "${ADVANCED_AUDIT_TRUNCATING_BACKEND:-}" ]]; then
params+=" --audit-webhook-truncate-enabled=${ADVANCED_AUDIT_TRUNCATING_BACKEND}"
fi
fi
fi
if [[ "${ENABLE_APISERVER_LOGS_HANDLER:-}" == "false" ]]; then
params+=" --enable-logs-handler=false"
fi
if [[ "${APISERVER_SET_KUBELET_CA:-false}" == "true" ]]; then
params+=" --kubelet-certificate-authority=${CA_CERT_BUNDLE_PATH}"
fi
local admission_controller_config_mount=""
local admission_controller_config_volume=""
local image_policy_webhook_config_mount=""
local image_policy_webhook_config_volume=""
if [[ -n "${ADMISSION_CONTROL:-}" ]]; then
params+=" --admission-control=${ADMISSION_CONTROL}"
if [[ ${ADMISSION_CONTROL} == *"ImagePolicyWebhook"* ]]; then
params+=" --admission-control-config-file=/etc/admission_controller.config"
# Mount the file to configure admission controllers if ImagePolicyWebhook is set.
admission_controller_config_mount="{\"name\": \"admissioncontrollerconfigmount\",\"mountPath\": \"/etc/admission_controller.config\", \"readOnly\": false},"
admission_controller_config_volume="{\"name\": \"admissioncontrollerconfigmount\",\"hostPath\": {\"path\": \"/etc/admission_controller.config\", \"type\": \"FileOrCreate\"}},"
# Mount the file to configure the ImagePolicyWebhook's webhook.
image_policy_webhook_config_mount="{\"name\": \"imagepolicywebhookconfigmount\",\"mountPath\": \"/etc/gcp_image_review.config\", \"readOnly\": false},"
image_policy_webhook_config_volume="{\"name\": \"imagepolicywebhookconfigmount\",\"hostPath\": {\"path\": \"/etc/gcp_image_review.config\", \"type\": \"FileOrCreate\"}},"
fi
fi
if [[ -n "${KUBE_APISERVER_REQUEST_TIMEOUT:-}" ]]; then
params+=" --min-request-timeout=${KUBE_APISERVER_REQUEST_TIMEOUT}"
fi
if [[ -n "${RUNTIME_CONFIG:-}" ]]; then
params+=" --runtime-config=${RUNTIME_CONFIG}"
fi
if [[ -n "${FEATURE_GATES:-}" ]]; then
params+=" --feature-gates=${FEATURE_GATES}"
fi
if [[ -n "${MASTER_ADVERTISE_ADDRESS:-}" ]]; then
params+=" --advertise-address=${MASTER_ADVERTISE_ADDRESS}"
if [[ -n "${PROXY_SSH_USER:-}" ]]; then
params+=" --ssh-user=${PROXY_SSH_USER}"
params+=" --ssh-keyfile=/etc/srv/sshproxy/.sshkeyfile"
fi
elif [[ -n "${PROJECT_ID:-}" && -n "${TOKEN_URL:-}" && -n "${TOKEN_BODY:-}" && -n "${NODE_NETWORK:-}" ]]; then
local -r vm_external_ip=$(get-metadata-value "instance/network-interfaces/0/access-configs/0/external-ip")
if [[ -n "${PROXY_SSH_USER:-}" ]]; then
params+=" --advertise-address=${vm_external_ip}"
params+=" --ssh-user=${PROXY_SSH_USER}"
params+=" --ssh-keyfile=/etc/srv/sshproxy/.sshkeyfile"
fi
fi
local webhook_authn_config_mount=""
local webhook_authn_config_volume=""
if [[ -n "${GCP_AUTHN_URL:-}" ]]; then
params+=" --authentication-token-webhook-config-file=/etc/gcp_authn.config"
webhook_authn_config_mount="{\"name\": \"webhookauthnconfigmount\",\"mountPath\": \"/etc/gcp_authn.config\", \"readOnly\": false},"
webhook_authn_config_volume="{\"name\": \"webhookauthnconfigmount\",\"hostPath\": {\"path\": \"/etc/gcp_authn.config\", \"type\": \"FileOrCreate\"}},"
if [[ -n "${GCP_AUTHN_CACHE_TTL:-}" ]]; then
params+=" --authentication-token-webhook-cache-ttl=${GCP_AUTHN_CACHE_TTL}"
fi
fi
local authorization_mode="RBAC"
local -r src_dir="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty"
# Enable ABAC mode unless the user explicitly opts out with ENABLE_LEGACY_ABAC=false
if [[ "${ENABLE_LEGACY_ABAC:-}" != "false" ]]; then
echo "Warning: Enabling legacy ABAC policy. All service accounts will have superuser API access. Set ENABLE_LEGACY_ABAC=false to disable this."
# Create the ABAC file if it doesn't exist yet, or if we have a KUBE_USER set (to ensure the right user is given permissions)
if [[ -n "${KUBE_USER:-}" || ! -e /etc/srv/kubernetes/abac-authz-policy.jsonl ]]; then
local -r abac_policy_json="${src_dir}/abac-authz-policy.jsonl"
if [[ -n "${KUBE_USER:-}" ]]; then
sed -i -e "s/{{kube_user}}/${KUBE_USER}/g" "${abac_policy_json}"
else
sed -i -e "/{{kube_user}}/d" "${abac_policy_json}"
fi
cp "${abac_policy_json}" /etc/srv/kubernetes/
fi
params+=" --authorization-policy-file=/etc/srv/kubernetes/abac-authz-policy.jsonl"
authorization_mode+=",ABAC"
fi
local webhook_config_mount=""
local webhook_config_volume=""
if [[ -n "${GCP_AUTHZ_URL:-}" ]]; then
authorization_mode="${authorization_mode},Webhook"
params+=" --authorization-webhook-config-file=/etc/gcp_authz.config"
webhook_config_mount="{\"name\": \"webhookconfigmount\",\"mountPath\": \"/etc/gcp_authz.config\", \"readOnly\": false},"
webhook_config_volume="{\"name\": \"webhookconfigmount\",\"hostPath\": {\"path\": \"/etc/gcp_authz.config\", \"type\": \"FileOrCreate\"}},"
if [[ -n "${GCP_AUTHZ_CACHE_AUTHORIZED_TTL:-}" ]]; then
params+=" --authorization-webhook-cache-authorized-ttl=${GCP_AUTHZ_CACHE_AUTHORIZED_TTL}"
fi
if [[ -n "${GCP_AUTHZ_CACHE_UNAUTHORIZED_TTL:-}" ]]; then
params+=" --authorization-webhook-cache-unauthorized-ttl=${GCP_AUTHZ_CACHE_UNAUTHORIZED_TTL}"
fi
fi
authorization_mode="Node,${authorization_mode}"
params+=" --authorization-mode=${authorization_mode}"
local container_env=""
if [[ -n "${ENABLE_CACHE_MUTATION_DETECTOR:-}" ]]; then
container_env+="{\"name\": \"KUBE_CACHE_MUTATION_DETECTOR\", \"value\": \"${ENABLE_CACHE_MUTATION_DETECTOR}\"}"
fi
if [[ -n "${ENABLE_PATCH_CONVERSION_DETECTOR:-}" ]]; then
if [[ -n "${container_env}" ]]; then
container_env="${container_env}, "
fi
container_env+="{\"name\": \"KUBE_PATCH_CONVERSION_DETECTOR\", \"value\": \"${ENABLE_PATCH_CONVERSION_DETECTOR}\"}"
fi
if [[ -n "${container_env}" ]]; then
container_env="\"env\":[${container_env}],"
fi
local -r src_file="${src_dir}/kube-apiserver.manifest"
# params is passed by reference, so no "$"
setup-etcd-encryption "${src_file}" params
# Evaluate variables.
local -r kube_apiserver_docker_tag="${KUBE_API_SERVER_DOCKER_TAG:-$(cat /home/kubernetes/kube-docker-files/kube-apiserver.docker_tag)}"
sed -i -e "s@{{params}}@${params}@g" "${src_file}"
sed -i -e "s@{{container_env}}@${container_env}@g" ${src_file}
sed -i -e "s@{{srv_sshproxy_path}}@/etc/srv/sshproxy@g" "${src_file}"
sed -i -e "s@{{cloud_config_mount}}@${CLOUD_CONFIG_MOUNT}@g" "${src_file}"
sed -i -e "s@{{cloud_config_volume}}@${CLOUD_CONFIG_VOLUME}@g" "${src_file}"
sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${DOCKER_REGISTRY}@g" "${src_file}"
sed -i -e "s@{{pillar\['kube-apiserver_docker_tag'\]}}@${kube_apiserver_docker_tag}@g" "${src_file}"
sed -i -e "s@{{pillar\['allow_privileged'\]}}@true@g" "${src_file}"
sed -i -e "s@{{liveness_probe_initial_delay}}@${KUBE_APISERVER_LIVENESS_PROBE_INITIAL_DELAY_SEC:-15}@g" "${src_file}"
sed -i -e "s@{{secure_port}}@443@g" "${src_file}"
sed -i -e "s@{{secure_port}}@8080@g" "${src_file}"
sed -i -e "s@{{additional_cloud_config_mount}}@@g" "${src_file}"
sed -i -e "s@{{additional_cloud_config_volume}}@@g" "${src_file}"
sed -i -e "s@{{webhook_authn_config_mount}}@${webhook_authn_config_mount}@g" "${src_file}"
sed -i -e "s@{{webhook_authn_config_volume}}@${webhook_authn_config_volume}@g" "${src_file}"
sed -i -e "s@{{webhook_config_mount}}@${webhook_config_mount}@g" "${src_file}"
sed -i -e "s@{{webhook_config_volume}}@${webhook_config_volume}@g" "${src_file}"
sed -i -e "s@{{audit_policy_config_mount}}@${audit_policy_config_mount}@g" "${src_file}"
sed -i -e "s@{{audit_policy_config_volume}}@${audit_policy_config_volume}@g" "${src_file}"
sed -i -e "s@{{audit_webhook_config_mount}}@${audit_webhook_config_mount}@g" "${src_file}"
sed -i -e "s@{{audit_webhook_config_volume}}@${audit_webhook_config_volume}@g" "${src_file}"
sed -i -e "s@{{admission_controller_config_mount}}@${admission_controller_config_mount}@g" "${src_file}"
sed -i -e "s@{{admission_controller_config_volume}}@${admission_controller_config_volume}@g" "${src_file}"
sed -i -e "s@{{image_policy_webhook_config_mount}}@${image_policy_webhook_config_mount}@g" "${src_file}"
sed -i -e "s@{{image_policy_webhook_config_volume}}@${image_policy_webhook_config_volume}@g" "${src_file}"
cp "${src_file}" "${ETC_MANIFESTS:-/etc/kubernetes/manifests}"
}
# Sets-up etcd encryption.
# Configuration of etcd-level encryption consists of the following steps:
# 1. Writing encryption provider config to disk
# 2. Adding encryption-provider-config flag to kube-apiserver
# 3. Adding kms-socket-vol and kms-socket-vol-mnt to enable communication with kms-plugin (if requested)
#
# Expects parameters:
# $1 - path to kube-apiserver template
# $2 - kube-apiserver startup flags (must be passed by reference)
#
# Assumes vars (supplied via kube-env):
# ENCRYPTION_PROVIDER_CONFIG
# CLOUD_KMS_INTEGRATION
# ENCRYPTION_PROVIDER_CONFIG_PATH (will default to /etc/srv/kubernetes/encryption-provider-config.yml)
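#
# For illustration only (the config is supplied pre-encoded via kube-env, not generated
# here), the decoded ENCRYPTION_PROVIDER_CONFIG is expected to be an
# EncryptionConfiguration manifest roughly along these lines (apiVersion may differ by
# cluster version):
#
#   apiVersion: apiserver.config.k8s.io/v1
#   kind: EncryptionConfiguration
#   resources:
#   - resources: ["secrets"]
#     providers:
#     - aescbc:
#         keys:
#         - name: key1
#           secret: <base64-encoded 32-byte key>
#     - identity: {}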
function setup-etcd-encryption {
local kube_apiserver_template_path
local -n kube_api_server_params
local default_encryption_provider_config_vol
local default_encryption_provider_config_vol_mnt
local encryption_provider_config_vol_mnt
local encryption_provider_config_vol
local default_kms_socket_dir
local default_kms_socket_vol_mnt
local default_kms_socket_vol
local kms_socket_vol_mnt
local kms_socket_vol
local encryption_provider_config_path
kube_apiserver_template_path="$1"
if [[ -z "${ENCRYPTION_PROVIDER_CONFIG:-}" ]]; then
sed -i -e " {
s@{{encryption_provider_mount}}@@
s@{{encryption_provider_volume}}@@
s@{{kms_socket_mount}}@@
s@{{kms_socket_volume}}@@
} " "${kube_apiserver_template_path}"
return
fi
kube_api_server_params="$2"
encryption_provider_config_path=${ENCRYPTION_PROVIDER_CONFIG_PATH:-/etc/srv/kubernetes/encryption-provider-config.yml}
echo "${ENCRYPTION_PROVIDER_CONFIG}" | base64 --decode > "${encryption_provider_config_path}"
kube_api_server_params+=" --encryption-provider-config=${encryption_provider_config_path}"
default_encryption_provider_config_vol=$(echo "{ \"name\": \"encryptionconfig\", \"hostPath\": {\"path\": \"${encryption_provider_config_path}\", \"type\": \"File\"}}" | base64 | tr -d '\r\n')
default_encryption_provider_config_vol_mnt=$(echo "{ \"name\": \"encryptionconfig\", \"mountPath\": \"${encryption_provider_config_path}\", \"readOnly\": true}" | base64 | tr -d '\r\n')
encryption_provider_config_vol_mnt=$(echo "${ENCRYPTION_PROVIDER_CONFIG_VOL_MNT:-"${default_encryption_provider_config_vol_mnt}"}" | base64 --decode)
encryption_provider_config_vol=$(echo "${ENCRYPTION_PROVIDER_CONFIG_VOL:-"${default_encryption_provider_config_vol}"}" | base64 --decode)
sed -i -e " {
s@{{encryption_provider_mount}}@${encryption_provider_config_vol_mnt},@
s@{{encryption_provider_volume}}@${encryption_provider_config_vol},@
} " "${kube_apiserver_template_path}"
if [[ -n "${CLOUD_KMS_INTEGRATION:-}" ]]; then
default_kms_socket_dir="/var/run/kmsplugin"
default_kms_socket_vol_mnt=$(echo "{ \"name\": \"kmssocket\", \"mountPath\": \"${default_kms_socket_dir}\", \"readOnly\": false}" | base64 | tr -d '\r\n')
default_kms_socket_vol=$(echo "{ \"name\": \"kmssocket\", \"hostPath\": {\"path\": \"${default_kms_socket_dir}\", \"type\": \"DirectoryOrCreate\"}}" | base64 | tr -d '\r\n')
kms_socket_vol_mnt=$(echo "${KMS_PLUGIN_SOCKET_VOL_MNT:-"${default_kms_socket_vol_mnt}"}" | base64 --decode)
kms_socket_vol=$(echo "${KMS_PLUGIN_SOCKET_VOL:-"${default_kms_socket_vol}"}" | base64 --decode)
sed -i -e " {
s@{{kms_socket_mount}}@${kms_socket_vol_mnt},@
s@{{kms_socket_volume}}@${kms_socket_vol},@
} " "${kube_apiserver_template_path}"
else
sed -i -e " {
s@{{kms_socket_mount}}@@
s@{{kms_socket_volume}}@@
} " "${kube_apiserver_template_path}"
fi
}
# Applies encryption provider config.
# This function may be triggered in two scenarios:
# 1. Decryption of etcd
# 2. Encryption of etcd is added after the cluster is deployed
# Both cases require that the existing secrets in etcd be re-processed.
#
# Assumes vars (supplied via kube-env):
# ENCRYPTION_PROVIDER_CONFIG_FORCE
function apply-encryption-config() {
if [[ "${ENCRYPTION_PROVIDER_CONFIG_FORCE:-false}" == "false" ]]; then
return
fi
# need kube-apiserver to be ready
until kubectl get secret; do
sleep ${ENCRYPTION_PROVIDER_CONFIG_FORCE_DELAY:-5}
done
retries=${ENCRYPTION_PROVIDER_CONFIG_FORCE_RETRIES:-5}
# The command below may fail when a conflict is detected during an update on a secret (something
# else updated the secret in the middle of our update).
# TODO: Retry only on errors caused by a conflict.
until (( retries == 0 )); do
# Forces all secrets to be rewritten to etcd, encrypting or decrypting them in the process.
# https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/
if kubectl get secrets --all-namespaces -o json | kubectl replace -f -; then
break
fi
(( retries-- ))
sleep "${ENCRYPTION_PROVIDER_CONFIG_FORCE_RETRY_SLEEP:-3}"
done
}
# Starts kubernetes controller manager.
# It prepares the log file, loads the docker image, calculates variables, sets them
# in the manifest file, and then copies the manifest file to /etc/kubernetes/manifests.
#
# Assumed vars (which are calculated in function compute-master-manifest-variables)
# CLOUD_CONFIG_OPT
# CLOUD_CONFIG_VOLUME
# CLOUD_CONFIG_MOUNT
# DOCKER_REGISTRY
function start-kube-controller-manager {
echo "Start kubernetes controller-manager"
create-kubecontrollermanager-kubeconfig
prepare-log-file /var/log/kube-controller-manager.log
# Calculate variables and assemble the command line.
local params="${CONTROLLER_MANAGER_TEST_LOG_LEVEL:-"--v=2"} ${CONTROLLER_MANAGER_TEST_ARGS:-} ${CLOUD_CONFIG_OPT}"
params+=" --use-service-account-credentials"
params+=" --cloud-provider=gce"
params+=" --kubeconfig=/etc/srv/kubernetes/kube-controller-manager/kubeconfig"
params+=" --root-ca-file=${CA_CERT_BUNDLE_PATH}"
params+=" --service-account-private-key-file=${SERVICEACCOUNT_KEY_PATH}"
if [[ -n "${ENABLE_GARBAGE_COLLECTOR:-}" ]]; then
params+=" --enable-garbage-collector=${ENABLE_GARBAGE_COLLECTOR}"
fi
if [[ -n "${INSTANCE_PREFIX:-}" ]]; then
params+=" --cluster-name=${INSTANCE_PREFIX}"
fi
if [[ -n "${CLUSTER_IP_RANGE:-}" ]]; then
params+=" --cluster-cidr=${CLUSTER_IP_RANGE}"
fi
if [[ -n "${CA_KEY:-}" ]]; then
params+=" --cluster-signing-cert-file=${CA_CERT_PATH}"
params+=" --cluster-signing-key-file=${CA_KEY_PATH}"
fi
if [[ -n "${SERVICE_CLUSTER_IP_RANGE:-}" ]]; then
params+=" --service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}"
fi
if [[ -n "${CONCURRENT_SERVICE_SYNCS:-}" ]]; then
params+=" --concurrent-service-syncs=${CONCURRENT_SERVICE_SYNCS}"
fi
if [[ "${NETWORK_PROVIDER:-}" == "kubenet" ]]; then
params+=" --allocate-node-cidrs=true"
elif [[ -n "${ALLOCATE_NODE_CIDRS:-}" ]]; then
params+=" --allocate-node-cidrs=${ALLOCATE_NODE_CIDRS}"
fi
if [[ -n "${TERMINATED_POD_GC_THRESHOLD:-}" ]]; then
params+=" --terminated-pod-gc-threshold=${TERMINATED_POD_GC_THRESHOLD}"
fi
if [[ "${ENABLE_IP_ALIASES:-}" == 'true' ]]; then
params+=" --cidr-allocator-type=${NODE_IPAM_MODE}"
params+=" --configure-cloud-routes=false"
fi
if [[ -n "${FEATURE_GATES:-}" ]]; then
params+=" --feature-gates=${FEATURE_GATES}"
fi
if [[ -n "${VOLUME_PLUGIN_DIR:-}" ]]; then
params+=" --flex-volume-plugin-dir=${VOLUME_PLUGIN_DIR}"
fi
if [[ -n "${CLUSTER_SIGNING_DURATION:-}" ]]; then
params+=" --experimental-cluster-signing-duration=$CLUSTER_SIGNING_DURATION"
fi
# Disable using HPA metrics REST clients if metrics-server isn't enabled,
# or if we want to explicitly disable it by setting HPA_USE_REST_CLIENTS=false.
if [[ "${ENABLE_METRICS_SERVER:-}" != "true" ]] ||
[[ "${HPA_USE_REST_CLIENTS:-}" == "false" ]]; then
params+=" --horizontal-pod-autoscaler-use-rest-clients=false"
fi
if [[ -n "${PV_RECYCLER_OVERRIDE_TEMPLATE:-}" ]]; then
params+=" --pv-recycler-pod-template-filepath-nfs=$PV_RECYCLER_OVERRIDE_TEMPLATE"
params+=" --pv-recycler-pod-template-filepath-hostpath=$PV_RECYCLER_OVERRIDE_TEMPLATE"
fi
if [[ -n "${RUN_CONTROLLERS:-}" ]]; then
params+=" --controllers=${RUN_CONTROLLERS}"
fi
local -r kube_rc_docker_tag=$(cat /home/kubernetes/kube-docker-files/kube-controller-manager.docker_tag)
local container_env=""
if [[ -n "${ENABLE_CACHE_MUTATION_DETECTOR:-}" ]]; then
container_env="\"env\":[{\"name\": \"KUBE_CACHE_MUTATION_DETECTOR\", \"value\": \"${ENABLE_CACHE_MUTATION_DETECTOR}\"}],"
fi
local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/kube-controller-manager.manifest"
# Evaluate variables.
sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${DOCKER_REGISTRY}@g" "${src_file}"
sed -i -e "s@{{pillar\['kube-controller-manager_docker_tag'\]}}@${kube_rc_docker_tag}@g" "${src_file}"
sed -i -e "s@{{params}}@${params}@g" "${src_file}"
sed -i -e "s@{{container_env}}@${container_env}@g" ${src_file}
sed -i -e "s@{{cloud_config_mount}}@${CLOUD_CONFIG_MOUNT}@g" "${src_file}"
sed -i -e "s@{{cloud_config_volume}}@${CLOUD_CONFIG_VOLUME}@g" "${src_file}"
sed -i -e "s@{{additional_cloud_config_mount}}@@g" "${src_file}"
sed -i -e "s@{{additional_cloud_config_volume}}@@g" "${src_file}"
sed -i -e "s@{{pv_recycler_mount}}@${PV_RECYCLER_MOUNT}@g" "${src_file}"
sed -i -e "s@{{pv_recycler_volume}}@${PV_RECYCLER_VOLUME}@g" "${src_file}"
sed -i -e "s@{{flexvolume_hostpath_mount}}@${FLEXVOLUME_HOSTPATH_MOUNT}@g" "${src_file}"
sed -i -e "s@{{flexvolume_hostpath}}@${FLEXVOLUME_HOSTPATH_VOLUME}@g" "${src_file}"
sed -i -e "s@{{cpurequest}}@${KUBE_CONTROLLER_MANAGER_CPU_REQUEST}@g" "${src_file}"
cp "${src_file}" /etc/kubernetes/manifests
}
# Starts kubernetes scheduler.
# It prepares the log file, loads the docker image, calculates variables, sets them
# in the manifest file, and then copies the manifest file to /etc/kubernetes/manifests.
#
# Assumed vars (which are calculated in compute-master-manifest-variables)
# DOCKER_REGISTRY
function start-kube-scheduler {
echo "Start kubernetes scheduler"
create-kubescheduler-kubeconfig
prepare-log-file /var/log/kube-scheduler.log
# Calculate variables and set them in the manifest.
params="${SCHEDULER_TEST_LOG_LEVEL:-"--v=2"} ${SCHEDULER_TEST_ARGS:-}"
params+=" --kubeconfig=/etc/srv/kubernetes/kube-scheduler/kubeconfig"
if [[ -n "${FEATURE_GATES:-}" ]]; then
params+=" --feature-gates=${FEATURE_GATES}"
fi
if [[ -n "${SCHEDULING_ALGORITHM_PROVIDER:-}" ]]; then
params+=" --algorithm-provider=${SCHEDULING_ALGORITHM_PROVIDER}"
fi
if [[ -n "${SCHEDULER_POLICY_CONFIG:-}" ]]; then
create-kubescheduler-policy-config
params+=" --use-legacy-policy-config"
params+=" --policy-config-file=/etc/srv/kubernetes/kube-scheduler/policy-config"
fi
local -r kube_scheduler_docker_tag=$(cat "${KUBE_HOME}/kube-docker-files/kube-scheduler.docker_tag")
# Remove salt comments and replace variables with values.
local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/kube-scheduler.manifest"
sed -i -e "s@{{params}}@${params}@g" "${src_file}"
sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${DOCKER_REGISTRY}@g" "${src_file}"
sed -i -e "s@{{pillar\['kube-scheduler_docker_tag'\]}}@${kube_scheduler_docker_tag}@g" "${src_file}"
sed -i -e "s@{{cpurequest}}@${KUBE_SCHEDULER_CPU_REQUEST}@g" "${src_file}"
cp "${src_file}" /etc/kubernetes/manifests
}
# Starts cluster autoscaler.
# Assumed vars (which are calculated in function compute-master-manifest-variables)
# CLOUD_CONFIG_OPT
# CLOUD_CONFIG_VOLUME
# CLOUD_CONFIG_MOUNT
function start-cluster-autoscaler {
if [[ "${ENABLE_CLUSTER_AUTOSCALER:-}" == "true" ]]; then
echo "Start kubernetes cluster autoscaler"
setup-addon-manifests "addons" "rbac/cluster-autoscaler"
create-clusterautoscaler-kubeconfig
prepare-log-file /var/log/cluster-autoscaler.log
# Remove salt comments and replace variables with values
local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/cluster-autoscaler.manifest"
local params="${AUTOSCALER_MIG_CONFIG} ${CLOUD_CONFIG_OPT} ${AUTOSCALER_EXPANDER_CONFIG:---expander=price}"
params+=" --kubeconfig=/etc/srv/kubernetes/cluster-autoscaler/kubeconfig"
sed -i -e "s@{{params}}@${params}@g" "${src_file}"
sed -i -e "s@{{cloud_config_mount}}@${CLOUD_CONFIG_MOUNT}@g" "${src_file}"
sed -i -e "s@{{cloud_config_volume}}@${CLOUD_CONFIG_VOLUME}@g" "${src_file}"
sed -i -e "s@{%.*%}@@g" "${src_file}"
cp "${src_file}" /etc/kubernetes/manifests
fi
}
# A helper function for setting up addon manifests.
#
# $1: addon category under /etc/kubernetes
# $2: manifest source dir
# $3: (optional) auxiliary manifest source dir
function setup-addon-manifests {
local -r src_dir="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty"
local -r dst_dir="/etc/kubernetes/$1/$2"
copy-manifests "${src_dir}/$2" "${dst_dir}"
# If the PodSecurityPolicy admission controller is enabled,
# set up the corresponding addon policies.
if [[ "${ENABLE_POD_SECURITY_POLICY:-}" == "true" ]]; then
local -r psp_dir="${src_dir}/${3:-$2}/podsecuritypolicies"
if [[ -d "${psp_dir}" ]]; then
copy-manifests "${psp_dir}" "${dst_dir}"
fi
fi
if [[ "${ENABLE_NODE_TERMINATION_HANDLER:-}" == "true" ]]; then
local -r nth_dir="${src_dir}/${3:-$2}/node-termination-handler"
if [[ -d "${nth_dir}" ]]; then
copy-manifests "${nth_dir}" "${dst_dir}"
fi
fi
}
# A function that downloads extra addons from a URL and puts them in the GCI
# manifests directory.
function download-extra-addons {
local -r out_dir="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/gce-extras"
mkdir -p "${out_dir}"
local curl_cmd=(
"curl"
"--fail"
"--retry" "5"
"--retry-delay" "3"
"--silent"
"--show-error"
)
if [[ -n "${CURL_RETRY_CONNREFUSED:-}" ]]; then
curl_cmd+=("${CURL_RETRY_CONNREFUSED}")
fi
if [[ -n "${EXTRA_ADDONS_HEADER:-}" ]]; then
curl_cmd+=("-H" "${EXTRA_ADDONS_HEADER}")
fi
curl_cmd+=("-o" "${out_dir}/extras.json")
curl_cmd+=("${EXTRA_ADDONS_URL}")
"${curl_cmd[@]}"
}
# A function that fetches a GCE metadata value and echoes it out.
#
# $1: URL path after /computeMetadata/v1/ (without heading slash).
function get-metadata-value {
curl \
--retry 5 \
--retry-delay 3 \
${CURL_RETRY_CONNREFUSED} \
--fail \
--silent \
-H 'Metadata-Flavor: Google' \
"http://metadata/computeMetadata/v1/${1}"
}
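# Example (illustrative): read this instance's kube-env attribute from the GCE
# metadata server using the helper above.
#   kube_env="$(get-metadata-value "instance/attributes/kube-env")"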
# A helper function for copying manifests and setting dir/files
# permissions.
#
# $1: absolute source dir
# $2: absolute destination dir
function copy-manifests {
local -r src_dir="$1"
local -r dst_dir="$2"
if [[ ! -d "${dst_dir}" ]]; then
mkdir -p "${dst_dir}"
fi
local files=$(find "${src_dir}" -maxdepth 1 -name "*.yaml")
if [[ -n "${files}" ]]; then
cp "${src_dir}/"*.yaml "${dst_dir}"
fi
files=$(find "${src_dir}" -maxdepth 1 -name "*.json")
if [[ -n "${files}" ]]; then
cp "${src_dir}/"*.json "${dst_dir}"
fi
files=$(find "${src_dir}" -maxdepth 1 -name "*.yaml.in")
if [[ -n "${files}" ]]; then
cp "${src_dir}/"*.yaml.in "${dst_dir}"
fi
chown -R root:root "${dst_dir}"
chmod 755 "${dst_dir}"
chmod 644 "${dst_dir}"/*
}
# Fluentd resources are modified using ScalingPolicy CR, which may not be
# available at this point. Run this as a background process.
function wait-for-apiserver-and-update-fluentd {
local any_overrides=false
if [[ -n "${FLUENTD_GCP_MEMORY_LIMIT:-}" ]]; then
any_overrides=true
fi
if [[ -n "${FLUENTD_GCP_CPU_REQUEST:-}" ]]; then
any_overrides=true
fi
if [[ -n "${FLUENTD_GCP_MEMORY_REQUEST:-}" ]]; then
any_overrides=true
fi
if ! $any_overrides; then
# Nothing to do here.
exit
fi
# Wait until ScalingPolicy CRD is in place.
until kubectl get scalingpolicies.scalingpolicy.kope.io
do
sleep 10
done
# Single-shot, not managed by addon manager. Can be later modified or removed
# at will.
cat <<EOF | kubectl apply -f -
apiVersion: scalingpolicy.kope.io/v1alpha1
kind: ScalingPolicy
metadata:
name: fluentd-gcp-scaling-policy
namespace: kube-system
spec:
containers:
- name: fluentd-gcp
resources:
requests:
- resource: cpu
base: ${FLUENTD_GCP_CPU_REQUEST:-}
- resource: memory
base: ${FLUENTD_GCP_MEMORY_REQUEST:-}
limits:
- resource: memory
base: ${FLUENTD_GCP_MEMORY_LIMIT:-}
EOF
}
# Trigger background process that will ultimately update fluentd resource
# requirements.
function start-fluentd-resource-update {
wait-for-apiserver-and-update-fluentd &
}
# Update {{ fluentd_container_runtime_service }} with actual container runtime name,
# and {{ container_runtime_endpoint }} with actual container runtime
# endpoint.
function update-container-runtime {
local -r file="$1"
local -r container_runtime_endpoint="${CONTAINER_RUNTIME_ENDPOINT:-unix:///var/run/dockershim.sock}"
sed -i \
-e "s@{{ *fluentd_container_runtime_service *}}@${FLUENTD_CONTAINER_RUNTIME_SERVICE:-${CONTAINER_RUNTIME_NAME:-docker}}@g" \
-e "s@{{ *container_runtime_endpoint *}}@${container_runtime_endpoint#unix://}@g" \
"${file}"
}
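# Illustrative defaults: with no overrides set, "{{ fluentd_container_runtime_service }}"
# becomes "docker" and "{{ container_runtime_endpoint }}" becomes
# "/var/run/dockershim.sock" (the unix:// prefix is stripped by the ${var#unix://} expansion above).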
# Remove configuration in yaml file if node journal is not enabled.
function update-node-journal {
local -r configmap_yaml="$1"
if [[ "${ENABLE_NODE_JOURNAL:-}" != "true" ]]; then
# Removes all lines between two patterns (throws away node-journal)
sed -i -e "/# BEGIN_NODE_JOURNAL/,/# END_NODE_JOURNAL/d" "${configmap_yaml}"
fi
}
# Updates parameters in yaml file for prometheus-to-sd configuration, or
# removes component if it is disabled.
function update-prometheus-to-sd-parameters {
if [[ "${ENABLE_PROMETHEUS_TO_SD:-}" == "true" ]]; then
sed -i -e "s@{{ *prometheus_to_sd_prefix *}}@${PROMETHEUS_TO_SD_PREFIX}@g" "$1"
sed -i -e "s@{{ *prometheus_to_sd_endpoint *}}@${PROMETHEUS_TO_SD_ENDPOINT}@g" "$1"
else
# Removes all lines between two patterns (throws away prometheus-to-sd)
sed -i -e "/# BEGIN_PROMETHEUS_TO_SD/,/# END_PROMETHEUS_TO_SD/d" "$1"
fi
}
# Updates parameters in yaml file for prometheus-to-sd configuration in daemon sets, or
# removes component if it is disabled.
function update-daemon-set-prometheus-to-sd-parameters {
if [[ "${DISABLE_PROMETHEUS_TO_SD_IN_DS:-}" == "true" ]]; then
# Removes all lines between two patterns (throws away prometheus-to-sd)
sed -i -e "/# BEGIN_PROMETHEUS_TO_SD/,/# END_PROMETHEUS_TO_SD/d" "$1"
else
update-prometheus-to-sd-parameters $1
fi
}
# Updates parameters in yaml file for event-exporter configuration
function update-event-exporter {
local -r stackdriver_resource_model="${LOGGING_STACKDRIVER_RESOURCE_TYPES:-old}"
sed -i -e "s@{{ exporter_sd_resource_model }}@${stackdriver_resource_model}@g" "$1"
}
function update-dashboard-controller {
if [ -n "${CUSTOM_KUBE_DASHBOARD_BANNER:-}" ]; then
sed -i -e "s@\( \+\)# PLATFORM-SPECIFIC ARGS HERE@\1- --system-banner=${CUSTOM_KUBE_DASHBOARD_BANNER}\n\1- --system-banner-severity=WARNING@" "$1"
fi
}
# Sets up the manifests of coreDNS for k8s addons.
function setup-coredns-manifest {
setup-addon-manifests "addons" "0-dns/coredns"
local -r coredns_file="${dst_dir}/0-dns/coredns/coredns.yaml"
mv "${dst_dir}/0-dns/coredns/coredns.yaml.in" "${coredns_file}"
# Replace the salt configurations with variable values.
sed -i -e "s@{{ *pillar\['dns_domain'\] *}}@${DNS_DOMAIN}@g" "${coredns_file}"
sed -i -e "s@{{ *pillar\['dns_server'\] *}}@${DNS_SERVER_IP}@g" "${coredns_file}"
sed -i -e "s@{{ *pillar\['service_cluster_ip_range'\] *}}@${SERVICE_CLUSTER_IP_RANGE}@g" "${coredns_file}"
if [[ "${ENABLE_DNS_HORIZONTAL_AUTOSCALER:-}" == "true" ]]; then
setup-addon-manifests "addons" "dns-horizontal-autoscaler" "gce"
local -r dns_autoscaler_file="${dst_dir}/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml"
sed -i'' -e "s@{{.Target}}@${COREDNS_AUTOSCALER}@g" "${dns_autoscaler_file}"
fi
}
# Sets up the manifests of Fluentd configmap and yamls for k8s addons.
function setup-fluentd {
local -r dst_dir="$1"
local -r fluentd_gcp_yaml="${dst_dir}/fluentd-gcp/fluentd-gcp-ds.yaml"
local -r fluentd_gcp_scaler_yaml="${dst_dir}/fluentd-gcp/scaler-deployment.yaml"
# Ingest logs against new resources like "k8s_container" and "k8s_node" if
# LOGGING_STACKDRIVER_RESOURCE_TYPES is "new".
# Ingest logs against old resources like "gke_container" and "gce_instance" if
# LOGGING_STACKDRIVER_RESOURCE_TYPES is "old".
if [[ "${LOGGING_STACKDRIVER_RESOURCE_TYPES:-old}" == "new" ]]; then
local -r fluentd_gcp_configmap_yaml="${dst_dir}/fluentd-gcp/fluentd-gcp-configmap.yaml"
fluentd_gcp_configmap_name="fluentd-gcp-config"
else
local -r fluentd_gcp_configmap_yaml="${dst_dir}/fluentd-gcp/fluentd-gcp-configmap-old.yaml"
fluentd_gcp_configmap_name="fluentd-gcp-config-old"
fi
sed -i -e "s@{{ fluentd_gcp_configmap_name }}@${fluentd_gcp_configmap_name}@g" "${fluentd_gcp_yaml}"
fluentd_gcp_yaml_version="${FLUENTD_GCP_YAML_VERSION:-v3.2.0}"
sed -i -e "s@{{ fluentd_gcp_yaml_version }}@${fluentd_gcp_yaml_version}@g" "${fluentd_gcp_yaml}"
sed -i -e "s@{{ fluentd_gcp_yaml_version }}@${fluentd_gcp_yaml_version}@g" "${fluentd_gcp_scaler_yaml}"
fluentd_gcp_version="${FLUENTD_GCP_VERSION:-0.6-1.6.0-1}"
sed -i -e "s@{{ fluentd_gcp_version }}@${fluentd_gcp_version}@g" "${fluentd_gcp_yaml}"
update-daemon-set-prometheus-to-sd-parameters ${fluentd_gcp_yaml}
start-fluentd-resource-update ${fluentd_gcp_yaml}
update-container-runtime ${fluentd_gcp_configmap_yaml}
update-node-journal ${fluentd_gcp_configmap_yaml}
}
# Sets up the manifests of kube-dns for k8s addons.
function setup-kube-dns-manifest {
setup-addon-manifests "addons" "0-dns/kube-dns"
local -r kubedns_file="${dst_dir}/0-dns/kube-dns/kube-dns.yaml"
mv "${dst_dir}/0-dns/kube-dns/kube-dns.yaml.in" "${kubedns_file}"
if [ -n "${CUSTOM_KUBE_DNS_YAML:-}" ]; then
# Replace with custom GKE kube-dns deployment.
cat > "${kubedns_file}" <<EOF
$CUSTOM_KUBE_DNS_YAML
EOF
update-prometheus-to-sd-parameters ${kubedns_file}
fi
# Replace the salt configurations with variable values.
sed -i -e "s@{{ *pillar\['dns_domain'\] *}}@${DNS_DOMAIN}@g" "${kubedns_file}"
sed -i -e "s@{{ *pillar\['dns_server'\] *}}@${DNS_SERVER_IP}@g" "${kubedns_file}"
if [[ "${ENABLE_DNS_HORIZONTAL_AUTOSCALER:-}" == "true" ]]; then
setup-addon-manifests "addons" "dns-horizontal-autoscaler" "gce"
local -r dns_autoscaler_file="${dst_dir}/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml"
sed -i'' -e "s@{{.Target}}@${KUBEDNS_AUTOSCALER}@g" "${dns_autoscaler_file}"
fi
}
# Sets up the manifests of local dns cache agent for k8s addons.
function setup-nodelocaldns-manifest {
setup-addon-manifests "addons" "0-dns/nodelocaldns"
local -r localdns_file="${dst_dir}/0-dns/nodelocaldns/nodelocaldns.yaml"
# Replace the sed configurations with variable values.
sed -i -e "s/__PILLAR__DNS__DOMAIN__/${DNS_DOMAIN}/g" "${localdns_file}"
sed -i -e "s/__PILLAR__DNS__SERVER__/${DNS_SERVER_IP}/g" "${localdns_file}"
sed -i -e "s/__PILLAR__LOCAL__DNS__/${LOCAL_DNS_IP}/g" "${localdns_file}"
}
# Sets up the manifests of netd for k8s addons.
function setup-netd-manifest {
local -r netd_file="${dst_dir}/netd/netd.yaml"
mkdir -p "${dst_dir}/netd"
touch "${netd_file}"
if [ -n "${CUSTOM_NETD_YAML:-}" ]; then
# Replace with custom GCP netd deployment.
cat > "${netd_file}" <<EOF
$CUSTOM_NETD_YAML
EOF
fi
}
# A helper function to set up a custom yaml for a k8s addon.
#
# $1: addon category under /etc/kubernetes
# $2: manifest source dir
# $3: manifest file
# $4: custom yaml
function setup-addon-custom-yaml {
local -r manifest_path="/etc/kubernetes/$1/$2/$3"
local -r custom_yaml="$4"
if [ -n "${custom_yaml:-}" ]; then
# Replace with custom manifest.
cat > "${manifest_path}" <<EOF
$custom_yaml
EOF
fi
}
# Prepares the manifests of k8s addons, and starts the addon manager.
# Vars assumed:
# CLUSTER_NAME
function start-kube-addons {
echo "Prepare kube-addons manifests and start kube addon manager"
local -r src_dir="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty"
local -r dst_dir="/etc/kubernetes/addons"
# prep addition kube-up specific rbac objects
setup-addon-manifests "addons" "rbac/kubelet-api-auth"
setup-addon-manifests "addons" "rbac/kubelet-cert-rotation"
if [[ "${REGISTER_MASTER_KUBELET:-false}" == "true" ]]; then
setup-addon-manifests "addons" "rbac/legacy-kubelet-user"
else
setup-addon-manifests "addons" "rbac/legacy-kubelet-user-disable"
fi
if [[ "${ENABLE_POD_SECURITY_POLICY:-}" == "true" ]]; then
setup-addon-manifests "addons" "podsecuritypolicies"
fi
# Set up manifests of other addons.
if [[ "${KUBE_PROXY_DAEMONSET:-}" == "true" ]]; then
if [ -n "${CUSTOM_KUBE_PROXY_YAML:-}" ]; then
# Replace with custom GKE kube proxy.
cat > "$src_dir/kube-proxy/kube-proxy-ds.yaml" <<EOF
$CUSTOM_KUBE_PROXY_YAML
EOF
update-daemon-set-prometheus-to-sd-parameters "$src_dir/kube-proxy/kube-proxy-ds.yaml"
fi
prepare-kube-proxy-manifest-variables "$src_dir/kube-proxy/kube-proxy-ds.yaml"
setup-addon-manifests "addons" "kube-proxy"
fi
# Setup prometheus stack for monitoring kubernetes cluster
if [[ "${ENABLE_PROMETHEUS_MONITORING:-}" == "true" ]]; then
setup-addon-manifests "addons" "prometheus"
fi
# Setup cluster monitoring using heapster
if [[ "${ENABLE_CLUSTER_MONITORING:-}" == "influxdb" ]] || \
[[ "${ENABLE_CLUSTER_MONITORING:-}" == "google" ]] || \
[[ "${ENABLE_CLUSTER_MONITORING:-}" == "stackdriver" ]] || \
[[ "${ENABLE_CLUSTER_MONITORING:-}" == "standalone" ]] || \
[[ "${ENABLE_CLUSTER_MONITORING:-}" == "googleinfluxdb" ]]; then
local -r file_dir="cluster-monitoring/${ENABLE_CLUSTER_MONITORING}"
setup-addon-manifests "addons" "cluster-monitoring"
setup-addon-manifests "addons" "${file_dir}"
# Replace the salt configurations with variable values.
base_metrics_memory="${HEAPSTER_GCP_BASE_MEMORY:-140Mi}"
base_eventer_memory="190Mi"
base_metrics_cpu="${HEAPSTER_GCP_BASE_CPU:-80m}"
nanny_memory="90Mi"
local heapster_min_cluster_size="16"
local metrics_memory_per_node="${HEAPSTER_GCP_MEMORY_PER_NODE:-4}"
local -r metrics_cpu_per_node="${HEAPSTER_GCP_CPU_PER_NODE:-0.5}"
local -r eventer_memory_per_node="500"
local -r nanny_memory_per_node="200"
if [[ "${ENABLE_SYSTEM_ADDON_RESOURCE_OPTIMIZATIONS:-}" == "true" ]]; then
base_metrics_memory="${HEAPSTER_GCP_BASE_MEMORY:-100Mi}"
base_metrics_cpu="${HEAPSTER_GCP_BASE_CPU:-10m}"
metrics_memory_per_node="${HEAPSTER_GCP_MEMORY_PER_NODE:-4}"
heapster_min_cluster_size="5"
fi
if [[ -n "${NUM_NODES:-}" && "${NUM_NODES}" -ge 1 ]]; then
num_kube_nodes="$((${NUM_NODES}+1))"
nanny_memory="$((${num_kube_nodes} * ${nanny_memory_per_node} + 90 * 1024))Ki"
fi
controller_yaml="${dst_dir}/${file_dir}"
if [[ "${ENABLE_CLUSTER_MONITORING:-}" == "googleinfluxdb" ]]; then
controller_yaml="${controller_yaml}/heapster-controller-combined.yaml"
else
controller_yaml="${controller_yaml}/heapster-controller.yaml"
fi
sed -i -e "s@{{ cluster_name }}@${CLUSTER_NAME}@g" "${controller_yaml}"
sed -i -e "s@{{ cluster_location }}@${ZONE}@g" "${controller_yaml}"
sed -i -e "s@{{ *base_metrics_memory *}}@${base_metrics_memory}@g" "${controller_yaml}"
sed -i -e "s@{{ *base_metrics_cpu *}}@${base_metrics_cpu}@g" "${controller_yaml}"
sed -i -e "s@{{ *base_eventer_memory *}}@${base_eventer_memory}@g" "${controller_yaml}"
sed -i -e "s@{{ *metrics_memory_per_node *}}@${metrics_memory_per_node}@g" "${controller_yaml}"
sed -i -e "s@{{ *eventer_memory_per_node *}}@${eventer_memory_per_node}@g" "${controller_yaml}"
sed -i -e "s@{{ *nanny_memory *}}@${nanny_memory}@g" "${controller_yaml}"
sed -i -e "s@{{ *metrics_cpu_per_node *}}@${metrics_cpu_per_node}@g" "${controller_yaml}"
sed -i -e "s@{{ *heapster_min_cluster_size *}}@${heapster_min_cluster_size}@g" "${controller_yaml}"
update-prometheus-to-sd-parameters ${controller_yaml}
if [[ "${ENABLE_CLUSTER_MONITORING:-}" == "stackdriver" ]]; then
use_old_resources="${HEAPSTER_USE_OLD_STACKDRIVER_RESOURCES:-true}"
use_new_resources="${HEAPSTER_USE_NEW_STACKDRIVER_RESOURCES:-false}"
sed -i -e "s@{{ use_old_resources }}@${use_old_resources}@g" "${controller_yaml}"
sed -i -e "s@{{ use_new_resources }}@${use_new_resources}@g" "${controller_yaml}"
fi
fi
if [[ "${ENABLE_CLUSTER_MONITORING:-}" == "stackdriver" ]] ||
([[ "${ENABLE_CLUSTER_LOGGING:-}" == "true" ]] &&
[[ "${LOGGING_DESTINATION:-}" == "gcp" ]]); then
if [[ "${ENABLE_METADATA_AGENT:-}" == "stackdriver" ]]; then
metadata_agent_cpu_request="${METADATA_AGENT_CPU_REQUEST:-40m}"
metadata_agent_memory_request="${METADATA_AGENT_MEMORY_REQUEST:-50Mi}"
metadata_agent_cluster_level_cpu_request="${METADATA_AGENT_CLUSTER_LEVEL_CPU_REQUEST:-40m}"
metadata_agent_cluster_level_memory_request="${METADATA_AGENT_CLUSTER_LEVEL_MEMORY_REQUEST:-50Mi}"
setup-addon-manifests "addons" "metadata-agent/stackdriver"
metadata_agent_yaml="${dst_dir}/metadata-agent/stackdriver/metadata-agent.yaml"
sed -i -e "s@{{ metadata_agent_cpu_request }}@${metadata_agent_cpu_request}@g" "${metadata_agent_yaml}"
sed -i -e "s@{{ metadata_agent_memory_request }}@${metadata_agent_memory_request}@g" "${metadata_agent_yaml}"
sed -i -e "s@{{ metadata_agent_cluster_level_cpu_request }}@${metadata_agent_cluster_level_cpu_request}@g" "${metadata_agent_yaml}"
sed -i -e "s@{{ metadata_agent_cluster_level_memory_request }}@${metadata_agent_cluster_level_memory_request}@g" "${metadata_agent_yaml}"
fi
fi
if [[ "${ENABLE_METRICS_SERVER:-}" == "true" ]]; then
setup-addon-manifests "addons" "metrics-server"
base_metrics_server_cpu="40m"
base_metrics_server_memory="40Mi"
metrics_server_memory_per_node="4"
metrics_server_min_cluster_size="16"
if [[ "${ENABLE_SYSTEM_ADDON_RESOURCE_OPTIMIZATIONS:-}" == "true" ]]; then
base_metrics_server_cpu="40m"
base_metrics_server_memory="35Mi"
metrics_server_memory_per_node="4"
metrics_server_min_cluster_size="5"
fi
local -r metrics_server_yaml="${dst_dir}/metrics-server/metrics-server-deployment.yaml"
sed -i -e "s@{{ base_metrics_server_cpu }}@${base_metrics_server_cpu}@g" "${metrics_server_yaml}"
sed -i -e "s@{{ base_metrics_server_memory }}@${base_metrics_server_memory}@g" "${metrics_server_yaml}"
sed -i -e "s@{{ metrics_server_memory_per_node }}@${metrics_server_memory_per_node}@g" "${metrics_server_yaml}"
sed -i -e "s@{{ metrics_server_min_cluster_size }}@${metrics_server_min_cluster_size}@g" "${metrics_server_yaml}"
fi
if [[ "${ENABLE_NVIDIA_GPU_DEVICE_PLUGIN:-}" == "true" ]]; then
setup-addon-manifests "addons" "device-plugins/nvidia-gpu"
fi
if [[ "${ENABLE_NODE_TERMINATION_HANDLER:-}" == "true" ]]; then
setup-addon-manifests "addons" "node-termination-handler"
    setup-node-termination-handler-manifest "addons" "node-termination-handler"
fi
if [[ "${ENABLE_CLUSTER_DNS:-}" == "true" ]]; then
# Create a new directory for the DNS addon and prepend a "0" on the name.
# Prepending "0" to the directory ensures that add-on manager
# creates the dns service first. This ensures no other add-on
# can "steal" the designated DNS clusterIP.
BASE_ADDON_DIR=${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty
BASE_DNS_DIR=${BASE_ADDON_DIR}/dns
NEW_DNS_DIR=${BASE_ADDON_DIR}/0-dns
mkdir ${NEW_DNS_DIR} && mv ${BASE_DNS_DIR}/* ${NEW_DNS_DIR} && rm -r ${BASE_DNS_DIR}
if [[ "${CLUSTER_DNS_CORE_DNS:-}" == "true" ]]; then
setup-coredns-manifest
else
setup-kube-dns-manifest
fi
if [[ "${ENABLE_NODELOCAL_DNS:-}" == "true" ]]; then
setup-nodelocaldns-manifest
fi
fi
if [[ "${ENABLE_NETD:-}" == "true" ]]; then
setup-netd-manifest
fi
if [[ "${ENABLE_NODE_LOGGING:-}" == "true" ]] && \
[[ "${LOGGING_DESTINATION:-}" == "elasticsearch" ]] && \
[[ "${ENABLE_CLUSTER_LOGGING:-}" == "true" ]]; then
setup-addon-manifests "addons" "fluentd-elasticsearch"
local -r fluentd_es_configmap_yaml="${dst_dir}/fluentd-elasticsearch/fluentd-es-configmap.yaml"
update-container-runtime ${fluentd_es_configmap_yaml}
fi
if [[ "${ENABLE_NODE_LOGGING:-}" == "true" ]] && \
[[ "${LOGGING_DESTINATION:-}" == "gcp" ]]; then
setup-addon-manifests "addons" "fluentd-gcp"
setup-fluentd ${dst_dir}
local -r event_exporter_yaml="${dst_dir}/fluentd-gcp/event-exporter.yaml"
update-event-exporter ${event_exporter_yaml}
update-prometheus-to-sd-parameters ${event_exporter_yaml}
fi
if [[ "${ENABLE_CLUSTER_UI:-}" == "true" ]]; then
setup-addon-manifests "addons" "dashboard"
local -r dashboard_controller_yaml="${dst_dir}/dashboard/dashboard-controller.yaml"
update-dashboard-controller ${dashboard_controller_yaml}
fi
if [[ "${ENABLE_NODE_PROBLEM_DETECTOR:-}" == "daemonset" ]]; then
setup-addon-manifests "addons" "node-problem-detector"
fi
if [[ "${ENABLE_NODE_PROBLEM_DETECTOR:-}" == "standalone" ]]; then
# Setup role binding for standalone node problem detector.
setup-addon-manifests "addons" "node-problem-detector/standalone" "node-problem-detector"
fi
if echo "${ADMISSION_CONTROL:-}" | grep -q "LimitRanger"; then
setup-addon-manifests "admission-controls" "limit-range" "gce"
fi
if [[ "${NETWORK_POLICY_PROVIDER:-}" == "calico" ]]; then
setup-addon-manifests "addons" "calico-policy-controller"
setup-addon-custom-yaml "addons" "calico-policy-controller" "calico-node-daemonset.yaml" "${CUSTOM_CALICO_NODE_DAEMONSET_YAML:-}"
setup-addon-custom-yaml "addons" "calico-policy-controller" "typha-deployment.yaml" "${CUSTOM_TYPHA_DEPLOYMENT_YAML:-}"
# Configure Calico CNI directory.
local -r ds_file="${dst_dir}/calico-policy-controller/calico-node-daemonset.yaml"
sed -i -e "s@__CALICO_CNI_DIR__@/home/kubernetes/bin@g" "${ds_file}"
fi
if [[ "${ENABLE_DEFAULT_STORAGE_CLASS:-}" == "true" ]]; then
setup-addon-manifests "addons" "storage-class/gce"
fi
if [[ "${FEATURE_GATES:-}" =~ "AllAlpha=true" || "${FEATURE_GATES:-}" =~ "CSIDriverRegistry=true" || "${FEATURE_GATES:-}" =~ "CSINodeInfo=true" ]]; then
setup-addon-manifests "addons" "storage-crds"
fi
if [[ "${ENABLE_IP_MASQ_AGENT:-}" == "true" ]]; then
setup-addon-manifests "addons" "ip-masq-agent"
fi
if [[ "${ENABLE_METADATA_CONCEALMENT:-}" == "true" ]]; then
setup-addon-manifests "addons" "metadata-proxy/gce"
local -r metadata_proxy_yaml="${dst_dir}/metadata-proxy/gce/metadata-proxy.yaml"
update-daemon-set-prometheus-to-sd-parameters ${metadata_proxy_yaml}
fi
if [[ "${ENABLE_ISTIO:-}" == "true" ]]; then
if [[ "${ISTIO_AUTH_TYPE:-}" == "MUTUAL_TLS" ]]; then
setup-addon-manifests "addons" "istio/auth"
else
setup-addon-manifests "addons" "istio/noauth"
fi
fi
if [[ "${FEATURE_GATES:-}" =~ "RuntimeClass=true" ]]; then
setup-addon-manifests "addons" "runtimeclass"
fi
if [[ -n "${EXTRA_ADDONS_URL:-}" ]]; then
download-extra-addons
setup-addon-manifests "addons" "gce-extras"
fi
# Place addon manager pod manifest.
src_file="${src_dir}/kube-addon-manager.yaml"
sed -i -e "s@{{kubectl_extra_prune_whitelist}}@${ADDON_MANAGER_PRUNE_WHITELIST:-}@g" "${src_file}"
cp "${src_file}" /etc/kubernetes/manifests
}
function setup-node-termination-handler-manifest {
local -r nth_manifest="/etc/kubernetes/$1/$2/daemonset.yaml"
if [[ -n "${NODE_TERMINATION_HANDLER_IMAGE}" ]]; then
sed -i "s|image:.*|image: ${NODE_TERMINATION_HANDLER_IMAGE}|" "${nth_manifest}"
fi
}
# Setups manifests for ingress controller and gce-specific policies for service controller.
function start-lb-controller {
setup-addon-manifests "addons" "loadbalancing"
# Starts a l7 loadbalancing controller for ingress.
if [[ "${ENABLE_L7_LOADBALANCING:-}" == "glbc" ]]; then
echo "Start GCE L7 pod"
prepare-log-file /var/log/glbc.log
setup-addon-manifests "addons" "cluster-loadbalancing/glbc"
local -r src_manifest="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/glbc.manifest"
local -r dest_manifest="/etc/kubernetes/manifests/glbc.manifest"
if [[ -n "${CUSTOM_INGRESS_YAML:-}" ]]; then
echo "${CUSTOM_INGRESS_YAML}" > "${dest_manifest}"
else
cp "${src_manifest}" "${dest_manifest}"
fi
# Override the glbc image if GCE_GLBC_IMAGE is specified.
if [[ -n "${GCE_GLBC_IMAGE:-}" ]]; then
sed -i "s|image:.*|image: ${GCE_GLBC_IMAGE}|" "${dest_manifest}"
fi
fi
}
# Setup working directory for kubelet.
function setup-kubelet-dir {
echo "Making /var/lib/kubelet executable for kubelet"
mount -B /var/lib/kubelet /var/lib/kubelet/
mount -B -o remount,exec,suid,dev /var/lib/kubelet
}
# Override for GKE custom master setup scripts (no-op outside of GKE).
function gke-master-start {
if [[ -e "${KUBE_HOME}/bin/gke-internal-configure-helper.sh" ]]; then
echo "Running GKE internal configuration script"
. "${KUBE_HOME}/bin/gke-internal-configure-helper.sh"
gke-internal-master-start
fi
}
function reset-motd {
# kubelet is installed both on the master and nodes, and the version is easy to parse (unlike kubectl)
local -r version="$("${KUBE_HOME}"/bin/kubelet --version=true | cut -f2 -d " ")"
# This logic grabs either a release tag (v1.2.1 or v1.2.1-alpha.1),
# or the git hash that's in the build info.
local gitref="$(echo "${version}" | sed -r "s/(v[0-9]+\.[0-9]+\.[0-9]+)(-[a-z]+\.[0-9]+)?.*/\1\2/g")"
local devel=""
if [[ "${gitref}" != "${version}" ]]; then
devel="
Note: This looks like a development version, which might not be present on GitHub.
If it isn't, the closest tag is at:
https://github.com/kubernetes/kubernetes/tree/${gitref}
"
gitref="${version//*+/}"
fi
cat > /etc/motd <<EOF
Welcome to Kubernetes ${version}!
You can find documentation for Kubernetes at:
http://docs.kubernetes.io/
The source for this release can be found at:
/home/kubernetes/kubernetes-src.tar.gz
Or you can download it at:
https://storage.googleapis.com/kubernetes-release/release/${version}/kubernetes-src.tar.gz
It is based on the Kubernetes source at:
https://github.com/kubernetes/kubernetes/tree/${gitref}
${devel}
For Kubernetes copyright and licensing information, see:
/home/kubernetes/LICENSES
EOF
}
function override-kubectl {
echo "overriding kubectl"
echo "export PATH=${KUBE_HOME}/bin:\$PATH" > /etc/profile.d/kube_env.sh
# Add ${KUBE_HOME}/bin into sudoer secure path.
local sudo_path
sudo_path=$(sudo env | grep "^PATH=")
if [[ -n "${sudo_path}" ]]; then
sudo_path=${sudo_path#PATH=}
(
umask 027
echo "Defaults secure_path=\"${KUBE_HOME}/bin:${sudo_path}\"" > /etc/sudoers.d/kube_secure_path
)
fi
}
function override-pv-recycler {
if [[ -z "${PV_RECYCLER_OVERRIDE_TEMPLATE:-}" ]]; then
echo "PV_RECYCLER_OVERRIDE_TEMPLATE is not set"
exit 1
fi
PV_RECYCLER_VOLUME="{\"name\": \"pv-recycler-mount\",\"hostPath\": {\"path\": \"${PV_RECYCLER_OVERRIDE_TEMPLATE}\", \"type\": \"FileOrCreate\"}},"
PV_RECYCLER_MOUNT="{\"name\": \"pv-recycler-mount\",\"mountPath\": \"${PV_RECYCLER_OVERRIDE_TEMPLATE}\", \"readOnly\": true},"
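  # The pod template below wipes every file (including dotfiles) from the volume
  # mounted at /scrub and exits non-zero if anything is left behind.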
cat > ${PV_RECYCLER_OVERRIDE_TEMPLATE} <<EOF
version: v1
kind: Pod
metadata:
generateName: pv-recycler-
namespace: default
spec:
activeDeadlineSeconds: 60
restartPolicy: Never
volumes:
- name: vol
containers:
- name: pv-recycler
image: k8s.gcr.io/busybox:1.27
command:
- /bin/sh
args:
- -c
    - test -e /scrub && rm -rf /scrub/..?* /scrub/.[!.]* /scrub/* && test -z \$(ls -A /scrub) || exit 1
volumeMounts:
- name: vol
mountPath: /scrub
EOF
}
########### Main Function ###########
function main() {
echo "Start to configure instance for kubernetes"
readonly UUID_MNT_PREFIX="/mnt/disks/by-uuid/google-local-ssds"
readonly UUID_BLOCK_PREFIX="/dev/disk/by-uuid/google-local-ssds"
readonly COREDNS_AUTOSCALER="Deployment/coredns"
readonly KUBEDNS_AUTOSCALER="Deployment/kube-dns"
# Resource requests of master components.
KUBE_CONTROLLER_MANAGER_CPU_REQUEST="${KUBE_CONTROLLER_MANAGER_CPU_REQUEST:-200m}"
KUBE_SCHEDULER_CPU_REQUEST="${KUBE_SCHEDULER_CPU_REQUEST:-75m}"
# Use --retry-connrefused opt only if it's supported by curl.
CURL_RETRY_CONNREFUSED=""
if curl --help | grep -q -- '--retry-connrefused'; then
CURL_RETRY_CONNREFUSED='--retry-connrefused'
fi
KUBE_HOME="/home/kubernetes"
CONTAINERIZED_MOUNTER_HOME="${KUBE_HOME}/containerized_mounter"
PV_RECYCLER_OVERRIDE_TEMPLATE="${KUBE_HOME}/kube-manifests/kubernetes/pv-recycler-template.yaml"
if [[ ! -e "${KUBE_HOME}/kube-env" ]]; then
echo "The ${KUBE_HOME}/kube-env file does not exist!! Terminate cluster initialization."
exit 1
fi
source "${KUBE_HOME}/kube-env"
if [[ -f "${KUBE_HOME}/kubelet-config.yaml" ]]; then
echo "Found Kubelet config file at ${KUBE_HOME}/kubelet-config.yaml"
KUBELET_CONFIG_FILE_ARG="--config ${KUBE_HOME}/kubelet-config.yaml"
fi
if [[ -e "${KUBE_HOME}/kube-master-certs" ]]; then
source "${KUBE_HOME}/kube-master-certs"
fi
if [[ -n "${KUBE_USER:-}" ]]; then
if ! [[ "${KUBE_USER}" =~ ^[-._@a-zA-Z0-9]+$ ]]; then
echo "Bad KUBE_USER format."
exit 1
fi
fi
# generate the controller manager, scheduler and cluster autoscaler tokens here since they are only used on the master.
KUBE_CONTROLLER_MANAGER_TOKEN="$(secure_random 32)"
KUBE_SCHEDULER_TOKEN="$(secure_random 32)"
KUBE_CLUSTER_AUTOSCALER_TOKEN="$(secure_random 32)"
setup-os-params
config-ip-firewall
create-dirs
setup-kubelet-dir
ensure-local-ssds
setup-logrotate
if [[ "${KUBERNETES_MASTER:-}" == "true" ]]; then
mount-master-pd
create-node-pki
create-master-pki
create-master-auth
create-master-kubelet-auth
create-master-etcd-auth
create-master-etcd-apiserver-auth
override-pv-recycler
gke-master-start
else
create-node-pki
create-kubelet-kubeconfig ${KUBERNETES_MASTER_NAME}
if [[ "${KUBE_PROXY_DAEMONSET:-}" != "true" ]]; then
create-kubeproxy-user-kubeconfig
fi
if [[ "${ENABLE_NODE_PROBLEM_DETECTOR:-}" == "standalone" ]]; then
create-node-problem-detector-kubeconfig ${KUBERNETES_MASTER_NAME}
fi
fi
override-kubectl
# Run the containerized mounter once to pre-cache the container image.
if [[ "${CONTAINER_RUNTIME:-docker}" == "docker" ]]; then
assemble-docker-flags
fi
start-kubelet
if [[ "${KUBERNETES_MASTER:-}" == "true" ]]; then
compute-master-manifest-variables
if [[ -z "${ETCD_SERVERS:-}" ]]; then
start-etcd-servers
start-etcd-empty-dir-cleanup-pod
fi
start-kube-apiserver
start-kube-controller-manager
start-kube-scheduler
start-kube-addons
start-cluster-autoscaler
start-lb-controller
apply-encryption-config &
else
if [[ "${KUBE_PROXY_DAEMONSET:-}" != "true" ]]; then
start-kube-proxy
fi
if [[ "${ENABLE_NODE_PROBLEM_DETECTOR:-}" == "standalone" ]]; then
start-node-problem-detector
fi
fi
reset-motd
prepare-mounter-rootfs
modprobe configs
echo "Done for the configuration for kubernetes"
}
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
main "${@}"
fi
|
therc/kubernetes
|
cluster/gce/gci/configure-helper.sh
|
Shell
|
apache-2.0
| 115,235 |
#!/bin/bash
###############################################################################
# Author: Jens V. Fischer
# Date: 11.11.2015
#
# Running a benchmark series on a Scalaris slurm installation with basho bench.
# The script reads basho-bench.cfg, sets up a Scalaris ring through the slurm
# cluster management tool, using the slurm script collection of Scalaris with
# basho_bench.slurm as the slurm script. It then waits for the ring to come up and
# starts multiple basho bench instances ("load generators") on one or multiple
# machines by calling start-basho-bench.sh.
#
# Call:
# ./basho-bench.sh
#
# Configuration:
# All configuration settings, including documentation and default values can
# be found in basho-bench.cfg.
# A quick-config section is provided below for overriding values from the
# configuration file. This is meant for easy manipulation of the most commonly
# used configuration parameters.
###############################################################################
trap 'trap_cleanup' SIGTERM SIGINT
# QUICK-CONFIG ===============
# Values defined here override settings from the configuration file
# REPETITIONS=2
# DURATION=5
# LOAD_GENERATORS=4
#
# PARTITION="CUMU"
# TIMEOUT=15
# SLEEP1=30
# SLEEP2=30
#
# SCALARIS_LOCAL=true
# COLLECTL=true
#
# # size scalability series (only uncomment 'size' or 'load')
# KIND='size'
# NODES_SERIES="1 2 4 8 16 32"
# VMS_PER_NODE_SERIES="1"
# export ERL_SCHED_FLAGS="+S 32"
# LOAD_LEVEL=5
# vary value sizes
# KIND='value'
# WORKERS_PER_LG=44
# VALUE_SIZES="2 4 8 16 32 64 128 256 512 768 1024 1280 1536 1792 2048"
# NODES=32
# load scalability series
# KIND='load'
# NODES=32
# VMS_PER_NODE=1
# WORKERS_PER_LG_SERIES="1 2 4 8 16 32 64 128 256 512 1024 2048"
# KIND="lgs"
# LOAD_GENERATORS_SERIES="4 6 8 10 12 15"
# WORKERS="8400"
# NODES=32
# VMS_PER_NODE=4
# export ERL_SCHED_FLAGS="+S 8"
#=============================
main() {
is_lg_external
if ! $EXTERNAL_LG ; then
LG_HOSTS=`scontrol show hostnames`
LG_HOSTS=($LG_HOSTS)
fi
source $(pwd)/config/basho-bench.cfg
check_wdir
check_result_dir
setup_logging
print_env
check_compile
if [[ $KIND == "size" ]]; then
main_size
elif [[ $KIND == "load" ]]; then
main_load
elif [[ $KIND == "value" ]]; then
main_value
elif [[ $KIND == "lgs" ]]; then
main_lgs
else
log error "Unknown kind of benchmark, exiting"
exit 1
fi
}
main_lgs(){
for LOAD_GENERATORS in $LOAD_GENERATORS_SERIES; do
WORKERS_PER_LG=$((WORKERS/LOAD_GENERATORS))
log info "starting load benchmark with $LOAD_GENERATORS LOAD_GENERATORS"
log info "WORKERS=$WORKERS"
log info "WORKERS_PER_LG=$WORKERS_PER_LG"
PREFIX="lgs$(printf "%04i" $LOAD_GENERATORS)"
repeat_benchmark
done
}
main_value() {
for VALUE_SIZE in $VALUE_SIZES; do
local value=$(printf "%04i" $VALUE_SIZE)
PREFIX="value$value"
log info "starting value benchmark with $VALUE_SIZE"
repeat_benchmark
done
}
main_size(){
for NODES in $NODES_SERIES; do
if ((NODES==1)); then
NODELIST=""
elif ((NODES==2)); then
NODELIST="cumu01-00,cumu02-00"
else
half=$((NODES/2-1))
NODELIST="cumu01-[00-$half],cumu02-[00-$half]"
fi
for VMS_PER_NODE in $VMS_PER_NODE_SERIES; do
local ringsize=$((NODES*VMS_PER_NODE*DHT_NODES_PER_VM))
WORKERS=$((ringsize*LOAD_LEVEL))
WORKERS_PER_LG=$((WORKERS/LOAD_GENERATORS))
log info "RINGSIZE=$ringsize"
log info "WORKERS=$WORKERS"
log info "WORKERS_PER_LG=$WORKERS_PER_LG"
ringsize=$(printf "%04i" $ringsize)
PREFIX="size$ringsize"
repeat_benchmark
done
done
}
main_load(){
for WORKERS_PER_LG in $WORKERS_PER_LG_SERIES; do
WORKERS=$((WORKERS_PER_LG*LOAD_GENERATORS))
var=1
for OPS in $OPERATIONS_SERIES; do
log info "WORKERS=$WORKERS"
log info "WORKERS_PER_LG=$WORKERS_PER_LG"
OPERATIONS=$OPS
log info "OPERATIONS=$OPERATIONS"
WORKERS=$(printf "%04i" $WORKERS)
PREFIX="load$WORKERS-$var"
log info "starting load benchmark with $WORKERS ($WORKERS_PER_LG*$LOAD_GENERATORS)"
let "var++"
repeat_benchmark
done
done
}
repeat_benchmark() {
for run in $(seq 1 $REPETITIONS); do
NAME="${PREFIX}-r$run"
mkdir ${WD}/${NAME}
setup_directories
create_result_dir
# setup Scalaris log path
SLOGPATH=${SLOGPATH/?NAME/$NAME} # replace "?NAME" with "$NAME"
SCALARISCTL_PARAMS="$SLOGPATH $SCTL_PARAMS" # Prepend log path
echo ${!SCALARISCTL_PARAMS@}=$SCALARISCTL_PARAMS
COLLECTL_DIR=$WD/$NAME/collectl
echo ${!COLLECTL_DIR@}=$COLLECTL_DIR
log info "starting repetition $run..."
[[ $COLLECTL = true ]] && start_collectl
[[ $TOPLOG = true ]] && start_toplog
start_scalaris
wait_for_scalaris_startup
build_hostlist
test_ring
run_bbench
test_ring
stop_scalaris
rm_lockfile
log info "sleeping for $SLEEP1 seconds"; sleep $SLEEP1
[[ $COLLECTL = true ]] && stop_collectl
[[ $TOPLOG = true ]] && stop_toplog
done
if (( SLEEP2 > 0 )); then
log info "sleeping for $SLEEP2 seconds"
sleep $SLEEP2
fi
collect_bbench_results
}
#=====================
# FUNCTIONS
#=====================
check_wdir() {
# check if WD exists
if [[ ! -d $WD ]]; then
mkdir -p $WD
else
# check if WD is empty
if [ "$(ls $WD)" ]; then
log info "Working directory ($WD) is not empty, containing the following files/dirs:"
ls -l1 $WD
read -p "Delete all files? " -n 1 -r
echo # move to a new line
if [[ $REPLY =~ ^[Yy]$ ]]; then
rm -r $WD/*
else
log error "aborting..."
exit 1
fi
fi
fi
}
setup_logging(){
LOGFILE="$WD/bbench-suite-$(date +%y.%m.%d-%H:%M:%S).log"
log info "writing output also to $LOGFILE"
# w/o -i option to tee, signal trapping does NOT work!
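    # redirect all further stdout/stderr of this script through tee, appending to $LOGFILE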
exec &>> >(tee -i $LOGFILE)
}
setup_directories(){
if [[ $COLLECTL = true && ! -d $WD/$NAME/collectl ]]; then
mkdir -p $WD/$NAME/collectl
fi
}
print_env(){
echo KIND=$KIND
if [[ $KIND == "load" ]]; then
echo RINGSIZE=$((NODES*VMS_PER_NODE*DHT_NODES_PER_VM))
echo NODES=$NODES
echo VMS_PER_NODE=$VMS_PER_NODE
echo WORKERS_PER_LG_SERIES=$WORKERS_PER_LG_SERIES
elif [[ $KIND == "size" ]]; then
echo NODES_SERIES=$NODES_SERIES
echo VMS_PER_NODE_SERIES=$VMS_PER_NODE_SERIES
echo LOAD_LEVEL=$LOAD_LEVEL
elif [[ $KIND == "value" ]]; then
echo RINGSIZE=$((NODES*VMS_PER_NODE*DHT_NODES_PER_VM))
echo WORKERS_PER_LG=$WORKERS_PER_LG
echo VALUE_SIZES=$VALUE_SIZES
fi
echo "ERL_SCHED_FLAGS=$ERL_SCHED_FLAGS"
echo TIMEOUT=$TIMEOUT
echo REPETITIONS=$REPETITIONS
echo DURATION=$DURATION
echo LOAD_GENERATORS=$LOAD_GENERATORS
echo LG_HOSTS=${LG_HOSTS[@]}
echo SLEEP1=$SLEEP1
echo SLEEP2=$SLEEP2
echo "COLLECTL=$COLLECTL"
echo "PARTITION=$PARTITION"
echo "VALUE_SIZE=$VALUE_SIZE"
}
check_compile(){
pushd $SCALARIS_DIR >/dev/null
local res=$(erl -pa contrib/yaws -pa ebin -noinput +B -eval 'R=make:all([noexec]), halt(0).')
popd >/dev/null
if [[ -n $res ]]; then
log error "Scalaris binaries do not match source version:"
echo $res
exit 1
fi
}
log(){
local level=$1
local message=$2
printf "%s %s\n" "$(tag $level)" "$message"
}
tag(){
local level=$1
printf "[bbench] %s [%s]" "$(date +%H:%M:%S)" "$level"
}
start_collectl() {
export COLLECTL_SUBSYSTEMS
export COLLECTL_INTERVAL
export COLLECTL_FLUSH
# start collectl at the load generators
for host in ${LG_HOSTS[@]}; do
log info "starting collectl on $host"
if [[ $(hostname -f) = $host ]]; then
collectl $COLLECTL_SUBSYSTEMS $COLLECTL_INTERVAL $COLLECTL_FLUSH -f $WD/$NAME/collectl/lg_$host 2>/dev/null &
else
if [[ $EXTERNAL_LG = true ]]; then
ssh $host collectl $COLLECTL_SUBSYSTEMS $COLLECTL_INTERVAL $COLLECTL_FLUSH -f $WD/$NAME/collectl/lg_$host 2>/dev/null &
else
srun --nodelist=$host -N1 bash -c "collectl $COLLECTL_SUBSYSTEMS $COLLECTL_INTERVAL $COLLECTL_FLUSH -f $WD/$NAME/collectl/lg_$host 2>/dev/null &"
fi
fi
done
}
stop_collectl(){
# stop collectl on load generators (collectl on slurm nodes are killed by the watchdog)
for host in ${LG_HOSTS[@]}; do
log info "killing collectl on $host"
if [[ $(hostname -f) = $host ]]; then
pkill -f lg_$host
else
if [[ $EXTERNAL_LG = true ]]; then
ssh $host pkill -f lg_$host
else
srun --nodelist=$host -N1 bash -c "pkill -f lg_$host"
fi
fi
done
}
start_toplog() {
# start toplog at the load generators
for host in ${LG_HOSTS[@]}; do
log info "starting toplog on $host"
if [[ $(hostname -f) = $host ]]; then
$SCALARIS_DIR/contrib/slurm/util/toplog.sh "$WD/$NAME" &
else
if [[ $EXTERNAL_LG = true ]]; then
ssh $host $SCALARIS_DIR/contrib/slurm/util/toplog.sh "$WD/$NAME" &
else
srun --nodelist=$host -N1 bash -c "$SCALARIS_DIR/contrib/slurm/util/toplog.sh "$WD/$NAME" &"
fi
fi
done
}
stop_toplog(){
# stop toplog on load generators
for host in ${LG_HOSTS[@]}; do
log info "killing toplog on $host"
if [[ $(hostname -f) = $host ]]; then
pkill -f toplog.sh
else
if [[ $EXTERNAL_LG = true ]]; then
ssh $host pkill -f toplog.sh
else
srun --nodelist=$host -N1 bash -c "pkill -f toplog.sh"
fi
fi
done
}
# Setup up a scalaris ring on with slurm on cumulus
start_scalaris() {
log info "starting scalaris..."
# setup environment
[[ -n $VMS_PER_NODE ]] && export VMS_PER_NODE
[[ -n $WATCHDOG_INTERVAL ]] && export WATCHDOG_INTERVAL
[[ -n $DHT_NODES_PER_VM ]] && export DHT_NODES_PER_VM
[[ -n $SHUFFLE_NODE_IDS ]] && export SHUFFLE_NODE_IDS
[[ -n $WD ]] && export WD
[[ -n $COLLECTL ]] && export COLLECTL
[[ -n $COLLECTL_DIR ]] && export COLLECTL_DIR
[[ -n $SCALARIS_LOCAL ]] && export SCALARIS_LOCAL
[[ -n $SCALARISCTL_PARAMS ]] && export SCALARISCTL_PARAMS
[[ -n $NAME ]] && export NAME
[[ -n $ERL_SCHED_FLAGS ]] && export ERL_SCHED_FLAGS
# start sbatch command and capture output
# the ${var:+...} expands only, if the variable is set and non-empty
RET=$( sbatch -A csr -o $WD/$NAME/slurm-%j.out \
${PARTITION:+-p $PARTITION} \
${NODES:+-N $NODES} \
${NODELIST:+ --nodelist=$NODELIST} \
${TIMEOUT:+ -t $TIMEOUT} \
basho-bench.slurm
)
# get the job id from the output of sbatch
REGEX="Submitted batch job ([[:digit:]]*)"
if [[ $RET =~ $REGEX ]]; then
SLURM_JOBID=${BASH_REMATCH[1]}
else
exit 1
fi
local nodes="$(($NODES*$VMS_PER_NODE*$DHT_NODES_PER_VM)) ($NODES*$VMS_PER_NODE*$DHT_NODES_PER_VM)"
log info "submitted batch job $SLURM_JOBID to start scalaris with $nodes"
}
wait_for_scalaris_startup() {
LOCKFILE="${WD}/${SLURM_JOBID}.lock"
echo -n "$(tag info) waiting for scalaris to start"
timer=0
until [[ -e $LOCKFILE ]]; do
((timer++))
# display status every 5 seconds
if ((timer%5==0)); then
echo -ne "."
fi
sleep 1
done
echo ": ok (${timer}s)"
}
test_ring() {
local retries=$1
local res=0
[[ -z "$retries" ]] && retries=0
local ringsize=$((NODES*VMS_PER_NODE*DHT_NODES_PER_VM))
log info "testing ring"
erl -setcookie "chocolate chip cookie" -name bench_ -noinput -eval \
"A = rpc:call($FIRST, admin, number_of_nodes, []),
case A of
$ringsize -> halt(0);
_ -> io:format('number_of_nodes: ~p~n', [A]), halt(1)
end."
res=$((res+=$?))
erl -setcookie "chocolate chip cookie" -name bench_ -noinput -eval \
"A = rpc:call($FIRST, admin, check_ring, []),
case A of
ok -> halt(0);
Error -> io:format('check_ring: ~p~n', [Error]), halt(1)
end."
res=$((res+=$?))
erl -setcookie "chocolate chip cookie" -name bench_ -noinput -eval \
"A = rpc:call($FIRST, admin, check_ring_deep, []),
case A of
ok -> halt(0);
Error -> io:format('check_ring_deep: ~p~n', [Error]), halt(1)
end."
res=$((res+=$?))
if [[ $res -eq 0 ]]; then
log info "testing ring was successful"
else
if (( retries++ >= 2 )); then
log error "test_ring failed, after $retries retries. Aborting..."
shutdown
kill_bbench
exit 1
else
local sleeptime=20
log error "testing ring failed, retrying in $sleeptime seconds..."
sleep $sleeptime
test_ring $retries
fi
fi
}
stop_scalaris(){
log info "stopping scalaris"
scancel $SLURM_JOBID
}
build_hostlist() {
local counter=0
declare -a hosts
NODELIST=$(scontrol show job $SLURM_JOBID | grep " NodeList" | awk -F= '{print $2}')
for host in $(scontrol show hostnames $NODELIST); do
counter=$(($counter+1))
max_port=$((14194+VMS_PER_NODE))
for port in $(seq 14195 $max_port); do
if (( ${#hosts[@]} == 0 )); then
hosts+=("'first@${host}.zib.de'")
else
hosts+=("'node${port}@${host}.zib.de'")
fi
done
done
FIRST=${hosts[0]}
HOSTLIST=$(join "${hosts[@]}")
}
join() {
local IFS=","
echo "$*"
}
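# Example (illustrative): join a b c  ->  "a,b,c"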
write_config() {
local max_key=$((NODES*2**17))
local config=${WD}/${NAME}/lg${PARALLEL_ID}.config
cat > $config <<EOF
{rng_seed, $RANDOM_SEED}.
{mode, $MODE}.
{duration, $DURATION}.
{concurrent, $WORKERS_PER_LG}.
{operations, [{put,2}, {get, 8}]}.
{driver, basho_bench_driver_scalaris}.
{key_generator, {int_to_str, {uniform_int, $max_key}}}.
%%{key_generator, {int_to_str, {uniform_int, 16#FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF}}}.
%% size in Bytes
{value_generator, {fixed_bin, $VALUE_SIZE}}.
{scalarisclient_mynode, ['benchclient${PARALLEL_ID}']}.
{scalarisclient_cookie, 'chocolate chip cookie'}.
{report_interval, 1}.
{log_level, info}.
{scalarisclient_nodes, [$HOSTLIST]}.
EOF
}
run_bbench() {
declare -a lg_pids # the process id's of the load generators
local no_of_hosts=${#LG_HOSTS[*]}
local c # counter for indexing the LG_HOSTS array
for i in $(seq 1 $LOAD_GENERATORS); do
PARALLEL_ID=$i
RANDOM_SEED="{$((7*$i)), $((11*$i)), $((5*$i))}"
write_config
# build args. The ${var:+...} expands only, if the variable is set and non-empty
local arg1=${SLURM_JOBID:+"--jobid=$SLURM_JOBID"}
local arg2=${PARALLEL_ID:+"--parallel_id=$PARALLEL_ID"}
local arg3=${WD:+"--wd=$WD"}
local arg4=${NAME:+"--name=$NAME"}
local arg5=${BBENCH_DIR:+"--bbdir=$BBENCH_DIR"}
local arg6=${RESULT_DIR:+"--rdir=$RESULT_DIR"}
declare -a args=($arg1 $arg2 $arg3 $arg4 $arg5 $arg6)
# get current host and (post)increment counter
host=${LG_HOSTS[$((c++ % no_of_hosts))]}
if [[ $(hostname -f) = $host ]]; then
$SCALARIS_DIR/contrib/slurm/util/start-basho-bench.sh ${args[@]} &
lg_pids[$i]=$!
else
# using -t (pseudo-tty allocation) allows to terminate children of the
# ssh cmd at the remote node through kill the ssh process at the local node
if [[ $EXTERNAL_LG = true ]]; then
ssh -t -t $host $SCALARIS_DIR/contrib/slurm/util/start-basho-bench.sh ${args[@]} &
else
ARGSTRING=$(printf '%s ' "${args[@]}")
echo "Starting LG on $host (Total nummber of LG hosts $no_of_hosts)"
echo "Argstring = $ARGSTRING"
srun --nodelist=$host -N1 bash -c "$SCALARIS_DIR/contrib/slurm/util/start-basho-bench.sh $ARGSTRING" &
fi
lg_pids[$i]=$!
fi
done
# wait for load generators to finish
for pid in "${lg_pids[@]}"; do
wait $pid
done
}
is_lg_external() {
    if [ -n "${SLURM_NODELIST}" ]; then
EXTERNAL_LG=false
else
EXTERNAL_LG=true
fi
}
check_result_dir() {
for host in ${LG_HOSTS[@]}; do
local res=0
if [[ $(hostname -f) = $host ]]; then
$SCALARIS_DIR/contrib/slurm/util/checkdir.sh $RESULT_DIR
res=$((res+=$?))
else
if [[ $EXTERNAL_LG = true ]]; then
ssh -t -t $host "$SCALARIS_DIR/contrib/slurm/util/checkdir.sh $RESULT_DIR"
else
srun --nodelist=$host -N1 bash -c "$SCALARIS_DIR/contrib/slurm/util/checkdir.sh $RESULT_DIR"
fi
res=$((res+=$?))
fi
if [[ $res -ne 0 ]]; then
log error "Result dir ($RESULT_DIR) on $host not empty, aborting"
exit 1
fi
done
}
create_result_dir() {
for host in ${LG_HOSTS[@]}; do
log info "creating result dir on $host"
if [[ $(hostname -f) = $host ]]; then
mkdir -p $RESULT_DIR/$NAME
else
if [[ $EXTERNAL_LG = true ]]; then
ssh -t -t $host "bash -c \"mkdir -p $RESULT_DIR/$NAME\""
else
srun --nodelist=$host -N1 bash -c "mkdir -p $RESULT_DIR/$NAME"
fi
fi
done
}
collect_bbench_results() {
for host in ${LG_HOSTS[@]}; do
log info "collecting bbench results from $host"
if [[ $(hostname -f) = $host ]]; then
rsync -ayhx --progress $RESULT_DIR/ $WD/
if [[ $? == 0 ]]; then
log info "deleting $RESULT_DIR/$PREFIX* on $host"
rm -r $RESULT_DIR/$PREFIX*
fi
else
if [[ $EXTERNAL_LG = true ]]; then
ssh -t -t $host "bash -c \"rsync -ayhx --progress $RESULT_DIR/ $WD/\""
if [[ $? == 0 ]]; then
log info "deleting $RESULT_DIR/$PREFIX* on $host"
ssh -t -t $host "bash -c \"rm -r $RESULT_DIR/$PREFIX*\""
fi
else
srun --nodelist=$host -N1 bash -c "rsync -ayhx --progress $RESULT_DIR/ $WD/"
if [[ $? == 0 ]]; then
log info "deleting $RESULT_DIR/$PREFIX* on $host"
srun --nodelist=$host -N1 bash -c "rm -r $RESULT_DIR/$PREFIX*"
fi
fi
fi
done
}
kill_bbench(){
log info "killing bbench..."
# kill all load generators (or their ssh processes respectively)
for pid in "${lg_pids[@]}"; do
kill $pid
done
}
trap_cleanup(){
log info "received SIGTERM, cleaning up..."
kill_bbench
shutdown
# kill all remaining children of the current script
    PGID=$(ps -o pgid= $$ | grep -o '[0-9]*')
setsid kill -9 -- -$PGID
exit 1
}
shutdown(){
stop_scalaris
[[ $COLLECTL = true ]] && stop_collectl
[[ $TOPLOG = true ]] && stop_toplog
collect_bbench_results
rm_lockfile
}
rm_lockfile() {
# remove lockfile
local lockfile="${WD}/${SLURM_JOBID}.lock"
rm -f $lockfile
}
main
|
scalaris-team/scalaris
|
contrib/slurm/basho-bench.sh
|
Shell
|
apache-2.0
| 20,008 |
find . -name .git -exec rm {} \;
|
j-rivero/ihmc_valkyrie_ros-debian
|
debian/import-orig-script.bash
|
Shell
|
apache-2.0
| 33 |
#!/usr/bin/env bash
echo host start
|
ChaosXu/nerv
|
resources/scripts/nerv/compute/Host/start.sh
|
Shell
|
apache-2.0
| 37 |
#!/bin/bash
# ------------------------------------------------------------------
# build script
# install [jpetazzo/nsenter](https://github.com/jpetazzo/nsenter)
# ------------------------------------------------------------------
set -e
SUBJECT=y12docker-prq-dev
VERSION=0.1.0
USAGE="Usage: gc2mt.sh -vh args"
DOCKER='sudo docker'
DENTER='sudo docker-enter'
IMG=y12docker/prq:devtest
SEC_WAIT_BOOT=3
# --- Option processing --------------------------------------------
if [ $# == 0 ] ; then
echo "$USAGE"
exit 1;
fi
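# Example (hypothetical): stop any old container, rebuild the image, start a
# fresh container and run the nose tests against it:
#   ./gc2mt.sh -t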
function cleandocker {
$DOCKER rm $($DOCKER ps -a -q)
    $DOCKER rmi $($DOCKER images | grep "^<none>" | awk '{print $3}')
}
function build {
echo build a test image.
$DOCKER build -t $IMG .
}
function nosetests {
CID=$1
echo "[SystemTest] Container " $CID
echo "[SystemTest] boot and wait ...."
secs=$SEC_WAIT_BOOT
while [ $secs -gt 0 ]; do
echo -ne "$secs\033[0K\r"
sleep 1
: $((secs--))
done
$DENTER $CID bash /app/gc2mt.sh -c
}
function nosetests_in_container {
cd /app && /usr/local/bin/nosetests -v
}
function run {
local CID=$($DOCKER run -p 8980:8080 -d $IMG)
echo "$CID"
}
function stop {
$DOCKER ps
# set -e exit here
set +e
FOO=$($DOCKER ps | grep $IMG)
if [ ! -z "$FOO" ]; then
echo stop a test image.
echo FOO=$FOO
echo "$FOO" | awk '{print $1}' | xargs $DOCKER stop
echo [AFTER] stop the container
$DOCKER ps
else
echo any image named $IMG not fund.
fi
set -e
}
while getopts ":vhxbrstc" optname; do
case "$optname" in
"v")
echo "Version $VERSION"
exit 0;
;;
"x")
echo "clean all stopped containers and all untagged images"
cleandocker
exit 0;
;;
"b")
build
exit 0;
;;
"r")
run
exit 0;
;;
"t")
stop
build
CID=$(run)
$DOCKER ps
nosetests $CID
exit 0;
;;
"s")
stop
exit 0;
;;
"c")
nosetests_in_container
exit 0;
;;
"h")
echo "$USAGE"
exit 0;
;;
"?")
echo "Unknown option $OPTARG"
exit 0;
;;
":")
echo "No argument value for option $OPTARG"
exit 0;
;;
*)
echo "Unknown error while processing options"
exit 0;
;;
esac
done
shift "$($OPTIND - 1)"
# -----------------------------------------------------------------
LOCK_FILE=/tmp/${SUBJECT}.lock
if [ -f "$LOCK_FILE" ]; then
echo "Script is already running"
exit
fi
# -----------------------------------------------------------------
trap 'rm -f $LOCK_FILE' EXIT
touch $LOCK_FILE
|
y12studio/y12docker
|
prq/gc2mt.sh
|
Shell
|
apache-2.0
| 2,637 |
#!/bin/bash
# Given a file path, scp it to all the VMs running (to the home path)
if [ "$#" -ne 1 ]; then
echo "Illegal number of parameters; Please provide the path to the file as the parameter;"
exit 1
fi
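# Example (hypothetical): copy a results archive to 4 running VMs
#   num_vms=4 ./scp_file_to_vms.sh ./results.tar.gz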
# number of running VMs; expected to be set/exported by the caller
num_vms=${num_vms:?"num_vms must be set to the number of running VMs"}
file_to_scp=$1
port=3022
for i in `seq 1 $num_vms`; do
scp -P $port $file_to_scp [email protected]:~/
port=`expr $port + 1`
done
|
utsaslab/crashmonkey
|
vm_scripts/scp_file_to_vms.sh
|
Shell
|
apache-2.0
| 364 |
#!/usr/bin/env bash
set -euxo pipefail
dir=$(cd -P -- "$(dirname -- "$0")" && pwd -P)
rm -rf ${dir}/irma_configuration_invalid/irma-demo
rm -rf ${dir}/irma_configuration_updated/irma-demo
rm -rf ${dir}/irma_configuration_updated/test-requestors
cp -r ${dir}/irma_configuration/irma-demo ${dir}/irma_configuration_invalid/
cp -r ${dir}/irma_configuration/irma-demo ${dir}/irma_configuration_updated/
cp -r ${dir}/irma_configuration/test-requestors ${dir}/irma_configuration_updated/
irma scheme sign ${dir}/irma_configuration/irma-demo/sk.pem ${dir}/irma_configuration/irma-demo
irma scheme sign ${dir}/irma_configuration/test/sk.pem ${dir}/irma_configuration/test
irma scheme sign ${dir}/irma_configuration/test-requestors/sk.pem ${dir}/irma_configuration/test-requestors
# ensure the changed schemes receive a higher timestamp
sleep 1
# restore changes to studentCard and stempas credtype, then resign
git checkout -- ${dir}/irma_configuration_updated/irma-demo/RU/Issues/studentCard/description.xml
git checkout -- ${dir}/irma_configuration_updated/irma-demo/stemmen/Issues/stempas/description.xml
irma scheme sign ${dir}/irma_configuration_updated/irma-demo/sk.pem ${dir}/irma_configuration_updated/irma-demo
# restore changes to requestor scheme, then resign
git checkout -- ${dir}/irma_configuration_updated/test-requestors/requestors.json
irma scheme sign ${dir}/irma_configuration_updated/test-requestors/sk.pem ${dir}/irma_configuration_updated/test-requestors
# resign, then restore changes to studentCard credtype, invalidating the scheme
irma scheme sign ${dir}/irma_configuration_invalid/irma-demo/sk.pem ${dir}/irma_configuration_invalid/irma-demo
git checkout -- ${dir}/irma_configuration_invalid/irma-demo/RU/Issues/studentCard/description.xml
|
credentials/irmago
|
testdata/makeschemes.sh
|
Shell
|
apache-2.0
| 1,767 |
#!/bin/bash
#
# Copyright (c) 2001-2018 Primeton Technologies, Ltd.
# All rights reserved.
#
# author: ZhongWen Li (mailto:[email protected])
#
if [ -z ${TOMCAT_HOME} ]; then
echo "[`date`] [ERROR] \${TOMCAT_HOME} not found."
exit 1
fi
if [ ! -d ${TOMCAT_HOME} ]; then
echo "[`date`] [ERROR] TOMCAT_HOME=${TOMCAT_HOME} not exists."
exit 1
fi
if [ -z "${JAVA_VM_MEM_MIN}" ]; then
JVM_MIN_MEM=512
fi
if [ -z "${JAVA_VM_MEM_MAX}" ]; then
JVM_MAX_MEM=1024
fi
if [ ${JAVA_VM_MEM_MIN} -gt ${JAVA_VM_MEM_MAX} ]; then
echo "[`date`] [WARN ] JAVA_VM_MEM_MIN is bigger than JAVA_VM_MEM_MAX"
JAVA_VM_MEM_MAX=${JAVA_VM_MEM_MIN}
fi
JAVA_OPTS="${JAVA_OPTS} -Xms${JAVA_VM_MEM_MIN}m -Xmx${JAVA_VM_MEM_MAX}m"
JAVA_OPTS="${JAVA_OPTS} -Dfile.encoding=utf-8 -Duser.timezone=Asia/Shanghai"
# Java Remote Debug Enabled
if [ "YESX" == "${JAVA_DEBUG}X" ] || [ "yesX" == "${JAVA_DEBUG}X" ]; then
JAVA_OPTS="${JAVA_OPTS} -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8888"
fi
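# Example (hypothetical): run the container with a 2 GB heap and remote debugging enabled
#   docker run -e JAVA_VM_MEM_MIN=1024 -e JAVA_VM_MEM_MAX=2048 -e JAVA_DEBUG=YES <image>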
# Tomcat ThreadPool
if [ -z "${TOMCAT_MAX_THREADS}" ]; then
TOMCAT_MAX_THREADS=10000
fi
if [ -z "${TOMCAT_MIN_SEPARE_THREADS}" ]; then
TOMCAT_MIN_SEPARE_THREADS=10
fi
# Fix server.xml
if [ -f ${TOMCAT_HOME}/conf/server-template.xml ]; then
\cp -f ${TOMCAT_HOME}/conf/server-template.xml ${TOMCAT_HOME}/conf/server.xml
sed -i -e "s/TOMCAT_MAX_THREADS/${TOMCAT_MAX_THREADS}/g" ${TOMCAT_HOME}/conf/server.xml
sed -i -e "s/TOMCAT_MIN_SEPARE_THREADS/${TOMCAT_MIN_SEPARE_THREADS}/g" ${TOMCAT_HOME}/conf/server.xml
else
echo "[`date`] [WARN ] Template file ${TOMCAT_HOME}/conf/server-template.xml not found."
fi
export JAVA_OPTS
if [ `ls ${TOMCAT_HOME}/webapps/ | wc -l` -eq 0 ]; then
echo "[`date`] [WARN ] None application found in ${TOMCAT_HOME}/webapps, Copy ROOT to it."
\cp -rf ${TOMCAT_HOME}/backup/ROOT ${TOMCAT_HOME}/webapps/
fi
${TOMCAT_HOME}/bin/catalina.sh run "$@"
|
primeton-cloud-ltd/devops-registry
|
docker/tomcat/resources/entrypoint.sh
|
Shell
|
apache-2.0
| 1,877 |
# -----------------------------------------------------------------------------
#
# Package : raw-body
# Version : 2.3.2
# Source repo : https://github.com/stream-utils/raw-body
# Tested on : RHEL 8.3
# Script License: Apache License, Version 2 or later
# Maintainer : BulkPackageSearch Automation <[email protected]>
#
# Disclaimer: This script has been tested in root mode on given
# ========== platform using the mentioned version of the package.
# It may not work as expected with newer versions of the
# package and/or distribution. In such case, please
# contact "Maintainer" of this script.
#
# ----------------------------------------------------------------------------
PACKAGE_NAME=raw-body
PACKAGE_VERSION=2.3.2
PACKAGE_URL=https://github.com/stream-utils/raw-body
yum -y update && yum install -y yum-utils nodejs nodejs-devel nodejs-packaging npm python38 python38-devel ncurses git gcc gcc-c++ libffi libffi-devel ncurses git jq make cmake
yum-config-manager --add-repo http://rhn.pbm.ihost.com/rhn/latest/8.3Server/ppc64le/appstream/
yum-config-manager --add-repo http://rhn.pbm.ihost.com/rhn/latest/8.3Server/ppc64le/baseos/
yum-config-manager --add-repo http://rhn.pbm.ihost.com/rhn/latest/7Server/ppc64le/optional/
yum install -y firefox liberation-fonts xdg-utils && npm install n -g && n latest && npm install -g npm@latest && export PATH="$PATH" && npm install --global yarn grunt-bump xo testem acorn
OS_NAME=`python3 -c "os_file_data=open('/etc/os-release').readlines();os_info = [i.replace('PRETTY_NAME=','').strip() for i in os_file_data if i.startswith('PRETTY_NAME')];print(os_info[0])"`
HOME_DIR=`pwd`
if ! git clone $PACKAGE_URL $PACKAGE_NAME; then
echo "------------------$PACKAGE_NAME:clone_fails---------------------------------------"
echo "$PACKAGE_URL $PACKAGE_NAME" > /home/tester/output/clone_fails
echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | GitHub | Fail | Clone_Fails" > /home/tester/output/version_tracker
exit 0
fi
cd $HOME_DIR/$PACKAGE_NAME
git checkout $PACKAGE_VERSION
PACKAGE_VERSION=$(jq -r ".version" package.json)
# run the test command from test.sh
if ! (npm install && npm audit fix && npm audit fix --force); then
echo "------------------$PACKAGE_NAME:install_fails-------------------------------------"
echo "$PACKAGE_URL $PACKAGE_NAME"
echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | GitHub | Fail | Install_Fails"
exit 0
fi
cd $HOME_DIR/$PACKAGE_NAME
if ! npm test; then
echo "------------------$PACKAGE_NAME:install_success_but_test_fails---------------------"
echo "$PACKAGE_URL $PACKAGE_NAME"
echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | GitHub | Fail | Install_success_but_test_Fails"
exit 0
else
echo "------------------$PACKAGE_NAME:install_&_test_both_success-------------------------"
echo "$PACKAGE_URL $PACKAGE_NAME"
echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | GitHub | Pass | Both_Install_and_Test_Success"
exit 0
fi
|
ppc64le/build-scripts
|
r/raw-body/raw-body_rhel_8.3.sh
|
Shell
|
apache-2.0
| 3,060 |
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
function getInstallProperty() {
local propertyName=$1
local propertyValue=""
for file in "${INSTALL_ARGS}"
do
if [ -f "${file}" ]
then
propertyValue=`grep "^${propertyName}[ \t]*=" ${file} | awk -F= '{ sub("^[ \t]*", "", $2); sub("[ \t]*$", "", $2); print $2 }'`
if [ "${propertyValue}" != "" ]
then
break
fi
fi
done
echo ${propertyValue}
}
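# Illustrative use of getInstallProperty (not executed here): given a line such as
#   ranger_base_dir = /opt/ranger
# in install.properties, the following call would print "/opt/ranger":
#   getInstallProperty 'ranger_base_dir'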
if [[ -z $1 ]]; then
echo "Invalid argument [$1];"
echo "Usage: Only start | stop | restart | version, are supported."
exit;
fi
action=$1
action=`echo $action | tr '[:lower:]' '[:upper:]'`
realScriptPath=`readlink -f $0`
realScriptDir=`dirname $realScriptPath`
cd $realScriptDir
cdir=`pwd`
ranger_usersync_max_heap_size=1g
for custom_env_script in `find ${cdir}/conf/ -name "ranger-usersync-env*"`; do
if [ -f $custom_env_script ]; then
. $custom_env_script
fi
done
if [ -z "${USERSYNC_PID_DIR_PATH}" ]; then
USERSYNC_PID_DIR_PATH=/var/run/ranger
fi
if [ -z "${USERSYNC_PID_NAME}" ]
then
USERSYNC_PID_NAME=usersync.pid
fi
if [ ! -d "${USERSYNC_PID_DIR_PATH}" ]
then
mkdir -p $USERSYNC_PID_DIR_PATH
chmod 660 $USERSYNC_PID_DIR_PATH
fi
# User can set their own pid path using USERSYNC_PID_DIR_PATH and
# USERSYNC_PID_NAME variable before calling the script. The user can modify
# the value of the USERSYNC_PID_DIR_PATH in ranger-usersync-env-piddir.sh to
# change pid path and set the value of USERSYNC_PID_NAME to change the
# pid file.
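# For example (values are illustrative, not shipped defaults), exporting
#   USERSYNC_PID_DIR_PATH=/opt/ranger/run
#   USERSYNC_PID_NAME=ranger-usersync.pid
# before invoking this script would place the pid file at
# /opt/ranger/run/ranger-usersync.pid instead of /var/run/ranger/usersync.pid.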
pidf=${USERSYNC_PID_DIR_PATH}/${USERSYNC_PID_NAME}
if [ -z "${UNIX_USERSYNC_USER}" ]; then
UNIX_USERSYNC_USER=ranger
fi
INSTALL_ARGS="${cdir}/install.properties"
RANGER_BASE_DIR=$(getInstallProperty 'ranger_base_dir')
JAVA_OPTS=" ${JAVA_OPTS} -XX:MetaspaceSize=100m -XX:MaxMetaspaceSize=200m -Xmx${ranger_usersync_max_heap_size} -Xms1g "
if [ "${action}" == "START" ]; then
#Export JAVA_HOME
if [ -f ${cdir}/conf/java_home.sh ]; then
. ${cdir}/conf/java_home.sh
fi
if [ "$JAVA_HOME" != "" ]; then
export PATH=$JAVA_HOME/bin:$PATH
fi
cp="${cdir}/dist/*:${cdir}/lib/*:${cdir}/conf:${RANGER_USERSYNC_HADOOP_CONF_DIR}/*"
cd ${cdir}
if [ -z "${logdir}" ]; then
logdir=${cdir}/logs
fi
if [ -z "${USERSYNC_CONF_DIR}" ]; then
USERSYNC_CONF_DIR=${cdir}/conf
fi
if [ -f "$pidf" ] ; then
pid=`cat $pidf`
if ps -p $pid > /dev/null
then
echo "Apache Ranger Usersync Service is already running [pid={$pid}]"
exit ;
else
rm -rf $pidf
fi
fi
SLEEP_TIME_AFTER_START=5
nohup java -Dproc_rangerusersync -Dlog4j.configuration=file:${USERSYNC_CONF_DIR}/log4j.properties ${JAVA_OPTS} -Duser=${USER} -Dhostname=${HOSTNAME} -Dlogdir="${logdir}" -cp "${cp}" org.apache.ranger.authentication.UnixAuthenticationService -enableUnixAuth > ${logdir}/auth.log 2>&1 &
VALUE_OF_PID=$!
echo "Starting Apache Ranger Usersync Service"
sleep $SLEEP_TIME_AFTER_START
if ps -p $VALUE_OF_PID > /dev/null
then
echo $VALUE_OF_PID > ${pidf}
chown ${UNIX_USERSYNC_USER} ${pidf}
chmod 660 ${pidf}
pid=`cat $pidf`
echo "Apache Ranger Usersync Service with pid ${pid} has started."
else
echo "Apache Ranger Usersync Service failed to start!"
fi
exit;
elif [ "${action}" == "STOP" ]; then
WAIT_TIME_FOR_SHUTDOWN=2
NR_ITER_FOR_SHUTDOWN_CHECK=15
if [ -f "$pidf" ] ; then
pid=`cat $pidf` > /dev/null 2>&1
echo "Getting pid from $pidf .."
else
pid=`ps -ef | grep java | grep -- '-Dproc_rangerusersync' | grep -v grep | awk '{ print $2 }'`
if [ "$pid" != "" ];then
echo "pid file($pidf) not present, taking pid from \'ps\' command.."
else
echo "Apache Ranger Usersync Service is not running"
        exit
fi
fi
echo "Found Apache Ranger Usersync Service with pid $pid, Stopping it..."
kill -15 $pid
for ((i=0; i<$NR_ITER_FOR_SHUTDOWN_CHECK; i++))
do
sleep $WAIT_TIME_FOR_SHUTDOWN
if ps -p $pid > /dev/null ; then
echo "Shutdown in progress. Will check after $WAIT_TIME_FOR_SHUTDOWN secs again.."
continue;
else
break;
fi
done
# if process is still around, use kill -9
if ps -p $pid > /dev/null ; then
echo "Initial kill failed, getting serious now..."
kill -9 $pid
fi
sleep 1 #give kill -9 sometime to "kill"
if ps -p $pid > /dev/null ; then
echo "Wow, even kill -9 failed, giving up! Sorry.."
exit 1
else
rm -rf $pidf
echo "Apache Ranger Usersync Service with pid ${pid} has been stopped."
fi
exit;
elif [ "${action}" == "RESTART" ]; then
echo "Restarting Apache Ranger Usersync"
${cdir}/ranger-usersync-services.sh stop
${cdir}/ranger-usersync-services.sh start
exit;
elif [ "${action}" == "VERSION" ]; then
cd ${cdir}/lib
java -cp ranger-util-*.jar org.apache.ranger.common.RangerVersionInfo
exit
else
echo "Invalid argument [$1];"
echo "Usage: Only start | stop | restart | version, are supported."
exit;
fi
|
gzsombor/ranger
|
unixauthservice/scripts/ranger-usersync-services.sh
|
Shell
|
apache-2.0
| 5,734 |
#!/bin/bash
set -exo pipefail
readonly PACKAGES=$(/usr/share/google/get_metadata_value attributes/CONDA_PACKAGES || true)
function main() {
if [[ -z "${PACKAGES}" ]]; then
echo "ERROR: Must specify CONDA_PACKAGES metadata key"
exit 1
fi
conda install ${PACKAGES}
}
main
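# Illustrative invocation (bucket path and package list are placeholders): the
# CONDA_PACKAGES metadata key read above is supplied when the cluster is created, e.g.
#   gcloud dataproc clusters create my-cluster \
#     --initialization-actions gs://my-bucket/conda-install.sh \
#     --metadata 'CONDA_PACKAGES=numpy pandas scikit-learn'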
|
dennishuo/dataproc-initialization-actions
|
python/conda-install.sh
|
Shell
|
apache-2.0
| 290 |
#!/bin/bash
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Updates maven POM artifacts with version.
# Usage
# ./maven/maven-pom-version.sh VERSION
# Example
# ./maven/maven-pom-version.sh 0.14.1
if [ "$1" = "" ]; then
echo "ERROR: heron version missing. Usage './maven/maven-pom-version.sh VERSION' "
exit 1
fi
cat ./maven/heron-no-kryo.template.pom | \
sed "s/VERSION/$1/g" | \
sed "s/ARTIFACT_ID/heron-api/g" | \
sed "s/NAME/heron-api/g" | \
sed "s/DESCRIPTION/Heron API/g" \
>> ./heron-api-$1.pom
cat ./maven/heron-no-kryo.template.pom | \
sed "s/VERSION/$1/g" | \
sed "s/ARTIFACT_ID/heron-spi/g" | \
sed "s/NAME/heron-spi/g" | \
sed "s/DESCRIPTION/Heron SPI/g" \
>> ./heron-spi-$1.pom
cat ./maven/heron-with-kryo.template.pom | \
sed "s/VERSION/$1/g" | \
sed "s/ARTIFACT_ID/heron-storm/g" | \
sed "s/NAME/heron-storm/g" | \
sed "s/DESCRIPTION/Heron Storm/g" \
>> ./heron-storm-$1.pom
cat ./maven/heron-with-kryo.template.pom | \
sed "s/VERSION/$1/g" | \
sed "s/ARTIFACT_ID/heron-simulator/g" | \
sed "s/NAME/heron-simulator/g" | \
sed "s/DESCRIPTION/Heron Simulator/g" \
>> ./heron-simulator-$1.pom
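# Hypothetical excerpt of what the template POMs are assumed to contain for the
# substitutions above to work (the real templates hold the full POM):
#   <artifactId>ARTIFACT_ID</artifactId>
#   <version>VERSION</version>
#   <name>NAME</name>
#   <description>DESCRIPTION</description>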
|
mycFelix/heron
|
release/maven/maven-pom-version.sh
|
Shell
|
apache-2.0
| 1,698 |
#!/bin/bash
versionPattern="[0-9]+\.[0-9]+\.[0-9]+(\-beta)*"
tagName=""
while [[ ! $tagName =~ $versionPattern ]]; do
read -p "Tag name (version): " tagName
done
if ! grep -Fq "<version>$tagName</version>" ./pom.xml; then
echo -e "$(tput setaf 1)WOW, wait! The provided tag does not match the pom.xml version$(tput sgr0)"
exit 1
fi
if GIT_DIR=./.git git rev-parse $tagName >/dev/null 2>&1
then
    echo -e "$(tput setaf 1)ERROR: Tag $tagName already exists$(tput sgr0)"
    exit 1
fi
if git tag $tagName; then
echo -e "$(tput setaf 2)Tag created$(tput sgr0)"
else
echo -e "$(tput setaf 1)ERROR: Failed to create tag locally$(tput sgr0)"
exit 1
fi
if git push origin $tagName; then
echo -e "$(tput setaf 2)Tag pushed$(tput sgr0)"
else
echo -e "$(tput setaf 1)ERROR: Failed to push tag$(tput sgr0)"
exit 1
fi
echo -e "$(tput setaf 2)Everything went fine :)$(tput sgr0)"
|
anthonyraymond/joal
|
publish.sh
|
Shell
|
apache-2.0
| 883 |
# Copyright 2018 The Kubernetes Authors.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/bin/bash
function usage() {
echo -e "Usage: ./script.sh -n myCluster -z myZone [-c] [-r]\n"
echo " -c, --cleanup Cleanup resources created by a previous run of the script"
echo " -n, --cluster-name Name of the cluster (Required)"
echo " -z, --zone Zone the cluster is in (Required)"
echo -e " --help Display this help and exit"
exit
}
function arg_check {
# Check that the necessary arguments were provided and that they are correct.
if [[ -z "$ZONE" || -z "$CLUSTER_NAME" ]];
then
usage
fi
# Get gcloud credentials for the cluster so kubectl works automatically.
# Any error/typo in the required command line args will be caught here.
gcloud container clusters get-credentials ${CLUSTER_NAME} --zone=${ZONE}
[[ $? -eq 0 ]] || error_exit "Error-bot: Command line arguments were incorrect. See above error for more info."
}
function error_exit {
echo -e "${RED}$1${NC}" >&2
exit 1
}
function cleanup() {
arg_check
# Get the project id associated with the cluster.
PROJECT_ID=`gcloud config list --format 'value(core.project)' 2>/dev/null`
# Cleanup k8s and GCP resources in same order they are created.
# Note: The GCP service account key needs to be manually cleaned up.
# Note: We don't delete the default-http-backend we created so that when the
# GLBC is restored on the GKE master, the addon manager does not try to create a
# new one.
kubectl delete clusterrolebinding one-binding-to-rule-them-all
kubectl delete -f yaml/rbac.yaml
kubectl delete configmap gce-config -n kube-system
gcloud iam service-accounts delete glbc-service-account@${PROJECT_ID}.iam.gserviceaccount.com
gcloud projects remove-iam-policy-binding ${PROJECT_ID} \
--member serviceAccount:glbc-service-account@${PROJECT_ID}.iam.gserviceaccount.com \
--role roles/compute.admin
kubectl delete secret glbc-gcp-key -n kube-system
kubectl delete -f yaml/glbc.yaml
# Ask if user wants to reenable GLBC on the GKE master.
while true; do
echo -e "${GREEN}Script-bot: Do you want to reenable GLBC on the GKE master?${NC}"
echo -e "${GREEN}Script-bot: Press [C | c] to continue.${NC}"
read input
case $input in
[Cc]* ) break;;
* ) echo -e "${GREEN}Script-bot: Press [C | c] to continue.${NC}"
esac
done
gcloud container clusters update ${CLUSTER_NAME} --zone=${ZONE} --update-addons=HttpLoadBalancing=ENABLED
echo -e "${GREEN}Script-bot: Cleanup successful! You need to cleanup your GCP service account key manually.${NC}"
exit 0
}
RED='\033[0;31m'
GREEN='\033[0;32m'
NC='\033[0m'
CLEANUP_HELP="Invoking me with the -c option will get you back to a clean slate."
NO_CLEANUP="Nothing has to be cleaned up :)"
PERMISSION_ISSUE="If this looks like a permissions problem, see the README."
# Parsing command line arguments
while [[ $# -gt 0 ]]
do
key="$1"
case $key in
--help)
usage
shift
shift
;;
-c|--cleanup)
cleanup
shift
shift
;;
-n|--cluster-name)
CLUSTER_NAME=$2
shift
shift
;;
-z|--zone)
ZONE=$2
shift
;;
*)
shift
;;
esac
done
arg_check
# Check that the gce.conf is valid for the cluster
NODE_INSTANCE_PREFIX=`cat gce.conf | grep node-instance-prefix | awk '{print $3}'`
[[ "$NODE_INSTANCE_PREFIX" == "gke-${CLUSTER_NAME}" ]] || error_exit "Error bot: --cluster-name does not match gce.conf. ${NO_CLEANUP}"
# Get the project id associated with the cluster.
PROJECT_ID=`gcloud config list --format 'value(core.project)' 2>/dev/null`
# Store the nodePort for default-http-backend
NODE_PORT=`kubectl get svc default-http-backend -n kube-system -o yaml | grep "nodePort:" | cut -f2- -d:`
# Get the GCP user associated with the current gcloud config.
GCP_USER=`gcloud config list --format 'value(core.account)' 2>/dev/null`
# Grant permission to current GCP user to create new k8s ClusterRole's.
kubectl create clusterrolebinding one-binding-to-rule-them-all --clusterrole=cluster-admin --user=${GCP_USER}
[[ $? -eq 0 ]] || error_exit "Error-bot: Issue creating a k8s ClusterRoleBinding. ${PERMISSION_ISSUE} ${NO_CLEANUP}"
# Create a new service account for glbc and give it a
# ClusterRole allowing it access to API objects it needs.
kubectl create -f yaml/rbac.yaml
[[ $? -eq 0 ]] || error_exit "Error-bot: Issue creating the RBAC spec. ${CLEANUP_HELP}"
# Inject gce.conf onto the user node as a ConfigMap.
# This config map is mounted as a volume in glbc.yaml
kubectl create configmap gce-config --from-file=gce.conf -n kube-system
[[ $? -eq 0 ]] || error_exit "Error-bot: Issue creating gce.conf ConfigMap. ${CLEANUP_HELP}"
# Create new GCP service acccount.
gcloud iam service-accounts create glbc-service-account \
--display-name "Service Account for GLBC"
[[ $? -eq 0 ]] || error_exit "Error-bot: Issue creating a GCP service account. ${PERMISSION_ISSUE} ${CLEANUP_HELP}"
# Give the GCP service account the appropriate roles.
gcloud projects add-iam-policy-binding ${PROJECT_ID} \
--member serviceAccount:glbc-service-account@${PROJECT_ID}.iam.gserviceaccount.com \
--role roles/compute.admin
[[ $? -eq 0 ]] || error_exit "Error-bot: Issue creating IAM role binding for service account. ${PERMISSION_ISSUE} ${CLEANUP_HELP}"
# Create key for the GCP service account.
gcloud iam service-accounts keys create \
key.json \
--iam-account glbc-service-account@${PROJECT_ID}.iam.gserviceaccount.com
[[ $? -eq 0 ]] || error_exit "Error-bot: Issue creating GCP service account key. ${PERMISSION_ISSUE} ${CLEANUP_HELP}"
# Store the key as a secret in k8s. This secret is mounted
# as a volume in glbc.yaml
kubectl create secret generic glbc-gcp-key --from-file=key.json -n kube-system
if [[ $? -eq 1 ]];
then
error_exit "Error-bot: Issue creating a k8s secret from GCP service account key. ${PERMISSION_ISSUE} ${CLEANUP_HELP}"
fi
rm key.json
# Turn off the glbc running on the GKE master. This will not only delete the
# glbc pod, but it will also delete the default-http-backend
# deployment + service.
gcloud container clusters update ${CLUSTER_NAME} --zone=${ZONE} --update-addons=HttpLoadBalancing=DISABLED
[[ $? -eq 0 ]] || error_exit "Error-bot: Issue turning of GLBC. ${PERMISSION_ISSUE} ${CLEANUP_HELP}"
# Approximate amount of time it takes the API server to start accepting all
# requests.
sleep 90
# In case the previous sleep was not enough, prompt user so that they can choose
# when to proceed.
while true; do
echo -e "${GREEN}Script-bot: Before proceeding, please ensure your API server is accepting all requests.
Failure to do so may result in the script creating a broken state."
echo -e "${GREEN}Script-bot: Press [C | c] to continue.${NC}"
read input
case $input in
[Cc]* ) break;;
* ) echo -e "${GREEN}Script-bot: Press [C | c] to continue.${NC}"
esac
done
# Recreate the default-http-backend k8s service with the same NodePort as the
# service which was removed when turning of the glbc previously. This is to
# ensure that a brand new NodePort is not created.
# Wait till old service is removed
while true; do
kubectl get svc -n kube-system | grep default-http-backend &>/dev/null
if [[ $? -eq 1 ]];
then
break
fi
sleep 5
done
# Wait till old glbc pod is removed
while true; do
kubectl get pod -n kube-system | grep default-backend &>/dev/null
if [[ $? -eq 1 ]];
then
break
fi
sleep 5
done
# Recreates the deployment and service for the default backend.
sed -i "/name: http/a \ \ \ \ nodePort: ${NODE_PORT}" yaml/default-http-backend.yaml
kubectl create -f yaml/default-http-backend.yaml
if [[ $? -eq 1 ]];
then
# Prompt the user to finish the last steps by themselves. We don't want to
# have to cleanup and start all over again if we are this close to finishing.
error_exit "Error-bot: Issue starting default backend. ${PERMISSION_ISSUE}. We are so close to being done so just manually start the default backend with NodePort: ${NODE_PORT} and create glbc.yaml when ready"
fi
# Startup glbc
kubectl create -f yaml/glbc.yaml
[[ $? -eq 0 ]] || manual_glbc_provision
if [[ $? -eq 1 ]];
then
# Same idea as above, although this time we only need to prompt the user to start the glbc.
  error_exit "Error-bot: Issue starting GLBC. ${PERMISSION_ISSUE}. We are so close to being done so just manually create glbc.yaml when ready"
fi
# Do a final verification that the NodePort stayed the same for the
# default-http-backend.
NEW_NODE_PORT=`kubectl get svc default-http-backend -n kube-system -o yaml | grep "nodePort:" | cut -f2- -d:`
[[ "$NEW_NODE_PORT" == "$NODE_PORT" ]] || error_exit "Error-bot: The NodePort for the new default-http-backend service is different than the original. Please recreate this service with NodePort: ${NODE_PORT} or traffic to this service will time out."
echo -e "${GREEN}Script-bot: I'm done!${NC}"
|
GoogleCloudPlatform/k8s-multicluster-ingress
|
vendor/k8s.io/ingress-gce/deploy/glbc/script.sh
|
Shell
|
apache-2.0
| 9,396 |
#!/usr/bin/env bash
set -o nounset
set -o pipefail
# By default, agents are provisioned in parallel during boot machine provisioning.
# The following agent provisioning should only run if the boot machine provisioning has already occurred.
# This ready check validates that the boot machine is ready and not just being impersonated by DNS hijacking.
if [ "$(curl --fail --location --max-redir 0 --silent http://boot.dcos/ready)" != "ok" ]; then
echo "Skipping DC/OS private agent install (boot machine will provision in parallel)"
exit 0
fi
set -o errexit
echo ">>> Installing DC/OS slave"
curl --fail --location --max-redir 0 --silent --show-error --verbose http://boot.dcos/dcos_install.sh | bash -s -- slave
echo ">>> Executing DC/OS Postflight"
dcos-postflight
if [ -n "${DCOS_TASK_MEMORY:-}" ]; then
echo ">>> Setting Mesos Memory: ${DCOS_TASK_MEMORY} (role=*)"
mesos-memory ${DCOS_TASK_MEMORY}
echo ">>> Restarting Mesos Agent"
systemctl stop dcos-mesos-slave.service
rm -f /var/lib/mesos/slave/meta/slaves/latest
systemctl start dcos-mesos-slave.service --no-block
fi
|
timcharper/dcos-vagrant
|
provision/bin/type-agent-private.sh
|
Shell
|
apache-2.0
| 1,097 |
_dl_cmd help "list available commands and their help"
_dl_help () {
echo "$HELP"
}
|
jpetazzo/dockerlite
|
lib/dockerlite-help.sh
|
Shell
|
apache-2.0
| 87 |
#!/bin/sh
execpath=$(dirname "$0")
if ! which psql >/dev/null; then
echo "Postgres isn't installed! Exiting...";
exit 1;
fi
# Set up database user. Run this script immediately after cloning the codebase and before rake db:setup.
echo "\nYour db name:"
read dbname;
echo "\nYour db username:"
read username;
echo "\nWould you like a password generated for you? (y or n)"
read passgen;
if [ "$passgen" = 'y' ] ; then
timestamp=$(date +%s)
hashstart=$dbname$timestamp
password=$(md5 -qs $hashstart);
else
echo "\nYour db password:"
read password;
fi;
createuser $username --login --createdb;
if [ $? -eq 0 ]; then
echo "$username created!"
else
echo "$username not created!"
fi
cp -f $execpath/pg.db.create.sql $execpath/pg.db.$dbname.create.sql
contents=$(sed -e s/DBNAME/"$dbname"/g -e s/USERNAME/"$username"/g -e s/PASSWD/"$password"/g $execpath/pg.db.$dbname.create.sql)
echo "$contents" > $execpath/pg.db.$dbname.create.sql
# sed -e "s|{dbname}|$dbname|g" ./pg.db.$dbname.create.sql
# sed -e "s|{username}|$username|g" ./pg.db.$dbname.create.sql
# sed -e "s|{password}|$password|g" ./pg.db.$dbname.create.sql
echo "Recreating the database '$dbname' in postgres."
# export PGPASSWORD="$password"
psql -h localhost -U postgres -a -f $execpath/pg.db.$dbname.create.sql
if [ $? -eq 0 ]; then
echo "Password to $dbname with user $username is '$password'"
echo "#!/bin/sh\n\necho \"Recreating the database '$dbname' in postgres.\"\npsql -h localhost -U postgres -a -f $execpath/pg.db.$dbname.create.sql" > $execpath/pg.db.$dbname.create.sh
chmod 755 $execpath/pg.db.$dbname.create.sh
else
echo "$dbname not created!"
fi
|
storjarn/node-toolbox
|
tools/db/pg.db.create.sh
|
Shell
|
apache-2.0
| 1,687 |
/system/bin/logcat -v time -b radio >> /data/rlog
|
baidurom/devices-onex
|
vendor/system/xbin/rlog.sh
|
Shell
|
apache-2.0
| 50 |
#!/bin/bash
namebase="tk"
topicbase="sf"
broker="a81:9092"
taskcfgfile="tktemp.json"
for i in $(seq 1 $1); do
name=${namebase}${i}
topic=${topicbase}${i}
cat > ${taskcfgfile} << ZZZ
{
"id": "/${name}",
"cmd": "java -cp \$MESOS_SANDBOX/rt-jar-with-dependencies.jar org.jennings.rt.source.tcp.TcpKafka \$PORT0 ${broker} ${topic} \$PORT1",
"container": {
"type": "MESOS"
},
"cpus": 0.5,
"disk": 0,
"fetch": [
{
"uri": "http://p1:81/apps/rt-jar-with-dependencies.jar",
"extract": false,
"executable": false,
"cache": false
}
],
"healthChecks": [
{
"gracePeriodSeconds": 300,
"intervalSeconds": 60,
"maxConsecutiveFailures": 3,
"portIndex": 1,
"timeoutSeconds": 20,
"delaySeconds": 15,
"protocol": "MESOS_HTTP",
"path": "/"
}
],
"instances": 1,
"mem": 1024,
"gpus": 0,
"networks": [
{
"mode": "host"
}
],
"portDefinitions": [
{
"labels": {
"VIP_0": "/${name}:5565"
},
"name": "default",
"protocol": "tcp"
},
{
"labels": {
"VIP_1": "/${name}:14000"
},
"name": "health",
"protocol": "tcp"
}
]
}
ZZZ
dcos marathon app add ${taskcfgfile}
done
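# Example run (illustrative): "./add-tk.sh 3" renders and submits three Marathon apps
# tk1..tk3, each reading Kafka topic sf1..sf3 via the broker defined above.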
|
david618/rt
|
scripts/add-tk.sh
|
Shell
|
apache-2.0
| 1,262 |
#!/bin/bash
bin_dir=$(realpath $(dirname "$0"))
cd "$bin_dir"
P='WW-ParserGen-PDA'
V='0.12.2'
PV="$P-$V"
rm -rf "/tmp/root/$PV"
mkdir -p "/tmp/root/$PV"
rm -f "$PV"
rsync --archive -v --relative $(<MANIFEST) "/tmp/root/$PV"
chown -R root:root "/tmp/root/$PV"
(cd /tmp/root; tar --gzip -cvf "$bin_dir/$PV.tar.gz" "$PV")
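# MANIFEST is assumed to be a plain list of paths relative to this directory, one per
# line (contents below are illustrative only):
#   lib/WW/ParserGen/PDA.pm
#   Makefile.PL
# rsync --relative recreates those relative paths under /tmp/root/$PV before tarring.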
|
wwdev/WW-ParserGen-PDA
|
make-tar.sh
|
Shell
|
artistic-2.0
| 324 |
#! /usr/bin/env bash
set -e
[[ -s $HOME/.rvm/scripts/rvm ]] && source $HOME/.rvm/scripts/rvm
if [[ -z ${RUBY_RUNTIME} ]]; then
RUBY_RUNTIME=$1
fi
if [[ -z ${RUBY_RUNTIME} ]]; then
RUBY_RUNTIME=1.8.7
fi
if [[ -n "${JOB_NAME}" ]] ; then
GEMSET=`echo ${JOB_NAME} | sed "s!/RUBY_RUNTIME=.*!!" | sed "s/ /_/g"`
else
GEMSET=`basename $PWD`
fi
if [[ -z "$(rvm list | grep $RUBY_RUNTIME)" ]] ; then
rvm install ${RUBY_RUNTIME} -C --with-iconv-dir=/usr/local
fi
rvm use ${RUBY_RUNTIME} && \
rvm --force gemset delete $GEMSET && \
rvm gemset create $GEMSET && \
rvm gemset use $GEMSET || 0
gem install rails --version "2.3.8" --no-ri --no-rdoc
rake -t test
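# Example of the gemset naming above (illustrative): with
#   JOB_NAME="easy_multipart/RUBY_RUNTIME=1.8.7"
# the sed strips the axis suffix, so GEMSET becomes "easy_multipart"; without JOB_NAME
# the gemset falls back to the current directory's basename.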
|
zephirworks/easy_multipart
|
hudson.sh
|
Shell
|
bsd-2-clause
| 673 |
#!/bin/sh
dst=~/.local/share/gnome-shell/extensions
edir=MyApps-in-UserMenu-bernard@pc9
ldir=libs
files="extension.js Entry.js Application.js metadata.json"
rm -fRv $dst/$edir
mkdir -p $dst/$edir
cp -Rav ../libs $dst/$edir
for f in $files
do
cp -av $edir/$f $dst/$edir
done
|
BernardBeefheart/gnome-applets
|
my-apps-menu/install.sh
|
Shell
|
bsd-3-clause
| 278 |
#!/bin/bash
set -ex
readonly BASE="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
readonly BUILD_DIR="${BASE}/../.build"
readonly TEST_IMAGES_BASE="${TRAVIS_BUILD_DIR}/rsqueakvm/test/images"
readonly TEST_IMAGES_BASE_URL="https://www.hpi.uni-potsdam.de/hirschfeld/artefacts/rsqueak/testing/images"
export OPTIONS=""
setup_osx() {
brew update
brew install pypy sdl2
}
setup_linux() {
sudo dpkg --add-architecture i386
sudo apt-add-repository multiverse
sudo apt-add-repository universe
sudo apt-get update -myq || true
case "$BUILD_ARCH" in
32bit|lldebug)
PACKAGES="
gcc-multilib \
libasound2-dev:i386 \
libbz2-1.0:i386 \
libc6-dev-i386 \
libc6:i386 \
libexpat1:i386 \
libffi-dev:i386 \
libffi6:i386 \
libfreetype6:i386 \
g++-multilib \
libgcrypt11:i386 \
libgl1-mesa-dev:i386 \
mesa-common-dev:i386 \
libgl1-mesa-dri:i386 \
libgl1-mesa-glx:i386 \
libglapi-mesa:i386 \
libglu1-mesa-dev:i386 \
libglu1-mesa:i386 \
libssl1.0.0:i386 \
libssl-dev:i386 \
libstdc++6:i386 \
libtinfo5:i386 \
libxext-dev:i386 \
libxt-dev:i386 \
zlib1g:i386 \
"
export OPTIONS="--32bit"
;;
64bit)
PACKAGES="
libfreetype6:i386 \
"
;;
arm*)
PACKAGES="
libsdl2-dev \
gcc-arm-linux-gnueabi \
gcc-arm-linux-gnueabihf \
qemu-system \
qemu-system-arm \
qemu-user \
qemu-user-static \
sbuild \
schroot \
scratchbox2 \
debootstrap \
zlib1g:i386 \
libstdc++6:i386 \
libffi-dev:i386 \
libffi6:i386 \
libssl1.0.0:i386 \
libssl-dev:i386 \
libbz2-1.0:i386 \
libc6-dev-i386 \
libc6:i386 \
libexpat1:i386 \
libtinfo5:i386 \
"
export OPTIONS="--32bit"
;;
esac
sudo apt-get install -yq \
--no-install-suggests --no-install-recommends --force-yes \
binfmt-support \
build-essential \
python-dev \
libffi-dev \
zlib1g-dev \
$PACKAGES
if [[ "${BUILD_ARCH}" = arm* ]]; then
"${BASE}/setup_arm.sh"
fi
}
load_test_images() {
local target
local url
if [[ -z "${TEST_TYPE}" ]]; then
return
fi
if [[ "${PLUGINS}" = "PythonPlugin" ]]; then
target="${TEST_IMAGES_BASE}/pypy.image"
url="${TEST_IMAGES_BASE_URL}/pypy.image"
curl -f -s -L --retry 3 -o "${target}" "${url}"
fi
}
# Only build arm on master
if [[ "${TRAVIS_BRANCH}" != "master" ]] && [[ "${BUILD_ARCH}" = arm* ]]; then
exit 0
fi
setup_$TRAVIS_OS_NAME
python .build/download_dependencies.py $OPTIONS
load_test_images
if [[ -d ".build/sqpyte" ]]; then
# Make sqlite/sqpyte for DatabasePlugin
pushd ".build/sqpyte" > /dev/null
chmod +x ./sqlite/configure
sudo make
popd > /dev/null
fi
|
HPI-SWA-Lab/RSqueak
|
.travis/install_requirements.sh
|
Shell
|
bsd-3-clause
| 2,802 |