code (string, 2–1.05M) | repo_name (string, 5–110) | path (string, 3–922) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 2–1.05M)
---|---|---|---|---|---
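# Pull salinity measurements joined with creel-survey rows for location
# area 2, aggregate them into daily averages, then strip the placeholder
# sort keys ('aaaa'/'zzzz') from the merged output. sql.e and
# real_time2aggregate_value.e are Appaserver pipeline utilities.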
(echo " select distinct measurement_date,measurement_time,'aaaa',datatype,measurement_value
from measurement,station,creel_fishing_trips,creel_catches_species
where station.location_area = 2
and measurement_date >= '2005-12-01' and measurement_date <= '2006-02-24'
and datatype = 'salinity'
and measurement.station = station.station
and measurement.measurement_date = creel_fishing_trips.census_date
and creel_fishing_trips.login_name = creel_catches_species.login_name
and creel_fishing_trips.census_date = creel_catches_species.census_date
and creel_fishing_trips.census_time = creel_catches_species.census_time
and creel_fishing_trips.interview_location = creel_catches_species.interview_location
and creel_fishing_trips.interview_number = creel_catches_species.interview_number
and creel_fishing_trips.census_date >= '2005-12-01'
and creel_fishing_trips.census_date <= '2006-02-24'
and creel_fishing_trips.location_area = 2
order by measurement_date,measurement_time,'aaaa',datatype,measurement_value;" |
tee /dev/tty |
sql.e '^' |
tr '^' ',' |
real_time2aggregate_value.e average 0 1 4 ',' daily n 2006-02-24 |
sed 's/,null,/,/' |
sort) |
sort |
sed 's/zzzz,//' |
sed 's/aaaa,//'
|
timhriley/appaserver
|
src_hydrology/test2.sh
|
Shell
|
gpl-3.0
| 1,313 |
#!/bin/bash
trap ctrl_c INT
function ctrl_c() {
exit 1;
}
if [ -z "$1" ]; then
TODAY=$(date +%Y%m%d)
DATADIR=data.$TODAY
else
DATADIR=$1
fi
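# Fetch the dosleg URL list and parse every promulgated text into $DATADIR.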
senapy-cli doslegs_urls | tlfp-parse-many $DATADIR --only-promulgated
echo
echo "Handle non promulgated texts already parsed in data:"
ls data/ | grep '^p' | grep -v '_tmp' | while read id; do
ls $DATADIR/$id > /dev/null 2>&1 ||
ls $DATADIR/logs/$id > /dev/null 2>&1 ||
tlfp-parse $id $DATADIR
done
echo
python tlfp/generate_dossiers_csv.py $DATADIR
python tlfp/tools/assemble_procedures.py $DATADIR
python tlfp/tools/make_metrics_csv.py $DATADIR
python tlfp/tools/steps_as_dot.py $DATADIR | dot -Tsvg > $DATADIR/steps.svg
python tlfp/tools/steps_as_dot.py $DATADIR | dot -Tpng > $DATADIR/steps.png
python tlfp/tools/steps_as_dot.py $DATADIR 1 | dot -Tsvg > $DATADIR/steps-detailed.svg
python tlfp/tools/steps_as_dot.py $DATADIR 1 | dot -Tpng > $DATADIR/steps-detailed.png
for f in .htaccess HEADER.html; do
cp data/$f $DATADIR/$f
done
echo "Everything finished, data processed in $DATADIR"
echo "A few stats:"
echo
./stats.sh $DATADIR
echo
echo "Deploy built data with:"
echo "mv data data.$TODAY.old && mv $DATADIR data"
|
regardscitoyens/the-law-factory-parser
|
reparse_all.sh
|
Shell
|
gpl-3.0
| 1,209 |
#!/bin/bash
cd ../server
node uploadgroup.js -H localhost -p 8123 -f ../test/groups/groups.json &
node uploaduser.js -H localhost -p 8123 -f ../test/users/users.json &
node upload.js -H localhost -p 8123 -d ../test/practica0.2/ &
# wait for the three background uploads to finish before returning
wait
cd -
|
catacs/catalyde
|
tests/tests/populate.sh
|
Shell
|
gpl-3.0
| 223 |
#!/bin/bash
# Copyright 2016-2018 Obsidian-Studios, Inc.
# Author William L. Thomson Jr.
# [email protected]
#
# Distributed under the terms of The GNU Public License v3.0 (GPLv3)
# Batch script file to modify MY_PV
COMMIT_MSG="Modified PV"
TREE="/usr/portage/local/os-xtoo"
cd "${TREE}" || exit 1
PKGS=($( ls dev-java/glassfish-hk2*/*ebuild ))
batch_cmds() {
echo "${COMMIT_MSG} -> ${ebuild}"
sed -i -e 's|MY_PV="${PV/_beta/-b0}"|MY_PV="${PV%*_beta*}-b$( printf "%02d" ${PV#*_beta*})"|' \
${ebuild} || return 1
return 0
}
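# NOTE: batch_cmds is never invoked in this file; presumably the
# ebuild-batcher harness sources this script and calls it once per
# ${ebuild} in the PKGS array.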
|
Obsidian-StudiosInc/ebuild-batcher
|
scripts/modify-MY_PV.sh
|
Shell
|
gpl-3.0
| 537 |
#!/bin/bash
# Sets up the regular work environment with JUnit.
. workingAlias.sh
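# Note: ~+ expands to the current working directory ($PWD) in bash.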
CLASSPATH=
export CLASSPATH=~+/junit.jar:~+
export LD_LIBRARY_PATH=~+
|
forflo/snippetbin
|
java/GESim/setupEnv.sh
|
Shell
|
gpl-3.0
| 153 |
#! /bin/bash
# A modification of Dean Clatworthy's deploy script as found here: https://github.com/deanc/wordpress-plugin-git-svn
# The difference is that this script lives in the plugin's git repo & doesn't require an existing SVN repo.
# main config
PLUGINSLUG="wp-keyword-monitor"
CURRENTDIR=$(pwd)
MAINFILE="wp-keyword-monitor.php" # this should be the name of your main php file in the wordpress plugin
# git config
GITPATH="$CURRENTDIR/" # this file should be in the base of your git repository
# svn config
SVNPATH="/tmp/$PLUGINSLUG" # path to a temp SVN repo. No trailing slash required and don't add trunk.
SVNURL="http://plugins.svn.wordpress.org/wp-keyword-monitor/" # Remote SVN repo on wordpress.org, with no trailing slash
SVNUSER="filme-blog" # your svn username
# Let's begin...
echo ".........................................."
echo
echo "Preparing to deploy wordpress plugin"
echo
echo ".........................................."
echo
# Check version in readme.txt is the same as plugin file
NEWVERSION1=$(grep "^Stable tag" "$GITPATH/readme.txt" | awk -F' ' '{print $3}')
echo "readme version: $NEWVERSION1"
NEWVERSION2=$(grep "^Version" "$GITPATH/$MAINFILE" | awk -F' ' '{print $2}')
echo "$MAINFILE version: $NEWVERSION2"
if [ "$NEWVERSION1" != "$NEWVERSION2" ]; then echo "Versions don't match. Exiting...."; exit 1; fi
echo "Versions match in readme.txt and PHP file. Let's proceed..."
cd $GITPATH
echo -e "Enter a commit message for this new version: \c"
read COMMITMSG
git commit -am "$COMMITMSG"
echo "Tagging new version in git"
git tag -a "$NEWVERSION1" -m "Tagging version $NEWVERSION1"
echo "Pushing latest commit to origin, with tags"
git push origin master
git push origin master --tags
echo
echo "Creating local copy of SVN repo ..."
svn co $SVNURL $SVNPATH
echo "Exporting the HEAD of master from git to the trunk of SVN"
git checkout-index -a -f --prefix=$SVNPATH/trunk/
echo "Ignoring github specific & deployment script"
svn propset svn:ignore "deploy.sh
README.md
.git
.idea
.gitignore" "$SVNPATH/trunk/"
echo "Moving assets-wp-repo"
mkdir $SVNPATH/assets/
mv $SVNPATH/trunk/assets-wp-repo/* $SVNPATH/assets/
svn add $SVNPATH/assets/
svn delete $SVNPATH/trunk/assets-wp-repo
echo "Changing directory to SVN"
cd $SVNPATH/trunk/
# Add all new files that are not set to be ignored
svn status | grep -v "^.[ \t]*\..*" | grep "^?" | awk '{print $2}' | xargs svn add
echo "committing to trunk"
svn commit --username=$SVNUSER -m "$COMMITMSG"
echo "Updating WP plugin repo assets & committing"
cd $SVNPATH/assets/
svn commit --username=$SVNUSER -m "Updating wp-repo-assets"
echo "Creating new SVN tag & committing it"
cd $SVNPATH
svn copy trunk/ tags/$NEWVERSION1/
cd $SVNPATH/tags/$NEWVERSION1
svn commit --username=$SVNUSER -m "Tagging version $NEWVERSION1"
echo "Removing temporary directory $SVNPATH"
rm -fr $SVNPATH/
echo "*** FIN ***"
|
blackus3r/wp-keyword-monitor
|
deploy.sh
|
Shell
|
gpl-3.0
| 2,888 |
#!/bin/bash
source /usr/share/aif/tests/lib/framework-runtime
aiftest swap 48
aiftest lvm-lv cryptpool cryptroot '800.00 MB'
aiftest mount '/dev/sda3 on / type ext4 (rw)'
aiftest mount '/dev/sda4 on /home type ext3 (rw)'
for i in /etc/ / /root/ /home/ /var/
do
aiftest file "$i"test_file
done
aiftest file /home/important-userdata
aiftest ping 2 archlinux.org
aiftest-done
|
jdodds/aif
|
tests/runtime/automatic-reuse-fs-sda/perform_test.sh
|
Shell
|
gpl-3.0
| 377 |
# possible additional CUDA_CFLAGS
#-gencode=arch=compute_50,code=\"sm_50,compute_50\"
#-gencode=arch=compute_35,code=\"sm_35,compute_35\"
#-gencode=arch=compute_30,code=\"sm_30,compute_30\"
#--ptxas-options=\"-v -dlcm=cg\""
#extracflags="-march=native -D_REENTRANT -falign-functions=16 -falign-jumps=16 -falign-labels=16"
extracflags="-D_REENTRANT -falign-functions=16 -falign-jumps=16 -falign-labels=16"
CUDA_CFLAGS="-O3 -Xcompiler -Wall" ./configure CXXFLAGS="-O3 $extracflags" --with-cuda=/usr/local/cuda --with-nvml=libnvidia-ml.so
|
netswift/ccminer-vert
|
configure.sh
|
Shell
|
gpl-3.0
| 540 |
#!/bin/bash
#
# @brief Generating email signature
# @version ver.1.0
# @date Thu Jun 06 01:25:41 2015
# @company Frobas IT Department, www.frobas.com 2015
# @author Vladimir Roncevic <[email protected]>
#
UTIL_EMAIl_SIGN=email_sign
UTIL_EMAIl_SIGN_VERSION=ver.1.0
UTIL=/root/scripts/sh_util/${UTIL_EMAIl_SIGN_VERSION}
UTIL_CFG_ESIGNATURE=${UTIL}/conf/${UTIL_EMAIl_SIGN}.cfg
UTIL_LOG=${UTIL}/log
. ${UTIL}/bin/devel.sh
. ${UTIL}/bin/usage.sh
. ${UTIL}/bin/load_util_conf.sh
declare -A EMAIL_SIGN_Usage=(
[USAGE_TOOL]="${UTIL_EMAIl_SIGN}"
[USAGE_ARG1]="[NAME] Full name"
[Usage_ARG2]="[WP] Work position"
[Usage_ARG3]="[DN] Department"
[Usage_ARG4]="[IP] IP phone number"
[Usage_ARG5]="[MOB] Mobile number"
[Usage_ARG6]="[EMAIL] Email address"
[USAGE_EX_PRE]="# Example generating email signature"
[USAGE_EX]="${UTIL_EMAIl_SIGN} SIGN_STRUCT"
)
#
# @brief Generate email signature for employee
# @param Value required structure
# @retval Success return 0, else return 1
#
# @usage
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
#
# declare -A SIGN_STRUCT=(
# [NAME]=$name # Full name
# [WP]=$wp # Work position as engineer, developer
# [DN]=$dn # Electronic, Design Service
# [IP]=$ip # IP phone number
# [MOB]=$mobile # Mobile phone number
# [EMAIL]=$email # Email address
# )
#
# email_sign SIGN_STRUCT
# local STATUS=$?
#
# if [ $STATUS -eq $SUCCESS ]; then
# # true
# # notify admin | user
# else
# # false
# # missing argument(s) | missing config file | failed to load config
# # return $NOT_SUCCESS
# # or
# # exit 128
# fi
#
function email_sign {
local -n SIGN_STRUCT=$1
if [ ${#SIGN_STRUCT[@]} -eq 6 ] ; then
local FUNC=${FUNCNAME[0]} MSG="None" STATUS
local NAME=${SIGN_STRUCT[NAME]} WP=${SIGN_STRUCT[WP]}
local DN=${SIGN_STRUCT[DN]} IP=${SIGN_STRUCT[IP]}
local MOB=${SIGN_STRUCT[MOB]} EMAIL=${SIGN_STRUCT[EMAIL]}
declare -A STATUS_STRUCT=()
if [[ -n "$NAME" && -n "$WP" ]]; then
STATUS_STRUCT[NAME]=$SUCCESS
STATUS_STRUCT[WP]=$SUCCESS
if [[ -n "$DN" && -n "$IP" ]]; then
STATUS_STRUCT[DN]=$SUCCESS
STATUS_STRUCT[IP]=$SUCCESS
if [[ -n "$MOB" && -n "$EMAIL" ]]; then
STATUS_STRUCT[MOB]=$SUCCESS
STATUS_STRUCT[EMAIL]=$SUCCESS
else
STATUS_STRUCT[MOB]=$NOT_SUCCESS
STATUS_STRUCT[EMAIL]=$NOT_SUCCESS
fi
else
STATUS_STRUCT[DN]=$NOT_SUCCESS
STATUS_STRUCT[IP]=$NOT_SUCCESS
fi
else
STATUS_STRUCT[NAME]=$NOT_SUCCESS
STATUS_STRUCT[WP]=$NOT_SUCCESS
fi
check_status STATUS_STRUCT
STATUS=$?
if [ $STATUS -eq $NOT_SUCCESS ]; then
usage EMAIL_SIGN_Usage
return $NOT_SUCCESS
fi
declare -A config_email_sign=()
load_util_conf $UTIL_CFG_ESIGNATURE config_email_sign
STATUS=$?
if [ $STATUS -eq $SUCCESS ]; then
local CDIR=${config_email_sign[COMPANY_EMPLOYEES]} SLINE
MSG="Checking directory [${CDIR}/]?"
info_debug_message_que "$MSG" "$FUNC" "$UTIL_EMAIl_SIGN"
if [ -d "${CDIR}/" ]; then
MSG="[ok]"
info_debug_message_ans "$MSG" "$FUNC" "$UTIL_EMAIl_SIGN"
local COMPANY COMPANY_SITE COMPANY_ADDRESS COMPANY_STATE
local COMPANY_PHONE COMPANY_FAX
COMPANY=${config_email_sign[COMPANY_NAME]}
COMPANY_SITE=${config_email_sign[COMPANY_SITE]}
COMPANY_ADDRESS=${config_email_sign[COMPANY_ADDRESS]}
COMPANY_STATE=${config_email_sign[COMPANY_STATE]}
COMPANY_PHONE=${config_email_sign[COMPANY_PHONE]}
COMPANY_FAX=${config_email_sign[COMPANY_FAX]}
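# ASSUMPTION: SIGNATURE_CONTENT is never defined in this script, so the
# write below would emit an empty file; a minimal plain-text layout is
# sketched here from the fields loaded above (the real template may be
# supplied by the configuration).
local SIGNATURE_CONTENT="${NAME}\n${WP}, ${DN}\nIP phone: ${IP} | Mobile: ${MOB}\n${EMAIL}\n${COMPANY} | ${COMPANY_ADDRESS} | ${COMPANY_STATE}\nPhone: ${COMPANY_PHONE} | Fax: ${COMPANY_FAX}\n${COMPANY_SITE}"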
echo -e "$SIGNATURE_CONTENT" > "$CDIR/$NAME"
MSG="Set permission!"
info_debug_message "$MSG" "$FUNC" "$UTIL_EMAIl_SIGN"
eval "chmod 644 ${CDIR}/${NAME}"
info_debug_message_end "Done" "$FUNC" "$UTIL_EMAIl_SIGN"
return $SUCCESS
fi
MSG="[not ok]"
info_debug_message_ans "$MSG" "$FUNC" "$UTIL_EMAIl_SIGN"
MSG="Please check directory [${CDIR}/]!"
info_debug_message "$MSG" "$FUNC" "$UTIL_EMAIl_SIGN"
MSG="Force exit!"
info_debug_message_end "$MSG" "$FUNC" "$UTIL_EMAIl_SIGN"
return $NOT_SUCCESS
fi
MSG="Force exit!"
info_debug_message_end "$MSG" "$FUNC" "$UTIL_EMAIl_SIGN"
return $NOT_SUCCESS
fi
usage EMAIL_SIGN_Usage
return $NOT_SUCCESS
}
|
vroncevic/sh-util
|
sh_tool/bin/email_sign.sh
|
Shell
|
gpl-3.0
| 4,881 |
#!/bin/bash
set -e
#USAGE:
# dat_mulvec.sh <factor> <file1.dat> <file2.dat> ...
#DESCRIPTION:
# This script reads "dat" file(s) containing
# six columns of real numbers, and multiplies
# columns 4 to 6 by the given <factor>.
# The purpose is to scale the vectors in dat files
# containing atom positions followed by a vector
# (x y z vx vy vz).
factor=0.0
if [[ -z "${@:2}" ]]; then
printf "Multiply the columns 4-6 by the given factor in data files (*.dat). \n"
printf "Usage: dat_mulvec.sh <factor> <file1.dat> [<file2.dat> ...] \n"
else
factor=$1
FILES="${@:2}"
for f in $FILES
do
printf ">>> Multiplying vectors by $factor in $f ..."
if [ -e "$f" ]; then
# truncate (not append) so stale data from an aborted run can't leak in
awk -v fac=$factor '{print $1 " " $2 " " $3 " " fac*$4 " " fac*$5 " " fac*$6}' "$f" >/tmp/temp.dat
mv -f /tmp/temp.dat "$f"
printf " Done.\n"
else
printf " File doesn't exist, skipping.\n"
fi
done
fi
|
pierrehirel/atomsk
|
tools/dat_mulvec.sh
|
Shell
|
gpl-3.0
| 912 |
#!/bin/bash
###################################################################################################################
# Author : Louis DAUBIGNARD
# Date : 11/12/2014
#
# Description : Script to:
#                 - create a Linux user and set up their environment
#
# Syntax : crea_user.sh
#
###################################################################################################################
# ROOT PATH
PATHROOT="$PWD"
# LOAD PARAMETERS
. $PATHROOT/user_param.sh
# LOAD FUNCTIONS
. $PATHROOT/../lib/functions.sh
# LOAD PROPERTIES
. $PATHROOT/../config/config.sh
echo "--------------------------------------------------------------------------------------------------"
echo " CREATION UTILISATEUR LINUX "
echo "--------------------------------------------------------------------------------------------------"
##################################################################################################################
# PREREQUISITE CHECKS
# Check that the user is root
checkUserRoot
# Check the log directory
checkLog $PATHDEST_REPLOG $FICLOGNAME
# Check that perl is installed (used for password handling)
checkAppli perl
##################################################################################################################
# VARIABLE SETUP
USER_NOM_COMPLET="$USER_PRENOM $USER_NOM"
USER_LOGIN=`getLoginName $USER_NOM $USER_PRENOM`
CREA_USER=$USER_LOGIN
CREA_GROUP=$USER_LOGIN
# DIRECTORIES
DIR_HOME=/home/$USER_LOGIN
DIR_MAIL=/var/mail
DIR_HOME_PROJECT=/home/$USER_LOGIN/projects
DIR_TPL=$PATHROOT/tpl
# Display the parameters
echo " | ---------------------------------------------"
echo " | - Last name : $USER_NOM"
echo " | - First name : $USER_PRENOM"
echo " | - Full name : $USER_NOM_COMPLET"
echo " | - Login : $USER_LOGIN"
echo " | - Email address : $USER_ADRESSEMAIL"
echo " | - Office number : $USER_NUM_BUREAU"
echo " | - Work phone : $USER_TEL_PRO"
echo " | - Personal phone : $USER_TEL_PERSO"
echo " | - Other : $USER_AUTRE"
echo " | ---------------------------------------------"
# Move into the current user's home directory
cd ~
# Add the user group
#echo " | - ADD GROUP $CREA_GROUP"
#if ! getent group "$CREA_GROUP" > /dev/null 2>&1 ; then
#  addgroup --system "$CREA_GROUP" --quiet
#fi
# Generate the password
USER_PASSWD=`getPasswd`
# Add the user
###########################################################
# useradd [-c comment] [-d home_dir]
#         [-e expire_date] [-f inactive_time]
#         [-g initial_group] [-G group[,...]]
#         [-m [-k skeleton_dir] | -M] [-p encrypted_password]
#         [-s shell] [-u uid [ -o]] [-n] [-r] login
# useradd -D [-g default_group] [-b default_home_dir]
#         [-f default_inactivity] [-e default_expire_date]
#         [-s default_shell]
###########################################################
# anchor on ':' so an existing login that is a prefix of another doesn't match
grep -E "^${CREA_USER}:" /etc/passwd >/dev/null
if [ $? -eq 0 ]; then
echo "[ERROR] user $CREA_USER already exists!"
exit 1
else
useradd -m -p $(openssl passwd -1 $USER_PASSWD) -s /bin/bash -c "$USER_NOM_COMPLET,$USER_NUM_BUREAU,$USER_TEL_PRO,$USER_TEL_PERSO,$USER_AUTRE" -U $CREA_USER
if [ $? -eq 0 ]; then
echo -e "\033[32m[USER-ADD]\033[0m $CREA_USER a été ajouté au system!"
else
echo -e "\033[31m[ERREUR]\033[0m Impossible d'ajouter l'utilisateur $CREA_USER!"
exit 1
fi
fi
# Create the environment
echo " | - CREATING DIRECTORY ENVIRONMENT"
mkdir -p "$DIR_HOME" "$DIR_HOME_PROJECT" && chown "$CREA_USER":"$CREA_GROUP" "$DIR_HOME" "$DIR_HOME_PROJECT"
echo " | - CREATING FILE ENVIRONMENT"
# Create the welcome file
touch "$DIR_HOME/Bienvenue_$CREA_USER" && chown "$CREA_USER":"$CREA_GROUP" "$DIR_HOME/Bienvenue_$CREA_USER"
# Create the mail spool file
echo " | | - CREATING MAIL FILE"
touch "$DIR_MAIL/$CREA_USER" && chown "$CREA_USER":"$CREA_GROUP" "$DIR_MAIL/$CREA_USER"
# Create the .vimrc file
echo " | | - CREATING VIMRC FILE"
getTplFic ".vimrc" "$CREA_USER" "$CREA_GROUP" "$DIR_HOME" "$DIR_TPL"
# Create the .bash_aliases file
echo " | | - CREATING BASH_ALIASES FILE"
getTplFic ".bash_aliases" "$CREA_USER" "$CREA_GROUP" "$DIR_HOME" "$DIR_TPL"
# Create the .gitconfig file
echo " | | - CREATING GITCONFIG FILE"
getTplFic ".gitconfig" "$CREA_USER" "$CREA_GROUP" "$DIR_HOME" "$DIR_TPL"
# Add the git user name and email address
echo " name = $CREA_USER" >> "$DIR_HOME/.gitconfig"
echo " email = $USER_ADRESSEMAIL" >> "$DIR_HOME/.gitconfig"
# Create the SSH keys
echo " | | - CREATING SSH KEYS"
checkPathDst "$DIR_HOME/.ssh/" "SSH key path"
chown -R "$CREA_USER:$CREA_USER" "$DIR_HOME"
ssh-keygen -C "$USER_ADRESSEMAIL" -t rsa -f "$DIR_HOME/.ssh/id_rsa" -q -N ""
chown -R "$CREA_USER:$CREA_USER" "$DIR_HOME"
#su -l "$CREA_USER" -c 'ssh-keygen -C "$USER_ADRESSEMAIL" -t rsa -f "$DIR_HOME/.ssh/id_rsa" -q -N ""'
# RECAP INFORMATION
echo " | ---------------------------------------------"
echo " | - USER : $CREA_USER"
echo " | - PASSWD : $USER_PASSWD"
echo " | - GROUP : $CREA_GROUP"
echo " | - Dossier utilisateur : $DIR_HOME"
echo " | - Dossier Git : $DIR_HOME_PROJECT"
echo " | - Cle SSH : $DIR_HOME/.ssh/"
echo " | ---------------------------------------------"
cat $DIR_HOME/.ssh/id_rsa.pub
echo " | ---------------------------------------------"
echo " | "
echo " FIN"
|
Acisia/SHELL-UNIX
|
CREA_USER/crea_user.sh
|
Shell
|
gpl-3.0
| 5,783 |
#!/bin/bash
#Author Niek Bats
#$1 wave
#$2 experiment
#$3 pseudocode
#lists all files, when found any grp-intake-folder using specified parameter(s)
#input check and build query
if [[ "$1" != "" ]] #if no wave dont do anything
then
query="like '%/grp-intake-%' AND DATA_PATH like '%$1%'"
if [[ "$2" != "" ]]
then
query="$query AND DATA_PATH like '%$2%'"
if [[ "$3" != "" ]]
then
query="$query AND DATA_PATH like '%$3%'"
fi
elif [[ "$3" != "" ]]
then
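# a pseudocode ($3) without an experiment ($2) is ambiguous; refuse to guess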
exit 1
fi
echo $query
# icommands: the iquest format string is printf-style; single quotes keep
# the literal double quotes for the CSV fields
output=$(iquest '"%s";%s' "SELECT DATA_PATH, DATA_SIZE WHERE DATA_PATH $query")
#echo $output
printf '"Filepath/name";"filesize"\n' > outputIntake.csv
printf '%s\n' "$output" >> outputIntake.csv
fi
|
UtrechtUniversity/irods-ruleset-youth-cohort
|
tools/intakeDataCheck.sh
|
Shell
|
gpl-3.0
| 857 |
#!/usr/bin/env bash
# shellcheck disable=SC2016,SC1091,SC2164
export test_description="Testing pass update."
cd tests
source ./commons
test_export "options"
test_pass_populate
test_expect_success 'Testing updating password with specific length' '
_pass update --force --length=50 France/bank &&
newpasswd="$(pass show France/bank | head -n 1)" &&
[[ "${#newpasswd}" == 50 ]]
'
test_expect_success 'Testing updating password with no symbols' '
_pass update --force --no-symbols Business/site.eu
'
test_expect_success 'Testing updating password with a provided password' '
echo -e "dummypass\ndummypass" | _pass update --force --provide Business/site.com &&
test_password_is Business/site.com dummypass
'
testing_password_notmatching() {
echo -e "pass\ndummypass" | _pass update --force --provide Business/site.com
}
test_expect_success 'Testing passwords not matching' '
test_must_fail testing_password_notmatching
'
test_expect_success 'Testing updating a multiline password' '
echo -e "dummypass\nlogin: dummylogin" | _pass update --force --multiline Business/site.eu &&
test_password_is Business/site.eu dummypass
'
test_expect_success 'Testing updating a password by editing it' "
_pass update --edit Business/site.eu &&
test_password_is Business/site.eu 'correct horse battery staple'
"
if test_have_prereq XCLIP; then
test_expect_success 'Testing updating password with clipboard output' '
_pass update --force --clip Email/donenfeld.com &&
test_password_is_not Email/donenfeld.com $PASSWORD
'
fi
test_done
|
roddhjav/pass-update
|
tests/20_options.sh
|
Shell
|
gpl-3.0
| 1,629 |
#!/bin/sh
# ANSI-Escape Color Strings (some definitions):
col_r='\033[1;31m';
col_g='\033[1;32m';
col_b='\033[1;34m';
col_m='\033[0;35m';
col_c='\033[0;36m';
col_y='\033[1;33m';
col_n='\033[0m';
#output variables (no project-support):
output_file="meterpreter.apk";
p1=$(echo $output_file | cut -f1 -d.);
p2=$(echo $output_file | cut -f2 -d.);
output_signed=$p1"-signed."$p2;
resource_script=$p1".rc";
# parameters ($1/$2 inside of functions, do not edit):
inject_file=$2;
mode=$1;
#check for user-input-issues in loops (do not edit):
flgChkLoop=1;
#debug:
#lhost='172.16.0.2';
#lport='4444';
#payload='android/meterpreter/reverse_tcp';
# some decorations and a banner:
print_banner ( ) {
clear;
echo $col_r" _______ "$col_b" _______ _______ ___ ___ ____________ ";
echo $col_r"| _ |"$col_b" | ___ | | ____| | | | | | | ";
echo $col_r"| | | |"$col_b" | | | | | | | | | | |__ _____| ";
echo $col_r"| |_| |"$col_b" | |___| | | | | | | | | | ";
echo $col_r"| |"$col_b" | ___| | |____ | |___ | | | | ___ ";
echo $col_r"| _ |"$col_b" | | | | | | | | | | | | ";
echo $col_r"|__| |__|"$col_b" |___| |_______| |_______| |___| |___| |___| "$col_g;
echo " APCLIT - Android Payload Creation & LAME Injection Toolkit ";
echo " (c) 2016 - 2020 by fugitivus $col_y([email protected]) \n\n"$col_n;
}
print_ok ( ) {
echo -n $col_g"[+] "$col_n;
}
print_nok ( ) {
echo -n $col_r"[!] "$col_n;
}
print_step ( ) {
echo -n $col_y"[*] "$col_n;
}
print_input ( ) {
echo -n $col_b"[?] "$col_n;
}
# check if lhost-variable is like an ip-address:
validate_lhost_ip ( ) {
if expr "$lhost" : '[0-9][0-9]*\.[0-9][0-9]*\.[0-9][0-9]*\.[0-9][0-9]*$' >/dev/null; then
for i in 1 2 3 4; do
if [ $(echo "$lhost" | cut -d. -f$i) -gt 255 ]; then
print_nok;
echo "$lhost is not an ip-address..."
# bail out so the prompt loop asks again instead of falling through to success
return;
fi
done
print_ok;
echo "LHOST --> $lhost successfully set..."
flgChkLoop=0;
else
print_nok;
echo "$lhost is not an ip-address..."
fi
}
# select payload type used for msfvenom:
set_payload_android ( ) {
print_step;
echo $col_y"Select Payload Type:"$col_n;
echo "[1] android/meterpreter/reverse_tcp";
echo "[2] android/meterpreter/reverse_http";
echo "[3] android/meterpreter/reverse_https";
echo "[4] android/shell/reverse_tcp";
echo "[5] android/shell/reverse_http";
echo "[6] android/shell/reverse_https";
while [ $flgChkLoop -eq 1 ] ; do
print_input;
# ansi-escape-sequenze [cursor-line-up]:
read -p "PAYLOAD --> " sel;
echo -en "\033[1A";
flgChkLoop=0;
case $sel in
1) payload="android/meterpreter/reverse_tcp" ;;
2) payload="android/meterpreter/reverse_http" ;;
3) payload="android/meterpreter/reverse_https" ;;
4) payload="android/shell/reverse_tcp" ;;
5) payload="android/shell/reverse_http" ;;
6) payload="android/shell/reverse_https" ;;
*) flgChkLoop=1 ;;
esac
if [ $flgChkLoop -eq 1 ]
then
print_nok;
echo "PAYLOAD out of Range, choose between 1-6...";
fi
done;
print_ok;
echo "PAYLOAD = "$payload;
flgChkLoop=1;
}
# set lhost variable used for msfvenom:
set_lhost ( ) {
print_step;
echo $col_y"Select HOST-Listener-IP-Address..."$col_n;
while [ $flgChkLoop -eq 1 ] ; do
print_input;
read -p "LHOST --> " lhost;
validate_lhost_ip;
done;
flgChkLoop=1;
}
# set lport variable used for msfvenom:
set_lport ( ) {
print_step;
echo $col_y"Select HOST-Listener-Port..."$col_n;
while [ $flgChkLoop -eq 1 ] ; do
print_input;
read -p "LPORT --> " lport;
# check if input is really a number (expr patterns are anchored at the start):
if expr "$lport" : '[0-9][0-9]*$' > /dev/null;
then
dummy=$lport;
else
lport=65536;
fi
# check if port is inside of range:
if [ $lport -lt 1 -o $lport -gt 65535 ]
then
print_nok;
echo "LPORT is out of range (have to be between 1-65536)";
else
print_ok;
echo "LPORT --> $lport successfully set..."
flgChkLoop=0;
fi
done;
flgChkLoop=1;
}
create_payload ( ) {
print_step;
echo $col_y"Generating Android-APP with selected payload..."$col_n;
#print_step;
#echo "exec: msfvenom -p $payload LHOST=$lhost LPORT=$lport -o meterpreter.apk";
msfvenom -p $payload LHOST=$lhost LPORT=$lport -o output/$output_file 2> $PWD/chk.tmp;
# check if payload successfully created:
chkOK=$(cat chk.tmp | grep "Payload size:");
if [ -z "$chkOK" ] ; then
print_nok;
echo "Payload couldn't be created check $PWD/error.log for details...";
cp $PWD/chk.tmp $PWD/error.log 2> /dev/null;
rm $PWD/chk.tmp 2> /dev/null;
exit 1;
else
print_ok;
echo "Payload successfully created...";
print_ok;
echo "Unsigned Payload:";
print_ok;
echo "$PWD/output/$output_file";
fi
}
decompile_payload ( ) {
print_step;
echo $col_y"Decompiling generated payload..."$col_n;
apktool d -f -o $PWD/output/payload $PWD/output/$output_file >> $PWD/chk.tmp;
# check if was successfull:
if [ -d "$PWD/output/payload" ];then
print_ok;
echo "Payload successfull decompiled..."$col_n;
else
print_nak;
echo "ERROR: look at $PWD/error.log for more details...";
cp $PWD/chk.tmp $PWD/error.log;
exit 1;
fi
}
decompile_inject_file ( ) {
print_step;
echo $col_y"Decompiling $inject_file ..."$col_n;
apktool d -f -o $PWD/output/$(echo $inject_file | cut -f1 -d.) $inject_file >> $PWD/chk.tmp;
# check if was successfull:
if [ -d "$PWD/output/$(echo $inject_file | cut -f1 -d.)" ];then
print_ok;
echo "$inject_file successfull decompiled..."$col_n;
else
print_nok;
echo "ERROR: look at $PWD/error.log for more details...";
cp $PWD/chk.tmp $PWD/error.log;
exit 1;
fi
}
recompile_payload ( ) {
print_step;
echo $col_y"Recompiling Payload..."$col_n;
apktool b $PWD/output/payload -o $PWD/output/payload.apk >> $PWD/chk.tmp;
# check if was successfull:
if [ -f "$PWD/output/payload.apk" ];then
print_ok;
echo "Payload successfull recompiled...";
else
print_nok;
echo "ERROR: look at $PWD/error.log for more details...";
cp $PWD/chk.tmp $PWD/error.log;
exit 1;
fi
}
recompile_inject_file ( ) {
print_step;
echo $col_y"Recompiling $inject_file ..."$col_n;
apktool b $PWD/output/$(echo $inject_file | cut -f1 -d.) -o $PWD/output/$inject_file >> $PWD/chk.tmp;
# check if was successfull:
if [ -f "$PWD/output/$inject_file" ];then
print_ok;
echo "$inject_file successfull recompiled...";
else
print_nok;
echo "ERROR: look at $PWD/error.log for more details...";
cp $PWD/chk.tmp $PWD/error.log;
exit 1;
fi
}
generate_keystore ( ) {
rm $PWD/cert/apclit.keystore 2> /dev/null;
echo $col_r"Generating a new Android Keystore for apclit:\n";
echo $col_g;
keytool -genkeypair -v -keystore cert/apclit.keystore -storepass android -keypass android\
-keyalg RSA -keysize 2048 -validity 100000 -alias app;
echo $col_n;
}
sign_payload ( ) {
print_step;
echo $col_y"Signing $output_file with aplcit-keystore..."$col_n;
cp $PWD/output/$output_file $PWD/output/$output_signed;
jarsigner -tsa http://timestamp.digicert.com -sigalg SHA1withRSA -digestalg SHA1\
-keystore cert/apclit.keystore -storepass android -keypass android $PWD/output/$output_signed app\
> $PWD/chk.sng;
# check if payload successfully signed:
chkOK=$(cat chk.sng | grep "jar signed.");
if [ -n "$chkOK" ] ; then
print_ok;
echo "Payload successfully signed...";
print_ok;
echo "Signed Payload:";
print_ok;
echo "$PWD/output/$output_signed";
else
print_nok;
echo "Payload couldn't be created check $PWD/error.log for details...";
cp $PWD/chk.tmp $PWD/error.log 2> /dev/null;
rm $PWD/chk.tmp 2> /dev/null;
exit 1;
fi
}
sign_inject_file ( ) {
print_step;
echo $col_y"Signing recompiled $inject_file with aplcit-keystore..."$col_n;
cp $PWD/output/$inject_file $PWD/output/$(echo $inject_file | cut -f1 -d.)"-signed.apk";
jarsigner -tsa http://timestamp.digicert.com -sigalg SHA1withRSA -digestalg SHA1\
-keystore cert/apclit.keystore -storepass android -keypass android $PWD/output/$(echo $inject_file | cut -f1 -d.)"-signed.apk" app\
> $PWD/chk.sng;
# check if payload successfully signed:
chkOK=$(cat chk.sng | grep "jar signed.");
echo $chkOK >> apclit.log;
if [ -z "$chkOK" ] ; then
print_nok;
echo "$PWD/output/$(echo $inject_file | cut -f1 -d.)"-signed.apk" couldn't be created check $PWD/error.log for details...";
cp $PWD/chk.tmp $PWD/error.log 2> /dev/null;
rm $PWD/chk.tmp 2> /dev/null;
exit 1;
else
print_ok;
echo "$inject_file successfully signed...";
print_ok;
echo "Signed Payload:";
print_ok;
echo "$PWD/output/$(echo $inject_file | cut -f1 -d.)"-signed.apk"";
fi
}
sign_recompiled_payload ( ) {
print_step;
echo $col_y"Signing apk-file with aplcit-keystore..."$col_n;
cp $PWD/output/payload.apk $PWD/output/payload-signed.apk;
jarsigner -tsa http://timestamp.digicert.com -sigalg SHA1withRSA -digestalg SHA1\
-keystore cert/apclit.keystore -storepass android -keypass android $PWD/output/payload-signed.apk app\
> $PWD/chk.sng;
# check if payload successfully signed:
chkOK=$(cat chk.sng | grep "jar signed.");
echo $chkOK >> $PWD/apclit.log;
if [ -z "$chkOK" ] ; then
print_nok;
echo "Payload couldn't be created check $PWD/error.log for details...";
cp $PWD/chk.tmp $PWD/error.log 2> /dev/null;
rm $PWD/chk.tmp 2> /dev/null;
exit 1;
else
print_ok;
echo "Payload successfully signed...";
print_ok;
echo "Signed Payload:";
print_ok;
echo "$PWD/output/$output_signed";
fi
}
inject_payload ( ) {
print_step;
echo $col_y"Injecting $inject_file with selected Payload..."$col_n;
# Manual way - outdated but still useful for some apps:
#1 copy *.smali from payload to inject decompiled app dir
#2 insert startup hook in main acitivity
#3 add permissions
#4 recompile
#5 sign
#;???onCreate(Landroid/os/Bundle;)V
#invoke-static {p0}, Lcom/metasploit/stage/Payload;->start(Landroid/content/Context;)V
#f1="$PWD/output/payload/smali/*";
#f2="$PWD/output/$(echo $inject_file | cut -f1 -d.)/smali/";
#cp -r $f1 $f2;
#invoke-static {p0}, Lcom/metasploit/stage/Payload;->start(Landroid/content/Context;)V
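# msfvenom -x injects the payload directly into the supplied apk,
# replacing the manual smali-merge steps sketched above.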
tmp=$(echo $inject_file | cut -f1 -d.)"-injected.apk"
msfvenom -x $inject_file -p $payload --arch dalvik --platform Android LHOST=$lhost LPORT=$lport -o $tmp;
mv $tmp $PWD/output/
# name the resource file to injection file.rc:
resource_script=$(echo $inject_file | cut -f1 -d. | cut -f2 -d/ )"-injected.rc";
echo "Resource-Script:"$resource_script;
}
generate_rc_script ( ) {
print_step;
echo $col_y"Generating a Metasploit-Resource-Script..."$col_n;
echo "use exploit/multi/handler" > $PWD/output/$resource_script;
echo "set PAYLOAD "$payload >> $PWD/output/$resource_script;
echo "set LHOST "$lhost >> $PWD/output/$resource_script;
echo "set LPORT "$lport >> $PWD/output/$resource_script;
echo "set ExitOnSession false" >> $PWD/output/$resource_script;
echo "exploit -j -z\n" >> $PWD/output/$resource_script;
print_ok;
echo "Metasploit-Resource-Script:";
print_ok;
echo "$PWD/output/$resource_script";
print_ok;
echo "use with:";
print_ok;
echo "msfconsole -r $PWD/output/$resource_script";
}
verify_sign ( ) {
# NOTE: unused helper; jarsigner -verify needs the apk path as an argument
jarsigner -verify -verbose "$1";
}
cleanup ( ) {
cp $PWD/chk.tmp $PWD/apclit.log 2> /dev/null;
rm $PWD/error.log 2> /dev/null;
rm $PWD/chk.tmp 2> /dev/null;
rm $PWD/chk.sng 2> /dev/null;
}
mode_install_dependencies ( ) {
#apt-get install lib32stdc++6 lib32ncurses5 lib32z1
apt-get install android-sdk metasploit-framework zipalign default-jdk apktool
}
## Main Activity:
# normal payload generation to a standalone *.apk, no injection...
mode_standalone ( ) {
print_banner;
set_payload_android;
set_lhost;
set_lport;
create_payload;
sign_payload;
generate_rc_script;
cleanup;
}
# generate and inject payload into another *.apk...
mode_inject ( ) {
print_banner;
# check if file to inject exists:
if [ -n "$inject_file" ];then
if [ -f "$inject_file" ];then
#echo "$inject_file exists."
dummy="1";
else
print_nok;
echo "ERROR: $inject_file does not exist..."
exit 1;
fi
else
print_help;
exit 1;
fi
set_payload_android;
set_lhost;
set_lport;
#create_payload;
#decompile_payload;
#decompile_inject_file;
inject_payload;
#recompile_payload;
#recompile_inject_file;
#sign_payload;
#sign_inject_file;
#sign_recompiled_payload;
generate_rc_script;
cleanup;
}
# create a new android keystore (sign apps with custom cert)...
mode_create_keystore ( ) {
print_banner;
generate_keystore;
}
# install dependencies...
mode_setup ( ) {
print_banner;
# NOTE: apclit_setup is not defined anywhere in this script; the
# dependency installer is assumed to be the intended call.
mode_install_dependencies;
}
start_web_server ( ) {
fIndex=30;
wString="";
templateFile="script/web-template.html";
indexFile="output/index.html";
apclitSrv="output/apclitSrv.sh";
apclitSrvPort="80";
print_step;
echo "Generating index.html from web-template...";
cp $templateFile $indexFile;
#adding output files to web template:
for file in "output/"*.apk ; do
print_step;
echo "Add file: "$file" to index.html";
#echo $file;
fileTmp=$(echo $file | cut -d/ -f2 );
#echo $fileTmp;
wString="<a href=\"$fileTmp\">$fileTmp</a><br>\n";
sed -i -e "$fIndex"c"$wString" $indexFile;
fIndex=`expr $fIndex + 1`;
done
#generating SimpleHttpServer-Script:
print_step;
echo "Starting Apclit-Web-Server on Port: "$apclitSrvPort;
echo "python -m SimpleHTTPServer "$apclitSrvPort > $apclitSrv;
#not ready yet...
cd output
sh apclitSrv.sh &
}
stop_web_server ( ) {
print_step;
echo "Stopping APCLIT-Web-Server..."
kill $(ps x |grep SimpleHTTPServer | cut -d? -f1) 2> /dev/null;
}
restart_web_server ( ) {
stop_web_server
start_web_server
}
#print help banner & show usage:
print_help ( ) {
print_banner;
echo $col_r'APCLIT usage:';
echo $col_r'apclit <parameter> <[optional]file/projectname>';
echo $col_b'\nParameters are:\n';
echo $col_n' --help --> show this help screen.';
echo $col_n' --create --> create standalone android-payload (*.apk)';
echo $col_n' --inject --> inject payload into existing (*.apk)';
echo $col_n' --inject-manual --> inject payload manual (testing only)';
echo $col_n' --create-keystore --> create new android-keystore for signing (*.apk)';
echo $col_n' --check-dependencies --> check if all dependencies are installed.';
echo $col_n' --install-dependencies --> install all dependencies (kali-linux-only)';
echo $col_n' --start-web-share --> start web-server 4 sharing (*.apk)';
echo $col_n' --stop-web-share --> stop web-server 4 sharing (*.apk)';
echo $col_n' --restart-web-share --> restart web-server 4 sharing (*.apk)';
echo '\n';
}
# check for parameters and choose mode:
case $mode in
"--help") print_help ;;
"--create") mode_standalone ;;
"--inject") mode_inject ;;
"--inject-manual") echo "inject manual n/a..." ;;
"--create-keystore") mode_create_keystore ;;
"--check-dependencies") echo "check-dependencies n/a...";;
"--install-dependencies") mode_install_dependencies ;;
"--start-web-share") start_web_server ;;
"--stop-web-share") stop_web_server ;;
"--restart-web-share") restart_web_server ;;
*) print_help ;;
esac
#[END]
|
fugitivus/apclit
|
script/apclit.sh
|
Shell
|
gpl-3.0
| 15,784 |
#!/bin/bash
for filename in * # glob instead of parsing ls output
do
ext=${filename##*\.}
case "$ext" in
c) echo "$filename : C source file"
;;
o) echo "$filename :Object file"
;;
sh) echo "$filename : Shell script"
;;
txt) echo "$filename : Text file"
;;
*) echo "$filename : Unknown file type"
;;
esac
done
|
pilotAlpal/fdi_so
|
ASOR/scripts/fileExtensions.sh
|
Shell
|
gpl-3.0
| 294 |
#!/bin/sh
echo This file recreates localization_data.h according to resource.h
echo
# check that sed is available (POSIX-sh friendly; 'type -P' and '&>' are bashisms)
command -v sed >/dev/null 2>&1 || { echo "sed command not found. Aborting." >&2; exit 1; }
# Create the first sed command file
cat > cmd.sed <<\_EOF
# Insert header
1i /*\
* Rufus: The Reliable USB Formatting Utility\
* Localization tables - autogenerated from resource.h\
* Copyright © 2013-2022 Pete Batard <[email protected]>\
*\
* This program is free software: you can redistribute it and/or modify\
* it under the terms of the GNU General Public License as published by\
* the Free Software Foundation, either version 3 of the License, or\
* (at your option) any later version.\
*\
* This program is distributed in the hope that it will be useful,\
* but WITHOUT ANY WARRANTY; without even the implied warranty of\
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\
* GNU General Public License for more details.\
*\
* You should have received a copy of the GNU General Public License\
* along with this program. If not, see <http://www.gnu.org/licenses/>.\
*/\
\
#include <windows.h>\
#include "resource.h"\
#include "localization.h"\
\
#define LOC_CTRL(x) { #x, x }\
#define LOC_DLG(x) { x, NULL, {NULL, NULL} }\
\
// Control IDs\
const loc_control_id control_id[] = {\
// The dialog IDs must come first
# Add the control entries - must be in IDD_, IDC_, IDS_ or MSG_ (and not contain _XP or _RTL suffix)
s/^.* IDD_.*_RTL .*//
s/^.* IDD_.*_XP .*//
s/^#define \([I|M][D|S][D|C|S|G]_[^ ]*\) .*/\ LOC_CTRL(\1),/
# Add standard IDs from windows.h and close table
$a\
LOC_CTRL(IDOK),\
LOC_CTRL(IDCANCEL),\
LOC_CTRL(IDABORT),\
LOC_CTRL(IDRETRY),\
LOC_CTRL(IDIGNORE),\
LOC_CTRL(IDYES),\
LOC_CTRL(IDNO),\
LOC_CTRL(IDCLOSE),\
LOC_CTRL(IDHELP),\
\};\
# Remove everything else
/^[#|\/]/d
/^$/d
_EOF
# Run first part
sed -f cmd.sed resource.h > localization_data.h
# Create the second sed command file
cat > cmd.sed <<\_EOF
# Insert dialog table header
1i // Dialog data\
loc_dlg_list loc_dlg[] = {
# Add the dialog entries - must start with IDD_
s/^.* IDD_.*_RTL .*//
s/^.* IDD_.*_XP .*//
s/^#define \(IDD_[^ ]*\) .*/\ LOC_DLG(\1),/
# Close the table
$a\
};
# Remove everything else
/^[#|\/]/d
/^$/d
_EOF
# Run second part
sed -f cmd.sed resource.h >> localization_data.h
rm cmd.sed
echo Done.
|
pbatard/rufus
|
src/localization_data.sh
|
Shell
|
gpl-3.0
| 2,350 |
#!/bin/bash
cp -R -f $SRC_DIR $PREFIX/MIST
|
Jacob-Barhak/MIST
|
InstConda/build.sh
|
Shell
|
gpl-3.0
| 42 |
#!/bin/bash
python Run.py --model MG94 --paralog1 YLR406C --paralog2 YDL075W --force --no-clock
|
xjw1001001/IGCexpansion
|
ForceOmega/ShFiles/YLR406C_YDL075W_MG94_nonclock_force_omega.sh
|
Shell
|
gpl-3.0
| 96 |
#!/bin/bash
# bash x.sh NC_027250.1
if [ $# -eq 0 ]; then
echo "bash x.sh <ncbi id>"
exit
fi
NCBIID=$1
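# esearch/efetch are NCBI Entrez Direct (EDirect) utilities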
esearch -db nucleotide -query $NCBIID | efetch -format fasta > $NCBIID.fa
|
goshng/swu-seq
|
src/sh/fetch-ncbi-sequence.sh
|
Shell
|
gpl-3.0
| 186 |
#!/bin/bash
SUPPORT_EMAIL= ## To whom the email should be sent
TMP_HTML_LOC= ## Provide the path where you want to store the tmp html files.
THRESHOLD_TIME= ## Set the time in seconds, after which if any impala job found running will be considered as runaway job
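# node_list.txt is assumed to hold one impalad hostname per line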
for i in $(cat node_list.txt)
do
count=0;
curl -s --connect-timeout 3 http://$i:25000/queries > $TMP_HTML_LOC/impala_queries.html
for j in $(grep -A 3 RUNNING $TMP_HTML_LOC/impala_queries.html | grep Details | cut -d= -f3 | sed "s/'>.*//")
do
w3m -dump_both "http://$i:25000/query_profile?query_id=$j" > $TMP_HTML_LOC/impala_query_details.html
user=$(grep "User:" $TMP_HTML_LOC/impala_query_details.html | cut -d: -f2 | head -1)
query_status=$(grep "Query Status" $TMP_HTML_LOC/impala_query_details.html | cut -d: -f2 )
durat=$(grep "Duration:" $TMP_HTML_LOC/impala_query_details.html | cut -d: -f2 )
start_time=$(grep "Start Time" $TMP_HTML_LOC/impala_query_details.html | awk '{print $4}' | cut -d'.' -f1)
start_time_stamp=$(date -d $start_time +%s)
current_time_stamp=$(date +%s)
difference=$(( $current_time_stamp - $start_time_stamp ))
if [ $difference -gt $THRESHOLD_TIME ]
then
echo -e "The impala query submitted by user $user is running for $(expr $difference / 60 ):$(expr $difference % 60)). Please check it. \n $(sed -n '/Sql Statement/,/Coordinator/p' /data/scripts/impala_query_details.html | grep -v Coordinator)" | mail -s "Warning: $user Impala query running for more than $(expr $THRESHOLD_TIME / 60 ) mins" -r $SUPPORT_EMAIL "$SUPPORT_EMAIL"
fi
done
done
|
padmakumar05/hadoop-monitoring
|
impala_mon.sh
|
Shell
|
gpl-3.0
| 1,781 |
#!/bin/sh
QMAKE_BIN=
if [ -x "$(command -v qmake)" ]; then
QMAKE_BIN=qmake
elif [ -x "$(command -v qmake-qt5)" ]; then
QMAKE_BIN=qmake-qt5
fi
if [ -z "$QMAKE_BIN" ]; then
echo "error: Cannot find: qmake (or qmake-qt5)"
echo "\tPlease ensure that Qt5 is installed."
exit 1
fi
# Create the build directory
if [ ! -d "build" ]; then
mkdir build
fi
cd build
# Compile libQGLViewer (using qmake)
if [ -n "${TRAVIS}" ]; then
echo "travis_fold:start:qmake.QGLViewer.pro"
fi
echo "Compile libQGLViewer"
if [ ! -d "libQGLViewer" ]; then
mkdir libQGLViewer
fi
cd ..
cd 3rdparty/libQGLViewer/QGLViewer
$QMAKE_BIN PREFIX=$(pwd)/../../../build/libQGLViewer/installed/ QGLViewer.pro
make && make install
cd ../../..
if [ -n "${TRAVIS}" ]; then
echo "travis_fold:end:qmake.QGLViewer.pro"
fi
cd build
export QGLVIEWERROOT=$(pwd)/libQGLViewer/installed/
# CMake configure
echo "CMake configure"
cmake -DCMAKE_BUILD_TYPE=RelWithDebInfo ../
cmakeConfigureResult=${?}
cd .. > /dev/null
if [ $cmakeConfigureResult -ne 0 ]; then
echo "error: cmake configure failed"
exit $cmakeConfigureResult
fi
|
Warzone2100/WMIT
|
configure_linux.sh
|
Shell
|
gpl-3.0
| 1,092 |
#!/bin/bash
test_info()
{
cat <<EOF
Verify that 'ctdb getdebug' works as expected.
Prerequisites:
* An active CTDB cluster with at least 2 active nodes.
Steps:
1. Verify that the status on all of the ctdb nodes is 'OK'.
2. Get the current debug level on a node, using 'ctdb getdebug -n <node>'.
3. Verify that colon-separated output is generated with the -Y option.
4. Verify that the '-n all' option shows the debug level on all nodes.
Expected results:
* 'ctdb getdebug' shows the debug level on all the nodes.
EOF
}
. ctdb_test_functions.bash
ctdb_test_init "$@"
set -e
cluster_is_healthy
try_command_on_node 0 "$CTDB listnodes | wc -l"
num_nodes="$out"
try_command_on_node -v 1 "onnode -q all $CTDB getdebug"
getdebug_onnode="$out"
sanity_check_output \
$num_nodes \
'^Node [[:digit:]]+ is at debug level [[:alpha:]]+ \([[:digit:]]+\)$' \
"$out"
try_command_on_node -v 1 "$CTDB getdebug -n all"
getdebug_all="$out"
cmd=""
n=0
while [ $n -lt $num_nodes ] ; do
cmd="${cmd}${cmd:+; }$CTDB getdebug -n $n"
n=$(($n + 1))
done
try_command_on_node -v 1 "$cmd"
getdebug_n="$out"
if [ "$getdebug_onnode" = "$getdebug_all" -a \
"$getdebug_all" = "$getdebug_n" ] ; then
echo "They're the same... cool!"
else
echo "Error: they differ."
testfailures=1
fi
colons=""
nl="
"
while read line ; do
t=$(echo "$line" | sed -r -e 's@Node [[:digit:]]+ is at debug level ([[:alpha:]]+) \((-?[[:digit:]]+)\)$@:\1:\2:@')
colons="${colons}${colons:+${nl}}:Name:Level:${nl}${t}"
done <<<"$getdebug_onnode"
cmd="$CTDB -Y getdebug -n all"
echo "Checking that \"$cmd\" produces expected output..."
try_command_on_node 1 "$cmd"
if [ "$out" = "$colons" ] ; then
echo "Yep, looks good!"
else
echo "Nope, it looks like this:"
echo "$out"
testfailures=1
fi
|
wolfmuel/ctdb
|
tests/simple/12_ctdb_getdebug.sh
|
Shell
|
gpl-3.0
| 1,810 |
#!/bin/bash
DIR="$(dirname "${0}")"
docker stop $(docker ps -a -q);
docker rm $(docker ps -a -q);
# Left-pad each index with zero to two digits and
# call the container instantiating script.
for i in {1..15};
do
ii="0${i}";
"${DIR}/run_one.sh" "${ii: -2}";
done
|
maxthomax/UsingTLSLab
|
start_containers.sh
|
Shell
|
gpl-3.0
| 262 |
#!/usr/bin/env bash
echo "This script needs SSH keys set up first"
if [ ! -d ~/.bootstrap ]; then
mkdir ~/.bootstrap
fi
pushd ~/.bootstrap
# install cli xcode tools
if [ ! -x /usr/bin/gcc ]; then
xcode-select --install
fi
# only install brew if we need to (https://brew.sh)
if ! command -v brew >/dev/null; then
/usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
fi
curl -fsSL https://raw.githubusercontent.com/jordanlabs/bootstrap/master/Brewfile -o Brewfile
# Brewfile is in the same directory as the bootstrap.sh script
brew update && brew bundle
# Gem installs
gem install rake rspec
# setup dotfiles
if [ ! -d ~/.dotfiles ]; then
git clone [email protected]:hexaddikt/dotfiles.git ~/.dotfiles
if [ $? -eq 0 ]; then
pushd ~/.dotfiles
rake install
popd
fi
fi
if [ ! -f ~/.vim/autoload/plug.vim ]; then
# install vim-plug
curl -fLo ~/.vim/autoload/plug.vim --create-dirs \
https://raw.githubusercontent.com/junegunn/vim-plug/master/plug.vim
vim +PlugInstall +qall
fi
popd
echo "----------- DONE -----------"
|
jordanlabs/bootstrap
|
bootstrap.sh
|
Shell
|
gpl-3.0
| 1,130 |
#!/bin/bash
#
# perf_test.sh
#
# This file is part of the reversi program
# http://github.com/rcrr/reversi
#
# Author Roberto Corradini mailto:[email protected]
# @copyright 2015 Roberto Corradini. All rights reserved.
#
# License
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
# or visit the site <http://www.gnu.org/licenses/>.
#
# Runs: time ./perf_test.sh -j 4 --from 1 --to 30
#
TIME_CMD="/usr/bin/time -f 'Running %C, elapsed real time: %e'"
PROGRAM="./build/bin/endgame_solver"
ARG_F="-f db/gpdb-ffo.txt"
ARG_S="-s es"
ARGS="--pv-full-rec --pv-no-print"
MIN_FFO_INDEX=1
MAX_FFO_INDEX=79
launch_when_not_busy()
{
while [ $(jobs -r | wc -l) -ge $CJOBS ]
do
# at least $MAX_JOBS are still running.
sleep 1
done
eval "$@" &
}
is_an_integer()
{
re='^[0-9]+$'
if [[ $1 =~ $re ]] ; then
return 0
else
return 1
fi
}
#
# Concurrent jobs
#
CJOBS=1 # default value
#
# First FFO game index to solve
#
FROM=0 # default value
#
# Last FFO game index to solve
#
TO=0 # default value
# Use > 1 to consume two arguments per pass in the loop (e.g. each argument has a corresponding value to go with it).
# Use > 0 to consume one or more arguments per pass in the loop (e.g. some arguments don't have a corresponding value
# to go with it such as in the --default example).
while [[ $# -gt 1 ]]
do
key="$1"
case $key in
-j|--concurrent-jobs)
CJOBS="$2"
shift # past argument
;;
-f|--from)
FROM="$2"
shift # past argument
;;
-t|--to)
TO="$2"
shift # past argument
;;
*)
# unknown option
OPT="$1"
echo "$0: option $1 is unknown."
exit 1
;;
esac
shift # past argument or value
done
if [[ -n $1 ]]; then
echo "$0: Command line options are not complete, or have errors."
exit 1
fi
is_an_integer ${CJOBS};
if [ $? -eq 1 ]; then
echo "$0: option -j must have an integer value."
exit 1
fi
if [ $CJOBS -lt 1 ]; then
echo "$0: option -j must be greater than zero."
exit 1
fi
is_an_integer ${FROM};
if [ $? -eq 1 ]; then
echo "$0: option -f must have an integer value."
exit 1
fi
is_an_integer ${TO};
if [ $? -eq 1 ]; then
echo "$0: option -t must have an integer value."
exit 1
fi
if [ $FROM -gt $TO ]; then
echo "$0: option -f cannot be greater than option -t."
exit 1
fi
if [ $FROM -lt $MIN_FFO_INDEX ]; then
echo "$0: option -f out of range. Range is [$MIN_FFO_INDEX..$MAX_FFO_INDEX]."
exit 1
fi
if [ $TO -gt $MAX_FFO_INDEX ]; then
echo "$0: option -t out of range. Range is [$MIN_FFO_INDEX..$MAX_FFO_INDEX]."
exit 1
fi
echo Solving FFO entries from index $FROM to index $TO with $CJOBS concurrent jobs.
COUNTER=$FROM
while [ $COUNTER -le $TO ]; do
COUNTER_AS_STRING="$COUNTER"
COUNTER_AS_STRING_SIZE=${#COUNTER_AS_STRING}
if [ $COUNTER_AS_STRING_SIZE -eq 1 ]; then
COUNTER_AS_STRING="0$COUNTER_AS_STRING"
fi
ARG_Q="-q ffo-$COUNTER_AS_STRING"
ARG_D="-d out/es-pve-ffo-$COUNTER_AS_STRING.dat"
OUT_FILE="out/es-stdout-ffo-$COUNTER_AS_STRING.txt"
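# launch_when_not_busy eval's its arguments, so the quoting embedded in
# TIME_CMD is re-parsed correctly here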
launch_when_not_busy $TIME_CMD $PROGRAM $ARG_F $ARG_S $ARG_Q $ARG_D $ARGS > $OUT_FILE
let COUNTER=COUNTER+1
done
wait
|
rcrr/reversi
|
c/perf_test.sh
|
Shell
|
gpl-3.0
| 4,003 |
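#!/bin/bash
# Export each quanthistling table as a tab-separated CSV and refresh the
# downloadable zip (assumes local psql access to the quanthistling database).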
QUANTHISTLINGPATH=/media/Daten/Projects/svn-googlecode/qlc/src/webapp/quanthistling
psql -c "copy (select * from entry) to STDOUT DELIMITER AS E'\t' CSV HEADER;" quanthistling > $QUANTHISTLINGPATH/tmp/csv/entry.csv
psql -c "copy (select * from annotation) to STDOUT DELIMITER AS E'\t' CSV HEADER;" quanthistling > $QUANTHISTLINGPATH/tmp/csv/annotation.csv
psql -c "copy (select * from dictdata) to STDOUT DELIMITER AS E'\t' CSV HEADER;" quanthistling > $QUANTHISTLINGPATH/tmp/csv/dictdata.csv
psql -c "copy (select * from nondictdata) to STDOUT DELIMITER AS E'\t' CSV HEADER;" quanthistling > $QUANTHISTLINGPATH/tmp/csv/nondictdata.csv
psql -c "copy (select * from book) to STDOUT DELIMITER AS E'\t' CSV HEADER;" quanthistling > $QUANTHISTLINGPATH/tmp/csv/book.csv
psql -c "copy (select * from language_iso) to STDOUT DELIMITER AS E'\t' CSV HEADER;" quanthistling > $QUANTHISTLINGPATH/tmp/csv/language_iso.csv
psql -c "copy (select * from language_bookname) to STDOUT DELIMITER AS E'\t' CSV HEADER;" quanthistling > $QUANTHISTLINGPATH/tmp/csv/language_bookname.csv
psql -c "copy (select * from language_src) to STDOUT DELIMITER AS E'\t' CSV HEADER;" quanthistling > $QUANTHISTLINGPATH/tmp/csv/language_src.csv
psql -c "copy (select * from language_tgt) to STDOUT DELIMITER AS E'\t' CSV HEADER;" quanthistling > $QUANTHISTLINGPATH/tmp/csv/language_tgt.csv
psql -c "copy (select * from component) to STDOUT DELIMITER AS E'\t' CSV HEADER;" quanthistling > $QUANTHISTLINGPATH/tmp/csv/component.csv
psql -c "copy (select * from corpusversion) to STDOUT DELIMITER AS E'\t' CSV HEADER;" quanthistling > $QUANTHISTLINGPATH/tmp/csv/corpusversion.csv
psql -c "copy (select * from wordlist_entry) to STDOUT DELIMITER AS E'\t' CSV HEADER;" quanthistling > $QUANTHISTLINGPATH/tmp/csv/wordlistentry.csv
psql -c "copy (select * from wordlist_annotation) to STDOUT DELIMITER AS E'\t' CSV HEADER;" quanthistling > $QUANTHISTLINGPATH/tmp/csv/wordlistannotation.csv
psql -c "copy (select * from wordlist_concept) to STDOUT DELIMITER AS E'\t' CSV HEADER;" quanthistling > $QUANTHISTLINGPATH/tmp/csv/wordlistconcept.csv
psql -c "copy (select * from wordlistdata) to STDOUT DELIMITER AS E'\t' CSV HEADER;" quanthistling > $QUANTHISTLINGPATH/tmp/csv/wordlistdata.csv
zip -uj $QUANTHISTLINGPATH/quanthistling/public/downloads/csv.zip $QUANTHISTLINGPATH/tmp/csv/*.csv
|
FrankNagel/qlc
|
src/webapp/quanthistling/scripts/exportcsv.sh
|
Shell
|
gpl-3.0
| 2,342 |
#!/bin/sh
wget https://github.com/VespucciProject/Vespucci_dependencies/releases/download/1/Vespucci_dependencies_linux.tar.gz
tar xvf Vespucci_dependencies_linux.tar.gz
sudo apt-add-repository -y ppa:ubuntu-sdk-team
sudo apt-add-repository -y ppa:ubuntu-toolchain-r/test
sudo add-apt-repository -y ppa:beineri/opt-qt58-trusty
sudo apt-get -y update
sudo apt-get -y install g++-4.9 build-essential
sudo apt-get -y install qt58base qt58tools qt58imageformats
sudo apt-get -y install qt58location qt58declarative libsqlite3-dev qt58svg
sudo apt-get -y install mesa-common-dev freeglut3-dev
sudo apt-get -y install libarpack2-dev
sudo apt-get -y install libxml2-dev
sudo apt-get -y install libboost1.55-all-dev
sudo apt-get -y install zlib1g zlib1g-dev
wget https://support.hdfgroup.org/ftp/HDF5/current18/bin/linux-centos6-x86_64-gcc447/hdf5-1.8.18-linux-centos6-x86_64-gcc447-shared.tar.gz
tar xvf hdf5-1.8.18-linux-centos6-x86_64-gcc447-shared.tar.gz
cd hdf5-1.8.18-linux-centos6-x86_64-gcc447-shared
sudo cp include/* /usr/include
sudo cp lib/* /usr/lib
|
VespucciProject/Vespucci
|
getlinuxdeps.sh
|
Shell
|
gpl-3.0
| 1,056 |
#!/bin/bash
set -euo pipefail
cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
phpVersions=( "$@" )
if [ ${#phpVersions[@]} -eq 0 ]; then
phpVersions=( php*.*/ )
fi
phpVersions=( "${phpVersions[@]%/}" )
current="$(curl -fsSL 'http://api.wordpress.org/core/version-check/1.7/' | jq -r '.offers[0].current')"
sha1="$(curl -fsSL "https://wordpress.org/wordpress-$current.tar.gz.sha1")"
declare -A variantExtras=(
[apache]='\nRUN a2enmod rewrite expires\n'
[fpm]=''
)
declare -A variantCmds=(
[apache]='apache2-foreground'
[fpm]='php-fpm'
)
travisEnv=
for phpVersion in "${phpVersions[@]}"; do
phpVersionDir="$phpVersion"
phpVersion="${phpVersion#php}"
for variant in apache fpm; do
dir="$phpVersionDir/$variant"
mkdir -p "$dir"
extras="${variantExtras[$variant]}"
cmd="${variantCmds[$variant]}"
(
set -x
sed -r \
-e 's!%%WORDPRESS_VERSION%%!'"$current"'!g' \
-e 's!%%WORDPRESS_SHA1%%!'"$sha1"'!g' \
-e 's!%%PHP_VERSION%%!'"$phpVersion"'!g' \
-e 's!%%VARIANT%%!'"$variant"'!g' \
-e 's!%%VARIANT_EXTRAS%%!'"$extras"'!g' \
-e 's!%%CMD%%!'"$cmd"'!g' \
Dockerfile.template > "$dir/Dockerfile"
cp docker-entrypoint.sh "$dir/docker-entrypoint.sh"
)
travisEnv+='\n - VARIANT='"$dir"
done
done
travis="$(awk -v 'RS=\n\n' '$1 == "env:" { $0 = "env:'"$travisEnv"'" } { printf "%s%s", $0, RS }' .travis.yml)"
echo "$travis" > .travis.yml
|
tgeek77/Dockerfiles
|
wordpress-arm/update.sh
|
Shell
|
gpl-3.0
| 1,395 |
#!/bin/bash
tutorial="DiversificationRate_CharacterDependent_Tutorial"
pdflatex RB_${tutorial}.tex
bibtex RB_${tutorial}
pdflatex RB_${tutorial}.tex
pdflatex RB_${tutorial}.tex
rm RB_${tutorial}.aux
rm RB_${tutorial}.bbl
rm RB_${tutorial}.blg
rm RB_${tutorial}.log
rm RB_${tutorial}.out
|
revbayes/revbayes_tutorial
|
tutorial_TeX/RB_DiversificationRate_CharacterDependent_Tutorial/build.sh
|
Shell
|
gpl-3.0
| 291 |
#!/bin/bash
cd /home/user/Desktop/Projects/xvol++/examples/ProcessMsg
LD_LIBRARY_PATH=../../lib ./ProcessMsg.exe
|
syncmast/xwings
|
examples/ProcessMsg/run.sh
|
Shell
|
gpl-3.0
| 115 |
#!/bin/bash
set -ev
mozdownload --type daily --branch mozilla-central
|
galgeek/firefox-ui-tests
|
.travis/before_script.sh
|
Shell
|
mpl-2.0
| 71 |
#!/bin/bash
#
# template.sh
# standard locals
alias cd='builtin cd'
P="$0"
USAGE="`basename ${P}` [-h(elp)] [-d(ebug)] [-n(no xyz)] [-i [1|2|3|...]"
DBG=:
OPTIONSTRING=hd1i:
RED='\033[0;31m' # red (use with echo -e)
BLUE='\033[1;34m' # blue
GREEN='\033[0;32m' # green
NC='\033[0m' # no color
# specific locals
INST_DIR=`dirname ${P}`
CLI=${INST_DIR}/rs485-api.sh
ONE_AT_A_TIME=
# message & exit if exit num present
usage() { echo -e Usage: $USAGE; [ ! -z "$1" ] && exit $1; }
# process options
while getopts $OPTIONSTRING OPTION
do
case $OPTION in
h) usage 0 ;;
d) DBG=echo ;;
1) ONE_AT_A_TIME="1" ;;
i) ABC="${OPTARG}" ;;
*) usage 1 ;;
esac
done
shift `expr $OPTIND - 1`
# cycle through all relay outputs (4 bases x 64 outputs), gradually turning them all on
doit() {
for BASE in 0 1 2 3
do
echo; echo BASE ${BASE}...
${CLI} -B ${BASE} -c off all
RELAY_SET=""
for OUTPUT in `seq 64`
do
if [ x$ONE_AT_A_TIME = x ]
then
RELAY_SET="${OUTPUT}"
else
RELAY_SET="${RELAY_SET} ${OUTPUT}"
fi
COMMAND="${CLI} -B ${BASE} -c on ${RELAY_SET}"
echo $COMMAND
$COMMAND
# [ x$ONE_AT_A_TIME = x ] && sleep 1
done
sleep 1
done
}
doit
|
hamishcunningham/fishy-wifi
|
walls/test-it.sh
|
Shell
|
agpl-3.0
| 1,240 |
# TODO(sqs): add disco user
sudo aptitude -y install git-core build-essential
mkdir -p $HOME/src && cd $HOME/src
#################
### FREEQUERY ###
#################
if [ -e $HOME/src/freequery ]
then
cd $HOME/src/freequery && git pull origin master
else
git clone 'git://github.com/sqs/freequery.git' $HOME/src/freequery && \
cd $HOME/src/freequery
fi
make install
#############
### DISCO ###
#############
if [ -e $HOME/src/disco ]
then
cd $HOME/src/disco && git pull origin master
else
git clone 'git://github.com/sqs/disco.git' $HOME/src/disco && \
cd $HOME/src/disco
fi
make && make install && cd contrib/discodex && python setup.py install
#############
### PATHS ###
#############
cp -R $HOME/src/disco/contrib/discodex/ /usr/local/lib/disco/
chown -R disco:disco /usr/local/lib/
mkdir -p /srv/disco /srv/disco/log /srv/disco/run /srv/disco/.ssh
chown -R disco:disco /srv/disco
|
sqs/freequery
|
bin/remote-install.sh
|
Shell
|
agpl-3.0
| 896 |
#!/usr/bin/env bash
# Run simple, full backup for all the relevant OpenEdX databases:
# Edx
# EdxPrivate
# EdxForum
# EdxPiazza
# If used outside of Stanford: change the target disk,
# which at Stanford is /lfs/datastage/1/MySQLBackup/
# Create a new directory whose name includes the current date and time,
# e.g. "backupEdx_Fri_Jun_20_08:54:42_PDT_2014" (spaces in the date output
# are replaced with underscores):
newDir=/lfs/datastage/1/MySQLBackup/backupEdx_$(echo $(date) | sed -e 's/[ ]/_/g')
#echo $newDir
# The following will ask for sudo PWD, which limits
# automatic run for now. Need to fix this:
sudo mkdir $newDir
# Use mysqlhotcopy to grab one MySQL db at a time:
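# NOTE: mysqlhotcopy only handles MyISAM/ARCHIVE tables and was removed in
# MySQL 5.7; on newer servers use mysqldump or snapshot-based backups instead.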
sudo mysqlhotcopy Edx $newDir # ~3hrs
sudo mysqlhotcopy EdxForum $newDir # instantaneous
sudo mysqlhotcopy EdxPiazza $newDir # instantaneous
sudo mysqlhotcopy EdxPrivate $newDir # ~3min
|
EDUlib/eTracesX
|
Translation_software/edx_to_MOOCdb_piping/import.openedx.apipe/scripts/backupEdx.sh
|
Shell
|
agpl-3.0
| 1,338 |
#!/bin/bash
# The purpose of this program is to initialise a new swarm.
# It also determines the size of the swarm.
rm insects.txt 2> /dev/null
rm -r pupa* 2>/dev/null
# redirect errors to /dev/null
for i in {1..20} # say how many prototype insects you will want
do
echo $i >> insects.txt
mkdir ./pupa$i
cp ./swarmdata.txt ./swarmhistory.txt ./swarminsistence.txt ./swarminput.txt ./swarmoutput.txt ./pupa$i
cd ./pupa$i
ln ../examen ./examen
cd ..
chmod +x ./pupa$i/examen
done
|
KedalionDaimon/larvarum-computationis-examen-iii
|
soloqueen.sh
|
Shell
|
agpl-3.0
| 498 |
P_VERSION=1.007000
PERL_VERSION=5.18.2
PERL_ARCHNAME=arm
P_SRC="Moo-$P_VERSION"
P_TAR="$P_SRC.tar.gz"
P_URL="http://search.cpan.org/CPAN/authors/id/H/HA/HAARG/"
source ../../scripts/env.sh ../../scripts
do_build()
{
if [ -f Build.PL ] ; then
PERL_MM_USE_DEFAULT=1 \
perl Build.PL \
--config ar="$X_AR" \
--config full_ar="$X_AR" \
--config cc="$X_CC" \
--config ccflags="$CFLAGS" \
--config optimize=" " \
--config ld="$X_CC" \
--config lddlflags="-shared $LDFLAGS" \
--config ldflags="$LDFLAGS" \
--destdir ${TARGET_DIR} \
--installdirs vendor \
--install_path lib=/usr/lib/perl5/site_perl/$PERL_VERSION \
--install_path arch=/usr/lib/perl5/site_perl/$PERL_VERSION/$PERL_ARCHNAME \
--install_path bin=/usr/bin \
--install_path script=/usr/bin \
--install_path bindoc=/usr/share/man/man1 \
--install_path libdoc=/usr/share/man/man3
# TODO
#--include_dirs $$(STAGING_DIR)/usr/lib/perl5/$PERL_VERSION/$PERL_ARCHNAME/CORE
else
PERL_MM_USE_DEFAULT=1 \
PERL_AUTOINSTALL=--skipdeps \
perl Makefile.PL \
AR="$X_AR" \
FULL_AR="$X_AR" \
CC="$X_CC" \
CCFLAGS="$CFLAGS" \
OPTIMIZE=" " \
LD="$X_CC" \
LDDLFLAGS="-shared $LDFLAGS" \
LDFLAGS="$LDFLAGS" \
DESTDIR=${TARGET_DIR} \
INSTALLDIRS=vendor \
INSTALLVENDORLIB=/usr/lib/perl5/site_perl/$PERL_VERSION \
INSTALLVENDORARCH=/usr/lib/perl5/site_perl/$PERL_VERSION/$PERL_ARCHNAME \
INSTALLVENDORBIN=/usr/bin \
INSTALLVENDORSCRIPT=/usr/bin \
INSTALLVENDORMAN1DIR=/usr/share/man/man1 \
INSTALLVENDORMAN3DIR=/usr/share/man/man3
fi
make ${X_MAKE_ARGS}
make install
}
#~ do_post_install()
#~ {
#~ true
#~ }
do_commands $@
|
Colibri-Embedded/colibri-linux
|
packages/perl-moo/build.sh
|
Shell
|
lgpl-3.0
| 1,645 |
#!/bin/sh
. /opt/pyrame/ports.sh
if test $# -lt 2
then
echo "usage $0 ki_6487_id voltage_limit"
exit 1
fi
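# Example (hypothetical module id and limit): ./set_voltage_limit.sh 1 50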
chkpyr2.py localhost $KI_6487_PORT set_voltage_limit_ki_6487 $@
|
sbinet-staging/pyrame
|
ps/cmd_ki_6487/set_voltage_limit.sh
|
Shell
|
lgpl-3.0
| 174 |
[ "x${pV:0:1}" == "x2" ] && return 0
v=0.23.1
add_package -archive scikit-learn-$v.tar.gz \
https://github.com/scikit-learn/scikit-learn/archive/$v.tar.gz
pack_set -s $IS_MODULE
pack_set -install-query $(pack_get -LD)/python$pV/site-packages/sklearn/__init__.py
# Add requirments when creating the module
pack_set -module-requirement numpy \
-module-requirement scipy \
-module-requirement cython
pack_cmd "unset LDFLAGS"
pack_cmd "OMP_NUM_THREADS=$NPROCS $(get_parent_exec) setup.py build ${pNumpyInstallC}"
pack_cmd "OMP_NUM_THREADS=$NPROCS $(get_parent_exec) setup.py install --prefix=$(pack_get -prefix)"
add_test_package sklearn.test
pack_set -module-requirement pandas
pack_cmd "pytest --exe sklearn > $TEST_OUT 2>&1 || echo forced"
pack_store $TEST_OUT
|
zerothi/bash-build
|
python/scikit-learn.bash
|
Shell
|
lgpl-3.0
| 773 |
#!/usr/bin/env sh
./expect.sh test/*
|
ecanuto/expect.sh
|
runtests.sh
|
Shell
|
unlicense
| 37 |
#!/bin/bash
postman_pat='^jdk-([[:digit:]]{1,2})u([[:digit:]]+)-linux-x[[:digit:]]{2}\.tar\.gz'
if [[ $1 =~ $postman_pat ]]; then
JDK_VERISON_NAME=jdk1.${BASH_REMATCH[1]}.0_${BASH_REMATCH[2]}
else
echo "Error: '$1' does not match the expected Oracle JDK tarball name (jdk-<major>u<update>-linux-x<bits>.tar.gz)"
exit 1
fi
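# e.g. "jdk-8u151-linux-x64.tar.gz" yields JDK_VERISON_NAME=jdk1.8.0_151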
echo $JDK_VERISON_NAME
file /sbin/init
mkdir -p /usr/local/java
cp -r $1 /usr/local/java
cd /usr/local/java
tar -xvzf $1
## Append the Java environment variables to /etc/profile
## (escaped \$ keeps PATH/JAVA_HOME/JRE_HOME unexpanded until login)
cat >> /etc/profile <<EOF
JAVA_HOME=/usr/local/java/$JDK_VERISON_NAME
PATH=\$PATH:\$HOME/bin:\$JAVA_HOME/bin
JRE_HOME=/usr/local/java/$JDK_VERISON_NAME/jre
PATH=\$PATH:\$HOME/bin:\$JRE_HOME/bin
export JAVA_HOME
export JRE_HOME
export PATH
EOF
### end of profile block
update-alternatives --install "/usr/bin/java" "java" "/usr/local/java/"$JDK_VERISON_NAME"/jre/bin/java" 1
update-alternatives --install "/usr/bin/javac" "javac" "/usr/local/java/"$JDK_VERISON_NAME"/bin/javac" 1
update-alternatives --install "/usr/bin/jmc" "jmc" "/usr/local/java/"$JDK_VERISON_NAME"/bin/jmc" 1
update-alternatives --install "/usr/bin/javaws" "javaws" "/usr/local/java/"$JDK_VERISON_NAME"/bin/javaws" 1
update-alternatives --set java /usr/local/java/$JDK_VERISON_NAME/jre/bin/java
update-alternatives --set javac /usr/local/java/$JDK_VERISON_NAME/bin/javac
update-alternatives --set jmc /usr/local/java/$JDK_VERISON_NAME/bin/jmc
update-alternatives --set javaws /usr/local/java/$JDK_VERISON_NAME/bin/javaws
. /etc/profile
java -version
javac -version
rm /usr/local/java/$1
|
arumoy/linux-oracle-java-installer
|
install-java.sh
|
Shell
|
unlicense
| 1,433 |
#!/bin/sh
# base16-shell (https://github.com/chriskempson/base16-shell)
# Base16 Shell template by Chris Kempson (http://chriskempson.com)
# Framer scheme by Framer (Maintained by Jesse Hoyos)
export BASE16_THEME=framer
color00="18/18/18" # Base 00 - Black
color01="FD/88/6B" # Base 08 - Red
color02="32/CC/DC" # Base 0B - Green
color03="FE/CB/6E" # Base 0A - Yellow
color04="20/BC/FC" # Base 0D - Blue
color05="BA/8C/FC" # Base 0E - Magenta
color06="AC/DD/FD" # Base 0C - Cyan
color07="D0/D0/D0" # Base 05 - White
color08="74/74/74" # Base 03 - Bright Black
color09=$color01 # Base 08 - Bright Red
color10=$color02 # Base 0B - Bright Green
color11=$color03 # Base 0A - Bright Yellow
color12=$color04 # Base 0D - Bright Blue
color13=$color05 # Base 0E - Bright Magenta
color14=$color06 # Base 0C - Bright Cyan
color15="EE/EE/EE" # Base 07 - Bright White
color16="FC/47/69" # Base 09
color17="B1/5F/4A" # Base 0F
color18="15/15/15" # Base 01
color19="46/46/46" # Base 02
color20="B9/B9/B9" # Base 04
color21="E8/E8/E8" # Base 06
color_foreground="D0/D0/D0" # Base 05
color_background="18/18/18" # Base 00
if [ -n "$TMUX" ]; then
# Tell tmux to pass the escape sequences through
# (Source: http://permalink.gmane.org/gmane.comp.terminal-emulators.tmux.user/1324)
put_template() { printf '\033Ptmux;\033\033]4;%d;rgb:%s\033\033\\\033\\' $@; }
put_template_var() { printf '\033Ptmux;\033\033]%d;rgb:%s\033\033\\\033\\' $@; }
put_template_custom() { printf '\033Ptmux;\033\033]%s%s\033\033\\\033\\' $@; }
elif [ "${TERM%%[-.]*}" = "screen" ]; then
# GNU screen (screen, screen-256color, screen-256color-bce)
put_template() { printf '\033P\033]4;%d;rgb:%s\007\033\\' $@; }
put_template_var() { printf '\033P\033]%d;rgb:%s\007\033\\' $@; }
put_template_custom() { printf '\033P\033]%s%s\007\033\\' $@; }
elif [ "${TERM%%-*}" = "linux" ]; then
put_template() { [ $1 -lt 16 ] && printf "\e]P%x%s" $1 $(echo $2 | sed 's/\///g'); }
put_template_var() { true; }
put_template_custom() { true; }
else
put_template() { printf '\033]4;%d;rgb:%s\033\\' $@; }
put_template_var() { printf '\033]%d;rgb:%s\033\\' $@; }
put_template_custom() { printf '\033]%s%s\033\\' $@; }
fi
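# Example: put_template 1 $color01 emits an OSC 4 sequence telling the
# terminal to set palette slot 1 (red) to rgb FD/88/6B.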
# 16 color space
put_template 0 $color00
put_template 1 $color01
put_template 2 $color02
put_template 3 $color03
put_template 4 $color04
put_template 5 $color05
put_template 6 $color06
put_template 7 $color07
put_template 8 $color08
put_template 9 $color09
put_template 10 $color10
put_template 11 $color11
put_template 12 $color12
put_template 13 $color13
put_template 14 $color14
put_template 15 $color15
# 256 color space
put_template 16 $color16
put_template 17 $color17
put_template 18 $color18
put_template 19 $color19
put_template 20 $color20
put_template 21 $color21
# foreground / background / cursor color
if [ -n "$ITERM_SESSION_ID" ]; then
# iTerm2 proprietary escape codes
put_template_custom Pg D0D0D0 # foreground
put_template_custom Ph 181818 # background
put_template_custom Pi D0D0D0 # bold color
put_template_custom Pj 464646 # selection color
put_template_custom Pk D0D0D0 # selected text color
put_template_custom Pl D0D0D0 # cursor
put_template_custom Pm 181818 # cursor text
else
put_template_var 10 $color_foreground
if [ "$BASE16_SHELL_SET_BACKGROUND" != false ]; then
put_template_var 11 $color_background
if [ "${TERM%%-*}" = "rxvt" ]; then
put_template_var 708 $color_background # internal border (rxvt)
fi
fi
put_template_custom 12 ";7" # cursor (reverse video)
fi
# clean up
unset -f put_template
unset -f put_template_var
unset -f put_template_custom
unset color00
unset color01
unset color02
unset color03
unset color04
unset color05
unset color06
unset color07
unset color08
unset color09
unset color10
unset color11
unset color12
unset color13
unset color14
unset color15
unset color16
unset color17
unset color18
unset color19
unset color20
unset color21
unset color_foreground
unset color_background
|
j-c-m/dotfiles
|
.config/base16-shell/scripts/base16-framer.sh
|
Shell
|
unlicense
| 3,981 |
#-------------------------------------------------------------------------------
# Shows how to build the samples vs. a released binary download of LLVM & Clang.
#
# Assumes the binary release was downloaded from http://llvm.org/releases/
# and untarred in some directory. The actual script code here uses a location
# I use on my machine, but that can be replaced by anything you fancy in the
# BINARY_DIR_PATH variable.
#
# Eli Bendersky ([email protected])
# This code is in the public domain
#-------------------------------------------------------------------------------
#!/bin/bash
set -eu
set -x
BINARY_DIR_PATH=${BINARY_DIR_PATH:-$HOME/llvm/llvm6.0-binaries}
make -j8 \
CXX=$BINARY_DIR_PATH/bin/clang++ \
LLVM_SRC_PATH=$BINARY_DIR_PATH \
LLVM_BUILD_PATH=$BINARY_DIR_PATH/bin \
LLVM_BIN_PATH=$BINARY_DIR_PATH/bin
make LLVM_BIN_PATH=$BINARY_DIR_PATH/bin test
|
eliben/llvm-clang-samples
|
build_vs_released_binary.sh
|
Shell
|
unlicense
| 878 |
#!/bin/bash
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
ROOT="$DIR/.."
# Remove all the generated files
# Not removing .cxx/ and .gradle/ can cause problems when jumping between
# flutter versions.
cd "$ROOT"
flutter clean
cd "$ROOT/example/"
flutter clean
cd "$ROOT"
rm -rf android/.cxx/
rm -rf example/android/.gradle/
rm -f example/.packages
|
google/webcrypto.dart
|
tool/clean.sh
|
Shell
|
apache-2.0
| 955 |
#!/bin/bash
#
# File: moab_hold.sh
#
# Author: Gideon Juve ([email protected])
# Author: David Rebatto ([email protected])
#
# Description: Place a Moab job on hold
#
# Copyright 2015 University of Southern California.
# Copyright (c) Members of the EGEE Collaboration. 2004.
# See http://www.eu-egee.org/partners/ for details on the copyright
# holders.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
. $(dirname $0)/blah_load_config.sh
jobid=$(echo $1 | cut -d/ -f3)
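# $1 is the BLAH job id; the third '/'-separated field is assumed to be
# the native Moab job id (e.g. "moab/<cluster>/<jobid>").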
result=$(${moab_binpath}/checkjob $jobid | awk '
/State:/ {
print $2
}
')
#currently only holding idle or waiting jobs is supported
if [ "$2" == "1" ] ; then # I guess $2 is 1 when you want to force?
${moab_binpath}/mjobctl -h $jobid
elif [ "$result" == "Idle" ] ; then
${moab_binpath}/mjobctl -h $jobid
else
echo "unsupported for this job status" >&2
exit 1
fi
|
pegasus-isi/pegasus
|
share/pegasus/htcondor/glite/moab_hold.sh
|
Shell
|
apache-2.0
| 1,368 |
#!/bin/sh
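# Usage (positional, inferred from the exports below):
#   gbsbuild.sh <TARGET_TRANSPORT> <SECURED> <BUILD_SAMPLE> <RELEASE> <LOGGING>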
spec=`ls ./resource/csdk/stack/samples/tizen/build/packaging/*.spec`
version=`rpm --query --queryformat '%{version}\n' --specfile $spec`
version=`echo $version|cut -d" " -f 1`
name=oicri
echo $1
export TARGET_TRANSPORT=$1
echo $2
export SECURED=$2
echo $3
export BUILD_SAMPLE=$3
echo $4
export RELEASE=$4
echo $5
export LOGGING=$5
echo $TARGET_TRANSPORT
echo $BUILD_SAMPLE
rm -rf $name-$version
builddir=`pwd`
sourcedir=`pwd`
echo `pwd`
mkdir ./tmp
mkdir ./tmp/extlibs/
mkdir ./tmp/packaging
cp -R ./extlibs/tinycbor $sourcedir/tmp/extlibs
cp -R ./extlibs/cjson $sourcedir/tmp/extlibs
cp -R ./extlibs/tinydtls $sourcedir/tmp/extlibs
cp -R ./extlibs/timer $sourcedir/tmp/extlibs
cp -R ./extlibs/rapidxml $sourcedir/tmp/extlibs
cp -R ./resource/csdk/stack/samples/tizen/build/packaging/*.spec $sourcedir/tmp/packaging
cp -R ./resource $sourcedir/tmp/
cp -R ./build_common/external_libs.scons $sourcedir/tmp/
cd $sourcedir
cd ./resource/csdk/stack/samples/tizen/build/
cp -R ./* $sourcedir/tmp/
rm -f $sourcedir/tmp/SConscript
cp SConstruct $sourcedir/tmp/
cp scons/SConscript $sourcedir/tmp/scons/
mkdir -p $sourcedir/tmp/iotivityconfig
cd $sourcedir/build_common/
cp -R ./iotivityconfig/* $sourcedir/tmp/iotivityconfig/
cp -R ./SConscript $sourcedir/tmp/
cd $sourcedir/tmp
echo `pwd`
whoami
# Initialize Git repository
if [ ! -d .git ]; then
git init ./
git config user.email "[email protected]"
git config user.name "Your Name"
git add ./
git commit -m "Initial commit"
fi
echo "Calling core gbs build command"
gbscommand="gbs build -A armv7l -B ~/GBS-ROOT-RI --include-all --define 'TARGET_TRANSPORT $1' --define 'SECURED $2' --define 'RELEASE $4' --define 'LOGGING $5' --repository ./"
echo $gbscommand
if eval $gbscommand; then
echo "Core build is successful"
else
echo "Core build failed. Try 'sudo find . -type f -exec dos2unix {} \;' in the 'connectivity/' folder"
cd $sourcedir
rm -rf $sourcedir/tmp
exit
fi
if echo $BUILD_SAMPLE|grep -qi '^ON$'; then
cd resource/csdk/stack/samples/tizen/SimpleClientServer
echo `pwd`
# Initialize Git repository
if [ ! -d .git ]; then
git init ./
git config user.email "[email protected]"
git config user.name "Your Name"
git add ./
git commit -m "Initial commit"
fi
echo "Calling sample gbs build command"
gbscommand="gbs build -A armv7l -B ~/GBS-ROOT-RI --include-all --define 'TARGET_TRANSPORT $1' --define 'SECURED $2' --define 'RELEASE $4' --define 'LOGGING $5' --repository ./"
echo $gbscommand
if eval $gbscommand; then
echo "Sample build is successful"
else
echo "Sample build is failed. Try 'sudo find . -type f -exec dos2unix {} \;' in the 'connectivity/' folder"
fi
else
echo "Sample build is not enabled"
fi
cd $sourcedir
rm -rf $sourcedir/tmp
|
WojciechLuczkow/iotivity
|
resource/csdk/stack/samples/tizen/build/gbsbuild.sh
|
Shell
|
apache-2.0
| 2,868 |
#!/bin/bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
source "${KUBE_ROOT}/hack/lib/init.sh"
kube::golang::setup_env
BUILD_TARGETS=(
cmd/libs/go2idl/client-gen
cmd/libs/go2idl/set-gen
)
make -C "${KUBE_ROOT}" WHAT="${BUILD_TARGETS[*]}"
clientgen=$(kube::util::find-binary "client-gen")
setgen=$(kube::util::find-binary "set-gen")
# Please do not add any logic to this shell script. Add logic to the go code
# that generates the set-gen program.
#
# This can be called with one flag, --verify-only, so it works for both the
# update- and verify- scripts.
${clientgen} "$@"
${clientgen} -t "$@"
${clientgen} --clientset-name="release_1_5" --input="api/v1,authorization/v1beta1,autoscaling/v1,batch/v1,extensions/v1beta1,policy/v1alpha1"
# Clientgen for federation clientset.
${clientgen} --clientset-name=federation_internalclientset --clientset-path=k8s.io/kubernetes/federation/client/clientset_generated --input="../../federation/apis/federation/","api/","extensions/" --included-types-overrides="api/Service,api/Namespace,extensions/ReplicaSet,api/Secret,extensions/Ingress,api/Event" "$@"
${clientgen} --clientset-name=federation_release_1_5 --clientset-path=k8s.io/kubernetes/federation/client/clientset_generated --input="../../federation/apis/federation/v1beta1","api/v1","extensions/v1beta1" --included-types-overrides="api/v1/Service,api/v1/Namespace,extensions/v1beta1/ReplicaSet,api/v1/Secret,extensions/v1beta1/Ingress,api/v1/Event" "$@"
${setgen} "$@"
# You may add additional calls of code generators like set-gen above.
|
DongyiYang/kubernetes
|
hack/update-codegen.sh
|
Shell
|
apache-2.0
| 2,182 |
#!/bin/bash
# start ssr
filename="/etc/ssr/client_conf.txt"
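# client_conf.txt is assumed to contain three whitespace-separated fields
# per server, read in this order: <server ip>, <server port>, <password>.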
num=3
for line in $(cat $filename)
do
#echo "num: " $num
#echo "line: " $line
if [ $num -gt 0 ];then
if [ $num -eq 3 ];then
ip=$line
#echo "ip:" $ip
num=`expr $num - 1`
elif [ $num -eq 2 ];then
port=$line
#echo "port:" $port
num=`expr $num - 1`
elif [ $num -eq 1 ];then
#echo "------------>this is password"
password=$line
#echo "password": $password
num=`expr $num - 1`
fi
fi
if [ $num -eq 0 ];then
num=3
#echo "ip:" $ip
#echo "port:" $port
#echo "password": $password
screen -S ssr-local -Dm ssr-local -s $ip -p $port -k $password -o tls1.2_ticket_auth_compatible -O auth_sha1_v4 &
fi
done
|
SuperSuperSuperSuper5/shadowsocks-remix
|
script/client_startssr.sh
|
Shell
|
apache-2.0
| 869 |
#!/bin/bash
set -e
/geth --datadir=~/.ethereum/devchain init "/genesis.json"
/geth --networkid=636393 --rpc --rpccorsdomain="*" --rpcaddr="0.0.0.0" --rpcapi "miner,admin,db,personal,eth,net,web3" --ipcdisable --datadir=~/.ethereum/devchain --nodekeyhex=091bd6067cb4612df85d9c1ff85cc47f259ced4d4cd99816b14f35650f59c322
|
y12studio/dltdojo
|
dockerfiles/ethereumgo/tiguan2/startboot.sh
|
Shell
|
apache-2.0
| 318 |
#!/bin/bash
if [ $# -ne "1" ]; then
echo "Use: ./deploy.sh <tarball prefix>"
echo
echo "Script to deploy the latest stepup component with the specified prefix."
echo "Uses a lexical ordering (i.e. 'sort') of the matching filenames to pick te latest one."
echo "Script must be run from the Stepup-VM dir."
echo "Requires a local 'deploy' directory with a clone of the Stepup-Deploy repo"
echo
echo "Works especially well for development snapshots like 'Stepup-SelfService-develop-20150728150908Z-02ca2ed57a14a98d07e305efc7a67b6789cdd487.tar.bz2'"
exit 1
fi
echo "Looking for latest of: ${1}"
tarball=`ls ${1}* | sort | tail -1`
echo "Deploying: ${tarball}"
./deploy/scripts/deploy.sh ${tarball} -i environment/inventory -l app.stepup.example.com,ks.stepup.example.com
|
SURFnet/Stepup-VM
|
deploy-latest.sh
|
Shell
|
apache-2.0
| 787 |
#!/bin/bash -e
# this should be run after check-build finishes.
. /etc/profile.d/modules.sh
echo ${SOFT_DIR}
module add deploy
module add gcc/${GCC_VERSION}
module add openmpi/${OPENMPI_VERSION}-gcc-${GCC_VERSION}
cd ${WORKSPACE}/${NAME}-${VERSION}/build-${BUILD_NUMBER}-float
rm -rf *
echo "All tests have passed, will now build into ${SOFT_DIR}-gcc-${GCC_VERSION}"
echo "Configuring deploy for float"
CFLAGS='-fPIC' ../configure \
--prefix=$SOFT_DIR-gcc-${GCC_VERSION}-mpi-${OPENMPI_VERSION} \
--enable-mpi \
--enable-openmp \
--enable-shared \
--enable-threads \
--enable-sse2
make all
make install
cd ${WORKSPACE}/${NAME}-${VERSION}/build-${BUILD_NUMBER}-double
rm -rf *
echo "All tests have passed, will now build into ${SOFT_DIR}-gcc-${GCC_VERSION}"
echo "Configuring deploy for double"
CFLAGS='-fPIC' ../configure \
--prefix=$SOFT_DIR-gcc-${GCC_VERSION}-mpi-${OPENMPI_VERSION} \
--enable-mpi \
--enable-openmp \
--enable-shared \
--enable-threads \
--enable-long-double \
--with-pic
make all
make install
echo "Creating the modules file directory ${LIBRARIES}"
mkdir -p ${LIBRARIES}/${NAME}
(
cat <<MODULE_FILE
#%Module1.0
## $NAME modulefile
##
proc ModulesHelp { } {
puts stderr " Adds $NAME $VERSION (built with GCC ${GCC_VERSION} and OpenMPI ${OPENMPI_VERSION}) to the environment"
}
module add gcc/${GCC_VERSION}
module add openmpi/${OPENMPI_VERSION}-gcc-${GCC_VERSION}
module-whatis "$NAME $VERSION. compiled for OpenMPI ${OPENMPI_VERSION} and GCC version ${GCC_VERSION}"
setenv FFTW_VERSION $VERSION
setenv FFTW_DIR $::env(CVMFS_DIR)/$::env(SITE)/$::env(OS)/$::env(ARCH)/$NAME/$VERSION-gcc-${GCC_VERSION}-mpi-${OPENMPI_VERSION}
prepend-path PATH $::env(FFTW_DIR)/bin
prepend-path CPATH $::env(FFTW_DIR)/include
prepend-path MANPATH $::env(FFTW_DIR)/man
prepend-path LD_LIBRARY_PATH $::env(FFTW_DIR)/lib
MODULE_FILE
) > ${LIBRARIES}/${NAME}/${VERSION}-gcc-${GCC_VERSION}-mpi-${OPENMPI_VERSION}
# Testing module
module avail
module list
module add ${NAME}/${VERSION}-gcc-${GCC_VERSION}-mpi-${OPENMPI_VERSION}
echo "PATH is : $PATH"
echo "LD_LIBRARY_PATH is $LD_LIBRARY_PATH"
# confirm openmpi
cd ${WORKSPACE}
echo "Working directory is $PWD with : "
ls
echo "LD_LIBRARY_PATH is $LD_LIBRARY_PATH"
echo "Compiling serial code"
g++ hello-world.cpp -o hello-world -L${FFTW_DIR}/lib -I${FFTW_DIR}/include -lfftw3 -lm
echo "executing serial code"
./hello-world
# now try mpi version
echo "Compiling MPI code"
mpic++ hello-world-mpi.cpp -L${FFTW_DIR}/lib -I${FFTW_DIR}/include -lfftw3 -lfftw3_mpi -o hello-world-mpi
#mpic++ -lfftw3 hello-world-mpi.cpp -o hello-world-mpi -L$FFTW_DIR/lib -I$FFTW_DIR/include
echo "executing MPI code"
time mpirun -np 2 ./hello-world-mpi
|
SouthAfricaDigitalScience/fftw3-deploy
|
deploy.sh
|
Shell
|
apache-2.0
| 2,839 |
#!/bin/bash
cd x264
export NDK=/home/joe/Android/Sdk/ndk-bundle
export TOOLCHAIN=$NDK/toolchains/arm-linux-androideabi-4.9/prebuilt/linux-x86_64
export PLATFORM=$NDK/platforms/android-8/arch-arm
export PREFIX=../libx264
./configure \
--prefix=$PREFIX \
--enable-static \
--disable-shared \
--enable-pic \
--disable-asm \
--disable-cli \
--host=arm-linux \
--cross-prefix=$TOOLCHAIN/bin/arm-linux-androideabi- \
--sysroot=$PLATFORM
make -j8
make install
cd ..
|
joetang1989/Android-Universal-Image-Loader-Study
|
note/compile_01/build_x264.sh
|
Shell
|
apache-2.0
| 474 |
#!/bin/bash
#------------------------------------------------------------------------------#
# Copyright 2002-2015, OpenNebula Project, OpenNebula Systems #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#------------------------------------------------------------------------------#
ARGS=$*
usage() {
echo
echo "Usage: install.sh [-u install_user] [-g install_group]"
echo " [-d ONE_LOCATION] [-l] [-h]"
echo
echo "-d: target installation directory, if not defined it'd be root. Must be"
echo " an absolute path. Installation will be selfcontained"
echo "-l: creates symlinks instead of copying files, useful for development"
echo "-h: prints this help"
}
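# Example: ./install.sh -d /opt/appmarket -l (self-contained install using symlinks)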
PARAMETERS="hlu:g:d:"
if [ $(getopt --version | tr -d " ") = "--" ]; then
TEMP_OPT=`getopt $PARAMETERS "$@"`
else
TEMP_OPT=`getopt -o $PARAMETERS -n 'install.sh' -- "$@"`
fi
if [ $? != 0 ] ; then
usage
exit 1
fi
eval set -- "$TEMP_OPT"
LINK="no"
ONEADMIN_USER=`id -u`
ONEADMIN_GROUP=`id -g`
SRC_DIR=$PWD
while true ; do
case "$1" in
-h) usage; exit 0;;
-d) ROOT="$2" ; shift 2 ;;
-l) LINK="yes" ; shift ;;
-u) ONEADMIN_USER="$2" ; shift 2;;
-g) ONEADMIN_GROUP="$2"; shift 2;;
--) shift ; break ;;
*) usage; exit 1 ;;
esac
done
export ROOT
if [ -z "$ROOT" ]; then
LIB_LOCATION="/usr/lib/one/ruby/oneapps"
BIN_LOCATION="/usr/bin"
PACKAGES_LOCATION="/usr/share/one/oneapps"
SHARE_LOCATION="/usr/share/one/oneapps"
ETC_LOCATION="/etc/one"
SUNSTONE_LOCATION="/usr/lib/one/sunstone"
else
LIB_LOCATION="$ROOT/lib/ruby/oneapps"
BIN_LOCATION="$ROOT/bin"
PACKAGES_LOCATION="$ROOT/share/oneapps"
SHARE_LOCATION="$ROOT/share/oneapps"
ETC_LOCATION="$ROOT/etc"
SUNSTONE_LOCATION="$ROOT/lib/sunstone"
fi
do_file() {
if [ "$UNINSTALL" = "yes" ]; then
rm $2/`basename $1`
else
if [ "$LINK" = "yes" ]; then
ln -fs $SRC_DIR/$1 $2
else
cp -R $SRC_DIR/$1 $2
fi
fi
}
copy_files() {
FILES=$1
DST=$DESTDIR$2
mkdir -p $DST
for f in $FILES; do
do_file src/$f $DST
done
}
create_dirs() {
DIRS=$*
for d in $DIRS; do
dir=$DESTDIR$d
mkdir -p $dir
done
}
change_ownership() {
DIRS=$*
for d in $DIRS; do
chown -R $ONEADMIN_USER:$ONEADMIN_GROUP $DESTDIR$d
done
}
(
cd src
## Client files
copy_files "client/lib/*" "$LIB_LOCATION/market"
copy_files "client/bin/*" "$BIN_LOCATION"
## Server files
# bin
copy_files "bin/*" "$BIN_LOCATION"
# dirs containing files
copy_files "controllers models public views" "$LIB_LOCATION/market"
# files
copy_files "lib/* models.rb config.ru Gemfile \
Rakefile config/init.rb config/appmarket_version.rb" "$LIB_LOCATION/market"
# Sunstone
copy_files "sunstone/public/app/tabs/*" "$SUNSTONE_LOCATION/public/app/tabs"
copy_files "sunstone/public/app/opennebula/*" "$SUNSTONE_LOCATION/public/app/opennebula"
copy_files "sunstone/public/images/*" "$SUNSTONE_LOCATION/public/images"
copy_files "sunstone/routes/*" "$SUNSTONE_LOCATION/routes"
# Do not link the ETC files
LINK="no"
copy_files "sunstone/etc/sunstone-appmarket.conf" "$ETC_LOCATION"
copy_files "config/appmarket-server.conf" "$ETC_LOCATION"
)
|
OpenNebula/addon-appmarket
|
install.sh
|
Shell
|
apache-2.0
| 4,241 |
#!/bin/bash -ex
source tools/common
source novarc
./bin/neutron-ext-net --network-type flat -g $GATEWAY -c $CIDR_EXT -f $FIP_RANGE ext_net
./bin/neutron-tenant-net --network-type gre -t admin -r provider-router -N $NAMESERVER private $CIDR_PRIV
create_demo_user
create_keypairs
set_quotas
create_secgroup_rules
delete_all_public_flavors
upload_image cloudimages xenial xenial-server-cloudimg-amd64-disk1.img raw
create_exclusive_aggregate orange 1 kvm
create_exclusive_flavor orange 40960 8 416 kvm
create_exclusive_aggregate grey 999999 kvm
create_exclusive_flavor grey 2048 2 20 kvm
|
ubuntu-openstack/bigdata-novalxd
|
tools/configure-openstack-kvm.sh
|
Shell
|
apache-2.0
| 591 |
#!/bin/bash
# Build Mac OS X apps in Release configuration
#
# Syntax:
# build-release.sh
CURRENT_PATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
CONFIGURATION=Release
PROJECT=${CURRENT_PATH}/../HTTPServerKit.xcodeproj
# determine tag name
GIT_REVISION_HEAD=`git rev-parse HEAD`
DATE_REVISION=`date +"%Y%m%d"`
REVISION="${DATE_REVISION}-${GIT_REVISION_HEAD:0:7}"
UNPUSHED=`git log --branches --not --remotes --simplify-by-decoration --decorate --oneline`
# add VERSION file
echo "${REVISION}" > ${CURRENT_PATH}/../VERSION
# see what hasn't been pushed to origin
if [ "${UNPUSHED}" != "" ] ; then
echo "Error: unpushed commits, use 'git push origin' first"
echo " ${UNPUSHED}"
exit -1
fi
# perform the tagging
git tag -a -m "Tagging version ${REVISION}" "${REVISION}" || exit -1
git push origin --tags || exit -1
# build mac targets
xcodebuild -project ${PROJECT} -target "HTTPServerKit" -configuration ${CONFIGURATION} || exit -1
xcodebuild -project ${PROJECT} -target "HTTPServer" -configuration ${CONFIGURATION} || exit -1
# report
echo "Tagged release: ${REVISION}"
|
djthorpe/HTTPServerKit
|
etc/build-release.sh
|
Shell
|
apache-2.0
| 1,095 |
#!/bin/bash -eux
echo "==> Installed packages before cleanup"
dpkg --get-selections | grep -v deinstall
# Clean up the apt cache
apt-get -y autoremove --purge
apt-get -y clean
apt-get -y autoclean
echo "==> Cleaning up udev rules"
rm -rf /dev/.udev/ /lib/udev/rules.d/75-persistent-net-generator.rules
echo "==> Cleaning up leftover dhcp leases"
if [ -d "/var/lib/dhcp" ]; then
rm -f /var/lib/dhcp/*
fi
echo "==> Removing man pages"
rm -rf /usr/share/man/*
echo "==> Removing APT files"
find /var/lib/apt -type f | xargs rm -f
echo "==> Removing anything in /usr/src"
rm -rf /usr/src/*
echo "==> Removing any docs"
rm -rf /usr/share/doc/*
echo "==> Removing caches"
find /var/cache -type f -exec rm -rf {} \;
echo "==> Cleaning up log files"
find /var/log -type f -exec sh -c 'echo -n > {}' \;
echo "==> Cleaning up tmp"
rm -rf /tmp/*
echo "==> Clearing last login information"
> /var/log/lastlog
> /var/log/wtmp
> /var/log/btmp
echo "==> Removing bash history"
unset HISTFILE
rm -f /root/.bash_history
rm -f /home/vagrant/.bash_history
|
holser/packer-templates
|
scripts/debian/cleanup.sh
|
Shell
|
apache-2.0
| 1,043 |
#!/bin/bash
# Author: yeho <lj2007331 AT gmail.com>
# BLOG: https://blog.linuxeye.com
#
# Notes: OneinStack for CentOS/RadHat 5+ Debian 6+ and Ubuntu 12+
#
# Project home page:
# https://oneinstack.com
# https://github.com/lj2007331/oneinstack
Upgrade_Memcached() {
pushd ${oneinstack_dir}/src > /dev/null
[ ! -e "${memcached_install_dir}/bin/memcached" ] && echo "${CWARNING}Memcached is not installed on your system! ${CEND}" && exit 1
OLD_Memcached_version=`$memcached_install_dir/bin/memcached -V | awk '{print $2}'`
echo "Current Memcached Version: ${CMSG}$OLD_Memcached_version${CEND}"
while :; do echo
read -p "Please input upgrade Memcached Version(example: 1.4.39): " NEW_Memcached_version
if [ "${NEW_Memcached_version}" != "$OLD_Memcached_version" ]; then
[ ! -e "memcached-${NEW_Memcached_version}.tar.gz" ] && wget --no-check-certificate -c http://www.memcached.org/files/memcached-${NEW_Memcached_version}.tar.gz > /dev/null 2>&1
if [ -e "memcached-${NEW_Memcached_version}.tar.gz" ]; then
echo "Download [${CMSG}memcached-${NEW_Memcached_version}.tar.gz${CEND}] successfully! "
break
else
echo "${CWARNING}Memcached version does not exist! ${CEND}"
fi
else
echo "${CWARNING}input error! Upgrade Memcached version is the same as the old version${CEND}"
fi
done
if [ -e "memcached-${NEW_Memcached_version}.tar.gz" ]; then
echo "[${CMSG}memcached-${NEW_Memcached_version}.tar.gz${CEND}] found"
echo "Press Ctrl+c to cancel or Press any key to continue..."
char=`get_char`
tar xzf memcached-${NEW_Memcached_version}.tar.gz
pushd memcached-${NEW_Memcached_version}
make clean
./configure --prefix=${memcached_install_dir}
make -j ${THREAD}
if [ -e "memcached" ]; then
echo "Restarting Memcached..."
service memcached stop
make install
service memcached start
popd > /dev/null
echo "You have ${CMSG}successfully${CEND} upgrade from ${CWARNING}$OLD_Memcached_version${CEND} to ${CWARNING}${NEW_Memcached_version}${CEND}"
rm -rf memcached-${NEW_Memcached_version}
else
echo "${CFAILURE}Upgrade Memcached failed! ${CEND}"
fi
fi
popd > /dev/null
}
|
ivmm/mistack
|
include/upgrade_memcached.sh
|
Shell
|
apache-2.0
| 2,250 |
#!/bin/bash
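# Usage: run_attack.sh <input_dir> <output_dir> <max_epsilon>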
INPUT_DIR=$1
OUTPUT_DIR=$2
MAX_EPSILON=$3
cd common
pip install Keras-2.0.8-py2.py3-none-any.whl --no-index --find-links=$(pwd)
pip install h5py-2.7.1-cp27-cp27mu-manylinux1_x86_64.whl --no-index --find-links=$(pwd)
cd ../
python attack.py \
--input-dir="${INPUT_DIR}" \
--output-dir="${OUTPUT_DIR}" \
--max-epsilon="${MAX_EPSILON}"
|
ckomaki/kaggle-nips-2017
|
defense/resnet_xception_vgg19_dual/common/run_attack.sh
|
Shell
|
apache-2.0
| 354 |
#!/bin/bash
WORKING_DIR="${BASH_SOURCE[0]}";
while [ -h "${WORKING_DIR}" ]; do
WORKING_DIR=`readlink "${WORKING_DIR}"`
done
pushd . > /dev/null
cd `dirname ${WORKING_DIR}` > /dev/null
WORKING_DIR=`pwd`;
popd > /dev/null
if [ -z "$LUMONGO_CLIENT_JAVA_SETTINGS" ]
then
export LUMONGO_CLIENT_JAVA_SETTINGS="-Xmx256m"
fi
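# @project@ and @version@ are placeholders substituted with the real artifact
# name and version at build time (assumed build-script filtering).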
java $LUMONGO_CLIENT_JAVA_SETTINGS -cp $WORKING_DIR/@project@-@[email protected] org.lumongo.admin.IndexAdmin "$@"
|
lumongo/lumongo
|
lumongo-cluster/scripts/indexadmin.sh
|
Shell
|
apache-2.0
| 467 |
#!/bin/bash
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
TF_SHARED_LIBRARY_NAME=$(grep -r TF_SHARED_LIBRARY_NAME .bazelrc | awk -F= '{print$2}')
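# .bazelrc is assumed to contain a line like
#   build --action_env TF_SHARED_LIBRARY_NAME=libtensorflow_framework.so
# awk -F= keeps the value to the right of the '='.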
POLICY_JSON=$(find / -name manylinux-policy.json)
sed -i "s/libresolv.so.2\"/libresolv.so.2\", $TF_SHARED_LIBRARY_NAME/g" $POLICY_JSON
cat $POLICY_JSON
auditwheel $@
|
tensorflow/lingvo
|
third_party/auditwheel.sh
|
Shell
|
apache-2.0
| 960 |
#!/bin/bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
# This command builds and runs a local kubernetes cluster.
# You may need to run this as root to allow kubelet to open docker's socket,
# and to write the test CA in /var/run/kubernetes.
DOCKER_OPTS=${DOCKER_OPTS:-""}
DOCKER=(docker ${DOCKER_OPTS})
DOCKERIZE_KUBELET=${DOCKERIZE_KUBELET:-""}
ALLOW_PRIVILEGED=${ALLOW_PRIVILEGED:-""}
ALLOW_SECURITY_CONTEXT=${ALLOW_SECURITY_CONTEXT:-""}
PSP_ADMISSION=${PSP_ADMISSION:-""}
RUNTIME_CONFIG=${RUNTIME_CONFIG:-""}
KUBELET_AUTHORIZATION_WEBHOOK=${KUBELET_AUTHORIZATION_WEBHOOK:-""}
KUBELET_AUTHENTICATION_WEBHOOK=${KUBELET_AUTHENTICATION_WEBHOOK:-""}
POD_MANIFEST_PATH=${POD_MANIFEST_PATH:-"/var/run/kubernetes/static-pods"}
# Name of the network plugin, eg: "kubenet"
NET_PLUGIN=${NET_PLUGIN:-""}
# Place the binaries required by NET_PLUGIN in this directory, eg: "/home/kubernetes/bin".
NET_PLUGIN_DIR=${NET_PLUGIN_DIR:-""}
SERVICE_CLUSTER_IP_RANGE=${SERVICE_CLUSTER_IP_RANGE:-10.0.0.0/24}
# if enabled, must set CGROUP_ROOT
CGROUPS_PER_QOS=${CGROUPS_PER_QOS:-false}
# this is not defaulted to preserve backward compatibility.
# if CGROUPS_PER_QOS is enabled, recommend setting to /
CGROUP_ROOT=${CGROUP_ROOT:-""}
# name of the cgroup driver, i.e. cgroupfs or systemd
CGROUP_DRIVER=${CGROUP_DRIVER:-""}
# enables testing eviction scenarios locally.
EVICTION_HARD=${EVICTION_HARD:-"memory.available<100Mi"}
EVICTION_SOFT=${EVICTION_SOFT:-""}
EVICTION_PRESSURE_TRANSITION_PERIOD=${EVICTION_PRESSURE_TRANSITION_PERIOD:-"1m"}
# We disable cluster DNS by default because this script uses docker0 (or whatever
# container bridge docker is currently using) and we don't know the IP of the
# DNS pod to pass in as --cluster-dns. To set this up by hand, set this flag
# and change DNS_SERVER_IP to the appropriate IP.
ENABLE_CLUSTER_DNS=${KUBE_ENABLE_CLUSTER_DNS:-false}
DNS_SERVER_IP=${KUBE_DNS_SERVER_IP:-10.0.0.10}
DNS_DOMAIN=${KUBE_DNS_NAME:-"cluster.local"}
KUBECTL=${KUBECTL:-cluster/kubectl.sh}
WAIT_FOR_URL_API_SERVER=${WAIT_FOR_URL_API_SERVER:-10}
ENABLE_DAEMON=${ENABLE_DAEMON:-false}
HOSTNAME_OVERRIDE=${HOSTNAME_OVERRIDE:-"127.0.0.1"}
CLOUD_PROVIDER=${CLOUD_PROVIDER:-""}
CLOUD_CONFIG=${CLOUD_CONFIG:-""}
FEATURE_GATES=${FEATURE_GATES:-"AllAlpha=true"}
# RBAC Mode options
ALLOW_ANY_TOKEN=${ALLOW_ANY_TOKEN:-false}
ENABLE_RBAC=${ENABLE_RBAC:-false}
KUBECONFIG_TOKEN=${KUBECONFIG_TOKEN:-""}
AUTH_ARGS=${AUTH_ARGS:-""}
# start the cache mutation detector by default so that cache mutators will be found
KUBE_CACHE_MUTATION_DETECTOR="${KUBE_CACHE_MUTATION_DETECTOR:-true}"
export KUBE_CACHE_MUTATION_DETECTOR
# START_MODE can be 'all', 'kubeletonly', or 'nokubelet'
START_MODE=${START_MODE:-"all"}
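# e.g. START_MODE=nokubelet hack/local-up-cluster.sh (start everything except the kubelet)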
# sanity check for OpenStack provider
if [ "${CLOUD_PROVIDER}" == "openstack" ]; then
if [ "${CLOUD_CONFIG}" == "" ]; then
echo "Missing CLOUD_CONFIG env for OpenStack provider!"
exit 1
fi
if [ ! -f "${CLOUD_CONFIG}" ]; then
echo "Cloud config ${CLOUD_CONFIG} doesn't exist"
exit 1
fi
fi
if [ "$(id -u)" != "0" ]; then
echo "WARNING : This script MAY be run as root for docker socket / iptables functionality; if failures occur, retry as root." 2>&1
fi
# Stop right away if the build fails
set -e
source "${KUBE_ROOT}/hack/lib/init.sh"
function usage {
echo "This script starts a local kube cluster. "
echo "Example 0: hack/local-up-cluster.sh -h (this 'help' usage description)"
echo "Example 1: hack/local-up-cluster.sh -o _output/dockerized/bin/linux/amd64/ (run from docker output)"
echo "Example 2: hack/local-up-cluster.sh -O (auto-guess the bin path for your platform)"
echo "Example 3: hack/local-up-cluster.sh (build a local copy of the source)"
}
# This function guesses where the existing cached binary build is for the `-O`
# flag
function guess_built_binary_path {
local hyperkube_path=$(kube::util::find-binary "hyperkube")
if [[ -z "${hyperkube_path}" ]]; then
return
fi
echo -n "$(dirname "${hyperkube_path}")"
}
### Allow user to supply the source directory.
GO_OUT=${GO_OUT:-}
while getopts "ho:O" OPTION
do
case $OPTION in
o)
echo "skipping build"
GO_OUT="$OPTARG"
echo "using source $GO_OUT"
;;
O)
GO_OUT=$(guess_built_binary_path)
if [ "${GO_OUT}" == "" ]; then
echo "Could not guess the correct output directory to use."
exit 1
fi
;;
h)
usage
exit
;;
?)
usage
exit
;;
esac
done
if [ "x$GO_OUT" == "x" ]; then
make -C "${KUBE_ROOT}" WHAT="cmd/kubectl cmd/hyperkube vendor/k8s.io/kube-aggregator"
else
echo "skipped the build."
fi
function test_rkt {
if [[ -n "${RKT_PATH}" ]]; then
${RKT_PATH} list 2> /dev/null 1> /dev/null
if [ "$?" != "0" ]; then
echo "Failed to successfully run 'rkt list', please verify that ${RKT_PATH} is the path of rkt binary."
exit 1
fi
else
rkt list 2> /dev/null 1> /dev/null
if [ "$?" != "0" ]; then
echo "Failed to successfully run 'rkt list', please verify that rkt is in \$PATH."
exit 1
fi
fi
}
# Shut down anyway if there's an error.
set +e
API_PORT=${API_PORT:-8080}
API_SECURE_PORT=${API_SECURE_PORT:-6443}
API_HOST=${API_HOST:-localhost}
API_HOST_IP=${API_HOST_IP:-"127.0.0.1"}
API_BIND_ADDR=${API_BIND_ADDR:-"0.0.0.0"}
KUBELET_HOST=${KUBELET_HOST:-"127.0.0.1"}
# By default only allow CORS for requests on localhost
API_CORS_ALLOWED_ORIGINS=${API_CORS_ALLOWED_ORIGINS:-/127.0.0.1(:[0-9]+)?$,/localhost(:[0-9]+)?$}
KUBELET_PORT=${KUBELET_PORT:-10250}
LOG_LEVEL=${LOG_LEVEL:-3}
CONTAINER_RUNTIME=${CONTAINER_RUNTIME:-"docker"}
CONTAINER_RUNTIME_ENDPOINT=${CONTAINER_RUNTIME_ENDPOINT:-""}
IMAGE_SERVICE_ENDPOINT=${IMAGE_SERVICE_ENDPOINT:-""}
RKT_PATH=${RKT_PATH:-""}
RKT_STAGE1_IMAGE=${RKT_STAGE1_IMAGE:-""}
CHAOS_CHANCE=${CHAOS_CHANCE:-0.0}
CPU_CFS_QUOTA=${CPU_CFS_QUOTA:-true}
ENABLE_HOSTPATH_PROVISIONER=${ENABLE_HOSTPATH_PROVISIONER:-"false"}
CLAIM_BINDER_SYNC_PERIOD=${CLAIM_BINDER_SYNC_PERIOD:-"15s"} # current k8s default
ENABLE_CONTROLLER_ATTACH_DETACH=${ENABLE_CONTROLLER_ATTACH_DETACH:-"true"} # current default
# This is the default dir and filename where the apiserver will generate a self-signed cert
# which should be able to be used as the CA to verify itself
CERT_DIR=${CERT_DIR:-"/var/run/kubernetes"}
ROOT_CA_FILE=${CERT_DIR}/server-ca.crt
# name of the cgroup driver, i.e. cgroupfs or systemd
if [[ ${CONTAINER_RUNTIME} == "docker" ]]; then
# default cgroup driver to match what is reported by docker to simplify local development
if [[ -z ${CGROUP_DRIVER} ]]; then
# match driver with docker runtime reported value (they must match)
CGROUP_DRIVER=$(docker info | grep "Cgroup Driver:" | cut -f3- -d' ')
echo "Kubelet cgroup driver defaulted to use: ${CGROUP_DRIVER}"
fi
fi
# Ensure CERT_DIR is created for auto-generated crt/key and kubeconfig
mkdir -p "${CERT_DIR}" &>/dev/null || sudo mkdir -p "${CERT_DIR}"
CONTROLPLANE_SUDO=$(test -w "${CERT_DIR}" || echo "sudo -E")
function test_apiserver_off {
# For the common local scenario, fail fast if server is already running.
# this can happen if you run local-up-cluster.sh twice and kill etcd in between.
if [[ "${API_PORT}" -gt "0" ]]; then
curl --silent -g $API_HOST:$API_PORT
if [ ! $? -eq 0 ]; then
echo "API SERVER insecure port is free, proceeding..."
else
echo "ERROR starting API SERVER, exiting. Some process on $API_HOST is serving already on $API_PORT"
exit 1
fi
fi
curl --silent -k -g $API_HOST:$API_SECURE_PORT
if [ ! $? -eq 0 ]; then
echo "API SERVER secure port is free, proceeding..."
else
echo "ERROR starting API SERVER, exiting. Some process on $API_HOST is serving already on $API_SECURE_PORT"
exit 1
fi
}
function detect_binary {
# Detect the OS name/arch so that we can find our binary
case "$(uname -s)" in
Darwin)
host_os=darwin
;;
Linux)
host_os=linux
;;
*)
echo "Unsupported host OS. Must be Linux or Mac OS X." >&2
exit 1
;;
esac
case "$(uname -m)" in
x86_64*)
host_arch=amd64
;;
i?86_64*)
host_arch=amd64
;;
amd64*)
host_arch=amd64
;;
aarch64*)
host_arch=arm64
;;
arm64*)
host_arch=arm64
;;
arm*)
host_arch=arm
;;
i?86*)
host_arch=x86
;;
s390x*)
host_arch=s390x
;;
ppc64le*)
host_arch=ppc64le
;;
*)
echo "Unsupported host arch. Must be x86_64, 386, arm, arm64, s390x or ppc64le." >&2
exit 1
;;
esac
GO_OUT="${KUBE_ROOT}/_output/local/bin/${host_os}/${host_arch}"
}
cleanup_dockerized_kubelet()
{
if [[ -e $KUBELET_CIDFILE ]]; then
docker kill $(<$KUBELET_CIDFILE) > /dev/null
rm -f $KUBELET_CIDFILE
fi
}
cleanup()
{
echo "Cleaning up..."
# delete running images
# if [[ "${ENABLE_CLUSTER_DNS}" = true ]]; then
# Still need to figure why this commands throw an error: Error from server: client: etcd cluster is unavailable or misconfigured
# ${KUBECTL} --namespace=kube-system delete service kube-dns
# And this one hang forever:
# ${KUBECTL} --namespace=kube-system delete rc kube-dns-v10
# fi
# Check if the API server is still running
[[ -n "${APISERVER_PID-}" ]] && APISERVER_PIDS=$(pgrep -P ${APISERVER_PID} ; ps -o pid= -p ${APISERVER_PID})
[[ -n "${APISERVER_PIDS-}" ]] && sudo kill ${APISERVER_PIDS}
# Check if the controller-manager is still running
[[ -n "${CTLRMGR_PID-}" ]] && CTLRMGR_PIDS=$(pgrep -P ${CTLRMGR_PID} ; ps -o pid= -p ${CTLRMGR_PID})
[[ -n "${CTLRMGR_PIDS-}" ]] && sudo kill ${CTLRMGR_PIDS}
if [[ -n "$DOCKERIZE_KUBELET" ]]; then
cleanup_dockerized_kubelet
else
# Check if the kubelet is still running
[[ -n "${KUBELET_PID-}" ]] && KUBELET_PIDS=$(pgrep -P ${KUBELET_PID} ; ps -o pid= -p ${KUBELET_PID})
[[ -n "${KUBELET_PIDS-}" ]] && sudo kill ${KUBELET_PIDS}
fi
# Check if the proxy is still running
[[ -n "${PROXY_PID-}" ]] && PROXY_PIDS=$(pgrep -P ${PROXY_PID} ; ps -o pid= -p ${PROXY_PID})
[[ -n "${PROXY_PIDS-}" ]] && sudo kill ${PROXY_PIDS}
# Check if the scheduler is still running
[[ -n "${SCHEDULER_PID-}" ]] && SCHEDULER_PIDS=$(pgrep -P ${SCHEDULER_PID} ; ps -o pid= -p ${SCHEDULER_PID})
[[ -n "${SCHEDULER_PIDS-}" ]] && sudo kill ${SCHEDULER_PIDS}
# Check if the etcd is still running
[[ -n "${ETCD_PID-}" ]] && kube::etcd::stop
[[ -n "${ETCD_DIR-}" ]] && kube::etcd::clean_etcd_dir
exit 0
}
function warning {
message=$1
echo $(tput bold)$(tput setaf 1)
echo "WARNING: ${message}"
echo $(tput sgr0)
}
function start_etcd {
echo "Starting etcd"
kube::etcd::start
}
function set_service_accounts {
SERVICE_ACCOUNT_LOOKUP=${SERVICE_ACCOUNT_LOOKUP:-false}
SERVICE_ACCOUNT_KEY=${SERVICE_ACCOUNT_KEY:-/tmp/kube-serviceaccount.key}
# Generate ServiceAccount key if needed
if [[ ! -f "${SERVICE_ACCOUNT_KEY}" ]]; then
mkdir -p "$(dirname ${SERVICE_ACCOUNT_KEY})"
openssl genrsa -out "${SERVICE_ACCOUNT_KEY}" 2048 2>/dev/null
fi
}
function start_apiserver {
security_admission=""
if [[ -z "${ALLOW_SECURITY_CONTEXT}" ]]; then
security_admission=",SecurityContextDeny"
fi
if [[ -n "${PSP_ADMISSION}" ]]; then
security_admission=",PodSecurityPolicy"
fi
# Admission Controllers to invoke prior to persisting objects in cluster
ADMISSION_CONTROL=NamespaceLifecycle,LimitRanger,ServiceAccount${security_admission},ResourceQuota,DefaultStorageClass
# This is the default dir and filename where the apiserver will generate a self-signed cert
# which should be able to be used as the CA to verify itself
anytoken_arg=""
if [[ "${ALLOW_ANY_TOKEN}" = true ]]; then
anytoken_arg="--insecure-allow-any-token "
KUBECONFIG_TOKEN="${KUBECONFIG_TOKEN:-system:admin/system:masters}"
fi
authorizer_arg=""
if [[ "${ENABLE_RBAC}" = true ]]; then
authorizer_arg="--authorization-mode=RBAC "
fi
priv_arg=""
if [[ -n "${ALLOW_PRIVILEGED}" ]]; then
priv_arg="--allow-privileged "
fi
runtime_config=""
if [[ -n "${RUNTIME_CONFIG}" ]]; then
runtime_config="--runtime-config=${RUNTIME_CONFIG}"
fi
# Let the API server pick a default address when API_HOST_IP
# is set to 127.0.0.1
advertise_address=""
if [[ "${API_HOST_IP}" != "127.0.0.1" ]]; then
advertise_address="--advertise_address=${API_HOST_IP}"
fi
# Create CA signers
kube::util::create_signing_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" server '"server auth"'
kube::util::create_signing_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" client '"client auth"'
# Create auth proxy client ca
kube::util::create_signing_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" request-header '"client auth"'
# serving cert for kube-apiserver
kube::util::create_serving_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "server-ca" kube-apiserver kubernetes.default.svc "localhost" ${API_HOST_IP} ${API_HOST}
# Create client certs signed with client-ca, given id, given CN and a number of groups
kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' kubelet system:node:${HOSTNAME_OVERRIDE} system:nodes
kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' kube-proxy system:kube-proxy system:nodes
kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' controller system:kube-controller-manager
kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' scheduler system:kube-scheduler
kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' admin system:admin system:masters
# Create matching certificates for kube-aggregator
kube::util::create_serving_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "server-ca" kube-aggregator api.kube-public.svc "localhost" ${API_HOST_IP}
kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" request-header-ca auth-proxy system:auth-proxy
# TODO remove masters and add rolebinding
kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' kube-aggregator system:kube-aggregator system:masters
kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${API_HOST}" "${API_SECURE_PORT}" kube-aggregator
APISERVER_LOG=/tmp/kube-apiserver.log
${CONTROLPLANE_SUDO} "${GO_OUT}/hyperkube" apiserver ${anytoken_arg} ${authorizer_arg} ${priv_arg} ${runtime_config}\
${advertise_address} \
--v=${LOG_LEVEL} \
--cert-dir="${CERT_DIR}" \
--client-ca-file="${CERT_DIR}/client-ca.crt" \
--service-account-key-file="${SERVICE_ACCOUNT_KEY}" \
--service-account-lookup="${SERVICE_ACCOUNT_LOOKUP}" \
--admission-control="${ADMISSION_CONTROL}" \
--bind-address="${API_BIND_ADDR}" \
--secure-port="${API_SECURE_PORT}" \
--tls-cert-file="${CERT_DIR}/serving-kube-apiserver.crt" \
--tls-private-key-file="${CERT_DIR}/serving-kube-apiserver.key" \
--tls-ca-file="${CERT_DIR}/server-ca.crt" \
--insecure-bind-address="${API_HOST_IP}" \
--insecure-port="${API_PORT}" \
--etcd-servers="http://${ETCD_HOST}:${ETCD_PORT}" \
--service-cluster-ip-range="${SERVICE_CLUSTER_IP_RANGE}" \
--feature-gates="${FEATURE_GATES}" \
--cloud-provider="${CLOUD_PROVIDER}" \
--cloud-config="${CLOUD_CONFIG}" \
--requestheader-username-headers=X-Remote-User \
--requestheader-group-headers=X-Remote-Group \
--requestheader-extra-headers-prefix=X-Remote-Extra- \
--requestheader-client-ca-file="${CERT_DIR}/request-header-ca.crt" \
--requestheader-allowed-names=system:auth-proxy \
--cors-allowed-origins="${API_CORS_ALLOWED_ORIGINS}" >"${APISERVER_LOG}" 2>&1 &
APISERVER_PID=$!
# Wait for kube-apiserver to come up before launching the rest of the components.
echo "Waiting for apiserver to come up"
# this uses the API port because if you don't have any authenticator, you can't seem to use the secure port at all.
# this matches what happened with the combination in 1.4.
# TODO change this conditionally based on whether API_PORT is on or off
kube::util::wait_for_url "http://${API_HOST_IP}:${API_PORT}/version" "apiserver: " 1 ${WAIT_FOR_URL_API_SERVER} \
|| { echo "check apiserver logs: ${APISERVER_LOG}" ; exit 1 ; }
# Create kubeconfigs for all components, using client certs
kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${API_HOST}" "${API_SECURE_PORT}" admin
${CONTROLPLANE_SUDO} chown "${USER}" "${CERT_DIR}/client-admin.key" # make readable for kubectl
kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${API_HOST}" "${API_SECURE_PORT}" kubelet
kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${API_HOST}" "${API_SECURE_PORT}" kube-proxy
kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${API_HOST}" "${API_SECURE_PORT}" controller
kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${API_HOST}" "${API_SECURE_PORT}" scheduler
if [[ -z "${AUTH_ARGS}" ]]; then
if [[ "${ALLOW_ANY_TOKEN}" = true ]]; then
# use token authentication
if [[ -n "${KUBECONFIG_TOKEN}" ]]; then
AUTH_ARGS="--token=${KUBECONFIG_TOKEN}"
else
AUTH_ARGS="--token=system:admin/system:masters"
fi
else
# default to the admin client cert/key
AUTH_ARGS="--client-key=${CERT_DIR}/client-admin.key --client-certificate=${CERT_DIR}/client-admin.crt"
fi
fi
# create the kube-public namespace for the aggregator
${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" create namespace kube-public
${CONTROLPLANE_SUDO} cp "${CERT_DIR}/admin.kubeconfig" "${CERT_DIR}/admin-kube-aggregator.kubeconfig"
${CONTROLPLANE_SUDO} chown $(whoami) "${CERT_DIR}/admin-kube-aggregator.kubeconfig"
${KUBECTL} config set-cluster local-up-cluster --kubeconfig="${CERT_DIR}/admin-kube-aggregator.kubeconfig" --server="https://${API_HOST_IP}:9443"
echo "use 'kubectl --kubeconfig=${CERT_DIR}/admin-kube-aggregator.kubeconfig' to use the aggregated API server"
}
function start_controller_manager {
node_cidr_args=""
if [[ "${NET_PLUGIN}" == "kubenet" ]]; then
node_cidr_args="--allocate-node-cidrs=true --cluster-cidr=10.1.0.0/16 "
fi
CTLRMGR_LOG=/tmp/kube-controller-manager.log
${CONTROLPLANE_SUDO} "${GO_OUT}/hyperkube" controller-manager \
--v=${LOG_LEVEL} \
--service-account-private-key-file="${SERVICE_ACCOUNT_KEY}" \
--root-ca-file="${ROOT_CA_FILE}" \
--enable-hostpath-provisioner="${ENABLE_HOSTPATH_PROVISIONER}" \
${node_cidr_args} \
--pvclaimbinder-sync-period="${CLAIM_BINDER_SYNC_PERIOD}" \
--feature-gates="${FEATURE_GATES}" \
--cloud-provider="${CLOUD_PROVIDER}" \
--cloud-config="${CLOUD_CONFIG}" \
--kubeconfig "$CERT_DIR"/controller.kubeconfig \
--use-service-account-credentials \
--master="https://${API_HOST}:${API_SECURE_PORT}" >"${CTLRMGR_LOG}" 2>&1 &
CTLRMGR_PID=$!
}
function start_kubelet {
KUBELET_LOG=/tmp/kubelet.log
mkdir -p ${POD_MANIFEST_PATH} || true
cp ${KUBE_ROOT}/vendor/k8s.io/kube-aggregator/artifacts/hostpath-pods/insecure-etcd-pod.yaml ${POD_MANIFEST_PATH}/kube-aggregator.yaml
priv_arg=""
if [[ -n "${ALLOW_PRIVILEGED}" ]]; then
priv_arg="--allow-privileged "
fi
mkdir -p /var/lib/kubelet
if [[ -z "${DOCKERIZE_KUBELET}" ]]; then
# Enable dns
if [[ "${ENABLE_CLUSTER_DNS}" = true ]]; then
dns_args="--cluster-dns=${DNS_SERVER_IP} --cluster-domain=${DNS_DOMAIN}"
else
# To start a private DNS server set ENABLE_CLUSTER_DNS and
# DNS_SERVER_IP/DOMAIN. This will at least provide a working
# DNS server for real world hostnames.
dns_args="--cluster-dns=8.8.8.8"
fi
net_plugin_args=""
if [[ -n "${NET_PLUGIN}" ]]; then
net_plugin_args="--network-plugin=${NET_PLUGIN}"
fi
auth_args=""
if [[ -n "${KUBELET_AUTHORIZATION_WEBHOOK:-}" ]]; then
auth_args="${auth_args} --authorization-mode=Webhook"
fi
if [[ -n "${KUBELET_AUTHENTICATION_WEBHOOK:-}" ]]; then
auth_args="${auth_args} --authentication-token-webhook"
fi
if [[ -n "${CLIENT_CA_FILE:-}" ]]; then
auth_args="${auth_args} --client-ca-file=${CLIENT_CA_FILE}"
fi
net_plugin_dir_args=""
if [[ -n "${NET_PLUGIN_DIR}" ]]; then
net_plugin_dir_args="--network-plugin-dir=${NET_PLUGIN_DIR}"
fi
container_runtime_endpoint_args=""
if [[ -n "${CONTAINER_RUNTIME_ENDPOINT}" ]]; then
container_runtime_endpoint_args="--container-runtime-endpoint=${CONTAINER_RUNTIME_ENDPOINT}"
fi
image_service_endpoint_args=""
if [[ -n "${IMAGE_SERVICE_ENDPOINT}" ]]; then
image_service_endpoint_args="--image-service-endpoint=${IMAGE_SERVICE_ENDPOINT}"
fi
sudo -E "${GO_OUT}/hyperkube" kubelet ${priv_arg}\
--v=${LOG_LEVEL} \
--chaos-chance="${CHAOS_CHANCE}" \
--container-runtime="${CONTAINER_RUNTIME}" \
--rkt-path="${RKT_PATH}" \
--rkt-stage1-image="${RKT_STAGE1_IMAGE}" \
--hostname-override="${HOSTNAME_OVERRIDE}" \
--cloud-provider="${CLOUD_PROVIDER}" \
--cloud-config="${CLOUD_CONFIG}" \
--address="${KUBELET_HOST}" \
--require-kubeconfig \
--kubeconfig "$CERT_DIR"/kubelet.kubeconfig \
--feature-gates="${FEATURE_GATES}" \
--cpu-cfs-quota=${CPU_CFS_QUOTA} \
--enable-controller-attach-detach="${ENABLE_CONTROLLER_ATTACH_DETACH}" \
--cgroups-per-qos=${CGROUPS_PER_QOS} \
--cgroup-driver=${CGROUP_DRIVER} \
--cgroup-root=${CGROUP_ROOT} \
--keep-terminated-pod-volumes=true \
--eviction-hard=${EVICTION_HARD} \
--eviction-soft=${EVICTION_SOFT} \
--eviction-pressure-transition-period=${EVICTION_PRESSURE_TRANSITION_PERIOD} \
--pod-manifest-path="${POD_MANIFEST_PATH}" \
${auth_args} \
${dns_args} \
${net_plugin_dir_args} \
${net_plugin_args} \
${container_runtime_endpoint_args} \
${image_service_endpoint_args} \
--port="$KUBELET_PORT" >"${KUBELET_LOG}" 2>&1 &
KUBELET_PID=$!
# Quick check that kubelet is running.
if ps -p $KUBELET_PID > /dev/null ; then
echo "kubelet ( $KUBELET_PID ) is running."
else
cat ${KUBELET_LOG} ; exit 1
fi
else
# Docker won't run a container with a cidfile (container id file)
# unless that file does not already exist; clean up an existing
# dockerized kubelet that might be running.
cleanup_dockerized_kubelet
cred_bind=""
# path to cloud credentials.
cloud_cred=""
if [ "${CLOUD_PROVIDER}" == "aws" ]; then
cloud_cred="${HOME}/.aws/credentials"
fi
if [ "${CLOUD_PROVIDER}" == "gce" ]; then
cloud_cred="${HOME}/.config/gcloud"
fi
if [ "${CLOUD_PROVIDER}" == "openstack" ]; then
cloud_cred="${CLOUD_CONFIG}"
fi
if [[ -n "${cloud_cred}" ]]; then
cred_bind="--volume=${cloud_cred}:${cloud_cred}:ro"
fi
docker run \
--volume=/:/rootfs:ro \
--volume=/var/run:/var/run:rw \
--volume=/sys:/sys:ro \
--volume=/var/lib/docker/:/var/lib/docker:ro \
--volume=/var/lib/kubelet/:/var/lib/kubelet:rw \
--volume=/dev:/dev \
${cred_bind} \
--net=host \
--privileged=true \
-i \
--cidfile=$KUBELET_CIDFILE \
gcr.io/google_containers/kubelet \
/kubelet --v=${LOG_LEVEL} --containerized ${priv_arg}--chaos-chance="${CHAOS_CHANCE}" --pod-manifest-path="${POD_MANIFEST_PATH}" --hostname-override="${HOSTNAME_OVERRIDE}" --cloud-provider="${CLOUD_PROVIDER}" --cloud-config="${CLOUD_CONFIG}" --address="127.0.0.1" --require-kubeconfig --kubeconfig "$CERT_DIR"/kubelet.kubeconfig --api-servers="https://${API_HOST}:${API_SECURE_PORT}" --port="$KUBELET_PORT" --enable-controller-attach-detach="${ENABLE_CONTROLLER_ATTACH_DETACH}" &> $KUBELET_LOG &
fi
}
function start_kubeproxy {
PROXY_LOG=/tmp/kube-proxy.log
sudo "${GO_OUT}/hyperkube" proxy \
--v=${LOG_LEVEL} \
--hostname-override="${HOSTNAME_OVERRIDE}" \
--feature-gates="${FEATURE_GATES}" \
--kubeconfig "$CERT_DIR"/kube-proxy.kubeconfig \
--master="https://${API_HOST}:${API_SECURE_PORT}" >"${PROXY_LOG}" 2>&1 &
PROXY_PID=$!
SCHEDULER_LOG=/tmp/kube-scheduler.log
${CONTROLPLANE_SUDO} "${GO_OUT}/hyperkube" scheduler \
--v=${LOG_LEVEL} \
--kubeconfig "$CERT_DIR"/scheduler.kubeconfig \
--master="https://${API_HOST}:${API_SECURE_PORT}" >"${SCHEDULER_LOG}" 2>&1 &
SCHEDULER_PID=$!
}
function start_kubedns {
if [[ "${ENABLE_CLUSTER_DNS}" = true ]]; then
echo "Creating kube-system namespace"
sed -e "s/{{ pillar\['dns_domain'\] }}/${DNS_DOMAIN}/g" "${KUBE_ROOT}/cluster/addons/dns/kubedns-controller.yaml.in" >| kubedns-deployment.yaml
if [[ "${FEDERATION:-}" == "true" ]]; then
FEDERATIONS_DOMAIN_MAP="${FEDERATIONS_DOMAIN_MAP:-}"
if [[ -z "${FEDERATIONS_DOMAIN_MAP}" && -n "${FEDERATION_NAME:-}" && -n "${DNS_ZONE_NAME:-}" ]]; then
FEDERATIONS_DOMAIN_MAP="${FEDERATION_NAME}=${DNS_ZONE_NAME}"
fi
if [[ -n "${FEDERATIONS_DOMAIN_MAP}" ]]; then
sed -i -e "s/{{ pillar\['federations_domain_map'\] }}/- --federations=${FEDERATIONS_DOMAIN_MAP}/g" kubedns-deployment.yaml
else
sed -i -e "/{{ pillar\['federations_domain_map'\] }}/d" kubedns-deployment.yaml
fi
else
sed -i -e "/{{ pillar\['federations_domain_map'\] }}/d" kubedns-deployment.yaml
fi
sed -e "s/{{ pillar\['dns_server'\] }}/${DNS_SERVER_IP}/g" "${KUBE_ROOT}/cluster/addons/dns/kubedns-svc.yaml.in" >| kubedns-svc.yaml
# TODO update to dns role once we have one.
${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" create clusterrolebinding system:kube-dns --clusterrole=cluster-admin --serviceaccount=kube-system:default
# use kubectl to create kubedns deployment and service
${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" --namespace=kube-system create -f kubedns-deployment.yaml
${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" --namespace=kube-system create -f kubedns-svc.yaml
echo "Kube-dns deployment and service successfully deployed."
rm kubedns-deployment.yaml kubedns-svc.yaml
fi
}
function create_psp_policy {
echo "Create podsecuritypolicy policies for RBAC."
${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" create -f ${KUBE_ROOT}/examples/podsecuritypolicy/rbac/policies.yaml
${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" create -f ${KUBE_ROOT}/examples/podsecuritypolicy/rbac/roles.yaml
${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" create -f ${KUBE_ROOT}/examples/podsecuritypolicy/rbac/bindings.yaml
}
function print_success {
if [[ "${START_MODE}" != "kubeletonly" ]]; then
cat <<EOF
Local Kubernetes cluster is running. Press Ctrl-C to shut it down.
Logs:
${APISERVER_LOG:-}
${CTLRMGR_LOG:-}
${PROXY_LOG:-}
${SCHEDULER_LOG:-}
EOF
fi
if [[ "${START_MODE}" == "all" ]]; then
echo " ${KUBELET_LOG}"
elif [[ "${START_MODE}" == "nokubelet" ]]; then
echo
echo "No kubelet was started because you set START_MODE=nokubelet"
echo "Run this script again with START_MODE=kubeletonly to run a kubelet"
fi
if [[ "${START_MODE}" != "kubeletonly" ]]; then
echo
cat <<EOF
To start using your cluster, you can open up another terminal/tab and run:
export KUBECONFIG=${CERT_DIR}/admin.kubeconfig
cluster/kubectl.sh
Alternatively, you can write to the default kubeconfig:
export KUBERNETES_PROVIDER=local
cluster/kubectl.sh config set-cluster local --server=https://${API_HOST}:${API_SECURE_PORT} --certificate-authority=${ROOT_CA_FILE}
cluster/kubectl.sh config set-credentials myself ${AUTH_ARGS}
cluster/kubectl.sh config set-context local --cluster=local --user=myself
cluster/kubectl.sh config use-context local
cluster/kubectl.sh
EOF
else
cat <<EOF
The kubelet was started.
Logs:
${KUBELET_LOG}
EOF
fi
}
# validate that etcd is: not running, in path, and has minimum required version.
kube::etcd::validate
if [ "${CONTAINER_RUNTIME}" == "docker" ] && ! kube::util::ensure_docker_daemon_connectivity; then
exit 1
fi
if [[ "${CONTAINER_RUNTIME}" == "rkt" ]]; then
test_rkt
fi
if [[ "${START_MODE}" != "kubeletonly" ]]; then
test_apiserver_off
fi
kube::util::test_openssl_installed
kube::util::test_cfssl_installed
### If the user didn't supply an output dir for the build, detect it.
if [ "$GO_OUT" == "" ]; then
detect_binary
fi
echo "Detected host and ready to start services. Doing some housekeeping first..."
echo "Using GO_OUT $GO_OUT"
KUBELET_CIDFILE=/tmp/kubelet.cid
if [[ "${ENABLE_DAEMON}" = false ]]; then
trap cleanup EXIT
fi
echo "Starting services now!"
if [[ "${START_MODE}" != "kubeletonly" ]]; then
start_etcd
set_service_accounts
start_apiserver
start_controller_manager
start_kubeproxy
start_kubedns
fi
if [[ "${START_MODE}" != "nokubelet" ]]; then
## TODO remove this check if/when kubelet is supported on darwin
# Detect the OS name/arch and display appropriate error.
case "$(uname -s)" in
Darwin)
warning "kubelet is not currently supported in darwin, kubelet aborted."
KUBELET_LOG=""
;;
Linux)
start_kubelet
;;
*)
warning "Unsupported host OS. Must be Linux or Mac OS X, kubelet aborted."
;;
esac
fi
if [[ -n "${PSP_ADMISSION}" && "${ENABLE_RBAC}" = true ]]; then
create_psp_policy
fi
print_success
if [[ "${ENABLE_DAEMON}" = false ]]; then
while true; do sleep 1; done
fi
|
stu-gott/kubernetes
|
hack/local-up-cluster.sh
|
Shell
|
apache-2.0
| 31,437 |
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
cd "${BASEDIR}"
if [[ -n ${JAVA_HOME}
&& ! -d ${JAVA_HOME} ]]; then
echo "JAVA_HOME: ${JAVA_HOME} does not exist. Dockermode: attempting to switch to another." 1>&2
JAVA_HOME=""
fi
if [[ -z ${JAVA_HOME} ]]; then
JAVA_HOME=$(find /usr/lib/jvm/ -name "java-*" -type d | tail -1)
export JAVA_HOME
fi
# Avoid out of memory errors in builds
MAVEN_OPTS=${MAVEN_OPTS:-"-Xms256m -Xmx1g"}
export MAVEN_OPTS
# strip out --docker param to prevent re-exec again
TESTPATCHMODE=${TESTPATCHMODE/--docker }
cd "${BASEDIR}"
PATCH_DIR=$(cd -P -- "${PATCH_DIR}" >/dev/null && pwd -P)
cd "${PATCH_DIR}/precommit/"
#shellcheck disable=SC2086
"${PATCH_DIR}/precommit/test-patch.sh" \
--reexec \
--dockermode ${TESTPATCHMODE} \
--basedir="${BASEDIR}" \
--patch-dir="${PATCH_DIR}" \
--java-home="${JAVA_HOME}" \
--plugins="${PATCH_DIR}/precommit/user-plugins" \
--jira-cmd=/opt/jiracli/jira-cli-2.2.0/jira.sh
|
aw-altiscale/snailtrail
|
precommit/src/main/test-patch-docker/launch-test-patch.sh
|
Shell
|
apache-2.0
| 1,726 |
#!/bin/sh
printf "Checking dependencies...\r"
which node >/dev/null || sudo apt install node
which npm >/dev/null || sudo apt install npm
which pnpm >/dev/null || sudo npm install -g pnpm
pnpm install colors console-ten watchr websocket better-sqlite3 node-ipc command-line-args errlop editions gm
printf "\r \r"
|
DroidScript/DroidScript-Web
|
scripts/install-deps.sh
|
Shell
|
apache-2.0
| 342 |
#!/bin/bash
# -------------------------------------------------------------------------- #
# Copyright 2002-2014, OpenNebula Project (OpenNebula.org), C12G Labs #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
if [ -f /mnt/context.sh ]
then
. /mnt/context.sh
fi
echo $HOSTNAME > /etc/hostname
hostname $HOSTNAME
echo 127.0.0.1 $HOSTNAME >> /etc/hosts
if [ -n "$IP_PUBLIC" ]; then
ifconfig eth0 $IP_PUBLIC
fi
if [ -n "$NETMASK" ]; then
ifconfig eth0 netmask $NETMASK
fi
if [ -f /mnt/$ROOT_PUBKEY ]; then
mkdir -p /root/.ssh
cat /mnt/$ROOT_PUBKEY >> /root/.ssh/authorized_keys
chmod 700 /root/.ssh
chmod 600 /root/.ssh/authorized_keys
fi
if [ -n "$USERNAME" ]; then
useradd -s /bin/bash -m $USERNAME
if [ -f /mnt/$USER_PUBKEY ]; then
mkdir -p /home/$USERNAME/.ssh/
cat /mnt/$USER_PUBKEY >> /home/$USERNAME/.ssh/authorized_keys
chown -R $USERNAME:$USERNAME /home/$USERNAME/.ssh
chmod 600 /home/$USERNAME/.ssh/authorized_keys
fi
fi
|
dberzano/opennebula-torino
|
share/scripts/debian/context/init.sh
|
Shell
|
apache-2.0
| 1,912 |
#!/bin/bash
# Stop all the services
for SVC in /etc/init.d/hadoop*; do
echo $SVC
SVCNAME=$(basename $SVC)
echo $SVCNAME
service $SVCNAME stop
done
|
jctanner/odp-prototype
|
odp-deploy/ansible/roles/sandbox/files/sandbox_stop.sh
|
Shell
|
apache-2.0
| 171 |
#!/bin/bash
. sourceme
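# sourceme is assumed to set up the build environment (e.g. CLASSPATH); the
# next line compiles the sources and packages them into MockBI.jar.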
javac net/thecubic/mockbi/*.java ; jar cvf MockBI.jar net
|
thecubic/hadoop-mockbi
|
mk.sh
|
Shell
|
apache-2.0
| 83 |
#!/bin/bash
[ $# -lt 1 ] && echo "Usage `basename $0` <Example FQN> <debug> <debug_port>" && exit 1
EXAMPLE_FQN=$1
DEBUG=$2
if [[ $# -ge 2 && "debug" == $DEBUG ]]; then
[ $# -lt 3 ] && echo "Debug port not found. Usage `basename $0` <Example FQN> <debug> <debug_port>" && exit 1
fi
DEBUG_PORT=$3
DEPLOYMENT_UNIT_PATH=/tmp/workflows
DEPLOYMENT_UNIT_NAME=DU1/1
echo "Building flux modules..."
cd ../
mvn -q clean install -DskipTests
cd examples/
echo "Copying dependencies, this may take a while"
mvn -q dependency:copy-dependencies -DincludeScope=runtime -DskipTests
echo "Creating deployment unit structure"
mkdir -p $DEPLOYMENT_UNIT_PATH/$DEPLOYMENT_UNIT_NAME/main
mkdir -p $DEPLOYMENT_UNIT_PATH/$DEPLOYMENT_UNIT_NAME/lib
echo "Copying jars to deployment unit"
cp target/examples-* $DEPLOYMENT_UNIT_PATH/$DEPLOYMENT_UNIT_NAME/main
cp target/dependency/* $DEPLOYMENT_UNIT_PATH/$DEPLOYMENT_UNIT_NAME/lib
cp src/main/resources/flux_config.yml $DEPLOYMENT_UNIT_PATH/$DEPLOYMENT_UNIT_NAME/
if [[ $# -ge 2 && "debug" == $DEBUG ]]; then
echo "Starting flux runtime combined mode with remote debugging on port $DEBUG_PORT"
# Assumption: standard JDWP agent wiring for remote debugging on DEBUG_PORT
java -agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=${DEBUG_PORT} -Dlog4j.configurationFile=./target/classes/log4j2.xml -cp "target/dependency/*" "com.flipkart.flux.initializer.FluxInitializer" start &
FLUX_PID=$!
else
echo "Starting flux runtime combined mode"
java -Dlog4j.configurationFile=./target/classes/log4j2.xml -cp "target/dependency/*" "com.flipkart.flux.initializer.FluxInitializer" start &
FLUX_PID=$!
fi
# kill the flux processes which are running in background on ctrl+c
trap "kill -9 $FLUX_PID" 2
sleep 15
echo "Running $EXAMPLE_FQN for you "
# The command below prints the example's output in green
echo -e "\033[33;32m $(java -Dlog4j.configurationFile=./target/classes/log4j2.xml -cp 'target/*:target/dependency/*' $EXAMPLE_FQN)"
# Reset the color
echo -e "\033[33;0m"
#wait for 3 seconds before displaying the below message so that it would be separated from the flux output
sleep 3
echo ""
echo "(Press Ctrl+C to stop Flux processes and exit)"
#wait until user presses ctrl+c
tail -f /dev/null
|
flipkart-incubator/flux
|
examples/run_example.sh
|
Shell
|
apache-2.0
| 2,051 |
#!/usr/bin/env bash
set -x
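# Drop the default repo definition, import the distribution GPG keys, then
# register explicit oss/non-oss/update repositories before refreshing and
# updating.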
rm -f /etc/zypp/repos.d/openSUSE-13.1-1.10.repo
rpm --import http://download.opensuse.org/distribution/13.1/repo/oss/gpg-pubkey-3dbdc284-4be1884d.asc
rpm --import http://download.opensuse.org/distribution/13.1/repo/oss/gpg-pubkey-307e3d54-4be01a65.asc
zypper ar http://download.opensuse.org/distribution/13.1/repo/oss/ opensuse-13.1-oss
zypper ar http://download.opensuse.org/distribution/13.1/repo/non-oss/ opensuse-13.1-non-oss
zypper ar http://download.opensuse.org/update/13.1/ opensuse-13.1-update
zypper ar http://download.opensuse.org/update/13.1-non-oss/ opensuse-13.1-update-non-oss
zypper refresh
zypper update -y
|
b1-systems/packer-templates
|
scripts/opensuse/13.1/update.sh
|
Shell
|
apache-2.0
| 653 |
#! /bin/bash -x
################################################################################
#
# ------ FOR Linux64 & gnu C&fortran & openmpi -----
#
################################################################################
export FORT_FMT_RECL=400
ln -sv ../../../../bin/nhm_driver .
ln -sv ../../../../data/mnginfo/rl00-prc10.info .
ln -sv ../../../../data/grid/vgrid/vgrid40_24000-600m.dat .
ln -sv ../../../../data/grid/boundary/gl05rl00pe10/boundary_GL05RL00.pe000000 .
ln -sv ../../../../data/grid/boundary/gl05rl00pe10/boundary_GL05RL00.pe000001 .
ln -sv ../../../../data/grid/boundary/gl05rl00pe10/boundary_GL05RL00.pe000002 .
ln -sv ../../../../data/grid/boundary/gl05rl00pe10/boundary_GL05RL00.pe000003 .
ln -sv ../../../../data/grid/boundary/gl05rl00pe10/boundary_GL05RL00.pe000004 .
ln -sv ../../../../data/grid/boundary/gl05rl00pe10/boundary_GL05RL00.pe000005 .
ln -sv ../../../../data/grid/boundary/gl05rl00pe10/boundary_GL05RL00.pe000006 .
ln -sv ../../../../data/grid/boundary/gl05rl00pe10/boundary_GL05RL00.pe000007 .
ln -sv ../../../../data/grid/boundary/gl05rl00pe10/boundary_GL05RL00.pe000008 .
ln -sv ../../../../data/grid/boundary/gl05rl00pe10/boundary_GL05RL00.pe000009 .
# run
mpirun -np 10 ./nhm_driver || exit
################################################################################
|
kento/NICAM
|
test/case/jablonowski/gl05rl00z40pe10/run.sh
|
Shell
|
bsd-2-clause
| 1,332 |
#!/usr/bin/env bash
#
# Copyright (c) 2014-2019, Erik Dannenberg <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# variable type conventions:
#
# environment : SOME_VAR
# constant : _SOME_VAR
# local : some_var
# global : _some_var
# function return : __function_name
readonly _KUBLER_VERSION=0.9.6
readonly _KUBLER_BASH_MIN=4.2
readonly _KUBLER_CONF=kubler.conf
# shellcheck disable=SC1004
_help_header=' __ ___. .__
| | ____ _\_ |__ | | ___________
| |/ / | \ __ \| | _/ __ \_ __ \
| <| | / \_\ \ |_\ ___/| | \/
|__|_ \____/|___ /____/\___ >__|
\/ \/ \/'
function show_help() {
local help_commands header_current_cmd
help_commands="Commands:
build - Build image(s) or namespace(s)
clean - Remove build artifacts and/or delete built images
dep-graph - Visualize image dependencies
new - Create a new namespace, image or builder
push - Push image(s) or namespace(s) to a registry
update - Check for new stage3 releases and kubler namespace updates
${_KUBLER_BIN} <command> --help for more information on specific commands\\n"
header_current_cmd="${_KUBLER_VERSION}"
# shellcheck disable=SC2154
[[ "${_is_valid_cmd}" == 'true' ]] && header_current_cmd=" ${_arg_command}"
echo -e "${_help_header}${header_current_cmd}\\n"
[[ -n "${_help_command_description}" ]] && echo -e "${_help_command_description}\\n"
print_help
# only show command listing if no/invalid command was provided
[[ -z "${_arg_command}" || -z "${_is_valid_cmd}" ]] && echo -e "\\n${help_commands}"
}
# Get the absolute path for given file or directory, resolve symlinks.
# Adapted from: http://stackoverflow.com/a/697552/5731095
#
# Arguments:
# 1: path
function get_absolute_path() {
__get_absolute_path=
local path_in path_out current_dir current_link
path_in="$1"
path_out="$(cd -P -- "$(dirname -- "${path_in}")" && pwd -P)" \
|| die "Couldn't determine the script's running directory, aborting" 2
path_out="${path_out}/$(basename -- "${path_in}")" \
|| die "Couldn't determine the script's base name, aborting" 2
# resolve symlinks
while [[ -h "${path_out}" ]]; do
current_dir=$(dirname -- "${path_out}")
current_link=$(readlink "${path_out}")
path_out="$(cd "${current_dir}" && cd "$(dirname -- "${current_link}")" && pwd)/$(basename -- "${current_link}")"
done
# handle ./ or ../
regex='^[.]{1,2}\/?$'
[[ "${path_in}" =~ ${regex} ]] && path_out="$(dirname "${path_out}")"
# and once more if ../
regex='^[.]{2}\/?$'
[[ "${path_in}" =~ ${regex} ]] && path_out="$(dirname "${path_out}")"
__get_absolute_path="${path_out}"
}
# https://stackoverflow.com/a/44660519/5731095
# Compares two tuple-based, dot-delimited version numbers a and b (possibly
# with arbitrary string suffixes). Returns:
# 1 if a<b
# 2 if equal
# 3 if a>b
# Everything after the first character not in [0-9.] is compared
# lexicographically using ASCII ordering if the tuple-based versions are equal.
#
# Arguments:
# 1: version_one
# 2: version_two
function compare_versions() {
if [[ "$1" == "$2" ]]; then
return 2
fi
local IFS=.
# shellcheck disable=SC2206
local i a=(${1%%[^0-9.]*}) b=(${2%%[^0-9.]*})
local arem=${1#"${1%%[^0-9.]*}"} brem=${2#"${2%%[^0-9.]*}"}
for ((i=0; i<${#a[@]} || i<${#b[@]}; i++)); do
if ((10#${a[i]:-0} < 10#${b[i]:-0})); then
return 1
elif ((10#${a[i]:-0} > 10#${b[i]:-0})); then
return 3
fi
done
if [ "$arem" '<' "$brem" ]; then
return 1
elif [ "$arem" '>' "$brem" ]; then
return 3
fi
return 2
}
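# Usage sketch (illustrative):
#   compare_versions "0.9.6" "0.10.0"   # returns 1: 9 < 10 compared numerically
#   compare_versions "1.2" "1.2"        # returns 2: equal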
# Read config from /etc/kubler.conf or $_KUBLER_DIR/kubler.conf as fallback, then $KUBLER_DATA_DIR config if it exists
#
# Arguments:
# 1: kubler_dir
function source_base_conf() {
local kubler_dir conf_path
kubler_dir="$1"
conf_path=/etc/"${_KUBLER_CONF}"
if [[ ! -f "${conf_path}" ]]; then
conf_path="${kubler_dir}/${_KUBLER_CONF}"
[[ ! -f "${conf_path}" ]] && die "Couldn't find config at /etc/${_KUBLER_CONF} or ${conf_path}"
fi
# shellcheck source=kubler.conf
source "${conf_path}"
conf_path="${KUBLER_DATA_DIR}/${_KUBLER_CONF}"
# shellcheck source=template/docker/namespace/kubler.conf.multi
[[ -n "${KUBLER_DATA_DIR}" && -f "${conf_path}" ]] && source "${conf_path}"
}
# Arguments:
# 1: exit_message as string
# 2: exit_code as int, optional, default: 1
function die() {
local exit_message exit_code
exit_message="$1"
exit_code="${2:-1}"
[[ "$_PRINT_HELP" = 'yes' ]] && show_help >&2
if [[ -n "${exit_message}" ]]; then
if declare -F msg_error &>/dev/null; then
msg_error "fatal: ${exit_message}" >&2
else
echo -e 'fatal:' "${exit_message}" >&2
fi
fi
[[ "${KUBLER_BELL_ON_ERROR}" == 'true' ]] && tput bel
_kubler_internal_abort='true'
exit "${exit_code}"
}
function main() {
compare_versions "${BASH_VERSION}" "${_KUBLER_BASH_MIN}"
[[ $? -eq 1 ]] && die "Kubler needs Bash version ${_KUBLER_BASH_MIN} or greater, installed is ${BASH_VERSION}."
get_absolute_path "$0"
[[ -z "${__get_absolute_path}" ]] && die "Couldn't determine the script's real directory, aborting" 2
_KUBLER_DIR="$(dirname -- "${__get_absolute_path}")"
readonly _KUBLER_DIR
local kubler_bin lib_dir core parser working_dir cmd_script
kubler_bin="$(basename "$0")"
command -v "${kubler_bin}" > /dev/null
# use full path name if not in PATH
[[ $? -eq 1 ]] && kubler_bin="$0"
readonly _KUBLER_BIN="${kubler_bin}"
lib_dir="${_KUBLER_DIR}"/lib
[[ -d "${lib_dir}" ]] || die "Couldn't find ${lib_dir}" 2
readonly _LIB_DIR="${lib_dir}"
source_base_conf "${_KUBLER_DIR}"
core="${_LIB_DIR}"/core.sh
[[ -f "${core}" ]] || die "Couldn't read ${core}" 2
# shellcheck source=lib/core.sh
source "${core}"
# parse main args
get_include_path "cmd/argbash/opt-main.sh"
parser="${__get_include_path}"
# shellcheck source=cmd/argbash/opt-main.sh
file_exists_or_die "${parser}" && source "${parser}"
if [[ "${_arg_debug}" == 'on' ]]; then
readonly BOB_IS_DEBUG='true'
set -x
else
# shellcheck disable=SC2034
readonly BOB_IS_DEBUG='false'
fi
# KUBLER_WORKING_DIR overrides --working-dir, else use current working directory
get_absolute_path "${KUBLER_WORKING_DIR:-${_arg_working_dir}}"
working_dir="${__get_absolute_path}"
[[ -z "${working_dir}" ]] && working_dir="${PWD}"
detect_namespace "${working_dir}"
validate_or_init_data_dir "${KUBLER_DATA_DIR}"
# handle --help for main script
[[ -z "${_arg_command}" && "${_arg_help}" == 'on' ]] && { bc_helper; show_help; exit 0; }
if [[ -n "${_arg_working_dir}" ]]; then
# shellcheck disable=SC2034
readonly _KUBLER_BIN_HINT=" --working-dir=${working_dir}"
fi
# valid command?
get_include_path "cmd/${_arg_command}.sh" || { show_help; die "Unknown command, ${_arg_command}" 5; }
cmd_script="${__get_include_path}"
_is_valid_cmd='true'
# parse command args if a matching parser exists
get_include_path "cmd/argbash/${_arg_command}.sh"
parser="${__get_include_path}"
# shellcheck source=cmd/argbash/build.sh
[[ -f "${parser}" ]] && source "${parser}" "${_arg_leftovers[@]}"
# for this setting env overrides args
[[ "${KUBLER_VERBOSE}" == 'true' ]] && _arg_verbose='on'
# handle --help for command script
[[ "${_arg_help}" == 'on' ]] && { show_help; exit 0; }
if [[ "${KUBLER_CMD_LOG}" == 'true' && "${_arg_verbose}" == 'off' && ! -d "${_KUBLER_LOG_DIR}" ]];then
mkdir -p "${_KUBLER_LOG_DIR}" || die
fi
[[ "${_arg_verbose}" == 'off' ]] && file_exists_and_truncate "${_KUBLER_LOG_DIR}/${_arg_command}.log"
# run the selected command
trap "{ kubler_abort_handler; }" EXIT
# shellcheck source=cmd/build.sh
source "${cmd_script}" "${_arg_leftovers[@]}"
trap ' ' EXIT
}
main "$@"
|
edannenberg/gentoo-bb
|
kubler.sh
|
Shell
|
bsd-2-clause
| 9,443 |
#!/bin/bash
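# Allow unlimited-size core dumps for post-mortem debugging, clear stale crash
# artifacts, and rotate the previous run's logs into bak/ before starting.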
ulimit -c unlimited
rm -f core*
rm -f valgrind.log vgcore.* hs_err_pid*.log
DIR=`pwd`
mkdir -p logs/human_tracker/
cd logs/human_tracker/
rm -fr bak/
mkdir -p bak/intentions
mkdir -p intentions
mv human_tracker* bak/
mv intentions/* bak/intentions/
cd $DIR
time ./pfs $@ | tee human_tracker.output
|
aijunbai/pfs
|
tracker.sh
|
Shell
|
bsd-2-clause
| 315 |
#!/bin/bash
# add everything
echo "add build results to gh-pages"
(cd web/
if [[ "$TRAVIS" == "true" ]] ; then
../travis-setup.sh
fi
rm clean
git add .
STATUS=$(git status --porcelain)
if [[ -n "${STATUS}" ]] ; then
git commit -avm "AUTOMATIC gh-pages BUILD $(date +%F)"
fi
)
|
tidepool-org/deprecated-data-model
|
save-gh-pages.sh
|
Shell
|
bsd-2-clause
| 281 |
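# Run the test client as a DPDK secondary process (-c 2: core mask, -n 1:
# memory channels) so it attaches to an already-running primary process.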
sudo LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/lib/netbsddpdk ./tcp_with_select.bin -c 2 -n 1 --proc-type secondary
|
vadimsu/netbsd_dpdk_port
|
service/test_client/run_tcp_listener_with_select.sh
|
Shell
|
bsd-2-clause
| 112 |
# set required variables
WORKLOAD_FILE=${TOP_DIR}/workloads/workload_SIMPLE_MULTICOLUMN_QUERIES_p300_w2_c4_1234.sql
ALGORITHM=opt
# wfit-specific variables
HOT=40
STATES=2000
|
dbgroup-at-ucsc/dbtune
|
resources/workloads/postgres/.unsupported/working_set/opt_4/config.sh
|
Shell
|
bsd-3-clause
| 178 |
#!/usr/bin/env bash
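# Let the user select a screen region with scrot (-s), upload the capture via
# curl (-e runs the given command on the saved file), then delete the local copy.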
time=`date +%H:%M:%S`
scrot -s "$time.png" -e "curl -sF 'name=@$time.png' http://eleveni386.7axu.com/Image/"
rm "$time.png"
|
chrisju/script
|
putscreen.sh
|
Shell
|
bsd-3-clause
| 144 |
#!/bin/sh
# This will build a new transphpile.phar inside the current directory
./vendor/bin/phar-composer -v build .
|
jaytaph/Transphpile
|
build-phar.sh
|
Shell
|
bsd-3-clause
| 120 |
echo -n "cleaning up... "
sh /home/user/svc/experiments_scripts/ycsb/stop.sh;
echo "done."
echo -n "loading new db... "
/home/user/svc/ycsb-0.1.4/bin/ycsb load mongodb -p mongodb.url="mongodb://datastore-001.svc.laas.fr:27017" -p mongodb.database="ycsb" -p mongodb.writeConcern="normal" -p mongodb.maxconnections=1000 -threads 40 -s -P /home/user/svc/ycsb-0.1.4/workloads/load
echo "done."
|
guthemberg/tejo
|
tejo/common/experiments_scripts/ycsb/load.sh
|
Shell
|
bsd-3-clause
| 393 |
# (C) 2015, Pedro Ivan Lopez <[email protected]>
#
# This source code is released under the new MIT license.
# Current working directory to clipboard
alias cwd2clip='echo $(pwd) | xclip'
|
lopezpdvn/poshutil
|
alias.sh
|
Shell
|
bsd-3-clause
| 191 |
#!/bin/bash
# Creates the service account used by the backup cronjob.
../../kube/secrets/add-service-account.sh \
skia-public \
skia-public \
perf-cockroachdb-backup \
"The perf cockroachdb backup service account." \
roles/storage.objectAdmin
|
google/skia-buildbot
|
perf/secrets/create-perf-cockroachdb-backup-service-account.sh
|
Shell
|
bsd-3-clause
| 252 |
#!/bin/bash
# author: Liang Gong
if [ "$(uname)" == "Darwin" ]; then
# under Mac OS X platform
NODE='node'
elif [ "$(expr substr $(uname -s) 1 5)" == "Linux" ]; then
# under GNU/Linux platform
NODE='nodejs'
fi
cd directory-traversal/wenluhong11
RED='\033[0;31m'
BLUE='\033[0;34m'
GREEN='\033[0;32m'
NC='\033[0m' # No Color
# start the server
echo -e "\t[${GREEN}start vulnerable server${NC}]: ${BLUE}wenluhong11${NC}"
$NODE test.js >/dev/null 2>&1 &
vulnpid=$!
# wait for the server to get started
sleep 1.5s
echo -e "\t[${GREEN}server root directory${NC}]: `pwd`"
# utilize directory traversal to get files outside the working directory
# trigger directory traversal issues: send a request to retrieve the confidential file outside the working directory
$NODE attack.js
# kill the vulnerable npm package's process
kill -9 $vulnpid
|
JacksonGL/NPM-Vuln-PoC
|
directory-traversal/wenluhong11/PoC.sh
|
Shell
|
bsd-3-clause
| 864 |
#!/bin/bash
#
# Copyright (c) 2009, Whispersoft s.r.l.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Whispersoft s.r.l. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
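# Run media_file_printer on every sample .aac and diff the result against the
# recorded .out baseline next to it; any mismatch fails the test.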
for f in $1/test_data/*.aac; do
outfile=/tmp/$(basename $f).out
echo " Test for: " $f
$2/media_file_printer --in=$f > $outfile || exit 1
diff -q $f".out" $outfile || exit 1
rm -f $outfile
done
|
cpopescu/whispercast
|
whisperstreamlib/aac/test/aac_tag_test.sh
|
Shell
|
bsd-3-clause
| 1,763 |
#!/bin/bash -e
# Transforms ETL results in GCS so that "N (null values in current Cloud SQL export) are replaced
# with an empty string. Output files are written to a "transformed" subdirectory.
#
# We will stop using this script once the Cloud SQL export API provides the option to specify a
# different null value.
USAGE="etl/transform_etl_results.sh --project <PROJECT> --account <ACCOUNT> --directory <DIRECTORY>"
while true; do
case "$1" in
--account) ACCOUNT=$2; shift 2;;
--project) PROJECT=$2; shift 2;;
--directory) DIRECTORY=$2; shift 2;;
-- ) shift; break ;;
* ) break ;;
esac
done
if [ -z "${ACCOUNT}" ] || [ -z "${PROJECT}" ] || [ -z "${DIRECTORY}" ]
then
echo "Usage: $USAGE"
exit 1
fi
CREDS_ACCOUNT=${ACCOUNT}
CSV_DIR=/tmp/rdr-export-csv
source tools/auth_setup.sh
function finish {
rm -rf ${CSV_DIR}
cleanup
}
trap finish EXIT
echo "Activating service account..."
gcloud iam service-accounts keys create $CREDS_FILE --iam-account=$SERVICE_ACCOUNT --account=$ACCOUNT
gcloud auth activate-service-account $SERVICE_ACCOUNT --key-file=$CREDS_FILE
echo "Copying CSV files from GCS..."
mkdir -p ${CSV_DIR}
mkdir -p ${CSV_DIR}/transformed
CLOUD_DIR=gs://${PROJECT}-cdm/${DIRECTORY}
gsutil cp ${CLOUD_DIR}/*.csv ${CSV_DIR}
echo "Transforming CSV files..."
for file in ${CSV_DIR}/*.csv
do
filename=$(basename "$file")
# Replace "N with empty string, but only when followed by a comma and then a comma, quote,
# or number, and not ([0-9],)*[0-9]- (which appear in concept_synonym)
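# Illustrative example: the line '1,"N,"N,5' becomes '1,,,5', and a line
# ending in '"N' has that trailing '"N' removed.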
cat $file | perl -pe 's/\"N,(?=[,\"0-9])(?!([0-9],)*[0-9]-)/,/g' | sed 's/\"N$//g' > ${CSV_DIR}/transformed/$filename
done
echo "Uploading files back to GCS..."
gsutil cp -r ${CSV_DIR}/transformed ${CLOUD_DIR}
echo "Done."
|
all-of-us/raw-data-repository
|
rdr_service/etl/transform_etl_results.sh
|
Shell
|
bsd-3-clause
| 1,769 |
#!/usr/bin/env bash
echo "Start config"
# get the script path http://stackoverflow.com/questions/4774054/reliable-way-for-a-bash-script-to-get-the-full-path-to-itself
pushd `dirname $0` > /dev/null
SCRIPTPATH=`pwd -P`
popd > /dev/null
ORIGINAL_WD=${PWD}
cd ${SCRIPTPATH}
source ../common.sh
cp config/* /tmp/
echo "Config HiBench"
mv /tmp/hibench.conf ${HIBENCH_HOME}/conf/hibench.conf
mv /tmp/hadoop.conf ${HIBENCH_HOME}/conf/hadoop.conf
mv /tmp/spark.conf ${HIBENCH_HOME}/conf/spark.conf
echo "Finish config"
cd ${ORIGINAL_WD}
|
at15/hadoop-spark-perf
|
provision/single/hibench/config.sh
|
Shell
|
mit
| 537 |
#!/bin/bash
cp ../Helper/convertTallFormatToWide.py .
|
srp33/WishBuilder
|
CCLE_mRNA_isoform_kallisto_Tatlow/install.sh
|
Shell
|
mit
| 56 |
#!/bin/bash
set -e
VENV=./venv
python -m virtualenv $VENV --always-copy
. $VENV/bin/activate
pip install -U pip setuptools
pip install -r requirements.txt
echo ""
echo "* Created virtualenv environment in $VENV."
echo "* Installed all dependencies into the virtualenv."
echo "* You can now activate the virtualenv: \`. $VENV/bin/activate\`"
|
ikoz/mitmproxy
|
dev.sh
|
Shell
|
mit
| 343 |
#!/bin/sh
set -e
mkdir -p "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
RESOURCES_TO_COPY=${PODS_ROOT}/resources-to-copy-${TARGETNAME}.txt
> "$RESOURCES_TO_COPY"
XCASSET_FILES=()
realpath() {
DIRECTORY="$(cd "${1%/*}" && pwd)"
FILENAME="${1##*/}"
echo "$DIRECTORY/$FILENAME"
}
install_resource()
{
case $1 in
*.storyboard)
echo "ibtool --reference-external-strings-file --errors --warnings --notices --output-format human-readable-text --compile ${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$1\" .storyboard`.storyboardc ${PODS_ROOT}/$1 --sdk ${SDKROOT}"
ibtool --reference-external-strings-file --errors --warnings --notices --output-format human-readable-text --compile "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$1\" .storyboard`.storyboardc" "${PODS_ROOT}/$1" --sdk "${SDKROOT}"
;;
*.xib)
echo "ibtool --reference-external-strings-file --errors --warnings --notices --output-format human-readable-text --compile ${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$1\" .xib`.nib ${PODS_ROOT}/$1 --sdk ${SDKROOT}"
ibtool --reference-external-strings-file --errors --warnings --notices --output-format human-readable-text --compile "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$1\" .xib`.nib" "${PODS_ROOT}/$1" --sdk "${SDKROOT}"
;;
*.framework)
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
echo "rsync -av ${PODS_ROOT}/$1 ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
rsync -av "${PODS_ROOT}/$1" "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
;;
*.xcdatamodel)
echo "xcrun momc \"${PODS_ROOT}/$1\" \"${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1"`.mom\""
xcrun momc "${PODS_ROOT}/$1" "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1" .xcdatamodel`.mom"
;;
*.xcdatamodeld)
echo "xcrun momc \"${PODS_ROOT}/$1\" \"${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1" .xcdatamodeld`.momd\""
xcrun momc "${PODS_ROOT}/$1" "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1" .xcdatamodeld`.momd"
;;
*.xcmappingmodel)
echo "xcrun mapc \"${PODS_ROOT}/$1\" \"${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1" .xcmappingmodel`.cdm\""
xcrun mapc "${PODS_ROOT}/$1" "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1" .xcmappingmodel`.cdm"
;;
*.xcassets)
ABSOLUTE_XCASSET_FILE=$(realpath "${PODS_ROOT}/$1")
XCASSET_FILES+=("$ABSOLUTE_XCASSET_FILE")
;;
/*)
echo "$1"
echo "$1" >> "$RESOURCES_TO_COPY"
;;
*)
echo "${PODS_ROOT}/$1"
echo "${PODS_ROOT}/$1" >> "$RESOURCES_TO_COPY"
;;
esac
}
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_resource "${BUILT_PRODUCTS_DIR}/ZzishSDK.bundle"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_resource "${BUILT_PRODUCTS_DIR}/ZzishSDK.bundle"
fi
mkdir -p "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
rsync -avr --copy-links --no-relative --exclude '*/.svn/*' --files-from="$RESOURCES_TO_COPY" / "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
if [[ "${ACTION}" == "install" ]] && [[ "${SKIP_INSTALL}" == "NO" ]]; then
mkdir -p "${INSTALL_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
rsync -avr --copy-links --no-relative --exclude '*/.svn/*' --files-from="$RESOURCES_TO_COPY" / "${INSTALL_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
fi
rm -f "$RESOURCES_TO_COPY"
if [[ -n "${WRAPPER_EXTENSION}" ]] && [ "`xcrun --find actool`" ] && [ -n "$XCASSET_FILES" ]
then
case "${TARGETED_DEVICE_FAMILY}" in
1,2)
TARGET_DEVICE_ARGS="--target-device ipad --target-device iphone"
;;
1)
TARGET_DEVICE_ARGS="--target-device iphone"
;;
2)
TARGET_DEVICE_ARGS="--target-device ipad"
;;
*)
TARGET_DEVICE_ARGS="--target-device mac"
;;
esac
# Find all other xcassets (this unfortunately includes those of path pods and other targets).
OTHER_XCASSETS=$(find "$PWD" -iname "*.xcassets" -type d)
while read line; do
if [[ $line != $(realpath "${PODS_ROOT}")* ]]; then
XCASSET_FILES+=("$line")
fi
done <<<"$OTHER_XCASSETS"
printf "%s\0" "${XCASSET_FILES[@]}" | xargs -0 xcrun actool --output-format human-readable-text --notices --warnings --platform "${PLATFORM_NAME}" --minimum-deployment-target "${IPHONEOS_DEPLOYMENT_TARGET}" ${TARGET_DEVICE_ARGS} --compress-pngs --compile "${BUILT_PRODUCTS_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
fi
|
Zzish/zzishsdk-ios
|
Example/Pods/Target Support Files/Pods-ZzishSDK/Pods-ZzishSDK-resources.sh
|
Shell
|
mit
| 4,834 |
#!/bin/sh
########################################################################
########################################################################
##
## Tripwire(R) 2.3 for LINUX(R) Post-RPM installation script
##
## Copyleft information contained in footer
##
########################################################################
########################################################################
##=======================================================
## Setup
##=======================================================
# We can assume all the correct tools are in place because the
# RPM installed, didn't it?
##-------------------------------------------------------
## Set HOST_NAME variable
##-------------------------------------------------------
HOST_NAME='localhost'
if uname -n > /dev/null 2> /dev/null ; then
HOST_NAME=`uname -n`
fi
##-------------------------------------------------------
## Program variables - edited by RPM during initial install
##-------------------------------------------------------
# Site Passphrase variable
TW_SITE_PASS="tripwire"
# Complete path to site key
SITE_KEY="/etc/tripwire/site.key"
# Local Passphrase variable
TW_LOCAL_PASS="tripwire"
# Complete path to local key
LOCAL_KEY="/etc/tripwire/${HOST_NAME}-local.key"
# If clobber==true, overwrite files; if false, do not overwrite files.
CLOBBER="false"
# If prompt==true, ask for confirmation before continuing with install.
PROMPT="true"
# Name of twadmin executeable
TWADMIN="twadmin"
# Path to twadmin executeable
TWADMPATH=/usr/sbin
# Path to configuration directory
CONF_PATH="/etc/tripwire"
# Name of clear text policy file
TXT_POL=$CONF_PATH/twpol.txt
# Name of clear text configuration file
TXT_CFG=$CONF_PATH/twcfg.txt
# Name of encrypted configuration file
CONFIG_FILE=$CONF_PATH/tw.cfg
# Path of the final Tripwire policy file (signed)
SIGNED_POL=`grep POLFILE $TXT_CFG | sed -e 's/^.*=\(.*\)/\1/'`
##=======================================================
## Create Key Files
##=======================================================
##-------------------------------------------------------
## If user has to enter a passphrase, give some
## advice about what is appropriate.
##-------------------------------------------------------
if [ -z "$TW_SITE_PASS" ] || [ -z "$TW_LOCAL_PASS" ]; then
cat << END_OF_TEXT
----------------------------------------------
The Tripwire site and local passphrases are used to
sign a variety of files, such as the configuration,
policy, and database files.
Passphrases should be at least 8 characters in length
and contain both letters and numbers.
See the Tripwire manual for more information.
END_OF_TEXT
fi
##=======================================================
## Generate keys.
##=======================================================
echo
echo "----------------------------------------------"
echo "Creating key files..."
##-------------------------------------------------------
## Site key file.
##-------------------------------------------------------
# If clobber is true, and prompting is off (unattended operation)
# and the key file already exists, remove it. Otherwise twadmin
# will prompt with an "are you sure?" message.
if [ "$CLOBBER" = "true" ] && [ "$PROMPT" = "false" ] && [ -f "$SITE_KEY" ] ; then
rm -f "$SITE_KEY"
fi
if [ -f "$SITE_KEY" ] && [ "$CLOBBER" = "false" ] ; then
echo "The site key file \"$SITE_KEY\""
echo 'exists and will not be overwritten.'
else
cmdargs="--generate-keys --site-keyfile \"$SITE_KEY\""
if [ -n "$TW_SITE_PASS" ] ; then
cmdargs="$cmdargs --site-passphrase \"$TW_SITE_PASS\""
fi
eval "\"$TWADMPATH/$TWADMIN\" $cmdargs"
if [ $? -ne 0 ] ; then
echo "Error: site key generation failed"
exit 1
else chmod 640 "$SITE_KEY"
fi
fi
##-------------------------------------------------------
## Local key file.
##-------------------------------------------------------
# If clobber is true, and prompting is off (unattended operation)
# and the key file already exists, remove it. Otherwise twadmin
# will prompt with an "are you sure?" message.
if [ "$CLOBBER" = "true" ] && [ "$PROMPT" = "false" ] && [ -f "$LOCAL_KEY" ] ; then
rm -f "$LOCAL_KEY"
fi
if [ -f "$LOCAL_KEY" ] && [ "$CLOBBER" = "false" ] ; then
echo "The site key file \"$LOCAL_KEY\""
echo 'exists and will not be overwritten.'
else
cmdargs="--generate-keys --local-keyfile \"$LOCAL_KEY\""
if [ -n "$TW_LOCAL_PASS" ] ; then
cmdargs="$cmdargs --local-passphrase \"$TW_LOCAL_PASS\""
fi
eval "\"$TWADMPATH/$TWADMIN\" $cmdargs"
if [ $? -ne 0 ] ; then
echo "Error: local key generation failed"
exit 1
else chmod 640 "$LOCAL_KEY"
fi
fi
##=======================================================
## Sign the Configuration File
##=======================================================
echo
echo "----------------------------------------------"
echo "Signing configuration file..."
##-------------------------------------------------------
## If noclobber, then backup any existing config file.
##-------------------------------------------------------
if [ "$CLOBBER" = "false" ] && [ -s "$CONFIG_FILE" ] ; then
backup="${CONFIG_FILE}.$$.bak"
echo "Backing up $CONFIG_FILE"
echo " to $backup"
`mv "$CONFIG_FILE" "$backup"`
if [ $? -ne 0 ] ; then
echo "Error: backup of configuration file failed."
exit 1
fi
fi
##-------------------------------------------------------
## Build command line.
##-------------------------------------------------------
cmdargs="--create-cfgfile"
cmdargs="$cmdargs --cfgfile \"$CONFIG_FILE\""
cmdargs="$cmdargs --site-keyfile \"$SITE_KEY\""
if [ -n "$TW_SITE_PASS" ] ; then
cmdargs="$cmdargs --site-passphrase \"$TW_SITE_PASS\""
fi
##-------------------------------------------------------
## Sign the file.
##-------------------------------------------------------
eval "\"$TWADMPATH/$TWADMIN\" $cmdargs \"$TXT_CFG\""
if [ $? -ne 0 ] ; then
echo "Error: signing of configuration file failed."
exit 1
fi
# Set the rights properly
chmod 640 "$CONFIG_FILE"
##-------------------------------------------------------
## We keep the cleartext version around.
##-------------------------------------------------------
cat << END_OF_TEXT
A clear-text version of the Tripwire configuration file
$TXT_CFG
has been preserved for your inspection. It is recommended
that you delete this file manually after you have examined it.
END_OF_TEXT
##=======================================================
## Sign tripwire policy file.
##=======================================================
echo
echo "----------------------------------------------"
echo "Signing policy file..."
##-------------------------------------------------------
## If noclobber, then backup any existing policy file.
##-------------------------------------------------------
if [ "$CLOBBER" = "false" ] && [ -s "$POLICY_FILE" ] ; then
backup="${POLICY_FILE}.$$.bak"
echo "Backing up $POLICY_FILE"
echo " to $backup"
mv "$POLICY_FILE" "$backup"
if [ $? -ne 0 ] ; then
echo "Error: backup of policy file failed."
exit 1
fi
fi
##-------------------------------------------------------
## Build command line.
##-------------------------------------------------------
cmdargs="--create-polfile"
cmdargs="$cmdargs --cfgfile \"$CONFIG_FILE\""
cmdargs="$cmdargs --site-keyfile \"$SITE_KEY\""
if [ -n "$TW_SITE_PASS" ] ; then
cmdargs="$cmdargs --site-passphrase \"$TW_SITE_PASS\""
fi
##-------------------------------------------------------
## Sign the file.
##-------------------------------------------------------
eval "\"$TWADMPATH/$TWADMIN\" $cmdargs \"$TXT_POL\""
if [ $? -ne 0 ] ; then
echo "Error: signing of policy file failed."
exit 1
fi
# Set the proper rights on the newly signed policy file.
chmod 0640 "$SIGNED_POL"
##-------------------------------------------------------
## We keep the cleartext version around.
##-------------------------------------------------------
cat << END_OF_TEXT
A clear-text version of the Tripwire policy file
$TXT_POL
has been preserved for your inspection. This implements
a minimal policy, intended only to test essential
Tripwire functionality. You should edit the policy file
to describe your system, and then use twadmin to generate
a new signed copy of the Tripwire policy.
END_OF_TEXT
# Initialize tripwire database
/usr/sbin/tripwire --init --cfgfile $CONFIG_FILE --site-keyfile $SITE_KEY \
--local-passphrase $TW_LOCAL_PASS 2> /dev/null
########################################################################
########################################################################
#
# TRIPWIRE GPL NOTICES
#
# The developer of the original code and/or files is Tripwire, Inc.
# Portions created by Tripwire, Inc. are copyright 2000 Tripwire, Inc.
# Tripwire is a registered trademark of Tripwire, Inc. All rights reserved.
#
# This program is free software. The contents of this file are subject to
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version. You may redistribute it and/or modify it only in
# compliance with the GNU General Public License.
#
# This program is distributed in the hope that it will be useful. However,
# this program is distributed "AS-IS" WITHOUT ANY WARRANTY; INCLUDING THE
# IMPLIED WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
# Please see the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Nothing in the GNU General Public License or any other license to use the
# code or files shall permit you to use Tripwire's trademarks,
# service marks, or other intellectual property without Tripwire's
# prior written consent.
#
# If you have any questions, please contact Tripwire, Inc. at either
# [email protected] or www.tripwire.org.
#
########################################################################
########################################################################
|
akuster/meta-security
|
recipes-security/tripwire/files/twinstall.sh
|
Shell
|
mit
| 10,271 |
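# Run the server under the Java security manager, limited to the permissions
# granted in the permission.any policy file.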
java -Djava.security.manager -Djava.security.policy=permission.any -jar GameShop_Advance_Server.jar
|
GameShopAdvance/GameShop-Advance
|
build/Server/GSA_server.sh
|
Shell
|
mit
| 99 |
#!/bin/sh -xe
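# Unpack and install each vendored module for Python 2.7; -x traces each
# command and -e aborts on the first failure.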
sudo rm -rf distribute-0.7.3 setuptools-1.3.2 protobuf-2.5.0 urllib3-1.7.1
unzip distribute-0.7.3.zip
cd distribute-0.7.3
sudo python2.7 setup.py install
cd ..
tar xvf setuptools-1.3.2.tar.gz
cd setuptools-1.3.2
sudo python2.7 setup.py install
cd ..
tar xvf protobuf-2.5.0.tar.gz
cd protobuf-2.5.0
sudo python2.7 setup.py install
cd ..
tar xvf urllib3-1.7.1.tar.gz
cd urllib3-1.7.1
sudo python2.7 setup.py install
cd ..
sudo rm -rf distribute-0.7.3 setuptools-1.3.2 protobuf-2.5.0 urllib3-1.7.1
|
wanghq/goots
|
doc/ots_python_sdk_2.0.2/pymodules/install_modules_for_ots_python_sdk.sh
|
Shell
|
mit
| 514 |
#!/bin/bash
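# For each Nikola version passed as an argument, fetch that version's prebuilt
# wheelhouse from GitHub and install the binary dependencies from it.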
for i in $@; do
wget https://github.com/getnikola/wheelhouse/archive/v$i'.zip'
unzip 'v'$i'.zip'
pip install --use-wheel --no-index --find-links=wheelhouse-$i lxml Pillow ipykernel notebook PyYAML
rm -rf wheelhouse-$i 'v'$i'.zip'
done
|
knowsuchagency/nikola
|
scripts/getwheelhouse.sh
|
Shell
|
mit
| 263 |
#!/bin/bash
echo Done
|
malawski/hyperflow-deployment
|
scripts/worker/stop.sh
|
Shell
|
mit
| 22 |
#!/bin/bash
set -e
set -x
if [[ "$NGHTTP2" = true ]]; then
# GCC 4.6 seems to cause problems, so go straight to 4.8.
sudo add-apt-repository --yes ppa:ubuntu-toolchain-r/test
sudo apt-get update
sudo apt-get install g++-4.8 libstdc++-4.8-dev
export CXX="g++-4.8" CC="gcc-4.8"
$CC --version
# Install nghttp2. Right now I haven't built a PPA for this so we have to
# do it from source, which kinda sucks. First, install a ton of
# prerequisite packages.
sudo apt-get install autoconf automake autotools-dev libtool pkg-config \
zlib1g-dev libcunit1-dev libssl-dev libxml2-dev \
libevent-dev libjansson-dev libjemalloc-dev
pip install cython
# Now, download and install nghttp2's latest version.
git clone https://github.com/tatsuhiro-t/nghttp2.git
cd nghttp2
autoreconf -i
automake
autoconf
./configure --disable-threads
make
sudo make install
# The makefile doesn't install into the active virtualenv. Install again.
cd python
python setup.py install
cd ../..
# Let's try ldconfig.
sudo sh -c 'echo "/usr/local/lib" > /etc/ld.so.conf.d/libnghttp2.conf'
sudo ldconfig
fi
if [[ "$HYPER_FAST_PARSE" = true ]]; then
pip install pycohttpparser~=1.0
fi
pip install .
pip install -r test_requirements.txt
|
jdecuyper/hyper
|
.travis/install.sh
|
Shell
|
mit
| 1,368 |
#!/bin/sh
#
# Copyright (c) 2007 Shawn Pearce
#
test_description='test git fast-import utility'
. ./test-lib.sh
. "$TEST_DIRECTORY"/diff-lib.sh ;# test-lib chdir's into trash
verify_packs () {
for p in .git/objects/pack/*.pack
do
git verify-pack "$@" "$p" || return
done
}
file2_data='file2
second line of EOF'
file3_data='EOF
in 3rd file
END'
file4_data=abcd
file4_len=4
file5_data='an inline file.
we should see it later.'
file6_data='#!/bin/sh
echo "$@"'
###
### series A
###
test_expect_success 'empty stream succeeds' '
git config fastimport.unpackLimit 0 &&
git fast-import </dev/null
'
test_expect_success 'truncated stream complains' '
echo "tag foo" | test_must_fail git fast-import
'
test_expect_success 'A: create pack from stdin' '
test_tick &&
cat >input <<-INPUT_END &&
blob
mark :2
data <<EOF
$file2_data
EOF
blob
mark :3
data <<END
$file3_data
END
blob
mark :4
data $file4_len
$file4_data
commit refs/heads/master
mark :5
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
initial
COMMIT
M 644 :2 file2
M 644 :3 file3
M 755 :4 file4
tag series-A
from :5
data <<EOF
An annotated tag without a tagger
EOF
tag series-A-blob
from :3
data <<EOF
An annotated tag that annotates a blob.
EOF
tag to-be-deleted
from :3
data <<EOF
Another annotated tag that annotates a blob.
EOF
reset refs/tags/to-be-deleted
from 0000000000000000000000000000000000000000
tag nested
mark :6
from :4
data <<EOF
Tag of our lovely commit
EOF
reset refs/tags/nested
from 0000000000000000000000000000000000000000
tag nested
mark :7
from :6
data <<EOF
Tag of tag of our lovely commit
EOF
alias
mark :8
to :5
INPUT_END
git fast-import --export-marks=marks.out <input &&
git whatchanged master
'
test_expect_success 'A: verify pack' '
verify_packs
'
test_expect_success 'A: verify commit' '
cat >expect <<-EOF &&
author $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
initial
EOF
git cat-file commit master | sed 1d >actual &&
test_cmp expect actual
'
test_expect_success 'A: verify tree' '
cat >expect <<-EOF &&
100644 blob file2
100644 blob file3
100755 blob file4
EOF
git cat-file -p master^{tree} | sed "s/ [0-9a-f]* / /" >actual &&
test_cmp expect actual
'
test_expect_success 'A: verify file2' '
echo "$file2_data" >expect &&
git cat-file blob master:file2 >actual &&
test_cmp expect actual
'
test_expect_success 'A: verify file3' '
echo "$file3_data" >expect &&
git cat-file blob master:file3 >actual &&
test_cmp expect actual
'
test_expect_success 'A: verify file4' '
printf "$file4_data" >expect &&
git cat-file blob master:file4 >actual &&
test_cmp expect actual
'
test_expect_success 'A: verify tag/series-A' '
cat >expect <<-EOF &&
object $(git rev-parse refs/heads/master)
type commit
tag series-A
An annotated tag without a tagger
EOF
git cat-file tag tags/series-A >actual &&
test_cmp expect actual
'
test_expect_success 'A: verify tag/series-A-blob' '
cat >expect <<-EOF &&
object $(git rev-parse refs/heads/master:file3)
type blob
tag series-A-blob
An annotated tag that annotates a blob.
EOF
git cat-file tag tags/series-A-blob >actual &&
test_cmp expect actual
'
test_expect_success 'A: verify tag deletion is successful' '
test_must_fail git rev-parse --verify refs/tags/to-be-deleted
'
test_expect_success 'A: verify marks output' '
cat >expect <<-EOF &&
:2 $(git rev-parse --verify master:file2)
:3 $(git rev-parse --verify master:file3)
:4 $(git rev-parse --verify master:file4)
:5 $(git rev-parse --verify master^0)
:6 $(git cat-file tag nested | grep object | cut -d" " -f 2)
:7 $(git rev-parse --verify nested)
:8 $(git rev-parse --verify master^0)
EOF
test_cmp expect marks.out
'
test_expect_success 'A: verify marks import' '
git fast-import \
--import-marks=marks.out \
--export-marks=marks.new \
</dev/null &&
test_cmp expect marks.new
'
test_expect_success 'A: tag blob by sha1' '
test_tick &&
new_blob=$(echo testing | git hash-object --stdin) &&
cat >input <<-INPUT_END &&
tag series-A-blob-2
from $(git rev-parse refs/heads/master:file3)
data <<EOF
Tag blob by sha1.
EOF
blob
mark :6
data <<EOF
testing
EOF
commit refs/heads/new_blob
committer <> 0 +0000
data 0
M 644 :6 new_blob
#pretend we got sha1 from fast-import
ls "new_blob"
tag series-A-blob-3
from $new_blob
data <<EOF
Tag new_blob.
EOF
INPUT_END
cat >expect <<-EOF &&
object $(git rev-parse refs/heads/master:file3)
type blob
tag series-A-blob-2
Tag blob by sha1.
object $new_blob
type blob
tag series-A-blob-3
Tag new_blob.
EOF
git fast-import <input &&
git cat-file tag tags/series-A-blob-2 >actual &&
git cat-file tag tags/series-A-blob-3 >>actual &&
test_cmp expect actual
'
test_expect_success 'A: verify marks import does not crash' '
test_tick &&
cat >input <<-INPUT_END &&
commit refs/heads/verify--import-marks
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
recreate from :5
COMMIT
from :5
M 755 :2 copy-of-file2
INPUT_END
git fast-import --import-marks=marks.out <input &&
git whatchanged verify--import-marks
'
test_expect_success 'A: verify pack' '
verify_packs
'
test_expect_success 'A: verify diff' '
cat >expect <<-EOF &&
:000000 100755 0000000000000000000000000000000000000000 7123f7f44e39be127c5eb701e5968176ee9d78b1 A copy-of-file2
EOF
git diff-tree -M -r master verify--import-marks >actual &&
compare_diff_raw expect actual &&
test $(git rev-parse --verify master:file2) \
= $(git rev-parse --verify verify--import-marks:copy-of-file2)
'
test_expect_success 'A: export marks with large values' '
test_tick &&
mt=$(git hash-object --stdin < /dev/null) &&
>input.blob &&
>marks.exp &&
>tree.exp &&
cat >input.commit <<-EOF &&
commit refs/heads/verify--dump-marks
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
test the sparse array dumping routines with exponentially growing marks
COMMIT
EOF
i=0 l=4 m=6 n=7 &&
while test "$i" -lt 27
do
cat >>input.blob <<-EOF &&
blob
mark :$l
data 0
blob
mark :$m
data 0
blob
mark :$n
data 0
EOF
echo "M 100644 :$l l$i" >>input.commit &&
echo "M 100644 :$m m$i" >>input.commit &&
echo "M 100644 :$n n$i" >>input.commit &&
echo ":$l $mt" >>marks.exp &&
echo ":$m $mt" >>marks.exp &&
echo ":$n $mt" >>marks.exp &&
printf "100644 blob $mt\tl$i\n" >>tree.exp &&
printf "100644 blob $mt\tm$i\n" >>tree.exp &&
printf "100644 blob $mt\tn$i\n" >>tree.exp &&
l=$(($l + $l)) &&
m=$(($m + $m)) &&
n=$(($l + $n)) &&
i=$((1 + $i)) || return 1
done &&
sort tree.exp > tree.exp_s &&
cat input.blob input.commit | git fast-import --export-marks=marks.large &&
git ls-tree refs/heads/verify--dump-marks >tree.out &&
test_cmp tree.exp_s tree.out &&
test_cmp marks.exp marks.large
'
###
### series B
###
test_expect_success 'B: fail on invalid blob sha1' '
test_tick &&
cat >input <<-INPUT_END &&
commit refs/heads/branch
mark :1
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
corrupt
COMMIT
from refs/heads/master
M 755 0000000000000000000000000000000000000001 zero1
INPUT_END
test_when_finished "rm -f .git/objects/pack_* .git/objects/index_*" &&
test_must_fail git fast-import <input
'
test_expect_success 'B: accept branch name "TEMP_TAG"' '
cat >input <<-INPUT_END &&
commit TEMP_TAG
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
tag base
COMMIT
from refs/heads/master
INPUT_END
test_when_finished "rm -f .git/TEMP_TAG
git gc
git prune" &&
git fast-import <input &&
test -f .git/TEMP_TAG &&
test $(git rev-parse master) = $(git rev-parse TEMP_TAG^)
'
test_expect_success 'B: accept empty committer' '
cat >input <<-INPUT_END &&
commit refs/heads/empty-committer-1
committer <> $GIT_COMMITTER_DATE
data <<COMMIT
empty commit
COMMIT
INPUT_END
test_when_finished "git update-ref -d refs/heads/empty-committer-1
git gc
git prune" &&
git fast-import <input &&
out=$(git fsck) &&
echo "$out" &&
test -z "$out"
'
test_expect_success 'B: accept and fixup committer with no name' '
cat >input <<-INPUT_END &&
commit refs/heads/empty-committer-2
committer <[email protected]> $GIT_COMMITTER_DATE
data <<COMMIT
empty commit
COMMIT
INPUT_END
test_when_finished "git update-ref -d refs/heads/empty-committer-2
git gc
git prune" &&
git fast-import <input &&
out=$(git fsck) &&
echo "$out" &&
test -z "$out"
'
test_expect_success 'B: fail on invalid committer (1)' '
cat >input <<-INPUT_END &&
commit refs/heads/invalid-committer
committer Name email> $GIT_COMMITTER_DATE
data <<COMMIT
empty commit
COMMIT
INPUT_END
test_when_finished "git update-ref -d refs/heads/invalid-committer" &&
test_must_fail git fast-import <input
'
test_expect_success 'B: fail on invalid committer (2)' '
cat >input <<-INPUT_END &&
commit refs/heads/invalid-committer
committer Name <e<mail> $GIT_COMMITTER_DATE
data <<COMMIT
empty commit
COMMIT
INPUT_END
test_when_finished "git update-ref -d refs/heads/invalid-committer" &&
test_must_fail git fast-import <input
'
test_expect_success 'B: fail on invalid committer (3)' '
cat >input <<-INPUT_END &&
commit refs/heads/invalid-committer
committer Name <email>> $GIT_COMMITTER_DATE
data <<COMMIT
empty commit
COMMIT
INPUT_END
test_when_finished "git update-ref -d refs/heads/invalid-committer" &&
test_must_fail git fast-import <input
'
test_expect_success 'B: fail on invalid committer (4)' '
cat >input <<-INPUT_END &&
commit refs/heads/invalid-committer
committer Name <email $GIT_COMMITTER_DATE
data <<COMMIT
empty commit
COMMIT
INPUT_END
test_when_finished "git update-ref -d refs/heads/invalid-committer" &&
test_must_fail git fast-import <input
'
test_expect_success 'B: fail on invalid committer (5)' '
cat >input <<-INPUT_END &&
commit refs/heads/invalid-committer
committer Name<email> $GIT_COMMITTER_DATE
data <<COMMIT
empty commit
COMMIT
INPUT_END
test_when_finished "git update-ref -d refs/heads/invalid-committer" &&
test_must_fail git fast-import <input
'
###
### series C
###
test_expect_success 'C: incremental import create pack from stdin' '
newf=$(echo hi newf | git hash-object -w --stdin) &&
oldf=$(git rev-parse --verify master:file2) &&
test_tick &&
cat >input <<-INPUT_END &&
commit refs/heads/branch
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
second
COMMIT
from refs/heads/master
M 644 $oldf file2/oldf
M 755 $newf file2/newf
D file3
INPUT_END
git fast-import <input &&
git whatchanged branch
'
test_expect_success 'C: verify pack' '
verify_packs
'
test_expect_success 'C: validate reuse existing blob' '
test $newf = $(git rev-parse --verify branch:file2/newf) &&
test $oldf = $(git rev-parse --verify branch:file2/oldf)
'
test_expect_success 'C: verify commit' '
cat >expect <<-EOF &&
parent $(git rev-parse --verify master^0)
author $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
second
EOF
git cat-file commit branch | sed 1d >actual &&
test_cmp expect actual
'
test_expect_success 'C: validate rename result' '
cat >expect <<-EOF &&
:000000 100755 0000000000000000000000000000000000000000 f1fb5da718392694d0076d677d6d0e364c79b0bc A file2/newf
:100644 100644 7123f7f44e39be127c5eb701e5968176ee9d78b1 7123f7f44e39be127c5eb701e5968176ee9d78b1 R100 file2 file2/oldf
:100644 000000 0d92e9f3374ae2947c23aa477cbc68ce598135f1 0000000000000000000000000000000000000000 D file3
EOF
git diff-tree -M -r master branch >actual &&
compare_diff_raw expect actual
'
###
### series D
###
test_expect_success 'D: inline data in commit' '
test_tick &&
cat >input <<-INPUT_END &&
commit refs/heads/branch
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
third
COMMIT
from refs/heads/branch^0
M 644 inline newdir/interesting
data <<EOF
$file5_data
EOF
M 755 inline newdir/exec.sh
data <<EOF
$file6_data
EOF
INPUT_END
git fast-import <input &&
git whatchanged branch
'
test_expect_success 'D: verify pack' '
verify_packs
'
test_expect_success 'D: validate new files added' '
cat >expect <<-EOF &&
:000000 100755 0000000000000000000000000000000000000000 e74b7d465e52746be2b4bae983670711e6e66657 A newdir/exec.sh
:000000 100644 0000000000000000000000000000000000000000 fcf778cda181eaa1cbc9e9ce3a2e15ee9f9fe791 A newdir/interesting
EOF
git diff-tree -M -r branch^ branch >actual &&
compare_diff_raw expect actual
'
test_expect_success 'D: verify file5' '
echo "$file5_data" >expect &&
git cat-file blob branch:newdir/interesting >actual &&
test_cmp expect actual
'
test_expect_success 'D: verify file6' '
echo "$file6_data" >expect &&
git cat-file blob branch:newdir/exec.sh >actual &&
test_cmp expect actual
'
###
### series E
###
test_expect_success 'E: rfc2822 date, --date-format=raw' '
cat >input <<-INPUT_END &&
commit refs/heads/branch
author $GIT_AUTHOR_NAME <$GIT_AUTHOR_EMAIL> Tue Feb 6 11:22:18 2007 -0500
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> Tue Feb 6 12:35:02 2007 -0500
data <<COMMIT
RFC 2822 type date
COMMIT
from refs/heads/branch^0
INPUT_END
test_must_fail git fast-import --date-format=raw <input
'
test_expect_success 'E: rfc2822 date, --date-format=rfc2822' '
git fast-import --date-format=rfc2822 <input
'
test_expect_success 'E: verify pack' '
verify_packs
'
test_expect_success 'E: verify commit' '
cat >expect <<-EOF &&
author $GIT_AUTHOR_NAME <$GIT_AUTHOR_EMAIL> 1170778938 -0500
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> 1170783302 -0500
RFC 2822 type date
EOF
git cat-file commit branch | sed 1,2d >actual &&
test_cmp expect actual
'
###
### series F
###
test_expect_success 'F: non-fast-forward update skips' '
old_branch=$(git rev-parse --verify branch^0) &&
test_tick &&
cat >input <<-INPUT_END &&
commit refs/heads/branch
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
losing things already?
COMMIT
from refs/heads/branch~1
reset refs/heads/other
from refs/heads/branch
INPUT_END
test_must_fail git fast-import <input &&
# branch must remain unaffected
test $old_branch = $(git rev-parse --verify branch^0)
'
test_expect_success 'F: verify pack' '
verify_packs
'
test_expect_success 'F: verify other commit' '
cat >expect <<-EOF &&
tree $(git rev-parse branch~1^{tree})
parent $(git rev-parse branch~1)
author $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
losing things already?
EOF
git cat-file commit other >actual &&
test_cmp expect actual
'
###
### series G
###
test_expect_success 'G: non-fast-forward update forced' '
old_branch=$(git rev-parse --verify branch^0) &&
test_tick &&
cat >input <<-INPUT_END &&
commit refs/heads/branch
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
losing things already?
COMMIT
from refs/heads/branch~1
INPUT_END
git fast-import --force <input
'
test_expect_success 'G: verify pack' '
verify_packs
'
test_expect_success 'G: branch changed, but logged' '
test $old_branch != $(git rev-parse --verify branch^0) &&
test $old_branch = $(git rev-parse --verify branch@{1})
'
###
### series H
###
test_expect_success 'H: deleteall, add 1' '
test_tick &&
cat >input <<-INPUT_END &&
commit refs/heads/H
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
third
COMMIT
from refs/heads/branch^0
M 644 inline i-will-die
data <<EOF
this file will never exist.
EOF
deleteall
M 644 inline h/e/l/lo
data <<EOF
$file5_data
EOF
INPUT_END
git fast-import <input &&
git whatchanged H
'
test_expect_success 'H: verify pack' '
verify_packs
'
test_expect_success 'H: validate old files removed, new files added' '
cat >expect <<-EOF &&
:100755 000000 f1fb5da718392694d0076d677d6d0e364c79b0bc 0000000000000000000000000000000000000000 D file2/newf
:100644 000000 7123f7f44e39be127c5eb701e5968176ee9d78b1 0000000000000000000000000000000000000000 D file2/oldf
:100755 000000 85df50785d62d3b05ab03d9cbf7e4a0b49449730 0000000000000000000000000000000000000000 D file4
:100644 100644 fcf778cda181eaa1cbc9e9ce3a2e15ee9f9fe791 fcf778cda181eaa1cbc9e9ce3a2e15ee9f9fe791 R100 newdir/interesting h/e/l/lo
:100755 000000 e74b7d465e52746be2b4bae983670711e6e66657 0000000000000000000000000000000000000000 D newdir/exec.sh
EOF
git diff-tree -M -r H^ H >actual &&
compare_diff_raw expect actual
'
test_expect_success 'H: verify file' '
echo "$file5_data" >expect &&
git cat-file blob H:h/e/l/lo >actual &&
test_cmp expect actual
'
###
### series I
###
test_expect_success 'I: export-pack-edges' '
cat >input <<-INPUT_END &&
commit refs/heads/export-boundary
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
we have a border. its only 40 characters wide.
COMMIT
from refs/heads/branch
INPUT_END
git fast-import --export-pack-edges=edges.list <input
'
test_expect_success 'I: verify edge list' '
cat >expect <<-EOF &&
.git/objects/pack/pack-.pack: $(git rev-parse --verify export-boundary)
EOF
sed -e s/pack-.*pack/pack-.pack/ edges.list >actual &&
test_cmp expect actual
'
###
### series J
###
test_expect_success 'J: reset existing branch creates empty commit' '
cat >input <<-INPUT_END &&
commit refs/heads/J
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
create J
COMMIT
from refs/heads/branch
reset refs/heads/J
commit refs/heads/J
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
initialize J
COMMIT
INPUT_END
git fast-import <input
'
test_expect_success 'J: branch has 1 commit, empty tree' '
test 1 = $(git rev-list J | wc -l) &&
test 0 = $(git ls-tree J | wc -l)
'
test_expect_success 'J: tag must fail on empty branch' '
cat >input <<-INPUT_END &&
reset refs/heads/J2
tag wrong_tag
from refs/heads/J2
data <<EOF
Tag branch that was reset.
EOF
INPUT_END
test_must_fail git fast-import <input
'
###
### series K
###
test_expect_success 'K: reinit branch with from' '
cat >input <<-INPUT_END &&
commit refs/heads/K
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
create K
COMMIT
from refs/heads/branch
commit refs/heads/K
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
redo K
COMMIT
from refs/heads/branch^1
INPUT_END
git fast-import <input
'
test_expect_success 'K: verify K^1 = branch^1' '
test $(git rev-parse --verify branch^1) \
= $(git rev-parse --verify K^1)
'
###
### series L
###
test_expect_success 'L: verify internal tree sorting' '
cat >input <<-INPUT_END &&
blob
mark :1
data <<EOF
some data
EOF
blob
mark :2
data <<EOF
other data
EOF
commit refs/heads/L
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
create L
COMMIT
M 644 :1 b.
M 644 :1 b/other
M 644 :1 ba
commit refs/heads/L
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
update L
COMMIT
M 644 :2 b.
M 644 :2 b/other
M 644 :2 ba
INPUT_END
cat >expect <<-EXPECT_END &&
:100644 100644 4268632... 55d3a52... M b.
:040000 040000 0ae5cac... 443c768... M b
:100644 100644 4268632... 55d3a52... M ba
EXPECT_END
git fast-import <input &&
GIT_PRINT_SHA1_ELLIPSIS="yes" git diff-tree --abbrev --raw L^ L >output &&
test_cmp expect output
'
test_expect_success 'L: nested tree copy does not corrupt deltas' '
cat >input <<-INPUT_END &&
blob
mark :1
data <<EOF
the data
EOF
commit refs/heads/L2
committer C O Mitter <[email protected]> 1112912473 -0700
data <<COMMIT
init L2
COMMIT
M 644 :1 a/b/c
M 644 :1 a/b/d
M 644 :1 a/e/f
commit refs/heads/L2
committer C O Mitter <[email protected]> 1112912473 -0700
data <<COMMIT
update L2
COMMIT
C a g
C a/e g/b
M 644 :1 g/b/h
INPUT_END
cat >expect <<-\EOF &&
g/b/f
g/b/h
EOF
test_when_finished "git update-ref -d refs/heads/L2" &&
git fast-import <input &&
git ls-tree L2 g/b/ >tmp &&
cat tmp | cut -f 2 >actual &&
test_cmp expect actual &&
git fsck $(git rev-parse L2)
'
###
### series M
###
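# "R <src> <dst>" renames a file or directory within the commit being
# built; renaming the empty path "" moves the entire root tree.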
test_expect_success 'M: rename file in same subdirectory' '
test_tick &&
cat >input <<-INPUT_END &&
commit refs/heads/M1
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
file rename
COMMIT
from refs/heads/branch^0
R file2/newf file2/n.e.w.f
INPUT_END
cat >expect <<-EOF &&
:100755 100755 f1fb5da718392694d0076d677d6d0e364c79b0bc f1fb5da718392694d0076d677d6d0e364c79b0bc R100 file2/newf file2/n.e.w.f
EOF
git fast-import <input &&
git diff-tree -M -r M1^ M1 >actual &&
compare_diff_raw expect actual
'
test_expect_success 'M: rename file to new subdirectory' '
cat >input <<-INPUT_END &&
commit refs/heads/M2
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
file rename
COMMIT
from refs/heads/branch^0
R file2/newf i/am/new/to/you
INPUT_END
cat >expect <<-EOF &&
:100755 100755 f1fb5da718392694d0076d677d6d0e364c79b0bc f1fb5da718392694d0076d677d6d0e364c79b0bc R100 file2/newf i/am/new/to/you
EOF
git fast-import <input &&
git diff-tree -M -r M2^ M2 >actual &&
compare_diff_raw expect actual
'
test_expect_success 'M: rename subdirectory to new subdirectory' '
cat >input <<-INPUT_END &&
commit refs/heads/M3
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
file rename
COMMIT
from refs/heads/M2^0
R i other/sub
INPUT_END
cat >expect <<-EOF &&
:100755 100755 f1fb5da718392694d0076d677d6d0e364c79b0bc f1fb5da718392694d0076d677d6d0e364c79b0bc R100 i/am/new/to/you other/sub/am/new/to/you
EOF
git fast-import <input &&
git diff-tree -M -r M3^ M3 >actual &&
compare_diff_raw expect actual
'
test_expect_success 'M: rename root to subdirectory' '
cat >input <<-INPUT_END &&
commit refs/heads/M4
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
rename root
COMMIT
from refs/heads/M2^0
R "" sub
INPUT_END
cat >expect <<-EOF &&
:100644 100644 7123f7f44e39be127c5eb701e5968176ee9d78b1 7123f7f44e39be127c5eb701e5968176ee9d78b1 R100 file2/oldf sub/file2/oldf
:100755 100755 85df50785d62d3b05ab03d9cbf7e4a0b49449730 85df50785d62d3b05ab03d9cbf7e4a0b49449730 R100 file4 sub/file4
:100755 100755 f1fb5da718392694d0076d677d6d0e364c79b0bc f1fb5da718392694d0076d677d6d0e364c79b0bc R100 i/am/new/to/you sub/i/am/new/to/you
:100755 100755 e74b7d465e52746be2b4bae983670711e6e66657 e74b7d465e52746be2b4bae983670711e6e66657 R100 newdir/exec.sh sub/newdir/exec.sh
:100644 100644 fcf778cda181eaa1cbc9e9ce3a2e15ee9f9fe791 fcf778cda181eaa1cbc9e9ce3a2e15ee9f9fe791 R100 newdir/interesting sub/newdir/interesting
EOF
git fast-import <input &&
git diff-tree -M -r M4^ M4 >actual &&
cat actual &&
compare_diff_raw expect actual
'
###
### series N
###
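# "C <src> <dst>" copies a file or directory; as with rename, an empty
# source path "" copies the whole root tree.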
test_expect_success 'N: copy file in same subdirectory' '
test_tick &&
cat >input <<-INPUT_END &&
commit refs/heads/N1
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
file copy
COMMIT
from refs/heads/branch^0
C file2/newf file2/n.e.w.f
INPUT_END
cat >expect <<-EOF &&
:100755 100755 f1fb5da718392694d0076d677d6d0e364c79b0bc f1fb5da718392694d0076d677d6d0e364c79b0bc C100 file2/newf file2/n.e.w.f
EOF
git fast-import <input &&
git diff-tree -C --find-copies-harder -r N1^ N1 >actual &&
compare_diff_raw expect actual
'
test_expect_success 'N: copy then modify subdirectory' '
cat >input <<-INPUT_END &&
commit refs/heads/N2
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
clean directory copy
COMMIT
from refs/heads/branch^0
C file2 file3
commit refs/heads/N2
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
modify directory copy
COMMIT
M 644 inline file3/file5
data <<EOF
$file5_data
EOF
INPUT_END
cat >expect <<-EOF &&
:100644 100644 fcf778cda181eaa1cbc9e9ce3a2e15ee9f9fe791 fcf778cda181eaa1cbc9e9ce3a2e15ee9f9fe791 C100 newdir/interesting file3/file5
:100755 100755 f1fb5da718392694d0076d677d6d0e364c79b0bc f1fb5da718392694d0076d677d6d0e364c79b0bc C100 file2/newf file3/newf
:100644 100644 7123f7f44e39be127c5eb701e5968176ee9d78b1 7123f7f44e39be127c5eb701e5968176ee9d78b1 C100 file2/oldf file3/oldf
EOF
git fast-import <input &&
git diff-tree -C --find-copies-harder -r N2^^ N2 >actual &&
compare_diff_raw expect actual
'
test_expect_success 'N: copy dirty subdirectory' '
cat >input <<-INPUT_END &&
commit refs/heads/N3
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
dirty directory copy
COMMIT
from refs/heads/branch^0
M 644 inline file2/file5
data <<EOF
$file5_data
EOF
C file2 file3
D file2/file5
INPUT_END
git fast-import <input &&
test $(git rev-parse N2^{tree}) = $(git rev-parse N3^{tree})
'
test_expect_success 'N: copy directory by id' '
cat >expect <<-\EOF &&
:100755 100755 f1fb5da718392694d0076d677d6d0e364c79b0bc f1fb5da718392694d0076d677d6d0e364c79b0bc C100 file2/newf file3/newf
:100644 100644 7123f7f44e39be127c5eb701e5968176ee9d78b1 7123f7f44e39be127c5eb701e5968176ee9d78b1 C100 file2/oldf file3/oldf
EOF
subdir=$(git rev-parse refs/heads/branch^0:file2) &&
cat >input <<-INPUT_END &&
commit refs/heads/N4
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
copy by tree hash
COMMIT
from refs/heads/branch^0
M 040000 $subdir file3
INPUT_END
git fast-import <input &&
git diff-tree -C --find-copies-harder -r N4^ N4 >actual &&
compare_diff_raw expect actual
'
test_expect_success PIPE 'N: read and copy directory' '
cat >expect <<-\EOF &&
:100755 100755 f1fb5da718392694d0076d677d6d0e364c79b0bc f1fb5da718392694d0076d677d6d0e364c79b0bc C100 file2/newf file3/newf
:100644 100644 7123f7f44e39be127c5eb701e5968176ee9d78b1 7123f7f44e39be127c5eb701e5968176ee9d78b1 C100 file2/oldf file3/oldf
EOF
git update-ref -d refs/heads/N4 &&
rm -f backflow &&
mkfifo backflow &&
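# The fifo carries fast-import responses (fd 3) back into the frontend
# subshell, which reads the "ls" reply to learn the tree hash of file2.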
(
exec <backflow &&
cat <<-EOF &&
commit refs/heads/N4
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
copy by tree hash, part 2
COMMIT
from refs/heads/branch^0
ls "file2"
EOF
read mode type tree filename &&
echo "M 040000 $tree file3"
) |
git fast-import --cat-blob-fd=3 3>backflow &&
git diff-tree -C --find-copies-harder -r N4^ N4 >actual &&
compare_diff_raw expect actual
'
test_expect_success PIPE 'N: empty directory reads as missing' '
cat <<-\EOF >expect &&
OBJNAME
:000000 100644 OBJNAME OBJNAME A unrelated
EOF
echo "missing src" >expect.response &&
git update-ref -d refs/heads/read-empty &&
rm -f backflow &&
mkfifo backflow &&
(
exec <backflow &&
cat <<-EOF &&
commit refs/heads/read-empty
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
read "empty" (missing) directory
COMMIT
M 100644 inline src/greeting
data <<BLOB
hello
BLOB
C src/greeting dst1/non-greeting
C src/greeting unrelated
# leave behind "empty" src directory
D src/greeting
ls "src"
EOF
read -r line &&
printf "%s\n" "$line" >response &&
cat <<-\EOF
D dst1
D dst2
EOF
) |
git fast-import --cat-blob-fd=3 3>backflow &&
test_cmp expect.response response &&
git rev-list read-empty |
git diff-tree -r --root --stdin |
sed "s/$OID_REGEX/OBJNAME/g" >actual &&
test_cmp expect actual
'
test_expect_success 'N: copy root directory by tree hash' '
cat >expect <<-\EOF &&
:100755 000000 f1fb5da718392694d0076d677d6d0e364c79b0bc 0000000000000000000000000000000000000000 D file3/newf
:100644 000000 7123f7f44e39be127c5eb701e5968176ee9d78b1 0000000000000000000000000000000000000000 D file3/oldf
EOF
root=$(git rev-parse refs/heads/branch^0^{tree}) &&
cat >input <<-INPUT_END &&
commit refs/heads/N6
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
copy root directory by tree hash
COMMIT
from refs/heads/branch^0
M 040000 $root ""
INPUT_END
git fast-import <input &&
git diff-tree -C --find-copies-harder -r N4 N6 >actual &&
compare_diff_raw expect actual
'
test_expect_success 'N: copy root by path' '
cat >expect <<-\EOF &&
:100755 100755 f1fb5da718392694d0076d677d6d0e364c79b0bc f1fb5da718392694d0076d677d6d0e364c79b0bc C100 file2/newf oldroot/file2/newf
:100644 100644 7123f7f44e39be127c5eb701e5968176ee9d78b1 7123f7f44e39be127c5eb701e5968176ee9d78b1 C100 file2/oldf oldroot/file2/oldf
:100755 100755 85df50785d62d3b05ab03d9cbf7e4a0b49449730 85df50785d62d3b05ab03d9cbf7e4a0b49449730 C100 file4 oldroot/file4
:100755 100755 e74b7d465e52746be2b4bae983670711e6e66657 e74b7d465e52746be2b4bae983670711e6e66657 C100 newdir/exec.sh oldroot/newdir/exec.sh
:100644 100644 fcf778cda181eaa1cbc9e9ce3a2e15ee9f9fe791 fcf778cda181eaa1cbc9e9ce3a2e15ee9f9fe791 C100 newdir/interesting oldroot/newdir/interesting
EOF
cat >input <<-INPUT_END &&
commit refs/heads/N-copy-root-path
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
copy root directory by (empty) path
COMMIT
from refs/heads/branch^0
C "" oldroot
INPUT_END
git fast-import <input &&
git diff-tree -C --find-copies-harder -r branch N-copy-root-path >actual &&
compare_diff_raw expect actual
'
test_expect_success 'N: delete directory by copying' '
cat >expect <<-\EOF &&
OBJID
:100644 000000 OBJID OBJID D foo/bar/qux
OBJID
:000000 100644 OBJID OBJID A foo/bar/baz
:000000 100644 OBJID OBJID A foo/bar/qux
EOF
empty_tree=$(git mktree </dev/null) &&
cat >input <<-INPUT_END &&
commit refs/heads/N-delete
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
collect data to be deleted
COMMIT
deleteall
M 100644 inline foo/bar/baz
data <<DATA_END
hello
DATA_END
C "foo/bar/baz" "foo/bar/qux"
C "foo/bar/baz" "foo/bar/quux/1"
C "foo/bar/baz" "foo/bar/quuux"
M 040000 $empty_tree foo/bar/quux
M 040000 $empty_tree foo/bar/quuux
commit refs/heads/N-delete
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
delete subdirectory
COMMIT
M 040000 $empty_tree foo/bar/qux
INPUT_END
git fast-import <input &&
git rev-list N-delete |
git diff-tree -r --stdin --root --always |
sed -e "s/$OID_REGEX/OBJID/g" >actual &&
test_cmp expect actual
'
test_expect_success 'N: modify copied tree' '
cat >expect <<-\EOF &&
:100644 100644 fcf778cda181eaa1cbc9e9ce3a2e15ee9f9fe791 fcf778cda181eaa1cbc9e9ce3a2e15ee9f9fe791 C100 newdir/interesting file3/file5
:100755 100755 f1fb5da718392694d0076d677d6d0e364c79b0bc f1fb5da718392694d0076d677d6d0e364c79b0bc C100 file2/newf file3/newf
:100644 100644 7123f7f44e39be127c5eb701e5968176ee9d78b1 7123f7f44e39be127c5eb701e5968176ee9d78b1 C100 file2/oldf file3/oldf
EOF
subdir=$(git rev-parse refs/heads/branch^0:file2) &&
cat >input <<-INPUT_END &&
commit refs/heads/N5
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
copy by tree hash
COMMIT
from refs/heads/branch^0
M 040000 $subdir file3
commit refs/heads/N5
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
modify directory copy
COMMIT
M 644 inline file3/file5
data <<EOF
$file5_data
EOF
INPUT_END
git fast-import <input &&
git diff-tree -C --find-copies-harder -r N5^^ N5 >actual &&
compare_diff_raw expect actual
'
test_expect_success 'N: reject foo/ syntax' '
subdir=$(git rev-parse refs/heads/branch^0:file2) &&
test_must_fail git fast-import <<-INPUT_END
commit refs/heads/N5B
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
copy with invalid syntax
COMMIT
from refs/heads/branch^0
M 040000 $subdir file3/
INPUT_END
'
test_expect_success 'N: reject foo/ syntax in copy source' '
test_must_fail git fast-import <<-INPUT_END
commit refs/heads/N5C
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
copy with invalid syntax
COMMIT
from refs/heads/branch^0
C file2/ file3
INPUT_END
'
test_expect_success 'N: reject foo/ syntax in rename source' '
test_must_fail git fast-import <<-INPUT_END
commit refs/heads/N5D
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
rename with invalid syntax
COMMIT
from refs/heads/branch^0
R file2/ file3
INPUT_END
'
test_expect_success 'N: reject foo/ syntax in ls argument' '
test_must_fail git fast-import <<-INPUT_END
commit refs/heads/N5E
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
copy with invalid syntax
COMMIT
from refs/heads/branch^0
ls "file2/"
INPUT_END
'
test_expect_success 'N: copy to root by id and modify' '
echo "hello, world" >expect.foo &&
echo hello >expect.bar &&
git fast-import <<-SETUP_END &&
commit refs/heads/N7
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
hello, tree
COMMIT
deleteall
M 644 inline foo/bar
data <<EOF
hello
EOF
SETUP_END
tree=$(git rev-parse --verify N7:) &&
git fast-import <<-INPUT_END &&
commit refs/heads/N8
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
copy to root by id and modify
COMMIT
M 040000 $tree ""
M 644 inline foo/foo
data <<EOF
hello, world
EOF
INPUT_END
git show N8:foo/foo >actual.foo &&
git show N8:foo/bar >actual.bar &&
test_cmp expect.foo actual.foo &&
test_cmp expect.bar actual.bar
'
test_expect_success 'N: extract subtree' '
branch=$(git rev-parse --verify refs/heads/branch^{tree}) &&
cat >input <<-INPUT_END &&
commit refs/heads/N9
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
extract subtree branch:newdir
COMMIT
M 040000 $branch ""
C "newdir" ""
INPUT_END
git fast-import <input &&
git diff --exit-code branch:newdir N9
'
test_expect_success 'N: modify subtree, extract it, and modify again' '
echo hello >expect.baz &&
echo hello, world >expect.qux &&
git fast-import <<-SETUP_END &&
commit refs/heads/N10
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
hello, tree
COMMIT
deleteall
M 644 inline foo/bar/baz
data <<EOF
hello
EOF
SETUP_END
tree=$(git rev-parse --verify N10:) &&
git fast-import <<-INPUT_END &&
commit refs/heads/N11
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
copy to root by id and modify
COMMIT
M 040000 $tree ""
M 100644 inline foo/bar/qux
data <<EOF
hello, world
EOF
R "foo" ""
C "bar/qux" "bar/quux"
INPUT_END
git show N11:bar/baz >actual.baz &&
git show N11:bar/qux >actual.qux &&
git show N11:bar/quux >actual.quux &&
test_cmp expect.baz actual.baz &&
test_cmp expect.qux actual.qux &&
test_cmp expect.qux actual.quux
'
###
### series O
###
test_expect_success 'O: comments are all skipped' '
cat >input <<-INPUT_END &&
#we will
commit refs/heads/O1
# -- ignore all of this text
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
# $GIT_COMMITTER_NAME has inserted this here for his benefit.
data <<COMMIT
dirty directory copy
COMMIT
# do not forget the import blank line!
#
# yes, we started from our usual base of branch^0.
# i like branch^0.
from refs/heads/branch^0
# and we need to reuse file2/file5 from N3 above.
M 644 inline file2/file5
# otherwise the tree will be different
data <<EOF
$file5_data
EOF
# do not forget to copy file2 to file3
C file2 file3
#
# or to delete file5 from file2.
D file2/file5
# are we done yet?
INPUT_END
git fast-import <input &&
test $(git rev-parse N3) = $(git rev-parse O1)
'
test_expect_success 'O: blank lines not necessary after data commands' '
cat >input <<-INPUT_END &&
commit refs/heads/O2
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
dirty directory copy
COMMIT
from refs/heads/branch^0
M 644 inline file2/file5
data <<EOF
$file5_data
EOF
C file2 file3
D file2/file5
INPUT_END
git fast-import <input &&
test $(git rev-parse N3) = $(git rev-parse O2)
'
test_expect_success 'O: repack before next test' '
git repack -a -d
'
test_expect_success 'O: blank lines not necessary after other commands' '
cat >input <<-INPUT_END &&
commit refs/heads/O3
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
zstring
COMMIT
commit refs/heads/O3
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
zof
COMMIT
checkpoint
commit refs/heads/O3
mark :5
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
zempty
COMMIT
checkpoint
commit refs/heads/O3
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
zcommits
COMMIT
reset refs/tags/O3-2nd
from :5
reset refs/tags/O3-3rd
from :5
INPUT_END
cat >expect <<-INPUT_END &&
string
of
empty
commits
INPUT_END
git fast-import <input &&
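# Expect 8 files: one pack (with its .idx) from the repack above plus
# three packs from this import, one closed at each checkpoint and one
# at end of stream.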
test 8 = $(find .git/objects/pack -type f | grep -v multi-pack-index | wc -l) &&
test $(git rev-parse refs/tags/O3-2nd) = $(git rev-parse O3^) &&
git log --reverse --pretty=oneline O3 | sed s/^.*z// >actual &&
test_cmp expect actual
'
test_expect_success 'O: progress outputs as requested by input' '
cat >input <<-INPUT_END &&
commit refs/heads/O4
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
zstring
COMMIT
commit refs/heads/O4
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
zof
COMMIT
progress Two commits down, 2 to go!
commit refs/heads/O4
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
zempty
COMMIT
progress Three commits down, 1 to go!
commit refs/heads/O4
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
zcommits
COMMIT
progress done!
INPUT_END
git fast-import <input >actual &&
grep "progress " <input >expect &&
test_cmp expect actual
'
###
### series P (gitlinks)
###
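# Mode 160000 marks a gitlink: the tree entry records the submodule's
# commit object id instead of a blob or tree.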
test_expect_success 'P: superproject & submodule mix' '
cat >input <<-INPUT_END &&
blob
mark :1
data 10
test file
reset refs/heads/sub
commit refs/heads/sub
mark :2
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data 12
sub_initial
M 100644 :1 file
blob
mark :3
data <<DATAEND
[submodule "sub"]
path = sub
url = "$(pwd)/sub"
DATAEND
commit refs/heads/subuse1
mark :4
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data 8
initial
from refs/heads/master
M 100644 :3 .gitmodules
M 160000 :2 sub
blob
mark :5
data 20
test file
more data
commit refs/heads/sub
mark :6
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data 11
sub_second
from :2
M 100644 :5 file
commit refs/heads/subuse1
mark :7
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data 7
second
from :4
M 160000 :6 sub
INPUT_END
git fast-import <input &&
git checkout subuse1 &&
rm -rf sub &&
mkdir sub &&
(
cd sub &&
git init &&
git fetch --update-head-ok .. refs/heads/sub:refs/heads/master &&
git checkout master
) &&
git submodule init &&
git submodule update
'
test_expect_success 'P: verbatim SHA gitlinks' '
SUBLAST=$(git rev-parse --verify sub) &&
SUBPREV=$(git rev-parse --verify sub^) &&
cat >input <<-INPUT_END &&
blob
mark :1
data <<DATAEND
[submodule "sub"]
path = sub
url = "$(pwd)/sub"
DATAEND
commit refs/heads/subuse2
mark :2
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data 8
initial
from refs/heads/master
M 100644 :1 .gitmodules
M 160000 $SUBPREV sub
commit refs/heads/subuse2
mark :3
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data 7
second
from :2
M 160000 $SUBLAST sub
INPUT_END
git branch -D sub &&
git gc &&
git prune &&
git fast-import <input &&
test $(git rev-parse --verify subuse2) = $(git rev-parse --verify subuse1)
'
test_expect_success 'P: fail on inline gitlink' '
test_tick &&
cat >input <<-INPUT_END &&
commit refs/heads/subuse3
mark :1
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
corrupt
COMMIT
from refs/heads/subuse2
M 160000 inline sub
data <<DATA
$SUBPREV
DATA
INPUT_END
test_must_fail git fast-import <input
'
test_expect_success 'P: fail on blob mark in gitlink' '
test_tick &&
cat >input <<-INPUT_END &&
blob
mark :1
data <<DATA
$SUBPREV
DATA
commit refs/heads/subuse3
mark :2
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
corrupt
COMMIT
from refs/heads/subuse2
M 160000 :1 sub
INPUT_END
test_must_fail git fast-import <input
'
###
### series Q (notes)
###
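# "N <dataref> <commit-ish>" attaches a note blob to a commit; in the
# notes tree the note is stored under a path named after the annotated
# commit's object id.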
test_expect_success 'Q: commit notes' '
note1_data="The first note for the first commit" &&
note2_data="The first note for the second commit" &&
note3_data="The first note for the third commit" &&
note1b_data="The second note for the first commit" &&
note1c_data="The third note for the first commit" &&
note2b_data="The second note for the second commit" &&
test_tick &&
cat >input <<-INPUT_END &&
blob
mark :2
data <<EOF
$file2_data
EOF
commit refs/heads/notes-test
mark :3
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
first (:3)
COMMIT
M 644 :2 file2
blob
mark :4
data $file4_len
$file4_data
commit refs/heads/notes-test
mark :5
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
second (:5)
COMMIT
M 644 :4 file4
commit refs/heads/notes-test
mark :6
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
third (:6)
COMMIT
M 644 inline file5
data <<EOF
$file5_data
EOF
M 755 inline file6
data <<EOF
$file6_data
EOF
blob
mark :7
data <<EOF
$note1_data
EOF
blob
mark :8
data <<EOF
$note2_data
EOF
commit refs/notes/foobar
mark :9
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
notes (:9)
COMMIT
N :7 :3
N :8 :5
N inline :6
data <<EOF
$note3_data
EOF
commit refs/notes/foobar
mark :10
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
notes (:10)
COMMIT
N inline :3
data <<EOF
$note1b_data
EOF
commit refs/notes/foobar2
mark :11
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
notes (:11)
COMMIT
N inline :3
data <<EOF
$note1c_data
EOF
commit refs/notes/foobar
mark :12
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
notes (:12)
COMMIT
deleteall
N inline :5
data <<EOF
$note2b_data
EOF
INPUT_END
git fast-import <input &&
git whatchanged notes-test
'
test_expect_success 'Q: verify pack' '
verify_packs
'
test_expect_success 'Q: verify first commit' '
commit1=$(git rev-parse notes-test~2) &&
commit2=$(git rev-parse notes-test^) &&
commit3=$(git rev-parse notes-test) &&
cat >expect <<-EOF &&
author $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
first (:3)
EOF
git cat-file commit notes-test~2 | sed 1d >actual &&
test_cmp expect actual
'
test_expect_success 'Q: verify second commit' '
cat >expect <<-EOF &&
parent $commit1
author $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
second (:5)
EOF
git cat-file commit notes-test^ | sed 1d >actual &&
test_cmp expect actual
'
test_expect_success 'Q: verify third commit' '
cat >expect <<-EOF &&
parent $commit2
author $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
third (:6)
EOF
git cat-file commit notes-test | sed 1d >actual &&
test_cmp expect actual
'
test_expect_success 'Q: verify first notes commit' '
cat >expect <<-EOF &&
author $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
notes (:9)
EOF
git cat-file commit refs/notes/foobar~2 | sed 1d >actual &&
test_cmp expect actual
'
test_expect_success 'Q: verify first notes tree' '
cat >expect.unsorted <<-EOF &&
100644 blob $commit1
100644 blob $commit2
100644 blob $commit3
EOF
cat expect.unsorted | sort >expect &&
git cat-file -p refs/notes/foobar~2^{tree} | sed "s/ [0-9a-f]* / /" >actual &&
test_cmp expect actual
'
test_expect_success 'Q: verify first note for first commit' '
echo "$note1_data" >expect &&
git cat-file blob refs/notes/foobar~2:$commit1 >actual &&
test_cmp expect actual
'
test_expect_success 'Q: verify first note for second commit' '
echo "$note2_data" >expect &&
git cat-file blob refs/notes/foobar~2:$commit2 >actual &&
test_cmp expect actual
'
test_expect_success 'Q: verify first note for third commit' '
echo "$note3_data" >expect &&
git cat-file blob refs/notes/foobar~2:$commit3 >actual &&
test_cmp expect actual
'
test_expect_success 'Q: verify second notes commit' '
cat >expect <<-EOF &&
parent $(git rev-parse --verify refs/notes/foobar~2)
author $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
notes (:10)
EOF
git cat-file commit refs/notes/foobar^ | sed 1d >actual &&
test_cmp expect actual
'
test_expect_success 'Q: verify second notes tree' '
cat >expect.unsorted <<-EOF &&
100644 blob $commit1
100644 blob $commit2
100644 blob $commit3
EOF
cat expect.unsorted | sort >expect &&
git cat-file -p refs/notes/foobar^^{tree} | sed "s/ [0-9a-f]* / /" >actual &&
test_cmp expect actual
'
test_expect_success 'Q: verify second note for first commit' '
echo "$note1b_data" >expect &&
git cat-file blob refs/notes/foobar^:$commit1 >actual &&
test_cmp expect actual
'
test_expect_success 'Q: verify first note for second commit' '
echo "$note2_data" >expect &&
git cat-file blob refs/notes/foobar^:$commit2 >actual &&
test_cmp expect actual
'
test_expect_success 'Q: verify first note for third commit' '
echo "$note3_data" >expect &&
git cat-file blob refs/notes/foobar^:$commit3 >actual &&
test_cmp expect actual
'
test_expect_success 'Q: verify third notes commit' '
cat >expect <<-EOF &&
author $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
notes (:11)
EOF
git cat-file commit refs/notes/foobar2 | sed 1d >actual &&
test_cmp expect actual
'
test_expect_success 'Q: verify third notes tree' '
cat >expect.unsorted <<-EOF &&
100644 blob $commit1
EOF
cat expect.unsorted | sort >expect &&
git cat-file -p refs/notes/foobar2^{tree} | sed "s/ [0-9a-f]* / /" >actual &&
test_cmp expect actual
'
test_expect_success 'Q: verify third note for first commit' '
echo "$note1c_data" >expect &&
git cat-file blob refs/notes/foobar2:$commit1 >actual &&
test_cmp expect actual
'
test_expect_success 'Q: verify fourth notes commit' '
cat >expect <<-EOF &&
parent $(git rev-parse --verify refs/notes/foobar^)
author $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
notes (:12)
EOF
git cat-file commit refs/notes/foobar | sed 1d >actual &&
test_cmp expect actual
'
test_expect_success 'Q: verify fourth notes tree' '
cat >expect.unsorted <<-EOF &&
100644 blob $commit2
EOF
cat expect.unsorted | sort >expect &&
git cat-file -p refs/notes/foobar^{tree} | sed "s/ [0-9a-f]* / /" >actual &&
test_cmp expect actual
'
test_expect_success 'Q: verify second note for second commit' '
echo "$note2b_data" >expect &&
git cat-file blob refs/notes/foobar:$commit2 >actual &&
test_cmp expect actual
'
test_expect_success 'Q: deny note on empty branch' '
cat >input <<-EOF &&
reset refs/heads/Q0
commit refs/heads/note-Q0
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
Note for an empty branch.
COMMIT
N inline refs/heads/Q0
data <<NOTE
some note
NOTE
EOF
test_must_fail git fast-import <input
'
###
### series R (feature and option)
###
test_expect_success 'R: abort on unsupported feature' '
cat >input <<-EOF &&
feature no-such-feature-exists
EOF
test_must_fail git fast-import <input
'
test_expect_success 'R: supported feature is accepted' '
cat >input <<-EOF &&
feature date-format=now
EOF
git fast-import <input
'
test_expect_success 'R: abort on receiving feature after data command' '
cat >input <<-EOF &&
blob
data 3
hi
feature date-format=now
EOF
test_must_fail git fast-import <input
'
test_expect_success 'R: only one import-marks feature allowed per stream' '
cat >input <<-EOF &&
feature import-marks=git.marks
feature import-marks=git2.marks
EOF
test_must_fail git fast-import <input
'
test_expect_success 'R: export-marks feature results in a marks file being created' '
cat >input <<-EOF &&
feature export-marks=git.marks
blob
mark :1
data 3
hi
EOF
cat input | git fast-import &&
grep :1 git.marks
'
test_expect_success 'R: export-marks options can be overridden by commandline options' '
cat input | git fast-import --export-marks=other.marks &&
grep :1 other.marks
'
test_expect_success 'R: catch typo in marks file name' '
test_must_fail git fast-import --import-marks=nonexistent.marks </dev/null &&
echo "feature import-marks=nonexistent.marks" |
test_must_fail git fast-import
'
test_expect_success 'R: import and output marks can be the same file' '
rm -f io.marks &&
blob=$(echo hi | git hash-object --stdin) &&
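# A marks file maps ":<mark>" to an object id, one entry per line.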
cat >expect <<-EOF &&
:1 $blob
:2 $blob
EOF
git fast-import --export-marks=io.marks <<-\EOF &&
blob
mark :1
data 3
hi
EOF
git fast-import --import-marks=io.marks --export-marks=io.marks <<-\EOF &&
blob
mark :2
data 3
hi
EOF
test_cmp expect io.marks
'
test_expect_success 'R: --import-marks=foo --export-marks=foo to create foo fails' '
rm -f io.marks &&
test_must_fail git fast-import --import-marks=io.marks --export-marks=io.marks <<-\EOF
blob
mark :1
data 3
hi
EOF
'
test_expect_success 'R: --import-marks-if-exists' '
rm -f io.marks &&
blob=$(echo hi | git hash-object --stdin) &&
echo ":1 $blob" >expect &&
git fast-import --import-marks-if-exists=io.marks --export-marks=io.marks <<-\EOF &&
blob
mark :1
data 3
hi
EOF
test_cmp expect io.marks
'
test_expect_success 'R: feature import-marks-if-exists' '
rm -f io.marks &&
git fast-import --export-marks=io.marks <<-\EOF &&
feature import-marks-if-exists=not_io.marks
EOF
test_must_be_empty io.marks &&
blob=$(echo hi | git hash-object --stdin) &&
echo ":1 $blob" >io.marks &&
echo ":1 $blob" >expect &&
echo ":2 $blob" >>expect &&
git fast-import --export-marks=io.marks <<-\EOF &&
feature import-marks-if-exists=io.marks
blob
mark :2
data 3
hi
EOF
test_cmp expect io.marks &&
echo ":3 $blob" >>expect &&
git fast-import --import-marks=io.marks \
--export-marks=io.marks <<-\EOF &&
feature import-marks-if-exists=not_io.marks
blob
mark :3
data 3
hi
EOF
test_cmp expect io.marks &&
git fast-import --import-marks-if-exists=not_io.marks \
--export-marks=io.marks <<-\EOF &&
feature import-marks-if-exists=io.marks
EOF
test_must_be_empty io.marks
'
test_expect_success 'R: import to output marks works without any content' '
cat >input <<-EOF &&
feature import-marks=marks.out
feature export-marks=marks.new
EOF
cat input | git fast-import &&
test_cmp marks.out marks.new
'
test_expect_success 'R: import marks prefers commandline marks file over the stream' '
cat >input <<-EOF &&
feature import-marks=nonexistent.marks
feature export-marks=marks.new
EOF
cat input | git fast-import --import-marks=marks.out &&
test_cmp marks.out marks.new
'
test_expect_success 'R: multiple --import-marks= should be honoured' '
cat >input <<-EOF &&
feature import-marks=nonexistent.marks
feature export-marks=combined.marks
EOF
head -n2 marks.out > one.marks &&
tail -n +3 marks.out > two.marks &&
git fast-import --import-marks=one.marks --import-marks=two.marks <input &&
test_cmp marks.out combined.marks
'
test_expect_success 'R: feature relative-marks should be honoured' '
cat >input <<-EOF &&
feature relative-marks
feature import-marks=relative.in
feature export-marks=relative.out
EOF
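# With relative-marks in effect, marks paths are resolved relative to
# .git/info/fast-import/.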
mkdir -p .git/info/fast-import/ &&
cp marks.new .git/info/fast-import/relative.in &&
git fast-import <input &&
test_cmp marks.new .git/info/fast-import/relative.out
'
test_expect_success 'R: feature no-relative-marks should be honoured' '
cat >input <<-EOF &&
feature relative-marks
feature import-marks=relative.in
feature no-relative-marks
feature export-marks=non-relative.out
EOF
git fast-import <input &&
test_cmp marks.new non-relative.out
'
test_expect_success 'R: feature ls supported' '
echo "feature ls" |
git fast-import
'
test_expect_success 'R: feature cat-blob supported' '
echo "feature cat-blob" |
git fast-import
'
test_expect_success 'R: cat-blob-fd must be a nonnegative integer' '
test_must_fail git fast-import --cat-blob-fd=-1 </dev/null
'
test_expect_success !MINGW 'R: print old blob' '
blob=$(echo "yes it can" | git hash-object -w --stdin) &&
cat >expect <<-EOF &&
${blob} blob 11
yes it can
EOF
echo "cat-blob $blob" |
git fast-import --cat-blob-fd=6 6>actual &&
test_cmp expect actual
'
test_expect_success !MINGW 'R: in-stream cat-blob-fd not respected' '
echo hello >greeting &&
blob=$(git hash-object -w greeting) &&
cat >expect <<-EOF &&
${blob} blob 6
hello
EOF
git fast-import --cat-blob-fd=3 3>actual.3 >actual.1 <<-EOF &&
cat-blob $blob
EOF
test_cmp expect actual.3 &&
test_must_be_empty actual.1 &&
git fast-import 3>actual.3 >actual.1 <<-EOF &&
option cat-blob-fd=3
cat-blob $blob
EOF
test_must_be_empty actual.3 &&
test_cmp expect actual.1
'
test_expect_success !MINGW 'R: print mark for new blob' '
echo "effluentish" | git hash-object --stdin >expect &&
git fast-import --cat-blob-fd=6 6>actual <<-\EOF &&
blob
mark :1
data <<BLOB_END
effluentish
BLOB_END
get-mark :1
EOF
test_cmp expect actual
'
test_expect_success !MINGW 'R: print new blob' '
blob=$(echo "yep yep yep" | git hash-object --stdin) &&
cat >expect <<-EOF &&
${blob} blob 12
yep yep yep
EOF
git fast-import --cat-blob-fd=6 6>actual <<-\EOF &&
blob
mark :1
data <<BLOB_END
yep yep yep
BLOB_END
cat-blob :1
EOF
test_cmp expect actual
'
test_expect_success !MINGW 'R: print new blob by sha1' '
blob=$(echo "a new blob named by sha1" | git hash-object --stdin) &&
cat >expect <<-EOF &&
${blob} blob 25
a new blob named by sha1
EOF
git fast-import --cat-blob-fd=6 6>actual <<-EOF &&
blob
data <<BLOB_END
a new blob named by sha1
BLOB_END
cat-blob $blob
EOF
test_cmp expect actual
'
test_expect_success 'setup: big file' '
(
echo "the quick brown fox jumps over the lazy dog" >big &&
for i in 1 2 3
do
cat big big big big >bigger &&
cat bigger bigger bigger bigger >big ||
exit
done
)
'
test_expect_success 'R: print two blobs to stdout' '
blob1=$(git hash-object big) &&
blob1_len=$(wc -c <big) &&
blob2=$(echo hello | git hash-object --stdin) &&
{
echo ${blob1} blob $blob1_len &&
cat big &&
cat <<-EOF
${blob2} blob 6
hello
EOF
} >expect &&
{
cat <<-\END_PART1 &&
blob
mark :1
data <<data_end
END_PART1
cat big &&
cat <<-\EOF
data_end
blob
mark :2
data <<data_end
hello
data_end
cat-blob :1
cat-blob :2
EOF
} |
git fast-import >actual &&
test_cmp expect actual
'
test_expect_success PIPE 'R: copy using cat-file' '
expect_id=$(git hash-object big) &&
expect_len=$(wc -c <big) &&
echo $expect_id blob $expect_len >expect.response &&
rm -f blobs &&
cat >frontend <<-\FRONTEND_END &&
#!/bin/sh
FRONTEND_END
mkfifo blobs &&
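# Round trip: stream the big blob, have fast-import cat-blob it back on
# fd 3 through the fifo, then feed those bytes back in as inline data
# for file3.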
(
export GIT_COMMITTER_NAME GIT_COMMITTER_EMAIL GIT_COMMITTER_DATE &&
cat <<-\EOF &&
feature cat-blob
blob
mark :1
data <<BLOB
EOF
cat big &&
cat <<-\EOF &&
BLOB
cat-blob :1
EOF
read blob_id type size <&3 &&
echo "$blob_id $type $size" >response &&
test_copy_bytes $size >blob <&3 &&
read newline <&3 &&
cat <<-EOF &&
commit refs/heads/copied
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
copy big file as file3
COMMIT
M 644 inline file3
data <<BLOB
EOF
cat blob &&
echo BLOB
) 3<blobs |
git fast-import --cat-blob-fd=3 3>blobs &&
git show copied:file3 >actual &&
test_cmp expect.response response &&
test_cmp big actual
'
test_expect_success PIPE 'R: print blob mid-commit' '
rm -f blobs &&
echo "A blob from _before_ the commit." >expect &&
mkfifo blobs &&
(
exec 3<blobs &&
cat <<-EOF &&
feature cat-blob
blob
mark :1
data <<BLOB
A blob from _before_ the commit.
BLOB
commit refs/heads/temporary
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
Empty commit
COMMIT
cat-blob :1
EOF
read blob_id type size <&3 &&
test_copy_bytes $size >actual <&3 &&
read newline <&3 &&
echo
) |
git fast-import --cat-blob-fd=3 3>blobs &&
test_cmp expect actual
'
test_expect_success PIPE 'R: print staged blob within commit' '
rm -f blobs &&
echo "A blob from _within_ the commit." >expect &&
mkfifo blobs &&
(
exec 3<blobs &&
cat <<-EOF &&
feature cat-blob
commit refs/heads/within
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
Empty commit
COMMIT
M 644 inline within
data <<BLOB
A blob from _within_ the commit.
BLOB
EOF
to_get=$(
echo "A blob from _within_ the commit." |
git hash-object --stdin
) &&
echo "cat-blob $to_get" &&
read blob_id type size <&3 &&
test_copy_bytes $size >actual <&3 &&
read newline <&3 &&
echo deleteall
) |
git fast-import --cat-blob-fd=3 3>blobs &&
test_cmp expect actual
'
test_expect_success 'R: quiet option results in no stats being output' '
cat >input <<-EOF &&
option git quiet
blob
data 3
hi
EOF
cat input | git fast-import 2> output &&
test_must_be_empty output
'
test_expect_success 'R: feature done means terminating "done" is mandatory' '
echo feature done | test_must_fail git fast-import &&
test_must_fail git fast-import --done </dev/null
'
test_expect_success 'R: terminating "done" with trailing gibberish is ok' '
git fast-import <<-\EOF &&
feature done
done
trailing gibberish
EOF
git fast-import <<-\EOF
done
more trailing gibberish
EOF
'
test_expect_success 'R: terminating "done" within commit' '
cat >expect <<-\EOF &&
OBJID
:000000 100644 OBJID OBJID A hello.c
:000000 100644 OBJID OBJID A hello2.c
EOF
git fast-import <<-EOF &&
commit refs/heads/done-ends
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<EOT
Commit terminated by "done" command
EOT
M 100644 inline hello.c
data <<EOT
Hello, world.
EOT
C hello.c hello2.c
done
EOF
git rev-list done-ends |
git diff-tree -r --stdin --root --always |
sed -e "s/$OID_REGEX/OBJID/g" >actual &&
test_cmp expect actual
'
test_expect_success 'R: die on unknown option' '
cat >input <<-EOF &&
option git non-existing-option
EOF
test_must_fail git fast-import <input
'
test_expect_success 'R: unknown commandline options are rejected' '
test_must_fail git fast-import --non-existing-option < /dev/null
'
test_expect_success 'R: die on invalid option argument' '
echo "option git active-branches=-5" |
test_must_fail git fast-import &&
echo "option git depth=" |
test_must_fail git fast-import &&
test_must_fail git fast-import --depth="5 elephants" </dev/null
'
test_expect_success 'R: ignore non-git options' '
cat >input <<-EOF &&
option non-existing-vcs non-existing-option
EOF
git fast-import <input
'
test_expect_success 'R: corrupt lines do not mess up marks file' '
rm -f io.marks &&
blob=$(echo hi | git hash-object --stdin) &&
cat >expect <<-EOF &&
:3 0000000000000000000000000000000000000000
:1 $blob
:2 $blob
EOF
cp expect io.marks &&
test_must_fail git fast-import --import-marks=io.marks --export-marks=io.marks <<-\EOF &&
EOF
test_cmp expect io.marks
'
##
## R: very large blobs
##
test_expect_success 'R: blob bigger than threshold' '
blobsize=$((2*1024*1024 + 53)) &&
test-tool genrandom bar $blobsize >expect &&
cat >input <<-INPUT_END &&
commit refs/heads/big-file
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
R - big file
COMMIT
M 644 inline big1
data $blobsize
INPUT_END
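# "data $blobsize" is a byte-counted data command, so the raw blob
# bytes are appended directly after the header rather than delimited
# by a here-document terminator.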
cat expect >>input &&
cat >>input <<-INPUT_END &&
M 644 inline big2
data $blobsize
INPUT_END
cat expect >>input &&
echo >>input &&
test_create_repo R &&
git --git-dir=R/.git config fastimport.unpackLimit 0 &&
git --git-dir=R/.git fast-import --big-file-threshold=1 <input
'
test_expect_success 'R: verify created pack' '
(
cd R &&
verify_packs -v > ../verify
)
'
test_expect_success 'R: verify written objects' '
git --git-dir=R/.git cat-file blob big-file:big1 >actual &&
test_cmp_bin expect actual &&
a=$(git --git-dir=R/.git rev-parse big-file:big1) &&
b=$(git --git-dir=R/.git rev-parse big-file:big2) &&
test $a = $b
'
test_expect_success 'R: blob appears only once' '
n=$(grep $a verify | wc -l) &&
test 1 = $n
'
###
### series S
###
#
# Make sure missing spaces and EOLs after mark references
# cause errors.
#
# Setup:
#
#   1--2--4
#    \   /
#     -3-
#
#   commit marks: 301, 302, 303, 304
#   blob marks:   403, 404, resp.
#   note mark:    202
#
# The error message when a space is missing somewhere other than at
# the end of the line is:
#
# Missing space after ..
#
# or when extra characters come after the mark at the end
# of the line:
#
# Garbage after ..
#
# or when the dataref is neither "inline " nor a known SHA1,
#
# Invalid dataref ..
#
test_expect_success 'S: initialize for S tests' '
test_tick &&
cat >input <<-INPUT_END &&
commit refs/heads/S
mark :301
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
commit 1
COMMIT
M 100644 inline hello.c
data <<BLOB
blob 1
BLOB
commit refs/heads/S
mark :302
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
commit 2
COMMIT
from :301
M 100644 inline hello.c
data <<BLOB
blob 2
BLOB
blob
mark :403
data <<BLOB
blob 3
BLOB
blob
mark :202
data <<BLOB
note 2
BLOB
INPUT_END
git fast-import --export-marks=marks <input
'
#
# filemodify, three datarefs
#
test_expect_success 'S: filemodify with garbage after mark must fail' '
test_must_fail git fast-import --import-marks=marks <<-EOF 2>err &&
commit refs/heads/S
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
commit N
COMMIT
M 100644 :403x hello.c
EOF
test_i18ngrep "space after mark" err
'
# inline is misspelled; fast-import thinks it is some unknown dataref
test_expect_success 'S: filemodify with garbage after inline must fail' '
test_must_fail git fast-import --import-marks=marks <<-EOF 2>err &&
commit refs/heads/S
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
commit N
COMMIT
M 100644 inlineX hello.c
data <<BLOB
inline
BLOB
EOF
test_i18ngrep "nvalid dataref" err
'
test_expect_success 'S: filemodify with garbage after sha1 must fail' '
sha1=$(grep :403 marks | cut -d\ -f2) &&
test_must_fail git fast-import --import-marks=marks <<-EOF 2>err &&
commit refs/heads/S
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
commit N
COMMIT
M 100644 ${sha1}x hello.c
EOF
test_i18ngrep "space after SHA1" err
'
#
# notemodify, three ways to say dataref
#
test_expect_success 'S: notemodify with garbage after mark dataref must fail' '
test_must_fail git fast-import --import-marks=marks <<-EOF 2>err &&
commit refs/heads/S
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
commit S note dataref markref
COMMIT
N :202x :302
EOF
test_i18ngrep "space after mark" err
'
test_expect_success 'S: notemodify with garbage after inline dataref must fail' '
test_must_fail git fast-import --import-marks=marks <<-EOF 2>err &&
commit refs/heads/S
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
commit S note dataref inline
COMMIT
N inlineX :302
data <<BLOB
note blob
BLOB
EOF
test_i18ngrep "nvalid dataref" err
'
test_expect_success 'S: notemodify with garbage after sha1 dataref must fail' '
sha1=$(grep :202 marks | cut -d\ -f2) &&
test_must_fail git fast-import --import-marks=marks <<-EOF 2>err &&
commit refs/heads/S
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
commit S note dataref sha1
COMMIT
N ${sha1}x :302
EOF
test_i18ngrep "space after SHA1" err
'
#
# notemodify, mark in commit-ish
#
test_expect_success 'S: notemodify with garbage after mark commit-ish must fail' '
test_must_fail git fast-import --import-marks=marks <<-EOF 2>err &&
commit refs/heads/Snotes
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
commit S note commit-ish
COMMIT
N :202 :302x
EOF
test_i18ngrep "after mark" err
'
#
# from
#
test_expect_success 'S: from with garbage after mark must fail' '
test_must_fail \
git fast-import --import-marks=marks --export-marks=marks <<-EOF 2>err &&
commit refs/heads/S2
mark :303
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
commit 3
COMMIT
from :301x
M 100644 :403 hello.c
EOF
# go create the commit, need it for merge test
git fast-import --import-marks=marks --export-marks=marks <<-EOF &&
commit refs/heads/S2
mark :303
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
commit 3
COMMIT
from :301
M 100644 :403 hello.c
EOF
# now evaluate the error
test_i18ngrep "after mark" err
'
#
# merge
#
test_expect_success 'S: merge with garbage after mark must fail' '
test_must_fail git fast-import --import-marks=marks <<-EOF 2>err &&
commit refs/heads/S
mark :304
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
merge 4
COMMIT
from :302
merge :303x
M 100644 :403 hello.c
EOF
test_i18ngrep "after mark" err
'
#
# tag, from markref
#
test_expect_success 'S: tag with garbage after mark must fail' '
test_must_fail git fast-import --import-marks=marks <<-EOF 2>err &&
tag refs/tags/Stag
from :302x
tagger $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<TAG
tag S
TAG
EOF
test_i18ngrep "after mark" err
'
#
# cat-blob markref
#
test_expect_success 'S: cat-blob with garbage after mark must fail' '
test_must_fail git fast-import --import-marks=marks <<-EOF 2>err &&
cat-blob :403x
EOF
test_i18ngrep "after mark" err
'
#
# ls markref
#
test_expect_success 'S: ls with garbage after mark must fail' '
test_must_fail git fast-import --import-marks=marks <<-EOF 2>err &&
ls :302x hello.c
EOF
test_i18ngrep "space after mark" err
'
test_expect_success 'S: ls with garbage after sha1 must fail' '
sha1=$(grep :302 marks | cut -d\ -f2) &&
test_must_fail git fast-import --import-marks=marks <<-EOF 2>err &&
ls ${sha1}x hello.c
EOF
test_i18ngrep "space after tree-ish" err
'
###
### series T (ls)
###
# Setup is carried over from series S.
test_expect_success 'T: ls root tree' '
sed -e "s/Z\$//" >expect <<-EOF &&
040000 tree $(git rev-parse S^{tree}) Z
EOF
sha1=$(git rev-parse --verify S) &&
git fast-import --import-marks=marks <<-EOF >actual &&
ls $sha1 ""
EOF
test_cmp expect actual
'
test_expect_success 'T: delete branch' '
git branch to-delete &&
git fast-import <<-EOF &&
reset refs/heads/to-delete
from 0000000000000000000000000000000000000000
EOF
test_must_fail git rev-parse --verify refs/heads/to-delete
'
test_expect_success 'T: empty reset doesnt delete branch' '
git branch not-to-delete &&
git fast-import <<-EOF &&
reset refs/heads/not-to-delete
EOF
git show-ref &&
git rev-parse --verify refs/heads/not-to-delete
'
###
### series U (filedelete)
###
test_expect_success 'U: initialize for U tests' '
cat >input <<-INPUT_END &&
commit refs/heads/U
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
test setup
COMMIT
M 100644 inline hello.c
data <<BLOB
blob 1
BLOB
M 100644 inline good/night.txt
data <<BLOB
sleep well
BLOB
M 100644 inline good/bye.txt
data <<BLOB
au revoir
BLOB
INPUT_END
git fast-import <input
'
test_expect_success 'U: filedelete file succeeds' '
cat >input <<-INPUT_END &&
commit refs/heads/U
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
delete good/night.txt
COMMIT
from refs/heads/U^0
D good/night.txt
INPUT_END
git fast-import <input
'
test_expect_success 'U: validate file delete result' '
cat >expect <<-EOF &&
:100644 000000 2907ebb4bf85d91bf0716bb3bd8a68ef48d6da76 0000000000000000000000000000000000000000 D good/night.txt
EOF
git diff-tree -M -r U^1 U >actual &&
compare_diff_raw expect actual
'
test_expect_success 'U: filedelete directory succeeds' '
cat >input <<-INPUT_END &&
commit refs/heads/U
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
delete good dir
COMMIT
from refs/heads/U^0
D good
INPUT_END
git fast-import <input
'
test_expect_success 'U: validate directory delete result' '
cat >expect <<-EOF &&
:100644 000000 69cb75792f55123d8389c156b0b41c2ff00ed507 0000000000000000000000000000000000000000 D good/bye.txt
EOF
git diff-tree -M -r U^1 U >actual &&
compare_diff_raw expect actual
'
test_expect_success 'U: filedelete root succeeds' '
cat >input <<-INPUT_END &&
commit refs/heads/U
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
must succeed
COMMIT
from refs/heads/U^0
D ""
INPUT_END
git fast-import <input
'
test_expect_success 'U: validate root delete result' '
cat >expect <<-EOF &&
:100644 000000 c18147dc648481eeb65dc5e66628429a64843327 0000000000000000000000000000000000000000 D hello.c
EOF
git diff-tree -M -r U^1 U >actual &&
compare_diff_raw expect actual
'
###
### series V (checkpoint)
###
# The commands in input_file should not produce any output on the file
# descriptor set with --cat-blob-fd (or stdout if unspecified).
#
# To make sure you're observing the side effects of checkpoint *before*
# fast-import terminates (and thus writes out its state), check that the
# fast-import process is still running using background_import_still_running
# *after* evaluating the test conditions.
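#
# A typical caller (the pattern the tests below follow):
#
#   background_import_then_checkpoint "--export-marks=marks.actual" input &&
#   ...assertions on refs and marks... &&
#   background_import_still_running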
background_import_then_checkpoint () {
options=$1
input_file=$2
mkfifo V.input
exec 8<>V.input
rm V.input
mkfifo V.output
exec 9<>V.output
rm V.output
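# Opening each FIFO read-write on a numbered fd and then unlinking the
# path keeps the pipe usable through the fd (and cleaned up on exit)
# while avoiding the blocking open that a read-only endpoint would incur.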
git fast-import $options <&8 >&9 &
echo $! >V.pid
# We don't mind if fast-import has already died by the time the test
# ends.
test_when_finished "
exec 8>&-; exec 9>&-;
kill $(cat V.pid) && wait $(cat V.pid)
true"
# Start in the background to ensure we adhere strictly to the write
# sequence of the (blocking) pipes. We must assume that the write below
# could block, e.g. if fast-import blocks writing its own output to &9
# because there is no reader on &9 yet.
(
cat "$input_file"
echo "checkpoint"
echo "progress checkpoint"
) >&8 &
error=1 ;# assume the worst
while read output <&9
do
if test "$output" = "progress checkpoint"
then
error=0
break
fi
# otherwise ignore cruft
echo >&2 "cruft: $output"
done
if test $error -eq 1
then
false
fi
}
background_import_still_running () {
if ! kill -0 "$(cat V.pid)"
then
echo >&2 "background fast-import terminated too early"
false
fi
}
test_expect_success PIPE 'V: checkpoint helper does not get stuck with extra output' '
cat >input <<-INPUT_END &&
progress foo
progress bar
INPUT_END
background_import_then_checkpoint "" input &&
background_import_still_running
'
test_expect_success PIPE 'V: checkpoint updates refs after reset' '
cat >input <<-\INPUT_END &&
reset refs/heads/V
from refs/heads/U
INPUT_END
background_import_then_checkpoint "" input &&
test "$(git rev-parse --verify V)" = "$(git rev-parse --verify U)" &&
background_import_still_running
'
test_expect_success PIPE 'V: checkpoint updates refs and marks after commit' '
cat >input <<-INPUT_END &&
commit refs/heads/V
mark :1
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data 0
from refs/heads/U
INPUT_END
background_import_then_checkpoint "--export-marks=marks.actual" input &&
echo ":1 $(git rev-parse --verify V)" >marks.expected &&
test "$(git rev-parse --verify V^)" = "$(git rev-parse --verify U)" &&
test_cmp marks.expected marks.actual &&
background_import_still_running
'
# Re-create the exact same commit, but on a different branch: no new object is
# created in the database, but the refs and marks still need to be updated.
test_expect_success PIPE 'V: checkpoint updates refs and marks after commit (no new objects)' '
cat >input <<-INPUT_END &&
commit refs/heads/V2
mark :2
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data 0
from refs/heads/U
INPUT_END
background_import_then_checkpoint "--export-marks=marks.actual" input &&
echo ":2 $(git rev-parse --verify V2)" >marks.expected &&
test "$(git rev-parse --verify V2)" = "$(git rev-parse --verify V)" &&
test_cmp marks.expected marks.actual &&
background_import_still_running
'
test_expect_success PIPE 'V: checkpoint updates tags after tag' '
cat >input <<-INPUT_END &&
tag Vtag
from refs/heads/V
tagger $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data 0
INPUT_END
background_import_then_checkpoint "" input &&
git show-ref -d Vtag &&
background_import_still_running
'
###
### series W (get-mark and empty orphan commits)
###
cat >>W-input <<-W_INPUT_END
commit refs/heads/W-branch
mark :1
author Full Name <[email protected]> 1000000000 +0100
committer Full Name <[email protected]> 1000000000 +0100
data 27
Intentionally empty commit
LFsget-mark :1
W_INPUT_END
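# "LFs" above is a placeholder: each test below substitutes zero to
# three "L"s for it, and tr then turns every L into a newline, so the
# cases differ only in how many newlines separate the commit data from
# the get-mark command.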
test_expect_success !MINGW 'W: get-mark & empty orphan commit with no newlines' '
sed -e s/LFs// W-input | tr L "\n" | git fast-import
'
test_expect_success !MINGW 'W: get-mark & empty orphan commit with one newline' '
sed -e s/LFs/L/ W-input | tr L "\n" | git fast-import
'
test_expect_success !MINGW 'W: get-mark & empty orphan commit with ugly second newline' '
# Technically, this should fail as it has too many linefeeds
# according to the grammar in fast-import.txt. But, for whatever
# reason, it works. Since using the correct number of newlines
# does not work with older (pre-2.22) versions of git, allow apps
# that used this second-newline workaround to keep working by
# checking it with this test...
sed -e s/LFs/LL/ W-input | tr L "\n" | git fast-import
'
test_expect_success !MINGW 'W: get-mark & empty orphan commit with erroneous third newline' '
# ...but do NOT allow more empty lines than that (see previous test).
sed -e s/LFs/LLL/ W-input | tr L "\n" | test_must_fail git fast-import
'
###
### series X (other new features)
###
test_expect_success 'X: handling encoding' '
test_tick &&
cat >input <<-INPUT_END &&
commit refs/heads/encoding
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
encoding iso-8859-7
data <<COMMIT
INPUT_END
printf "Pi: \360\nCOMMIT\n" >>input &&
git fast-import <input &&
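# \360 is pi in ISO-8859-7; the log output should carry the same
# character re-encoded as UTF-8 (\317\200, i.e. U+03C0).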
git cat-file -p encoding | grep $(printf "\360") &&
git log -1 --format=%B encoding | grep $(printf "\317\200")
'
test_done
|
devzero2000/git-core
|
t/t9300-fast-import.sh
|
Shell
|
gpl-2.0
| 76,583 |
#!/usr/bin/bash
# Simple script that generates data with msHOT-lite and then runs it
# through the scripts that split the data up, preprocess it for PSMC,
# run PSMC, and plot the results.
#
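# msHOT-lite follows ms conventions: 2 haplotypes, 100 replicates,
# -t theta, -r rho over a 3 Mb locus, and each "-eN t x" scales the
# population to x*N0 at time t (in units of 4*N0 generations); -l emits
# msHOT-lite's condensed output, which the splitting script below is
# assumed to expect.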
../../foreign-master/msHOT-lite/msHOT-lite 2 100 -t 30000 -r 6000 3000000 -eN 0.01 0.1 -eN 0.06 1 -eN 0.2 0.5 -eN 1 1 -eN 2 2 -l >testData.ms
python ../../myScripts/splitMs.py testData.ms
bash ../../myScripts/manyMs2Psmcfa.sh
bash ../../myScripts/manyPsmcfa2psmc.sh
bash ../../myScripts/manyPsmc2Plot.sh
|
shaunBarker/bioinformaticsScripts
|
simulateData2Plots.sh
|
Shell
|
gpl-2.0
| 494 |
VERSION=$( sed -e 's/^.*-//' -e q VERSION )
sed -e "s/@VERSION@/$VERSION/" <"$1.template" >"$1"
|
abh/ezmlm-idx
|
fill-template.sh
|
Shell
|
gpl-2.0
| 92 |
#!/bin/sh
export PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/bin/X11:/usr/local/bin
IFACE=$1
/etc/virt2real/log "$IFACE down"
/sbin/ifdown -f $IFACE
|
sv99/v2r_buildroot
|
board/virt2real/v1mass/rootfs-additions/etc/virt2real/yota-down.sh
|
Shell
|
gpl-2.0
| 148 |
#!/bin/bash
mkdir -p src/usr/share/NVDARemoteServer
mkdir -p src/usr/share/man/man1
mkdir -p src/usr/share/man/man5
mkdir -p src/etc
# src/usr/bin must exist before the certificate helper is copied below,
# otherwise cp would create a regular file named "bin" instead.
mkdir -p src/usr/bin
mkdir -p pkg
cp ../server.py ../options.py ../server.pem ../daemon.py src/usr/share/NVDARemoteServer
cp ../NVDARemoteCertificate src/usr/bin
cp ../manual/NVDARemoteServer.1 ../manual/NVDARemoteCertificate.1 src/usr/share/man/man1
cp ../manual/NVDARemoteServer.conf.5 src/usr/share/man/man5
cp ../NVDARemoteServer.conf src/etc
makepkg
rm -rf pkg
|
Technow-es/NVDARemoteServer
|
MSYS2/build.sh
|
Shell
|
gpl-2.0
| 475 |
#!/bin/sh
# Copyright (C) 2008 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
. lib/inittest
aux mirror_recovery_works || skip
aux prepare_vg 5
# ordinary mirrors
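# Scenario: build a 4-way mirror, knock out two of its legs while mkfs
# is writing to it, and answer "n" to lvconvert --repair so the failed
# images are not replaced; once the devices return, the mirror should
# survive the transient failure intact.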
lvcreate -aey --type mirror -m 3 --ignoremonitoring -L 1 -n 4way $vg
aux disable_dev --error --silent "$dev2" "$dev4"
mkfs.ext3 "$DM_DEV_DIR/$vg/4way" &
sleep 1
dmsetup status
echo n | lvconvert --repair $vg/4way 2>&1 | tee 4way.out
aux enable_dev --silent "$dev2" "$dev4"
lvs -a -o +devices | tee out
not grep unknown out
vgreduce --removemissing $vg
check mirror $vg 4way
lvchange -a n $vg/4way
wait
vgremove -f $vg
|
vgmoose/lvm
|
test/shell/lvconvert-repair-transient.sh
|
Shell
|
gpl-2.0
| 969 |
#!/bin/bash
# Compile a rix source, run the generated setup script if compilation
# succeeded, execute the resulting binary with the remaining arguments,
# then remove the intermediate files.
RIX_HOME=${RIX_HOME:-.}
RIX_FILE=${1}
"${RIX_HOME}/rixc" -q -o "${RIX_FILE}" "${RIX_FILE}"
if [ $? -eq 0 ]; then
    chmod +x "./${RIX_FILE}.sh"
    "./${RIX_FILE}.sh"
fi
shift
"./${RIX_FILE}.out" "$@"
rm "${RIX_FILE}.out" "${RIX_FILE}.c" "${RIX_FILE}.h" "${RIX_FILE}.sh" "${RIX_FILE}.log"
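# Example (hypothetical name): "./rix.sh hello arg1" asks rixc to build
# "hello", runs the generated hello.sh, then executes hello.out with
# arg1 before the intermediates are removed.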
|
riolet/rix
|
rix.sh
|
Shell
|
gpl-3.0
| 273 |
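# Forcefully stop everything the demo started: the chaos driver, the
# keepalive loop, the locator, and the three servers.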
pkill -9 -f chaos.pl
pkill -9 -f keepalive.sh
pkill -9 -f locator1
pkill -9 -f server1
pkill -9 -f server2
pkill -9 -f server3
|
Pivotal-Open-Source-Hub/geode-social-demo
|
bin/nuke.sh
|
Shell
|
apache-2.0
| 127 |
#!/bin/bash
#
# Script used by Appveyor to build Docker images for GRR.
set -ex
if [[ "${APPVEYOR_REPO_BRANCH}" == 'master' ]]; then
readonly BUILD_TAG='grrdocker/grr:latest'
else
readonly BUILD_TAG="grrdocker/grr:${APPVEYOR_REPO_BRANCH}"
fi
docker build -t "${BUILD_TAG}" \
--build-arg GCS_BUCKET="${GCS_BUCKET}" \
--build-arg GRR_COMMIT="${APPVEYOR_REPO_COMMIT}" \
.
|
dunkhong/grr
|
appveyor/docker_build/build_docker_image.sh
|
Shell
|
apache-2.0
| 382 |