code (stringlengths 2-1.05M) | repo_name (stringlengths 5-110) | path (stringlengths 3-922) | language (stringclasses 1) | license (stringclasses 15) | size (int64 2-1.05M)
---|---|---|---|---|---
#! /bin/bash
<<VERSION
DESCRIPTION: This Script updates all provisioned servers.
AUTHOR: Akram Hamed ([email protected])
CURRENT VERSION: 0.9.2 Beta
CREATED: Fri 30-OCT-2015
LAST REVISED: Mon 02-NOV-2015
VERSION
<<LICENSE
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so.
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
LICENSE
<<VARIABLES
variable naming convention
===========================
decoder$i
range_dec
dec[i]
name_dec[i]
s_dec[i]
name_s_dec[i]
f_dec[i]
name_f_dec[i]
/tmp/U_dec
concentrator$i
range_conc
conc[i]
name_conc[i]
s_conc[i]
name_s_conc[i]
f_conc[i]
name_f_conc[i]
/tmp/U_conc
logdecoder$i
range_logdec
logdec[i]
name_logdec[i]
s_logdec[i]
name_s_logdec[i]
f_logdec[i]
name_f_logdec[i]
/tmp/U_logdec
broker$i
range_brok
brok[i]
name_brok[i]
s_brok[i]
name_s_brok[i]
f_brok[i]
name_f_brok[i]
/tmp/U_brok
malware_analysis$i
range_mal
mal[i]
name_mal[i]
s_mal[i]
name_s_mal[i]
f_mal[i]
name_f_mal[i]
/tmp/U_mal
archiver$i
range_arc
arc[i]
name_arc[i]
s_arc[i]
name_s_arc[i]
f_arc[i]
name_f_arc[i]
/tmp/U_arc
esa$i
range_esa
esa[i]
name_esa[i]
s_esa[i]
name_s_esa[i]
f_esa[i]
name_f_esa[i]
/tmp/U_esa
VARIABLES
clear
# Unsetting Global Variables, Just in case of sourcing
#=====================================================
unset s_dec f_dec s_logdec f_logdec s_conc f_conc s_brok f_brok s_mal f_mal s_esa f_esa s_arc f_arc
# Making sure there is no left overs
#===================================
rm -f /tmp/U_esa /tmp/U_mal /tmp/U_dec /tmp/U_logdec /tmp/U_conc /tmp/U_arc /tmp/U_brok
sa_uuid=$(cd /var/lib/puppet/yaml/node; grep -Rw sa * | cut -d ':' -f1 | head -1)
malware_uuid=$(cd /var/lib/puppet/yaml/node; grep -w malware-analysis: * | cut -d ':' -f1 | head -1)
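# NOTE: these two UUID-named puppet yaml files identify the SA server itself
# and the Malware Analysis appliance; they are excluded from the broker scan
# below so the broker services embedded on those hosts are not picked up as
# standalone brokers.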
# BANNER
#==========
echo ""
echo ' ____ _ _ _ _ _'
echo '/ ___| / \ | | | |_ __ __| | __ _| |_ ___ _ __'
echo '\___ \ / _ \ | | | | _ \ / _ |/ _ | __/ _ \ __|'
echo ' ___) / ___ \ | |_| | |_) | (_| | (_| | || __/ |'
echo '|____/_/ \_\ \___/| .__/ \__,_|\__,_|\__\___|_|'
echo ' |_|'
echo ""
# Only root can do stuff
#========================
# Make sure only root can run this script
echo "Just Checking you are ROOT"
sleep 1
if [ "$(id -u)" != "0" ]; then
echo $'\n'
echo "This script must be run as root, please login as root and try again" 1>&2
echo $'\n'
exit 1
fi
echo "Seems, you are actually ROOT !!"
echo ""
echo ""
#========================
echo '================ WARN !! ================='
echo ""
echo "Please note that you MUST FIRST make sure that all the Hosts are provisioned correctly in the GUI"
echo ""
read -r -p "Are you sure ALL hosts are provisioned in the GUI ? [y/N] " response
case $response in
[yY][eE][sS]|[yY])
echo ""
echo "Continuing at your choice"
echo ""
echo ""
sleep 2
;;
*)
echo ""
echo "Exiting ...."
echo ""
sleep 2
exit 1
;;
esac
#============================================================================
#============================ Calculating Phase ===========================
#============================================================================
# check number and type of hosts
#================================
echo "Checking Number and Type of HOSTs"
echo "=================================="
# Decoder
#=========
cd /var/lib/puppet/yaml/node
range_dec=$(grep -w decoder: * | wc -l)
echo ""
echo number_of_decoders=$range_dec
for ((i=1; i<=range_dec; i++)); do
declare decoder$i=$(grep ipaddress: $(grep -w decoder: * | cut -d ':' -f1 | awk '{if (NR == lineNum) {print $0}}' lineNum=$i) | head -1 | cut -d " " -f8 | cut -c 2- | rev | cut -c 2- | rev)
declare name_decoder$i=$(grep hostname: $(grep -w decoder: * | cut -d ':' -f1 | awk '{if (NR == lineNum) {print $0}}' lineNum=$i) | head -1 | cut -d " " -f8)
dec[i]=$(eval echo \$decoder$i)
name_dec[i]=$(eval echo \$name_decoder$i)
echo "${name_dec[i]} ${dec[i]}"
done
echo ""
sleep 1
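# NOTE (editorial sketch): the six discovery blocks below repeat the pattern
# above with a different puppet keyword and variable prefix. A single helper
# could express it; this function is illustrative only and is never invoked
# by the script (its name and field parsing are assumptions).
discover_hosts() {    # $1 = puppet role keyword, e.g. "decoder:"
local file ip name
grep -lw "$1" * | while read -r file; do
# field 2 of a line like '  ipaddress: "10.1.1.5"', quotes stripped
ip=$(grep ipaddress: "$file" | head -1 | awk '{print $2}' | tr -d '"')
name=$(grep hostname: "$file" | head -1 | awk '{print $2}')
echo "$name $ip"
done
}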
# Concentrator
#=============
cd /var/lib/puppet/yaml/node
range_conc=$(grep -w concentrator: * | wc -l)
echo ""
echo number_of_concentrators=$range_conc
for ((i=1; i<=range_conc; i++)); do
declare concentrator$i=$(grep ipaddress: $(grep -w concentrator: * | cut -d ':' -f1 | awk '{if (NR == lineNum) {print $0}}' lineNum=$i) | head -1 | cut -d " " -f8 | cut -c 2- | rev | cut -c 2- | rev)
declare name_concentrator$i=$(grep hostname: $(grep -w concentrator: * | cut -d ':' -f1 | awk '{if (NR == lineNum) {print $0}}' lineNum=$i) | head -1 | cut -d " " -f8)
conc[i]=$(eval echo \$concentrator$i)
name_conc[i]=$(eval echo \$name_concentrator$i)
echo "${name_conc[i]} ${conc[i]}"
done
echo ""
sleep 1
# LogDecoder
#============
cd /var/lib/puppet/yaml/node
range_logdec=$(grep -w logdecoder: * | wc -l)
echo ""
echo number_of_logdecoders=$range_logdec
for ((i=1; i<=range_logdec; i++)); do
declare logdecoder$i=$(grep ipaddress: $(grep -w logdecoder: * | cut -d ':' -f1 | awk '{if (NR == lineNum) {print $0}}' lineNum=$i) | head -1 | cut -d " " -f8 | cut -c 2- | rev | cut -c 2- | rev)
declare name_logdecoder$i=$(grep hostname: $(grep -w logdecoder: * | cut -d ':' -f1 | awk '{if (NR == lineNum) {print $0}}' lineNum=$i) | head -1 | cut -d " " -f8)
logdec[i]=$(eval echo \$logdecoder$i)
name_logdec[i]=$(eval echo \$name_logdecoder$i)
echo "${name_logdec[i]} ${logdec[i]}"
done
echo ""
sleep 1
# Broker
#========
cd /var/lib/puppet/yaml/node
range_brok=$(grep -w --exclude="$malware_uuid" --exclude="$sa_uuid" broker: * | wc -l)
echo ""
echo number_of_brokers=$range_brok
for ((i=1; i<=range_brok; i++)); do
declare broker$i=$(grep ipaddress: $(grep -w --exclude="$malware_uuid" --exclude="$sa_uuid" broker: * | cut -d ':' -f1 | awk '{if (NR == lineNum) {print $0}}' lineNum=$i) | head -1 | cut -d " " -f8 | cut -c 2- | rev | cut -c 2- | rev)
declare name_broker$i=$(grep hostname: $(grep -w --exclude="$malware_uuid" --exclude="$sa_uuid" broker: * | cut -d ':' -f1 | awk '{if (NR == lineNum) {print $0}}' lineNum=$i) | head -1 | cut -d " " -f8)
brok[i]=$(eval echo \$broker$i)
name_brok[i]=$(eval echo \$name_broker$i)
echo "${name_brok[i]} ${brok[i]}"
done
echo ""
sleep 1
# Malware-Analysis
#==================
cd /var/lib/puppet/yaml/node
range_mal=$(grep -w malware-analysis: * | wc -l)
echo ""
echo number_of_malware_analysis=$range_mal
for ((i=1; i<=range_mal; i++)); do
declare malware_analysis$i=$(grep ipaddress: $(grep -w malware-analysis: * | cut -d ':' -f1 | awk '{if (NR == lineNum) {print $0}}' lineNum=$i) | head -1 | cut -d " " -f8 | cut -c 2- | rev | cut -c 2- | rev)
declare name_malware_analysis$i=$(grep hostname: $(grep -w malware-analysis: * | cut -d ':' -f1 | awk '{if (NR == lineNum) {print $0}}' lineNum=$i) | head -1 | cut -d " " -f8)
mal[i]=$(eval echo \$malware_analysis$i)
name_mal[i]=$(eval echo \$name_malware_analysis$i)
echo "${name_mal[i]} ${mal[i]}"
done
echo ""
sleep 1
# Archiver
#==========
cd /var/lib/puppet/yaml/node
range_arc=$(grep -w archiver: * | wc -l)
echo ""
echo number_of_archivers=$range_arc
for ((i=1; i<=range_arc; i++)); do
declare archiver$i=$(grep ipaddress: $(grep -w archiver: * | cut -d ':' -f1 | awk '{if (NR == lineNum) {print $0}}' lineNum=$i) | head -1 | cut -d " " -f8 | cut -c 2- | rev | cut -c 2- | rev)
declare name_archiver$i=$(grep hostname: $(grep -w archiver: * | cut -d ':' -f1 | awk '{if (NR == lineNum) {print $0}}' lineNum=$i) | head -1 | cut -d " " -f8)
arc[i]=$(eval echo \$archiver$i)
name_arc[i]=$(eval echo \$name_archiver$i)
echo "${name_arc[i]} ${arc[i]}"
done
echo ""
sleep 1
# ESA
#=====
cd /var/lib/puppet/yaml/node
range_esa=$(grep -w esa: * | wc -l)
echo ""
echo number_of_esa=$range_esa
for ((i=1; i<=range_esa; i++)); do
declare esa$i=$(grep ipaddress: $(grep -w esa: * | cut -d ':' -f1 | awk '{if (NR == lineNum) {print $0}}' lineNum=$i) | head -1 | cut -d " " -f8 | cut -c 2- | rev | cut -c 2- | rev)
declare name_esa$i=$(grep hostname: $(grep -w esa: * | cut -d ':' -f1 | awk '{if (NR == lineNum) {print $0}}' lineNum=$i) | head -1 | cut -d " " -f8)
esa[i]=$(eval echo \$esa$i)
name_esa[i]=$(eval echo \$name_esa$i)
echo "${name_esa[i]} ${esa[i]}"
done
echo ""
sleep 1
echo ""
echo ""
echo ""
read -r -p "Are you sure the above devices are correct ? [y/N] " response
case $response in
[yY][eE][sS]|[yY])
echo ""
echo "Continuing at your choice"
echo ""
echo ""
sleep 2
;;
*)
echo ""
echo "Exiting ...."
echo ""
sleep 2
exit 1
;;
esac
#=============================================================================
#=========================== Checking Phase ================================
#=============================================================================
# Check SSH connection to HOSTs
#===============================
echo "Checking SSH Connectivity to hosts"
echo "==================================="
# Decoder
#=========
if [[ $range_dec -ne 0 ]] ; then
declare -a s_dec
declare -a name_s_dec
declare -a f_dec
declare -a name_f_dec
for (( j=1; j<=$range_dec; j++ ));do
dec_connectivity="$(openssl s_client -connect ${dec[j]}:22 2> /dev/null| sed -n 1p | cut -c1-9)"
if [ "$dec_connectivity" != "CONNECTED" ]
then
echo "WARN !! Please Check SSH connection to Decoder ${name_dec[j]} ${dec[j]}"
f_dec[${#f_dec[@]}+1]=${dec[j]}
name_f_dec[${#name_f_dec[@]}+1]=${name_dec[j]}
echo " "
else
echo "PASSED Decoder ${name_dec[j]} ${dec[j]} is ssh accessible"
s_dec[${#s_dec[@]}+1]=${dec[j]}
name_s_dec[${#name_s_dec[@]}+1]=${name_dec[j]}
echo " "
fi
done
fi
sleep 1
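# NOTE: the openssl probe above only proves that TCP port 22 answers; it
# does not verify that key-based root login works. A stricter probe
# (illustrative, not used below) could ask ssh itself:
#
#   ssh -o BatchMode=yes -o ConnectTimeout=5 "root@${dec[j]}" true 2>/dev/null
#
# which exits non-zero when connectivity or authentication fails.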
# LogDecoder
#============
if [[ $range_logdec -ne 0 ]] ; then
declare -a s_logdec
declare -a name_s_logdec
declare -a f_logdec
declare -a name_f_logdec
for (( j=1; j<=$range_logdec; j++ ));do
logdec_connectivity="$(openssl s_client -connect ${logdec[j]}:22 2> /dev/null| sed -n 1p | cut -c1-9)"
if [ "$logdec_connectivity" != "CONNECTED" ]
then
echo "WARN !! Please Check SSH connection to LogDecoder ${name_logdec[j]} ${logdec[j]}"
f_logdec[${#f_logdec[@]}+1]=${logdec[j]}
name_f_logdec[${#name_f_logdec[@]}+1]=${name_logdec[j]}
echo " "
else
echo "PASSED LogDecoder ${name_logdec[j]} ${logdec[j]} is ssh accessible"
s_logdec[${#s_logdec[@]}+1]=${logdec[j]}
name_s_logdec[${#name_s_logdec[@]}+1]=${name_logdec[j]}
echo " "
fi
done
fi
sleep 1
# Concentrator
#==============
if [[ $range_conc -ne 0 ]] ; then
declare -a s_conc
declare -a name_s_conc
declare -a f_conc
declare -a name_f_conc
for (( j=1; j<=$range_conc; j++ ));do
conc_connectivity="$(openssl s_client -connect ${conc[j]}:22 2> /dev/null| sed -n 1p | cut -c1-9)"
if [ "$conc_connectivity" != "CONNECTED" ]
then
echo "WARN !! Please Check SSH connection to Concentrator ${name_conc[j]} ${conc[j]}"
f_conc[${#f_conc[@]}+1]=${conc[j]}
name_f_conc[${#name_f_conc[@]}+1]=${name_conc[j]}
echo " "
else
echo "PASSED Concentrator ${name_conc[j]} ${conc[j]} is ssh accessible"
s_conc[${#s_conc[@]}+1]=${conc[j]}
name_s_conc[${#name_s_conc[@]}+1]=${name_conc[j]}
echo " "
fi
done
fi
sleep 1
# Broker
#========
if [[ $range_brok -ne 0 ]] ; then
declare -a s_brok
declare -a name_s_brok
declare -a f_brok
declare -a name_f_brok
for (( j=1; j<=$range_brok; j++ ));do
brok_connectivity="$(openssl s_client -connect ${brok[j]}:22 2> /dev/null| sed -n 1p | cut -c1-9)"
if [ "$brok_connectivity" != "CONNECTED" ]
then
echo "WARN !! Please Check SSH connection to Broker ${name_brok[j]} ${brok[j]}"
f_brok[${#f_brok[@]}+1]=${brok[j]}
name_f_brok[${#name_f_brok[@]}+1]=${name_brok[j]}
echo " "
else
echo "PASSED Broker ${name_brok[j]} ${brok[j]} is ssh accessible"
s_brok[${#s_brok[@]}+1]=${brok[j]}
name_s_brok[${#name_s_brok[@]}+1]=${name_brok[j]}
echo " "
fi
done
fi
sleep 1
# Malware
#=========
if [[ $range_mal -ne 0 ]] ; then
declare -a s_mal
declare -a name_s_mal
declare -a f_mal
declare -a name_f_mal
for (( j=1; j<=$range_mal; j++ ));do
mal_connectivity="$(openssl s_client -connect ${mal[j]}:22 2> /dev/null| sed -n 1p | cut -c1-9)"
if [ "$mal_connectivity" != "CONNECTED" ]
then
echo "WARN !! Please Check SSH connection to Malware ${name_mal[j]} ${mal[j]}"
f_mal[${#f_mal[@]}+1]=${mal[j]}
name_f_mal[${#name_f_mal[@]}+1]=${name_mal[j]}
echo " "
else
echo "PASSED Malware ${name_mal[j]} ${mal[j]} is ssh accessible"
s_mal[${#s_mal[@]}+1]=${mal[j]}
name_s_mal[${#name_s_mal[@]}+1]=${name_mal[j]}
echo " "
fi
done
fi
sleep 1
# ESA
#=====
if [[ $range_esa -ne 0 ]] ; then
declare -a s_esa
declare -a name_s_esa
declare -a f_esa
declare -a name_f_esa
for (( j=1; j<=$range_esa; j++ ));do
esa_connectivity="$(openssl s_client -connect ${esa[j]}:22 2> /dev/null| sed -n 1p | cut -c1-9)"
if [ "$esa_connectivity" != "CONNECTED" ]
then
echo "WARN !! Please Check SSH connection to ESA ${name_esa[j]} ${esa[j]}"
f_esa[${#f_esa[@]}+1]=${esa[j]}
name_f_esa[${#name_f_esa[@]}+1]=${name_esa[j]}
echo " "
else
echo "PASSED ESA ${name_esa[j]} ${esa[j]} is ssh accessible"
s_esa[${#s_esa[@]}+1]=${esa[j]}
name_s_esa[${#name_s_esa[@]}+1]=${name_esa[j]}
echo " "
fi
done
fi
sleep 1
# Archiver
#==========
if [[ $range_arc -ne 0 ]] ; then
declare -a s_arc
declare -a name_s_arc
declare -a f_arc
declare -a name_f_arc
for (( j=1; j<=$range_arc; j++ ));do
arc_connectivity="$(openssl s_client -connect ${arc[j]}:22 2> /dev/null| sed -n 1p | cut -c1-9)"
if [ "$arc_connectivity" != "CONNECTED" ]
then
echo "WARN !! Please Check SSH connection to Archiver ${name_arc[j]} ${arc[j]}"
f_arc[${#f_arc[@]}+1]=${arc[j]}
name_f_arc[${#name_f_arc[@]}+1]=${name_arc[j]}
echo " "
else
echo "PASSED Archiver ${name_arc[j]} ${arc[j]} is ssh accessible"
s_arc[${#s_arc[@]}+1]=${arc[j]}
name_s_arc[${#name_s_arc[@]}+1]=${name_arc[j]}
echo " "
fi
done
fi
sleep 1
# Print Failing SSH devices
#===========================
if [[ ${#f_dec[@]} -ne 0 ]] ; then
echo "Please Check SSH Accessibility to these Decoders"
echo "================================================"
echo ${f_dec[@]}
echo ""
fi
if [[ ${#f_logdec[@]} -ne 0 ]] ; then
echo "Please Check SSH Accessibility to these LogDecoders"
echo "====================================================="
echo ${f_logdec[@]}
echo ""
fi
if [[ ${#f_conc[@]} -ne 0 ]] ; then
echo "Please Check SSH Accessibility to these Concentrators"
echo "====================================================="
echo ${f_conc[@]}
echo ""
fi
if [[ ${#f_brok[@]} -ne 0 ]] ; then
echo "Please Check SSH Accessibility to these Brokers"
echo "================================================"
echo ${f_brok[@]}
echo ""
fi
if [[ ${#f_mal[@]} -ne 0 ]] ; then
echo "Please Check SSH Accessibility to these Malwares"
echo "================================================"
echo ${f_mal[@]}
echo ""
fi
if [[ ${#f_esa[@]} -ne 0 ]] ; then
echo "Please Check SSH Accessibility to these ESA"
echo "============================================="
echo ${f_esa[@]}
echo ""
fi
if [[ ${#f_arc[@]} -ne 0 ]] ; then
echo "Please Check SSH Accessibility to these Archivers"
echo "================================================"
echo ${f_arc[@]}
echo ""
fi
# Make sure all devices are UP before updating,
# else ask the customer to continue at own risk.
#===============================================
if [[ ${#f_arc[@]} -ne 0 ]] || [[ ${#f_esa[@]} -ne 0 ]] || [[ ${#f_mal[@]} -ne 0 ]] || [[ ${#f_brok[@]} -ne 0 ]] || [[ ${#f_logdec[@]} -ne 0 ]] || [[ ${#f_dec[@]} -ne 0 ]] || [[ ${#f_conc[@]} -ne 0 ]]; then
echo ""
# Print some scary stuff so the user continues the update at their own risk
#==========================================================
echo "WARN !!! Some Devices aren't SSH Accessibile"
echo "RSA Strongly Recommends that you update ALL your devices at the same time"
read -r -p "Are you sure you wish to continue with upgrade ? [y/N] " response
case $response in
[yY][eE][sS]|[yY])
echo ""
echo "Continuing Update..."
;;
*)
echo ""
echo "Exiting ...."
echo ""
sleep 2
exit 1
;;
esac
fi
#==========================================================================
#=========================== Updating Phase =============================
#==========================================================================
# let the magic begin
#=====================
# Update Sequence
#================
# SA Appliance
# ESA, Malware
# Decoders
# LogDecoders
# Concentrators
# Archivers
# Brokers
echo ""
sleep 2
echo "Script will now proceed with the update process"
echo ""
read -rsp $'Press any key to continue...\n' -n1 key
sleep 1
echo .
sleep 1
echo .
sleep 1
echo .
sleep 1
echo ""
echo "Updating the devices in the following sequence"
echo "==============================================="
echo "Security Analytics server"
echo "Evenet Stream Analaysis"
echo "Malware"
echo "Decoders"
echo "Concentrators"
echo "Archivers"
echo "Brokers"
echo ""
read -r -p "Continue...? [y/N] " response
case $response in
[yY][eE][sS]|[yY])
echo ""
echo ""
sleep 1
;;
*)
echo ""
echo "Exiting ...."
echo ""
sleep 2
exit 1
;;
esac
sleep 2
# check SA repo is enabled, if not enabled, then enable it.
#=========================================================
echo ""
echo ""
echo "#######################################"
echo '# #'
echo "# SA Server #"
echo "# #"
echo "# #"
echo "#######################################"
echo ""
echo ""
echo "Checking SA repo..."
echo "===================="
sa_repo_enable=$(grep enabled /etc/yum.repos.d/RSASoftware.repo | cut -c 9)
if [ "$sa_repo_enable" != "1" ]; then
echo 'RSA Repo was disabled, enabling it...'
sleep 2
sed -i -- 's/enabled=0/enabled=1/g' /etc/yum.repos.d/RSASoftware.repo;
echo 'Just Enabled RSA Repo for you...'
fi
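# NOTE: the 'cut -c 9' parse above assumes the first line containing
# "enabled" reads exactly "enabled=<digit>". A less position-sensitive
# equivalent (sketch only) would be:
#
#   grep -q '^enabled=1' /etc/yum.repos.d/RSASoftware.repo || \
#       sed -i 's/enabled=0/enabled=1/g' /etc/yum.repos.d/RSASoftware.repo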
echo "PASSED SA repo check"
echo " "
echo ""
echo "Updating SA server..."
rm -f /var/run/yum.pid
yum install rsa-sa-gpg-pubkeys --nogpgcheck
echo ""
echo ""
echo "Updating SA Repo"
echo ""
sleep 3
yum clean all
yum update
# Create some temp scripts to be sent to each device type
#=========================================================
##========= ESA Script ===========
cat <<EOT > /tmp/U_esa
#! /bin/bash
echo " "
echo "checking ESA repo..."
repo_enable=\$(grep enabled /etc/yum.repos.d/RSASoftware.repo | cut -c 9)
if [[ \$repo_enable != 1 ]]; then
echo 'ESA Repo was disabled, enabling it...'
sleep 2
sed -i -- 's/enabled=0/enabled=1/g' /etc/yum.repos.d/RSASoftware.repo;
echo 'Enabled ESA Repo'
fi
echo "PASSED ESA repo check"
echo " "
echo "Updating ESA"
rm -f /var/run/yum.pid
yum clean all
yum install rsa-sa-gpg-pubkeys --nogpgcheck -y
echo ""
echo ""
echo "Updating ESA \$HOSTNAME"
echo ""
sleep 3
echo ""
yum update -y
EOT
chmod +x /tmp/U_esa
##========= Malware Script ===========
cat <<EOT > /tmp/U_mal
#! /bin/bash
echo " "
echo "checking Malware repo..."
repo_enable=\$(grep enabled /etc/yum.repos.d/RSASoftware.repo | cut -c 9)
if [[ \$repo_enable != 1 ]]; then
echo 'Malware Repo was disabled, enabling it...'
sleep 2
sed -i -- 's/enabled=0/enabled=1/g' /etc/yum.repos.d/RSASoftware.repo;
echo 'Enabled Malware Repo'
fi
echo "PASSED Malware repo check"
echo " "
echo "Updating Malware"
rm -f /var/run/yum.pid
yum install rsa-sa-gpg-pubkeys --nogpgcheck -y
echo ""
echo ""
echo "Updating Malware \$HOSTNAME"
echo ""
sleep 3
yum clean all
yum update -y
EOT
chmod +x /tmp/U_mal
##========= Decoder Script ===========
cat <<EOT > /tmp/U_dec
#! /bin/bash
echo " "
echo "checking Decoder repo..."
repo_enable=\$(grep enabled /etc/yum.repos.d/RSASoftware.repo | cut -c 9)
if [[ \$repo_enable != 1 ]]; then
echo 'Decoder Repo was disabled, enabling it...'
sleep 2
sed -i -- 's/enabled=0/enabled=1/g' /etc/yum.repos.d/RSASoftware.repo;
echo 'Enabled Decoder Repo'
fi
echo "PASSED Decoder repo check"
echo " "
echo "Updating Decoder"
rm -f /var/run/yum.pid
yum install rsa-sa-gpg-pubkeys --nogpgcheck -y
echo ""
echo ""
echo "Updating Decoder \$HOSTNAME"
echo ""
sleep 3
yum clean all
yum update -y
EOT
chmod +x /tmp/U_dec
##========= Log Decoder Script ===========
cat <<EOT > /tmp/U_logdec
#! /bin/bash
echo " "
echo "checking Log Decoder repo..."
repo_enable=\$(grep enabled /etc/yum.repos.d/RSASoftware.repo | cut -c 9)
if [[ \$repo_enable != 1 ]]; then
echo 'Log Decoder Repo was disabled, enabling it...'
sleep 2
sed -i -- 's/enabled=0/enabled=1/g' /etc/yum.repos.d/RSASoftware.repo;
echo 'Enabled Log Decoder Repo'
fi
echo "PASSED Log Decoder repo check"
echo " "
echo "Updating Log Decoder"
rm -f /var/run/yum.pid
yum install rsa-sa-gpg-pubkeys --nogpgcheck -y
echo ""
echo ""
echo "Updating LogDecoder \$HOSTNAME"
echo ""
sleep 3
yum clean all
yum update -y
EOT
chmod +x /tmp/U_logdec
##========= Concentrator Script ===========
cat <<EOT > /tmp/U_conc
#! /bin/bash
echo " "
echo "checking Concentrator repo..."
repo_enable=\$(grep enabled /etc/yum.repos.d/RSASoftware.repo | cut -c 9)
if [[ \$repo_enable != 1 ]]; then
echo 'Concentrator Repo was disabled, enabling it...'
sleep 2
sed -i -- 's/enabled=0/enabled=1/g' /etc/yum.repos.d/RSASoftware.repo;
echo 'Enabled Concentrator Repo'
fi
echo "PASSED Concentrator repo check"
echo " "
echo "Updating Concentrator"
rm -f /var/run/yum.pid
yum install rsa-sa-gpg-pubkeys --nogpgcheck -y
echo ""
echo ""
echo "Updating Concentrator \$HOSTNAME"
echo ""
sleep 3
yum clean all
yum update -y
EOT
chmod +x /tmp/U_conc
##========= Archiver Script ===========
cat <<EOT > /tmp/U_arc
#! /bin/bash
echo " "
echo "checking Archiver repo..."
repo_enable=\$(grep enabled /etc/yum.repos.d/RSASoftware.repo | cut -c 9)
if [[ \$repo_enable != 1 ]]; then
echo 'Archiver Repo was disabled, enabling it...'
sleep 2
sed -i -- 's/enabled=0/enabled=1/g' /etc/yum.repos.d/RSASoftware.repo;
echo 'Enabled Archiver Repo'
fi
echo "PASSED Archiver repo check"
echo " "
echo "Updating Archiver"
rm -f /var/run/yum.pid
yum install rsa-sa-gpg-pubkeys --nogpgcheck -y
echo ""
echo ""
echo "Updating Archiver \$HOSTNAME"
echo ""
sleep 3
yum clean all
yum update -y
EOT
chmod +x /tmp/U_arc
##========= Broker Script ===========
cat <<EOT > /tmp/U_brok
#! /bin/bash
echo " "
echo "checking Broker repo..."
repo_enable=\$(grep enabled /etc/yum.repos.d/RSASoftware.repo | cut -c 9)
if [[ \$repo_enable != 1 ]]; then
echo 'Broker Repo was disabled, enabling it...'
sleep 2
sed -i -- 's/enabled=0/enabled=1/g' /etc/yum.repos.d/RSASoftware.repo;
echo 'Enabled Broker Repo'
fi
echo "PASSED Broker repo check"
echo " "
echo "Updating Broker"
rm -f /var/run/yum.pid
yum install rsa-sa-gpg-pubkeys --nogpgcheck -y
echo ""
echo ""
echo "Updating Broker \$HOSTNAME"
echo ""
sleep 3
yum clean all
yum update -y
EOT
chmod +x /tmp/U_brok
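# NOTE (editorial sketch, never invoked): the seven heredocs above are
# identical apart from the device label, so one generator could produce
# them all. The helper below is hypothetical:
write_update_script() {   # $1 = device label (e.g. "Broker"), $2 = output path
cat > "$2" <<EOS
#! /bin/bash
echo "checking ${1} repo..."
repo_enable=\$(grep enabled /etc/yum.repos.d/RSASoftware.repo | cut -c 9)
if [[ \$repo_enable != 1 ]]; then
sed -i -- 's/enabled=0/enabled=1/g' /etc/yum.repos.d/RSASoftware.repo
echo "Enabled ${1} Repo"
fi
echo "PASSED ${1} repo check"
rm -f /var/run/yum.pid
yum install rsa-sa-gpg-pubkeys --nogpgcheck -y
echo "Updating ${1} \$HOSTNAME"
yum clean all
yum update -y
EOS
chmod +x "$2"
}
# e.g. write_update_script "Broker" /tmp/U_brok would reproduce the Broker
# script above, modulo a few cosmetic echo/sleep lines.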
#=========================================================
# Updating ESA
#==============
if [[ ${#s_esa[@]} -ne 0 ]]; then
echo ""
echo ""
echo "#######################################"
echo '# #'
echo "# ESA Server #"
echo "# #"
echo "# #"
echo "#######################################"
echo ""
echo ""
for ((k=1; k<=${#s_esa[@]}; k++)); do
echo ""
echo ""
echo ""
echo "Updating ${name_s_esa[k]} ${s_esa[k]}"
echo ""
echo ""
ssh root@${s_esa[k]} 'bash -s' < /tmp/U_esa
echo ""
echo ""
echo ""
done
fi
# Updating Malware
#==================
if [[ ${#s_mal[@]} -ne 0 ]]; then
echo ""
echo ""
echo "#######################################"
echo '# #'
echo "# Malware Server #"
echo "# #"
echo "# #"
echo "#######################################"
echo ""
echo ""
for ((k=1; k<=${#s_mal[@]}; k++)); do
echo ""
echo ""
echo ""
echo "Updating ${name_s_mal[k]} ${s_mal[k]}"
echo ""
echo ""
ssh root@${s_mal[k]} 'bash -s' < /tmp/U_mal
echo ""
echo ""
echo ""
done
fi
# Updating Decoder
#=================
if [[ ${#s_dec[@]} -ne 0 ]]; then
echo ""
echo ""
echo "#######################################"
echo '# #'
echo "# Decoder Server #"
echo "# #"
echo "# #"
echo "#######################################"
echo ""
echo ""
for ((k=1; k<=${#s_dec[@]}; k++)); do
echo ""
echo ""
echo ""
echo "Updating ${name_s_dec[k]} ${s_dec[k]}"
echo ""
echo ""
ssh root@${s_dec[k]} 'bash -s' < /tmp/U_dec
echo ""
echo ""
echo ""
done
fi
# Updating Log Decoder
#======================
if [[ ${#s_logdec[@]} -ne 0 ]]; then
echo ""
echo ""
echo "#######################################"
echo '# #'
echo "# Log Decoder Server #"
echo "# #"
echo "# #"
echo "#######################################"
echo ""
echo ""
for ((k=1; k<=${#s_logdec[@]}; k++)); do
echo ""
echo ""
echo ""
echo "Updating ${name_s_logdec[k]} ${s_logdec[k]}"
echo ""
echo ""
ssh root@${s_logdec[k]} 'bash -s' < /tmp/U_logdec
echo ""
echo ""
echo ""
done
fi
# Updating Concentrator
#=======================
if [[ ${#s_conc[@]} -ne 0 ]]; then
echo ""
echo ""
echo "#######################################"
echo '# #'
echo "# Concentrator Server #"
echo "# #"
echo "# #"
echo "#######################################"
echo ""
echo ""
for ((k=1; k<=${#s_conc[@]}; k++)); do
echo ""
echo ""
echo ""
echo "Updating ${name_s_conc[k]} ${s_conc[k]}"
echo ""
echo ""
ssh root@${s_conc[k]} 'bash -s' < /tmp/U_conc
echo ""
echo ""
echo ""
done
fi
# Updating Archiver
#==================
if [[ ${#s_arc[@]} -ne 0 ]]; then
echo ""
echo ""
echo "#######################################"
echo '# #'
echo "# Archiver Server #"
echo "# #"
echo "# #"
echo "#######################################"
echo ""
echo ""
for ((k=1; k<=${#s_arc[@]}; k++)); do
echo ""
echo ""
echo ""
echo "Updating ${name_s_arc[k]} ${s_arc[k]}"
echo ""
echo ""
ssh root@${s_arc[k]} 'bash -s' < /tmp/U_arc
echo ""
echo ""
echo ""
done
fi
# Updating Broker
#================
if [[ ${#s_brok[@]} -ne 0 ]]; then
echo ""
echo ""
echo "#######################################"
echo '# #'
echo "# Broker Server #"
echo "# #"
echo "# #"
echo "#######################################"
echo ""
echo ""
for ((k=1; k<=${#s_brok[@]}; k++)); do
echo ""
echo ""
echo ""
echo "Updating ${name_s_brok[k]} ${s_brok[k]}"
echo ""
echo ""
ssh root@${s_brok[k]} 'bash -s' < /tmp/U_brok
echo ""
echo ""
echo ""
done
fi
#=============================== FINALIZING ============================
# Cleaning up the mess
#=======================
rm -f /tmp/U_esa /tmp/U_mal /tmp/U_dec /tmp/U_logdec /tmp/U_conc /tmp/U_arc /tmp/U_brok
unset s_dec f_dec s_logdec f_logdec s_conc f_conc s_brok f_brok s_mal f_mal s_esa f_esa s_arc f_arc
# Offer to Reboot SA server
#===========================
echo "==================================================================="
echo ""
echo "Script has finished the process"
echo ""
echo "Do you wish to reboot the SA server now ?"
echo "It is highly recommended to reboot the machine after doing some upgrades"
echo ""
read -r -p "Are you sure you wish to REBOOT ? [y/N] " response
case $response in
[yY][eE][sS]|[yY])
echo ""
echo "Rebooting in 10 seconds..."
sleep 10
init 6
;;
*)
echo ""
echo "Exiting ...."
echo ""
sleep 2
exit 1
;;
esac
#================================= END OF SCRIPT =================================
| skipper42/Security-Analytics-Updater | sa_update.sh | Shell | gpl-2.0 | 29,918 |
#!/bin/sh
. ./test-common.sh
cleanup 10
# ------------------------------- Test 10 ------------------------------------
preptest test.log 10 1
. ./test-common-selinux.sh
if [ "$SELINUX_TESTS" = 1 ]; then
chcon --type=logrotate_tmp_t test.log
else
echo "Skipping SELinux part of test 10"
fi
$RLR test-config.10 --force || exit 23
checkoutput <<EOF
test.log 0
test.log.1 0 zero
EOF
echo "newfile" > test.log
$RLR test-config.10 --force || exit 23
if [ "$SELINUX_TESTS" = 1 ]; then
ls -Z test.log.2.gz|grep logrotate_tmp_t >/dev/null
if [ $? != 0 ]; then
echo "test.log.2.gz should have selinux context logrotate_tmp_t."
ls -Z test.log.2.gz
exit 3
fi
ls -Z test.log.1|grep logrotate_tmp_t >/dev/null
if [ $? != 0 ]; then
echo "test.log.1 should have selinux context logrotate_tmp_t."
ls -Z test.log.1
exit 3
fi
fi
checkoutput <<EOF
test.log 0
test.log.1 0 newfile
test.log.2.gz 1 zero
EOF
checkmail test.log.1 newfile
| logrotate/logrotate | test/test-0010.sh | Shell | gpl-2.0 | 942 |
#! /bin/bash
#
# Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
# University Research and Technology
# Corporation. All rights reserved.
# Copyright (c) 2004-2005 The University of Tennessee and The University
# of Tennessee Research Foundation. All rights
# reserved.
# Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
# University of Stuttgart. All rights reserved.
# Copyright (c) 2004-2005 The Regents of the University of California.
# All rights reserved.
# Copyright (c) 2007-2008 Cisco, Inc. All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
# This script is run on developer copies of PLPA -- *not* distribution
# tarballs.
#set -x
##############################################################################
#
# User-definable parameters (search path and minimum supported versions)
#
# Note: use ';' to separate parameters
##############################################################################
ompi_aclocal_search="aclocal"
if test ! -z "$ACLOCAL"; then
ompi_aclocal_search="$ACLOCAL"
fi
ompi_autoheader_search="autoheader"
if test ! -z "$AUTOHEADER"; then
ompi_autoheader_search="$AUTOHEADER"
fi
ompi_autoconf_search="autoconf"
if test ! -z "$AUTOCONF"; then
ompi_autoconf_search="$AUTOCONF"
fi
ompi_libtoolize_search="libtoolize;glibtoolize"
if test ! -z "$LIBTOOLIZE"; then
ompi_libtoolize_search="$LIBTOOLIZE"
fi
ompi_automake_search="automake"
if test ! -z "$AUTOMAKE"; then
ompi_automake_search="$AUTOMAKE"
fi
ompi_automake_version="1.9.6"
ompi_autoconf_version="2.59"
ompi_libtool_version="1.5.22"
##############################################################################
#
# Global variables - should not need to modify defaults
#
##############################################################################
ompi_aclocal_version="$ompi_automake_version"
ompi_autoheader_version="$ompi_autoconf_version"
ompi_libtoolize_version="$ompi_libtool_version"
# program names to execute
ompi_aclocal=""
ompi_autoheader=""
ompi_autoconf=""
ompi_libtoolize=""
ompi_automake=""
mca_no_configure_components_file="config/mca_no_configure_components.m4"
mca_no_config_list_file="mca_no_config_list"
mca_no_config_env_file="mca_no_config_env"
mca_m4_include_file="mca_m4_config_include.m4"
mca_m4_config_env_file="mca_m4_config_env"
autogen_subdir_file="autogen.subdirs"
############################################################################
#
# Version check - does major,minor,release check (hopefully ignoring
# beta et al)
#
# INPUT:
# - minimum version allowable
# - version we found
#
# OUTPUT:
# - 0 version is ok
# - 1 version is not ok
#
# SIDE EFFECTS:
# none
#
##############################################################################
check_version() {
local min_version="$1"
local version="$2"
local min_major_version="`echo $min_version | cut -f1 -d.`"
local min_minor_version="`echo $min_version | cut -f2 -d.`"
local min_release_version="`echo $min_version | cut -f3 -d.`"
if test "$min_release_version" = "" ; then
min_release_version=0
fi
local major_version="`echo $version | cut -f1 -d.`"
local minor_version="`echo $version | cut -f2 -d.`"
local release_version="`echo $version | cut -f3 -d.`"
if test "$release_version" = "" ; then
release_version=0
fi
if test $min_major_version -lt $major_version ; then
return 0
elif test $min_major_version -gt $major_version ; then
return 1
fi
if test $min_minor_version -lt $minor_version ; then
return 0
elif test $min_minor_version -gt $minor_version ; then
return 1
fi
if test $min_release_version -gt $release_version ; then
return 1
fi
return 0
}
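# Example: check_version 1.9.6 1.11.3 returns 0 (1.11.3 is new enough),
# while check_version 2.59 2.13 returns 1 (minor version too old).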
##############################################################################
#
# find app - find a version of the given application that is new
# enough for use
#
# INPUT:
# - name of application (eg aclocal)
#
# OUTPUT:
# none
#
# SIDE EFFECTS:
# - sets application_name variable to working executable name
# - aborts on error finding application
#
##############################################################################
find_app() {
local app_name="$1"
local version="0.0.0"
local min_version="99.99.99"
local found=0
local tmpIFS=$IFS
eval "min_version=\"\$ompi_${app_name}_version\""
eval "search_path=\"\$ompi_${app_name}_search\""
IFS=";"
for i in $search_path ; do
IFS="$tmpIFS"
version="`${i} --version 2>&1`"
if test "$?" != 0 ; then
IFS=";"
continue
fi
version="`echo $version | cut -f2 -d')'`"
version="`echo $version | cut -f1 -d' '`"
if check_version $min_version $version ; then
eval "ompi_${app_name}=\"${i}\""
found=1
break
fi
done
IFS="$tmpIFS"
if test "$found" = "0" ; then
cat <<EOF
I could not find a recent enough copy of ${app_name}.
I am gonna abort. :-(
Please make sure you are using at least the following versions of the
GNU tools:
GNU Autoconf $ompi_autoconf_version
GNU Automake $ompi_automake_version
NOTE: You may need Automake 1.8.5 (or higher) in order to run
"make dist" successfully
GNU Libtool $ompi_libtool_version
EOF
exit 1
fi
}
##############################################################################
#
# run_and_check - run the right GNU tool, printing warning on failure
#
# INPUT:
# - name of application (eg aclocal)
# - program arguments
#
# OUTPUT:
# none
#
# SIDE EFFECTS:
# - aborts on error running application
#
##############################################################################
run_and_check() {
local rac_progs="$*"
echo "[Running] $rac_progs"
eval $rac_progs
if test "$?" != 0; then
cat <<EOF
-------------------------------------------------------------------------
It seems that the execution of "$rac_progs" has failed. See above for
the specific error message that caused it to abort.
This *MAY* be caused by an older version of one of the required
packages. Please make sure you are using at least the following
versions:
GNU Autoconf $ompi_autoconf_version
GNU Automake $ompi_automake_version
GNU Libtool $ompi_libtool_version
-------------------------------------------------------------------------
EOF
exit 1
fi
}
##############################################################################
#
# find_and_delete - look for standard files in a number of common places
# (e.g., ./config.guess, config/config.guess, dist/config.guess), and
# delete it. If it's not found there, look for AC_CONFIG_AUX_DIR in
# the configure.in script and try there. If it's not there, oh well.
#
# INPUT:
# - file to delete
#
# OUTPUT:
# none
#
# SIDE EFFECTS:
# - files may disappear
#
##############################################################################
find_and_delete() {
local fad_file="$1"
local fad_cfile
local auxdir
# Look for the file in "standard" places
if test -f $fad_file; then
rm -f $fad_file
elif test -f config/$fad_file; then
rm -f config/$fad_file
elif test -f dist/$fad_file; then
rm -f dist/$fad_file
else
# Didn't find it -- look for an AC_CONFIG_AUX_DIR line in
# configure.[in|ac]
if test -f configure.in; then
fad_cfile=configure.in
elif test -f configure.ac; then
fad_cfile=configure.ac
else
echo "--> Errr... there's no configure.in or configure.ac file!"
fi
if test -n "$fad_cfile"; then
auxdir="`grep AC_CONFIG_AUX_DIR $fad_cfile | cut -d\( -f 2 | cut -d\) -f 1`"
fi
if test -f "$auxdir/$fad_file"; then
rm -f "$auxdir/$fad_file"
fi
fi
}
##############################################################################
#
# run_gnu_tools - run the GNU tools in a given directory
#
# INPUT:
# none
#
# OUTPUT:
# none
#
# SIDE EFFECTS:
# - assumes that the directory is ready to have the GNU tools run
# in it (i.e., there's some form of configure.*)
# - may preprocess the directory before running the GNU tools
# (e.g., generate Makefile.am's from configure.params, etc.)
#
##############################################################################
run_gnu_tools() {
# Find and delete the GNU helper script files
find_and_delete config.guess
find_and_delete config.sub
find_and_delete depcomp
find_and_delete compile
find_and_delete install-sh
find_and_delete ltconfig
find_and_delete ltmain.sh
find_and_delete missing
find_and_delete mkinstalldirs
find_and_delete libtool
find_and_delete configure
# Run the GNU tools
echo "*** Running GNU tools"
run_and_check $ompi_aclocal
run_and_check $ompi_autoheader
run_and_check $ompi_autoconf
run_and_check $ompi_libtoolize --automake --copy
run_and_check $ompi_automake --foreign -a --copy --include-deps
}
##############################################################################
#
# main - do the real work...
#
##############################################################################
# announce
echo "[Checking] prerequisites"
# sanity check to make sure user isn't being stupid
if test ! -d .svn ; then
cat <<EOF
This doesn't look like a developer copy of PLPA. You probably do not
want to run autogen.sh - it is normally not needed for a release
source tree. Giving you 5 seconds to reconsider and kill me.
EOF
sleep 5
fi
if test -f VERSION -a -f configure.ac -a -f src/libplpa/plpa.h.in ; then
# Top level of PLPA tree
uptime > /dev/null
else
cat <<EOF
You must run this script from either the top level of the PLPA
directory tree or the top-level of an MCA component directory tree.
EOF
exit 1
fi
# find all the apps we are going to run
find_app "aclocal"
find_app "autoheader"
find_app "autoconf"
find_app "libtoolize"
find_app "automake"
run_gnu_tools
# All done
exit 0
| joergdietrich/mapmap | plpa-1.1/autogen.sh | Shell | gpl-2.0 | 10,223 |
convert images/OCS-295-A.png -crop 1517x4532+89+267 +repage images/OCS-295-A.png
#
#
#/OCS-295.png
convert images/OCS-295-B.png -crop 1567x4526+14+271 +repage images/OCS-295-B.png
#
#
#/OCS-295.png
| jonnymwalker/Staroslavjanskij-Slovar | scripts/cropedges.OCS-295.sh | Shell | gpl-2.0 | 198 |
#!/bin/bash
echo $WINEPREFIX
exit 0
| hbirchtree/kaidan | kaidan-test/assets/script.sh | Shell | gpl-2.0 | 36 |
#!/bin/bash
./ptb_xtrabackup_test.sh \
--defaults-file=./changed_page_bitmap_sysbench.cfg \
--mysql-rootdir=/mnt/bin/ps-5.5-changed-page-bitmap \
--vardir=/mnt/var/cpb_sysbench \
--cachedir=/mnt/cache \
--prepare-rootdir=/usr/share/doc/sysbench/ \
--load-rootdir=/usr/share/doc/sysbench/ \
--backup-rootdir=/mnt/bin/xb-2.1-xtradb55-changed-page-bitmap \
--restore-rootdir=/mnt/bin/xb-2.1-xtradb55-changed-page-bitmap \
"$@"
exit $?
| Percona-QA/PTB | examples/changed_page_bitmap_sysbench.sh | Shell | gpl-2.0 | 488 |
#! /bin/sh
#
# This is the 'official' LZO benchmark driver.
#
# usage: util/bench.sh Calgary-Corpus-directory
#
#
# Benchmark cookbook:
# -------------------
# 1) verify that LZO works (see PLATFORM.TXT)
# 2) check that your machine is idle
# 3) run 'sh util/bench.sh Calgary-Corpus-directory > bench.log'
# note: this *must* take 20 minutes or more !
# 4) run 'perl util/table.pl -b bench.log' to get your benchmark values
#
#
# If the execution time of this script is less than 20 minutes,
# then increase the value of the variable 'n' below.
#
#
# Copyright (C) 1996 Markus Franz Xaver Johannes Oberhumer
#
# n=8 takes about 21 minutes on my Intel i486 DX2/66 (djgpp v2 + gcc 2.7.2)
n=8
d=$((n * 4))
dir=${1-"."}
./lzo_test -mbench -b262144 -s1 $dir -c$n -d$d
err=$?; if test $err != 0; then exit $err; fi
exit 0
| OS2World/LIB-lzo | util/bench.sh | Shell | gpl-2.0 | 826 |
#!/bin/bash
#############################################################################
# Copyright (C) 2013 Lawrence Livermore National Security, LLC.
# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
# Written by Albert Chu <[email protected]>
# LLNL-CODE-644248
#
# This file is part of Magpie, scripts for running Hadoop on
# traditional HPC systems. For details, see <URL>.
#
# Magpie is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Magpie is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Magpie. If not, see <http://www.gnu.org/licenses/>.
#############################################################################
# This script launches Storm across all nodes for the user
# Make sure the environment variable STORM_CONF_DIR is set.
# First argument is start or stop
if [ "$1X" == "X" ]
then
echo "User must start or stop as first argument"
exit 1
fi
if [ "$1" != "start" ] && [ "$1" != "stop" ]
then
echo "User must specify start or stop as first argument"
exit 1
fi
if [ "${STORM_CONF_DIR}X" == "X" ]
then
echo "User must specify STORM_CONF_DIR"
exit 1
fi
if [ ! -f ${STORM_CONF_DIR}/workers ]
then
echo "Cannot find file ${STORM_CONF_DIR}/workers"
exit 1
fi
if [ ! -f ${STORM_CONF_DIR}/magpie-launch-storm-env.sh ]
then
echo "Cannot find file ${STORM_CONF_DIR}/magpie-launch-storm-env.sh"
exit 1
fi
source ${STORM_CONF_DIR}/magpie-launch-storm-env.sh
if [ "${STORM_HOME}X" == "X" ]
then
echo "STORM_HOME not specified"
exit 1
fi
if [ "${STORM_LOCAL_DIR}X" == "X" ]
then
echo "STORM_LOCAL_DIR not specified"
exit 1
fi
if [ "${MAGPIE_SCRIPTS_HOME}X" == "X" ]
then
echo "MAGPIE_SCRIPTS_HOME not specified"
exit 1
fi
if [ ! -f "${MAGPIE_SCRIPTS_HOME}/bin/magpie-launch-storm.sh" ]
then
echo "Cannot find magpie-launch-storm.sh"
exit 1
fi
${MAGPIE_SCRIPTS_HOME}/bin/magpie-launch-storm.sh ${STORM_CONF_DIR} ${STORM_LOG_DIR} ${STORM_HOME} ${STORM_LOCAL_DIR} nimbus $1
${MAGPIE_SCRIPTS_HOME}/bin/magpie-launch-storm.sh ${STORM_CONF_DIR} ${STORM_LOG_DIR} ${STORM_HOME} ${STORM_LOCAL_DIR} ui $1
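# nimbus and ui were just started on the local (master) node; each worker
# listed in ${STORM_CONF_DIR}/workers now gets a supervisor and a logviewer
# via ssh in the loop below.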
RSH_CMD=${STORM_SSH_CMD:-ssh}
stormnodes=`cat ${STORM_CONF_DIR}/workers`
for stormnode in ${stormnodes}
do
${RSH_CMD} ${STORM_SSH_OPTS} ${stormnode} ${MAGPIE_SCRIPTS_HOME}/bin/magpie-launch-storm.sh ${STORM_CONF_DIR} ${STORM_LOG_DIR} ${STORM_HOME} ${STORM_LOCAL_DIR} supervisor $1
${RSH_CMD} ${STORM_SSH_OPTS} ${stormnode} ${MAGPIE_SCRIPTS_HOME}/bin/magpie-launch-storm.sh ${STORM_CONF_DIR} ${STORM_LOG_DIR} ${STORM_HOME} ${STORM_LOCAL_DIR} logviewer $1
done
| zorino/magpie | bin/magpie-storm.sh | Shell | gpl-2.0 | 3,051 |
#!/bin/bash
# Copyright 2012 Johns Hopkins University (Author: Daniel Povey). Apache 2.0.
# MPE training (or optionally boosted, if you give the --boost option).
# 4 iterations (by default) of Extended Baum-Welch update.
#
# For the numerator we have a fixed alignment rather than a lattice--
# this actually follows from the way lattices are defined in Kaldi, which
# is to have a single path for each word (output-symbol) sequence.
# Begin configuration section.
cmd=run.pl
num_iters=4
boost=0.0
cancel=true # if true, cancel num and den counts on each frame.
tau=400
weight_tau=10
acwt=0.1
stage=0
smooth_to_model=true
# End configuration section
echo "$0 $@" # Print the command line for logging
[ -f ./path.sh ] && . ./path.sh; # source the path.
. parse_options.sh || exit 1;
if [ $# -ne 5 ]; then
echo "Usage: steps/train_mmi.sh <data> <lang> <ali> <denlats> <exp>"
echo " e.g.: steps/train_mmi.sh data/train_si84 data/lang exp/tri2b_ali_si84 exp/tri2b_denlats_si84 exp/tri2b_mmi"
echo "Main options (for others, see top of script file)"
echo " --boost <boost-weight> # (e.g. 0.1), for boosted MMI. (default 0)"
echo " --cancel (true|false) # cancel stats (true by default)"
echo " --cmd (utils/run.pl|utils/queue.pl <queue opts>) # how to run jobs."
echo " --config <config-file> # config containing options"
echo " --stage <stage> # stage to do partial re-run from."
echo " --tau # tau for i-smooth to last iter (default 200)"
exit 1;
fi
data=$1
lang=$2
alidir=$3
denlatdir=$4
dir=$5
mkdir -p $dir/log
for f in $data/feats.scp $alidir/{tree,final.mdl,ali.1.gz} $denlatdir/lat.1.gz; do
[ ! -f $f ] && echo "$0: no such file $f" && exit 1;
done
nj=`cat $alidir/num_jobs` || exit 1;
[ "$nj" -ne "`cat $denlatdir/num_jobs`" ] && \
echo "$alidir and $denlatdir have different num-jobs" && exit 1;
sdata=$data/split$nj
splice_opts=`cat $alidir/splice_opts 2>/dev/null`
norm_vars=`cat $alidir/norm_vars 2>/dev/null` || norm_vars=false # cmn/cmvn option, default false.
mkdir -p $dir/log
cp $alidir/splice_opts $dir 2>/dev/null
cp $alidir/norm_vars $dir 2>/dev/null # cmn/cmvn option.
[[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1;
echo $nj > $dir/num_jobs
cp $alidir/{final.mdl,tree} $dir
silphonelist=`cat $lang/phones/silence.csl` || exit 1;
# Set up features
if [ -f $alidir/final.mat ]; then feat_type=lda; else feat_type=delta; fi
echo "$0: feature type is $feat_type"
case $feat_type in
delta) feats="ark,s,cs:apply-cmvn --norm-vars=$norm_vars --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | add-deltas ark:- ark:- |";;
lda) feats="ark,s,cs:apply-cmvn --norm-vars=$norm_vars --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $alidir/final.mat ark:- ark:- |"
cp $alidir/final.mat $dir
;;
*) echo "Invalid feature type $feat_type" && exit 1;
esac
[ -f $alidir/trans.1 ] && echo Using transforms from $alidir && \
feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark,s,cs:$alidir/trans.JOB ark:- ark:- |"
lats="ark:gunzip -c $denlatdir/lat.JOB.gz|"
if [[ "$boost" != "0.0" && "$boost" != 0 ]]; then
lats="$lats lattice-boost-ali --b=$boost --silence-phones=$silphonelist $alidir/final.mdl ark:- 'ark,s,cs:gunzip -c $alidir/ali.JOB.gz|' ark:- |"
fi
cur_mdl=$alidir/final.mdl
x=0
while [ $x -lt $num_iters ]; do
echo "Iteration $x of MPE training"
# Note: the num and den states are accumulated at the same time, so we
# can cancel them per frame.
if [ $stage -le $x ]; then
$cmd JOB=1:$nj $dir/log/acc.$x.JOB.log \
gmm-rescore-lattice $cur_mdl "$lats" "$feats" ark:- \| \
lattice-to-mpe-post --acoustic-scale=$acwt $cur_mdl \
"ark,s,cs:gunzip -c $alidir/ali.JOB.gz |" ark:- ark:- \| \
gmm-acc-stats2 $cur_mdl "$feats" ark,s,cs:- \
$dir/num_acc.$x.JOB.acc $dir/den_acc.$x.JOB.acc || exit 1;
n=`echo $dir/{num,den}_acc.$x.*.acc | wc -w`;
[ "$n" -ne $[$nj*2] ] && \
echo "Wrong number of MMI accumulators $n versus 2*$nj" && exit 1;
$cmd $dir/log/den_acc_sum.$x.log \
gmm-sum-accs $dir/den_acc.$x.acc $dir/den_acc.$x.*.acc || exit 1;
rm $dir/den_acc.$x.*.acc
$cmd $dir/log/num_acc_sum.$x.log \
gmm-sum-accs $dir/num_acc.$x.acc $dir/num_acc.$x.*.acc || exit 1;
rm $dir/num_acc.$x.*.acc
# note: this tau value is for smoothing towards model parameters (as in
# the Boosted MMI paper), not towards the ML stats as in the earlier
# work on discriminative training (e.g. my thesis).
# You could use gmm-ismooth-stats to smooth to the ML stats, if you had
# them available [here they're not available if cancel=true].
if ! $smooth_to_model; then
echo "Iteration $x of MPE: computing ml (smoothing) stats"
$cmd JOB=1:$nj $dir/log/acc_ml.$x.JOB.log \
gmm-acc-stats $cur_mdl "$feats" \
"ark,s,cs:gunzip -c $alidir/ali.JOB.gz | ali-to-post ark:- ark:- |" \
$dir/ml.$x.JOB.acc || exit 1;
$cmd $dir/log/acc_ml_sum.$x.log \
gmm-sum-accs $dir/ml.$x.acc $dir/ml.$x.*.acc || exit 1;
rm $dir/ml.$x.*.acc
num_stats="gmm-ismooth-stats --tau=$tau $dir/ml.$x.acc $dir/num_acc.$x.acc -|"
else
num_stats="gmm-ismooth-stats --smooth-from-model=true --tau=$tau $cur_mdl $dir/num_acc.$x.acc -|"
fi
$cmd $dir/log/update.$x.log \
gmm-est-gaussians-ebw $cur_mdl "$num_stats" $dir/den_acc.$x.acc - \| \
gmm-est-weights-ebw - $dir/num_acc.$x.acc $dir/den_acc.$x.acc $dir/$[$x+1].mdl || exit 1;
rm $dir/{den,num}_acc.$x.acc
fi
cur_mdl=$dir/$[$x+1].mdl
# Some diagnostics: the objective function progress and auxiliary-function
# improvement.
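# The perl one-liner below scans the accumulation logs, sums the per-job
# frame accuracies weighted by frame counts, and writes "avg-objf num-frames"
# to $dir/tmpf for the diagnostics that follow.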
tail -n 50 $dir/log/acc.$x.*.log | perl -e 'while(<STDIN>) { if(m/lattice-to-mpe-post.+Overall average frame-accuracy is (\S+) over (\S+) frames/) { $tot_objf += $1*$2; $tot_frames += $2; }} $tot_objf /= $tot_frames; print "$tot_objf $tot_frames\n"; ' > $dir/tmpf
objf=`cat $dir/tmpf | awk '{print $1}'`;
nf=`cat $dir/tmpf | awk '{print $2}'`;
rm $dir/tmpf
impr=`grep -w Overall $dir/log/update.$x.log | awk '{x += $10*$12;} END{print x;}'`
impr=`perl -e "print ($impr*$acwt/$nf);"` # We multiply by acwt, and divide by $nf which is the "real" number of frames.
# This gives us a projected objective function improvement.
echo "Iteration $x: objf was $objf, MPE auxf change was $impr" | tee $dir/objf.$x.log
x=$[$x+1]
done
echo "MPE training finished"
rm $dir/final.mdl 2>/dev/null
ln -s $x.mdl $dir/final.mdl
exit 0;
| StevenLOL/Research_speech_speaker_verification_nist_sre2010 | SRE2010/steps/train_mpe.sh | Shell | gpl-2.0 | 6,809 |
psql -U pi -d kriek -c 'delete from status_pidstatus cascade; delete from status_probestatus cascade; delete from status_ssrstatus cascade; delete from status_status cascade; delete from status_status_probes cascade;'
| jsproull/kriek | shell/deleteallstatus.sh | Shell | gpl-2.0 | 218 |
#!/bin/bash
# In The Name Of God
# ========================================
# [] File Name : run.sh
#
# [] Creation Date : 19-11-2015
#
# [] Created By : Parham Alvani ([email protected])
# =======================================
if [ -d /tmp/beehive ]; then
echo "Remove beehive states.. :)"
rm -Rf /tmp/beehive
fi
echo "Run beehive-hello :)"
go run beehive-hello.go
| 1995parham/Learning | go/beehive-hello/run.sh | Shell | gpl-2.0 | 377 |
# -*- shell-script -*-
# "set editing" debugger command
#
# Copyright (C) 2010, 2011, 2019 Rocky Bernstein <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, 59 Temple Place, Suite 330, Boston,
# MA 02111 USA.
_Dbg_help_add_sub set editing \
'**set editing** [ **on** | **off** | **emacs** | **gmacs** | **vi** ]
Readline editing of command lines.
See also:
---------
**show editing**
' 1
_Dbg_do_set_editing() {
typeset onoff=${1:-'on'}
case $onoff in
e | em | ema | emac | emacs )
_Dbg_edit='-e'
_Dbg_edit_style='emacs'
;;
g | gm | gma | gmac | gmacs )
_Dbg_edit='-e'
_Dbg_edit_style='gmacs'
;;
on | 1 )
_Dbg_edit='-e'
_Dbg_edit_style='emacs'
;;
off | 0 )
_Dbg_edit=''
return 0
;;
v | vi )
_Dbg_edit='-e'
_Dbg_edit_style='vi'
;;
* )
_Dbg_errmsg '"on", "off", "vi", "gmacs", or "emacs" expected.'
return 1
esac
set -o $_Dbg_edit_style
return 0
}
| rocky/kshdb | command/set_sub/editing.sh | Shell | gpl-2.0 | 1,602 |
#!/bin/bash
FILE=hist.txt
echo "Plotting Histogram as PNG"
gnuplot -persist << PLOT
set xlabel "Latency in us"
set ylabel "Number of latency samples"
set logscale y
set title "Latency Histogram Plot"
set terminal png
set output "hist.png"
plot "$FILE" using 1:2 with steps ls 1 title "CPU0"
quit
PLOT
echo "Done."
| ckuehnel/BeagleBone | plot.sh | Shell | gpl-2.0 | 334 |
#!/bin/bash
# Copyright (C) 2018 Robin Černín <[email protected]>
# Copyright (C) 2018 Pablo Iranzo Gómez <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# long_name: Outputs release for host
# description: Retrieves release data from host
# Load common functions
[[ -f "${CITELLUS_BASE}/common-functions.sh" ]] && . "${CITELLUS_BASE}/common-functions.sh"
FILE="${CITELLUS_ROOT}/etc/redhat-release"
is_required_file ${FILE}
# Fill metadata 'release' to value
echo "release"
cat ${FILE} >&2
exit ${RC_OKAY}
| zerodayz/citellus | citellusclient/plugins/metadata/system/release.sh | Shell | gpl-3.0 | 1,125 |
alias vi=nvim
alias vim=nvim
| talon/saltstack-states | devtools/zsh/files/zsh/aliases.zsh | Shell | gpl-3.0 | 29 |
#!/usr/bin/env bash
#
# Scrape the cf4ocl code, get all public functions and macros, check
# what is tested (and where) and what is not.
#
# Tested functions (and the test functions which call them) are printed
# to stdout. Untested functions are printed to stderr.
#
# Requires: ctags cut grep awk find cflow
# Author: Nuno Fachada <[email protected]>
# Licence: GNU General Public License version 3 (GPLv3)
# Date: 2016
#
# Get all cf4ocl functions
macros_to_ignore=G_GNUC_NULL_TERMINATED
ccl_functions=`LC_ALL=C ctags -I $macros_to_ignore -x --c-kinds=pd ../src/lib/*.h | cut -f1 -d " " | grep '^[^A-Z_]'`
# Get all tests source files
test_srcs=`find ../tests/lib/ -type f -iname "*test*"`
# For each cf4ocl function, get test functions which call it
declare -A fun_table
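# cflow prints an indented call tree for each test source. The awk program
# keeps the most recent first-level entry (the enclosing test function) in
# "caller" and prints it each time a line mentioning the cf4ocl function
# appears, so fun_table maps each function to its test callers.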
for ccl_fun in $ccl_functions
do
for test_src in $test_srcs
do
callers=`cflow $test_src | awk '/^ [^ ]/ { caller=$1 }; /^.*'$ccl_fun'/ { printf caller " " }'`
fun_table[${ccl_fun}]+=$callers
done
done
# Print cf4ocl functions and respective test caller functions
untested_funs=0
for ccl_fun in $ccl_functions
do
if [[ -n ${fun_table[$ccl_fun]} ]]
then
printf "$ccl_fun\n"
for caller in ${fun_table[$ccl_fun]}
do
printf "\t$caller\n"
done
else
untested_funs=$(($untested_funs+1))
printf "$ccl_fun\n" 1>&2
fi
done
printf "\n SUMMARY: cf4ocl has ${#fun_table[@]} functions, $untested_funs of which are untested.\n"
| FakenMC/cf4ocl | auxbuild/checktests.sh | Shell | gpl-3.0 | 1,420 |
#!/bin/sh
WORKERS=4
PORT=443
KEYFILE=/etc/letsencrypt/live/marat.uchicago.edu/privkey.pem
CERTFILE=/etc/letsencrypt/live/marat.uchicago.edu/fullchain.pem
ADDRESS=172.17.0.3
gunicorn --keyfile=$KEYFILE --certfile=$CERTFILE -k uvicorn.workers.UvicornWorker --bind=$ADDRESS:$PORT -w $WORKERS --access-logfile=/var/www/Commonplaces/access.log --error-logfile=/var/www/Commonplaces/error.log web_app:app
| ARTFL-Project/Commonplaces | web_server.sh | Shell | gpl-3.0 | 399 |
#
# Run this script to download kdcount source code.
#
tmp=$(mktemp -d)
if ! git diff --cached --exit-code; then
echo "There are changes in the index. Commit or reset first."
exit 1
fi
trap "rm -rf $tmp" EXIT
git clone https://github.com/rainwoodman/kdcount $tmp
sha=$(cd $tmp; git rev-parse --verify HEAD)
rsync -avz -f '- /*/' -f '- *.py' -f '- *.pyx' -f '- pykdcount.c' $tmp/kdcount/* .
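# The rsync filters exclude subdirectories and the Python bindings so that
# only the upstream C sources and Makefile are vendored here.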
git add *.[ch] Makefile
git commit -m "Update kdcount to $sha"
| rainwoodman/fastPM | depends/kdcount/down-integrate.sh | Shell | gpl-3.0 | 459 |
#!/bin/bash
# Collecting the call-for-opinion notices from some ministries of the State Council.
# Added at 09.08.2016
# Modified at 11.08.2016
. $HOME/CSObot/variables.sh "gwy-bw"
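# Every section below follows the same pipeline: curl the ministry's listing
# page, grep the entries dated this month, sed relative hrefs into absolute
# URLs, convert the HTML fragment to markdown with a bold heading prepended,
# and hand both files to a toAll.sh/toMe.sh bot script for delivery.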
# 1.-----------------------(Can't grep the URL on Arch? while okay on Debian-local.)
# wget -O - www.sda.gov.cn/WS01/CL0014/ | grep -2 "$Month1" | sed 's/\.\./http:\/\/www.sda.gov.cn\/WS01/g' > $Text
#iconv -f GB2312 -t UTF-8 $Text | pandoc -f html -t markdown | sed '1s/^/*国家食品药品监督总局征集意见----* /g' > $MDText
#. $HOME/CSObot/toMe.sh "$Text" "$MDText" "国家食品药品监督总局"
# 2.------------------------
curl http://www.moe.gov.cn/jyb_xxgk/moe_1777/moe_1778/ | grep "href=.*$Month" | sed 's/=\"\./=\"http:\/\/www.moe.gov.cn\/jyb_xxgk\/moe_1777\/moe_1778/g' > $Text
# Policy documents
curl http://www.moe.gov.cn/s78/A02/zfs__left/s5911/moe_621/ | grep "href=.*$Month" | sed 's/\.\.\/\.\.\/\.\.\/\.\.\/\.\./http:\/\/www.moe.gov.cn/g' >> $Text
# Notices and announcements
curl http://www.moe.gov.cn/jyb_xwfb/s248/ | grep "href=.*$Month" | sed 's/=\"\./=\"http:\/\/www.moe.gov.cn\/jyb_xwfb\/s248/g' >> $Text
# Requests for comments
pandoc -f html -t markdown $Text | sed '1s/^/**教育政策和教育部門規章** /g' | sed 's/ \".*\"//g' > $MDText
. $HOME/CSObot/toAll.sh "$Text" "$MDText" "教育部"
# 3.------------------------
curl http://www.nhfpc.gov.cn/zhuzhan/gongw/lists.shtml | grep "href=.*$Month" | sed 's/\.\.\/\.\./http:\/\/www.nhfpc.gov.cn/g' > $Text
pandoc -f html -t markdown_github $Text | sed -e '1s/^/**衛生計生委文件發佈** /g' -e 's/ \".*\"//g' -e 's/>//g' > $MDText
. $HOME/CSObot/toAll.sh "$Text" "$MDText" "卫生计生委"
# 4.------------------------
curl http://www.mohrss.gov.cn/SYrlzyhshbzb/zcfg/ | grep "href=.*$Month" | sed 's/\.\//http:\/\/www.mohrss.gov.cn\/SYrlzyhshbzb\/zcfg\//g' > $Text
pandoc -f html -t markdown $Text | sed '1s/^/**人社部文件發佈** /g' | sed 's/ \".*\"//g' > $MDText
. $HOME/CSObot/toAll.sh "$Text" "$MDText" "人社部"
# 5.------------------------ (Getting "400 Bad Request" when wget-ing telegram.org)
curl http://www.mohurd.gov.cn/wjfb/index.html | grep "href=.*$Month" > $Text
pandoc -f html -t markdown $Text | sed '1s/^/**住建部文件發佈** /g' > $MDText
. $HOME/CSObot/toMe.sh "$Text" "$MDText" "住建部"
# 6.------------------------
curl http://www.miit.gov.cn/n1146295/n1652858/index.html | grep -2 "href=.*$Month1" | grep -v "href=.*$Month1" | sed 's/\.\.\/\.\./http:\/\/www.miit.gov.cn/g' > $Text
# Policy documents (政策文件)
curl http://www.miit.gov.cn/n1146295/n1146557/n1146624/index.html | grep -2 "$Month1" | sed 's/\.\.\/\.\.\/\.\./http:\/\/www.miit.gov.cn/g' >> $Text
# Departmental regulations (部門規章)
curl http://www.miit.gov.cn/n1146295/n1146557/index.html | grep -2 "$Month1" | sed 's/\.\.\/\.\./http:\/\/www.miit.gov.cn/g' >> $Text
# Laws and regulations (法律法規)
pandoc -f html -t markdown $Text | sed '1s/^/**工信部文件和規章發佈** /g' > $MDText
. $HOME/CSObot/toAll.sh "$Text" "$MDText" "工信部"
echo "Done. Sent to TG bots."
echo
exit 0
|
mdrights/CSObot
|
legacy/gov/bot-gwy-bw.sh
|
Shell
|
gpl-3.0
| 3,077 |
rm -rf .env
rm -f .activate.sh
wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh
bash miniconda.sh -b -p .env/miniconda
rm miniconda.sh
export PATH=".env/miniconda/bin:$PATH"
hash -r
conda config --set always_yes yes --set changeps1 no
conda update -q conda
# Useful for debugging any issues with conda
conda info -a
PACKAGES="scipy numpy cvxopt scikit-learn pytest pip"
conda create -q -n test-env python=3.5 $PACKAGES
echo "source $(pwd)/.env/miniconda/bin/activate test-env" > .activate.sh
source ./.activate.sh
# source activate test-env
pip install pytest-cov
pip install coveralls
|
OliverTED/pyshapes
|
.create-env-miniconda.sh
|
Shell
|
gpl-3.0
| 636 |
#!/bin/bash
chmod +x ./devices/oneplusone/ubuntu/*.sh
chmod +x ./devices/oneplusone/android/*.sh
chmod +x ./devices/generic/ubuntu/*.sh
chmod +x ./devices/generic/android/*.sh
clear
echo ""
echo "OnePlus One - bacon"
echo ""
echo ""
sleep 1
echo "Ubuntu: "
echo ""
echo "[1] Install Ubuntu"
echo "[2] Install OpenStore"
echo ""
echo "[3] Back Up your UBports device"
echo ""
echo ""
echo "Android: "
echo ""
echo "[4] Install LineageOS 14.1"
echo "[5] Install LineageOS 14.1 without Gapps"
echo ""
echo "[6] Install Sailfish OS"
echo ""
echo "[7] Install TWRP recovery"
echo "[8] Back Up your Android device"
echo "[9] Lock/Unlock bootloader (Will wipe existing apps/data)"
echo ""
echo ""
echo "[0] Back to menu "
echo ""
echo -n "Enter option: "; read option
case "$option" in
1)
. ./devices/oneplusone/ubuntu/ubuntu.sh
;;
2)
. ./devices/generic/ubuntu/openstore.sh
;;
3)
. ./devices/generic/ubuntu/ubports-backup.sh
;;
4)
. ./devices/oneplusone/android/lineageos.sh
;;
5)
. ./devices/oneplusone/android/lineageoswogapps.sh
;;
6)
. ./devices/oneplusone/android/sailfishos.sh
;;
7)
. ./devices/oneplusone/android/twrp.sh
;;
8)
. ./devices/generic/android/backup.sh
;;
9)
. ./devices/oneplusone/android/bootloader.sh
;;
0)
. ./launcher.sh
;;
*)
echo ""
echo "Invalid Option"
echo ""
echo "Exiting magic-device-tool. Bye Bye"
exit
;;
esac
|
MariusQuabeck/magic-device-tool
|
devices/oneplusone/oneplusone.sh
|
Shell
|
gpl-3.0
| 1,377 |
#!/bin/bash
#################################
## Move files to a directory ##
#################################
# Variables
rojo="\033[1;31m"
verde="\033[1;32m"
amarillo="\033[1;33m"
azul="\033[1;34m"
noColor="\033[1;00m"
function info(){
echo -e "$verde A$amarillo directory$verde is required, followed by the files"
echo -e "$verde Run it with the syntax:$rojo sh $0 directory$azul file1 file2 file3...$noColor"
echo ""
}
# Check that input parameters exist and that the first one is a directory
if [ ! -d "$1" ] || [ $# -lt 2 ]; then
info
exit 1
fi
echo -e "\n$verde Total number of parameters -->$rojo $#$noColor\n"
# Check that the remaining input parameters are files
for (( i=2; i<=$#; i++ ))
do
eval z='${'$i'}' # Builds the name of the i-th positional parameter and fetches its value; also handles positions beyond $9
if [ -f "$z" ]; then
echo -e "$verde File$rojo $z$verde is being copied to$rojo $1$noColor"
cp "$z" "$1"
else
echo -e "$rojo$z$verde is not a file; it will not be copied$noColor"
fi
done
exit 0
|
fryntiz/ciclosuperior
|
Scripts_Bash/Nivel Bajo-Medio/5_copiarch.sh
|
Shell
|
gpl-3.0
| 1,013 |
echo "Mode,Type,Instance,Seed,User-Time,System-Time,Wall-Clock-Time,MemoryConsumption,Number of Memory-Errors,Number of Timeout-Errors,Number of Exitcode-Errors,Exit-Code,Lines stdout,Lines stderr,Command"
pid=$$
for i in `ls input/`
do
#echo $i
if [[ $i =~ \.lp$ ]]
#if [[ $i =~ .* ]]
then
for j in `seq 1 1 5`
do
seed=$RANDOM
echo $i >> out.$pid
#/usr/bin/time ls 2>&1 > /dev/null | grep -o "[0-9]\+\.[0-9]\+"
nmbr=`bash ../dynamic_programming_test.sh input/$i ../encodings/vc_edge.lp "--seed $seed" "--configuration=$1" "in" "in" "vc_edge_dynamic_programming_$i.$j" edge 2>&1 >> out.$pid | grep -o "^[0-9]\+\.[0-9]\+"`
#mv g.xml g.xml.$i.$j
echo "heuristic,vc_edge_dynamic_programming,$i,$seed,$nmbr,0,0,0,0,0,0,0,0,0,0"
done
fi
done
|
hmarkus/dynclasp
|
examples/vertexcover_dynamic_programming.sh
|
Shell
|
gpl-3.0
| 766 |
#!/bin/bash
# xset led 3
# setleds -L +caps < /dev/tty7
# tleds
function do_beep
{
if [ "$(which beep)" != "" ];
then
beep -r 3 -f 30 -d 1;
fi
for (( i=0; i < 10; i++ ))
{
./bin/ledctl ~num > /dev/null;
sleep 0.05;
}
}
do_beep;
|
progman/ledctl
|
test.sh
|
Shell
|
gpl-3.0
| 244 |
#!/bin/bash
# loop, starting kodi when needed
. /etc/opt/mythtv/mythtv.conf
scriptname=`readlink -e "$0"`
scriptpath=`dirname "$scriptname"`
scriptname=`basename "$scriptname" .sh`
hostname=` tr '[a-z]' '[A-Z]' < /etc/hostname`
font=standard
# Bug in Jessie means rpcbind does not start with system restart
# It is needed for NFS
if ! nc -z -v localhost 111 ; then
sudo service rpcbind start
sleep 1
sudo service nfs-kernel-server restart
fi
if (( REBOOT_DAYS < 1 )) ; then
REBOOT_DAYS=1
fi
if [[ `tty` != /dev/tty1 ]] ; then
while true ; do
clear
figlet -f $font " $hostname"$'\n'" Please Press"$'\n'" Enter or OK"
pwd=
read -s pwd
sudo chvt 1
done
fi
kodiver=`/usr/bin/kodi --version|head -1|cut -f 1 -d '.'`
while true ; do
clear
figlet -f $font " $hostname - $kodiver"$'\n'" 22 - TV"$'\n'" 33 - Videos"$'\n'" 99 - Disconnect"
retry=Y
pwd=
while [[ "$retry" == Y ]] ; do
read -s pwd
if [[ "$pwd" == 22 ]] ; then
$scriptpath/wakeup.sh "$MAINHOST"
cp -f ~/.kodi/peter$kodiver/guisettings_pvrenabled.xml ~/.kodi/userdata/guisettings.xml
break
fi
if [[ "$pwd" == 33 ]] ; then
cp -f ~/.kodi/peter$kodiver/guisettings_pvrdisabled.xml ~/.kodi/userdata/guisettings.xml
break
fi
if [[ "$pwd" == 99 ]] ; then
clear
figlet -f $font " Shutting down"
sleep 3
sudo shutdown -h now
fi
done
clear
echo;echo;echo;echo;echo
# figlet -f $font " Starting Kodi."
LOG_FILE=$HOME/.kodi/temp/kodi.log
rm -f $LOG_FILE.1
mv -f $LOG_FILE $LOG_FILE.1
rm -f $LOG_FILE
/usr/lib/kodi/kodi.bin --standalone &
while [[ ! -f $LOG_FILE ]] ; do
sleep 1s
done
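# follow the log until kodi reports shutdown (tail exits if the process dies)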
while read line ; do
if [[ ${line} =~ "application stopped" ]] ; then
echo "Killing kodi"
break
fi
done < <(tail --pid=`pidof -s /usr/lib/kodi/kodi.bin` -f -n0 $LOG_FILE)
killall kodi.bin
fbset -depth 8 && fbset -depth 16
s7daysago=`date --date="$REBOOT_DAYS days ago" +%F`
priorreboot=`cat $DATADIR/reboot_date`
if [[ "$priorreboot" = "$s7daysago" || "$priorreboot" < "$s7daysago" ]] ; then
date +%F > $DATADIR/reboot_date
clear
figlet -f $font " Restarting"
sudo shutdown -r now
fi
done
|
bennettpeter/mythscripts
|
install/opt/mythtv/bin/kodiloop.sh
|
Shell
|
gpl-3.0
| 2,423 |
#!/bin/bash
#
# @brief Notify when a particular user has logged out
# @version ver.1.0
# @date Fri Oct 16 20:46:32 2015
# @company Frobas IT Department, www.frobas.com 2015
# @author Vladimir Roncevic <[email protected]>
#
UTIL_LOGGED_OUT=logged_out
UTIL_LOGGED_OUT_VERSION=ver.1.0
UTIL=/root/scripts/sh_util/${UTIL_LOGGED_OUT_VERSION}
UTIL_LOG=${UTIL}/log
. ${UTIL}/bin/devel.sh
. ${UTIL}/bin/usage.sh
declare -A LOGGED_OUT_Usage=(
[USAGE_TOOL]="${UTIL_LOGGED_OUT}"
[USAGE_ARG1]="[LOGOUT_STRUCT] System username and time"
[USAGE_EX_PRE]="# Checking user to log out"
[USAGE_EX]="${UTIL_LOGGED_OUT} \$LOGOUT_STRUCT"
)
#
# @brief Notify when a particular user has logged out
# @param Value required structure username and time
# @retval Success return 0, else return 1
#
# @usage
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
#
# declare -A LOGOUT_STRUCT=(
# [USERNAME]="vroncevic"
# [TIME]=$time
# )
#
# logged_out LOGOUT_STRUCT
# local STATUS=$?
#
# if [ $STATUS -eq $SUCCESS ]; then
# # true
# # notify admin | user
# else
# # false
# # missing argument(s)
# # return $NOT_SUCCESS
# # or
# # exit 128
# fi
#
function logged_out {
local -n LOGOUT_STRUCT=$1
local USR=${LOGOUT_STRUCT[USERNAME]} TIME=${LOGOUT_STRUCT[TIME]}
if [[ -n "${USR}" && -n "${TIME}" ]]; then
local FUNC=${FUNCNAME[0]} MSG="None"
MSG="Notify when a particular user has logged out!"
info_debug_message "$MSG" "$FUNC" "$UTIL_LOGGED_OUT"
who | grep "^${USR} " > /dev/null 2>&1
if [[ $? != 0 ]]; then
MSG="User [${USR}] is not logged in!"
info_debug_message "$MSG" "$FUNC" "$UTIL_LOGGED_OUT"
info_debug_message_end "Done" "$FUNC" "$UTIL_LOGGED_OUT"
return $SUCCESS
fi
while who | grep "^${USR} "; do
sleep ${TIME}
done
MSG="User [${USR}] just logged out!"
info_debug_message "$MSG" "$FUNC" "$UTIL_LOGGED_OUT"
info_debug_message_end "Done" "$FUNC" "$UTIL_LOGGED_OUT"
return $SUCCESS
fi
usage LOGGED_OUT_Usage
return $NOT_SUCCESS
}
|
vroncevic/sh-util
|
sh_tool/bin/logged_out.sh
|
Shell
|
gpl-3.0
| 2,184 |
./NetconfServerSimulator.sh ../xmlNeModel/DVM_MWCore12_BasicAir.xml 12003 ../yang/yangNeModel
|
Melacon/WirelessTransportEmulator
|
NetconfServerSimulator/build/ne-12-p12003.sh
|
Shell
|
gpl-3.0
| 94 |
#!/bin/bash
./kinectPlanes.sh kinect.params --depth 10
|
fkie/structure_coloring_fkie
|
scripts/kinectOctree10.sh
|
Shell
|
gpl-3.0
| 55 |
#!/bin/bash
# This is a script shell for deploying a meshlab-portable folder.
# Requires a properly built meshlab (see windows_build.sh).
#
# Without given arguments, the folder that will be deployed is meshlab/src/install.
#
# You can give as argument the path where meshlab is installed.
SCRIPTS_PATH="$(dirname "$(realpath "$0")")"
DISTRIB_PATH=$SCRIPTS_PATH/../../distrib
INSTALL_PATH=$SCRIPTS_PATH/../../src/install
#checking for parameters
for i in "$@"
do
case $i in
-i=*|--install_path=*)
INSTALL_PATH="${i#*=}"
shift # past argument=value
;;
*)
# unknown option
;;
esac
done
windeployqt $INSTALL_PATH/meshlab.exe
windeployqt $INSTALL_PATH/plugins/filter_sketchfab.dll --libdir $INSTALL_PATH/
mv $INSTALL_PATH/lib/meshlab/IFX* $INSTALL_PATH
cp $INSTALL_PATH/IFXCoreStatic.lib $INSTALL_PATH/lib/meshlab/
cp $DISTRIB_PATH/LICENSE.txt $INSTALL_PATH/
cp $DISTRIB_PATH/privacy.txt $INSTALL_PATH/
#at this point, distrib folder contains all the files necessary to execute meshlab
echo "$INSTALL_PATH is now a self contained meshlab application"
|
cnr-isti-vclab/meshlab
|
scripts/Windows/2_deploy.sh
|
Shell
|
gpl-3.0
| 1,087 |
#!/bin/bash
docker start cidsreference_cids-integration-base
|
cismet/docker-volumes
|
cids-reference/cids-integration-base/start.sh
|
Shell
|
gpl-3.0
| 61 |
#!/bin/bash
source /data/project/wikitweets/tweetsenv/bin/activate
python /data/project/wikitweets/enwikinewpages/enwikinewpages.py
deactivate
|
emijrp/wikitweets
|
enwikinewpages/enwikinewpages.sh
|
Shell
|
gpl-3.0
| 144 |
#!/bin/sh
# exercise the resize library; FAT and HFS+ only
# Copyright (C) 2009-2014 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
. "${srcdir=.}/init.sh"; path_prepend_ ../parted .
require_hfs_
require_fat_
require_root_
require_scsi_debug_module_
require_512_byte_sector_size_
ss=$sector_size_
start=63s
default_end=546147s
new_end=530144s
# create memory-backed device
scsi_debug_setup_ dev_size_mb=550 > dev-name ||
skip_ 'failed to create scsi_debug device'
dev=$(cat dev-name)
fail=0
parted -s $dev mklabel gpt > out 2>&1 || fail=1
# expect no output
compare /dev/null out || fail=1
# ensure that the disk is large enough
dev_n_sectors=$(parted -s $dev u s p|sed -n '2s/.* \([0-9]*\)s$/\1/p')
device_sectors_required=$(echo $default_end | sed 's/s$//')
# Ensure that $dev is large enough for this test
test $device_sectors_required -le $dev_n_sectors || fail=1
for fs_type in hfs+ fat32 fat16; do
echo "fs_type=$fs_type"
# create an empty $fs_type partition, cylinder aligned, size > 256 MB
parted -a min -s $dev mkpart p1 $start $default_end > out 2>&1 || fail=1
compare /dev/null out || fail=1
# print partition table
parted -m -s $dev u s p > out 2>&1 || fail=1
# wait for new partition device to appear
wait_for_dev_to_appear_ ${dev}1
case $fs_type in
fat16) mkfs_cmd='mkfs.vfat -F 16'; fsck='fsck.vfat -v';;
fat32) mkfs_cmd='mkfs.vfat -F 32'; fsck='fsck.vfat -v';;
hfs*) mkfs_cmd='mkfs.hfs'; fsck=fsck.hfs;;
*) error "internal error: unhandled fs type: $fs_type";;
esac
# create the file system
$mkfs_cmd ${dev}1 || fail=1
# NOTE: shrinking is the only type of resizing that works.
# resize that file system to be one cylinder (8MiB) smaller
fs-resize ${dev}1 0 $new_end > out 2>&1 || fail=1
# check for expected output
case $fs_type in
fat16) cat << EOF > exp || framework_failure
Information: Would you like to use FAT32? If you leave your file system as FAT16, then you will have no problems. If you convert to FAT32, and MS Windows is installed on this partition, then you must re-install the MS Windows boot loader. If you want to do this, you should consult the Parted manual (or your distribution's manual). Also, converting to FAT32 will make the file system unreadable by MS DOS, MS Windows 95a, and MS Windows NT.
EOF
;;
fat32) cat /dev/null > exp || framework_failure;; # expect no output
hfs*) cat /dev/null > exp || framework_failure;; # expect no output
esac
compare exp out || fail=1
# This is known to segfault with fsck.hfs from
# Fedora 16's hfsplus-tools-332.14-12.fc15.x86_64.
# You can build a working version from
# git://cavan.codon.org.uk/hfsplus-tools.git
# Skip the fsck.hfs test unless it understands the -v option.
skip=0
case $fs_type in
hfs*) $fsck -v || { warn_ skipping $fsck test; skip=1; } ;; esac
if test $skip = 0; then
$fsck ${dev}1 > out || fail=1
cat out
# Oops. Currently, fsck.hfs reports this:
# Executing fsck_hfs (version 540.1-Linux).
# ** Checking non-journaled HFS Plus Volume.
# The volume name is untitled
# ** Checking extents overflow file.
# ** Checking catalog file.
# ** Checking multi-linked files.
# ** Checking catalog hierarchy.
# ** Checking volume bitmap.
# Volume bitmap needs minor repair for orphaned blocks
# ** Checking volume information.
# Invalid volume free block count
# (It should be 67189 instead of 65197)
# Volume header needs minor repair
# (2, 0)
# FIXME: This means the HFS resizing code is wrong.
# FIXME: parse "out" for FS size and verify that it's the new, smaller size
fi
# Remove the partition explicitly, so that mklabel doesn't evoke a warning.
parted -s $dev rm 1 || fail=1
# Create a clean partition table for the next iteration.
parted -s $dev mklabel gpt > out 2>&1 || fail=1
# expect no output
compare /dev/null out || fail=1
done
Exit $fail
|
sjas/parted
|
tests/t3000-resize-fs.sh
|
Shell
|
gpl-3.0
| 4,610 |
#!/bin/sh
TOOLDIR=../../tools/src
if [ "$srcdir" = "" ] ; then
srcdir=. ;
fi
if ! $TOOLDIR/hfst-pmatch2fst $srcdir/pmatch_blanks.txt > test ; then
exit 1
fi
# Test with any old string
if ! $TOOLDIR/hfst-pmatch test < $srcdir/cat.strings > pmatch.out ; then
exit 1
fi
rm -f pmatch.out test
exit 0
|
unhammer/hfst3
|
test/tools/pmatch2fst-functionality.sh
|
Shell
|
gpl-3.0
| 325 |
#!/bin/sh
export GR_DONT_LOAD_PREFS=1
export srcdir=/home/zitouni/gnuradio-3.6.1/gr-atsc/src/python
export PATH=/home/zitouni/gnuradio-3.6.1/build/gr-atsc/src/python:$PATH
export LD_LIBRARY_PATH=/home/zitouni/gnuradio-3.6.1/build/volk/lib:/home/zitouni/gnuradio-3.6.1/build/gruel/src/lib:/home/zitouni/gnuradio-3.6.1/build/gnuradio-core/src/lib:/home/zitouni/gnuradio-3.6.1/build/gr-atsc/src/lib:$LD_LIBRARY_PATH
export PYTHONPATH=/home/zitouni/gnuradio-3.6.1/build/gnuradio-core/src/python:/home/zitouni/gnuradio-3.6.1/build/gnuradio-core/src/lib/swig:/home/zitouni/gnuradio-3.6.1/build/gr-atsc/src/lib:$PYTHONPATH
/usr/bin/python -B /home/zitouni/gnuradio-3.6.1/gr-atsc/src/python/qa_atsc.py
|
zitouni/gnuradio-3.6.1
|
build/gr-atsc/src/python/qa_atsc_test.sh
|
Shell
|
gpl-3.0
| 695 |
#!/bin/bash
# Usage: $ ./run_power_experiments_wks-51-03.sh binary_path binary_name input_size run_time \
# testpower change_size
echo "Running on wks-51-03"
eval `cat ../data/tmp/current_ssh-agent.txt`
echo -n > ../data/tmp/current_remote_script.sh
echo "echo -n > /mnt/ramdisk/output.txt" >> ../data/tmp/current_remote_script.sh
echo "echo \"Starting script remotely\" | tee -a /mnt/ramdisk/output.txt" >> ../data/tmp/current_remote_script.sh
echo "./${2} -input-size $3 -runtime $4 -testpower $5 -change-size $6 2>&1 | tee -a /mnt/ramdisk/output.txt" >> ../data/tmp/current_remote_script.sh
ssh [email protected] "killall power_experiments"
scp ../data/tmp/current_remote_script.sh [email protected]:~/current_remote_script.sh
scp ../../bin/${1}/${2} [email protected]:~/${2}
ssh [email protected] "chmod +x current_remote_script.sh && ./current_remote_script.sh"
scp [email protected]:/mnt/ramdisk/output.txt ../data/tmp/current_run_power_experiments_cmd.txt
ssh [email protected] "rm -f ${2} current_remote_script.sh /mnt/ramdisk/output.txt"
|
matthewhammer/ceal
|
testpower/scripts/run_power_experiments_wks-51-03.sh
|
Shell
|
gpl-3.0
| 1,157 |
#!/usr/bin/env bash
#------------------------------------------------------------------------------
# Bash script to execute the Solidity tests.
#
# The documentation for solidity is hosted at:
#
# https://solidity.readthedocs.org
#
# ------------------------------------------------------------------------------
# This file is part of solidity.
#
# solidity is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# solidity is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with solidity. If not, see <http://www.gnu.org/licenses/>
#
# (c) 2017 solidity contributors.
#------------------------------------------------------------------------------
set -e
if [ ! -f "$1" -o -z "$2" ]
then
echo "Usage: $0 <path to soljson.js> <version>"
exit 1
fi
SOLJSON="$1"
VERSION="$2"
DIR=$(mktemp -d)
(
echo "Preparing solc-js (master)..."
git clone --depth 1 --branch master https://github.com/ethereum/solc-js "$DIR"
cd "$DIR"
# disable "prepublish" script which downloads the latest version
# (we will replace it anyway and it is often incorrectly cached
# on travis)
npm config set script.prepublish ''
npm install
# Replace soljson with current build
echo "Replacing soljson.js"
rm -f soljson.js
cp "$SOLJSON" soljson.js
# Update version (needed for some tests)
echo "Updating package.json to version $VERSION"
npm version --allow-same-version --no-git-tag-version $VERSION
echo "Running solc-js tests..."
npm run test
)
rm -rf "$DIR"
|
winsvega/solidity
|
test/solcjsTests.sh
|
Shell
|
gpl-3.0
| 1,950 |
#!/bin/bash
export ROOTPATH="$(dirname "$(readlink -f "$0")")/.."
OS=""
PACKAGE_SYSTEM=""
UNAME=`uname`
DATE_STRING=`date +"%Y-%m-%d_%H%M%S"`
LOG_FILE="$ROOTPATH/tools/MATLAB_Engine_install_log_$DATE_STRING.log"
FILE_LOG_LEVEL=7
CONSOLE_LOG_LEVEL=5
MLC_PYTHON_DIR="$ROOTPATH/mlc_python"
UNINSTALL=0
INSTALLED=0
PACKAGE_PATH=""
MATLAB_DIR=""
PYTHON_ENG_DIR="extern/engines/python"
function usage() {
log_message 5 "MLC Installer. Available options:"
log_message 5 " -h: Show this help"
log_message 5 " -d <dir>: Absolute path where MATLAB is installed in the system. Mandatory!"
}
# LOG LEVELS:
# 0: EMERGENCY
# 1: CRITICAL
# 2: ERROR
# 3: WARNING
# 4: NOTICE
# 5: INFO
# 6: DEBUG
# 7: TRACE
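#
# Example (hypothetical message): a NOTICE that reaches both the log file
# (FILE_LOG_LEVEL=7) and the console (CONSOLE_LOG_LEVEL=5):
# log_message 4 "Starting installation"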
function log_message() {
LOG_LEVEL=$1
LOG_MESSAGE=$2
if [ $LOG_LEVEL -le $FILE_LOG_LEVEL ]; then
echo -e "$LOG_MESSAGE" >> $LOG_FILE
fi
if [ $LOG_LEVEL -le $CONSOLE_LOG_LEVEL ]; then
echo -e "$LOG_MESSAGE"
fi
}
function parse_args() {
log_message 7 "Checking command line arguments"
# Needed to make getopts work
local OPTIND opt
OPTSPEC=":hup:d:"
while getopts $OPTSPEC opt; do
case "${opt}" in
h)
usage
exit 0
;;
d)
if [ -z "$OPTARG" ]; then
log_message 1 "option '-d' requires an argument"
usage
exit 1
fi
MATLAB_DIR=$OPTARG
;;
\?)
log_message 1 "ERROR: Invalid option received"
usage
exit 1
;;
esac
done
if [ $UNINSTALL -eq 1 ]; then
return
fi
if [ x"$MATLAB_DIR" = "x" ]; then
log_message 1 "MATLAB dir must be supplied."
usage
exit 1
fi
}
function check_run_as_root() {
case `id` in
uid=0*) ;;
*)
log_message 1 "ERROR: Must be root to run script (use -h for help). Aborting installation"
exit 1
;;
esac
}
function install_matlab_engine() {
# Move to Python engine directory to install the Engine module. It doesn't work in another way
cd "$MATLAB_DIR/$PYTHON_ENG_DIR"
#Remove the build directory if it exists
rm -rf "$MATLAB_DIR/$PYTHON_ENG_DIR/build"
$MLC_PYTHON_DIR/bin/mlc_python "$MATLAB_DIR/$PYTHON_ENG_DIR/setup.py" build > /dev/null 2>&1
if [ $? -ne 0 ]; then
log_message 1 "An error occurred while building MATLAB Python Package. Aborting installation"
exit 1
fi
$MLC_PYTHON_DIR/bin/mlc_python "$MATLAB_DIR/$PYTHON_ENG_DIR/setup.py" install > /dev/null 2>&1
if [ $? -ne 0 ]; then
log_message 1 "An error occurred while installing MATLAB Python Package. Aborting installation"
exit 1
fi
log_message 5 "MATLAB Engine was successfully installed"
}
function main() {
log_message 4 "#################################"
log_message 4 "# MATLAB Engine Installer #"
log_message 4 "#################################"
log_message 4 "Saving log to $LOG_FILE"
if [ $# -ne 1 -o "$1" != "-h" ]; then
check_run_as_root
fi
parse_args $*
install_matlab_engine
}
main $*
|
MachineLearningControl/OpenMLC-Python
|
tools/installer/Dockerfiles/deploy_scripts/install_matlab_engine.sh
|
Shell
|
gpl-3.0
| 3,260 |
#!/bin/tcsh
#
#$ -S /bin/tcsh
#$ -cwd
#$ -pe cordelia 1
#$ -l mem_free=1.5G
set EXEDIR=/data/rw14/galtay/sphray_update/sphray/src
set EXE=${EXEDIR}/sphray_He
set INDIR=/data/rw14/galtay/sphray_update/sphray/data/config/iliev_small_tests
set INFILE=${INDIR}/iliev_test1_He_N64_R8.config
${EXE} ${INFILE}
|
spthm/sphray
|
sge/iliev_tests/it1_he_r8.sh
|
Shell
|
gpl-3.0
| 308 |
#!/bin/bash
MAILTO=root
CONFIG=/etc/login-email.conf
declare -A watchfor
watchfor['root']=yes
if [ -f "${CONFIG}" ]; then
# feed the loop via process substitution so the array assignments persist
while read name; do
watchfor["${name}"]=yes
done < <(/bin/sed -r -e 's/[#].*$//' -e '/^[[:space:]]*$/d' "${CONFIG}")
fi
if [ "${watchfor[${USER}]}" ]; then
(
FMT='%7s: %s\n'
/bin/printf "${FMT}" 'Date' "$(/bin/date)"
/bin/printf "${FMT}" 'User' "${USER}"
/bin/printf "${FMT}" 'Host' "${HOSTNAME}"
/bin/printf "${FMT}" 'Shell' "${SHELL}"
/bin/printf "${FMT}" 'TTY' "$(/bin/tty)"
cat <<-EOF
Last few logins:
-------------------------------------------------------
$(
/bin/last ${USER} | /bin/head -n5
)
-------------------------------------------------------
EOF
) | /bin/mailx -s "Login watch for ${HOSTNAME}" "${MAILTO}"
fi
|
megacoder/login-email
|
login-email.sh
|
Shell
|
gpl-3.0
| 787 |
perl SDSS_Rename_DR7.pl
wget -i sdss-wget-PSF_DR7.lis
chmod 755 Rename_PSF_DR7.sh
./Rename_PSF_DR7.sh
|
pdn4kd/SDSS-Tidal-Parameter-Analysis
|
exec0_DR7.sh
|
Shell
|
gpl-3.0
| 102 |
#!/bin/bash
function infra_report() {
# Given an autoscale group name, we'll report on its current status
AUTOSCALENAME=$1 # Passed to this function
echo " ";echo " ";echo "================================================="
echo " ------ ${AUTOSCALENAME} STATUS -----"
LAUNCHCONFIGNAME=$(aws autoscaling describe-auto-scaling-groups \
--auto-scaling-group-names ${AUTOSCALENAME} \
--query 'AutoScalingGroups[*].LaunchConfigurationName' \
--output text)
echo "=== LAUNCH CONFIG STATUS ==="
echo "LaunchConfigName AMI InstanceType CreatedDate"
# Show me a summary of what the launch config is set for
aws autoscaling describe-launch-configurations \
--launch-configuration-name ${LAUNCHCONFIGNAME} \
--query 'LaunchConfigurations[*].[LaunchConfigurationName, ImageId, InstanceType, CreatedTime]' \
--output text
echo "=== EC2 STATUS ==="
echo "InstanceId AvailabilityZone Health State LaunchConfig"
aws autoscaling describe-auto-scaling-groups \
--auto-scaling-group-names ${AUTOSCALENAME} \
--query 'AutoScalingGroups[*].Instances[*].[InstanceId, AvailabilityZone, HealthStatus, LifecycleState,LaunchConfigurationName]' \
--output text
echo "=== CURRENT SCALE ==="
echo "min / desired / max size"
aws autoscaling describe-auto-scaling-groups \
--auto-scaling-group-names ${AUTOSCALENAME} \
--query 'AutoScalingGroups[*].[MinSize, DesiredCapacity, MaxSize]' \
--output text
ELBNAME=$(aws autoscaling describe-auto-scaling-groups \
--auto-scaling-group-names ${AUTOSCALENAME} \
--query 'AutoScalingGroups[*].LoadBalancerNames' \
--output text)
echo "=== ELB ENDPOINT ==="
aws elb describe-load-balancers \
--load-balancer-name ${ELBNAME} \
--query 'LoadBalancerDescriptions[*].DNSName' \
--output text
echo "=== ELB HEALTH ==="
echo "InstanceID InstanceHealth"
aws elb describe-instance-health \
--load-balancer-name ${ELBNAME} \
--query 'InstanceStates[*].[InstanceId, State]' \
--output text
}
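# Usage sketch (hypothetical autoscale group name):
# infra_report as-stage-socorroweb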
|
rhelmer/socorro-infra
|
bin/lib/infra_status.sh
|
Shell
|
mpl-2.0
| 2,285 |
source env.sh
TARGET=$1
if [ -z "$TARGET" ]; then
echo "ERROR: Missing required date argument"
exit 1
fi
BASE=$(pwd)
echo "Getting client offsets for $TARGET in $DATA_BUCKET"
# Make a schema
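# (schema_template.json is assumed to carry a __TARGET__ placeholder that the
# sed below replaces with the requested date; the date format is an assumption)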
cat $BASE/schema_template.json | sed -r "s/__TARGET__/$TARGET/" > $BASE/schema.$TARGET.json
LIST=$BASE/s3files.$TARGET.list
echo "Listing files for $TARGET"
$HEKA_PATH/heka-s3list -bucket $DATA_BUCKET -bucket-prefix $DATA_PREFIX -schema $BASE/schema.$TARGET.json > $LIST
# TODO: save $LIST too.
echo "Fetching data for $(wc -l < $LIST) files..."
time cat $LIST | $HEKA_PATH/heka-s3cat -bucket $DATA_BUCKET -format offsets -stdin -output $BASE/$TARGET.tmp &> $TARGET.log
echo "Checking for errors..."
# TODO: If there are any, exit nonzero.
ERRS=$(grep -i error $TARGET.log | wc -l)
if [ "$ERRS" -ne "0" ]; then
echo "Encountered indexing errors:"
grep -i error $TARGET.log
fi
echo "Sorting data..."
# Explicitly set locale so that sorts are stable.
LC_ALL=C sort -k 2,2 -k 1,1 -k 3,3n $BASE/$TARGET.tmp > $BASE/$TARGET.txt
|
mreid-moz/client_indexer
|
index.sh
|
Shell
|
mpl-2.0
| 1,039 |
#!/bin/sh
# --------------------------------------------------------------------------------
# This script is building packages for Alignak-WebUI thanks to the fpm
# application (https://github.com/jordansissel/fpm).
# -----
# Using this script and fpm requires:
# sudo apt-get install ruby ruby-dev rubygems build-essential
# sudo gem install --no-ri --no-rdoc fpm
# -----
# This script updates the .bintray-*.json file to update:
# - the target repo, replacing sed_version_repo with the appropriate
# repository name: alignak-deb-testing or alignak-deb-stable
# - the version name, description and release date, replacing
# sed_version_name, sed_version_desc and sed_version_released
# -----
# Command line parameters:
# - git branch name:
# - master will build a stable version (alignak_deb-stable repository)
# -> python-alignak_x.x.x_all.deb
# - develop will build a develop version (alignak_deb-testing repository)
# -> python-alignak_x.x.x-dev_all.deb
# - any other will build a develop named version (alignak_deb-testing repository)
# -> python-alignak_x.x.x-mybranch_all.deb
#
# Note that it is not recommended to use anything else than alphabetic characters in the
# branch name according to the debian version name policy! Else, the package will not even
# install on the system!
#
# - python version:
# 2.7, 3.5 (when called from the Travis build, it is the most recent python 3 version)
#
# - package type:
# deb (default), rpm, freebsd, apk, pacman, ...
# i.e. any of the package types supported by fpm
# --------------------------------------------------------------------------------
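#
# Example invocations (a sketch; arguments as documented above):
# ./package.sh master 2.7 deb -> stable .deb targeting python 2.7
# ./package.sh develop 3.5 rpm -> testing .rpm targeting python 3.5
#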
#set -ev
# Parse command line arguments
# Default is branch develop, python 3.5
git_branch=$1
python_version=$2
input_type="python"
output_type=$3
if [ $# -eq 0 ]; then
git_branch="develop"
python_version="3.5"
output_type="deb"
fi
if [ $# -eq 1 ]; then
python_version="3.5"
output_type="deb"
fi
if [ $# -eq 2 ]; then
output_type="deb"
fi
echo "Installing fpm..."
gem install --no-ri --no-rdoc fpm
echo "Building ${output_type} package for branch ${git_branch}, python version ${python_version}"
# Python prefix - no more used but kept for compatibility
python_prefix="python3"
systemd_service="python3/alignak-webui.service"
if [ "${python_version}" = "2.7" ]; then
python_prefix="python"
systemd_service="python2/alignak-webui.service"
# python_version="2"
#else
# python_version="3"
fi
# Package information - no more python-prefix but kept for compatibility
pkg_name="${python_prefix}-alignak-webui"
pkg_description="Alignak WebUI, Web User Interface for Alignak"
pkg_url="http://alignak.net"
pkg_team="Alignak Team ([email protected])"
version=`python -c "from alignak_webui import __version__;print(__version__)"`
version_date=`date "+%Y-%m-%d"`
mkdir -p dist
cp .bintray-${output_type}.json dist/.bintray-${output_type}.json
if [ "${git_branch}" = "master" ]; then
# Updating deploy script for Alignak stable version
sed -i -e "s|\"sed_package_name\"|\"${pkg_name}\"|g" dist/.bintray-${output_type}.json
sed -i -e "s|\"sed_version_name\"|\"${version}\"|g" dist/.bintray-${output_type}.json
sed -i -e "s|\"sed_version_desc\"|\"Stable version\"|g" dist/.bintray-${output_type}.json
sed -i -e "s|\"sed_version_released\"|\"${version_date}\"|g" dist/.bintray-${output_type}.json
# Stable repo
sed -i -e "s/sed_version_repo/alignak-${output_type}-stable/g" dist/.bintray-${output_type}.json
elif [ "${git_branch}" = "develop" ]; then
# Version is version number + develop
version="${version}-develop"
# version="-dev"
# Updating deploy script for Alignak develop version
sed -i -e "s|\"sed_package_name\"|\"${pkg_name}\"|g" dist/.bintray-${output_type}.json
sed -i -e "s|\"sed_version_name\"|\"${version}\"|g" dist/.bintray-${output_type}.json
sed -i -e "s|\"sed_version_desc\"|\"Development version\"|g" dist/.bintray-${output_type}.json
sed -i -e "s|\"sed_version_released\"|\"${version_date}\"|g" dist/.bintray-${output_type}.json
# Use the testing repo
sed -i -e "s/sed_version_repo/alignak-${output_type}-testing/g" dist/.bintray-${output_type}.json
else
# Version
if [ "${git_branch}" = "${version}" ]; then
version="${git_branch}"
else
version="${version}-${git_branch}"
fi
# Updating deploy script for any other branch / tag
sed -i -e "s|\"sed_package_name\"|\"${pkg_name}\"|g" dist/.bintray-${output_type}.json
sed -i -e "s|\"sed_version_name\"|\"${version}\"|g" dist/.bintray-${output_type}.json
sed -i -e "s|\"sed_version_desc\"|\"Branch $1 version\"|g" dist/.bintray-${output_type}.json
sed -i -e "s|\"sed_version_released\"|\"${version_date}\"|g" dist/.bintray-${output_type}.json
# Use the testing repo
sed -i -e "s/sed_version_repo/alignak-${output_type}-testing/g" dist/.bintray-${output_type}.json
fi
echo "----------"
echo "BinTray configuration file:"
echo "----------"
cat dist/.bintray-${output_type}.json
echo "----------"
# Run fpm:
# Add --verbose for a verbose (very...) mode to have more information
# - from python to deb packages, for all architectures
# Use python dependencies - all Alignak python packages
# are packaged in the main distros so it will use the
# distro packages rather than the python one
# Force python interpreter else Travis deployment will use its own venv interpreter!
echo "Running fpm..."
if [ "${output_type}" = "deb" ]; then
fpm \
--verbose \
--force \
--input-type ${input_type} \
--output-type ${output_type} \
--package "./dist" \
--architecture all \
--license AGPL \
--version ${version} \
--name "${pkg_name}" \
--description "${pkg_description}" \
--url "${pkg_url}" \
--vendor "${pkg_team}" \
--maintainer "${pkg_team}" \
--python-package-name-prefix "${python_prefix}" \
--python-scripts-executable "/usr/bin/env python" \
--python-install-lib "/usr/local/lib/python${python_version}/site-packages" \
--python-install-data '/usr/local' \
--python-install-bin '/usr/local/bin' \
--no-python-dependencies \
--after-install "./bin/${python_prefix}-post-install.sh" \
--deb-no-default-config-files \
--deb-systemd ./bin/systemd/${systemd_service} \
--deb-no-default-config-files \
./setup.py
elif [ "${output_type}" = "rpm" ]; then
fpm \
--force \
--input-type ${input_type} \
--output-type ${output_type} \
--package "./dist" \
--architecture all \
--license AGPL \
--version ${version} \
--name "${pkg_name}" \
--description "${pkg_description}" \
--url "${pkg_url}" \
--vendor "${pkg_team}" \
--maintainer "${pkg_team}" \
--python-package-name-prefix "${python_prefix}" \
--python-scripts-executable "/usr/bin/env python" \
--python-install-lib "/usr/lib/python${python_version}/site-packages" \
--python-install-data '/usr/local' \
--python-install-bin '/usr/local/bin' \
--no-python-dependencies \
--after-install "./bin/${python_prefix}-post-install.sh" \
./setup.py
else
fpm \
--force \
--input-type ${input_type} \
--output-type ${output_type} \
--package "./dist" \
--architecture all \
--license AGPL \
--version ${version} \
--name "${pkg_name}" \
--description "${pkg_description}" \
--url "${pkg_url}" \
--vendor "${pkg_team}" \
--maintainer "${pkg_team}" \
--python-scripts-executable "/usr/bin/env python" \
--python-install-lib "/usr/local/lib/python${python_version}/site-packages" \
--python-install-data '/usr/local' \
--python-install-bin '/usr/local/bin' \
--no-python-dependencies \
--after-install "./bin/${python_prefix}-post-install.sh" \
./setup.py
fi
|
Alignak-monitoring-contrib/alignak-webui
|
package.sh
|
Shell
|
agpl-3.0
| 7,881 |
# Error on unset variables
set -u
# Load Linux version of _lp_battery()
uname() { printf 'Linux'; }
. ../liquidprompt --no-activate
unset -f uname
LP_ENABLE_BATT=1
typeset -a battery_outputs battery_statuses battery_values temp_outputs temp_values battery_ids
# Add test cases to these arrays like below
# Linux 4.19.0-10-amd64 #1 SMP Debian 4.19.132-1 (2020-07-24) x86_64 GNU/Linux Debian 10 (buster)
battery_outputs+=(
""
)
battery_statuses+=(4)
battery_ids+=("")
battery_values+=("")
temp_outputs+=(
"Thermal 0: ok, 23.0 degrees C"
)
temp_values+=(23)
# Unknown
battery_outputs+=(
"Battery 0: Discharging, 55%, 01:39:34 remaining"
)
battery_statuses+=(0)
battery_values+=(55)
battery_ids+=(0)
temp_outputs+=(
"Thermal 0: ok, -267.8 degrees C"
)
temp_values+=(-267)
# Multiple batteries
battery_outputs+=(
"Battery 0: Discharging, 0%, rate information unavailable
Battery 1: Discharging, 0%, rate information unavailable
Battery 2: Discharging, 53%, 02:35:00 remaining"
)
battery_statuses+=(0)
battery_values+=(53)
battery_ids+=(2)
temp_outputs+=(
"Thermal 0: ok, 39.0 degrees C"
)
temp_values+=(39)
# VPS at OVH
temp_outputs+=(
""
)
temp_values+=("")
function test_acpi_battery {
acpi() {
printf '%s\n' "$__battery_output"
}
for (( index=0; index < ${#battery_values[@]}; index++ )); do
__battery_output=${battery_outputs[$index]}
LP_BATTERY_ID=${battery_ids[$index]}
LP_BATTERY_THRESHOLD=100
_lp_battery
assertEquals "ACPI battery below returns at index ${index}" "${battery_statuses[$index]}" "$?"
assertEquals "ACPI battery value at index ${index}" "${battery_values[$index]}" "$lp_battery"
_status=${battery_statuses[$index]}
(( _status < 4 )) && _status=$(( _status + 1 ))
LP_BATTERY_THRESHOLD=0
_lp_battery
assertEquals "ACPI battery above returns at index ${index}" "$_status" "$?"
assertEquals "ACPI battery value at index ${index}" "${battery_values[$index]}" "$lp_battery"
done
}
function test_acpi_temperature {
LP_ENABLE_TEMP=1
LP_TEMP_THRESHOLD=-1000000
acpi() {
printf '%s\n' "$__temp_output"
}
local valid
for (( index=0; index < ${#temp_values[@]}; index++ )); do
__temp_output=${temp_outputs[$index]}
unset lp_temperature
__lp_temp_acpi
assertEquals "ACPI temperature output at index ${index}" "${temp_values[$index]}" "${lp_temperature-}"
if [[ -n ${temp_values[$index]} ]]; then
valid=0
else
valid=1
fi
__lp_temp_detect acpi
assertEquals "ACPI temperature detect at index ${index}" "$valid" "$?"
# Set the temp function in case the above detect said it was invalid.
# While we should never be in this situation, might as well make sure
# it doesn't crash.
_LP_TEMP_FUNCTION=__lp_temp_acpi
# This is to test that _lp_temperature() ignores previous high values
lp_temperature=10000
_lp_temperature
assertEquals "ACPI temperature return at index ${index}" "$valid" "$?"
assertEquals "ACPI temperature return output at index ${index}" "${temp_values[$index]}" "${lp_temperature-}"
done
}
if [ -n "${ZSH_VERSION-}" ]; then
SHUNIT_PARENT="$0"
setopt shwordsplit ksh_arrays
fi
. ./shunit2
|
laurentbardin/liquidprompt
|
tests/test_acpi.sh
|
Shell
|
agpl-3.0
| 3,198 |
#!/bin/bash
echo "####### Cleaning the old tests #######"
rm -rf site/coverage site/surefire-reports
mkdir -p site/coverage site/surefire-reports
rm -rf xunit.xml
echo "###### Executing the npm #######"
npm install
echo "[WARNING] Redis must be running, and the http certificates created"
echo "####### Executing unit Test #######"
node ./node_modules/.bin/istanbul cover --root lib/ --dir site/coverage -- grunt xunit
mv xunit.xml site/surefire-reports/TEST-xunit.xml
echo "####### Generating coverage report #######"
node ./node_modules/.bin/istanbul report --dir site/coverage/ cobertura
|
telefonicaid/Rush
|
unitTest.sh
|
Shell
|
agpl-3.0
| 595 |
source common.sh
$nixstore --verify
|
tianyicui/nix
|
tests/verify.sh
|
Shell
|
lgpl-2.1
| 37 |
#!/bin/sh -ex
. utilslib.sh
basedir=/src/libvirt
baseurl=http://libvirt.org/sources
version=0.9.0
revision=0
tarball=libvirt-${version}.tar.gz
directory=libvirt-${version}-${revision}
mkdir -p $basedir
pushd $basedir
utilslib_download $baseurl $tarball
if [ ! -d $directory ]
then
echo unpacking $tarball ...
mkdir -p $directory
tar -xvf $tarball -C $directory --strip-components=1
fi
pushd $directory
if [ ! -f mingw.patch.applied ]
then
echo patching ...
patch -p1 < ../../libvirt-${version}-mingw.patch
echo applied > mingw.patch.applied
fi
if [ -d /include/libvirt ]
then
# remove previously installed libvirt header files. specifying -I/include
# makes the build pick up the old headers instead of its own files.
# removing the old header files is a simple workaround for this problem.
rm -r /include/libvirt
fi
if [ ! -f configure.done ]
then
CFLAGS=-I/include \
LDFLAGS=-L/lib \
./configure --prefix= \
--without-xen \
--without-libvirtd \
--without-openvz \
--without-lxc \
--without-phyp \
--with-python
echo done > configure.done
fi
make
make install
# copy libvirtmod.dll to the correct place so python will find it
cp /python/Lib/site-packages/libvirtmod.dll /python/DLLs/libvirtmod.pyd
# prepare gather
cp src/.libs/libvirt-0.dll $prepare_bin
cp src/.libs/libvirt.dll.a $prepare_lib
cp src/.libs/libvirt.a $prepare_lib
cp src/.libs/libvirt-qemu-0.dll $prepare_bin
cp src/.libs/libvirt-qemu.dll.a $prepare_lib
cp src/.libs/libvirt-qemu.a $prepare_lib
cp tools/.libs/virsh.exe $prepare_bin
mkdir -p $prepare_include/libvirt
cp include/libvirt/libvirt.h $prepare_include/libvirt
cp include/libvirt/libvirt-qemu.h $prepare_include/libvirt
cp include/libvirt/virterror.h $prepare_include/libvirt
cp python/libvirt.py $prepare_python
cp python/.libs/libvirtmod.dll $prepare_python/libvirtmod.pyd
cp ../libvirt-${version}.tar.gz $prepare_src
cp ../../libvirt-${version}-mingw.patch $prepare_src
popd
popd
|
photron/msys_setup
|
compile_libvirt-0.9.0.sh
|
Shell
|
lgpl-2.1
| 2,078 |
#!/bin/bash
if [ "${ENABLE_JLAN}" == "true" ]; then
echo "Enabling default Jlan SMB Share for OpenCms"
mv -v ${OPENCMS_HOME}/WEB-INF/config/jlanConfig.xml.linux ${OPENCMS_HOME}/WEB-INF/config/jlanConfig.xml
else
echo "Jlan SMB Share remains disabled. Start the container with \"-e ENABLE_JLAN=true\" to enable it."
fi
|
alkacon/opencms-docker
|
image/resources/root/preinit/60_init_jlan_share.sh
|
Shell
|
lgpl-2.1
| 322 |
#!/bin/bash
# This script will download and setup a cross compilation environment
# for targeting Win32 from Linux. It can also be used to build on
# Windows under the MSYS/MinGW environment. It will use the GTK
# binaries from Tor Lillqvist.
TOR_URL="http://ftp.gnome.org/pub/gnome/binaries/win32";
TOR_BINARIES=( \
glib/2.20/glib{-dev,}_2.20.4-1_win32.zip \
gtk+/2.16/gtk+{-dev,}_2.16.4-1_win32.zip \
pango/1.22/pango{-dev,}_1.22.0-1_win32.zip \
atk/1.26/atk{-dev,}_1.26.0-1_win32.zip );
TOR_DEP_URL="http://ftp.gnome.org/pub/gnome/binaries/win32/dependencies";
TOR_DEPS=( \
cairo{-dev,}_1.8.6-1_win32.zip \
gettext-runtime-{dev-,}0.17-1.zip \
fontconfig{-dev,}_2.6.0-2_win32.zip \
freetype{-dev,}_2.3.8-1_win32.zip \
expat_2.0.1-1_win32.zip );
#SF_URL="http://kent.dl.sourceforge.net/sourceforge";
#SF_URL="http://surfnet.dl.sourceforge.net/sourceforge";
MESA_VER=7.5
SF_URL="http://mesh.dl.sourceforge.net/sourceforge";
OTHER_DEPS=( \
"http://www.gimp.org/~tml/gimp/win32/libiconv-1.9.1.bin.woe32.zip" \
"${SF_URL}/libpng/zlib123-dll.zip" \
"http://www.libsdl.org/release/SDL-devel-1.2.13-mingw32.tar.gz" \
"${SF_URL}/mesa3d/MesaLib-${MESA_VER}.tar.bz2" );
GNUWIN32_URL="${SF_URL}/gnuwin32";
GNUWIN32_DEPS=( \
libpng-1.2.33-{bin,lib}.zip \
jpeg-6b-4-{bin,lib}.zip \
tiff-3.8.2-1-{bin,lib}.zip );
CLUTTER_GIT="git://git.clutter-project.org"
function download_file ()
{
local url="$1"; shift;
local filename="$1"; shift;
case "$DOWNLOAD_PROG" in
curl)
curl -C - -o "$DOWNLOAD_DIR/$filename" "$url";
;;
*)
$DOWNLOAD_PROG -O "$DOWNLOAD_DIR/$filename" -c "$url";
;;
esac;
if [ $? -ne 0 ]; then
echo "Downloading ${url} failed.";
exit 1;
fi;
}
function guess_dir ()
{
local var="$1"; shift;
local suffix="$1"; shift;
local msg="$1"; shift;
local prompt="$1"; shift;
local dir="${!var}";
if [ -z "$dir" ]; then
echo "Please enter ${msg}.";
dir="$PWD/$suffix";
read -r -p "$prompt [$dir] ";
if [ -n "$REPLY" ]; then
dir="$REPLY";
fi;
fi;
eval $var="\"$dir\"";
if [ ! -d "$dir" ]; then
if ! mkdir -p "$dir"; then
echo "Error making directory $dir";
exit 1;
fi;
fi;
}
function y_or_n ()
{
local prompt="$1"; shift;
while true; do
read -p "${prompt} [y/n] " -n 1;
echo;
case "$REPLY" in
y) return 0 ;;
n) return 1 ;;
*) echo "Please press y or n" ;;
esac;
done;
}
function do_unzip ()
{
do_unzip_d "$ROOT_DIR" "$@";
}
function do_unzip_d ()
{
local exdir="$1"; shift;
local zipfile="$1"; shift;
unzip -o -q -d "$exdir" "$zipfile" "$@";
if [ "$?" -ne 0 ]; then
echo "Failed to extract $zipfile";
exit 1;
fi;
}
function find_compiler ()
{
local gccbin fullpath;
if [ -z "$MINGW_TOOL_PREFIX" ]; then
for gccbin in i{3,4,5,6}86-mingw32{,msvc}-gcc; do
fullpath="`which $gccbin 2>/dev/null`";
if [ "$?" -eq 0 ]; then
MINGW_TOOL_PREFIX="${fullpath%%gcc}";
break;
fi;
done;
if [ -z "$MINGW_TOOL_PREFIX" ]; then
echo;
echo "No suitable cross compiler was found.";
echo;
echo "If you already have a compiler installed,";
echo "please set the MINGW_TOOL_PREFIX variable";
echo "to point to its location without the";
echo "gcc suffix (eg: \"/usr/bin/i386-mingw32-\").";
echo;
echo "If you are using Ubuntu, you can install a";
echo "compiler by typing:";
echo;
echo " sudo apt-get install mingw32";
echo;
echo "Otherwise you can try following the instructions here:";
echo;
echo " http://www.libsdl.org/extras/win32/cross/README.txt";
exit 1;
fi;
fi;
export ADDR2LINE="${MINGW_TOOL_PREFIX}addr2line"
export AS="${MINGW_TOOL_PREFIX}as"
export CC="${MINGW_TOOL_PREFIX}gcc"
export CPP="${MINGW_TOOL_PREFIX}cpp"
export CPPFILT="${MINGW_TOOL_PREFIX}c++filt"
export CXX="${MINGW_TOOL_PREFIX}g++"
export DLLTOOL="${MINGW_TOOL_PREFIX}dlltool"
export DLLWRAP="${MINGW_TOOL_PREFIX}dllwrap"
export GCOV="${MINGW_TOOL_PREFIX}gcov"
export LD="${MINGW_TOOL_PREFIX}ld"
export NM="${MINGW_TOOL_PREFIX}nm"
export OBJCOPY="${MINGW_TOOL_PREFIX}objcopy"
export OBJDUMP="${MINGW_TOOL_PREFIX}objdump"
export READELF="${MINGW_TOOL_PREFIX}readelf"
export SIZE="${MINGW_TOOL_PREFIX}size"
export STRINGS="${MINGW_TOOL_PREFIX}strings"
export WINDRES="${MINGW_TOOL_PREFIX}windres"
export AR="${MINGW_TOOL_PREFIX}ar"
export RANLIB="${MINGW_TOOL_PREFIX}ranlib"
export STRIP="${MINGW_TOOL_PREFIX}strip"
TARGET="${MINGW_TOOL_PREFIX##*/}";
TARGET="${TARGET%%-}";
echo "Using compiler $CC and target $TARGET";
}
# If a download directory hasn't been specified then try to guess one
# but ask for confirmation first
guess_dir DOWNLOAD_DIR "downloads" \
"the directory to download to" "Download directory";
# Try to guess a download program if none has been specified
if [ -z "$DOWNLOAD_PROG" ]; then
# If no download program has been specified then check if wget or
# curl exists
#wget first, because my curl can't download libsdl...
for x in wget curl; do
if [ "`type -t $x`" != "" ]; then
DOWNLOAD_PROG="$x";
break;
fi;
done;
if [ -z "$DOWNLOAD_PROG" ]; then
echo "No DOWNLOAD_PROG was set and neither wget nor curl is ";
echo "available.";
exit 1;
fi;
fi;
# If a download directory hasn't been specified then try to guess one
# but ask for confirmation first
guess_dir ROOT_DIR "clutter-cross" \
"the root prefix for the build environment" "Root dir";
SLASH_SCRIPT='s/\//\\\//g';
quoted_root_dir=`echo "$ROOT_DIR" | sed "$SLASH_SCRIPT" `;
##
# Download files
##
for bin in "${TOR_BINARIES[@]}"; do
bn="${bin##*/}";
download_file "$TOR_URL/$bin" "$bn"
done;
for dep in "${TOR_DEPS[@]}"; do
download_file "$TOR_DEP_URL/$dep" "$dep";
done;
for dep in "${OTHER_DEPS[@]}"; do
bn="${dep##*/}";
download_file "$dep" "$bn";
done;
for dep in "${GNUWIN32_DEPS[@]}"; do
download_file "$GNUWIN32_URL/$dep" "$dep";
done;
##
# Extract files
##
for bin in "${TOR_BINARIES[@]}"; do
echo "Extracting $bin...";
bn="${bin##*/}";
do_unzip "$DOWNLOAD_DIR/$bn";
done;
for dep in "${TOR_DEPS[@]}"; do
echo "Extracting $dep...";
do_unzip "$DOWNLOAD_DIR/$dep";
done;
for dep in "${GNUWIN32_DEPS[@]}"; do
echo "Extracting $dep...";
do_unzip "$DOWNLOAD_DIR/$dep";
done;
echo "Extracting libiconv...";
do_unzip "$DOWNLOAD_DIR/libiconv-1.9.1.bin.woe32.zip";
echo "Extracting zlib...";
do_unzip "$DOWNLOAD_DIR/zlib123-dll.zip" "zlib1.dll";
if ! mv "$ROOT_DIR/zlib1.dll" "$ROOT_DIR/bin/"; then
echo "Failed to mv zlib1.dll";
exit 1;
fi;
echo "Extracting SDL...";
if ! tar -C "$ROOT_DIR" \
-zxf "$DOWNLOAD_DIR/SDL-devel-1.2.13-mingw32.tar.gz"; then
echo "Failed to extract SDL";
exit 1;
fi;
for x in bin docs include lib man share; do
if ! cp -pR "$ROOT_DIR/SDL-1.2.13/$x" "$ROOT_DIR/"; then
echo "Failed to copy SDL files";
exit 1;
fi;
done;
rm -fr "$ROOT_DIR/SDL-1.2.13";
export SDL_CONFIG="$ROOT_DIR/bin/sdl-config";
echo "Fixing SDL libtool files...";
sed "s/^libdir=.*\$/libdir='${quoted_root_dir}\/lib'/" \
< "$ROOT_DIR/lib/libSDL.la" > "$ROOT_DIR/lib/libSDL.la.tmp";
mv "$ROOT_DIR/lib/libSDL.la.tmp" "$ROOT_DIR/lib/libSDL.la";
echo "Fixing pkgconfig files...";
for x in "$ROOT_DIR/lib/pkgconfig/"*.pc "$ROOT_DIR/bin/sdl-config"; do
sed "s/^prefix=.*\$/prefix=${quoted_root_dir}/" \
< "$x" > "$x.tmp";
mv "$x.tmp" "$x";
done;
chmod +x "$ROOT_DIR/bin/sdl-config";
# The Pango FT pc file hardcodes the include path for freetype, so it
# needs to be fixed separately
sed -e 's/^Cflags:.*$/Cflags: -I${includedir}\/pango-1.0 -I${includedir}\/freetype2/' \
-e 's/^\(Libs:.*\)$/\1 -lfreetype -lfontconfig/' \
< "$ROOT_DIR/lib/pkgconfig/pangoft2.pc" \
> "$ROOT_DIR/lib/pkgconfig/pangoft2.pc.tmp";
mv "$ROOT_DIR/lib/pkgconfig/pangoft2.pc"{.tmp,};
echo "Extracting Mesa headers...";
if ! tar -C "$DOWNLOAD_DIR" \
-jxf "$DOWNLOAD_DIR/MesaLib-${MESA_VER}.tar.bz2" \
Mesa-${MESA_VER}/include; then
echo "Failed to extract Mesa headers";
exit 1;
fi;
cp -R "$DOWNLOAD_DIR/Mesa-${MESA_VER}/include/"* "$ROOT_DIR/include";
##
# Build
##
export PKG_CONFIG_PATH="$ROOT_DIR/lib/pkgconfig:$PKG_CONFIG_PATH";
export LDFLAGS="-L$ROOT_DIR/lib -mno-cygwin $LDFLAGS"
export CPPFLAGS="-I$ROOT_DIR/include $CPPFLAGS"
export CFLAGS="-I$ROOT_DIR/include -mno-cygwin -mms-bitfields -march=i686 ${CFLAGS:-"-g"}"
export CXXFLAGS="-I$ROOT_DIR/include -mno-cygwin -mms-bitfields -march=i686 ${CFLAGS:-"-g"}"
if y_or_n "Do you want to checkout and build Clutter?"; then
find_compiler;
guess_dir CLUTTER_BUILD_DIR "clutter" \
"the build directory for clutter" "Build dir";
git clone "$CLUTTER_GIT/clutter" $CLUTTER_BUILD_DIR;
if [ "$?" -ne 0 ]; then
echo "git failed";
exit 1;
fi;
( cd "$CLUTTER_BUILD_DIR" && ./autogen.sh --prefix="$ROOT_DIR" \
--host="$TARGET" --target="$TARGET" --with-flavour=win32 );
if [ "$?" -ne 0 ]; then
echo "autogen failed";
exit 1;
fi;
( cd "$CLUTTER_BUILD_DIR" && make all install );
if [ "$?" -ne 0 ]; then
echo "make failed";
exit 1;
fi;
fi;
|
tardyp/clutter
|
build/mingw/mingw-cross-compile.sh
|
Shell
|
lgpl-2.1
| 9,289 |
#!/bin/bash -e
#$ -S /bin/bash
#$ -cwd
#$ -j y
#$ -pe orte 8
VERSION=0.1
external_packages=/ngs/apps
reference_genomes=/ngs/reference
bwaApp="$external_packages/bwa-0.7.12/bwa"
bowtie2App="$external_packages/bowtie2-2.2.6/bowtie2"
javaApp="$external_packages/jdk1.6.0_45/bin/java"
picardJar="$external_packages/picard-tools-1.129/picard.jar"
samtoolsApp="$external_packages/samtools-0.1.19/samtools"
countReadPairsAppsJar="/home/golharr/workspace/rgtools/dist/CountMappedReadPairs.jar"
graftGenome="hg19"
graftReference="$reference_genomes/$graftGenome/$graftGenome.fa"
graftbwaReference="$reference_genomes/$graftGenome/bwa_index/$graftGenome"
graftbowtie2Reference="$reference_genomes/$graftGenome/bowtie2_index/$graftGenome"
hostGenome="mm10"
hostReference="$reference_genomes/$hostGenome/$hostGenome.fa"
hostbwaReference="$reference_genomes/$hostGenome/bwa_index/$hostGenome"
hostbowtie2Reference="$reference_genomes/$hostGenome/bowtie2_index/$hostGenome"
graftSamFile="analysis/pdx/$SAMPLE.$graftGenome.sam"
graftMapped1="analysis/pdx/$SAMPLE.$graftGenome.aligned.fastq.1.gz"
graftMapped2="analysis/pdx/$SAMPLE.$graftGenome.aligned.fastq.2.gz"
graftUnmapped1="analysis/pdx/$SAMPLE.$graftGenome.unaligned.fastq.1.gz"
graftUnmapped2="analysis/pdx/$SAMPLE.$graftGenome.unaligned.fastq.2.gz"
graftAlignedHostSamFile="analysis/pdx/$SAMPLE.graftAligned.$hostGenome.sam"
graftUnalignedHostSamFile="analysis/pdx/$SAMPLE.graftUnaligned.$hostGenome.sam"
echo "PDX Pipeline $VERSION"
echo "Running $SAMPLE"
echo "FASTQ1: $FASTQ1"
echo "FASTQ2: $FASTQ2"
cd /scratch
aws s3 cp $FASTQ1 .
FASTQ1=`basename $FASTQ1`
aws s3 cp $FASTQ2 .
FASTQ2=`basename $FASTQ2`
# Map sample to hg19 (graft) to get graft mapped and unmapped
$bowtie2App --no-mixed --un-conc-gz analysis/pdx/$SAMPLE.$graftGenome.unaligned.fastq.gz --al-conc-gz analysis/pdx/$SAMPLE.$graftGenome.aligned.fastq.gz -p 8 -1 $FASTQ1 -2 $FASTQ2 \
--no-unal --rg-id $SAMPLE --rg "SM:$SAMPLE\tLB:$SAMPLE\tPL:illumina" -S $graftSamFile -x $graftbowtie2Reference
# Map graft mapped to mm10 to get graft.mapped.host.[mapped/unmapped]
$bowtie2App --no-mixed --un-conc-gz analysis/pdx/$SAMPLE.graftAlignedHostUnaligned.fastq.gz --al-conc-gz analysis/pdx/$SAMPLE.graftAlignedHostAligned.fastq.gz \
-p 8 -1 $graftMapped1 -2 $graftMapped2 \
--no-unal --rg-id $SAMPLE --rg "SM:$SAMPLE\tLB:$SAMPLE\tPL:illumina" -S $graftAlignedHostSamFile -x $hostbowtie2Reference
# Map graft unmapped to mm10 to get graft.unmapped.host.[mapped/unmapped]
$bowtie2App --no-mixed --un-conc-gz analysis/pdx/$SAMPLE.graftUnalignedHostUnaligned.fastq.gz --al-conc-gz analysis/pdx/$SAMPLE.graftUnalignedHostAligned.fastq.gz \
-p 8 -1 $graftUnmapped1 -2 $graftUnmapped2 \
--no-unal --rg-id $SAMPLE --rg "SM:$SAMPLE\tLB:$SAMPLE\tPL:illumina" -S $graftUnalignedHostSamFile -x $hostbowtie2Reference
|
golharam/rgtools
|
scripts/NGS_pipelines/PDX/pdx_pipeline.sh
|
Shell
|
lgpl-3.0
| 2,841 |
#!/bin/bash
# call setup_opensg.sh first!
# get script directory
SOURCE="${BASH_SOURCE[0]}"
DIR="$( dirname "$SOURCE" )"
while [ -h "$SOURCE" ]
do
SOURCE="$(readlink "$SOURCE")"
[[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE"
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
done
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
cd $DIR
libDir="/c/usr/lib"
incDir="/c/usr/include"
if [ ! -e $libDir ]; then
mkdir -p $libDir
fi
if [ ! -e $incDir ]; then
mkdir -p $incDir
fi
function downloadRepository {
cd $DIR
if [ ! -e $1 ]; then
echo "download $1 source"
git clone ${@:2} $1
else
echo "$1 source already present"
fi
}
downloadRepository libxml2 https://github.com/GNOME/libxml2.git
downloadRepository freetype2 https://git.savannah.gnu.org/git/freetype/freetype2.git
downloadRepository cpython --branch 2.7 https://github.com/Victor-Haefner/cpython.git
downloadRepository tiff https://gitlab.com/libtiff/libtiff.git
downloadRepository gdal https://github.com/Victor-Haefner/gdal.git
downloadRepository gdal/proj https://github.com/Victor-Haefner/PROJ.git
downloadRepository polyvr https://github.com/Victor-Haefner/polyvr.git
# --------------------- freetype2
if [ ! -e $DIR/freetype2/build ]; then
cd $DIR/freetype2
mkdir build && cd build
cmake -G "Visual Studio 15 2017 Win64" ..
cmake --build . --config Release
cp -r $DIR/freetype2/include/* $incDir/
cp -r $DIR/freetype2/build/include/freetype/* $incDir/freetype/
cp build/Release/freetype.lib $libDir/
fi
# --------------------- libxml2
if [ ! -e $libDir/libxml2.lib ]; then
cd $DIR/libxml2/win32
cmd //C $DIR/compile_libxml2.bat
fi
# --------------------- python c api
if [ ! -e $libDir/python27.lib ]; then
cd $DIR/cpython/PCbuild
cmd //C $DIR/compile_cpython.bat
cp $DIR/cpython/PCbuild/amd64/* $libDir/
cp $DIR/cpython/Include/*.h $incDir/Python/
cp $DIR/cpython/PC/pyconfig.h $incDir/Python/
fi
exit 0
# --------------------- lib tiff # TODO: move this to setupOSG
if [ ! -e tiff/Build ]; then
cd tiff
mkdir Build && cd Build
zlib="-DZLIB_INCLUDE_DIR=~/.emscripten_ports/zlib/zlib-version_1 -DZLIB_LIBRARY_RELEASE=~/.emscripten_cache/wasm-obj/libz.a"
imgJpg="-DJPEG_INCLUDE_DIR=~/.emscripten_ports/libjpeg/jpeg-9c -DJPEG_LIBRARY_RELEASE=~/.emscripten_cache/wasm-obj/libjpeg.a"
cmake ../ $zlib $imgJpg # TODO: jpeg not yet taken into account..
make -j8
cp port/*.a $libDir
cp libtiff/*.a $libDir
cp -r ../libtiff $incDir/libtiff
cp libtiff/*.h $incDir/libtiff/
fi
# --------------------- lib gdal
cd $DIR
if [ ! -e gdal/proj/build ]; then
cd gdal/proj
mkdir build && cd build
cmake ../ -DWITHOUT_SQLITE=1 -DENABLE_CURL=0 -DBUILD_TESTING=0 -DBUILD_PROJSYNC=0 -DTIFF_INCLUDE_DIR="$incDir/libtiff" -DTIFF_LIBRARY="$libDir/libtiffxx.a"
make -j8
cp lib/libproj.a $libDir
cp -r ../src $incDir/libproj
cp src/*.h $incDir/libproj/
cp -r ../include/proj $incDir/libproj/
fi
cd $DIR
if [ ! -e gdal/gdal/build ]; then
cd gdal/gdal # configure needs to run in this folder!
./configure \
--with-python=no \
--with-crypto=no \
--with-opencl=no \
--with-geos=no \
--with-curl=no \
--with-xml2=no \
--with-libkml=no \
--with-mysql=no \
--with-netcdf=no \
--with-pcraster=internal \
--with-pg=no \
--with-proj_include="-I$DIR/include/libproj" \
--with-proj_lib="-L$DIR/lib -lproj" \
--with-cryptopp=no \
--with-java=no \
--with-libjson-c=internal \
--with-libz=no \
--with-libz_include="-I$HOME/.emscripten_ports/zlib/zlib-version_1" \
--with-libz_lib="-L$HOME/.emscripten_cache/wasm-obj" \
--with-hdf5=no \
--with-expat=no \
--with-oci=no \
--with-oci-lib=no \
--with-oci-include=no \
--with-geotiff=internal \
--with-libtiff="$DIR/include/libtiff" \
--with-grass=no \
--with-spatialite=no \
--with-freexl=no
make -j8
make force-lib
cp libgdal.a $libDir
mkdir $incDir/gdal
find . -name "*.h" -exec cp {} $incDir/gdal/ \;
cp port/cpl_config.h $incDir/gdal/ # needs to override wrong config
#cp port/*.h $incDir/gdal/ #test
fi
exit 0
# --------------------- polyvr
if [ ! -e $DIR/polyvr/build ]; then
cd $DIR/polyvr
mkdir build && cd build
cmake -G "Visual Studio 15 2017 Win64" ..
cmake --build . --config Release
fi
fi
|
Victor-Haefner/polyvr-packaging
|
windows/setup_pvr.sh
|
Shell
|
lgpl-3.0
| 4,230 |
#!/bin/sh -e
# Copyright (C) 2014 Cryptotronix, LLC.
# This file is part of libcrypti2c.
# libcrypti2c is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
# libcrypti2c is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with libcrypti2c. If not, see <http://www.gnu.org/licenses/>.
if [ ! -d "m4" ]; then
mkdir m4
fi
if [ ! -e "config.rpath" ]; then
touch config.rpath
fi
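# gnulib-tool copies the listed gnulib modules into the source tree
# (LGPL-compatible, libtool-aware) so that autoreconf can pick them up.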
gnulib-tool --lgpl --libtool --import configmake safe-alloc valgrind-tests maintainer-makefile full-write gitlog-to-changelog
autoreconf --force --install
./configure
|
cryptotronix/libcrypti2c
|
autogen.sh
|
Shell
|
lgpl-3.0
| 991 |
#!/bin/bash -x
#
# Generated - do not edit!
#
# Macros
TOP=`pwd`
CND_PLATFORM=OracleSolarisStudio-Solaris-x86
CND_CONF=Debug
CND_DISTDIR=dist
CND_BUILDDIR=build
CND_DLIB_EXT=so
NBTMPDIR=${CND_BUILDDIR}/${CND_CONF}/${CND_PLATFORM}/tmp-packaging
TMPDIRNAME=tmp-packaging
OUTPUT_PATH=${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/test_u_server
OUTPUT_BASENAME=test_u_server
PACKAGE_TOP_DIR=testuserver/
# Functions
function checkReturnCode
{
rc=$?
if [ $rc != 0 ]
then
exit $rc
fi
}
function makeDirectory
# $1 directory path
# $2 permission (optional)
{
mkdir -p "$1"
checkReturnCode
if [ "$2" != "" ]
then
chmod $2 "$1"
checkReturnCode
fi
}
function copyFileToTmpDir
# $1 from-file path
# $2 to-file path
# $3 permission
{
cp "$1" "$2"
checkReturnCode
if [ "$3" != "" ]
then
chmod $3 "$2"
checkReturnCode
fi
}
# Setup
cd "${TOP}"
mkdir -p ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package
rm -rf ${NBTMPDIR}
mkdir -p ${NBTMPDIR}
# Copy files and create directories and links
cd "${TOP}"
makeDirectory "${NBTMPDIR}/testuserver/bin"
copyFileToTmpDir "${OUTPUT_PATH}" "${NBTMPDIR}/${PACKAGE_TOP_DIR}bin/${OUTPUT_BASENAME}" 0755
# Generate tar file
cd "${TOP}"
rm -f ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package/testuserver.tar
cd ${NBTMPDIR}
tar -vcf ../../../../${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package/testuserver.tar *
checkReturnCode
# Cleanup
cd "${TOP}"
rm -rf ${NBTMPDIR}
|
esohns/libACENetwork
|
prj/solstudio/test_u_server/nbproject/Package-Debug.bash
|
Shell
|
lgpl-3.0
| 1,565 |
#!/bin/bash
set -eu
if [ -n "$PY3PATH" ]; then
export PATH=$PY3PATH:$PATH
fi
autoreconf -iv
./configure \
--prefix=/mingw64 \
--build=x86_64-w64-mingw32 \
--host=x86_64-w64-mingw32 \
--target=x86_64-w64-mingw32 \
--enable-external-toolchain \
--enable-external-emudbg \
--enable-external-gngeo \
--enable-examples=no
make
make install
|
dciabrin/ngdevkit
|
.github/scripts/build-msys2.sh
|
Shell
|
lgpl-3.0
| 371 |
#!/bin/bash
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" #"
if [ ! -z "$1" ]; then
DIR="$1"
fi
# directory paths
APP_SSH_DIR="$DIR/app/.ssh"
SSH_DIR="$DIR/ssh"
# create target directories
mkdir -p --mode 0700 $SSH_DIR
mkdir -p --mode 0700 $APP_SSH_DIR
# generate key
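# Note: ssh-keygen will prompt for a passphrase here; add -N "" if a
# non-interactive, empty-passphrase key is acceptable.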
ssh-keygen -t rsa -f $SSH_DIR/id_rsa
# move public key to app/.ssh/authorized_keys to allow ssh into the container
mv $SSH_DIR/id_rsa.pub $APP_SSH_DIR/authorized_keys
exit 0
|
pirati-cz/DEPRECATED-graph
|
generate_ssh_key.sh
|
Shell
|
unlicense
| 464 |
#!/bin/sh
#
# Usage: cfmssimulator.sh <directory-containing-unzipped-snapshot-data>
export APPL_DATA=./data
export APPL_APPL=./appl
rm -rf $APPL_DATA
. ./classpath.sh
echo CLASSPATH=$CLASSPATH
java -Dappl.appl=$APPL_APPL -Dappl.data=$APPL_DATA -classpath $CLASSPATH com.airbiquity.tools.BuildEnvelope $1 $2 $3
# java -Dappl.appl=$APPL_APPL -Dappl.data=$APPL_DATA -classpath $CLASSPATH com.airbiquity.cfms.CfmsProtocolDecoder ./data/net/out/prepped > vperf.txt
|
yangjun2/android
|
sfsp_application/build-envelope.sh
|
Shell
|
unlicense
| 470 |
#!/bin/bash
# vi:nu:et:sts=4 ts=4 sw=4
name="$1"
upr="$(tr '[:lower:]' '[:upper:]' <<< ${name:0:1})${name:1}"
if [ "$name" = "" ]; then
echo "Usage: renObj.sh current_object_name"
echo "Run this from the main repo directory, not src or test!"
fi
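# Each rename goes through a temporary *A name so that case-only renames
# (e.g. foo.c -> Foo.c) also work on case-insensitive filesystems.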
mv "src/${name}.c" "src/${upr}A.c"
mv "src/${upr}A.c" "src/${upr}.c"
mv "src/${name}_object.c" "src/${upr}_objectA.c"
mv "src/${upr}_objectA.c" "src/${upr}_object.c"
mv "src/${name}.h" "src/${upr}A.h"
mv "src/${upr}A.h" "src/${upr}.h"
mv "src/${name}_internal.h" "src/${upr}_internalA.h"
mv "src/${upr}_internalA.h" "src/${upr}_internal.h"
mv "tests/${name}_test.c" "tests/${upr}_testA.c"
mv "tests/${upr}_testA.c" "tests/${upr}_test.c"
|
2kranki/libCmn
|
scripts/renObj.sh
|
Shell
|
unlicense
| 690 |
#! /bin/sh
# The script that builds the Elm files.
# When adding a new page, make sure the script entry is added as well.
# Chain the elm-make calls with '&&' so the script stops when it hits a compiler error.
elm-make ./client/App.elm --output ./backend/app/static/app/app.js
|
branjwong/shyun-and-bran
|
app-build.sh
|
Shell
|
unlicense
| 263 |
#!/usr/bin/env bash
source ~/.nurc
export PATH=$PATH:~/.cabal/bin:~/.local/bin
termite -e "bash -c 'cd $NU_HOME/playbooks/squads/credit-card-core/bin; ./console.sh $1; exec bash'" &
|
ericvm/dotfiles
|
credit_card_core_console.sh
|
Shell
|
unlicense
| 178 |
#!/bin/bash
BIN=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
ROOT=$( cd ${BIN}/.. && pwd )
echo "HI THERE! Let's set up your environment for Whisk actions."
if [ ! -f ${ROOT}/.wskrc ]; then
echo "
Let's set up a .wskrc file to make this easier.
The .wskrc file is excluded from commits to git. Delete it at will.
"
bluemix regions
echo
read -p "Choose a CF API Endpoint [api.eu-gb.bluemix.net]: " apihost
export BLUEMIX_API_HOST=${apihost:-api.eu-gb.bluemix.net}
if ${BIN}/wsk-login.sh
then
echo "BLUEMIX_API_HOST=${BLUEMIX_API_HOST}" >> ${ROOT}/.wskrc
TARGET=$(ibmcloud target)
BLUEMIX_EMAIL=$(echo $TARGET | cut -d ':' -f6 | awk '{print $1}')
BLUEMIX_ACCOUNT=$(echo $TARGET | cut -d '(' -f3 | cut -d ')' -f1)
BLUEMIX_ORG=$(echo $TARGET | cut -d ':' -f8 | awk '{print $1}')
BLUEMIX_SPACE=$(echo $TARGET | cut -d ':' -f9 | awk '{print $1}')
echo "BLUEMIX_EMAIL=$BLUEMIX_EMAIL" >> ${ROOT}/.wskrc
echo "BLUEMIX_ACCOUNT=$BLUEMIX_ACCOUNT" >> ${ROOT}/.wskrc
echo "BLUEMIX_ORG=$BLUEMIX_ORG" >> ${ROOT}/.wskrc
echo "BLUEMIX_SPACE=$BLUEMIX_SPACE" >> ${ROOT}/.wskrc
fi
else
${BIN}/wsk-login.sh
fi
echo "Testing whisk action:
ibmcloud wsk action invoke /whisk.system/utils/echo -p message hello --blocking --result
Response: "
ibmcloud wsk action invoke /whisk.system/utils/echo -p message hello --blocking --result
rc=$?
if [ $rc -eq 0 ]; then
echo "All is well!"
exit 0
else
echo "Test invocation failed with return code $rc"
fi
HAS_NYC=$(which nyc)
if [ -z "$HAS_NYC"]; then
echo "Installing istanbul command line: npm install -g nyc"
npm install -g nyc
fi
npm install
|
gameontext/gameon-sweep
|
bin/wsk-setup.sh
|
Shell
|
apache-2.0
| 1,684 |
#!/bin/sh
################################################################################
: ${DIALOG=dialog}
installdir=$0
if [ "$0" = './install.sh' ]; then
installdir=`pwd`/install.sh
fi
echo $installdir
user=`whoami`
if [ $user != "root" ]; then
echo "Please, execute installation script as root user"
exit 0
fi
echo "Prepearing installation process"
if ! [ -d "/temp" ]; then
mkdir /temp 2>/dev/null
fi
tail -n +156 "$installdir" | head -n +697 > /temp/dialog-1.0.20051107-1.2.2.i386.rpm
echo EOF >> /temp/dialog-1.0.20051107-1.2.2.i386.rpm
cd /temp
rpm -i dialog-1.0.20051107-1.2.2.i386.rpm &>/dev/null
jbosshome="/opt/jboss"
jdk="/opt/jre1.6.0_04"
inform="
JBOSS APPLICATION SERVER
JAVA DEVELOPMENT KIT
POSTGRESQL DATABASE SERVER
STV APPLICATION"
$DIALOG --clear --title "THIS SCRIPT INSTALLING" --msgbox "$inform" 10 44
case $? in
0)
echo "OK";;
255)
echo "ESC pressed.";;
esac
(
function forwrite()
{
if [ ! -z "$2" ]; then
echo "XXX";
echo "$1"
echo "XXX";
pr=` expr $pr + $2`
echo "$pr"
else
pr=` expr $pr + $1`
echo "$pr"
fi
sleep 1;
}
forwrite "Begin installation" "1"
service postgresql stop 2>/dev/null
service jboss stop 2>/dev/null
tail -n +697 "$installdir" > /temp/install.tar.gz
if [ -e install.tar.gz ]; then
forwrite "Extrcting file arhive" "10"
tar -xzf install.tar.gz &>/dev/null
chmod -R 755 /temp 2>/dev/null
else
echo "Error not corectly creat arhive"
exit 0
fi
forwrite "JRE install" "15"
#Java begin install
chmod -R 755 /temp/jre1.6.0_04 2>/dev/null
cd /temp
mkdir "$jdk" 2>/dev/null
cp -R jre1.6.0_04/* "$jdk/" 2>/dev/null
chmod -R 755 $jdk 2>/dev/null
classpath=`cd $jdk ; pwd`/lib/rt.jar
cd /temp
echo "#!/bin/sh
# if [ -z \$JAVA_HOME ]; then
export JAVA_HOME=$jdk
export PATH=\$PATH:$jdk/bin
export CLASSPATH=.:$classpath
# fi" > /etc/profile.d/jre1.6.0_04.sh 2>/dev/null
chmod 755 /etc/profile.d/jre1.6.0_04.sh 2>/dev/null
. /etc/profile.d/jre1.6.0_04.sh 2>/dev/null # source it so the exports apply to this shell
#Java end install
#Postgresql begin Install
forwrite "Begin DataBase server Postgresql Install" "10"
rm -rf /var/lib/pgsql/*
rpm -i postgresql-libs-8.2.6-1PGDG.rhel5.i686.rpm 2>/dev/null
rpm -i compat-postgresql-libs-4-1PGDG.rhel5.i686.rpm 2>/dev/null
rpm -i postgresql-8.2.6-1PGDG.rhel5.i686.rpm 2>/dev/null
rpm -i postgresql-devel-8.2.6-1PGDG.rhel5.i686.rpm 2>/dev/null
rpm -i postgresql-server-8.2.6-1PGDG.rhel5.i686.rpm 2>/dev/null
service postgresql initdb &>/dev/null
cat /temp/conf/pg_hba.conf > /var/lib/pgsql/data/pg_hba.conf 2>/dev/null
chown postgres:postgres /var/lib/pgsql/data/pg_hba.conf &>/dev/null
chmod 600 /var/lib/pgsql/data/pg_hba.conf &>/dev/null
service postgresql start &>/dev/null
forwrite "Creating PostgreSQL server role..." "7"
su - postgres -c "psql -c \"create role itv login password 'v3Gm51pg' nosuperuser noinherit nocreatedb nocreaterole;\" postgres postgres &>/dev/null " &>/dev/null
forwrite "Creating database STV..." "4"
su - postgres -c "psql -c \"create database \\\"STV\\\" with owner = itv encoding = 'WIN1251' tablespace = pg_default;\" postgres postgres &>/dev/null " &>/dev/null
forwrite "Restoring database..." "10"
su - postgres -c "pg_restore -i -U postgres -d \"STV\" -v \"/temp/conf/STV.backup\" &>/dev/null " &>/dev/null
#Postgresql end install
forwrite "Jboss application server Install" "25"
#Jboss begin install
userdel -r jboss 2>/dev/null
groupadd jboss 2>/dev/null
adduser -g jboss jboss 2>/dev/null
mkdir "$jbosshome" 2>/dev/null
cd /temp/jboss-4.2.2.GA
cp -R /temp/jboss-4.2.2.GA/* "$jbosshome" 2>/dev/null
chown -R jboss:jboss "$jbosshome" 2>/dev/null
forwrite "Installation successfully finished!" "18"
) |
$DIALOG --title "Processing install..." --cr-wrap --gauge "Install" 10 40 0
while :
do
$DIALOG --title "Enter ip address JBOSS application server listen on" --clear \
--inputbox "" 16 51 2> /temp/ipaddress
retval=$?
case $retval in
0)
if [ ! -z `cat "/temp/ipaddress" | grep -E '(^|[[:space:]])[0-9]*\.[0-9]*\.[0-9]*\.[0-9]*([[:space:]]|$)'` ];
then
jbossaddr=`cat /temp/ipaddress`
forhost="JBOSS_HOST=$jbossaddr"
forhome="JBOSS_HOME=$jbosshome"
echo "#!/bin/sh
export $forhost
export $forhome" > /etc/profile.d/jboss.sh
chmod 755 /etc/profile.d/jboss.sh 2>/dev/null
cat /temp/conf/jboss | awk '{gsub("1234567890","'$forhome'"); print;}' > /temp/conf/jbos
cat /temp/conf/jbos | awk '{gsub("9876543210","'$forhost'"); print;}' > /temp/conf/jboss 2>/dev/null
cat /temp/conf/jboss > /etc/init.d/jboss 2>/dev/null
chmod 755 /etc/init.d/jboss 2>/dev/null
cp /temp/conf/APP.ear /opt/jboss/server/default/deploy/APP.ear 2>/dev/null
chown root:root /opt/jboss/server/default/deploy/APP.ear 2>/dev/null
chmod 644 /opt/jboss/server/default/deploy/APP.ear 2>/dev/null
. /etc/profile.d/jboss.sh 2>/dev/null # source it so the exports apply to this shell
chkconfig --add jboss 2>/dev/null
chkconfig --level 0123456 jboss on 2>/dev/null
service jboss start 2>/dev/null
#Jboss end install
# rm -rf /temp
break;
fi
;;
1)
echo "Cancel pressed.";;
255)
;;
esac
done
exit 1
|
idudko/install_script
|
install.sh
|
Shell
|
apache-2.0
| 5,549 |
#!/bin/bash
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Local testing of serving program
cd "$( dirname "${BASH_SOURCE[0]}" )" || exit
DIR="$( pwd )"
SRC_DIR=${DIR}"/../"
export PYTHONPATH=${PYTHONPATH}:${SRC_DIR}
echo "PYTHONPATH=""${PYTHONPATH}"
# The dataset used throughout the demonstration is
# Banknote Authentication Data Set, you may change according to your needs.
# The schema should be in the format of 'field_name:field_type;...'
export TRAINING_DATA_SCHEMA='VWT:float;SWT:float;KWT:float;Entropy:float;Class:int'
export MODEL_FILENAME='model.txt'
python -m images.serving.app
|
GoogleCloudPlatform/professional-services
|
examples/vertex_pipeline/scripts/run_serving_local.sh
|
Shell
|
apache-2.0
| 1,129 |
#!/bin/sh
# Usage:
# 1. cd to the top-level dg directory (where the Makefile lives)
# 2a. Type bin/jslint-git.sh
# or
# 2b. Type make jslint-git
TMP_SCRIPT_NAME=myjslint-git.sh
# Use git status to get the list of added/modified files.
# Then use sed to convert the output of git status to a shell script
# that runs jslint-echo.sh on each added/modified file.
git status | sed -n -e 's/^# Changes.*/echo \"& (staged)\"/p' \
-e 's/^# Changed.*/echo \"& (unstaged)\"/p' \
-e 's/^# Untracked.*/echo \"& (unversioned)\"/p' \
-e 's/\(#[[:space:]]*\)\([^:]*\)$/bin\/jslint-echo.sh \2/' \
-e 's/#[[:space:]]*//' \
-e 's/\/libraries\/.*//' \
-e 's/deleted:.*//' \
-e 's/new file:/bin\/jslint-echo.sh/' \
-e 's/modified:/bin\/jslint-echo.sh/' \
-e 's/renamed:\(.*\)->\(.*\)$/bin\/jslint-echo.sh \2/' \
-e '/\.js$/p' \
> $TMP_SCRIPT_NAME
# Make the script we just created executable
chmod +x $TMP_SCRIPT_NAME
# Run the script
sh $TMP_SCRIPT_NAME
# Delete the script
rm $TMP_SCRIPT_NAME
|
apeeyush/Data-Analytics-CODAP
|
bin/jslint-git.sh
|
Shell
|
apache-2.0
| 1,215 |
#!/bin/bash
# Ensure that all your children are truly dead when you yourself are killed.
# trap "kill -- -$BASHPID" INT TERM EXIT
# leave out EXIT for now
trap "kill -- -$BASHPID" INT TERM
echo "BASHPID: $BASHPID"
echo "current PID: $$"
source ./runner_setup.sh "$@"
rm -f h2o-nodes.json
if [[ $HOSTNAME == "lg1" || $HOSTNAME == "ch-63" ]]
then
# in sm land. clean up!
pssh -h /home/0xdiag/hosts_minus_9_22 -i 'rm -f -r /home/0xdiag/ice*'
python ../four_hour_cloud.py -v -cj pytest_config-jenkins-sm32.json &
else
if [[ $USER == "jenkins" ]]
then
# clean out old ice roots from 0xcust.** (assuming we're going to run as 0xcust..
# only do this if you're jenkins
echo "If we use more machines, expand this cleaning list."
echo "The possibilities should be relatively static over time"
echo "Could be problems if other threads also using that user on these machines at same time"
echo "Could make the rm pattern match a "sourcing job", not just 0xcustomer"
echo "Who cleans up on the target 172-180 machines?"
echo "Also: Touch all the 0xcustomer-datasets mnt points, to get autofs to mount them."
echo "Permission rights extend to the top level now, so only 0xcustomer can automount them"
echo "okay to ls the top level here...no secret info..do all the machines we might be using"
echo ""
echo "resolve the issue with colliding with other jobs, by only deleting if older than 3 days"
# 171 dead
# for mr in 171 172 173 174 175 176 177 178 179 180
for mr in 172 173 174 175 176 177 178 179 180
do
ssh -i ~/.0xcustomer/0xcustomer_id_rsa [email protected].$mr \
'find /home/0xcustomer/ice* -ctime +3 | xargs rm -rf; cd /mnt/0xcustomer-datasets'
done
python ../four_hour_cloud.py -cj pytest_config-jenkins-175-180.json &
else
if [[ $USER == "kevin" ]]
then
python ../four_hour_cloud.py -cj pytest_config-kevin.json &
else
python ../four_hour_cloud.py &
fi
fi
fi
CLOUD_PID=$!
jobs -l
echo ""
echo "Have to wait until h2o-nodes.json is available from the cloud build. Deleted it above."
echo "spin loop here waiting for it. Since the h2o.jar copy slows each node creation"
echo "it might be 12 secs per node"
while [ ! -f ./h2o-nodes.json ]
do
sleep 5
done
ls -lt ./h2o-nodes.json
# We now have the h2o-nodes.json, that means we started the jvms
# Shouldn't need to wait for h2o cloud here..
# the test should do the normal cloud-stabilize before it does anything.
# n0.doit uses nosetests so the xml gets created on completion. (n0.doit is a single test thing)
# A little '|| true' hack to make sure we don't fail out if this subtest fails
# test_c1_rel has 1 subtest
# This could be a runner, that loops thru a list of tests.
echo "If it exists, pytest_config-<username>.json in this dir will be used"
echo "i.e. pytest_config-jenkins.json"
echo "Used to run as 0xcust.., with multi-node targets (possibly)"
myPy() {
DOIT=../testdir_single_jvm/n0.doit
$DOIT $1/$2 || true
# try moving all the logs created by this test in sandbox to a subdir to isolate test failures
# think of h2o.check_sandbox_for_errors()
rm -f -r sandbox/$1
mkdir -p sandbox/$1
cp -f sandbox/*log sandbox/$1
# rm -f sandbox/*log
}
echo $TESTDIR
echo $TEST
# avoid for now
# myPy c5 test_c5_KMeans_sphere15_180GB.py
if [[ $TEST == "" ]] || [[ $TESTDIR == "" ]]
then
# if va and fvec tests are mixed without deleting keys,
# the import leaves keys that apparently get converted by exec -> timeout
# just do fvec tests
# myPy c1 test_c1_rel.py
# myPy c2 test_c2_rel.py
# myPy c3 test_c3_rel.py
# myPy c4 test_c4_four_billion_rows.py
# myPy c6 test_c6_hdfs_fvec.py
# myPy c8 test_c8_rf_airlines_hdfs_fvec.py
# myPy c9 test_c9_GBM_airlines_hdfs.py
# myPy c9 test_c9_GLM_airlines_hdfs_fvec.py
# myPy c10 test_c10_rel_gbm.py
myPy c8 test_c8_rf_airlines_hdfs_fvec.py
# myPy c9 test_c9_GLM_rc_fvec.py
# put known failure last
# doesn't work. key gets locked. forget about it
# myPy c7 test_c7_rel.py
else
myPy $TESTDIR $TEST
fi
# If this one fails, fail this script so the bash dies
# We don't want to hang waiting for the cloud to terminate.
myPy shutdown test_shutdown.py
if ps -p $CLOUD_PID > /dev/null
then
echo "$CLOUD_PID is still running after shutdown. Will kill"
kill $CLOUD_PID
fi
ps aux | grep four_hour_cloud
# test_c2_rel has about 11 subtests inside it, that will be tracked individually by jenkins
# ../testdir_single_jvm/n0.doit test_c2_rel || true
# We don't want the jenkins job to complete until we kill it, so the cloud stays alive for debug
# also prevents us from overrunning ourselves with cloud building
# If we don't wait, the cloud will get torn down.
jobs -l
echo ""
echo "You can stop this jenkins job now if you want. It's all done"
|
woobe/h2o
|
py/testdir_release/runner.sh
|
Shell
|
apache-2.0
| 5,044 |
#!/bin/bash
exec monodoc
|
PerceptiveSoftwareCodeDash/codedash-contestant-machine
|
cookbooks/install-languages/templates/default/monodoc.sh
|
Shell
|
apache-2.0
| 25 |
#!/bin/bash
# Install Berkeley DB 6.2
# http://www.linuxfromscratch.org/blfs/view/cvs/server/db.html
# http://docs.oracle.com/cd/E17275_01/html/programmer_reference/build_unix_conf.html
if [ -d "/usr/share/db-6.2.32" ]
then
echo "Berkeley DB 6.2 is already installed, nothing done!"
else
source /vagrant/vagrant-setup/include.sh
cd /usr/share
wget_and_untar http://download.oracle.com/berkeley-db/ db-6.2.32.tar.gz
cd db-6.2.32
rm -f -r build_wince
rm -f -r build_windows
rm -f -r build_vxworks
cd build_unix
../dist/configure --prefix=/usr --enable-java --enable-cxx && make
# OpenLDAP will look for libdb.so library
# If this is omitted then "Berkeley DB link (-ldb) no" error is raised when building OpenLDAP
# http://doc.gnu-darwin.org/build_unix/shlib.html
cd .libs
ln -s libdb-6.2.so libdb.so
cd $PPWD
# yum erase -y java-1.7.0-openjdk-devel
# unlink /usr/lib/jvm/jre-1.7.0-openjdk.x86_64
# unlink /usr/lib/jvm/jre-openjdk
# unlink /etc/alternatives/java
# unlink /etc/alternatives/java.1.gz
# unlink /etc/alternatives/jre_1.7.0
# unlink /etc/alternatives/jre_1.7.0_exports
# unlink /etc/alternatives/jre_openjdk
# unlink /etc/alternatives/jre_openjdk_exports
# unlink /etc/alternatives/keytool
# unlink /etc/alternatives/keytool.1.gz
# unlink /etc/alternatives/rmid
# unlink /etc/alternatives/rmid.1.gz
# unlink /etc/alternatives/rmiregistry
# unlink /etc/alternatives/rmiregistry.1.gz
# unlink /etc/alternatives/servertool
# unlink /etc/alternatives/servertool.1.gz
# unlink /etc/alternatives/tnameserv
# unlink /etc/alternatives/tnameserv.1.gz
# unlink /etc/alternatives/unpack200
# unlink /etc/alternatives/unpack200.1.gz
# rm -rf /usr/lib/jvm/jre-1.7.0
# rm -rf /usr/lib/java-1.7.0-openjdk-1.7.0.91.x86_64
# rm -rf /usr/lib/jvm/java-1.7.0-openjdk-1.7.0.91.x86_64
fi
|
sergiomt/centorion
|
vagrant-setup/db62.sh
|
Shell
|
apache-2.0
| 1,829 |
#!/bin/bash
#
# Issue a search-and-cluster request with a more complex field mapping.
#
# Note the search_request part is a regular search request; it contains
# highlighter directives; the number of fragments and fields to be highlighted
# are configurable.
#
# In order to make sense for clustering, it must (should)
# fetch at least 100 documents.
#
# The "query_hint" part provides query-like terms that the clustering algorithm
# takes into account to avoid creating trivial clusters.
#
# The "field_mapping" section maps logical "document" fields (title, content) to
# the indexed document's title field and a highlighted (typically shorter content)
# fragment of "content" field.
#
curl -H "Content-Type: application/json" -XPOST 'http://localhost:9200/test/test/_search_with_clusters?pretty=true' -d '
{
"search_request": {
"_source" : [
"url",
"title",
"content"
],
"highlight" : {
"pre_tags" : ["", ""],
"post_tags" : ["", ""],
"fields" : {
"content" : { "fragment_size" : 150, "number_of_fragments" : 3 },
"title" : { "fragment_size" : 150, "number_of_fragments" : 3 }
}
},
"query" : {
"match" : {
"content" : "data mining"
}
},
"size": 100
},
"query_hint": "data mining",
"field_mapping": {
"title" : ["_source.title"],
"content": ["highlight.content"]
}
}'
|
carrot2/elasticsearch-carrot2
|
docs/curl/03-field-mapping.sh
|
Shell
|
apache-2.0
| 1,521 |
text_file="/home/disk1/jyhou/my_egs/swbd_xy_egs/info/text_fixed_tail_500"
keyword_list_dir="/home/disk1/jyhou/feats/XiaoYing_STD/list/"
data_list_dir="/home/disk1/jyhou/feats/XiaoYing_STD/list/"
keyword_type=keywords_60_100_10_1
keyword_dir="/home/disk1/jyhou/feats/XiaoYing_STD/a_${keyword_type}/"
keyword_list_basename="${keyword_type}_average.list"
keyword_list_file=${keyword_list_dir}${keyword_list_basename}
fea_type="sbnf1"
for x in data_15_30 data_40_55 #data_65_80;
do
result_dir=${keyword_dir}dtw_${x}_${fea_type}/
test_list_file="${data_list_dir}/${x}.list"
ROC_out_file=out/${keyword_type}_${x}_${fea_type}.ROC
echo $result_dir
echo "python ./script/ROC.py $result_dir $keyword_list_file $test_list_file $text_file $ROC_out_file"
python ./script/ROC.py $result_dir $keyword_list_file $test_list_file $text_file $ROC_out_file
done
|
jingyonghou/XY_QByE_STD
|
script/get_ROC_curve_sta.sh
|
Shell
|
apache-2.0
| 863 |
#!/bin/bash
# Add CentOS and EPEL repos
yum -y reinstall centos-release
yum -y install epel-release
# Install Ansible
yum -y install ansible
# Install tree util
yum -y install tree
# Install jq util
yum -y install jq
# Delete CentOS and EPEL repos to avoid issues with Fuel services
yum -y remove epel-release
rm /etc/yum.repos.d/CentOS-*.repo
|
aepifanov/mos_mu
|
install_ansible.sh
|
Shell
|
apache-2.0
| 347 |
#!/usr/bin/env bash
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
pwd
mkdir result
# The files will only exist if maven reaches the corresponding phase of the build
function cpe() {
if [[ -e target/$1 ]]; then
cp -rv target/$1 result
else
echo "target/$1 not copied, check the completed build phases."
fi
}
# Copy over the important artifacts
# Run in module directory so that $(basename $(pwd)) is the module name.
cpe $(basename $(pwd)).jar
cpe failsafe-reports
cpe surefire-reports
# Compress the artifacts for upload
tar -zcvf ${BUILD_ARTIFACTS} result
|
GoogleCloudPlatform/gcp-plugin-core-java
|
jenkins/saveAndCompress.sh
|
Shell
|
apache-2.0
| 1,101 |
#!/bin/bash
# this hasn't been added yet; I'm just working on different ways to download
# stuff that I can later incorporate into the main script
# exists COMMAND
exists() {
if type "$1" >/dev/null 2>&1
then
echo "found $1"
return 0
else
return 1
fi
}
unable_to_retrieve_package() {
echo "Unable to retrieve a valid package!"
exit 1
}
download() {
echo "downloading $1"
echo " to file $2"
if [ -e "$2" ]
then
echo "Error: File $2 already exists"
return 1
fi
if exists wget; then
wget_download "$1" "$2" && return 0
fi
if exists curl; then
curl_download "$1" "$2" && return 0
fi
if exists perl; then
perl_download "$1" "$2" && return 0
fi
if exists python; then
python_download "$1" "$2" && return 0
fi
if exists ruby; then
ruby_download "$1" "$2" && return 0
fi
if exists bash; then
bash_download "$1" "$2" && return 0
fi
unable_to_retrieve_package
}
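# Example (illustrative URL): download "http://example.com/pkg.tar.gz" "/tmp/pkg.tar.gz"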
# validate_download FILE
validate_download() {
if [ -s "$1" ] && [ -f "$1" ]
then
return 0
else
return 1
fi
}
# curl_download URL FILENAME
curl_download() {
echo "trying curl..."
curl --output "$2" "$1" || return 1
validate_download "$2" || return 1
return 0
}
# wget_download URL FILENAME
wget_download() {
echo "trying wget..."
wget --output-document "$2" "$1" || return 1
validate_download "$2" || return 1
return 0
}
# python_download URL FILENAME
python_download() {
echo "trying python..."
python -c "import sys,urllib2 ; sys.stdout.write(urllib2.urlopen(sys.argv[1]).read())" "$1" > "$2" 2>/tmp/stderr
rc=$?
# check for 404
grep "HTTP Error 404" /tmp/stderr 2>&1 >/dev/null
if test $? -eq 0; then
echo "ERROR 404"
unable_to_retrieve_package
fi
# check for bad return status or empty output
if test $rc -ne 0 || test ! -s "$2"; then
return 1
fi
validate_download "$2" || return 1
return 0
}
# perl_download URL FILENAME
perl_download() {
echo "trying perl..."
perl -e 'use LWP::Simple; getprint($ARGV[0]);' "$1" > "$2" 2>/tmp/stderr
rc=$?
# check for 404
grep "404 Not Found" /tmp/stderr 2>&1 >/dev/null
if test $? -eq 0; then
echo "ERROR 404"
unable_to_retrieve_package
fi
# check for bad return status or empty output
if test $rc -ne 0 || test ! -s "$2"; then
return 1
fi
validate_download "$2" || return 1
return 0
}
# ruby_download URL FILENAME
ruby_download() {
ruby -e "require 'open-uri'; File.open('$2', 'w') do |file| file.write(open('$1').read) end"
validate_download "$2" || return 1
return 0
}
bash_download() {
[ -n "$BASH" ] || return 1
# pretty epic bashism, copied verbatim from
# http://unix.stackexchange.com/questions/83926/how-to-download-a-file-using-just-bash-and-nothing-else-no-curl-wget-perl-et
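# /dev/tcp/HOST/PORT is a bash-only pseudo-device: redirecting to it opens a
# TCP socket, which is what lets this fetch a URL with no external tools.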
function __wget() {
: ${DEBUG:=0}
local URL=$1
local tag="Connection: close"
local mark=0
if [ -z "${URL}" ]; then
printf "Usage: %s \"URL\" [e.g.: %s http://www.google.com/]" \
"${FUNCNAME[0]}" "${FUNCNAME[0]}"
return 1;
fi
read proto server path <<<$(echo ${URL//// })
DOC=/${path// //}
HOST=${server//:*}
PORT=${server//*:}
[[ x"${HOST}" == x"${PORT}" ]] && PORT=80
[[ $DEBUG -eq 1 ]] && echo "HOST=$HOST"
[[ $DEBUG -eq 1 ]] && echo "PORT=$PORT"
[[ $DEBUG -eq 1 ]] && echo "DOC =$DOC"
exec 3<>/dev/tcp/${HOST}/$PORT
echo -en "GET ${DOC} HTTP/1.1\r\nHost: ${HOST}\r\n${tag}\r\n\r\n" >&3
while read line; do
[[ $mark -eq 1 ]] && echo $line
if [[ "${line}" =~ "${tag}" ]]; then
mark=1
fi
done <&3
exec 3>&-
}
__wget "$1" > "$2"
validate_download "$2" || return 1
return 0
}
# other ideas:
# - use rsync
# - use openssl
# - use netcat
# - ksh tcp port
# - zsh tcp port http://web-tech.ga-usa.com/2014/04/zsh-simple-network-port-checker/
# on EL, download using RPM directly
# gnu gawk https://www.gnu.org/software/gawk/manual/gawkinet/html_node/TCP-Connecting.html http://www.linuxjournal.com/article/3132
# openssh "netcat mode" http://blog.rootshell.be/2010/03/08/openssh-new-feature-netcat-mode/
# openssl client?
# use rubygems directly
# fall back to trying to install curl/wget
|
danieldreier/puppet_installer
|
files/download.sh
|
Shell
|
apache-2.0
| 4,313 |
#!/bin/bash
# -------------------------------------------------------------------------- #
# Copyright 2002-2009, Distributed Systems Architecture Group, Universidad #
# Complutense de Madrid (dsa-research.org) #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
SRC=$1
DST=$2
if [ -z "${ONE_LOCATION}" ]; then
TMCOMMON=/usr/lib/one/mads/tm_common.sh
else
TMCOMMON=$ONE_LOCATION/lib/mads/tm_common.sh
fi
. $TMCOMMON
SRC_PATH=`arg_path $SRC`
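# NOTE: DST_HOST/DST_PATH are used below but never derived here; assuming the
# arg_host/arg_path helpers from tm_common.sh, they would be set like this:
DST_HOST=`arg_host $DST`
DST_PATH=`arg_path $DST`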
log "$1 $2"
case $SRC in
http://*)
log "Downloading $SRC"
exec_and_log "ssh $DST_HOST wget -O $DST_PATH $SRC_PATH"
;;
*)
log "Cloning $SRC"
VM_ID=`echo $DST | sed -e 's/.*\/\([0-9]\+\)\/images\/.*/\1/'`
cp -r $SRC_PATH $DATASTORE_PATH/one-$VM_ID &>/dev/null
mv $DATASTORE_PATH/one-$VM_ID/*.vmx $DATASTORE_PATH/one-$VM_ID/one-$VM_ID.vmx
;;
esac
|
fairchild/open-nebula-mirror
|
src/tm_mad/vmware/tm_clone.sh
|
Shell
|
apache-2.0
| 1,848 |
#!/bin/bash
swig2.0 -v -Ismile-lib -c++ -python -o pysmile_wrap.cpp pysmile.i
|
kadeng/pysmile
|
generate_wrapper.sh
|
Shell
|
apache-2.0
| 77 |
#!/bin/bash
git add .
git add -u
git commit -m "wip"
git push
|
whitewalter/fleetwood-bounder
|
push.sh
|
Shell
|
apache-2.0
| 63 |
# -----------------------------------------------------------------------------
#
# Package : phpspec/prophecy
# Version : 1.7
# Source repo : https://github.com/phpspec/prophecy
# Tested on : RHEL 8.3
# Script License: Apache License, Version 2 or later
# Maintainer : BulkPackageSearch Automation <[email protected]>
#
# Disclaimer: This script has been tested in root mode on given
# ========== platform using the mentioned version of the package.
# It may not work as expected with newer versions of the
# package and/or distribution. In such case, please
# contact "Maintainer" of this script.
#
# ----------------------------------------------------------------------------
PACKAGE_NAME=phpspec/prophecy
PACKAGE_VERSION=1.7
PACKAGE_URL=https://github.com/phpspec/prophecy
yum -y update && yum install -y nodejs nodejs-devel nodejs-packaging npm python38 python38-devel ncurses git jq curl php php-curl php-dom php-mbstring php-json nodejs make gcc-c++ patch diffutils php-gd php-pecl-zip
php -r "copy('https://getcomposer.org/installer', 'composer-setup.php');" && php composer-setup.php --install-dir=/bin --filename=composer
composer require --dev phpunit/phpunit --with-all-dependencies ^7
mkdir output
OS_NAME=`python3 -c "os_file_data=open('/etc/os-release').readlines();os_info = [i.replace('PRETTY_NAME=','').strip() for i in os_file_data if i.startswith('PRETTY_NAME')];print(os_info[0])"`
HOME_DIR=`pwd`
if ! git clone $PACKAGE_URL $PACKAGE_NAME; then
echo "------------------$PACKAGE_NAME:clone_fails---------------------------------------"
echo "$PACKAGE_URL $PACKAGE_NAME"
echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | GitHub | Fail | Clone_Fails"
exit 0
fi
cd $HOME_DIR/$PACKAGE_NAME
git checkout $PACKAGE_VERSION
if ! composer install; then
echo "------------------$PACKAGE_NAME:install_fails-------------------------------------"
echo "$PACKAGE_URL $PACKAGE_NAME"
echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | GitHub | Fail | Install_Fails"
exit 0
fi
cd $HOME_DIR/$PACKAGE_NAME
if ! /home/tester/vendor/bin/phpunit; then
echo "------------------$PACKAGE_NAME:install_success_but_test_fails---------------------"
echo "$PACKAGE_URL $PACKAGE_NAME"
echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | GitHub | Fail | Install_success_but_test_Fails"
exit 0
else
echo "------------------$PACKAGE_NAME:install_&_test_both_success-------------------------"
echo "$PACKAGE_URL $PACKAGE_NAME"
echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | GitHub | Pass | Both_Install_and_Test_Success"
exit 0
fi
|
ppc64le/build-scripts
|
p/phpspec__prophecy/phpspec__prophecy_rhel_8.3.sh
|
Shell
|
apache-2.0
| 2,678 |
#!/bin/bash
conf_file=~/.dotfiles
default_profile=priv
profile() {
local profile
if [ -e $conf_file ]; then
profile=$(cat $conf_file | grep -v '^#' | grep '^profile=' | cut -d '=' -f 2)
fi
[[ -z $profile ]] && echo $default_profile || echo $profile
}
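# The config file is a simple key=value list, e.g. a ~/.dotfiles containing:
# profile=work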
mkdir -p ~/acpi
mkdir -p ~/utils
mkdir -p ~/.tmux/plugins
mkdir -p ~/.urxvt
mkdir -p ~/.config
mkdir -p ~/.config/ranger
mkdir -p ~/.config/awesome
mkdir -p ~/.mcabber
profile=$(profile)
echo "Active profile : $profile"
stow --override=* -R -v -t ~/ -d git $profile
stow --override=* -R -v -t ~/ -d xdg $profile
stow -R -v -t ~/ bash common functions tmux vim zsh x spacemacs
stow -R -v -t ~/.config/awesome awesome
stow -R -v -t ~/.mcabber mcabber
stow -R -v -t ~/ octave
stow -R -v -t ~/ misc
stow -R -v -t ~/.config/ranger ranger
stow -R -v -t ~/acpi acpi
stow -R -v -t ~/utils utils
stow -R -v -t ~/.urxvt urxvt
stow -R -v -t ~/.config i3wm
vim +PluginInstall +qall
cd ~/.vim/bundle/fzf && ./install --all
cd ~/.vim/bundle/YouCompleteMe && ./install.py --clang-completer
|
ssledz/dotfiles
|
restow.sh
|
Shell
|
apache-2.0
| 1,046 |
#!/bin/sh
# -------------------------------------------------------------------------
# copy needed JBOSS components into DCM4CHEE installation
# -------------------------------------------------------------------------
DIRNAME=`dirname $0`
DCM4CHEE_HOME="$DIRNAME"/..
DCM4CHEE_SERV="$DCM4CHEE_HOME"/server/default
if [ x$1 = x ]; then
echo "Usage: $0 <path-to-jboss-4.2.3.GA-installation-directory>"
exit 1
fi
JBOSS_HOME="$1"
JBOSS_SERV="$JBOSS_HOME"/server/default
if [ ! -f "$JBOSS_HOME"/bin/run.jar ]; then
echo Could not locate jboss-4.2.3.GA in "$JBOSS_HOME"
exit 1
fi
cp -v "$JBOSS_HOME"/bin/run.jar \
"$JBOSS_HOME"/bin/shutdown.bat \
"$JBOSS_HOME"/bin/shutdown.jar \
"$JBOSS_HOME"/bin/shutdown.sh \
"$JBOSS_HOME"/bin/twiddle.bat \
"$JBOSS_HOME"/bin/twiddle.jar \
"$JBOSS_HOME"/bin/twiddle.sh \
"$DCM4CHEE_HOME"/bin
mkdir "$DCM4CHEE_HOME"/client
cp -v "$JBOSS_HOME"/client/jbossall-client.jar \
"$JBOSS_HOME"/client/getopt.jar \
"$DCM4CHEE_HOME"/client
cp -v -R "$JBOSS_HOME"/lib "$DCM4CHEE_HOME"
cp -v "$JBOSS_SERV"/conf/jbossjta-properties.xml \
"$JBOSS_SERV"/conf/jndi.properties \
"$DCM4CHEE_SERV"/conf
cp -v -R "$JBOSS_SERV"/conf/props \
"$JBOSS_SERV"/conf/xmdesc \
"$DCM4CHEE_SERV"/conf
mkdir "$DCM4CHEE_SERV"/lib
cp -v "$JBOSS_SERV"/lib/* "$DCM4CHEE_SERV"/lib
rm -v "$DCM4CHEE_SERV"/lib/jbossmq.jar
cp -v "$JBOSS_SERV"/deploy/bsh-deployer.xml \
"$JBOSS_SERV"/deploy/cache-invalidation-service.xml \
"$JBOSS_SERV"/deploy/client-deployer-service.xml \
"$JBOSS_SERV"/deploy/ear-deployer.xml \
"$JBOSS_SERV"/deploy/ejb3-interceptors-aop.xml \
"$JBOSS_SERV"/deploy/jboss-ha-local-jdbc.rar \
"$JBOSS_SERV"/deploy/jboss-ha-xa-jdbc.rar \
"$JBOSS_SERV"/deploy/jbossjca-service.xml \
"$JBOSS_SERV"/deploy/jboss-local-jdbc.rar \
"$JBOSS_SERV"/deploy/jboss-xa-jdbc.rar \
"$JBOSS_SERV"/deploy/jmx-invoker-service.xml \
"$JBOSS_SERV"/deploy/jsr88-service.xml \
"$JBOSS_SERV"/deploy/mail-service.xml \
"$JBOSS_SERV"/deploy/properties-service.xml \
"$JBOSS_SERV"/deploy/quartz-ra.rar \
"$JBOSS_SERV"/deploy/sqlexception-service.xml \
"$JBOSS_SERV"/deploy/jms/jms-ra.rar \
"$DCM4CHEE_SERV"/deploy
cp -v -R "$JBOSS_SERV"/deploy/ejb3.deployer \
"$JBOSS_SERV"/deploy/http-invoker.sar \
"$JBOSS_SERV"/deploy/jboss-aop-jdk50.deployer \
"$JBOSS_SERV"/deploy/jboss-bean.deployer \
"$JBOSS_SERV"/deploy/jbossws.sar \
"$DCM4CHEE_SERV"/deploy
mv "$DCM4CHEE_SERV"/deploy/jbossws.sar/jaxb-api.jar "$DCM4CHEE_SERV"/lib
mv "$DCM4CHEE_SERV"/deploy/jbossws.sar/jaxb-impl.jar "$DCM4CHEE_SERV"/lib
cp -v "$JBOSS_SERV"/deploy/jboss-web.deployer/context.xml \
"$JBOSS_SERV"/deploy/jboss-web.deployer/jasper-jdt.jar \
"$JBOSS_SERV"/deploy/jboss-web.deployer/jbossweb-extras.jar \
"$JBOSS_SERV"/deploy/jboss-web.deployer/jbossweb.jar \
"$JBOSS_SERV"/deploy/jboss-web.deployer/jbossweb-service.jar \
"$JBOSS_SERV"/deploy/jboss-web.deployer/jstl.jar \
"$DCM4CHEE_SERV"/deploy/jboss-web.deployer
cp -v -R "$JBOSS_SERV"/deploy/jboss-web.deployer/conf \
"$JBOSS_SERV"/deploy/jboss-web.deployer/jsf-libs \
"$JBOSS_SERV"/deploy/jboss-web.deployer/META-INF \
"$JBOSS_SERV"/deploy/jboss-web.deployer/ROOT.war \
"$DCM4CHEE_SERV"/deploy/jboss-web.deployer
cp -v "$JBOSS_SERV"/deploy/jmx-console.war/checkJNDI.jsp \
"$JBOSS_SERV"/deploy/jmx-console.war/displayMBeans.jsp \
"$JBOSS_SERV"/deploy/jmx-console.war/displayOpResult.jsp \
"$JBOSS_SERV"/deploy/jmx-console.war/index.jsp \
"$JBOSS_SERV"/deploy/jmx-console.war/jboss.css \
"$JBOSS_SERV"/deploy/jmx-console.war/style_master.css \
"$DCM4CHEE_SERV"/deploy/jmx-console.war
cp -v -R "$JBOSS_SERV"/deploy/jmx-console.war/cluster \
"$JBOSS_SERV"/deploy/jmx-console.war/images \
"$JBOSS_SERV"/deploy/jmx-console.war/META-INF \
"$DCM4CHEE_SERV"/deploy/jmx-console.war
cp -v -R "$JBOSS_SERV"/deploy/jmx-console.war/WEB-INF/classes \
"$DCM4CHEE_SERV"/deploy/jmx-console.war/WEB-INF
cp -v "$JBOSS_SERV"/deploy/management/console-mgr.sar/*.jar \
"$DCM4CHEE_SERV"/deploy/management/console-mgr.sar
cp -v -R "$JBOSS_SERV"/deploy/management/console-mgr.sar/META-INF \
"$DCM4CHEE_SERV"/deploy/management/console-mgr.sar
cp -v "$JBOSS_SERV"/deploy/management/console-mgr.sar/web-console.war/*.html \
"$JBOSS_SERV"/deploy/management/console-mgr.sar/web-console.war/*.jar \
"$JBOSS_SERV"/deploy/management/console-mgr.sar/web-console.war/*.js \
"$JBOSS_SERV"/deploy/management/console-mgr.sar/web-console.war/*.jsp \
"$JBOSS_SERV"/deploy/management/console-mgr.sar/web-console.war/*.xml \
"$DCM4CHEE_SERV"/deploy/management/console-mgr.sar/web-console.war
cp -v -R "$JBOSS_SERV"/deploy/management/console-mgr.sar/web-console.war/css \
"$JBOSS_SERV"/deploy/management/console-mgr.sar/web-console.war/images \
"$JBOSS_SERV"/deploy/management/console-mgr.sar/web-console.war/img \
"$JBOSS_SERV"/deploy/management/console-mgr.sar/web-console.war/META-INF \
"$DCM4CHEE_SERV"/deploy/management/console-mgr.sar/web-console.war
cp -v -R "$JBOSS_SERV"/deploy/management/console-mgr.sar/web-console.war/WEB-INF/classes \
"$JBOSS_SERV"/deploy/management/console-mgr.sar/web-console.war/WEB-INF/tlds \
"$DCM4CHEE_SERV"/deploy/management/console-mgr.sar/web-console.war/WEB-INF
|
medicayun/medicayundicom
|
dcm4jboss-all/tags/DCM4CHEE_2_14_8/dcm4jboss-build/bin/install_jboss.sh
|
Shell
|
apache-2.0
| 5,242 |
#!/bin/bash
set -eo pipefail
rm -f /var/run/haproxy.pid > /dev/null 2>&1
rm -f /etc/haproxy/haproxy.cfg > /dev/null 2>&1
if [ -z "$ETCD_NODE" ]; then
IP=$(/sbin/ip route | awk '/default/ { print $3 }')
ETCD_NODE=$IP
fi
if [ -z "$KONTENA_STACK_NAME" ] || [ "$KONTENA_STACK_NAME" == "null" ]; then
LB_NAME=$KONTENA_SERVICE_NAME
else
LB_NAME="$KONTENA_STACK_NAME/$KONTENA_SERVICE_NAME"
fi
export ETCD_NODE=$ETCD_NODE
export ETCD_PATH="/kontena/haproxy/$LB_NAME"
exec /app/bin/lb
|
kontena/kontena-loadbalancer
|
entrypoint.sh
|
Shell
|
apache-2.0
| 488 |
cd $HOME/CADL/session-1
jupyter notebook
|
dariox2/CADL
|
session-1/runjup.sh
|
Shell
|
apache-2.0
| 42 |
#/**
# * Copyright (c) 2016 Intel Corporation
# *
# * Licensed under the Apache License, Version 2.0 (the "License");
# * you may not use this file except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# */
set -e
VERSION=$(grep current_version .bumpversion.cfg | cut -d " " -f 3)
PROJECT_NAME=$(basename $(pwd))
# build project
cd Godeps/_workspace
mkdir -p src/github.com/trustedanalytics/
cd src/github.com/trustedanalytics/
ln -s ../../../../.. $PROJECT_NAME
cd ../../../../..
GOPATH=`godep path`:$GOPATH go test ./...
godep go build
rm Godeps/_workspace/src/github.com/trustedanalytics/$PROJECT_NAME
# assemble the artifact
PACKAGE_CATALOG=${PROJECT_NAME}-${VERSION}
# prepare build manifest
echo "commit_sha=$(git rev-parse HEAD)" > build_info.ini
# create zip package
zip -r ${PROJECT_NAME}-${VERSION}.zip * -x ${PROJECT_NAME}-${VERSION}.zip
echo "Zip package for $PROJECT_NAME project in version $VERSION has been prepared."
|
trustedanalytics/app-launcher-helper
|
pack.sh
|
Shell
|
apache-2.0
| 1,357 |
#!/bin/bash -e
. /var/cache/build/packages-manual/common.sh
download_and_verify \
bedops \
bedops \
2.4.32 \
cce96ded8e0276ed02a1b08f949c0ced09991305eedb61f5e2aa6f79e6e80b1f \
https://github.com/bedops/bedops/releases/download/v\${version}/bedops_linux_x86_64-v\${version}.tar.bz2
add_binary_path \
bedops \
bedops \
bin
|
genialis/resolwe-bio
|
resolwe_bio/docker_images/chipseq/packages-manual/bedops.sh
|
Shell
|
apache-2.0
| 356 |
#!/bin/bash
browser=$1
platform=$2
version=$3
echo TRAVIS_BUILD_NUMBER=${TRAVIS_BUILD_NUMBER}
echo TRAVIS_JOB_NUMBER=${TRAVIS_JOB_NUMBER}
echo browser=${browser}
echo platform=${platform}
echo version=${version}
if [ -z $version ]; then
varg=""
else
varg="-Dselenium.browser.version=${version}"
fi
echo mvn integration-test verify ${INTEGRATION_TEST_ARGS} -Dselenium.browser.name=${browser} -Dselenium.platform=${platform} ${varg} -Dselenium.timeout=120
mvn integration-test verify ${INTEGRATION_TEST_ARGS} -Dselenium.browser.name=${browser} -Dselenium.platform=${platform} ${varg} -Dselenium.timeout=120
|
dickschoeller/gedbrowser
|
config/selenium-script.sh
|
Shell
|
apache-2.0
| 613 |
#!/bin/bash
# https://stackoverflow.com/a/4024263/537768
verlte() {
[ "$1" = "$(echo -e "$1\n$2" | sort -V | head -n1)" ]
}
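# e.g. verlte "1.2.3" "1.2.10" succeeds: sort -V orders 1.2.3 before 1.2.10,
# so $1 is the head of the sorted pair.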
PROJECT='oio-swift'
# This will work in the main repository, but not in forks
LATEST_TAG=$(git fetch --tags && git describe --tags)
if [ -z "$LATEST_TAG" ]
then
echo "No tag, cannot check"
exit 0
fi
VERSION_REGEX='s/[^[:digit:]]*(([[:digit:]]+\.){2})([[:digit:]]+).*$/\1\3/p'
echo "$PROJECT pre-release version is $LATEST_TAG"
export LATEST_TAG
TAG_VERSION=$(echo "$LATEST_TAG" | sed -E -n -e "$VERSION_REGEX")
CODE_VERSION=$(sed -E -n -e "$VERSION_REGEX" oioswift/__init__.py)
echo "$PROJECT latest tagged version is $TAG_VERSION"
echo "$PROJECT version from code is $CODE_VERSION"
# Ensure pkg-config version is up-to-date
if verlte "$TAG_VERSION" "$CODE_VERSION"
then
echo "OK"
else
echo "KO"
exit 1
fi
|
open-io/oio-swift
|
oio-check-version.sh
|
Shell
|
apache-2.0
| 856 |
#!/bin/bash
# Copyright 2015 Insight Data Science
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
PEG_ROOT=$(dirname ${BASH_SOURCE})/../..
source ${PEG_ROOT}/util.sh
# check input arguments
if [ "$#" -ne 1 ]; then
echo "Please specify cluster name!" && exit 1
fi
CLUSTER_NAME=$1
PUBLIC_DNS=$(fetch_cluster_public_dns ${CLUSTER_NAME})
cmd='sudo /etc/init.d/riak stop'
for dns in ${PUBLIC_DNS}; do
run_cmd_on_node ${dns} ${cmd} &
done
wait
echo -e "${color_green}Riak Stopped!${color_norm}"
|
InsightDataScience/pegasus
|
service/riak/stop_service.sh
|
Shell
|
apache-2.0
| 1,002 |
#! /usr/bin/env bash
set -eu
shopt -s nullglob
# Locate the script file. Cross symlinks if necessary.
loc="$0"
while [ -h "$loc" ]; do
ls=`ls -ld "$loc"`
link=`expr "$ls" : '.*-> \(.*\)$'`
if expr "$link" : '/.*' > /dev/null; then
loc="$link" # Absolute link
else
loc="`dirname "$loc"`/$link" # Relative link
fi
done
base_dir=$(cd "`dirname "$loc"`" && pwd)
temp_out="$base_dir/djinni-output-temp"
in="$base_dir/djinni/all.djinni"
wchar_in="$base_dir/djinni/wchar_test.djinni"
# Relative version of in and temp_out are used for Djinni call below so that
# generated lists of infiles/outfiles are not machine-dependent. This
# is an artifact of the test suite, where we want the generated files
# to be in git for examination.
in_relative="djinni/all.djinni"
wchar_in_relative="djinni/wchar_test.djinni"
temp_out_relative="djinni-output-temp"
cpp_out="$base_dir/generated-src/cpp"
jni_out="$base_dir/generated-src/jni"
objc_out="$base_dir/generated-src/objc"
java_out="$base_dir/generated-src/java/com/dropbox/djinni/test"
yaml_out="$base_dir/generated-src/yaml"
java_package="com.dropbox.djinni.test"
gen_stamp="$temp_out/gen.stamp"
if [ $# -eq 0 ]; then
# Normal build.
true
elif [ $# -eq 1 ]; then
command="$1"; shift
if [ "$command" != "clean" ]; then
echo "Unexpected arguemnt: \"$command\"." 1>&2
exit 1
fi
for dir in "$temp_out" "$cpp_out" "$jni_out" "$java_out"; do
if [ -e "$dir" ]; then
echo "Deleting \"$dir\"..."
rm -r "$dir"
fi
done
rm "$base_dir/generated-src/inFileList.txt"
rm "$base_dir/generated-src/outFileList.txt"
exit
fi
# Build Djinni
"$base_dir/../src/build"
# Run Djinni generation
[ ! -e "$temp_out" ] || rm -r "$temp_out"
(cd "$base_dir" && \
"$base_dir/../src/run-assume-built" \
--java-out "$temp_out_relative/java" \
--java-package $java_package \
--java-nullable-annotation "javax.annotation.CheckForNull" \
--java-nonnull-annotation "javax.annotation.Nonnull" \
--java-use-final-for-record false \
--ident-java-field mFooBar \
\
--cpp-out "$temp_out_relative/cpp" \
--cpp-namespace testsuite \
--ident-cpp-enum-type foo_bar \
--cpp-optional-template "std::experimental::optional" \
--cpp-optional-header "\"../../handwritten-src/cpp/optional.hpp\"" \
--cpp-extended-record-include-prefix "../../handwritten-src/cpp/" \
--cpp-use-wide-strings true \
\
--jni-out "$temp_out_relative/jni" \
--ident-jni-class NativeFooBar \
--ident-jni-file NativeFooBar \
\
--objc-out "$temp_out_relative/objc" \
--objcpp-out "$temp_out_relative/objc" \
--objc-type-prefix DB \
\
--yaml-out "$temp_out_relative/yaml" \
--yaml-out-file "yaml-test.yaml" \
--yaml-prefix "test_" \
\
--idl "$wchar_in_relative" && \
"$base_dir/../src/run-assume-built" \
--java-out "$temp_out_relative/java" \
--java-package $java_package \
--java-nullable-annotation "javax.annotation.CheckForNull" \
--java-nonnull-annotation "javax.annotation.Nonnull" \
--java-use-final-for-record false \
--java-implement-android-os-parcelable true \
--ident-java-field mFooBar \
\
--cpp-out "$temp_out_relative/cpp" \
--cpp-namespace testsuite \
--ident-cpp-enum-type foo_bar \
--cpp-optional-template "std::experimental::optional" \
--cpp-optional-header "\"../../handwritten-src/cpp/optional.hpp\"" \
--cpp-extended-record-include-prefix "../../handwritten-src/cpp/" \
\
--jni-out "$temp_out_relative/jni" \
--ident-jni-class NativeFooBar \
--ident-jni-file NativeFooBar \
\
--objc-out "$temp_out_relative/objc" \
--objcpp-out "$temp_out_relative/objc" \
--objc-type-prefix DB \
\
--list-in-files "./generated-src/inFileList.txt" \
--list-out-files "./generated-src/outFileList.txt"\
\
--yaml-out "$temp_out_relative/yaml" \
--yaml-out-file "yaml-test.yaml" \
--yaml-prefix "test_" \
\
--idl "$in_relative" \
--idl-include-path "djinni/vendor" \
)
# Make sure we can parse back our own generated YAML file
cp "$base_dir/djinni/yaml-test.djinni" "$temp_out/yaml"
(cd "$base_dir" && \
"$base_dir/../src/run-assume-built" \
--java-out "$temp_out_relative/java" \
--java-package $java_package \
--ident-java-field mFooBar \
\
--cpp-out "$temp_out_relative/cpp" \
--ident-cpp-enum-type foo_bar \
--cpp-optional-template "std::experimental::optional" \
--cpp-optional-header "\"../../handwritten-src/cpp/optional.hpp\"" \
\
--jni-out "$temp_out_relative/jni" \
--ident-jni-class NativeFooBar \
--ident-jni-file NativeFooBar \
\
--objc-out "$temp_out_relative/objc" \
--objcpp-out "$temp_out_relative/objc" \
--objc-type-prefix DB \
\
--idl "$temp_out_relative/yaml/yaml-test.djinni" \
)
# Copy changes from "$temp_out" to final dir.
mirror() {
local prefix="$1" ; shift
local src="$1" ; shift
local dest="$1" ; shift
mkdir -p "$dest"
rsync -r --delete --checksum --itemize-changes "$src"/ "$dest" | sed "s/^/[$prefix]/"
}
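# rsync --itemize-changes prints one line per changed file; the sed tags each
# line with the target name, e.g. "[cpp] >f+++++++++ SomeType.hpp".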
echo "Copying generated code to final directories..."
mirror "cpp" "$temp_out/cpp" "$cpp_out"
mirror "java" "$temp_out/java" "$java_out"
mirror "jni" "$temp_out/jni" "$jni_out"
mirror "objc" "$temp_out/objc" "$objc_out"
date > "$gen_stamp"
echo "Djinni completed."
|
PSPDFKit-labs/djinni
|
test-suite/run_djinni.sh
|
Shell
|
apache-2.0
| 5,447 |
#!/bin/bash
function setupNTPCronJob {
echo "add hourly cron job of ntpupdate"
echo '#!/bin/sh' > /etc/cron.hourly/ntpdate
echo 'ntpdate time.apple.com' >> /etc/cron.hourly/ntpdate
chmod 755 /etc/cron.hourly/ntpdate
}
echo "setup ntp cron job"
setupNTPCronJob
|
qiujian16/vagrant-novadocker
|
scripts/setup-ubuntu-ntp.sh
|
Shell
|
apache-2.0
| 279 |
#!/usr/bin/env bash
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
set -e
# Control variable used to determine whether to execute this script
# directly or allow the gate_hook to import.
IS_GATE=${IS_GATE:-False}
USE_CONSTRAINT_ENV=${USE_CONSTRAINT_ENV:-True}
if [[ "$IS_GATE" != "True" ]] && [[ "$#" -lt 1 ]]; then
>&2 echo "Usage: $0 /path/to/devstack [-i]
Configure a host to run Neutron's functional test suite.
-i Install Neutron's package dependencies. By default, it is assumed
that devstack has already been used to deploy neutron to the
target host and that package dependencies need not be installed.
Warning: This script relies on devstack to perform extensive
modification to the underlying host. It is recommended that it be
invoked only on a throw-away VM."
exit 1
fi
# Skip the first argument
OPTIND=2
while getopts ":i" opt; do
case $opt in
i)
INSTALL_BASE_DEPENDENCIES=True
;;
esac
done
# Default to environment variables to permit the gate_hook to override
# when sourcing.
VENV=${VENV:-dsvm-functional}
# If executed in the gate, run in a constrained env
if [[ "$IS_GATE" == "True" && "$USE_CONSTRAINT_ENV" == "True" ]]
then
VENV=$VENV-constraints
fi
DEVSTACK_PATH=${DEVSTACK_PATH:-$1}
PROJECT_NAME=${PROJECT_NAME:-neutron}
REPO_BASE=${GATE_DEST:-$(cd $(dirname "$0")/../.. && pwd)}
INSTALL_MYSQL_ONLY=${INSTALL_MYSQL_ONLY:-False}
# The gate should automatically install dependencies.
INSTALL_BASE_DEPENDENCIES=${INSTALL_BASE_DEPENDENCIES:-$IS_GATE}
if [ ! -f "$DEVSTACK_PATH/stack.sh" ]; then
>&2 echo "Unable to find devstack at '$DEVSTACK_PATH'. Please verify that the specified path points to a valid devstack repo."
exit 1
fi
set -x
function _init {
# Subsequently-called devstack functions depend on the following variables.
HOST_IP=127.0.0.1
FILES=$DEVSTACK_PATH/files
TOP_DIR=$DEVSTACK_PATH
source $DEVSTACK_PATH/stackrc
# Allow the gate to override values set by stackrc.
DEST=${GATE_DEST:-$DEST}
STACK_USER=${GATE_STACK_USER:-$STACK_USER}
}
function _install_base_deps {
echo_summary "Installing base dependencies"
INSTALL_TESTONLY_PACKAGES=True
PACKAGES=$(get_packages general,neutron,q-agt,q-l3)
# Do not install 'python-' prefixed packages other than
# python-dev*. Neutron's functional testing relies on deployment
# to a tox env so there is no point in installing python
# dependencies system-wide.
PACKAGES=$(echo $PACKAGES | perl -pe 's|python-(?!dev)[^ ]*||g')
install_package $PACKAGES
}
function _install_rpc_backend {
echo_summary "Installing rabbitmq"
RABBIT_USERID=${RABBIT_USERID:-stackrabbit}
RABBIT_HOST=${RABBIT_HOST:-$SERVICE_HOST}
RABBIT_PASSWORD=${RABBIT_PASSWORD:-secretrabbit}
source $DEVSTACK_PATH/lib/rpc_backend
enable_service rabbit
install_rpc_backend
restart_rpc_backend
}
# _install_databases [install_pg]
function _install_databases {
local install_pg=${1:-True}
echo_summary "Installing databases"
# Avoid attempting to configure the db if it appears to already
# have run. The setup as currently defined is not idempotent.
if mysql openstack_citest > /dev/null 2>&1 < /dev/null; then
echo_summary "DB config appears to be complete, skipping."
return 0
fi
MYSQL_PASSWORD=${MYSQL_PASSWORD:-secretmysql}
DATABASE_PASSWORD=${DATABASE_PASSWORD:-secretdatabase}
source $DEVSTACK_PATH/lib/database
enable_service mysql
initialize_database_backends
install_database
configure_database_mysql
if [[ "$install_pg" == "True" ]]; then
enable_service postgresql
initialize_database_backends
install_database
configure_database_postgresql
fi
# Set up the 'openstack_citest' user and database in each backend
tmp_dir=$(mktemp -d)
trap "rm -rf $tmp_dir" EXIT
cat << EOF > $tmp_dir/mysql.sql
CREATE DATABASE openstack_citest;
CREATE USER 'openstack_citest'@'localhost' IDENTIFIED BY 'openstack_citest';
CREATE USER 'openstack_citest' IDENTIFIED BY 'openstack_citest';
GRANT ALL PRIVILEGES ON *.* TO 'openstack_citest'@'localhost';
GRANT ALL PRIVILEGES ON *.* TO 'openstack_citest';
FLUSH PRIVILEGES;
EOF
/usr/bin/mysql -u root < $tmp_dir/mysql.sql
if [[ "$install_pg" == "True" ]]; then
cat << EOF > $tmp_dir/postgresql.sql
CREATE USER openstack_citest WITH CREATEDB LOGIN PASSWORD 'openstack_citest';
CREATE DATABASE openstack_citest WITH OWNER openstack_citest;
EOF
# User/group postgres needs to be given access to tmp_dir
setfacl -m g:postgres:rwx $tmp_dir
sudo -u postgres /usr/bin/psql --file=$tmp_dir/postgresql.sql
fi
}
function _install_agent_deps {
echo_summary "Installing agent dependencies"
source $DEVSTACK_PATH/lib/neutron-legacy
ENABLED_SERVICES=q-agt,q-dhcp,q-l3
install_neutron_agent_packages
}
# Set up the rootwrap sudoers for neutron to target the rootwrap
# configuration deployed in the venv.
function _install_rootwrap_sudoers {
echo_summary "Installing rootwrap sudoers file"
PROJECT_VENV=$REPO_BASE/$PROJECT_NAME/.tox/$VENV
ROOTWRAP_SUDOER_CMD="$PROJECT_VENV/bin/neutron-rootwrap $PROJECT_VENV/etc/neutron/rootwrap.conf *"
ROOTWRAP_DAEMON_SUDOER_CMD="$PROJECT_VENV/bin/neutron-rootwrap-daemon $PROJECT_VENV/etc/neutron/rootwrap.conf"
TEMPFILE=$(mktemp)
cat << EOF > $TEMPFILE
# A bug in oslo.rootwrap [1] prevents commands executed with 'ip netns
# exec' from being automatically qualified with a prefix from
# rootwrap's configured exec_dirs. To work around this problem, add
# the venv bin path to a user-specific secure_path.
#
# While it might seem preferable to set a command-specific
# secure_path, this would only ensure the correct path for 'ip netns
# exec' and the command targeted for execution in the namespace would
# not inherit the path.
#
# 1: https://bugs.launchpad.net/oslo.rootwrap/+bug/1417331
#
Defaults:$STACK_USER secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:$PROJECT_VENV/bin"
$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_SUDOER_CMD
$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_DAEMON_SUDOER_CMD
EOF
chmod 0440 $TEMPFILE
sudo chown root:root $TEMPFILE
# Name the functional testing rootwrap to ensure that it will be
# loaded after the devstack rootwrap (50_stack_sh if present) so
# that the functional testing secure_path (a superset of what
# devstack expects) will not be overwritten.
sudo mv $TEMPFILE /etc/sudoers.d/60-neutron-func-test-rootwrap
}
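# Optional sanity check (a suggestion, not in the original script): confirm
# the installed sudoers file parses cleanly before relying on it.
# sudo visudo -c -f /etc/sudoers.d/60-neutron-func-test-rootwrap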
function _install_post_devstack {
echo_summary "Performing post-devstack installation"
_install_databases
_install_rootwrap_sudoers
if is_ubuntu; then
install_package isc-dhcp-client
elif is_fedora; then
install_package dhclient
else
exit_distro_not_supported "installing dhclient package"
fi
# Installing python-openvswitch from packages is a stop-gap while
# python-openvswitch remains unavailable from pypi. This also
# requires that sitepackages=True be set in tox.ini to allow the
# venv to use the installed package. Once python-openvswitch
# becomes available on pypi, this will no longer be required.
#
# NOTE: the package name 'python-openvswitch' is common across
# supported distros.
install_package python-openvswitch
}
function _configure_iptables_rules {
# For linuxbridge-agent fullstack tests we need to add iptables rules that
# allow the agents to connect to rabbitmq:
CHAIN_NAME="openstack-INPUT"
sudo iptables -n --list $CHAIN_NAME 1> /dev/null 2>&1 || CHAIN_NAME="INPUT"
sudo iptables -I $CHAIN_NAME -s 240.0.0.0/8 -p tcp -m tcp -d 240.0.0.0/8 --dport 5672 -j ACCEPT
}
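# To confirm the rule landed (illustrative, not part of the original):
# sudo iptables -n --list "$CHAIN_NAME" | grep 5672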
function configure_host_for_func_testing {
echo_summary "Configuring host for functional testing"
if [[ "$INSTALL_BASE_DEPENDENCIES" == "True" ]]; then
# The following can also be installed via devstack when it installs
# neutron, so they are installed conditionally here to minimize duplicate
# work on a devstack-configured host.
_install_base_deps
_install_agent_deps
_install_rpc_backend
fi
_install_post_devstack
}
_init
if [[ "$IS_GATE" != "True" ]]; then
if [[ "$INSTALL_MYSQL_ONLY" == "True" ]]; then
_install_databases nopg # any argument other than "True" skips the postgres setup
else
configure_host_for_func_testing
fi
fi
if [[ "$VENV" =~ "dsvm-fullstack" ]]; then
_configure_iptables_rules
fi
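# Typical invocations (illustrative; the variables are the ones consumed above):
# VENV=dsvm-functional ./tools/configure_for_func_testing.sh
# INSTALL_MYSQL_ONLY=True ./tools/configure_for_func_testing.sh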
|
dims/neutron
|
tools/configure_for_func_testing.sh
|
Shell
|
apache-2.0
| 9,086 |
#! /usr/bin/env zsh
# Load the interactive zsh config and the base16 "icy" color theme, then run
# the command passed in as arguments (quoted to preserve word boundaries).
. ~/.zshrc
base16_icy
"$@"
exit 0
|
chocobn69/dotfiles
|
.config/zsh/launcher.sh
|
Shell
|
apache-2.0
| 52 |
#!/bin/sh
# Copyright 2012 Anton Beloglazov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Install PyYAML, a YAML Python library
yum install -y PyYAML
# Clone a repository with Keystone initialization scripts
git clone https://github.com/nimbis/keystone-init.git
# Replace the default configuration with the values defined by the
# environment variables in configrc
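# Suggested guard (not in the original script): fail fast if the variables
# expected from configrc are unset, e.g.:
# : "${OS_TENANT_NAME:?source configrc first}"
# : "${OS_USERNAME:?source configrc first}"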
sed -i "s/192.168.206.130/controller/g" keystone-init/config.yaml
sed -i "s/012345SECRET99TOKEN012345/`cat keystone-admin-token`/g" keystone-init/config.yaml
sed -i "s/name: openstackDemo/name: $OS_TENANT_NAME/g" keystone-init/config.yaml
sed -i "s/name: adminUser/name: $OS_USERNAME/g" keystone-init/config.yaml
sed -i "s/password: secretword/password: $OS_PASSWORD/g" keystone-init/config.yaml
sed -i "s/name: glance/name: $GLANCE_SERVICE_USERNAME/g" keystone-init/config.yaml
sed -i "s/password: glance/password: $GLANCE_SERVICE_PASSWORD/g" keystone-init/config.yaml
sed -i "s/name: nova/name: $NOVA_SERVICE_USERNAME/g" keystone-init/config.yaml
sed -i "s/password: nova/password: $NOVA_SERVICE_PASSWORD/g" keystone-init/config.yaml
sed -i "s/RegionOne/$OS_REGION_NAME/g" keystone-init/config.yaml
# Run the Keystone initialization script
./keystone-init/keystone-init.py ./keystone-init/config.yaml
echo ""
echo "The applied config file is keystone-init/config.yaml"
echo "You may do 'rm -rf keystone-init' to remove the no more needed keystone-init directory"
|
beloglazov/openstack-centos-kvm-glusterfs
|
07-openstack-controller/11-keystone-create-users.sh
|
Shell
|
apache-2.0
| 1,982 |
#!/bin/bash
TOMCAT_PATH="/home/jee/apache-tomcat-7.0.61"
WAR_NAME="testFacelets.war"
BUILD_DIR="./target"
mvn clean install && \
cp $BUILD_DIR/$WAR_NAME $TOMCAT_PATH/webapps/$WAR_NAME
|
wadzapi/testFacelets
|
scripts/deployIt.sh
|
Shell
|
apache-2.0
| 185 |
#!/bin/bash
set -e
changes=$(git log -1 --pretty=%B)
if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then
file_to_upload="platforms/ios/build/device/Deus LARP 2017.ipa"
fi
if [[ "$TRAVIS_OS_NAME" != "osx" ]]; then
file_to_upload="platforms/android/app/build/outputs/apk/debug/app-debug.apk"
fi
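# Field meanings below are per the (now-retired) HockeyApp v2 upload API:
# notes_type=0 treats the notes as plain Textile, status=2 makes the version
# downloadable, and release_type=2 marks it as an alpha build.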
curl \
-F "ipa=@$file_to_upload" \
-F "notes=$changes" \
-F "notes_type=0" \
-F "status=2" \
-F "release_type=2" \
-H "X-HockeyAppToken: $HOCKEY_APP_TOKEN" \
https://rink.hockeyapp.net/api/2/apps/upload
|
sth-larp/deus-mobile
|
scripts/upload-to-hockeyapp.sh
|
Shell
|
apache-2.0
| 511 |
#!/bin/bash
# -----------------------------------------------------------------------------
#
# Package : instaclick/php-webdriver
# Version : 1.4.7
# Source repo : https://github.com/instaclick/php-webdriver
# Tested on : RHEL 8.3
# Script License: Apache License, Version 2 or later
# Maintainer : BulkPackageSearch Automation <[email protected]>
#
# Disclaimer: This script has been tested in root mode on given
# ========== platform using the mentioned version of the package.
# It may not work as expected with newer versions of the
# package and/or distribution. In such case, please
# contact "Maintainer" of this script.
#
# ----------------------------------------------------------------------------
PACKAGE_NAME=instaclick/php-webdriver
PACKAGE_VERSION=1.4.7
PACKAGE_URL=https://github.com/instaclick/php-webdriver
yum -y update && yum install -y nodejs nodejs-devel nodejs-packaging npm python38 python38-devel ncurses git jq curl php php-curl php-dom php-mbstring php-json make gcc-c++ patch diffutils php-gd php-pecl-zip
php -r "copy('https://getcomposer.org/installer', 'composer-setup.php');" && php composer-setup.php --install-dir=/bin --filename=composer
composer require --dev phpunit/phpunit --with-all-dependencies ^7
mkdir output
OS_NAME=`python3 -c "os_file_data=open('/etc/os-release').readlines();os_info = [i.replace('PRETTY_NAME=','').strip() for i in os_file_data if i.startswith('PRETTY_NAME')];print(os_info[0])"`
HOME_DIR=`pwd`
if ! git clone $PACKAGE_URL $PACKAGE_NAME; then
echo "------------------$PACKAGE_NAME:clone_fails---------------------------------------"
echo "$PACKAGE_URL $PACKAGE_NAME"
echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | GitHub | Fail | Clone_Fails"
exit 0
fi
cd $HOME_DIR/$PACKAGE_NAME
git checkout $PACKAGE_VERSION
if ! composer install; then
echo "------------------$PACKAGE_NAME:install_fails-------------------------------------"
echo "$PACKAGE_URL $PACKAGE_NAME"
echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | GitHub | Fail | Install_Fails"
exit 0
fi
cd $HOME_DIR/$PACKAGE_NAME
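# NOTE: this absolute phpunit path is specific to the automation host where
# the dev dependencies were installed; in a plain checkout the equivalent
# would usually be ./vendor/bin/phpunit.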
if ! /home/tester/vendor/bin/phpunit; then
echo "------------------$PACKAGE_NAME:install_success_but_test_fails---------------------"
echo "$PACKAGE_URL $PACKAGE_NAME"
echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | GitHub | Fail | Install_success_but_test_Fails"
exit 0
else
echo "------------------$PACKAGE_NAME:install_&_test_both_success-------------------------"
echo "$PACKAGE_URL $PACKAGE_NAME"
echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | GitHub | Pass | Both_Install_and_Test_Success"
exit 0
fi
|
ppc64le/build-scripts
|
i/instaclick__php-webdriver/instaclick__php-webdriver_rhel_8.3.sh
|
Shell
|
apache-2.0
| 2,714 |
#! /bin/bash
NUM_EXECUTORS=50
source config.sh
source ${DIG_CRF_SCRIPT}/checkMemexConnection.sh
source ${DIG_CRF_SCRIPT}/limitMemexExecutors.sh
echo "Submitting the job to the Memex cluster."
time spark-submit \
--master 'yarn-client' \
--num-executors ${NUM_EXECUTORS} \
${DRIVER_JAVA_OPTIONS} \
${DIG_CRF_COUNT}/countCrfResultPhrases.py \
-- \
--input ${WORKING_HAIR_EYES_HJ_FILE} \
--printToLog \
--excludeTags url
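# Note: everything after the application file, including the bare '--', is
# passed to countCrfResultPhrases.py as its own arguments; spark-submit does
# not interpret those flags itself.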
|
usc-isi-i2/dig-crf
|
examples/hbase-dump-2016-06-15/countCrfResultPhrases-crf-hair-eyes-hj.sh
|
Shell
|
apache-2.0
| 453 |
#!/bin/bash
# script credits : https://github.com/infracloudio/botkube
set -x
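# Vet the root package plus everything under cmd/ and manifest/ (the '...'
# wildcard matches all nested packages).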
go vet . ./cmd/... ./manifest/...
|
databus23/helm-diff
|
scripts/verify-govet.sh
|
Shell
|
apache-2.0
| 114 |
#!/bin/bash
dnImage="yiwei1012/hadoop-hsa-dn:1.2"
nnImage="yiwei1012/hadoop-hsa-nn-dn:1.2"
domain="mycorp.kom"
HSAOPTS="--privileged --device /dev/kfd -e DEVICE_TYPE=HSA"
GPUOPTS="--privileged -e DISPLAY=unix:0.0 -v=/tmp/.X11-unix:/tmp/.X11-unix:rw -e DEVICE_TYPE=GPU"
CPUOPTS="-e DEVICE_TYPE=CPU"
SLAVEOPTS="-d -t --dns 127.0.0.1 -e NODE_TYPE=s"
index=1
# first slave
docker run ${SLAVEOPTS} ${HSAOPTS} --name slave${index} -h slave${index}.${domain} ${dnImage}
FIRST_IP=$( docker inspect --format="{{.NetworkSettings.IPAddress}}" slave1 )
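# JOIN_IP (presumably consumed by the image's entrypoint) lets each later
# container, and finally the master, join the cluster anchored at slave1.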
# update SLAVEOPTS
SLAVEOPTS="$SLAVEOPTS -e JOIN_IP=$FIRST_IP"
MASTEROPTS="-i -t --dns 127.0.0.1 -e NODE_TYPE=m -e JOIN_IP=$FIRST_IP -v /home/yiwei/hadoop_input:/mnt"
#for (( i = 1; i < 3; i++ )); do
# if [[ $i == 4 ]] || [[ $i == 5 ]]; then
# continue;
# fi
# index=$(( $index+1 ));
# ssh pasH${i} "docker run ${SLAVEOPTS} ${HSAOPTS} --name slave${index} -h slave${index}.${domain} ${dnImage}" &
# index=$(( $index+1 ));
# ssh pasH${i} "docker run ${SLAVEOPTS} ${CPUOPTS} --name slave${index} -h slave${index}.${domain} ${dnImage}" &
#done
#
ssh pasH1 "docker run ${SLAVEOPTS} ${HSAOPTS} --name slave2 -h slave2.${domain} ${dnImage}"
ssh pasH1 "docker run ${SLAVEOPTS} ${CPUOPTS} --name slave3 -h slave3.${domain} ${dnImage}"
ssh pasH2 "docker run ${SLAVEOPTS} ${HSAOPTS} --name slave4 -h slave4.${domain} ${dnImage}"
ssh pasH2 "docker run ${SLAVEOPTS} ${HSAOPTS} --name slave6 -h slave6.${domain} ${dnImage}"
ssh pasH3 "docker run ${SLAVEOPTS} ${HSAOPTS} --name slave7 -h slave7.${domain} ${dnImage}"
ssh pasH6 "docker run ${SLAVEOPTS} ${HSAOPTS} --name slave8 -h slave8.${domain} ${dnImage}"
ssh pasH7 "docker run ${SLAVEOPTS} ${HSAOPTS} --name slave9 -h slave9.${domain} ${dnImage}"
ssh pasH6 "docker run ${SLAVEOPTS} ${HSAOPTS} --name slave10 -h slave10.${domain} ${dnImage}"
ssh pasH7 "docker run ${SLAVEOPTS} ${HSAOPTS} --name slave11 -h slave11.${domain} ${dnImage}"
dnImage="yiwei1012/hadoop-gpu-dn:1.2"
ssh pasH8 "docker run ${SLAVEOPTS} ${GPUOPTS} --name slave5 -h slave5.${domain} ${dnImage}"
docker run ${MASTEROPTS} ${CPUOPTS} --name master -h master.${domain} ${nnImage}
|
YIWEI-CHEN/hadoop-hsa-docker
|
start-cluster.sh
|
Shell
|
apache-2.0
| 2,168 |
#!/usr/bin/env bash
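# Assumes the caller exports NDK, HOST_SYSTEM, and ROOT; ${ROOT}/flags.sh
# consumes the FLAGS exported below.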
CROSS_PREFIX=aarch64-linux-android-
SYSROOT="${NDK}/platforms/android-21/arch-arm64"
TOOLCHAIN=${NDK}/toolchains/aarch64-linux-android-4.9/prebuilt/${HOST_SYSTEM}
export FLAGS="--arch=aarch64"
. ${ROOT}/flags.sh
|
danbrough/andrudio
|
ffmpeg/arm64-v8a.sh
|
Shell
|
apache-2.0
| 239 |
#!/bin/sh
# Install Apache Ant on Ubuntu via the eclipse-team PPA.
sudo add-apt-repository ppa:eclipse-team/debian-package
sudo apt-get update
sudo apt-get install ant
|
bayvictor/distributed-polling-system
|
bin/ubuntu_install_ant.sh
|
Shell
|
apache-2.0
| 103 |
#!/bin/sh
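# Usage: runAgents.sh <go_server_host> <docker_registry_url>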
if [ "$1" != "" ]; then
go_server_host="$1"
echo "Running Go agents for Go server at: ${go_server_host}"
else
echo "Go server host is required."
exit 1
fi
if [ "$2" != "" ]; then
docker_registry_url="$2"
echo "Pulling docker images from: ${docker_registry_url}"
else
echo "Docker registry url is required."
exit 1
fi
docker run -d -e GO_SERVER=${go_server_host} --name gocd-agent-java-8 ${docker_registry_url}/example/gocd-agent-java-8:0.1
docker run -d -e GO_SERVER=${go_server_host} --name gocd-agent-ruby-2.2 ${docker_registry_url}/example/gocd-agent-ruby-2.2:0.1
|
BrianEliotWhipple/massive-octo-bear
|
agents/runAgents.sh
|
Shell
|
apache-2.0
| 612 |
#!/bin/bash
usage() {
echo "$0 --app.foo bar --probe.foo bar"
exit 1
}
mkdir -p /var/run/weave
TOKEN_PROVIDED=false
if [ "$1" = "version" -o "$1" = "help" ]; then
exec -a scope /home/weave/scope --mode $1
exit 0
fi
for arg in "$@"; do
case "$arg" in
--no-app|--probe-only)
touch /etc/service/app/down
;;
--no-probe|--app-only)
touch /etc/service/probe/down
;;
--service-token*)
TOKEN_PROVIDED=true
touch /etc/service/app/down
;;
esac
done
echo "$@" >/var/run/weave/scope-app.args
echo "$@" >/var/run/weave/scope-probe.args
# The end of the command line may optionally list addresses of apps to
# connect to, for people not using Weave DNS. These are written to
# /var/run/weave/apps for the run-probe script to pick up.
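# A minimal sketch of such trailing-address parsing (hypothetical; this
# version never fills MANUAL_APPS from the command line and relies on the
# implicit default below):
# for arg in "$@"; do
# case "$arg" in
# --*) ;;
# *) MANUAL_APPS="$MANUAL_APPS $arg" ;;
# esac
# done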
MANUAL_APPS=""
# Implicitly target the Scope Service if a service token was provided with
# no explicit manual app.
if [ "$MANUAL_APPS" = "" -a "$TOKEN_PROVIDED" = "true" ]; then
MANUAL_APPS="scope.weave.works:443"
fi
echo "$MANUAL_APPS" >/var/run/weave/apps
exec /home/weave/runsvinit
|
dilgerma/scope
|
docker/entrypoint.sh
|
Shell
|
apache-2.0
| 1,142 |
#!/bin/bash
# Use getopt instead of getopts for long options
set -e
OPTS=`getopt -o o: --long output-dir:,bam-out:,bam-in:,out-SM:,out-script:,standalone -n 'Reheader_SM.sh' -- "$@"`
if [ $? != 0 ] ; then echo "Failed parsing options." >&2 ; exit 1 ; fi
#echo "$OPTS"
eval set -- "$OPTS"
MYDIR="$( cd "$( dirname "$0" )" && pwd )"
timestamp=$( date +"%Y-%m-%d_%H-%M-%S_%N" )
keep_intermediates=0
outSM='TN_Merged'
while true; do
case "$1" in
-o | --output-dir )
case "$2" in
"") shift 2 ;;
*) outdir=$2 ; shift 2 ;;
esac ;;
--bam-out )
case "$2" in
"") shift 2 ;;
*) outbam=$2 ; shift 2 ;;
esac ;;
--bam-in )
case "$2" in
"") shift 2 ;;
*) inbam=$2 ; shift 2 ;;
esac ;;
--out-SM )
case "$2" in
"") shift 2 ;;
*) outSM=$2 ; shift 2 ;;
esac ;;
--out-script )
case "$2" in
"") shift 2 ;;
*) out_script_name=$2 ; shift 2 ;;
esac ;;
--standalone )
standalone=1 ; shift ;;
-- ) shift; break ;;
* ) break ;;
esac
done
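# Example invocation (illustrative paths; --bam-in is resolved relative to
# --output-dir below):
# ./Reheader_SM.sh --output-dir /data/run1 --bam-in merged.bam \
# --bam-out reheadered.bam --out-SM TN_Merged --standalone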
logdir=${outdir}/logs
mkdir -p ${logdir}
if [[ ${out_script_name} ]]
then
out_script="${out_script_name}"
else
out_script="${logdir}/reheader.${timestamp}.cmd"
fi
if [[ $standalone ]]
then
echo "#!/bin/bash" > $out_script
echo "" >> $out_script
echo "#$ -o ${logdir}" >> $out_script
echo "#$ -e ${logdir}" >> $out_script
echo "#$ -S /bin/bash" >> $out_script
echo '#$ -l h_vmem=8G' >> $out_script
echo 'set -e' >> $out_script
fi
echo "" >> $out_script
# Uniform sample and read group names in the merged file
echo "docker run -v /:/mnt -u $UID --rm lethalfang/bamsurgeon:1.1-3 \\" >> $out_script
echo "java -Xmx6g -jar /usr/local/bin/picard.jar AddOrReplaceReadGroups \\" >> $out_script
echo "I=/mnt/${outdir}/${inbam} \\" >> $out_script
echo "RGID=BAMSurgeon \\" >> $out_script
echo "RGLB=TNMerged \\" >> $out_script
echo "RGPL=illumina \\" >> $out_script
echo "RGPU=BAMSurgeon \\" >> $out_script
echo "RGSM=${outSM} \\" >> $out_script
echo "CREATE_INDEX=true \\" >> $out_script
echo "O=/mnt/${outdir}/${outbam}" >> $out_script
echo "" >> $out_script
# Rename Picard's .bai index to the <name>.bam.bai convention
echo "mv ${outdir}/${outbam%.bam}.bai ${outdir}/${outbam}.bai" >> $out_script
echo "" >> $out_script
|
bioinform/somaticseq
|
somaticseq/utilities/dockered_pipelines/bamSimulator/bamSurgeon/Reheader_SM.sh
|
Shell
|
bsd-2-clause
| 2,526 |