code (stringlengths 2–1.05M) | repo_name (stringlengths 5–110) | path (stringlengths 3–922) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64 2–1.05M)
---|---|---|---|---|---|
#!/bin/bash
WebServer[0]="Web Server"
WebServer[1]="Configure User Web Server? This will set up a web server
in a sandboxed environment underneath your username, as well as
scripts to automatically configure it. Your personal web site is
located at /home/USER/public_html/ and will be available instantly.
(container lighttpd, php, and SQLite)
"
WebServer[2]="lighttpd php5-cgi sqlite php5-sqlite"
INSTALL_WEB_SERVER(){
temp=$(uname -m)
echo "[*] Installing web server."
echo "[*] Generating sandbox environment..."
mkdir .WebServ/
cp ~/postinstall-scripts/scripts/servers/* .WebServ/
cd .WebServ/
if [ "$temp" = "x86_64" ]; then
./config64.sh
else
./config86.sh
fi
cd ~
echo "[*] Generating user web page folder..."
mkdir ~/public_html/
if [ "$temp" = "x86_64" ]; then
sudo mount --bind ~/public_html/ ~/.WebServ/x86_64/var/www
sudo ln -s ~/public_html/ ~/.WebServ/x86_64/var/www/
else
sudo mount --bind ~/public_html/ ~/.WebServ/i386/var/www
sudo ln -s ~/public_html/ ~/.WebServ/i386/var/www/
fi
echo "[*] Re-generating user crontab file"
# The sudo password is kept gpg-encrypted in ~/.ps.gpg and fed to sudo -S when
# the generated startup script runs at boot.
echo '#!/bin/sh
cd ~/.WebServ
echo $(gpg --decrypt ~/.ps.gpg) | sudo -S chroot x86_64/ mount.sh
echo $(gpg --decrypt ~/.ps.gpg) | sudo -S mount --bind ~/public_html/ x86_64/var/www
echo $(gpg --decrypt ~/.ps.gpg) | sudo -S chroot x86_64/ service lighttpd restart
' >> ~/.WebServ/startpws.sh
chmod +x ~/.WebServ/startpws.sh
crontab -l > ~/.usercron
echo "
@reboot ~/.WebServ/startpws.sh
" >> ~/.usercron
crontab -u $(whoami) ~/.usercron
cd ~
if [ "$temp" = "x86_64" ]; then
sudo cp ~/postinstall-scripts/scripts/servers/config/lighttpd.conf ~/.WebServ/x86_64/lighttpd/etc/lighttpd.conf
else
sudo cp ~/postinstall-scripts/scripts/servers/config/lighttpd.conf ~/.WebServ/i386/lighttpd/etc/lighttpd.conf
fi
cd ~/public_html/
git clone git://gitorious.org/+socialites/statusnet/gnu-social.git
chmod a+w gnu-social
chmod a+w gnu-social/avatar
chmod a+w gnu-social/background
chmod a+w gnu-social/file
echo "[*] Web server installed."
unset temp
}
|
dkoksal/postinstall-scripts
|
scripts/servers/web-serv.sh
|
Shell
|
gpl-3.0
| 2,034 |
#!/bin/bash
echo ----------------------------------------------------------------
echo sIBL_GUI For Maya - Files Gathering
echo ----------------------------------------------------------------
export PROJECT_DIRECTORY=$(cd $( dirname "${BASH_SOURCE[0]}" )/..; pwd)
export SOURCE_DIRECTORY=$PROJECT_DIRECTORY/src/
export RELEASES_DIRECTORY=$PROJECT_DIRECTORY/releases/
export BUILD_DIRECTORY=$RELEASES_DIRECTORY/build
export REPOSITORY_DIRECTORY=$RELEASES_DIRECTORY/repository
export UTILITIES_DIRECTORY=$PROJECT_DIRECTORY/utilities
#! Gathering folder cleanup.
echo ----------------------------------------------------------------
echo Gathering Folder Cleanup - Begin
echo ----------------------------------------------------------------
rm -rf $BUILD_DIRECTORY
rm -rf $REPOSITORY_DIRECTORY/*
echo ----------------------------------------------------------------
echo Gathering Folder Cleanup - End
echo ----------------------------------------------------------------
#! Extra files cleanup.
echo ----------------------------------------------------------------
echo Extra Files Cleanup - Begin
echo ----------------------------------------------------------------
python $UTILITIES_DIRECTORY/recursiveRemove.py ./ .DS_Store
echo ----------------------------------------------------------------
echo Extra Files Cleanup - End
echo ----------------------------------------------------------------
#! Change log gathering.
echo ----------------------------------------------------------------
echo Change Log Gathering - Begin
echo ----------------------------------------------------------------
cp $RELEASES_DIRECTORY/Change_Log.html $REPOSITORY_DIRECTORY/
echo ----------------------------------------------------------------
echo Change Log Gathering - End
echo ----------------------------------------------------------------
echo ----------------------------------------------------------------
echo Files Gathering - Begin
echo ----------------------------------------------------------------
mkdir -p $BUILD_DIRECTORY/prefs/icons
mkdir -p $BUILD_DIRECTORY/prefs/shelves
mkdir $BUILD_DIRECTORY/scripts
cp $PROJECT_DIRECTORY/README $BUILD_DIRECTORY/
cp $SOURCE_DIRECTORY/prefs/icons/*.png $BUILD_DIRECTORY/prefs/icons/
cp $SOURCE_DIRECTORY/prefs/shelves/shelf_sIBL_GUI.mel $BUILD_DIRECTORY/prefs/shelves/shelf_sIBL_GUI.mel
cp $SOURCE_DIRECTORY/scripts/sIBL_GUI_For_Maya.py $BUILD_DIRECTORY/scripts/sIBL_GUI_For_Maya.py
cd $BUILD_DIRECTORY/
zip -r $REPOSITORY_DIRECTORY/sIBL_GUI_For_Maya.zip .
echo ----------------------------------------------------------------
echo Files Gathering - End
echo ----------------------------------------------------------------
|
KelSolaar/sIBL_GUI_For_Maya
|
utilities/gatherFiles.sh
|
Shell
|
gpl-3.0
| 2,670 |
#!/bin/bash
if [ "$#" -ne 1 ]; then
echo "Illegal number of parameters"
exit 1
fi
CONFIG=$1
echo "Modifying $CONFIG for best behaviour in this container"
# Every config modification has its own environment variable that can configure the behaviour.
# Different users, providers or host systems might have specific preferences.
# But we should try to add sensible defaults, a way to disable it, and alternative implementations as needed.
CONFIG_MOD_USERPASS=${CONFIG_MOD_USERPASS:-"1"}
CONFIG_MOD_CA_CERTS=${CONFIG_MOD_CA_CERTS:-"1"}
CONFIG_MOD_PING=${CONFIG_MOD_PING:-"1"}
CONFIG_MOD_RESOLV_RETRY=${CONFIG_MOD_RESOLV_RETRY:-"1"}
CONFIG_MOD_TLS_CERTS=${CONFIG_MOD_TLS_CERTS:-"1"}
CONFIG_MOD_VERBOSITY=${CONFIG_MOD_VERBOSITY:-"1"}
CONFIG_MOD_REMAP_USR1=${CONFIG_MOD_REMAP_USR1:-"1"}
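# For example (hypothetical invocation), a user who wants to keep their
# provider's own keepalive/ping settings could disable that modification:
#   CONFIG_MOD_PING=0 ./modify-openvpn-config.sh /etc/openvpn/provider/default.ovpn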
## Option 1 - Change the auth-user-pass line to point to credentials file
if [[ $CONFIG_MOD_USERPASS == "1" ]]; then
echo "Modification: Point auth-user-pass option to the username/password file"
sed -i "s#auth-user-pass.*#auth-user-pass /config/openvpn-credentials.txt#g" "$CONFIG"
fi
## Option 2 - Change the ca certificate path to point relative to the provider home
if [[ $CONFIG_MOD_CA_CERTS == "1" ]]; then
echo "Modification: Change ca certificate path"
config_directory=$(dirname "$CONFIG")
# Some configs are already adjusted, need to handle both relative and absolute paths, like:
# ca /etc/openvpn/mullvad/ca.crt
# ca ca.ipvanish.com.crt
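# After the rewrite both forms point into the config's own directory, e.g.
# (illustrative): "ca ca.ipvanish.com.crt" becomes "ca /etc/openvpn/ipvanish/ca.ipvanish.com.crt".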
sed -i -E "s#ca\s+(.*/)*#ca $config_directory/#g" "$CONFIG"
fi
## Option 3 - Update ping options to exit the container, so Docker will restart it
if [[ $CONFIG_MOD_PING == "1" ]]; then
echo "Modification: Change ping options"
# Remove any old options
sed -i "/^inactive.*$/d" "$CONFIG"
sed -i "/^ping.*$/d" "$CONFIG"
# Remove keep-alive option - it doesn't work in conjunction with ping option(s) which we're adding later
sed -i '/^keepalive.*$/d' "$CONFIG"
# Add new ones
sed -i "\$q" "$CONFIG" # Ensure config ends with a line feed
echo "inactive 3600" >> "$CONFIG"
echo "ping 10" >> "$CONFIG"
echo "ping-exit 60" >> "$CONFIG"
fi
## Option 4 - Set a sensible default for resolv-retry. The OpenVPN default value
## is "infinite" and that will cause things to hang on DNS errors
if [[ $CONFIG_MOD_RESOLV_RETRY == "1" ]]; then
echo "Modification: Update/set resolv-retry to 15 seconds"
# Remove old setting
sed -i "/^resolv-retry.*$/d" "$CONFIG"
# Add new ones
sed -i "\$q" "$CONFIG" # Ensure config ends with a line feed
echo "resolv-retry 15" >> "$CONFIG"
fi
## Option 5 - Change the tls-crypt path to point relative to the provider home
if [[ $CONFIG_MOD_TLS_CERTS == "1" ]]; then
echo "Modification: Change tls-crypt keyfile path"
config_directory=$(dirname "$CONFIG")
# Some configs are already adjusted, need to handle both relative and absolute paths, like:
# tls-crypt /etc/openvpn/celo/uk1-TCP-443-tls.key
# tls-crypt uk1-TCP-443-tls.key
sed -i -E "s#tls-crypt\s+(.*/)*#tls-crypt $config_directory/#g" "$CONFIG"
fi
## Option 6 - Update or set verbosity of openvpn logging
if [[ $CONFIG_MOD_VERBOSITY == "1" ]]; then
echo "Modification: Set output verbosity to 3"
# Remove any old options
sed -i "/^verb.*$/d" "$CONFIG"
# Add new ones
sed -i "\$q" "$CONFIG" # Ensure config ends with a line feed
echo "verb 3" >> "$CONFIG"
fi
## Option 7 - Remap the SIGUSR1 signal to SIGTERM
## We don't want OpenVPN to restart within the container
if [[ $CONFIG_MOD_REMAP_USR1 == "1" ]]; then
echo "Modification: Remap SIGUSR1 signal to SIGTERM, avoid OpenVPN restart loop"
# Remove any old options
sed -i "/^remap-usr1.*$/d" "$CONFIG"
# Add new ones
sed -i "\$q" "$CONFIG" # Ensure config ends with a line feed
echo "remap-usr1 SIGTERM" >> "$CONFIG"
fi
|
haugene/docker-transmission-openvpn
|
openvpn/modify-openvpn-config.sh
|
Shell
|
gpl-3.0
| 3,885 |
mkdir .rebuildtrackdesc
mkdir .rebuildtrackuser
mkdir .rebuildstartpoint
mkdir .rebuilddate
|
fparrel/regepe
|
wamp-src/cgi-bin/genrebuildall.sh
|
Shell
|
gpl-3.0
| 92 |
brew cask install aerial
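# The -currentHost defaults below configure the screensaver per host: idleTime
# is in seconds (300 = 5 minutes), and moduleDict points the active module at
# the Aerial.saver bundle installed by the cask above.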
/usr/bin/defaults -currentHost write com.apple.screensaver 'CleanExit' -string "YES"
/usr/bin/defaults -currentHost write com.apple.screensaver 'PrefsVersion' -int "100"
/usr/bin/defaults -currentHost write com.apple.screensaver 'idleTime' -int "300"
/usr/bin/defaults -currentHost write com.apple.screensaver "moduleDict" -dict-add "path" -string "/Users/nwagensonner/Library/Screen Savers/Aerial.saver"
/usr/bin/defaults -currentHost write com.apple.screensaver "moduleDict" -dict-add "type" -int "0"
/usr/bin/defaults -currentHost write com.apple.screensaver 'ShowClock' -bool "false"
/usr/bin/defaults -currentHost write com.apple.screensaver 'tokenRemovalAction' -int "0"
killall cfprefsd
|
sharpner/dotfiles
|
saver.sh
|
Shell
|
gpl-3.0
| 722 |
#!/bin/bash
#######################################################
# URAVPS - Script install Lemp in Centos 7
# Author: thelawbreaker - URAVPS
# To install type: chmod +x uravps.sh && ./uravps.sh
#######################################################
uravps_vers="1.0.2"
phpmyadmin_version="5.2.0"
script_resource="https://resource.uravps.com"
low_ram='262144' # 256MB
yum -y install gawk bc wget lsof ntpdate
clear
printf "=========================================================================\n"
printf " URA VPS \n"
printf " Check parameter of server \n"
printf "=========================================================================\n"
cpu_name=$( awk -F: '/model name/ {name=$2} END {print name}' /proc/cpuinfo )
cpu_cores=$( awk -F: '/model name/ {core++} END {print core}' /proc/cpuinfo )
cpu_freq=$( awk -F: ' /cpu MHz/ {freq=$2} END {print freq}' /proc/cpuinfo )
server_ram_total=$(awk '/MemTotal/ {print $2}' /proc/meminfo)
server_ram_mb=`echo "scale=0;$server_ram_total/1024" | bc`
server_hdd=$( df -h | awk 'NR==2 {print $2}' )
server_swap_total=$(awk '/SwapTotal/ {print $2}' /proc/meminfo)
server_swap_mb=`echo "scale=0;$server_swap_total/1024" | bc`
server_ip=$(curl ipinfo.io/ip)
printf "=========================================================================\n"
printf "Result \n"
printf "=========================================================================\n"
echo "CPU : $cpu_name"
echo "CPU core : $cpu_cores"
echo "Core speed : $cpu_freq MHz"
echo "Total RAM : $server_ram_mb MB"
echo "Swap : $server_swap_mb MB"
echo "Disk : $server_hdd GB"
echo "IP : $server_ip"
printf "=========================================================================\n"
printf "=========================================================================\n"
if [ $server_ram_total -lt $low_ram ]; then
echo -e "Warning: Ram is slow \n (it nhat 256MB) \n"
echo "Exit..."
exit
fi
sleep 3
clear
printf "=========================================================================\n"
printf " URA VPS \n"
printf "Preparing to install... \n"
printf "=========================================================================\n"
printf "You want using PHP version??\n"
prompt="Enter selection [1-3]: "
php_version="7.4"; # Default PHP 7.1
options=("PHP 7.4" "PHP 7.2" "PHP 7.1")
PS3="$prompt"
select opt in "${options[@]}"; do
case "$REPLY" in
1) php_version="7.4"; break;;
2) php_version="7.2"; break;;
3) php_version="7.1"; break;;
$(( ${#options[@]}+1 )) ) printf "\nSetup PHP 7.4\n"; break;;
*) printf "Invalid, the system will install by default PHP 7.4\n"; break;;
esac
done
printf "\Enter the default domain (www/non-www) [ENTER]: "
read server_name
if [ "$server_name" = "" ]; then
server_name="uravps.com"
echo "Invalid, domain default = uravps.com"
fi
printf "\nEnter the port manager [ENTER]: "
read admin_port
if [ "$admin_port" == "" ] || [ "$admin_port" == "2411" ] || [ $admin_port == "7777" ] || [ $admin_port -lt 2000 ] || [ $admin_port -gt 9999 ] || [ $(lsof -i -P | grep ":$admin_port " | wc -l) != "0" ]; then
admin_port="1408"
echo "Port invalid, port default: 1408"
echo
fi
printf "=========================================================================\n"
printf " URA VPS \n"
printf "Preparation is complete \n"
printf "=========================================================================\n"
timedatectl set-timezone Asia/Ho_Chi_Minh
ntpdate time.apple.com
if [ -s /etc/selinux/config ]; then
sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
sed -i 's/SELINUX=permissive/SELINUX=disabled/g' /etc/selinux/config
fi
setenforce 0
# Install EPEL + Remi Repo
yum -y install epel-release yum-utils
rpm -Uvh https://download-ib01.fedoraproject.org/pub/epel/7/x86_64/Packages/e/epel-release-7-12.noarch.rpm
rpm -Uvh http://rpms.famillecollet.com/enterprise/remi-release-7.rpm
# Install Nginx Repo
rpm -Uvh $script_resource/nginx-1.18.0-1.el7.ngx.x86_64.rpm
# Install MariaDB Repo
curl -sS $script_resource/mariadb_repo_setup | sudo bash
systemctl stop saslauthd.service
systemctl disable saslauthd.service
yum -y remove mysql* php* httpd* sendmail* postfix* rsyslog*
yum clean all
yum -y update
clear
printf "=========================================================================\n"
printf " URAVPS \n"
printf "Start the installation... \n"
printf "=========================================================================\n"
sleep 3
# Install Nginx, PHP-FPM and modules
# Enable Remi Repo
yum-config-manager --enable remi
if [ "$php_version" = "7.4" ]; then
yum-config-manager --enable remi-php74
yum -y install nginx php-fpm php-common php-gd php-mysqlnd php-pdo php-xml php-mbstring php-mcrypt php-curl php-opcache php-cli php-pecl-zip
elif [ "$php_version" = "7.2" ]; then
yum-config-manager --enable remi-php72
yum -y install nginx php-fpm php-common php-gd php-mysqlnd php-pdo php-xml php-mbstring php-mcrypt php-curl php-opcache php-cli php-pecl-zip
elif [ "$php_version" = "7.1" ]; then
yum-config-manager --enable remi-php71
yum -y install nginx php-fpm php-common php-gd php-mysqlnd php-pdo php-xml php-mbstring php-mcrypt php-curl php-opcache php-cli
else
yum -y install nginx php-fpm php-common php-gd php-mysqlnd php-pdo php-xml php-mbstring php-mcrypt php-curl php-devel php-cli gcc
fi
# Install MariaDB
yum -y install MariaDB-server MariaDB-client
# Install Others
yum -y install exim syslog-ng syslog-ng-libdbi cronie iptables-services fail2ban unzip zip nano openssl ntpdate
clear
printf "=========================================================================\n"
printf " URAPVS \n"
printf "Start configuration... \n"
printf "=========================================================================\n"
sleep 3
# Autostart
systemctl enable nginx.service
systemctl enable php-fpm.service
systemctl enable mariadb.service # MariaDB 10 ships a mariadb.service unit; enabling "mysql.service" fails with "No such file or directory"
systemctl enable fail2ban.service
# Disable firewalld and install iptables
systemctl mask firewalld
systemctl enable iptables
systemctl enable ip6tables
systemctl stop firewalld
systemctl start iptables
systemctl start ip6tables
#systemctl start exim.service
#systemctl start syslog-ng.service
mkdir -p /home/$server_name/public_html
mkdir /home/$server_name/private_html
mkdir /home/$server_name/logs
chmod 777 /home/$server_name/logs
mkdir -p /var/log/nginx
chown -R nginx:nginx /var/log/nginx
chown -R nginx:nginx /var/lib/php/session
wget -q $script_resource/index.html -O /home/$server_name/public_html/index.html
systemctl start nginx.service
systemctl start php-fpm.service
systemctl start mysql.service
# PHP #
phplowmem='2097152'
check_phplowmem=$(expr $server_ram_total \< $phplowmem)
max_children=`echo "scale=0;$server_ram_mb*0.4/30" | bc`
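# Illustrative sizing: a 1024MB server yields max_children = 1024*0.4/30 = 13,
# i.e. roughly 40% of RAM at an assumed ~30MB per PHP-FPM child.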
if [ "$check_phplowmem" == "1" ]; then
lessphpmem=y
fi
if [[ "$lessphpmem" = [yY] ]]; then
# echo -e "\nCopying php-fpm-min.conf /etc/php-fpm.d/www.conf\n"
wget -q $script_resource/php-fpm-min.conf -O /etc/php-fpm.conf
wget -q $script_resource/www-min.conf -O /etc/php-fpm.d/www.conf
else
# echo -e "\nCopying php-fpm.conf /etc/php-fpm.d/www.conf\n"
wget -q $script_resource/php-fpm.conf -O /etc/php-fpm.conf
wget -q $script_resource/www.conf -O /etc/php-fpm.d/www.conf
fi # lessphpmem
sed -i "s/server_name_here/$server_name/g" /etc/php-fpm.conf
sed -i "s/server_name_here/$server_name/g" /etc/php-fpm.d/www.conf
sed -i "s/max_children_here/$max_children/g" /etc/php-fpm.d/www.conf
# dynamic PHP memory_limit calculation
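# Note: server_ram_total is in KB (from /proc/meminfo), so the tiers below
# correspond to <=256MB, <=384MB, <=512MB, <=1GB, <=2GB, <=3GB, <=4GB, >4GB.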
if [[ "$server_ram_total" -le '262144' ]]; then
php_memorylimit='48M'
php_uploadlimit='48M'
php_realpathlimit='256k'
php_realpathttl='14400'
elif [[ "$server_ram_total" -gt '262144' && "$server_ram_total" -le '393216' ]]; then
php_memorylimit='96M'
php_uploadlimit='96M'
php_realpathlimit='320k'
php_realpathttl='21600'
elif [[ "$server_ram_total" -gt '393216' && "$server_ram_total" -le '524288' ]]; then
php_memorylimit='128M'
php_uploadlimit='128M'
php_realpathlimit='384k'
php_realpathttl='28800'
elif [[ "$server_ram_total" -gt '524288' && "$server_ram_total" -le '1049576' ]]; then
php_memorylimit='160M'
php_uploadlimit='160M'
php_realpathlimit='384k'
php_realpathttl='28800'
elif [[ "$server_ram_total" -gt '1049576' && "$server_ram_total" -le '2097152' ]]; then
php_memorylimit='256M'
php_uploadlimit='256M'
php_realpathlimit='384k'
php_realpathttl='28800'
elif [[ "$server_ram_total" -gt '2097152' && "$server_ram_total" -le '3145728' ]]; then
php_memorylimit='320M'
php_uploadlimit='320M'
php_realpathlimit='512k'
php_realpathttl='43200'
elif [[ "$server_ram_total" -gt '3145728' && "$server_ram_total" -le '4194304' ]]; then
php_memorylimit='512M'
php_uploadlimit='512M'
php_realpathlimit='512k'
php_realpathttl='43200'
elif [[ "$server_ram_total" -gt '4194304' ]]; then
php_memorylimit='800M'
php_uploadlimit='800M'
php_realpathlimit='640k'
php_realpathttl='86400'
fi
cat > "/etc/php.d/00-uravps-custom.ini" <<END
date.timezone = Asia/Ho_Chi_Minh
max_execution_time = 180
short_open_tag = On
realpath_cache_size = $php_realpathlimit
realpath_cache_ttl = $php_realpathttl
memory_limit = $php_memorylimit
upload_max_filesize = $php_uploadlimit
post_max_size = $php_uploadlimit
expose_php = Off
mail.add_x_header = Off
max_input_nesting_level = 128
max_input_vars = 2000
mysqlnd.net_cmd_buffer_size = 16384
always_populate_raw_post_data=-1
disable_functions=shell_exec
END
# Zend Opcache
opcache_path='opcache.so' #Default for PHP 5.5 and newer
if [ "$php_version" = "5.4" ]; then
cd /usr/local/src
wget http://pecl.php.net/get/ZendOpcache
tar xvfz ZendOpcache
cd zendopcache-7.*
phpize
php_config_path=`which php-config`
./configure --with-php-config=$php_config_path
make
make install
rm -rf /usr/local/src/zendopcache*
rm -f ZendOpcache
opcache_path=`find / -name 'opcache.so'`
fi
wget -q https://raw.github.com/amnuts/opcache-gui/master/index.php -O /home/$server_name/private_html/op.php
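# Note: the glob in the redirect below assumes exactly one opcache ini exists
# in /etc/php.d (e.g. 10-opcache.ini from remi); with zero or multiple matches
# the redirect would misbehave.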
cat > /etc/php.d/*opcache*.ini <<END
zend_extension=$opcache_path
opcache.enable=1
opcache.enable_cli=1
opcache.memory_consumption=128
opcache.interned_strings_buffer=16
opcache.max_accelerated_files=4000
opcache.max_wasted_percentage=5
opcache.use_cwd=1
opcache.validate_timestamps=1
opcache.revalidate_freq=60
opcache.fast_shutdown=1
opcache.blacklist_filename=/etc/php.d/opcache-default.blacklist
END
cat > /etc/php.d/opcache-default.blacklist <<END
/home/*/public_html/wp-content/plugins/backwpup/*
/home/*/public_html/wp-content/plugins/duplicator/*
/home/*/public_html/wp-content/plugins/updraftplus/*
/home/$server_name/private_html/
END
systemctl restart php-fpm.service
# Nginx #
cat > "/etc/nginx/nginx.conf" <<END
user nginx;
worker_processes $cpu_cores;
worker_rlimit_nofile 260000;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;
events {
worker_connections 2048;
accept_mutex off;
accept_mutex_delay 200ms;
use epoll;
#multi_accept on;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
log_format main '\$remote_addr - \$remote_user [\$time_local] "\$request" '
'\$status \$body_bytes_sent "\$http_referer" '
'"\$http_user_agent" "\$http_x_forwarded_for"';
#Disable IFRAME
add_header X-Frame-Options SAMEORIGIN;
#Prevent Cross-site scripting (XSS) attacks
add_header X-XSS-Protection "1; mode=block";
#Prevent MIME-sniffing
add_header X-Content-Type-Options nosniff;
access_log off;
sendfile on;
tcp_nopush on;
tcp_nodelay off;
types_hash_max_size 2048;
server_tokens off;
server_names_hash_bucket_size 128;
client_max_body_size 0;
client_body_buffer_size 256k;
client_body_in_file_only off;
client_body_timeout 60s;
client_header_buffer_size 256k;
client_header_timeout 20s;
large_client_header_buffers 8 256k;
keepalive_timeout 10;
keepalive_disable msie6;
reset_timedout_connection on;
send_timeout 60s;
gzip on;
gzip_static on;
gzip_disable "msie6";
gzip_vary on;
gzip_proxied any;
gzip_comp_level 6;
gzip_buffers 16 8k;
gzip_http_version 1.1;
gzip_types text/plain text/css application/json text/javascript application/javascript text/xml application/xml application/xml+rss;
include /etc/nginx/conf.d/*.conf;
}
END
cat > "/usr/share/nginx/html/403.html" <<END
<html>
<head><title>403 Forbidden</title></head>
<body bgcolor="white">
<center><h1>403 Forbidden</h1></center>
<hr><center>URAPVS</center>
</body>
</html>
<!-- a padding to disable MSIE and Chrome friendly error page -->
<!-- a padding to disable MSIE and Chrome friendly error page -->
<!-- a padding to disable MSIE and Chrome friendly error page -->
<!-- a padding to disable MSIE and Chrome friendly error page -->
<!-- a padding to disable MSIE and Chrome friendly error page -->
<!-- a padding to disable MSIE and Chrome friendly error page -->
END
cat > "/usr/share/nginx/html/404.html" <<END
<html>
<head><title>404 Not Found</title></head>
<body bgcolor="white">
<center><h1>404 Not Found</h1></center>
<hr><center>URAPVS</center>
</body>
</html>
<!-- a padding to disable MSIE and Chrome friendly error page -->
<!-- a padding to disable MSIE and Chrome friendly error page -->
<!-- a padding to disable MSIE and Chrome friendly error page -->
<!-- a padding to disable MSIE and Chrome friendly error page -->
<!-- a padding to disable MSIE and Chrome friendly error page -->
<!-- a padding to disable MSIE and Chrome friendly error page -->
END
rm -rf /etc/nginx/conf.d/*
> /etc/nginx/conf.d/default.conf
server_name_alias="www.$server_name"
if [[ $server_name == *www* ]]; then
server_name_alias=${server_name/www./''}
fi
cat > "/etc/nginx/conf.d/$server_name.conf" <<END
server {
listen 80;
server_name $server_name_alias;
rewrite ^(.*) http://$server_name\$1 permanent;
}
server {
listen 80 default_server;
# access_log off;
access_log /home/$server_name/logs/access.log;
# error_log off;
error_log /home/$server_name/logs/error.log;
root /home/$server_name/public_html;
index index.php index.html index.htm;
server_name $server_name;
location / {
try_files \$uri \$uri/ /index.php?\$args;
}
# Custom configuration
include /home/$server_name/public_html/*.conf;
location ~ \.php$ {
fastcgi_split_path_info ^(.+\.php)(/.+)$;
include /etc/nginx/fastcgi_params;
fastcgi_pass 127.0.0.1:9000;
fastcgi_index index.php;
fastcgi_connect_timeout 1000;
fastcgi_send_timeout 1000;
fastcgi_read_timeout 1000;
fastcgi_buffer_size 256k;
fastcgi_buffers 4 256k;
fastcgi_busy_buffers_size 256k;
fastcgi_temp_file_write_size 256k;
fastcgi_intercept_errors on;
fastcgi_param SCRIPT_FILENAME \$document_root\$fastcgi_script_name;
}
location /nginx_status {
stub_status on;
access_log off;
allow 127.0.0.1;
allow $server_ip;
deny all;
}
location /php_status {
fastcgi_pass 127.0.0.1:9000;
fastcgi_index index.php;
fastcgi_param SCRIPT_FILENAME \$document_root\$fastcgi_script_name;
include /etc/nginx/fastcgi_params;
allow 127.0.0.1;
allow $server_ip;
deny all;
}
location ~ /\. {
deny all;
}
location = /favicon.ico {
log_not_found off;
access_log off;
}
location = /robots.txt {
allow all;
log_not_found off;
access_log off;
}
location ~* \.(3gp|gif|jpg|jpeg|png|ico|wmv|avi|asf|asx|mpg|mpeg|mp4|pls|mp3|mid|wav|swf|flv|exe|zip|tar|rar|gz|tgz|bz2|uha|7z|doc|docx|xls|xlsx|pdf|iso|eot|svg|ttf|woff)$ {
gzip_static off;
add_header Pragma public;
add_header Cache-Control "public, must-revalidate, proxy-revalidate";
access_log off;
expires 30d;
break;
}
location ~* \.(txt|js|css)$ {
add_header Pragma public;
add_header Cache-Control "public, must-revalidate, proxy-revalidate";
access_log off;
expires 30d;
break;
}
}
server {
listen $admin_port;
access_log off;
log_not_found off;
error_log /home/$server_name/logs/nginx_error.log;
root /home/$server_name/private_html;
index index.php index.html index.htm;
server_name $server_name;
auth_basic "Restricted";
auth_basic_user_file /home/$server_name/private_html/uravps/.htpasswd;
location / {
autoindex on;
try_files \$uri \$uri/ /index.php;
}
location ~ \.php$ {
fastcgi_split_path_info ^(.+\.php)(/.+)$;
include /etc/nginx/fastcgi_params;
fastcgi_pass 127.0.0.1:9000;
fastcgi_index index.php;
fastcgi_connect_timeout 1000;
fastcgi_send_timeout 1000;
fastcgi_read_timeout 1000;
fastcgi_buffer_size 256k;
fastcgi_buffers 4 256k;
fastcgi_busy_buffers_size 256k;
fastcgi_temp_file_write_size 256k;
fastcgi_intercept_errors on;
fastcgi_param SCRIPT_FILENAME \$document_root\$fastcgi_script_name;
}
location ~ /\. {
deny all;
}
}
END
cat >> "/etc/security/limits.conf" <<END
* soft nofile 262144
* hard nofile 262144
nginx soft nofile 262144
nginx hard nofile 262144
nobody soft nofile 262144
nobody hard nofile 262144
root soft nofile 262144
root hard nofile 262144
END
ulimit -n 262144
systemctl restart nginx.service
# MariaDB #
# set /etc/my.cnf templates from Centmin Mod
cp /etc/my.cnf /etc/my.cnf-original
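# The RAM thresholds below are in KB (as reported by /proc/meminfo), selecting
# a my.cnf template tier of roughly <=2GB, 2-4GB, 4-8GB, 8-16GB, 16-32GB, >=32GB.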
if [[ "$(expr $server_ram_total \<= 2099000)" = "1" ]]; then
# echo -e "\nCopying MariaDB my-mdb10-min.cnf file to /etc/my.cnf\n"
wget -q $script_resource/my-mdb10-min.cnf -O /etc/my.cnf
fi
if [[ "$(expr $server_ram_total \> 2100001)" = "1" && "$(expr $server_ram_total \<= 4190000)" = "1" ]]; then
# echo -e "\nCopying MariaDB my-mdb10.cnf file to /etc/my.cnf\n"
wget -q $script_resource/my-mdb10.cnf -O /etc/my.cnf
fi
if [[ "$(expr $server_ram_total \>= 4190001)" = "1" && "$(expr $server_ram_total \<= 8199999)" = "1" ]]; then
# echo -e "\nCopying MariaDB my-mdb10-4gb.cnf file to /etc/my.cnf\n"
wget -q $script_resource/my-mdb10-4gb.cnf -O /etc/my.cnf
fi
if [[ "$(expr $server_ram_total \>= 8200000)" = "1" && "$(expr $server_ram_total \<= 15999999)" = "1" ]]; then
# echo -e "\nCopying MariaDB my-mdb10-8gb.cnf file to /etc/my.cnf\n"
wget -q $script_resource/my-mdb10-8gb.cnf -O /etc/my.cnf
fi
if [[ "$(expr $server_ram_total \>= 16000000)" = "1" && "$(expr $server_ram_total \<= 31999999)" = "1" ]]; then
# echo -e "\nCopying MariaDB my-mdb10-16gb.cnf file to /etc/my.cnf\n"
wget -q $script_resource/my-mdb10-16gb.cnf -O /etc/my.cnf
fi
if [[ "$(expr $server_ram_total \>= 32000000)" = "1" ]]; then
# echo -e "\nCopying MariaDB my-mdb10-32gb.cnf file to /etc/my.cnf\n"
wget -q $script_resource/my-mdb10-32gb.cnf -O /etc/my.cnf
fi
sed -i "s/server_name_here/$server_name/g" /etc/my.cnf
rm -f /var/lib/mysql/ib_logfile0
rm -f /var/lib/mysql/ib_logfile1
rm -f /var/lib/mysql/ibdata1
clear
printf "=========================================================================\n"
printf " URAVPS \n"
printf "MariaDB configuration... \n"
printf "=========================================================================\n"
# Random password for MySQL root account
root_password=`date |md5sum |cut -c '14-30'`
sleep 1
# Random password for MySQL admin account
admin_password=`date |md5sum |cut -c '14-30'`
'/usr/bin/mysqladmin' -u root password "$root_password"
mysql -u root -p"$root_password" -e "GRANT ALL PRIVILEGES ON *.* TO 'admin'@'localhost' IDENTIFIED BY '$admin_password' WITH GRANT OPTION;"
mysql -u root -p"$root_password" -e "DELETE FROM mysql.user WHERE User='root' AND Host NOT IN ('localhost')"
mysql -u root -p"$root_password" -e "DELETE FROM mysql.user WHERE User=''"
mysql -u root -p"$root_password" -e "DROP User '';"
mysql -u root -p"$root_password" -e "DROP DATABASE test"
mysql -u root -p"$root_password" -e "FLUSH PRIVILEGES"
cat > "/root/.my.cnf" <<END
[client]
user=root
password=$root_password
END
chmod 600 /root/.my.cnf
# Fix MariaDB 10
systemctl stop mysql.service
wget -q $script_resource/mariadb10_3tables.sql1
mv mariadb10_3tables.sql1 mariadb10_3tables.sql
rm -rf /var/lib/mysql/mysql/gtid_slave_pos.ibd
rm -rf /var/lib/mysql/mysql/innodb_table_stats.ibd
rm -rf /var/lib/mysql/mysql/innodb_index_stats.ibd
systemctl start mysql.service
mysql -e "ALTER TABLE mysql.gtid_slave_pos DISCARD TABLESPACE;" 2> /dev/null
mysql -e "ALTER TABLE mysql.innodb_table_stats DISCARD TABLESPACE;" 2> /dev/null
mysql -e "ALTER TABLE mysql.innodb_index_stats DISCARD TABLESPACE;" 2> /dev/null
mysql mysql < mariadb10_3tables.sql
systemctl restart mysql.service
mysql_upgrade --force mysql
rm -f mariadb10_3tables.sql
if [ "$1" = "wordpress" ]; then
clear
printf "=========================================================================\n"
printf " URAVPS \n"
printf "Setup wordpress... \n"
printf "=========================================================================\n"
cd /home/$server_name/public_html/
rm -f index.html
# Generate wordpress database
wordpress_password=`date |md5sum |cut -c '1-15'`
mysql -u root -p"$root_password" -e "CREATE DATABASE wordpress;GRANT ALL PRIVILEGES ON wordpress.* TO wordpress@localhost IDENTIFIED BY '$wordpress_password';FLUSH PRIVILEGES;"
# Download latest WordPress and uncompress
wget https://wordpress.org/latest.tar.gz
tar zxf latest.tar.gz
mv wordpress/* ./
# Grab Salt Keys
wget -O /tmp/wp.keys https://api.wordpress.org/secret-key/1.1/salt/
# Butcher our wp-config.php file
sed -e "s/database_name_here/wordpress/" -e "s/username_here/wordpress/" -e "s/password_here/"$wordpress_password"/" wp-config-sample.php > wp-config.php
sed -i '/#@-/r /tmp/wp.keys' wp-config.php
sed -i "/#@+/,/#@-/d" wp-config.php
# Tidy up
rm -rf wordpress latest.tar.gz /tmp/wp.keys wp wp-config-sample.php
fi
clear
printf "=========================================================================\n"
printf " URAVPS \n"
printf "Configuration successful... \n"
printf "=========================================================================\n"
# URAVPS Script Admin
cd /home/$server_name/private_html/
wget -q $script_resource/administrator.zip
unzip -q administrator.zip && rm -f administrator.zip
mv -f administrator/* .
rm -rf administrator
printf "admin:$(openssl passwd -apr1 $admin_password)\n" > /home/$server_name/private_html/uravps/.htpasswd
sed -i "s/rootpassword/$root_password/g" /home/$server_name/private_html/uravps/SQLManager.php
# Server Info
mkdir /home/$server_name/private_html/serverinfo/
cd /home/$server_name/private_html/serverinfo/
wget -q $script_resource/serverinfo.zip
unzip -q serverinfo.zip && rm -f serverinfo.zip
# phpMyAdmin
mkdir -p /home/$server_name/private_html/phpmyadmin/
cd /home/$server_name/private_html/phpmyadmin/
wget -q https://files.phpmyadmin.net/phpMyAdmin/$phpmyadmin_version/phpMyAdmin-$phpmyadmin_version-english.zip
unzip -q phpMyAdmin-$phpmyadmin_version-english.zip
mv -f phpMyAdmin-$phpmyadmin_version-english/* .
rm -rf phpMyAdmin-$phpmyadmin_version-english*
# eXtplorer File Manager
mkdir /home/$server_name/private_html/filemanager/
cd /home/$server_name/private_html/filemanager/
extplorer_version="2.1.10"
wget --no-check-certificate -q https://extplorer.net/attachments/download/74/eXtplorer_$extplorer_version.zip # Note ID 74
unzip -q eXtplorer_$extplorer_version.zip && rm -f eXtplorer_$extplorer_version.zip
cat > "/home/$server_name/private_html/filemanager/config/.htusers.php" <<END
<?php
if( !defined( '_JEXEC' ) && !defined( '_VALID_MOS' ) ) die( 'Restricted access' );
\$GLOBALS["users"]=array(
array('admin','$(echo -n "$admin_password" | md5sum | awk '{print $1}')','/home','http://localhost','1','','7',1),
);
?>
END
# Log Rotation
cat > "/etc/logrotate.d/nginx" <<END
/home/*/logs/access.log /home/*/logs/error.log /home/*/logs/nginx_error.log {
create 640 nginx nginx
daily
missingok
rotate 5
maxage 7
compress
delaycompress
notifempty
sharedscripts
postrotate
[ -f /var/run/nginx.pid ] && kill -USR1 \`cat /var/run/nginx.pid\`
endscript
su nginx nginx
}
END
cat > "/etc/logrotate.d/php-fpm" <<END
/home/*/logs/php-fpm*.log {
daily
compress
maxage 7
missingok
notifempty
sharedscripts
delaycompress
postrotate
/bin/kill -SIGUSR1 \`cat /var/run/php-fpm/php-fpm.pid 2>/dev/null\` 2>/dev/null || true
endscript
}
END
cat > "/etc/logrotate.d/mysql" <<END
/home/*/logs/mysql*.log {
create 640 mysql mysql
notifempty
daily
rotate 3
maxage 7
missingok
compress
postrotate
# just if mysqld is really running
if test -x /usr/bin/mysqladmin && \
/usr/bin/mysqladmin ping &>/dev/null
then
/usr/bin/mysqladmin flush-logs
fi
endscript
su mysql mysql
}
END
# Change port SSH
sed -i 's/#Port 22/Port 2411/g' /etc/ssh/sshd_config
cat > "/etc/fail2ban/jail.local" <<END
[ssh-iptables]
enabled = true
filter = sshd
action = iptables[name=SSH, port=2411, protocol=tcp]
logpath = /var/log/secure
maxretry = 3
bantime = 3600
[nginx-http-auth]
enabled = true
filter = nginx-http-auth
action = iptables[name=NoAuthFailures, port=$admin_port, protocol=tcp]
logpath = /home/$server_name/logs/nginx_error.log
maxretry = 3
bantime = 3600
END
systemctl start fail2ban.service
# Open port
if [ -f /etc/sysconfig/iptables ]; then
systemctl start iptables.service
iptables -I INPUT -p tcp --dport 80 -j ACCEPT
iptables -I INPUT -p tcp --dport 25 -j ACCEPT
iptables -I INPUT -p tcp --dport 443 -j ACCEPT
iptables -I INPUT -p tcp --dport 465 -j ACCEPT
iptables -I INPUT -p tcp --dport 587 -j ACCEPT
iptables -I INPUT -p tcp --dport $admin_port -j ACCEPT
iptables -I INPUT -p tcp --dport 2411 -j ACCEPT
service iptables save
fi
mkdir -p /var/lib/php/session
chown -R nginx:nginx /var/lib/php
chown nginx:nginx /home/$server_name
chown -R nginx:nginx /home/*/public_html
chown -R nginx:nginx /home/*/private_html
rm -f /root/install
echo -n "cd /home" >> /root/.bashrc
mkdir -p /etc/uravps/
cat > "/etc/uravps/scripts.conf" <<END
uravps_vers="$uravps_vers"
server_name="$server_name"
server_ip="$server_ip"
admin_port="$admin_port"
resource_url="$script_resource"
mariadb_root_password="$root_password"
END
chmod 600 /etc/uravps/scripts.conf
clear
printf "=========================================================================\n"
printf "Adding menu... \n"
printf "=========================================================================\n"
wget -q $script_resource/uravps -O /bin/uravps && chmod +x /bin/uravps
mkdir /etc/uravps/menu/
cd /etc/uravps/menu/
wget -q $script_resource/uravps_menu.zip
unzip -q uravps_menu.zip && rm -f uravps_menu.zip
mv -f /etc/uravps/menu/uravps_menu/* /etc/uravps/menu/
chmod +x /etc/uravps/menu/*
clear
cat > "/root/uravps.txt" <<END
=========================================================================
URAVPS
MANAGE VPS INFORMATION
=========================================================================
Command access URAVPS scripts: uravps
Domain default: http://$server_name/ or http://$server_ip/
URAVPS Script Admin: http://$server_name:$admin_port/ or http://$server_ip:$admin_port/
File Manager: http://$server_name:$admin_port/filemanager/ or http://$server_ip:$admin_port/filemanager/
phpMyAdmin: http://$server_name:$admin_port/phpmyadmin/ or http://$server_ip:$admin_port/phpmyadmin/
Server Info: http://$server_name:$admin_port/serverinfo/ or http://$server_ip:$admin_port/serverinfo/
PHP OPcache: http://$server_name:$admin_port/op.php or http://$server_ip:$admin_port/op.php
Username: admin
Password: $admin_password
Support team: https://uravps.com
END
chmod 600 /root/uravps.txt
if [ "$1" = "wordpress" ]; then
printf "=========================================================================\n"
printf " URAVPS \n"
printf "Install successful URAVPS Script + WordPress! \n"
printf "=========================================================================\n"
printf "Access http://$server_name \n or http://$server_ip to config Wordpress \n"
else
printf "=========================================================================\n"
printf "Scripts URAVPS install complete.. \n"
printf "=========================================================================\n"
printf "Infomation VPS \n"
printf "=========================================================================\n"
printf "Domain default: http://$server_name/ or http://$server_ip/\n"
fi
printf "=========================================================================\n"
printf "URAVPS Script Admin: http://$server_name:$admin_port/ \n or http://$server_ip:$admin_port/\n\n"
printf "File Manager: http://$server_name:$admin_port/filemanager/ \n or http://$server_ip:$admin_port/filemanager/\n\n"
printf "phpMyAdmin: http://$server_name:$admin_port/phpmyadmin/ \n or http://$server_ip:$admin_port/phpmyadmin/\n\n"
printf "Server Info: http://$server_name:$admin_port/serverinfo/ \n or http://$server_ip:$admin_port/serverinfo/\n\n"
printf "PHP OPcache: http://$server_name:$admin_port/op.php \n or http://$server_ip:$admin_port/op.php\n"
printf "=========================================================================\n"
printf " Info login: \n"
printf " Username: admin \n"
printf " Password: $admin_password \n"
printf "=========================================================================\n"
printf "/root/uravps.txt \n"
printf "=========================================================================\n"
printf "***Warning: SSH port change to 2411 \n"
printf "=========================================================================\n"
printf "Command manager server \"uravps\". \n"
printf "Support team: https://uravps.com \n"
printf "=========================================================================\n"
printf "Reboot server.... \n\n"
sleep 3
reboot
exit
|
BREAKTEAM/BreakBuild
|
uravps.sh
|
Shell
|
gpl-3.0
| 31,347 |
#!/bin/sh
# cp from 3.16 fails this test
# Copyright (C) 1997-2016 Free Software Foundation, Inc.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
. "${srcdir=.}/tests/init.sh"; path_prepend_ ./src
print_ver_ cp
mkdir b
msg=bar
echo $msg > a
cd b
ln -s ../a .
cd ..
# It should fail with a message something like this:
# cp: 'a' and 'b/a' are the same file
# Fail this test if the exit status is not 1
returns_ 1 cp -d a b 2>/dev/null || fail=1
test "$(cat a)" = $msg || fail=1
Exit $fail
|
gyl33333/coreutils
|
tests/cp/no-deref-link2.sh
|
Shell
|
gpl-3.0
| 1,084 |
#!/bin/sh
MY_DIR=`dirname $0`
START_NORM=/jci/scripts/start_normal_mode.sh
CONF_NAME=autorun
CONF_DIR=/mnt/data_persist/dev/bin
CONF_ADB=${CONF_DIR}/adb
CONF_FILE=${CONF_DIR}/${CONF_NAME}
MY_CONF=${MY_DIR}/${CONF_NAME}
MY_ADB=${MY_DIR}/adb
UDEV_HANDLR=${CONF_DIR}/02-run-tweaks-from-usb/install-udev-handler-if-not-installed
UDEV_AUTO=${CONF_DIR}/02-run-tweaks-from-usb/install-udev-handler-if-not-installed.autorun
REC_LOG=${MY_DIR}/recovery.log
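# Watchdog loop: every 5 seconds, restore the autorun script, adb binary and
# tweak files from this recovery copy if they have vanished from CONF_DIR.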
sleep 5
echo "*** Begin Autorun Recovery ***" >> ${REC_LOG}
while true
do
if [ ! -e ${CONF_FILE} ]
then
echo "Recover Autorun & Files" >> ${REC_LOG}
cp -a ${MY_CONF} ${CONF_FILE}
cp -a ${MY_ADB} ${CONF_ADB}
cp -a ${MY_DIR}/02-* ${CONF_DIR}
mv ${UDEV_HANDLR} ${UDEV_AUTO}
fi
sleep 5
done
|
Trevelopment/MZD-AIO-TI-X
|
app/files/tweaks/cmu-autorun/sdcard/recovery/44-recovery-recovery/recover-autorun.sh
|
Shell
|
gpl-3.0
| 765 |
#! /usr/bin/env bash
# Search for a string in epub files listed in a file and prints the names
# of the epub files that contain the search string.
# primejyothi at gmail dot com 20140118
# License : GPLv3
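# The list file contains one epub path per line, e.g. (hypothetical):
#   /books/alice.epub
#   /books/dracula.epub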
if [[ "$#" -ne 2 ]]
then
echo "Usage `basename $0` epubListFile SearchString"
exit 2
fi
if [[ ! -r $1 ]]
then
echo "Unable to read input file $1"
exit 1
fi
while read i
do
unzip -c "$i" > j.txt
grep -iq "$2" j.txt
retVal=$?
if [[ "${retVal}" -eq 0 ]]
then
echo $i
fi
rm -f j.txt
done < $1
exit 0
|
primejyothi/ShellUtils
|
srchEpub.sh
|
Shell
|
gpl-3.0
| 520 |
#!/bin/bash
# This script builds all dependencies of sdsl
# and installs the library on a LINUX or Mac OS X system
CUR_DIR=`pwd`
SDSL_INSTALL_PREFIX=${HOME}
if [ $# -ge 1 ]; then
SDSL_INSTALL_PREFIX=${1}
fi
# Get absolute path name of install directory
mkdir -p "${SDSL_INSTALL_PREFIX}" 2> /dev/null
cd "${SDSL_INSTALL_PREFIX}" > /dev/null 2>&1
if [ $? != 0 ] ; then
echo "ERROR: directory '${SDSL_INSTALL_PREFIX}' does not exist nor could be created."
echo "Please choose another directory."
exit 1
else
SDSL_INSTALL_PREFIX=`pwd -P`
fi
echo "Library will be installed in '${SDSL_INSTALL_PREFIX}'"
cd "${CUR_DIR}"
OLD_DIR="$( cd "$( dirname "$0" )" && pwd )" # gets the directory where the script is located in
cd "${OLD_DIR}"
OLD_DIR=`pwd`
# (1) Copy pre-commit hook
if [ -d ".git/hooks" ]; then
echo "Copy pre-commit into .git/hooks"
cp extras/pre-commit .git/hooks/
if [ $? != 0 ]; then
echo "WARNING: could not copy pre-commit script into .git/hooks"
fi
chmod u+x .git/hooks/pre-commit
if [ $? != 0 ]; then
echo "WARNING: could not make pre-commit script executable"
fi
else
echo "WARNING: .git/hooks directory does not exists."
echo " The pre-commit hook is not installed."
fi
# (2) Install divsufsort, gtest, and sdsl
HEADER=test/CompileTest.hpp # Make a header-file that contains all other header-files
echo "#ifndef INCLUDED_SDSL_COMPILE_TEST" > ${HEADER}
echo "#define INCLUDED_SDSL_COMPILE_TEST" >> ${HEADER}
for HEADERFILE in include/sdsl/*.hpp
do
FILENAME=`basename ${HEADERFILE}`
echo "#include \"sdsl/${FILENAME}\"" >> ${HEADER}
done
echo "#endif" >> ${HEADER}
cd build # change into the build directory
if [ $? != 0 ]; then
exit 1
fi
./clean.sh # clean-up build directory
if [ $? != 0 ]; then
exit 1
fi
cmake -DCMAKE_INSTALL_PREFIX="${SDSL_INSTALL_PREFIX}" .. # run cmake
if [ $? != 0 ]; then
echo "ERROR: CMake build failed."
exit 1
fi
make # run make
if [ $? != 0 ]; then
echo "ERROR: Build failed."
exit 1
fi
echo "Removing old files"
echo "rm -rf '${SDSL_INSTALL_PREFIX}/include/sdsl/*'"
rm -rf "${SDSL_INSTALL_PREFIX}/include/sdsl/*"
if [ $? != 0 ]; then
echo "WARNING: Could not remove old header files."
fi
echo "rm -f '${SDSL_INSTALL_PREFIX}/lib/libsdsl*'"
rm -f "${SDSL_INSTALL_PREFIX}/lib/libsdsl*"
if [ $? != 0 ]; then
echo "WARNING: Could not remove old library file."
fi
make install # install library
if [ $? != 0 ]; then
echo "ERROR: Installation failed."
exit 1
fi
cd ..
if [ "`pwd`" != "${OLD_DIR}" ]; then
echo "ERROR: we are not in the original dir ${OLD_DIR} now."
exit 1
fi
echo "SUCCESS: sdsl was installed successfully!"
echo "The sdsl include files are located in '${SDSL_INSTALL_PREFIX}/include'."
echo "The library files are located in '${SDSL_INSTALL_PREFIX}/lib'."
echo " "
echo "Sample programs can be found in the examples-directory."
echo "A program 'example.cpp' can be compiled with the command: "
echo "g++ -std=c++11 -DNDEBUG -O3 [-msse4.2] \\"
echo " -I${SDSL_INSTALL_PREFIX}/include -L${SDSL_INSTALL_PREFIX}/lib \\"
echo " example.cpp -lsdsl -ldivsufsort -ldivsufsort64"
echo " "
echo "Tests in the test-directory"
echo "A cheat sheet in the extras/cheatsheet-directory."
echo "Have fun!"
|
andmaj/rrr-size
|
sdsl-lite/lib/src/sdsl-lite/install.sh
|
Shell
|
gpl-3.0
| 3,206 |
#!/usr/bin/env bash
set -e
set -u
# set -x
# ==============================================================================
# The following variables are required and need to be set to the environment by
# the user:
#
# NEWCOMEN_TARGET_REPO
# NEWCOMEN_SOURCE_REPOS
# GH_TOKEN
#
# The following variables are optional, default values have been provided:
#
# NEWCOMEN_AUTHOR_NAME
# NEWCOMEN_AUTHOR_EMAIL
#
# ==============================================================================
# ==============================================================================
# Values that need to be filled by the user
sGithubToken=''
sSourceRepo=''
sTargetRepo=''
# ------------------------------------------------------------------------------
# Default values that can be overwritten by the user
sGitUser='potherca-bot'
sGitMail='[email protected]'
# ------------------------------------------------------------------------------
# Global variables the script will fill in
sOriginalGitUser=''
sOriginalGitMail=''
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
declare -a aSourceRepos
# ------------------------------------------------------------------------------
# Immutable values
readonly sApplicationName='Newcomen'
readonly sTmpDirectory=".${sApplicationName}SourceRepositoryContent"
readonly sMergeBranch='newcomen-merge-branche'
# ==============================================================================
# ------------------------------------------------------------------------------
function indent() {
# sed -l basically makes sed replace and buffer through stdin to stdout
# so you get updates while the command runs and dont wait for the end
# e.g. npm install | indent
# if an arg is given it's a flag indicating we shouldn't indent the first line,
# so use :+ to tell SED accordingly if that parameter is set, otherwise null
# string for no range selector prefix (it selects from line 2 onwards and then
# every 1st line, meaning all lines)
local c="${1:+"2,999"} s/^/ /"
case $(uname) in
Darwin) sed -l "$c";; # mac/bsd sed: -l buffers on line boundaries
*) sed -u "$c";; # unix/gnu sed: -u unbuffered (arbitrary) chunks of data
esac
}
# ------------------------------------------------------------------------------
function printError() {
echo
echo -e " ! ERROR: $*" | indent no_first_line_indent
echo
}
# ------------------------------------------------------------------------------
function printTopic() {
echo
echo "=====> $*"
}
# ------------------------------------------------------------------------------
function printStatus() {
echo "-----> $*"
}
# ------------------------------------------------------------------------------
function setEnvironmentFromParameters() {
# @TODO: Use named parameters instead of positional ones
NEWCOMEN_TARGET_REPO="$1"
NEWCOMEN_SOURCE_REPOS="$2"
GH_TOKEN="$3"
}
# ------------------------------------------------------------------------------
function validateEnvironment() {
local sErrorMessage=''
set +u
if [ -z "${NEWCOMEN_SOURCE_REPOS}" ];then
sErrorMessage="${sErrorMessage}\n - NEWCOMEN_SOURCE_REPOS"
fi
if [ -z "${NEWCOMEN_TARGET_REPO}" ];then
sErrorMessage="${sErrorMessage}\n - NEWCOMEN_TARGET_REPO"
fi
if [ -z "${GH_TOKEN}" ];then
sErrorMessage="${sErrorMessage}\n - GH_TOKEN"
fi
set -u
if [ -n "${sErrorMessage}" ];then
sErrorMessage="Please make sure the following variable(s) are set in the environment: ${sErrorMessage}"
printError "${sErrorMessage}"
exit 65
fi
}
# ------------------------------------------------------------------------------
function setVariables() {
IFS=',' read -ra aSourceRepos <<< "${NEWCOMEN_SOURCE_REPOS}"
sTargetRepo="${NEWCOMEN_TARGET_REPO}"
sGithubToken="${GH_TOKEN}"
if [ -n "$(echo ${NEWCOMEN_AUTHOR_NAME})" ]; then
sGitUser="${NEWCOMEN_AUTHOR_NAME}"
fi
if [ -n "$(echo ${NEWCOMEN_AUTHOR_EMAIL})" ]; then
sGitMail="${NEWCOMEN_AUTHOR_EMAIL}"
fi
}
# ------------------------------------------------------------------------------
function storeSourceName() {
sSourceRepo="$(git config --get remote.origin.url | cut -f2 -d':' | cut -f1 -d'.')"
}
# ------------------------------------------------------------------------------
function storeSourceContent() {
printStatus "Storing content from ${sSourceRepo}"
mkdir "./${sTmpDirectory}"
for sFile in $(ls -A); do
if [ "${sFile}" != "${sTmpDirectory}" ];then
mv "${sFile}" "${sTmpDirectory}"
fi
done
}
# ------------------------------------------------------------------------------
function restoreSourceContent() {
printStatus "Content for ${sSourceRepo} will be restored"
removeGitDir
for sFile in $(ls -A "${sTmpDirectory}"); do
mv "${sTmpDirectory}/${sFile}" .
done
makeGitIgnoreTempDirectory
printStatus "Restored content for ${sSourceRepo}"
}
# ------------------------------------------------------------------------------
function storeGitUser() {
printStatus "Storing Git User and Email"
set +e
sOriginalGitMail="$(git config --get --global user.email)"
sOriginalGitUser="$(git config --get --global user.name)"
set -e
}
# ------------------------------------------------------------------------------
function restoreGitUser() {
if [ -n "${sOriginalGitUser}" ] && [ -n "${sOriginalGitUser}" ]; then
printStatus "Restoring Git User and Email to ${sOriginalGitUser}<${sOriginalGitMail}>"
git config --global user.email "${sOriginalGitMail}"
git config --global user.name "${sOriginalGitUser}"
fi
}
# ------------------------------------------------------------------------------
function setGitUser() {
printStatus "Setting ${sGitUser}<${sGitMail}> as author"
git config --global user.email "${sGitMail}"
git config --global user.name "${sGitUser}"
}
# ------------------------------------------------------------------------------
function addRepositoryContent() {
local sRepo="$1"
printTopic "Adding content for ${sRepo}"
sFirstRepo=${aSourceRepos[${#aSourceRepos[@]}-1]}
# @TODO: If the sSourceRepo is the sFirstRepo there's no need to store/restore
# The content can be left alone. Add check here and at (re)store points
if [ "${sRepo}" == "${sSourceRepo}" ];then
restoreSourceContent
elif [ "${sRepo}" == "${sFirstRepo}" ];then
printStatus "Content for ${sRepo} will be retrieved"
createGitRepository
addRemoteToRepository "${sRepo}"
fetchFromRemote
sBranch="$(getBranch)"
printStatus "Switching to branch ${sBranch}"
git checkout "${sBranch}" | indent
else
retrieveRepositoryContent "${sRepo}"
fi
}
# ------------------------------------------------------------------------------
function fetchFromRemote() {
printStatus 'Fetching from remote'
git fetch | indent
}
# ------------------------------------------------------------------------------
function removeGitDir() {
rm -Rf .git
}
# ------------------------------------------------------------------------------
function makeGitIgnoreTempDirectory() {
echo "${sTmpDirectory}" >> '.git/info/exclude'
}
# ------------------------------------------------------------------------------
function createGitRepository() {
git init | indent
makeGitIgnoreTempDirectory
}
# ------------------------------------------------------------------------------
function prepareRepository() {
printStatus "Preparing the directory"
removeGitDir
createGitRepository
}
# ------------------------------------------------------------------------------
function addRemoteToRepository() {
local sGitRepo="$1"
printStatus 'Adding remote'
if [ -z "${sGithubToken}" ];then
git remote add origin "https://github.com/${sGitRepo}" | indent
else
git remote add origin "https://${sGithubToken}@github.com/${sGitRepo}" | indent
fi
}
# ------------------------------------------------------------------------------
function getBranch() {
local sBranch='master'
sBranchName='gh-pages'
if [ "$(git show-ref ${sBranchName} 2>&1)" ]; then
sBranch="${sBranchName}"
fi
echo "${sBranch}"
}
# ------------------------------------------------------------------------------
function commitContent() {
printStatus "Committing content to branch ${sMergeBranch}"
git checkout -b "${sMergeBranch}" | indent
git add -A | indent
git commit -a -m "${sApplicationName}: Adding changes from source repositories. $(date +'%C%y-%m-%d %k:%M')" | indent
}
# ------------------------------------------------------------------------------
function mergeContent() {
local sRepo="$1"
printStatus "Merging content from branch ${sMergeBranch}"
if [ "${sRepo}" == "${sTargetRepo}" ];then
sOption='theirs'
else
sOption='ours'
fi
git merge --strategy=recursive --strategy-option="$sOption" "${sMergeBranch}" -m "${sApplicationName}: Merging content from source repositories." | indent
}
# ------------------------------------------------------------------------------
function retrieveRepositoryContent() {
local sRepo="$1"
printStatus "Content for ${sRepo} will be retrieved"
prepareRepository
addRemoteToRepository "${sRepo}"
commitContent
fetchFromRemote
sBranch="$(getBranch)"
printStatus "Switching to branch ${sBranch}"
git checkout "${sBranch}" | indent
mergeContent "${sRepo}"
}
# ------------------------------------------------------------------------------
function pushContents() {
local sBranch="$1"
printTopic "Sending merged content to target: origin ${sBranch}"
# @TODO: Add --dry-run option when long-parameters have been implemented
git push -- origin "${sBranch}" | indent
}
function cleanupBuild() {
printTopic 'Running clean-up'
restoreGitUser
echo 'Done.'
}
function prepareBuild() {
printTopic 'Preparing build'
storeGitUser
trap cleanupBuild EXIT
setGitUser
storeSourceName
storeSourceContent
}
# ------------------------------------------------------------------------------
function runBuild() {
local sBranch=''
prepareBuild
printTopic 'Handling Source Repositories'
# Handle Source Repos in reverse, so the most important repo is fetched last
for ((iCounter=${#aSourceRepos[@]}-1; iCounter>=0; iCounter--)); do
addRepositoryContent "${aSourceRepos[$iCounter]}"
done
printTopic 'Handling Target Repository'
addRepositoryContent "${sTargetRepo}"
pushContents "${sBranch}"
}
# ------------------------------------------------------------------------------
function run() {
if [ "$#" -eq 3 ];then
setEnvironmentFromParameters "$@"
fi
validateEnvironment
setVariables
runBuild
}
# ------------------------------------------------------------------------------
run "$@"
#EOF
|
Newcomen/Newcomen
|
build.sh
|
Shell
|
gpl-3.0
| 11,193 |
#!/bin/bash
# disable malloc check in glibc otw. we get corruption blarg on exit() with NDEBUG builds
export MALLOC_CHECK_=0
declare -i TIMEOUT=200
declare -i MEMOUT=1000
export GNUTIME="/usr/bin/time --verbose -o" # time command
export RUN="run -s $MEMOUT -t $((TIMEOUT+20)) -k -o"
export TIMELIMIT="timelimit -p -s 1 -t $TIMEOUT -T 20"
export TIMEFORMAT=$'\nreal\t%3R\nuser\t%3U\nsys\t%3S' # time format
#export TESTSPATH='./experiments' # path to lp/br/opt
export DMCS_GEN_PATH='../build/src/generator' # path to dmcsGen
#export LOGPATH='./experiments' # path to output logfiles (should be fixed)
DORUN=yes # run with `run'
DOTIMELIMIT=yes # run with `timelimit'
VERBOSE=yes # output stuff
LOGDAEMONS=yes # log daemon output
TESTCASES=testcases.txt
# D=diamond T=tree R=ring Z=zig-zag (topology IDs passed to dmcsGen)
declare -a topoNum=(1 6 4 3)
# D T R Z
declare -a sizes=(10 28 34 10 28 34 50 100 10 28 34 50 100 10 28 34 50 100)
declare -a start=(0 3 8 13 18)
declare -a length=(3 5 5 5)
declare -a sigs=(10 40)
declare -a bridges=(5 20)
declare -a rels=(5 20)
declare -i i=0
declare -i j=0
declare -i k=0
declare -i currentTopoNum=0
rm -f $TESTCASES
cd experiments
DMCS_EXPR_DIR=$?
if [ "$DMCS_EXPR_DIR" != 0 ] ; then
mkdir experiments
cd experiments
fi
for TOPO in diamond tree ring zig-zag ; do
declare -i currentTopoNum=${topoNum[$i]}
mkdir $TOPO
cd $TOPO
declare -i start_now=${start[$i]}
declare -i end_now=$start_now
let "end_now += ${length[$i]}"
for (( j=$start_now; j < $end_now; ++j )); do
declare -i CTX=${sizes[$j]}
for (( k = 0; k < 2; ++k )); do
declare -i SIG=${sigs[$k]}
declare -i BRS=${bridges[$k]}
declare -i RLS=${rels[$k]}
mkdir $CTX-$SIG-$BRS-$RLS
cd $CTX-$SIG-$BRS-$RLS
for x in {a..j} ; do
export INST=$x
TEMPLATE=$TOPO-$CTX-$SIG-$BRS-$RLS-$INST
echo $TEMPLATE >> ../../../$TESTCASES
mkdir $TEMPLATE
cd $TEMPLATE
export DMCSGEN=$DMCS_GEN_PATH/dmcsGen
pwd
DMCSGENS="--context=$CTX --atoms=$SIG --interface=$BRS --bridge_rules=$RLS --topology=$currentTopoNum --prefix=$TEMPLATE"
DMCSGENRUN="../../../../$DMCSGEN $DMCSGENS"
$DMCSGENRUN
cd ..
done
echo "end" >> ../../../$TESTCASES
cd ..
done
done
cd ..
let "i += 1"
done
|
DistributedMCS/dmcs
|
examples/genTest.sh
|
Shell
|
gpl-3.0
| 2,329 |
#!/bin/bash
###################
# Author: Domenic Denicola
# Modifications by: Prasad Talasila
# Date: 3-December-2017
###################
set -e # Exit with nonzero exit code if anything fails
SOURCE_BRANCH="dev"
TARGET_BRANCH="gh-pages"
ENCRYPTION_LABEL="bb6a8c93b0d6"
COMMIT_AUTHOR_EMAIL="[email protected]"
function createDocs {
mix docs
cp -r doc/* pages/
}
# Pull requests and commits to other branches shouldn't try to deploy, just build to verify
if [ "$TRAVIS_PULL_REQUEST" != "false" -o "$TRAVIS_BRANCH" != "$SOURCE_BRANCH" ]; then
echo "Skipping deploy; just doing a build."
exit 0
fi
# Save some useful information
REPO=`git config remote.origin.url`
SSH_REPO=${REPO/https:\/\/github.com\//[email protected]:}
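# The substitution rewrites the HTTPS remote into its SSH form,
# e.g. https://github.com/user/repo.git -> git@github.com:user/repo.git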
SHA=`git rev-parse --verify HEAD`
# Clone the existing gh-pages for this repo into out/
# Create a new empty branch if gh-pages doesn't exist yet (should only happen on first deploy)
git clone $REPO pages
cd pages
git checkout $TARGET_BRANCH || git checkout --orphan $TARGET_BRANCH
cd ..
# Clean out existing contents
rm -rf pages/* || exit 0
# create the documents using ExDoc
createDocs
# Now let's go have some fun with the cloned repo
cd pages
git config user.name "Travis CI"
git config user.email "$COMMIT_AUTHOR_EMAIL"
# Commit the "changes", i.e. the new version.
# The delta will show diffs between new and old versions.
git add .
git commit -m "[Travis Commit] Automated Deploy to gh-pages | Caused by ${SHA}
refer auto_commit_script: https://github.com/AutolabJS/autolabcli/blob/$SOURCE_BRANCH/script/doc_auto_deploy.sh
" || exit 0 # fail silently if there is nothing to commit
#go to parent directory and perform SSH configuration
cd ..
# Get the deploy key by using Travis's stored variables to decrypt deploy_key.enc
ENCRYPTED_KEY_VAR="encrypted_${ENCRYPTION_LABEL}_key"
ENCRYPTED_IV_VAR="encrypted_${ENCRYPTION_LABEL}_iv"
ENCRYPTED_KEY=${!ENCRYPTED_KEY_VAR}
ENCRYPTED_IV=${!ENCRYPTED_IV_VAR}
openssl aes-256-cbc -K $ENCRYPTED_KEY -iv $ENCRYPTED_IV -in config/deploy_key.enc -out config/deploy_key -d
chmod 600 config/deploy_key
eval `ssh-agent -s`
ssh-add config/deploy_key
#go to pages/ directory and commit the gh-pages/ update
cd pages/
#check the git repo context
pwd
echo repo=$SSH_REPO
echo branch=$TARGET_BRANCH
echo "===show local gh-pages commit logs==="
git log --oneline -n 5
echo "===show remote gh-pages commit logs==="
git log --oneline -n 5 origin/$TARGET_BRANCH
echo "===show remote commits above the current local commit==="
git log HEAD..origin/$TARGET_BRANCH
echo "===show status of local branch==="
git status
# Now that we're all set up, we can push.
git push $SSH_REPO $TARGET_BRANCH
##References
#https://gist.github.com/domenic/ec8b0fc8ab45f39403dd
#https://github.com/travis-ci/travis.rb
|
prasadtalasila/TransportScheduler
|
script/docs_auto_deploy.sh
|
Shell
|
gpl-3.0
| 2,785 |
#!/bin/sh
export GR_DONT_LOAD_PREFS=1
export srcdir=/home/zitouni/gnuradio-3.6.1/gnuradio-core/src/python/gnuradio/gr
export PATH=/home/zitouni/gnuradio-3.6.1/build/gnuradio-core/src/python/gnuradio/gr:$PATH
export LD_LIBRARY_PATH=/home/zitouni/gnuradio-3.6.1/build/volk/lib:/home/zitouni/gnuradio-3.6.1/build/gruel/src/lib:/home/zitouni/gnuradio-3.6.1/build/gnuradio-core/src/lib:$LD_LIBRARY_PATH
export PYTHONPATH=/home/zitouni/gnuradio-3.6.1/build/gnuradio-core/src/python:/home/zitouni/gnuradio-3.6.1/build/gnuradio-core/src/lib/swig:$PYTHONPATH
/usr/bin/python -B /home/zitouni/gnuradio-3.6.1/gnuradio-core/src/python/gnuradio/gr/qa_float_to_short.py
|
zitouni/gnuradio-3.6.1
|
build/gnuradio-core/src/python/gnuradio/gr/qa_float_to_short_test.sh
|
Shell
|
gpl-3.0
| 657 |
#!/bin/bash
STEAM_GAME_ID=285190
GAME_BINARY=DawnOfWar3
export HOME=$DEBUG_REAL_HOME
steam steam://run/$STEAM_GAME_ID &
sleep 6
GAME_PID=`pgrep $GAME_BINARY | tail -1`
echo '#!/bin/sh' > steam-env-vars.sh
echo "# PID: $GAME_PID" >> steam-env-vars.sh
while read -d $'\0' ENV; do NAME=`echo $ENV | cut -d= -f1`; VAL=`echo $ENV | cut -d= -f2`; echo "export $NAME=\"$VAL\""; done < /proc/$GAME_PID/environ >> steam-env-vars.sh
chmod +x steam-env-vars.sh
killall -9 DawnOfWar3
sleep 6
|
phoronix-test-suite/phoronix-test-suite
|
ob-cache/test-profiles/pts/dow3-1.0.1/pre.sh
|
Shell
|
gpl-3.0
| 484 |
#!/bin/bash
PYTHON_VERSION=2.4.6
function checkout_version {
local repo=$1
version=${2:-python-2.4}
echo Checking out $version.4 on $repo ...
(cd ../$repo && git checkout $version && pyenv local $PYTHON_VERSION) && \
git pull
return $?
}
export PATH=$HOME/.pyenv/bin/pyenv:$PATH
owd=$(pwd)
bs=${BASH_SOURCE[0]}
if [[ $0 == $bs ]] ; then
echo "This script should be *sourced* rather than run directly through bash"
exit 1
fi
mydir=$(dirname $bs)
fulldir=$(readlink -f $mydir)
(cd $fulldir/.. && checkout_version python-spark && checkout_version python-filecache &&
checkout_version python-xdis python-2.4-to-2.7 && checkout_version python-uncompyle6)
cd $owd
git checkout python-2.4 && pyenv local $PYTHON_VERSION && git pull
|
rocky/python2-trepan
|
admin-tools/setup-python-2.4.sh
|
Shell
|
gpl-3.0
| 760 |
#!/bin/bash
. $_COMMANDER_HOME/libs/util.sh
_cf_load_ ssh
function _ssh_agent_(){
ssh-agent > /dev/null 2>&1
}
function _ssh_key_pub_(){
for k in $@ ; do
if [[ -f $_COMMANDER_HOME/configs/ssh/.ssh/$k.pub ]]; then
_exec_ cat $_COMMANDER_HOME/configs/ssh/.ssh/$k.pub
fi
done
}
function _ssh_key_del_(){
for k in $@ ; do
if [[ -f $_COMMANDER_HOME/configs/ssh/.ssh/$k ]]; then
_exec_ rm $_COMMANDER_HOME/configs/ssh/.ssh/$k
_exec_ rm $_COMMANDER_HOME/configs/ssh/.ssh/$k.pub
fi
done
}
function _ssh_key_load_(){
ssh-add -D > /dev/null 2>&1
if [[ $# -gt 0 ]]; then
for f in $@ ; do
if [[ -f $_COMMANDER_HOME/configs/ssh/.ssh/$f ]]; then
eval ssh-add $_COMMANDER_HOME/configs/ssh/.ssh/$f > /dev/null 2>&1
fi
done
else
if [[ -d $_COMMANDER_HOME/configs/ssh/.ssh ]]; then
for file in $(find "$_COMMANDER_HOME/configs/ssh/.ssh" -name "*.pub"); do
eval ssh-add ${file%%.pub} > /dev/null 2>&1
done
fi
fi
}
function _ssh_key_gen_(){
if [[ $# -eq 0 ]]; then
echo -n "ssh key identity: "
read k;
if [[ $k == "" ]]; then
echo "ssh key identity can't be empty."
return
fi
else
k=$1
fi
echo -n "ssh key description: "
read n;
if [[ $n == "" ]]; then
n=$k
fi
if [[ ! -d $_COMMANDER_HOME/configs/ssh ]]; then
_exec_ mkdir -p $_COMMANDER_HOME/configs/ssh
echo "created directory: $_COMMANDER_HOME/configs/ssh"
fi
if [[ ! -d $_COMMANDER_HOME/configs/ssh/.ssh ]]; then
_exec_ mkdir -p $_COMMANDER_HOME/configs/ssh/.ssh
echo "created directory: $_COMMANDER_HOME/configs/ssh/.ssh"
fi
ssh-keygen -f $_COMMANDER_HOME/configs/ssh/.ssh/$k -C $n
}
function _ssh_srv_new_(){
echo -n "ssh new connection name: "
read n;
if [[ $n == "" ]]; then
echo -n "connection name can't be empty."
return
fi
_srv_connection=$n
echo -n "ssh connect host address: "
read n;
if [[ $n == "" ]]; then
echo -n "host address can't be empty."
return
fi
_srv_host=$n
echo -n "ssh connect host port (default: 22): "
read n;
if [[ $n == "" ]]; then
n=22
fi
_srv_port=$n
echo -n "ssh connect user name (default: $USER): "
read n;
if [[ $n == "" ]]; then
n=$USER
fi
_srv_user=$n
_cf_var_write_ "ssh/$_srv_connection.sh" ${_srv_connection}"_host" $_srv_host
_cf_var_write_ "ssh/$_srv_connection.sh" ${_srv_connection}"_port" $_srv_port
_cf_var_write_ "ssh/$_srv_connection.sh" ${_srv_connection}"_user" $_srv_user
echo -n "ssh connect ssh key identity (default: $_srv_connection): "
read n;
if [[ $n == "" ]]; then
n=$_srv_connection
fi
_srv_key=$n
_cf_var_write_ "ssh/$_srv_connection.sh" ${_srv_connection}"_key" $_srv_key
echo
echo "============================================================"
echo " NOTE "
echo "============================================================"
if [[ ! -f $_COMMANDER_HOME/configs/ssh/.ssh/$_srv_key.pub ]]; then
echo "please generate a ssh key identified by name ($_srv_key)."
echo "you can use the following command: cmd ssh key"
echo
fi
echo "please copy the ssh key pub to the remote server"
echo "you can use the following command: cmd ssh copyid $_srv_connection"
echo
}
function _ssh_srv_con_(){
if [[ -f $_COMMANDER_HOME/configs/ssh/$1.sh ]]; then
host=${1}_host
port=${1}_port
user=${1}_user
eval ssh \$$user@\$$host -p \$$port
else
echo "ssh connection ($1) doesn't exist."
fi
}
function _ssh_key_cpy_(){
if [[ ! -f $_COMMANDER_HOME/configs/ssh/$1.sh ]]; then
echo "ssh connection ($1) doesn't exist. please use command: cmd ssh con"
return
fi
host=${1}_host
port=${1}_port
user=${1}_user
key=${1}_key
eval ssh-copy-id -i $_COMMANDER_HOME/configs/ssh/.ssh/\$$key.pub \$$user@\$$host -p \$$port
}
_ssh_agent_
_ssh_key_load_
function cmd-ssh(){
__doc__ ssh key and connection management
case "$1" in
"" | -h )
echo "Usage: cmd-ssh [ key | connect(con | conn) | copyid(copy) | list(ls) | reload ]"
echo
;;
"key" )
shift
if [[ $# -gt 0 ]]; then
_ssh_key_pub_ $1
else
_ssh_key_gen_
fi
;;
"con" | "conn" | "connect" )
shift
if [[ $# -eq 1 ]]; then
_ssh_srv_con_ $1
else
_ssh_srv_new_
fi
;;
"copyid" | "copy" )
shift
if [[ $# -ne 1 ]]; then
echo "Usage: cmd ssh copyid [connection]"
echo
return
fi
_ssh_key_cpy_ $@
;;
"list" | "ls" )
if [[ -d $_COMMANDER_HOME/configs/ssh ]]; then
echo "ssh command has following connections:"
echo
for file in $(find "$_COMMANDER_HOME/configs/ssh" -name "*.sh"); do
filename=${file##*/}
echo ${filename%%.sh}
done | sort | uniq
fi
echo
if [[ -d $_COMMANDER_HOME/configs/ssh/.ssh ]]; then
echo "ssh command has following keys:"
echo
for file in $(find "$_COMMANDER_HOME/configs/ssh/.ssh" -name "*.pub"); do
filename=${file##*/}
echo ${filename%%.pub}
done | sort | uniq
fi
;;
"remove" )
shift
if [[ $# -gt 0 ]]; then
_ssh_key_del_ $1
fi
;;
"reload" )
shift
_ssh_key_load_ $@
;;
* )
			if [[ ! -f $_COMMANDER_HOME/configs/ssh/$1.sh ]]; then
				echo "connection ($1) does not exist."
				return
			fi
			_ssh_srv_con_ $1
;;
esac
}
complete -W "key connect copyid list remove reload" cmd-ssh
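# Example session (connection/key names are hypothetical):
#   cmd-ssh key mykey      # print the public key 'mykey'
#   cmd-ssh connect        # interactively define a new connection
#   cmd-ssh copyid mybox   # install the key on the 'mybox' connection
#   cmd-ssh mybox          # open an ssh session to 'mybox'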
|
auto-program/commander
|
scripts/cmd-ssh.sh
|
Shell
|
gpl-3.0
| 5,155 |
#!/bin/bash
# This is part of the rsyslog testbench, licensed under GPLv3
export IMFILEINPUTFILES="10"
export IMFILEINPUTFILESSTEPS="5"
#export IMFILEINPUTFILESALL=$(($IMFILEINPUTFILES * $IMFILEINPUTFILESSTEPS))
export IMFILECHECKTIMEOUT="5"
. $srcdir/diag.sh init
. $srcdir/diag.sh check-inotify-only
# generate input files first. Note that rsyslog processes it as
# soon as it start up (so the file should exist at that point).
# Start rsyslog now before adding more files
. $srcdir/diag.sh startup imfile-wildcards-dirs-multi.conf
for j in `seq 1 $IMFILEINPUTFILESSTEPS`;
do
echo "Loop Num $j"
for i in `seq 1 $IMFILEINPUTFILES`;
do
mkdir rsyslog.input.dir$i
mkdir rsyslog.input.dir$i/dir$i
./inputfilegen -m 1 > rsyslog.input.dir$i/dir$i/file.logfile
done
ls -d rsyslog.input.*
# Check correct amount of input files each time
let IMFILEINPUTFILESALL=$(($IMFILEINPUTFILES * $j))
. $srcdir/diag.sh content-check-with-count "HEADER msgnum:00000000:" $IMFILEINPUTFILESALL $IMFILECHECKTIMEOUT
# Delete all but first!
for i in `seq 1 $IMFILEINPUTFILES`;
do
rm -rf rsyslog.input.dir$i/dir$i/file.logfile
rm -rf rsyslog.input.dir$i
done
done
. $srcdir/diag.sh shutdown-when-empty # shut down rsyslogd when done processing messages
. $srcdir/diag.sh wait-shutdown # we need to wait until rsyslogd is finished!
. $srcdir/diag.sh exit
|
madedotcom/rsyslog
|
tests/imfile-wildcards-dirs-multi.sh
|
Shell
|
gpl-3.0
| 1,359 |
#!/bin/bash
#From here: https://github.com/alexeevdv/dename
function detect_gnome()
{
ps -e | grep -E '^.* gnome-session$' > /dev/null
if [ $? -ne 0 ];
then
return 0
fi
VERSION=`gnome-session --version | awk '{print $2}'`
DESKTOP="GNOME"
return 1
}
function detect_kde()
{
ps -e | grep -E '^.* kded4$' > /dev/null
if [ $? -ne 0 ];
then
return 0
else
VERSION=`kded4 --version | grep -m 1 'KDE' | awk -F ':' '{print $2}' | awk '{print $1}'`
DESKTOP="KDE"
return 1
fi
}
function detect_unity()
{
ps -e | grep -E 'unity-panel' > /dev/null
if [ $? -ne 0 ];
then
return 0
fi
VERSION=`unity --version | awk '{print $2}'`
DESKTOP="UNITY"
return 1
}
function detect_xfce()
{
ps -e | grep -E '^.* xfce4-session$' > /dev/null
if [ $? -ne 0 ];
then
return 0
fi
VERSION=`xfce4-session --version | grep xfce4-session | awk '{print $2}'`
DESKTOP="XFCE"
return 1
}
function detect_cinnamon()
{
ps -e | grep -E '^.* cinnamon$' > /dev/null
if [ $? -ne 0 ];
then
return 0
fi
VERSION=`cinnamon --version | awk '{print $2}'`
DESKTOP="CINNAMON"
return 1
}
function detect_mate()
{
ps -e | grep -E '^.* mate-panel$' > /dev/null
if [ $? -ne 0 ];
then
return 0
fi
VERSION=`mate-about --version | awk '{print $4}'`
DESKTOP="MATE"
return 1
}
function detect_lxde()
{
ps -e | grep -E '^.* lxsession$' > /dev/null
if [ $? -ne 0 ];
then
return 0
fi
# We can detect LXDE version only thru package manager
which apt-cache > /dev/null 2> /dev/null
if [ $? -ne 0 ];
then
which yum > /dev/null 2> /dev/null
if [ $? -ne 0 ];
then
VERSION='UNKNOWN'
else
# For Fedora
VERSION=`yum list lxde-common | grep lxde-common | awk '{print $2}' | awk -F '-' '{print $1}'`
fi
else
# For Lubuntu and Knoppix
VERSION=`apt-cache show lxde-common | grep 'Version:' | awk '{print $2}' | awk -F '-' '{print $1}'`
fi
DESKTOP="LXDE"
return 1
}
function detect_sugar()
{
if [ "$DESKTOP_SESSION" == "sugar" ];
then
VERSION=`python -c "from jarabe import config; print config.version"`
DESKTOP="SUGAR"
else
return 0
fi
}
DESKTOP="UNKNOWN"
if detect_unity;
then
if detect_kde;
then
if detect_gnome;
then
if detect_xfce;
then
if detect_cinnamon;
then
if detect_mate;
then
if detect_lxde;
then
detect_sugar
fi
fi
fi
fi
fi
fi
fi
if [ "$1" == '-v' ];
then
echo $VERSION
else
if [ "$1" == '-n' ];
then
echo $DESKTOP
else
echo $DESKTOP $VERSION
fi
fi
|
JoshuaD84/teahouse-fox-background
|
dename.sh
|
Shell
|
gpl-3.0
| 2,687 |
#!/bin/bash
# --------------------------------------------------------------------------- #
# #
# Copyright (C) 2014 LAFKON/Christoph Haag #
# #
# svglayers2pdfdoublepages.sh is free software: you can redistribute it #
# and/or modify it under the terms of the GNU General Public License as #
# published by the Free Software Foundation, either version 3, #
# or (at your option) any later version. #
# #
# svglayers2pdfdoublepages.sh is distributed in the hope that it #
# will be useful, but WITHOUT ANY WARRANTY; without even the implied #
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. #
# See the GNU General Public License for more details. #
# #
# --------------------------------------------------------------------------- #
SVG=$1
PDF=${SVG%%.*}.pdf
# --------------------------------------------------------------------------- #
# INTERACTIVE CHECKS
# --------------------------------------------------------------------------- #
if [ ! -f ${SVG%%.*}.svg ]; then echo; echo "We need a svg!"
echo "e.g. $0 yoursvg.svg"; echo
exit 0;
fi
if [ -f $PDF ]; then
echo "$PDF does exist"
read -p "overwrite ${PDF}? [y/n] " ANSWER
if [ X$ANSWER != Xy ] ; then echo "Bye"; exit 0; fi
fi
BREAKFOO=`echo N${RANDOM}FO0 | cut -c 1-8`
SPACEFOO=`echo S${RANDOM}F0O | cut -c 1-8`
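# BREAKFOO/SPACEFOO are randomized placeholder tokens (e.g. N12345FO) unlikely
# to collide with SVG content: newlines and spaces are swapped for them so sed
# can treat the whole document as one line, then restored verbatim afterwards.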
# --------------------------------------------------------------------------- #
# MOVE ALL LAYERS ONTO SEPARATE LINES (TEMPORARILY; SIMPLIFIES PARSING LATER ON)
# --------------------------------------------------------------------------- #
sed ":a;N;\$!ba;s/\n/$BREAKFOO/g" $SVG | # REMOVE ALL LINEBREAKS (BUT SAVE)
sed "s/ /$SPACEFOO/g" | # REMOVE ALL SPACE (BUT SAVE)
sed 's/<g/4Fgt7RfjIoPg7/g' | # PLACEHOLDER FOR GROUP OPEN
sed ':a;N;$!ba;s/\n/ /g' | # REMOVE ALL NEW LINES
sed 's/4Fgt7RfjIoPg7/\n<g/g' | # RESTORE GROUP OPEN + NEWLINE
sed '/groupmode="layer"/s/<g/4Fgt7R/g' | # PLACEHOLDER FOR LAYERGROUP OPEN
sed ':a;N;$!ba;s/\n/ /g' | # REMOVE ALL LINEBREAKS
sed 's/4Fgt7R/\n<g/g' | # RESTORE LAYERGROUP OPEN + NEWLINE
sed 's/<\/svg>//g' | # REMOVE SVG CLOSE
sed 's/display:none/display:inline/g' | # MAKE VISIBLE EVEN WHEN HIDDEN
tee > ${SVG%%.*}.tmp # WRITE TO TEMPORARY FILE
# --------------------------------------------------------------------------- #
# EXTRACT HEADER
# --------------------------------------------------------------------------- #
# SVGHEADER=`head -n 1 ${SVG%%.*}.tmp`
# --------------------------------------------------------------------------- #
# WRITE LAYERS TO SEPARATE FILES AND TRANSFORM TO PDF
# --------------------------------------------------------------------------- #
XSHIFT="526.410"
TRANSFORM="transform=\"translate($XSHIFT,0)\""
COUNT=1
for LAYERNAME in `sed -n '1!p' ${SVG%%.*}.tmp | # DISPLAY EVERYTHING EXCEPT FIRST LINE
sed 's/inkscape:label="/\nTHIS/g' | #
sed 's/"/\n"/g' | #
grep ^THIS | #
sed 's/THIS//g' | #
grep -v "^XX_" | #
sort -u`
do
for PAGE in 1 2
do
if [ $PAGE -eq 1 ]; then
SHIFT=$TRANSFORM
else
SHIFT=""
fi
NUM=`echo 0000$COUNT | rev | cut -c 1-4 | rev`
LNAME=`echo $LAYERNAME | #
sed 's/ /_/g'`
OUT=layer2svg_${NUM}_${LNAME}
head -n 1 ${SVG%%.*}.tmp | # THE HEADER
sed "s/$BREAKFOO/\n/g" | # RESTORE ORIGINAL LINEBREAKS
sed "s/$SPACEFOO/ /g" | # RESTORE ORIGINAL SPACES
tee > ${OUT}.svg
echo "<g $SHIFT>" >> ${OUT}.svg
grep ":label=\"$LAYERNAME" ${SVG%%.*}.tmp | # THE LAYER
sed "s/$BREAKFOO/\n/g" | # RESTORE ORIGINAL LINEBREAKS
sed "s/$SPACEFOO/ /g" | # RESTORE ORIGINAL SPACES
tee >> ${OUT}.svg
echo "</g>" >> ${OUT}.svg
echo "</svg>" >> ${OUT}.svg
inkscape --export-pdf=${OUT}.pdf \
--export-text-to-path \
${OUT}.svg
gs -o ${OUT}_CONFORMED.pdf \
-sDEVICE=pdfwrite \
-sColorConversionStrategy=Gray \
-sProcessColorModel=DeviceGray \
-sColorImageDownsampleThreshold=2 \
-sColorImageDownsampleType=Bicubic \
-sColorImageResolution=300 \
-sGrayImageDownsampleThreshold=2 \
-sGrayImageDownsampleType=Bicubic \
-sGrayImageResolution=300 \
-sMonoImageDownsampleThreshold=2 \
-sMonoImageDownsampleType=Bicubic \
-sMonoImageResolution=1200 \
-dSubsetFonts=true \
-dEmbedAllFonts=true \
-dAutoRotatePages=/None \
-sCannotEmbedFontPolicy=Error \
-c ".setpdfwrite<</NeverEmbed[ ]>> setdistillerparams" \
-f ${OUT}.pdf > /dev/null
mv ${OUT}_CONFORMED.pdf ${OUT}.pdf
rm layer2svg_${NUM}_${LNAME}.svg
COUNT=`expr $COUNT + 1`
done
done
# --------------------------------------------------------------------------- #
# MAKE MULTIPAGE PDF
# --------------------------------------------------------------------------- #
pdftk layer2svg_*.pdf cat output $PDF
# --------------------------------------------------------------------------- #
# CLEAN UP
# --------------------------------------------------------------------------- #
rm ${SVG%%.*}.tmp layer2svg_*.pdf
exit 0;
|
lafkon/conversations
|
var/layouts/makingof-01/svglayers2pdfdoublepages.sh
|
Shell
|
gpl-3.0
| 6,451 |
#!/bin/bash
ANSIBLEPLAYBOOK="/usr/local/bin/ansible-playbook"
install_ansible()
{
if [ ! -f $ANSIBLEPLAYBOOK ]; then
pip install ansible
fi
}
configure_ansible()
{
if [ ! -d "/etc/ansible" ]; then
mkdir /etc/ansible
fi
echo "localhost ansible_connection=local" >> /etc/ansible/hosts
}
start_ansible()
{
$ANSIBLEPLAYBOOK -v /home/ec2-user/configs/ansible/nginx.yml --connection=local
}
install_ansible
configure_ansible
start_ansible
|
maxjazz/AWSHomeWork
|
scripts/start.sh
|
Shell
|
gpl-3.0
| 462 |
echo "Example 3: Same data"
echo " Epsilon: 0.4"
echo " Distance: 120"
echo " Users: 1"
echo " Simple zone"
echo "Running BHI..."
../../evaluator/build/evaluator --method bhi -e 0.4 -d 120 -n 1 -f 2091 -r "reference/" -i "data/" --verbose
echo "Done!"
|
leanbalma/OSM
|
examples/same-data/script-same-data-bhi.sh
|
Shell
|
gpl-3.0
| 260 |
#!/bin/bash
cd matlab-code/Code/
matlab -r "run_igci_gaussian_integral valid; exit"
matlab -r "run_igci_uniform_integral valid; exit"
matlab -r "run_lingam valid; exit"
|
ssamot/causality
|
extract_matlab_valid.sh
|
Shell
|
gpl-3.0
| 169 |
#!/bin/bash
#--- RCV1: semi-supervised LSTM with 2 unsupervised LSTM embeddings
#--- and 3 unsupervised CNN embeddings.
#--- NOTE: 5GB or more GPU device memory is required.
#-----------------#
gpu=-1 # <= change this to, e.g., "gpu=0" to use a specific GPU.
mem=5 # pre-allocate 5GB device memory
source sh-common.sh
#-----------------#
ddir=data
ldir=rcv1_data # <= Change this to where RCV1 labeled data are
lstmdir=for-semi # <= Change this to where LSTM embeddings are. Downloaded if lstmdir=for-semi.
cnndir=for-semi # <= Change this to where CNN embeddings are. Downloaded if cnndir=for-semi.
#####
##### WARNING: If your system uses Big Endian (Motorola convention), you cannot use the
##### downloaded files! They are in the little-endian format (Intel convention)!
#####
options="LowerCase UTF8"
txt_ext=.txt.tok
catdic=${ddir}/rcv1-lvl2.catdic
z=l5 # to avoid name conflict with other scripts
dim=300
unsite=50; supite=100; supite1=80
suplam=0; suplamtop=0
#--- Prepare unsupervised embedding files.
lay_fn0=rcv1-LstmF-dim${dim}.lay.epo${unsite}.ReLayer0
lay_fn1=rcv1-LstmB-dim${dim}.lay.epo${unsite}.ReLayer0
for fn in $lay_fn0 $lay_fn1; do find_file $lstmdir $fn; if [ $? != 0 ]; then echo $shnm: find_file failed.; exit 1; fi; done
lay_fn0=${lstmdir}/${lay_fn0}; lay_fn1=${lstmdir}/${lay_fn1}
lay_fn2=rcv1-uns-p20.dim100.epo10.ReLayer0
lay_fn3=rcv1-unsx3-p20.dim100.epo10.ReLayer0
lay_fn4=rcv1-parsup-p20p20.dim100.epo10.ReLayer0
for fn in $lay_fn2 $lay_fn3 $lay_fn4; do find_file $cnndir $fn; if [ $? != 0 ]; then echo $shnm: find_file failed.; exit 1; fi; done
lay_fn2=${cnndir}/${lay_fn2}; lay_fn3=${cnndir}/${lay_fn3}; lay_fn4=${cnndir}/${lay_fn4}
#--- Generate input data for unsupervised LSTM embeddings
voc01=${tmpdir}/rcv1${z}-01.wmap
$exe $gpu write_word_mapping layer_type=LstmF layer0_fn=$lay_fn0 word_map_fn=$voc01
if [ $? != 0 ]; then echo $shnm: write_word_mapping failed.; exit 1; fi
for set in train test; do
rnm=${tmpdir}/rcv1${z}-${set}-p1
$prep_exe gen_regions NoSkip \
region_fn_stem=$rnm input_fn=${ldir}/rcv1-1m-${set} vocab_fn=$voc01 \
$options text_fn_ext=$txt_ext label_fn_ext=.lvl2 \
label_dic_fn=$catdic \
patch_size=1 patch_stride=1 padding=0
if [ $? != 0 ]; then echo $shnm: gen_regions failed.; exit 1; fi
done
#--- Generate input data for unsupervised CNN embeddings
wm2=${tmpdir}/rcv1${z}-2.wmap
$exe $gpu write_word_mapping layer0_fn=$lay_fn2 layer_type=Weight+ word_map_fn=$wm2
if [ $? != 0 ]; then echo $shnm: write_word_mapping failed.; exit 1; fi
wm3=${tmpdir}/rcv1${z}-3.wmap
$exe $gpu write_word_mapping layer0_fn=$lay_fn3 layer_type=Weight+ word_map_fn=$wm3
if [ $? != 0 ]; then echo $shnm: write_word_mapping failed.; exit 1; fi
wm4=${tmpdir}/rcv1${z}-4.wmap
$exe $gpu write_word_mapping layer0_fn=$lay_fn4 layer_type=Weight+ word_map_fn=$wm4
if [ $? != 0 ]; then echo $shnm: write_word_mapping failed.; exit 1; fi
for set in train test; do
for no in 2 3 4; do
p=21 # b/c we want an odd number here ...
rnm=${tmpdir}/rcv1${z}-${set}-${no}-p${p}bow
$prep_exe gen_regions NoSkip Bow \
region_fn_stem=$rnm input_fn=${ldir}/rcv1-1m-${set} vocab_fn=${tmpdir}/rcv1${z}-${no}.wmap \
$options text_fn_ext=$txt_ext RegionOnly \
patch_size=$p patch_stride=1 padding=$(((p-1)/2))
if [ $? != 0 ]; then echo $shnm: gen_regions failed.; exit 1; fi
done
done
#--- Training with labeled data and five unsupervised embeddings
mynm=lstm-5unsemb-rcv1-dim${dim}
logfn=${logdir}/${mynm}.log
csvfn=${csvdir}/${mynm}.csv
echo Training with labeled data and five unsupervised embeddings ... see $logfn and $csvfn
$exe $gpu:$mem train reg_L2=$suplam top_reg_L2=$suplamtop top_dropout=0.5 \
num_sides=5 0side0_layer_type=LstmF 0side0_layer_fn=$lay_fn0 0side1_layer_type=LstmB 0side1_layer_fn=$lay_fn1 \
0side2_layer_type=Weight+ 0side2_layer_fn=$lay_fn2 0side2_dsno=1 \
0side3_layer_type=Weight+ 0side3_layer_fn=$lay_fn3 0side3_dsno=2 \
0side4_layer_type=Weight+ 0side4_layer_fn=$lay_fn4 0side4_dsno=3 \
NoGate_i NoGate_o test_mini_batch_size=500 \
max_loss=5 inc=5000 trnname=rcv1${z}-train- tstname=rcv1${z}-test- data_dir=${tmpdir} \
test_interval=25 step_size=1 evaluation_fn=$csvfn \
layers=2 loss=Square mini_batch_size=50 momentum=0.9 random_seed=1 \
datatype=sparse dsno0=p1 dsno1=2-p21bow dsno2=3-p21bow dsno3=4-p21bow \
num_epochs=$supite ss_scheduler=Few ss_decay=0.1 ss_decay_at=$supite1 \
0layer_type=Lstm2 0nodes=500 0chop_size=50 \
1layer_type=Pooling 1num_pooling=1 1pooling_type=Max > $logfn
if [ $? != 0 ]; then echo $shnm: training failed.; exit 1; fi
rm -f ${tmpdir}/rcv1${z}*
|
riejohnson/ConText
|
examples/other-sh/lstm-5unsemb-rcv1.sh
|
Shell
|
gpl-3.0
| 5,005 |
#!/bin/sh
charging="Charging"
discharging="Discharging"
fully_charged="Fully-charged"
battery_status=""
percentage=$(upower -i /org/freedesktop/UPower/devices/battery_battery | grep -E "percentage" | sed s/"percentage:"// | tr -d "[:space:]")
state=$(upower -i /org/freedesktop/UPower/devices/battery_battery | grep -E "state" | sed s/"state:"// | tr -d "[:space:]")
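# GNU sed's \u uppercases the next character of the match, e.g.
# "discharging" -> "Discharging", so $state matches the constants above.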
state=$(echo "$state" | sed "s/.*/\u&/")
if [ "$state" = "$charging" ]
then
battery_status=$(echo "⚡: $percentage $state")
elif [ "$state" = "$discharging" ]
then
battery_status=$(echo " $percentage $state")
elif [ "$state" = "$fully_charged" ]
then
battery_status=$(echo "☻: $percentage $state")
else
battery_status=$(echo "?")
fi
echo $battery_status
|
kasramp/PinebookScripts
|
.config/i3/i3blocks_battery.sh
|
Shell
|
gpl-3.0
| 795 |
#!/bin/sh
if [ -e Makefile ]; then
make -j8 && make test
elif [ -e rules.ninja ]; then
ninja && ninja test
fi
if [ "x$?" = "x0" ]; then
lcov --base-directory . --directory . -c -o mia.info
# remove system library files
lcov --remove mia.info "/usr*" -o mia.info
lcov --remove mia.info "mia2/src*" -o mia.info
# generate the html report, note that genhtml may have some problems with a few files
# that will currently need to be removed manually
rm -rf test-coverage
genhtml -o test-coverage -t "Mia coverage" --num-spaces 2 mia.info
else
echo build or test failed
fi
|
gerddie/mia
|
coverage.sh
|
Shell
|
gpl-3.0
| 615 |
#!/usr/bin/env bash
# NOTE:
# TO GET RID OF THE VERBOSE OUTPUT, REDIRECT STDOUT TO /dev/null
#TODO: handle changing item conf from l to c
#TODO: error when syncing /etc without root permission
dotfile () {
if [ ! -f dotfile-${2}.list ]; then
echo "ERROR: configuration file 'dotfile-${2}.list' not found" >&2
exit 1
fi
if [ ! -d "$SYNC_DIR" ];then
echo "ERROR: ${SYNC_DIR} dir to sync not found on $(pwd)" >&2
exit 1
fi
echo $1 $2:
grep "^\w*[lc] \+[^[:space:]]\+\w*$" dotfile-${2}.list | \
while read line; do
ITEM_TO_SYNC=$(awk '{print $2}' <<< "$line")
if [ ! -e "$SYNC_DIR/$ITEM_TO_SYNC" ]; then
echo "ERROR: item not found @ $SYNC_DIR/$ITEM_TO_SYNC'" >&2
continue
fi
$1 "$(awk '{print $1}' <<< "$line")"
done
}
sync () {
if [[ "$1" == "c" && -e $TARGET_DIR/$ITEM_TO_SYNC ]]; then
diff {$SYNC_DIR,$TARGET_DIR}/$ITEM_TO_SYNC > /dev/null \
&& echo "DID: $ITEM_TO_SYNC" \
|| echo "WARN: different item '$TARGET_DIR/$ITEM_TO_SYNC' exists" >&2
return
fi
if [[ "$1" == "l" && -e $TARGET_DIR/$ITEM_TO_SYNC ]];then
if [[ $(stat -L -c '%i' "$TARGET_DIR/$ITEM_TO_SYNC") == \
$(stat -L -c '%i' "$SYNC_DIR/$ITEM_TO_SYNC") ]]; then
echo "DID: $ITEM_TO_SYNC"
return
fi
echo "WARN: different item '$TARGET_DIR/$ITEM_TO_SYNC' exists" >&2
return
fi
mkdir -p $(dirname "$TARGET_DIR/$ITEM_TO_SYNC")
case $1 in
l) ln -s "$SYNC_DIR/$ITEM_TO_SYNC" "$TARGET_DIR/$ITEM_TO_SYNC"
echo "LINKED: $ITEM_TO_SYNC"
;;
c) cp -r "$SYNC_DIR/$ITEM_TO_SYNC" "$TARGET_DIR/$ITEM_TO_SYNC"
echo "COPYED: $ITEM_TO_SYNC"
;;
esac
}
status () {
if [[ ! -e $TARGET_DIR/$ITEM_TO_SYNC ]]; then
echo "NOSYNC: $ITEM_TO_SYNC" >&2
return
fi
case $1 in
l)
if [[ $(stat -L -c '%i' "$TARGET_DIR/$ITEM_TO_SYNC") == \
$(stat -L -c '%i' "$SYNC_DIR/$ITEM_TO_SYNC") ]]; then
echo "MATCH : $ITEM_TO_SYNC"
else
echo "DIFFER: $ITEM_TO_SYNC" >&2
fi
;;
c)
diff {$SYNC_DIR,$TARGET_DIR}/$ITEM_TO_SYNC > /dev/null \
&& echo "MATCH : $ITEM_TO_SYNC" \
|| echo "DIFFER: $ITEM_TO_SYNC" >&2
;;
esac
}
help () {
cat << EOF
$(basename $0): sync dotfiles
USAGE: $(basename $0) COMMAND PROFILE
command:
sync: sync dotfiles of profile
status: check diff/status of profile's dotfiles
profile:
home: dotfiles of $HOME config at dotfile-home.list
etc: dotfiles of /etc config at dotfile-etc.list
EOF
}
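# A dotfile-<profile>.list holds one item per line: "l" to symlink or "c" to
# copy, followed by the path relative to the sync dir, e.g. (hypothetical):
#   l .bashrc
#   c .config/htop/htoprc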
CWD=$(readlink -f $(dirname $0))
if [ $# -ne 2 ]; then
echo "ERROR: unvalid numbber of args" >&2
help >&2
exit 1
fi
case $2 in
etc)
SYNC_DIR="$CWD/etc"
TARGET_DIR="/etc"
;;
home)
SYNC_DIR="$CWD/~"
TARGET_DIR="$HOME"
;;
*)
echo "ERROR: unsupported profile" >&2
help >&2
exit 1
;;
esac
case $1 in
sync|status)
dotfile $1 $2;;
*)
echo "ERROR: unsupported command" >&2
help >&2
exit 1;;
esac
|
lejenome/dotfiles
|
dotfiles.sh
|
Shell
|
gpl-3.0
| 2,824 |
#!/usr/bin/env bash
# Copyright 2017 Yash D. Saraf
# This file is part of BB-Bot.
# BB-Bot is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# BB-Bot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with BB-Bot. If not, see <http://www.gnu.org/licenses/>.
##Build busybox auto-magically
cd "$(realpath `dirname $0`/..)"
git clone --quiet https://github.com/yashdsaraf/busybox.git
cd busybox
current=nosel
build() {
if [[ $1 == "all" ]]
then
build arm arm64 x86 x86_64 mips mips64 mipseb
return 0
fi
while (( $# ))
do
# if [[ $1 == "mipseb" && $current == "sel" ]]
# then
# shift 1
# continue
# fi
: ${toolc:=$(eval echo \$`tr 'a-z' 'A-Z' <<< $1`)}
sysr=$(find $toolc -name sysroot -type d)
cross=`ls $toolc/bin | grep -E ".+-rorschack-linux-.+gcc$"\
| awk -Fgcc '{print $1}'`
sed -i "s|.*CONFIG_SYSROOT.*|CONFIG_SYSROOT=\"$sysr\"|" .config
echo "Building $1 busybox-$current --"
make clean &>/dev/null
PATH=$toolc/bin:$PATH LD_LIBRARY_PATH=$toolc/lib ARCH=$1 CROSS_COMPILE=$cross\
CFLAGS="-Os -I$toolc/include" make -j$CORES >/dev/null || exit $?
mv busybox ../out/busybox-$1-$current
unset toolc
shift 1
done
}
make mrproper
cp conf_no_selinux .config
build $TO_BUILD
[[ $TO_BUILD == "mipseb" ]] && exit
current=sel
cp conf_selinux .config
build $TO_BUILD
|
yashdsaraf/bb-bot
|
scripts/build-bb.sh
|
Shell
|
gpl-3.0
| 1,894 |
#!/bin/bash
set -e
FORMAT_OUT=${TMPDIR:-/tmp}/clang-format-diff.out
find src -name "*.h" -o -name "*.cc" | xargs clang-format-11 --verbose -i
# Check if we got any diff, then print it out in the CI.
# TODO: make these suggested diffs in the pull request.
git diff > ${FORMAT_OUT}
if [ -s ${FORMAT_OUT} ]; then
echo "== There were changes running the formatter =="
cat ${FORMAT_OUT}
echo "To locally fix, run .github/bin/run-clang-format.sh then commit and push."
exit 1
fi
exit 0
|
hzeller/beagleg
|
.github/bin/run-clang-format.sh
|
Shell
|
gpl-3.0
| 505 |
#! /bin/bash
# Computes some statistics about the parsing performance
# Takes as an input the directory created by learnAbstractSyntax.sh
#
CURDIR=`dirname $0`
PYDIR=$CURDIR/src/py
#shflags
. $CURDIR/shflags
DEFINE_string 'dir' './mwes-h10000' 'directory where the parsed sentence are stored' 'd'
FLAGS "$@" || exit $?
eval set -- "${FLAGS_ARGV}"
DIR=${FLAGS_dir}
NUMSENTENCESFULLYPARSED_SL=`cat $DIR/trees.clean.sl | grep "^[^?]" | wc -l`
NUMSENTENCESFULLYPARSED_TL=`cat $DIR/trees.clean.tl | grep "^[^?]" | wc -l`
NUMSENTENCESFULLYPARSED_BOTH=`paste -d '|' $DIR/trees.clean.sl $DIR/trees.clean.tl | sed 's:^[?].*:BUUU:' | sed 's:|?.*:BUUU:' | grep -v "BUUU" | wc -l`
echo "Sentences fully parsed SL: $NUMSENTENCESFULLYPARSED_SL"
echo "Sentences fully parsed TL: $NUMSENTENCESFULLYPARSED_TL"
echo "Sentences fully parsed itersection: $NUMSENTENCESFULLYPARSED_BOTH"
echo "Summary: $NUMSENTENCESFULLYPARSED_SL $NUMSENTENCESFULLYPARSED_TL $NUMSENTENCESFULLYPARSED_BOTH"
|
vitaka/apertiumToFromGF
|
reportParsingPerformance.sh
|
Shell
|
gpl-3.0
| 977 |
#!/bin/bash
# Javascript builder/packager script
# - Flatten all required js files into a single js.
# - Add version number if supplied.
# - Exclude tests.
# - package in a directory along with any extra assets, such
# as css files.
# Set this to specify the files to build:
EXT_DIR="js-logger"
# => build/EXT_DIR-<version-string>/
# - version_string is set via the build script.
EXT_MAIN="logger"
# => build/EXT_DIR-<version-string>/EXT_MAIN.js
FILE_ORDER="pretty_print.js Logger.js"
# List of js files to minifier and squash into a file.
# eg "file1 file2 file3"
OTHER_FILES=
# Optional; put other files into EXT_DIR such as
# css files for packaging with your js.
# eg "file1 file2 file3"
# Leave as empty string otherwise.
usage(){
cat <<-EOF
1) You must specify FILE_ORDER which is set at the top of this
script.
2) You must specify a js minifier program in JSMINIFY variable.
Use 'cat' if you don't have one. Make sure it is in your
environment or set it in this script.
export JSMINIFY=/path/to/minifier
EOF
}
err2(){
cat <<-EOF
Don't think you are in the correct directory.
Make sure you are in the root directory of this project.
EOF
}
check(){
test -z "$FILE_ORDER" && usage && return 0
test -z "$JSMINIFY" && usage && return 0
test -e "LICENSE" || (err2 && return 0)
return 1
}
# Usage: build v0.4.3 => build/unitjs-v0.4.3.js
build() {
check && return 1
version=$1
test -n "$version" && version="-${version}"
dir=build/$EXT_DIR${version}/
main=$dir/$EXT_MAIN.js
echo $dir
mkdir -p $dir
(echo '/*'; cat COPYING; echo '*/') >$main
cat $FILE_ORDER |$JSMINIFY >>$main
test -n "$OTHER_FILES" && cp $OTHER_FILES $dir/
}
build $*
|
danielbush/jslogger
|
build.sh
|
Shell
|
gpl-3.0
| 1,732 |
#!/bin/bash
#############################################################################################################
## ##
## This script is Free Software, it's licensed under the GPLv3 and has ABSOLUTELY NO WARRANTY ##
## you can find and read the complete version of the GPLv3 @ http://www.gnu.org/licenses/gpl.html ##
## ##
#############################################################################################################
## ##
## Please see the README file for any informations such as FAQs, Version History and TODO ##
## ##
#############################################################################################################
## ##
## Name: dellbiosupdate.sh ##
## Version: 0.1.2 ##
## Date: Sat, Mar 28 2009 ##
## Author: Callea Gaetano Andrea (aka cga) ##
## Contributors: ##
## Language: BASH ##
## Location: http://github.com/cga/dellbiosupdate.sh/tree/master ##
## ##
#############################################################################################################
## let's roll!!!
## the script has to be run as root, let's make sure of that:
if [[ ${EUID} != 0 ]] ; then
echo
echo "You must run this script as root!! See FAQs in REAMDE for information"
echo
exit 1
fi
## here the scripts checks if the needed tools are installed:
if which dellBiosUpdate curl html2text >/dev/null 2>&1 ; then
sleep 1
else
## if the script doesn't find the needed tools..........
echo
echo "Either libsmbios, html2text or curl was NOT found! should I install it for you?"
echo
## .........you get prompted to install libsmbios for your specific DISTRO:
select DISTRO in "Debian, Ubuntu and derivatives" "Red Hat, Fedora, CentOS and derivatives" "SuSE, OpenSuSE and derivatives" "Arch and derivatives" "Gentoo and derivatives" "Quit, I will install it myself" "Ok, I'm done installing. Let's move on!" ; do
case $DISTRO in
"Debian, Ubuntu and derivatives") apt-get install libsmbios-bin curl html2text ;;
"Red Hat, Fedora, CentOS and derivatives") yum install firmware-addon-dell libsmbios curl html2text ;;
"SuSE, OpenSuSE and derivatives") zypper install libsmbios-bin curl html2text ;;
"Arch and derivatives") pacman -S libsmbios curl html2text ;;
"Gentoo and derivatives") emerge -av libsmbios curl html2text ;;
"Quit, I will install it myself") echo ; echo "Please install libsmbios, html2text and curl"; echo ; exit 2 ;;
"Ok, I'm done installing. Let's move on!") break ;;
esac
done
fi
## now the script shows helpful information about your DELL such as libsmbios version, SystemId (we need this) and BIOS version (we need this):
echo
echo "Here is some useful information about your DELL; some of it is needed to update the BIOS:"
echo
getSystemId
echo
## now let's get the data we need in order to get the right BIOS: "Syste ID" and "BIOS Version":
SYSTEM_ID=$(getSystemId | grep "System ID:" | cut -f6 -d' ')
BIOS_VERSION_BASE=$(getSystemId | grep "BIOS Version:" | cut -f3 -d' ')
## plus the model of your computer:
## original version with cut; i leave it here just in case.
#COMPUTER=$(getSystemId | grep "Product Name:" | cut -f3,4,5 -d' ')
## improved version with awk since user "lvillani" told me he couldn't get the ${COMPUTER} set properly.
## please test and let me know if it works good for you (tm)
COMPUTER=$(getSystemId | grep "Product Name:" | awk -F\: '{print $NF}')
## now we 1) notify the current installed BIOS and 2) fetch all the available BIOS for your system.........
echo "Your currently installed BIOS Version is ${BIOS_VERSION_BASE}, getting the available BIOS updates for your ${COMPUTER}....."
echo
BIOS_AVAILABLE=($(curl http://linux.dell.com/repo/firmware/bios-hdrs/ 2>/dev/null | html2text -nobs | grep -i "system_bios_ven_0x1028_dev_${SYSTEM_ID}_version_*" | cut -f2 -d' ' | tr -d '/' | sed 's/.*_//'))
## ......we list them..........
echo "These are the available BIOS updates available for your ${COMPUTER}:"
echo
## just to make sure PS3 doesn't get changed forever:
OLDPS3=$PS3
COLUMNS=10
PS3=$'\nNote that you actually *can* install the latest BIOS update without updating the immediately subsequent version.\n\nChoose the BIOS Version you want to install by typing the corresponding number: '
## ......and we make them selectable:
select BIOS_VERSION in "${BIOS_AVAILABLE[@]}" "I already have BIOS Version ${BIOS_VERSION_BASE} installed" ; do
## we offer option to quit script on user will if BIOS Version is already installed
if [ "$BIOS_VERSION" == "I already have BIOS Version ${BIOS_VERSION_BASE} installed" ] ; then
echo
echo "Thanks for using this script; now you know you have a tool to check if new BIOS versions are available ;)"
echo
exit 3
elif [[ $BIOS_VERSION ]] ; then
break
fi
done
echo
COLUMNS=
PS3=$OLDPS3
## now that we have all the data, we need to set the URL to download the right BIOS:
URL=http://linux.dell.com/repo/firmware/bios-hdrs/system_bios_ven_0x1028_dev_${SYSTEM_ID}_version_${BIOS_VERSION}/bios.hdr
## if an unknown bios.hdr version exist then mv it and append $DATE; finally download the bios.hdr file with the version saved in the file name:
if [ -f "~/bios.hdr" ] ; then
echo "I found an existing BIOS file (~/bios.hdr) of which I don't know the version and I'm going to back it up as ~/bios-$(date +%Y-%m-%d).hdr"
echo
sleep 1
mv ~/bios.hdr ~/bios-$(date +%Y-%m-%d).hdr
sleep 1
echo "Downloading selected BIOS Version ${BIOS_VERSION} for your ${COMPUTER} and saving it as ~/bios-${BIOS_VERSION}.hdr"
echo
sleep 1
curl ${URL} -o ~/bios-${BIOS_VERSION}.hdr
echo
else
echo "Downloading selected BIOS Version ${BIOS_VERSION} for your ${COMPUTER} and saving it as ~/bios-${BIOS_VERSION}.hdr"
echo
sleep 1
curl ${URL} -o ~/bios-${BIOS_VERSION}.hdr
echo
fi
## now we check that the BIOS Version you chose is appropriate for the computer:
echo "Checking if BIOS Version ${BIOS_VERSION} for your ${COMPUTER} is valid............."
sleep 3
echo
## if not the script will exit and remove the downloaded BIOS:
dellBiosUpdate -t -f ~/bios-${BIOS_VERSION}.hdr >/dev/null 2>&1 ; STATUS_FAIL=$?
if [[ ${STATUS_FAIL} != 0 ]] ; then
echo "WARNING: BIOS HDR file BIOS version appears to be less than or equal to current BIOS version."
echo "This may result in bad things happening!!!!"
echo
rm -f ~/bios-${BIOS_VERSION}.hdr
echo "The downloaded ~/bios-${BIOS_VERSION}.hdr has been deleted."
echo
exit 4
## if BIOS is valid we load the needed DELL module and proceed with the update:
else
echo "This is a valid BIOS Version for your ${COMPUTER}, telling the operating system I want to update the BIOS:"
echo
modprobe dell_rbu
echo "The necessary 'dell_rbu' module has been loaded"
echo
## the actual update:
dellBiosUpdate -u -f ~/bios-${BIOS_VERSION}.hdr
echo
fi
## to complete the update we must *soft* reboot:
echo
read -p "In order to update the BIOS you *must* reboot your system, do you want to reboot now? [Y/n]"
if [[ $REPLY = [yY] ]] ; then
echo
echo "Rebooting in 5 seconds. Press CTRL+c to NOT reboot."
sleep 5
reboot
else
echo
echo "Don't forget to reboot your system or the BIOS will NOT update!!"
fi
exit 0
|
ruphy/dellbiosupdate.sh
|
dellbiosupdate.sh
|
Shell
|
gpl-3.0
| 8,342 |
#!/usr/bin/env zsh
export MPW_FULLNAME='Langston Barrett'
export MPW_SITETYPE=x
# https://github.com/Lyndir/MasterPassword/blob/master/platform-independent/cli-c/mpw.bashrc
mpw() {
_copy() {
if hash pbcopy 2>/dev/null; then
pbcopy
elif hash xclip 2>/dev/null; then
xclip -selection clip
elif hash xsel 2>/dev/null; then
xsel -ib
else
cat; echo 2>/dev/null
return
fi
echo >&2 "Copied!"
}
# Empty the clipboard
:| _copy 2>/dev/null
# Start Master Password and copy the output.
printf %s "$(command mpw -t x "$@")" | _copy
}
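# Usage sketch: `mpw example.com` derives the site password with mpw(1) and
# copies it via whichever clipboard helper (pbcopy/xclip/xsel) is available.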
passmpw () {
pass -c master
}
|
siddharthist/dots
|
files/zsh.d/mpw.zsh
|
Shell
|
mpl-2.0
| 625 |
#!/bin/bash
var0=0
LIMIT=10
while [ $var0 -lt $LIMIT ]
do
echo -n "$var0 "
var0=`expr $var0 + 1`
done
echo
exit 0
|
cxsjabc/basic
|
bash/_basic/while.sh
|
Shell
|
agpl-3.0
| 123 |
#!/bin/bash
rel=../..
if [ x"$TOP" == x ]; then TOP=`pwd`/$rel; fi
. $rel/linux-ow.sh
dos=1 # MS-DOS
dospc98=1 # PC-98
if [ "$1" == "clean" ]; then
do_clean
rm -fv boot*.dsk
exit 0
fi
if [ "$1" == "disk" ]; then
dd if=/dev/zero of=bootibm.dsk bs=512 count=2880 || exit 1
dd if=dos86s/bootrep.com of=bootibm.dsk bs=512 conv=notrunc || exit 1
dd if=/dev/zero of=bootpc98.dsk bs=512 count=2880 || exit 1
dd if=d9886s/bootrep.com of=bootpc98.dsk bs=512 conv=notrunc || exit 1
fi
if [[ "$1" == "build" || "$1" == "" ]]; then
make_buildlist
begin_bat
what=all
if [ x"$2" != x ]; then what="$2"; fi
if [ x"$3" != x ]; then build_list="$3"; fi
for name in $build_list; do
do_wmake $name "$what" || exit 1
bat_wmake $name "$what" || exit 1
done
end_bat
fi
|
joncampbell123/doslib
|
tiny/bootrep/make.sh
|
Shell
|
lgpl-2.1
| 850 |
#!/bin/bash
# Copyright (C) 2009,2010 Palo Alto Research Center, Inc.
#
# This work is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 2 as published by the
# Free Software Foundation.
# This work is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE.
# $1 = download directory
# $2 = canonical repostory
# $3 = package file name
# $4 = http options
DOWNDIR=$1
REPO=$2
PKG=$3
OPTS=$4
# First try to get the package from ${SENPKG_DIR}, if not
# get it from $1
CURL=`which curl`
WGET=`which wget`
OPENSSL=`which openssl`
if [[ "$OPTS" != "" ]]; then
HTTP_OPTS="?"$OPTS
echo "Using opts $HTTP_OPTS"
else
HTTP_OPTS=""
fi
function getit {
if [[ "$CURL" != "" ]]; then
echo "curl -L ${1}${HTTP_OPTS} > $2"
$CURL -L "${1}${HTTP_OPTS}" > $2
elif [[ "$WGET" != "" ]]; then
echo "$WGET \"${1}${HTTP_OPTS}\""
$WGET "${1}${HTTP_OPTS}"
else
echo "Could not find curl or wget!"
exit -2
fi
}
function checkhash {
if [[ ! -r hashes/$PKG.sha1 ]]; then
echo "Missing hash of $REPO"
exit -3
fi
if [[ ! -r ${DOWNDIR}/$PKG ]]; then
echo "File $PKG missing in directory '${DOWNDIR}'"
return 1;
fi
# check the hash
HASH_true=`cat hashes/$PKG.sha1`
HASH_file=`${OPENSSL} dgst -sha1 ${DOWNDIR}/$PKG | awk '{print $2}'`
if [[ "${HASH_true}" == "${HASH_file}" ]]; then
echo "Match hash for ${DOWNDIR}/$PKG"
exit 0
else
echo "Hash mismatch for $REPO, will re-download"
echo " true: $HASH_true"
echo " file: $HASH_file"
return 1
fi
}
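# Hash entries are one hex sha1 digest per package; a new one can be produced
# with, e.g.: openssl dgst -sha1 ${DOWNDIR}/$PKG | awk '{print $2}' > hashes/$PKG.sha1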
if [[ "${DOWNDIR}" == "" ]]; then
echo "You must set DOWNDIR"
exit -1
fi
if [[ "${OPENSSL}" == "" ]]; then
echo "Could not find openssl"
exit -1
fi
# If it is readable in the directory and its sha1 hash matches,
# exit
if [[ -r ${DOWNDIR}/$PKG ]]; then
checkhash $PKG
fi
echo "Downloading $PKG from $REPO"
mkdir -p ${DOWNDIR}
pushd ${DOWNDIR}
getit $REPO/$PKG $PKG
popd
checkhash $PKG
|
yyhpys/ccnx-trace-interest
|
android/external/download.sh
|
Shell
|
lgpl-2.1
| 2,038 |
#!/bin/bash
# WARNING: completely replaces htdocs by new version without
# backups other than the cvs repository.
# It is assumed that old versions of htdocs can be obtained using
# cvs export with a different tag or date
DOCBOOKDIR=htdocs/resources/docbook-manual
DEBIANDIR=htdocs/resources/debian
PLOTEXAMPLES=htdocs_plot_examples.tgz
echo "This script is obsolete. In order to update the PLplot web site,"
echo "Please, use the Makefile in the www CVS module."
echo "(Read the README file for instructions)."
exit 0
cd /home/groups/p/pl/plplot
if [ ! -d $DOCBOOKDIR ] ; then
echo "Warning: The DocBook directory is missing in the htdocs hierarchy."
echo " Install it from the CVS checked out sources of the docbook module."
DOCBOOKDIR=""
else
mv $DOCBOOKDIR docbook-manual-$$
fi
if [ ! -f $PLOTEXAMPLES ] ; then
echo "Warning, $PLOTEXAMPLES does not exists."
echo "Follow the instructions in scripts/htdocs-gen_plot-examples.sh to"
echo "generate it."
fi
test -d $DEBIANDIR && mv $DEBIANDIR debian-$$
if [ -z "$USER" ] ; then
echo -n "Login name for SourceForge CVS: "
read USER
fi
rm -rf htdocs
CVS_RSH=ssh CVSROOT=$USER@cvs1:/cvsroot/plplot \
cvs export -r HEAD -d htdocs www
test -n "$DOCBOOKDIR" && mv docbook-manual-$$ $DOCBOOKDIR
test -n "$DEBIANDIR" && mv debian-$$ $DEBIANDIR
test -f $PLOTEXAMPLES && tar xzf htdocs_plot_examples.tgz
chmod -R g=u htdocs
|
pemryan/DAKOTA
|
packages/plplot/scripts/htdocs-replace.sh
|
Shell
|
lgpl-2.1
| 1,407 |
#!/bin/sh
CP="conf/;classes/;lib/*"
SP=src/java/
/bin/mkdir -p classes/
javac -sourcepath $SP -classpath $CP -d classes/ src/java/nxt/*.java src/java/nxt/*/*.java || exit 1
/bin/rm -f tzr.jar
jar cf tzr.jar -C classes . || exit 1
/bin/rm -rf classes
echo "tzr.jar generated successfully"
|
metrospark/czarcraft
|
win-compile.sh
|
Shell
|
lgpl-2.1
| 293 |
#!/bin/sh
test_begin "3D-HEVC"
do_test "$MP4BOX -add $EXTERNAL_MEDIA_DIR/3D-HEVC/stream_bbb.bit:fmt=HEVC -new $TEMP_DIR/test.mp4" "import"
do_hash_test $TEMP_DIR/test.mp4 "import"
do_playback_test "$TEMP_DIR/test.mp4" "play"
test_end
test_begin "add-subsamples-HEVC"
do_test "$MP4BOX -add $EXTERNAL_MEDIA_DIR/3D-HEVC/stream_bbb.bit:fmt=HEVC:subsamples -new $TEMP_DIR/test.mp4" "add-subsamples-HEVC"
do_hash_test $TEMP_DIR/test.mp4 "import"
test_end
|
rauf/gpac
|
tests/scripts/3D-HEVC.sh
|
Shell
|
lgpl-2.1
| 457 |
function build {
if [ "$1" == "jphonelite-android" ]; then
return
fi
if [ "$1" == "jfrdp" ]; then
return
fi
cd $1
ant jar
if [ "$1" == "plymouth-theme-jflinux" ]; then
sudo ant install-ubuntu -Dbits=32
elif [ "$1" == "jflogon" ]; then
sudo ant install-ubuntu -Dbits=32
else
sudo ant install -Dbits=32
fi
ant deb32
cd ..
}
for i in *; do
if [ -d $i ]; then
build $i
fi
done
|
ericomattos/javaforce
|
projects/buildAllUbuntu32.sh
|
Shell
|
lgpl-3.0
| 426 |
#!/bin/bash
echo "" > /.bahub.env
if [[ ! -f /cron ]]; then
echo " !!! Warning !!!: No /cron file available, should be a crontab-syntax file"
fi
# setup dns
echo "nameserver 1.1.1.1" > /etc/resolv.conf
echo "nameserver 8.8.8.8" >> /etc/resolv.conf
if [[ "$@" ]]; then
echo "BAHUB_ARGS=\"$@\"" >> /.bahub.env
fi
# test version of Bahub mounted via volume on test environment (eg. on CI)
if [[ -d /test ]]; then
cd /test && make install
fi
# on developer environment you may not want to override production backups
if [[ ${DISABLE_SCHEDULED_JOBS} == "1" ]]; then
echo "" > /cron
echo "" > /etc/crontabs/root
fi
cp /cron /etc/crontabs/root
supervisord -c /bahub/supervisord.conf
while true; do
log_file=$(ls /var/log/bahub/*.log|sort -r|head -1)
if [[ -f ${log_file} ]]; then
echo " >> Browsing log file ${log_file}"
tail -f ${log_file}
fi
echo " .. Previous log file was deleted or just 'tail' crashed, finding new log file..."
sleep 5
done
|
Wolnosciowiec/file-repository
|
client/docker_entrypoint.sh
|
Shell
|
lgpl-3.0
| 1,006 |
#!/bin/bash
source $HOME/.dumbscripts/quodlibet-functions.sh
FIRST_QUEUE="$(save-queue)"
# monitor for song changes & save queue
dbus-monitor --profile "interface='net.sacredchao.QuodLibet',member='SongStarted'" |
while read -r line; do
# this keeps getting reset somehow
sed -i 's/^ascii = true$/ascii = false/' $HOME/.quodlibet/config
# avoid saving an empty queue
# & avoid bug where print-queue returns nothing, resulting in just
# the now-playing song being in the save-queue output
# & skip if this particular queue has already been saved
# & avoid bug where dbus-monitor tries to save the first queue several
# times when it first starts running
if [ "$(save-queue)" = ""] \
|| [ "$(save-queue | wc -l)" = "1" ] \
|| [ "$(save-queue)" = "$(<$QUEUE_LOCAL)" ] \
|| [ "$(save-queue)" = "$FIRST_QUEUE" ]; then
continue
else
# prefer sponge from moreutils: less buggy writing a lot at once
if hash sponge 2>/dev/null; then
save-queue | sponge $QUEUE_LOCAL
else
save-queue > $QUEUE_LOCAL
fi
sleep 1
cp $QUEUE_LOCAL $QUEUE_FILE
fi
done
|
Jimi-James/odds-n-ends
|
dumbscripts/quodlibet-monitor.sh
|
Shell
|
lgpl-3.0
| 1,105 |
#!/usr/bin/bash
iasRun -l s org.scalatest.run org.eso.ias.basictypes.test.TestInOut
iasRun -l s org.scalatest.run org.eso.ias.basictypes.test.TestValidity
iasRun -l s org.scalatest.run org.eso.ias.basictypes.test.TestIdentifier
iasRun -l s org.scalatest.run org.eso.ias.basictypes.test.TestTemplatedIdentifier
iasRun -l s org.scalatest.run org.eso.ias.basictypes.test.TestJavaConversion
iasRun -l j org.junit.platform.console.ConsoleLauncher -c org.eso.ias.basictypes.test.IasValueJsonSerializerTest
iasRun -l j org.junit.platform.console.ConsoleLauncher -c org.eso.ias.basictypes.test.IASValueTest
iasRun -l s org.scalatest.run org.eso.ias.basictypes.test.TestAlarm
iasRun -l s org.scalatest.run org.eso.ias.basictypes.test.TestOperationalMode
testIasValue
testTimestamp
|
IntegratedAlarmSystem-Group/ias
|
BasicTypes/src/test/runTests.sh
|
Shell
|
lgpl-3.0
| 772 |
/opt/jdk1.6/jre/bin/java -cp target/classes:libs/* com.ojt.OJTLauncher $*
|
remiguitreau/ojt
|
ojt-core/ojt.sh
|
Shell
|
lgpl-3.0
| 75 |
#! /bin/bash
IFS=$'\t\n'
# echo -e "DigWF_ID\tOCLC\tImagePath\tImageCount\tImageDPI\tImageBitDepth\tMARC\tARKPID\tVolume\tCollection_ID\tOwningLibraryID\tIA_ID"
while read line
do array=($line)
# S3 API Key (supplied by Internet Archive to users)
accesskey=""
secret=""
declare -a collections
collections[17]="africanamericanliterature" # African American Imprints
collections[10]="" # Atlanta City Directories
collections[21]="baedeckers" # Baedecker Travel Guides
collections[8]="" # Brittle Books
collections[13]="civilwardocuments" # Civil War Imprints
collections[18]="" # Early Northern European
collections[16]="" # Emory Publications"
collections[15]="" # Emory Yearbooks"
collections[7]="" # General
collections[12]="" # Georgia State House Journals
collections[14]="" # Georgia State Senate Journals
collections[4]="" # MARBL
collections[1]="americanmethodism" # Methodism
collections[11]="americanmethodism" # Methodist Conference Minutes
collections[9]="regimentalhistories" # Regimental Histories
collections[5]="" # Theology Reference
collections[19]="tripledeckers"
collections[2]="yellowbacks"
collections[22]="medicalheritagelibrary" # Medical Heritage
declare -a libraries
libraries[1]="Robert W. Woodruff Library"
libraries[2]="Goizueta Business Library"
libraries[3]="Marian K. Heilbrun Music and Media Library"
libraries[4]="Woodruff Health Sciences Center Library"
libraries[5]="James S. Guy Chemistry Library"
libraries[6]="Pitts Theology Library"
libraries[7]="Hugh F. Macmillan Library"
libraries[8]="Manuscript, Archives and Rare Book Library"
libraries[9]="Oxford College Library"
digwf_id=${array[0]}
oclc=${array[1]}
if [[ ${array[2]} == *E* ]]; then
barcode=''
elif [[ ${array[2]} == *NULL* ]]; then
barcode=''
else
barcode=$(printf "%012d\n" ${array[2]})
fi
imagepath=`echo "${array[4]}${array[14]}"`
imagecount=`find ${imagepath} -maxdepth 1 -type f \( -iname "*.tif" -o -iname "*.jpg" \) 2>/dev/null | wc -l`
imagesample=`find ${imagepath} -maxdepth 1 -type f \( -iname "*.tif" -o -iname "*.jpg" \) 2>/dev/null | head -8 | tail -1`
imagedpi=`exiftool $imagesample | grep "X Resolution" | awk '{ print $4 }'`
imagebitdepth=`exiftool $imagesample | grep "Bits Per Sample" | awk '{ print $5 }'`
arkpid=${array[20]}
volume=${array[22]}
collection_id=${array[23]}
collection=${collections[$collection_id]}
owninglibrary_id=${array[10]}
ia_id=${array[31]}
marcxmlpath=${array[4]}/${array[19]}
owninglibrary_id=`cat ${marcxmlpath} | xmllint --format - | grep -e 'code=\"5\"' | grep -E 'GEU|GEU-T|GEU-M|GEU-S|GEU-L|G0xC|GOxC' | head -1 -`
if [[ $owninglibrary_id == *GEU-M* ]]; then
owninglibrary=${libraries[4]}
elif [[ $owninglibrary_id == *GEU-T* ]]; then
owninglibrary=${libraries[6]}
elif [[ $owninglibrary_id == *GEU-S* ]]; then
owninglibrary=${libraries[8]}
elif [[ $owninglibrary_id == *GEU-L* ]]; then
owninglibrary=${libraries[7]}
elif [[ $owninglibrary_id == *G0xC* ]]; then
owninglibrary=${libraries[9]}
elif [[ $owninglibrary_id == *GEU* ]]; then
owninglibrary=${libraries[1]}
fi
: '
echo -en "${digwf_id}\t${oclc}\t"
echo -en "${barcode}\t"
echo -en "${imagepath}\t"
echo -en "${imagecount}\t"
echo -en "${imagedpi}\t"
echo -en "${imagebitdepth}\t"
if [ -a ${marcxmlpath} ]; then
echo -en "MARCExists\t"
else
echo -en "NoMARC\t"
fi
echo -en "${arkpid}\t"
echo -en "${volume}\t"
echo -en "${collection}\t"
echo -en "${owninglibrary}\t"
echo -en "${ia_id}\t"
echo
'
if [ ! ${volume} == "NULL" ]; then
ia_volume=$volume
else
ia_volume=''
fi
# Destroy and replace the _meta.xml file
echo curl --location \
--header 'x-amz-auto-make-bucket:1' \
--header 'x-archive-ignore-preexisting-bucket:1' \
--header 'x-archive-queue-derive:0' \
--header 'x-archive-meta-mediatype:texts' \
--header 'x-archive-meta-sponsor:Emory University, '"$owninglibrary"'' \
--header 'x-archive-meta-contributor:Emory University, '"$owninglibrary"'' \
--header 'x-archive-meta01-collection:emory' \
--header 'x-archive-meta02-collection:'"$collection"'' \
--header 'x-archive-meta-ppi:'"$imagedpi"'' \
--header 'x-archive-meta-imagecount:'"$imagecount"'' \
--header 'x-archive-meta-pid:'"$arkpid"'' \
--header 'x-archive-meta-barcode:'"$barcode"'' \
--header 'x-archive-meta-volume:'"$ia_volume"'' \
--header 'authorization: LOW '"$accesskey"':'"$secret"'' \
--request PUT --header 'content-length:0' \
http://s3.us.archive.org/${ia_id}
curl --location \
--header 'x-amz-auto-make-bucket:1' \
--header 'x-archive-ignore-preexisting-bucket:1' \
--header 'x-archive-queue-derive:0' \
--header 'x-archive-meta-mediatype:texts' \
--header 'x-archive-meta-sponsor:Emory University, '"$owninglibrary"'' \
--header 'x-archive-meta-contributor:Emory University, '"$owninglibrary"'' \
--header 'x-archive-meta01-collection:emory' \
--header 'x-archive-meta02-collection:'"$collection"'' \
--header 'x-archive-meta-ppi:'"$imagedpi"'' \
--header 'x-archive-meta-imagecount:'"$imagecount"'' \
--header 'x-archive-meta-pid:'"$arkpid"'' \
--header 'x-archive-meta-barcode:'"$barcode"'' \
--header 'x-archive-meta-volume:'"$ia_volume"'' \
--header 'authorization: LOW '"$accesskey"':'"$secret"'' \
--request PUT --header 'content-length:0' \
http://s3.us.archive.org/${ia_id}
# Replace the MARCXML file with a fixed version
# marcxmlpath=${array[4]}/${array[19]}
# marcxml=`cat ${marcxmlpath}`
# marcxmlfixed=${marcxml/\[electronic resource\]/}
# echo "$marcxmlfixed" > /tmp/marcxmlfixed.xml
# curl -v --location \
# --header 'authorization: LOW '"$accesskey"':'"$secret"'' \
# --upload-file /tmp/marcxmlfixed.xml \
# http://s3.us.archive.org/${ia_id}/${ia_id}_marc.xml
# rm /tmp/marcxmlfixed.xml
sleep 10
done
|
jkylefenton/digitization_scripts
|
internet_archive/bak_ia_legacy_fixmdata.sh
|
Shell
|
unlicense
| 5,631 |
#! /bin/bash
# put rdf+json from a remote location
# copy it from the default repository to the -write instance
# the request includes the Location header, but no content
initialize_repository --repository "${STORE_REPOSITORY}-write"
echo PUT-rj w/location PUT > $ECHO_OUTPUT
curl_graph_store_update -X PUT -o /dev/null \
-H "Location: ${STORE_URL}/${STORE_ACCOUNT}/${STORE_REPOSITORY}/service" \
-H "Content-Type: application/n-quads" \
--repository "${STORE_REPOSITORY}-write" <<EOF
EOF
echo PUT-rj location GET > $ECHO_OUTPUT
curl_graph_store_get \
-H "Accept: application/n-quads" --repository "${STORE_REPOSITORY}-write" \
| tr -s '\n' '\t' \
| fgrep "default object" \
| fgrep -q "named object"
|
dydra/http-api-tests
|
sparql-graph-store-http-protocol/location/PUT-nquads.sh
|
Shell
|
unlicense
| 732 |
#!/bin/bash
CURRDIR=$(dirname "${0}")
source ${CURRDIR}/common.sh
echo "Building binary..."
${BUILDDIR}/${VENV}/${BIN}/pyinstaller${EXE} \
--workpath=${BUILDDIR}/${DIST}/build \
--distpath=${BUILDDIR}/${DIST} \
${BASEDIR}/binary.spec
echo "Binary built."
|
mesosphere/dcos-cli
|
cli/bin/binary.sh
|
Shell
|
apache-2.0
| 269 |
function docker_active_cont_base() {
docker ps | sed -r 's-^[a-z0-9]*\W*([a-zA-Z0-9\.:_\/-]*)\W.*\s([a-zA-Z0-9\._-]*)$-C_R: \2 \-> \1-' | tail -n +2 | sed -e 's-#-\\-g'| awk '{print "\033[1;32m"$0"\033[0m"}'
}
function docker_cont_base() {
docker_active_cont_base; \
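# Exited containers, printed cyan as "C_H: <name> -> <image>".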
docker ps -a | grep Exited | sed -r 's-^[a-zA-Z0-9]*\W*([a-zA-Z0-9\.:_\/-]*)\W.*\s([a-zA-Z0-9\._-]*)$-C_H: \2 \-> \1-' | sed -e 's-#-\\-g'| awk '{print "\033[1;36m"$0"\033[0m"}';\
}
function docker_images_base() {
docker images | awk '{print "\033[1;34mIMG: "$1":"$2" \033[0m"}' | tail -n +2
}
function docker_info_base() {
docker_cont_base; \
docker_images_base
}
function docker_info() {
if [ $# -gt 0 ]; then
docker_info_base | grep $1
else
docker_info_base
fi
}
function docker_cont() {
if [ $# -gt 0 ]; then
docker_cont_base | grep $1
else
docker_cont_base
fi
}
function docker_active_cont() {
if [ $# -gt 0 ]; then
docker_active_cont_base | grep $1
else
docker_active_cont_base
fi
}
function docker_images() {
if [ $# -gt 0 ]; then
docker_images_base | grep $1
else
docker_images_base
fi
}
alias _dca='docker_active_cont'
alias _dc='docker_cont'
alias _da='docker_info'
alias _di='docker_images'
|
OkieOth/bash_conf
|
docker_stuff.sh
|
Shell
|
apache-2.0
| 1,320 |
#!/bin/bash
pwd
./YCgCo2cmy.exe
./YCgCo2cmyk.exe
./YCgCo2gray.exe
./YCgCo2hsi.exe
./YCgCo2hsl.exe
./YCgCo2hsv.exe
./YCgCo2hwb.exe
./YCgCo2lab.exe
./YCgCo2LabCH.exe
./YCgCo2lms.exe
./YCgCo2lms_BFD.exe
./YCgCo2lms_K65.exe
./YCgCo2lms_KE.exe
./YCgCo2lms_MCAT02.exe
./YCgCo2luv.exe
./YCgCo2LuvCH.exe
./YCgCo2rgb.exe
./YCgCo2xyy.exe
./YCgCo2xyz.exe
./YCgCo2YCgCo.exe
./YCgCo2YDbDr.exe
./YCgCo2yiq.exe
./YCgCo2YPbPr.exe
./YCgCo2YPbPr2020.exe
./YCgCo2YPbPr601.exe
./YCgCo2YPbPr709.exe
./YCgCo2yuv.exe
./YCgCo2yuv601.exe
./YCgCo2yuv709.exe
|
dmilos/color
|
example/less-than-1k/assign/YCgCo/run.sh
|
Shell
|
apache-2.0
| 533 |
#!/bin/sh
# Copyright 2015 ThoughtWorks, Inc.
# This file is part of Gauge.
# Gauge is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Gauge is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Gauge. If not, see <http://www.gnu.org/licenses/>.
#Using protoc version 3.0.0
cd gauge-proto
PATH=$PATH:$GOPATH/bin protoc --go_out=plugins=grpc:../gauge_messages *.proto
cd ..
sed -i.backup '/import gauge_messages1 "spec.pb"/d' gauge_messages/messages.pb.go && sed -i.backup 's/gauge_messages1.//g' gauge_messages/messages.pb.go && rm gauge_messages/messages.pb.go.backup
sed -i.backup '/import gauge_messages1 "spec.pb"/d' gauge_messages/api.pb.go && sed -i.backup 's/gauge_messages1.//g' gauge_messages/api.pb.go && rm gauge_messages/api.pb.go.backup
sed -i.backup '/import "."/d' gauge_messages/api.pb.go && rm gauge_messages/api.pb.go.backup
sed -i.backup '/import "."/d' gauge_messages/messages.pb.go && rm gauge_messages/messages.pb.go.backup
# go fmt github.com/getgauge/gauge/...
|
getgauge/gauge
|
genproto.sh
|
Shell
|
apache-2.0
| 1,437 |
echo
echo "Test command used:"
echo "curl -X POST -d @test_event1.json -H "Content-Type: application/json" http://localhost:7800/orderupdates"
echo
curl -X POST -d @test_event1.json -H "Content-Type: application/json" http://localhost:7800/orderupdates
echo
echo
|
sg248351/scenario3
|
99_docker/iib/testme.sh
|
Shell
|
apache-2.0
| 263 |
#!/bin/bash
# Copyright (c) The Diem Core Contributors
# SPDX-License-Identifier: Apache-2.0
set -e
DIR="$( cd "$( dirname "$0" )" && pwd )"
$DIR/../diem-build.sh $DIR/Dockerfile diem/init "$@"
|
libra/libra
|
docker/init/build.sh
|
Shell
|
apache-2.0
| 196 |
java -jar -Dhbird.assembly=assemblies/satellite-estcube1.xml -Dhbird.scriptlibrary=scripts -Dorekit.data.path=resources -Dlog4j.configuration=file:resources/log4j.properties -Dhbird.log=satellite hbird-0.10.0.jar
|
Villemos/hbird-business
|
systems/estcube/src/main/deprecated/scripts/startSatellite.sh
|
Shell
|
apache-2.0
| 214 |
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Run a Yarn command on all slave hosts.
usage="Usage: yarn-daemons.sh [--config confdir] [--hosts hostlistfile] [start
|stop] command args..."
# if no args specified, show usage
if [ $# -le 1 ]; then
echo $usage
exit 1
fi
bin=`dirname "${BASH_SOURCE-$0}"`
bin=`cd "$bin"; pwd`
DEFAULT_LIBEXEC_DIR="$bin"/../libexec
if [ -n "$HADOOP_HOME" ]; then
DEFAULT_LIBEXEC_DIR="$HADOOP_HOME"/libexec
fi
HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
. $HADOOP_LIBEXEC_DIR/yarn-config.sh
exec "$bin/slaves.sh" --config $YARN_CONF_DIR cd "$HADOOP_YARN_HOME" \; "$bin/yarn-daemon.sh" --config $YARN_CONF_DIR "$@"
|
scalingdata/Impala
|
thirdparty/hadoop-2.7.1.2.3.0.0-2557/sbin/yarn-daemons.sh
|
Shell
|
apache-2.0
| 1,434 |
#!/bin/sh
# This script will determine the current local dynamic IP address then update the A record with the current IP address for YOUR DOMAIN.COM hosted at DNSimple
# It can be scheduled as a task or cronjob and run daily or weekly to keep the A record updated with the current dynamic IP address
exec >> /var/log/DNSimpleARecordUpdater.log
TOKEN="YOUR API TOKEN HERE"
DOMAIN_ID="YOUR DOMAIN.COM"
RECORD_ID="DNSIMPLE RECORD ID"
IP=$(curl -s http://icanhazip.com/)
# need to do some error checking here for unexpected curl results or a timeout or other error
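# A minimal sketch of the checking suggested above, assuming a plain IPv4
# setup: abort unless $IP looks like a dotted-quad address.
if ! printf '%s' "$IP" | grep -Eq '^([0-9]{1,3}\.){3}[0-9]{1,3}$'; then
echo "$(date): could not determine current IP (got '$IP'), aborting" >&2
exit 1
fi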
curl -H "Accept: application/json" \
-H "Content-Type: application/json" \
-H "X-DNSimple-Domain-Token: $TOKEN" \
-X "PUT" \
-ski "https://api.dnsimple.com/v1/domains/$DOMAIN_ID/records/$RECORD_ID" \
-d "{\"record\":{\"content\":\"$IP\"}}"
# need to do some error checking here for unexpected curl results or a timeout or other error
# possibly generate an alert or email notification for any failures
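# Hedged sketch of the alerting suggested above; $? still holds the exit
# status of the curl call (the mail command and address are placeholders).
status=$?
if [ "$status" -ne 0 ]; then
echo "$(date): DNSimple record update failed, curl exit status $status" >&2
# mail -s "DDNS update failed" admin@example.com < /dev/null
fi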
|
swoodford/linux
|
dnsimple-dns-record-updater.sh
|
Shell
|
apache-2.0
| 1,002 |
#!/bin/bash
./tools/apidoc/generate.sh \
"Test API" \
"API" \
"" \
\
"$@"
|
stuffer2325/Makagiga
|
tools/apidoc/generate-test.sh
|
Shell
|
apache-2.0
| 80 |
#!/bin/bash
sudo rm -rf /tmp/consul
sudo consul agent -config-file consul.json
|
JoergM/consul-examples
|
dns/start_consul.sh
|
Shell
|
apache-2.0
| 80 |
#!/usr/bin/env bash
LINES="$(grep -E "Copyright \((c|C)\)" -rL --include=\*.kt --include=\*.java --exclude-dir=generated-src --exclude-dir=projects --exclude-dir=gen --exclude-dir=build --exclude-dir=.gradle .)"
NUM_LINES=${#LINES}
if (( $NUM_LINES > 0 )); then
echo "These files seem to be missing a license header:"
echo $LINES | tr ' ' '\n'
exit 1
fi
|
benjamin-bader/thrifty
|
script/ensure_license_headers.sh
|
Shell
|
apache-2.0
| 361 |
#!/usr/bin/ksh
#
set -x
[ $# -eq 0 ] && { echo "Usage: $0 -d <dc> -n <project> -f <properties file>"; exit 1; }
while [[ $# -gt 1 ]]
do
key="$1"
case $key in
-n)
PROJECT="$2"
shift # past argument
;;
-f)
PROP_FILE="$2"
shift # past argument
;;
-d)
DC="$2"
shift # past argument
;;
*)
# unknown option
;;
esac
shift # past argument or value
done
IPS=""
# Gets a list of list of pods in the project namespace (ex. apollo) filtering for application (ex. searcher)
# and creates a comma separated list of application URLs
# TODO this port should be parameterized
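# Note: ksh runs the last stage of a pipeline in the current shell, so the
# IPS value accumulated inside the while loop below survives past 'done'.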
oc get pods -o jsonpath='{range .items[*]}{.metadata.name} {.status.podIP} {.status.phase}
{end}' -n ${PROJECT} | egrep "^${DC}-.*-..... .*Running$" | \
while read POD IP PHASE ; do
IPS="$IPS,http:\/\/$IP:8080"
done
IPS=${IPS#?}
echo $IPS
sed -i -e "s/^\(apollo\.slave\.urls\s*=\s*\).*\$/\1$IPS/" $PROP_FILE
|
mcanoy/pod-watcher
|
pod/get_pods.sh
|
Shell
|
apache-2.0
| 945 |
# Example how to setup a clean-up step for the Buildkite Agent on macOS
# Create a "buildkite" user first with a clean home directory.
#
# The agent will run as this "buildkite" user and will be monitored by a launchd
# service that runs a "buildkite-wrapper" script, which will launch the actual
# agent and clean-up after each job. (launchd will restart the service automatically
# after it exits.)
cat > /Library/LaunchDaemons/de.geheimspeicher.buildkite-agent.plist <<'EOF'
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>Label</key>
<string>de.geheimspeicher.buildkite-agent</string>
<key>WorkingDirectory</key>
<string>/usr/local/bin</string>
<key>ProgramArguments</key>
<array>
<string>/usr/local/bin/buildkite-wrapper</string>
</array>
<key>RunAtLoad</key>
<true/>
<key>KeepAlive</key>
<true/>
<key>ProcessType</key>
<string>Interactive</string>
<key>ThrottleInterval</key>
<integer>10</integer>
<key>StandardOutPath</key>
<string>/usr/local/var/log/buildkite-agent.log</string>
<key>StandardErrorPath</key>
<string>/usr/local/var/log/buildkite-agent.log</string>
</dict>
</plist>
EOF
chown root:wheel /Library/LaunchDaemons/de.geheimspeicher.buildkite-agent.plist
chmod 0644 /Library/LaunchDaemons/de.geheimspeicher.buildkite-agent.plist
cat > /usr/local/bin/buildkite-wrapper <<'EOF'
#!/bin/bash
set -x
OS_VERSION=$(sw_vers -productVersion)
MACHINE_TYPE=$(system_profiler SPHardwareDataType | grep 'Model Name' | cut -d':' -f2 | tr -d ' ' | tr '[:upper:]' '[:lower:]')
BUILDKITE_AGENT_TAGS="queue=macos,kind=worker,os=macos,os-version=${OS_VERSION},machine-type=${MACHINE_TYPE}"
# Kill all processes that might still be running from the last build.
killall -9 -u buildkite
# Remove temporary files.
find /private/tmp -user buildkite -delete
# Delete all Bazel output bases (but leave the cache and install bases).
find /private/var/tmp/_bazel_buildkite -mindepth 1 -maxdepth 1 ! -name 'cache' ! -name 'install' -exec rm -rf {} +
# Delete Bazel install bases older than 7 days.
find /private/var/tmp/_bazel_buildkite/install -mindepth 1 -maxdepth 1 -mtime +7 -exec rm -rf {} +
# Delete the user's cache and temporary files.
find /var/folders -user buildkite -delete
# Completely remove all temporary files, output bases, repo cache, install bases, ...
# find /private/var/tmp -user buildkite -delete
# Completely reset the user's home directory to a known state.
/usr/local/bin/rsync -aAX --delete --ignore-errors /Users/buildkite-fresh/ /Users/buildkite/
sudo -H -u buildkite env \
PATH=/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin \
BUILDKITE_AGENT_DISCONNECT_AFTER_JOB="true" \
BUILDKITE_AGENT_EXPERIMENT="git-mirrors" \
BUILDKITE_AGENT_NAME="%hostname" \
BUILDKITE_AGENT_TAGS="${BUILDKITE_AGENT_TAGS}" \
BUILDKITE_BUILD_PATH="/Users/buildkite/builds" \
BUILDKITE_CONFIG_PATH="/usr/local/etc/buildkite-agent/buildkite-agent.cfg" \
BUILDKITE_GIT_MIRRORS_PATH="/usr/local/var/bazelbuild" \
BUILDKITE_GIT_CLONE_MIRROR_FLAGS="-v --bare" \
/usr/local/bin/buildkite-agent start
# Just to make really sure that nothing stays running after a job, run 'killall' now.
killall -9 -u buildkite
EOF
chown ci:staff /usr/local/bin/buildkite-wrapper
chmod 0755 /usr/local/bin/buildkite-wrapper
launchctl load /Library/LaunchDaemons/de.geheimspeicher.buildkite-agent.plist
|
bazelbuild/continuous-integration
|
macos/mac-cleanup.sh
|
Shell
|
apache-2.0
| 3,495 |
#!/usr/bin/env bash
#
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
set -eo pipefail
set +x
killall socat || true
killall quilkin || true
killall iperf3 || true
dir=$(dirname "$0")
rm "$dir"/*.log || true
rm "$dir"/*.json || true
|
googleforgames/quilkin
|
examples/iperf3/clean.sh
|
Shell
|
apache-2.0
| 771 |
#!/bin/sh
set -x
###################################################################################################
###################################################################################################
# CEILOMETER
yum install -y openstack-ceilometer-compute python-ceilometerclient python-pecan
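# crudini --set <file> <section> <key> <value> creates or updates a single INI entry.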
crudini --set /etc/ceilometer/ceilometer.conf publisher telemetry_secret TELEMETRY_SECRET
crudini --set /etc/ceilometer/ceilometer.conf DEFAULT rpc_backend rabbit
crudini --set /etc/ceilometer/ceilometer.conf oslo_messaging_rabbit rabbit_host controller
crudini --set /etc/ceilometer/ceilometer.conf oslo_messaging_rabbit rabbit_userid openstack
crudini --set /etc/ceilometer/ceilometer.conf oslo_messaging_rabbit rabbit_password RABBIT_PASS
crudini --set /etc/ceilometer/ceilometer.conf DEFAULT auth_strategy keystone
crudini --set /etc/ceilometer/ceilometer.conf keystone_authtoken auth_uri http://controller:5000/v2.0
crudini --set /etc/ceilometer/ceilometer.conf keystone_authtoken identity_uri http://controller:35357
crudini --set /etc/ceilometer/ceilometer.conf keystone_authtoken admin_tenant_name service
crudini --set /etc/ceilometer/ceilometer.conf keystone_authtoken admin_user ceilometer
crudini --set /etc/ceilometer/ceilometer.conf keystone_authtoken admin_password CEILOMETER_PASS
crudini --set /etc/ceilometer/ceilometer.conf service_credentials os_auth_url http://controller:5000/v2.0
crudini --set /etc/ceilometer/ceilometer.conf service_credentials os_username ceilometer
crudini --set /etc/ceilometer/ceilometer.conf service_credentials os_tenant_name service
crudini --set /etc/ceilometer/ceilometer.conf service_credentials os_password CEILOMETER_PASS
crudini --set /etc/ceilometer/ceilometer.conf service_credentials os_endpoint_type internalURL
crudini --set /etc/ceilometer/ceilometer.conf service_credentials os_region_name regionOne
crudini --set /etc/ceilometer/ceilometer.conf DEFAULT verbose True
crudini --set /etc/nova/nova.conf DEFAULT instance_usage_audit True
crudini --set /etc/nova/nova.conf DEFAULT instance_usage_audit_period hour
crudini --set /etc/nova/nova.conf DEFAULT notify_on_state_change vm_and_task_state
crudini --set /etc/nova/nova.conf DEFAULT notification_driver messagingv2
systemctl enable openstack-ceilometer-compute.service
systemctl start openstack-ceilometer-compute.service
systemctl restart openstack-nova-compute.service
|
berendt/vagrant-openstack-installation-guide
|
scripts/compute_ceilometer.sh
|
Shell
|
apache-2.0
| 2,403 |
#!/bin/sh
set -e
cd $HOME/logs
flock -n ~/var/locks/pdf-content $HOME/services/java-wrappers/pdfcontent-invoke $HOME/services/conf/pdf-content.properties "$@"
|
statsbiblioteket/digital-pligtaflevering-aviser-tools
|
tools/dpa-tools-deployment/for-deployment/bin/pdf-content.sh
|
Shell
|
apache-2.0
| 161 |
#!/bin/sh
#
# Copyright (c) 2017 Cloudera, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# This script will walk you through setting up BIND on the host and making the changes needed in
# Azure portal.
#
# This script will bootstrap these OSes:
# - CentOS 6
# - CentOS 7
# - RHEL 6
# - RHEL 7
#
# Notes and notable differences between OSes:
# - CentOS and RHEL 6 use dhclient
# - CentOS and RHEL 7 use NetworkManager
#
#
# WARNING
#
# - This script only creates one zone file which supports <= 255 hosts. It has not been tested
# with > 255 hosts trying to use the same zone file. It "might just work", or it may require
# manually configuring additional zone files in `/etc/named/named.conf.local` and
# `/etc/named/zones/`.
# - It is assumed that the Azure nameserver IP address will always be `168.63.129.16`. See more
# info: https://blogs.msdn.microsoft.com/mast/2015/05/18/what-is-the-ip-address-168-63-129-16/.
#
#
# Microsoft Azure Assumptions
#
nameserver_ip="168.63.129.16" # used for all regions
#
# Functions
#
#
# This function does the install and setup for BIND
#
base_beginning() {
echo "-- STOP --"
echo "This script will turn a fresh host into a BIND server and walk you through changing Azure DNS "
echo "settings. If you have previously run this script on this host, or another host within the same "
echo "virtual network: stop running this script and run the reset script before continuing."
printf "Press [Enter] to continue."
read -r
#
# Quick sanity checks
#
if ! hostname -f
then
echo "Unable to run the command 'hostname -f'; run the reset script and try again."
exit 1
fi
if ! hostname -i
then
echo "Unable to run the command 'hostname -i'; run the reset script and try again."
exit 1
fi
#
# Install and setup the prerequisites
#
sudo yum -y install bind bind-utils
if ! yum list installed bind
then
echo "Unable to install package 'bind', manual troubleshoot required."
exit 1
fi
if ! yum list installed bind-utils
then
echo "Unable to install package 'bind-utils', manual troubleshoot required."
exit 1
fi
# make the directories that bind will use
mkdir /etc/named/zones
# make the files that bind will use
touch /etc/named/named.conf.local
touch /etc/named/zones/db.internal
touch /etc/named/zones/db.reverse
#
# Set all of the variables
#
echo ""
printf "Enter the internal host FQDN suffix you wish to use for your cluster network (e.g. cdh-cluster.internal): "
read -r internal_fqdn_suffix
while [ -z "$internal_fqdn_suffix" ]; do
printf "You must enter the internal host FQDN suffix you wish to use for your cluster network (e.g. cdh-cluster.internal): "
read -r internal_fqdn_suffix
done
hostname=$(hostname -s)
internal_ip=$(hostname -i)
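# Build a "network/prefix" CIDR string for each global IPv4 address (used in
# the named ACL and allow-update clauses below); ipcalc -np emits the NETWORK=
# and PREFIX= values that the awk stages pair up and join with "/".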
subnet=$(ipcalc -np "$(ip -o -f inet addr show | awk '/scope global/ {print $4}')" | awk '{getline x;print x;}1' | awk -F= '{print $2}' | awk 'NR%2{printf "%s/",$0;next;}1')
ptr_record_prefix=$(hostname -i | awk -F. '{print $3"." $2"."$1}')
hostnumber=$(hostname -i | cut -d . -f 4)
hostmaster="hostmaster"
echo "[DEBUG: Variables used]"
echo "subnet: $subnet"
echo "internal_ip: $internal_ip"
echo "internal_fqdn_suffix: $internal_fqdn_suffix"
echo "ptr_record_prefix: $ptr_record_prefix"
echo "hostname: $hostname"
echo "hostmaster: $hostmaster"
echo "hostnumber: $hostnumber"
echo "[END DEBUG: Variables used]"
#
# Create the BIND files
# Section not indented so EOF works
#
cat > /etc/named.conf <<EOF
acl trusted {
${subnet};
};
options {
listen-on port 53 { 127.0.0.1; ${internal_ip}; };
listen-on-v6 port 53 { ::1; };
directory "/var/named";
dump-file "/var/named/data/cache_dump.db";
statistics-file "/var/named/data/named_stats.txt";
memstatistics-file "/var/named/data/named_mem_stats.txt";
allow-query { localhost; trusted; };
recursion yes;
forwarders { ${nameserver_ip}; };
dnssec-enable yes;
dnssec-validation yes;
dnssec-lookaside auto;
/* Path to ISC DLV key */
bindkeys-file "/etc/named.iscdlv.key";
managed-keys-directory "/var/named/dynamic";
};
logging {
channel default_debug {
file "data/named.run";
severity dynamic;
};
};
zone "." IN {
type hint;
file "named.ca";
};
include "/etc/named.rfc1912.zones";
include "/etc/named.root.key";
include "/etc/named/named.conf.local";
EOF
cat > /etc/named/named.conf.local <<EOF
zone "${internal_fqdn_suffix}" IN {
type master;
file "/etc/named/zones/db.internal";
allow-update { ${subnet}; };
};
zone "${ptr_record_prefix}.in-addr.arpa" IN {
type master;
file "/etc/named/zones/db.reverse";
allow-update { ${subnet}; };
};
EOF
cat > /etc/named/zones/db.internal <<EOF
\$ORIGIN .
\$TTL 600 ; 10 minutes
${internal_fqdn_suffix} IN SOA ${hostname}.${internal_fqdn_suffix}. ${hostmaster}.${internal_fqdn_suffix}. (
10 ; serial
600 ; refresh (10 minutes)
60 ; retry (1 minute)
604800 ; expire (1 week)
600 ; minimum (10 minutes)
)
NS ${hostname}.${internal_fqdn_suffix}.
\$ORIGIN ${internal_fqdn_suffix}.
${hostname} A ${internal_ip}
EOF
cat > /etc/named/zones/db.reverse <<EOF
\$ORIGIN .
\$TTL 600 ; 10 minutes
${ptr_record_prefix}.in-addr.arpa IN SOA ${hostname}.${internal_fqdn_suffix}. ${hostmaster}.${internal_fqdn_suffix}. (
10 ; serial
600 ; refresh (10 minutes)
60 ; retry (1 minute)
604800 ; expire (1 week)
600 ; minimum (10 minutes)
)
NS ${hostname}.${internal_fqdn_suffix}.
\$ORIGIN ${ptr_record_prefix}.in-addr.arpa.
${hostnumber} PTR ${hostname}.${internal_fqdn_suffix}.
EOF
#
# Final touches on BIND related items
#
chown -R named:named /etc/named*
if ! named-checkconf /etc/named.conf # if named-checkconf fails
then
exit 1
fi
if ! named-checkzone "${internal_fqdn_suffix}" /etc/named/zones/db.internal # if named-checkzone fails
then
exit 1
fi
if ! named-checkzone "${ptr_record_prefix}.in-addr.arpa" /etc/named/zones/db.reverse # if named-checkzone fails
then
exit 1
fi
service named start
chkconfig named on
#
# This host is now running BIND
#
}
#
# This function prompts the person running the script to go to portal.azure.com to change Azure
# DNS settings then makes sure everything works as expected
#
base_end() {
#
# Now it's time to update Azure DNS settings in portal
#
echo ""
echo "-- STOP -- STOP -- STOP --"
echo "Go to -- portal.azure.com -- and change Azure DNS to point to the private IP of this host: ${internal_ip}"
printf "Press [Enter] once you have gone to portal.azure.com and this is completed."
read -r
#
# Loop until DNS nameserver updates have propagated to /etc/resolv.conf
# NB: search server updates don't take place until dhclient-exit-hooks have executed
#
until grep "nameserver ${internal_ip}" /etc/resolv.conf
do
service network restart
echo "Waiting for Azure DNS nameserver updates to propagate, this usually takes less than 2 minutes..."
sleep 10
done
#
# Check that everything is working
#
echo "Running sanity checks:"
if ! hostname -f
then
echo "Unable to run the command 'hostname -f' (check 1 of 4)"
echo "Run the reset script and then try this script again."
exit 1
fi
if ! hostname -i
then
echo "Unable to run the command 'hostname -i' (check 2 of 4)"
echo "Run the reset script and then try this script again."
exit 1
fi
if ! host "$(hostname -f)"
then
echo "Unable to run the command 'host \`hostname -f\`' (check 3 of 4)"
echo "Run the reset script and then try this script again."
exit 1
fi
if ! host "$(hostname -i)"
then
echo "Unable to run the command 'host \`hostname -i\`' (check 4 of 4)"
echo "Run the reset script and then try this script again."
exit 1
fi
echo ""
echo "Everything is working!"
exit 0
}
#
# CentOS and RHEL 6 use dhclient. Add a script to be automatically invoked when interface comes up.
# Function not indented so EOF works.
#
dhclient_6()
{
# dhclient-exit-hooks explained in dhclient-script man page: http://linux.die.net/man/8/dhclient-script
# cat a here-doc represenation of the hooks to the appropriate file
cat > /etc/dhcp/dhclient-exit-hooks <<"EOF"
#!/bin/bash
printf "\ndhclient-exit-hooks running...\n\treason:%s\n\tinterface:%s\n" "${reason:?}" "${interface:?}"
# only execute on the primary nic
if [ "$interface" != "eth0" ]
then
exit 0;
fi
# when we have a new IP, update the search domain
if [ "$reason" = BOUND ] || [ "$reason" = RENEW ] || [ "$reason" = REBIND ] || [ "$reason" = REBOOT ]
then
EOF
# this is a separate here-doc because there's two sets of variable substitution going on, this set
# needs to be evaluated when written to the file, the two others (with "EOF" surrounded by quotes)
# should not have variable substitution occur when creating the file.
cat >> /etc/dhcp/dhclient-exit-hooks <<EOF
domain=${internal_fqdn_suffix}
EOF
cat >> /etc/dhcp/dhclient-exit-hooks <<"EOF"
resolvconfupdate=$(mktemp -t resolvconfupdate.XXXXXXXXXX)
echo updating resolv.conf
grep -iv "search" /etc/resolv.conf > "$resolvconfupdate"
echo "search $domain" >> "$resolvconfupdate"
cat "$resolvconfupdate" > /etc/resolv.conf
fi
exit 0;
EOF
chmod 755 /etc/dhcp/dhclient-exit-hooks
}
centos_6()
{
echo "CentOS 6"
base_beginning
# execute the CentOS / RHEL 6 dhclient-exit-hooks setup
dhclient_6
base_end
}
rhel_6()
{
echo "RHEL 6"
# rewrite SELINUX config to disabled and turn off enforcement
sed -i.bak "s/^SELINUX=.*$/SELINUX=disabled/" /etc/selinux/config
setenforce 0
# stop firewall and disable
service iptables stop
chkconfig iptables off
# update config to disable IPv6 and disable
echo "# Disable IPv6" >> /etc/sysctl.conf
echo "net.ipv6.conf.all.disable_ipv6 = 1" >> /etc/sysctl.conf
echo "net.ipv6.conf.default.disable_ipv6 = 1" >> /etc/sysctl.conf
sysctl -w net.ipv6.conf.all.disable_ipv6=1
sysctl -w net.ipv6.conf.default.disable_ipv6=1
base_beginning
# execute the CentOS / RHEL 6 dhclient-exit-hooks setup
dhclient_6
base_end
}
#
# CentOS and RHEL 7 use NetworkManager. Add a script to be automatically invoked when interface comes up.
# Function not indented so EOF works.
#
networkmanager_7()
{
cat > /etc/NetworkManager/dispatcher.d/12-register-dns <<"EOF"
#!/bin/bash
# NetworkManager Dispatch script
# Deployed by Cloudera Altus Director Bootstrap
#
# Expected arguments:
# $1 - interface
# $2 - action
#
# See for info: http://linux.die.net/man/8/networkmanager
# Register A and PTR records when interface comes up
# only execute on the primary nic
if [ "$1" != "eth0" ] || [ "$2" != "up" ]
then
exit 0;
fi
# when we have a new IP, perform nsupdate
new_ip_address="$DHCP4_IP_ADDRESS"
EOF
# this is a separate here-doc because there's two sets of variable substitution going on, this set
# needs to be evaluated when written to the file, the two others (with "EOF" surrounded by quotes)
# should not have variable substitution occur when creating the file.
cat >> /etc/NetworkManager/dispatcher.d/12-register-dns <<EOF
domain=${internal_fqdn_suffix}
EOF
cat >> /etc/NetworkManager/dispatcher.d/12-register-dns <<"EOF"
IFS='.' read -ra ipparts <<< "$new_ip_address"
ptrrec="$(printf %s "$new_ip_address." | tac -s.)in-addr.arpa"
nsupdatecmds=$(mktemp -t nsupdate.XXXXXXXXXX)
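# Hedged sketch of the A/PTR registration the comments above promise; the
# variables reused here are computed above, while the 600s TTL and letting
# nsupdate resolve the zone master itself are assumptions.
host=$(hostname -s)
{
echo "update delete ${host}.${domain} A"
echo "update add ${host}.${domain} 600 A ${new_ip_address}"
echo "update delete ${ptrrec} PTR"
echo "update add ${ptrrec} 600 PTR ${host}.${domain}."
echo "send"
} > "$nsupdatecmds"
nsupdate "$nsupdatecmds"
rm -f "$nsupdatecmds"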
resolvconfupdate=$(mktemp -t resolvconfupdate.XXXXXXXXXX)
echo updating resolv.conf
grep -iv "search" /etc/resolv.conf > "$resolvconfupdate"
echo "search $domain" >> "$resolvconfupdate"
cat "$resolvconfupdate" > /etc/resolv.conf
exit 0;
EOF
chmod 755 /etc/NetworkManager/dispatcher.d/12-register-dns
}
centos_7()
{
echo "CentOS 7"
base_beginning
# execute the CentOS / RHEL 7 network manager setup
networkmanager_7
base_end
}
rhel_7()
{
echo "RHEL 7"
# rewrite SELINUX config to disable and turn off enforcement
sed -i.bak "s/^SELINUX=.*$/SELINUX=disabled/" /etc/selinux/config
setenforce 0
# stop firewall and disable
systemctl stop iptables
systemctl disable iptables
# RHEL 7 uses firewalld
systemctl stop firewalld
systemctl disable firewalld
# Disable tuned so it does not overwrite sysctl.conf
service tuned stop
systemctl disable tuned
# Disable chrony so it does not conflict with ntpd installed by Director
systemctl stop chronyd
systemctl disable chronyd
# update config to disable IPv6 and disable
echo "# Disable IPv6" >> /etc/sysctl.conf
echo "net.ipv6.conf.all.disable_ipv6 = 1" >> /etc/sysctl.conf
echo "net.ipv6.conf.default.disable_ipv6 = 1" >> /etc/sysctl.conf
# swappiness is set by Director in /etc/sysctl.conf
# Poke sysctl to have it pickup the config change.
sysctl -p
base_beginning
# execute the CentOS / RHEL 7 network manager setup
networkmanager_7
base_end
}
#
# Main workflow
#
# ensure user is root
if [ "$(id -u)" -ne 0 ]
then
echo "Please run as root."
exit 1
fi
# find the OS and release
os=""
major_release=""
# if it's there, use lsb_release
if rpm -q redhat-lsb
then
os=$(lsb_release -si)
major_release=$(lsb_release -sr | cut -d '.' -f 1)
# if lsb_release isn't installed, use /etc/redhat-release
else
if grep "CentOS.* 6\\." /etc/redhat-release
then
os="CentOS"
major_release="6"
fi
if grep "CentOS.* 7\\." /etc/redhat-release
then
os="CentOS"
major_release="7"
fi
if grep "Red Hat Enterprise Linux Server release 6\\." /etc/redhat-release
then
os="RedHatEnterpriseServer"
major_release="6"
fi
if grep "Red Hat Enterprise Linux Server release 7\\." /etc/redhat-release
then
os="RedHatEnterpriseServer"
major_release="7"
fi
fi
echo "OS: $os $major_release"
# select the OS and run the appropriate setup script
not_supported_msg="OS $os $major_release is not supported."
if [ "$os" = "CentOS" ]; then
if [ "$major_release" = "6" ]; then
centos_6
elif [ "$major_release" = "7" ]; then
centos_7
else
echo "$not_supported_msg"
exit 1
fi
elif [ "$os" = "RedHatEnterpriseServer" ]; then
if [ "$major_release" = "6" ]; then
rhel_6
elif [ "$major_release" = "7" ]; then
rhel_7
else
echo "$not_supported_msg"
exit 1
fi
else
echo "$not_supported_msg"
exit 1
fi
|
cloudera/director-scripts
|
azure-dns-scripts/bind-dns-setup.sh
|
Shell
|
apache-2.0
| 15,604 |
#!/bin/bash -l
function somaticSniper_variants()
{
local prefix=$1
local ext=$2
local fn_genome=$3
local out_dir=$4
local bam_norm=$5
if [ -z "$1" -o -z "$3" -o -z "$4" -o -z "$5" ]; then # $2 is allowed to be empty
echo "usage somaticSniper_variants <prefix> <ext> <fn_genome_fasta> <output_dir> <normal_bam>"
exit -1;
fi
mkdir -p $out_dir
if [ -f $fn_genome -a ! -f $fn_genome.fai ]; then
echo did not find index file $fn_genome.fai
echo $samtools faidx $fn_genome
$samtools faidx $fn_genome
fi
if [ -f "$prefix" ]; then
local files=$prefix
else
local files="$prefix*$ext"
fi
for fn_bam in $files
do
local vcf_output=$out_dir/`basename $fn_bam | sed 's/.bam//'`.somaticSniper_SNVs_Raw.vcf
if [ ! -f $vcf_output ]; then
$somaticSniper -F vcf -f $fn_genome \
$fn_bam \
$bam_norm \
$vcf_output
else
echo "Somatic sniper vcf-output file already exists."
fi
done
}
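# Example invocation (hypothetical paths; $somaticSniper and $samtools are
# expected to be provided by the calling environment):
# somaticSniper_variants /data/tumor/sample_ .bam /ref/genome.fa /data/out /data/normal.bam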
|
cbg-ethz/WES_Cancer_Sim
|
sim_cancer/cancer_data_pipeline/SNV_caller/somaticSniper_variants.sh
|
Shell
|
apache-2.0
| 1,104 |
#!/bin/bash -eux
# should output one of 'redhat' 'centos' 'oraclelinux'
distro="`rpm -qf --queryformat '%{NAME}' /etc/redhat-release | cut -f 1 -d '-'`"
major_version="`sed 's/^.\+ release \([.0-9]\+\).*/\1/' /etc/redhat-release | awk -F. '{print $1}'`";
# make sure we use dnf on EL 8+
if [ "$major_version" -ge 8 ]; then
pkg_cmd="dnf"
else
pkg_cmd="yum"
fi
# remove previous kernels that yum/dnf preserved for rollback
if [ "$major_version" -ge 8 ]; then
dnf autoremove -y
dnf remove -y $(dnf repoquery --installonly --latest-limit=-1 -q)
elif [ "$major_version" -gt 5 ]; then # yum-utils isn't in RHEL 5 so don't try to run this
if ! command -v package-cleanup >/dev/null 2>&1; then
yum install -y yum-utils
fi
package-cleanup --oldkernels --count=1 -y
fi
# Remove development and kernel source packages
$pkg_cmd -y remove gcc cpp kernel-devel kernel-headers;
# Clean up network interface persistence
rm -f /etc/udev/rules.d/70-persistent-net.rules;
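# Creating a directory at the rules path keeps udev from regenerating the file on boot.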
mkdir -p /etc/udev/rules.d/70-persistent-net.rules;
rm -f /lib/udev/rules.d/75-persistent-net-generator.rules;
rm -rf /dev/.udev/;
for ndev in `ls -1 /etc/sysconfig/network-scripts/ifcfg-*`; do
if [ "`basename $ndev`" != "ifcfg-lo" ]; then
sed -i '/^HWADDR/d' "$ndev";
sed -i '/^UUID/d' "$ndev";
fi
done
# new-style network device naming for centos7
if grep -q -i "release 7" /etc/redhat-release ; then
# radio off & remove all interface configration
nmcli radio all off
/bin/systemctl stop NetworkManager.service
for ifcfg in `ls /etc/sysconfig/network-scripts/ifcfg-* |grep -v ifcfg-lo` ; do
rm -f $ifcfg
done
rm -rf /var/lib/NetworkManager/*
echo "==> Setup /etc/rc.d/rc.local for EL7"
cat <<'EOF' >> /etc/rc.d/rc.local
#PACKER-BEGIN
LANG=C
# delete all connection
for con in `nmcli -t -f uuid con`; do
if [ "$con" != "" ]; then
nmcli con del $con
fi
done
# add gateway interface connection.
gwdev=`nmcli dev | grep ethernet | egrep -v 'unmanaged' | head -n 1 | awk '{print $1}'`
if [ "$gwdev" != "" ]; then
nmcli connection add type ethernet ifname $gwdev con-name $gwdev
fi
sed -i "/^#PACKER-BEGIN/,/^#PACKER-END/d" /etc/rc.d/rc.local
chmod -x /etc/rc.d/rc.local
#PACKER-END
EOF
chmod +x /etc/rc.d/rc.local
fi
# truncate any logs that have built up during the install
find /var/log -type f -exec truncate --size=0 {} \;
# we try to remove these in the ks file, but they're still there
# in the builds so let's remove them here to be sure :shrug:
#
# 12.2019 note: We can probably remove this now, but let's confirm it
$pkg_cmd remove -y \
aic94xx-firmware \
atmel-firmware \
bfa-firmware \
ipw2100-firmware \
ipw2200-firmware \
ivtv-firmware \
iwl1000-firmware \
iwl3945-firmware \
iwl4965-firmware \
iwl5000-firmware \
iwl5150-firmware \
iwl6000-firmware \
iwl6050-firmware \
kernel-uek-firmware \
libertas-usb8388-firmware \
netxen-firmware \
ql2xxx-firmware \
rt61pci-firmware \
rt73usb-firmware \
zd1211-firmware \
linux-firmware \
microcode_ctl
if [ "$distro" != 'redhat' ]; then
$pkg_cmd -y clean all;
fi
# remove the install log
rm -f /root/anaconda-ks.cfg
# remove the contents of /tmp and /var/tmp
rm -rf /tmp/* /var/tmp/*
# Blank the systemd machine-id (DUID) so machines get unique ID generated on boot.
if [ "$major_version" -ge 7 ]; then
truncate -s 0 /etc/machine-id
fi
# clear the history so our install isn't there
export HISTSIZE=0
rm -f /root/.wget-hsts
|
erumble/packer-templates
|
centos/scripts/cleanup.sh
|
Shell
|
apache-2.0
| 3,451 |
#!/usr/bin/env bash
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
usage() {
echo "Usage: promote_rc.sh -v version_number -k your_full_gpg_public_key -g your_github_username"
echo " -v The #.#.#.RC# version number to ship"
echo " -k Your 40-digit GPG fingerprint"
echo " -g Your github username"
exit 1
}
FULL_VERSION=""
SIGNING_KEY=""
GITHUB_USER=""
while getopts ":v:k:g:" opt; do
case ${opt} in
v )
FULL_VERSION=$OPTARG
;;
k )
SIGNING_KEY=$OPTARG
;;
g )
GITHUB_USER=$OPTARG
;;
\? )
usage
;;
esac
done
if [[ ${FULL_VERSION} == "" ]] || [[ ${SIGNING_KEY} == "" ]] || [[ ${GITHUB_USER} == "" ]]; then
usage
fi
SIGNING_KEY=$(echo $SIGNING_KEY|sed 's/[^0-9A-Fa-f]//g')
if [[ $SIGNING_KEY =~ ^[0-9A-Fa-f]{40}$ ]]; then
true
else
echo "Malformed signing key ${SIGNING_KEY}. Example valid key: '0000 0000 1111 1111 2222 2222 3333 3333 ABCD 1234'"
exit 1
fi
if [[ $FULL_VERSION =~ ^([0-9]+\.[0-9]+\.[0-9]+)\.(RC[0-9]+)$ ]]; then
VERSION=${BASH_REMATCH[1]}
else
echo "Malformed version number ${FULL_VERSION}. Example valid version: 1.9.0.RC1"
exit 1
fi
VERSION_MM=${VERSION%.*}
set -x
WORKSPACE=$PWD/release-${VERSION}-workspace
GEODE=$WORKSPACE/geode
GEODE_DEVELOP=$WORKSPACE/geode-develop
GEODE_EXAMPLES=$WORKSPACE/geode-examples
GEODE_NATIVE=$WORKSPACE/geode-native
GEODE_BENCHMARKS=$WORKSPACE/geode-benchmarks
BREW_DIR=$WORKSPACE/homebrew-core
SVN_DIR=$WORKSPACE/dist/dev/geode
set +x
if [ -d "$GEODE" ] && [ -d "$GEODE_DEVELOP" ] && [ -d "$GEODE_EXAMPLES" ] && [ -d "$GEODE_NATIVE" ] && [ -d "$GEODE_BENCHMARKS" ] && [ -d "$BREW_DIR" ] && [ -d "$SVN_DIR" ] ; then
true
else
echo "Please run this script from the same working directory as you initially ran prepare_rc.sh"
exit 1
fi
function failMsg {
errln=$1
echo "ERROR: script did NOT complete successfully"
echo "Comment out any steps that already succeeded (approximately lines 94-$(( errln - 1 ))) and try again"
}
trap 'failMsg $LINENO' ERR
echo ""
echo "============================================================"
echo "Checking for later versions..."
echo "============================================================"
cd ${GEODE_DEVELOP}
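# Encode each x.y.z tag as a single integer (x*1000000 + y*1000 + z) so a
# plain numeric sort surfaces the newest release.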
latestnv=$(git tag| grep '^rel/v' | grep -v RC | cut -c6- | egrep '^[0-9]+\.[0-9]+\.[0-9]+$' | awk -F. '/KEYS/{next}{print 1000000*$1+1000*$2+$3,$1"."$2"."$3}' | sort -n | tail -1)
latestn=$(echo $latestnv | awk '{print $1}')
latestv=$(echo $latestnv | awk '{print $2}')
thisre=$(echo $VERSION | awk -F. '/KEYS/{next}{print 1000000*$1+1000*$2+$3}')
if [ $latestn -gt $thisre ] ; then
LATER=$latestv
echo "Later version $LATER found; $VERSION will not be merged to master or tagged as 'latest' in docker."
else
echo "No later versions found; $VERSION will be tagged as 'latest' in docker and merged to master"
fi
echo ""
echo "============================================================"
echo "Releasing artifacts to mirror sites..."
echo "(note: must be logged in to svn as a PMC member or this will fail)"
echo "============================================================"
set -x
cd ${SVN_DIR}/../..
svn update
svn mv dev/geode/${FULL_VERSION} release/geode/${VERSION}
cp dev/geode/KEYS release/geode/KEYS
svn commit -m "Releasing Apache Geode ${VERSION} distribution"
set +x
echo ""
echo "============================================================"
echo "Tagging ${FULL_VERSION} as ${VERSION} and pushing tags..."
echo "============================================================"
for DIR in ${GEODE} ${GEODE_EXAMPLES} ${GEODE_NATIVE} ${GEODE_BENCHMARKS} ; do
set -x
cd ${DIR}
git tag -s -u ${SIGNING_KEY} rel/v${VERSION} -m "Apache Geode v${VERSION} release" rel/v${FULL_VERSION}
git push origin rel/v${VERSION}
set +x
done
echo ""
echo "============================================================"
echo "Waiting for artifacts to publish to downloads.apache.org..."
echo "============================================================"
for suffix in "" .asc .sha256 ; do
file=apache-geode-${VERSION}.tgz
url=https://downloads.apache.org/geode/${VERSION}/${file}${suffix}
expectedsize=$(cd ${SVN_DIR}/../../release/geode/${VERSION}; ls -l ${file}${suffix} | awk '{print $5}')
actualsize=0
while [ $expectedsize -ne $actualsize ] ; do
while ! curl -s --output /dev/null --head --fail "$url"; do
echo -n .
sleep 3
done
actualsize=$(curl -s --head "$url" | grep "Content-Length" | awk '{print $2}' | tr -d '\r')
done
echo "$url exists and is correct size"
done
echo ""
echo "============================================================"
if [ -n "$LATER" ] ; then
echo "NOT updating brew to avoid overwriting newer version $LATER"
echo "============================================================"
else
echo "Updating brew"
echo "============================================================"
set -x
cd ${BREW_DIR}/Formula
git pull
git remote add myfork [email protected]:${GITHUB_USER}/homebrew-core.git || true
if ! git fetch myfork ; then
echo "Please fork https://github.com/Homebrew/homebrew-core"
exit 1
fi
git checkout -b apache-geode-${VERSION}
GEODE_SHA=$(awk '{print $1}' < $WORKSPACE/dist/release/geode/${VERSION}/apache-geode-${VERSION}.tgz.sha256)
set +x
sed -e 's# *url ".*# url "https://www.apache.org/dyn/closer.lua?path=geode/'"${VERSION}"'/apache-geode-'"${VERSION}"'.tgz"#' \
-e '/ *mirror ".*www.*/d' \
-e '/ *mirror ".*downloads.*/d' \
-e 's# *mirror ".*archive.*# mirror "https://archive.apache.org/dist/geode/'"${VERSION}"'/apache-geode-'"${VERSION}"'.tgz"\
mirror "https://downloads.apache.org/geode/'"${VERSION}"'/apache-geode-'"${VERSION}"'.tgz"#' \
-e 's/ *sha256 ".*/ sha256 "'"${GEODE_SHA}"'"/' \
-i.bak apache-geode.rb
rm apache-geode.rb.bak
set -x
git add apache-geode.rb
git diff --staged --color | cat
git commit -m "apache-geode ${VERSION}"
git push -u myfork
set +x
fi
echo ""
echo "============================================================"
echo "Updating Geode Dockerfile"
echo "============================================================"
set -x
cd ${GEODE}/docker
git pull -r
set +x
sed -e "s/^ENV GEODE_GPG.*/ENV GEODE_GPG ${SIGNING_KEY}/" \
-e "s/^ENV GEODE_VERSION.*/ENV GEODE_VERSION ${VERSION}/" \
-e "s/^ENV GEODE_SHA256.*/ENV GEODE_SHA256 ${GEODE_SHA}/" \
-i.bak Dockerfile
rm Dockerfile.bak
set -x
git add Dockerfile
git diff --staged --color | cat
git commit -m "apache-geode ${VERSION}"
git push
set +x
echo ""
echo "============================================================"
echo "Updating Native Dockerfile"
echo "============================================================"
set -x
cd ${GEODE_NATIVE}/docker
git pull -r
set +x
sed -e "/wget.*closer.*apache-geode-/s#http.*filename=geode#https://downloads.apache.org/geode#" \
-e "/wget.*closer.*apache-rat-/s#http.*filename=creadur#https://archive.apache.org/dist/creadur#" \
-e "s/^ENV GEODE_VERSION.*/ENV GEODE_VERSION ${VERSION}/" \
-i.bak Dockerfile
rm Dockerfile.bak
set -x
git add Dockerfile
git diff --staged --color | cat
git commit -m "apache-geode ${VERSION}"
git push
set +x
echo ""
echo "============================================================"
echo "Building Geode docker image"
echo "============================================================"
set -x
cd ${GEODE}/docker
docker build .
docker build -t apachegeode/geode:${VERSION} .
[ -n "$LATER" ] || docker build -t apachegeode/geode:latest .
set +x
echo ""
echo "============================================================"
echo "Building Native docker image"
echo "============================================================"
set -x
cd ${GEODE_NATIVE}/docker
docker build .
docker build -t apachegeode/geode-native-build:${VERSION} .
[ -n "$LATER" ] || docker build -t apachegeode/geode-native-build:latest .
set +x
echo ""
echo "============================================================"
echo "Publishing Geode docker image"
echo "============================================================"
set -x
cd ${GEODE}/docker
docker login
docker push apachegeode/geode:${VERSION}
[ -n "$LATER" ] || docker push apachegeode/geode:latest
set +x
echo ""
echo "============================================================"
echo "Publishing Native docker image"
echo "============================================================"
set -x
cd ${GEODE_NATIVE}/docker
docker push apachegeode/geode-native-build:${VERSION}
[ -n "$LATER" ] || docker push apachegeode/geode-native-build:latest
set +x
echo ""
echo "============================================================"
echo "Removing temporary commit from geode-examples..."
echo "============================================================"
set -x
cd ${GEODE_EXAMPLES}
git pull
set +x
sed -e 's#^geodeRepositoryUrl *=.*#geodeRepositoryUrl =#' \
-e 's#^geodeReleaseUrl *=.*#geodeReleaseUrl =#' -i.bak gradle.properties
rm gradle.properties.bak
set -x
git add gradle.properties
git diff --staged --color | cat
git commit -m 'Revert "temporarily point to staging repo for CI purposes"'
git push
set +x
echo ""
echo "============================================================"
if [ -n "$LATER" ] ; then
echo "NOT merging to master to avoid overwriting newer version $LATER"
echo "============================================================"
else
echo "Merging to master"
echo "============================================================"
for DIR in ${GEODE} ${GEODE_EXAMPLES} ${GEODE_NATIVE} ${GEODE_BENCHMARKS} ; do
set -x
cd ${DIR}
git fetch origin
git checkout support/${VERSION_MM}
#this creates a merge commit that will then be ff-merged to master, so word it from that perspective
git merge -s ours origin/master -m "Replacing master with contents of support/${VERSION_MM} (${VERSION})"
git checkout master
git merge support/${VERSION_MM}
git push origin master
set +x
done
fi
echo ""
echo "============================================================"
echo "Updating 'old' versions and Benchmarks baseline on develop"
echo "============================================================"
set -x
cd ${GEODE_DEVELOP}
git pull
git remote add myfork [email protected]:${GITHUB_USER}/geode.git || true
git checkout -b add-${VERSION}-to-old-versions
set +x
PATCH=${VERSION##*.}
PREV=${VERSION%.*}.$(( PATCH - 1 ))
#add at the end if this is a new minor or a patch to the latest minor, otherwise add after it's predecessor
if [ $PATCH -eq 0 ] || grep -q "'${PREV}'].each" settings.gradle ; then
#before:
# '1.9.0'].each {
#after:
# '1.9.0',
# '1.10.0'].each {
sed -e "s/].each/,\\
'${VERSION}'].each/" \
-i.bak settings.gradle
else
#before:
# '1.9.0',
#after:
# '1.9.0',
# '1.9.1',
sed -e "s/'${PREV}',/'${PREV}',\\
'${VERSION}'/" \
-i.bak settings.gradle
fi
rm settings.gradle.bak
if [ $PATCH -eq 0 ] ; then
#also update benchmark baseline for develop to this new minor
sed -e "s/^ baseline_version:.*/ baseline_version: '${VERSION}'/" \
-i.bak ci/pipelines/shared/jinja.variables.yml
rm ci/pipelines/shared/jinja.variables.yml.bak
BENCHMSG=" and set as Benchmarks baseline"
set -x
git add ci/pipelines/shared/jinja.variables.yml
fi
set -x
git add settings.gradle
git diff --staged --color | cat
git commit -m "add ${VERSION} to old versions${BENCHMSG} on develop"
git push -u myfork
set +x
echo ""
echo "============================================================"
echo "Updating 'old' versions on support/$VERSION_MM"
echo "============================================================"
set -x
cd ${GEODE}
git pull
set +x
#add at the end as this release will always be the latest on this branch
sed -e "s/].each/,\\
'${VERSION}'].each/" \
-i.bak settings.gradle
rm settings.gradle.bak
set -x
git add settings.gradle
git diff --staged --color | cat
git commit -m "add ${VERSION} to old versions on support/$VERSION_MM"
git push
set +x
echo ""
echo "============================================================"
echo "Removing old versions from mirrors"
echo "============================================================"
set -x
cd ${SVN_DIR}/../../release/geode
svn update --set-depth immediates
#identify the latest patch release for "N-2" (the latest 3 major.minor releases), remove anything else from mirrors (all releases remain available on non-mirrored archive site)
RELEASES_TO_KEEP=3
set +x
ls | awk -F. '/KEYS/{next}{print 1000000*$1+1000*$2+$3,$1"."$2"."$3}'| sort -n | awk '{mm=$2;sub(/\.[^.]*$/,"",mm);V[mm]=$2}END{for(v in V){print V[v]}}'|tail -$RELEASES_TO_KEEP > ../keep
echo Keeping releases: $(cat ../keep)
(ls | grep -v KEYS; cat ../keep ../keep)|sort|uniq -u|while read oldVersion; do
set -x
svn rm $oldVersion
svn commit -m "remove $oldVersion from mirrors (it is still available at http://archive.apache.org/dist/geode)"
set +x
[ -z "$DID_REMOVE" ] || DID_REMOVE="${DID_REMOVE} and "
DID_REMOVE="${DID_REMOVE}${oldVersion}"
done
rm ../keep
echo ""
echo "============================================================"
echo "Done promoting release artifacts!"
echo "============================================================"
cd ${GEODE}/../..
echo "Next steps:"
echo "1. Click 'Release' in http://repository.apache.org/ (if you haven't already)"
echo "2. Go to https://github.com/${GITHUB_USER}/homebrew-core/pull/new/apache-geode-${VERSION} and submit the pull request"
echo "3. Go to https://github.com/${GITHUB_USER}/geode/pull/new/add-${VERSION}-to-old-versions and create the pull request"
echo "4. Validate docker image: docker run -it -p 10334:10334 -p 7575:7575 -p 1099:1099 apachegeode/geode"
echo "5. Bulk-transition JIRA issues fixed in this release to Closed"
echo "6. Wait overnight for apache mirror sites to sync"
echo "7. Confirm that your homebrew PR passed its PR checks and was merged to master"
echo "8. Check that documentation has been published to https://geode.apache.org/docs/"
PATCH="${VERSION##*.}"
[ "${PATCH}" -ne 0 ] || echo "9. Ask on the dev list for a volunteer to begin the chore of updating 3rd-party dependency versions on develop"
M=$(date --date '+9 months' '+%a, %B %d %Y' 2>/dev/null || date -v +9m "+%a, %B %d %Y" 2>/dev/null || echo "9 months from now")
[ "${PATCH}" -ne 0 ] || echo "10. Mark your calendar for $M to run ${0%/*}/end_of_support.sh -v ${VERSION_MM}"
echo "Bump support pipeline to ${VERSION_MM}.$(( PATCH + 1 )) by plussing BumpPatch in https://concourse.apachegeode-ci.info/teams/main/pipelines/apache-support-${VERSION_MM//./-}-main?group=Semver%20Management"
echo "Run ${0%/*}/set_versions.sh -v ${VERSION_MM}.$(( PATCH + 1 )) -s"
echo "Finally, send announce email!"
|
davinash/geode
|
dev-tools/release/promote_rc.sh
|
Shell
|
apache-2.0
| 15,527 |
#!/usr/bin/env bash
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
## Select version.
OPENMPI_VERSION=$1
set -e
mkdir -p /tmp
cd /tmp
wget https://download.open-mpi.org/release/open-mpi/v4.1/openmpi-${OPENMPI_VERSION}.tar.gz >/dev/null
gunzip -c openmpi-${OPENMPI_VERSION}.tar.gz | tar xf -
cd openmpi-${OPENMPI_VERSION}
./configure --prefix=/usr/local/ >/dev/null
make -j4 >/dev/null
make install >/dev/null
export PATH=/usr/local/bin:/usr/local/:$PATH
export LD_LIBRARY_PATH=/usr/local/lib/:$LD_LIBRARY_PATH
echo "export PATH=/usr/local/bin/:/usr/local/:$PATH" >> ~/.bashrc
echo "export LD_LIBRARY_PATH=/usr/local/lib/:$LD_LIBRARY_PATH" >> ~/.bashrc
cd /tmp
rm -rf /tmp/openmpi-*
|
tensorflow/recommenders-addons
|
tools/docker/install/install_openmpi.sh
|
Shell
|
apache-2.0
| 1,330 |
find . -name '*.lua' | xargs wc -l
|
zorfmorf/loveprojects
|
drip/linecount.sh
|
Shell
|
apache-2.0
| 35 |
#!/bin/bash
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
set -e
set -x
source tensorflow/tools/ci_build/release/common.sh
install_ubuntu_16_pip_deps pip3.5
# Update bazel
update_bazel_linux
# Export required variables for running pip.sh
export OS_TYPE="UBUNTU"
export CONTAINER_TYPE="GPU"
export TF_PYTHON_VERSION='python3.5'
# Run configure.
export TF_NEED_GCP=1
export TF_NEED_HDFS=1
export TF_NEED_S3=1
export TF_NEED_CUDA=1
export TF_CUDA_VERSION=10.1
export TF_CUDNN_VERSION=7
export TF_NEED_TENSORRT=1
export TENSORRT_INSTALL_PATH=/usr/local/tensorrt
export CC_OPT_FLAGS='-mavx'
export PYTHON_BIN_PATH=$(which ${TF_PYTHON_VERSION})
export PROJECT_NAME="tensorflow_gpu"
export LD_LIBRARY_PATH="/usr/local/cuda:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64:$TENSORRT_INSTALL_PATH/lib"
export TF_CUDA_COMPUTE_CAPABILITIES=3.5,3.7,5.2,6.0,6.1,7.0
yes "" | "$PYTHON_BIN_PATH" configure.py
# Get the default test targets for bazel.
source tensorflow/tools/ci_build/build_scripts/PRESUBMIT_BUILD_TARGETS.sh
# Export optional variables for running pip.sh
export TF_TEST_FILTER_TAGS='gpu,requires-gpu,-no_gpu,-nogpu,-no_oss,-oss_serial,-no_oss_py35'
export TF_BUILD_FLAGS="--config=opt --config=v2 --config=cuda --distinct_host_configuration=false \
--action_env=TF_CUDA_VERSION --action_env=TF_CUDNN_VERSION --crosstool_top=//third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda10.1:toolchain "
export TF_TEST_FLAGS="--test_tag_filters=${TF_TEST_FILTER_TAGS} --build_tag_filters=${TF_TEST_FILTER_TAGS} \
--distinct_host_configuration=false \
--action_env=TF_CUDA_VERSION --action_env=TF_CUDNN_VERSION --test_env=TF2_BEHAVIOR=1 \
--config=cuda --test_output=errors --local_test_jobs=4 --test_lang_filters=py \
--verbose_failures=true --keep_going --define=no_tensorflow_py_deps=true \
--run_under=//tensorflow/tools/ci_build/gpu_build:parallel_gpu_execute "
export TF_TEST_TARGETS="${DEFAULT_BAZEL_TARGETS} -//tensorflow/lite/... "
export TF_PIP_TESTS="test_pip_virtualenv_non_clean test_pip_virtualenv_clean"
export IS_NIGHTLY=0 # Not nightly
export TF_PROJECT_NAME=${PROJECT_NAME}
export TF_PIP_TEST_ROOT="pip_test"
# To build both tensorflow and tensorflow-gpu pip packages
export TF_BUILD_BOTH_GPU_PACKAGES=1
./tensorflow/tools/ci_build/builds/pip_new.sh
|
ppwwyyxx/tensorflow
|
tensorflow/tools/ci_build/release/ubuntu_16/gpu_py35_full/pip.sh
|
Shell
|
apache-2.0
| 2,941 |
#!/bin/bash
#
# SPDX-License-Identifier: Apache-2.0
set -e
CI_VERSION="$(grep "GO_VER" ci.properties | cut -f2- -d'=')"
GO_VERSION="$(go version | cut -f3 -d' ' | sed -E 's/^go//')"
fail() {
>&2 echo "ERROR: ${CI_VERSION} is required to build Fabric and you are using ${GO_VERSION}. Please update go."
exit 2
}
vpos () {
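# Extract one component of a #.#[.#] version string; $2 names the sed
# capture group to keep (1 = major, 2 = minor, 4 = patch), defaulting to 0.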
local v
v="$(echo "$1" | sed -E 's/([[:digit:]]+)\.([[:digit:]]+)(\.([[:digit:]]+))?/'"\\$2"'/g')"
echo "${v:-0}"
}
version() {
vpos "$1" 1
}
release() {
vpos "$1" 2
}
patch () {
vpos "$1" 4
}
# major versions must match
[ "$(version "$GO_VERSION")" -eq "$(version "$CI_VERSION")" ] || fail
# go release must be >= ci release
[ "$(release "$GO_VERSION")" -ge "$(release "$CI_VERSION")" ] || fail
# if releases are equal, patch must be >= ci patch
if [ "$(release "$GO_VERSION")" -eq "$(release "$CI_VERSION")" ]; then
[ "$(patch "$GO_VERSION")" -ge "$(patch "$CI_VERSION")" ] || fail
fi
# all checks passed: the installed go version satisfies the required ci version
exit 0
|
binhn/fabric
|
scripts/check_go_version.sh
|
Shell
|
apache-2.0
| 1,017 |
#!/bin/bash
adb uninstall com.commonsware.android.advservice.client && adb uninstall com.commonsware.android.advservice
#ant -buildfile RemoteClient/build.xml clean debug && ant -buildfile RemoteService/build.xml clean debug
ant -buildfile RemoteClient/build.xml debug && ant -buildfile RemoteService/build.xml debug
adb install RemoteClient/bin/InputServiceTest-debug.apk && adb install RemoteService/bin/InputServiceTest-debug.apk
|
abeluck/android-streams-ipc
|
deploy.sh
|
Shell
|
apache-2.0
| 434 |
#!/bin/bash
set -e
set -x
export JAVA_HOME=${HOME}/jdk
export PATH=${JAVA_HOME}/bin:${PATH}
export M2_HOME=${HOME}/maven
export PATH=${M2_HOME}/bin:${PATH}
echo "#################################################################"
echo "Checking Maven install"
echo
mvn -version
echo
echo "Done"
echo "#################################################################"
cd ${HOME}
export BRANCH=master
git clone https://github.com/cescoffier/vertx-microservices-workshop.git
cd vertx-microservices-workshop
mvn install dependency:go-offline
|
cescoffier/vertx-microservices-workshop
|
packer/scripts/lab_setup.sh
|
Shell
|
apache-2.0
| 542 |
#!/usr/bin/env bash
#
# The Alluxio Open Foundation licenses this work under the Apache License, version 2.0
# (the "License"). You may not use this work except in compliance with the License, which is
# available at www.apache.org/licenses/LICENSE-2.0
#
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied, as more fully set forth in the License.
#
# See the NOTICE file distributed with this work for information regarding copyright ownership.
#
LAUNCHER=
# If debugging is enabled propagate that through to sub-shells
if [[ "$-" == *x* ]]; then
LAUNCHER="bash -x"
fi
BIN=$(cd "$( dirname "$( readlink "$0" || echo "$0" )" )"; pwd)
#start up alluxio
USAGE="Usage: alluxio-start.sh [-hNwm] [-i backup] ACTION [MOPT] [-f]
Where ACTION is one of:
all [MOPT] \tStart all masters, proxies, and workers.
job_master \tStart the job_master on this node.
job_masters \tStart job_masters on master nodes.
job_worker \tStart a job_worker on this node.
job_workers \tStart job_workers on worker nodes.
local [MOPT] \tStart all processes locally.
master \tStart the local master on this node.
secondary_master \tStart the local secondary master on this node.
masters \tStart masters on master nodes.
proxy \tStart the proxy on this node.
proxies \tStart proxies on master and worker nodes.
safe \tScript will run continuously and start the master if it's not running.
worker [MOPT] \tStart a worker on this node.
workers [MOPT] \tStart workers on worker nodes.
logserver \tStart the logserver
restart_worker \tRestart a failed worker on this node.
restart_workers \tRestart any failed workers on worker nodes.
MOPT (Mount Option) is one of:
Mount \tMount the configured RamFS if it is not already mounted.
SudoMount\tMount the configured RamFS using sudo if it is not already mounted.
NoMount \tDo not mount the configured RamFS.
         \tNotice: to avoid the sudo requirement while still using tmpFS on Linux,
         set ALLUXIO_RAM_FOLDER=/dev/shm on each worker and use NoMount.
NoMount is assumed if MOPT is not specified.
-a asynchronously start all processes. The script may exit before all
processes have been started.
-f format Journal, UnderFS Data and Workers Folder on master.
-h display this help.
-i backup a journal backup to restore the master from. The backup should be
a URI path within the root under filesystem, e.g.
hdfs://mycluster/alluxio_backups/alluxio-journal-YYYY-MM-DD-timestamp.gz.
-N do not try to kill previous running processes before starting new ones.
-w wait for processes to end before returning.
Supported environment variables:
ALLUXIO_JOB_WORKER_COUNT - identifies how many job workers to start per node (default = 1)"
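# Hypothetical example invocations (illustrative only, not taken from the docs):
#   bin/alluxio-start.sh all SudoMount     # start everything, mounting the ramdisk via sudo
#   bin/alluxio-start.sh -i hdfs://mycluster/alluxio_backups/backup.gz masters   # restore masters from a backup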
ensure_dirs() {
if [[ ! -d "${ALLUXIO_LOGS_DIR}" ]]; then
echo "ALLUXIO_LOGS_DIR: ${ALLUXIO_LOGS_DIR}"
mkdir -p ${ALLUXIO_LOGS_DIR}
fi
}
# returns 1 if "$1" contains "$2", 0 otherwise.
contains() {
if [[ "$1" = *"$2"* ]]; then
return 1
fi
return 0
}
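# Example: the inverted return value means callers append a default only when
# the needle is absent, e.g.
#   contains "${ALLUXIO_WORKER_JAVA_OPTS}" "Xmx"
#   if [[ $? -eq 0 ]]; then ALLUXIO_WORKER_JAVA_OPTS+=" -Xmx4g "; fi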
get_env() {
DEFAULT_LIBEXEC_DIR="${BIN}"/../libexec
ALLUXIO_LIBEXEC_DIR=${ALLUXIO_LIBEXEC_DIR:-${DEFAULT_LIBEXEC_DIR}}
. ${ALLUXIO_LIBEXEC_DIR}/alluxio-config.sh
CLASSPATH=${ALLUXIO_SERVER_CLASSPATH}
}
# Pass ram folder to check as $1
# Return 0 if ram folder is mounted as tmpfs or ramfs, 1 otherwise
is_ram_folder_mounted() {
local mounted_fs=""
if [[ $(uname -s) == Darwin ]]; then
mounted_fs=$(mount -t "hfs" | grep '/Volumes/' | cut -d " " -f 3)
else
mounted_fs=$(mount -t "tmpfs,ramfs" | cut -d " " -f 3)
fi
for fs in ${mounted_fs}; do
if [[ "${1}" == "${fs}" || "${1}" =~ ^"${fs}"\/.* ]]; then
return 0
fi
done
return 1
}
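# Example (hypothetical path): is_ram_folder_mounted /mnt/ramdisk && echo "already mounted"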
check_mount_mode() {
case $1 in
Mount);;
SudoMount);;
NoMount)
local tier_alias=$(${BIN}/alluxio getConf alluxio.worker.tieredstore.level0.alias)
local tier_path=$(${BIN}/alluxio getConf alluxio.worker.tieredstore.level0.dirs.path)
if [[ ${tier_alias} != "MEM" ]]; then
# if the top tier is not MEM, skip check
return
fi
is_ram_folder_mounted "${tier_path}"
if [[ $? -ne 0 ]]; then
echo "ERROR: Ramdisk ${tier_path} is not mounted with mount option NoMount. Use alluxio-mount.sh to mount ramdisk." >&2
echo -e "${USAGE}" >&2
exit 1
fi
if [[ "${tier_path}" =~ ^"/dev/shm"\/{0,1}$ ]]; then
echo "WARNING: Using tmpFS does not guarantee data to be stored in memory."
echo "WARNING: Check vmstat for memory statistics (e.g. swapping)."
fi
;;
*)
if [[ -z $1 ]]; then
echo "This command requires a mount mode be specified" >&2
else
echo "Invalid mount mode: $1" >&2
fi
echo -e "${USAGE}" >&2
exit 1
esac
}
# pass mode as $1
do_mount() {
MOUNT_FAILED=0
case "$1" in
Mount|SudoMount)
local tier_alias=$(${BIN}/alluxio getConf alluxio.worker.tieredstore.level0.alias)
local tier_path=$(${BIN}/alluxio getConf alluxio.worker.tieredstore.level0.dirs.path)
if [[ ${tier_alias} != "MEM" ]]; then
echo "Can't Mount/SudoMount when alluxio.worker.tieredstore.level0.alias is not MEM"
exit 1
fi
is_ram_folder_mounted "${tier_path}" # Returns 0 if already mounted.
if [[ $? -eq 0 ]]; then
echo "Ramdisk already mounted. Skipping mounting procedure."
else
echo "Ramdisk not detected. Mounting..."
${LAUNCHER} "${BIN}/alluxio-mount.sh" $1
MOUNT_FAILED=$?
fi
;;
NoMount)
;;
*)
echo "This command requires a mount mode be specified" >&2
echo -e "${USAGE}" >&2
exit 1
esac
}
stop() {
${BIN}/alluxio-stop.sh $1
}
start_job_master() {
if [[ "$1" == "-f" ]]; then
${LAUNCHER} "${BIN}/alluxio" format
fi
if [[ ${ALLUXIO_MASTER_SECONDARY} != "true" ]]; then
if [[ -z ${ALLUXIO_JOB_MASTER_JAVA_OPTS} ]] ; then
ALLUXIO_JOB_MASTER_JAVA_OPTS=${ALLUXIO_JAVA_OPTS}
fi
echo "Starting job master @ $(hostname -f). Logging to ${ALLUXIO_LOGS_DIR}"
(nohup ${JAVA} -cp ${CLASSPATH} \
${ALLUXIO_JOB_MASTER_JAVA_OPTS} \
alluxio.master.AlluxioJobMaster > ${ALLUXIO_LOGS_DIR}/job_master.out 2>&1) &
fi
}
start_job_masters() {
${LAUNCHER} "${BIN}/alluxio-masters.sh" "${BIN}/alluxio-start.sh" "-a" "job_master"
}
start_job_worker() {
if [[ -z ${ALLUXIO_JOB_WORKER_JAVA_OPTS} ]] ; then
ALLUXIO_JOB_WORKER_JAVA_OPTS=${ALLUXIO_JAVA_OPTS}
fi
echo "Starting job worker @ $(hostname -f). Logging to ${ALLUXIO_LOGS_DIR}"
(nohup ${JAVA} -cp ${CLASSPATH} \
${ALLUXIO_JOB_WORKER_JAVA_OPTS} \
alluxio.worker.AlluxioJobWorker > ${ALLUXIO_LOGS_DIR}/job_worker.out 2>&1) &
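  # Any additional job workers bind ephemeral ports (port 0) so they do not
  # collide with the first worker's configured RPC/web ports.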
ALLUXIO_JOB_WORKER_JAVA_OPTS+=" -Dalluxio.job.worker.rpc.port=0 -Dalluxio.job.worker.web.port=0"
local nworkers=${ALLUXIO_JOB_WORKER_COUNT:-1}
for (( c = 1; c < ${nworkers}; c++ )); do
echo "Starting job worker #$((c+1)) @ $(hostname -f). Logging to ${ALLUXIO_LOGS_DIR}"
(nohup ${JAVA} -cp ${CLASSPATH} \
${ALLUXIO_JOB_WORKER_JAVA_OPTS} \
alluxio.worker.AlluxioJobWorker > ${ALLUXIO_LOGS_DIR}/job_worker.out 2>&1) &
done
}
start_job_workers() {
${LAUNCHER} "${BIN}/alluxio-workers.sh" "${BIN}/alluxio-start.sh" "-a" "job_worker"
}
start_logserver() {
if [[ ! -d "${ALLUXIO_LOGSERVER_LOGS_DIR}" ]]; then
echo "ALLUXIO_LOGSERVER_LOGS_DIR: ${ALLUXIO_LOGSERVER_LOGS_DIR}"
mkdir -p ${ALLUXIO_LOGSERVER_LOGS_DIR}
fi
echo "Starting logserver @ $(hostname -f)."
(nohup "${JAVA}" -cp ${CLASSPATH} \
${ALLUXIO_LOGSERVER_JAVA_OPTS} \
alluxio.logserver.AlluxioLogServer "${ALLUXIO_LOGSERVER_LOGS_DIR}" > ${ALLUXIO_LOGS_DIR}/logserver.out 2>&1) &
  # Wait 1s before starting other Alluxio servers; otherwise a race condition may lead to connection errors.
sleep 1
}
start_master() {
if [[ "$1" == "-f" ]]; then
${LAUNCHER} ${BIN}/alluxio format
  elif [[ $(${LAUNCHER} ${BIN}/alluxio getConf ${ALLUXIO_MASTER_JAVA_OPTS} alluxio.master.journal.type) == "EMBEDDED" ]]; then
    JOURNAL_DIR=$(${LAUNCHER} ${BIN}/alluxio getConf ${ALLUXIO_MASTER_JAVA_OPTS} alluxio.master.journal.folder)
if [ -f "${JOURNAL_DIR}" ]; then
echo "Journal location ${JOURNAL_DIR} is a file not a directory. Please remove the file before retrying."
elif [ ! -e "${JOURNAL_DIR}" ]; then
${LAUNCHER} ${BIN}/alluxio format
fi
fi
if [[ ${ALLUXIO_MASTER_SECONDARY} == "true" ]]; then
if [[ -z ${ALLUXIO_SECONDARY_MASTER_JAVA_OPTS} ]]; then
ALLUXIO_SECONDARY_MASTER_JAVA_OPTS=${ALLUXIO_JAVA_OPTS}
fi
# use a default Xmx value for the master
contains "${ALLUXIO_SECONDARY_MASTER_JAVA_OPTS}" "Xmx"
if [[ $? -eq 0 ]]; then
ALLUXIO_SECONDARY_MASTER_JAVA_OPTS+=" -Xmx8g "
fi
echo "Starting secondary master @ $(hostname -f). Logging to ${ALLUXIO_LOGS_DIR}"
(nohup "${JAVA}" -cp ${CLASSPATH} \
${ALLUXIO_SECONDARY_MASTER_JAVA_OPTS} \
alluxio.master.AlluxioSecondaryMaster > ${ALLUXIO_LOGS_DIR}/secondary_master.out 2>&1) &
else
if [[ -z ${ALLUXIO_MASTER_JAVA_OPTS} ]]; then
ALLUXIO_MASTER_JAVA_OPTS=${ALLUXIO_JAVA_OPTS}
fi
if [[ -n ${journal_backup} ]]; then
ALLUXIO_MASTER_JAVA_OPTS+=" -Dalluxio.master.journal.init.from.backup=${journal_backup}"
fi
# use a default Xmx value for the master
contains "${ALLUXIO_MASTER_JAVA_OPTS}" "Xmx"
if [[ $? -eq 0 ]]; then
ALLUXIO_MASTER_JAVA_OPTS+=" -Xmx8g "
fi
echo "Starting master @ $(hostname -f). Logging to ${ALLUXIO_LOGS_DIR}"
(nohup "${JAVA}" -cp ${CLASSPATH} \
${ALLUXIO_MASTER_JAVA_OPTS} \
alluxio.master.AlluxioMaster > ${ALLUXIO_LOGS_DIR}/master.out 2>&1) &
fi
}
start_masters() {
start_opts=""
if [[ -n ${journal_backup} ]]; then
start_opts="-i ${journal_backup}"
fi
${LAUNCHER} "${BIN}/alluxio-masters.sh" "${BIN}/alluxio-start.sh" ${start_opts} "-a" "master" $1
}
start_proxy() {
if [[ -z ${ALLUXIO_PROXY_JAVA_OPTS} ]]; then
ALLUXIO_PROXY_JAVA_OPTS=${ALLUXIO_JAVA_OPTS}
fi
echo "Starting proxy @ $(hostname -f). Logging to ${ALLUXIO_LOGS_DIR}"
(nohup "${JAVA}" -cp ${CLASSPATH} \
${ALLUXIO_PROXY_JAVA_OPTS} \
alluxio.proxy.AlluxioProxy > ${ALLUXIO_LOGS_DIR}/proxy.out 2>&1) &
}
start_proxies() {
${LAUNCHER} "${BIN}/alluxio-masters.sh" "${BIN}/alluxio-start.sh" "-a" "proxy"
${LAUNCHER} "${BIN}/alluxio-workers.sh" "${BIN}/alluxio-start.sh" "-a" "proxy"
}
start_worker() {
do_mount $1
if [ ${MOUNT_FAILED} -ne 0 ] ; then
echo "Mount failed, not starting worker" >&2
exit 1
fi
if [[ -z ${ALLUXIO_WORKER_JAVA_OPTS} ]]; then
ALLUXIO_WORKER_JAVA_OPTS=${ALLUXIO_JAVA_OPTS}
fi
# use a default Xmx value for the worker
contains "${ALLUXIO_WORKER_JAVA_OPTS}" "Xmx"
if [[ $? -eq 0 ]]; then
ALLUXIO_WORKER_JAVA_OPTS+=" -Xmx4g "
fi
# use a default MaxDirectMemorySize value for the worker
contains "${ALLUXIO_WORKER_JAVA_OPTS}" "XX:MaxDirectMemorySize"
if [[ $? -eq 0 ]]; then
ALLUXIO_WORKER_JAVA_OPTS+=" -XX:MaxDirectMemorySize=4g "
fi
echo "Starting worker @ $(hostname -f). Logging to ${ALLUXIO_LOGS_DIR}"
(nohup "${JAVA}" -cp ${CLASSPATH} \
${ALLUXIO_WORKER_JAVA_OPTS} \
alluxio.worker.AlluxioWorker > ${ALLUXIO_LOGS_DIR}/worker.out 2>&1 ) &
}
start_workers() {
${LAUNCHER} "${BIN}/alluxio-workers.sh" "${BIN}/alluxio-start.sh" "-a" "worker" $1
}
restart_worker() {
if [[ -z ${ALLUXIO_WORKER_JAVA_OPTS} ]]; then
ALLUXIO_WORKER_JAVA_OPTS=${ALLUXIO_JAVA_OPTS}
fi
  RUN=$(ps -ef | grep "alluxio.worker.AlluxioWorker" | grep "java" | wc -l)
if [[ ${RUN} -eq 0 ]]; then
echo "Restarting worker @ $(hostname -f). Logging to ${ALLUXIO_LOGS_DIR}"
(nohup "${JAVA}" -cp ${CLASSPATH} \
${ALLUXIO_WORKER_JAVA_OPTS} \
alluxio.worker.AlluxioWorker > ${ALLUXIO_LOGS_DIR}/worker.out 2>&1) &
fi
}
restart_workers() {
${LAUNCHER} "${BIN}/alluxio-workers.sh" "${BIN}/alluxio-start.sh" "restart_worker"
}
get_offline_worker() {
local run=
local result=""
  run=$(ps -ef | grep "alluxio.worker.AlluxioWorker" | grep "java" | wc -l)
if [[ ${run} -eq 0 ]]; then
result=$(hostname -f)
fi
echo "${result}"
}
get_offline_workers() {
local result=""
local run=
local i=0
local workers=$(cat "${ALLUXIO_CONF_DIR}/workers" | sed "s/#.*$//;/^$/d")
for worker in $(echo ${workers}); do
if [[ ${i} -gt 0 ]]; then
result+=","
fi
    run=$(ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no -tt ${worker} \
      ps -ef | grep "alluxio.worker.AlluxioWorker" | grep "java" | wc -l)
if [[ ${run} -eq 0 ]]; then
result+="${worker}"
fi
i=$((i+1))
done
echo "${result}"
}
start_monitor() {
local action=$1
local nodes=$2
local run=
if [[ "${action}" == "restart_worker" ]]; then
action="worker"
if [[ -z "${nodes}" ]]; then
run="false"
fi
elif [[ "${action}" == "restart_workers" ]]; then
action="workers"
if [[ -z "${nodes}" ]]; then
run="false"
fi
elif [[ "${action}" == "logserver" || "${action}" == "safe" ]]; then
run="false"
fi
if [[ -z "${run}" ]]; then
${LAUNCHER} "${BIN}/alluxio-monitor.sh" "${action}" "${nodes}"
else
echo "Skipping the monitor checks..."
fi
}
run_safe() {
  while true; do
    RUN=$(ps -ef | grep "alluxio.master.AlluxioMaster" | grep "java" | wc -l)
if [[ ${RUN} -eq 0 ]]; then
echo "Restarting the system master..."
start_master
fi
echo "Alluxio is running... "
sleep 2
done
}
main() {
# get environment
get_env
# ensure log/data dirs
ensure_dirs
while getopts "ahNwi:" o; do
case "${o}" in
a)
async="true"
;;
h)
echo -e "${USAGE}"
exit 0
;;
i)
journal_backup=${OPTARG}
;;
N)
killonstart="no"
;;
w)
wait="true"
;;
*)
echo -e "${USAGE}" >&2
exit 1
;;
esac
done
shift $((${OPTIND} - 1))
ACTION=$1
if [[ -z "${ACTION}" ]]; then
echo "Error: no ACTION specified" >&2
echo -e "${USAGE}" >&2
exit 1
fi
shift
MOPT=$1
# Set MOPT.
case "${ACTION}" in
all|worker|workers|local)
if [[ -z "${MOPT}" ]]; then
echo "Assuming NoMount by default."
MOPT="NoMount"
elif [[ "${MOPT}" == "-f" ]]; then
echo "Assuming SudoMount given -f option."
MOPT="SudoMount"
else
shift
fi
if [[ "${ACTION}" = "worker" ]] || [[ "${ACTION}" = "local" ]]; then
check_mount_mode "${MOPT}"
fi
;;
*)
MOPT=""
;;
esac
FORMAT=$1
if [[ ! -z "${FORMAT}" && "${FORMAT}" != "-f" ]]; then
echo -e "${USAGE}" >&2
exit 1
fi
MONITOR_NODES=
if [[ ! "${async}" ]]; then
case "${ACTION}" in
restart_worker)
MONITOR_NODES=$(get_offline_worker)
;;
restart_workers)
MONITOR_NODES=$(get_offline_workers)
;;
*)
MONITOR_NODES=""
;;
esac
fi
if [[ "${killonstart}" != "no" ]]; then
case "${ACTION}" in
all | local | master | masters | secondary_master | job_master | job_masters | proxy | proxies | worker | workers | job_worker | job_workers | logserver)
stop ${ACTION}
sleep 1
;;
esac
fi
case "${ACTION}" in
all)
start_masters "${FORMAT}"
start_job_masters
sleep 2
start_workers "${MOPT}"
start_job_workers
start_proxies
;;
local)
local master_hostname=$(${BIN}/alluxio getConf ${ALLUXIO_MASTER_JAVA_OPTS}\
alluxio.master.hostname)
local is_master_set_and_local=false
if [[ -n ${master_hostname} ]]; then
local local_addresses=( "localhost" "127.0.0.1" $(hostname -s) $(hostname -f) )
if [[ $(uname -a) != Darwin* ]]; then
# Assuming Linux
local_addresses+=( $(hostname --ip-address) )
fi
for local_address in ${local_addresses[*]}
do
if [[ ${local_address} == ${master_hostname} ]]; then
is_master_set_and_local=true
break
fi
done
fi
if [[ ${is_master_set_and_local} != true ]]; then
echo "# The following line is auto-generated by command \"bin/alluxio-start.sh local\"" \
>> "${ALLUXIO_CONF_DIR}/alluxio-site.properties"
echo "alluxio.master.hostname=localhost" >> "${ALLUXIO_CONF_DIR}/alluxio-site.properties"
fi
if [[ "${FORMAT}" == "-f" ]]; then
${LAUNCHER} ${BIN}/alluxio formatJournal
${LAUNCHER} ${BIN}/alluxio formatWorker
fi
start_master
ALLUXIO_MASTER_SECONDARY=true
# We only start a secondary master when using a UFS journal.
local journal_type=$(${BIN}/alluxio getConf ${ALLUXIO_MASTER_JAVA_OPTS} \
alluxio.master.journal.type | awk '{print toupper($0)}')
if [[ ${journal_type} == "UFS" ]]; then
start_master
fi
ALLUXIO_MASTER_SECONDARY=false
start_job_master
sleep 2
start_worker "${MOPT}"
start_job_worker
start_proxy
;;
job_master)
start_job_master
;;
job_masters)
start_job_masters
;;
job_worker)
start_job_worker
;;
job_workers)
start_job_workers
;;
master)
start_master "${FORMAT}"
;;
secondary_master)
ALLUXIO_MASTER_SECONDARY=true
start_master
ALLUXIO_MASTER_SECONDARY=false
;;
masters)
start_masters
;;
proxy)
start_proxy
;;
proxies)
start_proxies
;;
restart_worker)
restart_worker
;;
restart_workers)
restart_workers
;;
safe)
run_safe
;;
worker)
start_worker "${MOPT}"
;;
workers)
start_workers "${MOPT}"
;;
logserver)
start_logserver
;;
*)
echo "Error: Invalid ACTION: ${ACTION}" >&2
echo -e "${USAGE}" >&2
exit 1
esac
sleep 2
if [[ "${wait}" ]]; then
wait
fi
if [[ ! "${async}" ]]; then
start_monitor "${ACTION}" "${MONITOR_NODES}"
fi
}
main "$@"
|
madanadit/alluxio
|
bin/alluxio-start.sh
|
Shell
|
apache-2.0
| 18,541 |
find build/artifacts/NYoShWorkbenchDistribution/ -name NYoShWorkbench-\*|xargs -I{} scp {} campagnelab.org:www/files/
|
CampagneLaboratory/NYoSh
|
deploy-to-web.sh
|
Shell
|
apache-2.0
| 121 |
#!/bin/bash
# Settings
DEFAULT_TARGET=../src/simulation/modelsim/memory.lst
ASSEMBLER=./tiny8v1_assembler
ADDRESSABILITY=1
# Command line parameters
ASM_FILE=$1
TARGET_FILE=${2:-$DEFAULT_TARGET}
ADDRESSABILITY=${3:-$ADDRESSABILITY}
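# Example (hypothetical paths): ./load_memory.sh progs/blink.asm ../src/simulation/modelsim/memory.lst 1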
# Print usage
if [[ "$#" -lt 1 ]]; then
echo "Compile an LC-3b assembly file and write a memory file for simulation."
echo "Usage: $0 <asm-file> [memory-file]"
exit 0
fi
# Remove temporary directory on exit
cleanup()
{
rm -rf -- "$WORK_DIR"
# echo "$WORK_DIR"
}
trap cleanup exit
# Create temporary directory
WORK_DIR="$(mktemp -d)"
# Copy files to temporary directory
cp "$ASM_FILE" "$WORK_DIR"
# Fail if assembler cannot be found
if [ ! -x "$ASSEMBLER" ]; then
echo "Cannot execute assembler at $ASSEMBLER." >&2
echo "Make sure it exists and is executable." >&2
exit 1
fi
# Assemble code
"$ASSEMBLER" -i "${WORK_DIR}/$(basename $ASM_FILE)" -o "${WORK_DIR}/temp.lst"
OBJ_FILE="${WORK_DIR}/temp.lst"
# Fail if object file doesn't exist or has no memory content
if [[ ! -e "$OBJ_FILE" || "$(cat "$OBJ_FILE" | wc -c)" -le "16" ]]; then
echo "Error assembling $ASM_FILE, not generating memory file" >&2
exit 2
fi
# Fail if the target directory doesn't exist
if [[ ! -d "$(dirname "$TARGET_FILE")" ]]; then
echo "Directory $(dirname "$TARGET_FILE") does not exist." >&2
echo "Did you specify the correct target path? Aborting." >&2
exit 3
fi
# Ask if user wants to overwrite target
if [ -e "$TARGET_FILE" ]; then
echo "Target file $TARGET_FILE exists."
read -p "Overwrite? [y/N] " CONF
shopt -s nocasematch
if [[ "${CONF}" != "y" && "${CONF}" != "yes" ]]; then
echo "Aborting." >&2
exit 0
fi
shopt -u nocasematch
fi
mv "${WORK_DIR}/temp.lst" "${TARGET_FILE}"
echo "Assembled $ASM_FILE and wrote memory contents to $TARGET_FILE"
|
mgold95/tiny8v1
|
bin/load_memory.sh
|
Shell
|
apache-2.0
| 1,862 |
#!/usr/bin/env bash
# Usage: ./build_env_file.sh <obsthresh> <cost_inscribed_thresh> <cost_possibly_circumscribed_thresh>
# <cellsize> <nominalvel> <timetoturn45degsinplace> <start_x> <start_y> <start_theta> <end_x> <end_y> <end_theta> [<env_file>
# <map_file>]
# Confirm that correct number of params were given
if [ $# -ne 12 -a $# -ne 14 ]
then
echo "You gave $# arguments, expected 12 or 14"
echo "Usage: ./build_env_file.sh <obsthresh> <cost_inscribed_thresh> <cost_possibly_circumscribed_thresh> <cellsize> <nominalvel> <timetoturn45degsinplace> <start_x> <start_y> <start_theta> <end_x> <end_y> <end_theta> [<env_file> <map_file>]"
exit 1
fi
# Read and store call params
obsthresh=$1
cost_inscribed_thresh=$2
cost_possibly_circumscribed_thresh=$3
cellsize=$4 # Meters
nominalvel=$5 # Meters per second
timetoturn45degsinplace=$6 # Seconds
start_x=$7
start_y=$8
start_theta=$9
end_x=${10}
end_y=${11}
end_theta=${12}
if [ $# -eq 12 ]
then
# File and directory locations not given, assume being called from ./scripts
echo "Using default files"
ENV_FILE="../qwe/navigation/envs/env.cfg"
MAP_FILE="../qwe/navigation/maps/binary_map.txt"
fi
if [ $# -eq 14 ]
then
# File and directory locations given by caller
ENV_FILE=${13}
MAP_FILE=${14}
fi
# Get size of course in cells
y_len=$(cat $MAP_FILE | tr -cd "01\n" | wc -l)
total_bytes=$(cat $MAP_FILE | tr -cd "01" | wc -c)
x_len=$(expr $total_bytes / $y_len)
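# Worked example (hypothetical map): a file of 4 lines with 8 map bits each
# gives y_len=4, total_bytes=32, and x_len=32/4=8 cells per row.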
# Write env information (the first echo truncates any existing env file)
echo "discretization(cells):" $x_len $y_len > $ENV_FILE
echo "obsthresh:" $obsthresh >> $ENV_FILE
echo "cost_inscribed_thresh:" $cost_inscribed_thresh >> $ENV_FILE
echo "cost_possibly_circumscribed_thresh:" $cost_possibly_circumscribed_thresh >> $ENV_FILE
echo "cellsize(meters):" $cellsize >> $ENV_FILE
echo "nominalvel(mpersecs):" $nominalvel >> $ENV_FILE
echo "timetoturn45degsinplace(secs):" $timetoturn45degsinplace >> $ENV_FILE
echo "start(meters,rads):" $start_x $start_y $start_theta >> $ENV_FILE
echo "end(meters,rads):" $end_x $end_y $end_theta >> $ENV_FILE
echo "environment:" >> $ENV_FILE
# Append env map
cat $MAP_FILE >> $ENV_FILE
|
IEEERobotics/high-level
|
scripts/build_env_file.sh
|
Shell
|
bsd-2-clause
| 2,128 |
#!/usr/bin/env bash
#set -x
# Checking if script is running as root
function checkroot {
if ! [ $(id -u) = 0 ]
then
echo "You need to have root privileges to run this script
Please try again, this time using 'sudo'. Exiting."
exit
fi
}
checkroot
# Mapping distro identification commands
YUM_CMD=$(which yum)
APT_GET_CMD=$(which apt-get)
# Capture Ctrl + C
trap ctrl_c INT
function ctrl_c() {
echo ""
echo "GOOD BYE -- LinuxUserWizard"
echo ""
exit
}
# Initialize logs at the script launch, if log file is not there
function initializelogs {
if [ ! -f /var/log/luw.log ]
then
touch /var/log/luw.log
echo "################################" >> /var/log/luw.log
echo `date` -- "Log file initiated" >> /var/log/luw.log
echo "################################" >> /var/log/luw.log
fi
}
initializelogs # initializing the log file at launch
function logentry {
echo `date` "|" "Operation: "$logoperation "|" "User: "$luwuser >> /var/log/luw.log
}
function exiting {
echo "Do you want to do another operation? (Y/N)"
read exitanswer
if [ "$exitanswer" = "y" ] || [ "$exitanswer" = "Y" ]
then
mainmenu
elif [ "$exitanswer" = "n" ] || [ "$exitanswer" = "N" ]
then
echo ""
echo "GOOD BYE -- LinuxUserWizard"
echo ""
logoperation="Exited the program" && logentry
exit
else
echo "Wrong option, please enter 'Y' or 'N'"
exiting
fi
}
function wrongoption {
echo "Wrong option, please enter 'Y' or 'N'"
exiting
}
function sshdirmake {
mkdir /home/$luwuser/.ssh
logoperation="SSH directory created"
logentry
touch /home/$luwuser/.ssh/authorized_keys
logoperation="authorized_keys file created"
logentry
}
function keypairgen {
ssh-keygen -t rsa -f /home/$luwuser/.ssh/id_rsa -q -N ""
cat /home/$luwuser/.ssh/id_rsa.pub >> /home/$luwuser/.ssh/authorized_keys
chmod 600 /home/$luwuser/.ssh/authorized_keys
chown -R $luwuser /home/$luwuser
logoperation="SSH key generated"
logentry
}
function packageinstaller {
echo $YUM_CMD > /dev/null
echo $APT_GET_CMD > /dev/null
if [[ ! -z $YUM_CMD ]]
then
echo "Using 'yum' to install the requirements.." && sleep 2
yum -y install $packagetoinstall
elif [[ ! -z $APT_GET_CMD ]]
then
echo "Using 'apt-get' to install the requirements.." && sleep 2
apt-get install $packagetoinstall -y
else
echo "Can't find package manager" && sleep 2
exiting
fi
}
function sudoprivilage {
if grep -Fxq "$luwuser" /etc/sudoers
then
echo "User is already in /etc/sudoers"
else
echo "Would you like to give SUDO privilages (administrative access) to this user? (y\n)"
read sudoanswer
if [ $sudoanswer = "y" ] || [ $sudoanswer = "Y" ]
then
echo "$luwuser ALL=(ALL:ALL) ALL" >> /etc/sudoers
else
echo "User will be left with no sudo access"
fi
fi
}
function mainmenu {
clear
echo "#########################################################"
echo "# ***** LINUX USER WIZARD ***** #"
echo "#########################################################"
echo "# #"
echo "# - Create a user with SSH key - Press 1 #"
echo "# - Remove a user and keys - Press 2 #"
echo "# - Enable password login with no key - Press 3 #"
echo "# - Disable password login with no key - Press 4 #"
echo "# - View users with a shell - Press 5 #"
echo "# - View user's private key - Press 6 #"
echo "# - View logs - Press 7 #"
echo "# - Delete/Re-initialize logs - Press 8 #"
echo "# - Exit - Press 9 #"
echo "# #"
echo "#########################################################"
echo "# ***** AWS TOOLBOX ***** #"
echo "#########################################################"
echo "# #"
echo "# - Install latest AWS CLI - Press 10 #"
echo "# - Install/Configure systat SAR - Press 11 #"
echo "# - Install/Configure CloudWatch Logs Agent - Press 12 #"
echo "# - Generate SOS report for AWS Support - Press 13 #"
echo "# - Install/Configure Java - Press 14 #"
echo "# - Install/Compile iPerf v2.0.5 - Press 15 #"
echo "# #"
echo "#########################################################"
echo " "
echo " - L.U.W. logs:/var/log/luw.log "
echo " "
echo " Select a number and hit 'Enter' "
read answer
if [ "$answer" = "1" ] ### OPTION 1 START
then
echo "Please enter a username"
read luwuser
if [ -d /home/$luwuser ]
then echo "Homefolder exists do you want to overwrite it? (y/n)"
read homefolderanswer
if [ "$homefolderanswer" = "y" ] || [ "$homefolderanswer" = "Y" ]
then
rm -rf /home/$luwuser && logoperation="Homefolder deleted" && logentry
useradd $luwuser -s /bin/bash
chown -R $luwuser /home/$luwuser
sshdirmake
keypairgen
sudoprivilage
exiting
else
echo "Not creating the user since you want to keep the homefolder"
echo "Leaving homefolder intact"
echo "Do you still want to create SSH key and put in user's home folder? (y/n)"
read keyans
if [ "$keyans" = "y" ] || [ "$keyans" = "Y" ]
then
if [ ! -d /home/$luwuser/.ssh ]; then
sshdirmake
fi
keypairgen
fi
sudoprivilage
exiting
fi
fi
if [ ! -d /home/$luwuser ]
then
useradd $luwuser -s /bin/bash && logoperation="New user added" && logentry
if [ ! -d /home/$luwuser ] # check because Ubuntu does not create the home folder on user creation
then
mkdir /home/$luwuser && logoperation="Homefolder created" && logentry
fi
sshdirmake
keypairgen
sudoprivilage
exiting
fi
fi ### OPTION 1 END
if [ "$answer" = "2" ] ### OPTION 2 START
then
echo "Please enter a username to delete"
read luwuser
if [ -d /home/$luwuser ]
then
userdel -r $luwuser
sed -i "/$luwuser/d" /etc/passwd
sed -i "/$luwuser/d" /etc/sudoers
rm -rf /home/$luwuser
if [ ! -d /home/$luwuser ]; then
echo "User homefolder deleted"
logoperation="User deleted" && logentry
fi
if [ -f /var/spool/mail/$luwuser ]; then
rm -rf /var/spool/mail/$luwuser
echo "Mail file removed"
fi
exiting
else
sed -i "/$luwuser/d" /etc/passwd
sed -i "/$luwuser/d" /etc/sudoers
if [ -f /var/spool/mail/$luwuser ]; then
rm -rf /var/spool/mail/$luwuser
echo "Mail file removed"
fi
echo "Home folder does not exist"
logoperation="Homefolder could not be found" && logentry
exiting
fi
fi ### OPTION 2 END
if [ "$answer" = "3" ] ### OPTION 3 START
then
if [ -f /etc/ssh/sshd_config ]
then
sed -i 's/#PasswordAuthentication/PasswordAuthentication/' /etc/ssh/sshd_config && logoperation="SSH via password enabled" && logentry
echo "SSH with password is enabled"
service sshd restart
service ssh restart
exiting
else
echo "sshd_config file is not under /etc/ssh, please edit manually and set
#PasswordAuthentication yes to PasswordAuthentication yes | remove # (uncomment)" && logoperation="sshd_config can't be found" && logentry
exiting
fi
fi ### OPTION 3 END
if [ "$answer" = "4" ] ### OPTION 4 START
then
if [ -f /etc/ssh/sshd_config ]
then
sed -i 's/PasswordAuthentication/#PasswordAuthentication/' /etc/ssh/sshd_config && logoperation="SSH via password disabled" && logentry
echo "SSH with password is disabled"
service sshd restart
service ssh restart
exiting
else
echo "sshd_config file is not under /etc/ssh, please edit manually and set
#PasswordAuthentication yes to PasswordAuthentication yes | remove # (uncomment)" && logoperation="sshd_config can't be found" && logentry
exiting
fi
fi ### OPTION 4 END
if [ "$answer" = "5" ]
then
cat /etc/passwd | grep /bin/bash | less && logoperation="Viewed users with shell" && logentry
mainmenu
fi
if [ "$answer" = "6" ]
then
echo "Please enter the username to view it's private key"
read keyviewuser
if [ -f /home/$keyviewuser/.ssh/id_rsa ]
then
cat /home/$keyviewuser/.ssh/id_rsa | less && logoperation="Private Key viewed" && logentry
exiting
else
echo "Private key is not under" /home/$keyviewuser "or not named id_rsa" && logoperation="Private key can't be found" && logentry
exiting
fi
fi
if [ "$answer" = "7" ]
then
logoperation="Viewed logs" && logentry && less /var/log/luw.log
mainmenu
fi
if [ "$answer" = "8" ]
then
rm -f /var/log/luw.log
initializelogs
echo "Logs has been deleted and re-initialized"
exiting
fi
if [ "$answer" = "9" ]
then
echo ""
echo "GOOD BYE -- LinuxUserWizard"
echo ""
exit
fi
if [ "$answer" = "10" ]
then
packagetoinstall="unzip" && packageinstaller
curl "https://s3.amazonaws.com/aws-cli/awscli-bundle.zip" -o "/tmp/awscli-bundle.zip"
unzip /tmp/awscli-bundle.zip -d /tmp
/tmp/awscli-bundle/install -i /usr/local/aws -b /usr/local/bin/aws
echo "Would you like to configure AWS CLI now? (y/n)"
read awsclians
if [ "$awsclians" = "y" ] || [ "$awsclians" = "Y" ]
then
aws configure && logoperation="AWS CLI Installed" && logentry && exiting
else
echo "Please issue 'aws configure' command after closing this tool"
logoperation="AWS CLI Installed" && logentry
exiting
fi
rm -f /tmp/awscli-bundle.zip
exiting
fi
if [ "$answer" = "11" ]
then
#packagetoinstall="sysstat" && packageinstaller
sed -i 's/ENABLED="false"/ENABLED="true"/' /etc/default/sysstat
sed -i 's/5-55\/10/*\/2/' /etc/cron.d/sysstat
service sysstat restart
echo "SYSSTAT installed & configured"
echo "SAR logs are under /var/log/sa and will be kept in rotation for 30 days" && sleep 2
exiting
fi
if [ "$answer" = "12" ]
then
echo "Coming soon."
exiting
fi
if [ "$answer" = "13" ]
then
wget -q -O ginfo.sh 'http://bit.ly/1scykJV'
chmod 755 ginfo.sh
./ginfo.sh
exiting
fi
if [ "$answer" = "14" ]
then
echo "Coming soon."
exiting
fi
if [ "$answer" = "15" ]
then
packagetoinstall="gcc-c++" && packageinstaller
wget http://sourceforge.net/projects/iperf/files/iperf-2.0.5.tar.gz/download
mv download iperf-2.0.5.tar.gz
tar -xzvf iperf-2.0.5.tar.gz
cd iperf-2.0.5
./configure
make
make install
fi
if [ "$answer" != "1" ] && [ "$answer" != "2" ] && [ "$answer" != "3" ] && [ "$answer" != "4" ] && [ "$answer" != "5" ] && [ "$answer" != "6" ] \
&& [ "$answer" != "7" ] && [ "$answer" != "8" ] && [ "$answer" != "9" ] && [ "$answer" != "10" ] && [ "$answer" != "11" ] && [ "$answer" != "12" ] \
&& [ "$answer" != "13" ] && [ "$answer" != "14" ] && [ "$answer" != "15" ]
then
mainmenu
fi
}
mainmenu
|
AlperSakarya/LinuxUserWizard
|
linux-user-wizard.sh
|
Shell
|
bsd-2-clause
| 12,860 |
yarn install --frozen-lockfile
cat yarn.lock | npx hasha > .node_hash
|
SalesforceFoundation/HEDAP
|
scripts/update-dependencies.sh
|
Shell
|
bsd-3-clause
| 69 |
#!/bin/bash
# experiment for adversary inserting relays after user's guard selection
# 1/1/2013 00:00:00 timestamp: 1356998400
BASE_DIR=/mnt/shared/orsec_data
TOT_PROCESSES=20
PARALLEL_PROCESSES=$1
DATE_RANGE=$2
OUTPUT=2
ADV_GUARD_BW=$3
ADV_EXIT_BW=$4
ADV_TIME=$5
NUM_ADV_GUARDS=1
USERMODEL=typical
NUM_SAMPLES=5000
TRACEFILE=$BASE_DIR/in/users2-processed.traces.pickle
PATH_ALG=tor
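# Example (hypothetical values):
#   ./run_simulations_delayed_entry.sh 4 2013-01--2013-03 10000 10000 1356998400
# runs TOT_PROCESSES=20 simulations, roughly 4 at a time, with the adversary
# relays appearing at the 1/1/2013 timestamp noted above.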
EXP_NAME=$USERMODEL.$DATE_RANGE.$ADV_GUARD_BW-$NUM_ADV_GUARDS-$ADV_EXIT_BW-$ADV_TIME-adv
NSF_DIR=$BASE_DIR/out/network-state/slim/ns-$DATE_RANGE
OUT_DIR=$BASE_DIR/out/simulate/$EXP_NAME
mkdir -p $OUT_DIR
i=1
while [ $i -le $TOT_PROCESSES ]
do
j=1
while [[ $j -lt $PARALLEL_PROCESSES && $i -lt $TOT_PROCESSES ]]
do
# start these in parallel
(time pypy pathsim.py simulate $NSF_DIR $NUM_SAMPLES $TRACEFILE $USERMODEL $OUTPUT $ADV_GUARD_BW $ADV_EXIT_BW $ADV_TIME $NUM_ADV_GUARDS $PATH_ALG) 2> $OUT_DIR/simulate.$EXP_NAME.$NUM_SAMPLES-samples.$i.time 1> $OUT_DIR/simulate.$EXP_NAME.$NUM_SAMPLES-samples.$i.out &
j=$(($j+1))
i=$(($i+1))
done
# wait for this one to finish
(time pypy pathsim.py simulate $NSF_DIR $NUM_SAMPLES $TRACEFILE $USERMODEL $OUTPUT $ADV_GUARD_BW $ADV_EXIT_BW $ADV_TIME $NUM_ADV_GUARDS $PATH_ALG) 2> $OUT_DIR/simulate.$EXP_NAME.$NUM_SAMPLES-samples.$i.time 1> $OUT_DIR/simulate.$EXP_NAME.$NUM_SAMPLES-samples.$i.out
i=$(($i+1))
done
|
multicastTor/multicastTor
|
torps/run_simulations_delayed_entry.sh
|
Shell
|
bsd-3-clause
| 1,354 |
export HIPDEX_INCLUDE_PATH=`pwd`
|
smart-m3/sib-tcp
|
hip-dex/install.sh
|
Shell
|
bsd-3-clause
| 33 |
#!/usr/bin/env bash
RED='\033[0;31m'
BLUE='\033[0;34m'
NC='\033[0m'
echo -e "${RED}* Killing service tango_master.1${NC}"
docker kill "$(docker ps -f name=tango_master.1 -f status=running -q)"
echo -e "${RED}* Waiting for service to restart${NC}"
while true; do
SERVICE_ID="$(docker service ps -f desired-state=running tango_tango_master -q)"
if [[ ! -z "${SERVICE_ID}" ]]; then
echo -e "-- ${BLUE} Service started!! ID = ${SERVICE_ID}${NC}"
break
fi
done
|
SKA-ScienceDataProcessor/integration-prototype
|
sip/tango_control/tango_master/kill_service.sh
|
Shell
|
bsd-3-clause
| 488 |
#!/bin/bash
[[ -z $WORKING_DIR ]] && WORKING_DIR=$(pwd)
[[ -z $VERSION ]] && VERSION=$(git log --oneline -n 1 | awk '{print $1}')
case "$1" in
'setup_qt')
sudo apt-get update -qq
sudo apt-get -y install qt4-qmake libqt4-dev libqt4-sql-sqlite
;;
'get_quazip')
wget http://downloads.sourceforge.net/quazip/quazip-0.7.1.tar.gz
tar -xvzf quazip-0.7.1.tar.gz > /dev/null
mv quazip-0.7.1/quazip .
;;
'build')
./build-scripts/revision.sh
qmake-qt4 CONFIG+=linux_quazip_static
make
;;
'package')
mkdir build
mv resources/README.txt .
tar -cvzpf build/mupen64plus-qt_linux_$VERSION.tar.gz mupen64plus-qt README.txt
;;
esac
|
sergiobenrocha2/mupen64plus-qt
|
build-scripts/platform/linux.sh
|
Shell
|
bsd-3-clause
| 743 |
#!/usr/bin/env bash
FILES=`find $1 -name "*.bag"`
for BAG in $FILES
do
echo $BAG
filename="${BAG##*/}"
echo $filename
rostopic echo -b $BAG -p /behavior/status > $2/${filename%.*}_status.csv
rostopic echo -b $BAG -p /bebop/land > $2/${filename%.*}_land.csv
rostopic echo -b $BAG -p /vicon/bebop_blue_en/bebop_blue_en > $2/${filename%.*}_bebop.csv
rostopic echo -b $BAG -p /vicon/bebop_target/bebop_target > $2/${filename%.*}_target.csv
python trimmer.py $2/${filename%.*}
done
|
AutonomyLab/bebop_hri
|
script/analysis/trajectory/extract.sh
|
Shell
|
bsd-3-clause
| 494 |
#!/usr/bin/env bash
# The NSA XSAPR file is no longer available
#wget --no-check-certificate https://engineering.arm.gov/~jhelmus/sample_xsapr_a1_files/2013_12_03_NSA/nsaxsaprrhiC1.a1/nsaxsaprrhiC1.a1.20131203.141936.nc
wget --no-check-certificate https://engineering.arm.gov/~collis/KAMX_20140417_1056
wget --no-check-certificate https://engineering.arm.gov/~collis/osrsc/KDVN_110608.nexrad
wget --no-check-certificate https://engineering.arm.gov/~collis/osrsc/KILX_110437.nexrad
wget --no-check-certificate https://engineering.arm.gov/~collis/osrsc/KLOT_110622.nexrad
wget --no-check-certificate https://engineering.arm.gov/~collis/osrsc/IL_grid.nc
wget https://github.com/jjhelmus/pyart2baltrad/raw/master/data/sgpcsaprppi_20110520095101.nc
wget --no-check-certificate https://engineering.arm.gov/~collis/osrsc/CHL20100621_222020
|
jjhelmus/pyart_short_course
|
data/get_data.sh
|
Shell
|
bsd-3-clause
| 834 |
#!/usr/bin/env bash
# Computes a checksum of a git repository considering
# 1. the current revision,
# 2. the current diff, and
# 3. the content of new files.
# Arguments:
# 1. the path to the repository
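# Example (hypothetical path): ./repository_checksum.sh /path/to/repo
# prints a single 40-character SHA-1 hex digest on stdout.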
action() {
# determine the directory of this file
local this_file="$( [ ! -z "$ZSH_VERSION" ] && echo "${(%):-%x}" || echo "${BASH_SOURCE[0]}" )"
local this_dir="$( cd "$( dirname "$this_file" )" && pwd )"
# load polyfills
source "$this_dir/../../../polyfills.sh" ""
# handle arguments
local repo_path="$1"
if [ -z "$repo_path" ]; then
>&2 echo "please provide the path to the repository to bundle"
return "1"
fi
if [ ! -d "$repo_path" ]; then
>&2 echo "the provided path '$repo_path' is not a directory or does not exist"
return "2"
fi
( \
cd "$repo_path" && \
git rev-parse HEAD && \
git diff && \
( git ls-files --others --exclude-standard | xargs cat ) \
) | shasum | cut -d " " -f 1
local ret="$?"
return "$ret"
}
action "$@"
|
riga/law
|
law/contrib/git/scripts/repository_checksum.sh
|
Shell
|
bsd-3-clause
| 1,067 |
#!/usr/bin/env bash
set -e -u
function set_this_up {
ttagval=${TRAVIS_TAG:-notag}
if [ "$TRAVIS_PULL_REQUEST" != "false" ]
then
echo "This was a pull request, thus dont build docs. Exit."
exit 0
fi
if [ "$TRAVIS_BRANCH" != "master" ] && [ "$ttagval" == "notag" ]
then
echo "This commit was made against the $TRAVIS_BRANCH branch and not the master branch. Exit."
exit 0
fi
if [ -z ${BINSTAR_TOKEN+x} ]
then
echo "BINSTAR_TOKEN was not set, so this is probably a fork. Exit."
exit 0
fi
}
set_this_up
CONDA_PACKAGE_FILE=$(conda build tools/conda-recipe --output | grep '.tar.bz2' | tail -1)
echo "found conda package file $CONDA_PACKAGE_FILE"
conda install anaconda-client -qy
tagval=${TRAVIS_TAG:-notag}
if [ "$tagval" == "notag" ]
then
echo "uploading devel package"
anaconda -t $BINSTAR_TOKEN upload -c readdy -u readdy -l dev --force $CONDA_PACKAGE_FILE
else
echo "uploading tagged package with tag $tagval"
anaconda -t $BINSTAR_TOKEN upload -u readdy $CONDA_PACKAGE_FILE
fi
|
readdy/readdy
|
tools/ci/travis/upload_conda_package.sh
|
Shell
|
bsd-3-clause
| 1,084 |
#!/bin/sh
set -e
tar=`which bsdtar tar 2>/dev/null | head -1`
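# Usage sketch (hypothetical paths): dircp.sh /src/tree /dest/tree
# copies the tree through a tar pipe, preserving permissions (-p) while
# stripping file flags and extended attributes.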
mkdir -p "$2"
(cd "$1"; exec find . -print0) |
(cd "$1"; exec $tar -cn -f - -T - --no-fflags --no-xattrs --null) |
(cd "$2"; exec $tar -xp -f - --no-fflags --no-xattrs)
|
dragonmaus/home
|
src/bin/dircp/dircp.sh
|
Shell
|
bsd-3-clause
| 235 |
#!/bin/sh
# Generated file, master is Makefile.am
. ${srcdir:-.}/common.sh
infile="$srcdir/images/ojpeg_chewey_subsamp21_multi_strip.tiff"
outfile="o-tiffcrop-doubleflip-ojpeg_chewey_subsamp21_multi_strip.tiff"
f_test_convert "$TIFFCROP -F both" $infile $outfile
f_tiffinfo_validate $outfile
|
ric2b/Vivaldi-browser
|
update_notifier/thirdparty/wxWidgets/src/tiff/test/tiffcrop-doubleflip-ojpeg_chewey_subsamp21_multi_strip.sh
|
Shell
|
bsd-3-clause
| 292 |
#!/bin/bash
python setup.py upload
|
slackhq/python-slackclient
|
scripts/deploy_to_prod_pypi_org.sh
|
Shell
|
mit
| 36 |
#!/bin/bash
echo "After Script"
echo "-- Repo Slug: $TRAVIS_REPO_SLUG"
echo "-- Repo Tag: $TRAVIS_TAG"
echo "-- PHP Version: $TRAVIS_PHP_VERSION"
echo "-- PULL Request: $TRAVIS_PULL_REQUEST"
if [ "$TRAVIS_REPO_SLUG" == "bluzphp/framework" ] && [ "$TRAVIS_TAG" != "" ] && [ "$TRAVIS_PULL_REQUEST" == "false" ] && [ "$TRAVIS_PHP_VERSION" == "7.0" ]; then
echo "Generate Documentation"
wget http://phpdox.de/releases/phpdox.phar
php phpdox.phar
echo "Publishing"
# move docs to `home` directory
cp -R docs/html $HOME/docs-latest
cd $HOME
git config --global user.email "[email protected]"
git config --global user.name "travis-ci"
git config --global push.default simple
git clone --quiet https://${GITHUB_TOKEN}@github.com/bluzphp/bluzphp.github.io > /dev/null
cd bluzphp.github.io
echo "-- Clean"
git rm -rf ./ > /dev/null
echo "-- Copy"
cp -Rf $HOME/docs-latest/* ./
echo "-- Push"
git add -f .
git commit -m "PHPDocumentor (Travis Build: $TRAVIS_BUILD_NUMBER@$TRAVIS_TAG)"
git push -fq origin > /dev/null
echo -e "Published to github.io\n"
fi
|
AntonShevchuk/bluz-framework
|
.travis.sh
|
Shell
|
mit
| 1,097 |
#!/usr/bin/env bash
# copy ioncube libs to php5 dir
cp ./zendguard/zend-loader-php5.6-linux-x86_64/ZendGuardLoader.so /usr/lib/php/20131226/
cp ./zendguard/zend-loader-php5.6-linux-x86_64/opcache.so /usr/lib/php/20131226/
# copy php5 config
cp ./01-zendguard.ini /etc/php/5.6/fpm/conf.d/
cp ./01-zendguard.ini /etc/php/5.6/apache2/conf.d/
# restart apache
service apache2 restart
|
gigatec/docker-ubuntu
|
files/root/install-zendguard.sh
|
Shell
|
mit
| 383 |
#!/bin/bash
FN="mu19ksuba.db_3.2.3.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.11/data/annotation/src/contrib/mu19ksuba.db_3.2.3.tar.gz"
"https://bioarchive.galaxyproject.org/mu19ksuba.db_3.2.3.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-mu19ksuba.db/bioconductor-mu19ksuba.db_3.2.3_src_all.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-mu19ksuba.db/bioconductor-mu19ksuba.db_3.2.3_src_all.tar.gz"
)
MD5="668c92eb1b9e2367c5a69dc8d0498cca"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
curl $URL > $TARBALL
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks.
if [[ $(uname -s) == "Linux" ]]; then
if md5sum -c <<<"$MD5 $TARBALL"; then
SUCCESS=1
break
fi
  elif [[ $(uname -s) == "Darwin" ]]; then
    if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
      SUCCESS=1
      break
    fi
  fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
|
roryk/recipes
|
recipes/bioconductor-mu19ksuba.db/post-link.sh
|
Shell
|
mit
| 1,426 |
#!/bin/bash
# The 'other' host
HOST_NUMBER=$1
OTHER_HOSTS=$2
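# Example (hypothetical addresses): ./prepare_docker_network.sh 1 "192.168.0.2 192.168.0.3"
# gives this host the bridge address 10.10.1.1/24 and one GRE tunnel per peer.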
# Bridge address
BRIDGE_ADDRESS=10.10.$HOST_NUMBER.1/24
UNION_IP=192.168.250.$HOST_NUMBER
UNION_ADDRESS=$UNION_IP/24
# Add the docker0 bridge
brctl addbr docker0
# Set up the IP for the docker0 bridge
ip address add $BRIDGE_ADDRESS dev docker0
# Activate the bridge
ip link set docker0 up
# Add the br0 Open vSwitch bridge
ovs-vsctl add-br br0
# Create the tunnel to the other host and attach it to the
# br0 bridge
COUNTER=0
for REMOTE_IP in $OTHER_HOSTS; do
let COUNTER=COUNTER+1
ovs-vsctl add-port br0 gre$COUNTER -- set interface gre$COUNTER type=gre options:remote_ip=$REMOTE_IP
done
ovs-vsctl add-port br0 tep0 -- set interface tep0 type=internal
ip address add $UNION_ADDRESS dev tep0
ip link set tep0 up
# Add the br0 bridge to docker0 bridge
brctl addif docker0 br0
|
1uptalent/fleet
|
provisioning_scripts/prepare_docker_network.sh
|
Shell
|
mit
| 844 |
#! /bin/bash
# A script for setting up environment for travis-ci testing.
# Sets up Lua and Luarocks.
# LUA must be one of "lua5.1", "lua5.2", "lua5.3", "luajit", "luajit2.0" or "luajit2.1".
# luajit2.0 - master v2.0
# luajit2.1 - master v2.1
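# Example (hypothetical values): LUA=lua5.2 LUAROCKS=2.4.4 ./.travis/setup_lua.sh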
set -eufo pipefail
LUAJIT_VERSION="2.0.4"
LUAJIT_BASE="LuaJIT-$LUAJIT_VERSION"
source .travis/platform.sh
LUA_HOME_DIR=$TRAVIS_BUILD_DIR/install/lua
LR_HOME_DIR=$TRAVIS_BUILD_DIR/install/luarocks
mkdir $HOME/.lua
LUAJIT="no"
if [ "$PLATFORM" == "macosx" ]; then
if [ "$LUA" == "luajit" ]; then
LUAJIT="yes";
fi
if [ "$LUA" == "luajit2.0" ]; then
LUAJIT="yes";
fi
if [ "$LUA" == "luajit2.1" ]; then
LUAJIT="yes";
fi;
elif [ "$(expr substr $LUA 1 6)" == "luajit" ]; then
LUAJIT="yes";
fi
mkdir -p "$LUA_HOME_DIR"
if [ "$LUAJIT" == "yes" ]; then
if [ "$LUA" == "luajit" ]; then
curl --location https://github.com/LuaJIT/LuaJIT/archive/v$LUAJIT_VERSION.tar.gz | tar xz;
else
git clone https://github.com/LuaJIT/LuaJIT.git $LUAJIT_BASE;
fi
cd $LUAJIT_BASE
if [ "$LUA" == "luajit2.1" ]; then
git checkout v2.1;
# force the INSTALL_TNAME to be luajit
perl -i -pe 's/INSTALL_TNAME=.+/INSTALL_TNAME= luajit/' Makefile
fi
make PREFIX="$LUA_HOME_DIR" && make install PREFIX="$LUA_HOME_DIR"
ln -s $LUA_HOME_DIR/bin/luajit $HOME/.lua/luajit
ln -s $LUA_HOME_DIR/bin/luajit $HOME/.lua/lua;
else
if [ "$LUA" == "lua5.1" ]; then
curl http://www.lua.org/ftp/lua-5.1.5.tar.gz | tar xz
cd lua-5.1.5;
elif [ "$LUA" == "lua5.2" ]; then
curl http://www.lua.org/ftp/lua-5.2.4.tar.gz | tar xz
cd lua-5.2.4;
elif [ "$LUA" == "lua5.3" ]; then
curl http://www.lua.org/ftp/lua-5.3.2.tar.gz | tar xz
cd lua-5.3.2;
fi
# Build Lua without backwards compatibility for testing
perl -i -pe 's/-DLUA_COMPAT_(ALL|5_2)//' src/Makefile
make $PLATFORM
make INSTALL_TOP="$LUA_HOME_DIR" install;
ln -s $LUA_HOME_DIR/bin/lua $HOME/.lua/lua
ln -s $LUA_HOME_DIR/bin/luac $HOME/.lua/luac;
fi
cd $TRAVIS_BUILD_DIR
lua -v
LUAROCKS_BASE=luarocks-$LUAROCKS
curl --location http://luarocks.org/releases/$LUAROCKS_BASE.tar.gz | tar xz
cd $LUAROCKS_BASE
if [ "$LUA" == "luajit" ]; then
./configure --lua-suffix=jit --with-lua-include="$LUA_HOME_DIR/include/luajit-2.0" --prefix="$LR_HOME_DIR";
elif [ "$LUA" == "luajit2.0" ]; then
./configure --lua-suffix=jit --with-lua-include="$LUA_HOME_DIR/include/luajit-2.0" --prefix="$LR_HOME_DIR";
elif [ "$LUA" == "luajit2.1" ]; then
./configure --lua-suffix=jit --with-lua-include="$LUA_HOME_DIR/include/luajit-2.1" --prefix="$LR_HOME_DIR";
else
./configure --with-lua="$LUA_HOME_DIR" --prefix="$LR_HOME_DIR"
fi
make build && make install
ln -s $LR_HOME_DIR/bin/luarocks $HOME/.lua/luarocks
cd $TRAVIS_BUILD_DIR
luarocks --version
rm -rf $LUAROCKS_BASE
if [ "$LUAJIT" == "yes" ]; then
rm -rf $LUAJIT_BASE;
elif [ "$LUA" == "lua5.1" ]; then
rm -rf lua-5.1.5;
elif [ "$LUA" == "lua5.2" ]; then
rm -rf lua-5.2.4;
elif [ "$LUA" == "lua5.3" ]; then
rm -rf lua-5.3.2;
fi
|
v1993/cocos2d-x-lua-i18n
|
.travis/setup_lua.sh
|
Shell
|
mit
| 3,012 |
# Base16 Styling Guidelines:
# base00 - Default Background
# base01 - Lighter Background (Used for status bars)
# base02 - Selection Background
# base03 - Comments, Invisibles, Line Highlighting
# base04 - Dark Foreground (Used for status bars)
# base05 - Default Foreground, Caret, Delimiters, Operators
# base06 - Light Foreground (Not often used)
# base07 - Light Background (Not often used)
# base08 - Variables, XML Tags, Markup Link Text, Markup Lists, Diff Deleted
# base09 - Integers, Boolean, Constants, XML Attributes, Markup Link Url
# base0A - Classes, Markup Bold, Search Text Background
# base0B - Strings, Inherited Class, Markup Code, Diff Inserted
# base0C - Support, Regular Expressions, Escape Characters, Markup Quotes
# base0D - Functions, Methods, Attribute IDs, Headings
# base0E - Keywords, Storage, Selector, Markup Italic, Diff Changed
base00=default # #000000
base01=colour18 # #282828
base02=colour19 # #383838
base03=colour8 # #585858
base04=colour20 # #B8B8B8
base05=colour7 # #D8D8D8
base06=colour21 # #E8E8E8
base07=colour15 # #F8F8F8
base08=colour01 # #AB4642
base09=colour16 # #DC9656
base0A=colour3 # #F7CA88
base0B=colour2 # #A1B56C
base0C=colour6 # #86C1B9
base0D=colour4 # #7CAFC2
base0E=colour5 # #BA8BAF
base0F=colour17 # #A16946
set -g status-left-length 32
set -g status-right-length 150
set -g status-interval 5
# default statusbar colors
set-option -g status-fg $base02
set-option -g status-bg $base01
set-option -g status-attr default
set-window-option -g window-status-fg $base04
set-window-option -g window-status-bg $base00
set -g window-status-format " #I #W"
# active window title colors
set-window-option -g window-status-current-fg $base01
set-window-option -g window-status-current-bg $base0C
set-window-option -g window-status-current-format " #[bold]#W "
# pane border colors
set-window-option -g pane-border-fg $base03
set-window-option -g pane-active-border-fg $base0C
# message text
set-option -g message-bg $base00
set-option -g message-fg $base0C
# pane number display
set-option -g display-panes-active-colour $base0C
set-option -g display-panes-colour $base01
# clock
set-window-option -g clock-mode-colour $base0C
tm_session_name="#[default,bg=$base0E,fg=$base01] #S "
set -g status-left "$tm_session_name"
tm_tunes="#[bg=$base0D,fg=$base01] ♫ #(osascript -l JavaScript ~/.dotfiles/applescripts/tunes.js)"
# tm_tunes="#[fg=$tm_color_music]#(osascript ~/.dotfiles/applescripts/tunes.scpt | cut -c 1-50)"
# tm_battery="#(~/.dotfiles/bin/battery_indicator.sh)"
tm_battery="#[fg=$base01,bg=$base09] ♥ #(battery)"
tm_date="#[default,bg=$base02,fg=$base05] %R"
tm_host="#[fg=$base01,bg=$base0E] #h "
set -g status-right "$tm_tunes $tm_battery $tm_date $tm_host"
|
SouthernBlackNerd/dotfiles
|
tmux/base16.sh
|
Shell
|
mit
| 2,743 |
#!/bin/sh
### Run R providing the R script in $1 as standard input and passing
### the remaining arguments on the command line
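### Example (hypothetical files): r_wrapper.sh myscript.R --input data.csv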
# Function that writes a message to stderr and exits
fail()
{
echo "$@" >&2
exit 1
}
# Ensure R executable is found
which R > /dev/null || fail "'R' is required by this tool but was not found on path"
# Extract first argument
infile=$1; shift
# Ensure the file exists
test -f "$infile" || fail "R input file '$infile' does not exist"
# Invoke R passing file named by first argument to stdin
R --vanilla --slave "$@" < "$infile"
|
volpino/Yeps-EURAC
|
tools/plotting/r_wrapper.sh
|
Shell
|
mit
| 572 |
#!/bin/sh
#
# Copyright (C) 2015 Fujitsu Technology Solutions GmbH. All Rights Reserved.
#
# version string
# Version: 3.30.02
# Version: 3.20.01
# Date: 2015-11-09
# svcimlistenerd default installation settings
DIR_INIT="/etc/init.d"
FILE_INIT="sv_cimlistenerd"
DIR_LISTENER="/usr/bin"
FILE_LISTENER="svcimlistenerd"
FILE_LISTENER_CONF="svcimlistenerd.conf"
FILE_LISTENER_LOG="svcimlistenerd.log"
SV_PORT=3169
DIR_CONF="/etc"
DIR_SSL_CERT="$DIR_CONF/svcimlistenerd"
FILE_LISTENER_CERT="server.crt"
DIR_LOG_FJ="/var/log/fujitsu/svcimlistener"
LOG_FILE_INSTALL="install.log"
LOG_FILE_UNINSTALL="uninstall.log"
LOG_FILE_CONFIGURE="$DIR_LOG_FJ/configure.log"
LOG_FILE=$LOG_FILE_INSTALL
# common
SCRIPTNAME=$(basename $0)
MODE_PREREQ="TRUE"
MODE_INSTALL="TRUE"
MODE_UNINSTALL="FALSE"
WARN_OCCURED="FALSE"
ERR_OCCURED="FALSE"
LICENSEAGREE="no"
DIALOG="TRUE"
umask 0022
# functions
function usage()
{
cat <<E_O_F
$SCRIPTNAME usage:
Optional parameter:
-e|--erase|--uninstall Mode Uninstallation
-n|--noprereq Do not check for necessary installed packages
-v|--verbose|--debug Be verbose
-s|--silent Unattended mode
-h|--help|--? Displays this usage
E_O_F
}
function message()
{
if [ "$VERBOSE" == "1" ]; then
echo -e "$@" | tee -a $LOG
else
echo -e "$@" >> $LOG
fi
}
function message_display()
{
echo -e "$@" | tee -a $LOG
}
function warn_1()
{
[ $WARN_OCCURED == "FALSE" ] && echo "Warning occured -- see $LOG"
WARN_OCCURED="TRUE"
}
function err_1()
{
[ $ERR_OCCURED == "FALSE" ] && echo "ERROR occured -- see $LOG"
ERR_OCCURED="TRUE"
}
function check_prerequisites()
{
message_display "* Check_prerequisites"
ret=0
FOUND=0
SYSLOG=Syslog.pm
PERL_LIST=`rpm -qa | grep perl`
for i in $PERL_LIST
do rpm -q --filesbypkg $i | grep $SYSLOG > /dev/null && message " -$i (provides $SYSLOG)" && FOUND=1 && break || {
FOUND=0
}
done
[ $FOUND == 0 ] && message_display "ERROR: package providing \"$SYSLOG\" --- NOT FOUND" && ret=1
for i in perl-IO-Socket-INET6 perl-NetAddr-IP perl-IO-Socket-SSL perl-Net-SSLeay perl-XML-Twig perl-Time-HiRes
do rpm -q $i > /dev/null && message " -$i" || {
message_display "ERROR: $i --- NOT FOUND"
ret=1
}
done
message " -SNMP Trap Handler"
unset SELSNMPTT
SELSNMPTT=`cat svcimlistenerd.conf | sed 's/^[ ]*//' | sed 's/[\x09]//g' | grep ^[^#] | grep '^SNMPTT_EXE' | awk -F "=" '{ print $2}'`
if [ $SELSNMPTT ]; then
message " SNMPTT_EXE found in svcimlistenerd.conf:$SELSNMPTT"
if [ ! -f $SELSNMPTT ]; then
message_display "ERROR: SNMP Trap Handler $SELSNMPTT --- NOT FOUND"
ret=1
fi
else
message_display "ERROR: \"SNMPTT_EXE\" key not found in svcimlistenerd.conf"
ret=1
fi
if [ $ret != 0 ]; then
message_display "\nDo you want to continue with installation?"
message_display "If you choose "yes", the installation will proceed <yes/no>"
read ANSWER
if [ "$ANSWER" != "Y" -a "$ANSWER" != "y" -a "$ANSWER" != "YES" -a "$ANSWER" != "Yes" -a "$ANSWER" != "yes" ]; then
message_display "Installation aborted by user"
cat $LOG >> $LOG_FILE_CONFIGURE
exit 0
fi
fi
return $ret
}
function get_os_version
{
local version_file=$1
if [ -e $version_file ]; then
local content=$(cat "$version_file")
if [[ $content =~ [[:digit:]]{1,2} ]]; then
echo "${BASH_REMATCH[0]}"
fi
fi
}
function set_srv_handling
{
if [ "$redhat_version" == "7" ]; then
Srv_Listener="service $FILE_LISTENER"
else
Srv_Listener=$DIR_INIT/$FILE_INIT
fi
}
function set_os_type
{
message_display "* Check distribution"
if [ -f /etc/SuSE-release ]; then
OS_NAME="suse"
message " OS: $(cat /etc/SuSE-release)"
else
if [ -f /etc/redhat-release ]; then
OS_NAME="redhat"
message " OS: $(cat /etc/redhat-release)"
redhat_version=$(get_os_version /etc/redhat-release)
fi
fi
}
function check_svom
{
#check possible SVOM application server Installation
message_display "* Check used ports"
for i in ServerViewJBoss ServerViewTomee
do rpm -qa | grep -i $i > /dev/null
if [ "$?" == "0" ]; then
message_display " ServerView $i application server is installed."
# Check svcimlistenerd.conf file for an alternate port to be used
unset SELPORT
SELPORT=`cat svcimlistenerd.conf | sed 's/^[ ]*//' | sed 's/[\x09]//g' | grep ^[^#] | grep '^PORT' | awk -F "=" '{ print $2}'`
if [ $SELPORT ]; then
message_display " Alternate cimlistener port $SELPORT found in svcimlistenerd.conf"
if [ $SELPORT == $SV_PORT ]; then
message_display "ERROR: Port conflict detected. Please select a free port other than $SV_PORT."
message_display " Operation aborted\n"
exit 1
fi
else
message_display "ERROR: Port conflict detected."
message_display " Please select a free port for svcimlistener in svcimlistenerd.conf other than $SV_PORT."
message_display " Operation aborted\n"
exit 1
fi
fi
done
}
# Check commandline arguments
while [ $# -gt 0 ]; do
case $1 in
-e|--erase|--uninstall )
MODE_INSTALL="FALSE"
MODE_UNINSTALL="TRUE"
LOG_FILE=$LOG_FILE_UNINSTALL
LICENSEAGREE="yes"
shift
;;
-v|--verbose|--debug )
VERBOSE=1
shift
;;
-n|--noprereq )
MODE_PREREQ="FALSE"
shift
;;
-s|--silent )
DIALOG="FALSE"
shift
;;
-h|--help|--? )
usage
exit 0
;;
* )
usage
exit 1
;;
esac
done
# Check appropriate permission
if [ $MODE_INSTALL == "TRUE" -o $MODE_UNINSTALL == "TRUE" ] && [ `id -u` != 0 ]; then
echo "Permission denied"
echo
echo "FUJITSU Software ServerView Plug-in for Nagios Core -- Operation aborted"
exit 1;
fi
# Eula
if [ "$LICENSEAGREE" != "yes" -a "$DIALOG" == "TRUE" ]; then
echo ""
more EULA.txt
echo ""
echo ""
echo "If you agree please confirm with yes otherwise leave with no"
read LICENSEAGREE
[ "$LICENSEAGREE" != "YES" -a "$LICENSEAGREE" != "Yes" -a "$LICENSEAGREE" != "yes" -a "$LICENSEAGREE" != "Y" -a "$LICENSEAGREE" != "y" ] && exit 0
fi
# setup logging
[ -f ${DIR_LOG_FJ}/${LOG_FILE} ] && rm -f ${DIR_LOG_FJ}/${LOG_FILE}
[ -d $DIR_LOG_FJ ] || mkdir -p $DIR_LOG_FJ
LOG=${DIR_LOG_FJ}/${LOG_FILE}
echo
message_display "FUJITSU Software ServerView Plug-in for Nagios Core -- Operation started at $(date)"
message_display ""
# Detect distribution
set_os_type
# Distribution specific service handling
set_srv_handling
# Installation
if [ $MODE_INSTALL == "TRUE" ]; then
# check for an existing SVOM installation
check_svom
# check necessary prerequisites
[ $MODE_PREREQ == "TRUE" ] && check_prerequisites
# stop exiting listener
if [ -f $DIR_INIT/$FILE_INIT ]; then
message_display "* Existing Service $FILE_LISTENER - Shut down"
$Srv_Listener stop >> $LOG
ret=$?
if [ "$ret" != "0" ]; then
message_display "ERROR: Stop existing $FILE_LISTENER service failed RC:$ret"
else
message " - Existing Service $FILE_LISTENER - Stopped"
fi
$Srv_Listener status >> $LOG
if [ "$OS_NAME" == "suse" ]; then
/sbin/insserv -r $DIR_INIT/$FILE_INIT
else
/sbin/chkconfig --del $DIR_INIT/$FILE_INIT
fi
fi
# rename existing svcimlistenerd.log
[ -f $DIR_LOG_FJ/$FILE_LISTENER_LOG ] && mv $DIR_LOG_FJ/$FILE_LISTENER_LOG $DIR_LOG_FJ/$( date "+%Y%m%d_%H%M%S" )_$FILE_LISTENER_LOG
message_display "* Files Installation"
# Init script
cp -f -v $FILE_INIT $DIR_INIT/$FILE_INIT >>$LOG 2>&1 || err_1
chmod 755 $DIR_INIT/$FILE_INIT >>$LOG 2>&1 || err_1
# CIMListener Daemon
cp -f -v $FILE_LISTENER $DIR_LISTENER/$FILE_LISTENER >>$LOG 2>&1 || err_1
chmod 755 $DIR_LISTENER/$FILE_LISTENER >>$LOG 2>&1 || err_1
# CIMListener Config
cp -f -v $FILE_LISTENER_CONF $DIR_CONF/$FILE_LISTENER_CONF >>$LOG 2>&1 || err_1
chmod 755 $DIR_CONF/$FILE_LISTENER_CONF >>$LOG 2>&1 || err_1
# Certificate
[ -d $DIR_SSL_CERT ] || mkdir $DIR_SSL_CERT
cp -f -v $FILE_LISTENER_CERT $DIR_SSL_CERT/$FILE_LISTENER_CERT >>$LOG 2>&1 || err_1
message_display "* Service $FILE_LISTENER - Installation"
if [ "$OS_NAME" == "suse" ]; then
/sbin/insserv $DIR_INIT/$FILE_INIT
else
/sbin/chkconfig --add $DIR_INIT/$FILE_INIT
fi
message_display "* Service $FILE_LISTENER - Start"
$Srv_Listener start >> $LOG
ret=$?
sleep 2
if [ "$ret" != "0" ]; then
message_display "ERROR: Start $FILE_LISTENER service failed RC:$ret"
err_1
else
message " - Service $FILE_LISTENER - started"
fi
$Srv_Listener status >> $LOG
fi
# Uninstallation
if [ $MODE_UNINSTALL == "TRUE" ]; then
if [ -f $DIR_INIT/$FILE_INIT ]; then
message_display "* Service $FILE_LISTENER - Shut down"
$Srv_Listener stop >> $LOG
ret=$?
$Srv_Listener status >> $LOG
if [ "$ret" != "0" ]; then
message_display "ERROR: Stop $FILE_LISTENER service failed RC:$ret"
else
message " - Service $FILE_LISTENER - Stopped"
fi
if [ "$OS_NAME" == "suse" ]; then
/sbin/insserv -r $DIR_INIT/$FILE_INIT
else
/sbin/chkconfig --del $DIR_INIT/$FILE_INIT
fi
else
message_display "Warning: Service $FILE_LISTENER - not found"
warn_1
fi
message_display "* Files Uninstallation"
[ -f "$DIR_INIT/$FILE_INIT" ] && rm -f "$DIR_INIT/$FILE_INIT" && message " - $DIR_INIT/$FILE_INIT"
[ -f "$DIR_LISTENER/$FILE_LISTENER" ] && rm -f "$DIR_LISTENER/$FILE_LISTENER" && message " - $DIR_LISTENER/$FILE_LISTENER"
[ -f "$DIR_CONF/$FILE_LISTENER_CONF" ] && rm -f "$DIR_CONF/$FILE_LISTENER_CONF" && message " - $DIR_CONF/$FILE_LISTENER_CONF"
[ -d $DIR_SSL_CERT ]&& rm -rf $DIR_SSL_CERT && message " - Remove Directory: $DIR_SSL_CERT"
fi
echo
if [ $ERR_OCCURED == "FALSE" -a $WARN_OCCURED == "FALSE" ]; then
message_display "FUJITSU Software ServerView Plug-in for Nagios Core -- Operation finished"
echo "Log saved in $LOG file."
ret=0
else
message_display "FUJITSU Software ServerView Plug-in for Nagios Core -- Operation ended with warnings / errors"
echo "Log saved in $LOG file."
ret=1
fi
cat $LOG >> $LOG_FILE_CONFIGURE
exit $ret
|
EXASOL/nagios-monitoring
|
opt/fujitsu/ServerViewSuite/cimindication/listener/sv_install.sh
|
Shell
|
mit
| 10,033 |
#!/bin/env bash
# ------------------------------------------------------------------------------
# bookmark favourite paths
# ------------------------------------------------------------------------------
#
# TODO: add dir lock
usage() {
cat >&2 << EOF
Usage: ${0##*/} [OPTIONS] <command>
${0##*/} [OPTIONS] add <directory>
${0##*/} [OPTIONS] del <directory>
${0##*/} [OPTIONS] cat
${0##*/} [OPTIONS] check
${0##*/} [OPTIONS] clear
${0##*/} [OPTIONS] ls
${0##*/} [OPTIONS] rm
add add a bookmarked path
del delete bookmarked path
cat display file content
check print invalid directories in bookmarks file
clear clear any invalid directories in bookmarks file
ls list file path of bookmarks
rm remove database file
OPTIONS:
-h help
-v verbose
-q quiet
-d debug
EOF
}
main() {
# flags
local PATH_BOOKMARKS=${PATH_BOOKMARKS:-"${HOME}/.local/share/"}
local TMP=${TMP:-"/tmp/"}
local -r FSBOOKMARKS="${PATH_BOOKMARKS}fsbookmarks.db.txt"
local -r tmp_bookmarks="$(mktemp --dry-run "${TMP}fsbookmarks.XXXXXXXXtmp.db.txt")"
local -i enable_verbose=0
local -i enable_quiet=0
local -i enable_debug=0
local -i singleton=0 # used by parse_options to allow a single "-" (stdin) argument
local -a options
local -a args
check_dependencies
# parse input args
parse_options "$@"
# reset the positional parameters to the leftover arguments collected by parse_options
set -- "${args[@]}"
# remove args array
unset -v args
check_input_args "$@"
prepare_env
set_signal_handlers
setup
run "$@"
unset_signal_handlers
}
################################################################################
# script internal execution functions
################################################################################
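# run dispatches to cmd_<subcommand>; calling an undefined function makes bash
# return 127, which is turned into a readable error below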
run() {
subcommand=$1
shift
cmd_${subcommand} "$@"
local -r rc=$?
if (($rc == 127)); then
error_exit 127 "Subcommand '$subcommand' is invalid."
fi
}
check_dependencies() {
:
}
check_input_args() {
if [[ -z $1 ]]; then
usage
exit 1
fi
}
prepare_env() {
if ! [[ -d $PATH_BOOKMARKS ]]; then
mkdir -p "$PATH_BOOKMARKS" || error_exit 1 "Failed to create bookmarks directory: '$PATH_BOOKMARKS'"
fi
if ! touch "$FSBOOKMARKS"; then
error_exit 1 "Failed to create bookmarks database: '$FSBOOKMARKS'."
fi
}
prepare() {
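# prepare runs once at the bottom of the file before main: the shared libraries
# must source cleanly, hence the temporary set -e around source_libs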
export PATH_USER_LIB=${PATH_USER_LIB:-"$HOME/.local/lib/"}
set -e
source_libs
set +e
set_descriptors
}
source_libs() {
source "${PATH_USER_LIB}libutils.sh"
source "${PATH_USER_LIB}libcolors.sh"
}
set_descriptors() {
if (($enable_verbose)); then
exec {fdverbose}>&2
else
exec {fdverbose}>/dev/null
fi
if (($enable_debug)); then
set -xv
exec {fddebug}>&2
else
exec {fddebug}>/dev/null
fi
}
set_signal_handlers() {
trap sigh_abort SIGABRT
trap sigh_alarm SIGALRM
trap sigh_hup SIGHUP
trap sigh_cont SIGCONT
trap sigh_usr1 SIGUSR1
trap sigh_usr2 SIGUSR2
trap sigh_cleanup SIGINT SIGQUIT SIGTERM EXIT
}
unset_signal_handlers() {
trap - SIGABRT
trap - SIGALRM
trap - SIGHUP
trap - SIGCONT
trap - SIGUSR1
trap - SIGUSR2
trap - SIGINT SIGQUIT SIGTERM EXIT
}
setup() {
set_descriptors
}
parse_options() {
# exit if no options left
[[ -z $1 ]] && return 0
log "parse \$1: $1" 2>&$fddebug
local do_shift=0
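# do_shift: 1 = keep $1 as a positional arg, 2 = option consumed an argument, 3 = "--" ends option parsing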
case $1 in
-)
if ! (($singleton)); then
singleton=1
return 9
fi
error_exit 5 "stdin is not allowed inside config."
;;
-d|--debug)
enable_debug=1
;;
-v|--verbose)
enable_verbose=1
;;
-q|--quiet)
enable_quiet=1
;;
--)
do_shift=3
;;
-*)
usage
error_exit 5 "$1 is not allowed."
;;
*)
do_shift=1
;;
esac
if (($do_shift == 1)) ; then
args+=("$1")
elif (($do_shift == 2)) ; then
# got option with argument
shift
elif (($do_shift == 3)) ; then
# got --, use all arguments left as options for other commands
shift
options+=("$@")
return
fi
shift
parse_options "$@"
}
sigh_abort() {
trap - SIGABRT
}
sigh_alarm() {
trap - SIGALRM
}
sigh_hup() {
trap - SIGHUP
}
sigh_cont() {
trap - SIGCONT
}
sigh_usr1() {
trap - SIGUSR1
}
sigh_usr2() {
trap - SIGUSR2
}
sigh_cleanup() {
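# best-effort cleanup on exit: remove the temp file and interrupt any background jobs still running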
trap - SIGINT SIGQUIT SIGTERM EXIT
rm -f "$tmp_bookmarks"
local active_jobs=$(jobs -p)
for p in $active_jobs; do
if ps -p $p >/dev/null 2>&1; then
kill -SIGINT $p >/dev/null 2>&1
fi
done
exit 0
}
################################################################################
# custom functions
#-------------------------------------------------------------------------------
cmd_add() {
local input=
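# "read -t 0" only tests whether stdin already has data; the data itself is consumed by cat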
if read -t 0; then
input=$(cat)
else
input="$@"
fi
if ! { input=$(realpath "$input") && [[ -d "$input" ]]; }; then
error_exit 1 "Input is either not a directory or does not exist. Input: '$input'"
fi
info "input: $input" 2>&$fdverbose
if grep -q -F -x "$input" "$FSBOOKMARKS"; then
error_exit 1 "The path '$input' has already been added."
fi
info "a'$input'" 2>&$fdverbose
echo "$input" >> "$FSBOOKMARKS"
}
cmd_check() {
cat "$FSBOOKMARKS" | xargs -I {} sh -c "if ! test -d {}; then echo '{}';fi"
}
cmd_clear() {
local line=
: > "$tmp_bookmarks" # create the temp file up front so the mv below succeeds even if no entries survive
while IFS= read -r line; do
if [[ -d $line ]]; then
info "keep:'$line'" 2>&$fdverbose
echo "$line" >> "$tmp_bookmarks"
fi
done < "$FSBOOKMARKS"
mv "$tmp_bookmarks" "$FSBOOKMARKS"
}
cmd_del() {
local input=
if read -t 0; then
input=$(cat)
else
input="$@"
fi
[[ -z "$input" ]] && error_exit 1 "No input."
input=$(realpath "$input")
info "input: $input" 2>&$fdverbose
local line_number=
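# -F -x matches the path literally and as a whole line; -n prefixes the match with its line number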
if ! line_number=$(grep -n -F -x "$input" "$FSBOOKMARKS"); then
error_exit 1 "Input not found in bookmarks."
fi
if (($(echo "$line_number" | wc -l) > 1)); then
error_exit 1 "Query unspecific. Too many results"
fi
line_number=$(echo "$line_number" | awk -F: '{print $1}')
info "d:$line_number:'$input'" 2>&$fdverbose
sed -i "$line_number d" "$FSBOOKMARKS"
}
cmd_cat() {
cat "$FSBOOKMARKS"
}
cmd_rm() {
rm -i "$FSBOOKMARKS"
}
cmd_ls() {
ls "$FSBOOKMARKS"
}
#-------------------------------------------------------------------------------
# end custom functions
################################################################################
prepare
main "$@"
|
dgengtek/scripts
|
tools/fsbookmark.sh
|
Shell
|
mit
| 6,318 |
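# forcefully (SIGKILL) terminate every process whose command line matches "S1.log"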
pkill -9 -f S1.log
|
msuchoi/Coffee
|
stop.sh
|
Shell
|
mit
| 19 |