code | repo_name | path | language | license | size
---|---|---|---|---|---|
#!/bin/bash
VERSION="0.1.1"
SYSTEM_CONFIG="/etc/chronicle.cfg"
USER_CONFIG="$HOME/.chronicle.cfg"
if [ -z "$EDITOR" ]; then
EDITOR="vim + +startinsert"
fi
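# The leading ':' makes the parameter expansion a no-op command, so ':=' just assigns the default.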
: "${CHRONICLE_DIR:="$HOME/.chronicle"}"
DATE_FORMAT="%Y/%m/%d/%H:%M:%S"
TMP_ENTRY="/tmp/chronicle.cfg"
TMP_ENTRY_ORIG="/tmp/chronicle.cfg.empty"
ENCRYPTION="FALSE"
ENCRYPTION_METHOD="aes-256-cbc"
DEBUG="FALSE"
WARNINGS="TRUE"
COLOR="TRUE"
command="$1"
which sensible-editor >/dev/null 2>&1;
if [[ "$?" -eq 0 ]]; then
EDITOR="sensible-editor";
fi
# ---[ Information ]--------------------------------------------------------- #
manual () {
echo "Chronicle, the command line journal manager.
Version $VERSION
Usage:
chronicle COMMAND
Commands:
enter: Write a new entry.
default-config: Print the default config values,
write them to the given file if present.
backup: Backup journal entries.
version: Output only the version
help: Output this.
In depth documentation:
- Backup:
usage: chronicle backup METHOD
supported methods:
--uncompressed DIRECTORY
Copies the entire journal entry directory to the given directory.
--gzip FILE
Compresses the entire journal entry directory into the given file.
"
}
# ---[ Feedback ]------------------------------------------------------------ #
ESC_SEQ="\x1b["
COL_RESET=$ESC_SEQ"39;49;00m"
COL_RED=$ESC_SEQ"31;11m"
COL_GREEN=$ESC_SEQ"32;11m"
COL_YELLOW=$ESC_SEQ"33;11m"
COL_BLUE=$ESC_SEQ"34;11m"
COL_MAGENTA=$ESC_SEQ"35;11m"
COL_CYAN=$ESC_SEQ"36;11m"
print_colored_text () {
color=$1
text=$2
color_code="COL_$color"
if [ "$COLOR" == "TRUE" ]
then
echo -e "${!color_code}$text$COL_RESET"
else
echo "$text"
fi
}
debug () {
if [ "$DEBUG" == "TRUE" ]
then
print_colored_text GREEN "[DEBUG]: $*"
fi
}
warning () {
if [ "$WARNINGS" == "TRUE" ]
then
print_colored_text YELLOW "[WARNING]: $*"
fi
}
error () {
print_colored_text RED "[ERROR]: $*"
exit 1
}
# ---[ Config ]-------------------------------------------------------------- #
safe_source (){
configfile=$1
configfile_secured="/tmp/$(basename "$configfile")"
if [ ! -r "$configfile" ]
then
warning "Could not read config file \"$configfile\"."
if [ -e "$configfile" ]
then
if [ -d "$configfile" ]
then
debug "It's a directory"
else
debug "You don't have the correct permissions"
fi
else
debug "It doesn't exist"
fi
return
fi
# check if the file contains something we don't want
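# intent: only comment lines and simple KEY=VALUE assignments should survive into the sourced file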
if egrep -q -v '^#|^[^ ]*=[^;&]*' "$configfile"
then
debug "Config file is unclean, cleaning it..."
# filter the original to a new file
egrep '^#|^[^ ]*=[^;&]*' "$configfile" > "$configfile_secured"
configfile="$configfile_secured"
fi
source "$configfile"
}
read_config (){
debug "Reading system-wide config"
safe_source "$SYSTEM_CONFIG"
debug "Reading user config"
safe_source "$USER_CONFIG"
}
cfg () {
key="$1"
value="$2"
echo "$key=\"$value\"" >> "$output_file"
}
default_config (){
debug "Generating default config file"
file_argument=$2
output_file="/dev/stdout"
if [ "$file_argument" ]
then
output_file="$file_argument"
debug "Writing default config file to $output_file"
else
debug "Writing default config file to stdout."
fi
cfg "DEBUG" "$DEBUG"
cfg "WARNINGS" "$WARNINGS"
cfg "COLOR" "$COLOR"
cfg "CHRONICLE_DIR" "$CHRONICLE_DIR"
cfg "EDITOR" "$EDITOR"
cfg "DATE_FORMAT" "$DATE_FORMAT"
cfg "ENCRYPTION" "$ENCRYPTION"
cfg "ENCRYPTION_METHOD" "$ENCRYPTION_METHOD"
cfg "TMP_ENTRY" "$TMP_ENTRY"
cfg "TMP_ENTRY_ORIG" "$TMP_ENTRY_ORIG"
}
# ---[ New entry ]----------------------------------------------------------- #
prepare () {
# If the tmp entry exists, delete it first.
if [ -e $TMP_ENTRY ]
then
rm -f $TMP_ENTRY
fi
file=$1
echo >> "$file"
}
encrypt () {
in_file=$1
out_file=$2
debug "Encrypting the new entry"
openssl "$ENCRYPTION_METHOD" -e -in "$in_file" -out "$out_file"
}
enter () {
debug "Starting new entry"
prepare $TMP_ENTRY
entry_file=$CHRONICLE_DIR/$(date +"$DATE_FORMAT")
cp $TMP_ENTRY $TMP_ENTRY_ORIG
# possibly edit the file
$EDITOR $TMP_ENTRY
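# diff exits with 1 when the files differ, i.e. the user actually wrote something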
diff $TMP_ENTRY $TMP_ENTRY_ORIG > /dev/null 2>&1
if [ "$?" == "1" ]
then
debug "Generating a new entry file: $entry_file"
mkdir -p "$(dirname "$entry_file")"
if [ "$ENCRYPTION" == "TRUE" ]
then
entry_file="$entry_file.enc"
encrypt "$TMP_ENTRY" "$entry_file"
else
entry_file=$entry_file.txt
mv "$TMP_ENTRY" "$entry_file"
chmod 600 "$entry_file"
fi
else
debug "Not generating a new entry, no new content"
fi
rm -f $TMP_ENTRY_ORIG
}
# ---[ Backup ]-------------------------------------------------------------- #
gzip_backup () {
compressed_file="$(realpath $1)"
if [ "$compressed_file" == "" ]
then
error "No target file given"
else
if [ -e "$compressed_file" ]
then
debug "Target exists"
if [ -f "$compressed_file" ] # It's a file.
then
error "Target file already exists."
fi
if [ -d "$compressed_file" ] # It's a directory.
then
warning "Target is an existing directory. Copying into it."
fi
fi
debug "Backing up to $compressed_file."
fi
tar -zcvf "$compressed_file" "$CHRONICLE_DIR"
debug "Backed up and compressed all journal entries into $compressed_file"
}
uncompressed_backup () {
copied_directory="$(realpath $1)"
if [ "$copied_directory" == "" ]
then
error "No target directory given"
else
if [ -e "$copied_directory" ] # It exists
then
debug "Target exists"
if [ -f "$copied_directory" ] # It's a file.
then
error "Target is a file."
fi
if [ -d "$copied_directory" ] # It's a directory.
then
warning "Target is an existing directory. Copying into it."
fi
fi
debug "Backup to $copied_directory".
fi
cp -r "$CHRONICLE_DIR" "$copied_directory"
debug "Backed up all journal entries in $copied_directory"
}
backup () {
method="$2"
case "$method" in
"--uncompressed" )
dir="$3"
uncompressed_backup "$dir"
;;
"--gzip" )
file="$3"
gzip_backup "$file"
;;
* )
error "Unrecognized method, run 'chronicle help' for help."
;;
esac
}
# ---[ Execute ]------------------------------------------------------------- #
read_config
case "$command" in
"enter" )
enter
;;
"default-config" )
default_config "$@"
;;
"backup" )
backup "$@"
;;
"help" )
manual
;;
"version" )
echo $VERSION
;;
* )
manual
esac
|
NorfairKing/chronicle
|
chronicle.sh
|
Shell
|
gpl-2.0
| 7,525 |
#!/usr/bin/env sh
#
# Copyright 2012 Amazon Technologies, Inc.
#
# Licensed under the Amazon Software License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://aws.amazon.com/asl
#
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and
# limitations under the License.
cd ../..
cd bin
./generateResultsSummary.sh -resultsFile ../samples/image_category/image_category.results -outputFile ../samples/image_category/image_category.summary -sample image_category
cd ..
cd samples/image_category
|
ucberkeley/moocchat
|
turk/samples/image_category/generateResultsSummary.sh
|
Shell
|
gpl-3.0
| 723 |
#!/bin/bash
for if in $(find ~/.ferris -name "*.db")
do
echo "Verifying database $if..."
db_verify $if
done
|
monkeyiq/ferris
|
apps/ferris-verify-dot-ferris-databases.sh
|
Shell
|
gpl-3.0
| 117 |
#!/bin/bash
python AnalyzeSimulation.py --paralog1 YDR418W --paralog2 YEL054C --simnum 73 > YDR418W_YEL054C_MG94_nonclock_Sim73_PrintScreen.txt
|
xjw1001001/IGCexpansion
|
Simulation/ShFiles/MG94_YDR418W_YEL054C_sim73.sh
|
Shell
|
gpl-3.0
| 145 |
#!/usr/bin/env bash
# Copyright: Christoph Dittmann <[email protected]>
# License: GNU GPL, version 3 or later; http://www.gnu.org/copyleft/gpl.html
#
# Converts Japanese text into Romaji. The quality of the output
# heavily depends on kakasi and mecab. Kanji readings might be a bit
# off.
# shellcheck source=gettext/gettext.sh
. "$(dirname "$0")"/../gettext/gettext.sh
READING=$(printf '%s' "$*" | mecab --node-format="%f[5] " --eos-format= --unk-format=%m)
# For some reason utf-8 support in kakasi on Ubuntu 9.04 seems to be
# broken.
# Fortunately, we have iconv.
# Unfortunately, this breaks the backslash '\' (among other, less
# important things), so we remove it first.
READING=${READING//\\/}
RESULT=$(printf '%s' "$READING" | kakasi -iutf8 -outf8 -Ka -Ha -Ja)
if [ -n "$RESULT" ]; then
# Remove newlines. There shouldn't be any, but we make sure.
RESULT=${RESULT//$'\n'/}
# Restrict length and print result
printf " %s\n" "${RESULT:0:300}"
else
echo_ 'No result.'
fi
exit 0
|
Christoph-D/Japanese-Tools
|
romaji/romaji.sh
|
Shell
|
gpl-3.0
| 1,013 |
# Test for flaky tab failures
rm -rf 3*
echo b > 3ob
echo g > 3og
echo 'g' >> 3 g
echo 'g' >> 3 g
echo g >> 3 g
echo g >> 3 g
echo g >> 3 g
echo 'g' >> 3 g
echo 'g' >> 3 g
echo 'g' >> 3 g
echo 'g' >> 3 g
rm 3 b
mv 3 out3
|
ardagnir/athame
|
test/shell/inst3.sh
|
Shell
|
gpl-3.0
| 222 |
#!/bin/bash
# SE-VPN script
echo "Updating system"
apt-get update
#DEBIAN_FRONTEND=noninteractive apt-get -y -o DPkg::options::="--force-confdef" -o DPkg::options::="--force-confold"
DEBIAN_FRONTEND=noninteractive apt-get -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" dist-upgrade
echo iptables-persistent iptables-persistent/autosave_v4 boolean true | debconf-set-selections
echo iptables-persistent iptables-persistent/autosave_v6 boolean true | debconf-set-selections
echo "Installing dependencies"
cat /etc/resolv.conf > /etc/resolv.conf.default
apt-get install -y unzip curl git bc make gcc openssl build-essential iptables-persistent haproxy tmux mosh
apt-get install -y isc-dhcp-server
cp -n /etc/dhcp/dhcpd.conf{,.bak}
mv /etc/dhcp/dhcpd.conf /etc/dhcp/dhcpd.conf.default
touch /etc/dhcp/dhcpd.conf
cat <<EOF >> /etc/dhcp/dhcpd.conf
default-lease-time 600;
max-lease-time 7200;
option domain-name "vpn.team28devs.com";
subnet 192.168.199.0 netmask 255.255.255.0 {
range 192.168.199.100 192.168.199.150;
option domain-name-servers 192.168.199.1, 8.8.8.8, 207.67.222.222;
option routers 5.5.0.1;
}
EOF
chown -R root:dhcpd /var/lib/dhcp/
chmod 775 -R /var/lib/dhcp/
touch /etc/apparmor.d/local/usr.sbin.dhcpd
cat << 'EOF' >> /etc/apparmor.d/local/usr.sbin.dhcpd
/var/lib/dhcp/dhcpd.leases* rwl,
/var/lib/dhcp/dhcpd6.leases* rwl,
/etc/dhcp/dhcpd.conf r,
/etc/dhcp/dhcpd6.conf r,
EOF
apparmor_parser -r /etc/apparmor.d/usr.sbin.dhcpd
echo "setting up bind9 dns server"
apt-get install -y bind9 bind9utils bind9-doc
cp /etc/bind/named.conf /etc/bind/named.conf.default
sed -i 's#include "/etc/bind/named.conf.options"#//include "/etc/bind/named.conf.options"#g' /etc/bind/named.conf
cat <<EOF >> /etc/bind/named.conf.local
acl goodclients {
192.168.199.0/24;
localhost;
localnets;
::;
};
options {
directory "/var/cache/bind";
recursion yes;
allow-query { goodclients; };
//dnssec-validation auto;
auth-nxdomain no; # conform to RFC1035
listen-on-v6 { any; };
forwarders {
8.8.8.8;
8.8.4.4;
208.67.222.222;
208.67.220.220;
172.31.0.2;
172.16.0.2;
};
//forward-only;
dnssec-enable yes;
dnssec-validation yes;
};
EOF
# restore the original resolv.conf
systemctl restart bind9
cat /etc/resolv.conf.default > /etc/resolv.conf
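# on Google Compute Engine hosts, keep the metadata server reachable after the DNS changes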
HOSTG=$(cat /etc/hosts | grep metadata.google.internal)
if [[ $HOSTG =~ metadata.google.internal ]]; then
echo "server=169.254.169.254" >> /etc/dnsmasq.conf;
( echo "169.254.169.254 metadata.google.internal" | tee -a /etc/hosts ) &>/dev/null
fi
cat /etc/resolv.conf.default > /etc/resolv.conf
apt-get install -y libreadline-dev libncurses5-dev libssl-dev libevent-dev
DISTRO=$(lsb_release -ds 2>/dev/null || cat /etc/*release 2>/dev/null | head -n1 || uname -om)
if [[ $DISTRO =~ Debian ]]; then
echo deb http://httpredir.debian.org/debian jessie-backports main | sed 's/\(.*-backports\) \(.*\)/&@\1-sloppy \2/' | tr @ '\n' | tee /etc/apt/sources.list.d/backports.list;
curl https://haproxy.debian.net/bernat.debian.org.gpg | apt-key add -;
echo deb http://haproxy.debian.net jessie-backports-1.6 main | tee /etc/apt/sources.list.d/haproxy.list;
apt-get update;
apt-get install -y haproxy -t jessie-backports;
apt-get install -y squid3;
apt-get install -y dnsutils;
else
apt-get install -y software-properties-common
add-apt-repository -y ppa:vbernat/haproxy-1.6
apt-get update
apt-get install -y squid haproxy;
fi
wget -O bash.bashrc https://raw.githubusercontent.com/bjdag1234/SEVPNsetup/master/bash.bashrc
mv /etc/bash.bashrc /etc/bash.bashrc.default
mv bash.bashrc /etc/bash.bashrc
rm /home/*/.bashrc
rm /root/.bashrc
fallocate -l 2G /swapfile
chmod 600 /swapfile
mkswap /swapfile
swapon /swapfile
echo '/swapfile none swap sw 0 0' | sudo tee -a /etc/fstab
sudo sysctl vm.swappiness=10
sudo sysctl vm.vfs_cache_pressure=50
echo 1 > /proc/sys/net/ipv4/ip_forward
sudo sed -i 's/#net.ipv4.ip_forward/net.ipv4.ip_forward/g' /etc/sysctl.conf
sudo sed -i 's/net.ipv4.ip_forward = 0/net.ipv4.ip_forward = 1/g' /etc/sysctl.conf
echo "vm.swappiness=10" >> /etc/sysctl.conf
echo "vm.vfs_cache_pressure=50" >> /etc/sysctl.conf
sudo sysctl -p /etc/sysctl.conf
sed -i 's/ChallengeResponseAuthentication no/ChallengeResponseAuthentication yes/g' /etc/ssh/sshd_config
sed -i 's/PasswordAuthentication no/PasswordAuthentication yes/g' /etc/ssh/sshd_config
echo "net.ipv4.ip_forward = 1" > /etc/sysctl.d/90-useroverrides.conf
( echo "127.0.1.1 $(cat /etc/hostname)" | tee -a /etc/hosts ) &>/dev/null
wget https://github.com/tmux/tmux/releases/download/2.1/tmux-2.1.tar.gz
tar xvzf tmux-2.1.tar.gz
cd tmux-2.1
./configure
make && make install
cd
wget -O tmux.conf https://raw.githubusercontent.com/bjdag1234/SEVPNsetup/master/tmux.conf
for i in $(ls /home); do
cp tmux.conf /home/$i/.tmux.conf
done
cp tmux.conf /root/.tmux.conf
rm tmux.conf
#Kill existing vpnservers
service vpnserver stop &>/dev/null
killall vpnserver &>/dev/null
killall vpnbridge &>/dev/null
killall vpnclient &>/dev/null
killall vpncmd &>/dev/null
rm -rf SoftEtherVPN &>/dev/null
rm -rf /opt/vpnserver &>/dev/null
rm -rf /usr/vpnserver &>/dev/null
rm -rf /opt/vpnbridge&>/dev/null
rm -rf /usr/vpnbridge &>/dev/null
rm -rf /opt/vpnclient &>/dev/null
rm -rf /usr/vpnclient &>/dev/null
rm -rf /opt/vpncmd &>/dev/null
rm -rf /usr/vpncmd &>/dev/null
rm -rf /usr/bin/vpnserver &>/dev/null
rm -rf /usr/bin/vpnclient &>/dev/null
rm -rf /usr/bin/vpncmd &>/dev/null
rm -rf /usr/bin/vpnbridge &>/dev/null
wget -qO SoftEtherVPN-master.zip https://codeload.github.com/SoftEtherVPN/SoftEtherVPN/zip/master
unzip SoftEtherVPN-master.zip
cd SoftEtherVPN-master
sed -i 's#/usr/vpnserver#/opt/vpnserver#g' src/makefiles/linux_*.mak
sed -i 's#/usr/vpnclient#/opt/vpnclient#g' src/makefiles/linux_*.mak
sed -i 's#/usr/vpnbridge#/opt/vpnbridge#g' src/makefiles/linux_*.mak
sed -i 's#/usr/vpncmd/#/opt/vpncmd/#g' src/makefiles/linux_*.mak
sed -i 's#usr/vpncmd#opt/vpncmd#g' src/makefiles/linux_*.mak
./configure
make
make install
cp systemd/softether-vpnserver.service /etc/systemd/system/vpnserver.service
systemctl daemon-reload
systemctl enable vpnserver.service
systemctl stop squid
systemctl stop haproxy
cd ..
wget -O squid.conf https://raw.githubusercontent.com/bjdag1234/SEVPNsetup/master/squid.conf
wget -O squid3.conf https://raw.githubusercontent.com/bjdag1234/SEVPNsetup/master/squid3.conf
wget -O sony-domains.txt https://raw.githubusercontent.com/bjdag1234/SEVPNsetup/master/sony-domains.txt
IP="$(dig +short myip.opendns.com @resolver1.opendns.com)"
if [[ $DISTRO =~ Debian ]]; then
sed -i "s/123.123.123.123/$IP/g" squid3.conf
mv /etc/squid3/squid.conf /etc/squid3/squid.conf.default;
mv squid3.conf /etc/squid3/squid.conf;
mv sony-domains.txt /etc/squid3/sony-domains.txt
ln -s /usr/bin/squid3 /usr/bin/squid
else
sed -i "s/123.123.123.123/$IP/g" squid.conf
mv /etc/squid/squid.conf /etc/squid/squid.conf.default;
mv squid.conf /etc/squid/squid.conf;
mv sony-domains.txt /etc/squid/sony-domains.txt
fi
wget -O haproxy.cfg https://raw.githubusercontent.com/bjdag1234/SEVPNsetup/master/haproxy.cfg
mv /etc/haproxy/haproxy.cfg /etc/haproxy/haproxy.cfg.default
mv haproxy.cfg /etc/haproxy/haproxy.cfg
wget https://raw.githubusercontent.com/bjdag1234/SEVPNsetup/master/iptables-vpn.sh
chmod +x iptables-vpn.sh
sh iptables-vpn.sh
wget https://raw.githubusercontent.com/bjdag1234/SEVPNsetup/master/ip6tables-vpn.sh
chmod +x ip6tables-vpn.sh
sh ip6tables-vpn.sh
wget -O caddy_linux_amd64_custom.tar.gz 'https://caddyserver.com/download/linux/amd64?plugins=hook.service,http.authz,http.cgi,http.cors,http.expires,http.filemanager,http.filter,http.git,http.hugo,http.ipfilter,http.jwt,http.mailout,http.minify,http.proxyprotocol,http.ratelimit,http.realip,http.upload,net,tls.dns.cloudflare,tls.dns.digitalocean'
wget https://raw.githubusercontent.com/bjdag1234/SEVPNsetup/master/index.html
mkdir -p /etc/caddy
mv caddy_linux_amd64_custom.tar.gz /etc/caddy/
cd /etc/caddy/
tar xvzf caddy_linux_amd64_custom.tar.gz
mkdir -p /var/www/html
mkdir -p /var/log/caddy
chown root:www-data /var/log/caddy/
chmod 775 /var/log/caddy
mv index.html /var/www/html/
myip="$(dig +short myip.opendns.com @resolver1.opendns.com)"
cat <<EOF >> /etc/caddy/Caddyfile
localhost:8081 somehost:8081 hostname.softether.net:8081 {
gzip
log /var/log/caddy/access.log
tls off
root /var/www/html
}
EOF
sed -i "s/somehost/$myip/g" /etc/caddy/Caddyfile
cd
wget https://raw.githubusercontent.com/bjdag1234/SEVPNsetup/master/caddy.service
mv caddy.service /etc/systemd/system/
systemctl daemon-reload
systemctl enable caddy
systemctl start vpnserver
wget -O wordlist.txt https://raw.githubusercontent.com/bjdag1234/SEVPNsetup/master/wordlist.txt
FILE=wordlist.txt
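# pick two random words; combined they become the dynamic-DNS hostname registered below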
WORD=$(sort -R $FILE | head -1)
WORD2=$(sort -R $FILE | head -1)
vpncmd 127.0.0.1:5555 /server /cmd:hubdelete DEFAULT
vpncmd 127.0.0.1:5555 /server /cmd:hubcreate VPN /password:""
vpncmd 127.0.0.1:5555 /server /hub:VPN /cmd:SetEnumDeny
vpncmd 127.0.0.1:5555 /server /hub:VPN /cmd:UserDelete vpn
vpncmd 127.0.0.1:5555 /server /hub:VPN /cmd:UserCreate vpn /group:"" /realname:vpn /note:vpnuser
vpncmd 127.0.0.1:5555 /server /hub:VPN /cmd:UserCreate vpn1 /group:"" /realname:vpn /note:vpnuser
vpncmd 127.0.0.1:5555 /server /hub:VPN /cmd:UserCreate vpn2 /group:"" /realname:vpn /note:vpnuser
vpncmd 127.0.0.1:5555 /server /hub:VPN /cmd:UserCreate vpn3 /group:"" /realname:vpn /note:vpnuser
vpncmd 127.0.0.1:5555 /server /hub:VPN /cmd:UserCreate vpn4 /group:"" /realname:vpn /note:vpnuser
vpncmd 127.0.0.1:5555 /server /hub:VPN /cmd:UserCreate vpn5 /group:"" /realname:vpn /note:vpnuser
vpncmd 127.0.0.1:5555 /server /hub:VPN /cmd:UserPasswordset vpn1 /password:"vpn1"
vpncmd 127.0.0.1:5555 /server /hub:VPN /cmd:UserPasswordset vpn2 /password:"vpn2"
vpncmd 127.0.0.1:5555 /server /hub:VPN /cmd:UserPasswordset vpn3 /password:"vpn3"
vpncmd 127.0.0.1:5555 /server /hub:VPN /cmd:UserPasswordset vpn4 /password:"vpn4"
vpncmd 127.0.0.1:5555 /server /hub:VPN /cmd:UserPasswordset vpn5 /password:"vpn5"
vpncmd 127.0.0.1:5555 /server /cmd:bridgecreate VPN /device:soft /tap:yes
vpncmd 127.0.0.1:5555 /server /cmd:ListenerList
vpncmd 127.0.0.1:5555 /server /cmd:ListenerCreate 995
vpncmd 127.0.0.1:5555 /server /cmd:ListenerDelete 443
vpncmd 127.0.0.1:5555 /SERVER /CMD:DynamicDnsSetHostname $WORD$WORD2
sed -i "s/hostname/$WORD$WORD2/g" /etc/caddy/Caddyfile
systemctl restart vpnserver
wget -O /usr/bin/sprunge https://raw.githubusercontent.com/bjdag1234/SEVPNsetup/master/sprunge.sh
chmod 755 /usr/bin/sprunge
wget https://raw.githubusercontent.com/bjdag1234/SEVPNsetup/master/globe.txt
wget https://raw.githubusercontent.com/bjdag1234/SEVPNsetup/master/tnt.txt
wget https://raw.githubusercontent.com/bjdag1234/SEVPNsetup/master/hpi.txt
wget https://raw.githubusercontent.com/bjdag1234/SEVPNsetup/master/injector.txt
vpncmd 127.0.0.1:5555 /SERVER /CMD:OpenVpnMakeConfig openvpn
unzip openvpn.zip
myip="$(dig +short myip.opendns.com @resolver1.opendns.com)"
GLOBE_MGC="$(cat globe.txt)"
TNT="$(cat tnt.txt)"
GLOBE_INET="$(cat udp.txt)" # NOTE: udp.txt is never downloaded above and this variable is unused below
INJ="$(cat injector.txt)"
HPI="$(cat hpi.txt)"
REMOTE="$(ls *remote*.ovpn)"
SRVHOSTNAMEGLOBE="$(hostname)_tcp_globe_mgc.ovpn"
SRVHOSTNAMETNT="$(hostname)_tcp_tnt.ovpn"
SRVHOSTNAMEHPI="$(hostname)_tcp_hpi.ovpn"
SRVHOSTNAMEINJ="$(hostname)_tcp_injector.ovpn"
# the original never defined SRVHOSTNAMEUDP although it is used below; assumed name:
SRVHOSTNAMEUDP="$(hostname)_udp.ovpn"
rm -f *bridge_l2.ovpn
cp $REMOTE $SRVHOSTNAMEGLOBE
cp $REMOTE $SRVHOSTNAMETNT
cp $REMOTE $SRVHOSTNAMEUDP
cp $REMOTE $SRVHOSTNAMEHPI
cp $REMOTE $SRVHOSTNAMEINJ
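# strip comment lines (leading # or ;) and blank lines from the generated .ovpn profiles and point them at this server's IP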
sed -i '/^\s*[@#]/ d' *.ovpn
sed -i '/^\s*[@;]/ d' *.ovpn
sed -i "s/\(vpn[0-9]*\).v4.softether.net/$myip/" *.ovpn
sed -i 's/udp/tcp/' *tcp*.ovpn
sed -i 's/1194/443/' *tcp*.ovpn
sed -i 's/tcp/udp/' *udp*.ovpn
sed -i 's/1194/9201/' *udp*.ovpn
sed -i 's/443/9201/' *udp*.ovpn
sed -i 's/auth-user-pass/auth-user-pass account.txt/' *.ovpn
sed -i '/^\s*$/d' *.ovpn
sed -i "s#<ca>#$GLOBE_MGC#" *tcp_globe_mgc.ovpn
sed -i "s#<ca>#$TNT#" *tcp_tnt.ovpn
sed -i "s#<ca>#$INJ#" *tcp_injector.ovpn
sed -i "s#<ca>#$HPI#" *tcp_hpi.ovpn
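# the injector profile presumably tunnels through a local proxy: disable the public remote and connect via 127.0.0.1:443 instead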
sed -i "s/^remote/#remote/g" *tcp_injector.ovpn
sed -i "s/123.123.123.123/$myip/g" *tcp_injector.ovpn
sed -i "s/^cipher/remote 127.0.0.1 443\ncipher/g" *tcp_injector.ovpn
wget https://raw.githubusercontent.com/bjdag1234/SEVPNsetup/master/getconfig.sh
chmod +x getconfig.sh
wget https://raw.githubusercontent.com/bjdag1234/SEVPNsetup/master/tap_soft.interface
echo "" >> /etc/network/interfaces
cat tap_soft.interface >> /etc/network/interfaces
sed -i 's#exit 0#ifconfig tap_soft 192.168.199.1/24\n/usr/sbin/dhcpd\n\nexit 0#g' /etc/rc.local
ifconfig tap_soft 192.168.199.1/24
ifconfig tap_soft | grep addr
systemctl restart isc-dhcp-server.service
squid -k reconfigure
systemctl restart haproxy
clear
echo -e "\033[0;34mFinished installing SoftEtherVPN."
echo -e "\033[1;34m"
vpncmd 127.0.0.1:5555 /SERVER /CMD:DynamicDNSGetStatus | tee -a SEVPN.setup
WORD3=$(sort -R $FILE | head -1)
WORD4=$(sort -R $FILE | head -1)
WORD5=$(sort -R $FILE | head -1)
SRVPASSWORD=$WORD3$WORD4$WORD5
touch SEVPN.setup
vpncmd 127.0.0.1:5555 /Server /cmd:serverpasswordset $SRVPASSWORD
echo "Go to these URLs to get your OpenVPN config files"
echo -e "\033[1;33m"
echo Globe-mgc: $( cat *tcp_globe*.ovpn | sprunge ) | tee -a SEVPN.setup
echo TCP_TNT: $(cat *tcp_tnt*.ovpn | sprunge ) | tee -a SEVPN.setup
echo TCP_HPI: $(cat *tcp_hpi*.ovpn | sprunge ) | tee -a SEVPN.setup
echo TCP_INJECTOR: $(cat *tcp_injector*.ovpn | sprunge ) | tee -a SEVPN.setup
rm -f *.ovpn
echo -e "\033[1;34m"
echo "Don't forget to create a text file named account.txt with your username"
echo "on the first line and your password on the second line."
echo -e "\033[1;34m"
echo "Server WAN/Public IP address: ${myip}" | tee -a SEVPN.setup
echo "Your password for SEVPN server admin is: $SRVPASSWORD" | tee -a SEVPN.setup
echo ""
echo "Username and Password pairs for the virtual hub VPN:"
echo -e "\033[1;35mvpn - vpn ; vpn1 - vpn1 ; vpn2 - vpn2 ; vpn3 - vpn3 ; vpn4 - vpn4"
echo -e "\033[1;34musername and password are the same"
echo ""
echo "Ports:"| tee -a SEVPN.setup
echo "Shared TCP Ports (SEVPN,Squid,SSH): 80,443,8080"| tee -a SEVPN.setup
echo "Squid TCP Port: 3128"| tee -a SEVPN.setup
echo "SSH TCP Port: 22"| tee -a SEVPN.setup
echo "SEVPN/OpenVPN TCP Ports: 82,995,992,5555,5242,4244,9200,9201,21,137,8484"| tee -a SEVPN.setup
echo "OpenVPN UDP Ports: 80,82,443,5242,4244,3128,9200,9201,21,137,8484,5243,9785,2000-4499,4501-8000"| tee -a SEVPN.setup
echo ""
echo -e "\033[0m"
echo "Your configuration details available at: $(cat SEVPN.setup | sprunge )"
ifconfig tap_soft | grep addr
rm -f vpn_server.config
rm -f *.txt
rm -f iptables-vpn.sh
rm -f *.pdf
service vpnserver restart
systemctl start caddy
service isc-dhcp-server restart
service bind9 restart
service haproxy restart
/usr/sbin/dhcpd
DISTRO=$(lsb_release -ds 2>/dev/null || cat /etc/*release 2>/dev/null | head -n1 || uname -om)
if [[ $DISTRO =~ Debian ]]; then
service squid3 start || true
squid3 -k reconfigure || true
else
service squid start || true
squid -k reconfigure || true
fi
|
bjdag1234/SEVPNsetup
|
setup.sh
|
Shell
|
gpl-3.0
| 15,227 |
#!/bin/bash
# SWiM - a semi-Lagrangian, semi-implicit shallow water model in
# Cartesian coordiates
# Copyright (C) 2008-2012 Christian Lerrahn
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# plotcombine.sh
if [ "$1" == "" ]
then
echo "Usage: plotcombine.sh <series name> [comparison plot]"
exit
fi
# read in files to plot
count=0
list=""
while read plotnumber
do
let count=${count}+1
plotno=`printf "%04d" ${plotnumber}`
list=`echo "${list} "``echo ${plotno}`
done
name=$1
comp=$2
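# For each field (phi, u, v), stitch the cropped frames into a montage and re-attach the colorbar;
# when a comparison plot is given, the two series are placed side by side.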
for i in phi u v
do
#for j in ${i}*[0-9].png; do convert -crop 560x600+119+0 ${j} `basename ${j} .png`-cropped.png; done
#convert -crop 640x80+90+450 ${i}-colorbar.png ${i}-colorbar-cropped.png
if [ "${comp}" != "" ]
then
cpath=`echo ${comp} | sed -e"s:%%:${i}:"`
montage -geometry 280x300 -tile 1x${count} `for j in ${list}; do echo ${i}${j}-cropped.png; done` ${i}-${name}-short.png
montage -geometry 280x`echo "${count}*300" | bc` -tile 2x1 -gravity south -pointsize 20 -label a ${i}-${name}-short.png -gravity south -pointsize 20 -label b ${cpath} ${i}-${name}-comparison.png
montage -geometry 560x`echo "${count}*300+28" | bc`\>+0+0 -gravity south -tile 1x2 ${i}-colorbar-cropped.png ${i}-${name}-comparison.png ${i}-${name}.tmp.png
convert -crop 560x`echo "${count}*300+108" | bc`+0+`echo ${count}*300-52 | bc` ${i}-${name}.tmp.png ${i}-${name}-comparison.png
else
if [ `echo "${count}%2" | bc` == "1" ]
then
let count=count+1
fi
ytiles=`echo "${count}/2" | bc`
montage -geometry 280x300 -tile 2x${ytiles} `for j in ${list}; do echo ${i}${j}-cropped.png; done` ${i}-${name}.png
montage -geometry 560x`echo "${ytiles}*300" | bc`\>+0+0 -gravity south -tile 1x2 ${i}-colorbar-cropped.png ${i}-${name}.png ${i}-${name}.tmp.png
convert -crop 560x`echo "${ytiles}*300+80" | bc`+0+`echo "${ytiles}*300-80" | bc` ${i}-${name}.tmp.png ${i}-${name}.png
fi
rm ${i}-${name}.tmp.png
done
|
jsfan/swim
|
scripts/plotcombine.sh
|
Shell
|
gpl-3.0
| 2,557 |
#!/usr/bin/env bash
set -xe
# Override the git branch by a manual value
if [ -n "$GIT_BRANCH_MANUAL" ]; then
GIT_BRANCH="$GIT_BRANCH_MANUAL"
fi
if [ -z "$GIT_BRANCH" ]; then echo "\$GIT_BRANCH not set"; exit 1; fi
# go to root directory of Neos.Neos.Ui
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd $DIR/../../
path_to_yarn=$(which yarn)
if [ -z "$path_to_yarn" ] ; then
echo "installing yarn:"
npm install -g yarn
fi
GIT_SHA1=`git rev-parse HEAD`
GIT_TAG=`git describe --exact-match HEAD 2>/dev/null || true`
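# GIT_TAG stays empty unless HEAD sits exactly on a tag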
# Override the git tag by a manual value
if [ -n "$GIT_TAG_MANUAL" ]; then
GIT_TAG="$GIT_TAG_MANUAL"
fi
export NVM_DIR="$HOME/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" # This loads nvm
[ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion" # This loads nvm bash_completion
export NODE_OPTIONS="--max-old-space-size=4096"
nvm install && nvm use
make install
make build-production
rm -Rf tmp_compiled_pkg
git clone [email protected]:neos/neos-ui-compiled.git tmp_compiled_pkg
cd tmp_compiled_pkg
git checkout "$GIT_BRANCH"
cd ..
mkdir -p tmp_compiled_pkg/Resources/Public/JavaScript
mkdir -p tmp_compiled_pkg/Resources/Public/Styles
cp -Rf Resources/Public/JavaScript/* tmp_compiled_pkg/Resources/Public/JavaScript
cp -Rf Resources/Public/Styles/* tmp_compiled_pkg/Resources/Public/Styles
cd tmp_compiled_pkg
git add Resources/Public/
git commit -m "Compile Neos UI - $GIT_SHA1" || true
if [[ "$GIT_BRANCH" == "origin/master" || "$GIT_BRANCH" == "origin/4.0" || "$GIT_BRANCH" == "origin/5.0" || "$GIT_BRANCH" == "origin/5.1" || "$GIT_BRANCH" == "origin/5.2" || "$GIT_BRANCH" == "origin/5.3" || "$GIT_BRANCH" == "origin/7.0" || "$GIT_BRANCH" == "origin/7.1" || "$GIT_BRANCH" == "origin/7.2" || "$GIT_BRANCH" == "origin/7.3" ]]; then
echo "Git branch $GIT_BRANCH found, pushing to this branch."
git push origin HEAD:${GIT_BRANCH#*/}
fi
if [ "$GIT_TAG" != "" ]; then
echo "Git tag $GIT_TAG found; also tagging the UI-compiled package."
git tag -a -m "$GIT_TAG" $GIT_TAG
git push origin $GIT_TAG
fi
|
neos/neos-ui
|
Build/Jenkins/update-neos-ui-compiled.sh
|
Shell
|
gpl-3.0
| 2,095 |
#!/usr/bin/env php
<?php
/*
+-----------------------------------------------------------------------+
| bin/update.sh |
| |
| This file is part of the Roundcube Webmail client |
| Copyright (C) 2010-2011, The Roundcube Dev Team |
| |
| Licensed under the GNU General Public License version 3 or |
| any later version with exceptions for skins & plugins. |
| See the README file for a full license statement. |
| |
| PURPOSE: |
| Check local configuration and database schema after upgrading |
| to a new version |
+-----------------------------------------------------------------------+
| Author: Thomas Bruederli <[email protected]> |
+-----------------------------------------------------------------------+
*/
define('INSTALL_PATH', realpath(dirname(__FILE__) . '/..') . '/' );
require_once INSTALL_PATH . 'program/include/clisetup.php';
require_once INSTALL_PATH . 'installer/rcube_install.php';
// get arguments
$opts = get_opt(array('v' => 'version'));
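// e.g. invoked as: bin/update.sh --version=<previously installed version>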
// ask user if no version is specified
if (!$opts['version']) {
echo "What version are you upgrading from? Type '?' if you don't know.\n";
if (($input = trim(fgets(STDIN))) && preg_match('/^[0-9.]+[a-z-]*$/', $input))
$opts['version'] = $input;
}
if ($opts['version'] && version_compare($opts['version'], RCMAIL_VERSION, '>'))
die("Nothing to be done here. Bye!\n");
$RCI = rcube_install::get_instance();
$RCI->load_config();
if ($RCI->configured) {
$success = true;
if ($messages = $RCI->check_config()) {
$success = false;
$err = 0;
// list missing config options
if (is_array($messages['missing'])) {
echo "WARNING: Missing config options:\n";
echo "(These config options should be present in the current configuration)\n";
foreach ($messages['missing'] as $msg) {
echo "- '" . $msg['prop'] . ($msg['name'] ? "': " . $msg['name'] : "'") . "\n";
$err++;
}
echo "\n";
}
// list old/replaced config options
if (is_array($messages['replaced'])) {
echo "WARNING: Replaced config options:\n";
echo "(These config options have been replaced or renamed)\n";
foreach ($messages['replaced'] as $msg) {
echo "- '" . $msg['prop'] . "' was replaced by '" . $msg['replacement'] . "'\n";
$err++;
}
echo "\n";
}
// list obsolete config options (just a notice)
if (is_array($messages['obsolete'])) {
echo "NOTICE: Obsolete config options:\n";
echo "(You still have some obsolete or nonexistent properties set. This isn't a problem, but you should be aware of it)\n";
foreach ($messages['obsolete'] as $msg) {
echo "- '" . $msg['prop'] . ($msg['name'] ? "': " . $msg['name'] : "'") . "\n";
$err++;
}
echo "\n";
}
// ask user to update config files
if ($err) {
echo "Do you want me to fix your local configuration? (y/N)\n";
$input = trim(fgets(STDIN));
// positive: let's merge the local config with the defaults
if (strtolower($input) == 'y') {
$copy1 = $copy2 = $write1 = $write2 = false;
// backup current config
echo ". backing up the current config files...\n";
$copy1 = copy(RCMAIL_CONFIG_DIR . '/main.inc.php', RCMAIL_CONFIG_DIR . '/main.old.php');
$copy2 = copy(RCMAIL_CONFIG_DIR . '/db.inc.php', RCMAIL_CONFIG_DIR . '/db.old.php');
if ($copy1 && $copy2) {
$RCI->merge_config();
echo ". writing " . RCMAIL_CONFIG_DIR . "/main.inc.php...\n";
$write1 = file_put_contents(RCMAIL_CONFIG_DIR . '/main.inc.php', $RCI->create_config('main', true));
echo ". writing " . RCMAIL_CONFIG_DIR . "/db.inc.php...\n";
$write2 = file_put_contents(RCMAIL_CONFIG_DIR . '/db.inc.php', $RCI->create_config('db', true));
}
// Success!
if ($write1 && $write2) {
echo "Done.\n";
echo "Your configuration files are now up-to-date!\n";
}
else {
echo "Failed to write config files!\n";
echo "Grant write privileges to the current user or update the files manually according to the above messages.\n";
}
}
else {
echo "Please update your config files manually according to the above messages.\n\n";
}
}
// check dependencies based on the current configuration
if (is_array($messages['dependencies'])) {
echo "WARNING: Dependency check failed!\n";
echo "(Some of your configuration settings require other options to be configured or additional PHP modules to be installed)\n";
foreach ($messages['dependencies'] as $msg) {
echo "- " . $msg['prop'] . ': ' . $msg['explain'] . "\n";
}
echo "Please fix your config files and run this script again!\n";
echo "See ya.\n";
}
}
// check database schema
if ($RCI->config['db_dsnw']) {
$DB = new rcube_mdb2($RCI->config['db_dsnw'], '', false);
$DB->db_connect('w');
if ($db_error_msg = $DB->is_error()) {
echo "Error connecting to database: $db_error_msg\n";
$success = false;
}
else if ($err = $RCI->db_schema_check($DB, false)) {
$updatefile = INSTALL_PATH . 'SQL/' . (isset($RCI->db_map[$DB->db_provider]) ? $RCI->db_map[$DB->db_provider] : $DB->db_provider) . '.update.sql';
echo "WARNING: Database schema needs to be updated!\n";
echo join("\n", $err) . "\n\n";
$success = false;
if ($opts['version']) {
echo "Do you want to run the update queries to get the schema fixed? (y/N)\n";
$input = trim(fgets(STDIN));
if (strtolower($input) == 'y') {
$success = $RCI->update_db($DB, $opts['version']);
}
}
if (!$success)
echo "Open $updatefile and execute all queries below the comment with the currently installed version number.\n";
}
}
// index contacts for fulltext searching
if (version_compare($opts['version'], '0.6', '<')) {
system(INSTALL_PATH . 'bin/indexcontacts.sh');
}
if ($success) {
echo "This instance of Roundcube is up-to-date.\n";
echo "Have fun!\n";
}
}
else {
echo "This instance of Roundcube is not yet configured!\n";
echo "Open http://url-to-roundcube/installer/ in your browser and follow the instructions.\n";
}
echo "\n";
?>
|
xrg/roundcubemail
|
bin/update.sh
|
Shell
|
gpl-3.0
| 6,865 |
#!/bin/bash
# This script gets the latest GitHub releases for the specified projects.
if [[ -z "$GITHUB_TOKEN" ]]; then
echo "Set the GITHUB_TOKEN env variable."
exit 1
fi
URI=https://api.github.com
API_VERSION=v3
API_HEADER="Accept: application/vnd.github.${API_VERSION}+json"
AUTH_HEADER="Authorization: token ${GITHUB_TOKEN}"
get_latest() {
local repo=$1
local resp
resp=$(curl -sSL -H "${AUTH_HEADER}" -H "${API_HEADER}" "${URI}/repos/${repo}/releases")
if [[ "$repo" != "Radarr/Radarr" ]]; then
resp=$(echo "$resp" | jq --raw-output '[.[] | select(.prerelease == false)]')
fi
local tag
tag=$(echo "$resp" | jq -e --raw-output .[0].tag_name)
local name
name=$(echo "$resp" | jq -e --raw-output .[0].name)
if [[ "$tag" == "null" ]]; then
# get the latest tag
resp=$(curl -sSL -H "${AUTH_HEADER}" -H "${API_HEADER}" "${URI}/repos/${repo}/tags")
tag=$(echo "$resp" | jq -e --raw-output .[0].name)
tag=${tag#release-}
fi
if [[ "$name" == "null" ]] || [[ "$name" == "" ]]; then
name="-"
fi
local dir=${repo#*/}
if [[ "$dir" == "CouchPotatoServer" ]]; then
dir="couchpotato"
elif [[ "$dir" == "cri-o" ]]; then
dir="crio"
elif [[ "$dir" == "byte-unixbench" ]]; then
dir="unixbench"
elif [[ "$dir" == "Tautulli" ]]; then
dir="plexpy"
elif [[ "$dir" == "zookeeper" ]]; then
dir="zookeeper/3.5"
elif [[ "$dir" == "oauth2_proxy" ]]; then
dir="oauth2-proxy"
elif [[ "$dir" == "now-cli" ]]; then
dir="now"
elif [[ "$dir" == "wireguard" ]]; then
dir="wireguard/install"
fi
# Change to upper case for grep
local udir
udir=$(echo $dir | awk '{print toupper($0)}')
# Replace dashes (-) with underscores (_)
udir=${udir//-/_}
udir=${udir%/*}
local current
if [[ ! -d "$dir" ]]; then
# If the directory does not exist, then grep all for it
current=$(grep -m 1 "${udir}_VERSION" -- **/Dockerfile | head -n 1 | awk '{print $(NF)}')
else
current=$(grep -m 1 "${udir}_VERSION" "${dir}/Dockerfile" | awk '{print $(NF)}')
fi
compare "$name" "$dir" "$tag" "$current" "https://github.com/${repo}/releases"
}
get_latest_unifi() {
local latest current
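# parse the Debian Packages index: take the 'unifi' stanza's Version field and strip the package revision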
latest=$(curl -sSL http://www.ubnt.com/downloads/unifi/debian/dists/cloudkey-stable/ubiquiti/binary-armhf/Packages \
| awk 'BEGIN {FS="\n"; RS="";} /^Package: unifi/' \
| awk '/^Version:/ {print $2}' \
| cut -d- -f1)
current=$(grep -m 1 UNIFI_VERSION unifi/Dockerfile | tr '"' ' ' | awk '{print $(NF)}')
compare unifi unifi "$latest" "$current" https://www.ubnt.com/download/unifi
}
compare() {
local name="$1" dir="$2" tag="$3" current="$4" releases="$5"
ignore_dirs=( "bazel" "mc" "rstudio" "zookeeper/3.5" )
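# a version counts as current when tag/name/current contain one another, e.g. tag "v1.2.3" vs current "1.2.3"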
if [[ "$tag" =~ $current ]] || [[ "$name" =~ $current ]] || [[ "$current" =~ $tag ]] || [[ "$current" == "master" ]]; then
echo -e "\\e[36m${dir}:\\e[39m current ${current} | ${tag} | ${name}"
else
# add to the bad versions
if [[ ! " ${ignore_dirs[*]} " =~ ${dir} ]]; then
bad_versions+=( "${dir}" )
fi
echo -e "\\e[31m${dir}:\\e[39m current ${current} | ${tag} | ${name} | ${releases}"
fi
}
projects=(
iovisor/bcc
browsh-org/browsh
certbot/certbot
cloudflare/cfssl
hashicorp/consul
coredns/coredns
CouchPotato/CouchPotatoServer
curl/curl
kolide/fleet
GoogleCloudPlatform/cloud-sdk-docker
google/gitiles
bazelbuild/bazel
google/guetzli
irssi/irssi
cryptodotis/irssi-otr
keepassxreboot/keepassxc
robertdavidgraham/masscan
MidnightCommander/mc
zyedidia/micro
mitmproxy/mitmproxy
hashicorp/nomad
zeit/now-cli
nzbget/nzbget
pusher/oauth2_proxy
facebook/osquery
hashicorp/packer
Tautulli/Tautulli
perkeep/perkeep
powershell/powershell
Radarr/Radarr
cesanta/docker_auth
ricochet-im/ricochet
reverse-shell/routersploit
rstudio/rstudio
tarsnap/tarsnap
nginx/nginx
simplresty/ngx_devel_kit
openresty/lua-nginx-module
leev/ngx_http_geoip2_module
maxmind/libmaxminddb
hashicorp/terraform
kdlucas/byte-unixbench
mitchellh/vagrant
hashicorp/vault
containrrr/watchtower
wireguard/wireguard
znc/znc
apache/zookeeper
)
other_projects=(
unifi
)
bad_versions=()
main() {
# shellcheck disable=SC2068
for p in ${projects[@]}; do
get_latest "$p"
done
# shellcheck disable=SC2068
for p in ${other_projects[@]}; do
get_latest_"$p"
done
if [[ ${#bad_versions[@]} -ne 0 ]]; then
echo
echo "These Dockerfiles are not up to date: ${bad_versions[*]}" >&2
exit 1
fi
}
main
|
mmatoscom/Dockerfiles
|
jessfraz/latest-versions.sh
|
Shell
|
gpl-3.0
| 4,323 |
#!/bin/bash
if [ ! -d packageinfo ] ; then
echo "Script must be run from the project base dir"
exit 1 ;
fi
mkdir -p dist
source packageinfo/package.info.vars
echo "Previous build number: $buildNumber"
declare -i newbuildNumber=${buildNumber}+1
buildNumber=${newbuildNumber}
echo "Updated build number: " ${buildNumber}
currdate=`date -R`
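# Render package.info from the template: substitute the #placeholder# tokens and drop the download-url line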
sed -e "s/#dateheader#/${currdate}/g" -e "s/#javaversion#/${javaversion}/g" -e "s/#gvSIGversion#/${gvSIGversion}/g" -e "s/#version#/${version}/g" -e "s/#state#/${state}/g" -e "s/#architecture#/${architecture}/g" -e "s/#operatingsystem#/${operatingsystem}/g" -e "s/#modelversion#/${modelversion}/g" -e "s/#buildNumber#/${buildNumber}/g" -e "/^download-url=.*$/d" packageinfo/package.info.tpl > org.gvsig.rexternal.app.mainplugin/package.info
zip -9yr dist/gvSIG-desktop-${gvSIGversion}-org.gvsig.rexternal.app.mainplugin-${version}-${buildNumber}-${state}-${operatingsystem}-${architecture}-${javaversion}.gvspkg org.gvsig.rexternal.app.mainplugin
mkdir -p tmp/org.gvsig.rexternal.app.mainplugin
sed -e "s/#dateheader#/${currdate}/g" -e "s/#javaversion#/${javaversion}/g" -e "s/#gvSIGversion#/${gvSIGversion}/g" -e "s/#version#/${version}/g" -e "s/#state#/${state}/g" -e "s/#architecture#/${architecture}/g" -e "s/#operatingsystem#/${operatingsystem}/g" -e "s/#modelversion#/${modelversion}/g" -e "s/#buildNumber#/${buildNumber}/g" packageinfo/package.info.tpl > tmp/org.gvsig.rexternal.app.mainplugin/package.info
cd tmp
zip -9yr ../dist/gvSIG-desktop-${gvSIGversion}-org.gvsig.rexternal.app.mainplugin-${version}-${buildNumber}-${state}-${operatingsystem}-${architecture}-${javaversion}.gvspki org.gvsig.rexternal.app.mainplugin
cd ..
rm -Rf tmp
sed -e "s/^buildNumber=.*$/buildNumber=${buildNumber}/g" packageinfo/package.info.vars > packageinfo/package.info.vars.new
rm packageinfo/package.info.vars
mv packageinfo/package.info.vars.new packageinfo/package.info.vars
|
dispiste/gvSIG-rexternal
|
create-gvspkg.sh
|
Shell
|
gpl-3.0
| 1,918 |
#!/bin/bash
exec java -Dwicket.configuration=deployment -jar <<commandFile>> "$@"
|
metarelate/terminology-server
|
moduleFiles/configtemplates/tsc.sh
|
Shell
|
gpl-3.0
| 84 |
#!/usr/bin/env bash
#
# Copyright (C) 2004-2021 Savoir-faire Linux Inc.
#
# Author: Emeric Vigier <[email protected]>
# Alexandre Lision <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
set -e
JNIDIR=`pwd`
PACKAGE=net.jami.daemon
if [ -z "$PACKAGEDIR" ]; then
echo "Define PACKAGEDIR: output dir of generated java files"
exit 1
fi
echo "Checking for SWIG version 4.0.0 or later"
SWIGVER=`swig -version | grep -i "SWIG version" | awk '{print $3}'`
SWIGVER1=`echo $SWIGVER | awk '{split($0, array, ".")} END{print array[1]}'`
SWIGVER2=`echo $SWIGVER | awk '{split($0, array, ".")} END{print array[2]}'`
SWIGVER3=`echo $SWIGVER | awk '{split($0, array, ".")} END{print array[3]}'`
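# e.g. "SWIG Version 4.0.2" yields SWIGVER1=4, SWIGVER2=0, SWIGVER3=2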
if [[ $SWIGVER1 -lt 4 ]]; then
echo "error: SWIG version $SWIGVER1.$SWIGVER2.$SWIGVER3 is less than 4.x"
exit 3
fi
mkdir -p $PACKAGEDIR
echo "Generating jami_wrapper.cpp..."
swig -v -c++ -java \
-package $PACKAGE \
-outdir $PACKAGEDIR \
-o $JNIDIR/jami_wrapper.cpp $JNIDIR/jni_interface.i
echo "Generating jamiservice_loader.c..."
python $JNIDIR/JavaJNI2CJNI_Load.py \
-i $PACKAGEDIR/JamiServiceJNI.java \
-o $JNIDIR/jamiservice_loader.c \
-t $JNIDIR/jamiservice.c.template \
-m JamiService \
-p $PACKAGE
echo "Appending jami_wrapper.cpp..."
cat $JNIDIR/jamiservice_loader.c >> $JNIDIR/jami_wrapper.cpp
echo "SWIG bindings successfully generated!"
exit 0
|
savoirfairelinux/ring-daemon
|
bin/jni/make-swig.sh
|
Shell
|
gpl-3.0
| 2,021 |
#!/bin/bash
##
#### Dependencies : ####
## piFmRds [https://github.com/ChristopheJacquet/PiFmRds]
## netcat
## jackd2
## jack-stdout
## sox
####
#### Usage : ####
## Launch this script
## Create a netjack master on the network
## Send some sound
## -- Enjoy your FM radio !
## Send piFmRds rds update commands to [pi-IP]:16123 via netcat
## -- Enjoy your rds text !
## Define all parameters
#Audio fromat of your net master instance
#RATE=44100 # Now grabbed from netjack
#Radio parameters
FREQ=107.7
PS="INIT" #8 char max
RT="Netjack-PiFm on $HOSTNAME Up&Workin" #64 char max
NC_RDS_PORT=16123
#PiFmRds path :
PIFM=/home/pi/PiFmRds/src/pi_fm_rds
cleanup()
{
echo "Killing processes..."
# sudo kill $PIPE_PID
# sleep 1
# kill $JACK_PID
# sleep 1
kill $NC_PID
sleep 1
killall jackd
rm /tmp/rds_ctl
echo "Transmission finished"
}
init()
{
trap 'cleanup; exit 0' SIGINT SIGTERM
mkfifo /tmp/rds_ctl
nc -l -p $NC_RDS_PORT > /tmp/rds_ctl &
NC_PID=$!
#jackd -d net -l 2 2> /dev/null; echo "Jackd PID: "$! | tee -a ~/.log/jack/jackpifm.log | grep --line-buffered "Master name :" | while read line; do
trap 'cleanup; exit 0' SIGINT SIGTERM
while true; do
run
echo "Something is wrong here... retrying in 10 sec"
sleep 10
done
echo "This is the end, my only friend ..."
cleanup
exit 1
}
run()
{
echo "Starting jack"
# jackd -d net -l 2 2>/dev/null | tee -a ~/.log/jack/jackpifm.log | grep --line-buffered "Master name :" | while read line; do
jackd -d net -l 2 2>/dev/null | tee -a /var/log/jackpifm.log | grep --line-buffered "Sample rate : " | grep --line-buffered -o [[:digit:]]* | while read RATE; do
# echo $line
#sleep 1
#jack_samplerate 2> /dev/null
echo "Samplerate : "
echo $RATE
sleep 1
jack-stdout -q system:capture_1 system:capture_2 | sox -r $RATE -b 16 -c 2 -e signed -t raw - -t wav - | $PIFM -freq $FREQ -rt "$RT" -ps "$PS" -ctl /tmp/rds_ctl -audio -
done
}
trap 'cleanup; exit 0' SIGINT SIGTERM
init
|
wargreen/netjack_pifm
|
netjack_fm.sh
|
Shell
|
gpl-3.0
| 1,962 |
#!/bin/sh
##
## (Nautilus)
## SCRIPT: 01_pstree.sh
##
## PURPOSE: Runs the 'pstree' command putting the output in a text file.
## Shows the text file in a text file browsing utility or
## a text editor.
##
## HOW TO USE: Right-click on the name of any file (or directory) in a
## Nautilus directory list.
## Under 'Scripts', click on this script to run (name above).
##
## Created: 2010apr21
## Changed:
## FOR TESTING:
# set -v
# set -x
#############################################################
## Prep a temporary filename, to hold the list of filenames.
##
## We always put the report in the /tmp directory, rather than
## junking up the current directory with this report that
## applies to the entire filesystem. Also, the user might
## not have write-permission to the current directory.
#############################################################
OUTFILE="/tmp/01_pstree.lis"
if test -f "$OUTFILE"
then
rm -f "$OUTFILE"
fi
##########################################################################
## SET REPORT HEADING.
##########################################################################
HOST_ID="`hostname`"
echo "\
......................... `date '+%Y %b %d %a %T%p %Z'` ........................
PROCESS TREE ON HOST *** $HOST_ID ***
PROCESSES WITH THE SAME ANCESTOR
ARE SORTED BY PROCESS ID
**********
" > "$OUTFILE"
##########################################################################
## Run 'pstree' to GENERATE REPORT CONTENTS.
##########################################################################
## FOR TESTING:
# set -x
pstree -punaAl >> "$OUTFILE"
## FOR TESTING:
# set -
##########################################################################
## ADD REPORT 'TRAILER'.
##########################################################################
# BASENAME=`basename $0`
echo "
......................... `date '+%Y %b %d %a %T%p'` ........................
The output above is from script
$0
which ran the 'pstree -punaAl' command on host $HOST_ID .
.............................................................................
NOTE1:
pstree visually merges identical branches by putting them in square
brackets and prefixing them with the repetition count, e.g.
init-+-gettyps
|-getty
|-getty
|-getty
becomes
init---4*[getty]
Child threads of a process are found under the parent process and are
shown with the process name in curly braces, e.g.
icecast2---13*[{icecast2}]
NOTE2: Here is a description of the '-punaAl' options:
-p Show PIDs. PIDs are shown as decimal numbers in parentheses
after each process name. -p implicitly disables compaction.
-u Show uid transitions. Whenever the uid of a process differs from
the uid of its parent, the new uid is shown in parentheses after
the process name.
-n Sort processes with the same ancestor by PID instead of by name.
(Numeric sort.)
-a Show command line arguments. If the command line of a process is
swapped out, that process is shown in parentheses. -a implicitly
disables compaction.
-A Use ASCII characters to draw the tree.
-l Display long lines. By default, lines are truncated to the dis-
play width or 132 if output is sent to a non-tty or if the dis-
play width is unknown.
......................... `date '+%Y %b %d %a %T%p %Z'` ........................
" >> "$OUTFILE"
##################################################
## Show the list of filenames that match the mask.
##################################################
. $HOME/.gnome2/nautilus-scripts/.set_VIEWERvars.sh
$TXTVIEWER "$OUTFILE" &
|
kernt/linuxtools
|
gnome3-shell/nautilus-scripts/System/File Info/PROCESSlists/Pstree.sh
|
Shell
|
gpl-3.0
| 3,870 |
#!/usr/bin/env bash
#
# Install the Origo Desktop Controller on an empty Ubuntu Server
if [ "$(id -u)" != "0" ]; then
echo "ERROR! You must execute the script as the 'root' user."
exit 1
fi
# NOTE: When run with vagrant this script is present in /tmp
echo '==> Copying configuration to /etc'
rsync -rlptv /vagrant/conf/etc/ /etc/
chmod 640 /etc/default/celeryd
mkdir -p /srv/www
ln -s /vagrant /srv/www/rds
echo '==> Installing apt packages'
apt-get update
PACKAGES="
git
ipython
nginx
python-pip
python-dev
rabbitmq-server
samba
sqlite3
ldap-utils
"
apt-get --yes install $PACKAGES
# NOTE: We don't need nodejs in production just the static files
# bower
DEV_PACKAGES="
nodejs
nodejs-legacy
npm
"
apt-get --yes install $DEV_PACKAGES
# Failed to execute "git ls-remote --tags --heads git://github.com/designmodo/Flat-UI.git", exit code of #128
git config --global url."https://".insteadOf git://
npm install -g bower
echo '==> Installing Webserver'
rm /etc/nginx/sites-enabled/default /etc/nginx/sites-available/default
ln -s /etc/nginx/sites-available/rds /etc/nginx/sites-enabled/rds
echo '==> Installing Webapp'
pip install -r /vagrant/conf/requirements.txt --src=$HOME
/vagrant/manage.py syncdb --noinput
su $SUDO_USER -c 'yes n | /vagrant/manage.py bower install'
/vagrant/manage.py collectstatic --noinput
echo '==> Installing Samba'
mkdir /srv/samba
chown -R www-data: /srv/samba
cat >> /etc/samba/smb.conf <<EOF
[share]
comment = Software
writable = yes
path = /srv/samba
browsable = yes
guest ok = yes
read only = no
create mask = 0755
[scripts]
comment = Scripts
writable = yes
path = /srv/www/rds/scripts
browsable = yes
guest ok = yes
read only = no
create mask = 0777
EOF
echo '==> Enable Celery Service'
# Celery init script uses su => www-data must be able to login
# replace nologin
usermod -s '' www-data
update-rc.d celeryd defaults
update-rc.d celeryd enable
echo '==> Enable Gunicorn Service'
update-rc.d gunicorn defaults
update-rc.d gunicorn enable
echo '==> Restarting services'
service nmbd restart
service smbd restart
service nginx restart
service gunicorn restart
service celeryd restart
|
jakobadam/origo-desktops
|
conf/install.sh
|
Shell
|
gpl-3.0
| 2,195 |
#!/bin/bash
# 3.6.1 Ensure iptables is installed (Scored)
BENCHMARKNUM='cis_benchmark_3_6_1'
RESULT=
PARAMS=(
iptables
)
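# rpm -qa prints the package name when it is installed; empty output means it is missing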
for P in ${PARAMS[@]}
do
RESULT=$( /bin/rpm -qa $P )
if [[ ! $RESULT ]]
then
break
fi
done
if [[ $RESULT ]]
then
echo "${BENCHMARKNUM}=passed"
else
echo "${BENCHMARKNUM}=failed"
fi
|
proletaryo/puppet-ciscentos6
|
files/scripts/benchmark-3.6.1.sh
|
Shell
|
gpl-3.0
| 328 |
#!/bin/bash
PWD=`pwd`
SAVE=$PWD'/'
NEW_PERM=$SUDO_USER:$SUDO_USER
if [ -z "$BUILDDIR" ]; then
BUILDDIR=~/nova-build
fi
if [ -z "$1" ]; then
BRANCH="master"
else
BRANCH="$1"
fi
echo "Build Dir is $BUILDDIR"
check_err() {
ERR=$?
if [ "$ERR" -ne "0" ] ; then
echo "Error occurred during build process; terminating script!"
exit $ERR
fi
}
mkdir -p ${BUILDDIR}
echo "##############################################################################"
echo "# NOVA DEPENDENCY CHECK #"
echo "##############################################################################"
apt-get -y install git build-essential libann-dev libpcap0.8-dev libboost-program-options-dev libboost-serialization-dev sqlite3 libsqlite3-dev libcurl3 libcurl4-gnutls-dev iptables libevent-dev libprotoc-dev protobuf-compiler libdumbnet-dev libpcap-dev libpcre3-dev libedit-dev bison flex libtool automake libcap2-bin libboost-system-dev libboost-filesystem-dev python perl tcl liblinux-inotify2-perl libfile-readbackwards-perl
check_err
echo "##############################################################################"
echo "# DOWNLOADING NOVA FROM GIT #"
echo "##############################################################################"
cd ${BUILDDIR}
rm -fr Honeyd
rm -fr Nova
git clone git://github.com/DataSoft/Honeyd.git
check_err
git clone git://github.com/DataSoft/Nova.git
check_err
echo "##############################################################################"
echo "# BUILDING HONEYD #"
echo "##############################################################################"
cd ${BUILDDIR}/Honeyd
git checkout -f $BRANCH
./autogen.sh
check_err
automake
check_err
./configure
check_err
make -j2
check_err
make install
check_err
cd ${BUILDDIR}/Nova
git checkout -f $BRANCH
git submodule init
git submodule update
check_err
echo "##############################################################################"
echo "# BUILDING NOVA #"
echo "##############################################################################"
cd ${BUILDDIR}/Nova/Quasar
bash getDependencies.sh
check_err
chown -R -f $NEW_PERM node-v0.8.5/
chown -f $NEW_PERM node-v0.8.5.tar.gz
cd ${HOME}
chown -R $NEW_PERM .npm/
check_err
cd ${BUILDDIR}/Nova/Quasar
npm install -g forever
check_err
cd ${BUILDDIR}/Nova
make -j2 debug
check_err
make uninstall-files
make install
check_err
bash ${BUILDDIR}/Nova/Installer/nova_init
echo "##############################################################################"
echo "# FETCHING NMAP 6 #"
echo "##############################################################################"
version=$(nmap --version | sed -n '2p')
if [ "$version" != "Nmap version 6.01 ( http://nmap.org )" ]; then
cd ${BUILDDIR}
wget http://nmap.org/dist/nmap-6.01.tar.bz2
check_err
tar -xf nmap-6.01.tar.bz2
check_err
chown -R nova:nova nmap-6.01
cd nmap-6.01
./configure
check_err
make -j2
check_err
make install
check_err
else
echo "Nmap version already matches required version. Skipping step."
fi
cd $SAVE
chown -R -f $NEW_PERM nova-build/
cd $HOME
chown -R -f $NEW_PERM .node-gyp/
chown -R -f $NEW_PERM .config/
cd /usr/share/honeyd/scripts/
chown -R -f $NEW_PERM misc/
echo "##############################################################################"
echo "# DONE #"
echo "##############################################################################"
|
altf4/Nova
|
debian/novaInstallHelper.sh
|
Shell
|
gpl-3.0
| 3,722 |
xmlstarlet tr ./test.xsl test2.xml
|
oliverthered/dlink-stats
|
test.sh
|
Shell
|
gpl-3.0
| 35 |
#!/bin/bash
# "All-in-one" automation part.
# This script installs everything needed on the Amazon machine to run the
# Transparente UGR application.
# To do that, first connect to the machine over ssh.
ssh -i transparente.pem [email protected]
# Once connected, install all the tools required to run the application.
sudo apt-get update
sudo apt-get install -y g++
sudo apt-get install -y curl
sudo apt-get install -y python
sudo apt-get install -y git
curl -sL https://deb.nodesource.com/setup | sudo bash -
sudo apt-get install -y nodejs
# Enable server-side and client-side support
sudo apt-get install -y python-psycopg2
sudo apt-get install -y libpq-dev
sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 7F0CEB10
echo 'deb http://downloads-distro.mongodb.org/repo/ubuntu-upstart dist 10gen' | sudo tee /etc/apt/sources.list.d/mongodb.list
sudo apt-get update
sudo apt-get install -y mongodb-org
git clone https://github.com/TransparenciaUGR/Proyecto-IV.git
cd Proyecto-IV
npm install
npm install -g grunt-cli
# (the original combined -g with the typo --save-dec; local installs with --save-dev are assumed here)
npm install grunt-contrib-clean --save-dev
npm install grunt-contrib-copy --save-dev
npm install grunt-blanket --save-dev
npm install grunt-coveralls --save-dev
npm install -g mocha
npm install mocha chai supertest
sudo start mongod
|
TransparenciaUGR/Proyecto-IV
|
scripts/servicioAmazonEc2.sh
|
Shell
|
gpl-3.0
| 1,540 |
#!/bin/bash
if [[ ! -d /opt/publicbox/share/board ]]; then
echo "You have to install the imageboard first!"
echo "Run (as root):"
echo -e "\t/opt/publicbox/bin/install_publicbox.sh /opt/publicbox/conf/publicbox.conf imageboard"
else
echo -n "Imageboard admin password: "
read -s BOARDPASSWORD
echo
sed -i "s|xyzPASSWORDzyx|$BOARDPASSWORD|g" /opt/publicbox/share/board/config.pl
TEMPRAND=$(< /dev/urandom tr -dc A-Za-z0-9_ | head -c128)
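# random 128-character string substituted into the board's SECRET constant below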
sed -i "s|xyzSECRETCODEzyx|$TEMPRAND|g" /opt/publicbox/share/board/config.pl
sed -i "s|#use constant ADMIN_PASS|use constant ADMIN_PASS|" /opt/publicbox/share/board/config.pl
sed -i "s|#use constant SECRET|use constant SECRET|" /opt/publicbox/share/board/config.pl
# Remove temporary index page and then try to initialize the board
test -e /opt/publicbox/share/board/index.htm && rm /opt/publicbox/share/board/index.htm
#wget -q -s -O - http://127.0.0.1/board/kareha.pl 2>/dev/null
wget -qO- http://127.0.0.1/board/kareha.pl &> /dev/null
fi
|
whunder/Publicbox
|
publicbox/bin/board-autoconf.sh
|
Shell
|
gpl-3.0
| 1,037 |
#!/bin/bash
# Copyright (C) 2011-2014 ToFalando
#
# Script initially developed by
# Emerson Luiz ( [email protected] )
# Updated by
# Guilherme Matos ( [email protected] )
source funcoes.sh
# Configure the branch (note: left commented out below, so $BRANCH expands empty in the wget URLs)
#BRANCH='devel'
clear
echo " > Install BoxFacil IPBX"
echo "====================================="
echo " 1) Install E1 Exchange / Cards"
echo " 2) Install SIP Exchange"
echo " 3) Install Portability"
echo " 4) Install G729 FREE"
echo " 5) Install Operator Console"
echo " 6) Install USB DONGLE"
echo " 0) Exit"
echo -n "(0-6) : "
read OPTION < /dev/tty
ExitFinish=0
while [ $ExitFinish -eq 0 ]; do
case $OPTION in
1)
#Install cards
clear
cd /usr/src/
wget --no-check-certificate https://raw.github.com/gu1lhermematos/VOXIPBX/$BRANCH/install/install-cards.sh
ExitFinish=1
bash install-cards.sh
;;
2)
#Installing ASTERISK
clear
cd /usr/src/
wget --no-check-certificate https://raw.githubusercontent.com/gu1lhermematos/VOXIPBX/$BRANCH/install/install-tofalando.sh
func_install_asterisk
bash install-tofalando.sh
cd /var/www/snep/install/
# mysql -uroot -ptofalando2014 snep25 < tofalando.sql
cd /usr/src/
bash install-asterisk.sh
ExitFinish=1
;;
3)
#Install Portability IPBX
clear
func_install_portabilidade
ExitFinish=1
bash install-asterisk.sh
;;
4)
#Install G729 FREE
clear
cd /usr/src/
# Check for asterisk
if [ ! -d "/etc/asterisk" ]; then
clear
cd /usr/src/
func_install_asterisk
func_install_g729
bash install-asterisk.sh
ExitFinish=1
else
clear
cd /usr/src/
func_install_g729
bash install-asterisk.sh
ExitFinish=1
fi
# End CPU setup
;;
5)
#Install the Operator Console
clear
func_install_mesa
bash install-asterisk.sh
ExitFinish=1
;;
6)
#Install USB dongle
clear
func_install_dongle
bash install-asterisk.sh
ExitFinish=1
;;
0)
clear
cd /usr/src/
rm -rf asterisk* dahdi* lib* install* fop* funcoes* linux-3* openr2* chan_*
# Remove installation files
cd /var/www/ipbx/
rm -rf install
ExitFinish=1
;;
*)
esac
done
|
gu1lhermematos/VOXIPBX
|
install/install-asterisk.sh
|
Shell
|
gpl-3.0
| 2,655 |
#!/bin/bash
set -e
pegasus_lite_version_major="4"
pegasus_lite_version_minor="7"
pegasus_lite_version_patch="0"
pegasus_lite_enforce_strict_wp_check="true"
pegasus_lite_version_allow_wp_auto_download="true"
. pegasus-lite-common.sh
pegasus_lite_init
# cleanup in case of failures
trap pegasus_lite_signal_int INT
trap pegasus_lite_signal_term TERM
trap pegasus_lite_exit EXIT
echo -e "\n################################ Setting up workdir ################################" 1>&2
# work dir
export pegasus_lite_work_dir=$PWD
pegasus_lite_setup_work_dir
echo -e "\n###################### figuring out the worker package to use ######################" 1>&2
# figure out the worker package to use
pegasus_lite_worker_package
echo -e "\n##################### setting the xbit for executables staged #####################" 1>&2
# set the xbit for any executables staged
/bin/chmod +x example_workflow-mediancpu_0-1.0
echo -e "\n############################# executing the user tasks #############################" 1>&2
# execute the tasks
set +e
pegasus-kickstart -n example_workflow::mediancpu_0:1.0 -N ID0000004 -R condorpool -L example_workflow -T 2016-11-25T15:15:05+00:00 ./example_workflow-mediancpu_0-1.0
job_ec=$?
set -e
|
elainenaomi/sciwonc-dataflow-examples
|
dissertation2017/Experiment 1B/logs/w-10-B/20161125T151505+0000/00/00/mediancpu_0_ID0000004.sh
|
Shell
|
gpl-3.0
| 1,237 |
#!/bin/sh
script_root_dir=`dirname "$0"`
config="$script_root_dir/backup-restore.config"
site_path="$1"
if [ -z "$site_path" ]
then
echo "Usage: $0 /path/to/your/site"
exit 1
fi
if [ -f "$config" ]
then
. "$config"
else
echo "Sorry, I couldn't find a 'backup-restore.config' file. Copy backup-restore.config.example to backup-restore.config and fill in all the required information inside to continue."
exit 2
fi
echo '--------'
echo 'Enabling maintenance mode'
echo '--------'
# You can change the message the site outputs during maintenance mode in the config file.
echo "$maintenance_msg" > "$site_path/.maintenance"
if [ $? -eq 0 ]
then
echo "***** Done! Maintenance mode enabled."
maintenance_mode_enable_status="OK"
else
echo "***** FAIL! Couldn't enable maintenance mode."
maintenance_mode_enable_status="FAIL"
fi
echo '--------'
echo 'Backing up the current database, just in case (a backup of the backup, if you will). You can find it in ' $backup_dir
echo '--------'
wp --path="$site_path" db export "$backup_dir/dbbackup-before-restore.sql"
echo '--------'
echo 'Restoring the database...'
echo '--------'
wp --path="$site_path" db import "$backup_dir/latest-db-dump.sql"
if [ $? -eq 0 ]
then
echo "***** Done! The database was successfully imported."
db_import_status="OK"
else
echo "***** Couldn't import the database."
exit 3
fi
echo '--------'
echo 'Now extracting the files from the backup archive'
echo '--------'
tar -zxvf "$backup_dir/latest-files-backup.tar.gz" -C "/"
if [ $? -eq 0 ]
then
echo "***** Done! The site files were successfully extracted from the backup archive."
files_extract_status="OK"
else
echo "***** Could not extract site files from the backup archive."
exit 4
fi
echo '--------'
echo 'Disabling maintenance mode'
echo '--------'
rm "$site_path/.maintenance"
if [ $? -eq 0 ]
then
echo "***** Done! Maintenance mode disabled."
maintenance_mode_disable_status="OK"
else
echo "***** FAIL! Couldn't disable maintenance mode. Check for a '.maintenance' file in your WordPress install directory, and delete it if there is one."
maintenance_mode_disable_status="FAIL"
fi
echo '----------------'
echo 'All done! Status log below:'
echo '-----'
echo 'Maintenance mode enabled: ' $maintenance_mode_enable_status
echo '-----'
echo 'Database import: ' $db_import_status
echo '-----'
echo 'Files import: ' $files_extract_status
echo '-----'
echo 'Maintenance mode disabled: ' $maintenance_mode_disable_status
echo 'Done.'
|
governmentbg/opendata-cms
|
backup-restore/restore.sh
|
Shell
|
gpl-3.0
| 2,498 |
#!/bin/bash
outdir="dat/B3"
data="$outdir/5.dat"
fitxenergies="$outdir/5_energies.dat"
#outfile="$outdir/energies.dat"
#outplot="$outdir/energies.eps"
PG=
#PG=" -pg "
mkdir -p $outdir
gcc -O3 -o autocorr autocorr.c -lm || exit 1
gcc -O3 $PG -o ising ising.c -lm || exit 1
L[0]="20"
L[1]="40"
L[2]="100"
N[0]="400"
N[1]="1600"
N[2]="10000"
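# Autocorrelation plot ranges for energy (E) and magnetisation (M), indexed by dynamics type.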
rangeE["0"]="3000"
rangeE["3"]="10"
rangeM["0"]="3000"
rangeM["3"]="250"
for j in "0" "3";do
for i in $(seq 0 2);do
echo "Computing dyn=$j, L= ${L[$i]}"
./ising -d 2 -L ${L[$i]} -T 2.25 --nmeas 1 --nmcs 100000 --ieq 2000 -s 3291043 --dyn $j > "$outdir/${L[$i]}_dyn${j}_raw.dat"
./mycut -f 1 < "$outdir/${L[$i]}_dyn${j}_raw.dat" > "$outdir/${L[$i]}_dyn_${j}_energy.dat"
./mycut -f 2 < "$outdir/${L[$i]}_dyn${j}_raw.dat" > "$outdir/${L[$i]}_dyn_${j}_magnet.dat"
./autocorr 3000 < "$outdir/${L[$i]}_dyn_${j}_energy.dat" > "$outdir/${L[$i]}_dyn_${j}_energy_autocorr.dat"
./autocorr 3000 < "$outdir/${L[$i]}_dyn_${j}_magnet.dat" > "$outdir/${L[$i]}_dyn_${j}_magnet_autocorr.dat"
maxrangeE=${rangeE[$j]}
maxrangeM=${rangeM[$j]}
gnuplot -e "inputE='$outdir/${L[$i]}_dyn_${j}_energy_autocorr.dat';inputM='$outdir/${L[$i]}_dyn_${j}_magnet_autocorr.dat';outputE='$outdir/${L[$i]}_dyn_${j}_energy_autocorr.eps';outputM='$outdir/${L[$i]}_dyn_${j}_magnet_autocorr.eps';myrangeE=$maxrangeE;myrangeM=$maxrangeM" PartB3plot1.gnu
done
done
|
zeehio/ising
|
scripts/PartB3.sh
|
Shell
|
gpl-3.0
| 1,430 |
#!/usr/bin/env bash
#
# This script builds a container which, when run, starts a Pbench Server.
#
# Note: successfully running this script required adding AUDIT_WRITE (for the
# operation of sshd) and CAP_SETFCAP (for the installation of httpd) to
# the list of default capabilities in /etc/containers/containers.conf
# (and, if that file doesn't exist, you'll need to create it with the
# other default capabilities, e.g., see
# https://man.archlinux.org/man/containers.conf.5.en#CONTAINERS_TABLE and
# https://github.com/containers/common/blob/da56e470c0c57c27e91bdc844b32c5dab6611394/pkg/config/containers.conf#L48)
#
set -o errexit
# Locations inside the container
INSTALL_ROOT=/opt/pbench-server
SERVER_LIB=${INSTALL_ROOT}/lib
SERVER_BIN=${INSTALL_ROOT}/bin
CONF_PATH=${SERVER_LIB}/config/pbench-server.cfg
HOSTNAME_F=pbenchinacan
# Locations on the host
GITTOP=${GITTOP:-$(git rev-parse --show-toplevel)}
PBINC_DIR=${GITTOP}/server/pbenchinacan
# Image tag determined from jenkins/branch.name
BRANCH=$(< ${GITTOP}/jenkins/branch.name)
# Open a copy of the base container. Docker format is required in order to set
# the hostname.
container=$(buildah from --format=docker quay.io/pbench/pbench-devel-ubi:${BRANCH})
# We could mount the container filesystem and access it directly, but we
# instead access it with buildah commands.
# mnt=$(buildah mount $container)
# Consider adding -v datavolume for the Server data, and perhaps SQL and ES data
buildah config \
--label maintainer="Nick Dokos <[email protected]>" \
--hostname $HOSTNAME_F \
--stop-signal SIGINT \
--port 22:55555 `# sshd` \
--port 8001 `# pbench-server` \
$container
# Set up Pbench DNF repo and install the Server and Apache
buildah copy $container ${PBINC_DIR}/etc/yum.repos.d/pbench.repo /etc/yum.repos.d/pbench.repo
buildah run $container dnf install -y pbench-server httpd
buildah run $container dnf clean all
# Skip installing and configuring the Firewall
# Work around a problem with cron running jobs as other users in a container.
buildah run $container bash -c "sed -i -e '/pam_loginuid/ s/^/#/' /etc/pam.d/crond"
# Copy the Pbench Server config file; then correct the hostname configuration.
buildah copy --chown pbench:pbench --chmod 0644 $container \
${PBINC_DIR}/etc/pbench-server/pbench-server.cfg ${CONF_PATH}
buildah run $container sed -Ei \
-e "/^default-host[[:space:]]*=/ s/=.*/= ${HOSTNAME_F}/" ${CONF_PATH}
buildah run $container su -l pbench \
-c "_PBENCH_SERVER_CONFIG=${CONF_PATH} PATH=$SERVER_BIN:$PATH pbench-server-activate-create-crontab ${SERVER_LIB}/crontab"
buildah run $container mkdir -p -m 0755 \
/srv/pbench/archive/fs-version-001 \
/srv/pbench/public_html/incoming \
/srv/pbench/public_html/results \
/srv/pbench/public_html/users \
/srv/pbench/public_html/static \
/srv/pbench/logs \
/srv/pbench/tmp \
/srv/pbench/quarantine \
/srv/pbench/pbench-move-results-receive/fs-version-002
buildah run $container chown --recursive pbench:pbench /srv/pbench
# SELinux is currently disabled inside the container, so these commands don't
# work very well, so we'll just comment them out for the time being.
#
# buildah run $container semanage fcontext -a -t httpd_sys_content_t /srv/pbench/archive
# buildah run $container semanage fcontext -a -t httpd_sys_content_t /srv/pbench/archive/fs-version-001
# buildah run $container semanage fcontext -a -t httpd_sys_content_t /srv/pbench/public_html/incoming
# buildah run $container semanage fcontext -a -t httpd_sys_content_t /srv/pbench/public_html/results
# buildah run $container semanage fcontext -a -t httpd_sys_content_t /srv/pbench/public_html/users
# buildah run $container semanage fcontext -a -t httpd_sys_content_t /srv/pbench/public_html/static
# buildah run $container restorecon -v -r /srv/pbench/archive /srv/pbench/public_html
buildah run $container crontab -u pbench ${SERVER_LIB}/crontab/crontab
echo >/tmp/pbench.conf.${$} \
"<VirtualHost *:80>
ProxyPreserveHost On
ProxyPass /api/ http://${HOSTNAME_F}:8001/api/
ProxyPassReverse /api/ http://${HOSTNAME_F}:8001/api/
ProxyPass / !
</VirtualHost>"
buildah copy --chown root:root --chmod 0644 $container \
/tmp/pbench.conf.${$} /etc/httpd/conf.d/pbench.conf
rm /tmp/pbench.conf.${$}
buildah run $container cp ${SERVER_LIB}/systemd/pbench-server.service \
/etc/systemd/system/pbench-server.service
buildah run $container systemctl enable httpd
buildah run $container systemctl enable pbench-server
# Create the container image
buildah commit $container pbench-server:latest
|
distributed-system-analysis/pbench
|
server/pbenchinacan/container-build.sh
|
Shell
|
gpl-3.0
| 4,653 |
#!/bin/bash -e
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && cd .. && pwd )
LIBS=${DIR}/lib
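# Run the bundled initdb through the bundled dynamic loader so it picks up the packaged libraries.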
exec ${DIR}/lib/ld.so --library-path $LIBS ${DIR}/bin/initdb "$@"
|
syncloud/3rdparty
|
postgresql-10/bin/initdb.sh
|
Shell
|
gpl-3.0
| 159 |
#!/bin/bash
java -Djava.library.path=../../../../bin -cp ../../../../lib/mocket.jar:$NOMADS_HOME/util/lib/util.jar:../../../../build/antcache us.ihmc.mockets.test.DataRecv $1
|
ihmc/nomads
|
mockets/test/java/scripts/linux/runDataRecv.sh
|
Shell
|
gpl-3.0
| 178 |
#!/bin/bash
if [ -z "${SKY_HOME}" ]; then
SKY_HOME=$(dirname $BASH_SOURCE)/..
fi
cd ${SKY_HOME}
nohup java -Dspring.config.location=file:conf/sky-worker.yml -jar sky-worker.jar 1> /dev/null 2>&1 &
|
Vondom/sky
|
sky-distribution/src/main/resources/bin/start-worker.sh
|
Shell
|
gpl-3.0
| 200 |
#!/bin/sh
version_file=`dirname $0`/VERSION
usage()
{
echo "Usage $0" 1>&2
echo "Options:" 1>&2
echo " -h print this help" 1>&2
exit 1
}
while getopts "h" option; do
case $option in
*)
usage
;;
esac
done
if [ -f $version_file ]; then
cat $version_file
exit 0
fi
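# No VERSION file: print the exact git tag if HEAD is tagged, otherwise the current branch name with '/' replaced by '-'.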
current_tag=`git describe --tags --exact-match 2> /dev/null`
if [ $? -eq 0 ]; then
echo $current_tag
else
git branch | grep '^\*' | awk '{ gsub(/\//, "-"); print $2 }'
fi
|
stratumn/go
|
version.sh
|
Shell
|
mpl-2.0
| 545 |
#!/bin/bash
cd extension
zip -FS -r ../firefox-extension-gnome-theme-tweak.xpi *
cd -
|
gnome-integration-team/firefox-gnome-tweak
|
make-xpi.sh
|
Shell
|
mpl-2.0
| 87 |
# source this script to get the CMAKE env var
cmake_dir=cmake
cmake_bin=$cmake_dir/bin/cmake
cmake_url="https://cmake.org/files/v3.9/cmake-3.9.2-Linux-x86_64.sh"
# Work with travis cache
if [[ ! -f $cmake_bin ]]; then
mkdir -p $cmake_dir
wget $cmake_url -O $cmake_dir/install-cmake.sh
chmod +x $cmake_dir/install-cmake.sh
$cmake_dir/install-cmake.sh --prefix=$cmake_dir --exclude-subdir
fi
export CMAKE=$cmake_bin
|
D-I-S-K-U-S/TOP-1
|
scripts/travis-install-cmake.sh
|
Shell
|
mpl-2.0
| 432 |
HOSTNAME='FINDTracker'
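# Resolve the tracker's IP address from its hostname.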
IP=$(getent hosts "${HOSTNAME}" | awk '{ print $1 }')
PLATFORMIO_UPLOAD_FLAGS="--auth=supersecret" platformio run --silent --target uploadfs --upload-port ${IP}
|
TheMegaTB/FINDTracker
|
OTA-SPIFFS.sh
|
Shell
|
agpl-3.0
| 184 |
#!/usr/bin/env bash
ROBOPOP="$(cd "$(dirname "$0")" ; pwd)"
PROJECT="$(dirname "${ROBOPOP}")"
TMP="${PROJECT}/data/tmp"
EPISTEMICS_BASE_URL='http://localhost:8888'
if [ -n "$1" ]
then
EPISTEMICS_BASE_URL="$1" ; shift
fi
mkdir -p "${TMP}"
curl -L -sS -D - 'https://github.com/robopop/epistemics/raw/master/Installation/BeliefSystem.zip' -o "${TMP}/BeliefSystem-curl.zip"
curl -sS -D - -X POST -F "file=@${TMP}/BeliefSystem-curl.zip" "${EPISTEMICS_BASE_URL}/beliefsystem-rest/epistemics/belief-system"
|
robopop/docker
|
robopop/load-default-data.sh
|
Shell
|
agpl-3.0
| 507 |
#set -x
CWD="$(pwd)"
#This is to avoid permission problems
echo "export HADOOP_USER_NAME=hdfs" >> ~/.bashrc
echo "export HADOOP_USER_NAME=hdfs" >> /home/vagrant/.bashrc
#This is to set the native hadoop library path for java to find
echo "export LD_LIBRARY_PATH=/usr/lib/hadoop/lib/native/:$LD_LIBRARY_PATH" >> ~/.bashrc
echo "export LD_LIBRARY_PATH=/usr/lib/hadoop/lib/native/:$LD_LIBRARY_PATH" >> /home/vagrant/.bashrc
source ~/.bashrc
#Install java and wget
sudo yum install java-1.7.0-openjdk java-1.7.0-openjdk-devel wget -y
#Download our Ambari setup script
git clone git://github.com/DemandCube/hadoop-single-node-cluster
cd hadoop-single-node-cluster
sudo ./INSTALL-HADOOP -y
#Checkout the whole project, set up gradlew
cd $CWD
git clone git://github.com/DemandCube/NeverwinterDP
cd NeverwinterDP
./neverwinterdp.sh checkout
./gradlew
cd $CWD/NeverwinterDP-Commons/
./gradlew clean build install release -x test
cd $CWD/Queuengin/
./gradlew clean build install release -x test
cd $CWD/Scribengin
./gradlew clean build install release -x test
#Download kafka
cd $CWD
wget https://archive.apache.org/dist/kafka/0.8.0/kafka_2.8.0-0.8.0.tar.gz
tar -xzf kafka_2.8.0-0.8.0.tar.gz
#Launch kafka
#cd kafka_2.8.0-0.8.0
#Launch Kafka at system startup
echo $CWD/kafka_2.8.0-0.8.0/bin/kafka-server-start.sh $CWD/kafka_2.8.0-0.8.0/config/server.properties >> /etc/rc.d/rc.local
#Launch Kafka now
#./bin/kafka-server-start.sh ./config/server.properties &
#KAFKA_PROC_ID=$!
#echo "Kafka process ID: $KAFKA_PROC_ID"
|
DemandCube/Scribengin
|
vagrant/scripts/setup.sh
|
Shell
|
agpl-3.0
| 1,520 |
#!/bin/bash
##
## @brief @(#) See README. Drop database CLI tests.
##
## @file 20_run_delete.sh
##
## -----------------------------------------------------------------------------
## Enduro/X Middleware Platform for Distributed Transaction Processing
## Copyright (C) 2009-2016, ATR Baltic, Ltd. All Rights Reserved.
## Copyright (C) 2017-2019, Mavimax, Ltd. All Rights Reserved.
## This software is released under one of the following licenses:
## AGPL (with Java and Go exceptions) or Mavimax's license for commercial use.
## See LICENSE file for full text.
## -----------------------------------------------------------------------------
## AGPL license:
##
## This program is free software; you can redistribute it and/or modify it under
## the terms of the GNU Affero General Public License, version 3 as published
## by the Free Software Foundation;
##
## This program is distributed in the hope that it will be useful, but WITHOUT ANY
## WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
## PARTICULAR PURPOSE. See the GNU Affero General Public License, version 3
## for more details.
##
## You should have received a copy of the GNU Affero General Public License along
## with this program; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##
## -----------------------------------------------------------------------------
## A commercial use license is available from Mavimax, Ltd
## [email protected]
## -----------------------------------------------------------------------------
##
export TESTNAME="test048_cache"
PWD=`pwd`
if [ `echo $PWD | grep $TESTNAME ` ]; then
# Do nothing
echo > /dev/null
else
# started from parent folder
pushd .
echo "Doing cd"
cd $TESTNAME
fi;
export NDRX_CCONFIG=`pwd`
. ../testenv.sh
export TESTDIR="$NDRX_APPHOME/atmitest/$TESTNAME"
export PATH=$PATH:$TESTDIR
export NDRX_TOUT=10
export NDRX_ULOG=$TESTDIR
source ./test-func-include.sh
#
# Domain 1 - here client will live
#
set_dom1() {
echo "Setting domain 1"
. ../dom1.sh
export NDRX_CONFIG=$TESTDIR/ndrxconfig-dom1.xml
export NDRX_DMNLOG=$TESTDIR/ndrxd-dom1.log
export NDRX_LOG=$TESTDIR/ndrx-dom1.log
export TESTDIR_DB=$TESTDIR
export TESTDIR_SHM=$TESTDIR
export NDRX_CCTAG=dom1
}
set_dom2() {
echo "Setting domain 2"
. ../dom2.sh
export NDRX_CONFIG=$TESTDIR/ndrxconfig-dom2.xml
export NDRX_DMNLOG=$TESTDIR/ndrxd-dom2.log
export NDRX_LOG=$TESTDIR/ndrx-dom2.log
export TESTDIR_DB=$TESTDIR/dom2
export TESTDIR_SHM=$TESTDIR/dom2
export NDRX_CCTAG=dom2
}
set_dom3() {
echo "Setting domain 3"
. ../dom3.sh
export NDRX_CONFIG=$TESTDIR/ndrxconfig-dom3.xml
export NDRX_DMNLOG=$TESTDIR/ndrxd-dom3.log
export NDRX_LOG=$TESTDIR/ndrx-dom3.log
export TESTDIR_DB=$TESTDIR/dom3
export TESTDIR_SHM=$TESTDIR/dom3
export NDRX_CCTAG=dom3
}
#
# Generic exit function
#
function go_out {
echo "Test exiting with: $1"
set_dom1;
xadmin stop -y
xadmin down -y
set_dom2;
xadmin stop -y
xadmin down -y
set_dom3;
xadmin stop -y
xadmin down -y
# If some alive stuff left...
xadmin killall atmiclt48
popd 2>/dev/null
exit $1
}
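# Remove logs from any previous run.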
rm *.log
# Any bridges that are live must be killed!
xadmin killall tpbridge
echo "Booting domain 1"
set_dom1;
xadmin down -y
xadmin start -y || go_out 1
echo "Booting domain 2"
set_dom2;
xadmin down -y
xadmin start -y || go_out 1
echo "Booting domain 3"
set_dom3;
xadmin down -y
xadmin start -y || go_out 1
echo "Letting clients boot and links establish..."
sleep 60
RET=0
echo "Domain 1 info"
set_dom1;
xadmin psc
xadmin ppm
xadmin pc
echo "Domain 2 info"
set_dom2;
xadmin psc
xadmin ppm
xadmin pc
echo "Domain 3 info"
set_dom3;
xadmin psc
xadmin ppm
xadmin pc
echo "Run "
set_dom1;
(time ./testtool48 -sTESTSV20 -b '{"T_STRING_FLD":"KEY1","T_STRING_2_FLD":"CACHE1","T_LONG_3_FLD":"1"}' \
-m '{"T_STRING_FLD":"KEY1","T_STRING_2_FLD":"CACHE1","T_LONG_3_FLD":"1"}' \
-cY -n50 -fY 2>&1) > ./20_testtool48.log
if [ $? -ne 0 ]; then
echo "testtool48 failed (1)"
go_out 1
fi
(time ./testtool48 -sTESTSV20 -b '{"T_STRING_FLD":"KEY2","T_STRING_2_FLD":"CACHE2","T_LONG_3_FLD":"2"}' \
-m '{"T_STRING_FLD":"KEY2","T_STRING_2_FLD":"CACHE2","T_LONG_3_FLD":"2"}' \
-cY -n50 -fY 2>&1) >> ./20_testtool48.log
if [ $? -ne 0 ]; then
echo "testtool48 failed (2)"
go_out 2
fi
(time ./testtool48 -sTESTSV20 -b '{"T_STRING_FLD":"KEY3","T_STRING_2_FLD":"CACHE3","T_LONG_3_FLD":"3"}' \
-m '{"T_STRING_FLD":"KEY3","T_STRING_2_FLD":"CACHE3","T_LONG_3_FLD":"3"}' \
-cY -n50 -fY 2>&1) >> ./20_testtool48.log
if [ $? -ne 0 ]; then
echo "testtool48 failed (3)"
go_out 3
fi
(time ./testtool48 -sTESTSV20 -b '{"T_STRING_FLD":"KEY4","T_STRING_2_FLD":"CACHE4","T_LONG_3_FLD":"4"}' \
-m '{"T_STRING_FLD":"KEY4","T_STRING_2_FLD":"CACHE4","T_LONG_3_FLD":"4"}' \
-cY -n50 -fY 2>&1) >> ./20_testtool48.log
if [ $? -ne 0 ]; then
echo "testtool48 failed (4)"
go_out 4
fi
echo "Calling inval..."
(time ./testtool48 -sTESTSV20I -b '{"T_STRING_FLD":"KEY1","T_STRING_2_FLD":"CACHE1","T_LONG_3_FLD":"1"}' \
-m '{"T_STRING_FLD":"KEY1","T_STRING_2_FLD":"CACHE1","T_LONG_3_FLD":"1"}' \
-cY -n1 -fY 2>&1) >> ./20_testtool48.log
if [ $? -ne 0 ]; then
echo "testtool48 failed (5)"
go_out 5
fi
(time ./testtool48 -sTESTSV20I -b '{"T_STRING_FLD":"KEY2","T_STRING_2_FLD":"CACHE2","T_LONG_3_FLD":"2"}' \
-m '{"T_STRING_FLD":"KEY2","T_STRING_2_FLD":"CACHE2","T_LONG_3_FLD":"2"}' \
-cY -n1 -fY 2>&1) >> ./20_testtool48.log
if [ $? -ne 0 ]; then
echo "testtool48 failed (6)"
go_out 6
fi
(time ./testtool48 -sTESTSV20I -b '{"T_STRING_FLD":"KEY3","T_STRING_2_FLD":"CACHE3","T_LONG_3_FLD":"3"}' \
-m '{"T_STRING_FLD":"KEY3","T_STRING_2_FLD":"CACHE3","T_LONG_3_FLD":"3"}' \
-cY -n1 -fY 2>&1) >> ./20_testtool48.log
if [ $? -ne 0 ]; then
echo "testtool48 failed (7)"
go_out 7
fi
(time ./testtool48 -sTESTSV20I -b '{"T_STRING_FLD":"KEY4","T_STRING_2_FLD":"CACHE4","T_LONG_3_FLD":"4"}' \
-m '{"T_STRING_FLD":"KEY4","T_STRING_2_FLD":"CACHE4","T_LONG_3_FLD":"4"}' \
-cY -n1 -fY 2>&1) >> ./20_testtool48.log
if [ $? -ne 0 ]; then
echo "testtool48 failed (8)"
go_out 8
fi
echo "let messages to bcast"
sleep 3
echo "Testing domain 1"
ensure_keys db20 0
echo "Testing domain 2"
set_dom2;
ensure_keys db20 2
xadmin cs db20
ensure_field db20 SV20_1KEY1 T_STRING_2_FLD CACHE1 1
ensure_field db20 SV20_2KEY2 T_STRING_2_FLD CACHE2 0
ensure_field db20 SV20_3KEY3 T_STRING_2_FLD CACHE3 1
ensure_field db20 SV20_4KEY4 T_STRING_2_FLD CACHE4 0
echo "Testing domain 3"
set_dom3;
xadmin cs db20
ensure_keys db20 2
ensure_field db20 SV20_1KEY1 T_STRING_2_FLD CACHE1 1
ensure_field db20 SV20_2KEY2 T_STRING_2_FLD CACHE2 0
ensure_field db20 SV20_3KEY3 T_STRING_2_FLD CACHE3 1
ensure_field db20 SV20_4KEY4 T_STRING_2_FLD CACHE4 0
go_out $RET
# vim: set ts=4 sw=4 et smartindent:
|
endurox-dev/endurox
|
atmitest/test048_cache/20_run_delete.sh
|
Shell
|
agpl-3.0
| 7,049 |
#!/bin/bash
tempDirOD="OpenDataAN"
urlAmdtOD="http://data.assemblee-nationale.fr/static/openData/repository/LOI/amendements_legis/Amendements_XIV.json.zip"
AmdtODFile="Amendements_XIV.json"
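# Download and unpack the Assemblée nationale amendments open-data dump, then parse it.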
rm -rf $tempDirOD
mkdir -p $tempDirOD
cd $tempDirOD
wget $urlAmdtOD
unzip $AmdtODFile.zip
cd ..
python parseAmdtsFromANOpenData.py
|
regardscitoyens/nosdeputes.fr
|
batch/amendements/runAmdtOD.sh
|
Shell
|
agpl-3.0
| 329 |
#!/bin/bash
REPO_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/../"
hookfile="$REPO_DIR/.git/hooks/pre-commit"
if [[ -f $hookfile ]]; then
echo "'$hookfile' already exists - aborting" 1>&2
exit 1
fi
echo '#!/bin/bash
patch=$(git clang-format --diff)
if [[ "$patch" =~ "no modified files to format" || "$patch" =~ "clang-format did not modify any files" ]]; then
echo "" > /dev/null
else
echo ""
echo "formatting fixes required" >&2
echo ""
echo "$patch"
exit 1
fi
' > $hookfile
chmod a+x $hookfile
|
backmari/moose
|
scripts/install-format-hook.sh
|
Shell
|
lgpl-2.1
| 544 |
#!/bin/bash
####################
# This script is run on the machine hosting the MySQL database for Privatläkarportalen.
#
# Usage: ./webcert_usage_part1_pp.sh <database_user> <database_password> <pp_database_name>
#
# Two text files with database dumps are created in the current working directory: wc_pp_usage_query_result.txt and pp_allavardgivare_dump.txt.
# These are used by the next script, webcert_usage_part2_it.sh
####################
# Set to Swedish locale, for correct encoding of characters
LC_ALL="sv_SE.utf8"
LC_CTYPE="sv_SE.utf8"
USER=$1
PASSWORD=$2
# In development, we use 'privatlakarportal'
PRIVATLAKARPORTAL_DATABASE_NAME=$3
QUERY="select VARDGIVARE_ID,PERSONID,VARDGIVARE_NAMN,EPOST,TELEFONNUMMER from PRIVATLAKARE order by VARDGIVARE_ID;"
echo "Executing query: $QUERY"
PRIVATLAKARPORTAL_QUERY_RESULT="$(mysql --user=$USER --password=$PASSWORD --batch -e "use $PRIVATLAKARPORTAL_DATABASE_NAME; $QUERY")"
# For use in final result
echo "$PRIVATLAKARPORTAL_QUERY_RESULT" > wc_pp_usage_query_result.txt
# Create a list of all private practitioners
VARDGIVARE_I_PP="$(echo "$PRIVATLAKARPORTAL_QUERY_RESULT" | cut -f 1 | tail -n +2)"
echo "$VARDGIVARE_I_PP" > pp_allavardgivare_dump.txt
|
sklintyg/tools
|
webcert-usage/pp-users/webcert_usage_part1_pp.sh
|
Shell
|
lgpl-3.0
| 1,186 |
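# Exercise ruletable under valgrind with a sample JSON rule set, checking for leaks and reachable blocks.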
valgrind --tool=memcheck --leak-check=full --show-reachable=yes ./ruletable -t '{
"ControlRule":[
{"Id":1,"SrcZoneIds":[1,2],"SrcIpgrpIds":[3,4],"DstZoneIds":[5,6],"DstIpgrpIds":[7,8],"ProtoIds":[0,9],"Action":["AC_ACCEPT","AC_AUDIT"]},
{"Id":2,"SrcZoneIds":[2,2],"SrcIpgrpIds":[2,4],"DstZoneIds":[2,6],"DstIpgrpIds":[2,8],"ProtoIds":[2,9],"Action":["AC_AUDIT"]}
],
"ControlSet":{
"MacWhiteListSetName":"macwhite",
"IpWhiteListSetName":"ipwhite",
"MacBlackListSetName":"macblack",
"IpBlackListSetName":"ipblack"
},
"AuditRule":[
{"Id":1,"SrcZoneIds":[1,2],"SrcIpgrpIds":[3,4],"DstZoneIds":[5,6],"DstIpgrpIds":[7,8],"ProtoIds":[0,9],"Action":["AC_ACCEPT","AC_AUDIT"]},
{"Id":2,"SrcZoneIds":[2,2],"SrcIpgrpIds":[2,4],"DstZoneIds":[2,6],"DstIpgrpIds":[2,8],"ProtoIds":[2,9],"Action":["AC_AUDIT"]}
],
"AuditSet":{
"MacWhiteListSetName":"macwhite",
"IpWhiteListSetName":"ipwhite",
"MacBlackListSetName":"macblack",
"IpBlackListSetName":"ipblack"
}
}'
|
itgb/ac_toolkit
|
ruletable/src/valgrindtest.sh
|
Shell
|
lgpl-3.0
| 1,002 |
#!/bin/bash
#
# A simple script to update my computer's various programs.
# This script can be added to crontab
#
# At the moment it updates:
# * vim programs
# * brew updates
# * brew upgrades
# * a cleanup after all operations
# update vim plugins
echo "updating vim plugins"
cd ~/.dotfiles/.vim/bundle
for D in `find . -type d -depth 1 | grep -v 'deleted'`
do
cd "${D}"; git pull origin master; cd ..
done
# update beets
pip install --upgrade beets
# update brew
echo "updating brew"
brew update
# upgrade software installed via brew
echo "upgrading brew software"
brew upgrade
# perform a cleanup
cleanup
|
laplacesdemon/mac-cleanup
|
update.sh
|
Shell
|
unlicense
| 632 |
#!/bin/bash
set -eux -o pipefail
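# Clone pyenv if missing, then update it; do the same for the pyenv-virtualenvwrapper plugin.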
mkdir -p "${HOME}/src/github.com/pyenv"
pushd "${HOME}/src/github.com/pyenv"
if [ ! -d "${HOME}/.pyenv" ]; then
git clone https://github.com/pyenv/pyenv "${HOME}/.pyenv"
fi
cd "${HOME}/.pyenv"
git pull
popd
mkdir -p "${HOME}/.pyenv/plugins"
pushd "${HOME}/.pyenv/plugins"
if [ ! -d pyenv-virtualenvwrapper ]; then
git clone https://github.com/yyuu/pyenv-virtualenvwrapper
fi
cd "${HOME}/.pyenv/plugins/pyenv-virtualenvwrapper"
git pull
popd
./pyenv/pyenv-installer
./update.sh
|
carterjones/nix-config
|
templates/packages/pyenv/shared.sh
|
Shell
|
unlicense
| 521 |
#!/bin/bash
#
# This script supports the in-place upgrade to AEM 6.4 on a consolidated instance
#
# Prerequisites Upgrade:
# * Place package pre-upgrade-tasks-content-cq62-1.2.4.zip in the defined s3 bucket
# * Place crx2oak jar file crx2oak-1.8.6-all-in-one.jar in the defined s3 bucket
#
# Prerequisites Post-Upgrade:
# * Place following packages in the defined s3 bucket:
# ** acs-aem-commons-content-3.17.4.zip
# ** acs-aem-tools-content-1.0.0.zip
# ** com.adobe.acs.bundles.netty-1.0.2.zip
#
current_datetime=$(date "+%Y-%m-%d-%H-%M-%S")
java_run=true
aem_workdir="/opt/aem"
author_workdir="${aem_workdir}/author"
publish_workdir="${aem_workdir}/publish"
author_crx_workdir="${author_workdir}/crx-quickstart"
publish_crx_workdir="${publish_workdir}/crx-quickstart"
shinesolutions_workdir="/opt/shinesolutions"
aemtools_workdir="${shinesolutions_workdir}/aem-tools"
# S3 Bucket
s3_bucket="aem-opencloud"
s3_bucket_path="s3://${s3_bucket}"
### Prerequisites Upgrade Parameters
# Log Purge rules
create_version_purge=true
create_workflow_purge=false
create_audit_purge=false
# Deletion of bak files older than x days
enable_delete_bak_files=true
bak_file_age=30
# Enable offline Snapshot
enable_offline_snapshot=false
# Definition of the crx2oak file to apply for repository migration
# This file must be located in the defined s3_bucket
crx2oak_source="${s3_bucket_path}/crx2oak-1.8.6-all-in-one.jar"
# Using ${aemtools_workdir}/deploy-artifact.sh to install pre upgrade task package
# Definition for the pre upgrade tasks package
pre_upgrade_package_source="${s3_bucket_path}/pre-upgrade-tasks-content-cq62-1.2.4.zip"
pre_upgrade_package_group="day/cq62/product"
pre_upgrade_package_name="pre-upgrade-tasks-content-cq62"
pre_upgrade_package_version="1.2.4"
pre_upgrade_package_replicate=false
pre_upgrade_package_activate=true
pre_upgrade_package_force=false
### Post Upgrade parameters starts from here
enable_post_upgrade=true
enable_stop_rewriter_bundle=false
# Parameters to install latest acs aem commons content package
acs_aem_commons_install=true
acs_aem_commons_source="${s3_bucket_path}/acs-aem-commons-content-3.17.4.zip"
acs_aem_commons_group="adobe/consulting"
acs_aem_commons_name="acs-aem-commons-content"
acs_aem_commons_version="3.17.4"
acs_aem_commons_replicate=false
acs_aem_commons_activate=true
acs_aem_commons_force=false
# Parameters to install latest acs aem tools content package
acs_aem_tools_install=true
acs_aem_tools_source="${s3_bucket_path}/acs-aem-tools-content-1.0.0.zip"
acs_aem_tools_group="adobe/consulting"
acs_aem_tools_name="acs-aem-tools-content"
acs_aem_tools_version="1.0.0"
acs_aem_tools_replicate=false
acs_aem_tools_activate=true
acs_aem_tools_force=false
# Parameters to install package com.adobe.acs.bundles.netty
acs_bundles_netty_install=true
acs_bundles_netty_source="${s3_bucket_path}/com.adobe.acs.bundles.netty-1.0.2.zip"
acs_bundles_netty_group="adobe/consulting"
acs_bundles_netty_name="com.adobe.acs.bundles.netty"
acs_bundles_netty_version="1.0.2"
acs_bundles_netty_replicate=false
acs_bundles_netty_activate=true
acs_bundles_netty_force=false
translate_exit_code() {
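  # Exit the whole script if the given exit code is non-zero; otherwise return 0.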
exit_code="$1"
if [ "$exit_code" -eq 0 ]; then
exit_code=0
else
exit "$exit_code"
fi
return "$exit_code"
}
start_author_plain () {
cd ${author_workdir}
java -Xmx4096m -jar aem-author-4502.jar > /dev/null 2>&1 &
echo $!
}
start_publish_plain () {
cd ${publish_workdir}
java -Xmx4096m -jar aem-publish-4503.jar > /dev/null 2>&1 &
echo $!
}
start_aem_author() {
echo "Starting Author instance"
cd ${author_workdir}
systemctl start aem-author
translate_exit_code "$?"
}
start_aem_publish() {
echo "Starting Publish instance"
cd ${publish_workdir}
systemctl start aem-publish
translate_exit_code "$?"
}
stop_aem_author() {
echo "Stopping Author instance"
cd ${author_workdir}
systemctl stop aem-author
translate_exit_code "$?"
}
stop_aem_publish() {
echo "Stopping Publish instance"
cd ${publish_workdir}
systemctl stop aem-publish
translate_exit_code "$?"
}
restart_aem_author() {
stop_aem_author
java_run=true
wait_author_stopped
start_aem_author
java_run=false
wait_author_started
}
restart_aem_publish() {
stop_aem_publish
java_run=true
wait_publish_stopped
start_aem_publish
java_run=false
wait_publish_started
}
wait_author_started () {
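  # Poll every 10 seconds until a java process for the Author instance shows up in ps.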
while [ $java_run == 'false' ] || [ $java_run == 'False' ] ; do
if (( $(ps -ef | grep -v grep | grep java | grep author | wc -l) > 0 )); then
echo "Author instance is started."
java_run=true
else
echo "Wait till Author instance is started."
sleep 10
java_run=false
fi
done
}
wait_publish_started () {
while [ $java_run == 'false' ] || [ $java_run == 'False' ] ; do
if (( $(ps -ef | grep -v grep | grep java | grep publish | wc -l) > 0 )); then
echo "Publish instance is started"
java_run=true
else
echo "Wait till Publish instance is started."
sleep 10
java_run=false
fi
done
}
wait_author_stopped () {
while [ $java_run == 'true' ] || [ $java_run == 'True' ] ; do
if (( $(ps -ef | grep -v grep | grep java | grep author | wc -l) > 0 )); then
echo "Wait till Author process is stopped"
sleep 10
java_run=true
else
echo "Author process is stopped"
java_run=false
fi
done
}
wait_publish_stopped () {
while [ $java_run == 'true' ] || [ $java_run == 'True' ] ; do
if (( $(ps -ef | grep -v grep | grep java | grep publish | wc -l) > 0 )); then
echo "Wait till Publish process is stopped"
sleep 10
java_run=true
else
echo "Publish process is stopped"
java_run=false
fi
done
}
update_author_permission () {
echo "Update permissions to aem-author:aem-author for path ${author_workdir} & ${author_crx_workdir}/repository/"
chown -R aem-author:aem-author /opt/aem/author
chown -R aem-author:aem-author /opt/aem/author/crx-quickstart/repository/*
}
update_publish_permission () {
echo "Update permissions to aem-publish:aem-publish for path ${publish_workdir} & ${publish_crx_workdir}/repository/"
chown -R aem-publish:aem-publish /opt/aem/publish
chown -R aem-publish:aem-publish /opt/aem/publish/crx-quickstart/repository/*
}
upgrade_author () {
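  # Start the Author instance in upgrade mode (forced content migration) on port 4502 and echo its PID.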
cd ${author_workdir}
java -server -Xmx4096m -Dcom.adobe.upgrade.forcemigration=true \
-Djava.awt.headless=true -Dsling.run.modes=author,crx3,crx3tar \
-jar crx-quickstart/app/cq-quickstart-6.4.0-standalone-quickstart.jar start -c crx-quickstart -i launchpad \
-p 4502 -Dsling.properties=crx-quickstart/conf/sling.properties > /dev/null 2>&1 &
echo $!
}
upgrade_publish () {
cd ${publish_workdir}
java -server -Xmx4096m -Dcom.adobe.upgrade.forcemigration=true \
-Djava.awt.headless=true -Dsling.run.modes=publish,crx3,crx3tar \
-jar crx-quickstart/app/cq-quickstart-6.4.0-standalone-quickstart.jar start -c crx-quickstart -i launchpad \
-p 4503 -Dsling.properties=crx-quickstart/conf/sling.properties > /dev/null 2>&1 &
echo $!
}
echo "Upgrading AEM to AEM 6.4"
echo "."
echo "Upgrade Steps:"
echo "1. Stopping Author instance"
echo "2. Stopping Publish instance"
echo "3. Start Author instance from jar file"
echo "4. Start Publish instance from jar file"
echo "5. Configure Audit purge log and workflow purge manually"
echo "6. Create Version Purge rule."
echo "7. Create workflow Purge rule."
echo "8. Create Audit Purge rule."
echo "9. Please run Version, Audit and workflow purge job manually"
echo "10. Please uninstall any existing acs-aem-commons-content package manually"
echo "11. Run RevisionGarbageCollection"
echo "12. Disable Author replication agents manually"
echo "13. Install pre upgrade tasks for Author instance"
echo "14. Install pre upgrade tasks for Publish instance"
echo "15. Trigger run of all pre upgrade tasks for author instance"
echo "16. Trigger run of all pre upgrade tasks for publish instance"
echo "17. Manually check whether the pre upgrade tasks completed successfully"
echo "18. Stop of Author & Publish process"
echo "19. Copy Files needed for Upgrading"
echo "20. Create Backup"
echo "21. Remove old .bak files from repository"
echo "22. Run offline compaction job"
echo "23. Unpack AEM 64 jar file for Author"
echo "24. Run Repository migration for Author"
echo "25. Check logfiles for success of the repository upgrade"
echo "26. Run AEM Upgrade for Author"
echo "27. Check if AEM Upgrade for Author was successful manually"
echo "28. Stop Author instance"
echo "29. Unpack AEM 64 jar file for Publish"
echo "30. Run Repository migration for Publish"
echo "31. Check logfiles for success of the repository upgrade"
echo "32. Run AEM Upgrade for Publish"
echo "33. Check if AEM Upgrade for Publish was successful manually"
echo "34. Stop Publish instance"
echo "35. Run Post-Upgrade jobs"
echo "35.1 Start AEM Author"
echo "35.2 Start AEM Publish"
echo "35.3 Check if AEM successfully starts manually"
echo "35.4 Stop org.apache.sling.rewriter bundle"
echo "35.5 Install ACS Bundles netty"
echo "35.6 Install ACS AEM Tools"
echo "35.7 Install ACS AEM Comons Content"
echo "35.8 Stop AEM Author"
echo "35.9 Stop AEM Publish"
echo "36. Run offline Snapshot "
read -p "Press enter to start AEM Upgrade process"
echo "."
echo "."
stop_aem_author
echo "."
echo "."
java_run=true
wait_author_stopped
echo "."
echo "."
echo "Starting Author instance without any options"
author_pid=$(start_author_plain)
java_run=false
wait_author_started
echo "."
echo "."
stop_aem_publish
echo "."
echo "."
java_run=true
wait_publish_stopped
echo "."
echo "."
echo "Starting Publish instance without any options"
publish_pid=$(start_publish_plain)
java_run=false
wait_publish_started
echo "."
echo "."
echo "Please configure Audit purge log and workflow purge manually"
echo "http://localhost:4502/system/console/configMgr"
echo "http://localhost:4503/system/console/configMgr"
read -p "Press enter to continue to create automated purge rules"
echo "."
echo "."
echo "Create Purge Rules"
if [ $create_version_purge == 'True' ] || [ $create_version_purge == 'true' ] ; then
echo "."
echo "."
echo "Create Version Purge rule for Author."
curl -f -u admin:admin \
-F 'jcr:primaryType=nt:unstructured' \
-F 'sling:resourceType=granite/operations/components/maintenance/task' \
-F ':redirect=/libs/granite/operations/content/maintenanceWindow.html/mnt/overlay/granite/operations/config/maintenance/_granite_daily' \
-F 'name=/libs/granite/operations/content/maintenanceWindow.html/mnt/overlay/granite/operations/config/maintenance/_granite_daily' \
-F 'granite.task.disclaimer=' \
-F 'granite.task.hint=' \
-F 'granite.maintenance.name=com.day.cq.wcm.core.impl.VersionPurgeTask' \
'http://localhost:4502/apps/granite/operations/config/maintenance/_granite_daily/*'
translate_exit_code "$?"
echo "Version Purge rule for Author created."
echo "."
echo "."
echo "Create Version Purge rule for Publish."
curl -f -u admin:admin \
-F 'jcr:primaryType=nt:unstructured' \
-F 'sling:resourceType=granite/operations/components/maintenance/task' \
-F ':redirect=/libs/granite/operations/content/maintenanceWindow.html/mnt/overlay/granite/operations/config/maintenance/_granite_daily' \
-F 'name=/libs/granite/operations/content/maintenanceWindow.html/mnt/overlay/granite/operations/config/maintenance/_granite_daily' \
-F 'granite.task.disclaimer=' \
-F 'granite.task.hint=' \
-F 'granite.maintenance.name=com.day.cq.wcm.core.impl.VersionPurgeTask' \
'http://localhost:4503/apps/granite/operations/config/maintenance/_granite_daily/*'
translate_exit_code "$?"
echo "Version Purge rule for Publish created."
fi
if [ $create_workflow_purge == 'True' ] || [ $create_workflow_purge == 'true' ] ; then
echo "."
echo "."
echo "Create workflow Purge rule for Author."
curl -f -u admin:admin \
-F 'jcr:primaryType=nt:unstructured' \
-F 'sling:resourceType=granite/operations/components/maintenance/task' \
-F ':redirect=/libs/granite/operations/content/maintenanceWindow.html/mnt/overlay/granite/operations/config/maintenance/_granite_weekly' \
-F 'name=/libs/granite/operations/content/maintenanceWindow.html/mnt/overlay/granite/operations/config/maintenance/_granite_weekly' \
-F 'granite.task.disclaimer=' \
-F 'granite.maintenance.name=WorkflowPurgeTask' \
'http://localhost:4502/apps/granite/operations/config/maintenance/_granite_weekly/*'
translate_exit_code "$?"
echo "workflow Purge rule for Author created."
echo "."
echo "."
echo "Create workflow Purge rule for Publish."
curl -f -u admin:admin \
-F 'jcr:primaryType=nt:unstructured' \
-F 'sling:resourceType=granite/operations/components/maintenance/task' \
-F ':redirect=/libs/granite/operations/content/maintenanceWindow.html/mnt/overlay/granite/operations/config/maintenance/_granite_weekly' \
-F 'name=/libs/granite/operations/content/maintenanceWindow.html/mnt/overlay/granite/operations/config/maintenance/_granite_weekly' \
-F 'granite.task.disclaimer=' \
-F 'granite.maintenance.name=WorkflowPurgeTask' \
'http://localhost:4503/apps/granite/operations/config/maintenance/_granite_weekly/*'
translate_exit_code "$?"
echo "workflow Purge rule for Publish created."
fi
if [ $create_audit_purge == 'True' ] || [ $create_audit_purge == 'true' ] ; then
echo "."
echo "."
echo "Create Audit Purge rule for Author."
curl -f -u admin:admin \
-F 'jcr:primaryType=nt:unstructured' \
-F 'sling:resourceType=granite/operations/components/maintenance/task' \
-F ':redirect=/libs/granite/operations/content/maintenanceWindow.html/mnt/overlay/granite/operations/config/maintenance/_granite_weekly' \
-F 'name=/libs/granite/operations/content/maintenanceWindow.html/mnt/overlay/granite/operations/config/maintenance/_granite_weekly' \
-F 'granite.task.hint=' \
-F 'granite.maintenance.name=com.day.cq.audit.impl.AuditLogMaintenanceTask' \
'http://localhost:4502/apps/granite/operations/config/maintenance/_granite_weekly/*'
translate_exit_code "$?"
echo "Audit Purge rule for Author created."
echo "."
echo "."
echo "Create Audit Purge rule for Publish."
curl -f -u admin:admin \
-F 'jcr:primaryType=nt:unstructured' \
-F 'sling:resourceType=granite/operations/components/maintenance/task' \
-F ':redirect=/libs/granite/operations/content/maintenanceWindow.html/mnt/overlay/granite/operations/config/maintenance/_granite_weekly' \
-F 'name=/libs/granite/operations/content/maintenanceWindow.html/mnt/overlay/granite/operations/config/maintenance/_granite_weekly' \
-F 'granite.task.hint=' \
-F 'granite.maintenance.name=com.day.cq.audit.impl.AuditLogMaintenanceTask' \
'http://localhost:4503/apps/granite/operations/config/maintenance/_granite_weekly/*'
translate_exit_code "$?"
echo "Audit Purge rule for Publish created."
fi
echo "."
echo "."
echo "Please run the Version, Audit and workflow purge jobs manually and wait till they finish successfully."
echo "http://localhost:4502/libs/granite/operations/content/maintenance.html"
echo "http://localhost:4503/libs/granite/operations/content/maintenance.html"
read -p "Press enter to continue"
echo "."
echo "."
echo "Please uninstall any existing acs-aem-commons-content package manually"
echo "http://localhost:4502/crx/packmgr/index.jsp"
echo "http://localhost:4503/crx/packmgr/index.jsp"
read -p "Press enter to continue"
echo "."
echo "."
echo "Run RevisionGarbageCollection"
curl -f -u admin:admin \
-X POST \
"http://localhost:4502/system/console/jmx/org.apache.jackrabbit.oak:name=Segment+node+store+revision+garbage+collection,type=RevisionGarbageCollection/op/startRevisionGC/"
translate_exit_code "$?"
curl -f -u admin:admin \
-X POST \
"http://localhost:4503/system/console/jmx/org.apache.jackrabbit.oak:name=Segment+node+store+revision+garbage+collection,type=RevisionGarbageCollection/op/startRevisionGC/"
translate_exit_code "$?"
echo "."
echo "."
echo "Please manually check whether RevisionGarbageCollection completed successfully"
echo "http://localhost:4502/system/console/jmx"
echo "http://localhost:4503/system/console/jmx"
read -p "Press enter to continue"
echo "."
echo "."
echo "Please disable Author replication agents manually"
echo "http://localhost:4502/etc/replication/agents.author/replicationAgent-localhost.html"
read -p "Press enter to continue"
echo "."
echo "."
echo "Install pre upgrade tasks for Author instance from ${pre_upgrade_package_source}"
${aemtools_workdir}/deploy-artifact.sh author "${pre_upgrade_package_source}" ${pre_upgrade_package_group} ${pre_upgrade_package_name} ${pre_upgrade_package_version} ${pre_upgrade_package_replicate} ${pre_upgrade_package_activate} ${pre_upgrade_package_force}
translate_exit_code "$?"
echo "."
echo "."
echo "Install pre upgrade tasks for Publish instance from ${pre_upgrade_package_source}"
${aemtools_workdir}/deploy-artifact.sh publish "${pre_upgrade_package_source}" ${pre_upgrade_package_group} ${pre_upgrade_package_name} ${pre_upgrade_package_version} ${pre_upgrade_package_replicate} ${pre_upgrade_package_activate} ${pre_upgrade_package_force}
translate_exit_code "$?"
echo "."
echo "."
echo "Trigger run of all pre upgrade tasks for author instance"
curl -f -v -u admin:admin -X POST 'http://localhost:4502/system/console/jmx/com.adobe.aem.upgrade.prechecks:type=PreUpgradeTasks/op/runAllPreUpgradeTasks/'
translate_exit_code "$?"
echo "."
echo "."
echo "Trigger run of all pre upgrade tasks for publish instance"
curl -f -v -u admin:admin -X POST 'http://localhost:4503/system/console/jmx/com.adobe.aem.upgrade.prechecks:type=PreUpgradeTasks/op/runAllPreUpgradeTasks/'
translate_exit_code "$?"
echo "."
echo "."
echo "Please manually check whether the pre upgrade tasks completed successfully"
echo 'http://localhost:4502/system/console/jmx/com.adobe.aem.upgrade.prechecks:type=PreUpgradeTasks'
echo 'http://localhost:4503/system/console/jmx/com.adobe.aem.upgrade.prechecks:type=PreUpgradeTasks'
read -p "Press enter to continue"
echo "."
echo "."
echo "Please confirm stopping the Author & Publish processes"
read -p "Press enter to continue"
kill ${author_pid} ${publish_pid}
echo "."
java_run=true
wait_author_stopped
echo "."
echo "."
java_run=true
wait_publish_stopped
echo "."
echo "."
echo "."
echo "Copying AEM 6.4 Quickstart file to $author_workdir"
aws s3 cp ${s3_bucket_path}/AEM_6.4_Quickstart.jar $author_workdir/
echo "."
echo "."
echo "."
echo "Copying AEM 6.4 Quickstart file to $publish_workdir"
aws s3 cp ${s3_bucket_path}/AEM_6.4_Quickstart.jar $publish_workdir/
echo "."
echo "."
echo "creating backup directory ${author_workdir}/backup"
if [ -d ${author_workdir}/backup ]; then
mv ${author_workdir}/backup ${author_workdir}/backup_${current_datetime}
mkdir ${author_workdir}/backup
else
mkdir ${author_workdir}/backup
fi
echo "."
echo "."
echo "."
echo "creating backup directory ${publish_workdir}/backup"
if [ -d ${publish_workdir}/backup ]; then
mv ${publish_workdir}/backup ${publish_workdir}/backup_${current_datetime}
mkdir ${publish_workdir}/backup
else
mkdir ${publish_workdir}/backup
fi
echo "."
echo "."
echo "."
if [ -f ${author_workdir}/AEM_6.2_Quickstart.jar ]; then
echo "Move file ${author_workdir}/AEM_6.2_Quickstart.jar to ${author_workdir}/backup"
mv ${author_workdir}/AEM_6.2_Quickstart.jar ${author_workdir}/backup/
fi
echo "."
echo "."
echo "."
if [ -f ${author_workdir}/aem-author-4502.jar ]; then
echo "Move file ${author_workdir}/aem-author-4502.jar to ${author_workdir}/backup"
mv ${author_workdir}/aem-author-4502.jar ${author_workdir}/backup/
fi
echo "."
echo "."
echo "."
if [ -f ${publish_workdir}/AEM_6.2_Quickstart.jar ]; then
echo "Move file ${publish_workdir}/AEM_6.2_Quickstart.jar to ${publish_workdir}/backup"
mv ${publish_workdir}/AEM_6.2_Quickstart.jar ${publish_workdir}/backup/
fi
echo "."
echo "."
echo "."
if [ -f ${publish_workdir}/aem-publish-4503.jar ]; then
echo "Move file ${publish_workdir}/aem-publish-4503.jar to ${publish_workdir}/backup"
mv ${publish_workdir}/aem-publish-4503.jar ${publish_workdir}/backup/
fi
echo "."
echo "."
echo "."
if [ -f ${author_workdir}/AEM_6.4_Quickstart.jar ]; then
echo "Rename file ${author_workdir}/AEM_6.4_Quickstart.jar to ${author_workdir}/aem-author-4502.jar"
mv ${author_workdir}/AEM_6.4_Quickstart.jar ${author_workdir}/aem-author-4502.jar
fi
echo "."
echo "."
echo "."
if [ -f ${publish_workdir}/AEM_6.4_Quickstart.jar ]; then
echo "Rename file ${publish_workdir}/AEM_6.4_Quickstart.jar to ${publish_workdir}/aem-publish-4503.jar"
mv ${publish_workdir}/AEM_6.4_Quickstart.jar ${publish_workdir}/aem-publish-4503.jar
fi
echo "."
echo "."
echo "Adds execution right to file aem-author-4502.jar"
chmod +x ${author_workdir}/aem-author-4502.jar
echo "."
echo "."
echo "Adds execution right to file aem-publish-4503.jar"
chmod +x ${publish_workdir}/aem-publish-4503.jar
echo "."
echo "."
echo "Create a backup of ${author_crx_workdir} in ${author_workdir}/backup/"
cp -r ${author_crx_workdir} ${author_workdir}/backup/
echo "."
echo "."
echo "Create a backup of ${publish_crx_workdir} in ${publish_workdir}/backup/"
cp -r ${publish_crx_workdir} ${publish_workdir}/backup/
echo "."
echo "."
if [ $enable_delete_bak_files == 'True' ] || [ $enable_delete_bak_files == 'true' ] ; then
echo "removing .bak files older than ${bak_file_age} days in Author repository ${author_crx_workdir}/repository/"
echo "."
echo "."
find ${author_crx_workdir}/repository/ \
-name '*.bak' \
-type f \
-mtime +$bak_file_age \
-exec rm -fv '{}' \;
echo "Finish removing .bak files older than $bak_file_age days."
echo "."
echo "."
fi
echo "."
echo "."
if [ $enable_delete_bak_files == 'True' ] || [ $enable_delete_bak_files == 'true' ] ; then
echo "removing .bak files older than ${bak_file_age} days in Publisher repository ${publish_crx_workdir}/repository/"
echo "."
echo "."
find ${publish_crx_workdir}/repository/ \
-name '*.bak' \
-type f \
-mtime +$bak_file_age \
-exec rm -fv '{}' \;
echo "Finish removing .bak files older than $bak_file_age days."
echo "."
echo "."
fi
echo "."
echo "."
update_author_permission
echo "."
echo "."
update_publish_permission
echo "."
echo "."
echo "Run offline compaction job"
${aemtools_workdir}/offline-compaction.sh >> /var/log/shinesolutions/upgrade_offline_compaction.log 2>&1
translate_exit_code "$?"
echo "Offline Compaction job done."
read -p "Press enter to continue"
echo "."
echo "."
echo "Unpack aem-author-4502.jar"
cd ${author_workdir}
java -Xmx4096m -jar aem-author-4502.jar -unpack
echo "Unpack aem-author-4502.jar done"
echo "."
echo "."
echo "Wait till unpacking of aem-author-4502.jar is done."
echo "Please confirm to go to the next step 'repository upgrade'"
read -p "Press enter to continue"
echo "."
echo "."
update_author_permission
echo "."
echo "."
echo "Remove old crx2oak jar file from ${author_crx_workdir}/opt/extensions/"
rm -fv ${author_crx_workdir}/opt/extensions/crx2oak*.jar
echo "."
echo "."
echo "Copy ${crx2oak_source} to ${author_crx_workdir}/opt/extensions/crx2oak.jar"
aws s3 cp ${crx2oak_source} ${author_crx_workdir}/opt/extensions/crx2oak.jar
echo "."
echo "."
echo "Run Repository migration for Author"
cd ${author_workdir}
java -Xmx4096m -jar aem-author-4502.jar -v -x crx2oak -xargs -- --load-profile segment-no-ds
echo "."
echo "."
echo "Wait till repo migration for Author is done."
update_author_permission
echo "."
echo "."
echo "Repo migration is done please press enter to print last 50 lines of ${author_crx_workdir}/logs/upgrade.log"
read -p "Press enter to continue"
tail -n 50 ${author_crx_workdir}/logs/upgrade.log
echo "."
echo "."
echo "Please check logfile ${author_crx_workdir}/logs/upgrade.log for errors."
echo "Please confirm to go to the next step 'AEM Upgrade'"
read -p "Press enter to continue"
echo "."
echo "."
if [ -f ${author_crx_workdir}/app/cq-quickstart-6.2.0-standalone-quickstart.jar ]; then
echo "Moving AEM 6.2 Quickstart file from ${author_crx_workdir}/app/ to /tmp as it's not needed anymore."
mv ${author_crx_workdir}/app/cq-quickstart-6.2.0-standalone-quickstart.jar /tmp
fi
echo "."
echo "."
echo "Run AEM Upgrade for Author"
author_pid=$(upgrade_author)
echo "."
echo "."
echo "Sleep 10 minutes, as update may take around 10 minutes."
sleep 600
echo "Press enter to print last 50 lines of ${author_crx_workdir}/logs/upgrade.log"
read -p "Press enter to continue"
tail -n 50 ${author_crx_workdir}/logs/upgrade.log
echo "."
echo "."
echo "Please check logfile ${author_crx_workdir}/logs/upgrade.log if upgrade is finished."
read -p "If upgrade is finished press enter to continue"
echo "Stop Author instance"
kill ${author_pid}
echo "."
echo "."
java_run=true
wait_author_stopped
update_author_permission
echo "."
echo "."
echo "Upgrade for AEM Author is done going ahead with upgrading AEM Publish instance"
read -p "Press enter to continue"
echo "."
echo "."
echo "Unpack aem-publish-4503.jar"
cd ${publish_workdir}
java -Xmx4096m -jar aem-publish-4503.jar -unpack
echo "Unpack aem-publish-4503.jar done"
echo "."
echo "."
echo "Wait till unpacking of aem-publish-4503.jar is done."
echo "Please confirm to go to the next step 'repository upgrade'"
read -p "Press enter to continue"
echo "."
echo "."
update_publish_permission
echo "."
echo "."
echo "Remove old crx2oak jar file from ${publish_crx_workdir}/opt/extensions/"
rm -fv ${publish_crx_workdir}/opt/extensions/crx2oak*.jar
echo "."
echo "."
echo "Copy ${crx2oak_source} to ${publish_crx_workdir}/opt/extensions/crx2oak.jar"
aws s3 cp ${crx2oak_source} ${publish_crx_workdir}/opt/extensions/crx2oak.jar
echo "."
echo "."
echo "Run Repository migration for Publish"
cd ${publish_workdir}
java -Xmx4096m -jar aem-publish-4503.jar -v -x crx2oak -xargs -- --load-profile segment-no-ds
echo "."
echo "."
echo "Wait till repo migration for Publish is done."
update_publish_permission
echo "."
echo "."
echo "Output Last 50 lines of ${publish_crx_workdir}/logs/upgrade.log:"
tail -n 50 ${publish_crx_workdir}/logs/upgrade.log
echo "."
echo "."
echo "Please check logfile ${publish_crx_workdir}/logs/upgrade.log for errors in a different window"
echo "before confirming next step."
echo "Please confirm to go to the next step 'AEM Upgrade'"
read -p "Press enter to continue"
echo "."
echo "."
if [ -f ${publish_crx_workdir}/app/cq-quickstart-6.2.0-standalone-quickstart.jar ]; then
echo "Moving AEM 6.2 Quickstart file from ${publish_crx_workdir}/app/ to /tmp as it's not needed anymore."
mv ${publish_crx_workdir}/app/cq-quickstart-6.2.0-standalone-quickstart.jar /tmp
fi
echo "."
echo "."
echo "Run AEM Upgrade for Publish"
publish_pid=$(upgrade_publish)
echo "."
echo "."
echo "Sleep 10 minutes, as update may take around 10 minutes."
sleep 600
echo "Press enter to print last 50 lines of ${publish_crx_workdir}/logs/upgrade.log"
read -p "Press enter to continue"
tail -n 50 ${publish_crx_workdir}/logs/upgrade.log
echo "."
echo "."
echo "Please check logfile ${publish_crx_workdir}/logs/upgrade.log if upgrade is finished."
read -p "If upgrade is finished press enter to continue"
echo "Stop publish instance"
kill ${publish_pid}
echo "."
echo "."
java_run=true
wait_publish_stopped
update_publish_permission
echo "AEM Upgrade is done!"
echo "."
echo "."
if [ $enable_post_upgrade == 'True' ] || [ $enable_post_upgrade == 'true' ] ; then
echo "Starting Post Upgrade process"
read -p "press enter to continue"
echo "."
echo "."
start_aem_author
echo "."
echo "."
java_run=false
wait_author_started
echo "."
echo "."
start_aem_publish
echo "."
echo "."
java_run=false
wait_publish_started
echo "."
echo "."
echo "Please manually verify that AEM started successfully."
read -p "press enter to continue"
echo "."
echo "."
if [ $enable_stop_rewriter_bundle == 'True' ] || [ $enable_stop_rewriter_bundle == 'true' ] ; then
echo "Stopping bundle org.apache.sling.rewriter on Author."
curl -f -u admin:admin \
-F action=stop \
http://localhost:4502/system/console/bundles/org.apache.sling.rewriter
translate_exit_code "$?"
echo "Bundle org.apache.sling.rewriter stopped"
read -p "press enter to continue"
echo "."
echo "."
echo "Stopping bundle org.apache.sling.rewriter on Publish."
curl -f -u admin:admin \
-F action=stop \
http://localhost:4503/system/console/bundles/org.apache.sling.rewriter
translate_exit_code "$?"
echo "Bundle org.apache.sling.rewriter stopped"
read -p "press enter to continue"
echo "."
echo "."
fi
if [ $acs_bundles_netty_install == 'True' ] || [ $acs_bundles_netty_install == 'true' ] ; then
echo "Installing ACS Bundle netty"
${aemtools_workdir}/deploy-artifact.sh author "${acs_bundles_netty_source}" ${acs_bundles_netty_group} ${acs_bundles_netty_name} ${acs_bundles_netty_version} ${acs_bundles_netty_replicate} ${acs_bundles_netty_activate} ${acs_bundles_netty_force}
translate_exit_code "$?"
echo "."
echo "."
fi
if [ $acs_aem_tools_install == 'True' ] || [ $acs_aem_tools_install == 'true' ] ; then
echo "Installing ACS AEM Tools on Author"
${aemtools_workdir}/deploy-artifact.sh author "${acs_aem_tools_source}" ${acs_aem_tools_group} ${acs_aem_tools_name} ${acs_aem_tools_version} ${acs_aem_tools_replicate} ${acs_aem_tools_activate} ${acs_aem_tools_force}
translate_exit_code "$?"
echo "."
echo "."
fi
if [ $acs_aem_commons_install == 'True' ] || [ $acs_aem_commons_install == 'true' ] ; then
echo "Installing ACS AEM commons on Author"
${aemtools_workdir}/deploy-artifact.sh author "${acs_aem_commons_source}" ${acs_aem_commons_group} ${acs_aem_commons_name} ${acs_aem_commons_version} ${acs_aem_commons_replicate} ${acs_aem_commons_activate} ${acs_aem_commons_force}
translate_exit_code "$?"
echo "."
echo "."
echo "Installing ACS AEM commons on Publish"
${aemtools_workdir}/deploy-artifact.sh publish "${acs_aem_commons_source}" ${acs_aem_commons_group} ${acs_aem_commons_name} ${acs_aem_commons_version} ${acs_aem_commons_replicate} ${acs_aem_commons_activate} ${acs_aem_commons_force}
translate_exit_code "$?"
echo "."
echo "."
fi
read -p "press enter to stop Author & Publish instance."
echo "."
echo "."
stop_aem_author
echo "."
echo "."
java_run=true
wait_author_stopped
echo "."
echo "."
stop_aem_publish
echo "."
echo "."
java_run=true
wait_publish_stopped
echo "."
echo "."
echo "Post-upgrade steps are finished"
read -p "press enter to continue"
fi
if [ "${enable_offline_snapshot}" == 'True' ] || [ "${enable_offline_snapshot}" == 'true' ] ; then
read -p "Please press enter to run offline snapshot."
echo "."
echo "."
echo "Run offline snapshot"
${aemtools_workdir}/offline-snapshot-backup.sh
translate_exit_code "$?"
echo "Offline snapshot done."
fi
echo "."
echo "."
read -p "press enter to exit"
|
shinesolutions/aem-aws-stack-builder
|
scripts/repository-upgrade/aem62-to-aem64/upgrade_consolidated.sh
|
Shell
|
apache-2.0
| 31,020 |
#!/bin/bash
cp ./newlisp /usr/bin/
|
Yrp/mysql2hive
|
builder/armory/install/ubuntu/64bit/setup.sh
|
Shell
|
apache-2.0
| 35 |
#!/bin/bash
snap=../bin/snap
snap_opts="--histogram"
files="rtm.pre.raw snapshot-f.108.raw snapshot-f.109.raw"
echo "selecting snap : $snap"
for f in $files
do
prefix=`basename $f .raw`
echo -n "file $f ... "
$snap $snap_opts $f > ${prefix}.log
echo "done"
done
$snap $snap_opts snapshot-f.108.raw snapshot-f.109.raw > snapdiff.log
|
so07/snap
|
test/run_snap.sh
|
Shell
|
apache-2.0
| 353 |
#!/bin/bash
# install jnsaf and nativedroid
source `which virtualenvwrapper.sh`
install_nativedroid()
{
workon nativedroid
cd nativedroid
protoc nativedroid/protobuf/java_signatures.proto --python_out=.
protoc nativedroid/protobuf/taint_result.proto --python_out=.
protoc nativedroid/protobuf/summary.proto --python_out=.
python -m grpc_tools.protoc -I. --python_out=. --grpc_python_out=. nativedroid/protobuf/jnsaf_grpc.proto
python -m grpc_tools.protoc -I. --python_out=. --grpc_python_out=. nativedroid/protobuf/nativedroid_grpc.proto
python setup.py install
cd ..
}
install_jnsaf()
{
tools/bin/sbt clean assembly
}
MODE='ALL'
if [[ -n "$1" ]]; then
MODE=$1
fi
if [[ "$MODE" == "ALL" ]]; then
install_jnsaf
install_nativedroid
elif [[ "$MODE" == 'jnsaf' ]]; then
install_jnsaf
else
install_nativedroid
fi
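# Usage sketch: with no argument both components are built; a mode argument
# restricts the build.
# ./install.sh          # build jnsaf and nativedroid
# ./install.sh jnsaf    # build only jnsaf
# ./install.sh other    # any other value builds only nativedroid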
|
arguslab/Argus-SAF
|
tools/scripts/install.sh
|
Shell
|
apache-2.0
| 871 |
#!/bin/bash
source ./image_name.sh
CONTAINER_NAME=registry.ng.bluemix.net/wkulhanek/$IMAGE_NAME
# cf ic run -d -p 80:80 -p 22:22 -v 48in48:/persistent_data $CONTAINER_NAME
cf ic run -d -p 80:80 -p 22:22 $CONTAINER_NAME
|
wkulhanek/48in48
|
run_server_remote.sh
|
Shell
|
apache-2.0
| 221 |
#!/bin/sh
# Copyright 2012 Mohammed Alrokayan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# We run sync to have a proper OpenStack slave image
sync
|
alrokayan/hadoop-openstack-centos
|
06-conf-allVMs/05-sync.sh
|
Shell
|
apache-2.0
| 650 |
echo -e "\033[1;33m\033[40m \033[0m"
echo -e "\033[1;33m\033[40m Installing the Atom text editor \033[0m"
echo -e "\033[1;33m\033[40m \033[0m"
wget -O ~/Downloads/atom-amd64.deb https://atom.io/download/deb
sudo dpkg -i ~/Downloads/atom-amd64.deb
apm install atomatigit
# apm install language-diff
apm install language-matlab
apm install language-r
apm install merge-conflicts
apm install split-diff
apm install atom-alignment
apm upgrade --no-confirm
|
erikjandevries/ConfigLM17ML
|
prep-atom.sh
|
Shell
|
apache-2.0
| 546 |
#!/bin/bash
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
set -e
set -x
source tensorflow/tools/ci_build/release/common.sh
install_ubuntu_16_pip_deps pip2.7
# Update bazel
update_bazel_linux
# Run configure.
export TF_NEED_GCP=1
export TF_NEED_HDFS=1
export TF_NEED_S3=1
export TF_NEED_CUDA=1
export TF_CUDA_VERSION=10.1
export TF_CUDNN_VERSION=7
export TF_NEED_TENSORRT=1
export TENSORRT_INSTALL_PATH=/usr/local/tensorrt
export CC_OPT_FLAGS='-mavx'
export PYTHON_BIN_PATH=$(which python2.7)
export TF2_BEHAVIOR=1
export PROJECT_NAME="tensorflow_gpu"
export LD_LIBRARY_PATH="/usr/local/cuda:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64:$TENSORRT_INSTALL_PATH/lib"
export TF_CUDA_COMPUTE_CAPABILITIES=3.5,3.7,5.2,6.0,6.1,7.0
yes "" | "$PYTHON_BIN_PATH" configure.py
# Get the default test targets for bazel.
source tensorflow/tools/ci_build/build_scripts/PRESUBMIT_BUILD_TARGETS.sh
tag_filters="gpu,requires-gpu,-no_gpu,-nogpu,-no_oss,-oss_serial,-no_oss_py2"
bazel test --config=cuda --config=opt \
--crosstool_top=//third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda10.1:toolchain \
--linkopt=-lrt \
--action_env=TF2_BEHAVIOR="${TF2_BEHAVIOR}" \
--test_lang_filters=py \
--build_tag_filters="${tag_filters}" \
--test_tag_filters="${tag_filters}" \
--test_timeout="300,450,1200,3600" --local_test_jobs=4 \
--test_output=errors --verbose_failures=true --keep_going \
--run_under=//tensorflow/tools/ci_build/gpu_build:parallel_gpu_execute \
-- ${DEFAULT_BAZEL_TARGETS} -//tensorflow/lite/...
|
ppwwyyxx/tensorflow
|
tensorflow/tools/ci_build/release/ubuntu_16/gpu_py2_full/nonpip.sh
|
Shell
|
apache-2.0
| 2,195 |
#!/bin/bash
# Author: yeho <lj2007331 AT gmail.com>
# BLOG: https://linuxeye.com
#
# Notes: OneinStack for CentOS/RedHat 6+ Debian 7+ and Ubuntu 12+
#
# Project home page:
# https://oneinstack.com
# https://github.com/oneinstack/oneinstack
Install_PHP55() {
pushd ${oneinstack_dir}/src > /dev/null
if [ -e "${apache_install_dir}/bin/httpd" ];then
[ "$(${apache_install_dir}/bin/httpd -v | awk -F'.' /version/'{print $2}')" == '4' ] && Apache_main_ver=24
[ "$(${apache_install_dir}/bin/httpd -v | awk -F'.' /version/'{print $2}')" == '2' ] && Apache_main_ver=22
fi
if [ ! -e "/usr/local/lib/libiconv.la" ]; then
tar xzf libiconv-${libiconv_ver}.tar.gz
patch -d libiconv-${libiconv_ver} -p0 < libiconv-glibc-2.16.patch
pushd libiconv-${libiconv_ver} > /dev/null
./configure --prefix=/usr/local
make -j ${THREAD} && make install
popd > /dev/null
rm -rf libiconv-${libiconv_ver}
fi
if [ ! -e "${curl_install_dir}/lib/libcurl.la" ]; then
tar xzf curl-${curl_ver}.tar.gz
pushd curl-${curl_ver} > /dev/null
[ "${Debian_ver}" == '8' ] && apt-get -y remove zlib1g-dev
./configure --prefix=${curl_install_dir} --with-ssl=${openssl_install_dir}
make -j ${THREAD} && make install
[ "${Debian_ver}" == '8' ] && apt-get -y install libc-client2007e-dev libfreetype6-dev libglib2.0-dev libpng12-dev libssl-dev libzip-dev zlib1g-dev
popd > /dev/null
rm -rf curl-${curl_ver}
fi
if [ ! -e "/usr/local/lib/libmcrypt.la" ]; then
tar xzf libmcrypt-${libmcrypt_ver}.tar.gz
pushd libmcrypt-${libmcrypt_ver} > /dev/null
./configure
make -j ${THREAD} && make install
ldconfig
pushd libltdl > /dev/null
./configure --enable-ltdl-install
make -j ${THREAD} && make install
popd > /dev/null
popd > /dev/null
rm -rf libmcrypt-${libmcrypt_ver}
fi
if [ ! -e "/usr/local/lib/libmhash.la" ]; then
tar xzf mhash-${mhash_ver}.tar.gz
pushd mhash-${mhash_ver} > /dev/null
./configure
make -j ${THREAD} && make install
popd > /dev/null
rm -rf mhash-${mhash_ver}
fi
[ -z "`grep /usr/local/lib /etc/ld.so.conf.d/*.conf`" ] && echo '/usr/local/lib' > /etc/ld.so.conf.d/local.conf
ldconfig
if [ "${PM}" == 'yum' ]; then
ln -s /usr/local/bin/libmcrypt-config /usr/bin/libmcrypt-config
if [ "${OS_BIT}" == '64' ]; then
ln -s /lib64/libpcre.so.0.0.1 /lib64/libpcre.so.1
ln -s /usr/lib64/libc-client.so /usr/lib/libc-client.so
else
ln -s /lib/libpcre.so.0.0.1 /lib/libpcre.so.1
fi
fi
tar xzf mcrypt-${mcrypt_ver}.tar.gz
pushd mcrypt-${mcrypt_ver} > /dev/null
ldconfig
./configure
make -j ${THREAD} && make install
popd > /dev/null
rm -rf mcrypt-${mcrypt_ver}
id -u ${run_user} >/dev/null 2>&1
[ $? -ne 0 ] && useradd -M -s /sbin/nologin ${run_user}
tar xzf php-${php55_ver}.tar.gz
patch -d php-${php55_ver} -p0 < fpm-race-condition.patch
pushd php-${php55_ver} > /dev/null
[ ! -d "${php_install_dir}" ] && mkdir -p ${php_install_dir}
[ "${phpcache_option}" == '1' ] && phpcache_arg='--enable-opcache' || phpcache_arg='--disable-opcache'
if [ "${apache_option}" == '2' ] || [ "${Apache_main_ver}" == '22' ] || [ "${apache_mode_option}" == '2' ]; then
./configure --prefix=${php_install_dir} --with-config-file-path=${php_install_dir}/etc \
--with-config-file-scan-dir=${php_install_dir}/etc/php.d \
--with-apxs2=${apache_install_dir}/bin/apxs ${phpcache_arg} --disable-fileinfo \
--with-mysql=mysqlnd --with-mysqli=mysqlnd --with-pdo-mysql=mysqlnd \
--with-iconv-dir=/usr/local --with-freetype-dir --with-jpeg-dir --with-png-dir --with-zlib \
--with-libxml-dir=/usr --enable-xml --disable-rpath --enable-bcmath --enable-shmop --enable-exif \
--enable-sysvsem --enable-inline-optimization --with-curl=${curl_install_dir} --enable-mbregex \
--enable-mbstring --with-mcrypt --with-gd --enable-gd-native-ttf --with-openssl=${openssl_install_dir} \
--with-mhash --enable-pcntl --enable-sockets --with-xmlrpc --enable-ftp --enable-intl --with-xsl \
--with-gettext --enable-zip --enable-soap --disable-debug $php_modules_options
else
./configure --prefix=${php_install_dir} --with-config-file-path=${php_install_dir}/etc \
--with-config-file-scan-dir=${php_install_dir}/etc/php.d \
--with-fpm-user=${run_user} --with-fpm-group=${run_user} --enable-fpm ${phpcache_arg} --disable-fileinfo \
--with-mysql=mysqlnd --with-mysqli=mysqlnd --with-pdo-mysql=mysqlnd \
--with-iconv-dir=/usr/local --with-freetype-dir --with-jpeg-dir --with-png-dir --with-zlib \
--with-libxml-dir=/usr --enable-xml --disable-rpath --enable-bcmath --enable-shmop --enable-exif \
--enable-sysvsem --enable-inline-optimization --with-curl=${curl_install_dir} --enable-mbregex \
--enable-mbstring --with-mcrypt --with-gd --enable-gd-native-ttf --with-openssl=${openssl_install_dir} \
--with-mhash --enable-pcntl --enable-sockets --with-xmlrpc --enable-ftp --enable-intl --with-xsl \
--with-gettext --enable-zip --enable-soap --disable-debug $php_modules_options
fi
make ZEND_EXTRA_LIBS='-liconv' -j ${THREAD}
make install
if [ -e "${php_install_dir}/bin/phpize" ]; then
echo "${CSUCCESS}PHP installed successfully! ${CEND}"
else
rm -rf ${php_install_dir}
echo "${CFAILURE}PHP install failed, Please Contact the author! ${CEND}"
kill -9 $$
fi
[ -z "`grep ^'export PATH=' /etc/profile`" ] && echo "export PATH=${php_install_dir}/bin:\$PATH" >> /etc/profile
[ -n "`grep ^'export PATH=' /etc/profile`" -a -z "`grep ${php_install_dir} /etc/profile`" ] && sed -i "s@^export PATH=\(.*\)@export PATH=${php_install_dir}/bin:\1@" /etc/profile
. /etc/profile
# wget -c http://pear.php.net/go-pear.phar
# ${php_install_dir}/bin/php go-pear.phar
[ ! -e "${php_install_dir}/etc/php.d" ] && mkdir -p ${php_install_dir}/etc/php.d
/bin/cp php.ini-production ${php_install_dir}/etc/php.ini
sed -i "s@^memory_limit.*@memory_limit = ${Memory_limit}M@" ${php_install_dir}/etc/php.ini
sed -i 's@^output_buffering =@output_buffering = On\noutput_buffering =@' ${php_install_dir}/etc/php.ini
#sed -i 's@^;cgi.fix_pathinfo.*@cgi.fix_pathinfo=0@' ${php_install_dir}/etc/php.ini
sed -i 's@^short_open_tag = Off@short_open_tag = On@' ${php_install_dir}/etc/php.ini
sed -i 's@^expose_php = On@expose_php = Off@' ${php_install_dir}/etc/php.ini
sed -i 's@^request_order.*@request_order = "CGP"@' ${php_install_dir}/etc/php.ini
sed -i "s@^;date.timezone.*@date.timezone = ${timezone}@" ${php_install_dir}/etc/php.ini
sed -i 's@^post_max_size.*@post_max_size = 100M@' ${php_install_dir}/etc/php.ini
sed -i 's@^upload_max_filesize.*@upload_max_filesize = 50M@' ${php_install_dir}/etc/php.ini
sed -i 's@^max_execution_time.*@max_execution_time = 5@' ${php_install_dir}/etc/php.ini
sed -i 's@^disable_functions.*@disable_functions = passthru,exec,system,chroot,chgrp,chown,shell_exec,proc_open,proc_get_status,ini_alter,ini_restore,dl,openlog,syslog,readlink,symlink,popepassthru,stream_socket_server,fsocket,popen@' ${php_install_dir}/etc/php.ini
[ -e /usr/sbin/sendmail ] && sed -i 's@^;sendmail_path.*@sendmail_path = /usr/sbin/sendmail -t -i@' ${php_install_dir}/etc/php.ini
[ "${phpcache_option}" == '1' ] && cat > ${php_install_dir}/etc/php.d/02-opcache.ini << EOF
[opcache]
zend_extension=opcache.so
opcache.enable=1
opcache.memory_consumption=${Memory_limit}
opcache.interned_strings_buffer=8
opcache.max_accelerated_files=4000
opcache.revalidate_freq=60
;opcache.save_comments=0
opcache.fast_shutdown=1
opcache.enable_cli=1
;opcache.optimization_level=0
EOF
if [ ! -e "${apache_install_dir}/bin/apxs" -o "${Apache_main_ver}" == '24' ] && [ "${apache_mode_option}" != '2' ]; then
# php-fpm Init Script
if [ -e /bin/systemctl ]; then
/bin/cp ${oneinstack_dir}/init.d/php-fpm.service /lib/systemd/system/
sed -i "s@/usr/local/php@${php_install_dir}@g" /lib/systemd/system/php-fpm.service
systemctl enable php-fpm
else
/bin/cp sapi/fpm/init.d.php-fpm /etc/init.d/php-fpm
chmod +x /etc/init.d/php-fpm
[ "${PM}" == 'yum' ] && { chkconfig --add php-fpm; chkconfig php-fpm on; }
[ "${PM}" == 'apt-get' ] && update-rc.d php-fpm defaults
fi
cat > ${php_install_dir}/etc/php-fpm.conf <<EOF
;;;;;;;;;;;;;;;;;;;;;
; FPM Configuration ;
;;;;;;;;;;;;;;;;;;;;;
;;;;;;;;;;;;;;;;;;
; Global Options ;
;;;;;;;;;;;;;;;;;;
[global]
pid = run/php-fpm.pid
error_log = log/php-fpm.log
log_level = warning
emergency_restart_threshold = 30
emergency_restart_interval = 60s
process_control_timeout = 5s
daemonize = yes
;;;;;;;;;;;;;;;;;;;;
; Pool Definitions ;
;;;;;;;;;;;;;;;;;;;;
[${run_user}]
listen = /dev/shm/php-cgi.sock
listen.backlog = -1
listen.allowed_clients = 127.0.0.1
listen.owner = ${run_user}
listen.group = ${run_user}
listen.mode = 0666
user = ${run_user}
group = ${run_user}
pm = dynamic
pm.max_children = 12
pm.start_servers = 8
pm.min_spare_servers = 6
pm.max_spare_servers = 12
pm.max_requests = 2048
pm.process_idle_timeout = 10s
request_terminate_timeout = 120
request_slowlog_timeout = 0
pm.status_path = /php-fpm_status
slowlog = var/log/slow.log
rlimit_files = 51200
rlimit_core = 0
catch_workers_output = yes
;env[HOSTNAME] = $HOSTNAME
env[PATH] = /usr/local/bin:/usr/bin:/bin
env[TMP] = /tmp
env[TMPDIR] = /tmp
env[TEMP] = /tmp
EOF
if [ $Mem -le 3000 ]; then
sed -i "s@^pm.max_children.*@pm.max_children = $(($Mem/3/20))@" ${php_install_dir}/etc/php-fpm.conf
sed -i "s@^pm.start_servers.*@pm.start_servers = $(($Mem/3/30))@" ${php_install_dir}/etc/php-fpm.conf
sed -i "s@^pm.min_spare_servers.*@pm.min_spare_servers = $(($Mem/3/40))@" ${php_install_dir}/etc/php-fpm.conf
sed -i "s@^pm.max_spare_servers.*@pm.max_spare_servers = $(($Mem/3/20))@" ${php_install_dir}/etc/php-fpm.conf
elif [ $Mem -gt 3000 -a $Mem -le 4500 ]; then
sed -i "s@^pm.max_children.*@pm.max_children = 50@" ${php_install_dir}/etc/php-fpm.conf
sed -i "s@^pm.start_servers.*@pm.start_servers = 30@" ${php_install_dir}/etc/php-fpm.conf
sed -i "s@^pm.min_spare_servers.*@pm.min_spare_servers = 20@" ${php_install_dir}/etc/php-fpm.conf
sed -i "s@^pm.max_spare_servers.*@pm.max_spare_servers = 50@" ${php_install_dir}/etc/php-fpm.conf
elif [ $Mem -gt 4500 -a $Mem -le 6500 ]; then
sed -i "s@^pm.max_children.*@pm.max_children = 60@" ${php_install_dir}/etc/php-fpm.conf
sed -i "s@^pm.start_servers.*@pm.start_servers = 40@" ${php_install_dir}/etc/php-fpm.conf
sed -i "s@^pm.min_spare_servers.*@pm.min_spare_servers = 30@" ${php_install_dir}/etc/php-fpm.conf
sed -i "s@^pm.max_spare_servers.*@pm.max_spare_servers = 60@" ${php_install_dir}/etc/php-fpm.conf
elif [ $Mem -gt 6500 -a $Mem -le 8500 ]; then
sed -i "s@^pm.max_children.*@pm.max_children = 70@" ${php_install_dir}/etc/php-fpm.conf
sed -i "s@^pm.start_servers.*@pm.start_servers = 50@" ${php_install_dir}/etc/php-fpm.conf
sed -i "s@^pm.min_spare_servers.*@pm.min_spare_servers = 40@" ${php_install_dir}/etc/php-fpm.conf
sed -i "s@^pm.max_spare_servers.*@pm.max_spare_servers = 70@" ${php_install_dir}/etc/php-fpm.conf
elif [ $Mem -gt 8500 ]; then
sed -i "s@^pm.max_children.*@pm.max_children = 80@" ${php_install_dir}/etc/php-fpm.conf
sed -i "s@^pm.start_servers.*@pm.start_servers = 60@" ${php_install_dir}/etc/php-fpm.conf
sed -i "s@^pm.min_spare_servers.*@pm.min_spare_servers = 50@" ${php_install_dir}/etc/php-fpm.conf
sed -i "s@^pm.max_spare_servers.*@pm.max_spare_servers = 80@" ${php_install_dir}/etc/php-fpm.conf
fi
service php-fpm start
elif [ "${apache_option}" == '2' ] || [ "${Apache_main_ver}" == '22' ] || [ "${apache_mode_option}" == '2' ]; then
service httpd restart
fi
popd > /dev/null
[ -e "${php_install_dir}/bin/phpize" ] && rm -rf php-${php55_ver}
popd > /dev/null
}
|
lj2007331/lnmp
|
include/php-5.5.sh
|
Shell
|
apache-2.0
| 11,946 |
#!/bin/bash
Install_MySQL57() {
pushd ${asn_dir}/src
id -u mysql >/dev/null 2>&1
[ $? -ne 0 ] && useradd -M -s /sbin/nologin mysql
[ ! -d "${mysql_install_dir}" ] && mkdir -p ${mysql_install_dir}
mkdir -p ${mysql_data_dir};chown mysql.mysql -R ${mysql_data_dir}
if [ "${dbInstallMethods}" == "1" ]; then
tar xvf mysql-${mysql57_version}-linux-glibc2.5-${SYS_BIT_b}.tar.gz
mv mysql-${mysql57_version}-linux-glibc2.5-${SYS_BIT_b}/* ${mysql_install_dir}
sed -i 's@executing mysqld_safe@executing mysqld_safe\nexport LD_PRELOAD=/usr/local/lib/libjemalloc.so@' ${mysql_install_dir}/bin/mysqld_safe
sed -i "s@/usr/local/mysql@${mysql_install_dir}@g" ${mysql_install_dir}/bin/mysqld_safe
elif [ "${dbInstallMethods}" == "2" ]; then
tar xvf mysql-${mysql57_version}.tar.gz
pushd mysql-${mysql57_version}
cmake . -DCMAKE_INSTALL_PREFIX=${mysql_install_dir} \
-DMYSQL_DATADIR=${mysql_data_dir} \
-DSYSCONFDIR=/etc \
-DWITH_INNOBASE_STORAGE_ENGINE=1 \
-DWITH_PARTITION_STORAGE_ENGINE=1 \
-DWITH_FEDERATED_STORAGE_ENGINE=1 \
-DWITH_BLACKHOLE_STORAGE_ENGINE=1 \
-DWITH_MYISAM_STORAGE_ENGINE=1 \
-DWITH_EMBEDDED_SERVER=1 \
-DENABLE_DTRACE=0 \
-DENABLED_LOCAL_INFILE=1 \
-DDEFAULT_CHARSET=utf8mb4 \
-DDEFAULT_COLLATION=utf8mb4_general_ci \
-DEXTRA_CHARSETS=all \
-DCMAKE_EXE_LINKER_FLAGS='-ljemalloc'
make -j ${THREAD}
make install
popd
fi
if [ -d "${mysql_install_dir}/support-files" ]; then
echo "${CSUCCESS}MySQL installed successfully! ${CEND}"
if [ "${dbInstallMethods}" == "1" ]; then
rm -rf mysql-${mysql57_version}-*-${SYS_BIT_b}
elif [ "${dbInstallMethods}" == "2" ]; then
rm -rf mysql-${mysql57_version}
fi
else
rm -rf ${mysql_install_dir}
rm -rf mysql-${mysql57_version}
echo "${CFAILURE}MySQL install failed, Please contact the author! ${CEND}"
kill -9 $$
fi
/bin/cp ${mysql_install_dir}/support-files/mysql.server /etc/init.d/mysqld
sed -i "s@^basedir=.*@basedir=${mysql_install_dir}@" /etc/init.d/mysqld
sed -i "s@^datadir=.*@datadir=${mysql_data_dir}@" /etc/init.d/mysqld
chmod +x /etc/init.d/mysqld
[ "${OS}" == "CentOS" ] && { chkconfig --add mysqld; chkconfig mysqld on; }
[[ "${OS}" =~ ^Ubuntu$|^Debian$ ]] && update-rc.d mysqld defaults
popd
# my.cnf
cat > /etc/my.cnf << EOF
[client]
port = 3306
socket = /tmp/mysql.sock
default-character-set = utf8mb4
[mysql]
prompt="MySQL [\\d]> "
no-auto-rehash
[mysqld]
port = 3306
socket = /tmp/mysql.sock
basedir = ${mysql_install_dir}
datadir = ${mysql_data_dir}
pid-file = ${mysql_data_dir}/mysql.pid
user = mysql
bind-address = 0.0.0.0
server-id = 1
init-connect = 'SET NAMES utf8mb4'
character-set-server = utf8mb4
skip-name-resolve
#skip-networking
back_log = 300
max_connections = 1000
max_connect_errors = 6000
open_files_limit = 65535
table_open_cache = 128
max_allowed_packet = 500M
binlog_cache_size = 1M
max_heap_table_size = 8M
tmp_table_size = 16M
read_buffer_size = 2M
read_rnd_buffer_size = 8M
sort_buffer_size = 8M
join_buffer_size = 8M
key_buffer_size = 4M
thread_cache_size = 8
query_cache_type = 1
query_cache_size = 8M
query_cache_limit = 2M
ft_min_word_len = 4
log_bin = mysql-bin
binlog_format = mixed
expire_logs_days = 7
log_error = ${mysql_data_dir}/mysql-error.log
slow_query_log = 1
long_query_time = 1
slow_query_log_file = ${mysql_data_dir}/mysql-slow.log
performance_schema = 0
explicit_defaults_for_timestamp
#lower_case_table_names = 1
skip-external-locking
#default_storage_engine = InnoDB
default-storage-engine = MyISAM
innodb_file_per_table = 1
innodb_open_files = 500
innodb_buffer_pool_size = 64M
innodb_write_io_threads = 4
innodb_read_io_threads = 4
innodb_thread_concurrency = 0
innodb_purge_threads = 1
innodb_flush_log_at_trx_commit = 2
innodb_log_buffer_size = 2M
innodb_log_file_size = 32M
innodb_log_files_in_group = 3
innodb_max_dirty_pages_pct = 90
innodb_lock_wait_timeout = 120
bulk_insert_buffer_size = 8M
myisam_sort_buffer_size = 8M
myisam_max_sort_file_size = 10G
myisam_repair_threads = 1
interactive_timeout = 28800
wait_timeout = 28800
[mysqldump]
quick
max_allowed_packet = 500M
[myisamchk]
key_buffer_size = 8M
sort_buffer_size = 8M
read_buffer = 4M
write_buffer = 4M
EOF
sed -i "s@max_connections.*@max_connections = $((${Mem}/3))@" /etc/my.cnf
if [ ${Mem} -gt 1500 -a ${Mem} -le 2500 ]; then
sed -i 's@^thread_cache_size.*@thread_cache_size = 16@' /etc/my.cnf
sed -i 's@^query_cache_size.*@query_cache_size = 16M@' /etc/my.cnf
sed -i 's@^myisam_sort_buffer_size.*@myisam_sort_buffer_size = 16M@' /etc/my.cnf
sed -i 's@^key_buffer_size.*@key_buffer_size = 16M@' /etc/my.cnf
sed -i 's@^innodb_buffer_pool_size.*@innodb_buffer_pool_size = 128M@' /etc/my.cnf
sed -i 's@^tmp_table_size.*@tmp_table_size = 32M@' /etc/my.cnf
sed -i 's@^table_open_cache.*@table_open_cache = 256@' /etc/my.cnf
elif [ ${Mem} -gt 2500 -a ${Mem} -le 3500 ]; then
sed -i 's@^thread_cache_size.*@thread_cache_size = 32@' /etc/my.cnf
sed -i 's@^query_cache_size.*@query_cache_size = 32M@' /etc/my.cnf
sed -i 's@^myisam_sort_buffer_size.*@myisam_sort_buffer_size = 32M@' /etc/my.cnf
sed -i 's@^key_buffer_size.*@key_buffer_size = 64M@' /etc/my.cnf
sed -i 's@^innodb_buffer_pool_size.*@innodb_buffer_pool_size = 512M@' /etc/my.cnf
sed -i 's@^tmp_table_size.*@tmp_table_size = 64M@' /etc/my.cnf
sed -i 's@^table_open_cache.*@table_open_cache = 512@' /etc/my.cnf
elif [ ${Mem} -gt 3500 ]; then
sed -i 's@^thread_cache_size.*@thread_cache_size = 64@' /etc/my.cnf
sed -i 's@^query_cache_size.*@query_cache_size = 64M@' /etc/my.cnf
sed -i 's@^myisam_sort_buffer_size.*@myisam_sort_buffer_size = 64M@' /etc/my.cnf
sed -i 's@^key_buffer_size.*@key_buffer_size = 256M@' /etc/my.cnf
sed -i 's@^innodb_buffer_pool_size.*@innodb_buffer_pool_size = 1024M@' /etc/my.cnf
sed -i 's@^tmp_table_size.*@tmp_table_size = 128M@' /etc/my.cnf
sed -i 's@^table_open_cache.*@table_open_cache = 1024@' /etc/my.cnf
fi
${mysql_install_dir}/bin/mysqld --initialize-insecure --user=mysql --basedir=${mysql_install_dir} --datadir=${mysql_data_dir}
chown mysql.mysql -R ${mysql_data_dir}
[ -d "/etc/mysql" ] && /bin/mv /etc/mysql{,_bk}
service mysqld start
[ -z "$(grep ^'export PATH=' /etc/profile)" ] && echo "export PATH=${mysql_install_dir}/bin:\$PATH" >> /etc/profile
[ -n "$(grep ^'export PATH=' /etc/profile)" -a -z "$(grep ${mysql_install_dir} /etc/profile)" ] && sed -i "s@^export PATH=\(.*\)@export PATH=${mysql_install_dir}/bin:\1@" /etc/profile
. /etc/profile
${mysql_install_dir}/bin/mysql -e "grant all privileges on *.* to root@'127.0.0.1' identified by \"${dbrootpwd}\" with grant option;"
${mysql_install_dir}/bin/mysql -e "grant all privileges on *.* to root@'localhost' identified by \"${dbrootpwd}\" with grant option;"
${mysql_install_dir}/bin/mysql -uroot -p${dbrootpwd} -e "reset master;"
rm -rf /etc/ld.so.conf.d/{mysql,mariadb,percona,alisql}*.conf
[ -e "${mysql_install_dir}/my.cnf" ] && rm -rf ${mysql_install_dir}/my.cnf
echo "${mysql_install_dir}/lib" > /etc/ld.so.conf.d/mysql.conf
ldconfig
service mysqld stop
}
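# Example invocation (sketch; every value below is an assumption, normally
# supplied by the caller's config): the function reads these globals.
# asn_dir=/opt/asn mysql_install_dir=/usr/local/mysql mysql_data_dir=/data/mysql \
# mysql57_version=5.7.25 dbInstallMethods=1 THREAD=4 Mem=2048 SYS_BIT_b=x86_64 \
# OS=CentOS dbrootpwd=changeme Install_MySQL57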
|
asntechsolution/lamp-lemp
|
include/mysql-5.7.sh
|
Shell
|
apache-2.0
| 7,221 |
#!/bin/bash
#
# __mkaimal__
checktmux()
{
tmux list-session > /dev/null 2>&1
if [ $? -eq 0 ]
then
tsession_name=$(tmux list-session | awk 'NR==1{print $1}'| cut -d: -f1)
else
tsession_name=tmulti
tmux new-session -d -s tmulti
fi
}
startmux()
{
checktmux
if [ $# -eq 0 ]
then
echo "usage: $0 host [host ...]"
else
for h in "$@"
do
tmux split-window -t "$tsession_name" -h " ssh $h"
tmux select-layout -t "$tsession_name" tiled > /dev/null
done
tmux select-pane -t 0
tmux set-window-option -t "$tsession_name" synchronize-panes on > /dev/null
tmux attach-session -t "$tsession_name"
fi
}
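# Usage sketch: pass the hosts to ssh into; each host gets a pane in an
# existing or freshly created tmux session, with panes synchronized.
# ./tmulti.sh web1 web2 db1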
startmux "$@"
|
mkaimal/shell
|
bin/tmulti.sh
|
Shell
|
apache-2.0
| 551 |
#!/bin/bash
set -e
SCRIPT=$(readlink -f "$0")
SCRIPT_DIR=$(dirname "$SCRIPT")
ROOT_DIR=$(dirname "$SCRIPT_DIR")
cd $SCRIPT_DIR
cd ..
# We need to spawn and kill process and need to be root for this to work.
sudo -E su <<EOF
export GOOGLE_APPLICATION_CREDENTIALS="$KOKORO_KEYSTORE_DIR/73609_cloud-sharp-jenkins-compute-service-account"
git submodule init
git submodule update
./build-deps.sh
export LD_LIBRARY_PATH=$ROOT_DIR/third_party/coreclr/bin/Product/Linux.x64.Debug
sudo ldconfig
./build.sh
./run_integration_tests.sh
EOF
|
GoogleCloudPlatform/google-cloud-dotnet-debugger
|
.kokoro/run_integration_tests.sh
|
Shell
|
apache-2.0
| 536 |
#!/bin/bash
set -e
build_and_push() {
local slug="$1"
echo "* building $slug..."
docker build --pull --no-cache -t "$slug" .
echo "* pushing $slug..."
docker push "$slug"
}
main() {
local tag slug yn
tag="$(jq -r '.version' ./package.json)"
slug="terascope/teraslice-base:v$tag"
while true; do
read -p "Do you want to build and push \"$slug\"? " -r yn
case $yn in
[Yy]* ) build_and_push "$slug"; break;;
[Nn]* ) echo; echo "OK: Bump the package.json version in this directory"; exit;;
* ) echo "Please answer yes or no.";;
esac
done
}
main "$@"
|
jsnoble/teraslice
|
docker-base/build-and-push.sh
|
Shell
|
apache-2.0
| 651 |
#!/bin/bash
usage()
{
cat << EOF
usage: ${CALL_SIGNATURE:-$0} <options>
The script normalizes all files under a certain directory.
OPTIONS:
[-h]
this message
-t <target directory>
target directory
[-n]
add MOBBL namespace to output
EOF
}
TARGETDIR=
ADD_NS_ARG=
while getopts "ht:n" OPTION
do
case "$OPTION" in
h)
usage
exit 1
;;
n)
ADD_NS_ARG="-n"
;;
t)
TARGETDIR="$OPTARG"
;;
?)
usage
exit
;;
esac
done
if [ -z "$TARGETDIR" ]
then
usage
exit
fi
# look for all XML files in the TARGETDIR. Strip empty namespace declarations,
# add MOBBL namespace declarations if requested.
for FILE_PATH in `find "$TARGETDIR" -name '*.xml'`
do
mcutils normalize-config $ADD_NS_ARG -i "$FILE_PATH"
done
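# Example invocations (sketch):
# ./normalize-dir.sh -t path/to/configs       # normalize all XML files under the directory
# ./normalize-dir.sh -t path/to/configs -n    # also add the MOBBL namespace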
|
sven-m/thesis-specification
|
normalize-dir.sh
|
Shell
|
apache-2.0
| 798 |
#!/bin/bash -eux
set -e
cd $(dirname $0)
. ../../commands/common.sh
. ./do_common.sh
function filter() {
while read -r line
do
mc=$(echo $line | cut -d' ' -f1)
if echo $mc | grep "$(mc_name)$" &> /dev/null && echo $mc | grep -v 'template-' &> /dev/null
then
echo "$line"
fi
done
}
function convert() {
while read -r line
do
mc=$(echo $line | cut -d' ' -f1)
ip=$(echo $line | cut -d':' -f2 | cut -d',' -f1 | tr -d ' ')
while cfcli removerecord $mc | grep -v "Unable"
do
echo "Cleaning up $mc records"
done
cfcli addrecord -t A $mc $ip || cfcli editrecord -t A $mc $ip
done
}
$tugboat droplets | filter | convert
|
neilellis/easydeploy
|
providers/do/do_to_cf.sh
|
Shell
|
apache-2.0
| 695 |
TARGET_KEY="#network.host: localhost"
REPLACEMENT_VALUE="network.host: localhost"
CONFIG_FILE=/etc/elasticsearch/elasticsearch.yml
wget -O - http://packages.elasticsearch.org/GPG-KEY-elasticsearch | sudo apt-key add -
echo 'deb http://packages.elasticsearch.org/elasticsearch/1.4/debian stable main' | sudo tee /etc/apt/sources.list.d/elasticsearch.list
sudo apt-get update
sudo apt-get -y install python-numpy # requirements fail unless numpy is installed first
sudo apt-get -y install elasticsearch=1.4.4
sudo sed -i "s@^${TARGET_KEY}@${REPLACEMENT_VALUE}@" "$CONFIG_FILE"
sudo service elasticsearch restart
sudo update-rc.d elasticsearch defaults 95 10
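# Verification sketch (commented out; assumes Elasticsearch's default HTTP
# port 9200 on localhost):
# curl -s http://localhost:9200/   # returns cluster info JSON once the node is up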
|
Upande/MaMaSe
|
util/elastic.sh
|
Shell
|
apache-2.0
| 650 |
#!/bin/bash
#
# Deploy a jar, source jar, and javadoc jar to Sonatype's snapshot repo.
#
# Adapted from https://coderwall.com/p/9b_lfq and
# http://benlimmer.com/2013/12/26/automatically-publish-javadoc-to-gh-pages-with-travis-ci/
SLUG="stephanenicolas/toothpick"
JDK="oraclejdk8"
BRANCH="master"
set -e
if [ "$TRAVIS_REPO_SLUG" != "$SLUG" ]; then
echo "Skipping snapshot deployment: wrong repository. Expected '$SLUG' but was '$TRAVIS_REPO_SLUG'."
elif [ "$TRAVIS_JDK_VERSION" != "$JDK" ]; then
echo "Skipping snapshot deployment: wrong JDK. Expected '$JDK' but was '$TRAVIS_JDK_VERSION'."
elif [ "$TRAVIS_PULL_REQUEST" != "false" ]; then
echo "Skipping snapshot deployment: was pull request."
elif [ "$TRAVIS_BRANCH" != "$BRANCH" ]; then
echo "Skipping snapshot deployment: wrong branch. Expected '$BRANCH' but was '$TRAVIS_BRANCH'."
else
echo "Deploying snapshot..."
./gradlew uploadArchives
echo "Snapshot deployed!"
fi
|
stephanenicolas/toothpick
|
.buildscript/deploy_snapshot.sh
|
Shell
|
apache-2.0
| 941 |
#!/usr/bin/env bash
# Update repository to fetch latest OpenJDK
sudo add-apt-repository -y ppa:openjdk-r/ppa
sudo apt-get -y update
# Install required packages
sudo apt-get -y install openjdk-8-jdk maven
# Build the JobManager application
cd /vagrant/jobmanager
mvn clean package
# Updating hosts
echo "192.168.23.24 jobdb.dev" >> /etc/hosts
echo "192.168.33.12 kafka.dev" >> /etc/hosts
# Add an Upstart job to run our script upon machine boot
chmod 777 /vagrant/jobmanager/config/spring-start.sh
cp /vagrant/jobmanager/config/jobmanager.conf /etc/init/jobmanager.conf
# Run the JobManager application
cd /vagrant/jobmanager/config
./spring-start.sh
|
venicegeo/pz-jobmanager
|
config/jm-bootstrap.sh
|
Shell
|
apache-2.0
| 657 |
#!/bin/bash -x
oc process --local -o yaml --insecure-skip-tls-verify \
-n ${NAMESPACE} \
-f "${ADDITIONAL_YAML_PATH}" \
-p SAFE_BRANCH="${SAFE_BRANCH}" \
-p SAFE_PROJECT="${SAFE_PROJECT}" \
-p BRANCH="${BRANCH}" \
-p PROJECT="${PROJECT}" \
-p LAGOON_GIT_SHA="${LAGOON_GIT_SHA}" \
-p NAMESPACE=${NAMESPACE} \
| oc ${ADDITIONAL_YAML_COMMAND} --insecure-skip-tls-verify -n ${NAMESPACE} -f - || ${ADDITIONAL_YAML_IGNORE_ERROR}
|
amazeeio/lagoon
|
images/kubectl-build-deploy-dind/scripts/exec-additional-yaml.sh
|
Shell
|
apache-2.0
| 442 |
#!/bin/bash
for i in $(tugboat droplets | grep node | awk '{print $NF}' | sed 's/)//g'); do
echo tugboat destroy -c -i $i
done
|
cattleio/stampede
|
docs/do-demo/delete-all.sh
|
Shell
|
apache-2.0
| 132 |
# -*- bash -*-
# gshuf () {
# shuf "$@"
# }
# Uses a jq regex to select node/nodes
# function aal () {
# match="$1"
# # list instances for ssh
# jq --version >/dev/null 2>&1
# RV=$?
# if [ "$RV" != "0" ] ; then
# echo "The jq tool isn't present" 1>&2
# return 2
# fi
# raws "$match" ec2 describe-instances | jq "[.Reservations[].Instances | select(.[].Tags | length > 0) | select(.[].Tags[].Value | test(\"$match\") ) | select(.[].Tags[].Key == \"Name\") | {InstanceId: .[].InstanceId, PrivateIpAddress: .[].PrivateIpAddress, State: .[].State, LaunchTime: .[].LaunchTime, Tags: .[].Tags, AvailabilityZone: .[].Placement.AvailabilityZone, ImageId: .[].ImageId }]"
# }
function aaname () {
match="$1"
# Get the instance names from their "Name" tag
jq --version >/dev/null 2>&1
RV=$?
if [ "$RV" != "0" ] ; then
echo "The jq tool isn't present" 1>&2
return 2
fi
aal "$match" | jq -S '.[].Tags[] | select(.Key == "Name") |.Value'
}
function aag () {
target=$1
host=$(aal "$target" | jq -r ".[] | select(.Tags[].Key == \"Name\") | select (.Tags[].Value == \"$target\" ) | select (.State.Name == \"Running\") | .PrivateIpAddress")
# nonstrictssh -i $AMAZON_SSH_KEY_FILE -l ubuntu $host
nonstrictssh -l ubuntu $host
}
# function ashuf () {
# match=$1
# shift
# gshuf --version 2>&1 > /dev/null
# RV=$?
# if [ "$RV" != "0" ] ; then
# echo "The gshuf from gnu coreutils isn't present or active $(which gshuf)" 1>&2
# return 2
# fi
# host=$(aal "$match" | jq -r ".[] | select(.State.Name == \"running\") | select(.Tags[].Key == \"Name\") | select (.Tags[].Value | test(\"$match\") ) | .PrivateIpAddress" | gshuf -n 1)
# # nonstrictssh -i $AMAZON_SSH_KEY_FILE -l ubuntu $host "$@"
# nonstrictssh -l ubuntu $host "$@"
# }
function nonstrictssh () {
if [ "$AWSAM_ACTIVE_ACCOUNT" == "prod" ] ; then
AMAZON_SSH_KEY_FILE=~/.ssh/portal-keypair.pem
elif [ "$AWSAM_ACTIVE_ACCOUNT" == "staging" ] ; then
AMAZON_SSH_KEY_FILE=~/.ssh/portal-dev-keypair
# else
# echo "\$AMAZON_SSH_KEY_FILE isn't set" 1>&2
# return 2
fi
echo "$@"
# ssh -i $AMAZON_SSH_KEY_FILE \
# -o StrictHostKeyChecking=no \
# -o UserKnownHostsFile=/dev/null \
# "$@"
ssh -o StrictHostKeyChecking=no \
-o UserKnownHostsFile=/dev/null \
"$@"
}
function raws () {
match="$1"
shift 1
aws --version 2> /dev/null
RV=$?
if [ $RV != 0 ] ; then
echo "The aws tool isn't present or active $(which aws)" 1>&2
return 2
fi
echo "$match" | grep -q dogfood
RV=$?
if [ $RV -eq 0 ] ; then
aws --region us-west-2 "$@"
else
aws "$@"
fi
}
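# Usage sketch for raws: the first argument is a match string, the rest is
# passed straight to the aws CLI; a "dogfood" match pins the region.
# raws dogfood ec2 describe-instances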
# declare -x -f is a bash-ism
# declare -x -f gshuf # don't need this anymore, not on a mac
# declare -x -f aal
# declare -x -f aaname
# declare -x -f aag
# declare -x -f ashuf
declare -x -f nonstrictssh
declare -x -f raws
|
pcn/startup
|
linux/.bash.d/40_ssh_aliases.sh
|
Shell
|
apache-2.0
| 3,041 |
#!/bin/bash
# Contact Catalogues-DB
status_code=$(curl -s -o /dev/null -w "%{http_code}" http://sp.int3.sonata-nfv.eu:27017/)
if [[ $status_code != 20* ]] ;
then
echo "Error: Response error $status_code"
  exit 1
fi
echo "Success: Catalogues-DB found"
# Contact Gatekeeper
status_code=$(curl -s -o /dev/null -w "%{http_code}" http://sp.int3.sonata-nfv.eu:32001/api)
if [[ $status_code != 20* ]] ;
then
echo "Error: Response error $status_code"
  exit 1
fi
echo "Success: Gatekeeper found"
# Integration user checks
demo_reg_data() {
cat << EOF
{"username":"jenkins","password":"1234","user_type":"developer","email":"[email protected]"}
EOF
}
# echo "$(demo_reg_data)"
printf "\n\n======== POST Jenkins User (integration test user) Registration form to Gatekeeper ==\n\n\n"
resp=$(curl -qSfsw '\n%{http_code}' -H "Content-Type: application/json" \
-d "$(demo_reg_data)" \
-X POST http://sp.int3.sonata-nfv.eu:32001/api/v2/users)
echo $resp
username=$(echo $resp | grep "username")
code=$(echo "$resp" | tail -n1)
echo "Code: $code"
if [[ $code != 201 ]] ;
then
echo "Response $code"
fi
|
sonata-nfv/son-tests
|
int-gtkpkg-sp-catalogue/tests.sh
|
Shell
|
apache-2.0
| 1,126 |
#!/usr/bin/env bash
set -e
find . -name '*.pyc' -delete
find . -name "*.js" -not \( \
-path "./static/*" -o \
-path "*/bower_components/*" -o \
-path "./node_modules/*" -o \
-path "*/jquery.cookie.js" \) -print0 | xargs -0 jshint
flake8 .
coverage erase
coverage run manage.py test --keepdb "$@"
coverage report -m --fail-under 80
|
ejucovy/django-opendebates
|
run_tests.sh
|
Shell
|
apache-2.0
| 350 |
#!/bin/bash
set -ex
yum makecache
# Append hosts if they aren't already in there
if ! [[ $(< /etc/hosts) == *"$1"* ]]; then
echo "$1" >> /etc/hosts
fi
# enable EPEL and get sshpass if it's not already installed
if ! command -v sshpass > /dev/null; then
if ! yum list installed epel-release > /dev/null; then
curl -O 'http://dl.fedoraproject.org/pub/epel/7/x86_64/e/epel-release-7-5.noarch.rpm'
rpm -ivh epel-release-7-5.noarch.rpm
fi
yum install -y --enablerepo=epel sshpass
fi
# Install required packages if they aren't already present
for pkg in gcc python-virtualenv libselinux-python; do
yum list installed "$pkg" > /dev/null || yum install -y "$pkg"
done
pip --version > /dev/null || easy_install pip
pip install -r /vagrant/requirements.txt
cd /vagrant
chown -R vagrant:vagrant /vagrant
# security.yml and ssl/ are stored in a directory that is preserved across
# reboots/reloads/rsyncs
semi_permanent=/security-backup
mkdir -p "$semi_permanent"
if [ ! -f security.yml ] || [ ! -d ssl/ ]; then
# If there are backups, restore them here
if [ -f $semi_permanent/security.yml ] && [ -d $semi_permanent/ssl/ ]; then
cp $semi_permanent/security.yml .
cp -a $semi_permanent/ssl .
else
# Otherwise, create new ones and back them up
mkdir -p ssl/ # avoid an error in security-setup
./security-setup --enable=false
chown -R vagrant:vagrant "$PWD"
cp security.yml $semi_permanent
cp -a ssl $semi_permanent
fi
fi
|
ContainerSolutions/microservices-infrastructure
|
vagrant/provision.sh
|
Shell
|
apache-2.0
| 1,456 |
#!/bin/bash
#
# Copyright 2015 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Test runfiles creation
#
# Load test environment
source $(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/test-setup.sh \
|| { echo "test-setup.sh not found!" >&2; exit 1; }
# Make sure runfiles are created under a custom-named subdirectory when
# workspace() is specified in the WORKSPACE file.
function test_runfiles() {
name=blorp_malorp
cat > WORKSPACE <<EOF
workspace(name = "$name")
EOF
mkdir foo
cat > foo/BUILD <<EOF
java_test(
name = "foo",
srcs = ["Noise.java"],
main_class = "Noise",
)
EOF
cat > foo/Noise.java <<EOF
public class Noise {
public static void main(String[] args) {
System.err.println(System.getenv("I'm a test."));
}
}
EOF
bazel build //foo:foo >& $TEST_log || fail "Build failed"
[[ -d bazel-bin/foo/foo.runfiles/$name ]] || fail "$name runfiles directory not created"
[[ -d bazel-bin/foo/foo.runfiles/$name/foo ]] || fail "No foo subdirectory under $name"
[[ -x bazel-bin/foo/foo.runfiles/$name/foo/foo ]] || fail "No foo executable under $name"
}
run_suite "runfiles tests"
|
rohitsaboo/bazel
|
src/test/shell/bazel/runfiles_test.sh
|
Shell
|
apache-2.0
| 1,665 |
#!/bin/bash
bin=`dirname "$0"`
bin=`cd "$bin"; pwd`
DIR=`cd $bin/../; pwd`
. "${DIR}/../bin/config.sh"
. "${DIR}/bin/config.sh"
echo "========== preparing ${APP} data =========="
JAR="${DIR}/target/SVMApp-1.0.jar"
CLASS="SVM.src.main.scala.SVMDataGen"
OPTION=" ${APP_MASTER} ${INOUT_SCHEME}${INPUT_HDFS} ${NUM_OF_EXAMPLES} ${NUM_OF_FEATURES} ${NUM_OF_PARTITIONS} "
${RM} -r ${INPUT_HDFS}
#genOpt="large"
# paths check
setup
START_TS=`get_start_ts`;
if [ "$genOpt" = "large" ]; then
tmp_dir=${APP_DIR}/tmp
${RM} -r $tmp_dir
${MKDIR} ${APP_DIR}
${MKDIR} $tmp_dir
#srcf=${DATASET_DIR}/tmp-10k
srcf=${DATASET_DIR}/SVMdata.txt
${CPFROM} $srcf $tmp_dir
for((i=1; i<${DATA_COPIES}; i++)); do
${HADOOP_HOME}/bin/hdfs dfs -appendToFile $srcf ${INPUT_HDFS}/Auto.data 2> /dev/null
done
JAR="${DIR}/target/scala-2.10/svmapp_2.10-1.0.jar"
CLASS="src.main.scala.DocToTFIDF"
OPTION="${tmp_dir} ${INPUT_HDFS} ${NUM_OF_PARTITIONS} "
fi
START_TIME=`timestamp`
echo "${SPARK_HOME}/bin/spark-submit --class $CLASS --master ${APP_MASTER} ${YARN_OPT} ${SPARK_OPT} $JAR ${OPTION} 2>&1|tee ${BENCH_NUM}/SVM_gendata_${START_TS}.dat"
exec ${SPARK_HOME}/bin/spark-submit --class $CLASS --master ${APP_MASTER} ${YARN_OPT} ${SPARK_OPT} $JAR ${OPTION} 2>&1|tee ${BENCH_NUM}/SVM_gendata_${START_TS}.dat
res=$?;
END_TIME=`timestamp`
SIZE=`${DU} -s ${INPUT_HDFS} | awk '{ print $1 }'`
get_config_fields >> ${BENCH_REPORT}
print_config ${APP}-gen ${START_TIME} ${END_TIME} ${SIZE} ${START_TS} ${res}>> ${BENCH_REPORT};
teardown
exit 0
|
ibmsoe/spark-bench
|
SVM/bin/gen_data.sh
|
Shell
|
apache-2.0
| 1,531 |
#!/usr/bin/env bash
set -o nounset -o errexit
export PATH=/usr/sbin:/usr/bin:$PATH
IFNAME=$(ip route ls | grep -e 'default via' | awk '{ print $5 }')
echo $(ip addr show ${IFNAME} | grep -Eo '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' | head -1)
|
vitosans/ansible-dcos-aws-playbook
|
community/roles/dcos-bootstrap/files/ip-detect.sh
|
Shell
|
apache-2.0
| 253 |
#!/usr/bin/env bash
# see http://michal.karzynski.pl/blog/2015/04/19/packaging-django-applications-as-docker-container-images/
scripts/wait_for_it.sh db:5432 -- echo "db is up"
python manage.py makemigrations
python manage.py migrate
python manage.py collectstatic --noinput
touch .env
python scripts/setup_script.py
tail -F /tmp/debug.log &
exec gunicorn fit4school.wsgi:application \
--name fit4school \
--bind 0.0.0.0:8000 \
--workers 3 \
--reload \
--reload-extra-file .env \
--reload-extra-file fit4school \
--log-level=info \
"$@"
|
goodes/fit4school
|
start_server.sh
|
Shell
|
apache-2.0
| 572 |
#!/bin/sh
node_modules/mosca/bin/mosca -v --http-static . \
--http-port 8080 \
--parent-port $BROKER_PORT_1883_TCP_PORT \
--parent-host $BROKER_PORT_1883_TCP_ADDR \
| bunyan
|
Sanji-IO/sanji-mqtt-inspector
|
server.sh
|
Shell
|
apache-2.0
| 190 |
#!/bin/bash
# Install MariaDB Galera Cluster
#
# $1 - number of nodes; $2 - MySQL root password; $3 - IP prefix;
# $4 - debian-sys-maint password; $5 - cluster name;
#
NNODES=${1-1}
MYSQLPASSWORD=${2:-""}
IPPREFIX=${3:-"10.0.0."}
DEBPASSWORD=${4:-`date +%D%A%B | md5sum| sha256sum | base64| fold -w16| head -n1`}
IPLIST=""
MYIP=`ip route get ${IPPREFIX}70 | awk 'NR==1 {print $NF}'`
MYNAME=`echo "Node$MYIP" | sed "s/$IPPREFIX.1/-/"`
CNAME=${5:-"GaleraCluster"}
FIRSTNODE="${IPPREFIX}10"
for (( n=1; n<=$NNODES; n++ ))
do
  IPLIST+="${IPPREFIX}$(( $n + 9 ))"
  if [ "$n" -lt $NNODES ];
  then
    IPLIST+=","
  fi
done
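# Worked example of the loop above: with NNODES=3 and the default IPPREFIX
# of 10.0.0., node addresses start at .10, so IPLIST becomes
# "10.0.0.10,10.0.0.11,10.0.0.12".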
cd ~
apt-get update > /dev/null
apt-get install -f -y > /dev/null
# apt-get upgrade -f -y
#apt-get dist-upgrade -f -y
# dpkg --configure --force-confnew -a
apt-get install -y lsb-release bc > /dev/null
REL=`lsb_release -sc`
DISTRO=`lsb_release -is | tr '[:upper:]' '[:lower:]'`
# NCORES=` cat /proc/cpuinfo | grep cores | wc -l`
# WORKER=`bc -l <<< "4*$NCORES"`
apt-get install -y --fix-missing python-software-properties > /dev/null
apt-key adv --recv-keys --keyserver hkp://keyserver.ubuntu.com:80 0xcbcb082a1bb943db
add-apt-repository "deb http://mirror.edatel.net.co/mariadb/repo/10.1/$DISTRO $REL main"
apt-get update > /dev/null
DEBIAN_FRONTEND=noninteractive apt-get install -y rsync mariadb-server
# Remplace Debian maintenance config file
wget https://raw.githubusercontent.com/juliosene/azure-nginx-php-mariadb-cluster/master/files/debian.cnf > /dev/null
sed -i "s/#PASSWORD#/$DEBPASSWORD/g" debian.cnf
mv debian.cnf /etc/mysql/
mysql -u root <<EOF
GRANT ALL PRIVILEGES on *.* TO 'debian-sys-maint'@'localhost' IDENTIFIED BY '$DEBPASSWORD' WITH GRANT OPTION;
SET PASSWORD FOR 'root'@'localhost' = PASSWORD('$MYSQLPASSWORD');
CREATE USER 'root'@'%' IDENTIFIED BY '$MYSQLPASSWORD';
GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' WITH GRANT OPTION;
FLUSH PRIVILEGES;
EOF
# To create another MariaDB root user:
#CREATE USER '$MYSQLUSER'@'localhost' IDENTIFIED BY '$MYSQLUSERPASS';
#GRANT ALL PRIVILEGES ON *.* TO '$MYSQLUSER'@'localhost' WITH GRANT OPTION;
#CREATE USER '$MYSQLUSER'@'%' IDENTIFIED BY '$MYSQLUSERPASS';
#GRANT ALL PRIVILEGES ON *.* TO '$MYSQLUSER'@'%' WITH GRANT OPTION;
service mysql stop
# adjust my.cnf
# sed -i "s/#wsrep_on=ON/wsrep_on=ON/g" /etc/mysql/my.cnf
# create Galera config file
wget https://raw.githubusercontent.com/juliosene/azure-nginx-php-mariadb-cluster/master/files/cluster.cnf > /dev/null
sed -i "s/#wsrep_on=ON/wsrep_on=ON/g;s/IPLIST/$IPLIST/g;s/MYIP/$MYIP/g;s/MYNAME/$MYNAME/g;s/CLUSTERNAME/$CNAME/g" cluster.cnf
mv cluster.cnf /etc/mysql/conf.d/
# Create a raid
wget https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/shared_scripts/ubuntu/vm-disk-utils-0.1.sh
bash vm-disk-utils-0.1.sh -s
mkdir /datadisks/disk1/data
cp -R -p /var/lib/mysql /datadisks/disk1/data/
sed -i "s,/var/lib/mysql,/datadisks/disk1/data/mysql,g" /etc/mysql/my.cnf
# Bootstrap the cluster if this is the first node
if [ "$FIRSTNODE" = "$MYIP" ];
then
service mysql start --wsrep-new-cluster > /dev/null
else
service mysql start > /dev/null
fi
# To check cluster use the command below
# mysql -u root -p
# mysql> SELECT VARIABLE_VALUE as "cluster size" FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME="wsrep_cluster_size";
# mysql> EXIT;
#
# To add a new cluster node:
# 1 - stop MariaDB
# service mysql stop
# 2 - start as a new node
# service mysql start --wsrep_cluster_address=gcomm://10.0.0.10
|
juliosene/azure-nginx-php-mariadb-cluster
|
mariadb-galera-install.sh
|
Shell
|
apache-2.0
| 3,453 |
#!/bin/bash
set -e
export PROJECT="kubeflow-ci"
export GCP_ZONE="us-central1-a"
export GCP_USER="$(gcloud config list account --format "value(core.account)" )"
export GCP_PROJECT="$(gcloud config list project --format "value(core.project)" )"
export CLUSTER_NAME="kfctl-arr-${REPO_NAME}-${BUILD_ID}"
export CLUSTER_VERSION="$(gcloud container get-server-config --zone=${GCP_ZONE} --format="value(validMasterVersions[0])" )"
############################
# Create and setup cluster #
############################
gcloud container clusters create "${CLUSTER_NAME}" \
--project "${GCP_PROJECT}" \
--zone "${GCP_ZONE}" \
--username "admin" \
--cluster-version "${CLUSTER_VERSION}" \
--machine-type "custom-6-23040" --num-nodes "1" \
--image-type "UBUNTU" \
--local-ssd-count=4 \
--disk-type "pd-ssd" --disk-size "50" \
--no-enable-cloud-logging --no-enable-cloud-monitoring \
--no-enable-ip-alias \
--enable-network-policy \
--enable-autoupgrade --enable-autorepair
echo "Getting credentials for newly created cluster..."
gcloud container clusters get-credentials "${CLUSTER_NAME}" --zone="${GCP_ZONE}"
echo "Setting up GKE RBAC..."
kubectl create clusterrolebinding cluster-admin-binding --clusterrole=cluster-admin --user="${GCP_USER}"
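# Teardown sketch (commented out; mirrors the create call above):
# gcloud container clusters delete "${CLUSTER_NAME}" --zone="${GCP_ZONE}" --quiet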
|
kubeflow/kubeflow
|
testing/kfctl/scripts/create_existing_cluster.sh
|
Shell
|
apache-2.0
| 1,238 |
#!/bin/bash
radia_main() {
# needed for fftw and uti_*.py
codes_dependencies srw
codes_download ochubar/Radia
# committed *.so files are not so good.
find . -name \*.so -o -name \*.a -o -name \*.pyd -exec rm {} \;
rm -rf ext_lib
perl -pi - cpp/py/setup.py <<'EOF'
s/\bfftw/sfftw/;
s/mpi_cxx/mpicxx/;
s{/usr/lib/openmpi/lib}{/usr/lib64/mpich/lib}g;
s{/usr/lib/openmpi/include}{/usr/include/mpich-x86_64}g;
EOF
perl -pi -e '
s/-lfftw/-lsfftw/;
s/\bcc\b/mpicc/;
s/\bc\+\+/mpicxx/;
# The MODE flag hardwires includes incorrectly
s/^(LIBCFLAGS\s*=)/$1 -D_WITH_MPI /;
' cpp/gcc/Makefile
cd cpp/gcc
make "-j$(codes_num_cores)" lib
}
radia_python_install() {
cd Radia/cpp/py
MODE=mpi python setup.py build_ext
    codes_python_lib_copy "$(find . -name 'radia*.so')"
}
|
radiasoft/download
|
installers/rpm-code/codes/radia.sh
|
Shell
|
apache-2.0
| 888 |
#!/bin/bash
if [ ! -e lib/anna-3.3.jar ];then
wget -P lib/ https://mate-tools.googlecode.com/files/anna-3.3.jar
fi
if [ ! -e lib/minorthird.jar ];then
wget -P lib/ http://sourceforge.net/projects/minorthird/files/MinorThird%20Jar/minorthird-jar_20080611/minorthird_20080611.jar/download
cd lib
mv download minorthird.jar
cd ..
fi
if [ ! -e GeoNames/cities1000.zip ] ;then
wget -P GeoNames/ http://download.geonames.org/export/dump/cities1000.zip
fi
if [ ! -e GeoNames/allCountries.zip ] ;then
wget -P GeoNames/ http://download.geonames.org/export/dump/allCountries.zip
fi
if [ ! -e GeoNames/admin2Codes.txt ] ;then
wget -P GeoNames/ http://download.geonames.org/export/dump/admin2Codes.txt
fi
cd GeoNames
unzip cities1000.zip
unzip allCountries.zip
rm cities1000.zip
rm allCountries.zip
cd ..
java -jar GeoNames/indexer.jar -index -write GeoNames/allCountries.txt GazIndex/
|
weizh/geolocator-1.0
|
geo-locator/install.sh
|
Shell
|
apache-2.0
| 882 |
#!/bin/sh
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
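# tmpwatch removes files that have not been accessed for the given number of
# hours: 24 hours for scratch space, 336 hours (14 days) for the caches.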
/usr/sbin/tmpwatch 24 /srv/conary/tmp/
/usr/sbin/tmpwatch 336 /srv/conary/cscache/
if [ -e /srv/conary/proxycontents ] ; then
/usr/sbin/tmpwatch 336 /srv/conary/proxycontents
fi
tmpwatch \
--exclude=/srv/rbuilder/tmp/client_temp \
--exclude=/srv/rbuilder/tmp/fastcgi_temp \
--exclude=/srv/rbuilder/tmp/proxy_temp \
--exclude=/srv/rbuilder/tmp/scgi_temp \
--exclude=/srv/rbuilder/tmp/uwsgi_temp \
24 /srv/rbuilder/tmp
|
sassoftware/rbm
|
distro/cleanup.sh
|
Shell
|
apache-2.0
| 1,044 |
echo "---------------------------------------------------"
echo "---------------------TEST--------------------------"
./buffer ../test/test1
echo "---------------------------------------------------"
./buffer ../test/test2
echo "---------------------------------------------------"
./buffer ../test/test3
echo "---------------------------------------------------"
./buffer ../test/test4
echo "---------------------------------------------------"
./buffer ../test/test5
echo "---------------------------------------------------"
echo "-------------------WORK FILE-----------------------"
./buffer ../../project/workfile_r1
echo "---------------------------------------------------"
./buffer ../../project/workfile_r2
echo "---------------------------------------------------"
./buffer ../../project/workfile_r3
echo "---------------------------------------------------"
./buffer ../../project/workfile_r4
echo "---------------------------------------------------"
./buffer ../../project/workfile_r5
echo "---------------------------------------------------"
|
sharan-wisc/756-project
|
van_ginneken/src/complete.sh
|
Shell
|
apache-2.0
| 1,057 |
#!/bin/bash
set -eu
function usage {
echo "Usage: $0 [OPTION]..."
echo "Run Glance's test suite(s)"
echo ""
echo " -V, --virtual-env Always use virtualenv. Install automatically if not present"
echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment"
echo " -s, --no-site-packages Isolate the virtualenv from the global Python environment"
echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added."
echo " -u, --update Update the virtual environment with any newer package versions"
echo " -p, --pep8 Just run PEP8 and HACKING compliance check"
echo " -8, --pep8-only-changed Just run PEP8 and HACKING compliance check on files changed since HEAD~1"
echo " -P, --no-pep8 Don't run static code checks"
echo " -c, --coverage Generate coverage report"
echo " -d, --debug Run tests with testtools instead of testr. This allows you to use the debugger."
echo " -h, --help Print this usage message"
echo " --virtual-env-path <path> Location of the virtualenv directory"
echo " Default: \$(pwd)"
echo " --virtual-env-name <name> Name of the virtualenv directory"
echo " Default: .venv"
echo " --tools-path <dir> Location of the tools directory"
echo " Default: \$(pwd)"
echo " --concurrency <concurrency> How many processes to use when running the tests. A value of 0 autodetects concurrency from your CPU count"
echo " Default: 0"
echo ""
echo "Note: with no options specified, the script will try to run the tests in a virtual environment,"
echo " If no virtualenv is found, the script will ask if you would like to create one. If you "
echo " prefer to run tests NOT in a virtual environment, simply pass the -N option."
exit
}
function process_options {
i=1
while [ $i -le $# ]; do
case "${!i}" in
-h|--help) usage;;
-V|--virtual-env) always_venv=1; never_venv=0;;
-N|--no-virtual-env) always_venv=0; never_venv=1;;
-s|--no-site-packages) no_site_packages=1;;
-f|--force) force=1;;
-u|--update) update=1;;
-p|--pep8) just_pep8=1;;
-8|--pep8-only-changed) just_pep8_changed=1;;
-P|--no-pep8) no_pep8=1;;
-c|--coverage) coverage=1;;
-d|--debug) debug=1;;
--virtual-env-path)
(( i++ ))
venv_path=${!i}
;;
--virtual-env-name)
(( i++ ))
venv_dir=${!i}
;;
--tools-path)
(( i++ ))
tools_path=${!i}
;;
--concurrency)
(( i++ ))
concurrency=${!i}
;;
-*) testropts="$testropts ${!i}";;
*) testrargs="$testrargs ${!i}"
esac
(( i++ ))
done
}
tools_path=${tools_path:-$(pwd)}
venv_path=${venv_path:-$(pwd)}
venv_dir=${venv_name:-.venv}
with_venv=tools/with_venv.sh
always_venv=0
never_venv=0
force=0
no_site_packages=0
installvenvopts=
testrargs=
testropts=
wrapper=""
just_pep8=0
just_pep8_changed=0
no_pep8=0
coverage=0
debug=0
update=0
concurrency=0
LANG=en_US.UTF-8
LANGUAGE=en_US:en
LC_ALL=C
process_options "$@"
# Make our paths available to other scripts we call
export venv_path
export venv_dir
export venv_name
export tools_path
export venv=${venv_path}/${venv_dir}
if [ $no_site_packages -eq 1 ]; then
installvenvopts="--no-site-packages"
fi
function run_tests {
# Cleanup *pyc
${wrapper} find . -type f -name "*.pyc" -delete
if [ $debug -eq 1 ]; then
if [ "$testropts" = "" ] && [ "$testrargs" = "" ]; then
# Default to running all tests if specific test is not
# provided.
testrargs="discover ./kiloeyes/tests"
fi
${wrapper} python -m testtools.run $testropts $testrargs
# Short circuit because all of the testr and coverage stuff
# below does not make sense when running testtools.run for
# debugging purposes.
return $?
fi
if [ $coverage -eq 1 ]; then
TESTRTESTS="$TESTRTESTS --coverage"
else
TESTRTESTS="$TESTRTESTS"
fi
# Just run the test suites in current environment
set +e
testrargs=`echo "$testrargs" | sed -e's/^\s*\(.*\)\s*$/\1/'`
TESTRTESTS="$TESTRTESTS --testr-args='--subunit --concurrency $concurrency $testropts $testrargs'"
if [ setup.cfg -nt kiloeyes.egg-info/entry_points.txt ]
then
${wrapper} python setup.py egg_info
fi
echo "Running \`${wrapper} $TESTRTESTS\`"
if ${wrapper} which subunit-2to1 2>&1 > /dev/null
then
# subunit-2to1 is present, testr subunit stream should be in version 2
# format. Convert to version one before colorizing.
bash -c "${wrapper} $TESTRTESTS | ${wrapper} subunit-2to1 | ${wrapper} tools/colorizer.py"
else
bash -c "${wrapper} $TESTRTESTS | ${wrapper} tools/colorizer.py"
fi
RESULT=$?
set -e
copy_subunit_log
if [ $coverage -eq 1 ]; then
echo "Generating coverage report in covhtml/"
# Don't compute coverage for common code, which is tested elsewhere
${wrapper} coverage combine
${wrapper} coverage html --include='kiloeyes/*' --omit='kiloeyes/openstack/common/*' -d covhtml -i
fi
return $RESULT
}
function copy_subunit_log {
LOGNAME=`cat .testrepository/next-stream`
LOGNAME=$(($LOGNAME - 1))
LOGNAME=".testrepository/${LOGNAME}"
cp $LOGNAME subunit.log
}
function warn_on_flake8_without_venv {
if [ $never_venv -eq 1 ]; then
echo "**WARNING**:"
echo "Running flake8 without virtual env may miss OpenStack HACKING detection"
fi
}
function run_pep8 {
echo "Running flake8 ..."
warn_on_flake8_without_venv
bash -c "${wrapper} flake8"
}
TESTRTESTS="lockutils-wrapper python setup.py testr"
if [ $never_venv -eq 0 ]
then
# Remove the virtual environment if --force used
if [ $force -eq 1 ]; then
echo "Cleaning virtualenv..."
rm -rf ${venv}
fi
if [ $update -eq 1 ]; then
echo "Updating virtualenv..."
python tools/install_venv.py $installvenvopts
fi
if [ -e ${venv} ]; then
wrapper="${with_venv}"
else
if [ $always_venv -eq 1 ]; then
# Automatically install the virtualenv
python tools/install_venv.py $installvenvopts
wrapper="${with_venv}"
else
echo -e "No virtual environment found...create one? (Y/n) \c"
read use_ve
if [ "x$use_ve" = "xY" -o "x$use_ve" = "x" -o "x$use_ve" = "xy" ]; then
# Install the virtualenv and run the test suite in it
python tools/install_venv.py $installvenvopts
wrapper=${with_venv}
fi
fi
fi
fi
# Delete old coverage data from previous runs
if [ $coverage -eq 1 ]; then
${wrapper} coverage erase
fi
if [ $just_pep8 -eq 1 ]; then
run_pep8
exit
fi
if [ $just_pep8_changed -eq 1 ]; then
# NOTE(gilliard) We want use flake8 to check the entirety of every file that has
# a change in it. Unfortunately the --filenames argument to flake8 only accepts
# file *names* and there are no files named (eg) "nova/compute/manager.py". The
# --diff argument behaves surprisingly as well, because although you feed it a
# diff, it actually checks the file on disk anyway.
files=$(git diff --name-only HEAD~1 | tr '\n' ' ')
echo "Running flake8 on ${files}"
warn_on_flake8_without_venv
bash -c "diff -u --from-file /dev/null ${files} | ${wrapper} flake8 --diff"
exit
fi
run_tests
# NOTE(sirp): we only want to run pep8 when we're running the full-test suite,
# not when we're running tests individually. To handle this, we need to
# distinguish between options (testropts), which begin with a '-', and
# arguments (testrargs).
if [ -z "$testrargs" ]; then
if [ $no_pep8 -eq 0 ]; then
run_pep8
fi
fi
|
litong01/python-monasca
|
run_tests.sh
|
Shell
|
apache-2.0
| 7,868 |
#!/bin/bash
docker rm -f fglab fglab-mongo
docker network rm fglab
IFACE="br0"
ALLOWED_SUBNET=$(ip addr show dev $IFACE | grep -oP '\d+\.\d+\.\d+\.\d+/\d+')
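# Remove (-D) the DOCKER-chain rule that restricted port 5080 to the local
# subnet; the matching insert rule is presumably added by the start script.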
sudo iptables -D DOCKER -i $IFACE -p tcp --dport 5080 ! -s $ALLOWED_SUBNET -j DROP
|
windj007/fglab-scripts
|
stop_fglab.sh
|
Shell
|
apache-2.0
| 242 |
#!/bin/bash -e
aws ecr get-login --no-include-email | bash
#docker push 954636985443.dkr.ecr.us-west-2.amazonaws.com/pipelineai/predict-gpu:1.5.0
#docker push 954636985443.dkr.ecr.us-west-2.amazonaws.com/pipelineai/predict-cpu:1.5.0
docker push pipelineai/predict-gpu:1.5.0
docker push pipelineai/predict-cpu:1.5.0
#docker push gcr.io/flux-capacitor1/pipelineai/predict-gpu:1.5.0
#docker push gcr.io/flux-capacitor1/pipelineai/predict-cpu:1.5.0
#docker push pipelineai.azurecr.io/pipelineai/predict-gpu:1.5.0
#docker push pipelineai.azurecr.io/pipelineai/predict-cpu:1.5.0
#docker push 954636985443.dkr.ecr.us-west-2.amazonaws.com/pipelineai/predict-python-gpu:1.5.0
#docker push 954636985443.dkr.ecr.us-west-2.amazonaws.com/pipelineai/predict-python-cpu:1.5.0
#docker push pipelineai/predict-python-gpu:1.5.0
#docker push pipelineai/predict-python-cpu:1.5.0
|
fluxcapacitor/pipeline
|
predict/push.sh
|
Shell
|
apache-2.0
| 866 |
#!/usr/bin/env bash
# make sure we're up to date
sudo apt-get update -qq
# install go get dependencies
sudo apt-get install -qq mercurial
# install backend dependencies
sudo add-apt-repository -y ppa:fkrull/deadsnakes
sudo apt-get update -qq
sudo apt-get install -qq libonig-dev python3.4 python3.4-dev
# install qml frontend dependencies
sudo add-apt-repository -y ppa:ubuntu-sdk-team/ppa
sudo apt-get update -qq
sudo apt-get install -qq qtbase5-private-dev qtdeclarative5-private-dev
|
crr0004/lime
|
tasks/ci/install.sh
|
Shell
|
bsd-2-clause
| 490 |
#!/bin/bash
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
JIRA_URL=http://127.0.0.1:2990/jira/secure/Dashboard.jspa
cd "$DIR"
rm jira.log
atlas-run-standalone --product jira --http-port 2990 -B -nsu -o --threads 2.0C </dev/zero >jira.log 2>&1 &
printf "Waiting for JIRA to start respinding on $JIRA_URL "
until $(curl --output /dev/null --silent --head --fail $JIRA_URL); do
printf '.'
sleep 5
done
|
stevencarey/jira
|
tests/start-jira.sh
|
Shell
|
bsd-2-clause
| 418 |
#!/bin/sh
#
# @(#) $Id$
#
# Creates a kext symbol file for debugging
#
# Created for the ext2fsx project: http://sourceforge.net/projects/ext2fsx/
#
# Copyright 2004 Brian Bergstrand.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of
# conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of
# conditions and the following disclaimer in the documentation and/or other materials provided
# with the distribution.
# 3. The name of the author may not be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
PATH=/usr/bin:/bin:/sbin:/usr/sbin
KSYM_CREATE=kextload
KSYM_KERNEL=/mach_kernel
KSYM_BUNDLEID=
KSYM_LOADADDR=
KSYM_MODULE=
usage() {
echo "Usage:"
echo "$0 [-k kernel path] -b bundle id -a load address -m kext name"
exit 22
}
#Process args
if [ $# -eq 0 ]; then
usage
fi
while : ; do
case "${1}" in
-a )
KSYM_LOADADDR=${2}
shift
shift # once more for the address
;;
-b )
KSYM_BUNDLEID=${2}
shift
shift # once more for the path
;;
-k )
KSYM_KERNEL=${2}
shift
shift # once more for the path
;;
-m )
KSYM_MODULE=${2}
shift
shift # once more for the path
;;
* )
break
;;
esac
done
if [ -z "${KSYM_BUNDLEID}" ] || [ -z "${KSYM_LOADADDR}" ] || [ -z "${KSYM_MODULE}" ]; then
usage
fi
if [ ! -f "${KSYM_KERNEL}" ]; then
echo "kernel file '${KSYM_KERNEL}' does not exist"
exit 2
fi
if [ ! -d "./${KSYM_MODULE}" ]; then
echo "kext bundle '${KSYM_MODULE}' does not exist in the current directory"
exit 2
fi
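# Generate symbols without loading, per the classic kextload options:
# -n (don't load), -z (skip authentication), -s . (write the symbol file
# here), -r . (use the current directory as a kext repository),
# -a id@addr (the load address supplied on the command line).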
${KSYM_CREATE} -c -k "${KSYM_KERNEL}" -n -r . -s . -z -a ${KSYM_BUNDLEID}@${KSYM_LOADADDR} \
"${KSYM_MODULE}"
|
georghe-crihan/ext2fsx
|
test/mkksym.sh
|
Shell
|
bsd-2-clause
| 2,658 |
#RUBY AND PYTHON
export PATH=~/anaconda/bin:$HOME/.rvm/bin:/usr/local/bin:$PATH
export PATH=~/.local/bin:$PATH
# The rvm script below has no effect under zsh, so add the ruby path manually
export PATH=/Users/lhr/.rvm/rubies/ruby-2.2.3/bin:$PATH
[[ -s "$HOME/.rvm/scripts/rvm" ]] && source "$HOME/.rvm/scripts/rvm" # Load RVM into a shell session *as a function*
# ON GOING PROJECT
# export PATH=/Users/lhr/bak/lhrkits/labkit/bin:$PATH
# export PYTHONPATH=/Users/lhr/_action/\@vagrant/cluster/node1/labkit:$PYTHONPATH
# Load all kit directories under lhrkits
source $HOME/lhrkits/init_kits.sh
#ANSIBLE
# export ANSIBLE_INVENTORY=~/_env/ansible/hosts
# export ANSIBLE_ROLES=~/_env/ansible/centos/roles
|
lhrkkk/dotfiles_old_2015_11_01
|
pubrc/mypath.sh
|
Shell
|
bsd-2-clause
| 662 |
#!/bin/sh
# external mounter script sample, e.g /root/ntfs-3g.sh
# fstab line:
# /root/ntfs-3g.sh /usr/home/web/downloads external rw 0 0
#
usage()
{
printf "[jail] external mounter script sample\n"
printf " -j jname (optional)n"
printf " -o options\n"
printf " -p full (hoster) path for mountpoint in jail\n"
exit 0
}
[ -z "${1}" -o "${1}" = "--help" ] && usage
# MAIN
while getopts "j:o:p:" opt; do
case "${opt}" in
j) jname="${OPTARG}" ;;
o) options="${OPTARG}" ;;
p) path="${OPTARG}" ;;
esac
shift $(($OPTIND - 1))
done
/usr/local/bin/ntfs-3g -o ${options} /dev/da0p1 ${path}
exit $?
|
cbsd/cbsd
|
share/examples/jails-fstab/ntfs-3g.sh
|
Shell
|
bsd-2-clause
| 612 |
#!/bin/sh
CLASSPATH=~/Sandbox/appengine-java-sdk-1.8.0/lib/user/appengine-api-1.0-sdk-1.8.0.jar
CLASSPATH=$CLASSPATH:~/Applications/gaeshell.jar
CLASSPATH=$CLASSPATH:~/Sandbox/appengine-java-sdk-1.8.0/lib/appengine-remote-api.jar
java -cp $CLASSPATH com.inpun.alt.gaecl.AppEngineShell $@
|
inpun/gaecl
|
bin/gaeshell.sh
|
Shell
|
bsd-2-clause
| 290 |
java -cp lib/gnat.jar gnat.tests.RemoteNerGeneRepTest
|
miroculus/GNAT
|
scripts/testRemoteNerGenerep.sh
|
Shell
|
bsd-2-clause
| 54 |
#!/bin/bash
set -e
set -v
# Install dependencies needed to set up collectd
sudo yum install -y epel-release
# Install collectd for plugin management
# Install Java dependencies
sudo yum install -y java-11-openjdk
sudo yum install -y collectd collectd-write_riemann
sudo systemctl enable collectd
sudo systemctl start collectd
|
jhajek/packer-vagrant-build-scripts
|
packer/scripts/proxmox/core-rocky/post_install_prxmx_install-collectd.sh
|
Shell
|
bsd-3-clause
| 321 |
#!/bin/bash
export PYTHONPATH=.
./runauto_nag_common.sh
. venv/bin/activate
# Clean the log files
python -m auto_nag.log --clean
# Bug fixed without assignee
# very common
python -m auto_nag.scripts.no_assignee
# Bug closed with the leave open keyword
# very common
python -m auto_nag.scripts.leave_open
# has a STR without flag has_str
# common
# python -m auto_nag.scripts.has_str_no_hasstr
# Closes crash bug without any crashes for the last 12 weeks
# pretty common
python -m auto_nag.scripts.no_crashes
# List bug with the meta keyword but not [meta] in the title
# Pretty common
python -m auto_nag.scripts.meta_summary_missing
# List bug without the meta keyword with [meta] in the title (with autofix)
# Pretty common
python -m auto_nag.scripts.summary_meta_missing
# List reopened bugs with invalid nightly status flag
# Pretty common
python -m auto_nag.scripts.nightly_reopened
# Bug closed with the stalled keyword
# Pretty rare
python -m auto_nag.scripts.stalled
# Bugs with missing beta status
# Pretty rare
python -m auto_nag.scripts.missing_beta_status
# Bugs with STR and no regression-range
# Pretty rare
python -m auto_nag.scripts.has_str_no_range
# Notify bugs tracked (+ or blocking)
# with P3, P4 or P5 priorities for the ongoing releases
# Pretty common
python -m auto_nag.scripts.mismatch_priority_tracking_esr
python -m auto_nag.scripts.mismatch_priority_tracking_release
python -m auto_nag.scripts.mismatch_priority_tracking_beta
python -m auto_nag.scripts.mismatch_priority_tracking_nightly
# Bug is tracked for a release but the bug severity is small
# pretty common
python -m auto_nag.scripts.tracked_bad_severity
# Move info (signatures, product/component) from/to bugs & their dups
# Pretty common
python -m auto_nag.scripts.copy_duplicate_info
# Enhancement or task with the "regression" keyword
python -m auto_nag.scripts.regression_but_type_enhancement_task
# Move dupeme from whiteboard to keyword
# Pretty rare
python -m auto_nag.scripts.dupeme_whiteboard_keyword
# Remove dupeme keyword when the bug is closed
# Pretty rare
python -m auto_nag.scripts.closed_dupeme
# Suggest components for untriaged bugs (hourly, list only bugs on which we acted)
python -m auto_nag.scripts.component --frequency hourly
# MUST ALWAYS BE AFTER COMPONENTS (to reset the priority if mandatory)
# Reset the priority if the product::component changed after the priority has been set
python -m auto_nag.scripts.prod_comp_changed_with_priority
# Detect spam bugs using bugbug
python -m auto_nag.scripts.spambug
# Send a mail if the logs are not empty
# MUST ALWAYS BE THE LAST COMMAND
python -m auto_nag.log --send
deactivate
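# NOTE: nothing in this script sets "errored"; it is presumably exported by
# the environment or an earlier step in the full pipeline.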
if [ "$errored" = true ] ; then
exit 1
fi
|
mozilla/bztools
|
runauto_nag_hourly.sh
|
Shell
|
bsd-3-clause
| 2,711 |
#!/bin/bash
# Wait until network is ready
while ! ping -c 1 baxter > /dev/null
do
sleep 1
done
# Prepare FIFO for logging. Perhaps this FIFO is already prepared by LaunchLogger
mkdir -p /tmp/supervisor
[ -p /tmp/supervisor/launch_logger_fifo ] || mkfifo /tmp/supervisor/launch_logger_fifo
# Kill previous roslaunch
tmux kill-session -t gripper
# roslaunch in tmux
set -e
tmux new-session -d -s gripper -n roslaunch "script -f /tmp/supervisor/launch_logger_fifo"
## Using pipe-pane like following does not work when -d is specified in new-session:
## tmux pipe-pane -o -t gripper:roslaunch.0 "cat > /tmp/supervisor/launch_logger_fifo"
## capture-pane works, but it only captures current state and does not know which line is new
tmux send-keys -t gripper:roslaunch.0 "source ~/ros/kinetic/devel/setup.bash && rossetip && rossetmaster baxter && roslaunch sphand_driver setup_gripper_v8.launch left_gripper:=true" Enter
|
start-jsk/jsk_apc
|
demos/sphand_ros/sphand_driver/scripts/launch_left_gripper.sh
|
Shell
|
bsd-3-clause
| 875 |
# Heroku Web bootstrap script
# Updates the Web root to public/
# @link https://github.com/winglian/Heroku-PHP
# echo "Installing PECL APC"
# /app/php/bin/pecl install apc
# echo "[apc]" >> /app/php/php.ini
# echo "extension=apc.so" >> /app/php/php.ini
# echo "[memcache]" >> /app/php/php.ini
# echo "extension=memcache.so" >> /app/php/php.ini
sed -i 's/Listen 80/Listen '$PORT'/' /app/apache/conf/httpd.conf
# sed -i 's/^DocumentRoot/# DocumentRoot/' /app/apache/conf/httpd.conf
# sed -i 's/^ServerLimit 1/ServerLimit 8/' /app/apache/conf/httpd.conf
# sed -i 's/^MaxClients 1/MaxClients 8/' /app/apache/conf/httpd.conf
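# Forward every dyno environment variable to Apache via PassEnv so the app can read the Heroku config vars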
for var in `env|cut -f1 -d=`; do
echo "PassEnv $var" >> /app/apache/conf/httpd.conf;
done
# Setup apache logs
# touch /app/apache/logs/error_log
# touch /app/apache/logs/access_log
# tail -F /app/apache/logs/error_log &
# tail -F /app/apache/logs/access_log &
# export LD_LIBRARY_PATH=/app/php/ext
# export PHP_INI_SCAN_DIR=/app/www
# Start a temporary apache process
# This will display the index.php in the root
# while we are booting the ZF2 application
/app/apache/bin/httpd -DNO_DETACH &
PID=$!
echo "Installing application"
sh www/heroku/app-boot.sh
# Add our local configuration to the apache configuration
echo "Include /app/www/heroku/conf/*.conf" >> /app/apache/conf/httpd.conf
# Restart httpd with new configuration
kill $PID
sleep 2
echo "Launching apache"
exec /app/apache/bin/httpd -DNO_DETACH
|
HelloFax/hellosign-php-sdk-demo
|
heroku/web-boot.sh
|
Shell
|
bsd-3-clause
| 1,440 |
#!/bin/bash
pushd $LOCAL_TMP_DIR
status=0
rm -f u22_warnings.log
fw -p $LOCAL_TEST_DIR/u22_cfg.py
for file in u22_warnings.log
do
sed -i -r -f $LOCAL_TEST_DIR/filter-timestamps.sed $file
diff $LOCAL_TEST_DIR/unit_test_outputs/$file $LOCAL_TMP_DIR/$file
if [ $? -ne 0 ]
then
echo The above discrepancies concern $file
status=1
fi
done
popd
exit $status
|
gartung/fnal-art
|
art/test/Framework/Services/Message/u22t.sh
|
Shell
|
bsd-3-clause
| 381 |
#!/usr/bin/env bash
# Copyright (c) 2013-2016 Tuomo Tanskanen <[email protected]>
# Usage: Copy 'gitlab.rb.example' as 'gitlab.rb', then 'vagrant up'.
set -e
# these are normally passed via Vagrantfile to environment
# but if you run this on bare metal they need to be reset
GITLAB_HOSTNAME=${GITLAB_HOSTNAME:-192.168.98.100}
GITLAB_PORT=${GITLAB_PORT:-443}
#
# --------------------------------
# Installation - no need to touch!
# --------------------------------
#
export DEBIAN_FRONTEND=noninteractive
fatal()
{
echo "fatal: $@" >&2
}
check_for_root()
{
if [[ $EUID != 0 ]]; then
fatal "need to be root"
exit 1
fi
}
check_for_gitlab_rb()
{
if [[ ! -e /vagrant/gitlab.rb ]]; then
fatal "gitlab.rb not found at /vagrant"
exit 1
fi
}
check_for_backwards_compatibility()
{
if egrep -q "^ci_external_url" /vagrant/gitlab.rb; then
fatal "ci_external_url setting detected in 'gitlab.rb'"
fatal "This setting is deprecated in Gitlab 8.0+, and will cause Chef to fail."
fatal "Check the 'gitlab.rb.example' for fresh set of settings."
exit 1
fi
}
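# Fetching full package indexes is usually faster in a throwaway VM than
# applying many incremental PDiffs, so turn PDiffs off.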
set_apt_pdiff_off()
{
echo 'Acquire::PDiffs "false";' > /etc/apt/apt.conf.d/85pdiff-off
}
install_swap_file()
{
# "GITLAB_SWAP" is passed in environment by shell provisioner
if [[ $GITLAB_SWAP -gt 0 ]]; then
echo "Creating swap file of ${GITLAB_SWAP}G size"
SWAP_FILE=/.swap.file
dd if=/dev/zero of=$SWAP_FILE bs=1G count=$GITLAB_SWAP
mkswap $SWAP_FILE
echo "$SWAP_FILE none swap sw 0 0" >> /etc/fstab
chmod 600 $SWAP_FILE
swapon -a
else
echo "Skipped swap file creation due 'GITLAB_SWAP' set to 0"
fi
}
rewrite_hostname()
{
sed -i -e "s,^external_url.*,external_url 'https://${GITLAB_HOSTNAME}/'," /etc/gitlab/gitlab.rb
}
# All commands expect root access.
check_for_root
# Check for configs that are not compatible anymore
check_for_gitlab_rb
check_for_backwards_compatibility
# install swap file for more memory
install_swap_file
# install tools to automate this install
apt-get -y update
apt-get -y install debconf-utils curl
# install the few dependencies we have
echo "postfix postfix/main_mailer_type select Internet Site" | debconf-set-selections
echo "postfix postfix/mailname string $GITLAB_HOSTNAME" | debconf-set-selections
apt-get -y install openssh-server postfix
# generate ssl keys
apt-get -y install ca-certificates ssl-cert
make-ssl-cert generate-default-snakeoil --force-overwrite
# download omnibus-gitlab package (300M) with apt
# vagrant-cachier plugin hightly recommended
echo "Setting up Gitlab deb repository ..."
set_apt_pdiff_off
curl https://packages.gitlab.com/install/repositories/gitlab/gitlab-ce/script.deb.sh | sudo bash
echo "Installing gitlab-ce via apt ..."
apt-get install -y gitlab-ce
# fix the config and reconfigure
cp /vagrant/gitlab.rb /etc/gitlab/gitlab.rb
rewrite_hostname
gitlab-ctl reconfigure
# done
echo "Done!"
echo " Login at https://${GITLAB_HOSTNAME}:${GITLAB_PORT}/, username 'root'. Password will be reset on first login."
echo " Config found at /etc/gitlab/gitlab.rb and updated by 'sudo gitlab-ctl reconfigure'"
|
gilmararaujo/GitLab
|
install-gitlab.sh
|
Shell
|
bsd-3-clause
| 3,214 |
#!/bin/sh
# Generated file, master is Makefile.am
. ${srcdir:-.}/common.sh
infile="$srcdir/images/quad-lzw-compat.tiff"
outfile="o-tiffcrop-doubleflip-quad-lzw-compat.tiff"
f_test_convert "$TIFFCROP -F both" $infile $outfile
f_tiffinfo_validate $outfile
|
ric2b/Vivaldi-browser
|
update_notifier/thirdparty/wxWidgets/src/tiff/test/tiffcrop-doubleflip-quad-lzw-compat.sh
|
Shell
|
bsd-3-clause
| 254 |
#! /usr/bin/env bash
#
# Created by alex [email protected]
#
# Copyright (c) 2012
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of the project's author nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#backup
#program <mode> <partition> <syncdir>
options=""
softlimit=10 #values in percent
usage()
{
echo "usage: backuprestore <mode> [<path to partition>] <syncdir>"
echo "modes:"
echo "backup: backup files, use partitionname for identification"
echo "backup-ignore-sl: same as backup but ignore soft limit"
echo "restore: rsync files to syncdir/dest/backup"
echo ""
echo "exit 2, when too less space"
echo "exit 3, when backuped files maybe are too big and manual intervention is needed"
exit 1
}
if [ "$1" = "help" ] || [ "$1" = "--help" ] ;then
usage
fi
# internal dependencies: umountscript.sh mountscript
#use readlink -f if realpath isn't available
if [ ! -e "/usr/bin/realpath" ];then
realpath()
{
readlink -f "$1"
}
fi
#dir where the cloneme files are located
sharedir="$(dirname "$(dirname "$(realpath "$0")")")"
mode="$1"
partition_path="$(realpath "$2")"
partition="$(basename "$partition_path")"
backup()
{
"$sharedir"/sh/mountscript.sh "${partition_path}" "$syncdir"/tmpmount
if ls "$syncdir"/tmpmount/ > /dev/null; then
if [[ "${mode}" != "backup-ignore-sl" ]] && [[ "$(((100-($(stat --file-system -c%f "$syncdir/tmpmount")*100 / $(stat --file-system -c%b "$syncdir/tmpmount"))))" -gt "$softlimit" ]]; then
echo "Debug: over softlimit; exit"
exit 3
fi
mkdir -p "$syncdir"/transferdir/"${partition}"
rsync -a -A --progress --delete "$syncdir"/tmpmount "$syncdir"/transferdir/"${partition}"
fi
"$sharedir"/sh/umountscript.sh n "$syncdir"/tmpmount
}
restore()
{
mkdir -p "$syncdir"/dest/backupoldfiles
rsync -a -A --progress --delete "$syncdir"/transferdir "$syncdir"/dest/backupoldfiles
}
if [[ "${mode}" = "backup" ]] && [[ "$#" = "3" ]]; then
backup
elif [[ "${mode}" = "backup-ignore-sl" ]] && [[ "$#" = "3" ]]; then
backup
elif [[ "${mode}" = "restore" ]] && [[ "$#" -ge "2" ]]; then
restore
else
usage
fi
|
devkral/cloneme
|
src/share/sh/backuprestore.sh
|
Shell
|
bsd-3-clause
| 3,492 |
#!/bin/bash
chmod 755 startmongo.sh
sudo docker build -t ${DOCKER_REGISTRY}/mongo:${TAG} .
sudo docker push ${DOCKER_REGISTRY}/mongo:${TAG}
|
reza-rahim/microservice
|
ansible/mesos-app/mongo/docker/build.sh
|
Shell
|
mit
| 141 |
#!/usr/bin/env bash
# make sure we have up-to-date packages
apt-get update
## vagrant grub-pc fix from: https://gist.github.com/jrnickell/6289943
# parameters
echo "grub-pc grub-pc/kopt_extracted boolean true" | debconf-set-selections
echo "grub-pc grub2/linux_cmdline string" | debconf-set-selections
echo "grub-pc grub-pc/install_devices multiselect /dev/sda" | debconf-set-selections
echo "grub-pc grub-pc/install_devices_failed_upgrade boolean true" | debconf-set-selections
echo "grub-pc grub-pc/install_devices_disks_changed multiselect /dev/sda" | debconf-set-selections
# vagrant grub fix
dpkg-reconfigure -f noninteractive grub-pc
# upgrade all packages
#apt-get upgrade -y
# install packages as explained in INSTALL.md
apt-get install -y ruby1.9.1 libruby1.9.1 ruby1.9.1-dev ri1.9.1 \
postgresql-9.3-postgis-2.1 postgresql-server-dev-all postgresql-contrib \
build-essential git-core \
libxml2-dev libxslt-dev imagemagick libmapserver1 gdal-bin libgdal-dev ruby-mapscript nodejs libmagickwand-dev redis-server
# the ruby gdal build currently needs -Werror=format-security removed
sed -i 's/-Werror=format-security//g' /usr/lib/ruby/1.9.1/x86_64-linux/rbconfig.rb
gem1.9.1 install bundle
## install the bundle necessary for mapwarper
pushd /srv/mapwarper
# do bundle install as a convenience
sudo -u vagrant -H bundle install
# create user and database for mapwarper
db_user_exists=`sudo -u postgres psql postgres -tAc "select 1 from pg_roles where rolname='vagrant'"`
if [ "$db_user_exists" != "1" ]; then
sudo -u postgres createuser -s vagrant
sudo -u vagrant -H createdb -E UTF-8 -O vagrant mapwarper_development
fi
# build and set up postgres extensions
sudo -u vagrant psql mapwarper_development -c "create extension postgis;"
# set up sample configs
if [ ! -f config/database.yml ]; then
sudo -u vagrant cp config/database.example.yml config/database.yml
fi
if [ ! -f config/application.yml ]; then
sudo -u vagrant cp config/application.example.yml config/application.yml
fi
if [ ! -f config/secrets.yml ]; then
sudo -u vagrant cp config/secrets.yml.example config/secrets.yml
fi
echo "now migrating database. This may take a few minutes"
# migrate the database to the latest version
sudo -u vagrant -H RAILS_ENV=development bundle exec rake db:migrate
popd
|
nypl-spacetime/nypl-warper
|
lib/vagrant/provision.sh
|
Shell
|
mit
| 2,321 |
# == Name
#
# consul.watch
#
# === Description
#
# Manages a consul.watch.
#
# === Parameters
#
# * state: The state of the resource. Required. Default: present.
# * name: The name of the watch. Required.
# * type: The type of watch: key, keyprefix, services, nodes, service, checks, event. Required.
# * key: A key to monitor when using type "key". Optional.
# * prefix: A prefix to monitor when using type "keyprefix". Optional.
# * service: A service to monitor when using type "service" or "checks". Optional.
# * tag: A service tag to monitor when using type "service". Optional.
# * passingonly: Only return instances passing all health checks when using type "service". Optional.
# * check_state: A state to filter on when using type "checks". Optional.
# * event_name: An event to filter on when using type "event. Optional.
# * datacenter: Can be provided to override the agent's default datacenter. Optional.
# * token: Can be provided to override the agent's default ACL token. Optional.
# * handler: The handler to invoke when the data view updates. Required.
# * file: The file to store the watch in. Required. Defaults to /etc/consul/agent/conf.d/watch-name.json
# * file_owner: The owner of the service file. Optional. Defaults to root.
# * file_group: The group of the service file. Optional. Defaults to root.
# * file_mode: The mode of the service file. Optional. Defaults to 0640
#
# === Example
#
# ```bash
# consul.watch --name nodes \
# --type nodes \
# --handler "/usr/local/bin/build_hosts_file.sh"
# ```
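#
# A watch on a single key looks similar (the key and handler below are
# hypothetical, shown only for illustration):
#
# ```bash
# consul.watch --name app-config \
#              --type key \
#              --key apps/config/settings \
#              --handler "/usr/local/bin/reload_app.sh"
# ```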
#
consul.watch() {
# Declare the resource
waffles_resource="consul.watch"
# Check if all dependencies are installed
local _wrd=("jsed" "consul")
if ! waffles.resource.check_dependencies "${_wrd[@]}" ; then
return 2
fi
# Resource Options
local -A options
waffles.options.create_option state "present"
waffles.options.create_option name "__required__"
waffles.options.create_option type "__required__"
waffles.options.create_option handler "__required__"
waffles.options.create_option token
waffles.options.create_option datacenter
waffles.options.create_option key
waffles.options.create_option prefix
waffles.options.create_option service
waffles.options.create_option tag
waffles.options.create_option passingonly
waffles.options.create_option check_state
waffles.options.create_option event_name
waffles.options.create_option file
waffles.options.create_option file_owner "root"
waffles.options.create_option file_group "root"
waffles.options.create_option file_mode "640"
waffles.options.parse_options "$@"
if [[ $? != 0 ]]; then
return $?
fi
# Local Variables
local _file
local _name="${options[name]}"
local _dir=$(dirname "${options[file]}")
local _simple_options=(type handler token datacenter key prefix service tag passingonly)
if [[ -z ${options[file]} ]]; then
_file="/etc/consul/agent/conf.d/watch-${options[name]}.json"
else
_file="${options[file]}"
fi
# Process the resource
waffles.resource.process $waffles_resource "$_name"
}
consul.watch.read() {
if [[ ! -f $_file ]]; then
waffles_resource_current_state="absent"
return
fi
_watch=$(consul.watch.build_watch)
_existing_md5=$(md5sum "$_file" | cut -d' ' -f1)
_new_md5=$(echo $_watch | md5sum | cut -d' ' -f1)
if [[ "$_existing_md5" != "$_new_md5" ]]; then
waffles_resource_current_state="update"
return
fi
waffles_resource_current_state="present"
}
consul.watch.create() {
if [[ ! -d $_dir ]]; then
exec.capture_error mkdir -p "$_dir"
fi
_watch=$(consul.watch.build_watch)
os.file --name "$_file" --content "$_watch" --owner "${options[file_owner]}" --group "${options[file_group]}" --mode "${options[file_mode]}"
}
consul.watch.update() {
consul.watch.delete
consul.watch.create
}
consul.watch.delete() {
os.file --state absent --name "$_file"
}
consul.watch.build_watch() {
_watch='{"watches":[]}'
_options=""
# Build simple options
for _o in "${_simple_options[@]}"; do
if [[ -n ${options[$_o]} ]]; then
_watch_options="${_options} --key '$_o' --value '${options[$_o]}'"
fi
done
_watch=$(echo "$_watch" | jsed add object --path watches "$_watch_options")
# check_state conflicts with "state" option, so we need to make a special check here
if [[ -n ${options[check_state]} ]]; then
_watch=$(echo "$_watch" | jsed add object --path watches.0.state --key "${options[check_state]}")
fi
# event_name conflicts with "name" option, so we need to make a special check here
if [[ -n ${options[event_name]} ]]; then
_watch=$(echo "$_watch" | jsed add object --path watches.0.name --key "${options[event_name]}")
fi
echo "$_watch"
}
|
wffls/waffles
|
resources/consul_watch.sh
|
Shell
|
mit
| 4,778 |
#!/bin/bash
# run danger-brewing hub.
# specifying config path to be relative from /lib/index.js.
# node-config would otherwise: NODE_CONFIG_DIR=/config
sudo NODE_PATH=lib/ node lib/index.js
|
jonpitch/danger-brewing-hub
|
run.sh
|
Shell
|
mit
| 192 |