code
stringlengths 2
1.05M
| repo_name
stringlengths 5
110
| path
stringlengths 3
922
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 2
1.05M
|
---|---|---|---|---|---|
INPUT=$1
# Restore missing Perl modules: diff the module list in $INPUT against what
# pminst reports as installed, then cpan-install the difference.
# Usage: perl_restore.sh <module_list_file>
# -y keeps yum non-interactive so unattended runs do not hang on a prompt
yum install -y perl-pmtools
# Currently installed modules; drop lines starting with a digit (pminst noise)
pminst | grep -v -P "^\d" | sort | uniq > /tmp/installed_perl_fresh.log
# Modules listed in $INPUT but not installed
comm -23 <(sort "$INPUT" | grep -v -P "^\d" | uniq) <(sort /tmp/installed_perl_fresh.log | uniq) > /tmp/installed_perl_diff.log
# Word-splitting is intentional here: module names contain no whitespace
DATABASES=( $(sort /tmp/installed_perl_diff.log) )
TOTALc=$(wc -l < /tmp/installed_perl_diff.log)
FRESHc=$(wc -l < /tmp/installed_perl_fresh.log)
ORIGINALc=$(wc -l < "$INPUT")
rm /tmp/installed_perl_out.log &>/dev/null
# Build a cpan autobundle snapshot before force-installing anything
cpan -a
# Iterate array indices so the progress line can show the element number
for element in "${!DATABASES[@]}"
do
  BASENAME=${DATABASES[$element]}
  echo "$element : $TOTALc : $FRESHc : $ORIGINALc - $BASENAME"
  # -f: force, -i: install
  cpan -fi "$BASENAME"
  sleep 1
done
mkdir perl &>/dev/null
mv ~/rpm/RPMS/noarch/*.rpm ./perl
|
sauloal/linuxscripts
|
yumupdate/perl_restore.sh
|
Shell
|
mit
| 771 |
#!/usr/bin/env bash
# Bootstrap KoreBuild/Sake via nuget.exe and run the build in makefile.shade.
# Pick a per-user cache directory for nuget.exe.
if test "$(uname)" = Darwin; then
  cachedir=~/Library/Caches/KBuild
else
  if [ -z "$XDG_DATA_HOME" ]; then
    cachedir=$HOME/.local/share
  else
    cachedir=$XDG_DATA_HOME
  fi
fi
mkdir -p "$cachedir"
url=https://www.nuget.org/nuget.exe
# Download nuget.exe once and cache it; fall back to curl when wget is absent.
# Fix vs original: the curl command ended with a bare "/dev/null" argument,
# which curl treats as an extra URL to fetch; the intent was to silence
# stderr like the wget branch does, i.e. "2>/dev/null".
if test ! -f "$cachedir/nuget.exe"; then
  wget -O "$cachedir/nuget.exe" "$url" 2>/dev/null || curl -o "$cachedir/nuget.exe" --location "$url" 2>/dev/null
fi
if test ! -e .nuget; then
  mkdir .nuget
  cp "$cachedir/nuget.exe" .nuget/nuget.exe
fi
# Fetch the build tooling packages on first run.
if test ! -d packages/KoreBuild; then
  mono .nuget/nuget.exe install KoreBuild -ExcludeVersion -o packages -nocache -pre
  mono .nuget/nuget.exe install Sake -ExcludeVersion -Source https://www.nuget.org/api/v2/ -Out packages
fi
if ! type dnvm > /dev/null 2>&1; then
  source packages/KoreBuild/build/dnvm.sh
fi
if ! type dnx > /dev/null 2>&1; then
  dnvm upgrade
fi
mono packages/Sake/tools/Sake.exe -I packages/KoreBuild/build -f makefile.shade "$@"
|
iambmelt/O365-ASPNETMVC-Start
|
build.sh
|
Shell
|
mit
| 978 |
#!/bin/bash -e
##-------------------------------------------------------------------
## @copyright 2016 DennyZhang.com
## Licensed under MIT
## https://raw.githubusercontent.com/DennyZhang/devops_public/tag_v1/LICENSE
##
## File: check_proc_mem.sh
## Author : Denny <[email protected]>
## Description :
## --
##
## Link: https://www.dennyzhang.com/nagois_monitor_process_memory
##
## Created : <2014-10-25>
## Updated: Time-stamp: <2017-09-04 18:54:36>
##-------------------------------------------------------------------
function print_help {
  # Print usage and examples for this Nagios-style memory check plugin.
  # A single quoted heredoc replaces the run of echo statements; the output
  # is byte-for-byte identical.
  cat <<'USAGE'
check_proc_mem v1.0

Usage:
check_proc_mem.sh -w <warn_MB> -c <criti_MB> <pid_pattern> <pattern_argument>

Below: If tomcat use more than 1024MB resident memory, send warning
check_proc_mem.sh -w 1024 -c 2048 --pidfile /var/run/tomcat7.pid
check_proc_mem.sh -w 1024 -c 2048 --pid 11325
check_proc_mem.sh -w 1024 -c 2048 --cmdpattern "tomcat7.*java.*Dcom"

Copyright (C) 2014 DennyZhang ([email protected])
USAGE
}
# ---- option parsing: resolve thresholds and the target PID ----
while [ "$#" -gt 0 ]
do
  opt=$1
  case $opt in
    -w)
      warn_mb=$2
      shift 2 # Past argument and value
      ;;
    -c)
      crit_mb=$2
      shift 2 # Past argument and value
      ;;
    --pidfile)
      pidfile=$2
      pid=$(cat "$pidfile")
      shift 2 # Past argument and value
      ;;
    --cmdpattern)
      cmdpattern=$2
      # Exclude this script itself from the match and take the first PID.
      # (Backticks and the unquoted basename from the original replaced
      # with quoted $() — same behavior, safe with odd script paths.)
      pid=$(pgrep -a -f "$cmdpattern" | grep -v "$(basename "$0")" | head -n 1 | awk -F' ' '{print $1}')
      shift 2 # Past argument and value
      ;;
    --pid)
      pid=$2
      shift 2 # Past argument and value
      ;;
    *)
      print_help
      exit 3
      ;;
  esac
done
# Both thresholds must be plain non-negative integers (MB).
num_re='^[0-9]+$'
if ! [[ "$warn_mb" =~ $num_re ]] || ! [[ "$crit_mb" =~ $num_re ]]
then
  echo "ERROR: Warning or Critical level is not a number"
  exit 3
fi
if [ -z "$pid" ]; then
  echo "ERROR: no related process is found"
  exit 2
fi
# /proc/<pid>/status reports VmSize/VmRSS in kB; convert to MB.
memVmSize=$(grep 'VmSize:' "/proc/${pid}/status" | awk -F' ' '{print $2}')
memVmSize=$((memVmSize/1024))
memVmRSS=$(grep 'VmRSS:' "/proc/${pid}/status" | awk -F' ' '{print $2}')
memVmRSS=$((memVmRSS/1024))
# Nagios plugin conventions: exit 2=CRITICAL, 1=WARNING, 0=OK; the |RES=…
# suffix is perfdata in bytes.
if [ "$memVmRSS" -ge "$crit_mb" ]; then
  echo "Memory: CRITICAL RES: $memVmRSS MB - VIRT: $memVmSize MB used!|RES=$((memVmRSS*1024*1024));;;;"
  exit 2
elif [ "$memVmRSS" -ge "$warn_mb" ]; then
  echo "Memory: WARNING RES: $memVmRSS MB - VIRT: $memVmSize MB used!|RES=$((memVmRSS*1024*1024));;;;"
  exit 1
else
  echo "Memory: OK RES: $memVmRSS MB - VIRT: $memVmSize MB!|RES=$((memVmRSS*1024*1024));;;;"
  exit 0
fi
|
DennyZhang/devops_public
|
nagios_plugins/check_proc_mem/check_proc_mem.sh
|
Shell
|
mit
| 2,759 |
#!/bin/bash
cd "$(dirname "$0")"
# Sync the dotfiles in this repo into $HOME, excluding repo-management
# files, then start a fish shell so the new configuration takes effect.
function doIt() {
# Every --exclude keeps repo plumbing (git metadata, installers, docs)
# out of the home directory; -a preserves attributes, --no-perms skips
# permission propagation.
rsync --exclude ".git/" \
--exclude ".gitignore" \
--exclude "install-deps.sh" \
--exclude ".DS_Store" \
--exclude "sync-remote.sh" \
--exclude "sync-local.sh" \
--exclude "README.md" \
--exclude "LICENSE.md" \
--exclude "init" \
--exclude "utils" \
--exclude "sync.sh" \
--exclude "Caskfile" \
-av --no-perms . ~
# Drop the user into fish with the freshly synced config
fish
}
# --force / -f skips the confirmation prompt; anything else asks first.
# ([[ … || … ]] replaces the deprecated/ambiguous `[ … -o … ]` form.)
if [[ "$1" == "--force" || "$1" == "-f" ]]; then
  doIt
else
  read -p "This may overwrite existing files in your home directory. Are you sure? (y/n) " -n 1
  echo
  if [[ $REPLY =~ ^[Yy]$ ]]; then
    doIt
  fi
fi
unset doIt
|
Kikobeats/dotfiles
|
sync-local.sh
|
Shell
|
mit
| 695 |
#!/bin/bash -e
# One-way copy of the /vagrant share into the rsss webapp root, skipping
# log files, git metadata, and this script itself. -c compares by checksum.
rsync -avrc --exclude=logs/* --exclude=.git --exclude=sync_vagrant_share_with_root.sh /vagrant/ /home/webapps/rsss/
|
rgeyer/rs_selfservice
|
sync_vagrant_share_with_root.sh
|
Shell
|
mit
| 132 |
#!/bin/bash
# Script to add manage UA override to B2G 1.2+
# Karl Dubost - 2013 © - MIT Licence
# Scratch copies of the device's user.js used while editing prefs.
LOCAL_USER_JS=/tmp/user.js
LOCAL_ORIG_USER_JS=/tmp/orig-user.js
# TODO: Change this for /data/b2g/mozilla/*.default/
# The user profile is
# /data/b2g/mozilla/something.default/prefs.js
PROFILE_DIR=/system/b2g/defaults/pref
REMOTE_USER_JS=${PROFILE_DIR}/user.js
# Canonical remote UA-override list maintained in mozilla-central.
SERVER_UA_LIST='https://hg.mozilla.org/mozilla-central/raw-file/tip/b2g/app/ua-update.json.in'
LOCAL_UA_LIST=/tmp/server_ua.txt
# Refresh the local working copies: pull user.js from the device over adb
# and download the current remote UA-override list.
preparing() {
# remove any previous files
rm -f ${LOCAL_USER_JS} ${LOCAL_USER_JS}.tmp ${LOCAL_ORIG_USER_JS} ${LOCAL_UA_LIST}
# pull from the device to a local tmp directory
adb pull ${REMOTE_USER_JS} ${LOCAL_USER_JS}
# downloading the remote list of UA override
curl -s ${SERVER_UA_LIST} -o ${LOCAL_UA_LIST}
}
# Print the usage text for all sub-commands to stdout.
# (The heredoc body is program output — left byte-identical.)
helpmsg() {
# Print the list of arguments
cat << EOF
The List of UA overrides is controlled by a remote file located at:
https://hg.mozilla.org/mozilla-central/raw-file/tip/b2g/app/ua-update.json.in
In addition you still have the ability to add UA override
locally for testing.
Usage: mozua2.sh <options>
List of options:
on/off:
Enable or disable the remote UA override list.
list <string>:
Give a list of all domain names matchin the string
having a UA override.
add <domain_name> <ua_string>:
Add the domain_name to the local list of UA overrides
using a specific User Agent string.
remove <domain_name>:
Remove the domain_name from the local list of UA overrides.
EOF
}
overridestatus() {
    # Report whether the remote UA-override updates pref is enabled in the
    # pulled user.js: prints "False" when the pref line contains "false",
    # "True" otherwise (including when the line is absent).
    PREFOVERRIDE=$(grep useragent.updates.enabled ${LOCAL_USER_JS})
    case "$PREFOVERRIDE" in
        *false*) echo False ;;
        *)       echo True ;;
    esac
}
# Turn the remote UA-override list on or off.
# $1: "on" or "off"; anything else reports an error.
override() {
    # Fix vs original: it assigned the literal string "overridestatus"
    # (OVERRIDEFLAG=overridestatus), so [[ $OVERRIDEFLAG ]] was always true
    # and both branches reported "already on/off". Capture the function's
    # output and compare it against the state it actually reports.
    OVERRIDEFLAG=$(overridestatus)
    if [[ $1 == "on" ]]; then
        if [[ $OVERRIDEFLAG == "True" ]]; then
            echo "UA override is already on!"
            exit 1
        fi
        activate
    elif [[ $1 = "off" ]]; then
        if [[ $OVERRIDEFLAG == "False" ]]; then
            echo "UA override is already off!"
            exit 1
        fi
        stop
    else
        error
    fi
}
# Show UA overrides matching $1 in both the remote list and the local
# user.js pulled from the device.
list() {
    local DOMAIN=${1}
    echo "UA override for ${DOMAIN}"
    # Fix vs original: the pattern and the two file names were quoted into
    # a single grep argument, so grep searched stdin for a nonsense string.
    grep -i "${DOMAIN}" "${LOCAL_UA_LIST}" "${LOCAL_USER_JS}"
    echo "TODO: better presentation and matching for search"
}
# Add a local UA override for a domain unless one already exists remotely
# or locally. (The actual write/push steps are still TODO.)
# $1: domain name, $2: UA string
add() {
    local DOMAIN=${1}
    local UA=${2}
    # Fix vs original: pattern and file name were fused into one quoted
    # grep argument in both lookups, so the file was never searched.
    remote_list=$(grep -i "${DOMAIN}" "${LOCAL_UA_LIST}" | sed -e 's/^ *//' -e 's/ *$//')
    user_list=$(grep -i "general.useragent.override.${DOMAIN}" "${LOCAL_USER_JS}")
    if [[ -z "$remote_list" ]]; then
        if [[ -z "$user_list" ]]; then
            echo "@TODO: Adding UA override for ${DOMAIN} with User-Agent ${UA}"
        else
            echo "There is already a local UA override for this domain."
            echo "local:<$user_list>"
        fi
    else
        echo "There is already a remote UA override for this domain."
        echo "local:<$remote_list>"
    fi
    echo "@TODO: If yes display the current UA override"
    echo "@TODO: If no add the UA override to the prefs file in /tmp"
    echo "@TODO: push to device"
    echo "@TODO: reboot the device"
}
error() {
    # Report that the requested sub-command is not recognised.
    printf '%s\n' "This is not a valid feature"
}
# Enable remote UA-override updates: rewrite the pref in a temp copy of
# user.js and push it back to the device.
activate() {
# Activate UA override
echo "Activate UA override"
# Drop any existing pref line, then append it set to true
grep -v "useragent.updates.enabled" ${LOCAL_USER_JS} > ${LOCAL_USER_JS}.tmp
echo 'pref("general.useragent.updates.enabled", true);' >> ${LOCAL_USER_JS}.tmp
pushtodevice
}
# Disable remote UA-override updates: rewrite the pref in a temp copy of
# user.js and push it back to the device.
stop() {
# Stop UA Override
echo "Stop UA override"
# Drop any existing pref line, then append it set to false
grep -v "useragent.updates.enabled" ${LOCAL_USER_JS} > ${LOCAL_USER_JS}.tmp
echo 'pref("general.useragent.updates.enabled", false);' >> ${LOCAL_USER_JS}.tmp
pushtodevice
}
# Push the edited user.js to the device: remount /system read-write, copy
# the file, remount read-only, then soft-restart b2g to reload prefs.
pushtodevice() {
# Upload the new prefs
echo "Pushing to device"
# set -x: trace the adb commands as they run
set -x
adb shell mount -o rw,remount /system
adb push ${LOCAL_USER_JS}.tmp ${REMOTE_USER_JS}
adb shell mount -o ro,remount /system
restart
}
# Soft restart: stop and start the b2g process without rebooting the device.
restart() {
# Create a soft reboot
echo "Restart the device (software)"
adb shell stop b2g && adb shell start b2g
}
# Hard reboot of the device over adb.
# NOTE: this function shadows any external `reboot` command within this script.
reboot() {
# Create a hard reboot
echo "Restart the device (hardware)"
adb reboot
}
echo "========================================="
echo "UA override management on Firefox OS 1.2+"
echo "========================================="
# Main: every sub-command takes 1 argument except "add" which takes 2.
if [[ $# -lt 2 || $# -gt 3 ]]; then
    helpmsg
    exit 1
fi
# Saving locally the files from the device
preparing
# Going through the options
if [[ ${1} == "override" ]]; then
    override "${2}"
elif [[ ${1} == "list" ]]; then
    list "${2}"
elif [[ ${1} == "add" ]]; then
    # Fix vs original: pass domain and UA string as two separate arguments
    # (it passed a single fused "${2} ${3}" argument, so $2 inside add()
    # was always empty).
    add "${2}" "${3}"
elif [[ ${1} == "remove" ]]; then
    echo "TODO remove UA override for ${2}"
else
    error
    helpmsg
fi
# End
echo "Bye!"
|
karlcow/webcompat
|
moz/uaoverride/mozua2.sh
|
Shell
|
mit
| 4,802 |
#!/bin/sh
# Geany run-wrapper: delete this generated helper, run the built binary,
# then wait for Enter so the terminal window stays open.
rm $0
./base
echo "
------------------
(program exited with code: $?)"
echo "Press return to continue"
#to be more compatible with shells like dash
dummy_var=""
read dummy_var
|
voidrank/ASA.tv
|
src/app/scripts/services/ABPlayerHTML5/css/geany_run_script.sh
|
Shell
|
mit
| 195 |
#!/bin/bash
# Clone submodules
git submodule update --init
cd docs
# Fix sphinx to ignore non-local image warnings
filename="$(find /home/travis/virtualenv/ -name environment.py)"
# Comment out sphinx's "nonlocal image URI found" warning in the file
# located above. Fix vs original: the sed target was a corrupted
# "$(unknown)"; the intended operand is the $filename found just before.
sed -e '/nonlocal\ image\ URI\ found/ s/^/#/' -i "$filename"
# Test documentation
OPTIONS="-n" # nit-picky mode
if [[ "$1" = "-W" ]] ; then
  OPTIONS="$OPTIONS -W"
fi
# $OPTIONS is intentionally unquoted so it word-splits into flags
sphinx-build $OPTIONS -b html -d _build/doctrees . _build/html
|
Gomez/docs-1
|
test.sh
|
Shell
|
mit
| 416 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for DLA-53-1
#
# Security announcement date: 2014-09-16 00:00:00 UTC
# Script generation date: 2017-01-01 21:08:48 UTC
#
# Operating System: Debian 6 (Squeeze)
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - apt:0.8.10.3+squeeze3
#
# Last versions recommanded by security team:
# - apt:0.8.10.3+squeeze5
#
# CVE List:
# - CVE-2014-0487
# - CVE-2014-0488
# - CVE-2014-0489
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
# Apply the fix by upgrading apt to the patched Squeeze LTS version listed above.
sudo apt-get install --only-upgrade apt=0.8.10.3+squeeze5 -y
|
Cyberwatch/cbw-security-fixes
|
Debian_6_(Squeeze)/x86_64/2014/DLA-53-1.sh
|
Shell
|
mit
| 650 |
#!/bin/sh
MODULE=ikev1
VERSION=0.9
TITLE=IPsec服务器
DESCRIPTION=高安全的企业VPN服务器
HOME_URL=Module_ikev1.asp
CHANGELOG="√修复重启防火墙规则可能丢失的BUG"
# Check and include base
# Fix vs original: ${BASH_SOURCE[0]} requires braces; "$BASH_SOURCE[0]"
# expands $BASH_SOURCE (the first element) and then appends a literal
# "[0]", yielding a wrong dirname.
# NOTE(review): BASH_SOURCE is a bashism under a #!/bin/sh shebang — this
# assumes /bin/sh is bash on the build host; confirm.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# now include build_base.sh
. $DIR/../softcenter/build_base.sh
# build bin
sh $DIR/build/build ikev1
# change to module directory
cd $DIR
# do something here
do_build_result
sh backup.sh $MODULE
|
koolshare/ledesoft
|
ikev1/build.sh
|
Shell
|
mit
| 467 |
# libs/copy_home_dir.sh
#
# Sub-routines to handle copying a user's homedir to the local machine.
# You can make an update script that calls this if you want to allow
# this to happen every time you `ezvm update`
#
# If there is a home directory in the local content dir (etc/local by default)
# then we want to copy it to the system.
# Source dir for home files; override via EZVM_HOME_SRC in the environment.
EZVM_HOME_SRC=${EZVM_HOME_SRC:-"$EZVM_LOCAL_CONTENT_DIR/home"}
# Copy home source to $HOME
# Usage: copy_home_src $source_directory
# Copy every top-level entry of $1 (except the "users" subdirectory)
# into $HOME, overwriting existing files. Dies on any copy error.
copy_home_dir_src() {
    local src="$1"
    local f
    log_msg 7 "Copying home dir files from $src"
    # Fix vs original: read entries line by line so file names containing
    # spaces survive (the original word-split one big string and broke on
    # such names). Names containing newlines are still unsupported.
    while IFS= read -r f; do
        # find lists $src itself first; the sed strip reduces it to an
        # empty string — skip it (the original skipped it via word-splitting)
        [ -n "$f" ] || continue
        log_msg 9 "Overwrite: $HOME/$f"
        cp -r "$src/$f" "$HOME/$f" || die "Error copying $f" $?
    done < <(find "$src" -maxdepth 1 | sed -e "s,^$src/*,," | grep -v '^users$')
}
# Copy the default home directory from the local source to the current machine.
# If there is also a specific user home directory, copy that too.
# Usage: copy_home_dir
copy_home_dir() {
    # Fix vs original: the unquoted `[ -d $EZVM_HOME_SRC ]` becomes the
    # one-argument test `[ -d ]` when the variable is empty, which is TRUE
    # and would trigger a copy from "". Quoting makes the empty case false.
    if [ -d "$EZVM_HOME_SRC" ]; then
        log_msg 5 "Copying generic home dir source"
        copy_home_dir_src "$EZVM_HOME_SRC"
        # If there is a version specifically for the current user, use that version
        # in addition to the default version
        if [ -d "$EZVM_HOME_SRC/users/$EZVM_USERNAME" ]; then
            log_msg 5 "Copying user-specific home dir source"
            copy_home_dir_src "$EZVM_HOME_SRC/users/$EZVM_USERNAME"
        fi
    else
        log_msg 10 "No home dir source dir exists: $EZVM_HOME_SRC"
    fi
}
|
vube/ezvm
|
libs/copy-home-dir.sh
|
Shell
|
mit
| 1,545 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for USN-1460-1
#
# Security announcement date: 2012-05-31 00:00:00 UTC
# Script generation date: 2017-01-01 21:02:34 UTC
#
# Operating System: Ubuntu 12.04 LTS
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - linux-image-3.2.0-1413-omap4:3.2.0-1413.17
#
# Last versions recommanded by security team:
# - linux-image-3.2.0-1413-omap4:3.2.0-1413.17
#
# CVE List:
# - CVE-2012-1601
# - CVE-2012-2123
# - CVE-2012-2745
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
# Apply the fix by upgrading the kernel image to the pinned version above.
sudo apt-get install --only-upgrade linux-image-3.2.0-1413-omap4=3.2.0-1413.17 -y
|
Cyberwatch/cbw-security-fixes
|
Ubuntu_12.04_LTS/x86_64/2012/USN-1460-1.sh
|
Shell
|
mit
| 713 |
# Grant the web server user (www-data) and the invoking user write access
# to the Symfony cache/log dirs, then rebuild the prod cache as www-data.
sudo setfacl -R -m u:www-data:rwX -m u:`whoami`:rwx app/cache app/logs
# -d sets the *default* ACL so newly created files inherit the permissions
sudo setfacl -dR -m u:www-data:rwx -m u:`whoami`:rwx app/cache app/logs
# NOTE(review): the next two lines name www-data twice and largely repeat
# the two above — presumably harmless duplication; confirm before pruning.
sudo setfacl -R -m u:www-data:rwX -m u:www-data:rwx app/cache app/logs
sudo setfacl -dR -m u:www-data:rwx -m u:www-data:rwx app/cache app/logs
sudo -u www-data php app/console cache:clear --env=prod
sudo -u www-data php app/console cache:warmup --env=prod
# Re-apply the ACLs because the cache rebuild recreated the directories
sudo setfacl -R -m u:www-data:rwX -m u:`whoami`:rwx app/cache app/logs
sudo setfacl -dR -m u:www-data:rwx -m u:`whoami`:rwx app/cache app/logs
sudo setfacl -R -m u:www-data:rwX -m u:www-data:rwx app/cache app/logs
sudo setfacl -dR -m u:www-data:rwx -m u:www-data:rwx app/cache app/logs
sudo chown -R www-data:www-data app/cache
|
Aldor007/mkaciuba.pl
|
cache.sh
|
Shell
|
mit
| 728 |
#! /bin/sh
# Run the configured database pruning command supplied via $PRUNE_SCRIPT.
echo "Pruning the database"
# Add your pruning instructions here
# NOTE(review): $PRUNE_SCRIPT is expanded unquoted, so it word-splits —
# presumably intentional to allow a command plus arguments; confirm.
$PRUNE_SCRIPT
|
ryanisnan/pg-clean-to-s3
|
src/db_prune.sh
|
Shell
|
mit
| 91 |
# remap caps lock (linux)
setxkbmap -option caps:ctrl_modifier
# Install prerequisites, then run the dev-laptop Ansible playbook.
sudo apt install ansible curl
# NOTE(review): --ask-sudo-pass is deprecated in newer Ansible releases
# (replaced by --ask-become-pass); confirm the installed version accepts it.
cd playbooks;ansible-playbook --ask-sudo-pass dev-laptop.yml
|
ejosafat/dotfiles
|
setup_ubuntu.sh
|
Shell
|
mit
| 157 |
#!/bin/bash
# Bootstrap the app: create a virtualenv, install requirements, write the
# local configuration, and initialise the database with an admin user.
BGreen='\e[1;32m'   # bold-green ANSI escape (interpreted by echo -e)
normal=$(tput sgr0) # reset terminal attributes
echo -e "${BGreen}"
echo -e "================================"
echo -e "==  1/4 INSTALL ENVIRONMENT  =="
echo -e "================================"
echo -e "\033[0m${normal}"
virtualenv -p /usr/bin/python2.7 venv
echo -e "${BGreen}"
echo -e "================================"
echo -e "==  2/4 INSTALL REQUIREMENTS =="
echo -e "================================"
echo -e "\033[0m${normal}"
source venv/bin/activate
pip2.7 install -r requirements.txt
bower install
echo -e "${BGreen}"
echo -e "================================"
echo -e "==  3/4 CONFIGURATION        =="
echo -e "================================"
echo -e "\033[0m${normal}"
cp configuration/example.py configuration/local.py
echo -n "Enter your GMAIL_USERNAME [Enter]: "
read GMAIL_USERNAME
# Fix vs original: `echo \`export VAR=...\`` ran the export inside a
# command-substitution subshell, so the variable was never exported to
# later child processes (and echo printed nothing). Export in this shell.
export GMAIL_USERNAME
echo -n "Enter your GMAIL_PASSWORD [Enter]: "
read GMAIL_PASSWORD
export GMAIL_PASSWORD
echo -e "${BGreen}"
echo -e "================================"
echo -e "==  4/4 INITIALIZATION DB    =="
echo -e "================================"
echo -e "\033[0m${normal}"
python2.7 manage.py db init
python2.7 manage.py db migrate
python2.7 manage.py db upgrade
python2.7 manage.py create_role --name 'admin'
python2.7 manage.py create_user [email protected] --password=password
python2.7 manage.py add_role --user [email protected] --role admin
|
shepilov-vladislav/Flask-Restless-Restangular-with-JWT-auth
|
bootstrap.sh
|
Shell
|
mit
| 1,423 |
#!/usr/bin/env bash
# Experiment run 87: launch four NAF cartpole variants in parallel (CPU
# only), each logging checkpoints/events/stdout/stderr under runs/87/<name>.
set -ex
export CUDA_VISIBLE_DEVICES="" # ensure running on cpu
R=runs/87
mkdir -p $R/{repro1,repro2,bn,3repeats}
# exactly repros run_86
export RR=$R/repro1
nice ./naf_cartpole.py --action-force=100 \
--action-repeats=2 --steps-per-repeat=5 \
--optimiser=Momentum --optimiser-args='{"learning_rate": 0.0001, "momentum": 0.9}' \
--ckpt-dir=$RR/ckpts --event-log-out=$RR/events >$RR/out 2>$RR/err &
# switchs to 6 repeats, instead of 5, so that's 12 total (like the 3x4 one below)
export RR=$R/repro2
nice ./naf_cartpole.py --action-force=100 \
--action-repeats=2 --steps-per-repeat=6 \
--optimiser=Momentum --optimiser-args='{"learning_rate": 0.0001, "momentum": 0.9}' \
--ckpt-dir=$RR/ckpts --event-log-out=$RR/events >$RR/out 2>$RR/err &
# with batch norm
export RR=$R/bn
nice ./naf_cartpole.py --action-force=100 \
--action-repeats=2 --steps-per-repeat=6 \
--use-batch-norm \
--optimiser=Momentum --optimiser-args='{"learning_rate": 0.0001, "momentum": 0.9}' \
--ckpt-dir=$RR/ckpts --event-log-out=$RR/events >$RR/out 2>$RR/err &
# with 3 action repeats (but still 12 total steps)
export RR=$R/3repeats
nice ./naf_cartpole.py --action-force=100 \
--action-repeats=3 --steps-per-repeat=4 \
--optimiser=Momentum --optimiser-args='{"learning_rate": 0.0001, "momentum": 0.9}' \
--ckpt-dir=$RR/ckpts --event-log-out=$RR/events >$RR/out 2>$RR/err &
|
matpalm/cartpoleplusplus
|
exps/run_87.sh
|
Shell
|
mit
| 1,372 |
#!/bin/bash
# This was downloaded from http://deb.nodesource.com/setup_7.x
# Thanks to https://bugs.launchpad.net/ubuntu/+source/pycurl/+bug/1111673,
# we need to work with http:// deb locations instead of https://
# Discussion, issues and change requests at:
# https://github.com/nodesource/distributions
#
# Script to install the NodeSource Node.js v7.x repo onto a
# Debian or Ubuntu system.
#
# Run as root or insert `sudo -E` before `bash`:
#
# curl -sL http://deb.nodesource.com/setup_7.x | bash -
# or
# wget -qO- http://deb.nodesource.com/setup_7.x | bash -
#
export DEBIAN_FRONTEND=noninteractive
# Identifiers for the Node.js v7.x NodeSource channel used throughout setup().
SCRSUFFIX="_7.x"
NODENAME="Node.js v7.x"
NODEREPO="node_7.x"
NODEPKG="nodejs"
print_status() {
    # Emit a "## message" banner surrounded by blank lines.
    printf '\n## %s\n\n' "$1"
}
# Define colour/formatting escape variables, but only when stdout is a
# terminal reporting at least 8 colours; otherwise they stay unset and
# expand to empty strings wherever they are used below.
if test -t 1; then # if terminal
ncolors=$(which tput > /dev/null && tput colors) # supports color
if test -n "$ncolors" && test $ncolors -ge 8; then
termcols=$(tput cols)
bold="$(tput bold)"
underline="$(tput smul)"
standout="$(tput smso)"
normal="$(tput sgr0)"
black="$(tput setaf 0)"
red="$(tput setaf 1)"
green="$(tput setaf 2)"
yellow="$(tput setaf 3)"
blue="$(tput setaf 4)"
magenta="$(tput setaf 5)"
cyan="$(tput setaf 6)"
white="$(tput setaf 7)"
fi
fi
# Print a prominent red-framed banner with a bold yellow title.
# $1: title line; $2: pre-formatted body text (may embed escape variables).
# Uses the colour variables defined above; they expand to empty strings on
# non-colour terminals.
print_bold() {
title="$1"
text="$2"
echo
echo "${red}================================================================================${normal}"
echo "${red}================================================================================${normal}"
echo
echo -e " ${bold}${yellow}${title}${normal}"
echo
echo -en " ${text}"
echo
echo "${red}================================================================================${normal}"
echo "${red}================================================================================${normal}"
}
bail() {
    # Abort the entire script with a generic failure message.
    printf '%s\n' 'Error executing command, exiting'
    exit 1
}
exec_cmd_nobail() {
    # Log the command line, then execute it in a child bash.
    # Returns the child's exit status; never aborts the script.
    local cmd="$1"
    echo "+ $cmd"
    bash -c "$cmd"
}
# Run a command via exec_cmd_nobail and abort the whole script (bail) if
# it returns non-zero.
exec_cmd() {
exec_cmd_nobail "$1" || bail
}
# Warn (with a pause so the user can abort) when the selected NODENAME is
# a deprecated or end-of-life release; no-op for supported versions.
node_deprecation_warning() {
if [[ "X${NODENAME}" == "Xio.js v1.x" ||
"X${NODENAME}" == "Xio.js v2.x" ||
"X${NODENAME}" == "Xio.js v3.x" ||
"X${NODENAME}" == "XNode.js v5.x" ]]; then
# Fully unsupported lines: longest pause before continuing.
print_bold \
" DEPRECATION WARNING " "\
${bold}${NODENAME} is no longer actively supported!${normal}
${bold}You will not receive security or critical stability updates${normal} for this version.
You should migrate to a supported version of Node.js as soon as possible.
Use the installation script that corresponds to the version of Node.js you
wish to install. e.g.
* ${green}http://deb.nodesource.com/setup_4.x — Node.js v4 LTS \"Argon\"${normal} (recommended)
* ${green}http://deb.nodesource.com/setup_6.x — Node.js v6 Current${normal}
Please see ${bold}https://github.com/nodejs/LTS/${normal} for details about which version
may be appropriate for you.
The ${bold}NodeSource${normal} Node.js Linux distributions GitHub repository contains
information about which versions of Node.js and which Linux distributions
are supported and how to use the install scripts.
${bold}https://github.com/nodesource/distributions${normal}
"
echo
echo "Continuing in 10 seconds ..."
echo
sleep 10
elif [ "X${NODENAME}" == "XNode.js v0.10" ]; then
# v0.10: support ends October 2016 per the message below.
print_bold \
" NODE.JS v0.10 DEPRECATION WARNING " "\
Node.js v0.10 will cease to be actively supported in ${bold}October 2016${normal}.
This means you will not continue to receive security or critical stability
updates for this version of Node.js beyond that time.
You should begin migration to a newer version of Node.js as soon as
possible. Use the installation script that corresponds to the version of
Node.js you wish to install. e.g.
* ${green}http://deb.nodesource.com/setup_4.x — Node.js v4 LTS \"Argon\"${normal} (recommended)
* ${green}http://deb.nodesource.com/setup_6.x — Node.js v6 Current${normal}
Please see ${bold}https://github.com/nodejs/LTS/${normal} for details about which version
may be appropriate for you.
The ${bold}NodeSource${normal} Node.js Linux distributions GitHub repository contains
information about which versions of Node.js and which Linux distributions
are supported and how to use the install scripts.
${bold}https://github.com/nodesource/distributions${normal}
"
echo
echo "Continuing in 5 seconds ..."
echo
sleep 5
elif [ "X${NODENAME}" == "XNode.js v0.12" ]; then
# v0.12: support ends at the end of 2016 per the message below.
print_bold \
" NODE.JS v0.12 DEPRECATION WARNING " "\
Node.js v0.12 will cease to be actively supported ${bold}at the end of 2016${normal}.
This means you will not continue to receive security or critical stability
updates for this version of Node.js beyond that time.
You should begin migration to a newer version of Node.js as soon as
possible. Use the installation script that corresponds to the version of
Node.js you wish to install. e.g.
* ${green}http://deb.nodesource.com/setup_4.x — Node.js v4 LTS \"Argon\"${normal} (recommended)
* ${green}http://deb.nodesource.com/setup_6.x — Node.js v6 Current${normal}
Please see ${bold}https://github.com/nodejs/LTS/${normal} for details about which version
may be appropriate for you.
The ${bold}NodeSource${normal} Node.js Linux distributions GitHub repository contains
information about which versions of Node.js and which Linux distributions
are supported and how to use the install scripts.
${bold}https://github.com/nodesource/distributions${normal}
"
echo
echo "Continuing in 3 seconds ..."
echo
sleep 3
fi
}
# Warn when this script was fetched via the suffix-less legacy URL
# (SCRSUFFIX empty), which is being retired; pauses 10s so the user can
# Ctrl-C. No-op for versioned script URLs.
script_deprecation_warning() {
if [ "X${SCRSUFFIX}" == "X" ]; then
print_bold \
" SCRIPT DEPRECATION WARNING " "\
This script, located at ${bold}http://deb.nodesource.com/setup${normal}, used to
install Node.js v0.10, is being deprecated and will eventually be made
inactive.
You should use the script that corresponds to the version of Node.js you
wish to install. e.g.
* ${green}http://deb.nodesource.com/setup_4.x — Node.js v4 LTS \"Argon\"${normal} (recommended)
* ${green}http://deb.nodesource.com/setup_6.x — Node.js v6 Current${normal}
Please see ${bold}https://github.com/nodejs/LTS/${normal} for details about which version
may be appropriate for you.
The ${bold}NodeSource${normal} Node.js Linux distributions GitHub repository contains
information about which versions of Node.js and which Linux distributions
are supported and how to use the install scripts.
${bold}https://github.com/nodesource/distributions${normal}
"
echo
echo "Continuing in 10 seconds (press Ctrl-C to abort) ..."
echo
sleep 10
fi
}
setup() {
# Install the NodeSource apt repo: detect the distro (mapping derivatives
# to their Debian/Ubuntu base), verify the repo supports it, add the
# signing key and sources list, then apt-get update.
script_deprecation_warning
print_status "Installing the NodeSource ${NODENAME} repo..."
# Quirk: `if $(cmd)` runs the (empty) output as a command; the status of
# the substitution itself decides the branch, so this works as an ARMv6 check.
if $(uname -m | grep -Eq ^armv6); then
print_status "You appear to be running on ARMv6 hardware. Unfortunately this is not currently supported by the NodeSource Linux distributions. Please use the 'linux-armv6l' binary tarballs available directly from nodejs.org for Node.js v4 and later."
exit 1
fi
PRE_INSTALL_PKGS=""
# Check that HTTPS transport is available to APT
# (Check snaked from: https://get.docker.io/ubuntu/)
if [ ! -e /usr/lib/apt/methods/https ]; then
PRE_INSTALL_PKGS="${PRE_INSTALL_PKGS} apt-transport-https"
fi
if [ ! -x /usr/bin/lsb_release ]; then
PRE_INSTALL_PKGS="${PRE_INSTALL_PKGS} lsb-release"
fi
if [ ! -x /usr/bin/curl ] && [ ! -x /usr/bin/wget ]; then
PRE_INSTALL_PKGS="${PRE_INSTALL_PKGS} curl"
fi
# Populating Cache
print_status "Populating apt-get cache..."
exec_cmd 'apt-get update'
if [ "X${PRE_INSTALL_PKGS}" != "X" ]; then
print_status "Installing packages required for setup:${PRE_INSTALL_PKGS}..."
# This next command needs to be redirected to /dev/null or the script will bork
# in some environments
exec_cmd "apt-get install -y${PRE_INSTALL_PKGS} > /dev/null 2>&1"
fi
# Refuse to run on pre-release Ubuntu (repo dirs only exist for releases).
IS_PRERELEASE=$(lsb_release -d | grep 'Ubuntu .*development' >& /dev/null; echo $?)
if [[ $IS_PRERELEASE -eq 0 ]]; then
print_status "Your distribution, identified as \"$(lsb_release -d -s)\", is a pre-release version of Ubuntu. NodeSource does not maintain official support for Ubuntu versions until they are formally released. You can try using the manual installation instructions available at https://github.com/nodesource/distributions and use the latest supported Ubuntu version name as the distribution identifier, although this is not guaranteed to work."
exit 1
fi
DISTRO=$(lsb_release -c -s)
# Map derivative-distro codenames onto the base Debian/Ubuntu codename the
# NodeSource repo actually publishes.
check_alt() {
if [ "X${DISTRO}" == "X${2}" ]; then
echo
echo "## You seem to be using ${1} version ${DISTRO}."
echo "## This maps to ${3} \"${4}\"... Adjusting for you..."
DISTRO="${4}"
fi
}
check_alt "Kali" "sana" "Debian" "jessie"
check_alt "Kali" "kali-rolling" "Debian" "jessie"
check_alt "Debian" "stretch" "Debian" "jessie"
check_alt "Linux Mint" "maya" "Ubuntu" "precise"
check_alt "Linux Mint" "qiana" "Ubuntu" "trusty"
check_alt "Linux Mint" "rafaela" "Ubuntu" "trusty"
check_alt "Linux Mint" "rebecca" "Ubuntu" "trusty"
check_alt "Linux Mint" "rosa" "Ubuntu" "trusty"
check_alt "Linux Mint" "sarah" "Ubuntu" "xenial"
check_alt "LMDE" "betsy" "Debian" "jessie"
check_alt "elementaryOS" "luna" "Ubuntu" "precise"
check_alt "elementaryOS" "freya" "Ubuntu" "trusty"
check_alt "elementaryOS" "loki" "Ubuntu" "xenial"
check_alt "Trisquel" "toutatis" "Ubuntu" "precise"
check_alt "Trisquel" "belenos" "Ubuntu" "trusty"
check_alt "BOSS" "anokha" "Debian" "wheezy"
check_alt "bunsenlabs" "bunsen-hydrogen" "Debian" "jessie"
check_alt "Tanglu" "chromodoris" "Debian" "jessie"
if [ "X${DISTRO}" == "Xdebian" ]; then
print_status "Unknown Debian-based distribution, checking /etc/debian_version..."
NEWDISTRO=$([ -e /etc/debian_version ] && cut -d/ -f1 < /etc/debian_version)
if [ "X${NEWDISTRO}" == "X" ]; then
print_status "Could not determine distribution from /etc/debian_version..."
else
DISTRO=$NEWDISTRO
print_status "Found \"${DISTRO}\" in /etc/debian_version..."
fi
fi
# A release exists iff the repo serves a Release file for this codename.
print_status "Confirming \"${DISTRO}\" is supported..."
if [ -x /usr/bin/curl ]; then
exec_cmd_nobail "curl -sLf -o /dev/null 'http://deb.nodesource.com/${NODEREPO}/dists/${DISTRO}/Release'"
RC=$?
else
exec_cmd_nobail "wget -qO /dev/null -o /dev/null 'http://deb.nodesource.com/${NODEREPO}/dists/${DISTRO}/Release'"
RC=$?
fi
if [[ $RC != 0 ]]; then
print_status "Your distribution, identified as \"${DISTRO}\", is not currently supported, please contact NodeSource at https://github.com/nodesource/distributions/issues if you think this is incorrect or would like your distribution to be considered for support"
exit 1
fi
# Remove the conflicting chris-lea PPA if it is configured.
if [ -f "/etc/apt/sources.list.d/chris-lea-node_js-$DISTRO.list" ]; then
print_status 'Removing Launchpad PPA Repository for NodeJS...'
exec_cmd_nobail 'add-apt-repository -y -r ppa:chris-lea/node.js'
exec_cmd "rm -f /etc/apt/sources.list.d/chris-lea-node_js-${DISTRO}.list"
fi
print_status 'Adding the NodeSource signing key to your keyring...'
if [ -x /usr/bin/curl ]; then
exec_cmd 'curl -s http://deb.nodesource.com/gpgkey/nodesource.gpg.key | apt-key add -'
else
exec_cmd 'wget -qO- http://deb.nodesource.com/gpgkey/nodesource.gpg.key | apt-key add -'
fi
print_status "Creating apt sources list file for the NodeSource ${NODENAME} repo..."
exec_cmd "echo 'deb http://deb.nodesource.com/${NODEREPO} ${DISTRO} main' > /etc/apt/sources.list.d/nodesource.list"
exec_cmd "echo 'deb-src http://deb.nodesource.com/${NODEREPO} ${DISTRO} main' >> /etc/apt/sources.list.d/nodesource.list"
print_status 'Running `apt-get update` for you...'
exec_cmd 'apt-get update'
node_deprecation_warning
print_status "Run \`apt-get install ${NODEPKG}\` (as root) to install ${NODENAME} and npm"
}
## Defer setup until we have the complete script
# (all work happens inside setup(), called only once the file is fully parsed)
setup
|
journeymonitor/infra
|
puppet/modules/nodejs/files/opt/puppet/install/nodesource_nodejs_setup_7.x.sh
|
Shell
|
mit
| 12,387 |
#!/bin/bash
# Run the test suite under coverage and emit an XML report.
set -e
python bootstrap
source bin/activate
pip install coverage
coverage erase
# Only measure code under the migrations package
coverage run '--include=migrations/**' tests/testrunner.py
coverage xml
deactivate || : # virtualenv
|
qmagico/gae-migrations
|
coverage.sh
|
Shell
|
mit
| 195 |
#!/usr/bin/env bash
# Upload generated docs into the gh-pages branch of the api repo.
# api-repos-location: /Users/leeight/local/leeight.github.com/edp-cli/api
# bash upload.sh localDocDir serverDocDir apiReposDir
set -x
LOCAL_DOC_DIR="$1"      # freshly generated docs to publish
SERVER_DOC_DIR="$2"     # destination path inside the repo
API_REPOS_LOCATION="$3" # local checkout of the gh-pages repo
cd "${API_REPOS_LOCATION}"
# Sync gh-pages with the remote before swapping the doc directory in.
git checkout gh-pages
git reset --hard origin/gh-pages
git pull origin gh-pages
# Fix vs original: quote the $(dirname …) result so paths containing
# spaces do not word-split.
mkdir -p "$(dirname "$SERVER_DOC_DIR")"
# Replace any previous copy of the docs wholesale.
[ -e "${SERVER_DOC_DIR}" ] && git rm -r "${SERVER_DOC_DIR}"
mv "${LOCAL_DOC_DIR}" "${SERVER_DOC_DIR}"
git add .
git commit -a -m "Add ${SERVER_DOC_DIR} and auto commit"
git push origin gh-pages
|
ecomfe/edp-webhooks
|
base/upload.sh
|
Shell
|
mit
| 617 |
#!/bin/bash
# Build the package, install it into a throwaway virtualenv, and run the
# test suite against local Redis and Redis Cluster instances.
# $1: package file extension to test (whl or tar.gz).
set -e
SUFFIX=$1
if [ -z ${SUFFIX} ]; then
echo "Supply valid python package extension such as whl or tar.gz. Exiting."
exit 3
fi
script=`pwd`/${BASH_SOURCE[0]}
HERE=`dirname ${script}`
ROOT=`realpath ${HERE}/../..`
cd ${ROOT}
DESTENV=${ROOT}/.venvforinstall
# Recreate the staging virtualenv from scratch on every run
if [ -d ${DESTENV} ]; then
rm -rf ${DESTENV}
fi
python -m venv ${DESTENV}
source ${DESTENV}/bin/activate
pip install --upgrade --quiet pip
pip install --quiet -r dev_requirements.txt
invoke devenv
invoke package
# find packages
PKG=`ls ${ROOT}/dist/*.${SUFFIX}`
ls -l ${PKG}
# Copy the tests out of the source tree so they exercise the *installed*
# package rather than the checkout
TESTDIR=${ROOT}/STAGETESTS
if [ -d ${TESTDIR} ]; then
rm -rf ${TESTDIR}
fi
mkdir ${TESTDIR}
cp -R ${ROOT}/tests ${TESTDIR}/tests
cd ${TESTDIR}
# install, run tests
pip install ${PKG}
# Redis tests
pytest -m 'not onlycluster'
# RedisCluster tests
CLUSTER_URL="redis://localhost:16379/0"
pytest -m 'not onlynoncluster and not redismod' --redis-url=${CLUSTER_URL}
|
alisaifee/redis-py
|
.github/workflows/install_and_test.sh
|
Shell
|
mit
| 930 |
#!/bin/bash
# Fetch McBopomofo, build its data.txt, and copy it (plus the upstream
# commit hash that produced it) into ./data.
[ -z $MAKE ] && MAKE=make
[ -z $GIT ] && GIT=git
[ -z $MC_BOPOMOFO_REPO ] &&\
MC_BOPOMOFO_REPO=git://github.com/OpenVanilla/McBopomofo.git
# NOTE(review): ${TMPDIR} is concatenated without a separator below, so it
# must end with a slash (the "./" default does); a plain "/tmp" from the
# environment would yield "/tmpMcBopomofo" — confirm callers.
[ -z ${TMPDIR} ] && TMPDIR=./
echo Pulling McBopomofo...
rm -rf ${TMPDIR}McBopomofo
${GIT} clone --depth=1 ${MC_BOPOMOFO_REPO} ${TMPDIR}McBopomofo
echo
echo Generate data.txt...
${MAKE} -C ${TMPDIR}McBopomofo/Source/Data data.txt
echo
echo Copying data.txt and cleaning up...
mkdir -p ./data
cp ${TMPDIR}/McBopomofo/Source/Data/data.txt ./data/
# Record which upstream commit produced this data.txt
${GIT} --git-dir=${TMPDIR}McBopomofo/.git log -n 1 --format=%H > \
./data/data-commit-hash
rm -rf ${TMPDIR}McBopomofo
|
timdream/jszhuyin
|
build/pull-mcbopomofo-data.sh
|
Shell
|
mit
| 621 |
# Path to the CoNLL-formatted Penn Treebank data (token-level scode variant)
# used as the parser corpus.
export CONLL_PATH=~/uparse/data/nlp/treebank/treebank-2.0/combined/conll_token_scode
# Run the release build of ai-parse in "optimize" mode on that corpus.
# NOTE(review): the semantics of -l 50, the -e feature pattern and
# -o scode_token are defined by ai-parse itself - confirm against its usage
# output.
dist/Release/GNU-MacOSX/ai-parse -s optimize -p $CONLL_PATH -l 50 -e p-1v_p0v_p1v_c-1v_c0v_c1v_tl -o scode_token
|
hsensoy/ai-parse
|
run_token.sh
|
Shell
|
mit
| 198 |
#!/bin/bash
# Post-install hook: install the front-end toolchain globally, then fetch the
# client's Bower dependencies and produce an optimized Polymer build.
npm i -g polymer-cli bower
cd client
bower install
polymer build
|
salimkayabasi/cherry
|
install/post-install.sh
|
Shell
|
mit
| 77 |
#!/bin/sh
# i3bar status command: emit the i3bar JSON protocol header and the opening
# of its (endless) array of status updates, then replace this process with
# conky, whose ~/.conky/conkyi3 config is expected to print the remaining
# JSON body blocks. conky's stderr is silenced.
echo '{"version":1}[[],'
exec conky -c ~/.conky/conkyi3 2>/dev/null
|
memes/home
|
.config/i3/primary_status.sh
|
Shell
|
mit
| 78 |
#!/bin/sh
# CocoaPods-generated resource-copying build phase: compiles/copies each pod's
# resources into the built product. Regenerated by `pod install`.
set -e
mkdir -p "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
# Manifest of plain files to rsync at the end; emptied (truncated) up front.
RESOURCES_TO_COPY=${PODS_ROOT}/resources-to-copy-${TARGETNAME}.txt
> "$RESOURCES_TO_COPY"
# Collected .xcassets paths for a single actool invocation at the end.
# NOTE(review): arrays are a bashism under #!/bin/sh - this relies on the
# build host's sh being bash-compatible.
XCASSET_FILES=()
realpath() {
  # Make $1 absolute: canonicalise its directory part via cd/pwd, then
  # re-attach the unmodified file name.
  FILENAME=${1##*/}
  DIRECTORY=$(cd "${1%/*}" && pwd)
  echo "$DIRECTORY/$FILENAME"
}
# Process one pod resource ($1, a path relative to PODS_ROOT unless absolute):
# compilable types are run through the matching Xcode tool, .xcassets are
# queued for a single actool pass, everything else is appended to the rsync
# manifest in $RESOURCES_TO_COPY.
install_resource()
{
case $1 in
# Storyboards are compiled to .storyboardc with ibtool.
*.storyboard)
echo "ibtool --reference-external-strings-file --errors --warnings --notices --output-format human-readable-text --compile ${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$1\" .storyboard`.storyboardc ${PODS_ROOT}/$1 --sdk ${SDKROOT}"
ibtool --reference-external-strings-file --errors --warnings --notices --output-format human-readable-text --compile "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$1\" .storyboard`.storyboardc" "${PODS_ROOT}/$1" --sdk "${SDKROOT}"
;;
# XIBs are compiled to .nib with ibtool.
*.xib)
echo "ibtool --reference-external-strings-file --errors --warnings --notices --output-format human-readable-text --compile ${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$1\" .xib`.nib ${PODS_ROOT}/$1 --sdk ${SDKROOT}"
ibtool --reference-external-strings-file --errors --warnings --notices --output-format human-readable-text --compile "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$1\" .xib`.nib" "${PODS_ROOT}/$1" --sdk "${SDKROOT}"
;;
# Frameworks are copied wholesale into the frameworks folder.
*.framework)
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
echo "rsync -av ${PODS_ROOT}/$1 ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
rsync -av "${PODS_ROOT}/$1" "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
;;
# Core Data models compile to .mom / .momd; mapping models to .cdm.
# NOTE(review): the echoed momc command below omits the .xcdatamodel suffix
# strip that the executed command performs - cosmetic log mismatch only.
*.xcdatamodel)
echo "xcrun momc \"${PODS_ROOT}/$1\" \"${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1"`.mom\""
xcrun momc "${PODS_ROOT}/$1" "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1" .xcdatamodel`.mom"
;;
*.xcdatamodeld)
echo "xcrun momc \"${PODS_ROOT}/$1\" \"${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1" .xcdatamodeld`.momd\""
xcrun momc "${PODS_ROOT}/$1" "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1" .xcdatamodeld`.momd"
;;
*.xcmappingmodel)
echo "xcrun mapc \"${PODS_ROOT}/$1\" \"${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1" .xcmappingmodel`.cdm\""
xcrun mapc "${PODS_ROOT}/$1" "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1" .xcmappingmodel`.cdm"
;;
# Asset catalogs are only queued here; compiled once at the end.
*.xcassets)
  ABSOLUTE_XCASSET_FILE=$(realpath "${PODS_ROOT}/$1")
  XCASSET_FILES+=("$ABSOLUTE_XCASSET_FILE")
;;
# Absolute paths go into the manifest as-is ...
/*)
echo "$1"
echo "$1" >> "$RESOURCES_TO_COPY"
;;
# ... everything else relative to PODS_ROOT.
*)
echo "${PODS_ROOT}/$1"
echo "${PODS_ROOT}/$1" >> "$RESOURCES_TO_COPY"
;;
esac
}
# Register each pod's resources for the active build configuration.
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_resource "XLForm/XLForm/XLForm.bundle"
install_resource "libHN/Source/hn.json"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_resource "XLForm/XLForm/XLForm.bundle"
install_resource "libHN/Source/hn.json"
fi
# Bulk-copy every manifest entry into the product (and, on `install`
# actions, into the archive's install dir too).
mkdir -p "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
rsync -avr --copy-links --no-relative --exclude '*/.svn/*' --files-from="$RESOURCES_TO_COPY" / "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
if [[ "${ACTION}" == "install" ]]; then
mkdir -p "${INSTALL_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
rsync -avr --copy-links --no-relative --exclude '*/.svn/*' --files-from="$RESOURCES_TO_COPY" / "${INSTALL_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
fi
rm -f "$RESOURCES_TO_COPY"
# Compile all queued asset catalogs in one actool run, targeting the devices
# implied by TARGETED_DEVICE_FAMILY.
if [[ -n "${WRAPPER_EXTENSION}" ]] && [ "`xcrun --find actool`" ] && [ -n "$XCASSET_FILES" ]
then
case "${TARGETED_DEVICE_FAMILY}" in
1,2)
TARGET_DEVICE_ARGS="--target-device ipad --target-device iphone"
;;
1)
TARGET_DEVICE_ARGS="--target-device iphone"
;;
2)
TARGET_DEVICE_ARGS="--target-device ipad"
;;
*)
TARGET_DEVICE_ARGS="--target-device mac"
;;
esac
# Find all other xcassets (this unfortunately includes those of path pods and other targets).
OTHER_XCASSETS=$(find "$PWD" -iname "*.xcassets" -type d)
while read line; do
# NOTE(review): the quoted right-hand side disables [[ ]] pattern matching,
# so the trailing * is literal and this Pods-exclusion test can never match;
# looks like an upstream bug - confirm before changing generated code.
if [[ $line != "`realpath $PODS_ROOT`*" ]]; then
XCASSET_FILES+=("$line")
fi
done <<<"$OTHER_XCASSETS"
printf "%s\0" "${XCASSET_FILES[@]}" | xargs -0 xcrun actool --output-format human-readable-text --notices --warnings --platform "${PLATFORM_NAME}" --minimum-deployment-target "${IPHONEOS_DEPLOYMENT_TARGET}" ${TARGET_DEVICE_ARGS} --compress-pngs --compile "${BUILT_PRODUCTS_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
fi
|
iOSTestApps/News-YC---iPhone
|
Pods/Target Support Files/Pods/Pods-resources.sh
|
Shell
|
mit
| 4,863 |
#!/bin/bash -e
################################################################################
##  File:  mysql.sh
##  Desc:  Installs MySQL client, server and dev headers; pre-seeds the root
##         password; leaves the server stopped and disabled
################################################################################
export ACCEPT_EULA=Y
# Mysql setting up root password
# Pre-answer the debconf prompts so the server install is non-interactive.
# NOTE(review): root/root credentials - acceptable only for disposable CI
# images.
MYSQL_ROOT_PASSWORD=root
echo "mysql-server mysql-server/root_password password $MYSQL_ROOT_PASSWORD" | debconf-set-selections
echo "mysql-server mysql-server/root_password_again password $MYSQL_ROOT_PASSWORD" | debconf-set-selections
# Install MySQL Client
apt-get install mysql-client -y
# Install MySQL Server
apt-get install -y mysql-server
#Install MySQL Dev tools
apt install libmysqlclient-dev -y
# Disable mysql.service
# Stop the server if running and keep it from starting at boot; consumers
# are expected to start it explicitly when needed.
systemctl is-active --quiet mysql.service && systemctl stop mysql.service
systemctl disable mysql.service
# Run the image's validation suite for this component (helper presumably
# provided by the surrounding image-build framework - confirm).
invoke_tests "Databases" "MySQL"
|
mattgwagner/New-Machine
|
.github-actions/images/linux/scripts/installers/mysql.sh
|
Shell
|
mit
| 858 |
#!/bin/bash
# Read the current temperature from the first DS18B20 1-wire sensor, log it,
# and publish it to the MQTT broker. Silently returns on short/failed reads.
read_temperature() {
    # Raw reading is millidegrees Celsius, e.g. "t=23750" -> "23750".
    temperature=$(cat /sys/bus/w1/devices/28-*/w1_slave | grep -E 't=\w{3,6}' | grep -oE '\w{3,6}')
    # Need at least 4 digits to place the decimal point; bail out otherwise.
    if [ ${#temperature} -lt 4 ]; then
        return
    fi
    # Insert the decimal point three digits from the right: 23750 -> 23.750
    # (shell arithmetic instead of the slower, deprecated `expr`).
    dot=$(( ${#temperature} - 3 ))
    result=${temperature:0:$dot}.${temperature:$dot}
    logger -t "smartaquarium" "Current temperature: $resultºC"
    # NOTE(review): broker credentials are hard-coded; consider moving them
    # to the environment or a config file.
    mosquitto_pub -h m11.cloudmqtt.com -p 15347 -u temperature_sensor -P temperaturesensor -t smartaquarium/sensor/temperature/level -m "$result"
}
# Poll the sensor forever, publishing one reading every 10 seconds.
main() {
while [ 1 ]; do
read_temperature
sleep 10
done
}
main
|
uilianries/SmartAquarium
|
script/sensor/smartaquarium-temperature-sensor.sh
|
Shell
|
mit
| 552 |
#!/bin/bash
# Provision a MySQL server on a Debian/Ubuntu host.
sudo apt-get update
sudo apt-get -y install mysql-server
# TODO: use sed to replace the bind-address value in /etc/mysql/my.cnf with
# this host's public IP address so that remote clients can connect.
|
jhajek/db-samples
|
mysql-server-install.sh
|
Shell
|
mit
| 265 |
# Allocate $2 bytes on the C heap via ctypes.sh's dlcall and store the
# resulting pointer token in the variable named by $1.
# Usage: malloc outvar nbytes
function malloc(){
    dlcall -r pointer malloc $size_t:$2
    # printf -v assigns to the named variable without eval, so a hostile
    # value in $1 cannot inject shell code.
    printf -v "$1" '%s' "$DLRETVAL"
}
# Release a pointer token previously produced by malloc().
# Usage: free pointer
function free(){
    dlcall free "$1"
}
|
cemeyer/httpd.sh
|
stdlib.sh
|
Shell
|
mit
| 123 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for RHSA-2016:1851
#
# Security announcement date: 2016-09-12 21:52:32 UTC
# Script generation date: 2017-01-26 21:24:37 UTC
#
# Operating System: Red Hat 6
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - jbcs-httpd24-httpd.x86_64:2.4.6-77.SP1.jbcs.el6
# - jbcs-httpd24-httpd-debuginfo.x86_64:2.4.6-77.SP1.jbcs.el6
# - jbcs-httpd24-httpd-devel.x86_64:2.4.6-77.SP1.jbcs.el6
# - jbcs-httpd24-httpd-src-zip.x86_64:2.4.6-77.SP1.jbcs.el6
# - jbcs-httpd24-httpd-tools.x86_64:2.4.6-77.SP1.jbcs.el6
# - jbcs-httpd24-httpd-zip.x86_64:2.4.6-77.SP1.jbcs.el6
# - jbcs-httpd24-mod_ldap.x86_64:2.4.6-77.SP1.jbcs.el6
# - jbcs-httpd24-mod_proxy_html.x86_64:2.4.6-77.SP1.jbcs.el6
# - jbcs-httpd24-mod_session.x86_64:2.4.6-77.SP1.jbcs.el6
# - jbcs-httpd24-mod_ssl.x86_64:2.4.6-77.SP1.jbcs.el6
#
# Last versions recommanded by security team:
# - jbcs-httpd24-httpd.x86_64:2.4.23-102.jbcs.el6
# - jbcs-httpd24-httpd-debuginfo.x86_64:2.4.23-102.jbcs.el6
# - jbcs-httpd24-httpd-devel.x86_64:2.4.23-102.jbcs.el6
# - jbcs-httpd24-httpd-src-zip.x86_64:2.4.23-102.jbcs.el6
# - jbcs-httpd24-httpd-tools.x86_64:2.4.23-102.jbcs.el6
# - jbcs-httpd24-httpd-zip.x86_64:2.4.23-102.jbcs.el6
# - jbcs-httpd24-mod_ldap.x86_64:2.4.23-102.jbcs.el6
# - jbcs-httpd24-mod_proxy_html.x86_64:2.4.23-102.jbcs.el6
# - jbcs-httpd24-mod_session.x86_64:2.4.23-102.jbcs.el6
# - jbcs-httpd24-mod_ssl.x86_64:2.4.23-102.jbcs.el6
#
# CVE List:
# - CVE-2016-5387
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
# Install the recommended build of every affected package: same ten packages,
# same order, one `yum install` per package, exactly as in the generated
# original.
for package in \
    jbcs-httpd24-httpd \
    jbcs-httpd24-httpd-debuginfo \
    jbcs-httpd24-httpd-devel \
    jbcs-httpd24-httpd-src-zip \
    jbcs-httpd24-httpd-tools \
    jbcs-httpd24-httpd-zip \
    jbcs-httpd24-mod_ldap \
    jbcs-httpd24-mod_proxy_html \
    jbcs-httpd24-mod_session \
    jbcs-httpd24-mod_ssl
do
    sudo yum install "${package}.x86_64-2.4.23" -y
done
|
Cyberwatch/cbw-security-fixes
|
Red_Hat_6/x86_64/2016/RHSA-2016:1851.sh
|
Shell
|
mit
| 2,252 |
#------------------------------------------------------------------------------
# Convert the text files given as arguments to UNIX format by deleting all
# carriage-return (CR) characters in place.
nocr()
{
    if [ $# -eq 0 ]; then
        echo >&2 "Specify a list of files to convert to UNIX text format."
    else
        # "$@" (not $*) so file names containing whitespace survive intact.
        for f in "$@"; do
            # Work on a renamed copy so an interrupted tr cannot truncate
            # the original file.
            mv "$f" "$f.old"
            tr -d '\r' < "$f.old" > "$f"
            rm -f "$f.old"
        done
    fi
}
#------------------------------------------------------------------------------
# xterm, coloured: open a remote xterm over SSH with X forwarding.
#   $1 - "host" or "user@host"
#   $2 - optional background colour (default: black)
#   $3 - optional foreground colour (default: white)
xtc()
{
    BG=${2:-black}
    FG=${3:-white}
    # Build the remote command first, then launch it in the background (-f).
    local remote_cmd="xterm -bg '$BG' -fg '$FG'"
    ssh -X -f "$1" "$remote_cmd"
}
#------------------------------------------------------------------------------
# Implementation of search and isearch.
# Arguments:
#   $1 - caller name, "search" or "isearch" (selects case sensitivity)
#   Remainder: [path/to/][glob] regexp
# With one extra argument, searches '*' below the current directory; with
# two, the first is split into a directory part and a filename glob.
__search()
{
    # Reset on every call: CASE_ARG used to be a leaked global, so a plain
    # "search" issued after any "isearch" silently stayed case-insensitive.
    local CASE_ARG=""
    local DIR GLOB PATTERN
    if [ "$1" = "isearch" ]; then
        CASE_ARG="-i"
    fi
    if [ $# -eq 2 ]; then
        # search 'text'
        DIR=.
        GLOB='*'
        PATTERN=$2
    elif [ $# -eq 3 ]; then
        # search 'path/to/*.java' text
        DIR=$(dirname "$2")
        GLOB=$(basename "$2")
        PATTERN="$3"
    else
        echo "Usage: $0 \'[path/to/][glob]\' \'regexp\'"
        false
        return
    fi
    # CASE_ARG must stay unquoted: when empty it has to vanish entirely.
    find -L "$DIR" -type f -a -iname "$GLOB" -exec egrep $CASE_ARG "$PATTERN" {} /dev/null \;
}
#------------------------------------------------------------------------------
# Search files for matching text.
#   search [path/to/][glob] regexp
#   isearch [path/to/][glob] regexp (case-insensitive variant)
# Both are thin wrappers that forward their own name so __search can pick
# the case-sensitivity mode.
search()
{
    __search "${FUNCNAME[0]}" "$@"
}
isearch()
{
    __search "${FUNCNAME[0]}" "$@"
}
#------------------------------------------------------------------------------
|
dlsnostress/.home
|
bashrc.d/functions.sh
|
Shell
|
mit
| 1,828 |
#!/bin/bash
# Remove the root queueing discipline from the loopback interface, restoring
# default (unshaped) behaviour after a traffic-control experiment.
sudo tc qdisc del dev lo root
|
luiz/modular-calc
|
restore.sh
|
Shell
|
mit
| 43 |
#!/bin/bash
# Run the unit tests inside the project's Electron build.
# Usage: scripts/test.sh [--xvfb] [extra args forwarded to the test runner]
# macOS has no `readlink -f`, so emulate a minimal realpath there.
if [[ "$OSTYPE" == "darwin"* ]]; then
realpath() { [[ $1 = /* ]] && echo "$1" || echo "$PWD/${1#./}"; }
ROOT=$(dirname $(dirname $(realpath "$0")))
else
ROOT=$(dirname $(dirname $(readlink -f $0)))
fi
cd $ROOT
# The Electron binary name and location differ per platform; both are
# derived from product.json.
if [[ "$OSTYPE" == "darwin"* ]]; then
NAME=`node -p "require('./product.json').nameLong"`
CODE="./.build/electron/$NAME.app/Contents/MacOS/Electron"
else
NAME=`node -p "require('./product.json').applicationName"`
CODE=".build/electron/$NAME"
fi
INTENDED_VERSION="v`node -p "require('./package.json').electronVersion"`"
INSTALLED_VERSION=$(cat .build/electron/version 2> /dev/null)
# Node modules
test -d node_modules || ./scripts/npm.sh install
# Get electron
# (Re)download Electron when the binary is missing or the cached version
# differs from the one pinned in package.json.
(test -f "$CODE" && [ $INTENDED_VERSION == $INSTALLED_VERSION ]) || ./node_modules/.bin/gulp electron
# Unit Tests
# --xvfb wraps the run in a virtual X server for headless Linux CI.
if [[ "$1" == "--xvfb" ]]; then
cd $ROOT ; \
xvfb-run "$CODE" test/electron/index.js "$@"
elif [[ "$OSTYPE" == "darwin"* ]]; then
# macOS: raise the file-descriptor limit before launching.
cd $ROOT ; ulimit -n 4096 ; \
"$CODE" \
test/electron/index.js "$@"
else
cd $ROOT ; \
"$CODE" \
test/electron/index.js "$@"
fi
|
hungys/vscode
|
scripts/test.sh
|
Shell
|
mit
| 1,081 |
#! /usr/bin/env zsh
# zsh `foreach` with two loop variables consumes the word list in pairs, so
# (f,g) takes (1,2), (3,4), ... (9,10); each iteration prints "g f ",
# yielding "2 1 4 3 6 5 8 7 10 9 " followed by a final newline.
foreach f g ({1..10}) { echo -n "${g} ${f} " }; echo ""
|
kmhjs/shell_gei
|
src/08/q01.sh
|
Shell
|
mit
| 77 |
#!/bin/sh
#
# Set environment variables so that we can compile rocksdb using
# fbcode settings. It uses the latest g++ compiler and also
# uses jemalloc
# Every third-party dependency below is pinned to an exact snapshot under
# /mnt/gvfs by revision hash; these paths exist only on Facebook's internal
# build hosts. This file is meant to be sourced, not executed.
# location of libgcc
LIBGCC_BASE="/mnt/gvfs/third-party2/libgcc/7712e757d7355cb51292454ee0b7b46a467fdfed/4.8.1/gcc-4.8.1-glibc-2.17/8aac7fc"
LIBGCC_INCLUDE="$LIBGCC_BASE/include"
LIBGCC_LIBS=" -L $LIBGCC_BASE/libs"
# location of glibc
GLIBC_REV=6e40560b4e0b6d690fd1cf8c7a43ad7452b04cfa
GLIBC_INCLUDE="/mnt/gvfs/third-party2/glibc/$GLIBC_REV/2.17/gcc-4.8.1-glibc-2.17/99df8fc/include"
GLIBC_LIBS=" -L /mnt/gvfs/third-party2/glibc/$GLIBC_REV/2.17/gcc-4.8.1-glibc-2.17/99df8fc/lib"
# location of snappy headers and libraries
SNAPPY_INCLUDE=" -I /mnt/gvfs/third-party2/snappy/aef17f6c0b44b4fe408bd06f67c93701ab0a6ceb/1.0.3/gcc-4.8.1-glibc-2.17/43d84e2/include"
SNAPPY_LIBS=" /mnt/gvfs/third-party2/snappy/aef17f6c0b44b4fe408bd06f67c93701ab0a6ceb/1.0.3/gcc-4.8.1-glibc-2.17/43d84e2/lib/libsnappy.a"
# location of zlib headers and libraries
ZLIB_INCLUDE=" -I /mnt/gvfs/third-party2/zlib/25c6216928b4d77b59ddeca0990ff6fe9ac16b81/1.2.5/gcc-4.8.1-glibc-2.17/c3f970a/include"
ZLIB_LIBS=" /mnt/gvfs/third-party2/zlib/25c6216928b4d77b59ddeca0990ff6fe9ac16b81/1.2.5/gcc-4.8.1-glibc-2.17/c3f970a/lib/libz.a"
# location of bzip headers and libraries
BZIP_INCLUDE=" -I /mnt/gvfs/third-party2/bzip2/c9ef7629c2aa0024f7a416e87602f06eb88f5eac/1.0.6/gcc-4.8.1-glibc-2.17/c3f970a/include/"
BZIP_LIBS=" /mnt/gvfs/third-party2/bzip2/c9ef7629c2aa0024f7a416e87602f06eb88f5eac/1.0.6/gcc-4.8.1-glibc-2.17/c3f970a/lib/libbz2.a"
LZ4_REV=065ec7e38fe83329031f6668c43bef83eff5808b
LZ4_INCLUDE=" -I /mnt/gvfs/third-party2/lz4/$LZ4_REV/r108/gcc-4.8.1-glibc-2.17/c3f970a/include"
LZ4_LIBS=" /mnt/gvfs/third-party2/lz4/$LZ4_REV/r108/gcc-4.8.1-glibc-2.17/c3f970a/lib/liblz4.a"
# location of gflags headers and libraries
GFLAGS_INCLUDE=" -I /mnt/gvfs/third-party2/gflags/1ad047a6e6f6673991918ecadc670868205a243a/1.6/gcc-4.8.1-glibc-2.17/c3f970a/include/"
GFLAGS_LIBS=" /mnt/gvfs/third-party2/gflags/1ad047a6e6f6673991918ecadc670868205a243a/1.6/gcc-4.8.1-glibc-2.17/c3f970a/lib/libgflags.a"
# location of jemalloc
JEMALLOC_INCLUDE=" -I /mnt/gvfs/third-party2/jemalloc/c60d854f7824f334195fe7fd34b2bc9057e3c1f9/3.6.0/gcc-4.8.1-glibc-2.17/4d53c6f/include"
JEMALLOC_LIB=" -Wl,--whole-archive /mnt/gvfs/third-party2/jemalloc/c60d854f7824f334195fe7fd34b2bc9057e3c1f9/3.6.0/gcc-4.8.1-glibc-2.17/4d53c6f/lib/libjemalloc.a"
# location of numa
NUMA_REV=829d10dac0230f99cd7e1778869d2adf3da24b65
NUMA_INCLUDE=" -I /mnt/gvfs/third-party2/numa/$NUMA_REV/2.0.8/gcc-4.8.1-glibc-2.17/c3f970a/include/"
NUMA_LIB=" /mnt/gvfs/third-party2/numa/$NUMA_REV/2.0.8/gcc-4.8.1-glibc-2.17/c3f970a/lib/libnuma.a"
# location of libunwind
LIBUNWIND_REV=2c060e64064559905d46fd194000d61592087bdc
LIBUNWIND="/mnt/gvfs/third-party2/libunwind/$LIBUNWIND_REV/1.1/gcc-4.8.1-glibc-2.17/675d945/lib/libunwind.a"
# use Intel SSE support for checksum calculations
export USE_SSE=" -msse -msse4.2 "
BINUTILS="/mnt/gvfs/third-party2/binutils/2aff2e7b474cd3e6ab23495ad1224b7d214b9f8e/2.21.1/centos6-native/da39a3e/bin"
AR="$BINUTILS/ar"
DEPS_INCLUDE="$SNAPPY_INCLUDE $ZLIB_INCLUDE $BZIP_INCLUDE $LZ4_INCLUDE $GFLAGS_INCLUDE $NUMA_INCLUDE"
GCC_BASE="/mnt/gvfs/third-party2/gcc/1ec615e23800f0815d474478ba476a0adc3fe788/4.8.1/centos6-native/cc6c9dc"
STDLIBS="-L $GCC_BASE/lib64"
# Toolchain selection: gcc by default, clang when USE_CLANG is non-empty.
if [ -z "$USE_CLANG" ]; then
# gcc
CC="$GCC_BASE/bin/gcc"
CXX="$GCC_BASE/bin/g++"
CFLAGS="-B$BINUTILS/gold -m64 -mtune=generic"
CFLAGS+=" -I $LIBGCC_INCLUDE -I $GLIBC_INCLUDE"
CFLAGS+=" $DEPS_INCLUDE"
CFLAGS+=" -DROCKSDB_PLATFORM_POSIX -DROCKSDB_FALLOCATE_PRESENT"
CFLAGS+=" -DSNAPPY -DGFLAGS=google -DZLIB -DBZIP2 -DLZ4 -DNUMA"
else
# clang
# clang builds with -nostdinc/-nostdlib and wire up every system include
# path explicitly from the pinned snapshots above.
CLANG_BASE="/mnt/gvfs/third-party2/clang/9ab68376f938992c4eb5946ca68f90c3185cffc8/3.4"
CLANG_INCLUDE="$CLANG_BASE/gcc-4.8.1-glibc-2.17/fb0f730/lib/clang/3.4/include"
CC="$CLANG_BASE/centos6-native/9cefd8a/bin/clang"
CXX="$CLANG_BASE/centos6-native/9cefd8a/bin/clang++"
KERNEL_HEADERS_INCLUDE="/mnt/gvfs/third-party2/kernel-headers/a683ed7135276731065a9d76d3016c9731f4e2f9/3.2.18_70_fbk11_00129_gc8882d0/gcc-4.8.1-glibc-2.17/da39a3e/include/"
CFLAGS="-B$BINUTILS -nostdinc -nostdlib"
CFLAGS+=" -isystem $LIBGCC_BASE/include/c++/4.8.1 "
CFLAGS+=" -isystem $LIBGCC_BASE/include/c++/4.8.1/x86_64-facebook-linux "
CFLAGS+=" -isystem $GLIBC_INCLUDE"
CFLAGS+=" -isystem $LIBGCC_INCLUDE"
CFLAGS+=" -isystem $CLANG_INCLUDE"
CFLAGS+=" -isystem $KERNEL_HEADERS_INCLUDE/linux "
CFLAGS+=" -isystem $KERNEL_HEADERS_INCLUDE "
CFLAGS+=" -Wall -Wno-sign-compare -Wno-unused-variable -Winvalid-pch -Wno-deprecated -Woverloaded-virtual"
CFLAGS+=" $DEPS_INCLUDE"
CFLAGS+=" -DROCKSDB_PLATFORM_POSIX -DROCKSDB_FALLOCATE_PRESENT"
CFLAGS+=" -DSNAPPY -DGFLAGS=google -DZLIB -DBZIP2 -DLZ4 -DNUMA"
CXXFLAGS="$CFLAGS -nostdinc++"
fi
# Link flags: static third-party archives plus the pinned dynamic linker.
EXEC_LDFLAGS=" $SNAPPY_LIBS $ZLIB_LIBS $BZIP_LIBS $LZ4_LIBS $GFLAGS_LIBS $NUMA_LIB"
EXEC_LDFLAGS+=" -Wl,--dynamic-linker,/usr/local/fbcode/gcc-4.8.1-glibc-2.17/lib/ld.so"
EXEC_LDFLAGS+=" -Wl,--no-whole-archive $LIBUNWIND"
PLATFORM_LDFLAGS="$LIBGCC_LIBS $GLIBC_LIBS $STDLIBS -lgcc -lstdc++"
EXEC_LDFLAGS_SHARED="$SNAPPY_LIBS $ZLIB_LIBS $BZIP_LIBS $LZ4_LIBS $GFLAGS_LIBS"
VALGRIND_REV=b2a9f85e4b70cd03abc85a7f3027fbc4cef35bd0
VALGRIND_VER="/mnt/gvfs/third-party2/valgrind/$VALGRIND_REV/3.8.1/gcc-4.8.1-glibc-2.17/c3f970a/bin/"
# Publish the toolchain configuration for the Makefile that sources us.
export CC CXX AR CFLAGS CXXFLAGS EXEC_LDFLAGS EXEC_LDFLAGS_SHARED VALGRIND_VER JEMALLOC_LIB JEMALLOC_INCLUDE
|
talrasha007/node-kv
|
3rd-party/leveldb/leveldb-rocksdb-3.7/build_tools/fbcode_config.sh
|
Shell
|
mit
| 5,507 |
#! /bin/sh
# Run the CBIG license checker on every entry directly inside a directory
# (non-recursive).
#
# Usage: CBIG_check_license_one_folder.sh <directory>
# Requires $CBIG_CODE_DIR to point at the CBIG repository root.
# input should be a directory
path=$1
# for loop all the files and check the license
# Quote every expansion so paths containing spaces survive intact.
for file in "$path"/*
do
echo "$file"
sh "$CBIG_CODE_DIR/setup/check_license/CBIG_check_license_matlab_file.sh" "$file"
clear
done
exit 0;
|
ThomasYeoLab/CBIG
|
setup/check_license/CBIG_check_license_one_folder.sh
|
Shell
|
mit
| 234 |
#!/bin/bash
# Health check for a making-sensor deployment: verifies server reachability,
# disk/load levels, freshness of sensor data in postgres, and new agent-log
# errors; mails a report when anything fails.
##########################
# General parameters
##########################
TIME_THRESHOLD=30
DISK_THRESHOLD=70
LOAD_THRESHOLD=1.5
TIME_NOTICE=120
TMP_FILE=/tmp/smartkids
# NOTE(review): the next line appears to be a redacted e-mail assignment
# (expected something like EMAIL_ADDRESS=...); as written it is a command
# and will fail - restore the original assignment.
[email protected]
MAIL_OPTS=''
TARGET="waag"
##########################
# Host specific parameters
##########################
if [ "${TARGET}" = "waag" ]
then
# set -x
MY_HOST=sensor.waag.org
# SENSORPORT=8090
MY_USER=stefano
SSH_PORT=2234
MQTT_AGENT_LOG='/home/stefano/making-sensor/server/mosquitto-agent/screenlog.0'
# MY_DIR='/Users/SB/Software/code/'
elif [ "${TARGET}" = "local" ]
then
MY_HOST=192.168.56.101
# SENSORPORT=80
MY_USER=vagrant
SSH_PORT=22
MQTT_AGENT_LOG='/var/log/smartkids-agent/smartkids-agent.log'
# MY_DIR='/Users/SB/Software/code/'
else
echo "Unknown server: ${1}" | tee ${TMP_FILE}
cat ${TMP_FILE} | dos2unix | mail ${MAIL_OPTS} -s "SMARTKIDS Test NOT passed" ${EMAIL_ADDRESS}
exit 1
fi
diff_min(){
    # Compute, into the global ELAPSED_MIN, how many whole minutes have
    # passed since the timestamp in $1 ("YYYY-mm-dd HH:MM:SS+HH").
    # Appending "00" turns the postgres-style "+HH" offset into "+HH00",
    # which both BSD and GNU date accept.
    local DATA_TIME="${1}00"
    local DATA_S_TIME
    if date -j >/dev/null 2>&1
    then
        # BSD/macOS date: explicit input format required.
        DATA_S_TIME=$(date -j -f "%Y-%m-%d %H:%M:%S%z" "${DATA_TIME}" "+%s")
    else
        # GNU date parses the string directly.
        DATA_S_TIME=$(date -d "${DATA_TIME}" "+%s")
    fi
    local NOW
    NOW=$(date +%s)
    ELAPSED_MIN=$(( (NOW - DATA_S_TIME) / 60 ))
}
# Check the freshness of sensor rows in $1 (newline-separated "id|timestamp"
# records, as emitted by psql -t -A). For every row older than
# TIME_THRESHOLD but newer than TIME_NOTICE minutes, flag the run as failed.
# Reads diff_min's ELAPSED_MIN; writes the globals PASSED and ISSENSOR.
check_time(){
local MY_TIME="$1"
# Iterate per line, not per word: temporarily set IFS to newline only.
TMPIFS="${IFS}"
IFS=$'\n'
for i in $(echo "${MY_TIME}");
do
ID="$(echo ${i}|cut -d'|' -f1)"
ID_TIME="$(echo ${i}|cut -d'|' -f2)"
# echo "Most recent sensor data: ${ID_TIME} for sensor: ${ID}" | tee -a ${TMP_FILE}
# Strip fractional seconds ("....123+02" -> "...+02") so diff_min can parse.
ID_TIME="$(echo ${ID_TIME} | sed 's/\(.*\)\.[0-9][0-9]*\(\+.*\)/\1\2/g')"
diff_min "${ID_TIME}"
# echo "Data is ${ELAPSED_MIN} min old" | tee -a ${TMP_FILE}
echo "Data of sensor: ${ID} is ${ELAPSED_MIN} min old" | tee -a ${TMP_FILE}
# Data older than TIME_NOTICE is deliberately ignored (long-dead sensor).
if (( ${ELAPSED_MIN} > ${TIME_THRESHOLD} )) && (( ${ELAPSED_MIN} < ${TIME_NOTICE} ))
then
echo -e "\n*** Data of sensor ${ID} is too old ***\n" | tee -a ${TMP_FILE}
PASSED=false
ISSENSOR=true
fi
done
IFS="${TMPIFS}"
}
# Clear the screen only when attached to a terminal.
if [ ! -z ${TERM} ]
then
clear
fi
echo "Test start at $(date) for ${TARGET}, time threshold for sensor data is ${TIME_THRESHOLD} min, notice time is ${TIME_NOTICE} min" | tee ${TMP_FILE}
PASSED=true
ISSENSOR=false
# Reachability gate: skip all ssh-based checks if the webserver is down.
if ! curl --max-time 15 ${MY_HOST} &> /dev/null
then
echo "ERROR no connection to default webserver on ${MY_HOST}" | tee -a ${TMP_FILE}
PASSED=false
else
MY_KEY=$(find ${HOME} -name airq_key 2>/dev/null | head -1)
SSH_OPTS='-q -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null'
SSH_PARAMS="${SSH_OPTS} -p ${SSH_PORT} -i ${MY_KEY} ${MY_USER}@${MY_HOST}"
# Disk usage (%) of every ext2/3/4 filesystem on the remote host.
DISK_LEVELS="$(ssh ${SSH_PARAMS} 'df -h -t ext4 -t ext2 -t ext3 | tr -s " " | cut -d" " -f5 | /bin/grep -v "Use%" | tr -d "%" | tr "\n" " " ')"
for i in ${DISK_LEVELS}
do
if (( $i > ${DISK_THRESHOLD} ))
then
echo "ERROR disk usage above threshold ${DISK_THRESHOLD} on ${MY_HOST}: ${i}" | tee -a ${TMP_FILE}
PASSED=false
fi
done
# 1-minute load average; compared with bc because it is fractional.
LOAD_LEVEL="$(ssh ${SSH_PARAMS} 'cat /proc/loadavg | cut -d" " -f1 ')"
if (( $(echo "${LOAD_LEVEL} > ${LOAD_THRESHOLD}" | bc -l) ))
then
echo "ERROR load above threshold ${LOAD_THRESHOLD} on ${MY_HOST}: ${LOAD_LEVEL}" | tee -a ${TMP_FILE}
PASSED=false
fi
## Test measures
# Latest timestamp per sensor from the smartkids database.
MY_TIME="$(ssh ${SSH_PARAMS} 'sudo su postgres -c "psql -t -A -d smartkidsdb -c \"SELECT id, max(srv_ts) from measures where id > 100 group by id\" " ' 2>/dev/null)"
# echo "ssh ${SSH_PARAMS}"
if [ ! -z "${MY_TIME}" ]
then
check_time "${MY_TIME}"
else
echo "ssh command for sensor data failed" | tee -a ${TMP_FILE}
PASSED=false
fi
# Same freshness check for the LoRa measurements database.
MY_TIME="$(ssh ${SSH_PARAMS} 'sudo su postgres -c "psql -t -A -d lora2db -c \"SELECT hardware_serial, max(server_time) from measures where hardware_serial NOT LIKE '\''-%'\'' group by hardware_serial\" " ' 2>/dev/null)"
# echo "ssh ${SSH_PARAMS}"
if [ ! -z "${MY_TIME}" ]
then
check_time "${MY_TIME}"
else
echo "ssh command for sensor data failed" | tee -a ${TMP_FILE}
PASSED=false
fi
# Diff the agent log's ERROR/CRITICAL lines against the previous run so
# only *new* errors are reported.
ssh ${SSH_PARAMS} "grep -A 10 -E \"ERROR|CRITICAL\" ${MQTT_AGENT_LOG} " 2>/dev/null > /tmp/newErrorsSmartKids.${TARGET}
if [ -f /tmp/oldErrorsSmartKids.${TARGET} ]
then
ERRORS="$(diff /tmp/newErrorsSmartKids.${TARGET} /tmp/oldErrorsSmartKids.${TARGET})"
else
ERRORS="$(cat /tmp/newErrorsSmartKids.${TARGET})"
fi
mv /tmp/newErrorsSmartKids.${TARGET} /tmp/oldErrorsSmartKids.${TARGET}
if [ ! -z "${ERRORS}" ]
then
echo "New errors in ${MQTT_AGENT_LOG}: ${ERRORS}" | tee -a ${TMP_FILE}
PASSED=false
else
echo "No new errors in ${MQTT_AGENT_LOG}" | tee -a ${TMP_FILE}
fi
fi
# Report: different mail subject when the failure is stale sensor data.
if [ ! "$PASSED" = "true" ]
then
echo -e "\n*** Test NOT passed ***\n" | tee -a ${TMP_FILE}
if [ "$ISSENSOR" = "true" ]
then
cat ${TMP_FILE} | dos2unix | mail ${MAIL_OPTS} -s "SMARTKIDS sensors NOT active" ${EMAIL_ADDRESS}
#osascript -e 'tell app "System Events" to display dialog "SMARTKIDS Test NOT passed!!"'
else
cat ${TMP_FILE} | dos2unix | mail ${MAIL_OPTS} -s "SMARTKIDS Test NOT passed" ${EMAIL_ADDRESS}
fi
else
echo "Test passed" | tee -a ${TMP_FILE}
fi
|
waagsociety/making-sensor
|
doTestSensors.sh
|
Shell
|
cc0-1.0
| 5,145 |
#!/usr/bin/env bash
# Container entry point: ensure a PostgreSQL server is running (initialising
# the data directory on first boot), then hand off to the pipeline launcher.
# start database
# Pick the newest installed PostgreSQL version and put its binaries on PATH.
PSQLVER=$( cd /usr/lib/postgresql && ls | tail -n 1 )
PSQLBINDIR=/usr/lib/postgresql/${PSQLVER}/bin/
if [ -d "$PSQLBINDIR" ]; then
export PATH=$PSQLBINDIR:$PATH
fi
DATADIR=/data/psql/
export LC_ALL=C
# if database dir not found, create and init
if [ ! -d "${DATADIR}" ]; then
mkdir ${DATADIR}
pg_ctl initdb -D ${DATADIR}
# Keep the server's unix socket under /data/psql instead of /var/run.
sed -i "s+.*unix_socket_directories.*+unix_socket_directories = '\/data\/psql'+" ${DATADIR}/postgresql.conf
pg_ctl start -D ${DATADIR} -l ${DATADIR}/log.txt
# wait until started
# Poll for up to ~50 s until the server accepts connections.
for i in {1..10}; do
if pg_isready -h localhost -p 5432; then break; fi
echo "sleep... $(date)"
sleep 5
done
createuser -s ppiuser -h localhost -p 5432
createdb -O ppiuser ppidb1 -h localhost -p 5432
else
pg_ctl start -D ${DATADIR} -l ${DATADIR}/log.txt
fi
export PGHOST=localhost
export PGPORT=5432
# run propairs
# NOTE(review): $* re-splits the container arguments on whitespace; "$@"
# would preserve them - confirm callers never pass args with spaces.
$PROPAIRSROOT/start.sh -i $PROPAIRSROOT -o /data $*
|
propairs/propairs
|
dockerentry.sh
|
Shell
|
gpl-2.0
| 948 |
*
* Copyright (C) 2001 Tensilica, Inc. All Rights Reserved.
* Revised to support Tensilica processors and to improve overall performance
*
* Copyright (C) 2000 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2.1 of the GNU Lesser General Public License
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
* Further, this software is distributed without any warranty that it is
* free of the rightful claim of any third person regarding infringement
* or the like. Any license provided herein, whether implied or
* otherwise, applies only to this software file. Patent licenses, if
* any, provided herein do not apply to combinations of this program with
* other software, or any other product whatsoever.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307,
* USA.
*
* Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pky,
* Mountain View, CA 94043, or:
*
* http://www.sgi.com
*
* For further information regarding this notice, see:
*
* http://oss.sgi.com/projects/GenInfo/NoticeExplan
*
*
*
* USMID "@(#) libu/sci/c1/whenm.sh 92.0 10/08/98 14:57:41"
*
*-------------------------------------------------------------------
*\Documentation:
*
*\Name: WHENMMAC
*
*\Description:
* This macro finds locations of elements whose subfields
* have a specified relationship to the target. This
* relationship is to be determined by the calling routines.
* These routines are WHENMEQ, WHENMGE, WHENMGT, WHENMLE,
* WHENMLT, and WHENMNE. By subfields, it is meant that this
* algorithm is looking at subfields of the array elements by
* performing a right shift on the array value and then masking
* it. This allows one to compare the values of subfields of the
* array elements with that of the target.
* A2 ... ( input ) Address of the amount that the array value
* is to be right-shifted.
* S2 ... ( input ) Address of the MASK chosen by the user.
* B.A ... ( input ) Address of the array, A, being searched.
* B.INCA ... ( input ) The stride through the input array.
* B.INDEX ... ( output ) Array to receive the locations of
* modified elements that have a true
* relation to the target. For WHENMEQ,
* all modified elements that are equal
* to the target. For WHENMNE, all
* modified elements that are not equal
* to the target.
* B.N ... ( input ) N, the size of the problem.
* B.NN ... ( output ) Number of objects found.
* T.OBJECT ... ( input ) Target, OBJECT, being searched for.
*
*\Usage:
* WHENMMAC
*
*\Arguments:
* B-registers
* A
* INCA
* N
* VLENG
* NN
* INDEX
* T-registers
* OBJECT
*
*\Register arguments:
* A1, A2
*
*\Modified registers:
* All A-registers
* S1, S2, S5, S7
* V0, V1, V3, V4
*
*\Remarks:
*
*\Examples:
*
*\Enddoc
*
*---------------------------------------------------------------------
*
*\Lib
*
*\Local variables:
*
*\Author:
* Barrie Kletscher
* Mathematical Software Group
* Cray Research, Inc.
*
*\References:
*
*\Macros called:
* COMPUTE - calling this macro. If the routine is looking
* for elements that are equal to the target, then
* COMPUTE will subtract OBJECT from the elements
* in A and then SETMASK will set the appropriate
* bit in the vector mask on a zero hit. In the
* case of looking for elements that are less than
* the target, the vector mask looks for a hit on
* minus.
* LOADTOBJ - Loads the object being searched in a T-register
* called OBJECT.
* LZCNT - This macro gets the leading zero count of the vector
* mask register.
* SETMASK - What this does is contingent on the routine calling
* this macro. In some, it sets the vector mask on
* a zero hit. In others, it sets the vector mask on
* a minus hit. It sets the vector mask. SETMASK
* and COMPUTE work together.
* VMTOS - On a YMP, one does SVM <-- VM. This macro does that
* step. If the code is being executed on a Y16, it
* performs the additional step of doing SVM1 <-- VM1.
*
*\Keywords:
* search, table lookup
*
*\Routines called:
* None
*
*\Revision history:
* 02 Jan 91 orginal version
*
*\Performance data:
*
*\Endlib
*---------------------------------------------------------------------
*
MACRO
WHENMMAC
ARGADD A1,ARGMASK
ARGADD A2,ARGRD
S2 ,A1
A2 ,A2
T.MASK S2
B.RD A2
A0 B.INCA CHECK FOR NEG INCREMENT
A1 B.INCA
A1 -A1
$IF AM
A2 B.N
A2 A2-1 (N-1)
A3 A1*A2 ABS(INCA)*(N-1)
A4 B.A BASE ADDRESS OF A
A4 A4+A3 (N-1)*ABS(INCA)
B.A A4 STORE IN B REGISTER
$ENDIF
LOADTOBJ
A1 B.N
A0 -A1 -VL
A3 -A1
S2 <VBIT MASK
S1 A3
JAP DONE ZERO VECTOR LENGTH
S1 #S1&S2
A3 S1
A3 A3+1 FIRST SEG LENGTH
VL A3
*
A1 B.A
A2 B.INCA
A7 0
B.I A7
A0 A1
V0 ,A0,A2
S1 T.MASK
A7 B.RD
V1 V0>A7
V3 S1&V1 VALUES ISOLATED FOR THE TEST
A2 A2*A3 HOW MUCH TO INCREASE THE ADDRESS
S1 T.OBJECT
COMPUTE V4,S1,V3 WILL BE ZERO ON A HIT
A1 A1+A2 INCREASE A BY THAT VALUE
B.A A1 RESET A ADDRESS
SETMASK V4 SET MASK ON ZERO HIT
A5 B.N
A4 A5-A3 CHECK POSITION
B.VLENG A3 KEEP OLD VECTOR LENGTH
A3 VLEN
A0 A4
B.N A4
JAZ COMPLETE THIS CASE IS LESS THAN/EQUAL TO VLEN
*
LOOP = *
VL A3 SET VL
A1 B.A
A2 B.INCA
A0 A1
V0 ,A0,A2
S1 T.MASK
A7 B.RD
V1 V0>A7
V3 S1&V1 VALUES ISOLATED FOR THE TEST
A2 A2*A3 HOW MUCH TO INCREASE THE ADDRESS
S1 T.OBJECT
COMPUTE V4,S1,V3 WILL BE ZERO ON A HIT
A1 A1+A2 INCREASE A BY THAT VALUE
B.A A1 RESET A ADDRESS
VMTOS S7,S5
SETMASK V4 SET MASK ON ZERO HIT
A7 PS7 POPULATION COUNT
CRAY16 IFE MAX$VL,NE,64
A5 PS5 POPULATION COUNT OF VM1
A7 A7+A5 TOTAL POPULATION COUNT
CRAY16 ENDIF
A5 B.N
A1 A5-A3 CHECK POSITION
A0 A7
JAN ANALYZE
HOME = *
A3 VLEN
A0 A1
A5 B.VLENG VECTOR LENGTH FROM OLD MASK
A6 B.I CURRENT I POSITION FOR OLD MASK
A6 A6+A5 ADD TO UPDATE
B.VLENG A3 UPDATE VECTOR LENGTH
B.I A6 UPDATE I POINTER
B.N A1
JAN LOOP LOOP BACK
J COMPLETE
ANALYZE = *
LZCNT A6,S7,S5,A4,A5
A4 B.NN
A4 A4+A7
A5 B.INDEX
B.NN A4
A4 B.I
A6 A6+1
J ALIGNED
ALIGN
ALIGNED = *
LOOPA = *
CRAY16 IFE MAX$VL,NE,64
S7 S7,S5<A6 SHIFT OFF ZEROS
S5 S5<A6
CRAY16 ELSE
S7 S7<A6 SHIFT OFF ZEROS
CRAY16 ENDIF
A7 A7-1
A4 A6+A4 CURRENT POINT ON I
LZCNT A6,S7,S5,A4,A2 DO LEADING ZERO COUNT
,A5 A4 STORE THE LATEST INDEX
A0 A7 CHECK FOR LOOP BACK
A5 A5+1 INCREMENT INDEX ADDRESS
A6 A6+1
JAZ RETURNS
CRAY16 IFE MAX$VL,NE,64
S7 S7,S5<A6 SHIFT OFF ZEROS
S5 S5<A6
CRAY16 ELSE
S7 S7<A6 SHIFT OFF ZEROS
CRAY16 ENDIF
A7 A7-1
A4 A6+A4 CURRENT POINT ON I
LZCNT A6,S7,S5,A4,A2 DO LEADING ZERO COUNT
,A5 A4 STORE THE LATEST INDEX
A0 A7 CHECK FOR LOOP BACK
A5 A5+1 INCREMENT INDEX ADDRESS
A6 A6+1
JAZ RETURNS
CRAY16 IFE MAX$VL,NE,64
S7 S7,S5<A6 SHIFT OFF ZEROS
S5 S5<A6
CRAY16 ELSE
S7 S7<A6 SHIFT OFF ZEROS
CRAY16 ENDIF
A7 A7-1
A4 A6+A4 CURRENT POINT ON I
LZCNT A6,S7,S5,A4,A2 DO LEADING ZERO COUNT
,A5 A4 STORE THE LATEST INDEX
A0 A7 CHECK FOR LOOP BACK
A5 A5+1 INCREMENT INDEX ADDRESS
A6 A6+1
JAZ RETURNS
CRAY16 IFE MAX$VL,NE,64
S7 S7,S5<A6 SHIFT OFF ZEROS
S5 S5<A6
CRAY16 ELSE
S7 S7<A6 SHIFT OFF ZEROS
CRAY16 ENDIF
A7 A7-1
A4 A6+A4 CURRENT POINT ON I
LZCNT A6,S7,S5,A4,A2 DO LEADING ZERO COUNT
,A5 A4 STORE THE LATEST INDEX
A0 A7 CHECK FOR LOOP BACK
A5 A5+1 INCREMENT INDEX ADDRESS
A6 A6+1
JAZ RETURNS
CRAY16 IFE MAX$VL,NE,64
S7 S7,S5<A6 SHIFT OFF ZEROS
S5 S5<A6
CRAY16 ELSE
S7 S7<A6 SHIFT OFF ZEROS
CRAY16 ENDIF
A7 A7-1
A4 A6+A4 CURRENT POINT ON I
LZCNT A6,S7,S5,A4,A2 DO LEADING ZERO COUNT
,A5 A4 STORE THE LATEST INDEX
A0 A7 CHECK FOR LOOP BACK
A5 A5+1 INCREMENT INDEX ADDRESS
A6 A6+1
JAN LOOPA
RETURNS = *
A0 A1
B.INDEX A5
JAP HOME
J DONE
*
COMPLETE = *
VMTOS S7,S5
A7 PS7 POPULATION COUNT
CRAY16 IFE MAX$VL,NE,64
A5 PS5 POPULATION COUNT OF VM1
A7 A7+A5 TOTAL POPULATION COUNT
CRAY16 ENDIF
A1 -1 KNOW NOT TO RETURN FROM ANALYZE
A0 A7
JAN ANALYZE
*
DONE = *
A1 B.NNA
A2 B.NN
,A1 A2 STORE NN
WHENMMAC ENDM
|
qiyao/xcc
|
libu/sci/c1/whenm.sh
|
Shell
|
gpl-2.0
| 12,303 |
#!/bin/bash
#
# Provision PHPRedMin inside a VVV (Vagrant) box: installs Redis, Gearman
# and supervisor, clones or updates the PHPRedMin sources, and registers
# the PHPRedMin gearman worker with supervisord.
PHPREDMIN_DIR="phpredmin"
PHPREDMIN_PATH="$(pwd)/$PHPREDMIN_DIR/public"
PHPREDMIN_CONF="/etc/supervisor/conf.d/phpredmin.conf"
echo " * Setting up Redis..."
sudo apt-get update
sudo apt-get -y install redis-server php5-redis gearman-server php5-gearman supervisor
sudo service php5-fpm restart
if [ ! -d "$PHPREDMIN_DIR" ]; then
    echo " * Cloning PHPRedMin..."
    git clone https://github.com/sasanrose/phpredmin.git "$PHPREDMIN_DIR"
else
    echo " * Updating PHPRedMin..."
    # Pull in a subshell so the rest of the script keeps its working dir.
    ( cd "$PHPREDMIN_DIR" && git pull origin master )
fi
if [ ! -f "$PHPREDMIN_CONF" ]; then
    # BUG FIX: 'sudo cat > file' cannot write to /etc -- the redirection is
    # performed by the unprivileged shell *before* sudo runs. 'sudo tee'
    # does the write with elevated rights instead.
    sudo tee "$PHPREDMIN_CONF" > /dev/null <<CONF
[program:phpredmin]
directory=$PHPREDMIN_PATH
command=php index.php gearman/index
process_name=%(program_name)s
numprocs=1
stdout_logfile=/var/log/supervisor/phpredmin.log
autostart=true
autorestart=true
user=vagrant
CONF
    sudo service supervisor force-reload
fi
|
goblindegook/VVV-Redis
|
vvv-init.sh
|
Shell
|
gpl-2.0
| 890 |
#!/bin/bash
# Original script written by Justin Decker, copyright 2015. For licensing
# purposes, use GPLv2
#
# Creates Plex-friendly "Show/Season NN/..." symlinks for a MythTV
# recording identified by channel id + start time, then asks the Plex
# Media Server to rescan the library section.
#
# To use, create a "user job" that runs like so:
# /path/to/script/mythtv-plex-links.sh "%CHANID%" "%STARTTIMEUTC%"
# The following values adjust the script parameters:
#
# Set this to the directory of the Plex Library where myth recording symlinks
# should reside.
PLEXLIBRARYDIR="/mnt/esata/recordedtv"
# Set this to the URL prefix of your Plex Media Server
PMSURL="http://192.168.1.20:32400"
# Set this to the section number of your recorded TV shows library. To find
# this out, go to your plex media server and navigate to the desired library.
# Look at the URL for that page, and at the end you should see
# /section/<number>. The number here is your section number.
PMSSEC="6"
# Set this to the location of the mythtv config.xml file. It's needed to
# determine the mysql login. If you're running mythbuntu, you shouldn't need to
# change this.
# TODO: sanity check file and db values
CONFIGXML="/home/mythtv/.mythtv/config.xml"
# Leave everything below this line alone unless you know what you're doing.
#
# Discover mysql username and password from mythtv config.xml. Alternatively
# you can manually enter them after the = sign.
DBUSER="$(awk -F '[<>]' '/UserName/{print $3}' $CONFIGXML)"
DBPASS="$(awk -F '[<>]' '/Password/{print $3}' $CONFIGXML)"
# TODO: sanity check values (sql injection)
# NOTE(review): $CHANID/$STARTTIME are interpolated into the SQL below
# unescaped; they normally come from mythbackend job substitution, but
# the TODO above still applies.
CHANID=$1 && STARTTIME=$2
# Populate recording information from sql database. Set field separator (IFS)
# to tab and tell mysql to give us a tab-delimited result with no column names
# (-Bs). Without this, IFS defaults to any whitespace, meaning words separated
# by spaces in the result fields (such as the title) would be interpreted as
# individual array elements. That would be bad since we expect the whole
# title to be contained in array element 0 later.
OLDIFS=$IFS
IFS=$'\t'
RECORDING=($(mysql mythconverg --user=$DBUSER --password=$DBPASS -Bse \
"SELECT title, season, episode, basename, storagegroup FROM recorded WHERE chanid=\"$CHANID\" AND starttime=\"$STARTTIME\" LIMIT 1;"))
IFS=$OLDIFS
# Set vars from above query results, padding season and episode with 0 if needed
# TODO: sanity check values
TITLE=${RECORDING[0]}
SEASON=`printf "%02d" ${RECORDING[1]}`
EPISODE=`printf "%02d" ${RECORDING[2]}`
FILENAME=${RECORDING[3]}
STORAGEGROUP=${RECORDING[4]}
# If season is '00', use 2 digit year
if [ "$SEASON" == "00" ]; then
SEASON=`date +%y`
fi
# If episode is '00', use 3 digit day-of-year
if [ "$EPISODE" == "00" ]; then
EPISODE=`date +%j`
fi
# Resolve the storage-group directory that holds the recording file.
SGDIR=$(mysql mythconverg --user=$DBUSER --password=$DBPASS -se \
"SELECT dirname FROM storagegroup WHERE groupname=\"$STORAGEGROUP\";")
MYTHFILE="$SGDIR/$FILENAME"
PLEXFILE="$TITLE - s${SEASON}e${EPISODE} - $STARTTIME.mpg"
PLEXSHOWDIR="$PLEXLIBRARYDIR/$TITLE/Season ${SEASON}"
PLEXFILEPATH="$PLEXSHOWDIR/$PLEXFILE"
# create plex library subdir and symlink for this recording
mkdir -p "$PLEXSHOWDIR"
ln -s "$MYTHFILE" "$PLEXFILEPATH"
# Prune all dead links and empty folders
find "$PLEXLIBRARYDIR" -xtype l -delete
find "$PLEXLIBRARYDIR" -type d -empty -delete
# Update Plex library
curl "${PMSURL}/library/sections/${PMSSEC}/refresh"
|
kwmonroe/mythtv-plex-links
|
mythtv-plex-links.sh
|
Shell
|
gpl-2.0
| 3,226 |
#!/bin/sh
####DO_NOT_TOUCH####
# Bootstrap a buildroot tree.
#
# With no argument: download and unpack the 2013.11 release tarball.
# With an argument: git clone buildroot and check out the given commit.
if [ $# = 0 ]
then
    VERSION=2013.11
    NAME=buildroot-${VERSION}.tar.bz2
    URL=http://buildroot.uclibc.org/downloads/${NAME}
    wget ${URL} -O /tmp/${NAME}
    mkdir buildroot
    cd buildroot || exit 1
    # --strip 1 drops the leading buildroot-<version>/ path component
    tar xvf /tmp/${NAME} -C . --strip 1
else
    git clone http://git.buildroot.net/git/buildroot.git buildroot
    cd buildroot || exit 1
    git checkout "$1"
fi
# Apply the local patch queue on top of the pristine tree.
# BUG FIX: with an empty patch directory the glob expands to itself, so
# patch/git used to be invoked with a literal '*.patch' path; the
# existence test skips non-matching globs.
for p in ../custom/patchs/*.patch
do
    [ -e "$p" ] || continue
    patch < "$p"
done
for p in ../custom/patchs/git/*.patch
do
    [ -e "$p" ] || continue
    git am "$p"
done
# back to root directory
cd ..
# Ignore the generated tree and this script itself.
cat >> .gitignore <<EOF
/buildroot
/.gitignore
/bootstrap.sh
EOF
# Dummify bootstrap.sh: replace the marker above with an early exit so a
# second invocation is a no-op.
sed -i '0,/####DO_NOT_TOUCH####/s//echo "bootstrap.sh already done" ; exit 0/g' ./bootstrap.sh
|
chep/BR_custom
|
bootstrap.sh
|
Shell
|
gpl-2.0
| 868 |
#!/bin/bash
#
# Build the NCommander NuGet package.
#
# Usage: release.sh [version]
# When no version is given it is derived from the latest git tag with the
# leading "v" stripped (e.g. v1.2.3 -> 1.2.3).
VERSION=$1
if [[ -z "$VERSION" ]]; then
    VERSION=$(git describe --tags | sed 's/^v//' )
    # c.f. grep AssemblyVersion Properties/AssemblyInfo.cs
fi
if [[ -z "$VERSION" ]]; then
    echo "Error: Could not determine the version string." >&2
    exit 1
fi
# Quote the version so an unexpected value cannot be word-split into
# extra nuget arguments.
nuget pack NCommander.nuspec -Version "$VERSION"
|
izrik/NCommander
|
release.sh
|
Shell
|
gpl-2.0
| 311 |
# Restream the camera's RTSP feed over HTTP on port 8080: transcode to
# H.264 (x264, qp=99) at 1024px wide, drop audio, mux as FLV via ffmpeg.
# NOTE(review): credentials are embedded in the URL -- they are visible
# in the process list and in shell history; consider moving them out.
cvlc -vvv rtsp://admin:[email protected]:554 --sout '#transcode{vcodec=h264,venc=x264{qp=99},scale=Auto,width=1024,acodec=none}:http{mux=ffmpeg{mux=flv},dst=:8080/}'
|
ElvisLouis/code
|
work/sh/sucessed.sh
|
Shell
|
gpl-2.0
| 169 |
#!/bin/bash
# 1.*_Pre-installation
#
# https://wiki.archlinux.org/index.php/Installation_guide#Pre-installation
#
# *----------------------------------------------------------------------------*
# 09-01-2017 - 22:49:31 (by gener-guide.bash)
# *----------------------------------------------------------------------------*
# Exported variables consumed by the other guide scripts. This file is
# meant to be *sourced*, not executed (see the bare 'return' at the end).
# [list]
export cible='tux'
export navigateur='Chrome'
export ipCible='192.168.1.25'
export ipBox='90.66.216.111'
export edit='vim'
export wikiOrg='https://wiki.archlinux.org/index.php/Installation_guide'
export wikiFr='https://wiki.archlinux.fr/Installation'
# [end]
#. $outils/visu.bash '1.*_Pre-installation.bash' 'Liste des Variables'
# file title
# *----------------------------------------------------------------------------*
echo "+++ Test Erreur +++"
# 'return' is only valid in a sourced script; bash reports an error here
# if this file is run directly.
return 0
|
RC69/RCsite1
|
archlinux/installation-guide-old/contents/0.+_Initialisation.bash
|
Shell
|
gpl-2.0
| 856 |
#!/bin/sh
# Copyright (C) 2005-2013 MaNGOS project <http://getmangos.com/>
#
# This file is free software; as a special exception the author gives
# unlimited permission to copy and/or distribute it, with or without
# modifications, as long as this notice is preserved.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY, to the extent permitted by law; without even the
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# Driver script for the MaNGOS client-data extractors: runs 'ad'
# (DBCs/maps), 'vmapExtractor' + 'vmap_assembler' (vmaps) and
# MoveMapGen.sh (mmaps), either fully automatic ('a' argument) or after
# interactively asking which steps to perform.
# NOTE(review): 'ad', 'vmapExtractor' and 'vmap_assembler' are expected
# to be reachable via PATH from the current directory -- confirm setup.
#
## Expected param 1 to be 'a' for all, else ask some questions
## Normal log file (if not overwritten by second param
LOG_FILE="MaNGOSExtractor.log"
## Detailed log file
DETAIL_LOG_FILE="MaNGOSExtractor_detailed.log"
## Change this to a value fitting for your sys!
NUM_CPU="2"
## ! Use below only for finetuning or if you know what you are doing !
USE_AD="0"
USE_VMAPS="0"
USE_MMAPS="0"
USE_MMAPS_OFFMESH="0"
if [ "$1" = "a" ]
then
## extract all
USE_AD="1"
USE_VMAPS="1"
USE_MMAPS="1"
else
## do some questioning!
echo
echo "Welcome to helper script to extract required dataz for MaNGOS!"
echo "Should all dataz (dbc, maps, vmaps and mmaps be extracted? (y/n)"
read line
if [ "$line" = "y" ]
then
## extract all
USE_AD="1"
USE_VMAPS="1"
USE_MMAPS="1"
else
echo
echo "Should dbc and maps be extracted? (y/n)"
read line
if [ "$line" = "y" ]; then USE_AD="1"; fi
echo
echo "Should vmaps be extracted? (y/n)"
read line
if [ "$line" = "y" ]; then USE_VMAPS="1"; fi
echo
echo "Should mmaps be extracted? (y/n)"
echo "WARNING! This will take several hours!"
read line
if [ "$line" = "y" ]
then
USE_MMAPS="1";
else
echo "Only reextract offmesh tiles for mmaps?"
read line
if [ "$line" = "y" ]
then
USE_MMAPS_OFFMESH="1";
fi
fi
fi
fi
## Special case: Only reextract offmesh tiles, then stop.
if [ "$USE_MMAPS_OFFMESH" = "1" ]
then
echo "Only extracting offmesh meshes"
MoveMapGen.sh offmesh $LOG_FILE $DETAIL_LOG_FILE
exit 0
fi
## Obtain number of processes for the mmap extraction
if [ "$USE_MMAPS" = "1" ]
then
echo "How many CPUs should be used for extracting mmaps? (1-4)"
read line
if [ "$line" -ge "1" -a "$line" -le "4" ]
then
NUM_CPU=$line
else
echo "Only number between 1 and 4 supported!"
exit 1
fi
fi
## Give some status
echo "Current Settings: Extract DBCs/maps: $USE_AD, Extract vmaps: $USE_VMAPS, Extract mmaps: $USE_MMAPS on $NUM_CPU processes"
if [ "$1" != "a" ]
then
echo "If you don't like this settings, interrupt with CTRL+C"
read line
fi
echo "`date`: Start extracting dataz for MaNGOS" | tee $LOG_FILE
## Handle log messages
if [ "$USE_AD" = "1" ];
then
echo "DBC and map files will be extracted" | tee -a $LOG_FILE
else
echo "DBC and map files won't be extracted!" | tee -a $LOG_FILE
fi
if [ "$USE_VMAPS" = "1" ]
then
echo "Vmaps will be extracted" | tee -a $LOG_FILE
else
echo "Vmaps won't be extracted!" | tee -a $LOG_FILE
fi
if [ "$USE_MMAPS" = "1" ]
then
echo "Mmaps will be extracted with $NUM_CPU processes" | tee -a $LOG_FILE
else
echo "Mmaps files won't be extracted!" | tee -a $LOG_FILE
fi
echo | tee -a $LOG_FILE
echo "`date`: Start extracting dataz for MaNGOS, DBCs/maps $USE_AD, vmaps $USE_VMAPS, mmaps $USE_MMAPS on $NUM_CPU processes" | tee $DETAIL_LOG_FILE
echo | tee -a $DETAIL_LOG_FILE
## Extract dbcs and maps
if [ "$USE_AD" = "1" ]
then
echo "`date`: Start extraction of DBCs and map files..." | tee -a $LOG_FILE
ad | tee -a $DETAIL_LOG_FILE
echo "`date`: Extracting of DBCs and map files finished" | tee -a $LOG_FILE
echo | tee -a $LOG_FILE
echo | tee -a $DETAIL_LOG_FILE
fi
## Extract vmaps (extraction into ./buildings, assembly into ./vmaps)
if [ "$USE_VMAPS" = "1" ]
then
echo "`date`: Start extraction of vmaps..." | tee -a $LOG_FILE
vmapExtractor | tee -a $DETAIL_LOG_FILE
echo "`date`: Extracting of vmaps finished" | tee -a $LOG_FILE
mkdir vmaps
echo "`date`: Start assembling of vmaps..." | tee -a $LOG_FILE
vmap_assembler buildings vmaps | tee -a $DETAIL_LOG_FILE
echo "`date`: Assembling of vmaps finished" | tee -a $LOG_FILE
echo | tee -a $LOG_FILE
echo | tee -a $DETAIL_LOG_FILE
fi
## Extract mmaps (delegated to MoveMapGen.sh, parallelised over NUM_CPU)
if [ "$USE_MMAPS" = "1" ]
then
sh MoveMapGen.sh $NUM_CPU $LOG_FILE $DETAIL_LOG_FILE
fi
|
ralph93/crtoe
|
contrib/extractor_binary/ExtractResources.sh
|
Shell
|
gpl-2.0
| 4,274 |
# export AWS_ACCESS_KEY="Your-Access-Key"
# export AWS_SECRET_KEY="Your-Secret-Key"
#
# Revoke the all-ports/all-protocols 0.0.0.0/0 ingress rule from every
# security group tagged close-allports-time=08-00 and bash-profile=wd,
# logging each revocation attempt.
today=$(date +"%d-%m-%Y","%T")
logfile="/awslog/ec2-access.log"
outfile="$HOME/tmp/disallowall_wd_info.txt"
# Make sure the scratch directory exists before redirecting into it.
mkdir -p "$HOME/tmp"
# Grab all Security Group IDs for the DISALLOW action and export the IDs to a text file
sudo aws ec2 describe-security-groups --filters Name=tag:close-allports-time,Values=08-00 Name=tag:bash-profile,Values=wd --query SecurityGroups[].[GroupId] --output text > "$outfile" 2>&1
# Process the group IDs one per line; a read loop avoids the
# word-splitting pitfalls of `for x in $(cat file)`.
while read -r group_id
do
	[ -n "$group_id" ] || continue
	# Change rules in security group
	sudo aws ec2 revoke-security-group-ingress --group-id "$group_id" --protocol all --port all --cidr 0.0.0.0/0
	# Put info into log file
	echo "Attempt $today disallow access to instances with attached group $group_id for all ports" >> "$logfile"
done < "$outfile"
|
STARTSPACE/aws-access-to-ec2-by-timetable
|
all/disallow-wd/all-disallow-wd-08.sh
|
Shell
|
gpl-2.0
| 817 |
#!/bin/bash
#
# Install Eclipse Java EE Plugins
#
# $1 is the repository it is at
# $2 is plugin classname to install
#
# NOTE(review): relies on the find_executable and prompt helper functions
# and on $EXECUTABLE being set by find_executable -- these must be
# provided by a script that sources this one; confirm with caller.
REPOSITORY="$1"
CLASSNAME="$2"
if find_executable "eclipse" "eclipse"; then
ECLIPSE="$EXECUTABLE"
# Disable errexit so a failed install attempt falls through to the
# retry prompt instead of aborting the whole script.
set +e
while true; do
echo "> Running Eclipse installation of $CLASSNAME from $REPOSITORY"
if "$ECLIPSE" -nosplash -application org.eclipse.equinox.p2.director -repository "$REPOSITORY" -installIU "$CLASSNAME"; then
set -e
break
elif ! prompt "> Failed to install Eclipse plugin, try again"; then
exit 1
fi
done
else
echo "> Couldn't find an installation of Eclipse to install plugins into"
exit 1
fi
|
ripply/devenv
|
development/setup_eclipse_plugin.sh
|
Shell
|
gpl-2.0
| 713 |
#! /bin/sh
# Copyright (C) 2001-2014 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
# Test to make sure that depcomp and compile are added to DIST_COMMON.
# Report from Pavel Roskin.  Report of problems with '--no-force' from
# Scott James Remnant (Debian #206299)
# ('test-init.sh' supplies $ACLOCAL, $AUTOMAKE, $FGREP, $tab, etc.)
. test-init.sh
cat >> configure.ac << 'END'
AC_PROG_CC
AC_CONFIG_FILES([subdir/Makefile])
AC_OUTPUT
END
cat > Makefile.am << 'END'
SUBDIRS = subdir
END
mkdir subdir
: > subdir/foo.c
cat > subdir/Makefile.am << 'END'
noinst_PROGRAMS = foo
foo_SOURCES = foo.c
foo_CFLAGS = -DBAR
END
$ACLOCAL
# Run automake both normally and with --no-force; in each mode the helper
# scripts must be installed and referenced from DIST_COMMON.
for opt in '' --no-force; do
rm -f compile depcomp
$AUTOMAKE $opt --add-missing
test -f compile
test -f depcomp
# Extract the (possibly line-continued) DIST_COMMON definition from each
# generated Makefile.in into dc.txt, with whitespace normalised.
for dir in . subdir; do
# FIXME: the logic of this check and other similar ones in other
# FIXME: 'distcom*.sh' files should be factored out in a common
# FIXME: subroutine in 'am-test-lib.sh'...
sed -n -e "
/^DIST_COMMON =.*\\\\$/ {
:loop
p
n
t clear
:clear
s/\\\\$/\\\\/
t loop
s/$/ /
s/[$tab ][$tab ]*/ /g
p
n
}" $dir/Makefile.in > $dir/dc.txt
done
cat dc.txt # For debugging.
cat subdir/dc.txt # Likewise.
$FGREP ' $(top_srcdir)/depcomp ' subdir/dc.txt
# The 'compile' script will be listed in the DIST_COMMON of the top-level
# Makefile because it's required in configure.ac (by AC_PROG_CC).
$FGREP ' $(top_srcdir)/compile ' dc.txt || $FGREP ' compile ' dc.txt
done
:
|
infoburp/automake
|
t/distcom2.sh
|
Shell
|
gpl-2.0
| 2,101 |
#! /bin/sh
# Copyright (C) 2011-2018 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.
# Check that DejaGnu testsuites have 'srcdir' defined to a relative path
# (both as TCL variable and as environment variable).
required=runtest
. test-init.sh
cat >> configure.ac << 'END'
AC_OUTPUT
END
cat > Makefile.am << 'END'
AUTOMAKE_OPTIONS = dejagnu
DEJATOOL = tcl env
EXTRA_DIST = env.test/env.exp tcl.test/tcl.exp lib/tcl.exp
END
mkdir env.test tcl.test lib
# DejaGnu can change $srcdir behind our backs, so we have to
# save its original value.  Thanks to Ian Lance Taylor for the
# suggestion.
cat > lib/tcl.exp << 'END'
send_user "tcl_lib_srcdir: $srcdir\n"
set orig_srcdir $srcdir
END
# env.exp: $srcdir as seen through the environment must be relative
# ('.' or './..').
cat > env.test/env.exp << 'END'
set env_srcdir $env(srcdir)
send_user "env_srcdir: $env_srcdir\n"
if { [ regexp "^\.(\./\.\.)?$" $env_srcdir ] } {
pass "test_env_src"
} else {
fail "test_env_src"
}
END
# tcl.exp: both the current and the saved original TCL $srcdir must be
# relative as well.
cat > tcl.test/tcl.exp << 'END'
send_user "tcl_srcdir: $srcdir\n"
if { [ regexp "^\.(\./\.\.)?$" $srcdir ] } {
pass "test_tcl_src"
} else {
fail "test_tcl_src"
}
send_user "tcl_orig_srcdir: $orig_srcdir\n"
if { [ regexp "^\.(\./\.\.)?$" $orig_srcdir ] } {
pass "test_tcl_orig_src"
} else {
fail "test_tcl_orig_src"
}
END
$ACLOCAL
$AUTOCONF
$AUTOMAKE --add-missing
./configure --srcdir=.
$MAKE check
# Sanity check: all tests have run.
test -f env.log
test -f env.sum
test -f tcl.log
test -f tcl.sum
$MAKE distcheck
:
|
komh/automake-os2
|
t/dejagnu-relative-srcdir.sh
|
Shell
|
gpl-2.0
| 2,044 |
#! /bin/bash
# SPDX-License-Identifier: BSD-3-Clause
# Test definitions: 3DES-CBC + HMAC-SHA1 tunnel with ESN enabled.
# $DIR is provided by the calling test harness -- TODO confirm.
. ${DIR}/tun_3descbc_sha1_common_defs.sh
# Extra ipsec-secgw parameters: ESN on, 300 s window, large frames.
SGW_CMD_XPRM='-e -w 300 -l'
# Configure the Linux peer for the IPv4 test: flush any previous xfrm
# state, then (over ssh) install tunnel-mode ESP policies and SAs using
# 3DES-CBC + HMAC-SHA1, SPI 7, 64-entry replay window with ESN.
config_remote_xfrm()
{
ssh ${REMOTE_HOST} ip xfrm policy flush
ssh ${REMOTE_HOST} ip xfrm state flush
ssh ${REMOTE_HOST} ip xfrm policy add \
src ${REMOTE_IPV4} dst ${LOCAL_IPV4} \
dir out ptype main action allow \
tmpl src ${REMOTE_IPV4} dst ${LOCAL_IPV4} \
proto esp mode tunnel reqid 1
ssh ${REMOTE_HOST} ip xfrm policy add \
src ${LOCAL_IPV4} dst ${REMOTE_IPV4} \
dir in ptype main action allow \
tmpl src ${LOCAL_IPV4} dst ${REMOTE_IPV4} \
proto esp mode tunnel reqid 2
ssh ${REMOTE_HOST} ip xfrm state add \
src ${REMOTE_IPV4} dst ${LOCAL_IPV4} \
proto esp spi 7 reqid 1 mode tunnel replay-window 64 flag esn \
auth sha1 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef \
enc "cbc\(des3_ede\)" 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef
ssh ${REMOTE_HOST} ip xfrm state add \
src ${LOCAL_IPV4} dst ${REMOTE_IPV4} \
proto esp spi 7 reqid 2 mode tunnel replay-window 64 flag esn \
auth sha1 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef \
enc "cbc\(des3_ede\)" 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef
# Dump the resulting configuration for the test log.
ssh ${REMOTE_HOST} ip xfrm policy list
ssh ${REMOTE_HOST} ip xfrm state list
}
# IPv6 variant: first installs the IPv4 rules via config_remote_xfrm,
# then adds the corresponding IPv6 tunnel policies and SAs (SPI 9,
# same 3DES-CBC / HMAC-SHA1 / ESN parameters).
config6_remote_xfrm()
{
config_remote_xfrm
ssh ${REMOTE_HOST} ip xfrm policy add \
src ${REMOTE_IPV6} dst ${LOCAL_IPV6} \
dir out ptype main action allow \
tmpl src ${REMOTE_IPV6} dst ${LOCAL_IPV6} \
proto esp mode tunnel reqid 3
ssh ${REMOTE_HOST} ip xfrm policy add \
src ${LOCAL_IPV6} dst ${REMOTE_IPV6} \
dir in ptype main action allow \
tmpl src ${LOCAL_IPV6} dst ${REMOTE_IPV6} \
proto esp mode tunnel reqid 4
ssh ${REMOTE_HOST} ip xfrm state add \
src ${REMOTE_IPV6} dst ${LOCAL_IPV6} \
proto esp spi 9 reqid 3 mode tunnel replay-window 64 flag esn \
auth sha1 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef \
enc "cbc\(des3_ede\)" 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef
ssh ${REMOTE_HOST} ip xfrm state add \
src ${LOCAL_IPV6} dst ${REMOTE_IPV6} \
proto esp spi 9 reqid 4 mode tunnel replay-window 64 flag esn \
auth sha1 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef \
enc "cbc\(des3_ede\)" 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef
# Dump the resulting configuration for the test log.
ssh ${REMOTE_HOST} ip xfrm policy list
ssh ${REMOTE_HOST} ip xfrm state list
}
|
grivet/dpdk
|
examples/ipsec-secgw/test/tun_3descbc_sha1_esn_defs.sh
|
Shell
|
gpl-2.0
| 2,280 |
#!/bin/sh
# Boot the piCore image under QEMU (ARM1176 / Versatile PB, 256 MB RAM)
# with the serial console on stdio. Installs the qemu-arm Tiny Core
# extension on first use only.
if [ ! -e /usr/local/tce.installed/qemu-arm ]; then tce-load -i qemu-arm; fi
qemu-system-arm -kernel zImage -cpu arm1176 -m 256 -M versatilepb -no-reboot \
-serial stdio -initrd piCore.gz -append \
"root=/dev/ram0 elevator=deadline rootwait quiet nozswap nortc noswap"
|
redrock9/PAD
|
picore/start.sh
|
Shell
|
gpl-2.0
| 279 |
#!/bin/bash
# scripta
#
# Provision a CentOS/RHEL6 box as a PPTP VPN endpoint plus a node.js
# based network "noise tester": installs build tools and pptpd, builds
# node/npm from source, then interactively configures pptpd, NAT and a
# netem qdisc, and finally starts the noise-tester server.
#
# BUG FIX: the shebang must be the very first line of the file; the
# original had a stray comment above it.
cd /etc
sudo yum install -y git
sudo yum install -y gcc-c++ make &
wait $!
echo Installed gcc-c++ and make
sleep 2
sudo yum install -y openssl-devel &
wait $!
echo Installed openssl-devel
sleep 3
sudo yum install -y ppp &
wait $!
wget http://poptop.sourceforge.net/yum/stable/rhel6/x86_64/pptpd-1.4.0-1.el6.x86_64.rpm &
wait $!
sudo yum -y localinstall pptpd-1.4.0-1.el6.x86_64.rpm &
wait $!
sudo git clone https://github.com/mikkopoyhonen/noisetester.git
# Let sudo find binaries installed under /usr/local/bin (node, npm).
sudo sed -i 's/secure_path = \/sbin:\/bin:\/usr\/sbin:\/usr\/bin/secure_path = \/sbin:\/bin:\/usr\/sbin:\/usr\/bin:\/usr\/local\/bin/g' /etc/sudoers
sudo git clone https://github.com/nodejs/node.git &
wait $!
echo Cloned node.git
sleep 2
echo entering nodejs workfolder
cd node
git checkout v0.12.2
sudo ./configure
make &
wait $!
sudo make install &
wait $!
sleep 3
sudo git clone https://github.com/isaacs/npm.git &
wait $!
echo cloned NPM
echo entering npm workfolder
cd npm
sudo make install &
wait $!
cd /etc
echo returned to /etc, now pulling Noise-tester
echo waiting for npm to finish installing
cd /etc/noisetester/server
sleep 20
npm install &
wait $!
# --- interactive pptpd configuration ---------------------------------
read -p "Enter your IP address which you want to use for PPTP prefer elastic ip in amazon EC2 : " iplocal
echo "localip $iplocal" >> /etc/pptpd.conf
read -p "Enter ip address pool allocated to machines, for example 192.168.0.1-20 : " ipremote
echo "remoteip $ipremote" >> /etc/pptpd.conf
# BUG FIX: the original echoed the undefined variable $IPREMTOE (typo),
# so the confirmation message never showed the entered pool.
echo "Added $ipremote as remote ip!"
read -p "Enter username for pptp : " uname
echo "Hi, $uname. Let us be friends!"
read -p "Enter your password for pptp : " pword
echo "$uname pptpd $pword *" >> /etc/ppp/chap-secrets
# Hand out Google DNS to PPTP clients.
sudo sed -i 's/#ms-dns 10.0.0.1/ms-dns 8.8.8.8/g' /etc/ppp/options.pptpd
sudo sed -i 's/#ms-dns 10.0.0.2/ms-dns 8.8.4.4/g' /etc/ppp/options.pptpd
# Enable IPv4 forwarding and NAT so VPN clients can reach the internet.
sudo sed -i 's/net.ipv4.ip_forward = 0/net.ipv4.ip_forward = 1/g' /etc/sysctl.conf
sudo iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
sudo service iptables save &
wait $!
sudo service iptables restart &
wait $!
sudo /sbin/sysctl -p
sudo service pptpd stop
sleep 1
# Attach a netem qdisc so the noise tester can shape traffic on eth0.
sudo tc qdisc add dev eth0 root netem
sudo service pptpd start &
wait $!
sudo chkconfig pptpd on
cd /etc/noisetester/server/src
sudo node index.js
|
mikkopoyhonen/noise-builder
|
startup.sh
|
Shell
|
gpl-2.0
| 2,266 |
#!/bin/bash
set -e
##################################################################################################################
# Written to be used on 64 bits computers
# Author 	: 	Erik Dubois
# Website 	: 	http://www.erikdubois.be
##################################################################################################################
##################################################################################################################
#
#   DO NOT JUST RUN THIS. EXAMINE AND JUDGE. RUN AT YOUR OWN RISK.
#
##################################################################################################################
# Install a package with whichever AUR helper is available, skipping
# packages that are already installed. The original script duplicated
# this whole check/install sequence per package; it is factored into one
# function here.
# Arguments: $1 - package name
install_package() {
	local package="$1"
	#checking if application is already installed or else install with aur helpers
	if pacman -Qi "$package" &> /dev/null; then
		echo "################################################################"
		echo "################## "$package" is already installed"
		echo "################################################################"
	else
		#checking which helper is installed
		if pacman -Qi packer &> /dev/null; then
			echo "Installing with packer"
			packer -S --noconfirm --noedit "$package"
		elif pacman -Qi pacaur &> /dev/null; then
			echo "Installing with pacaur"
			pacaur -S --noconfirm --noedit "$package"
		elif pacman -Qi yaourt &> /dev/null; then
			echo "Installing with yaourt"
			yaourt -S --noconfirm "$package"
		fi
	fi
}
echo "################################################################"
echo "cinnamon"
echo "################################################################"
install_package "cinnamon"
install_package "mdm-display-manager"
echo "Display manager being activated"
sudo systemctl enable mdm.service
echo "Reboot and select the proper desktop environment"
echo "with the gauge symbol."
echo "################################################################"
echo "################### T H E   E N D ######################"
echo "################################################################"
echo "Type sudo reboot"
|
erikdubois/ArchCinnamon
|
installation/050-install-cinnamon-core-v1.sh
|
Shell
|
gpl-2.0
| 2,936 |
#!/bin/bash
# Build script for the Phasma kernel (LG bullhead / Nexus 5X): compiles
# the kernel with a chosen UBERTC toolchain and packs the result into an
# AnyKernel2 flashable zip.
rm .version
# Bash Color
green='\033[01;32m'
red='\033[01;31m'
blink_red='\033[05;31m'
restore='\033[0m'
clear
# Resources
# Parallel make jobs = number of CPUs reported by /proc/cpuinfo.
THREAD="-j$(grep -c ^processor /proc/cpuinfo)"
KERNEL="Image"
DTBIMAGE="dtb"
DEFCONFIG="phasma_defconfig"
# Kernel Details
VER=".R7.bullhead."
# Paths
KERNEL_DIR=`pwd`
REPACK_DIR="${HOME}/android/AK-OnePone-AnyKernel2"
PATCH_DIR="${HOME}/android/AK-OnePone-AnyKernel2/patch"
MODULES_DIR="${HOME}/android/AK-OnePone-AnyKernel2/modules"
ZIP_MOVE="${HOME}/android/AK-releases"
ZIMAGE_DIR="${HOME}/android/bullhead/arch/arm64/boot/"
# Functions
# Remove previous build artifacts from the AnyKernel tree, reset the
# staging git checkout, and mrproper the kernel tree.
function clean_all {
rm -rf $MODULES_DIR/*
cd ~/android/bullhead/out/kernel
rm -rf $DTBIMAGE
git reset --hard > /dev/null 2>&1
git clean -f -d > /dev/null 2>&1
cd $KERNEL_DIR
echo
make clean && make mrproper
}
# Configure (defconfig) and compile the kernel in parallel.
function make_kernel {
echo
make $DEFCONFIG
make $THREAD
}
# Copy every built kernel module (*.ko) into the AnyKernel modules dir.
function make_modules {
rm `echo $MODULES_DIR"/*"`
find $KERNEL_DIR -name '*.ko' -exec cp -v {} $MODULES_DIR \;
}
# Build the combined device-tree blob with dtbToolCM.
function make_dtb {
$REPACK_DIR/tools/dtbToolCM -2 -o $REPACK_DIR/$DTBIMAGE -s 2048 -p scripts/dtc/ arch/arm64/boot/
}
# Stage the zImage and append the ramdisk. appendramdisk.sh is sourced,
# so it runs in this shell -- TODO confirm what it expects/provides.
function make_boot {
cp -vr $ZIMAGE_DIR/Image.gz-dtb ~/android/bullhead/out/kernel/zImage
. appendramdisk.sh
}
# Zip the staged output and move the zip to the release directory.
function make_zip {
cd ~/android/bullhead/out
zip -r9 `echo $AK_VER`.zip *
mv  `echo $AK_VER`.zip $ZIP_MOVE
cd $KERNEL_DIR
}
DATE_START=$(date +"%s")
echo -e "${green}"
echo "-----------------"
echo "Making Kylo Kernel:"
echo "-----------------"
echo -e "${restore}"
# Toolchain selection: sets CROSS_COMPILE to the chosen UBERTC prefix.
while read -p "Do you want to use UBERTC 4.9(1) or UBERTC 5.2(2)? " echoice
do
case "$echoice" in
1 )
export CROSS_COMPILE=${HOME}/android/uberbuild/out/aarch64-linux-android-4.9-kernel/bin/aarch64-linux-android-
TC="UBER4.9"
echo
echo "Using UBERTC"
break
;;
2 )
export CROSS_COMPILE=${HOME}/android/uberbuild/out/aarch64-linux-android-5.2-kernel/bin/aarch64-linux-android-
TC="UBER5.2"
echo
echo "Using SM"
break
;;
* )
echo
echo "Invalid try again!"
echo
;;
esac
done
# Vars
BASE_AK_VER="Phasma"
AK_VER="$BASE_AK_VER$VER$TC"
# NOTE(review): LOCALVERSION is exported twice below -- the second line
# is redundant duplication.
export LOCALVERSION=~`echo $AK_VER`
export LOCALVERSION=~`echo $AK_VER`
export ARCH=arm64
export SUBARCH=arm64
export KBUILD_BUILD_USER=DespairFactor
export KBUILD_BUILD_HOST=DarkRoom
echo
# Optional cleanup step before building.
while read -p "Do you want to clean stuffs (y/n)? " cchoice
do
case "$cchoice" in
y|Y )
clean_all
echo
echo "All Cleaned now."
break
;;
n|N )
break
;;
* )
echo
echo "Invalid try again!"
echo
;;
esac
done
echo
# Full build pipeline: kernel -> dtb -> modules -> boot image -> zip.
while read -p "Do you want to build?" dchoice
do
case "$dchoice" in
y|Y )
make_kernel
make_dtb
make_modules
make_boot
make_zip
break
;;
n|N )
break
;;
* )
echo
echo "Invalid try again!"
echo
;;
esac
done
echo -e "${green}"
echo "-------------------"
echo "Build Completed in:"
echo "-------------------"
echo -e "${restore}"
DATE_END=$(date +"%s")
DIFF=$(($DATE_END - $DATE_START))
echo "Time: $(($DIFF / 60)) minute(s) and $(($DIFF % 60)) seconds."
echo
|
VRToxin-AOSP/android_kernel_lge_bullhead
|
despair-build.sh
|
Shell
|
gpl-2.0
| 2,997 |
#!/bin/bash
# Package pre-uninstall hook: stop the service before files are removed.
# '|| /bin/true' keeps the uninstall going even when the service is
# already stopped or the init script is missing.
/etc/init.d/evw-self-serve stop || /bin/true
|
UKHomeOffice/evw-self-serve
|
fpm/scripts/preuninstall.sh
|
Shell
|
gpl-2.0
| 58 |
# USPDroidsSimulator
# (C) 2007 Diogo Oliveira de Melo
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# executar.sh
#!/bin/bash
# Endlessly (re)launch one simulation round: the simulator kernel and
# strategy 1 are started in the background; the loop blocks on
# strategy 2 and begins a new round when it terminates.
while :
do
    echo "iniciando o kernel..."
    echo "new===" >> log.txt
    ./bin/simulator_main localhost 4321 > simulator_out &
    sleep 1
    echo "iniciando a estratégia 1..."
    ./bin/strategy localhost 1 &
    sleep 1
    echo "iniciando a estratégia 2..."
    ./bin/strategy_pf2 localhost 2 20 5
done
|
dmelo/uspds
|
executar_loop.sh
|
Shell
|
gpl-2.0
| 1,106 |
#!/usr/bin/env bash
# Open an interactive bash shell inside a running dev container.
# Usage: login.sh <container-name-or-id>
cd "$(dirname "${BASH_SOURCE[0]}")"
# ENV.sh presumably defines USER and other docker settings -- TODO confirm.
source installers/ENV.sh
TAG=$1
docker exec -it -u ${USER} ${TAG} bash
|
xiaoxq/single-script-toolkit
|
docker/dev-env/login.sh
|
Shell
|
gpl-2.0
| 128 |
#!/bin/sh
# Write a 256-byte file containing every byte value 0x00..0xFF in order.
#
# Usage: gen_ascii_bin.sh <output-file>
# Fail early with a usage message instead of an "ambiguous redirect"
# when no output path is given; quote the target so paths with spaces work.
if [ -z "$1" ]; then
    echo "usage: gen_ascii_bin.sh <output-file>" >&2
    exit 1
fi
awk 'BEGIN{for(i=0;i<256;i++)printf "%c",i}' > "$1"
|
scottrcarlson/dirt-simple-comms
|
gen_ascii_bin.sh
|
Shell
|
gpl-2.0
| 50 |
#! /bin/sh
# Copyright (C) 2011-2017 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.
# Test that some Java-related variables and rules are not repeatedly
# defined.
# ('test-init.sh' supplies $ACLOCAL, $AUTOMAKE, $EGREP, $sp, $tab, etc.)
. test-init.sh
# Use every flavour of the *_JAVA primary so duplicates would show up.
cat > Makefile.am << 'END'
javadir = $(datadir)/java
java_JAVA = a.java
dist_java_JAVA = b.java
nodist_java_JAVA = c.java
nobase_java_JAVA = d.java
nobase_dist_java_JAVA = e.java
nobase_nodist_java_JAVA = f.java
END
$ACLOCAL
$AUTOMAKE
$EGREP -i '\.stamp|\.class|java|classpath' Makefile.in # For debugging.
# Each of these variables must be defined exactly once in the generated
# Makefile.in, no matter how many *_JAVA primaries were used.
for var in JAVAC JAVAROOT CLASSPATH_ENV am__java_sources; do
grep "^$var =" Makefile.in
test $(grep -c "^[$sp$tab]*$var[$sp$tab]*=" Makefile.in) -eq 1
done
# Likewise the class-compilation stamp rule must appear exactly once.
grep '^classjava\.stamp:' Makefile.in
test $(grep -c "class.*java.*\.stamp.*:" Makefile.in) -eq 1
:
|
pylam/automake
|
t/java-no-duplicate.sh
|
Shell
|
gpl-2.0
| 1,360 |
#!/bin/bash
# Run the com_content administrator component PHPUnit tests and write a
# JSON log. On MAMP (macOS) the local MySQL socket is passed through to
# PHP; elsewhere PHPUnit's defaults are used.
# variable definition
# log file
readonly mlog=log_admin_content_tests.json
# default db socket for MAMP
readonly local_socket="/Applications/MAMP/tmp/mysql/mysql.sock"
# phpunit configuration file
readonly config_file=../administrator/components/com_content/Tests/phpunit.xml
# tests to be run
readonly testSuite=../administrator/components/com_content/Tests/.
# delete previous log file
# NOTE(review): '[ -a FILE ]' is a non-standard existence test; '-e' is
# the portable spelling (behaviour kept as-is here).
if [ -a "$mlog" ]
then
rm "$mlog"
fi
# administrator com_content component tests
if [ -a "$local_socket" ]
then
phpunit --stderr -d mysql.default_socket="$local_socket" --log-json "$mlog" -c "$config_file" "$testSuite"
else
phpunit --stderr --log-json "$mlog" -c "$config_file" "$testSuite"
fi
exit
|
AngelVillanueva/soap
|
tests/run_admin_content_tests.sh
|
Shell
|
gpl-2.0
| 711 |
#!/bin/sh
# Query the MiG server for the status of a resource exe via its CGI
# endpoint, authenticating with the user certificate configured in
# ~/.MiG/MiGuser.conf ($certfile, $key, $migserver are set in main).
#
# POSIX fix: 'function name(){' is a bashism and invalid under many
# /bin/sh implementations; the plain 'name(){' form works everywhere.
status_resource_exe(){
    unique_resource_name="$1"
    exe="$2"
    # Uncomment to debug
    # DEBUG_PREFIX="echo "
    cmd="${DEBUG_PREFIX}curl"
    # Specify password without making it visible in process
    # list (e.g. 'ps awwx')
    # NOTE(review): the awk output still ends up in curl's argv, so it
    # remains visible in 'ps' despite the comment above; consider using
    # a curl --config file instead.
    $cmd \
        --insecure \
        --cert "$certfile" \
        --key "$key" \
        --pass "$(awk '/pass/ {print $2}' "$MiGuserconf")" \
        --url "$migserver/cgi-bin/statusexe.py?unique_resource_name=${unique_resource_name}&exe_name=$exe"
}
function usage(){
echo "Usage..."
echo "status_resource_exe.sh unique_resource_name exe"
echo "Example: status_resource_exe.sh dido.imada.sdu.dk.0 exe"
}
########
# Main #
########
MiGuserconf=~/.MiG/MiGuser.conf
if [ ! -r $MiGuserconf ]; then
echo "status_resource_exe.sh requires a readable configuration in $MiGuserconf"
usage
exit 1
fi
migserver=`awk '/migserver/ {print $2}' $MiGuserconf`
certfile=`awk '/certfile/ {print $2}' $MiGuserconf`
key=`awk '/key/ {print $2}' $MiGuserconf`
if [ $# -eq 2 ]; then
status_resource_exe $1 $2
else
usage
exit 1
fi
|
heromod/migrid
|
mig/resource/status_resource_exe.sh
|
Shell
|
gpl-2.0
| 1,092 |
# Push the Unity vario configuration sentences to the serial device named
# in $TTY_DEV, pausing briefly after each write so the firmware can
# process it, then persist the settings with the final $UNSAVE* command.
for sentence in '$UNLT,10*' '$UNST,250*' '$UNSEN,70*' '$UNBF,2500*' \
                '$UNBR,100*' '$UNLV,9*' '$UNLBF,500*' '$UNSPA,95*' '$UNSPB,0*'
do
    printf '%s' "$sentence" > "$TTY_DEV"
    sleep 0.1
done
printf '$UNSAVE*' > "$TTY_DEV"
|
polachp/Unity
|
LK8000/kobo/unityConfig.sh
|
Shell
|
gpl-2.0
| 396 |
#! /bin/sh
# Make sure all of these programs work properly
# when invoked with --help or --version.
# Copyright (C) 2000-2014 Free Software Foundation, Inc.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
# Ensure that $SHELL is set to *some* value and exported.
# This is required for dircolors, which would fail e.g., when
# invoked via debuild (which removes SHELL from the environment).
test "x$SHELL" = x && SHELL=/bin/sh
export SHELL
. "${srcdir=.}/tests/init.sh"; path_prepend_ ./src
# Non-default exit statuses that each program is expected to return when
# writing its --help/--version output fails (e.g. to /dev/full).
expected_failure_status_chroot=125
expected_failure_status_env=125
expected_failure_status_nice=125
expected_failure_status_nohup=125
expected_failure_status_stdbuf=125
expected_failure_status_timeout=125
expected_failure_status_printenv=2
expected_failure_status_tty=3
expected_failure_status_sort=2
expected_failure_status_expr=3
expected_failure_status_lbracket=2
expected_failure_status_dir=2
expected_failure_status_ls=2
expected_failure_status_vdir=2
expected_failure_status_cmp=2
expected_failure_status_zcmp=2
expected_failure_status_sdiff=2
expected_failure_status_diff3=2
expected_failure_status_diff=2
expected_failure_status_zdiff=2
expected_failure_status_zgrep=2
expected_failure_status_zegrep=2
expected_failure_status_zfgrep=2
expected_failure_status_grep=2
expected_failure_status_egrep=2
expected_failure_status_fgrep=2
test "$built_programs" \
|| fail_ "built_programs not specified!?!"
test "$VERSION" \
|| fail_ "set envvar VERSION; it is required for a PATH sanity-check"
# Extract version from --version output of the first program
for i in $built_programs; do
v=$(env $i --version | sed -n '1s/.* //p;q')
break
done
# Ensure that it matches $VERSION.
test "x$v" = "x$VERSION" \
|| fail_ "--version-\$VERSION mismatch"
for i in $built_programs; do
# Skip 'test'; it doesn't accept --help or --version.
test $i = test && continue
# false fails even when invoked with --help or --version.
# true and false are tested with these options separately.
test $i = false || test $i = true && continue
# The just-built install executable is always named 'ginstall'.
test $i = install && i=ginstall
# Make sure they exit successfully, under normal conditions.
env $i --help >/dev/null || fail=1
env $i --version >/dev/null || fail=1
# Make sure they fail upon 'disk full' error.
if test -w /dev/full && test -c /dev/full; then
env $i --help >/dev/full 2>/dev/null && fail=1
env $i --version >/dev/full 2>/dev/null && fail=1
status=$?
test $i = [ && prog=lbracket || prog=$(echo $i|sed "s/$EXEEXT$//")
eval "expected=\$expected_failure_status_$prog"
test x$expected = x && expected=1
if test $status = $expected; then
: # ok
else
fail=1
echo "*** $i: bad exit status '$status' (expected $expected)," 1>&2
echo "  with --help or --version output redirected to /dev/full" 1>&2
fi
fi
done
bigZ_in=bigZ-in.Z
zin=zin.gz
zin2=zin2.gz
tmp=tmp-$$
tmp_in=in-$$
tmp_in2=in2-$$
tmp_dir=dir-$$
tmp_out=out-$$
mkdir $tmp || fail=1
cd $tmp || fail=1
# Per-program argument helpers: each ${prog}_setup assigns $args so that
# "prog $args < $tmp_in" exits successfully in the smoke-test loop below.
comm_setup () { args="$tmp_in $tmp_in"; }
csplit_setup () { args="$tmp_in //"; }
cut_setup () { args='-f 1'; }
join_setup () { args="$tmp_in $tmp_in"; }
tr_setup () { args='a a'; }
chmod_setup () { args="a+x $tmp_in"; }
# Punt on these.
chgrp_setup () { args=--version; }
chown_setup () { args=--version; }
mkfifo_setup () { args=--version; }
mknod_setup () { args=--version; }
# Punt on uptime, since it fails (e.g., failing to get boot time)
# on some systems, and we shouldn't let that stop 'make check'.
uptime_setup () { args=--version; }
# Create a file in the current directory, not in $TMPDIR.
mktemp_setup () { args=mktemp.XXXX; }
cmp_setup () { args="$tmp_in $tmp_in2"; }
# Tell dd not to print the line with transfer rate and total.
# The transfer rate would vary between runs.
dd_setup () { args=status=noxfer; }
zdiff_setup () { args="$zin $zin2"; }
zcmp_setup () { args="$zin $zin2"; }
zcat_setup () { args=$zin; }
gunzip_setup () { args=$zin; }
zmore_setup () { args=$zin; }
zless_setup () { args=$zin; }
znew_setup () { args=$bigZ_in; }
zforce_setup () { args=$zin; }
zgrep_setup () { args="z $zin"; }
zegrep_setup () { args="z $zin"; }
zfgrep_setup () { args="z $zin"; }
gzexe_setup () { args=$tmp_in; }
# We know that $tmp_in contains a "0"
grep_setup () { args="0 $tmp_in"; }
egrep_setup () { args="0 $tmp_in"; }
fgrep_setup () { args="0 $tmp_in"; }
diff_setup () { args="$tmp_in $tmp_in2"; }
sdiff_setup () { args="$tmp_in $tmp_in2"; }
diff3_setup () { args="$tmp_in $tmp_in2 $tmp_in2"; }
cp_setup () { args="$tmp_in $tmp_in2"; }
ln_setup () { args="$tmp_in ln-target"; }
ginstall_setup () { args="$tmp_in $tmp_in2"; }
mv_setup () { args="$tmp_in $tmp_in2"; }
mkdir_setup () { args=$tmp_dir/subdir; }
realpath_setup () { args=$tmp_in; }
rmdir_setup () { args=$tmp_dir; }
rm_setup () { args=$tmp_in; }
shred_setup () { args=$tmp_in; }
touch_setup () { args=$tmp_in2; }
truncate_setup () { args="--reference=$tmp_in $tmp_in2"; }
mkid_setup () { printf 'f(){}\ntypedef int t;\n' > f.c; args=. ; }
lid_setup () { args=; }
fid_setup () { args=f.c; }
fnid_setup () { args=; }
xtokid_setup () { args=; }
aid_setup () { args=f; }
eid_setup () { args=--version; }
gid_setup () { args=f; }
defid_setup () { args=t; }
basename_setup () { args=$tmp_in; }
dirname_setup () { args=$tmp_in; }
expr_setup () { args=foo; }
# Punt, in case GNU 'id' hasn't been installed yet.
groups_setup () { args=--version; }
pathchk_setup () { args=$tmp_in; }
yes_setup () { args=--version; }
logname_setup () { args=--version; }
nohup_setup () { args=--version; }
printf_setup () { args=foo; }
seq_setup () { args=10; }
sleep_setup () { args=0; }
stdbuf_setup () { args="-oL true"; }
timeout_setup () { args=--version; }
# I'd rather not run sync, since it spins up disks that I've
# deliberately caused to spin down (but not unmounted).
sync_setup () { args=--version; }
test_setup () { args=foo; }
# This is necessary in the unusual event that there is
# no valid entry in /etc/mtab.
df_setup () { args=/; }
# This is necessary in the unusual event that getpwuid (getuid ()) fails.
id_setup () { args=-u; }
# Use env to avoid invoking built-in sleep of Solaris 11's /bin/sh.
kill_setup () {
env sleep 31.5 &
args=$!
}
link_setup () { args="$tmp_in link-target"; }
unlink_setup () { args=$tmp_in; }
readlink_setup () {
ln -s . slink
args=slink;
}
stat_setup () { args=$tmp_in; }
unlink_setup () { args=$tmp_in; }
lbracket_setup () { args=": ]"; }
parted_setup () { args="-s $tmp_in mklabel gpt"
dd if=/dev/null of=$tmp_in seek=2000; }
# Ensure that each program "works" (exits successfully) when doing
# something more than --help or --version.
for i in $built_programs; do
# Skip these.
case $i in chroot|stty|tty|false|chcon|runcon) continue;; esac
rm -rf $tmp_in $tmp_in2 $tmp_dir $tmp_out $bigZ_in $zin $zin2
echo z |gzip > $zin
cp $zin $zin2
cp $zin $bigZ_in
# This is sort of kludgey: use numbers so this is valid input for factor,
# and two tokens so it's valid input for tsort.
echo 2147483647 0 > $tmp_in
# Make $tmp_in2 identical. Then, using $tmp_in and $tmp_in2 as arguments
# to the likes of cmp and diff makes them exit successfully.
cp $tmp_in $tmp_in2
mkdir $tmp_dir
# echo ================== $i
test $i = [ && prog=lbracket || prog=$(echo $i|sed "s/$EXEEXT$//")
# Run the program's setup hook if one is defined; default to no args.
if type ${prog}_setup > /dev/null 2>&1; then
${prog}_setup
else
args=
fi
if env $i $args < $tmp_in > $tmp_out; then
: # ok
else
echo FAIL: $i
fail=1
fi
rm -rf $tmp_in $tmp_in2 $tmp_out $tmp_dir
done
Exit $fail
|
oldcap/coreutils
|
tests/misc/help-version.sh
|
Shell
|
gpl-3.0
| 8,222 |
# Print usage text for the clone-del command (consumed by the ds help
# machinery of the surrounding framework).
cmd_clone-del_help() {
cat <<_EOF
clone-del <tag>
Delete clone 'btr_<tag>'.
_EOF
}
# Delete the clone identified by <tag>: drop its wsproxy domain, scrub
# its /etc/hosts entry inside the wsproxy container, run the in-container
# cleanup script, and drop its database.
# NOTE(review): relies on helpers/vars supplied by the surrounding ds
# framework (fail, ds, DOMAIN, DBHOST, DBNAME) -- not defined here.
cmd_clone-del() {
local tag=$1
[[ -n $tag ]] || fail "Usage:\n$(cmd_clone-del_help)"
ds @wsproxy domains-rm $tag.$DOMAIN
# Edit /etc/hosts via a copy: /etc/hosts is a bind mount inside the
# container, so it must be rewritten in place rather than replaced.
ds @wsproxy exec sh -c "
cp /etc/hosts /etc/hosts.1 ;
sed -i /etc/hosts.1 -e '/$tag.$DOMAIN/d' ;
cat /etc/hosts.1 > /etc/hosts;
rm -f /etc/hosts.1"
ds inject dev/clone-del.sh $tag
ds @$DBHOST exec mysql -e "drop database if exists ${DBNAME}_$tag"
}
|
B-Translator/btr_server
|
ds/cmd/clone-del.sh
|
Shell
|
gpl-3.0
| 529 |
#!/bin/bash
# Builds libgpg-error for all three current iPhone targets: iPhoneSimulator-i386,
# iPhoneOS-armv6, iPhoneOS-armv7.
#
# Copyright 2012 Mike Tigas <[email protected]>
#
# Based on work by Felix Schulze on 16.12.10.
# Copyright 2010 Felix Schulze. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
# Choose your libgpg-error version and your currently-installed iOS SDK version:
#
# libgpg-error release to build.
VERSION="1.10"
# iOS SDK version installed with the local Xcode.
SDKVERSION="6.0"
#
#
###########################################################################
#
# Don't change anything under this line!
#
###########################################################################
# No need to change this since xcode build will only compile in the
# necessary bits from the libraries we create
ARCHS="i386 armv7 armv7s"
DEVELOPER=`xcode-select -print-path`
# Work from the directory containing this script.
cd "`dirname \"$0\"`"
REPOROOT=$(pwd)
# Where we'll end up storing things in the end
OUTPUTDIR="${REPOROOT}/dependencies"
mkdir -p ${OUTPUTDIR}/include
mkdir -p ${OUTPUTDIR}/lib
mkdir -p ${OUTPUTDIR}/bin
BUILDDIR="${REPOROOT}/build"
# where we will keep our sources and build from.
SRCDIR="${BUILDDIR}/src"
mkdir -p $SRCDIR
# where we will store intermediary builds
INTERDIR="${BUILDDIR}/built"
mkdir -p $INTERDIR
########################################
cd $SRCDIR
# Exit the script if an error happens
set -e
# Fetch the libgpg-error source tarball unless a cached copy exists.
if [ ! -e "${SRCDIR}/libgpg-error-${VERSION}.tar.bz2" ]; then
	echo "Downloading libgpg-error-${VERSION}.tar.bz2"
	curl -LO ftp://ftp.gnupg.org/gcrypt/libgpg-error/libgpg-error-${VERSION}.tar.bz2
else
	echo "Using libgpg-error-${VERSION}.tar.bz2"
fi
# BUGFIX: the archive is bzip2-compressed (.tar.bz2), so it must be
# extracted with -j (bzip2); the original 'tar zxf' requested gzip (-z)
# and fails on a bzip2 archive.
tar xjf libgpg-error-${VERSION}.tar.bz2 -C "$SRCDIR"
cd "${SRCDIR}/libgpg-error-${VERSION}"
set +e # don't bail out of bash script if ccache doesn't exist
CCACHE=`which ccache`
if [ $? == "0" ]; then
echo "Building with ccache: $CCACHE"
CCACHE="${CCACHE} "
else
echo "Building without ccache"
CCACHE=""
fi
set -e # back to regular "bail out on error" mode
# Configure, build, and install libgpg-error once per target architecture,
# each into its own per-SDK staging prefix under $INTERDIR.
for ARCH in ${ARCHS}
do
if [ "${ARCH}" == "i386" ];
then
PLATFORM="iPhoneSimulator"
EXTRA_CONFIG=""
else
PLATFORM="iPhoneOS"
EXTRA_CONFIG="--host=arm-apple-darwin10"
fi
mkdir -p "${INTERDIR}/${PLATFORM}${SDKVERSION}-${ARCH}.sdk"
./configure --disable-shared --enable-static ${EXTRA_CONFIG} \
--prefix="${INTERDIR}/${PLATFORM}${SDKVERSION}-${ARCH}.sdk" \
CC="${CCACHE}${DEVELOPER}/Platforms/${PLATFORM}.platform/Developer/usr/bin/gcc -arch ${ARCH}" \
LDFLAGS="$LDFLAGS -L${OUTPUTDIR}/lib" \
CFLAGS="$CFLAGS -I${OUTPUTDIR}/include -isysroot ${DEVELOPER}/Platforms/${PLATFORM}.platform/Developer/SDKs/${PLATFORM}${SDKVERSION}.sdk" \
CPPFLAGS="$CPPFLAGS -I${OUTPUTDIR}/include -isysroot ${DEVELOPER}/Platforms/${PLATFORM}.platform/Developer/SDKs/${PLATFORM}${SDKVERSION}.sdk"
# Build the application and install it to the fake SDK intermediary dir
# we have set up. Make sure to clean up afterward because we will re-use
# this source tree to cross-compile other targets.
make -j2
make install
make clean
done
########################################
echo "Build library..."
# These are the libs that comprise libgpg-error.
OUTPUT_LIBS="libgpg-error.a"
for OUTPUT_LIB in ${OUTPUT_LIBS}; do
INPUT_LIBS=""
for ARCH in ${ARCHS}; do
if [ "${ARCH}" == "i386" ];
then
PLATFORM="iPhoneSimulator"
else
PLATFORM="iPhoneOS"
fi
INPUT_ARCH_LIB="${INTERDIR}/${PLATFORM}${SDKVERSION}-${ARCH}.sdk/lib/${OUTPUT_LIB}"
if [ -e $INPUT_ARCH_LIB ]; then
INPUT_LIBS="${INPUT_LIBS} ${INPUT_ARCH_LIB}"
fi
done
# Combine the three architectures into a universal library.
if [ -n "$INPUT_LIBS" ]; then
lipo -create $INPUT_LIBS \
-output "${OUTPUTDIR}/lib/${OUTPUT_LIB}"
else
echo "$OUTPUT_LIB does not exist, skipping (are the dependencies installed?)"
fi
done
# Copy the installed headers into the shared output dir (once is enough,
# since they are identical across architectures).
for ARCH in ${ARCHS}; do
if [ "${ARCH}" == "i386" ];
then
PLATFORM="iPhoneSimulator"
else
PLATFORM="iPhoneOS"
fi
cp -R ${INTERDIR}/${PLATFORM}${SDKVERSION}-${ARCH}.sdk/include/* ${OUTPUTDIR}/include/
# NOTE(review): under 'set -e' a failing cp aborts the script before
# this $? check runs, so the retry-next-arch fallback is effectively
# dead code -- TODO confirm intended behavior.
if [ $? == "0" ]; then
# We only need to copy the headers over once. (So break out of forloop
# once we get first success.)
break
fi
done
for ARCH in ${ARCHS}; do
if [ "${ARCH}" == "i386" ];
then
PLATFORM="iPhoneSimulator"
else
PLATFORM="iPhoneOS"
fi
cp -R ${INTERDIR}/${PLATFORM}${SDKVERSION}-${ARCH}.sdk/bin/* ${OUTPUTDIR}/bin/
if [ $? == "0" ]; then
# We only need to copy the binaries over once. (So break out of forloop
# once we get first success.)
break
fi
done
####################
echo "Building done."
echo "Cleaning up..."
rm -fr ${INTERDIR}
rm -fr "${SRCDIR}/libgpg-error-${VERSION}"
echo "Done."
|
auviz/safejab
|
OTRKit/build-libgpg-error.sh
|
Shell
|
gpl-3.0
| 5,442 |
#! /bin/sh
# Author: Na Li <[email protected]>
# Created: 2005/06/16 18:51:57
# Time-stamp: "Thu Dec 02 09:59:57 CST 2004 (nali@bass)"
# Purpose: run R scripts that allow command line arguments
#
# Usage: runR.sh Rscript.R [ arguments ]
# The script is source()d inside a vanilla R session; trailing arguments
# are forwarded via --args and readable in R through commandArgs().

if [ $# -lt 1 ]; then
    echo "Usage: $(basename "$0") Rscript.R [ arguments ]"
    exit 2
fi

RCMD="R --vanilla -q"
RSCRIPT=$1

if [ ! -f "$RSCRIPT" ]; then
    echo "R script '$RSCRIPT' does not exist"
    exit 3
fi
shift

# BUGFIX: forward the remaining arguments as "$@" (quoted) so arguments
# containing spaces reach R intact; the original unquoted $@ split them.
echo "source (\"${RSCRIPT}\", echo = TRUE)" | ${RCMD} --args "$@"
|
rna-seq/pipeline
|
bin/runR.sh
|
Shell
|
gpl-3.0
| 474 |
#!/bin/bash
# Remove the cube server instance by delegating to server-instance.sh,
# located relative to this script's own directory.

# Echo the absolute directory containing this script, resolving symlinks.
get_full_path() {
    # Absolute path to this script, e.g. /home/user/bin/foo.sh
    local script
    script=$(readlink -f "$0")
    if [ ! -d "$script" ]; then
        # $0 resolved to a file: reduce it to its containing directory.
        script=$(dirname "$script")
    fi
    # '&&' (instead of ';') ensures we do not print the wrong directory
    # when the cd fails.
    ( cd "$script" && pwd )
}

export SYS_CONFIG_DIR="/etc/sysconfig"
export INITD_CONFIG_DIR="/etc/init.d"

# Quote the command substitution so paths containing spaces work
# (the original unquoted expansions broke on such paths).
"$(get_full_path)"/../server-instance.sh --action=remove --instance=server --destructive
|
LeonanCarvalho/testcube-server
|
cube-build/files/cube-server/bin/redhat/uninstall.sh
|
Shell
|
gpl-3.0
| 459 |
#!/bin/sh
# Ensure GNU address extensions are rejected in posix mode
# Copyright (C) 2016-2018 Free Software Foundation, Inc.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.
# Uses the sed testsuite framework: compare_ diffs expected vs. actual,
# returns_ asserts a specific exit status, framework_failure_ aborts setup.
. "${srcdir=.}/testsuite/init.sh"; path_prepend_ ./sed
print_ver_ sed
# Expected diagnostics when the GNU extensions are rejected under --posix.
cat <<\EOF> exp-err-addr0 || framework_failure_
sed: -e expression #1, char 6: invalid usage of line address 0
EOF
cat <<\EOF >exp-err-bad-addr || framework_failure_
sed: -e expression #1, char 3: unexpected `,'
EOF
printf "%s\n" A B A C D E F G H I J >in1 || framework_failure_
# The expected output with zero-line address '0,/A/'
# the regex will match the first line
printf "A\n" >exp-l0 || framework_failure_
# The expected output with one-line address '1,/A/'
# the regex will not be checked against the first line,
# will match the third line
printf "%s\n" A B A >exp-l1 || framework_failure_
# The expected output with address '2,+1'
# (from line 2, count 1 addition line = line 3)
printf "%s\n" B A >exp-plus || framework_failure_
# The expected output with address '5,~4'
# (from line 5 till a multiple of 4 = line 8)
printf "%s\n" D E F G >exp-mult || framework_failure_
#
# Addressing extension: 0,/regexp/
#
# sanity check: address line=1 is valid for both posix and gnu
sed -n '1,/A/p' in1 > out-l1 || fail=1
compare_ exp-l1 out-l1 || fail=1
# address line=0 is a gnu extension
sed -n '0,/A/p' in1 > out-gnu-l0 || fail=1
compare_ exp-l0 out-gnu-l0 || fail=1
# rejected in posix mode
returns_ 1 sed --posix -n '0,/A/p' in1 2>err-posix-l0 || fail=1
compare_ exp-err-addr0 err-posix-l0 || fail=1
#
# Addressing extension: addr,+N
#
sed -n '2,+1p' in1 > out-plus || fail=1
compare_ exp-plus out-plus || fail=1
returns_ 1 sed --posix -n '2,+1p' in1 2> err-plus || fail=1
compare_ exp-err-bad-addr err-plus || fail=1
#
# Addressing extension: addr,~N
#
sed -n '5,~4p' in1 > out-mult || fail=1
compare_ exp-mult out-mult || fail=1
returns_ 1 sed --posix -n '5,~4p' in1 2> err-mult || fail=1
compare_ exp-err-bad-addr err-mult || fail=1
Exit $fail
|
pexip/os-sed
|
testsuite/posix-mode-addr.sh
|
Shell
|
gpl-3.0
| 2,607 |
#!/bin/sh
# Docker entrypoint: when invoked as "bootstrap", read RabbitMQ policy
# definitions from the Rancher metadata service and create each one via
# the RabbitMQ management HTTP API; any other invocation is exec'd
# verbatim (standard entrypoint idiom).
set -e

echo "[INFO]: Starting up..."

if [ "${1}" = "bootstrap" ]; then
    POLICIES=$(curl -sS http://${RANCHER_API_HOST}/${RANCHER_API_VERSION}/self/service/metadata/policies)
    # BUGFIX: the original 'if ( "${POLICIES}" = "Not found" )' ran the
    # metadata reply as a command in a subshell instead of comparing
    # strings; use a POSIX test.
    if [ "${POLICIES}" = "Not found" ]; then
        echo "[INFO]: cant find any policies...skipping"
    else
        echo "[DEBUG]: found policies: ${POLICIES}"
        for p in ${POLICIES}; do
            # strip the trailing slash
            policy=$(echo "${p}" | sed -e 's/\///g')
            echo "[INFO]: creating policy: ${policy}"
            pattern=$(curl -sS http://${RANCHER_API_HOST}/${RANCHER_API_VERSION}/self/service/metadata/policies/${policy}/pattern)
            definition=$(curl -sS http://${RANCHER_API_HOST}/${RANCHER_API_VERSION}/self/service/metadata/policies/${policy}/definition)
            json=$(cat <<EOF
{
"pattern":"${pattern}",
"definition": ${definition}
}
EOF
)
            STATUSCODE=$(curl \
                --silent \
                --output /dev/stderr \
                --write-out "%{http_code}" \
                -X PUT \
                -H "Content-Type: application/json" \
                -H "Accept: application/json" \
                -d "${json}" \
                -u "guest:guest" \
                http://rabbitmq:15672/api/policies/%2f/${policy})
            echo "[INFO]: Status code: $STATUSCODE"
            # BUGFIX: [[ ... ]] is a bashism; this script runs under
            # /bin/sh, so use two POSIX tests joined with ||.
            if [ "$STATUSCODE" -eq 204 ] || [ "$STATUSCODE" -eq 200 ]; then
                echo "[INFO]: created policy: ${policy}"
            else
                echo "[ERROR]: failed to create policy: ${policy} with statuscode: ${STATUSCODE}"
                exit 1
            fi
        done
    fi
else
    # Quote "$@" so arguments containing spaces are preserved.
    exec "$@"
fi
|
nodeintegration/rabbitmq-bootstrap
|
docker-entrypoint.sh
|
Shell
|
gpl-3.0
| 1,483 |
# Interactive deployment script for the MECAT-ANSTO MyTARDIS server.
# Installs OS packages, checks out and builds the application, seeds the
# database, and configures tomcat/solr/jOAI.  Several steps pause for
# manual intervention -- run attended.
set -u
set -e
echo "This script has manual steps and will prompt you"
echo "Press enter to continue..."
read
#WARN: do not use spaces in these...
UNPACK_HOME=/root/deploy #you will need to edit buildout.cfg if you change this
MECAT_HOME=/opt/mecat
MECAT_ANSTO=$MECAT_HOME/mecat-ansto
DJANGO=$MECAT_ANSTO/bin/django
TOMCAT_HOME=/usr/local/tomcat6
#some defaults
yum install subversion python26 cyrus-sasl-ldap cyrus-sasl-devel openldap-devel libxslt libxslt-devel libxslt-python python26-devel gcc openssl-devel httpd python26-mod_wsgi java-1.6.0-openjdk java-1.6.0-openjdk-devel
/sbin/chkconfig httpd on
/sbin/service httpd start
#prepare home directory
mkdir $MECAT_HOME
chmod 755 $MECAT_HOME
# checkout code
svn co http://mytardis.googlecode.com/svn/apps/mecat-ansto/tags/mecat-ansto-dec2011 $MECAT_ANSTO
cd $MECAT_ANSTO
python26 bootstrap.py
cp -f $UNPACK_HOME/buildout_deploy.cfg $MECAT_ANSTO/buildout_deploy.cfg
echo "copy instantclient files to /root/"
echo "press enter when done"
read
cd $MECAT_ANSTO
./bin/buildout -c buildout_deploy.cfg
# Make the bundled Oracle client libraries resolvable system-wide.
echo "$MECAT_ANSTO/parts/python-oracle/" > /etc/ld.so.conf.d/oracle.conf
/sbin/ldconfig
# set permissions of directories defined in buildout
mkdir /var/mytardis/log
chown -R apache:apache /var/mytardis/{store,staging,oai,log}
cp -f $UNPACK_HOME/settings_deploy.py $MECAT_ANSTO/mecat/settings_deploy.py
#prepare database
$DJANGO syncdb --noinput # ignore warnings about indexes at the end
echo "Schema.objects.all().delete()" | $DJANGO shell_plus # deletes initial (AS-specific) fixtures
#load schemas
$DJANGO loadschemas $MECAT_ANSTO/src/ands_register/ands_register/fixtures/ands_register_schema.json
$DJANGO loadschemas $MECAT_ANSTO/src/related_info/related_info/fixtures/related_schemas.json
$DJANGO loadschemas $UNPACK_HOME/ansto_schemas.json
#creating seed data
$DJANGO createembargopermission
$DJANGO createtokenuser
echo "manual step"
echo "press enter to continue"
read
$DJANGO createsuperuser
# add http to https redirect
cp $UNPACK_HOME/httpd.conf.add /etc/httpd/conf.d/https_redirect.conf
#add cronjobs
crontab $UNPACK_HOME/cronjobs.txt
# single search/SOLR
echo "MANUAL STEP! edit $MECAT_ANSTO/mecat/settings_deploy.py SINGLE_SEARCH_ENABLED=True"
echo "Press enter to continue..."
read
tar xvzf $UNPACK_HOME/apache-solr-1.4.1.tgz -C $UNPACK_HOME
tar xvzf $UNPACK_HOME/apache-tomcat-6.0.33.tar.gz -C $UNPACK_HOME
mkdir -p `dirname $TOMCAT_HOME`
cp -r $UNPACK_HOME/apache-tomcat-6.0.33 $TOMCAT_HOME/
cp -r $UNPACK_HOME/apache-solr-1.4.1/example/solr/ $TOMCAT_HOME/solr
mkdir -p $TOMCAT_HOME/conf/Catalina/localhost/
cp $UNPACK_HOME/solr.xml $TOMCAT_HOME/conf/Catalina/localhost/solr.xml
cp $UNPACK_HOME/apache-solr-1.4.1/dist/apache-solr-1.4.1.war $TOMCAT_HOME/webapps/solr.war
# Build the commons-daemon native launcher (jsvc) used by the tomcat6
# init script installed below.
cd $TOMCAT_HOME/bin
tar xvf commons-daemon-native.tar.gz
cd commons-daemon-1.0.7-native-src/unix
autoconf && ./configure --with-java=/usr/lib/jvm/java-1.6.0-openjdk.x86_64/ && make
#jOAI
unzip $UNPACK_HOME/joai_v3.1.1.zip
cp $UNPACK_HOME/joai_v3.1.1/oai.war $TOMCAT_HOME/webapps
cp $UNPACK_HOME/tomcat6 /etc/init.d/tomcat6
/sbin/chkconfig tomcat6 on
/sbin/service tomcat6 start
cp $UNPACK_HOME/proxy_ajp.conf /etc/httpd/conf.d/
cp $UNPACK_HOME/python26-mod_wsgi.conf /etc/httpd/conf.d/
/sbin/service httpd restart
cp $UNPACK_HOME/tomcat-users.xml $TOMCAT_HOME/conf/tomcat-users.xml
echo 'uncomment the comment starting just before security-constraint in "$TOMCAT_HOME/webapps/oai/WEB-INF/web.xml"'
echo 'change the username/password in $TOMCAT_HOME/conf/tomcat-users.xml for role oai_admin'
echo 'press enter to continue'
read
/sbin/service tomcat6 stop && /sbin/service tomcat6 start
# manual OAI setup
echo "setup OAI by visiting http://localhost/oai"
echo "data provider > repository information and administration"
echo "Name: ANSTO MyTARDIS"
echo "administrator e-mail: [email protected]"
echo "description: ANSTO tardis-test OAI-PMH/RIF-CS Server"
echo "Identifier: tardis-test.nbi.ansto.gov.au"
echo "Press enter to continue..."
read
echo "data provider > metadata files configuration > add metadata directory"
echo "Name: tardis-test rif-cs"
echo "Format of files: rif"
echo "Path: /var/mytardis/oai/"
echo "Press enter to continue..."
read
cp $UNPACK_HOME/.netrc /root/.netrc
echo 'edit $TOMCAT_HOME/solr/conf/solrconfig.xml dataDir
<dataDir>${solr.data.dir:./solr/data}</dataDir>
<dataDir>${solr.data.dir:/var/solr/data}</dataDir>
'
echo 'press enter to continue..'
read
/sbin/service tomcat6 stop && /sbin/service tomcat6 start
cp $UNPACK_HOME/update-solr-schema.sh $MECAT_ANSTO/src/MyTARDIS/utils/update-solr-schema.sh
$MECAT_ANSTO/src/MyTARDIS/utils/update-solr-schema.sh
echo "Done"
|
IntersectAustralia/dc2c
|
deploy/install.sh
|
Shell
|
gpl-3.0
| 4,663 |
#!/bin/bash
# Author: Alexander Epstein https://github.com/alexanderepstein
#
# Interactively uninstall Bash-Snippets tools.  For each tool whose
# binary is installed in /usr/local/bin, ask the user for confirmation
# and run that tool's own uninstall.sh from its source directory.
# Replaces eight near-identical copy-pasted stanzas with one helper;
# error handling is normalized to the majority behavior of the original
# (cd failures abort the script, a failing uninstall.sh does not).

# maybe_uninstall <tool>
# Prompt for and run <tool>/uninstall.sh if /usr/local/bin/<tool> exists.
maybe_uninstall() {
  local tool=$1
  local answer
  [[ -f "/usr/local/bin/$tool" ]] || return 0
  echo -n "Do you wish to uninstall $tool [Y/n]: "
  read -r answer
  if [[ "$answer" == "Y" || "$answer" == "y" ]]; then
    cd "$tool" || exit 1
    ./uninstall.sh
    cd .. || exit 1
  fi
  return 0
}

# Same tool order as the original script.
for tool in currency stocks weather crypt movies taste short geo; do
  maybe_uninstall "$tool"
done
|
Priyansh2/test
|
Bash-Snippets-master/uninstall.sh
|
Shell
|
gpl-3.0
| 2,020 |
# Live-mirror the Android device screen: 'adb shell screenrecord' emits a
# raw h264 stream on stdout, which ffplay decodes and displays.
adb shell screenrecord --output-format=h264 - | ffplay -
|
digitalghost/pycv-gameRobot
|
ffplayScreencast.sh
|
Shell
|
gpl-3.0
| 57 |
#!/bin/sh
#
# make manager files
#
# Copyright 2011-2012, Bmath Web Application Platform Project. (http://bmath.jp)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#
# @copyright Copyright 2011-2012, Bmath Web Application Platform Project. (http://bmath.jp)
# @link http://bmath.jp Bmath Web Application Platform Project
# @package Ao.webapp.tool
# @since
# @license GNU/GPLv3 http://www.gnu.org/licenses/gpl-3.0.html
# @author aotake ([email protected])
#
##
## Script that generates the Form, Validator, Model and Vo scaffolding
## for a manager.
##
##
# Module name and manager name of the manager to generate.
module_name=$1
manager_name=$2
# Modules used by the manager, given in the following format:
# mod_name=tbl_name:mod_name2=tbl_name2:....
# NOTE(review): $3 (the literal "Manager") is not read here; dbconf.php
# is invoked below with a hard-coded "Manager" argument.
use_module=$4
#if [ "$1" = "" ]; then
if [ $# -lt 4 ]; then
/bin/echo "Usage: $0 <module_name> <manager_name> Manager mod1=tbl1:mod2=tbl2:..."
/bin/echo "  e.g.) $0 admin-photo index Manager user=user_prof:user=user_prof_opt"
exit
fi
. ./func.sh
toPascal $module_name HYPHEN; MODULE_NAME=$RESULT
toPascal $manager_name; MANAGER_NAME=$RESULT
## Set up the target directories.
LIBDIR=../modules/$module_name/libs/$MODULE_NAME
HANDLER_DIR=$LIBDIR/Handler
MANAGER_DIR=$LIBDIR/Manager
FORM_DIR=$LIBDIR/Form
VALIDATOR_DIR=$LIBDIR/Form/Validator
MODEL_DIR=$LIBDIR/Model
VO_DIR=$LIBDIR/Vo
if [ ! -d $LIBDIR ]; then
/bin/echo "ERROR: No such libdir: $LIBDIR"
/bin/echo "----> Please check LIBDIR path"
exit
fi
_DIRS="$HANDLER_DIR $MANAGER_DIR $FORM_DIR $VALIDATOR_DIR $MODEL_DIR $VO_DIR"
for d in $_DIRS; do
if [ ! -d $d ]; then
mkdir $d
fi
done
## Copy from the skeleton directory with string substitution.
SKEL_DIR=./skel
## Generate the Manager file and copy it into place, prompting before
## overwriting an existing file.
/bin/echo -n "===> put ${MANAGER_DIR}/${MANAGER_NAME}.php"
php ./dbconf.php $module_name $manager_name Manager $use_module > tmp
CMD="cp tmp $MANAGER_DIR/${MANAGER_NAME}.php"
if [ -f $MANAGER_DIR/${MANAGER_NAME}.php ]; then
/bin/echo
/bin/echo "already exist: $MANAGER_DIR/${MANAGER_NAME}.php"
/bin/echo -n "Do you want to replace this file? [y/N]"
read answer
if [ "$answer" = "y" ]; then
${CMD}
/bin/echo ".....replaced."
/bin/echo
else
/bin/echo ".....skip."
/bin/echo
fi
else
${CMD}
/bin/echo ".....created."
fi
## Remove the temporary file.
rm -f ./tmp
/bin/echo
/bin/echo done.
|
aotake/peachp1
|
webapp/tool/manager.sh
|
Shell
|
gpl-3.0
| 3,035 |
#!/bin/bash
# Clone all of the author's GitHub repositories into the current
# directory.  The repository list is data-driven; the loop replaces six
# copies of identical clone/echo boilerplate.

# Repositories to fetch, in the original clone order.
repos=(
  liuchangdong.github.io
  dotfiles
  resume
  linux-learning
  android-app
  cs-basic
)

for repo in "${repos[@]}"; do
  echo "Get ${repo}.git start"
  git clone "https://github.com/liuchangdong/${repo}.git"
  echo "Get ${repo}.git done"
done
|
liuchangdong/vimconf
|
github.sh
|
Shell
|
gpl-3.0
| 957 |
# Analyze the BCD counter and its testbench with GHDL, elaborate the
# testbench, run it for 20us dumping a VCD waveform, then view the
# waveform in GTKWave.
ghdl -a bcd_counter/cont_bcd.vhd
ghdl -a bcd_counter/cont_bcd_tb.vhd
ghdl -e cont_bcd_tb
ghdl -r cont_bcd_tb --vcd=out.vcd --stop-time=20us
gtkwave out.vcd
|
Xero-Hige/LuGus-VHDL
|
TPS-2016/tps-Gaston/tp1/run.sh
|
Shell
|
gpl-3.0
| 156 |
#!/bin/bash
# Build the or1200 UART test program and convert it to a Verilog memory
# image (uart.vmem) for simulation.

# Common cross-compiler flags, previously repeated on every line:
# hardware mul/div, freestanding (no libc), debug info, no optimization.
# Intentionally unquoted at use sites so the flags word-split.
CFLAGS="-Wall -mhard-mul -mhard-div -nostdlib -g -O0 -I./"

# Host-side helper that converts a raw binary into a .vmem image.
gcc -o bin2vmem bin2vmem.c

# C sources.
for src in int support main uart tick interrupts; do
	or32-elf-gcc $CFLAGS -c ${src}.c -o ${src}.o
done

# Assembly sources; reset.S is built with caches disabled (IC=0, DC=0).
or32-elf-gcc $CFLAGS -c -o except.o except.S
or32-elf-gcc $CFLAGS -c -DIC=0 -DDC=0 -o reset.o reset.S

# Link against the board linker script and emit the memory image.
or32-elf-ld -T orp.ld main.o uart.o support.o int.o reset.o except.o tick.o interrupts.o -o uart.or32
or32-elf-objcopy -O binary uart.or32 uart.bin
./bin2vmem uart.bin > uart.vmem
|
feddischson/soc_maker
|
examples/or1200_test/sw/compile.sh
|
Shell
|
gpl-3.0
| 928 |
# Build both the binary and source RPMs (-ba) from the atom spec file.
rpmbuild -ba SPECS/atom.spec
|
fusion809/rpmbuild
|
build.sh
|
Shell
|
gpl-3.0
| 29 |
#!/bin/bash
# Documentation
# http://rogerdudler.github.io/git-guide/
# https://stackoverflow.com/questions/5617211/what-is-git-remote-add-and-git-push-origin-master
# Initialize the local repository
# cd ~/NetBeansProjects
# git init
# git remote add origin [email protected]:jsfan3/CodenameOne-Apps.git
# Update all local files from the GIT repository
cd ~/NetBeansProjects
git pull origin master
# NOTE: 'checkout -f HEAD' discards any uncommitted local modifications.
git checkout -f HEAD
./bashScripts/restoreAll.sh
read -p "Press ENTER to exit..."
|
jsfan3/CodenameOne-Apps
|
bashScripts/updateLocalFromGit.sh
|
Shell
|
gpl-3.0
| 483 |
#!/bin/sh
#-------------------------------------------------------------------
# config.sh: This file is read at the beginning of the execution of the ASGS to
# set up the runs that follow. It is reread at the beginning of every cycle,
# every time it polls the datasource for a new advisory. This gives the user
# the opportunity to edit this file mid-storm to change config parameters
# (e.g., the name of the queue to submit to, the addresses on the mailing list,
# etc)
#-------------------------------------------------------------------
#
# Copyright(C) 2019 Jason Fleming
#
# This file is part of the ADCIRC Surge Guidance System (ASGS).
#
# The ASGS is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# ASGS is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE.  See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# the ASGS.  If not, see <http://www.gnu.org/licenses/>.
#-------------------------------------------------------------------
# NOTE: this file is sourced by the ASGS driver; variables such as
# $SCRIPTDIR, $HPCENVSHORT, $USER and the scenario index $si are
# provided by the caller.
# Fundamental
INSTANCENAME=southfl_al052019_jgf  # "name" of this ASGS process
# Input files and templates
GRIDNAME=southfl_v11-1_final
source $SCRIPTDIR/config/mesh_defaults.sh
# Physical forcing (defaults set in config/forcing_defaults.sh)
TIDEFAC=on            # tide factor recalc
HINDCASTLENGTH=30.0   # length of initial hindcast, from cold (days)
BACKGROUNDMET=off     # NAM download/forcing
FORECASTCYCLE="06"
TROPICALCYCLONE=on    # tropical cyclone forcing
STORM=05              # storm number, e.g. 05=ernesto in 2006
YEAR=2019             # year of the storm
WAVES=off             # wave forcing
REINITIALIZESWAN=no   # used to bounce the wave solution
VARFLUX=off           # variable river flux forcing
CYCLETIMELIMIT="99:00:00"
# Computational Resources (related defaults set in platforms.sh)
NCPU=959              # number of compute CPUs for all simulations
NCPUCAPACITY=1000
NUMWRITERS=1
if [[ $HPCENVSHORT = "queenbee" && $USER = "jgflemin" ]]; then
ACCOUNT=loni_cera_2019a
fi
# Post processing and publication
INTENDEDAUDIENCE=general    # "general" | "developers-only" | "professional"
#POSTPROCESS=( accumulateMinMax.sh createMaxCSV.sh cpra_slide_deck_post.sh includeWind10m.sh createOPeNDAPFileList.sh opendap_post.sh )
POSTPROCESS=( createMaxCSV.sh includeWind10m.sh createOPeNDAPFileList.sh opendap_post.sh )
OPENDAPNOTIFY="[email protected],[email protected]"
NOTIFY_SCRIPT=ncfs_cyclone_notify.sh
# Initial state (overridden by STATEFILE after ASGS gets going)
COLDSTARTDATE=auto
HOTORCOLD=hotstart
LASTSUBDIR=http://fortytwo.cct.lsu.edu:8080/thredds/fileServer/2019/al05/25/southfl_v11-1_final/supermic.hpc.lsu.edu/southfl_al052019_jgf/nhcConsensus
# Scenario package
#PERCENT=default
SCENARIOPACKAGESIZE=2
# Select per-scenario settings from the caller-supplied scenario index.
case $si in
-2)
ENSTORM=hindcast
;;
-1)
# do nothing ... this is not a forecast
ENSTORM=nowcast
;;
0)
ENSTORM=veerLeft100Wind10m
PERCENT=-100
source $SCRIPTDIR/config/io_defaults.sh # sets met-only mode based on "Wind10m" suffix
;;
1)
ENSTORM=veerLeft100
PERCENT=-100
;;
2)
ENSTORM=nhcConsensusWind10m
source $SCRIPTDIR/config/io_defaults.sh # sets met-only mode based on "Wind10m" suffix
;;
3)
ENSTORM=nhcConsensus
;;
*)
echo "CONFIGRATION ERROR: Unknown ensemble member number: '$si'."
;;
esac
PREPPEDARCHIVE=prepped_${GRIDNAME}_${INSTANCENAME}_${NCPU}.tar.gz
HINDCASTARCHIVE=prepped_${GRIDNAME}_hc_${INSTANCENAME}_${NCPU}.tar.gz
|
jasonfleming/asgs
|
config/2019/asgs_config_southfl_al052019_jgf.sh
|
Shell
|
gpl-3.0
| 3,935 |
#!/bin/bash
# Iteratively align (muscle) and clean (alignment_clean.py) every
# trimmed_separated_sequences/*MID*.fasta file, producing *_cleaned_aligned.fasta
# files that are finally moved into the cleaned_aligned directory.
# Global variables
TRIM_LEFT=8 # The sequences should already be trimmed appropriately
TRIM_RIGHT=35 # idem
INDELS=$1
SNPS=$2
TEMP_FILE="clean_align_clean.temp"
# Help
echo "---"
echo "Help for program 05_align_clean_align.sh"
echo
echo "Usage:"
echo "    ./scripts/05_align_clean_align.sh INDELS SNPS"
echo
echo "    INDELS = Decimal number, minimal proportion for indels to be considered real"
echo "    SNPS = Decimal number, minimal proportion for SNPs to be considered real"
echo
echo "Example run:"
echo "    ./scripts/05_align_clean_align.sh 0.01 0.01"
echo "---"
# Create list of files to treat (basenames without the .fasta extension)
ls -1 trimmed_separated_sequences/*MID*.fasta > "$TEMP_FILE"
sed -i 's/\.fasta//' "$TEMP_FILE"
# Clean, align, Clean, align, Clean
# Read the list via redirection (no 'cat | while' subshell) and use 'read -r'
# so backslashes in file names are not mangled.
while IFS= read -r i; do
    echo "Treating file: $i"
    muscle -in "$i.fasta" -out "$i.aca_temp1" -diags -maxiters 4 -quiet
    ./scripts/alignment_clean.py -i "$i.aca_temp1" -o "$i.aca_temp2" -l "$TRIM_LEFT" -r "$TRIM_RIGHT" -I "$INDELS" -s "$SNPS"
    muscle -in "$i.aca_temp2" -out "$i.aca_temp3" -diags -maxiters 4 -quiet
    ./scripts/alignment_clean.py -i "$i.aca_temp3" -o "$i.aca_temp4" -I 0.025 -s 0.025
    muscle -in "$i.aca_temp4" -out "${i}_cleaned_aligned.fasta" -diags -maxiters 4 -quiet
    rm trimmed_separated_sequences/*aca_temp*
done < "$TEMP_FILE"
# Clean up directory
rm "$TEMP_FILE"
mv trimmed_separated_sequences/*_cleaned_aligned.fasta cleaned_aligned
|
enormandeau/ngs_genotyping
|
scripts/05_align_clean_align.sh
|
Shell
|
gpl-3.0
| 1,401 |
# Merge the per-sample Cufflinks assemblies listed in assemblies.txt into one
# merged transcriptome GTF, using the UCSC hg19 reference FASTA (-s) for
# sequence-based classification.
cufflinks-2.2.1.Linux_x86_64/cuffmerge -s bundle/hg19/ucsc.hg19.fasta assemblies.txt
|
CSB-IG/hmgb1_rna_seq
|
cuffmerge.sh
|
Shell
|
gpl-3.0
| 85 |
#!/bin/bash
# Launch the CharacterAnimation binary through primusrun so it renders on the
# discrete NVIDIA GPU (Bumblebee/Optimus setups).
primusrun ./CharacterAnimation
|
marccomino/Crowd_Simulator
|
CharacterAnimation.sh
|
Shell
|
gpl-3.0
| 43 |
#!/bin/bash
# Script to generate the compressed SQL files for phpMyAdmin.
# Each source dump is run through the external cleanup scripts, concatenated
# into a combined SQL file, and zipped; intermediates are removed as we go.
rutaScriptsLimpieza="/home/alex/programacion/Intercambia/importadorWordpress"

# Compress the table-creation script.
zip creacionTablas.sql.zip creacionTablas.sql

# Clean one SQL dump ($1) with the external cleanup scripts and append the
# result (plus a separating blank line) to the output file ($2).
# Removes the *_LIMPIO intermediate files afterwards.
limpiaYConcatena() {
	local archivo="$1"
	local destino="$2"
	"${rutaScriptsLimpieza}/limpiaCaracteres.sh" "${archivo}"
	"${rutaScriptsLimpieza}/limpiaHTML.sh" "${archivo}_LIMPIO"
	cat "${archivo}_LIMPIO_LIMPIO" >> "${destino}"
	echo "" >> "${destino}"
	rm "${archivo}_LIMPIO"
	rm "${archivo}_LIMPIO_LIMPIO"
}

# Now the small SQL dumps, all merged into one file.
archivos="datosTablas/02_itmt.sql datosTablas/03_listaDescarga.sql datosTablas/04_contenidoAgenda.sql datosTablas/paginasEstaticas.sql"
sql1="datosCreade1.sql"
# If the intermediate file exists, remove it so we start from scratch.
[ -f "$sql1" ] && rm "$sql1"
for archivo in $archivos
do
	limpiaYConcatena "${archivo}" "${sql1}"
done
# The static-pages dump was written by hand and is already clean.
#cat datosTablas/paginasEstaticas.sql >> ${sql1}
# Compress the combined file and drop the uncompressed copy.
zip "${sql1}.zip" "${sql1}"
rm "${sql1}"

# For the largest dump, same steps but producing its own independent file.
archivo="datosTablas/01_bloqueTexto.sql"
sql2="datosCreade2.sql"
# If the intermediate file exists, remove it so we start from scratch.
[ -f "$sql2" ] && rm "$sql2"
limpiaYConcatena "${archivo}" "${sql2}"
# And compress it.
zip "${sql2}.zip" "${sql2}"
rm "${sql2}"
|
Eidansoft/Creade
|
paraMySql/generaScripts.sh
|
Shell
|
gpl-3.0
| 1,556 |
#!/bin/bash
# Author: pblaas ([email protected])
# Initial version 04-2017
# This script is used to generate Kubernetes cloud-init files for CoreoS.
# Usage: ./add_node.sh <ip> [<ip> ...]
# For each worker IP it creates TLS worker/etcd certificates signed by the
# cluster CAs in ./set, then renders a node_<ip>.yaml cloud-init file from
# template/worker_proxy.yaml.
if [ ! -f config.env ]; then
echo config.env not found.
echo cp config.env.sample to config.env to get started.
exit 1
fi
. config.env
if [ ! $1 ]; then
echo You need to provide one or more ip adresses.
echo e.g $0 192.168.10.12
exit 1
fi
cd set
# Hash the core user's password with a random salt for the cloud-init file.
CUSTOMSALT=$(openssl rand -base64 12)
HASHED_USER_CORE_PASSWORD=$(perl -le "print crypt '$USER_CORE_PASSWORD', '\$6\$$CUSTOMSALT' ")
#create worker certs
for i in $1; do
openssl genrsa -out ${i}-worker-key.pem 2048
# On OpenStack the certificate CN uses the node name; otherwise the raw IP.
if [ "$CLOUD_PROVIDER" == "openstack" ]; then
CERTID=k8s-${CLUSTERNAME}-node${i##*.}
else
CERTID=${i}
fi
WORKER_IP=${i} openssl req -new -key ${i}-worker-key.pem -out ${i}-worker.csr -subj "/CN=system:node:${CERTID}/O=system:nodes" -config ../template/worker-openssl.cnf
WORKER_IP=${i} openssl x509 -req -in ${i}-worker.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out ${i}-worker.pem -days 365 -extensions v3_req -extfile ../template/worker-openssl.cnf
done
# Same again for the etcd client certificates, signed by the etcd CA.
for i in $1; do
openssl genrsa -out ${i}-etcd-worker-key.pem 2048
WORKER_IP=${i} openssl req -new -key ${i}-etcd-worker-key.pem -out ${i}-etcd-worker.csr -subj "/CN=${i}" -config ../template/worker-openssl.cnf
WORKER_IP=${i} openssl x509 -req -in ${i}-etcd-worker.csr -CA etcd-ca.pem -CAkey etcd-ca-key.pem -CAcreateserial -out ${i}-etcd-worker.pem -days 365 -extensions v3_req -extfile ../template/worker-openssl.cnf
done
#gzip base64 encode files to store in the cloud init files.
CAKEY=$(cat ca-key.pem | gzip | base64 -w0)
CACERT=$(cat ca.pem | gzip | base64 -w0)
APISERVERKEY=$(cat apiserver-key.pem | gzip | base64 -w0)
APISERVER=$(cat apiserver.pem | gzip | base64 -w0)
# Record the per-node encoded certs in index.txt, keyed by IP, so the sed
# template pass below can look them up.
for i in $1; do
j=$i-worker-key.pem
k=$i-worker.pem
l=$i-etcd-worker-key.pem
m=$i-etcd-worker.pem
WORKERKEY=$(cat $j | gzip | base64 -w0)
WORKER=$(cat $k | gzip | base64 -w0)
ETCDWORKERKEY=$(cat $l | gzip | base64 -w0)
ETCDWORKER=$(cat $m | gzip | base64 -w0)
echo WORKERKEY_$i:$WORKERKEY >> index.txt
echo WORKER_$i:$WORKER >> index.txt
echo ETCDWORKERKEY_$i:$ETCDWORKERKEY >> index.txt
echo ETCDWORKER_$i:$ETCDWORKER >> index.txt
done
# Calico needs host CNI directories mounted into the kubelet rkt container;
# the escaped \\\\ / \n sequences are expanded later inside the YAML template.
if [ $NET_OVERLAY == "calico" ]; then
NETOVERLAY_MOUNTS="--volume cni-net,kind=host,source=/etc/cni/net.d \\\\\n  --mount volume=cni-net,target=/etc/cni/net.d \\\\\n  --volume cni-bin,kind=host,source=/opt/cni/bin \\\\\n  --mount volume=cni-bin,target=/opt/cni/bin \\\\"
NETOVERLAY_DIRS="ExecStartPre=/usr/bin/mkdir -p /opt/cni/bin\n  ExecStartPre=/usr/bin/mkdir -p /etc/cni/net.d"
NETOVERLAY_CNICONF="--cni-conf-dir=/etc/cni/net.d \\\\\n  --cni-bin-dir=/opt/cni/bin \\\\"
else
NETOVERLAY_CNICONF="--cni-conf-dir=/etc/kubernetes/cni/net.d \\\\"
NETOVERLAY_MOUNTS="\\\\"
NETOVERLAY_DIRS="\\\\"
fi
#genereate the worker yamls from the worker.yaml template
# Note: ',' (or '@' where values contain commas) is used as the sed delimiter.
for i in $1; do
sed -e "s,WORKER_IP,$i,g" \
-e "s,DISCOVERY_ID,`cat index.txt|grep DISCOVERY_ID|cut -d: -f2`,g" \
-e "s,WORKER_GW,$WORKER_GW,g" \
-e "s,DNSSERVER,$DNSSERVER,g" \
-e "s,MASTER_HOST_IP,$MASTER_HOST_IP,g" \
-e "s,CLUSTER_DNS,$CLUSTER_DNS,g" \
-e "s@ETCD_ENDPOINTS_URLS@${ETCD_ENDPOINTS_URLS}@g" \
-e "s,USER_CORE_SSHKEY1,${USER_CORE_KEY1}," \
-e "s,USER_CORE_SSHKEY2,${USER_CORE_KEY2}," \
-e "s,USER_CORE_PASSWORD,${HASHED_USER_CORE_PASSWORD},g" \
-e "s,CLOUD_PROVIDER,${CLOUD_PROVIDER},g" \
-e "s,K8S_VER,$K8S_VER,g" \
-e "s,\<CACERT\>,$CACERT,g" \
-e "s,\<WORKERKEY\>,`cat index.txt|grep -w WORKERKEY_$i|cut -d: -f2`,g" \
-e "s,\<WORKER\>,`cat index.txt|grep -w WORKER_$i|cut -d: -f2`,g" \
-e "s,\<ETCDCACERT\>,`cat index.txt|grep -w ETCDCACERT|cut -d: -f2`,g" \
-e "s,\<ETCDWORKERKEY\>,`cat index.txt|grep -w ETCDWORKERKEY_$i|cut -d: -f2`,g" \
-e "s,\<ETCDWORKER\>,`cat index.txt|grep -w ETCDWORKER_$i|cut -d: -f2`,g" \
-e "s,CLOUDCONF,`cat index.txt|grep -w CLOUDCONF|cut -d: -f2`,g" \
-e "s,FLANNEL_VER,$FLANNEL_VER,g" \
-e "s@NETOVERLAY_MOUNTS@${NETOVERLAY_MOUNTS}@g" \
-e "s@NETOVERLAY_DIRS@${NETOVERLAY_DIRS}@g" \
-e "s@NETOVERLAY_CNICONF@${NETOVERLAY_CNICONF}@g" \
../template/worker_proxy.yaml > node_$i.yaml
echo Generated: node_$i.yaml
done
echo -----------------------------------
cd -
echo ""
|
pblaas/cloudinit_generator
|
add_node.sh
|
Shell
|
gpl-3.0
| 4,282 |
#!/bin/bash
#
# Linux/MacOS X script to install rEFInd
#
# Usage:
#
# ./install.sh [options]
#
# options include:
# "--esp" to install to the ESP rather than to the system's root
# filesystem. This is the default on Linux.
# "--usedefault {devicefile}" to install as default
# (/EFI/BOOT/BOOTX64.EFI and similar) to the specified device
# (/dev/sdd1 or whatever) without registering with the NVRAM.
# "--ownhfs {devicefile}" to install to an HFS+ volume that's NOT currently
# an OS X boot volume.
# "--root {dir}" to specify installation using the specified directory
# as the system's root
# "--alldrivers" to install all drivers along with regular files
# "--nodrivers" to suppress driver installation (default in Linux is
# driver used on /boot; --nodrivers is OS X default)
# "--shim {shimfile}" to install a shim.efi file for Secure Boot
# "--preloader" is synonymous with "--shim"
# "--localkeys" to re-sign x86-64 binaries with a locally-generated key
# "--yes" to assume a "yes" response to all prompts
#
# The "esp" option is valid only on Mac OS X; it causes
# installation to the EFI System Partition (ESP) rather than
# to the current OS X boot partition. Under Linux, this script
# installs to the ESP by default.
#
# This program is copyright (c) 2012 by Roderick W. Smith
# It is released under the terms of the GNU GPL, version 3,
# a copy of which should be included in the file COPYING.txt.
#
# Revision history:
#
# 0.7.9 -- Fixed bug that caused errors if dmraid utility not installed
# 0.7.7 -- Fixed bug that created mangled refind_linux.conf file; added ability
# to locate and mount ESP on Linux, if it's not mounted
# 0.7.6 -- Added --ownhfs {device-filename} option
# 0.7.5 -- Fixed bug when installing to ESP on recent versions of OS X
# 0.7.2 -- Fixed code that could be confused by use of autofs to mount the ESP
# 0.7.0 -- Added support for the new Btrfs driver
# 0.6.12 -- Added support for PreLoader as well as for shim
# 0.6.11 -- Improvements in script's ability to handle directories with spaces
# in their names
# 0.6.9 -- Install gptsync on Macs
# 0.6.8 -- Bug fix: ESP scan now uses "uniq".
# 0.6.6 -- Bug fix: Upgrade drivers when installed to EFI/BOOT. Also enable
# copying shim.efi and MokManager.efi over themselves.
# 0.6.4 -- Copies ext2 driver rather than ext4 driver for ext2/3fs
# 0.6.3 -- Support for detecting rEFInd in EFI/BOOT and EFI/Microsoft/Boot
# directories & for installing to EFI/BOOT in BIOS mode
# 0.6.2-1 -- Added --yes option & tweaked key-copying for use with RPM install script
# 0.6.1 -- Added --root option; minor bug fixes
# 0.6.0 -- Changed --drivers to --alldrivers and added --nodrivers option;
# changed default driver installation behavior in Linux to install
# the driver needed to read /boot (if available)
# 0.5.1.2 -- Fixed bug that caused failure to generate refind_linux.conf file
# 0.5.1.1 -- Fixed bug that caused script failure under OS X
# 0.5.1 -- Added --shim & --localkeys options & create sample refind_linux.conf
# in /boot
# 0.5.0 -- Added --usedefault & --drivers options & changed "esp" option to "--esp"
# 0.4.5 -- Fixed check for rEFItBlesser in OS X
# 0.4.2 -- Added notice about BIOS-based OSes & made NVRAM changes in Linux smarter
# 0.4.1 -- Added check for rEFItBlesser in OS X
# 0.3.3.1 -- Fixed OS X 10.7 bug; also works as make target
# 0.3.2.1 -- Check for presence of source files; aborts if not present
# 0.3.2 -- Initial version
#
# Note: install.sh version numbers match those of the rEFInd package
# with which they first appeared.
# Global defaults; most can be overridden by command-line options (GetParams).
RootDir="/"                      # system root (changed by --root)
TargetDir=/EFI/refind            # install directory on the ESP
LocalKeysBase="refind_local"     # base name for locally-generated signing keys
ShimSource="none"                # path to shim/PreLoader binary (--shim)
ShimType="none"                  # basename of $ShimSource
TargetShim="default"             # installed filename for the shim binary
TargetX64="refind_x64.efi"       # installed filename, x86-64 build
TargetIA32="refind_ia32.efi"     # installed filename, IA32 build
LocalKeys=0                      # 1 = re-sign binaries with local keys
DeleteRefindDir=0                # 1 = remove temporary re-signed copy at end
AlwaysYes=0                      # 1 = assume "Y" for every prompt (--yes)
#
# Functions used by both OS X and Linux....
#
# Parse command-line options into the global configuration variables and
# reject mutually-exclusive combinations. Exits with status 1 on bad usage.
GetParams() {
   InstallToEspOnMac=0
   if [[ $OSName == "Linux" ]] ; then
      # Install the driver required to read /boot, if it's available
      InstallDrivers="boot"
   else
      InstallDrivers="none"
   fi
   while [[ $# -gt 0 ]]; do
      case $1 in
         --esp | --ESP) InstallToEspOnMac=1
              ;;
         --ownhfs) OwnHfs=1
              TargetPart="$2"
              TargetDir=/System/Library/CoreServices
              shift
              ;;
         --usedefault) TargetDir=/EFI/BOOT
              TargetPart="$2"
              TargetX64="bootx64.efi"
              TargetIA32="bootia32.efi"
              shift
              ;;
         --root) RootDir="$2"
              shift
              ;;
         --localkeys) LocalKeys=1
              ;;
         --shim | --preloader) ShimSource="$2"
              ShimType=`basename $ShimSource`
              shift
              ;;
         --drivers | --alldrivers) InstallDrivers="all"
              ;;
         --nodrivers) InstallDrivers="none"
              ;;
         --yes) AlwaysYes=1
              ;;
         * ) echo "Usage: $0 [--esp | --usedefault {device-file} | --root {directory} |"
             echo "                     --ownhfs {device-file} ]"
             echo "                  [--nodrivers | --alldrivers] [--shim {shim-filename}]"
             echo "                  [--localkeys] [--yes]"
             exit 1
      esac
      shift
   done
   # Reject option combinations that specify conflicting install targets.
   if [[ $InstallToEspOnMac == 1 && "$TargetDir" == '/EFI/BOOT' ]] ; then
      echo "You may use --esp OR --usedefault, but not both! Aborting!"
      exit 1
   fi
   if [[ "$RootDir" != '/' && "$TargetDir" == '/EFI/BOOT' ]] ; then
      echo "You may use --usedefault OR --root, but not both! Aborting!"
      exit 1
   fi
   if [[ "$RootDir" != '/' && $InstallToEspOnMac == 1 ]] ; then
      echo "You may use --root OR --esp, but not both! Aborting!"
      exit 1
   fi
   if [[ "$TargetDir" != '/System/Library/CoreServices' && "$OwnHfs" == '1' ]] ; then
      echo "If you use --ownhfs, you may NOT use --usedefault! Aborting!"
      exit 1
   fi
   # Derived paths used by later steps.
   RLConfFile="$RootDir/boot/refind_linux.conf"
   EtcKeysDir="$RootDir/etc/refind.d/keys"
} # GetParams()
# Get a yes/no response from the user and place it in the YesNo variable.
# If the AlwaysYes variable is set to 1, skip the user input and set "Y"
# in the YesNo variable.
# Obtain a yes/no answer in the global YesNo variable. When the --yes option
# set AlwaysYes=1, no prompt is shown: "Y" is assumed and echoed so the
# transcript still shows an answer.
ReadYesNo() {
   case "$AlwaysYes" in
      1)
         YesNo="Y"
         echo "Y"
         ;;
      *)
         read YesNo
         ;;
   esac
}
# Abort if the rEFInd files can't be found.
# Also sets $ConfFile to point to the configuration file,
# $IconsDir to point to the icons directory, and
# $ShimSource to the source of the shim.efi file (if necessary).
# Verify that the rEFInd binaries, sample config, icons, and (if requested)
# the shim/PreLoader files exist; sets ConfFile, IconsDir, TargetX64, and
# MokManagerSource as side effects. Aborts with status 1 on any missing file.
CheckForFiles() {
   # Note: This check is satisfied if EITHER the 32- or the 64-bit version
   # is found, even on the wrong platform. This is because the platform
   # hasn't yet been determined. This could obviously be improved, but it
   # would mean restructuring lots more code....
   if [[ ! -f "$RefindDir/refind_ia32.efi" && ! -f "$RefindDir/refind_x64.efi" ]] ; then
      echo "The rEFInd binary file is missing! Aborting installation!"
      exit 1
   fi
   # Prefer the copy next to the binaries; fall back to the script's own dir.
   if [[ -f "$RefindDir/refind.conf-sample" ]] ; then
      ConfFile="$RefindDir/refind.conf-sample"
   elif [[ -f "$ThisDir/refind.conf-sample" ]] ; then
      ConfFile="$ThisDir/refind.conf-sample"
   else
      echo "The sample configuration file is missing! Aborting installation!"
      exit 1
   fi
   if [[ -d "$RefindDir/icons" ]] ; then
      IconsDir="$RefindDir/icons"
   elif [[ -d "$ThisDir/icons" ]] ; then
      IconsDir="$ThisDir/icons"
   else
      echo "The icons directory is missing! Aborting installation!"
      exit 1
   fi
   if [[ "$ShimSource" != "none" ]] ; then
      if [[ -f "$ShimSource" ]] ; then
         # shim loads grubx64.efi; PreLoader loads loader.efi — rename our
         # binary accordingly and locate the matching key-management tool.
         if [[ $ShimType == "shimx64.efi" || $ShimType == "shim.efi" ]] ; then
            TargetX64="grubx64.efi"
            MokManagerSource=`dirname "$ShimSource"`/MokManager.efi
         elif [[ $ShimType == "preloader.efi" || $ShimType == "PreLoader.efi" ]] ; then
            TargetX64="loader.efi"
            MokManagerSource=`dirname "$ShimSource"`/HashTool.efi
         else
            echo "Unknown shim/PreBootloader filename: $ShimType!"
            echo "Known filenames are shimx64.efi, shim.efi, and PreLoader.efi. Aborting!"
            exit 1
         fi
      else
         echo "The specified shim/PreBootloader file, $ShimSource, doesn't exist!"
         echo "Aborting installation!"
         exit 1
      fi
   fi
} # CheckForFiles()
# Helper for CopyRefindFiles; copies shim files (including MokManager, if it's
# available) to target.
# Copy the shim binary (as $TargetShim) and, when present, MokManager/HashTool
# to the install target. Sets the global Problems=1 if a copy fails.
CopyShimFiles() {
   # -b keeps a backup if a same-named file already exists at the target.
   cp -fb "$ShimSource" "$InstallDir/$TargetDir/$TargetShim"
   if [[ $? != 0 ]] ; then
      Problems=1
   fi
   if [[ -f "$MokManagerSource" ]] ; then
      cp -fb "$MokManagerSource" "$InstallDir/$TargetDir/"
   fi
   if [[ $? != 0 ]] ; then
      Problems=1
   fi
} # CopyShimFiles()
# Copy the public keys to the installation medium
# Copy the locally-generated public keys onto the installation medium, but
# only when the user requested local re-signing (--localkeys).
CopyKeys() {
   [[ $LocalKeys == 1 ]] || return 0
   local KeysTarget="$InstallDir/$TargetDir/keys/"
   mkdir -p "$KeysTarget"
   cp "$EtcKeysDir/$LocalKeysBase.cer" "$KeysTarget"
   cp "$EtcKeysDir/$LocalKeysBase.crt" "$KeysTarget"
} # CopyKeys()
# Copy drivers from $RefindDir/drivers_$1 to $InstallDir/$TargetDir/drivers_$1,
# honoring the $InstallDrivers condition. Must be passed a suitable
# architecture code (ia32 or x64).
# Copy filesystem drivers for the given architecture ($1 = ia32 or x64) to
# the install target, honoring $InstallDrivers: "all" copies every driver,
# "boot" copies only the driver matching /boot's filesystem (detected via
# blkid), anything else copies nothing.
CopyDrivers() {
   local Blkid

   Blkid=`which blkid 2> /dev/null`
   if [[ $InstallDrivers == "all" ]] ; then
      mkdir -p "$InstallDir/$TargetDir/drivers_$1"
      cp "$ThisDir"/drivers_$1/*_$1.efi "$InstallDir/$TargetDir/drivers_$1/" 2> /dev/null
      cp "$RefindDir"/drivers_$1/*_$1.efi "$InstallDir/$TargetDir/drivers_$1/" 2> /dev/null
   elif [[ "$InstallDrivers" == "boot" && -x "$Blkid" ]] ; then
      BootPart=`df /boot | grep dev | cut -f 1 -d " "`
      BootFS=`$Blkid -o export $BootPart 2> /dev/null | grep TYPE= | cut -f 2 -d =`
      # Map the detected filesystem to a driver basename.
      DriverType=""
      case $BootFS in
         ext2 | ext3) DriverType="ext2"
              # Could use ext4, but that can create unwanted entries from symbolic
              # links in / to /boot/vmlinuz if a separate /boot partition is used.
              ;;
         ext4) DriverType="ext4"
              ;;
         reiserfs) DriverType="reiserfs"
              ;;
         btrfs) DriverType="btrfs"
              ;;
         hfsplus) DriverType="hfs"
              ;;
         *) BootFS=""
      esac
      if [[ -n $BootFS ]] ; then
         echo "Installing driver for $BootFS (${DriverType}_$1.efi)"
         mkdir -p "$InstallDir/$TargetDir/drivers_$1"
         cp "$ThisDir/drivers_$1/${DriverType}_$1.efi" "$InstallDir/$TargetDir/drivers_$1/" 2> /dev/null
         cp "$RefindDir/drivers_$1/${DriverType}_$1.efi" "$InstallDir/$TargetDir/drivers_$1"/ 2> /dev/null
      fi
   fi
}
# Copy tools (currently only gptsync, and that only on Macs) to the EFI/tools
# directory on the ESP. Must be passed a suitable architecture code (ia32
# or x64).
CopyTools() {
   # Quote the path so an $InstallDir containing spaces doesn't word-split.
   mkdir -p "$InstallDir/EFI/tools"
   # gptsync is only shipped/needed on Macs.
   if [[ $OSName == 'Darwin' ]] ; then
      cp -f "$RefindDir/tools_$1/gptsync_$1.efi" "$InstallDir/EFI/tools/"
      # An old unsuffixed gptsync.efi would shadow the new one; park it aside.
      if [[ -f "$InstallDir/EFI/tools/gptsync.efi" ]] ; then
         mv "$InstallDir/EFI/tools/gptsync.efi" "$InstallDir/EFI/tools/gptsync.efi-disabled"
         echo "Found old gptsync.efi; disabling it by renaming it to gptsync.efi-disabled"
      fi
   fi
} # CopyTools()
# Copy the rEFInd files to the ESP or OS X root partition.
# Sets Problems=1 if any critical commands fail.
# Copy the rEFInd binaries, drivers, tools, keys, icons, and configuration
# to the install target, branching on target type ("/EFI/BOOT" fallback
# install vs. platform-specific install) and on $Platform (EFI64/EFI32).
# Sets Problems=1 if any critical copy fails.
CopyRefindFiles() {
   mkdir -p "$InstallDir/$TargetDir"
   if [[ "$TargetDir" == '/EFI/BOOT' ]] ; then
      # Fallback-name install: copy BOTH architectures under boot{x64,ia32}.efi.
      cp "$RefindDir/refind_ia32.efi" "$InstallDir/$TargetDir/$TargetIA32" 2> /dev/null
      if [[ $? != 0 ]] ; then
         echo "Note: IA32 (x86) binary not installed!"
      fi
      cp "$RefindDir/refind_x64.efi" "$InstallDir/$TargetDir/$TargetX64" 2> /dev/null
      if [[ $? != 0 ]] ; then
         Problems=1
      fi
      if [[ "$ShimSource" != "none" ]] ; then
         TargetShim="bootx64.efi"
         CopyShimFiles
      fi
      if [[ $InstallDrivers == "all" ]] ; then
         cp -r "$RefindDir"/drivers_* "$InstallDir/$TargetDir/" 2> /dev/null
         cp -r "$ThisDir"/drivers_* "$InstallDir/$TargetDir/" 2> /dev/null
      elif [[ $Upgrade == 1 ]] ; then
         # Upgrading an existing fallback install: refresh only the platform's
         # drivers and tools.
         if [[ $Platform == 'EFI64' ]] ; then
            CopyDrivers x64
            CopyTools x64
         else
            CopyDrivers ia32
            CopyTools ia32
         fi
      fi
      Refind=""
      CopyKeys
   elif [[ $Platform == 'EFI64' || $TargetDir == "/EFI/Microsoft/Boot" ]] ; then
      cp "$RefindDir/refind_x64.efi" "$InstallDir/$TargetDir/$TargetX64"
      if [[ $? != 0 ]] ; then
         Problems=1
      fi
      CopyDrivers x64
      CopyTools x64
      Refind="refind_x64.efi"
      CopyKeys
      if [[ "$ShimSource" != "none" ]] ; then
         if [[ "$TargetShim" == "default" ]] ; then
            TargetShim=`basename "$ShimSource"`
         fi
         CopyShimFiles
         # With a shim, the shim binary is what gets blessed/registered.
         Refind="$TargetShim"
         if [[ $LocalKeys == 0 ]] ; then
            echo "Storing copies of rEFInd Secure Boot public keys in $EtcKeysDir"
            mkdir -p "$EtcKeysDir"
            cp "$ThisDir/keys/refind.cer" "$EtcKeysDir" 2> /dev/null
            cp "$ThisDir/keys/refind.crt" "$EtcKeysDir" 2> /dev/null
         fi
      fi
      if [[ "$TargetDir" == '/System/Library/CoreServices' ]] ; then
         SetupMacHfs $TargetX64
      fi
   elif [[ $Platform == 'EFI32' ]] ; then
      cp "$RefindDir/refind_ia32.efi" "$InstallDir/$TargetDir/$TargetIA32"
      if [[ $? != 0 ]] ; then
         Problems=1
      fi
      CopyDrivers ia32
      CopyTools ia32
      Refind="refind_ia32.efi"
      if [[ "$TargetDir" == '/System/Library/CoreServices' ]] ; then
         SetupMacHfs $TargetIA32
      fi
   else
      echo "Unknown platform! Aborting!"
      exit 1
   fi
   echo "Copied rEFInd binary files"
   echo ""
   # Preserve any user-customized icons before installing the new set.
   if [[ -d "$InstallDir/$TargetDir/icons" ]] ; then
      rm -rf "$InstallDir/$TargetDir/icons-backup" &> /dev/null
      mv -f "$InstallDir/$TargetDir/icons" "$InstallDir/$TargetDir/icons-backup"
      echo "Notice: Backed up existing icons directory as icons-backup."
   fi
   cp -r "$IconsDir" "$InstallDir/$TargetDir"
   if [[ $? != 0 ]] ; then
      Problems=1
   fi
   mkdir -p "$InstallDir/$TargetDir/keys"
   cp -rf "$ThisDir"/keys/*.[cd]er "$InstallDir/$TargetDir/keys/" 2> /dev/null
   cp -rf "$EtcKeysDir"/*.[cd]er "$InstallDir/$TargetDir/keys/" 2> /dev/null
   # Never overwrite an existing refind.conf; install the sample alongside it.
   if [[ -f "$InstallDir/$TargetDir/refind.conf" ]] ; then
      echo "Existing refind.conf file found; copying sample file as refind.conf-sample"
      echo "to avoid overwriting your customizations."
      echo ""
      cp -f "$ConfFile" "$InstallDir/$TargetDir"
      if [[ $? != 0 ]] ; then
         Problems=1
      fi
   else
      echo "Copying sample configuration file as refind.conf; edit this file to configure"
      echo "rEFInd."
      echo ""
      cp -f "$ConfFile" "$InstallDir/$TargetDir/refind.conf"
      if [[ $? != 0 ]] ; then
         Problems=1
      fi
   fi
   if [[ $DeleteRefindDir == 1 ]] ; then
      # $RefindDir was a temporary re-signed copy created by ReSignBinaries.
      echo "Deleting the temporary directory $RefindDir"
      rm -r "$RefindDir"
   fi
} # CopyRefindFiles()
# Mount the partition the user specified with the --usedefault or --ownhfs option
# Mount the partition given with --usedefault or --ownhfs ($TargetPart) at a
# temporary mount point (unless OS X already has it mounted). Sets InstallDir
# and UnmountEsp; aborts on mount failure.
MountDefaultTarget() {
   InstallDir=/tmp/refind_install
   mkdir -p "$InstallDir"
   UnmountEsp=1
   if [[ $OSName == 'Darwin' ]] ; then
      if [[ $OwnHfs == '1' ]] ; then
         # If diskutil reports an existing mount point, reuse it instead of
         # mounting a second time.
         Temp=`diskutil info "$TargetPart" | grep "Mount Point"`
         InstallDir=`echo $Temp | cut -f 3-30 -d ' '`
         if [[ $InstallDir == '' ]] ; then
            InstallDir=/tmp/refind_install
            mount -t hfs "$TargetPart" "$InstallDir"
         else
            UnmountEsp=0
         fi
      else
         mount -t msdos "$TargetPart" "$InstallDir"
      fi
   elif [[ $OSName == 'Linux' ]] ; then
      mount -t vfat "$TargetPart" "$InstallDir"
   fi
   if [[ $? != 0 ]] ; then
      echo "Couldn't mount $TargetPart ! Aborting!"
      rmdir "$InstallDir"
      exit 1
   fi
   # NOTE(review): this echo looks like leftover debug output — confirm
   # whether it should be removed.
   echo "UnmountEsp = $UnmountEsp"
} # MountDefaultTarget()
#
# A series of OS X support functions....
#
# Mount the ESP at /Volumes/ESP or determine its current mount
# point.
# Sets InstallDir to the ESP mount point
# Sets UnmountEsp if we mounted it
# Locate the OS X ESP (preferring the one on the root device) and mount it at
# /Volumes/ESP if it isn't already mounted. Sets InstallDir to the mount
# point and UnmountEsp=1 when this function performed the mount.
MountOSXESP() {
   # Identify the ESP. Note: This returns the FIRST ESP found;
   # if the system has multiple disks, this could be wrong!
   Temp=$(mount | sed -n -E "/^(\/dev\/disk[0-9]+s[0-9]+) on \/ \(.*$/s//\1/p")
   if [ $Temp ]; then
      # Search the root device's partition table for an EFI partition.
      Temp=$(diskutil list $Temp | sed -n -E '/^ *[0-9]+:[ ]+EFI EFI[ ]+[0-9.]+ [A-Z]+[ ]+(disk[0-9]+s[0-9]+)$/ { s//\1/p
             q
         }' )
      if [ -z $Temp ]; then
         echo "Warning: root device doesn't have an EFI partition"
      fi
   else
      echo "Warning: root device could not be found"
   fi
   if [ -z $Temp ]; then
      # Fall back to the first EFI partition on any disk.
      Temp=$(diskutil list | sed -n -E '/^ *[0-9]+:[ ]+EFI EFI[ ]+[0-9.]+ [A-Z]+[ ]+(disk[0-9]+s[0-9]+)$/ { s//\1/p
             q
         }' )

      if [ -z $Temp ]; then
         echo "Could not find an EFI partition. Aborting!"
         exit 1
      fi
   fi
   Esp=/dev/`echo $Temp`
   # If the ESP is mounted, use its current mount point....
   Temp=`df -P | grep "$Esp"`
   InstallDir=`echo $Temp | cut -f 6 -d ' '`
   if [[ "$InstallDir" == '' ]] ; then
      mkdir /Volumes/ESP &> /dev/null
      mount -t msdos "$Esp" /Volumes/ESP
      if [[ $? != 0 ]] ; then
         echo "Unable to mount ESP! Aborting!\n"
         exit 1
      fi
      UnmountEsp=1
      InstallDir="/Volumes/ESP"
   fi
} # MountOSXESP()
# Set up for booting from Mac HFS+ volume that boots rEFInd in MJG's way
# (http://mjg59.dreamwidth.org/7468.html)
# Must be passed the original rEFInd binary filename (without a path).
# Set up for booting from Mac HFS+ volume that boots rEFInd in MJG's way
# (http://mjg59.dreamwidth.org/7468.html). $1 is the rEFInd binary filename
# (no path); it is hard-linked to boot.efi and a SystemVersion.plist is
# written so the firmware treats the volume as bootable.
SetupMacHfs() {
   # Refuse to clobber a volume that actually holds an OS X kernel.
   if [[ -s "$InstallDir/mach_kernel" ]] ; then
      echo "Attempt to install rEFInd to a partition with a /mach_kernel file! Aborting!"
      exit 1
   fi
   # -n: keep any pre-existing backup rather than overwriting it.
   cp -n "$InstallDir/$TargetDir/boot.efi" "$InstallDir/$TargetDir/boot.efi-backup" &> /dev/null
   ln -f "$InstallDir/$TargetDir/$1" "$InstallDir/$TargetDir/boot.efi"
   touch "$InstallDir/mach_kernel"
   rm "$InstallDir/$TargetDir/SystemVersion.plist" &> /dev/null
   # NOTE(review): the "<xml" below lacks the "?" of a normal XML declaration
   # ("<?xml ... ?>") — confirm whether this is intentional before changing,
   # since this text is emitted verbatim into SystemVersion.plist.
   cat - << ENDOFHERE >> "$InstallDir/$TargetDir/SystemVersion.plist"
<xml version="1.0" encoding="UTF-8"?>
<plist version="1.0">
<dict>
        <key>ProductBuildVersion</key>
        <string></string>
        <key>ProductName</key>
        <string>rEFInd</string>
        <key>ProductVersion</key>
        <string>0.7.6</string>
</dict>
</plist>
ENDOFHERE
} # SetupMacHfs()
# Control the OS X installation.
# Sets Problems=1 if problems found during the installation.
# Control the OS X installation: pick/mount the target volume, copy the
# files, bless the installed binary, and offer to remove rEFItBlesser.
# Sets Problems=1 if problems found during the installation.
InstallOnOSX() {
   echo "Installing rEFInd on OS X...."
   if [[ "$TargetDir" == "/EFI/BOOT" || "$OwnHfs" == '1' ]] ; then
      MountDefaultTarget
   elif [[ "$InstallToEspOnMac" == "1" ]] ; then
      MountOSXESP
   else
      InstallDir="$RootDir/"
   fi
   echo "Installing rEFInd to the partition mounted at $InstallDir"
   # Firmware ABI (EFI32/EFI64) read from the IODeviceTree registry.
   Platform=`ioreg -l -p IODeviceTree | grep firmware-abi | cut -d "\"" -f 4`
   CopyRefindFiles
   if [[ $InstallToEspOnMac == "1" ]] ; then
      bless --mount "$InstallDir" --setBoot --file "$InstallDir/$TargetDir/$Refind"
   elif [[ "$TargetDir" != "/EFI/BOOT" ]] ; then
      bless --setBoot --folder "$InstallDir/$TargetDir" --file "$InstallDir/$TargetDir/$Refind"
   fi
   if [[ $? != 0 ]] ; then
      Problems=1
   fi
   # rEFIt's auto-re-blessing daemon would undo our bless on next boot.
   if [[ -f /Library/StartupItems/rEFItBlesser || -d /Library/StartupItems/rEFItBlesser ]] ; then
      echo
      echo "/Library/StartupItems/rEFItBlesser found!"
      echo "This program is part of rEFIt, and will cause rEFInd to fail to work after"
      echo -n "its first boot. Do you want to remove rEFItBlesser (Y/N)? "
      ReadYesNo
      if [[ $YesNo == "Y" || $YesNo == "y" ]] ; then
         echo "Deleting /Library/StartupItems/rEFItBlesser..."
         rm -r /Library/StartupItems/rEFItBlesser
      else
         echo "Not deleting rEFItBlesser."
      fi
   fi
   echo
   echo "WARNING: If you have an Advanced Format disk, *DO NOT* attempt to check the"
   echo "bless status with 'bless --info', since this is known to cause disk corruption"
   echo "on some systems!!"
   echo
} # InstallOnOSX()
#
# Now a series of Linux support functions....
#
# Check for evidence that we're running in Secure Boot mode. If so, and if
# appropriate options haven't been set, warn the user and offer to abort.
# If we're NOT in Secure Boot mode but the user HAS specified the --shim
# or --localkeys option, warn the user and offer to abort.
#
# FIXME: Although I checked the presence (and lack thereof) of the
# /sys/firmware/efi/vars/SecureBoot* files on my Secure Boot test system
# before releasing this script, I've since found that they are at least
# sometimes present when Secure Boot is absent. This means that the first
# test can produce false alarms. A better test is highly desirable.
# Warn (and offer to abort) when the Secure Boot state implied by
# /sys/firmware/efi/vars/SecureBoot* disagrees with the chosen options:
# Secure Boot active without --shim, or --shim/--localkeys without Secure
# Boot. Purely interactive; exits 0 if the user declines to continue.
CheckSecureBoot() {
   VarFile=`ls -d /sys/firmware/efi/vars/SecureBoot* 2> /dev/null`
   if [[ -n "$VarFile" && "$TargetDir" != '/EFI/BOOT' && "$ShimSource" == "none" ]] ; then
      echo ""
      echo "CAUTION: Your computer appears to support Secure Boot, but you haven't"
      echo "specified a valid shim.efi file source. If you've disabled Secure Boot and"
      echo "intend to leave it disabled, this is fine; but if Secure Boot is active, the"
      echo "resulting installation won't boot. You can read more about this topic at"
      echo "http://www.rodsbooks.com/refind/secureboot.html."
      echo ""
      echo -n "Do you want to proceed with installation (Y/N)? "
      ReadYesNo
      if [[ $YesNo == "Y" || $YesNo == "y" ]] ; then
         echo "OK; continuing with the installation..."
      else
         exit 0
      fi
   fi

   if [[ "$ShimSource" != "none" && ! -n "$VarFile" ]] ; then
      echo ""
      echo "You've specified installing using a shim.efi file, but your computer does not"
      echo "appear to be running in Secure Boot mode. Although installing in this way"
      echo "should work, it's unnecessarily complex. You may continue, but unless you"
      echo "plan to enable Secure Boot, you should consider stopping and omitting the"
      echo "--shim option. You can read more about this topic at"
      echo "http://www.rodsbooks.com/refind/secureboot.html."
      echo ""
      echo -n "Do you want to proceed with installation (Y/N)? "
      ReadYesNo
      if [[ $YesNo == "Y" || $YesNo == "y" ]] ; then
         echo "OK; continuing with the installation..."
      else
         exit 0
      fi
   fi

   if [[ $LocalKeys != 0 && ! -n "$VarFile" ]] ; then
      echo ""
      echo "You've specified re-signing your rEFInd binaries with locally-generated keys,"
      echo "but your computer does not appear to be running in Secure Boot mode. The"
      echo "keys you generate will be useless unless you enable Secure Boot. You may"
      echo "proceed with this installation, but before you do so, you may want to read"
      echo "more about it at http://www.rodsbooks.com/refind/secureboot.html."
      echo ""
      echo -n "Do you want to proceed with installation (Y/N)? "
      ReadYesNo
      if [[ $YesNo == "Y" || $YesNo == "y" ]] ; then
         echo "OK; continuing with the installation..."
      else
         exit 0
      fi
   fi
} # CheckSecureBoot()
# Check for the presence of locally-generated keys from a previous installation in
# $EtcKeysDir (/etc/refind.d/keys). If they're not present, generate them using
# openssl.
# Ensure a local signing key pair exists in $EtcKeysDir: if any of the
# private key (.key), PEM certificate (.crt), or DER certificate (.cer) is
# missing, generate a fresh self-signed set with openssl (backing up any
# survivors first). Sets PrivateKey, CertKey, and DerKey.
GenerateKeys() {
   PrivateKey="$EtcKeysDir/$LocalKeysBase.key"
   CertKey="$EtcKeysDir/$LocalKeysBase.crt"
   DerKey="$EtcKeysDir/$LocalKeysBase.cer"
   OpenSSL=`which openssl 2> /dev/null`

   # Do the work only if one or more of the necessary keys is missing
   # TODO: Technically, we don't need the DerKey; but if it's missing and openssl
   # is also missing, this will fail. This could be improved.
   if [[ ! -f "$PrivateKey" || ! -f "$CertKey" || ! -f "$DerKey" ]] ; then
      echo "Generating a fresh set of local keys...."
      mkdir -p "$EtcKeysDir"
      # Keys directory is private: contains the signing private key.
      chmod 0700 "$EtcKeysDir"
      if [[ ! -x "$OpenSSL" ]] ; then
         echo "Can't find openssl, which is required to create your private signing keys!"
         echo "Aborting!"
         exit 1
      fi
      if [[ -f "$PrivateKey" ]] ; then
         echo "Backing up existing $PrivateKey"
         cp -f "$PrivateKey" "$PrivateKey.backup" 2> /dev/null
      fi
      if [[ -f "$CertKey" ]] ; then
         echo "Backing up existing $CertKey"
         cp -f "$CertKey" "$CertKey.backup" 2> /dev/null
      fi
      if [[ -f "$DerKey" ]] ; then
         echo "Backing up existing $DerKey"
         cp -f "$DerKey" "$DerKey.backup" 2> /dev/null
      fi
      # Self-signed, 10-year, no-passphrase key pair; then a DER copy for
      # loading into firmware.
      "$OpenSSL" req -new -x509 -newkey rsa:2048 -keyout "$PrivateKey" -out "$CertKey" \
                 -nodes -days 3650 -subj "/CN=Locally-generated rEFInd key/"
      "$OpenSSL" x509 -in "$CertKey" -out "$DerKey" -outform DER
      chmod 0600 "$PrivateKey"
   else
      echo "Using existing local keys...."
   fi
}
# Sign a single binary. Requires parameters:
# $1 = source file
# $2 = destination file
# Also assumes that the SBSign, PESign, UseSBSign, UsePESign, and various key variables are set
# appropriately.
# Aborts script on error
# Sign the binary $1 into $2 using the locally-generated key pair
# ($PrivateKey/$CertKey) via $SBSign. Aborts the whole script on failure.
SignOneBinary() {
   if ! $SBSign --key "$PrivateKey" --cert "$CertKey" --output "$2" "$1" ; then
      echo "Problem signing the binary $1! Aborting!"
      exit 1
   fi
}
# Re-sign the x86-64 binaries with a locally-generated key, First look for appropriate
# key files in $EtcKeysDir. If they're present, use them to re-sign the binaries. If
# not, try to generate new keys and store them in $EtcKeysDir.
# Re-sign the x86-64 binaries with a locally-generated key: copy the package
# into a temp directory, sign refind_x64.efi and every x64 driver with
# sbsign, and point $RefindDir at the signed copy ($DeleteRefindDir=1 marks
# it for cleanup after installation).
ReSignBinaries() {
   SBSign=`which sbsign 2> /dev/null`
   echo "Found sbsign at $SBSign"
   TempDir="/tmp/refind_local"
   if [[ ! -x "$SBSign" ]] ; then
      echo "Can't find sbsign, which is required to sign rEFInd with your own keys!"
      echo "Aborting!"
      exit 1
   fi
   GenerateKeys
   mkdir -p "$TempDir/drivers_x64"
   # Bug fix: the closing quote used to wrap "$SOURCE $TempDir" as ONE cp
   # argument, so none of these files were actually copied. Quote source and
   # destination separately.
   cp "$RefindDir/refind.conf-sample" "$TempDir" 2> /dev/null
   cp "$ThisDir/refind.conf-sample" "$TempDir" 2> /dev/null
   cp "$RefindDir/refind_ia32.efi" "$TempDir" 2> /dev/null
   cp -a "$RefindDir/drivers_ia32" "$TempDir" 2> /dev/null
   cp -a "$ThisDir/drivers_ia32" "$TempDir" 2> /dev/null
   SignOneBinary "$RefindDir/refind_x64.efi" "$TempDir/refind_x64.efi"
   # Iterate filenames newline-delimited so paths with spaces survive.
   SaveIFS=$IFS
   IFS=$(echo -en "\n\b")
   for Driver in `ls "$RefindDir"/drivers_x64/*.efi "$ThisDir"/drivers_x64/*.efi 2> /dev/null` ; do
      TempName=`basename "$Driver"`
      SignOneBinary "$Driver" "$TempDir/drivers_x64/$TempName"
   done
   IFS=$SaveIFS
   RefindDir="$TempDir"
   DeleteRefindDir=1
} # ReSignBinaries()
# Locate and mount an ESP, if possible, based on parted output.
# Should be called only if /boot/efi is NOT an acceptable ESP.
# Sets InstallDir to the mounted ESP's path ($RootDir/boot/efi)
# and EspFilesystem the filesystem (always "vfat")
FindLinuxESP() {
   echo "The ESP doesn't seem to be mounted! Trying to find it...."
   local Drive
   local PartNum
   local TableType
   local DmStatus
   local SkipIt
   local Dmraid
   # Scan whole-disk device nodes (/dev/sda../dev/hdz style names).
   for Drive in `ls /dev/[sh]d?` ; do
      SkipIt=0
      Dmraid=`which dmraid 2> /dev/null`
      if [ -x "$Dmraid" ] ; then
         # Skip members of firmware RAID arrays; mounting a raw member could
         # corrupt the array.
         DmStatus=`dmraid -r | grep $Drive`
         if [ -n "$DmStatus" ] ; then
            echo "$Drive seems to be part of a RAID array; skipping!"
            SkipIt=1
         fi
      fi
      # Field 6 of parted's machine-readable output is the partition-table type.
      TableType=`parted $Drive print -m -s 2>/dev/null | awk -F: '$1 == "'$Drive'" { print $6 }'`
      if [[ $TableType == 'gpt' && $SkipIt == 0 ]] ; then # read only GPT disks that aren't part of dmraid array
         # First partition whose flags include "boot" (LANG=C keeps the flag
         # names in English so the awk pattern matches).
         PartNum=`LANG=C parted $Drive print -m -s 2>/dev/null | awk -F: '$7 ~ "(^boot| boot)" { print $1 }' | head -n 1`
         # Numeric sanity check: [ n -eq n ] succeeds only if PartNum is an integer.
         if [ "$PartNum" -eq "$PartNum" ] 2> /dev/null ; then
            InstallDir="$RootDir/boot/efi"
            mkdir -p $InstallDir
            mount $Drive$PartNum $InstallDir
            EspFilesystem=`grep "$Drive$PartNum.*/boot/efi" /etc/mtab | uniq | grep -v autofs | cut -d " " -f 3`
            # Keep the mount only if it really is VFAT; otherwise keep scanning.
            if [[ $EspFilesystem != 'vfat' ]] ; then
               umount $InstallDir
            else
               echo "Mounting ESP at $InstallDir"
               break;
            fi
         fi # $PartNum -eq $PartNum
      fi # TableType
   done
} # FindLinuxESP()
# Identifies the ESP's location (/boot or /boot/efi, or these locations under
# the directory specified by --root); aborts if the ESP isn't mounted at
# either location.
# Sets InstallDir to the ESP mount point.
FindMountedESP() {
   # Try mounting the usual candidates from /etc/fstab; failures are harmless.
   mount /boot &> /dev/null
   mount /boot/efi &> /dev/null
   # Prefer /boot/efi; fall back to /boot if the former isn't a mount point.
   EspLine=`df "$RootDir/boot/efi" 2> /dev/null | grep boot/efi`
   if [[ ! -n "$EspLine" ]] ; then
      EspLine=`df "$RootDir"/boot | grep boot`
   fi
   # Field 6 of df output is the mount point.
   InstallDir=`echo $EspLine | cut -d " " -f 6`
   if [[ -n "$InstallDir" ]] ; then
      EspFilesystem=`grep "$InstallDir" /etc/mtab | uniq | grep -v autofs | cut -d " " -f 3`
   fi
   # Not VFAT at either location: scan the disks for an unmounted ESP.
   if [[ $EspFilesystem != 'vfat' ]] ; then
      FindLinuxESP
   fi
   # FindLinuxESP updates EspFilesystem on success; still not VFAT means no ESP.
   if [[ $EspFilesystem != 'vfat' ]] ; then
      echo "$RootDir/$InstallDir doesn't seem to be on a VFAT filesystem. The ESP must be"
      echo "mounted at $RootDir/boot or $RootDir/boot/efi and it must be VFAT! Aborting!"
      exit 1
   fi
   echo "ESP was found at $InstallDir using $EspFilesystem"
} # FindMountedESP
# Uses efibootmgr to add an entry for rEFInd to the EFI's NVRAM.
# If this fails, sets Problems=1
AddBootEntry() {
   local PartNum
   InstallIt="0"
   Efibootmgr=`which efibootmgr 2> /dev/null`
   if [[ "$Efibootmgr" ]] ; then
      # Derive disk device and partition number from the mtab entry for the ESP
      # (e.g. /dev/sda1 -> disk /dev/sda, partition 1).
      InstallDisk=`grep "$InstallDir" /etc/mtab | cut -d " " -f 1 | cut -c 1-8`
      PartNum=`grep "$InstallDir" /etc/mtab | cut -d " " -f 1 | cut -c 9-10`
      EntryFilename="$TargetDir/$Refind"
      # Convert forward slashes to the backslashes EFI uses in load paths...
      EfiEntryFilename=`echo ${EntryFilename//\//\\\}`
      # ...and double them so the backslashes survive grep's escaping below.
      EfiEntryFilename2=`echo ${EfiEntryFilename} | sed s/\\\\\\\\/\\\\\\\\\\\\\\\\/g`
      ExistingEntry=`"$Efibootmgr" -v | grep -i "$EfiEntryFilename2"`
      if [[ "$ExistingEntry" ]] ; then
         # Entry already in NVRAM; re-create it only if it isn't first in BootOrder.
         ExistingEntryBootNum=`echo "$ExistingEntry" | cut -c 5-8`
         FirstBoot=`"$Efibootmgr" | grep BootOrder | cut -c 12-15`
         if [[ "$ExistingEntryBootNum" != "$FirstBoot" ]] ; then
            echo "An existing rEFInd boot entry exists, but isn't set as the default boot"
            echo "manager. The boot order is being adjusted to make rEFInd the default boot"
            echo "manager. If this is NOT what you want, you should use efibootmgr to"
            echo "manually adjust your EFI's boot order."
            # Delete the old entry; it is re-created as the default below.
            "$Efibootmgr" -b $ExistingEntryBootNum -B &> /dev/null
            InstallIt="1"
         fi
      else
         InstallIt="1"
      fi
      if [[ $InstallIt == "1" ]] ; then
         echo "Installing it!"
         "$Efibootmgr" -c -l "$EfiEntryFilename" -L "rEFInd Boot Manager" -d $InstallDisk -p $PartNum &> /dev/null
         if [[ $? != 0 ]] ; then
            EfibootmgrProblems=1
            Problems=1
         fi
      fi
   else # efibootmgr not found
      EfibootmgrProblems=1
      Problems=1
   fi
   if [[ $EfibootmgrProblems ]] ; then
      echo
      echo "ALERT: There were problems running the efibootmgr program! You may need to"
      echo "rename the $Refind binary to the default name (EFI/boot/bootx64.efi"
      echo "on x86-64 systems or EFI/boot/bootia32.efi on x86 systems) to have it run!"
      echo
   else
      echo "rEFInd has been set as the default boot manager."
   fi
} # AddBootEntry()
# Create a minimal/sample refind_linux.conf file in /boot.
# Reads the distribution's default kernel options from /etc/default/grub when
# available and prefers identifying the root filesystem by UUID.
GenerateRefindLinuxConf() {
   if [[ -f "$RLConfFile" ]] ; then
      echo "Existing $RLConfFile found; not overwriting."
   else
      echo "Creating $RLConfFile; edit it to adjust kernel options."
      if [[ -f "$RootDir/etc/default/grub" ]] ; then
         # We want the default options used by the distribution, stored here....
         source "$RootDir/etc/default/grub"
         echo "Setting default boot options based on $RootDir/etc/default/grub"
      fi
      RootFS=`df "$RootDir" | grep dev | cut -f 1 -d " "`
      StartOfDevname=`echo "$RootFS" | cut -b 1-7`
      # BUGFIX: the second comparison formerly read $StartOfDevName (capital N),
      # an unset variable, so the /dev/hd case never matched.
      if [[ "$StartOfDevname" == "/dev/sd" || "$StartOfDevname" == "/dev/hd" ]] ; then
         # Identify root filesystem by UUID rather than by device node, if possible
         Uuid=`blkid -o export -s UUID "$RootFS" 2> /dev/null | grep UUID=`
         if [[ -n $Uuid ]] ; then
            RootFS="$Uuid"
         fi
      fi
      DefaultOptions="$GRUB_CMDLINE_LINUX $GRUB_CMDLINE_LINUX_DEFAULT"
      echo "\"Boot with standard options\" \"ro root=$RootFS $DefaultOptions \"" > "$RLConfFile"
      echo "\"Boot to single-user mode\" \"ro root=$RootFS $DefaultOptions single\"" >> "$RLConfFile"
      echo "\"Boot with minimal options\" \"ro root=$RootFS\"" >> "$RLConfFile"
   fi
}
# Set variables for installation in the fallback EFI/BOOT directory.
# Without shim, rEFInd itself takes the default loader names; with shim,
# shim becomes the default loader and rEFInd takes the name shim chains to.
SetVarsForBoot() {
   TargetDir="/EFI/BOOT"
   if [[ $ShimSource == "none" ]] ; then
      TargetX64="bootx64.efi"
      TargetIA32="bootia32.efi"
      return
   fi
   case "$ShimType" in
      shim.efi|shimx64.efi)
         TargetX64="grubx64.efi"
         ;;
      preloader.efi|PreLoader.efi)
         TargetX64="loader.efi"
         ;;
      *)
         echo "Unknown shim/PreBootloader type: $ShimType"
         echo "Aborting!"
         exit 1
         ;;
   esac
   TargetIA32="bootia32.efi"
   TargetShim="bootx64.efi"
} # SetVarsForBoot()
# Set variables for installation in the EFI/Microsoft/Boot directory,
# "hijacking" the Windows boot loader filename (bootmgfw.efi).
SetVarsForMsBoot() {
   TargetDir="/EFI/Microsoft/Boot"
   if [[ $ShimSource == "none" ]] ; then
      TargetX64="bootmgfw.efi"
      return
   fi
   case "$ShimType" in
      shim.efi|shimx64.efi)
         TargetX64="grubx64.efi"
         ;;
      preloader.efi|PreLoader.efi)
         TargetX64="loader.efi"
         ;;
      *)
         echo "Unknown shim/PreBootloader type: $ShimType"
         echo "Aborting!"
         exit 1
         ;;
   esac
   TargetShim="bootmgfw.efi"
} # SetVarsForMsBoot()
# TargetDir defaults to /EFI/refind; however, this function adjusts it as follows:
# - If an existing refind.conf is available in /EFI/BOOT or /EFI/Microsoft/Boot,
#   install to that directory under the suitable name; but DO NOT do this if
#   refind.conf is also in /EFI/refind.
# - If booted in BIOS mode and the ESP lacks any other EFI files, install to
#   /EFI/BOOT
# - If booted in BIOS mode and there's no refind.conf file and there is a
#   /EFI/Microsoft/Boot/bootmgfw.efi file, move it down one level and
#   install under that name, "hijacking" the Windows boot loader filename
DetermineTargetDir() {
   Upgrade=0
   # Checks run in increasing priority: a conf in /EFI/refind wins over the
   # other locations because it is tested last.
   if [[ -f $InstallDir/EFI/BOOT/refind.conf ]] ; then
      SetVarsForBoot
      Upgrade=1
   fi
   if [[ -f $InstallDir/EFI/Microsoft/Boot/refind.conf ]] ; then
      SetVarsForMsBoot
      Upgrade=1
   fi
   if [[ -f $InstallDir/EFI/refind/refind.conf ]] ; then
      TargetDir="/EFI/refind"
      Upgrade=1
   fi
   if [[ $Upgrade == 1 ]] ; then
      echo "Found rEFInd installation in $InstallDir$TargetDir; upgrading it."
   fi
   # /sys/firmware/efi exists only when the kernel booted in EFI mode.
   if [[ ! -d /sys/firmware/efi && $Upgrade == 0 ]] ; then   # BIOS-mode
      FoundEfiFiles=`find "$InstallDir/EFI/BOOT" -name "*.efi" 2> /dev/null`
      FoundConfFiles=`find "$InstallDir" -name "refind\.conf" 2> /dev/null`
      if [[ ! -n "$FoundConfFiles" && -f "$InstallDir/EFI/Microsoft/Boot/bootmgfw.efi" ]] ; then
         # Windows loader present: move it up a level so rEFInd can take its name.
         mv -n "$InstallDir/EFI/Microsoft/Boot/bootmgfw.efi" "$InstallDir/EFI/Microsoft" &> /dev/null
         SetVarsForMsBoot
         echo "Running in BIOS mode with a suspected Windows installation; moving boot loader"
         echo "files so as to install to $InstallDir$TargetDir."
      elif [[ ! -n "$FoundEfiFiles" ]] ; then # In BIOS mode and no default loader; install as default loader
         SetVarsForBoot
         echo "Running in BIOS mode with no existing default boot loader; installing to"
         echo $InstallDir$TargetDir
      else
         echo "Running in BIOS mode with an existing default boot loader; backing it up and"
         echo "installing rEFInd in its place."
         if [[ -d "$InstallDir/EFI/BOOT-rEFIndBackup" ]] ; then
            echo ""
            echo "Caution: An existing backup of a default boot loader exists! If the current"
            echo "default boot loader and the backup are different boot loaders, the current"
            echo "one will become inaccessible."
            echo ""
            echo -n "Do you want to proceed with installation (Y/N)? "
            ReadYesNo
            if [[ $YesNo == "Y" || $YesNo == "y" ]] ; then
               echo "OK; continuing with the installation..."
            else
               exit 0
            fi
         fi
         # -n: don't clobber an existing backup directory.
         mv -n "$InstallDir/EFI/BOOT" "$InstallDir/EFI/BOOT-rEFIndBackup"
         SetVarsForBoot
      fi
   fi # BIOS-mode
} # DetermineTargetDir()
# Controls rEFInd installation under Linux: locates the ESP, validates the
# CPU/firmware combination, optionally re-signs binaries, copies the files,
# and registers the NVRAM boot entry.
# Sets Problems=1 if something goes wrong.
InstallOnLinux() {
   if [[ "$TargetDir" == "/System/Library/CoreServices" ]] ; then
      echo "You may not use the --ownhfs option under Linux! Aborting!"
      exit 1
   fi
   echo "Installing rEFInd on Linux...."
   modprobe efivars &> /dev/null
   if [[ $TargetDir == "/EFI/BOOT" ]] ; then
      MountDefaultTarget
   else
      FindMountedESP
      DetermineTargetDir
   fi
   CpuType=`uname -m`
   if [[ $CpuType == 'x86_64' ]] ; then
      Platform="EFI64"
   elif [[ ($CpuType == 'i386' || $CpuType == 'i486' || $CpuType == 'i586' || $CpuType == 'i686') ]] ; then
      Platform="EFI32"
      # If we're in EFI mode, do some sanity checks, and alert the user or even
      # abort. Not in BIOS mode, though, since that could be used on an emergency
      # disc to try to recover a troubled Linux installation.
      if [[ -d /sys/firmware/efi ]] ; then
         # BUGFIX: this test formerly compared against "/BOOT/EFI" (path
         # components reversed), a value TargetDir can never hold.
         if [[ "$ShimSource" != "none" && "$TargetDir" != "/EFI/BOOT" ]] ; then
            echo ""
            echo "CAUTION: shim does not currently supports 32-bit systems, so you should not"
            echo "use the --shim option to install on such systems. Aborting!"
            echo ""
            exit 1
         fi
         echo
         echo "CAUTION: This Linux installation uses a 32-bit kernel. 32-bit EFI-based"
         echo "computers are VERY RARE. If you've installed a 32-bit version of Linux"
         echo "on a 64-bit computer, you should manually install the 64-bit version of"
         echo "rEFInd. If you're installing on a Mac, you should do so from OS X. If"
         echo "you're positive you want to continue with this installation, answer 'Y'"
         echo "to the following question..."
         echo
         echo -n "Are you sure you want to continue (Y/N)? "
         ReadYesNo
         if [[ $YesNo == "Y" || $YesNo == "y" ]] ; then
            echo "OK; continuing with the installation..."
         else
            exit 0
         fi
      fi # in EFI mode
   else
      echo "Unknown CPU type '$CpuType'; aborting!"
      exit 1
   fi
   if [[ $LocalKeys == 1 ]] ; then
      ReSignBinaries
   fi
   CheckSecureBoot
   CopyRefindFiles
   # NVRAM entry and refind_linux.conf only make sense for a normal
   # /EFI/refind installation, not for the fallback or hijacked locations.
   if [[ "$TargetDir" != "/EFI/BOOT" && "$TargetDir" != "/EFI/Microsoft/Boot" ]] ; then
      AddBootEntry
      GenerateRefindLinuxConf
   fi
} # InstallOnLinux()
#
# The main part of the script. Sets a few environment variables,
# performs a few startup checks, and then calls functions to
# install under OS X or Linux, depending on the detected platform.
#
OSName=`uname -s`
GetParams "$@"
# Directory holding this script (resolving symlinks), used to locate the
# bundled rEFInd binaries.
ThisDir="$( cd -P "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
RefindDir="$ThisDir/refind"
ThisScript="$ThisDir/`basename $0`"
if [[ `whoami` != "root" ]] ; then
   # Re-execute this same script under sudo, forwarding the original arguments.
   echo "Not running as root; attempting to elevate privileges via sudo...."
   sudo "$ThisScript" "$@"
   if [[ $? != 0 ]] ; then
      echo "This script must be run as root (or using sudo). Exiting!"
      exit 1
   else
      exit 0
   fi
fi
CheckForFiles
if [[ $OSName == 'Darwin' ]] ; then
   # OS X supports neither shim nor locally generated keys.
   if [[ "$ShimSource" != "none" ]] ; then
      echo "The --shim option is not supported on OS X! Exiting!"
      exit 1
   fi
   if [[ "$LocalKeys" != 0 ]] ; then
      echo "The --localkeys option is not supported on OS X! Exiting!"
      exit 1
   fi
   # NOTE(review): $1 is unquoted and passes only the first CLI argument;
   # confirm InstallOnOSX's expectations before changing this.
   InstallOnOSX $1
elif [[ $OSName == 'Linux' ]] ; then
   InstallOnLinux
else
   echo "Running on unknown OS; aborting!"
fi
if [[ $Problems ]] ; then
   echo
   echo "ALERT:"
   echo "Installation has completed, but problems were detected. Review the output for"
   echo "error messages and take corrective measures as necessary. You may need to"
   echo "re-run this script or install manually before rEFInd will work."
   echo
else
   echo
   echo "Installation has completed successfully."
   echo
fi
# UnmountEsp is set when this script mounted the ESP itself; undo that here.
if [[ $UnmountEsp == '1' ]] ; then
   echo "Unmounting install dir"
   umount $InstallDir
fi
if [[ "$InstallDir" == /tmp/refind_install ]] ; then
#   sleep 5
   rmdir "$InstallDir"
fi
|
dab2000/rEFInd-custom
|
install.sh
|
Shell
|
gpl-3.0
| 41,259 |
#!/bin/bash
# Install Go 1.15 into /usr/local/go if no `go` binary is on the PATH,
# then report the installed version.
GO_ARCHIVE=go1.15.linux-amd64.tar.gz
if ! command -v go > /dev/null
then
    if [ ! -f "$GO_ARCHIVE" ]
    then
        echo "download golang archive.."
        curl -OL "https://golang.org/dl/$GO_ARCHIVE"
    fi
    if [ ! -d /usr/local/go ]
    then
        echo "extract golang archive to /usr/local/go"
        sudo tar -C /usr/local -xzf "$GO_ARCHIVE"
    fi
    export PATH=$PATH:/usr/local/go/bin
    echo "added /usr/local/go/bin to your PATH env"
    echo "if you want to set the PATH permanently, add the following line to your $HOME/.profile"
    # BUGFIX: print the line literally (single quotes); the old double-quoted
    # form expanded the current $PATH into the suggested .profile line.
    echo 'export PATH=$PATH:/usr/local/go/bin'
fi
echo -n "Great! you have installed "
go version
echo "Go and build the kiwotigo tool with ./build.sh"
|
spearwolf/kiwotigo
|
setup-go-env.sh
|
Shell
|
gpl-3.0
| 706 |
#!/bin/bash
#BSUB -J "dk[1-408]"
#BSUB -o log.%I
#BSUB -e err.%I
#BSUB -q basement
# LSF array job (tasks 1-408): each task processes the LSB_JOBINDEX-th *.gz
# file in the coverage directory and writes its output under kruskal/.
cd /lustre/scratch116/casm/cgp/users/tn5/confetti/coverage
# Pick the N-th file from the (stable) ls listing, N = this task's index.
SAMPLE=`ls *.gz | head -n $LSB_JOBINDEX | tail -n 1`
python3 ../scripts/covintrip.py ../../davidrnaseq/genome/Mus_musculus.NCBIM37.67.gtf $SAMPLE >kruskal/${SAMPLE%.gz}
|
TravisCG/SI_scripts
|
runcovintrip.sh
|
Shell
|
gpl-3.0
| 315 |
#!/bin/sh
# Author: Marc Christensen ([email protected])
#         Michael Hutchinson ([email protected])
# Launcher for the ffly.exe Mono application: points the dynamic linker at the
# Mono framework, verifies Mono >= 2.4 is installed, then execs the app.
MONO_FRAMEWORK_PATH=/Library/Frameworks/Mono.framework/Versions/Current
export DYLD_FALLBACK_LIBRARY_PATH=$MONO_FRAMEWORK_PATH/lib:/lib:/usr/lib
#prevent Macports from messing up mono and pkg-config
export PATH="$MONO_FRAMEWORK_PATH/bin:$PATH"
DIR=$(cd "$(dirname "$0")"; pwd)
# $0 should contain the full path from the root i.e. /Applications/<folder>.app/Contents/MacOS/<script>
EXE_PATH="$DIR/ffly.exe"
# Work around a bug in 'exec' in older versions of macosx
OSX_VERSION=$(uname -r | cut -f1 -d.)
if [ "$OSX_VERSION" -lt 9 ]; then # Darwin 8 or earlier (OS X 10.4 and older)
	MONO_EXEC="exec mono"
else
	MONO_EXEC="exec -a ffly mono"
fi
#mono version check
REQUIRED_MAJOR=2
REQUIRED_MINOR=4
APPNAME="Community Flight Finder"
VERSION_TITLE="Cannot launch $APPNAME"
VERSION_MSG="$APPNAME requires the Mono Framework version $REQUIRED_MAJOR.$REQUIRED_MINOR or later."
DOWNLOAD_URL="http://www.go-mono.com/mono-downloads/download.html"
MONO_VERSION="$(mono --version | grep 'Mono JIT compiler version ' | cut -f5 -d\ )"
MONO_VERSION_MAJOR="$(echo $MONO_VERSION | cut -f1 -d.)"
MONO_VERSION_MINOR="$(echo $MONO_VERSION | cut -f2 -d.)"
if [ -z "$MONO_VERSION" ] \
	|| [ $MONO_VERSION_MAJOR -lt $REQUIRED_MAJOR ] \
	|| [ $MONO_VERSION_MAJOR -eq $REQUIRED_MAJOR -a $MONO_VERSION_MINOR -lt $REQUIRED_MINOR ]
then
	# Offer to open the Mono download page in a dialog, then bail out.
	osascript \
	-e "set question to display dialog \"$VERSION_MSG\" with title \"$VERSION_TITLE\" buttons {\"Cancel\", \"Download...\"} default button 2" \
	-e "if button returned of question is equal to \"Download...\" then open location \"$DOWNLOAD_URL\""
	echo "$VERSION_TITLE"
	echo "$VERSION_MSG"
	exit 1
fi
# NOTE: remove this for stable releases
if [ -z "$MD_NO_DEBUG" ]; then
	_MONO_OPTIONS=${MONO_OPTIONS:---debug}
else
	_MONO_OPTIONS=$MONO_OPTIONS
fi
# BUGFIX: forward arguments as "$@" (not $*) so ones containing spaces stay
# intact. $MONO_EXEC/$_MONO_OPTIONS stay unquoted: they hold multi-word commands.
$MONO_EXEC $_MONO_OPTIONS "$EXE_PATH" "$@" >> ~/Documents/ffly-log.txt
|
jrising/flight-finder
|
desktop/Community Flight Finder Pristine.app/Contents/MacOS/run.sh
|
Shell
|
gpl-3.0
| 1,949 |
#!/bin/sh
#
# set up the environment where the uninstalled tools and plugins will be used by default.
# The following can be placed in the profile file
#
BUILD=$HOME/gstreamer_build
#
export PATH=$BUILD/bin:$PATH
export PKG_CONFIG_PATH=$BUILD/lib/pkgconfig:$PKG_CONFIG_PATH
# Linux
# CONSISTENCY FIX: derive the library paths from $BUILD instead of the
# hard-coded /home/guofeng home directory used by the original author.
export LD_LIBRARY_PATH=$BUILD/lib
# OSX
export DYLD_LIBRARY_PATH=$BUILD/lib
#
# don't use any system-installed plug-ins at all
#
export GST_PLUGIN_SYSTEM_PATH_1_0=
export GST_PLUGIN_PATH_1_0=$BUILD/lib/gstreamer-1.0:/usr/lib/gstreamer-1.0
#
# set our registry somewhere else so we don't mess up the registry generated
# by an installed copy
#
export GST_REGISTRY_1_0=$BUILD/registry.dat
#
# Point at the uninstalled plugin scanner
#
export GST_PLUGIN_SCANNER=$BUILD/libexec/gstreamer-1.0/gst-plugin-scanner
#
export GI_TYPELIB_PATH=$BUILD/share/gir-1.0:$GI_TYPELIB_PATH
|
auslides/gstreamer-setup
|
env-exports.sh
|
Shell
|
gpl-3.0
| 899 |
#! /usr/bin/env bash
# Boot a VM, run test0/test1 on the clean system, apply the prepare step
# (which reboots), then run the same tests again and power the VM off.
./vm_setup.sh
./vm_run.sh &disown;  # start the VM in the background and detach it
clear
sleep 5 # booting
./upload.sh
# Baseline runs on the unmodified system.
ssh so ./test0.sh log > clean0
ssh so ./test1.sh log > clean1
ssh so ./prepare.sh &
sleep 15 # rebooting
# Same measurements after prepare.sh has been applied.
ssh so ./test0.sh log > patched0
ssh so ./test1.sh log > patched1
ssh so poweroff
|
vlad7777/uni_coursework
|
4_sem/SO/5/do_all.sh
|
Shell
|
gpl-3.0
| 287 |
#!/bin/bash
# vim: ts=4:sw=4
. ~/plescripts/plelib.sh
. ~/plescripts/dblib.sh
. ~/plescripts/global.cfg
EXEC_CMD_ACTION=EXEC
PAUSE=OFF
typeset -r ME=$0
typeset -r PARAMS="$*"
# Number of times my_objects is inserted into itself (grows the table).
typeset -i loops=2
typeset -r str_usage=\
"Usage :
$ME
	-db=db_name
	-pdb=pdb_name
	[-loops=$loops] nombre d'insertion de la table my_objects dans elle même.
"
typeset db=undef
typeset pdb=undef
# Command-line parsing: -emul switches the helpers to dry-run mode.
while [ $# -ne 0 ]
do
	case $1 in
		-emul)
			EXEC_CMD_ACTION=NOP
			shift
			;;
		-db=*)
			db=${1##*=}
			shift
			;;
		-pdb=*)
			pdb=${1##*=}
			shift
			;;
		-loops=*)
			loops=${1##*=}
			shift
			;;
		-h|-help|help)
			info "$str_usage"
			LN
			exit 1
			;;
		*)
			error "Arg '$1' invalid."
			LN
			info "$str_usage"
			exit 1
			;;
	esac
done
#ple_enable_log -params $PARAMS
exit_if_param_undef db	"$str_usage"
exit_if_param_undef pdb	"$str_usage"
# Prefix used in log messages, e.g. "mydb[mypdb] :".
typeset -r ID="$db[$pdb] :"
typeset -r path=~/plescripts/db/crash
# This scenario only makes sense when run against the primary database.
typeset -r dbrole=$(read_database_role $db)
if [ "$dbrole" != primary ]
then
	error "$ID role must be primary"
	LN
	exit 1
fi
typeset -a physical_list
typeset -a stby_server_list
load_stby_database
typeset -r dbstby=${physical_list[0]}
# Stop the standby so the redo generated below cannot be shipped to it.
info "Stop stby database $dbstby"
sqlplus_cmd_with "sys/$oracle_password@${dbstby} as sysdba" \
	"$(set_sql_cmd "shutdown immediate;")"
LN
test_pause
# Generate redo on the primary while the standby is down.
info "$ID create table my_object"
sqlplus_cmd_with "sys/$oracle_password@${pdb}_oci as sysdba" \
	"$(set_sql_cmd "@$path/my_object.sql $loops")"
LN
test_pause
info "$ID switch archivelog"
# Emit five "switch logfile" commands so several archived redo logs
# are produced in one sqlplus session.
function sqlcmd_switch_archivelog
{
	typeset -i n=0
	while [ $n -lt 5 ]
	do
		set_sql_cmd "alter system switch logfile;"
		n=$(( n + 1 ))
	done
}
sqlplus_cmd "$(sqlcmd_switch_archivelog)"
LN
typeset -r recovery_path="$(orcl_parameter_value db_recovery_file_dest)"
# NOTE(review): the double quote is misplaced (name"=value instead of
# name="value"); the word still expands to a valid assignment, but it should
# read archivelog_path="...".
typeset -r archivelog_path"=$recovery_path/$(to_upper $db)/archivelog"
# Delete the archived logs before the standby can fetch them, creating a gap.
info "$ID remove all archivelog"
if command_exists crsctl
then
	# Grid Infrastructure present: archives live in ASM, use asmcmd.
	exec_cmd "sudo -iu grid asmcmd ls $archivelog_path/"
	exec_cmd "sudo -iu grid asmcmd rm -rf $archivelog_path/*"
	# Error with 12.1
	exec_cmd -c "sudo -iu grid asmcmd ls $archivelog_path/"
	LN
else
	exec_cmd "ls -rtl $archivelog_path/"
	exec_cmd "rm -rf $archivelog_path/*"
	exec_cmd "ls -rtl $archivelog_path/"
	LN
	info "Il faut recréer le répertoire du jour sinon la commande"
	info "alter system archive log current; ne fonctionnera pas."
	exec_cmd -c "mkdir $archivelog_path/$(date +%Y_%m_%d)"
	LN
fi
test_pause
# Restart the standby: it is now missing the deleted archives (gap).
info "Start stby database $dbstby"
sqlplus_cmd_with "sys/$oracle_password@${dbstby} as sysdba" \
	"$(set_sql_cmd "startup;")"
LN
timing 25
LN
# Data Guard broker status should now report the synchronization problem.
exec_cmd "dgmgrl -silent -echo sys/$oracle_password 'show configuration'"
exec_cmd "dgmgrl -silent -echo sys/$oracle_password 'show database $dbstby'"
LN
|
PhilippeLeroux/plescripts
|
db/crash/desynchronize_physical_stby.sh
|
Shell
|
gpl-3.0
| 2,844 |
# Test: split a PCAP-NG capture into rotated parts (1000000-byte chunks),
# merge them back with mergecap, and compare capinfos packet counts.
exec 2>/dev/null;
echo -n ".";
cat tests/data/pcapng_big | ./pipa -s 1000000 -x pcapng A%i.part;
mergecap -F pcapng -w glued.pcapng *.part;
# Packet counts of the original vs. the re-merged capture (capinfos -c).
S1=`capinfos -c tests/data/pcapng_big | awk 'END{print $4}'`;
S2=`capinfos -c glued.pcapng | awk 'END{print $4}'`;
if [ "$S1" != "$S2" ];
	then echo -e "\nx_pcapng_rotation.sh: PCAP-NG file format with rotating.";
fi;
rm *.part;
rm glued.pcapng;
exit;
|
Janiczek/pipa
|
tests/x_pcapng_rotation.sh
|
Shell
|
gpl-3.0
| 409 |
#Phil, Lamia 10 Oct 08
# Demo driver: configure MR3 mesh routing, start the PMIPv6 daemon in an
# xterm, monitor channel stats until interrupted, then tear down.
source params.sh
sudo sysctl -w net.ipv6.conf.all.forwarding=1
./mr3_conf_nas_barca_outdoor.sh
./mr3_mpls_nas_barca.sh
# Enable IPv6 neighbor-discovery proxying on both interfaces.
sudo sysctl -w net.ipv6.conf.eth0.proxy_ndp=1
sudo sysctl -w net.ipv6.conf.nasmesh0.proxy_ndp=1
sudo ifconfig eth0 promisc
sudo ip -6 addr add $MR3_EG6_ADDR/64 dev eth0
sudo xterm -hold -e "$OPENAIR3_PMIP6D_PATH/pmip6d -m -s -L $CH2_IN6_ADDR -N $MR3_EG6_ADDR -E $MR3_IN6_ADDR" &
# Blocks here until the user quits watch; everything after is cleanup.
watch -n1 "cat /proc/openair2/lchan_stats ; cat /proc/openair1/bch_stats"
./stop_rf.sh
./del_mpls.sh
sleep 2
./del_mpls.sh
|
mspublic/openair4G-mirror
|
openair3/EXAMPLES/CONSEIL_SCIENTIFIQUE_DEMO/mr3_nas_barca_outdoor.sh
|
Shell
|
gpl-3.0
| 550 |
#!/bin/bash
#Opens todays journal, creating the file if it doesn't exist.
TODAY=$(date +%F)  # ISO date, e.g. 2020-01-02
JOURNAL_DIR=/media/FILES/journal/  # trailing slash expected by all users below
TODAYS_JOURNAL=${JOURNAL_DIR}${TODAY}.jent
#Opens journal Entry for the file passed as $1, creating it (dated header
#plus template) if it doesn't exist.
#BUGFIX: the header and the status message now use the date encoded in the
#filename (YYYY-MM-DD.jent), so opening yesterday's journal no longer stamps
#it with today's date.
openEntry () {
	local entry_date
	entry_date=$(basename "$1" .jent)
	if [[ ! -f "$1" ]]; then
		echo $(date -d "$entry_date" '+%A %B %d %Y') >> "$1"
		cat "${JOURNAL_DIR}TEMPLATE.txt" >> "$1"
	fi
	echo >> "$1"; echo >> "$1"; echo $(date +%R) >> "$1"
	focuswriter "$1"
	echo "${entry_date}'s entry updated."
}
# Asks for explicit confirmation, then removes today's journal entry.
deleteEntry () {
	echo "Do you really want to delete today's entry?!?"
	read -p "Type 'yes' to confirm: " answer
	case "$answer" in
		yes)
			rm $TODAYS_JOURNAL
			echo "Today's entry deleted"
			;;
		*)
			echo "Deletion cancelled"
			;;
	esac
}
#Prompts for each individual line in the template, allowing for
#a more convenient method of entering the ABC's of the day
interactiveMode () {
	if [[ ! -f $TODAYS_JOURNAL ]]; then
		# NOTE(review): this header uses today's date even when TODAYS_JOURNAL
		# was redirected to a past date via -y; same issue flagged on openEntry.
		echo $(date '+%A %B %d %Y') >> $TODAYS_JOURNAL
		echo >> $TODAYS_JOURNAL
		# fd 3 is duplicated from the original stdin (the terminal); stdin is
		# redirected from the template, so `read line` gets each prompt line
		# and `read -u 3 input` gets the user's answer.
		while read line; do
			echo $line
			read -u 3 input
			echo "${line} ${input}" >> $TODAYS_JOURNAL
		done 3<&0 < ${JOURNAL_DIR}TEMPLATE.txt
		# Two trailing blank lines to separate this entry from later appends.
		for counter in {1,2}; do
			echo "" >> $TODAYS_JOURNAL
		done
	fi
	openEntry $TODAYS_JOURNAL
}
# Prints usage text describing each supported command-line option.
help () {
	printf '%s\n' \
		"Legal options:" \
		"-t     Opens TEMPLATE.txt for modification" \
		"-h     Display help text" \
		"-y     Opens yesterday's journal entry. Numerical arguments" \
		"       following this option will jump backwards that many days." \
		"-d     Deletes todays journal entry after prompting" \
		"-i     Interactive mode. Enter todays stats from the terminal." \
		"-r     Read a few recent entries"
}
#Print previous 3 entries out to terminal
printOlder () {
	# NOTE(review): parses `ls -t` output; safe here only because entry names
	# are ISO dates without whitespace. The 20* glob matches 20xx-*.jent files.
	more $(ls -t ${JOURNAL_DIR}20* | head -3)
}
#************#
#    MAIN    #
#************#
#With no arguments, fall through to interactive entry for today.
if [[ $# -eq 0 ]]; then
#	openEntry $TODAYS_JOURNAL
	interactiveMode
else
	#getopts looks for single character options ('-y' in the case below)
	#any option with a following colon takes an argument, which getopts
	#stores in OPTARG; the option letter itself lands in OPT
	while getopts "thy:dir" OPT; do
		case $OPT in
			t) nano ${JOURNAL_DIR}TEMPLATE.txt
				;;
			h) help
				;;
			y)PREV_DAY=$(date --date=$OPTARG' days ago' +"%F")
				# Retarget the globals at the past date, then reuse the normal flow.
				TODAYS_JOURNAL="${JOURNAL_DIR}${PREV_DAY}.jent"
				interactiveMode
				;;
			d) deleteEntry
				;;
			i) interactiveMode
				;;
			r) printOlder
				;;
		esac
	done
fi
exit
|
Joe-Westra/Joernal
|
journal.sh
|
Shell
|
gpl-3.0
| 2,538 |
#!/bin/sh -x
# TODO: common place
# Report a fatal error on stderr and abort the script with status 1.
fatal()
{
	local msg="FATAL: $*"
	echo "$msg" >&2
	exit 1
}
PKGNAME=brave-browser
if [ "$1" = "--remove" ] ; then
	epm remove $PKGNAME
	exit
fi
# Anything other than an explicit --run just prints what this script does.
[ "$1" != "--run" ] && echo "Install Brave browser from the official site" && exit
# Brave only publishes x86_64 packages; $DISTRVENDOR -a reports the arch.
[ "$($DISTRVENDOR -a)" != "x86_64" ] && echo "Only x86_64 is supported" && exit 1
arch=x86_64
pkgtype=rpm
# Resolve the latest matching rpm from the GitHub releases page via $EGET.
PKG=$($EGET --list --latest https://github.com/brave/brave-browser/releases "$PKGNAME-*.$arch.$pkgtype") || fatal "Can't get package URL"
epm install "$PKG"
|
vitlav/eepm
|
prescription.d/brave.sh
|
Shell
|
agpl-3.0
| 526 |
#!/bin/bash
# Run the lines-DBN example from the project root (one level above this
# script's directory), forwarding any extra CLI arguments to deepframe.
script=$(readlink -f "$0")
basedir=$(dirname "$script")
cd "$basedir/.." || exit 1
# Remove the CPU-time limit for the (potentially long) training run.
ulimit -t unlimited
time ./bin/deepframe -m 1000 -n 200 -a0.03 -p \
	-t resources/lines-test.dat -s resources/lines-labels.dat \
	-c examples/lines-dbn.cfg "$@"
|
janvojt/ffwdnet
|
examples/lines-dbn.sh
|
Shell
|
agpl-3.0
| 241 |
#!/usr/bin/env bash
# Disable background music: drop the marker file, clear the screen, and
# show a short confirmation banner.
stop_bgm(){
	touch /home/pi/.DisableMusic
	clear
	printf '\n\n\n Background Music Stopped\n\n\n\n'
	sleep 3
}
# Re-enable background music by removing the marker file, then confirm.
start_bgm(){
	rm /home/pi/.DisableMusic
	printf '\n\n\n Background Music Started\n\n\n\n'
	sleep 3
}
# Toggle: the marker file present means music is currently disabled,
# so re-enable it; otherwise disable it.
if [ -e /home/pi/.DisableMusic ]; then
	start_bgm
else
	stop_bgm
fi
exit
|
Shakz76/Eazy-Hax-RetroPie-Toolkit
|
cfg/Toggle Background Music.sh
|
Shell
|
agpl-3.0
| 428 |
# Patch the game server's JSON configs from environment variables, then launch.
# BUGFIX: use `test -n "$VAR"` instead of the unquoted `test $VAR`, which
# word-split values containing spaces and broke the check.
cd /server/UnityStation_Data/StreamingAssets/config
if test -n "$RCON_PASSWORD"; then jq --arg v "$RCON_PASSWORD" '.RconPass = $v' config.json | sponge config.json; fi
if test -n "$HUB_USER"; then jq --arg v "$HUB_USER" '.HubUser = $v' config.json | sponge config.json; fi
if test -n "$HUB_PASSWORD"; then jq --arg v "$HUB_PASSWORD" '.HubPass = $v' config.json | sponge config.json; fi
cd /server/UnityStation_Data/StreamingAssets
if test -n "$BUILD_NUMBER"; then jq --arg v "$BUILD_NUMBER" '.BuildNumber = $v' buildinfo.json | sponge buildinfo.json; fi
if test -n "$BUILD_FORK"; then jq --arg v "$BUILD_FORK" '.ForkName = $v' buildinfo.json | sponge buildinfo.json; fi
/server/UnityStation -batchmode -nographics -logfile /dev/stdout
|
Necromunger/unitystation
|
Docker/entrypoint.sh
|
Shell
|
agpl-3.0
| 732 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.