code (string, 2 to 1.05M chars) | repo_name (string, 5 to 110 chars) | path (string, 3 to 922 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 2 to 1.05M)
---|---|---|---|---|---
#!/bin/bash
#Check that the upload has moved all of the files it set out to.
log="$1"
errlog="$2"
if [[ "$log" == "" ]]; then echo "No log file given"; exit 1; fi
if [[ "$errlog" == "" ]]; then echo "No errlog file given"; exit 1; fi
#Tell the user how many files were indexed and how many were uploaded.
added=`grep -c "Adding file" "$log"`
uploads=`grep -c "Successfully uploaded" "$log"`
echo "" >> "$errlog"
echo "Files indexed: $added, files uploaded: $uploads" >> "$errlog"
#Check if the numbers are the same.
if [[ "$added" -ne "$uploads" ]]; then
#They are not, so figure out why.
echo "Files that differ:" >> "$errlog"
grep "Adding file" "$log" | \
tr "'" "-" | \
perl -ne 'print "$1\n" if (m/Adding file \-(.*)\-/)' | \
tr -s -c "[:alnum:]\.\n" "_" | \
sort >tmp_index
grep "Successfully uploaded" "$log" | \
perl -ne 'print "$1\n" if (m/Successfully uploaded (.*) to/)' | \
tr -s -c "[:alnum:]\.\n" "_" | \
sort >tmp_upload
comm -23 tmp_index tmp_upload >> "$errlog"
rm tmp_upload tmp_index
echo "" >> "$errlog"
echo "" >> "$errlog"
#Check how many times each error has occurred.
errors=`grep "\[ERROR\]" "$errlog" | \
perl -pe "s/'.*'/***/; s/\d{4}\/\d{2}\/\d{2} \d{2}\:\d{2}\:\d{2} \- \[ERROR\]\s+//" | \
sort | \
uniq -c`
#Check if any errors are serious.
#Count only non-empty lines so an empty error list yields zero.
serious=`echo "$errors" | \
grep -v "File is empty" | \
grep -v "is a file, skipping" | \
grep -v "symbolic link" | \
grep -c .`
#Show the errors
echo "$errors" >> "$errlog"
#Check if any serious errors occurred.
if [[ "$serious" -eq 0 ]]; then
echo "No serious errors. Nothing to worry about." >> "$log"
exit 0
else
echo "errors found!" >> "$log"
exit 2
fi
fi
exit 0
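#Example usage (hypothetical log names):
#  ./check.sh upload.log upload.err
#Exit codes: 0 = clean or only benign errors, 1 = missing argument, 2 = serious errors found.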
|
scsd/Drive-Upload
|
t/check.sh
|
Shell
|
mit
| 1,856 |
#!/bin/bash -l
set -eu
# TEST CORI SH
# Simple Swift/T+Python+Horovod tests on Cori
# Use this with Swift scripts in sanity/
if (( ${#} != 1 ))
then
echo "Provide a Swift script!"
exit 1
fi
SWIFT_SCRIPT=$1
module load java gcc
# module load tensorflow/intel-head
module load python/2.7-anaconda-4.4
PATH=$HOME/Public/sfw/login/swift-t-2018-04-16/stc/bin:$PATH
# export PYTHONPATH=$HOME/.local/lib/python2.7/site-packages
# swift-t -v
# which python
# echo PP ${PYTHONPATH:-}
# echo PUB $PYTHONUSERBASE
export SWIFT_PATH=$PWD
TIC=${SWIFT_SCRIPT%.swift}.tic
swift-t -u -o $TIC $SWIFT_SCRIPT
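# Example invocation (hypothetical script name, assuming a Swift script in sanity/):
#   ./test-cori.sh sanity/hello.swift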
|
ECP-CANDLE/Supervisor
|
workflows/test-horovod/test-cori.sh
|
Shell
|
mit
| 602 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for RHSA-2013:1319
#
# Security announcement date: 2013-09-30 23:37:23 UTC
# Script generation date: 2017-01-01 21:14:52 UTC
#
# Operating System: Red Hat 5
# Architecture: x86_64
#
# Vulnerable packages fixed in version:
# - libipa_hbac.i386:1.5.1-70.el5
# - libipa_hbac-devel.i386:1.5.1-70.el5
# - sssd-client.i386:1.5.1-70.el5
# - sssd-debuginfo.i386:1.5.1-70.el5
# - libipa_hbac.x86_64:1.5.1-70.el5
# - libipa_hbac-devel.x86_64:1.5.1-70.el5
# - libipa_hbac-python.x86_64:1.5.1-70.el5
# - sssd.x86_64:1.5.1-70.el5
# - sssd-client.x86_64:1.5.1-70.el5
# - sssd-debuginfo.x86_64:1.5.1-70.el5
# - sssd-tools.x86_64:1.5.1-70.el5
#
# Last versions recommended by security team:
# - libipa_hbac.i386:1.5.1-70.el5
# - libipa_hbac-devel.i386:1.5.1-70.el5
# - sssd-client.i386:1.5.1-70.el5
# - sssd-debuginfo.i386:1.5.1-70.el5
# - libipa_hbac.x86_64:1.5.1-70.el5
# - libipa_hbac-devel.x86_64:1.5.1-70.el5
# - libipa_hbac-python.x86_64:1.5.1-70.el5
# - sssd.x86_64:1.5.1-70.el5
# - sssd-client.x86_64:1.5.1-70.el5
# - sssd-debuginfo.x86_64:1.5.1-70.el5
# - sssd-tools.x86_64:1.5.1-70.el5
#
# CVE List:
# - CVE-2013-0219
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo yum install libipa_hbac-1.5.1-70.el5.i386 -y
sudo yum install libipa_hbac-devel-1.5.1-70.el5.i386 -y
sudo yum install sssd-client-1.5.1-70.el5.i386 -y
sudo yum install sssd-debuginfo-1.5.1-70.el5.i386 -y
sudo yum install libipa_hbac-1.5.1-70.el5.x86_64 -y
sudo yum install libipa_hbac-devel-1.5.1-70.el5.x86_64 -y
sudo yum install libipa_hbac-python-1.5.1-70.el5.x86_64 -y
sudo yum install sssd-1.5.1-70.el5.x86_64 -y
sudo yum install sssd-client-1.5.1-70.el5.x86_64 -y
sudo yum install sssd-debuginfo-1.5.1-70.el5.x86_64 -y
sudo yum install sssd-tools-1.5.1-70.el5.x86_64 -y
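# Optional sanity check (standard rpm usage): confirm the fixed versions are installed:
#   rpm -q sssd.x86_64 libipa_hbac.x86_64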
|
Cyberwatch/cbw-security-fixes
|
Red_Hat_5/x86_64/2013/RHSA-2013:1319.sh
|
Shell
|
mit
| 1,859 |
#!/bin/bash
Install_Nginx_Openssl()
{
if [ "${Enable_Nginx_Openssl}" = 'y' ]; then
Download_Files ${Download_Mirror}/lib/openssl/${Openssl_Ver}.tar.gz ${Openssl_Ver}.tar.gz
[[ -d "${Openssl_Ver}" ]] && rm -rf ${Openssl_Ver}
tar zxf ${Openssl_Ver}.tar.gz
Nginx_With_Openssl="--with-openssl=${cur_dir}/src/${Openssl_Ver}"
fi
}
Install_Nginx()
{
Echo_Blue "[+] Installing ${Nginx_Ver}... "
groupadd www
useradd -s /sbin/nologin -g www www
cd ${cur_dir}/src
Install_Nginx_Openssl
Tar_Cd ${Nginx_Ver}.tar.gz ${Nginx_Ver}
if echo ${Nginx_Ver} | grep -Eqi 'nginx-[0-1]\.[5-8]\.[0-9]' || echo ${Nginx_Ver} | grep -Eqi 'nginx-1\.9\.[1-4]$'; then
./configure --user=www --group=www --prefix=/usr/local/nginx --with-http_stub_status_module --with-http_ssl_module --with-http_spdy_module --with-http_gzip_static_module --with-ipv6 --with-http_sub_module ${Nginx_With_Openssl} ${NginxMAOpt} ${Nginx_Modules_Options}
else
./configure --user=www --group=www --prefix=/usr/local/nginx --with-http_stub_status_module --with-http_ssl_module --with-http_v2_module --with-http_gzip_static_module --with-ipv6 --with-http_sub_module ${Nginx_With_Openssl} ${NginxMAOpt} ${Nginx_Modules_Options}
fi
make && make install
cd ../
ln -sf /usr/local/nginx/sbin/nginx /usr/bin/nginx
rm -f /usr/local/nginx/conf/nginx.conf
cd ${cur_dir}
if [ "${Stack}" = "lnmpa" ]; then
\cp conf/nginx_a.conf /usr/local/nginx/conf/nginx.conf
\cp conf/proxy.conf /usr/local/nginx/conf/proxy.conf
\cp conf/proxy-pass-php.conf /usr/local/nginx/conf/proxy-pass-php.conf
else
\cp conf/nginx.conf /usr/local/nginx/conf/nginx.conf
fi
\cp conf/rewrite/dabr.conf /usr/local/nginx/conf/dabr.conf
\cp conf/rewrite/discuz.conf /usr/local/nginx/conf/discuz.conf
\cp conf/rewrite/sablog.conf /usr/local/nginx/conf/sablog.conf
\cp conf/rewrite/typecho.conf /usr/local/nginx/conf/typecho.conf
\cp conf/rewrite/typecho2.conf /usr/local/nginx/conf/typecho2.conf
\cp conf/rewrite/wordpress.conf /usr/local/nginx/conf/wordpress.conf
\cp conf/rewrite/discuzx.conf /usr/local/nginx/conf/discuzx.conf
\cp conf/rewrite/none.conf /usr/local/nginx/conf/none.conf
\cp conf/rewrite/wp2.conf /usr/local/nginx/conf/wp2.conf
\cp conf/rewrite/phpwind.conf /usr/local/nginx/conf/phpwind.conf
\cp conf/rewrite/shopex.conf /usr/local/nginx/conf/shopex.conf
\cp conf/rewrite/dedecms.conf /usr/local/nginx/conf/dedecms.conf
\cp conf/rewrite/drupal.conf /usr/local/nginx/conf/drupal.conf
\cp conf/rewrite/ecshop.conf /usr/local/nginx/conf/ecshop.conf
\cp conf/rewrite/codeigniter.conf /usr/local/nginx/conf/codeigniter.conf
\cp conf/rewrite/laravel.conf /usr/local/nginx/conf/laravel.conf
\cp conf/rewrite/thinkphp.conf /usr/local/nginx/conf/thinkphp.conf
\cp conf/pathinfo.conf /usr/local/nginx/conf/pathinfo.conf
\cp conf/enable-php.conf /usr/local/nginx/conf/enable-php.conf
\cp conf/enable-php-pathinfo.conf /usr/local/nginx/conf/enable-php-pathinfo.conf
\cp conf/enable-ssl-example.conf /usr/local/nginx/conf/enable-ssl-example.conf
mkdir -p ${Default_Website_Dir}
chmod +w ${Default_Website_Dir}
mkdir -p /home/wwwlogs
chmod 777 /home/wwwlogs
chown -R www:www ${Default_Website_Dir}
mkdir /usr/local/nginx/conf/vhost
if [ "${Default_Website_Dir}" != "/home/wwwroot/default" ]; then
sed -i "s#/home/wwwroot/default#${Default_Website_Dir}#g" /usr/local/nginx/conf/nginx.conf
fi
if [ "${Stack}" = "lnmp" ]; then
cat >${Default_Website_Dir}/.user.ini<<EOF
open_basedir=${Default_Website_Dir}:/tmp/:/proc/
EOF
chmod 644 ${Default_Website_Dir}/.user.ini
chattr +i ${Default_Website_Dir}/.user.ini
cat >>/usr/local/nginx/conf/fastcgi.conf<<EOF
fastcgi_param PHP_ADMIN_VALUE "open_basedir=\$document_root/:/tmp/:/proc/";
EOF
fi
\cp init.d/init.d.nginx /etc/init.d/nginx
chmod +x /etc/init.d/nginx
if [ "${SelectMalloc}" = "3" ]; then
mkdir /tmp/tcmalloc
chown -R www:www /tmp/tcmalloc
sed -i '/nginx.pid/a\
google_perftools_profiles /tmp/tcmalloc;' /usr/local/nginx/conf/nginx.conf
fi
}
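# Minimal usage sketch: these variables are normally set by the surrounding lnmp
# installer; the values below are illustrative assumptions only.
#   cur_dir=$(pwd) Download_Mirror=http://soft.vpser.net/lnmp
#   Nginx_Ver=nginx-1.12.2 Openssl_Ver=openssl-1.0.2l Enable_Nginx_Openssl=y
#   Stack=lnmp Default_Website_Dir=/home/wwwroot/default SelectMalloc=1
#   Install_Nginx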
|
boxcore/shell
|
install-3rd/lnmp1.4/include/nginx.sh
|
Shell
|
mit
| 4,268 |
#!/bin/bash
source /unit_tests/test-utils.sh
#
# Exit status is 0 for PASS, nonzero for FAIL
#
STATUS=0
# devnode test
check_devnode "/dev/input/event0"
print_status
exit $STATUS
|
DYNA-instruments-open-source/eco-g45-linux-bsp
|
BSP-3.14/local_src/common/imx-test-11.09.01/test/mxc_keyb_test/autorun-keypad.sh
|
Shell
|
mit
| 183 |
#!/usr/bin/env sh
ProjectDir="../../../../../"
cd $ProjectDir
caffe_dire="caffe/"
# build/tools_dire/
tools_dire="build/tools/"
tools_dire=$caffe_dire$tools_dire
# models/Pose2/flic/d302/tmask_2b/
pt_dire="models/Pose2/"
sub_pt_dire="flic/d302/"
exper_name="tmask_2b/"
exper_path=$pt_dire$sub_pt_dire$exper_name
mkdir -p $exper_path
# models/Pose2/flic/d302/tmask_2b/solver.pt
solver_pt="solver.pt"
solver_pt=$exper_path$solver_pt
echo "\n" $solver_pt "\n"
# ../asserts/models/Pose2/flic/d302/tmask_2b/
model_dire="../asserts/"
model_dire=$model_dire$exper_path
mkdir -p $model_dire
# ../asserts/models/Pose2/flic/d302/tmask_2b/models/
model_path="models/"
model_path=$model_dire$model_path
mkdir -p $model_path
echo $model_path
# ../asserts/models/Pose2/flic/d302/tmask_2b/log/
log_path="log/"
log_path=$model_dire$log_path
mkdir -p $log_path
# prefix -- log file
file_prefix="flic_"
log_file=$(date -d "today" +"%Y-%m-%d-%H-%M-%S")
log_file=$log_path$file_prefix$log_file".log"
# execute file
caffe_bin="caffe"
caffe_bin=$tools_dire$caffe_bin
echo
echo "######################################"
echo
echo "Usage: "
echo " sh run.sh [re_iter]"
echo
echo "######################################"
echo
sleep_time=2
sleep $sleep_time
# resume model file
if [ -z "$1" ]; then
re_iter=0
# run & log command
$caffe_bin train --solver=$solver_pt 2>&1 | tee -a $log_file
else
re_iter=$1
resume_model_file="flic_iter_"$re_iter".solverstate"
resume_model_file=$model_path$resume_model_file
echo
echo "re_iter:" $re_iter
echo "snapshot path:" $resume_model_file
echo
# run & log command
$caffe_bin train --solver=$solver_pt --snapshot=$resume_model_file 2>&1 | tee -a $log_file
fi
echo "Done!"
|
zimenglan-sysu-512/pose_action_caffe
|
models/Pose2/flic/d302/tmask_2b/run.sh
|
Shell
|
mit
| 1,709 |
[[ $c = [[:alpha:].~-] ]]
|
grncdr/js-shell-parse
|
tests/fixtures/shellcheck-tests/condition3/source.sh
|
Shell
|
mit
| 25 |
#!/usr/bin/env bash
PATH=$PATH:./node_modules/.bin
BROWSERIFY=watchify
[ ! -d www/lib ] && mkdir www/lib
$BROWSERIFY www/src/index.js --debug \
--transform [ babelify --presets [ react ] ] \
--outfile www/lib/index-bundle.js -v
|
pvdheijden/tiles-demo
|
scripts/watch-app.sh
|
Shell
|
mit
| 241 |
#!/bin/sh
rsync -avrc /vagrant/module/ /mnt/storage/rsss/33.33.33.9/module/
rsync -avrc /vagrant/public/ /mnt/storage/rsss/33.33.33.9/public/
chown apache:apache -R /mnt/storage/rsss
|
rgeyer/rs_selfservice
|
sync.sh
|
Shell
|
mit
| 184 |
#!/bin/bash
function usage() {
echo "$0 name
Updates a pathogen bundle using git subtree
name : name of the plugin"
}
test -z "$1" && usage && exit 1
plugin="$1"
remotes_file=remotes
if [ ! -f "$remotes_file" ]; then
echo "Cannot find $remotes_file; cannot update bundle."
exit 1
fi
repo_location=$(awk -v p="^$plugin" '$0 ~ p {print $2}' "$remotes_file")
repo_branch=$(awk -v p="^$plugin" '$0 ~ p {print $3}' "$remotes_file")
if [ -z "$repo_location" ]; then
echo "No bundle named $plugin in remotes file."
exit 1
fi
if ! git remote | grep -q "$plugin"; then
git remote add "$plugin" "$repo_location" || exit 1
fi
git fetch "$plugin" || exit 1
#git branch -f "$plugin" "$plugin/$repo_branch" || exit 1
echo "Merging $plugin branch."
git merge --squash -s subtree --no-commit "$plugin/$repo_branch" || exit 1
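# Assumed remotes file format, inferred from the awk fields above
# (name, repository URL, branch; one bundle per line), e.g.:
#   vim-fugitive https://github.com/tpope/vim-fugitive.git master
# Example: ./update_bundle.sh vim-fugitive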
|
ibizaman/conffiles
|
update_bundle.sh
|
Shell
|
mit
| 834 |
#!/bin/bash
set -ex
BASE_DIR=$(cd $(dirname $0); pwd)
${BASE_DIR}/clean-containers.sh
${BASE_DIR}/smoke-test.sh ${BASE_DIR}/../.git 1
${BASE_DIR}/clean-containers.sh
${BASE_DIR}/smoke-test.sh ${BASE_DIR}/../.git 2
|
ototadana/pocci
|
test/test-private.sh
|
Shell
|
mit
| 217 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for DSA-2823-1
#
# Security announcement date: 2013-12-18 00:00:00 UTC
# Script generation date: 2017-01-01 21:06:47 UTC
#
# Operating System: Debian 6 (Squeeze)
# Architecture: i386
#
# Vulnerable packages fixed in version:
# - pixman:0.16.4-1+deb6u1
#
# Last versions recommended by security team:
# - pixman:0.16.4-1+deb6u2
#
# CVE List:
# - CVE-2013-6425
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade pixman=0.16.4-1+deb6u2 -y
|
Cyberwatch/cbw-security-fixes
|
Debian_6_(Squeeze)/i386/2013/DSA-2823-1.sh
|
Shell
|
mit
| 613 |
#!/bin/bash
# open psql session to production db
source bin/env.sh
dcprod -f docker-compose.db.yml run --rm dbclient bash -c 'psql -h db -U $POSTGRES_USER $POSTGRES_DB'
|
Producters/docker-node-react-starter
|
bin/psql.sh
|
Shell
|
mit
| 171 |
#!/bin/bash
set +h # disable hashall
shopt -s -o pipefail
PKG_NAME="groff"
PKG_VERSION="1.22.3"
TARBALL="${PKG_NAME}-${PKG_VERSION}.tar.gz"
SRC_DIR="${PKG_NAME}-${PKG_VERSION}"
function prepare() {
ln -sv "/source/$TARBALL" "$TARBALL"
}
function unpack() {
tar xf ${TARBALL}
}
function build() {
PAGE=A4 ./configure --prefix=/usr
make -j1
}
function check() {
echo " "
}
function instal() {
make -j1 install
}
function clean() {
rm -rf "${SRC_DIR}" "$TARBALL"
}
clean;prepare;unpack;pushd ${SRC_DIR};build;[[ $MAKE_CHECK = TRUE ]] && check;instal;popd;clean
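# Example (assumes the tarball is present under /source/, per prepare() above):
#   MAKE_CHECK=TRUE ./build.sh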
|
PandaLinux/pandaOS
|
phase2/groff/build.sh
|
Shell
|
mit
| 599 |
#!/usr/bin/env bash
#
# Convenience script to enable debug mode, disable caching, and
# enable nginx/gunicorn. The MTURK debug status is not changed.
#
# If you edit this script, also edit enable_production.sh
#
DIR="$( builtin cd "$( dirname "$( readlink -f "${BASH_SOURCE[0]}" )" )" && pwd )"
source "$DIR/load_config.sh"
F="$SRC_SETTINGS_DIR/settings_local.py"
sed -r -i \
-e 's/^\s*DEBUG\s*=.*$/DEBUG = True/' \
"$F"
echo "Relevant variables in $F:"
grep "^DEBUG =" "$F"
grep "^ENABLE_CACHING =" "$F"
grep "^DEBUG_TOOLBAR =" "$F"
grep "^MTURK_SANDBOX =" "$F"
bash "$DIR/files_changed.sh"
|
seanbell/django-scripts
|
enable_debug.sh
|
Shell
|
mit
| 615 |
export EDITOR='vim'
export TERM="xterm-256color"
export LANG="sv_SE.UTF-8"
export LC_CTYPE="sv_SE.UTF-8"
export LC_NUMERIC="en_US.UTF-8"
export LC_TIME="sv_SE.UTF-8"
export LC_COLLATE="sv_SE.UTF-8"
export LC_MONETARY="sv_SE.UTF-8"
export LC_MESSAGES="en_US.UTF-8"
export LC_PAPER="sv_SE.UTF-8"
export LC_NAME="sv_SE.UTF-8"
export LC_ADDRESS="sv_SE.UTF-8"
export LC_TELEPHONE="sv_SE.UTF-8"
export LC_MEASUREMENT="sv_SE.UTF-8"
export LC_IDENTIFICATION="sv_SE.UTF-8"
|
mhelmer/dotfiles
|
system/env.zsh
|
Shell
|
mit
| 465 |
#! ./testshell
echo "This script should be breakable by SIGINT if you run a shell with"
echo "asynchrnous traps enabled. Examples: FreeBSD's sh with switch -T"
echo "from April, 1999 or FreeBSD's sh between September 1998 and March"
echo "1999. SIGQUIT should do nothing"
trap : 3
trap 'echo SIGINT ; exit 1' 2
./hardguy
|
redox-os/ion
|
sh-interrupt/test28.sh
|
Shell
|
mit
| 322 |
#!/bin/bash -eu
_pwd=`pwd`
_root=$(cd $(dirname $0)/.. && pwd)
_path=${PATH}
# Generate constants by json.
python ${_root}/script/gen_constant.py --java-dst \
${_root}/src/android/app/src/main/java/org/processwarp/android/constant/Module.java \
${_root}/src/const/module.json
python ${_root}/script/gen_constant.py --java-dst \
${_root}/src/android/app/src/main/java/org/processwarp/android/constant/NID.java \
${_root}/src/const/nid.json
python ${_root}/script/gen_constant.py --java-dst \
${_root}/src/android/app/src/main/java/org/processwarp/android/constant/PID.java \
${_root}/src/const/pid.json
# Build jni module.
PATH=${PATH}:${NDK_PATH:-""}:~/Library/Android/sdk/ndk-bundle
if ! type ndk-build >/dev/null 2>&1; then
echo "Error: Program \"ndk-build\" not found in PATH"
echo "PATH=[${_path}]"
echo "Please set the path for ndk-build in PATH or NDK_PATH"
exit 1
fi
ndk-build APP_OPTIM=debug -C ${_root}/src/android/app/src/main/jni/
# Finish.
cd ${_pwd}
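# Example (hypothetical NDK install path): make ndk-build visible via NDK_PATH:
#   NDK_PATH=$HOME/android-ndk-r10e ./script/build_android_part.sh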
|
processwarp/processwarp
|
script/build_android_part.sh
|
Shell
|
mit
| 1,027 |
#!/bin/bash
sudo apt-get -y remove nginx
sudo apt-get -y install nginx-extras realpath
sudo nginx || true
|
cdpoffline/http-server
|
bin/install.sh
|
Shell
|
mit
| 109 |
#!/bin/bash
# The MIT License (MIT)
# Copyright (c) 2015 de-wiring.net
#
# EXAMPLE script, NOT READY FOR PRODUCTION USE
#
# --
# given a ca, create key and cert as intermediate
source '00_functions.sh'
if [[ ! -d $CA_PATH ]]; then
echo ERROR did not find ca path
exit 2
fi
create_key_req_cert wallet '/C=DE/L=Berlin/O=de-wiring.net/OU=containerwallet/CN=wallet'
F=/wallet/ca/private/dhparam.pem
if [[ ! -f $F ]]; then
openssl dhparam -out $F 2048
fi
|
de-wiring/containerwallet
|
tls/02_create_server_keycert.sh
|
Shell
|
mit
| 473 |
#!/bin/bash
alias docker-clear='docker rm -f `docker ps -aq`'
alias docker-clear-images='docker rmi $(docker images -f "dangling=true" -q)'
#
alias docker-cache='docker-compose -f $HOME/workspace/docker/stacks/cache/docker-compose.yml -p common up -d memcached_app memcached_session'
#
alias docker-gmus='docker-compose -f $HOME/workspace/docker/stacks/g-mus/docker-compose.yml -p gmus up -d'
alias docker-gmas='docker-compose -f $HOME/workspace/docker/stacks/g-mas/docker-compose.yml -p gmas up -d'
alias docker-postgres='docker-compose -f $HOME/workspace/docker-inovadora-stacks/database/docker-compose.yml -p postgres-9.6 up -d postgres-9.6'
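# Usage sketch: source this file from your shell startup, then invoke an alias:
#   source ~/bashrc.sh
#   docker-cache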
|
jacksonveroneze/docker
|
bashrc.sh
|
Shell
|
mit
| 646 |
CQChartsTest -tcl -exec pareto.tcl
#CQChartsTest -ceil -exec pareto.cl
|
colinw7/CQCharts
|
data/ceil/pareto.sh
|
Shell
|
mit
| 72 |
#!/bin/bash
# this script is a wrapper to run `CBIG_pMFM_test_high_resolution.py`
# Written by Kong Xiaolu and CBIG under MIT license: https://github.com/ThomasYeoLab/CBIG/blob/master/LICENSE.md
cd ../../../../part2_pMFM_control_analysis/High_resolution/scripts
source activate pMFM
python CBIG_pMFM_test_high_resolution.py
mv ../output ../../../replication/part2_pMFM_control_analysis/High_resolution/results/
|
ThomasYeoLab/CBIG
|
stable_projects/fMRI_dynamics/Kong2021_pMFM/replication/part2_pMFM_control_analysis/High_resolution/scripts/CBIG_pMFM_test_high_resolution_wrapper.sh
|
Shell
|
mit
| 421 |
#!/bin/bash
for file in *.rs; do
rustc --edition=2018 "$file"
done
|
esjeon/graveyard
|
rust/compile.sh
|
Shell
|
mit
| 70 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for RHSA-2013:0128
#
# Security announcement date: 2013-01-08 06:52:00 UTC
# Script generation date: 2017-01-01 21:14:21 UTC
#
# Operating System: Red Hat 5
# Architecture: x86_64
#
# Vulnerable packages fixed in version:
# - conga-debuginfo.x86_64:0.12.2-64.el5
# - luci.x86_64:0.12.2-64.el5
# - ricci.x86_64:0.12.2-64.el5
#
# Last versions recommended by security team:
# - conga-debuginfo.x86_64:0.12.2-81.el5
# - luci.x86_64:0.12.2-81.el5
# - ricci.x86_64:0.12.2-81.el5
#
# CVE List:
# - CVE-2012-3359
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo yum install conga-debuginfo-0.12.2-81.el5.x86_64 -y
sudo yum install luci-0.12.2-81.el5.x86_64 -y
sudo yum install ricci-0.12.2-81.el5.x86_64 -y
|
Cyberwatch/cbw-security-fixes
|
Red_Hat_5/x86_64/2013/RHSA-2013:0128.sh
|
Shell
|
mit
| 838 |
# Replace this script to your update configuration.
#
# In this case the system will update repositories, upgrades packages,
# and remove unused packages.
sudo apt-get update && \
sudo apt-get upgrade -y && \
sudo apt-get dist-upgrade -y && \
sudo apt-get autoremove -y
|
gusknul/dotfiles
|
system/update.sh
|
Shell
|
mit
| 271 |
#!/bin/bash
docker build --tag vapor-oauth .
docker run --rm vapor-oauth
|
brokenhandsio/vapor-oauth
|
docker-test.sh
|
Shell
|
mit
| 73 |
#!/bin/bash
currentDir=$( pwd )
websiteUrl='http://getbootstrap.com'
galen test homePage.test -DwebsiteUrl=${websiteUrl} --htmlreport ../../reports/bootstrap/home
galen test cssPage.test -DwebsiteUrl=${websiteUrl}'/css' --htmlreport ../../reports/bootstrap/css
galen test jsPage.test -DwebsiteUrl=${websiteUrl}'/javascript' --htmlreport ../../reports/bootstrap/javascript
|
holisticon/holisticon.github.io
|
presentations/jsunconf_galen/tests/bootstrap/runGalenTests.sh
|
Shell
|
mit
| 373 |
#!/bin/bash
#[email protected]
# $1 : cPod Name
# $2 : PortGroup Name
# $3 : @IP
# $4 : # of ESX
# $5 : Root Domain
. ./env
[ "$1" == "" -o "$2" == "" -o "$3" == "" ] && echo "usage: $0 <name_of_vapp> <name_of_port_group> <ip_on_transit> <num_esx>" && exit 1
PS_SCRIPT=create_vapp.ps1
SCRIPT_DIR=/tmp/scripts
SCRIPT=/tmp/scripts/$$.ps1
mkdir -p ${SCRIPT_DIR}
cp ${COMPUTE_DIR}/${PS_SCRIPT} ${SCRIPT}
sed -i -e "s/###VCENTER###/${VCENTER}/" \
-e "s/###VCENTER_ADMIN###/${VCENTER_ADMIN}/" \
-e "s/###VCENTER_PASSWD###/${VCENTER_PASSWD}/" \
-e "s/###VCENTER_DATACENTER###/${VCENTER_DATACENTER}/" \
-e "s/###VCENTER_CLUSTER###/${VCENTER_CLUSTER}/" \
-e "s/###PORTGTOUP###/${2}/" \
-e "s/###CPOD_NAME###/${1}/" \
-e "s/###TEMPLATE_VM###/${TEMPLATE_VM}/" \
-e "s/###TEMPLATE_ESX###/${TEMPLATE_ESX}/" \
-e "s/###IP###/${3}/" \
-e "s/###ROOT_PASSWD###/${ROOT_PASSWD}/" \
-e "s/###DATASTORE###/${DATASTORE}/" \
-e "s/###NUMESX###/${4}/" \
-e "s/###ROOT_DOMAIN###/${5}/" \
${SCRIPT}
echo "Creating vApp '${HEADER}-${1}' with ${4} ESXi."
#docker run --rm -v ${SCRIPT_DIR}:${SCRIPT_DIR} --entrypoint="/usr/bin/pwsh" vmware/powerclicore:latest ${SCRIPT} 2>&1 > /dev/null
#docker run -it --rm --entrypoint="/usr/bin/pwsh" -v /tmp/scripts:/tmp/scripts vmware/powerclicore:latest ${SCRIPT} 2>&1 > /dev/null
docker run --rm --entrypoint="/usr/bin/pwsh" -v /tmp/scripts:/tmp/scripts vmware/powerclicore:ubuntu16.04 ${SCRIPT} > /dev/null 2>&1
rm -fr ${SCRIPT}
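# Example invocation (illustrative values only):
#   ./create_vapp.sh cpod01 pg-transit 33.33.33.9 4 example.org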
|
bdereims/cPod
|
shwrfr/compute/create_vapp.sh
|
Shell
|
mit
| 1,456 |
#!/bin/bash
# install a newer openssl version to protect against a 0-day bug
# @link: http://www.cnblogs.com/nayu/p/5521486.html
cd ~
# mv /usr/local/bin/openssl /usr/local/bin/openssl.bak
# mv /usr/bin/openssl /usr/bin/openssl.bak
wget https://www.openssl.org/source/openssl-1.1.0e.tar.gz
tar zxf openssl-1.1.0e.tar.gz
cd openssl-1.1.0e
./config shared zlib
make depend
make
make install
rm -rf ~/openssl-1.1.0e
# fix shared-library dependency:
# openssl: error while loading shared libraries: libssl.so.1.1: cannot open shared object file: No such file or directory
ln -s /usr/local/lib64/libssl.so.1.1 /usr/lib64/libssl.so.1.1
ln -s /usr/local/lib64/libcrypto.so.1.1 /usr/lib64/libcrypto.so.1.1
|
boxcore/shell
|
tools/openssl.sh
|
Shell
|
mit
| 674 |
#!/bin/bash
#
# requires the following variables:
# $CALGRAPH
# $TEST_DATA_PATH
# $TEST_OUTPUT_PATH
fails=0
total=0
testcases=(
"Basic Test"
"diff \
<(TARGET_PATH=$TEST_DATA_PATH/general $CALGRAPH) \
$TEST_OUTPUT_PATH/general"
)
echo "Running tests . . ."
for ((i = 0; i < ${#testcases[@]}; i+=2))
do
bash -c "${testcases[$i+1]}"
if [[ "$?" -ne 0 ]]
then
echo "${testcases[$i]} FAILED"
((fails++))
else
echo "${testcases[$i]} SUCCEEDED"
fi
((total++))
done
echo
echo "Ran $total tests. Encountered $fails failure(s)."
exit $fails
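# To add a test, append a name/command pair to the testcases array above,
# e.g. (hypothetical "empty" data set):
#   testcases+=(
#     "Empty Input Test"
#     "diff <(TARGET_PATH=$TEST_DATA_PATH/empty $CALGRAPH) $TEST_OUTPUT_PATH/empty"
#   )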
|
alex-hutton/calgraph
|
run_tests.sh
|
Shell
|
mit
| 553 |
# installs build tools
# gets the ffmpeg version and builds it
apt-get install build-essential git-core checkinstall yasm texi2html libvorbis-dev libx11-dev libxfixes-dev zlib1g-dev pkg-config
LIBVPX_VERSION=1.2.0
FFMPEG_VERSION=2.0.1
if [ ! -d "/usr/local/src/libvpx-${LIBVPX_VERSION}" ]; then
cd /usr/local/src
git clone http://git.chromium.org/webm/libvpx.git "libvpx-${LIBVPX_VERSION}"
cd "libvpx-${LIBVPX_VERSION}"
git checkout "v${LIBVPX_VERSION}"
./configure
make
checkinstall --pkgname=libvpx --pkgversion="${LIBVPX_VERSION}" --backup=no --deldoc=yes --default
fi
if [ ! -d "/usr/local/src/ffmpeg-${FFMPEG_VERSION}" ]; then
cd /usr/local/src
wget "http://ffmpeg.org/releases/ffmpeg-${FFMPEG_VERSION}.tar.bz2"
tar -xjf "ffmpeg-${FFMPEG_VERSION}.tar.bz2"
cd "ffmpeg-${FFMPEG_VERSION}"
./configure --enable-version3 --enable-postproc --enable-libvorbis --enable-libvpx
make
checkinstall --pkgname=ffmpeg --pkgversion="5:${FFMPEG_VERSION}" --backup=no --deldoc=yes --default
fi
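# Optional sanity check after the builds (standard ffmpeg flag):
#   ffmpeg -version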
|
NeovaHealth/bigbluebutton
|
puppet/files/build-ffmpeg.sh
|
Shell
|
mit
| 1,010 |
#!/bin/bash
# icons at https://www.dropbox.com/s/9iysh2i0gadi4ic/icons.pdf
base03="#002b36"
base02="#073642"
base01="#586e75"
base00="#657b83"
base0="#839496"
base1="#93a1a1"
base2="#eee8d5"
base3="#fdf6e3"
yellow="#b58900"
orange="#cb4b16"
red="#dc322f"
magenta="#d33682"
violet="#6c71c4"
blue="#268bd2"
cyan="#2aa198"
green="#859900"
export DISPLAY=:0
export XDG_RUNTIME_DIR=/run/user/`id -u`
memory () {
echo -e '\uf16c ' $(free -m | grep '^Mem' | awk '{$2/=1024;$3/=1024;printf "%.2f / %.2f GB",$3,$2 }')
}
cpu () {
echo -e '\uf0e4 ' $(top -bn 1 | grep 'Cpu' | tr -d 'usy,' | awk '{print "user " $2 ", sys " $3}')
}
power () {
echo -e '\uf215 ' $(upower -i `upower -e | grep BAT` | grep --color=never -E "percentage" | cut -d " " -f 15)
}
volume () {
# echo -e `` $(amixer get Master | awk -F'[][]' 'END{print $4":"$2 }')
echo -e '\uf026' $(amixer sget Master | awk -F'[][]' 'END{print $4":"$2 }')
}
show_date () {
echo $(date +"%a %b %d %R")
}
arrow () {
prevColor=$1
color=$2
echo "<span color=\"$color\" bgcolor=\"$prevColor\"></span>"
}
section () {
prevColor=$1
color=$2
text=$3
echo "$(arrow $prevColor $color)<span color=\"white\" bgcolor=\"$color\"> $text </span>"
}
section_memory () {
echo "$(section $base02 $base01 "$(memory)")"
}
section_volume () {
echo "$(section $base01 $base02 "$(volume)")"
}
section_power () {
echo "$(section $base02 $base00 "$(power)")"
}
section_date () {
echo "$(section $base00 $base02 "$(show_date)")"
}
# section_cpu () {
# echo "$(section $base01 $base02 "$(cpu)")"
# }
# status="$(section_memory)$(section_volume)$(section_cpu)$(section_power)$(section_date)"
status="$(section_memory)$(section_volume)$(section_power)$(section_date)"
# status="$(section_memory)$(section_power)$(section_date)"
echo "$status"
xsetroot -name "$status" > /dev/null 2>&1
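# Typical usage (assumption): refresh the bar periodically from a loop or cron:
#   while true; do ./set-dwm-status.sh; sleep 10; done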
|
tiberiuc/dot-files
|
scripts/set-dwm-status.sh
|
Shell
|
mit
| 1,916 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for DLA-263-1
#
# Security announcement date: 2015-06-30 00:00:00 UTC
# Script generation date: 2017-01-01 21:08:58 UTC
#
# Operating System: Debian 6 (Squeeze)
# Architecture: i386
#
# Vulnerable packages fixed in version:
# - ruby1.9.1:1.9.2.0-2+deb6u5
#
# Last versions recommended by security team:
# - ruby1.9.1:1.9.2.0-2+deb6u7
#
# CVE List:
# - CVE-2012-5371
# - CVE-2013-0269
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade ruby1.9.1=1.9.2.0-2+deb6u7 -y
|
Cyberwatch/cbw-security-fixes
|
Debian_6_(Squeeze)/i386/2015/DLA-263-1.sh
|
Shell
|
mit
| 644 |
#!/bin/bash
curl -o /usr/local/bin/docker-compose -L "https://github.com/docker/compose/releases/download/1.9.0/docker-compose-$(uname -s)-$(uname -m)"
chmod +x /usr/local/bin/docker-compose
docker-compose -v
|
Praisebetoscience/Ubuntu_init_tools
|
install_docker_compose.sh
|
Shell
|
mit
| 211 |
#!/bin/bash
# This script should be run after installing the libaio RPM or libraries
# A valid large file should be passed to the test.
# These tests will only run correctly if the kernel and libaio has been compiled
# with at least a 3.3.X GCC. Older versions of the compiler will seg fault.
#
# 02/08/04 [email protected]
#
# 04/12/06 a fourth scenario file has been added ltp-aiodio.part4
#
cd `dirname $0`
export LTPROOT=${PWD}
echo $LTPROOT | grep testscripts > /dev/null 2>&1
if [ $? -eq 0 ]; then
cd ..
export LTPROOT=${PWD}
fi
export PATH=$LTPROOT/testcases/bin:$PATH
export TMP=${TMP:=/tmp}
run0=0
runTest=0
nextTest=0
runExtendedStress=0
export TMPBASE="/tmp"
usage()
{
cat <<-END >&2
usage: ${0##*/} [ -f large_filename -b partition] [-o optional partition] [-e 1] [-t 1] [-s 1] [-j 1] [-x 1] or [-a 1]
defaults:
file1=$file1
part1=$part1
ext2=0
ext3=0
jfs=0
xfs=0
example: ${0##*/} -f MyLargeFile -b /dev/hdc1 [-o /dev/hdc2] [-a 1] or
[-e 1] [-x 1] [-j 1] [-t 1]
-o = optional partition allows some of the tests to utilize multiple filesystems to further stress AIO/DIO
-e = test ext2 filesystem
-t = test ext3 filesystem
-s = test ext4 filesystem
-j = test JFS filesystem
-x = test XFS filesystem
or
-a = test all supported filesystems, this will override any other filesystem flags passed.
- Passing 1 to a flag turns on the test for that filesystem; omit the flag to skip it.
- A large file should be passed to fully stress the test. You must pass at least one filesystem to test; you can pass any combination,
but there is no default filesystem. ReiserFS does not support AIO, so these tests do not support ReiserFS.
- WARNING !! The partition you pass will be overwritten. This is a destructive test so only pass a partition where data can be destroyed.
END
exit
}
while getopts :a:b:e:f:t:s:o:x:j: arg
do case $arg in
f) file1=$OPTARG;;
b) part1=$OPTARG;;
o) part2=$OPTARG;;
e) ext2=$OPTARG;;
t) ext3=$OPTARG;;
s) ext4=$OPTARG;;
x) xfs=$OPTARG;;
j) jfs=$OPTARG;;
a) allfs=$OPTARG;;
\?) echo "************** Help Info: ********************"
usage;;
esac
done
if [ ! -n "$file1" ]; then
echo "Missing the large file. You must pass a large filename for testing"
usage;
exit
fi
if [ ! -n "$part1" ]; then
echo "Missing the partition. You must pass a partition for testing"
usage;
exit
fi
if [ -n "$allfs" ]; then
echo "testing ALL supported filesystems"
ext2="1"
ext3="1"
ext4="1"
jfs=""
xfs="1"
echo "test run = $run0"
fi
if [ -n "$ext2" ]; then
echo "** testing ext2 **"
run0=$(($run0+1))
fi
if [ -n "$ext3" ]; then
echo "** testing ext3 **"
run0=$(($run0+1))
fi
if [ -n "$ext4" ]; then
echo "** testing ext4 **"
run0=$(($run0+1))
fi
if [ -n "$xfs" ]; then
echo "** testing xfs **"
run0=$(($run0+1))
fi
if [ -n "$jfs" ]; then
echo "** testing jfs **"
run0=$(($run0+1))
fi
if [ -n "$part2" -a "$run0" -gt 1 ]; then
echo "** Running extended stress testing **"
runExtendedStress=1
elif [ -n "$part2" -a "$run0" -eq 1 ]; then
echo " ** You must pass at least 2 filesystems to run an extended AIO stress test **"
usage;
fi
if [ "$run0" -eq 0 ]; then
echo "No filesystems passed to test"
echo "Please pass at least one supported filesystem or the -a 1 flag to run all "
usage;
fi
mkdir $TMP > /dev/null 2>&1
mkdir $TMP/aiodio > /dev/null 2>&1
mkdir $TMP/aiodio2 > /dev/null 2>&1
while [ "$runTest" -lt "$run0" ]
do
echo "runTest=$runTest run0=$run0 nextTest=$nextTest"
if [ -n "$ext2" -a $nextTest -eq 0 ]; then
echo "***************************"
echo "* Testing ext2 filesystem *"
echo "***************************"
mkfs -t ext2 $part1
mount -t ext2 $part1 $TMP/aiodio
if [ "$runExtendedStress" -eq 1 -a -n "$ext3" ]; then
mkfs -t ext3 $part2
mount -t ext3 $part2 $TMP/aiodio2
elif [ "$runExtendedStress" -eq 1 -a -n "$jfs" ]; then
mkfs.jfs $part2 <testscripts/yesenter.txt
mount -t jfs $part2 $TMP/aiodio2
elif [ "$runExtendedStress" -eq 1 -a -n "$xfs" ]; then
mkfs.xfs -f $part2
mount -t xfs $part2 $TMP/aiodio2
fi
elif [ $nextTest -eq 0 ]; then
nextTest=$(($nextTest+1))
fi
if [ -n "$ext3" -a $nextTest -eq 1 ]; then
echo "***************************"
echo "* Testing ext3 filesystem *"
echo "***************************"
mkfs -t ext3 $part1
mount -t ext3 $part1 $TMP/aiodio
if [ "$runExtendedStress" -eq 1 -a -n "$jfs" ]; then
mkfs.jfs $part2 <testscripts/yesenter.txt
mount -t jfs $part2 $TMP/aiodio2
elif [ "$runExtendedStress" -eq 1 -a -n "$xfs" ]; then
mkfs.xfs -f $part2
mount -t xfs $part2 $TMP/aiodio2
elif [ "$runExtendedStress" -eq 1 -a -n "$ext2" ]; then
mkfs -t ext2 $part2
mount -t ext2 $part2 $TMP/aiodio2
fi
elif [ $nextTest -eq 1 ]; then
nextTest=$(($nextTest+1))
fi
if [ -n "$ext4" -a $nextTest -eq 2 ]; then
echo "***************************"
echo "* Testing ext4 filesystem *"
echo "***************************"
mkfs -t ext4 $part1
mount -t ext4 $part1 $TMP/aiodio
if [ "$runExtendedStress" -eq 1 -a -n "$jfs" ]; then
mkfs.jfs $part2 <testscripts/yesenter.txt
mount -t jfs $part2 $TMP/aiodio2
elif [ "$runExtendedStress" -eq 1 -a -n "$xfs" ]; then
mkfs.xfs -f $part2
mount -t xfs $part2 $TMP/aiodio2
elif [ "$runExtendedStress" -eq 1 -a -n "$ext2" ]; then
mkfs -t ext2 $part2
mount -t ext2 $part2 $TMP/aiodio2
fi
elif [ $nextTest -eq 2 ]; then
nextTest=$(($nextTest+1))
fi
if [ -n "$jfs" -a $nextTest -eq 3 ]; then
echo "**************************"
echo "* Testing jfs filesystem *"
echo "**************************"
mkfs.jfs $part1 <testscripts/yesenter.txt
mount -t jfs $part1 $TMP/aiodio
if [ "$runExtendedStress" -eq 1 -a -n "$ext3" ]; then
mkfs -t ext3 $part2
mount -t ext3 $part2 $TMP/aiodio2
elif [ "$runExtendedStress" -eq 1 -a -n "$xfs" ]; then
mkfs.xfs -f $part2
mount -t xfs $part2 $TMP/aiodio2
elif [ "$runExtendedStress" -eq 1 -a -n "$ext2" ]; then
mkfs -t ext2 $part2
mount -t ext2 $part2 $TMP/aiodio2
fi
elif [ $nextTest -eq 3 ]; then
nextTest=$(($nextTest+1))
fi
if [ -n "$xfs" -a $nextTest -eq 4 ]; then
echo "**************************"
echo "* Testing xfs filesystem *"
echo "**************************"
mkfs.xfs -f $part1
mount -t xfs $part1 $TMP/aiodio
if [ "$runExtendedStress" -eq 1 -a -n "$ext2" ]; then
mkfs -t ext2 $part2
mount -t ext2 $part2 $TMP/aiodio2
elif [ "$runExtendedStress" -eq 1 -a -n "$ext3" ]; then
mkfs -t ext3 $part2
mount -t ext3 $part2 $TMP/aiodio2
elif [ "$runExtendedStress" -eq 1 -a -n "$jfs" ]; then
mkfs.jfs $part2 <testscripts/yesenter.txt
mount -t jfs $part2 $TMP/aiodio2
fi
elif [ $nextTest -eq 4 ]; then
nextTest=$(($nextTest+1))
fi
nextTest=$(($nextTest+1))
runTest=$(($runTest+1))
mkdir $TMP/aiodio/junkdir
dd if=$file1 of=$TMP/aiodio/junkfile bs=8192 conv=block,sync
date
echo "************ Running aio-stress tests "
echo "current working dir = ${PWD}"
${LTPROOT}/bin/rand_lines -g ${LTPROOT}/runtest/ltp-aio-stress.part1 > ${TMPBASE}/ltp-aio-stress.part1
${LTPROOT}/bin/ltp-pan -e -S -a ltpaiostresspart1 -n ltp-aiostresspart1 -l ltpaiostress.logfile -o ltpaiostress.outfile -p -f ${TMPBASE}/ltp-aio-stress.part1 &
wait $!
sync
echo "************ End Running aio-stress tests "
echo ""
if [ "$runExtendedStress" -eq 1 ];then
echo "************ Running EXTENDED aio-stress tests "
${LTPROOT}/bin/rand_lines -g ${LTPROOT}/runtest/ltp-aio-stress.part2 > ${TMPBASE}/ltp-aio-stress.part2
${LTPROOT}/bin/ltp-pan -e -S -a ltpaiostresspart2 -n ltp-aiostresspart2 -l ltpaiostress.logfile -o ltpaiostress.outfile -p -f ${TMPBASE}/ltp-aio-stress.part2 &
wait $!
sync
fi
dd if=$file1 of=$TMP/aiodio/junkfile bs=8192 conv=block,sync
dd if=$file1 of=$TMP/aiodio/fff bs=4096 conv=block,sync
dd if=$file1 of=$TMP/aiodio/ff1 bs=2048 conv=block,sync
dd if=$file1 of=$TMP/aiodio/ff2 bs=1024 conv=block,sync
dd if=$file1 of=$TMP/aiodio/ff3 bs=512 conv=block,sync
echo "************ Running aiocp tests "
${LTPROOT}/bin/rand_lines -g ${LTPROOT}/runtest/ltp-aiodio.part1 > ${TMPBASE}/ltp-aiodio.part1
${LTPROOT}/bin/ltp-pan -e -S -a ltpaiodiopart1 -n ltp-aiodiopart1 -l ltpaiodio1.logfile -o ltpaiodio1.outfile -p -f ${TMPBASE}/ltp-aiodio.part1 &
wait $!
sync
echo "************ End Running aiocp tests "
echo ""
echo "************ Running aiodio_sparse tests "
${LTPROOT}/bin/rand_lines -g ${LTPROOT}/runtest/ltp-aiodio.part2 > ${TMPBASE}/ltp-aiodio.part2
${LTPROOT}/bin/ltp-pan -e -S -a ltpaiodiopart2 -n ltp-aiodiopart2 -l ltpaiodio2.logfile -o ltpaiodio2.outfile -p -f ${TMPBASE}/ltp-aiodio.part2 &
wait $!
sync
echo "************ End Running aiodio_sparse tests "
echo ""
if [ "$runExtendedStress" -eq 1 ];then
echo "************ Running fsx-linux tests "
${LTPROOT}/bin/rand_lines -g ${LTPROOT}/runtest/ltp-aiodio.part3 > ${TMPBASE}/ltp-aiodio.part3
${LTPROOT}/bin/ltp-pan -e -S -a ltpaiodiopart3 -n ltp-aiodiopart3 -l ltpaiodio3.logfile -o ltpaiodio3.outfile -p -f ${TMPBASE}/ltp-aiodio.part3 &
wait $!
sync
fi
dd if=$file1 of=$TMP/aiodio/file2 bs=2048 conv=block,sync
dd if=$file1 of=$TMP/aiodio/file3 bs=1024 conv=block,sync
dd if=$file1 of=$TMP/aiodio/file4 bs=512 conv=block,sync
dd if=$file1 of=$TMP/aiodio/file5 bs=4096 conv=block,sync
echo "************ Running dio_sparse & miscellaneous tests "
${LTPROOT}/bin/rand_lines -g ${LTPROOT}/runtest/ltp-aiodio.part4 > ${TMPBASE}/ltp-aiodio.part4
${LTPROOT}/bin/ltp-pan -e -S -a ltpaiodiopart4 -n ltp-aiodiopart4 -l ltpaiodio4.logfile -o ltpaiodio4.outfile -p -f ${TMPBASE}/ltp-aiodio.part4 &
wait $!
sync
echo "************ End Running dio_sparse & miscellaneous tests "
echo ""
echo "************ Cleaning/Umounting"
rm -f $TMP/aiodio/fff
rm -f $TMP/aiodio/ff1
rm -f $TMP/aiodio/ff2
rm -f $TMP/aiodio/ff3
rm -f $TMP/aiodio/junkfile*
rm -f $TMP/aiodio/file*
rm -rf $TMP/aiodio/junkdir
umount $part1
if [ "$runExtendedStress" -eq 1 ]; then
umount $part2
fi
done
date
echo "AIO/DIO test complete "
|
qilongyun/ltp
|
testscripts/ltp-aiodio.sh
|
Shell
|
gpl-2.0
| 10,287 |
#!/sbin/sh
# _ _ _ _ __ _
# / \ _ __ ___| |__ (_) |/ /___ _ __ _ __ ___| |
# / _ \ | '__/ __| '_ \| | ' // _ \ '__| '_ \ / _ \ |
# / ___ \| | | (__| | | | | . \ __/ | | | | | __/ |
# /_/ \_\_| \___|_| |_|_|_|\_\___|_| |_| |_|\___|_|
#
# Copyright 2014-2015 Łukasz "JustArchi" Domeradzki
# Contact: [email protected]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
# Device-specific
KERNEL="/dev/block/mmcblk0p5" # THIS IS FOR GALAXY S3 ONLY
PARSERAMDISK=1 # If we don't need to worry about compressed ramdisk (i.e. putting modules inside), we can skip it
EXTRACT_RAMDISK() {
# $1 - Raw ramdisk source (file)
# $2 - Extracted ramdisk target (folder)
echo "INFO: Extracting $1 ramdisk to $2 folder"
mkdir -p "$2"
cd "$2" || return 1
if [[ ! -f "$1" ]]; then
echo "ERROR: Ramdisk $1 not found!"
return 1
fi
echo "INFO: Detecting $1 ramdisk format..."
if gunzip --help 2>&1 | grep -q "\-t" && gunzip -t "$1"; then
echo "INFO: GZIP format detected"
CBIN="gzip -9"
DBIN="gunzip -c"
elif lzop --help 2>&1 | grep -q "\-t" && lzop -t "$1"; then
echo "INFO: LZO format detected"
CBIN="lzop -9"
DBIN="lzop -dc"
elif xz --help 2>&1 | grep -q "\-t" && xz -t "$1"; then
echo "INFO: XZ format detected"
CBIN="xz -9"
DBIN="xz -dc"
elif lzma --help 2>&1 | grep -q "\-t" && lzma -t "$1"; then
echo "INFO: LZMA format detected"
CBIN="lzma -9"
DBIN="lzma -dc"
else
CBIN="raw"
DBIN="raw"
echo "INFO: Could not detect any known ramdisk compression format!"
echo "INFO: Will try uncompressed mode!"
fi
if [[ "$DBIN" != "raw" ]]; then
$DBIN "$1" | cpio -i || return 1
else
cpio -i < "$1" || return 1
fi
echo "INFO: Success!"
}
REPACK_RAMDISK() {
# $1 - Extracted ramdisk source (folder)
# $2 - Repacked ramdisk target (file)
# $3 - Compression type (optional)
cd "$1" || return 1
# Find which compression we should use
local LOCAL_CBIN="raw" # Default to raw
if [[ -n "$3" ]]; then
LOCAL_CBIN="$3" # If there is argument passed, use it
elif [[ -n "$CBIN" ]]; then
LOCAL_CBIN="$CBIN" # Otherwise check if we have global $CBIN declared
fi
echo "INFO: Repacking $1 folder into $2 ramdisk using $3 compression type"
if [[ "$LOCAL_CBIN" != "raw" ]]; then
find . | cpio -o -H newc | $LOCAL_CBIN > "$2"
else
find . | cpio -o -H newc > "$2"
fi
rm -rf "$1" # We don't need you anymore
echo "INFO: Success!"
}
PARSE_RAMDISK() {
# $1 - Extracted ramdisk source (folder)
cd "$1" || return 1
# Check if ramdisk makes sense
if [[ ! -f "init.rc" ]]; then
echo "ERROR: Ramdisk does not include init.rc!"
return 1
fi
# Detect AOSP/Samsung variant based on existing modules in ramdisk
if [[ -d "lib/modules" ]]; then
echo "INFO: Detected Samsung variant"
# Remove all current modules from ramdisk
find "lib/modules" -type f -name "*.ko" | while read line; do
rm -f "$line"
done
# Copy all new ArchiKernel modules from system to ramdisk
find "/system/lib/modules" -type f -name "*.ko" | while read line; do
cp "$line" "lib/modules/"
done
# We're on Sammy so we have no use of system modules, delete them to avoid confusion
rm -rf "/system/lib/modules"
else
echo "INFO: Detected AOSP variant"
fi
# If we have any ramdisk content, write it
if [[ -d "$AK/ramdisk" ]]; then
echo "INFO: Overwriting ramdisk with custom content"
find "$AK/ramdisk" -mindepth 1 -maxdepth 1 | while read line; do
cp -pR "$line" .
done
fi
# If we have any executable files/folders, chmod them
TO755="sbin res/synapse/actions"
for FILE in $TO755; do
if [[ -e "$FILE" ]]; then
chmod -R 755 "$FILE"
fi
done
# Add ArchiKernel Init if required
if grep -q "ArchiKernel-Init" "init.rc"; then
echo "INFO: User is updating the kernel!"
else
echo "INFO: User is flashing the kernel for the first time!"
{
echo
echo "service ArchiKernel-Init /sbin/ArchiKernel-Init"
echo " class main"
echo " user root"
echo " group root"
echo " oneshot"
} >> "init.rc"
fi
}
# Global
AK="/tmp/archikernel"
AKDROP="$AK/drop"
exec 1>"$AK/ArchiKernel.log"
exec 2>&1
date
echo "INFO: ArchiKernel flasher ready!"
echo "INFO: Safety check: ON, flasher will immediately terminate in case of ANY error"
if [[ ! -f "$AK/mkbootimg-static" || ! -f "$AK/unpackbootimg-static" ]]; then
echo "ERROR: No bootimg tools?!"
exit 1
fi
chmod 755 "$AK/mkbootimg-static" "$AK/unpackbootimg-static"
echo "INFO: Pulling boot.img from $KERNEL"
if which dump_image >/dev/null; then
dump_image "$KERNEL" "$AK/boot.img"
else
dd if="$KERNEL" of="$AK/boot.img"
fi
echo "INFO: Unpacking pulled boot.img"
mkdir -p "$AKDROP"
"$AK/unpackbootimg-static" -i "$AK/boot.img" -o "$AKDROP"
RAMDISK1="$AKDROP/ramdisk1"
RAMDISK2="$AKDROP/ramdisk2"
if [[ -f "$AKDROP/boot.img-ramdisk.gz" ]]; then
echo "INFO: Ramdisk found!"
EXTRACT_RAMDISK "$AKDROP/boot.img-ramdisk.gz" "$RAMDISK1"
RAMDISK1_CBIN="$CBIN"
# Detect kernel + recovery combo
if [[ -f "$RAMDISK1/sbin/ramdisk.cpio" ]]; then
echo "INFO: Detected kernel + recovery combo!"
EXTRACT_RAMDISK "$RAMDISK1/sbin/ramdisk.cpio" "$RAMDISK2"
RAMDISK2_CBIN="$CBIN"
PARSE_RAMDISK "$RAMDISK2"
REPACK_RAMDISK "$RAMDISK2" "$RAMDISK1/sbin/ramdisk.cpio" "$RAMDISK2_CBIN"
else
echo "INFO: Detected classic kernel variant (non-recovery combo)"
PARSE_RAMDISK "$RAMDISK1"
fi
REPACK_RAMDISK "$RAMDISK1" "$AKDROP/boot.img-ramdisk.gz" "$RAMDISK1_CBIN"
else
echo "ERROR: No ramdisk?!"
exit 2
fi
echo "INFO: Combining ArchiKernel zImage and current kernel ramdisk"
"$AK/mkbootimg-static" \
--kernel "$AK/zImage" \
--ramdisk "$AKDROP/boot.img-ramdisk.gz" \
--cmdline "$(cat $AKDROP/boot.img-cmdline)" \
--board "$(cat $AKDROP/boot.img-board)" \
--base "$(cat $AKDROP/boot.img-base)" \
--pagesize "$(cat $AKDROP/boot.img-pagesize)" \
--kernel_offset "$(cat $AKDROP/boot.img-kerneloff)" \
--ramdisk_offset "$(cat $AKDROP/boot.img-ramdiskoff)" \
--tags_offset "$(cat $AKDROP/boot.img-tagsoff)" \
--output "$AK/newboot.img"
echo "INFO: newboot.img ready!"
echo "INFO: Flashing newboot.img on $KERNEL"
if which flash_image >/dev/null; then
flash_image "$KERNEL" "$AK/newboot.img"
else
dd if="$AK/newboot.img" of="$KERNEL"
fi
echo "SUCCESS: Everything finished successfully!"
touch "$AK/_OK"
date
exit 0
|
ea4862/ArchiKernel_cm12.1
|
archikernel/flasher/core/init.sh
|
Shell
|
gpl-2.0
| 6,823 |
#!/bin/sh -l
cd tests
php writing.php
php reading.php
|
maplechori/php_stata
|
entrypoint.sh
|
Shell
|
gpl-2.0
| 55 |
#!/bin/bash
. /usr/local/etc/firewall/configurations.txt
. /usr/local/etc/firewall/firewall.conf
add_poor_ip(){
$IPSET -t --list $POOR_IP_SET &> /dev/null
cmd_ret=$?
if [ $cmd_ret -eq 1 ] && [ ! -e $POOR_REMOVED ] || [ ! -e $POOR_ADDED ]
then
#$IPSET create $POOR_IP_SET hash:net
while read line
do
$IPSET add $POOR_IP_SET $line
done < $TMP_FILE
#else
fi
if [ -s $POOR_REMOVED ]
then
while read line
do
$IPSET del $POOR_IP_SET $line
done < $POOR_REMOVED
else
echo "nothing to do"
fi
if [ -s $POOR_ADDED ]
then
while read line
do
$IPSET add $POOR_IP_SET $line
done < $POOR_ADDED
else
echo "nothing to do"
fi
#fi
}
add_ransom_ip(){
$IPSET -t --list $RANSOM_IP_SET &> /dev/null
cmd_ret=$?
if [ $cmd_ret -eq 1 ] && [ ! -e $RANSOM_REMOVED ] || [ ! -e $RANSOM_ADDED ]
then
#$IPSET create $RANSOM_IP_SET hash:net
while read line
do
$IPSET add $RANSOM_IP_SET $line
done < $TMP_RANSOM
#else
fi
if [ -s $RANSOM_REMOVED ]
then
while read line
do
$IPSET del $RANSOM_IP_SET $line
done < $RANSOM_REMOVED
else
echo "nothing to do"
fi
if [ -s $RANSOM_ADDED ]
then
while read line
do
$IPSET add $RANSOM_IP_SET $line
done < $RANSOM_ADDED
else
echo "nothing to do"
fi
#fi
}
case $1 in
ransom_ip)
add_ransom_ip
;;
poor_ips)
add_poor_ip
;;
esac
$IPSET save > $IPSET_RULESET
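# Usage: pass one of the set names handled by the case statement above:
#   ./add_ipset.sh poor_ips
#   ./add_ipset.sh ransom_ip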
|
lradaelli85/iptables-firewall
|
add_ipset.sh
|
Shell
|
gpl-2.0
| 1,387 |
#!/bin/bash
# -*- mode: Bash; tab-width: 4; indent-tabs-mode: t; -*-
# vim:shiftwidth=4:softtabstop=4:tabstop=4:
#
# Tests for multiple mount protection (MMP) feature.
#
# Run select tests by setting ONLY, or as arguments to the script.
# Skip specific tests by setting EXCEPT.
#
# e.g. ONLY="5 6" or ONLY="`seq 8 11`" or EXCEPT="7"
set -e
ONLY=${ONLY:-"$*"}
# bug number for skipped test:
ALWAYS_EXCEPT=${ALWAYS_EXCEPT:-"$MMP_EXCEPT"}
# UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
SRCDIR=$(cd $(dirname $0); echo $PWD)
export PATH=$PWD/$SRCDIR:$SRCDIR:$SRCDIR/../utils:$PATH:/sbin
LUSTRE=${LUSTRE:-$(cd $(dirname $0)/..; echo $PWD)}
. $LUSTRE/tests/test-framework.sh
init_test_env $@
. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
init_logging
remote_mds_nodsh && skip "remote MDS with nodsh" && exit 0
remote_ost_nodsh && skip "remote OST with nodsh" && exit 0
# unmount and cleanup the Lustre filesystem
MMP_RESTORE_MOUNT=false
if is_mounted $MOUNT || is_mounted $MOUNT2; then
cleanupall
MMP_RESTORE_MOUNT=true
fi
SAVED_FAIL_ON_ERROR=$FAIL_ON_ERROR
FAIL_ON_ERROR=false
build_test_filter
# Get the failover facet.
get_failover_facet() {
local facet=$1
local failover_facet=${facet}failover
local host=$(facet_host $facet)
local failover_host=$(facet_host $failover_facet)
[ -z "$failover_host" -o "$host" = "$failover_host" ] && \
failover_facet=$facet
echo $failover_facet
}
# Initiate the variables for Lustre servers and targets.
init_vars() {
MMP_MDS=${MMP_MDS:-$SINGLEMDS}
MMP_MDS_FAILOVER=$(get_failover_facet $MMP_MDS)
local mds_num=$(echo $MMP_MDS | tr -d "mds")
MMP_MDSDEV=$(mdsdevname $mds_num)
MMP_OSS=${MMP_OSS:-ost1}
MMP_OSS_FAILOVER=$(get_failover_facet $MMP_OSS)
local oss_num=$(echo $MMP_OSS | tr -d "ost")
MMP_OSTDEV=$(ostdevname $oss_num)
}
# Stop the MDS and OSS services on the primary or failover servers.
stop_services() {
local flavor=$1
shift
local opts="$@"
local mds_facet
local oss_facet
if [ "$flavor" = "failover" ]; then
mds_facet=$MMP_MDS_FAILOVER
oss_facet=$MMP_OSS_FAILOVER
else
mds_facet=$MMP_MDS
oss_facet=$MMP_OSS
fi
stop $mds_facet $opts || return ${PIPESTATUS[0]}
stop $oss_facet $opts || return ${PIPESTATUS[0]}
}
# Enable the MMP feature.
enable_mmp() {
local facet=$1
local device=$2
do_facet $facet "$TUNE2FS -O mmp $device"
return ${PIPESTATUS[0]}
}
# Disable the MMP feature.
disable_mmp() {
local facet=$1
local device=$2
do_facet $facet "$TUNE2FS -O ^mmp $device"
return ${PIPESTATUS[0]}
}
# Set the MMP block to 'fsck' state
mark_mmp_block() {
local facet=$1
local device=$2
do_facet $facet "$LUSTRE/tests/mmp_mark.sh $device"
return ${PIPESTATUS[0]}
}
# Reset the MMP block (if any) back to the clean state.
reset_mmp_block() {
local facet=$1
local device=$2
do_facet $facet "$TUNE2FS -f -E clear-mmp $device"
return ${PIPESTATUS[0]}
}
# Check whether the MMP feature is enabled or not.
mmp_is_enabled() {
local facet=$1
local device=$2
do_facet $facet "$DUMPE2FS -h $device | grep mmp"
return ${PIPESTATUS[0]}
}
# Get MMP update interval (in seconds) from the Lustre server target.
get_mmp_update_interval() {
local facet=$1
local device=$2
local interval
interval=$(do_facet $facet \
"$DEBUGFS -c -R dump_mmp $device 2>$TMP/mmp.debugfs.msg" |
awk 'tolower($0) ~ /update.interval/ { print $NF }')
[ -z "$interval" ] && interval=5 &&
do_facet $facet cat $TMP/mmp.debugfs.msg &&
echo "$facet:$device: assume update interval=$interval" 1>&2 ||
echo "$facet:$device: got actual update interval=$interval" 1>&2
echo $interval
}
# Get MMP check interval (in seconds) from the Lustre server target.
get_mmp_check_interval() {
local facet=$1
local device=$2
local interval
interval=$(do_facet $facet \
"$DEBUGFS -c -R dump_mmp $device 2>$TMP/mmp.debugfs.msg" |
awk 'tolower($0) ~ /check.interval/ { print $NF }')
[ -z "$interval" ] && interval=5 &&
do_facet $facet cat $TMP/mmp.debugfs.msg &&
echo "$facet:$device: assume check interval=$interval" 1>&2 ||
echo "$facet:$device: got actual check interval=$interval" 1>&2
echo $interval
}
# Adjust the MMP update interval (in seconds) on the Lustre server target.
# Specifying an interval of 0 means to use the default interval.
set_mmp_update_interval() {
local facet=$1
local device=$2
local interval=${3:-0}
do_facet $facet "$TUNE2FS -E mmp_update_interval=$interval $device"
return ${PIPESTATUS[0]}
}
# Enable the MMP feature on the Lustre server targets.
mmp_init() {
init_vars
if [ $(facet_fstype $MMP_MDS) != ldiskfs ]; then
skip "Only applicable to ldiskfs-based MDTs"
exit
fi
if [ $(facet_fstype $MMP_OSS) != ldiskfs ]; then
skip "Only applicable to ldiskfs-based OSTs"
exit
fi
# The MMP feature is automatically enabled by mkfs.lustre for
# new file system at format time if failover is being used.
# Otherwise, the Lustre administrator has to manually enable
# this feature when the file system is unmounted.
local var=${MMP_MDS}failover_HOST
if [ -z "${!var}" ]; then
log "Failover is not used on MDS, enabling MMP manually..."
enable_mmp $MMP_MDS $MMP_MDSDEV || \
error "failed to enable MMP on $MMP_MDSDEV on $MMP_MDS"
fi
var=${MMP_OSS}failover_HOST
if [ -z "${!var}" ]; then
log "Failover is not used on OSS, enabling MMP manually..."
enable_mmp $MMP_OSS $MMP_OSTDEV || \
error "failed to enable MMP on $MMP_OSTDEV on $MMP_OSS"
fi
# check whether the MMP feature is enabled or not
mmp_is_enabled $MMP_MDS $MMP_MDSDEV || \
error "MMP was not enabled on $MMP_MDSDEV on $MMP_MDS"
mmp_is_enabled $MMP_OSS $MMP_OSTDEV || \
error "MMP was not enabled on $MMP_OSTDEV on $MMP_OSS"
}
# Disable the MMP feature on the Lustre server targets
# which did not use failover.
mmp_fini() {
local var=${MMP_MDS}failover_HOST
if [ -z "${!var}" ]; then
log "Failover is not used on MDS, disabling MMP manually..."
disable_mmp $MMP_MDS $MMP_MDSDEV || \
error "failed to disable MMP on $MMP_MDSDEV on $MMP_MDS"
mmp_is_enabled $MMP_MDS $MMP_MDSDEV && \
error "MMP was not disabled on $MMP_MDSDEV on $MMP_MDS"
fi
var=${MMP_OSS}failover_HOST
if [ -z "${!var}" ]; then
log "Failover is not used on OSS, disabling MMP manually..."
disable_mmp $MMP_OSS $MMP_OSTDEV || \
error "failed to disable MMP on $MMP_OSTDEV on $MMP_OSS"
mmp_is_enabled $MMP_OSS $MMP_OSTDEV && \
error "MMP was not disabled on $MMP_OSTDEV on $MMP_OSS"
fi
return 0
}
# Mount the shared target on the failover server some interval after it has
# been mounted on the primary server.
mount_after_interval_sub() {
local interval=$1
shift
local device=$1
shift
local facet=$1
shift
local opts="$@"
local failover_facet=$(get_failover_facet $facet)
local mount_pid
local first_mount_rc=0
local second_mount_rc=0
log "Mounting $device on $facet..."
start $facet $device $opts &
mount_pid=$!
if [ $interval -ne 0 ]; then
log "sleep $interval..."
sleep $interval
fi
log "Mounting $device on $failover_facet..."
start $failover_facet $device $opts
second_mount_rc=${PIPESTATUS[0]}
wait $mount_pid
first_mount_rc=${PIPESTATUS[0]}
if [ $second_mount_rc -eq 0 -a $first_mount_rc -eq 0 ]; then
error_noexit "one mount delayed by mmp interval $interval should fail"
stop $facet || return ${PIPESTATUS[0]}
[ "$failover_facet" != "$facet" ] && stop $failover_facet || \
return ${PIPESTATUS[0]}
return 1
elif [ $second_mount_rc -ne 0 -a $first_mount_rc -ne 0 ]; then
error_noexit "mount failure on failover pair $facet,$failover_facet"
return $first_mount_rc
fi
return 0
}
mount_after_interval() {
local mdt_interval=$1
local ost_interval=$2
local rc=0
mount_after_interval_sub $mdt_interval $MMP_MDSDEV $MMP_MDS \
$MDS_MOUNT_OPTS || return ${PIPESTATUS[0]}
echo
mount_after_interval_sub $ost_interval $MMP_OSTDEV $MMP_OSS $OST_MOUNT_OPTS
rc=${PIPESTATUS[0]}
if [ $rc -ne 0 ]; then
stop $MMP_MDS
return $rc
fi
return 0
}
# Mount the shared target on the failover server
# during unmounting it on the primary server.
mount_during_unmount() {
local device=$1
shift
local facet=$1
shift
local mnt_opts="$@"
local failover_facet=$(get_failover_facet $facet)
local unmount_pid
local unmount_rc=0
local mount_rc=0
log "Mounting $device on $facet..."
start $facet $device $mnt_opts || return ${PIPESTATUS[0]}
log "Unmounting $device on $facet..."
stop $facet &
unmount_pid=$!
log "Mounting $device on $failover_facet..."
start $failover_facet $device $mnt_opts
mount_rc=${PIPESTATUS[0]}
wait $unmount_pid
unmount_rc=${PIPESTATUS[0]}
if [ $mount_rc -eq 0 ]; then
error_noexit "mount during unmount of the first filesystem should fail"
stop $failover_facet || return ${PIPESTATUS[0]}
return 1
fi
if [ $unmount_rc -ne 0 ]; then
error_noexit "unmount the $device on $facet should succeed"
return $unmount_rc
fi
return 0
}
# Mount the shared target on the failover server
# after clean unmounting it on the primary server.
mount_after_unmount() {
local device=$1
shift
local facet=$1
shift
local mnt_opts="$@"
local failover_facet=$(get_failover_facet $facet)
log "Mounting $device on $facet..."
start $facet $device $mnt_opts || return ${PIPESTATUS[0]}
log "Unmounting $device on $facet..."
stop $facet || return ${PIPESTATUS[0]}
log "Mounting $device on $failover_facet..."
start $failover_facet $device $mnt_opts || return ${PIPESTATUS[0]}
return 0
}
# Mount the shared target on the failover server after rebooting
# the primary server.
mount_after_reboot() {
local device=$1
shift
local facet=$1
shift
local mnt_opts="$@"
local failover_facet=$(get_failover_facet $facet)
local rc=0
log "Mounting $device on $facet..."
start $facet $device $mnt_opts || return ${PIPESTATUS[0]}
if [ "$FAILURE_MODE" = "HARD" ]; then
shutdown_facet $facet
reboot_facet $facet
wait_for_facet $facet
else
replay_barrier_nodf $facet
fi
log "Mounting $device on $failover_facet..."
start $failover_facet $device $mnt_opts
rc=${PIPESTATUS[0]}
if [ $rc -ne 0 ]; then
error_noexit "mount $device on $failover_facet should succeed"
stop $facet || return ${PIPESTATUS[0]}
return $rc
fi
return 0
}
# Run e2fsck on the Lustre server target.
run_e2fsck() {
local facet=$1
shift
local device=$1
shift
local opts="$@"
echo "Running e2fsck on the device $device on $facet..."
do_facet $facet "$E2FSCK $opts $device"
return ${PIPESTATUS[0]}
}
# Check whether there are failover pairs for MDS and OSS servers.
check_failover_pair() {
[ "$MMP_MDS" = "$MMP_MDS_FAILOVER" -o "$MMP_OSS" = "$MMP_OSS_FAILOVER" ] \
&& { skip_env "failover pair is needed" && return 1; }
return 0
}
mmp_init
# Test 1 - two mounts at the same time.
test_1() {
check_failover_pair || return 0
mount_after_interval 0 0 || return ${PIPESTATUS[0]}
stop_services primary || return ${PIPESTATUS[0]}
}
run_test 1 "two mounts at the same time"
# Test 2 - one mount delayed by mmp update interval.
test_2() {
check_failover_pair || return 0
local mdt_interval=$(get_mmp_update_interval $MMP_MDS $MMP_MDSDEV)
local ost_interval=$(get_mmp_update_interval $MMP_OSS $MMP_OSTDEV)
mount_after_interval $mdt_interval $ost_interval || return ${PIPESTATUS[0]}
stop_services primary || return ${PIPESTATUS[0]}
}
run_test 2 "one mount delayed by mmp update interval"
# Test 3 - one mount delayed by 2x mmp check interval.
test_3() {
check_failover_pair || return 0
local mdt_interval=$(get_mmp_check_interval $MMP_MDS $MMP_MDSDEV)
local ost_interval=$(get_mmp_check_interval $MMP_OSS $MMP_OSTDEV)
mdt_interval=$((2 * $mdt_interval + 1))
ost_interval=$((2 * $ost_interval + 1))
mount_after_interval $mdt_interval $ost_interval || return ${PIPESTATUS[0]}
stop_services primary || return ${PIPESTATUS[0]}
}
run_test 3 "one mount delayed by 2x mmp check interval"
# Test 4 - one mount delayed by > 2x mmp check interval.
test_4() {
check_failover_pair || return 0
local mdt_interval=$(get_mmp_check_interval $MMP_MDS $MMP_MDSDEV)
local ost_interval=$(get_mmp_check_interval $MMP_OSS $MMP_OSTDEV)
mdt_interval=$((4 * $mdt_interval))
ost_interval=$((4 * $ost_interval))
mount_after_interval $mdt_interval $ost_interval || return ${PIPESTATUS[0]}
stop_services primary || return ${PIPESTATUS[0]}
}
run_test 4 "one mount delayed by > 2x mmp check interval"
# Test 5 - mount during unmount of the first filesystem.
test_5() {
local rc=0
check_failover_pair || return 0
mount_during_unmount $MMP_MDSDEV $MMP_MDS $MDS_MOUNT_OPTS || \
return ${PIPESTATUS[0]}
echo
start $MMP_MDS $MMP_MDSDEV $MDS_MOUNT_OPTS || return ${PIPESTATUS[0]}
mount_during_unmount $MMP_OSTDEV $MMP_OSS $OST_MOUNT_OPTS
rc=${PIPESTATUS[0]}
if [ $rc -ne 0 ]; then
stop $MMP_MDS || return ${PIPESTATUS[0]}
return $rc
fi
stop $MMP_MDS || return ${PIPESTATUS[0]}
}
run_test 5 "mount during unmount of the first filesystem"
# Test 6 - mount after clean unmount.
test_6() {
local rc=0
check_failover_pair || return 0
mount_after_unmount $MMP_MDSDEV $MMP_MDS $MDS_MOUNT_OPTS || \
return ${PIPESTATUS[0]}
echo
mount_after_unmount $MMP_OSTDEV $MMP_OSS $OST_MOUNT_OPTS
rc=${PIPESTATUS[0]}
if [ $rc -ne 0 ]; then
stop $MMP_MDS_FAILOVER || return ${PIPESTATUS[0]}
return $rc
fi
stop_services failover || return ${PIPESTATUS[0]}
}
run_test 6 "mount after clean unmount"
# Test 7 - mount after reboot.
test_7() {
local rc=0
check_failover_pair || return 0
mount_after_reboot $MMP_MDSDEV $MMP_MDS $MDS_MOUNT_OPTS || \
return ${PIPESTATUS[0]}
echo
mount_after_reboot $MMP_OSTDEV $MMP_OSS $OST_MOUNT_OPTS
rc=${PIPESTATUS[0]}
if [ $rc -ne 0 ]; then
stop $MMP_MDS || return ${PIPESTATUS[0]}
stop $MMP_MDS_FAILOVER || return ${PIPESTATUS[0]}
return $rc
fi
stop_services failover || return ${PIPESTATUS[0]}
stop_services primary || return ${PIPESTATUS[0]}
}
run_test 7 "mount after reboot"
# Test 8 - mount during e2fsck (should never succeed).
test_8() {
local e2fsck_pid
local saved_interval
local new_interval
# After writing a new sequence number into the MMP block, e2fsck will
# sleep at least (2 * new_interval + 1) seconds before it goes into
# e2fsck passes.
new_interval=30
# MDT
saved_interval=$(get_mmp_update_interval $MMP_MDS $MMP_MDSDEV)
set_mmp_update_interval $MMP_MDS $MMP_MDSDEV $new_interval
run_e2fsck $MMP_MDS $MMP_MDSDEV "-fy" &
e2fsck_pid=$!
sleep 5
if start $MMP_MDS_FAILOVER $MMP_MDSDEV $MDS_MOUNT_OPTS; then
error_noexit \
"mount $MMP_MDSDEV on $MMP_MDS_FAILOVER should fail"
stop $MMP_MDS_FAILOVER || return ${PIPESTATUS[0]}
set_mmp_update_interval $MMP_MDS $MMP_MDSDEV $saved_interval
return 1
fi
wait $e2fsck_pid
set_mmp_update_interval $MMP_MDS $MMP_MDSDEV $saved_interval
# OST
echo
saved_interval=$(get_mmp_update_interval $MMP_OSS $MMP_OSTDEV)
set_mmp_update_interval $MMP_OSS $MMP_OSTDEV $new_interval
run_e2fsck $MMP_OSS $MMP_OSTDEV "-fy" &
e2fsck_pid=$!
sleep 5
if start $MMP_OSS_FAILOVER $MMP_OSTDEV $OST_MOUNT_OPTS; then
error_noexit \
"mount $MMP_OSTDEV on $MMP_OSS_FAILOVER should fail"
stop $MMP_OSS_FAILOVER || return ${PIPESTATUS[0]}
set_mmp_update_interval $MMP_OSS $MMP_OSTDEV $saved_interval
return 2
fi
wait $e2fsck_pid
set_mmp_update_interval $MMP_OSS $MMP_OSTDEV $saved_interval
return 0
}
run_test 8 "mount during e2fsck"
# Test 9 - mount after aborted e2fsck (should never succeed).
test_9() {
start $MMP_MDS $MMP_MDSDEV $MDS_MOUNT_OPTS || return ${PIPESTATUS[0]}
if ! start $MMP_OSS $MMP_OSTDEV $OST_MOUNT_OPTS; then
local rc=${PIPESTATUS[0]}
stop $MMP_MDS || return ${PIPESTATUS[0]}
return $rc
fi
stop_services primary || return ${PIPESTATUS[0]}
mark_mmp_block $MMP_MDS $MMP_MDSDEV || return ${PIPESTATUS[0]}
log "Mounting $MMP_MDSDEV on $MMP_MDS..."
if start $MMP_MDS $MMP_MDSDEV $MDS_MOUNT_OPTS; then
error_noexit "mount $MMP_MDSDEV on $MMP_MDS should fail"
stop $MMP_MDS || return ${PIPESTATUS[0]}
return 1
fi
reset_mmp_block $MMP_MDS $MMP_MDSDEV || return ${PIPESTATUS[0]}
mark_mmp_block $MMP_OSS $MMP_OSTDEV || return ${PIPESTATUS[0]}
log "Mounting $MMP_OSTDEV on $MMP_OSS..."
if start $MMP_OSS $MMP_OSTDEV $OST_MOUNT_OPTS; then
error_noexit "mount $MMP_OSTDEV on $MMP_OSS should fail"
stop $MMP_OSS || return ${PIPESTATUS[0]}
return 2
fi
reset_mmp_block $MMP_OSS $MMP_OSTDEV || return ${PIPESTATUS[0]}
return 0
}
run_test 9 "mount after aborted e2fsck"
# Test 10 - e2fsck with mounted filesystem.
test_10() {
local rc=0
log "Mounting $MMP_MDSDEV on $MMP_MDS..."
start $MMP_MDS $MMP_MDSDEV $MDS_MOUNT_OPTS || return ${PIPESTATUS[0]}
run_e2fsck $MMP_MDS_FAILOVER $MMP_MDSDEV "-fn"
rc=${PIPESTATUS[0]}
# e2fsck is called with the -n option (open the filesystem read-only), so
# 0 (no errors) and 4 (file system errors left uncorrected) are the only
# acceptable exit codes in this case
if [ $rc -ne 0 ] && [ $rc -ne 4 ]; then
error_noexit "e2fsck $MMP_MDSDEV on $MMP_MDS_FAILOVER returned $rc"
stop $MMP_MDS || return ${PIPESTATUS[0]}
return $rc
fi
log "Mounting $MMP_OSTDEV on $MMP_OSS..."
start $MMP_OSS $MMP_OSTDEV $OST_MOUNT_OPTS
rc=${PIPESTATUS[0]}
if [ $rc -ne 0 ]; then
stop $MMP_MDS || return ${PIPESTATUS[0]}
return $rc
fi
run_e2fsck $MMP_OSS_FAILOVER $MMP_OSTDEV "-fn"
rc=${PIPESTATUS[0]}
if [ $rc -ne 0 ] && [ $rc -ne 4 ]; then
error_noexit "e2fsck $MMP_OSTDEV on $MMP_OSS_FAILOVER returned $rc"
fi
stop_services primary || return ${PIPESTATUS[0]}
return 0
}
run_test 10 "e2fsck with mounted filesystem"
mmp_fini
FAIL_ON_ERROR=$SAVED_FAIL_ON_ERROR
complete $SECONDS
$MMP_RESTORE_MOUNT && setupall
exit_status
|
HPCStack/lustre-release
|
lustre/tests/mmp.sh
|
Shell
|
gpl-2.0
| 18,809 |
#!/usr/bin/env bash
cd "$(dirname "${BASH_SOURCE}")"
git pull origin master
function doIt() {
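# Sync everything except repo metadata and docs into $HOME; --no-perms
# keeps rsync from clobbering the permissions of files that already exist.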
rsync --exclude ".git/" --exclude ".DS_Store" --exclude "bootstrap.sh" \
--exclude "README.md" --exclude "LICENSE" \
-av --no-perms . ~
source ~/.profile
}
if [ "$1" == "--force" -o "$1" == "-f" ]; then
doIt
else
read -p "Updating vim-config may overwrite existing customized configuration. Are you sure to proceed? (y/n) " -n 1
echo
if [[ $REPLY =~ ^[Yy]$ ]]; then
doIt
fi
fi
unset doIt
|
sergiogoro/dotFiles
|
bootstrap.sh
|
Shell
|
gpl-2.0
| 534 |
#!/bin/bash
# install nfs tool
sudo apt-get install nfs-kernel-server uml-utilities bridge-utils
# sudo echo "/srv/nfsroot/ *(rw,sync,no_root_squash,no_subtree_check)" >> /etc/exports
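# NOTE: the line above is commented out, likely because the redirection in
# "sudo echo ... >> /etc/exports" runs as the invoking user, not root; add the
# exports entry manually (or with "echo ... | sudo tee -a /etc/exports") first.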
sudo exportfs -r
sudo /etc/init.d/nfs-kernel-server restart
# set tap0
sudo tunctl -u $USER -t tap0
sudo ifconfig tap0 192.168.123.1
|
cailiwei/buildroot
|
images/mini2440/nfs_ready.sh
|
Shell
|
gpl-2.0
| 321 |
#! /bin/sh
# Copyright (C) 2011-2017 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Test more basic functionalities of the 'py-compile' script,
# with "believable" python sources. See also related test
# 'py-compile-basic2.sh'.
required=python
. test-init.sh
cp "$am_scriptdir/py-compile" . \
|| fatal_ "failed to fetch auxiliary script py-compile"
cat > foo.py <<'END'
# Try out some non-trivial syntax in here.
'''Module docstring'''
def foo (*args, **kwargs):
"""Function docstring
with embedded newline"""
return 1
class Foo:
r"""Class docstring"""
def __init__(self):
r'''Method docstring
with
embedded
newlines'''
pass
bar = baz = (1, (2,), [3, 4]); zardoz = 0;
END
mkdir sub
cat > sub/bar.py <<'END'
# Import of non-existent modules, or assertion of false conditions,
# shouldn't cause problems, as it should be enough for the code to
# be syntactically correct.
import Automake.No.Such.Module
assert False
END
# An empty file in a more deeply-nested directory should be ok as well.
mkdir -p 1/_/2/_/3/_
: > 1/_/2/_/3/_/0.py
./py-compile foo.py sub/bar.py 1/_/2/_/3/_/0.py
py_installed foo.pyc
py_installed foo.pyo
py_installed sub/bar.pyc
py_installed sub/bar.pyo
py_installed 1/_/2/_/3/_/0.pyc
py_installed 1/_/2/_/3/_/0.pyo
:
|
Starlink/automake
|
t/py-compile-basic.sh
|
Shell
|
gpl-2.0
| 1,900 |
# export AWS_ACCESS_KEY="Your-Access-Key"
# export AWS_SECRET_KEY="Your-Secret-Key"
today=`date +"%d-%m-%Y","%T"`
logfile="/awslog/ec2-access.log"
# Grab all Security Groups IDs for DISALLOW action and export the IDs to a text file
sudo aws ec2 describe-security-groups --filters Name=tag:close-http-time,Values=19-00 Name=tag:bash-profile,Values=ad --query SecurityGroups[].[GroupId] --output text > ~/tmp/disallowhttp_ad_info.txt 2>&1
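# Each line of the output file is one security group ID (hypothetical example: sg-0123456789abcdef0)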
# Take list of changing security groups
for group_id in $(cat ~/tmp/disallowhttp_ad_info.txt)
do
# Change rules in security group
sudo aws ec2 revoke-security-group-ingress --group-id $group_id --protocol tcp --port 80 --cidr 0.0.0.0/0
# Put info into log file
echo Attempt $today disallow access to instances with attached group $group_id for all HTTP >> $logfile
done
|
STARTSPACE/aws-access-to-ec2-by-timetable
|
http-80/disallow-ad/http-disallow-ad-19.sh
|
Shell
|
gpl-2.0
| 813 |
#!/bin/sh
# Echo a command, then execute it.
progress () {
echo "$@"
"$@"
}
progress o2-cnf.sh
progress o2-lst.sh
progress o2-cat.sh
if [ -r 00etc/linkbase.xml ]; then
cp -a 00etc/linkbase.xml 01bld
fi
if [ -r 01bld/linkbase.xml ]; then
scoregen.plx -link
fi
progress o2-xml.sh
progress o2-web.sh
progress o2-prm.sh
progress o2-finish.sh
|
oracc/oracc
|
misc/o2/o2-catalog.sh
|
Shell
|
gpl-2.0
| 322 |
#!/bin/bash
# #
## ##
###Iptable ###
## ##
# #
#Remove the existing rules
iptables -t filter -F
iptables -t filter -X
#Block all traffic
iptables -t filter -P INPUT DROP
iptables -t filter -P FORWARD DROP
iptables -t filter -P OUTPUT DROP
#Keep connections that are already established
iptables -A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT
iptables -A OUTPUT -m state --state RELATED,ESTABLISHED -j ACCEPT
#Allow loopback, i.e. allow localhost
iptables -t filter -A INPUT -i lo -j ACCEPT
iptables -t filter -A OUTPUT -o lo -j ACCEPT
#Port rules
#SSH
iptables -t filter -A OUTPUT -p tcp --dport 22 -j ACCEPT
iptables -t filter -A INPUT -p tcp --dport 22 -j ACCEPT
#FTP
iptables -t filter -A OUTPUT -p tcp --dport 20 -j ACCEPT
iptables -t filter -A INPUT -p tcp --dport 20 -j ACCEPT
iptables -t filter -A OUTPUT -p tcp --dport 21 -j ACCEPT
iptables -t filter -A INPUT -p tcp --dport 21 -j ACCEPT
#HTTP
iptables -t filter -A OUTPUT -p tcp --dport 80 -j ACCEPT
iptables -t filter -A INPUT -p tcp --dport 80 -j ACCEPT
#HTTPS
iptables -t filter -A OUTPUT -p tcp --dport 443 -j ACCEPT
iptables -t filter -A INPUT -p tcp --dport 443 -j ACCEPT
#Ping
iptables -t filter -A INPUT -p icmp -j ACCEPT
iptables -t filter -A OUTPUT -p icmp -j ACCEPT
#DNS
iptables -t filter -A OUTPUT -p tcp --dport 53 -j ACCEPT
iptables -t filter -A INPUT -p tcp --dport 53 -j ACCEPT
iptables -t filter -A OUTPUT -p udp --dport 53 -j ACCEPT
iptables -t filter -A INPUT -p udp --dport 53 -j ACCEPT
# NTP (server clock)
iptables -t filter -A OUTPUT -p udp --dport 123 -j ACCEPT
#Rsync
iptables -t filter -A OUTPUT -p tcp --dport 837 -j ACCEPT
iptables -t filter -A INPUT -p tcp --dport 837 -j ACCEPT
#MySQL
iptables -t filter -A OUTPUT -p tcp --dport 3306 -j ACCEPT
iptables -t filter -A INPUT -p tcp --dport 3306 -j ACCEPT
#Denial-of-service protection: limit the number of connections to 10 conn/s
#iptables -A FORWARD -p tcp --syn -m limit --limit 10/second -j ACCEPT
#iptables -A FORWARD -p udp -m limit --limit 10/second -j ACCEPT
#iptables -A FORWARD -p icmp --icmp-type echo-request -m limit --limit 10/second -j ACCEPT
|
athena-project/Install
|
prod/firewall-mysql.sh
|
Shell
|
gpl-2.0
| 2,230 |
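# Split the scanned page OCS-421.png into left and right halves
# (ImageMagick crop geometry is WIDTHxHEIGHT+X_OFFSET+Y_OFFSET).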
convert images/OCS-421.png -crop 1645x4942+0+0 +repage images/OCS-421-A.png
convert images/OCS-421.png -crop 1661x4942+1645+0 +repage images/OCS-421-B.png
#/OCS-421.png
#
#
#
|
jonnymwalker/Staroslavjanskij-Slovar
|
scripts/middlesplit.OCS-421.sh
|
Shell
|
gpl-2.0
| 175 |
####
#### file: #filename#.zsh
#### description: #pluginname# plugin script
#### portutils - one-word commands for various macports operations:
####    port-update, port-upgrade, port-outdated, port-trim
#### references:
####
#### author: Michael Stilson <[email protected]>
#### version: 0.1.1
####
##
## port-install: install ports with certain settings.
##
## depends: port - a BSD style package management system for mac os.
##
function port-install() {
local package=$1 port=$( which port )
if [[ ! -x $port ]] ; then
echo "Please install macports <https://www.macports.org/>"
return
fi
# -R also upgrade dependencies
# -f force mode
# -o always honor state files
# -s compile source?
# -u uninstall non-active ports
# -v verbose mode
sudo $port -Rfosuv install $package
}
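##
## usage example (hypothetical package name): port-install wget
##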
##
## port-update: perform the selfupdate and present a prettified status
##
## depends: port - https://www.macports.org/
## egrep - https://trac.macports.org/browser/trunk/dports/sysutils/grep/Portfile
## gawk - https://trac.macports.org/browser/trunk/dports/lang/gawk/Portfile
## gsort - https://trac.macports.org/browser/trunk/dports/sysutils/coreutils/Portfile
##
function port-update() {
local egrep=$( which egrep ) gawk=$( which gawk ) gsort=$( which gsort ) port=$( which port )
local format_data='{printf "%s %s %s\n", $2, substr($3, 1, 24), substr($4, 1, 54)}'
local format_port='{printf " %-24s %s\n", $2, $3}'
local header=' port @version[+variant1..+variantN]
--------------------------------------------------------------------------------'
local storage=$HOME'/.portutils'
local install=$HOME'/.portutils/install.dat'
local uninstall=$HOME'/.portutils/uninstall.dat'
local updates=$HOME'/.portutils/updates.dat'
if [[ ! -x $egrep ]] ; then
echo "Please install grep <https://trac.macports.org/browser/trunk/dports/sysutils/grep/Portfile>"
return
fi
if [[ ! -x $gawk ]] ; then
echo "Please install gawk <https://trac.macports.org/browser/trunk/dports/lang/gawk/Portfile>"
return
fi
if [[ ! -x $gsort ]] ; then
echo "Please install gsort <https://trac.macports.org/browser/trunk/dports/sysutils/coreutils/Portfile>"
return
fi
if [[ ! -x $port ]] ; then
echo "Please install macports <https://www.macports.org/>"
return
fi
if [[ ! -d $storage ]] ; then
mkdir -v $storage
touch -c $install
touch -c $uninstall
touch -c $updates
fi
# -o always honor state files
# -v verbose mode
sudo $port -ov selfupdate
# -R also upgrade dependencies
# -f force mode
# -o always honor state files
# -s compile source?
# -u uninstall non-active ports
# -v verbose mode
# -y dry run
sudo $port -Rfosuvy upgrade outdated | $egrep '(de)*activate' > $updates
if [[ -s $updates ]] ; then
$gawk $format_data $updates | $egrep '^activate' > $install
$gawk $format_data $updates | $egrep '^deactivate' > $uninstall
echo -e "\n* installing"
echo $header
$gawk $format_port $install | $gsort
echo -e "\n* un-installing"
echo $header
$gawk $format_port $uninstall | $gsort
else
echo -e "\n* no updates at this time"
fi
}
##
## port-upgrade: upgrade ports that have updates
##
## depends: port - https://www.macports.org/
##
function port-upgrade() {
local port=$( which port )
if [[ ! -x $port ]] ; then
echo "Please install macports <https://www.macports.org/>"
return
fi
# -R also upgrade dependencies
# -f force mode
# -o always honor state files
# -s compile source?
# -u uninstall non-active ports
# -v verbose mode
sudo $port -Rfosuv upgrade outdated
}
##
## port-outdated: list ports that have updates
##
## depends: port - https://www.macports.org/
## egrep - https://trac.macports.org/browser/trunk/dports/sysutils/grep/Portfile
## gawk - https://trac.macports.org/browser/trunk/dports/lang/gawk/Portfile
## gsort - https://trac.macports.org/browser/trunk/dports/sysutils/coreutils/Portfile
##
function port-outdated() {
local egrep=$( which egrep ) gawk=$( which gawk ) gsort=$( which gsort ) port=$( which port )
local format_data='{printf "%s %s %s\n", $2, substr($3, 1, 24), substr($4, 1, 54)}'
local format_port='{printf " %-24s %s\n", $2, $3}'
local header=' port @version[+variant1..+variantN]
--------------------------------------------------------------------------------'
local storage=$HOME'/.portutils'
local install=$HOME'/.portutils/install.dat'
local uninstall=$HOME'/.portutils/uninstall.dat'
local updates=$HOME'/.portutils/updates.dat'
if [[ ! -x $egrep ]] ; then
echo "Please install grep <https://trac.macports.org/browser/trunk/dports/sysutils/grep/Portfile>"
return
fi
if [[ ! -x $gawk ]] ; then
echo "Please install gawk <https://trac.macports.org/browser/trunk/dports/lang/gawk/Portfile>"
return
fi
if [[ ! -x $gsort ]] ; then
echo "Please install gsort <https://trac.macports.org/browser/trunk/dports/sysutils/coreutils/Portfile>"
return
fi
if [[ ! -x $port ]] ; then
echo "Please install macports <https://www.macports.org/>"
return
fi
if [[ ! -d $storage ]] ; then
mkdir -v $storage
touch -c $install
touch -c $uninstall
touch -c $updates
fi
# -R also upgrade dependencies
# -f force mode
# -o always honor state files
# -s compile source?
# -u uninstall non-active ports
# -v verbose mode
# -y dry run
sudo $port -Rfosuvy upgrade outdated | $egrep '(de)*activate' > $updates
$gawk $format_data $updates | $egrep '^activate' > $install
$gawk $format_data $updates | $egrep '^deactivate' > $uninstall
if [ -s $updates ] ; then
echo -e "\n* installing"
echo $header
$gawk $format_port $install | $gsort
echo -e "\n* un-installing"
echo $header
$gawk $format_port $uninstall | $gsort
else
echo -e "\n* no updates at this time"
fi
}
##
## port-trim: remove installed ports that are no longer needed
##
## depends: port - https://www.macports.org/
## port_cutleaves - https://trac.macports.org/browser/trunk/dports/sysutils/port_cutleaves/Portfile
##
function port-trim() {
local port=$( which port ) trimmer=$( which port_cutleaves )
if [[ ! -x $port ]] ; then
echo "Please install macports <https://www.macports.org/>"
return
fi
if [[ ! -x $trimmer ]] ; then
echo "Please install port_cutleaves <https://trac.macports.org/browser/trunk/dports/sysutils/port_cutleaves/Portfile>"
return
fi
sudo $trimmer
}
|
sunoterra/cliniceness
|
src/portutils.zsh
|
Shell
|
gpl-2.0
| 7,109 |
#!/bin/bash
#
# cd to build.sh dir
cd $(dirname $0)
#
# no limit level
no_limit_levels () {
local name="$1" ; shift
local levels_file="$1" ; shift
local buyin_min=$1 ; shift
local buyin_max=$(((buyin_min*2)-1));
local blind_frequency=$1 ; shift
local blind_frequency_unit="$1" ; shift
local unit=$1 ; shift
sed \
-e "s;_NAME_;$name;g" \
-e "s;_MAX_BUY_IN_;$buyin_max;g" \
-e "s;_BUY_IN_;$buyin_min;g" \
-e "s/_BLIND_LEVEL_FILE_/$levels_file/g" \
-e "s/_BLIND_FREQUENCY_/$blind_frequency/g" \
-e "s/_BLIND_UNIT_/$blind_frequency_unit/g" \
-e "s/_UNIT_/$unit/g" \
no-limit-levels-blind.template > poker.level-${name}.xml
echo poker.level-${name}.xml
}
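# e.g. the first call below writes poker.level-10-20-no-limit-lsng9.xml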
#
no_limit_levels 10-20-no-limit-lsng9 poker.levels-blinds-lsng9.xml 1000 6 minutes 5
no_limit_levels 15-30-no-limit-wfrmtt poker.levels-blinds-colin.xml 1500 10 minutes 5
no_limit_levels 15-30-no-limit-wsop poker.levels-blinds-colin.xml 1500 6 minutes 5
no_limit_levels 50-100-no-limit-deep-stack poker.levels-blinds-deep-stack.xml 10000 6 minutes 5
#
# no limit level ante
no_limit_levels_ante () {
local name="$1" ; shift
local levels_file="$1" ; shift
local ante_levels_file="$1" ; shift
local buyin_min=$1 ; shift
local buyin_max=$(((buyin_min*2)-1));
local blind_frequency=$1 ; shift
local blind_frequency_unit="$1" ; shift
local unit=$1 ; shift
sed \
-e "s;_NAME_;$name;g" \
-e "s;_MAX_BUY_IN_;$buyin_max;g" \
-e "s;_BUY_IN_;$buyin_min;g" \
-e "s/_BLIND_LEVEL_FILE_/$levels_file/g" \
-e "s/_ANTE_LEVEL_FILE_/$ante_levels_file/g" \
-e "s/_BLIND_FREQUENCY_/$blind_frequency/g" \
-e "s/_BLIND_UNIT_/$blind_frequency_unit/g" \
-e "s/_ANTE_FREQUENCY_/$blind_frequency/g" \
-e "s/_ANTE_UNIT_/$blind_frequency_unit/g" \
-e "s/_UNIT_/$unit/g" \
no-limit-levels-blind-ante.template > poker.level-${name}.xml
echo poker.level-${name}.xml
}
no_limit_levels_ante 15-30-no-limit-ante poker.levels-blinds-colin.xml poker.levels-ante-colin.xml 1500 6 minutes 5
no_limit_levels_ante 15-30-no-limit-late-ante poker.levels-blinds-colin.xml poker.levels-late-ante.xml 1500 6 minutes 5
#
# no limit level ante with fixed_blind
no_limit_levels_ante_fixed_blind () {
local name="$1" ; shift
local blind_small="$1" ; shift
local blind_big="$1" ; shift
local ante_levels_file="$1" ; shift
local buyin_min=$1 ; shift
local buyin_max=$(((buyin_min*2)-1));
local blind_frequency=$1 ; shift
local blind_frequency_unit="$1" ; shift
local unit=$1 ; shift
sed \
-e "s;_NAME_;$name;g" \
-e "s/_SMALL_/$blind_small/" \
-e "s/_BIG_/$blind_big/" \
-e "s;_MAX_BUY_IN_;$buyin_max;g" \
-e "s;_BUY_IN_;$buyin_min;g" \
-e "s/_ANTE_LEVEL_FILE_/$ante_levels_file/g" \
-e "s/_BLIND_FREQUENCY_/$blind_frequency/g" \
-e "s/_BLIND_UNIT_/$blind_frequency_unit/g" \
-e "s/_ANTE_FREQUENCY_/$blind_frequency/g" \
-e "s/_ANTE_UNIT_/$blind_frequency_unit/g" \
-e "s/_UNIT_/$unit/g" \
no-limit-levels-ante-blind-fixed.template > poker.level-ante-fixed-blind-${name}.xml
echo poker.level-ante-fixed-blind-${name}.xml
}
no_limit_levels_ante_fixed_blind 10-20-no-limit-raising 10 20 poker.levels-ante-raising.xml 1500 6 minutes 5
#
# pokermania
pokermania () {
local blind_small=$1 ; shift
local blind_big=$1 ; shift
local buyin_min=$1 ; shift
local buyin_max=$1 ; shift
local buyin_best=$buyin_max
local unit=1
local cap=-1
local name="${blind_small}-${blind_big}_${buyin_min}-${buyin_max}"
local desc="$name"
sed \
-e "s/_NAME_/$name/" \
-e "s/_DESC_/$desc/" \
-e "s/_UNIT_/$unit/" \
-e "s/_BEST_BUY_IN_/$buyin_best/" \
-e "s/_MAX_BUY_IN_/$buyin_max/" \
-e "s/_BUY_IN_/$buyin_min/" \
-e "s/_SMALL_/$blind_small/" \
-e "s/_BIG_/$blind_big/" \
-e "s/_CAP_/$cap/" \
pokermania.template > poker.${name}_pokermania.xml
echo poker.${name}_pokermania.xml
}
# small big buyin buyin_max
pokermania 1 2 10 100
pokermania 2 4 10 100
pokermania 2 4 100 200
pokermania 5 10 200 500
pokermania 5 10 500 1000
pokermania 20 40 1000 2000
pokermania 20 40 2000 4000
pokermania 60 120 4000 6000
pokermania 60 120 6000 8000
pokermania 100 200 8000 10000
pokermania 100 200 10000 15000
pokermania 200 400 15000 20000
pokermania 200 400 20000 25000
pokermania 300 600 25000 30000
pokermania 300 600 30000 40000
pokermania 500 1000 40000 50000
pokermania 500 1000 50000 100000
pokermania 1500 3000 100000 150000
pokermania 1500 3000 150000 200000
pokermania 2500 5000 200000 250000
pokermania 2500 5000 250000 300000
pokermania 4000 8000 300000 400000
pokermania 4000 8000 400000 600000
pokermania 8000 16000 600000 800000
pokermania 8000 16000 800000 1000000
pokermania 15000 30000 1000000 1500000
pokermania 15000 30000 1500000 2000000
pokermania 100000 200000 6000000 8000000
#
# no-limit
no_limit () {
local blind_small=$1 ; shift
local blind_big=$1 ; shift
local unit=$1 ; shift
local buyin_min=$((blind_big*10))
local buyin_best=$((blind_big*50))
local buyin_max=$((blind_big*100))
local name="${blind_small}-${blind_big}_${buyin_min}-${buyin_max}"
local desc="$name"
sed \
-e "s/_NAME_/$name/" \
-e "s/_DESC_/$desc/" \
-e "s/_UNIT_/$unit/" \
-e "s/_BEST_BUY_IN_/$buyin_best/" \
-e "s/_MAX_BUY_IN_/$buyin_max/" \
-e "s/_BUY_IN_/$buyin_min/" \
-e "s/_SMALL_/$blind_small/" \
-e "s/_BIG_/$blind_big/" \
no-limit.template > poker.${name}_no-limit.xml
echo poker.${name}_no-limit.xml
}
# small big unit
no_limit 1 2 1
no_limit 2 4 1
no_limit 3 6 1
no_limit 5 10 5
no_limit 10 20 10
no_limit 30 60 10
no_limit 50 100 50
no_limit 100 200 100
#
# pot-limit
pot_limit () {
local blind_small=$1 ; shift
local blind_big=$1 ; shift
local unit=$1 ; shift
local buyin_min=$((blind_big*10))
local buyin_best=$((blind_big*50))
local buyin_max=$((blind_big*100))
local name="${blind_small}-${blind_big}_${buyin_min}-${buyin_max}"
local desc="$name"
sed \
-e "s/_NAME_/$name/" \
-e "s/_DESC_/$desc/" \
-e "s/_UNIT_/$unit/" \
-e "s/_BEST_BUY_IN_/$buyin_best/" \
-e "s/_MAX_BUY_IN_/$buyin_max/" \
-e "s/_BUY_IN_/$buyin_min/" \
-e "s/_SMALL_/$blind_small/" \
-e "s/_BIG_/$blind_big/" \
pot-limit.template > poker.${name}_pot-limit.xml
echo poker.${name}_pot-limit.xml
}
# small big unit
pot_limit 1 2 1
pot_limit 2 4 1
pot_limit 3 6 1
pot_limit 5 10 5
pot_limit 10 20 10
pot_limit 30 60 10
pot_limit 50 100 50
pot_limit 100 200 100
#
# limit
limit () {
local blind_small=$1 ; shift
local blind_big=$1 ; shift
local unit=$1 ; shift
local buyin_min=$((blind_big*10))
local buyin_best=$((blind_big*50))
local buyin_max=$((blind_big*100))
local big_bet=$((blind_big*2))
local name="${blind_small}-${blind_big}_${buyin_min}-${buyin_max}"
local desc="$name"
sed \
-e "s/_NAME_/$name/" \
-e "s/_DESC_/$desc/" \
-e "s/_UNIT_/$unit/" \
-e "s/_BEST_BUY_IN_/$buyin_best/" \
-e "s/_MAX_BUY_IN_/$buyin_max/" \
-e "s/_BUY_IN_/$buyin_min/" \
-e "s/_SMALL_/$blind_small/" \
-e "s/_BIG_/$blind_big/" \
-e "s/_BIGBET_/$big_bet/" \
limit.template > poker.${name}_limit.xml
echo poker.${name}_limit.xml
}
# small big unit
limit 1 2 1
limit 5 10 5
limit 15 30 5
limit 25 50 5
limit 50 100 50
#
# no blinds no ante limit
sed \
-e "s/_NAME_/0-0_50-5000/" \
-e "s/_DESC_/No blind, no antes/" \
-e "s/_UNIT_/100/" \
-e "s/_BEST_BUY_IN_/5000/" \
-e "s/_MAX_BUY_IN_/500000/" \
-e "s/_BUY_IN_/500000/" \
-e "s/_SMALL_/0/" \
-e "s/_BIG_/0/" \
limit.template > poker.0-0_50-5000_limit.xml
echo poker.0-0_50-5000_limit.xml
#
# ante-limit
ante_limit () {
local ante=$1 ; shift
local bringin=$1 ; shift
local blind_small=$1 ; shift
local blind_big=$1 ; shift
local unit=$1 ; shift
local buyin_min=$((blind_big*5))
local buyin_best=$((blind_big*30))
local buyin_max=$((blind_big*100000))
local name="${blind_small}-${blind_big}_${buyin_min}-${buyin_max}"
local desc="$name"
sed \
-e "s/_NAME_/$name/" \
-e "s/_DESC_/$desc/" \
-e "s/_UNIT_/$unit/" \
-e "s/_BEST_BUY_IN_/$buyin_best/" \
-e "s/_MAX_BUY_IN_/$buyin_max/" \
-e "s/_BUY_IN_/$buyin_min/" \
-e "s/_SMALL_/$blind_small/" \
-e "s/_BIG_/$blind_big/" \
-e "s/_ANTE_/$ante/" \
-e "s/_BRINGIN_/$bringin/" \
ante-limit.template > poker.${name}_ante-limit.xml
echo poker.${name}_ante-limit.xml
}
# ante bringin small big unit
ante_limit 1 5 10 20 1
ante_limit 5 15 30 60 5
|
pokermania/pokerengine
|
conf/build.sh
|
Shell
|
gpl-3.0
| 9,535 |
#!/bin/bash
# This file is part of BOINC.
# http://boinc.berkeley.edu
# Copyright (C) 2008 University of California
#
# BOINC is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation,
# either version 3 of the License, or (at your option) any later version.
#
# BOINC is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with BOINC. If not, see <http://www.gnu.org/licenses/>.
##
# Script to convert Macintosh BOINC installer to GridRepublic Desktop installer
# updated 4/16/10 by Charlie Fenton
##
## Usage:
## First put the following files into a working directory:
## the BOINC installer to be converted
## the Uninstall BOINC application to be converted
## GR_ReadMe.rtf
## gridrepublic.icns
## GR_install.icns
## GR_uninstall.icns
## COPYING
## COPYING.LESSER (for version 6.3.x and later only)
## COPYRIGHT
## skins directory containing GridRepublic skin (optional)
## acct_mgr_url.xml (to have BOINC automatically connect to Account Manager)
## PostInstall.app (needed only for version 6.2.x or earlier)
## gridrepublic.tiff (for screensaver coordinator)
## gridrepublic_ss_logo.png (for screensaver coordinator)
## GR_saver directory containing GridRepublic default screensaver and associated files, including:
## gridrepublic_ss_logo.jpg
##
## NOTE: This script uses PackageMaker, which is installed as part of the
## XCode developer tools. So you must have installed XCode Developer
## Tools on the Mac before running this script.
##
## NOTE: PackageMaker may write 3 lines to the terminal with "Setting to : 0 (null)"
## and "relocate: (null) 0". This is normal and does not indicate a problem.
##
## cd to the working directory:
##
## Invoke this script with the three parts of version number as arguments.
## For example, if the version is 3.2.1:
## sh [path_to_this_script] 3 2 1
##
## This will create a directory "BOINC_Installer" in the parent directory of
## the current directory
##
## For different branding, modify the following 9 variables:
PR_PATH="GR_Pkg_Root"
IR_PATH="GR_Installer_Resources"
SCRIPTS_PATH="GR_Installer_Scripts"
NEW_DIR_PATH="New_Release_GR_$1_$2_$3"
README_FILE="GR-ReadMe.rtf"
## BRANDING_FILE="GR-Branding"
BRANDING_INFO="BrandId=1"
ICNS_FILE="gridrepublic.icns"
INSTALLER_ICNS_FILE="GR_install.icns"
UNINSTALLER_ICNS_FILE="GR_uninstall.icns"
SAVER_DIR="GR_saver"
SAVER_SYSPREF_ICON="gridrepublic.tiff"
SAVER_LOGO="gridrepublic_ss_logo.png"
BRAND_NAME="GridRepublic"
MANAGER_NAME="GridRepublic Desktop"
LC_BRAND_NAME="gridrepublic"
SOURCE_PKG_PATH="BOINC Installer.app/Contents/Resources/BOINC.pkg/Contents"
if [ $# -lt 3 ]; then
echo "Usage:"
echo " cd working_directory"
echo " sh [path_to_this_script] major_version minor_version revision_number"
echo ""
echo "See comments at start of script for more info."
echo ""
exit 1
fi
pushd ./
## Make sure sed uses UTF-8 text encoding
unset LC_CTYPE
unset LC_MESSAGES
unset __CF_USER_TEXT_ENCODING
export LANG=en_US.UTF-8
if [ -f /Developer/usr/bin/packagemaker ]; then
PACKAGEMAKER_VERSION=3
else
PACKAGEMAKER_VERSION=2
fi
sudo rm -dfR "${IR_PATH}"
sudo rm -dfR "${PR_PATH}"
sudo rm -dfR "${SCRIPTS_PATH}"
mkdir -p "${IR_PATH}"
mkdir -p "${PR_PATH}"
sudo rm -dfR "${NEW_DIR_PATH}/"
mkdir -p "${NEW_DIR_PATH}/"
mkdir -p "${NEW_DIR_PATH}/${LC_BRAND_NAME}_$1.$2.$3_macOSX_universal"
mkdir -p "${NEW_DIR_PATH}/${LC_BRAND_NAME}_$1.$2.$3_macOSX_universal/extras"
cp -fp "${SOURCE_PKG_PATH}/Archive.pax.gz" "${PR_PATH}/"
cd "${PR_PATH}"
sudo gunzip ./Archive.pax.gz
sudo pax -r -pe -f Archive.pax
rm -df "Archive.pax"
cd ..
cp -fp "${SOURCE_PKG_PATH}/Resources/License.rtf" "${IR_PATH}/"
cp -fp "${README_FILE}" "${IR_PATH}/ReadMe.rtf"
# Update version number
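# (BSD sed on macOS requires an explicit, possibly empty, suffix argument after -i)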
sed -i "" s/"<VER_NUM>"/"$1.$2.$3"/g "${IR_PATH}/ReadMe.rtf"
if [ "$PACKAGEMAKER_VERSION" = "3" ]; then
mkdir -p "${SCRIPTS_PATH}"
else
SCRIPTS_PATH=${IR_PATH}
fi
# Create the installer's preinstall and preupgrade scripts from the standard preinstall script
# Older versions of BOINC installer did not use preinstall and preupgrade scripts, so check first
if [ -f "${SOURCE_PKG_PATH}/Resources/preinstall" ]; then
cp -fp "${SOURCE_PKG_PATH}/Resources/preinstall" "${SCRIPTS_PATH}/"
sed -i "" s/BOINCManager/"${MANAGER_NAME}"/g "${SCRIPTS_PATH}/preinstall"
sed -i "" s/BOINCSaver/"${BRAND_NAME}"/g "${SCRIPTS_PATH}/preinstall"
cp -fp "${SCRIPTS_PATH}/preinstall" "${SCRIPTS_PATH}/preupgrade"
fi
cp -fp "${SOURCE_PKG_PATH}/Resources/postinstall" "${SCRIPTS_PATH}/"
cp -fp "${SOURCE_PKG_PATH}/Resources/postupgrade" "${SCRIPTS_PATH}/"
if [ "$1" = "6" ] && [ "$2" = "2" ]; then
cp -fpR "PostInstall.app" "${IR_PATH}/"
else
cp -fpR "${SOURCE_PKG_PATH}/Resources/PostInstall.app" "${IR_PATH}/"
cp -fpR "${SOURCE_PKG_PATH}/Resources/WaitPermissions.app" "${IR_PATH}/"
fi
cp -fp "${SOURCE_PKG_PATH}/Resources/all_projects_list.xml" "${IR_PATH}/"
##### We've decided not to customize BOINC Data directory name for branding
#### mkdir -p "${PR_PATH}/Library/Application Support/${BRAND_NAME} Data"
#### mkdir -p "${PR_PATH}/Library/Application Support/${BRAND_NAME} Data/locale"
mkdir -p "${PR_PATH}/Library/Application Support/BOINC Data"
## If skins folder is present. copy it into BOINC Data folder
if [ -d "skins" ]; then
sudo cp -fR "skins" "${PR_PATH}/Library/Application Support/BOINC Data/"
fi
## Normally, we would put the account manager URL file into the Package Root folder
## for delivery to the BOINC Data folder. But if the user later installs standard
## BOINC (without this file), the Apple installer would then delete the file.
## So we "hide" it in the installer's resources, and have the PostInstall script copy
## it into the BOINC Data folder
##
## If account manager URL file is present, copy it into installer resources for
## eventual delivery into the BOINC Data folder
if [ -f "acct_mgr_url.xml" ]; then
## sudo cp -fR "acct_mgr_url.xml" "${PR_PATH}/Library/Application Support/BOINC Data/acct_mgr_url.xml"
sudo cp -fR "acct_mgr_url.xml" "${IR_PATH}/"
fi
## Modify for Grid Republic
# Rename the Manager's bundle and its executable inside the bundle
sudo mv -f "${PR_PATH}/Applications/BOINCManager.app/" "${PR_PATH}/Applications/${MANAGER_NAME}.app/"
sudo mv -f "${PR_PATH}/Applications/${MANAGER_NAME}.app/Contents/MacOS/BOINCManager" "${PR_PATH}/Applications/${MANAGER_NAME}.app/Contents/MacOS/${MANAGER_NAME}"
# Update the Manager's info.plist, InfoPlist.strings files
sudo sed -i "" s/BOINCManager/"${MANAGER_NAME}"/g "${PR_PATH}/Applications/${MANAGER_NAME}.app/Contents/Info.plist"
sudo sed -i "" s/BOINCMgr.icns/"${ICNS_FILE}"/g "${PR_PATH}/Applications/${MANAGER_NAME}.app/Contents/Info.plist"
sudo chmod a+w "${PR_PATH}/Applications/${MANAGER_NAME}.app/Contents/Resources/English.lproj/InfoPlist.strings"
sudo iconv -f UTF-16 -t UTF-8 "${PR_PATH}/Applications/${MANAGER_NAME}.app/Contents/Resources/English.lproj/InfoPlist.strings" > "${PR_PATH}/tempUTF81"
sudo sed -i "" s/BOINC/"${MANAGER_NAME}"/g "${PR_PATH}/tempUTF81"
sudo iconv -f UTF-8 -t UTF-16 "${PR_PATH}/tempUTF81" > "${PR_PATH}/Applications/${MANAGER_NAME}.app/Contents/Resources/English.lproj/InfoPlist.strings"
sudo rm -f "${PR_PATH}/tempUTF81"
# Replace the Manager's BOINCMgr.icns file
sudo cp -fp "${ICNS_FILE}" "${PR_PATH}/Applications/${MANAGER_NAME}.app/Contents/Resources/${ICNS_FILE}"
sudo rm -f "${PR_PATH}/Applications/${MANAGER_NAME}.app/Contents/Resources/BOINCMgr.icns"
# Put Branding file in both Installer Package and Application Bundle
sudo echo ${BRANDING_INFO} > "${IR_PATH}/Branding"
sudo cp -fp "${IR_PATH}/Branding" "${PR_PATH}/Applications/${MANAGER_NAME}.app/Contents/Resources/Branding"
## Put Branding file into BOINC Data folder to make it available to screensaver coordinator
sudo cp -fp "${IR_PATH}/Branding" "${PR_PATH}/Library/Application Support/BOINC Data/Branding"
# Rename the screensaver coordinator bundle and its executable inside the bundle
sudo mv -f "${PR_PATH}/Library/Screen Savers/BOINCSaver.saver" "${PR_PATH}/Library/Screen Savers/${BRAND_NAME}.saver"
sudo mv -f "${PR_PATH}/Library/Screen Savers/${BRAND_NAME}.saver/Contents/MacOS/BOINCSaver" "${PR_PATH}/Library/Screen Savers/${BRAND_NAME}.saver/Contents/MacOS/${BRAND_NAME}"
# Update screensaver coordinator's info.plist, InfoPlist.strings files
sudo sed -i "" s/BOINCSaver/"${BRAND_NAME}"/g "${PR_PATH}/Library/Screen Savers/${BRAND_NAME}.saver/Contents/Info.plist"
sudo chmod a+w "${PR_PATH}/Library/Screen Savers/${BRAND_NAME}.saver/Contents/Resources/English.lproj/InfoPlist.strings"
sudo iconv -f UTF-16 -t UTF-8 "${PR_PATH}/Library/Screen Savers/${BRAND_NAME}.saver/Contents/Resources/English.lproj/InfoPlist.strings" > "${PR_PATH}/tempUTF82"
sudo sed -i "" s/BOINC/"${BRAND_NAME}"/g "${PR_PATH}/tempUTF82"
sudo iconv -f UTF-8 -t UTF-16 "${PR_PATH}/tempUTF82" > "${PR_PATH}/Library/Screen Savers/${BRAND_NAME}.saver/Contents/Resources/English.lproj/InfoPlist.strings"
sudo rm -f "${PR_PATH}/tempUTF82"
# Replace screensaver coordinator's boinc.tiff or boinc.jpg file
sudo rm -f "${PR_PATH}/Library/Screen Savers/${BRAND_NAME}.saver/Contents/Resources/boinc.jpg"
sudo cp -fp "${SAVER_SYSPREF_ICON}" "${PR_PATH}/Library/Screen Savers/${BRAND_NAME}.saver/Contents/Resources/boinc.tiff"
# Replace screensaver coordinator's boinc_ss_logo.png file
sudo rm -f "${PR_PATH}/Library/Screen Savers/${BRAND_NAME}.saver/Contents/Resources/boinc_ss_logo.png"
sudo cp -fp "${SAVER_LOGO}" "${PR_PATH}/Library/Screen Savers/${BRAND_NAME}.saver/Contents/Resources/boinc_ss_logo.png"
# Delete the BOINC default screensaver and its associated files
sudo rm -f "${PR_PATH}/Library/Application Support/BOINC Data/boinc_logo_black.jpg"
# Copy the GridRepublic default screensaver files into BOINC Data folder
sudo cp -fR "${SAVER_DIR}/" "${PR_PATH}/Library/Application Support/BOINC Data/"
# Copy and rename the Uninstall application's bundle and rename its executable inside the bundle
sudo cp -fpR "Uninstall BOINC.app" "${NEW_DIR_PATH}/${LC_BRAND_NAME}_$1.$2.$3_macOSX_universal/extras/Uninstall ${BRAND_NAME}.app"
sudo mv -f "${NEW_DIR_PATH}/${LC_BRAND_NAME}_$1.$2.$3_macOSX_universal/extras/Uninstall ${BRAND_NAME}.app/Contents/MacOS/Uninstall BOINC" "${NEW_DIR_PATH}/${LC_BRAND_NAME}_$1.$2.$3_macOSX_universal/extras/Uninstall ${BRAND_NAME}.app/Contents/MacOS/Uninstall ${BRAND_NAME}"
# Update Uninstall application's info.plist, InfoPlist.strings files
sudo sed -i "" s/BOINC/"${BRAND_NAME}"/g "${NEW_DIR_PATH}/${LC_BRAND_NAME}_$1.$2.$3_macOSX_universal/extras/Uninstall ${BRAND_NAME}.app/Contents/Info.plist"
sudo sed -i "" s/MacUninstaller.icns/"${UNINSTALLER_ICNS_FILE}"/g "${NEW_DIR_PATH}/${LC_BRAND_NAME}_$1.$2.$3_macOSX_universal/extras/Uninstall ${BRAND_NAME}.app/Contents/Info.plist"
sudo chmod a+w "${NEW_DIR_PATH}/${LC_BRAND_NAME}_$1.$2.$3_macOSX_universal/extras/Uninstall ${BRAND_NAME}.app/Contents/Resources/English.lproj/InfoPlist.strings"
sudo iconv -f UTF-16 -t UTF-8 "${NEW_DIR_PATH}/${LC_BRAND_NAME}_$1.$2.$3_macOSX_universal/extras/Uninstall ${BRAND_NAME}.app/Contents/Resources/English.lproj/InfoPlist.strings" > "${PR_PATH}/tempUTF83"
sudo sed -i "" s/BOINC/"${BRAND_NAME}"/g "${PR_PATH}/tempUTF83"
sudo iconv -f UTF-8 -t UTF-16 "${PR_PATH}/tempUTF83" > "${NEW_DIR_PATH}/${LC_BRAND_NAME}_$1.$2.$3_macOSX_universal/extras/Uninstall ${BRAND_NAME}.app/Contents/Resources/English.lproj/InfoPlist.strings"
sudo rm -f "${PR_PATH}/tempUTF83"
# Replace the Uninstall application's MacUninstaller.icns file
sudo cp -fp "${UNINSTALLER_ICNS_FILE}" "${NEW_DIR_PATH}/${LC_BRAND_NAME}_$1.$2.$3_macOSX_universal/extras/Uninstall ${BRAND_NAME}.app/Contents/Resources/${UNINSTALLER_ICNS_FILE}"
sudo rm -f "${NEW_DIR_PATH}/${LC_BRAND_NAME}_$1.$2.$3_macOSX_universal/extras/Uninstall ${BRAND_NAME}.app/Contents/Resources/MacUninstaller.icns"
# Remove the Uninstall application's resource file so it will show generic "Are you sure?" dialog
sudo rm -f "${NEW_DIR_PATH}/${LC_BRAND_NAME}_$1.$2.$3_macOSX_universal/extras/Uninstall ${BRAND_NAME}.app/Contents/Resources/Uninstall BOINC.rsrc"
sudo chown -R root:admin "${NEW_DIR_PATH}/${LC_BRAND_NAME}_$1.$2.$3_macOSX_universal/extras/Uninstall ${BRAND_NAME}.app"
sudo chmod -R 755 "${NEW_DIR_PATH}/${LC_BRAND_NAME}_$1.$2.$3_macOSX_universal/extras/Uninstall ${BRAND_NAME}.app"
## Fix up ownership and permissions
sudo chown -R root:admin "${PR_PATH}"/*
sudo chmod -R u+rw,g+rw,o+r-w "${PR_PATH}"/*
sudo chmod 1775 "${PR_PATH}/Library"
sudo chown -R 501:admin "${PR_PATH}/Library/Application Support"/*
sudo chmod -R u+rw,g+r-w,o+r-w "${PR_PATH}/Library/Application Support"/*
sudo chown -R root:admin "${IR_PATH}"/*
sudo chmod -R u+rw,g+r-w,o+r-w "${IR_PATH}"/*
sudo chown -R root:admin "${SCRIPTS_PATH}"/*
sudo chmod -R u+rw,g+r-w,o+r-w "${SCRIPTS_PATH}"/*
sudo cp -fp "${IR_PATH}/ReadMe.rtf" "${NEW_DIR_PATH}/${LC_BRAND_NAME}_$1.$2.$3_macOSX_universal/ReadMe.rtf"
sudo chown -R 501:admin "${NEW_DIR_PATH}/${LC_BRAND_NAME}_$1.$2.$3_macOSX_universal/ReadMe.rtf"
sudo chmod -R 644 "${NEW_DIR_PATH}/${LC_BRAND_NAME}_$1.$2.$3_macOSX_universal/ReadMe.rtf"
sudo cp -fp "COPYING" "${NEW_DIR_PATH}/${LC_BRAND_NAME}_$1.$2.$3_macOSX_universal/extras/COPYING.txt"
sudo chown -R 501:admin "${NEW_DIR_PATH}/${LC_BRAND_NAME}_$1.$2.$3_macOSX_universal/extras/COPYING.txt"
sudo chmod -R 644 "${NEW_DIR_PATH}/${LC_BRAND_NAME}_$1.$2.$3_macOSX_universal/extras/COPYING.txt"
sudo cp -fp "COPYRIGHT" "${NEW_DIR_PATH}/${LC_BRAND_NAME}_$1.$2.$3_macOSX_universal/extras/COPYRIGHT.txt"
sudo chown -R 501:admin "${NEW_DIR_PATH}/${LC_BRAND_NAME}_$1.$2.$3_macOSX_universal/extras/COPYRIGHT.txt"
sudo chmod -R 644 "${NEW_DIR_PATH}/${LC_BRAND_NAME}_$1.$2.$3_macOSX_universal/extras/COPYRIGHT.txt"
# COPYING.LESSER is part of GNU License v3, included only with BOINC 6.3.x and later
if [ -f "COPYING.LESSER" ]; then
sudo cp -fp "COPYING.LESSER" "${NEW_DIR_PATH}/${LC_BRAND_NAME}_$1.$2.$3_macOSX_universal/extras/COPYING.LESSER.txt"
sudo chown -R 501:admin "${NEW_DIR_PATH}/${LC_BRAND_NAME}_$1.$2.$3_macOSX_universal/extras/COPYING.LESSER.txt"
sudo chmod -R 644 "${NEW_DIR_PATH}/${LC_BRAND_NAME}_$1.$2.$3_macOSX_universal/extras/COPYING.LESSER.txt"
fi
# Make temporary copies of Pkg-Info.plist and Description.plist for PackageMaker and update for this branding
sudo cp -fp "${SOURCE_PKG_PATH}/Info.plist" "${NEW_DIR_PATH}/Pkg-Info.plist"
sudo chown -R 501:admin "${NEW_DIR_PATH}/Pkg-Info.plist"
sudo chmod -R 666 "${NEW_DIR_PATH}/Pkg-Info.plist"
if [ -f "${SOURCE_PKG_PATH}/Resources/English.lproj/Description.plist" ]; then
sudo cp -fp "${SOURCE_PKG_PATH}/Resources/English.lproj/Description.plist" "${NEW_DIR_PATH}"
else
sudo cp -fp "${SOURCE_PKG_PATH}/Resources/en.lproj/Description.plist" "${NEW_DIR_PATH}"
fi
sudo chown -R 501:admin "${NEW_DIR_PATH}/Description.plist"
sudo chmod -R 666 "${NEW_DIR_PATH}/Description.plist"
# Update Pkg-Info.plist name and ensure it is in XML format
defaults write "`pwd`/${NEW_DIR_PATH}/Pkg-Info" "CFBundleGetInfoString" "$BRAND_NAME $1.$2.$3"
plutil -convert xml1 "`pwd`/${NEW_DIR_PATH}/Pkg-Info.plist"
# Update Description.plist name and ensure it is in XML format
defaults write "`pwd`/${NEW_DIR_PATH}/Description" "IFPkgDescriptionTitle" "$MANAGER_NAME"
plutil -convert xml1 "`pwd`/${NEW_DIR_PATH}/Description.plist"
# Copy the installer wrapper application "${BRAND_NAME} Installer.app"
sudo cp -fpR "BOINC Installer.app" "${NEW_DIR_PATH}/${LC_BRAND_NAME}_$1.$2.$3_macOSX_universal/${BRAND_NAME} Installer.app"
sudo rm -dfR "${NEW_DIR_PATH}/${LC_BRAND_NAME}_$1.$2.$3_macOSX_universal/${BRAND_NAME} Installer.app/Contents/Resources/BOINC.pkg"
# Update the installer wrapper application's info.plist, InfoPlist.strings files
sudo sed -i "" s/BOINC/"${BRAND_NAME}"/g "${NEW_DIR_PATH}/${LC_BRAND_NAME}_$1.$2.$3_macOSX_universal/${BRAND_NAME} Installer.app/Contents/Info.plist"
sudo sed -i "" s/MacInstaller.icns/"${INSTALLER_ICNS_FILE}"/g "${NEW_DIR_PATH}/${LC_BRAND_NAME}_$1.$2.$3_macOSX_universal/${BRAND_NAME} Installer.app/Contents/Info.plist"
sudo chmod a+w "${NEW_DIR_PATH}/${LC_BRAND_NAME}_$1.$2.$3_macOSX_universal/${BRAND_NAME} Installer.app/Contents/Resources/English.lproj/InfoPlist.strings"
sudo iconv -f UTF-16 -t UTF-8 "${NEW_DIR_PATH}/${LC_BRAND_NAME}_$1.$2.$3_macOSX_universal/${BRAND_NAME} Installer.app/Contents/Resources/English.lproj/InfoPlist.strings" > "${PR_PATH}/tempUTF84"
sudo sed -i "" s/BOINC/"${MANAGER_NAME}"/g "${PR_PATH}/tempUTF84"
sudo iconv -f UTF-8 -t UTF-16 "${PR_PATH}/tempUTF84" > "${NEW_DIR_PATH}/${LC_BRAND_NAME}_$1.$2.$3_macOSX_universal/${BRAND_NAME} Installer.app/Contents/Resources/English.lproj/InfoPlist.strings"
sudo rm -f "${PR_PATH}/tempUTF84"
# Replace the installer wrapper application's MacInstaller.icns file
sudo cp -fp "${INSTALLER_ICNS_FILE}" "${NEW_DIR_PATH}/${LC_BRAND_NAME}_$1.$2.$3_macOSX_universal/${BRAND_NAME} Installer.app/Contents/Resources/${INSTALLER_ICNS_FILE}"
sudo rm -f "${NEW_DIR_PATH}/${LC_BRAND_NAME}_$1.$2.$3_macOSX_universal/${BRAND_NAME} Installer.app/Contents/Resources/MacInstaller.icns"
# Rename the installer wrapper application's executable inside the bundle
sudo mv -f "${NEW_DIR_PATH}/${LC_BRAND_NAME}_$1.$2.$3_macOSX_universal/${BRAND_NAME} Installer.app/Contents/MacOS/BOINC Installer" "${NEW_DIR_PATH}/${LC_BRAND_NAME}_$1.$2.$3_macOSX_universal/${BRAND_NAME} Installer.app/Contents/MacOS/${BRAND_NAME} Installer"
# Build the installer package inside the wrapper application's bundle
if [ "$PACKAGEMAKER_VERSION" = "3" ]; then
# Packagemaker Version 3
## /Developer/usr/bin/packagemaker -r ../BOINC_Installer/Pkg_Root -e ../BOINC_Installer/Installer\ Resources/ -s ../BOINC_Installer/Installer\ Scripts/ -f mac_build/Pkg-Info.plist -t "BOINC Manager" -n "$1.$2.$3" -b -o ../BOINC_Installer/New_Release_$1_$2_$3/boinc_$1.$2.$3_macOSX_universal/BOINC\ Installer.app/Contents/Resources/BOINC.pkg
/Developer/usr/bin/packagemaker -r "${PR_PATH}" -e "${IR_PATH}" -s "${SCRIPTS_PATH}" -f "${NEW_DIR_PATH}/Pkg-Info.plist" -t "${MANAGER_NAME}" -n "$1.$2.$3" -b -o "${NEW_DIR_PATH}/${LC_BRAND_NAME}_$1.$2.$3_macOSX_universal/${BRAND_NAME} Installer.app/Contents/Resources/${BRAND_NAME}.pkg"
# Remove TokenDefinitions.plist and IFPkgPathMappings in Info.plist, which would cause installer to find a previous copy of ${MANAGER_NAME} and install there
sudo rm -f "${NEW_DIR_PATH}/${LC_BRAND_NAME}_$1.$2.$3_macOSX_universal/${BRAND_NAME} Installer.app/Contents/Resources/${BRAND_NAME}.pkg/Contents/Resources/TokenDefinitions.plist"
defaults delete "`pwd`/${NEW_DIR_PATH}/${LC_BRAND_NAME}_$1.$2.$3_macOSX_universal/${BRAND_NAME} Installer.app/Contents/Resources/${BRAND_NAME}.pkg/Contents/Info" IFPkgPathMappings
else
# Packagemaker Version 2
## /Developer/Tools/packagemaker -build -p ../BOINC_Installer/New_Release_$1_$2_$3/boinc_$1.$2.$3_macOSX_universal/BOINC\ Installer.app/Contents/Resources/BOINC.pkg -f ../BOINC_Installer/Pkg_Root -r ../BOINC_Installer/Installer\ Resources/ -i mac_build/Pkg-Info.plist -d mac_Installer/Description.plist -ds
/Developer/Tools/packagemaker -build -p "${NEW_DIR_PATH}/${LC_BRAND_NAME}_$1.$2.$3_macOSX_universal/${BRAND_NAME} Installer.app/Contents/Resources/${BRAND_NAME}.pkg" -f "${PR_PATH}" -r "${IR_PATH}" -i "${NEW_DIR_PATH}/Pkg-Info.plist" -d "${NEW_DIR_PATH}/Description.plist" -ds
fi
## for debugging
## if [ $? -ne 0 ]; then
## echo ""
## echo "********** /Pkg-Info.plist File contents: *************"
## echo ""
## cp "${NEW_DIR_PATH}/Pkg-Info.plist" /dev/stdout
## echo ""
## echo "********** End /Pkg-Info.plist File contents *************"
## echo ""
## fi
# Allow the installer wrapper application to modify the package's Info.plist file
sudo chmod u+w,g+w,o+w "${NEW_DIR_PATH}/${LC_BRAND_NAME}_$1.$2.$3_macOSX_universal/${BRAND_NAME} Installer.app/Contents/Resources/${BRAND_NAME}.pkg/Contents/Info.plist"
# Update the installer wrapper application's creation date
sudo touch "${NEW_DIR_PATH}/${LC_BRAND_NAME}_$1.$2.$3_macOSX_universal/${BRAND_NAME} Installer.app"
# Remove temporary copies of Pkg-Info.plist and Description.plist
sudo rm ${NEW_DIR_PATH}/Pkg-Info.plist
sudo rm ${NEW_DIR_PATH}/Description.plist
# Remove temporary directories
sudo rm -dfR "${IR_PATH}"
sudo rm -dfR "${PR_PATH}"
sudo rm -dfR "${SCRIPTS_PATH}"
# Compress the products
cd ${NEW_DIR_PATH}
zip -rqy ${LC_BRAND_NAME}_$1.$2.$3_macOSX_universal.zip ${LC_BRAND_NAME}_$1.$2.$3_macOSX_universal
##### We've decided not to create branded command-line executables; they are identical to standard ones
#### zip -rqy ${LC_BRAND_NAME}_$1.$2.$3_universal-apple-darwin.zip ${LC_BRAND_NAME}_$1.$2.$3_universal-apple-darwin
##### We've decided not to create branded symbol table file; it is identical to standard one
#### zip -rqy ${LC_BRAND_NAME}_$1.$2.$3_macOSX_SymbolTables.zip ${LC_BRAND_NAME}_$1.$2.$3_macOSX_SymbolTables
# Force Finder to recognize changed icons by deleting the uncompressed products and expanding the zip file
sudo rm -dfR ${LC_BRAND_NAME}_$1.$2.$3_macOSX_universal
open ${LC_BRAND_NAME}_$1.$2.$3_macOSX_universal.zip
popd
exit 0
|
freehal/boinc-freehal
|
mac_installer/make_GridRepublic.sh
|
Shell
|
gpl-3.0
| 21,459 |
#!/bin/bash
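# NOTE: dbuser and dbname must be set in the caller's environment; this file
# only defines the create() function, it does not invoke it.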
create()
{
#users and groups
psql -U $dbuser -d $dbname -f ./sql/create/groups_users.sql
# app configuration
psql -U $dbuser -d $dbname -f ./sql/create/configurations.sql
# formats for responses
psql -U $dbuser -d $dbname -f ./sql/create/formats.sql
psql -U $dbuser -d $dbname -f ./sql/insert/formats.sql
# formats for responses
psql -U $dbuser -d $dbname -f ./sql/create/corpuses.sql
psql -U $dbuser -d $dbname -f ./sql/insert/corpuses.sql
# accounts for request
psql -U $dbuser -d $dbname -f ./sql/create/accounts.sql
#visiblis supported languages
psql -U $dbuser -d $dbname -f ./sql/create/languages.sql
psql -U $dbuser -d $dbname -f ./sql/insert/languages.sql
#categories for semantic request (url title text)
psql -U $dbuser -d $dbname -f ./sql/create/categories.sql
psql -U $dbuser -d $dbname -f ./sql/insert/categories.sql
#keyword send by the api
psql -U $dbuser -d $dbname -f ./sql/create/keywords.sql
#the semantic request
psql -U $dbuser -d $dbname -f ./sql/create/semantic_requests.sql
#the results for keyword to a request
psql -U $dbuser -d $dbname -f ./sql/create/keyword_link_requests.sql
#some metrics send by the api
psql -U $dbuser -d $dbname -f ./sql/create/semantic_responses.sql
#if new install (to do 0.1->0.2)
#Request for command & http status code for doc http codes
psql -U $dbuser -d $dbname -f ./sql/create/notification_texts.sql
psql -U $dbuser -d $dbname -f ./sql/create/notifications.sql
#Request for command & http status code for doc http codes
psql -U $dbuser -d $dbname -f ./sql/create/request_for_comments.sql
psql -U $dbuser -d $dbname -f ./sql/insert/request_for_comments.sql
psql -U $dbuser -d $dbname -f ./sql/create/http_status_codes.sql
psql -U $dbuser -d $dbname -f ./sql/insert/http_status_codes.sql
psql -U $dbuser -d $dbname -f ./sql/create/semantic_cocoons.sql
psql -U $dbuser -d $dbname -f ./sql/create/queue_elements.sql
psql -U $dbuser -d $dbname -f ./sql/create/semantic_cocoon_responses.sql
psql -U $dbuser -d $dbname -f ./sql/create/semantic_cocoon_links.sql
psql -U $dbuser -d $dbname -f ./sql/create/semantic_cocoon_uniform_ressource_locators.sql
#if new install (to do 0.2->0.3)
}
|
WildTurtles/illumination
|
scripts/sql/db.sh
|
Shell
|
gpl-3.0
| 2,167 |
#!/bin/bash
#
# https://github.com/munishgaurav5/ks/raw/master/U/kick_install.sh
#cd /tmp && wget https://github.com/munishgaurav5/ks/raw/master/U/kick_install.sh && chmod 777 kick_install.sh && ./kick_install.sh
# Update
apt-get update -y
apt-get upgrade -y
# install apps
apt-get install -y nano wget curl net-tools lsof bzip2 zip unzip git sudo make cmake sed at
# configs
export KSURL="https://github.com/munishgaurav5/ks/raw/master/U/kick_tmp.cfg"
export MIRROR="http://fastserver.me/admin/iso/"
NETWORK_INTERFACE_NAME="$(ip -o -4 route show to default | awk '{print $5}')"
Boot_device="eth0"
export DNS1=8.8.8.8
export DNS2=8.8.4.4
export IPADDR=$(ip a s $NETWORK_INTERFACE_NAME |grep "inet "|awk '{print $2}'| awk -F '/' '{print $1}')
export PREFIX=$(ip a s $NETWORK_INTERFACE_NAME |grep "inet "|awk '{print $2}'| awk -F '/' '{print $2}')
export GW=$(ip route|grep default | awk '{print $3}')
##### SETUP
curl -o /boot/ubuntu_vmlinuz ${MIRROR}install/vmlinuz
curl -o /boot/ubuntu_initrd.gz ${MIRROR}install/initrd.gz
echo ""
echo ""
# root_value=`grep "set root=" /boot/grub2/grub.cfg | head -1`
# root_value=`grep "set root=" /boot/grub/grub.cfg | head -2`
root_value="set root='hd0,msdos1'"
echo "$root_value"
echo ""
echo ""
sleep 2
echo ""
cat << EOF >> /etc/grub.d/40_custom
menuentry "reinstall" {
$root_value
linux /ubuntu_vmlinuz ip=${IPADDR}::${GW}:${PREFIX}:$(hostname):$Boot_device:off nameserver=$DNS1 nameserver=$DNS2 repo=$MIRROR vga=788 file=${MIRROR}preseed/ubuntu-server.seed ks=$KSURL preseed/file=${MIRROR}ks.preseed vnc vncconnect=${IPADDR}:1 vncpassword=changeme headless
initrd /ubuntu_initrd.gz
}
EOF
#cat << EOF >> /etc/grub.d/40_custom
#menuentry "reinstall" {
# $root_value
# linux /ubuntu_vmlinuz net.ifnames=0 biosdevname=0 ip=${IPADDR}::${GW}:${PREFIX}:$(hostname):$Boot_device:off nameserver=$DNS1 nameserver=$DNS2 inst.repo=$MIRROR inst.ks=$KSURL inst.vnc inst.vncconnect=${IPADDR}:1 inst.vncpassword=changeme inst.headless inst.lang=en inst.keymap=us
# initrd /ubuntu_initrd.gz
#}
#EOF
sed -i -e "s/GRUB_DEFAULT.*/GRUB_DEFAULT=\"reinstall\"/g" /etc/default/grub
sudo update-grub
#grub2-mkconfig
#grub2-mkconfig --output=/boot/grub2/grub.cfg
#grubby --info=ALL
echo ""
echo ""
echo "Setting Up default Grub Entry ..."
echo ""
sleep 5
echo ""
# install grub-customizer
### Permanent Boot Change
#grubby --default-index
#grub2-set-default 'reinstall'
#grubby --default-index
### Permanent Boot Change
#grubby --default-index
#grubby --set-default /boot/vmlinuz
#grubby --default-index
### One Time Boot Change
#grubby --default-index
#grub-reboot 1
#grub2-reboot "reinstall"
#grubby --default-index
echo ""
echo ""
echo " >>> Manually update 'IP, Gateway & Hostname' in kickstart config file .. <<<"
echo "IP : $IPADDR"
echo "Gateway : $GW"
echo "Network Interface : $NETWORK_INTERFACE_NAME"
echo ""
echo "DONE!"
|
munishgaurav5/ks
|
U/kick_install.sh
|
Shell
|
gpl-3.0
| 2,891 |
#!/bin/bash
#$ -N Latvia
#$ -cwd -V
#$ -M [email protected]
#$ -m e
#$ -l mem_free=2G,h_vmem=2G
#$ -q parallel.q
#$ -pe openmpi 128
#$ -R y
# mpirun -np 128 R CMD BATCH latvia.R
# mpirun -np 128 R CMD BATCH latviapt1.R
mpirun -np 128 R CMD BATCH latvia_wc.R
mpirun -np 128 R CMD BATCH latviapt1_wc.R
|
beansrowning/Modelling
|
Current/Cluster/latvia.sh
|
Shell
|
gpl-3.0
| 313 |
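# Sum per-user %CPU and %RAM from a one-shot "top" for every account listed in
# /etc/trueuserdomains (cPanel), then sort the summary lines numerically by %RAM.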
for i in $(awk -F: '{print $2}' /etc/trueuserdomains); do top -bn 1 -u "$i"|awk '/^[0-9]/ {sumCpu +=$9; sumRam += $10;} END {if ((sumCpu > 0) || (sumRam > 0)) print "User '$i':\t\tTotal %RAM: ", sumRam, "\t\tTotal %CPU: ", sumCpu; else print "User '$i' is not showing resource usage";}'; done |sort -nk 5
|
patbeagan1/Scripts
|
v2/newscripts/awk_resource_script.sh
|
Shell
|
gpl-3.0
| 304 |
#!/bin/bash
# SSAFEM verification test I regression test
PISM_PATH=$1
MPIEXEC=$2
MPIEXEC_COMMAND="$MPIEXEC -n 2"
PISM_SOURCE_DIR=$3
EXT=""
if [ $# -ge 4 ] && [ "$4" == "-python" ]
then
PYTHONEXEC=$5
MPIEXEC_COMMAND="$MPIEXEC_COMMAND $PYTHONEXEC"
PYTHONPATH=${PISM_PATH}/site-packages:${PYTHONPATH}
PISM_PATH=${PISM_SOURCE_DIR}/examples/python/ssa_tests
EXT=".py"
fi
output=`mktemp pism-ssa-test-i.XXXX` || exit 1
set -e
OPTS="-verbose 1 -ssa_method fem -o_size none -Mx 5 -ksp_type cg"
# do stuff
$MPIEXEC_COMMAND $PISM_PATH/ssa_testi${EXT} -My 61 $OPTS > ${output}
$MPIEXEC_COMMAND $PISM_PATH/ssa_testi${EXT} -My 121 $OPTS >> ${output}
set +e
# Check results:
diff ${output} - <<END-OF-OUTPUT
NUMERICAL ERRORS in velocity relative to exact solution:
velocity : maxvector prcntavvec maxu maxv avu avv
16.2024 0.14888 16.2024 0.7544 1.1522 0.0513
NUM ERRORS DONE
NUMERICAL ERRORS in velocity relative to exact solution:
velocity : maxvector prcntavvec maxu maxv avu avv
4.2045 0.03669 4.2045 0.1967 0.2838 0.0134
NUM ERRORS DONE
END-OF-OUTPUT
if [ $? != 0 ];
then
cat ${output}
exit 1
fi
exit 0
|
pism/pism
|
test/regression/ssa/ssa_testi_fem.sh
|
Shell
|
gpl-3.0
| 1,235 |
#!/bin/bash
function blueEcho()
{
echo -ne '\e[0;34m'
echo $1
echo -ne '\e[0m'
}
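# Return the branch/version pinned for a repository in the workspace's
# .rosinstall file (prints nothing if no entry is found).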
function findCanonicalBranch()
{
dir=$1
rosinstalldir=$2
if [ -e "$rosinstalldir/.rosinstall" ]
then
name=$(basename $dir)
desiredBranch=$(perl -0 -ne 'print qq($1\n) if /\b'$name'\b.*\n.*version: ([^}]+)/mg' ${WORKSPACE_ROOT}/.rosinstall)
fi
echo $desiredBranch
}
function displayStatus()
{
old_d=`pwd`
dir=$1
desiredBranch=$2
cd $dir
if [ -z "$desiredBranch" ]
then
desiredBranch=$(git log --pretty='%d' -1 HEAD | perl -ne 'm#(?<=origin/)([^,]*)# && print "$1\n"')
fi
if [ -e "$dir/.git" ]
then
if [ "$(git rev-parse --abbrev-ref HEAD)" != "$desiredBranch" ] \
|| [ -n "$(git status --porcelain)" ] \
|| [ -n "$(git status | grep -P 'branch is (ahead|behind)')" ]
then
echo "$PWD :"
if [ "$(git rev-parse --abbrev-ref HEAD)" != "$desiredBranch" ]
then
git status | grep "On branch" | perl -pe "chomp"
echo -e " (should be on branch $desiredBranch)"
fi
git status | grep -P 'branch is (ahead|behind)'
git status | grep "modified"
git status | grep "new file"
git status | grep "deleted"
if [ -n "$(git status | grep 'Untracked files')" ]
then
git status --porcelain | grep '??' | sed -r 's/^.{3}//' \
| xargs -I file echo -e '\tuntracked: '"file"
fi
echo
fi
elif [ -e "$dir/.hg" ]; then
if [ "$(hg branch)" != "$desiredBranch" ] \
|| [ -n "$(hg status)" ]
then
echo "$PWD :"
echo "On hg branch `hg branch`"
hg status
hg incoming | grep "changes"
echo
fi
#else
#echo "$PWD is not a repository!"
#echo
fi
cd $old_d
}
cd ${WORKSPACE_ROOT}
blueEcho "Looking for changes in $PWD ..."
displayStatus $PWD
catkin_src=${ROS_WORKSPACE}
blueEcho "Looking for changes in ${catkin_src} ..."
for d in `find ${catkin_src} -mindepth 1 -maxdepth 3 -type d`;
do
branch=$(findCanonicalBranch $d ${catkin_src}/..)
displayStatus $d $branch
done
if [ -d $WORKSPACE_ROOT/rosinstall/optional/custom/.git ]; then
cd $WORKSPACE_ROOT/rosinstall/optional/custom
blueEcho "Looking for changes in $PWD ..."
displayStatus $PWD
fi
|
team-vigir/vigir_scripts
|
scripts/status.sh
|
Shell
|
gpl-3.0
| 2,256 |
#!/bin/bash
exec /usr/sbin/init
|
StephanX/centos-ssh-testbed
|
centos-systemd-docker/entrypoint.sh
|
Shell
|
gpl-3.0
| 32 |
#!/bin/bash
if [ ! -d ~/trash ]; then
mkdir -p ~/trash
fi
id=`date +%Y%m%d%H%M%S`
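# The timestamp suffix keeps repeated deletions of the same filename from colliding in ~/trash.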
argc=$#
for ((i=1; i<=${argc}; ++i))
do
filename=`echo "$1" | awk -F "/" '{if($NF=="")print $(NF-1); else print $NF;}'`
mv "$1" ~/trash/"${filename}.${id}"
shift
done
|
honglianglv/lib
|
shell/rmaliasmv.sh
|
Shell
|
gpl-3.0
| 296 |
#!/bin/sh
#Script that manages the password of the local user "ShiroKabuto".
#Prerequisites:
# 1. The local user ShiroKabuto must _already be created_ (linuxcaib.deb does it).
# 2. The user running the script must have root (uid=0) or sudo permissions.
# 3. The "whois" package (which provides the mkpasswd binary) and perl must be installed
#WARN: it would be nice to be able to check that the password is still valid even if the 7 days have not passed (in case Windows was booted)?
#IMPROVEMENT: SEE IF WE CAN SIMPLIFY THIS CODE BY USING USERMOD AND PASSWD -S !!!!
#IMPROVEMENT: also look into using "useradd -p passw"
#Check whether ShiroKabuto management has been disabled globally
if [ -r /etc/caib/linuxcaib/ShiroDisabled ];then
logger -t "linuxcaib-conf-shirokabuto($USER)" -s "ShiroKabuto management disabled"
fi
#Import the helper functions
#Base path of the scripts
BASEDIR=$(dirname $0)
if [ "$CAIBCONFUTILS" != "SI" ]; then
logger -t "linuxcaib-conf-shirokabuto($USER)" "CAIBCONFUTILS=$CAIBCONFUTILS Loading utilities from $BASEDIR/caib-conf-utils.sh"
. $BASEDIR/caib-conf-utils.sh
fi
fi
# Initialize our own variables:
output_file=""
show_caib_conf_shirokabuto_help () {
cat << EOF
The "${0##*/}" program updates the password of shirokabuto (local admin); a SEYCON connection user and password are required.
Usage: ${0##*/} [-cfhv] [-u USER] [-p PASSWORD]
-c takes the credentials from the "credentials" file. IMPORTANT: the user must be the SEYCON user.
-f force a password change
-h show this help
-u USER name of the user to use
-p PASSWORD password of the user to use
-v verbose mode
Examples:
${0##*/} -u u83511 -p password_u83511 Run passing user and password
${0##*/} -c Run using the credentials file
EOF
}
#End of functions
if [ "$USER" = "root" ]; then
logger -t "linuxcaib-conf-shirokabuto($USER)" -s "ERROR: cannot be run as root!"
exit 1;
fi
# A POSIX variable
OPTIND=1 # Reset in case getopts has been used previously in the shell.
# Initialize our own variables:
output_file=""
while getopts "hfcv?u:p:" opt; do
case "$opt" in
h|\?)
show_caib_conf_shirokabuto_help
exit 0
;;
c)
if [ "$seyconSessionUser" != "" ];then
USERNAME=$seyconSessionUser
PASSWORD=$seyconSessionPassword
else
#As a fallback, try to take the username and password from the credentials file in the user's home directory.
USERNAME=$(grep -i "^username=" $HOME/credentials | tr -d '\r'| tr -d '\n'| cut -f 2 -d "=" --output-delimiter=" ")
PASSWORD=$(grep -i "^password=" $HOME/credentials | tr -d '\r'| tr -d '\n'| cut -f 2 -d "=" --output-delimiter=" ")
fi
;;
f) FORCE_CHANGE_SHIRO_PASS="s"
;;
v) DEBUG=$(($DEBUG + 1))
;;
u) USERNAME="$OPTARG"
;;
p) PASSWORD="$OPTARG"
;;
esac
done
shift $((OPTIND-1))
[ "$1" = "--" ] && shift
[ "$DEBUG" -gt "0" ] && logger -t "linuxcaib-conf-shirokabuto($USER)" -s "seyconSessionUser=$seyconSessionUser"
if [ "$DEBUG" -ge 3 ]; then
# trace output
set -x
fi
if [ -z "$USERNAME" ] || [ -z "$PASSWORD" ]
then
#If we do NOT have a user and password we cannot update ShiroKabuto's password
logger -t "linuxcaib-conf-shirokabuto($USER)" -s "ERROR: A user and password are needed to update ShiroKabuto's password" >&2
show_caib_conf_shirokabuto_help
exit 1
fi
#If DEBUG is not defined, define it
if [ -z $DEBUG ]; then DEBUG=0; fi
if [ "$DEBUG" -ge 3 ]; then
# trace output
set -x
fi
if [ "$(echo $(hostname) | sed -e 's/\(^.*\)\(.$\)/\2/')" = "l" ];then
#Machine name ends in "l"; assume it is an alias.
#On aliases we cannot update ShiroKabuto's password
logger -t "linuxcaib-conf-shirokabuto" "INFO: ShiroKabuto password NOT updated, this machine is an alias"
echo "# ShiroKabuto password NOT updated, this machine is an alias";sleep 3;
exit 0;
fi
#Update ShiroKabuto's password
#If the saved hostname file does not exist, or it differs from the current hostname, we must force a password change.
if [ "$SHIRO_HOSTNAME" != "$(hostname)" -o "$SHIRO_HOSTNAME" = "" ];then
FORCE_CHANGE_SHIRO_PASS="s";
[ "$DEBUG" -gt "0" ] && logger -t "linuxcaib-conf-shirokabuto" "DEBUG: hostname changed or first run, updating ShiroKabuto password to seycon forced"
fi
# If ShiroKabuto's password is more than 7 days old, change it.
CURRENT_EPOCH=`grep ShiroKabuto /etc/shadow | cut -d: -f3`
# Find the epoch time since the user's password was last changed
#WARN: note: if we do not want to use perl, we have to use ksh + the gymd2uday script (scriptss_temps_shell.sh)
#http://stackoverflow.com/questions/1094291/get-current-date-in-epoch-from-unix-shell-script
EPOCH=`perl -e 'print int(time/(60*60*24))'`
AGE=`echo $EPOCH - $CURRENT_EPOCH | bc`
logger -t "linuxcaib-conf-shirokabuto" "INFO: ShiroKabuto's password age $AGE FORCE_CHANGE_SHIRO_PASS=$FORCE_CHANGE_SHIRO_PASS"
OLD_SHIRO_PASS=$(grep ShiroKabuto /etc/shadow | cut -d: -f2)
#If the password is more than 7 days old or there is NO password set, set a new one
if [ $AGE -gt 7 ] || [ "$OLD_SHIRO_PASS" = "\!" ] || [ "$OLD_SHIRO_PASS" = "" ] || [ "$FORCE_CHANGE_SHIRO_PASS" = "s" ];then
#7-character password (pwgen -B avoids characters that are easily confused)
NEW_SHIRO_PASS=$(pwgen -N 1 -B -s 7)
[ "$DEBUG" -gt "0" ] && logger -t "linuxcaib-conf-shirokabuto" "DEBUG: updating password to seycon... =wget ... \"https://$SEYCON_SERVER:$SEYCON_PORT/sethostadmin?host=$(hostname)&user=ShiroKabuto&pass=$NEW_SHIRO_PASS\""
UPDATE_SHIRO_PASSWORD_ANSWER=$(wget -O - -q --http-user=$USERNAME --http-password=$PASSWORD --no-check-certificate "https://$SEYCON_SERVER:$SEYCON_PORT/sethostadmin?host=$(hostname)&user=ShiroKabuto&pass=$NEW_SHIRO_PASS" )
RESULTAT=$(echo $UPDATE_SHIRO_PASSWORD_ANSWER | cut -d\| -f1)
[ "$DEBUG" -gt "0" ] && logger -t "linuxcaib-conf-shirokabuto" "DEBUG: updating password to seycon, server response =$RESULTAT"
if [ "$RESULTAT" = "OK" ];then
[ "$DEBUG" -gt "0" ] && logger -t "linuxcaib-conf-shirokabuto" "INFO: ShiroKabuto's password changed successfully on machine $(echo $UPDATE_SHIRO_PASSWORD_ANSWER | cut -d "|" -f2) "
#Encrypt the new password with SHA-512 for shadow (available from glibc 2.7)
NEW_SHIRO_PASS=$(echo $NEW_SHIRO_PASS | mkpasswd -m sha-512 -s)
[ "$DEBUG" -gt "0" ] && logger -t "linuxcaib-conf-shirokabuto" "New encrypted ShiroKabuto password $NEW_SHIRO_PASS"
NEW_SHIRO_PASS_SEDESCAPED=$(echo $NEW_SHIRO_PASS| sed -e 's/[]\/$*.^|[]/\\&/g')
OLD_SHIRO_PASS_SEDESCAPED=$(echo $OLD_SHIRO_PASS| sed -e 's/[]\/$*.^|[]/\\&/g')
#Back up the shadow file, just in case.
[ "$DEBUG" -gt "0" ] && logger -t "linuxcaib-conf-shirokabuto" "DEBUG: Backing up shadow file to shadow_linuxcaib"
cp /etc/shadow /etc/shadow_linuxcaib
if [ "$OLD_SHIRO_PASS" = "" ];then
/bin/sed -i "s/ShiroKabuto::/ShiroKabuto:$NEW_SHIRO_PASS_SEDESCAPED:/g" /etc/shadow
else
/bin/sed -i "s/ShiroKabuto:$OLD_SHIRO_PASS_SEDESCAPED/ShiroKabuto:$NEW_SHIRO_PASS_SEDESCAPED/g" /etc/shadow
fi
#Check that we have not broken anything. Otherwise restore the previous /etc/shadow.
if (! pwck -q -r /etc/shadow );then
#We broke shadow; recover it from the backup
cp /etc/shadow_linuxcaib /etc/shadow
logger -t "linuxcaib-conf-shirokabuto" "ERROR: Could not change ShiroKabuto's LOCAL password; restoring the previous password"
wget -O - -q --http-user=$USERNAME --http-password=$PASSWORD --no-check-certificate "https://$SEYCON_SERVER:$SEYCON_PORT/sethostadmin?host=$(hostname)&user=ShiroKabuto&pass=$OLD_SHIRO_PASS"
fi
[ "$DEBUG" -gt "0" ] && logger -t "linuxcaib-conf-shirokabuto" "DEBUG: ShiroKabuto password synced"
#Make the password never expire.
chage -E -1 ShiroKabuto
[ "$DEBUG" -gt "0" ] && logger -t "linuxcaib-conf-shirokabuto" -s "Password change date: $(date +%F)"
chage -d $(date +%F) ShiroKabuto #Date format yyyy-mm-dd
#If everything is OK, update the hostname associated with ShiroKabuto
echo $(hostname) | tee /etc/caib/linuxcaib/ShiroHostname > /dev/null
[ "$DEBUG" -gt "0" ] && logger -t "linuxcaib-conf-shirokabuto" "Updated /etc/caib/linuxcaib/ShiroHostname with the current hostname associated with ShiroKabuto ($(hostname))"
else
logger -t "linuxcaib-conf-shirokabuto" "ERROR: ShiroKabuto's password could not be changed. Error: $UPDATE_SHIRO_PASSWORD_ANSWER"
exit 1;
fi
else
logger -t "linuxcaib-conf-shirokabuto" "INFO: ShiroKabuto's password does not need to be updated"
fi
exit 0;
|
annonygmouse/linuxcaib
|
linuxcaib.deb/linuxcaib/opt/caib/linuxcaib/caib-conf-shirokabuto.sh
|
Shell
|
gpl-3.0
| 9,434 |
#!/usr/bin/env bash
set -eou pipefail
set -x
TMPDIR=$PWD/
TMP=$(mktemp -d ${TMPDIR:-/tmp/}$(basename -- "$0").XXXXXXXXXX)
ARCH="x86_64"
PLATFORM="ubuntu14"
RENCI_RELEASE_URL="ftp://ftp.renci.org"
IRODS_VERSION="4.1.10"
IRODS_DEV=irods-dev-${IRODS_VERSION}-${PLATFORM}-${ARCH}.deb
IRODS_DEV_SHA256="62980d2bb222f314e10fc9f7f80fd7dca4b235988e72da017d8374f250170804"
IRODS_ICOMMANDS=irods-icommands-${IRODS_VERSION}-${PLATFORM}-${ARCH}.deb
IRODS_ICOMMANDS_SHA256="4f42477b32ae4a088dba9778e068b156e9a0db5675379c8b9f88254c51378cdb"
trap cleanup EXIT INT TERM
cleanup() {
rm -rf "$TMP"
}
download_irods() {
for pkg in "$IRODS_DEV" "$IRODS_ICOMMANDS"; do
curl -sSL -o "$pkg" "${RENCI_RELEASE_URL}/pub/irods/releases/${IRODS_VERSION}/${PLATFORM}/$pkg"
done
}
verify_irods_packages() {
echo "$IRODS_DEV_SHA256 *$IRODS_DEV" | sha256sum -c -
echo "$IRODS_ICOMMANDS_SHA256 *$IRODS_ICOMMANDS" | sha256sum -c -
}
install_irods() {
sudo dpkg -i "$IRODS_DEV" "$IRODS_ICOMMANDS"
}
sudo apt-get install -y libssl-dev
pushd "$TMP"
download_irods
verify_irods_packages
install_irods
popd
|
keithj/npg_irods
|
scripts/minion/install_irods.sh
|
Shell
|
gpl-3.0
| 1,114 |
#!/bin/bash
. /opt/ownsec/ITSEC-Install-Scripts-ORIG/001.functions/all-scripts.sh
GITREPO=https://github.com/HiroshiManRise/anonym8
BRANCH=master
GITREPOROOT=/opt/ITSEC/8.Tunnel/anonym8/HiroshiManRise/anonym8
GITCONFDIR=/opt/ITSEC/8.Tunnel/anonym8/HiroshiManRise/anonym8/.git
GITCLONEDIR=/opt/ITSEC/8.Tunnel/anonym8/HiroshiManRise
DSKTPFLSDEST=/home/$USER/.local/share/applications/8.Tunnel/1.Pivot/anonym8
DSKTPFL=anonym8.desktop
APTLSTDIR=/opt/ownsec/ITSEC-Install-Scripts-ORIG/8.Tunnel/1.Pivot/anonym8
echo "${bold}
_ _ _ ___ _ ___ ____ __ ___
/ \ | \ | |/ _ \| \ | \ \ / / \/ |( _ )
/ _ \ | \| | | | | \| |\ V /| |\/| |/ _ \
/ ___ \| |\ | |_| | |\ | | | | | | | (_) |
/_/ \_\_| \_|\___/|_| \_| |_| |_| |_|\___/
UPDATE
${normal}"
GITUPTODATE
if git checkout $BRANCH &&
git fetch origin $BRANCH &&
[ `git rev-list HEAD...origin/$BRANCH --count` != 0 ] &&
git merge origin/$BRANCH
then
GITCLONEFUNC
GITRESET
GITSBMDLINIT
chmod +x ./INSTALL.sh
#sudo ./INSTALL.sh
echo "${bold}
UPDATED
${normal}"
else
echo "${bold}
UP TO DATE
${normal}"
fi
|
alphaaurigae/ITSEC-Install-Scripts
|
ITSEC-Install-Scripts-ORIG/8.Tunnel/anonym8/#anonym8-update.sh
|
Shell
|
gpl-3.0
| 1,117 |
#!/bin/sh
#
# Copyright (C) 2003-2021 Sébastien Helleu <flashcode@flashtux.org>
#
# This file is part of WeeChat, the extensible chat client.
#
# WeeChat is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# WeeChat is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with WeeChat. If not, see <https://www.gnu.org/licenses/>.
#
#
# Updates git version in config-git.h if the output of "git describe" has changed.
#
# Syntax:
# git-version.sh <rootdir> <version> <headerfile>
#
# rootdir : root directory with WeeChat files (to search .git/ directory)
# version : WeeChat version, for example 0.3.9 or 0.4.0-dev
# headerfile: file to update, for example config-git.h
#
if [ $# -lt 3 ]; then
echo "Syntax: $0 <rootdir> <version> <headerfile>"
exit 1
fi
ROOTDIR=$1
VERSION=$2
HEADERFILE=$3
# debug:
#echo "pwd=$PWD, rootdir=$ROOTDIR, version=$VERSION, headerfile=$HEADERFILE"
# read git version if we are in a devel/rc version and if we are in a repository
GIT_VERSION=""
case ${VERSION} in
*-*)
# devel/rc version (like 0.4.0-dev or 0.4.0-rc1)
if [ -d "${ROOTDIR}/.git" ]; then
GIT_VERSION=$(cd "${ROOTDIR}" && git describe 2>/dev/null)
fi
;;
*)
# stable version => no git version
;;
esac
# check if git version has changed
if [ ! -f "${HEADERFILE}" ]; then
# header does not exist => create it
echo "Creating file ${HEADERFILE} with git version: \"${GIT_VERSION}\""
echo "#define PACKAGE_VERSION_GIT \"${GIT_VERSION}\"" >"${HEADERFILE}"
else
if grep -q "#define PACKAGE_VERSION_GIT \"${GIT_VERSION}\"" "${HEADERFILE}"; then
# git version matches the file => NO update
echo "File ${HEADERFILE} is up-to-date (git version: \"${GIT_VERSION}\")"
else
# git version not found in file => update file with this git version
echo "Updating file ${HEADERFILE} with git version: \"${GIT_VERSION}\""
sed "s/#define PACKAGE_VERSION_GIT \".*\"/#define PACKAGE_VERSION_GIT \"${GIT_VERSION}\"/" "${HEADERFILE}" >"${HEADERFILE}.tmp"
mv -f "${HEADERFILE}.tmp" "${HEADERFILE}"
fi
fi
|
talisein/weechat-1
|
tools/git-version.sh
|
Shell
|
gpl-3.0
| 2,545 |
#!/bin/sh
# visual path
VPATH="~/.zekyll/zekyll"
# pre path
PPATH="$HOME/.zekyll"
# actual name
ANAME="zekyll"
# actual path
APATH="$PPATH/$ANAME"
if ! type git 2>/dev/null 1>&2; then
echo "Please install GIT first"
echo "Exiting"
exit 1
fi
if ! test -d "$PPATH"; then
mkdir "$PPATH"
fi
if test -d "$APATH"; then
echo ">>> Updating zekyll (found in $VPATH)"
cd "$APATH"
git pull origin master
else
echo ">>> Downloading zekyll to ${VPATH}.."
cd "$PPATH"
git clone https://github.com/psprint/zekyll.git "$ANAME"
fi
echo ">>> Done"
if ! test -x "$HOME/$ANAME"; then
echo
echo "Creating $ANAME binary in $HOME"
cp -v "$APATH/zekyll-wrapper" "$HOME/$ANAME"
chmod +x "$HOME/$ANAME"
ls -l "$HOME/$ANAME"
echo ">>> Done"
fi
|
psprint/zekyll
|
install.sh
|
Shell
|
gpl-3.0
| 787 |
#!/bin/bash
# Install the required packages
sudo apt-get install -y python-virtualenv
sudo apt-get install -y git
sudo apt-get install -y python-dev
sudo apt-get install -y libxml2-dev libxslt-dev
# Environment variable needed for twitter
export LC_ALL=C
# Create the Python virtual environment
cd ~
mkdir env
cd env
virtualenv --distribute env1
cd env1/
source bin/activate
# Install the required Python packages
# inside the virtual environment
pip install web.py
pip install mako
pip install pymongo
pip install lxml
pip install tweepy
# Fetch the application from the repository
git clone https://github.com/fllodrab/Python.git
cd Python
# Run the web.py web server
python code.py
|
fllodrab/Practica3
|
script/configurationScript.sh
|
Shell
|
gpl-3.0
| 721 |
#!/bin/sh
chgrp root /etc/shadow
|
Taywee/dod-stig-scripts
|
linux/V-38503/fix.sh
|
Shell
|
gpl-3.0
| 34 |
#!/bin/bash
echo "TRANSLATING THE BAD TEST SET"
"$HOME"/mosesdecoder/bin/moses \
--config mert-work/moses.ini \
--threads 1 \
< bad-testset.source \
> OUTPUT
## Requires sacrebleu, which you can get by going:
## sudo pip3 install sacrebleu
## https://github.com/awslabs/sockeye/tree/master/contrib/sacrebleu
echo "CALCULATING THE BAD BLEU SCORE"
sacrebleu ./bad-testset.target < OUTPUT
|
alexrudnick/terere
|
moses-models/bible-es-gn/translate_testset.sh
|
Shell
|
gpl-3.0
| 397 |
#!/bin/sh
echo "Removing user file"
rm *.pro.user
echo "Creating profile executable"
qmake -config debug
make clean
make
echo "Removing makefile"
rm Makefile
echo "Removing object files"
rm *.o
echo "Start the application"
./gtst
echo "Analyse the gprof results"
gprof gtst > gprof.txt
echo "Remove temporary gprof file"
rm gmon.out
|
richelbilderbeek/GTST
|
profile.sh
|
Shell
|
gpl-3.0
| 340 |
#!/bin/bash
### BEGIN INIT INFO
# Provides: firelamb
# Required-Start: $remote_fs $syslog
# Required-Stop: $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Control firelamb
# Description: Control firelamb
### END INIT INFO
## Adjust these
upstream=eth0
phy=wlan1
conf=/home/pi/Kali-Pi/Menus/mana/hostapd-mana.conf
hostapd=/usr/lib/mana-toolkit/hostapd
cmdline="${hostapd} ${conf}"
## End adjustment
PROGLONG=$(realpath $0)
PROGSHORT=$(basename ${PROGLONG})
PIDFILE="/var/run/"${PIDFILE:-"${PROGSHORT}.pid"}
usage() {
echo "Usage: `basename $0` {start|stop|restart|status}" >&2
}
# Did we get an argument?
if [[ -z "${1}" ]]; then
usage
exit 1
fi
# Get the PID from PIDFILE if we don't have one yet.
if [[ -z "${PID}" && -e ${PIDFILE} ]]; then
PID=$(cat ${PIDFILE});
fi
start() {
echo "--------------------------------"
echo " STARTING $PROGSHORT"
echo "--------------------------------"
service network-manager stop
rfkill unblock wlan
ifconfig $phy up
sed -i "s/^interface=.*$/interface=$phy/" $conf
$cmdline & echo $! > ${PIDFILE}
sleep 5
ifconfig $phy 10.0.0.1 netmask 255.255.255.0
route add -net 10.0.0.0 netmask 255.255.255.0 gw 10.0.0.1
dnsmasq -z -C /etc/mana-toolkit/dnsmasq-dhcpd.conf -i $phy -I lo
echo '1' > /proc/sys/net/ipv4/ip_forward
iptables --policy INPUT ACCEPT
iptables --policy FORWARD ACCEPT
iptables --policy OUTPUT ACCEPT
iptables -F
iptables -t nat -F
iptables -t nat -A POSTROUTING -o $upstream -j MASQUERADE
iptables -A FORWARD -i $phy -o $upstream -j ACCEPT
}
stop() {
echo "--------------------------------"
echo " STOPPING $PROGSHORT"
echo "--------------------------------"
if [[ -z "${PID}" ]]; then
echo "${PROGSHORT} is not running (missing PID)."
elif [[ -e /proc/${PID}/cmdline && "`tr -d '\0' < /proc/${PID}/cmdline`" == *"$( echo -e "${cmdline}" | tr -d '[:space:]')"* ]]; then
pkill dnsmasq
pkill sslstrip
pkill sslsplit
pkill hostapd
pkill python
iptables -t nat -F
else
echo "${PROGSHORT} is not running (tested PID: ${PID})."
fi
}
case "$1" in
start)
start
;;
stop)
stop
;;
restart)
stop
start
;;
force-reload)
;;
status)
# LSB convention: exit 0 when running, non-zero when not
if [[ -z "${PID}" ]]; then
echo "${PROGSHORT} is not running (missing PID)."
exit 3
elif [[ -e /proc/${PID}/cmdline && "`tr -d '\0' < /proc/${PID}/cmdline`" == *"$( echo -e ${cmdline} | tr -d '[:space:]')"* ]]; then
echo "${PROGSHORT} is running (PID: ${PID})."
exit 0
else
echo "${PROGSHORT} is not running (tested PID: ${PID})."
exit 3
fi
;;
*)
echo "Usage: $0 {start|stop|status|restart}"
esac
|
Re4son/Kali-Pi
|
Menus/mana/kalipi-nat-simple.sh
|
Shell
|
gpl-3.0
| 3,052 |
#!/bin/bash
# This shell script is for formatting Goodix config. After that, we can copy the formatted
# code to *config.h file
####################################
# 2017-08-24 first version
####################################
#set -x
inputfile=$1
outputfile=$1.bak
show_usage()
{
printf "\nUsage: gtp.sh [config_file]\n\n"
printf "
full path or relative path will ok,
outfile will be generated in current dir,
default name is 'config_file.bak'\n"
}
if [ $# != 1 ]; then
show_usage;
exit 1;
fi
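# The awk below reflows the single comma-separated config line into 24 rows
# of 10 values, each row ending in " \" so it can be pasted into a C macro.
# Illustrative transformation (values made up):
#   in:  0x41,0xD0,0x02,0x00,0x05,0x3C,...
#   out: 0x41,0xD0,0x02,0x00,0x05,0x3C,0x22,0x0A,0x6E,0x00, \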
/usr/bin/awk -F , '{ for(i=0; i<24; i++) {for(j=1;j<11;j++) printf("%s,", $(i*10+j)); printf(" \\\n");} }' $inputfile > $outputfile
/bin/sed -i 's/,,//' $outputfile
printf "Format done\n"
|
11hwu2/script_tools
|
work/gtp.sh
|
Shell
|
gpl-3.0
| 698 |
#!/bin/bash
# instructions based on http://support.amd.com/en-us/kb-articles/Pages/AMDGPU-PRO-Install.aspx
# they claim a reboot will be needed to launch with the new graphics stack
sudo apt update
sudo apt dist-upgrade
wget -P /tmp/ 'https://www2.ati.com/drivers/linux/ubuntu/amdgpu-pro-17.10-414273.tar.xz' # -P saves into /tmp (-O expects a file name, not a directory)
cd /tmp
tar -Jxvf amdgpu-pro-17.10-414273.tar.xz
cd amdgpu-pro-17.10-414273
./amdgpu-pro-install -y
# may need to reboot here
#check if in "video" group
# groups
# add to video group?
# sudo usermod -a -G video $LOGNAME
# You will need to log out and in again to activate this change.
# Uninstalling the AMD GPU-PRO Driver
#If for any reason you wish to remove the AMDGPU-PRO graphics stack you can do this using the uninstallation script which was part of the installation and is present in your path. From the command prompt enter the following command:
#amdgpu-pro-uninstall
|
mkantzer/HomeLinux
|
subscripts/gpudriver.sh
|
Shell
|
gpl-3.0
| 895 |
#!/bin/bash
set -e
## Zero Resource Speech Challenge 2017
## CREATION OF CACHE FILES FOR ZRTOOLS
## build a cache with files that will be used by the plebdisc algorithm.
## It uses sox to preprocess the files the wav files: reduce the volume,
## cut the files and setting the same sampling rate for all the files.
## the input S is the size of the random projection in ZRTools; this
## script currently supports only S=64
S=$1
DIM=39
if [ -z "$S" ]; then
echo "USAGE: " $0 " VALUE [VALUE=32,64]";
exit 1;
fi
if [ "$S" != "64" ]; then
echo "USAGE: " $0 " VALUE [VALUE=64]";
exit 1;
fi
# test that the required commands are available
for cmd in "sox" "feacalc" "standfeat" "lsh" ; do
printf "%-10s" "$cmd"
if hash "$cmd" 2>/dev/null; then
printf "OK\n";
else
printf "missing\n";
exit 1;
fi
done
# directory with the VAD for each wav file
VAD_DIR=./vad
CACHE=./cache
LOC_TMP=$(pwd)
# output dirs
mkdir -p ${CACHE}/D${DIM}S${S}/feat
mkdir -p ${CACHE}/D${DIM}S${S}/lsh
mkdir -p ${CACHE}/D${DIM}S${S}/wav
tempdir=$(mktemp -d --tmpdir=${LOC_TMP});
echo "### " $tempdir
# the random proj fie
genproj -D $DIM -S $S -seed 1 -projfile ${CACHE}/proj_S${S}xD${DIM}_seed1
# trim 10 mins of waveforms
trim="" #trim="trim 0 10:00"
# addapted from from ZRTools/script/generate_plp_lsh
function p_norm() {
file_=$1;
id=$(basename $file_ .wav);
# Get audio into a 16-bit 8kHz wav file
out_wav=${tempdir}/${id}.wav
echo ">>>>>> doing sox";
sox -v 0.8 -t wav $file_ -t wav -e signed-integer \
-b 16 -c 1 -r 8000 $out_wav $trim;
### Generate 39-d PLP (13 cc's + delta + d-deltas using ICSI feacalc)
echo ">>>>>> doing feacalc";
feacalc -plp 12 -cep 13 -dom cep -deltaorder 2 \
-dither -frqaxis bark -samplerate 8000 -win 25 \
-step 10 -ip MSWAVE -rasta false -compress true \
-op swappedraw -o ${tempdir}/${id}.binary \
${tempdir}/${id}.wav;
echo ">>>>>> doing standfeat";
standfeat -D $DIM -infile ${tempdir}/${id}.binary \
-outfile ${tempdir}/${id}.std.binary \
-vadfile ${VAD_DIR}/${id}
echo ">>>>>> doing lsh";
lsh -D $DIM -S $S -projfile ${CACHE}/proj_S${S}xD${DIM}_seed1 \
-featfile ${tempdir}/${id}.std.binary \
-sigfile ${tempdir}/${id}.std.lsh64 -vadfile ${VAD_DIR}/${id}
cp -rf ${tempdir}/${id}.std.lsh64 ${CACHE}/D${DIM}S${S}/lsh
cp -rf ${tempdir}/${id}.std.binary ${CACHE}/D${DIM}S${S}/feat
}
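# Illustrative call (file name made up): "p_norm data/wavs/S01_talk.wav"
# would leave S01_talk.std.binary under ${CACHE}/D39S64/feat and
# S01_talk.std.lsh64 under ${CACHE}/D39S64/lsh.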
# do all the file
for i in $(cat french.lst); do p_norm $i; done
rm -rf $tempdir
exit 0;
|
bootphon/zerospeech2017
|
track2/baseline/baseline_french/data/make_french_cache.sh
|
Shell
|
gpl-3.0
| 2,683 |
#!/bin/bash
declare -a TESTS
TESTS[0]="arithmetic"
for i in "${TESTS[@]}"
do
echo "Testing $i..."
rm examples/tests/$i.sf.*
./sf examples/tests/$i.sf &> /dev/null
diff "examples/tests/$i.tp" "examples/tests/$i.sf.ps2"
diff "examples/tests/$i.ti" "examples/tests/$i.sf.icg"
done
|
jeffreysanti/StaticFunc
|
runtests.sh
|
Shell
|
gpl-3.0
| 292 |
#!/bin/bash
# include the conf
source pocketMoneyContract.conf;
#call the jar
$JAVA -DEthereumFacadeProvider=$INSTANCE -Drpc-url=$RPC_URL -Dchain-id=$NET -DapiKey=$INFRA_API_KEY -jar $JAR "$@" -sk $KEYFILE -sp $PASS
|
UrsZeidler/smart-contract-examples
|
pocketMoneyContract/etc/pocketMoneyContract.sh
|
Shell
|
gpl-3.0
| 222 |
### alsa
#csound -m 96 -realtime -+rtaudio=alsa -l -o dac:hw:${2},0 -Mhw:${1},0,0 xtouch_poscil.orc xtouch_poscil.sco
### jack
# csound -realtime -+rtmidi=alsa -M hw:${1},0,0 -+rtaudio=jack -+jack_client=xtouch -b 1000 -o dac -i adc xtouch_poscil.orc xtouch_poscil.sco
### soundfile
csound -d -realtime -+rtmidi=alsa -M hw:${1},0,0 -+rtaudio=jack -b 1024 -i used_cars.wav -o dac xtouch_poscil.orc xtouch_poscil.sco
|
khirai/xtouch
|
xtouch_poscil.sh
|
Shell
|
gpl-3.0
| 428 |
#! /bin/sh
MP_PID=$(ps -wx -o pid,command | grep MarcoPolo | grep MacOS | awk '{ print $1 }')
echo "Monitoring pid $MP_PID"
SLEEP_INTERVAL=10
last_vsize=-1
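# Illustrative output (numbers made up): the first sample prints only the
# sizes; later samples append the gain and per-second rate, e.g.
#   RSIZE/VSIZE: 24580/91240 KB (gain: 128/256 ==> 12/25 KB/s)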
while true ; do
rsize=$(ps -p $MP_PID -o rsz | tail -1 | tr -d ' ')
vsize=$(ps -p $MP_PID -o vsize | tail -1 | tr -d ' ')
printf "RSIZE/VSIZE: $rsize/$vsize KB"
if [ $last_vsize -eq -1 ]; then
echo
else
r_gain=$(($rsize - $last_rsize))
r_rate=$(($r_gain / $SLEEP_INTERVAL))
v_gain=$(($vsize - $last_vsize))
v_rate=$(($v_gain / $SLEEP_INTERVAL))
echo " (gain: $r_gain/$v_gain ==> $r_rate/$v_rate KB/s)"
fi
last_rsize=$rsize
last_vsize=$vsize
sleep $SLEEP_INTERVAL
done
|
nriley/MarcoPolo
|
scripts/vsize-monitor.sh
|
Shell
|
gpl-3.0
| 650 |
#!/bin/sh
# Use environment variable to change the prefix,
# by default it's going to be stored into the:
# /opt/imgurDownloader
PREFIX=${PREFIX:-/opt/imgurDownloader}
FILE=imgurDownloader.sh
BATCHFILE=imgurListDownloader.sh
EXAMPLEFILE=imgurSpace.sh
VIEWFILE=imgurView.sh
BUFFERFILE=imgurBufferView.sh
echo
echo -------- Dependencies: compiling and installing jshon -----------
echo
if hash jshon 2>/dev/null; then
echo "Looks 'jshon' is already present, skipping installation"
else
echo "Installing build depedencies (Ubuntu/Debian only):"
sudo apt-get install git libjansson-dev # Getting dependancies of dependancies
echo
echo "Fetching the source"
git clone [email protected]:keenerd/jshon.git # Getting source code
echo
echo "Complining the source"
mkdir jshon > /dev/null 2>&1
cd jshon
make
echo
echo Jshon compiled, trying to install it:
sudo make install
cd ..
rm -rf jshon/ # Removing the jshon source directory
fi
echo
echo ------------------ installing imgurDownloader -------------------
echo
# Making all executable
chmod a+x $FILE $BATCHFILE $EXAMPLEFILE $VIEWFILE $BUFFERFILE batchExample/_runBatchExample.sh
# Copy the file to the target
sudo -s -- "mkdir -p $PREFIX && cp $FILE $PREFIX/$FILE && cp $BATCHFILE $PREFIX/$BATCHFILE && cp $VIEWFILE $PREFIX/$VIEWFILE && cp $BUFFERFILE $PREFIX/$BUFFERFILE"
echo
echo "Creating symbolic link: ~/bin/$FILE"
mkdir ~/bin 1>/dev/null 2>&1
ln -s $PREFIX/$FILE ~/bin/$FILE 1>/dev/null 2>&1
echo "Creating symbolic link: ~/bin/$BATCHFILE"
ln -s $PREFIX/$BATCHFILE ~/bin/$BATCHFILE 1>/dev/null 2>&1
echo "Creating symbolic link: ~/bin/$VIEWFILE"
ln -s $PREFIX/$VIEWFILE ~/bin/$VIEWFILE 1>/dev/null 2>&1
echo "Creating symbolic link: ~/bin/$BUFFERFILE"
ln -s $PREFIX/$BUFFERFILE ~/bin/$BUFFERFILE 1>/dev/null 2>&1
echo "Copying example: ~/bin/$EXAMPLEFILE"
cp $EXAMPLEFILE ~/bin/$EXAMPLEFILE 1>/dev/null 2>&1
echo
echo "Warning!"
echo "If you have not ~/bin in your path these files will not be found anyway, you need to add ~/bin to your PATH enviroment variable"
echo
|
truhlikfredy/imgurDownloader2015
|
install.sh
|
Shell
|
gpl-3.0
| 2,075 |
#!/bin/bash
export LIBVIRT_DEFAULT_URI="qemu:///system"
export VM_TYPES="controller,compute1,compute2,network1"
. $(dirname $0)/include/tmppath.rc
. $(dirname $0)/include/virtpwn.rc
. $(dirname $0)/include/pckstck.sh
function usage()
{
cat << EOF
Usage:
-h|--help
-k|--keep Keep the created VMS, don't stop and delete them.
-b|--packstack-branch <branch> Sets specific packstack branch to clone. Default: master
-g|--packstack-git <url> Sets packstack git url to clone.
Default: https://github.com/stackforge/packstack.git
-m|--opm-branch <branch> Sets specific opm branch to clone. Default: branch set in packstack setup.py
-o|--opm-git <url> Sets opm git url to clone in setup.py. Default: url set in packstack setup.py
-r|--repo <url> Installs repo rpm from specified url. If unspecified no repo is used.
Default: https://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm
-n|--source-vms <vms> Comma separated list of VMs to use for clonning.
All tests run for each source VM.
-d|--deploy <allinone,multi> Comma separated list of deployment types to use. Multi node deployment will do
Controller Network and 2*Compute. Default: allinone
-p|--packstack-options <o=v;.> Semicollon separated packstack config options which will be set to packstack
config file with sed.
-x|--extra-node Adds extra node to multinode deployment, if
you need to pass config option with IP of
this node use 'CONFIG=MAGIC_NODE'.
EOF
}
function echoerr()
{
cat <<< "$@" 1>&2
}
SHORTOPTS="htz6kxb::g::m::o::r::n:d::p:"
LONGOPTS="help,extra-node,packstack-branch::,packstack-git::,opm-branch::,opm-git::,repo::,source-vms:,keep,dns,test,ipv6,deploy::,packstack-options:"
PROGNAME=${0##*/}
ARGS=$(getopt -s bash --options $SHORTOPTS \
--longoptions $LONGOPTS -n $PROGNAME -- "$@" )
# default options
PACKSTACK_BRANCH='master'
PACKSTACK_GIT='https://github.com/stackforge/packstack.git'
KEEP_VMS=false
RALLY=false
TEST=false
USE_DNS=false
DEPLOY='allinone'
export IPV6=false
# options that can be empty
REPO=""
PACKSTACK_OPTIONS=""
OPM_BRANCH=""
OPM_GIT=""
# required options
VMS=""
# eval breaks parsing spaces before optional args
#eval set -- "$ARGS"
echo
while true; do
case "${1}" in
-h|--help)
usage
exit 0
;;
-b|--packstack-branch)
case "${2}" in
""|-*)
echoerr "No branch specified. Using branch ${PACKSTACK_BRANCH}!"
;;
*) PACKSTACK_BRANCH="${2}" ; shift ;;
esac
;;
-g|--packstack-git)
case "${2}" in
""|-*)
echoerr "No packstack git url specified. Using ${PACKSTACK_GIT}!"
;;
*) PACKSTACK_GIT="${2}" ; shift ;;
esac
;;
-m|--opm-branch)
case "${2}" in
""|-*)
echoerr "No branch specified. Using branch from setup.py!"
;;
*) OPM_BRANCH="${2}" ; shift ;;
esac
;;
-o|--opm-git)
case "${2}" in
""|-*)
echoerr "No omp git url specified. Using url from setup.py!"
;;
*) OPM_GIT="${2}" ; shift ;;
esac
;;
-r|--repo)
case "${2}" in
""|-*)
REPO='https://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm'
echoerr "No repo rpm url specified. Using ${REPO}!"
;;
*) REPO="${2}" ; shift ;;
esac
;;
-x|--extra-node)
VM_TYPES="${VM_TYPES},magic"
;;
-6|--ipv6)
export IPV6=true
;;
-R|--rally)
export RALLY=true
;;
-t|--test)
export TEST=true
;;
-z|--dns)
export USE_DNS=true
;;
-k|--keep)
export KEEP_VMS=true
;;
-d|--deploy)
case "${2}" in
""|-*)
DEPLOY='allinone'
echoerr "No deployment selected. Using ${DEPLOY}!"
;;
*) DEPLOY="${2}" ; shift ;;
esac
;;
-p|--packstack-options)
PACKSTACK_OPTIONS="$( echo ${2} | sed -E 's/[[:space:]]+/\;/g')"
shift
;;
-n|--source-vms)
VMS="${2}"
shift
;;
--)
break
;;
*)
break
;;
esac
shift
done
prepare_tmp_path || exit 1
if [ "${VMS}" == "" ]
then
echoerr "No source VMs were specified, see README about preparing VMs"
exit 1
else
ALLINONE=false
MULTI=false
for deployment in ${DEPLOY//,/ }; do
if [ "${deployment}" == "allinone" ]; then
ALLINONE=true
elif [ "${deployment}" == "multi" ]; then
MULTI=true
else
echoerr "No such deployment ${deployment} possible."
exit 1
fi
done
if $TEST; then
PACKSTACK_OPTIONS="${PACKSTACK_OPTIONS};CONFIG_PROVISION_TEMPEST=y"
PACKSTACK_OPTIONS="${PACKSTACK_OPTIONS};CONFIG_PROVISION_TEMPEST_REPO_URI=https://github.com/redhat-openstack/tempest.git"
fi
if $ALLINONE; then
for VM in ${VMS//,/ }; do
prepare_allinone_vms "${VM}" && run_allinone "${VM}" "${PACKSTACK_GIT}" "${PACKSTACK_BRANCH}" "${OPM_GIT}" "${OPM_BRANCH}" "${REPO}" "${PACKSTACK_OPTIONS}" &
done
fi
if $MULTI; then
for VM in ${VMS//,/ }; do
prepare_multinode_vms "${VM}" && run_multinode "${VM}" "${PACKSTACK_GIT}" "${PACKSTACK_BRANCH}" "${OPM_GIT}" "${OPM_BRANCH}" "${REPO}" "${PACKSTACK_OPTIONS}" &
done
fi
wait
if $TEST; then
for VM in ${VMS//,/ }; do
if $MULTI; then
RUN setup_tempest_test "controller-${VM}" && RUN run_tempest_test "controller-${VM}" &
fi
if $ALLINONE; then
RUN setup_tempest_test "allinone-${VM}" && RUN run_tempest_test "allinone-${VM}" &
fi
done
wait
fi
if $RALLY; then
if $MULTI; then
for VM in ${VMS//,/ }; do
if [[ ${VM_TYPES} == *"magic"* ]]; then
RUN run_rally "magic-${VM}" "controller-${VM}" &
else
RUN run_rally "controller-${VM}" "controller-${VM}" &
fi
done
fi
wait
fi
# ensure we have local stored logs
if $ALLINONE; then
for VM in ${VMS//,/ }; do
RUN collect_logs "allinone-${VM}" &
done
fi
if $MULTI; then
for VM in ${VMS//,/ }; do
for vm_type in ${VM_TYPES//,/ }; do
NAME="${vm_type}-${VM}"
RUN collect_logs "${NAME}" &
done
done
fi
wait
if $KEEP_VMS;
then
exit 0
fi
if $ALLINONE; then delete_allinone_vms "${VMS}"; fi
if $MULTI; then delete_multinode_vms "${VMS}"; fi
fi
|
xbezdick/pckstck
|
run_test.sh
|
Shell
|
gpl-3.0
| 6,766 |
#!/bin/bash
# bash-gorillas is a demake of QBasic GORILLAS completely rewritten
# in Bash.
# Copyright (C) 2013 Istvan Szantai <szantaii at sidenote dot hu>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program (LICENSE).
# If not, see <http://www.gnu.org/licenses/>.
# Prints the name of players to the top left and right corners of the screen
print_player_names()
{
# Position the cursor to the top left corner of the playing field
tput cup ${top_padding_height} ${left_padding_width} >> ${buffer}
# Print player1's name ($player1_name)
printf "${player1_name}" >> ${buffer}
# Position the cursor to the top right corner of the playing field
tput cup ${top_padding_height} \
$(($((left_padding_width + grid_width)) - ${#player2_name}))>> ${buffer}
# Print player2's name ($player2_name)
printf "${player2_name}" >> ${buffer}
# Refresh the screen
refresh_screen
}
|
szantaii/bash-gorillas
|
print-player-names.sh
|
Shell
|
gpl-3.0
| 1,424 |
#!/usr/bin/env bash
sqlite3 downloads.sqlite 'UPDATE files SET FileStatus=2 WHERE FileStatus = 904'
|
Piskvor/webFetchFace
|
restartAll.sh
|
Shell
|
gpl-3.0
| 100 |
#!/bin/sh
# Drive the dup-clobber program.
# Copyright (C) 2009-2013 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
. "${srcdir=.}/init.sh"; path_prepend_ ../parted
PATH="..:$PATH"
export PATH
dup-clobber || fail=1
Exit $fail
|
philenotfound/parted
|
tests/t0500-dup-clobber.sh
|
Shell
|
gpl-3.0
| 848 |
#!/bin/bash
#removebadbarcodes in=<infile> out=<outfile>
usage(){
echo "
Written by Brian Bushnell.
Last modified March 16, 2015
Description: Removes reads with improper barcodes.
Usage: removebadbarcodes.sh in=<file> out=<file>
Parameters:
in=<file> Input reads; required parameter.
out=<file> Destination for good reads; optional.
ziplevel=2 (zl) Compression level for gzip output.
pigz=f Spawn a pigz (parallel gzip) process for faster
compression than Java. Requires pigz to be installed.
Java Parameters:
-Xmx This will be passed to Java to set memory usage, overriding the program's automatic memory detection.
-Xmx20g will specify 20 gigs of RAM, and -Xmx800m will specify 800 megs. The max is typically 85% of physical memory.
Please contact Brian Bushnell at bbushnell@lbl.gov if you encounter any problems.
"
}
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
CP="$DIR""current/"
NATIVELIBDIR="$DIR""jni/"
z="-Xmx200m"
z2="-Xmx200m"
EA="-ea"
set=0
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
calcXmx () {
source "$DIR""/calcmem.sh"
parseXmx "$@"
}
calcXmx "$@"
removebadbarcodes() {
if [[ $NERSC_HOST == genepool ]]; then
module unload oracle-jdk
module load oracle-jdk/1.8_64bit
module load pigz
fi
local CMD="java $EA $z $z2 -cp $CP jgi.RemoveBadBarcodes $@"
echo $CMD >&2
eval $CMD
}
removebadbarcodes "$@"
|
richrr/scripts
|
bash/bbtools/bbmap/removebadbarcodes.sh
|
Shell
|
gpl-3.0
| 1,684 |
#! /bin/bash
./../basics.sh
|
chGoodchild/setup_scripts
|
setup.sh
|
Shell
|
agpl-3.0
| 29 |
#!/bin/sh
set -e
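# Illustrative invocation (all values are made up); the script expects the
# subject fields, KEY_NAME and DAYS in the environment, while EMAIL_ADDRESS
# and CA_NAME are optional:
#   KEY_NAME=server DAYS=365 COUNTRY=DE STATE=Berlin LOCATION=Berlin \
#   ORGANISATION=Example ORGANISATION_UNIT=IT COMMON_NAME=example.org ./entry.sh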
openssl genrsa -out /certs/${KEY_NAME}.key 4096
if [ -z "${EMAIL_ADDRESS}" ]; then
openssl req -new -nodes -sha256 \
-subj "/C=${COUNTRY}/ST=${STATE}/L=${LOCATION}/O=${ORGANISATION}/OU=${ORGANISATION_UNIT}/CN=${COMMON_NAME}" \
-key /certs/${KEY_NAME}.key \
-out /certs/${KEY_NAME}.csr
else
openssl req -new -nodes -sha256 \
-subj "/C=${COUNTRY}/ST=${STATE}/L=${LOCATION}/O=${ORGANISATION}/OU=${ORGANISATION_UNIT}/CN=${COMMON_NAME}/emailAddress=${EMAIL_ADDRESS}" \
-key /certs/${KEY_NAME}.key \
-out /certs/${KEY_NAME}.csr
fi
if [ -z "${CA_NAME}" ]; then
openssl x509 -req -days ${DAYS} -set_serial 01 \
-signkey /certs/${KEY_NAME}.key \
-in /certs/${KEY_NAME}.csr \
-out /certs/${KEY_NAME}.pem
else
openssl x509 -req -days ${DAYS} -set_serial 01 \
-CA /certs/${CA_NAME}.pem -CAkey /certs/${CA_NAME}.key \
-in /certs/${KEY_NAME}.csr \
-out /certs/${KEY_NAME}.pem
fi
|
JensPiegsa/openssl
|
entry.sh
|
Shell
|
agpl-3.0
| 966 |
#!/bin/sh
MAJOR_HOME="lib/major"
ant clean compile
$MAJOR_HOME/bin/ant coverage
|
jaredkaczynski/520FinalPython
|
coverage.sh
|
Shell
|
agpl-3.0
| 82 |
#!/bin/sh -e
lookup() {
awk "/^$1=/ { split(\$0,s,\"=\"); print s[2]; exit }" $VF
}
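# For illustration, lookup reads KEY=value lines from ../../VERSION, which
# is assumed to look something like:
#   PKGNAME=karmalb
#   VERSION=1.0.0
#   ARCH=amd64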
VF=../../VERSION
PKGNAME="`lookup PKGNAME`"
VERSION="`lookup VERSION`"
MAINTAINER="`lookup MAINTAINER`"
ARCH="`lookup ARCH`"
LONGNAME="`lookup FULLNAME`"
SHORTNAME="`lookup PROJNAME`"
WORKDIR=workdir
DEB="${PKGNAME}_${VERSION}_${ARCH}.deb"
if [ "x$1" = "x-k" ]; then
KEEPDIR=1
shift
else
KEEPDIR=0
fi
rm -rf $WORKDIR
mkdir -p $WORKDIR
mkdir $WORKDIR/DEBIAN
for F in control/*; do
sed -e "s/@PKGNAME@/$PKGNAME/g" \
-e "s/@VERSION@/$VERSION/g" \
-e "s/@MAINTAINER@/$MAINTAINER/g" \
-e "s/@ARCH@/$ARCH/g" \
-e "s/@LONGNAME@/$LONGNAME/g" \
-e "s/@SHORTNAME@/$SHORTNAME/g" \
$F > $WORKDIR/DEBIAN/`basename $F`
done
chmod +x $WORKDIR/DEBIAN/postinst
chmod +x $WORKDIR/DEBIAN/prerm
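# filelist drives the payload layout; each line is (assumed format):
#   d <dir>             create a directory
#   f <file>            copy files/<file> verbatim
#   F <file>            copy files/<file> substituting the @TOKEN@ markers
#   l <link> <target>   create a symlink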
cat filelist | while read T F X; do
case $T in
d) mkdir $WORKDIR/$F;;
f) cp -p files/$F $WORKDIR/$F
if [ -x files/$F ]; then
chmod 755 $WORKDIR/$F
fi;;
F)
sed -e "s/@PKGNAME@/$PKGNAME/g" \
-e "s/@VERSION@/$VERSION/g" \
-e "s/@MAINTAINER@/$MAINTAINER/g" \
-e "s/@ARCH@/$ARCH/g" \
-e "s/@LONGNAME@/$LONGNAME/g" \
-e "s/@SHORTNAME@/$SHORTNAME/g" \
files/$F > $WORKDIR/$F
if [ -x files/$F ]; then
chmod 755 $WORKDIR/$F
fi;;
l) ln -s $X $WORKDIR/$F;;
\#*) # skip
;;
esac
done
( cd $WORKDIR; find . -name DEBIAN -prune -o -type f -printf '%P ' | xargs md5sum > DEBIAN/md5sums )
fakeroot dpkg-deb -b $WORKDIR .
test "$KARMALBKEY" && dpkg-sig -k $KARMALBKEY --sign builder $DEB
test $KEEPDIR -eq 1 || rm -rf $WORKDIR
|
sgoldthorpe/karmalb
|
src/karmalb-pkg/make_package.sh
|
Shell
|
lgpl-2.1
| 1,561 |
#!/bin/bash
java -Xmx3000M -cp .:BiNoM_all.jar fr.curie.BiNoM.pathways.utils.acsn.ACSNProcedures --scalepng --png1 rcd_master-1.png --pngout rcd_master-0.png
|
sysbio-curie/NaviCell
|
map_construction_procedures/procedure_scripts/scale_1_to_0.sh
|
Shell
|
lgpl-3.0
| 158 |
pt_testcase_begin
using_measure
pt_add_fifo "$main_fifo_file"
pt_add_fifo "$wakeup_fifo_file"
pt_write_widget_file <<__EOF__
f = assert(io.open('$main_fifo_file', 'w'))
f:setvbuf('line')
f:write('init\n')
$preface
widget = {
plugin = '$PT_BUILD_DIR/plugins/fs/plugin-fs.so',
opts = {
paths = {'/'},
period = 0.5,
fifo = '$wakeup_fifo_file',
},
cb = function(t)
_validate_t(t, {'/'})
f:write('cb called\n')
end,
}
__EOF__
pt_spawn_luastatus
exec {pfd}<"$main_fifo_file"
pt_expect_line 'init' <&$pfd
measure_start
pt_expect_line 'cb called' <&$pfd
measure_check_ms 0
pt_expect_line 'cb called' <&$pfd
measure_check_ms 500
pt_expect_line 'cb called' <&$pfd
measure_check_ms 500
touch "$wakeup_fifo_file"
pt_expect_line 'cb called' <&$pfd
measure_check_ms 0
pt_expect_line 'cb called' <&$pfd
measure_check_ms 500
pt_close_fd "$pfd"
pt_testcase_end
|
shdown/luastatus
|
tests/pt_tests/plugin-fs/06-wakeup-fifo.lib.bash
|
Shell
|
lgpl-3.0
| 904 |
#!/bin/sh
#
# This file is protected by Copyright. Please refer to the COPYRIGHT file
# distributed with this source distribution.
#
# This file is part of REDHAWK rest-python.
#
# REDHAWK rest-python is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# REDHAWK rest-python is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
# Absolute path to this script, e.g. /home/user/bin/foo.sh
SCRIPT=$(readlink -f "$0")
# Absolute path this script is in, thus /home/user/bin
SCRIPTPATH=$(dirname "$SCRIPT")
pyvenv="${SCRIPTPATH}/pyvenv"
if [ ! -f ${pyvenv} ]; then
pyvenv=/var/redhawk/web/bin/pyvenv
fi
. /etc/profile.d/redhawk.sh
exec ${pyvenv} python ${SCRIPTPATH}/pyrest.py "$@"
|
RedhawkSDR/rest-python
|
start.sh
|
Shell
|
lgpl-3.0
| 1,205 |
#!/bin/sh
#M3TIOR 2016
i3DIR="~/.i3";
SETTINGSDIR="$i3DIR/settings.d";
# So starting today, I'm trying to unify the heap of small things into a larger
# file that will keep our ram and disk usage to a minimum. Starting a bunch of
# sepperate shell sessions is going to get extremely redundant evenutally. So
# this is the file where all of our code is going to go.
#debug redirection unit.
log(){
echo "$(date):$*"
}
#get and set settings from file by name.
import(){
echo $(cat $SETTINGSDIR/$1);
}
# "export" is a POSIX special builtin, which some shells resolve before
# functions, so the setter gets its own name instead of shadowing it.
setconf(){
echo $2 > $SETTINGSDIR/$1;
}
gender(){
#generates defaults if they don't exist
if ! [ -e $SETTINGSDIR/$1 ]; then
echo "$2" > $SETTINGSDIR/$1;
fi
}
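# Illustrative use: "gender volume 50" seeds $SETTINGSDIR/volume with 50 on
# the first run; later runs leave an existing value untouched.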
# I tried to do a thing and failed (;-;)
# don't wanna waste time to commit it...
# shame, it was a 100 line blob of oop junk. XP
#controllers:
lcontrol(){
#backlight controller#
local v=$(import backlight)
case "$*" in
"up")
local v=$(( $v + 10 ));
if [ $v -gt 100 ]; then
local v=100;
fi
xbacklight -set $v;
log "Backlight dimmed; @$v";
;;
"down")
local v=$(( $v - 10 ));
if [ $v -lt 0 ]; then
local v=0;
fi
xbacklight -set $v;
log "Backlight dimmed; @$v";
;;
esac
setconf backlight $v;
}
vcontrol(){
#volume controller#
local v=$(import volume);
case $* in
"up")
local v=$(($v + 5));
if [ $v -gt 100 ]; then
local v=100;
fi
pactl set-sink-volume 0 "$v%";
log "Volume raised; @$v";
;;
"down")
local v=$(($v - 5));
if [ $v -lt 0 ]; then
local v=0;
fi
pactl set-sink-volume 0 "$v%";
log "Volume decreased; @$v";
;;
"mute")
local v=0;
pactl set-sink-volume 0 "$v%";
log "Volume muted; @$v";
;;
esac
setconf volume $v
}
gender backlight 100
gender volume 50
gender debug.redirect "/var/log/i3/debug.log"
# Walk the argument list pairwise: a command name followed by its value.
while [ $# -gt 0 ]; do
case $1 in
volume)
vcontrol $2;
shift;
;;
backlight)
lcontrol $2;
shift;
;;
*)
log "Bad argument; @$1";
;;
esac
shift;
done;
#exit
|
M3TIOR/MY_i3-wm_CONFIG
|
files/control.sh
|
Shell
|
unlicense
| 2,595 |
# function _tmux_completions() {
# local -a sessions
# sessions=($(tmux-ls))
# compadd -a sessions
# }
#
# compdef _tmux_completions tss
|
huynle/dotfiles
|
.zsh/completion/tmux.zsh
|
Shell
|
unlicense
| 143 |
#!/bin/bash
HDIMGPATH=`realpath hd.img`
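# Attach hd.img as a loop device with partition scanning (-P) unless it is
# already attached, then mount its first FAT partition onto ./hd.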
if ! losetup -nl | grep -q "$HDIMGPATH"; then
echo "mounting"
losetup -Pf --show "$HDIMGPATH"
fi
LOOPDEV=$(losetup -nl | grep "$HDIMGPATH" | cut -d ' ' -f 1)
sudo mount -t vfat -o uid=7777 "$LOOPDEV""p1" ./hd
|
Sembiance/xutil
|
legacy/dos/mountHD.sh
|
Shell
|
unlicense
| 255 |
#!/usr/bin/env bash
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
BASE_DIR=$(pwd)
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
SCRIPTDIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
SOURCE="$(readlink "$SOURCE")"
[[ $SOURCE != /* ]] && SOURCE="$SCRIPTDIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located
done
SCRIPTDIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
source ${SCRIPTDIR}/shared_utilities.sh
if [[ -z "${GRADLE_TASK}" ]]; then
echo "GRADLE_TASK must be set. exiting..."
exit 1
fi
ROOT_DIR=$(pwd)
BUILD_DATE=$(date +%s)
# Precheckin does not get a geode-build-version
if [ -e "${ROOT_DIR}/geode-build-version" ] ; then
GEODE_BUILD_VERSION_FILE=${ROOT_DIR}/geode-build-version/number
GEODE_BUILD_DIR=/tmp/geode-build
GEODE_PULL_REQUEST_ID_FILE=${ROOT_DIR}/geode/.git/resource/version.json
if [ -e "${GEODE_PULL_REQUEST_ID_FILE}" ]; then
GEODE_PULL_REQUEST_ID=$(cat ${GEODE_PULL_REQUEST_ID_FILE} | jq --raw-output '.["pr"]')
FULL_PRODUCT_VERSION="geode-pr-${GEODE_PULL_REQUEST_ID}"
else
CONCOURSE_VERSION=$(cat ${GEODE_BUILD_VERSION_FILE})
echo "Concourse VERSION is ${CONCOURSE_VERSION}"
# Rebuild version, zero-padded
FULL_PRODUCT_VERSION=$(get-full-version ${CONCOURSE_VERSION})
BUILD_ID=$(get-geode-build-id-padded ${CONCOURSE_VERSION} 2> /dev/null)
fi
fi
if [[ ${PARALLEL_GRADLE:-"true"} == "true" ]]; then
PARALLEL_GRADLE="--parallel"
else
PARALLEL_GRADLE=""
fi
DEFAULT_GRADLE_TASK_OPTIONS="${PARALLEL_GRADLE} --console=plain --no-daemon"
GRADLE_SKIP_TASK_OPTIONS=""
SSHKEY_FILE="instance-data/sshkey"
SSH_OPTIONS="-i ${SSHKEY_FILE} -o ConnectionAttempts=60 -o StrictHostKeyChecking=no -o ServerAliveInterval=60 -o ServerAliveCountMax=5"
INSTANCE_IP_ADDRESS="$(cat instance-data/instance-ip-address)"
scp ${SSH_OPTIONS} ${SCRIPTDIR}/capture-call-stacks.sh geode@${INSTANCE_IP_ADDRESS}:.
if [[ -n "${PARALLEL_DUNIT}" && "${PARALLEL_DUNIT}" == "true" ]]; then
PARALLEL_DUNIT="-PparallelDunit -PdunitDockerUser=geode"
if [ -n "${DUNIT_PARALLEL_FORKS}" ]; then
DUNIT_PARALLEL_FORKS="-PdunitParallelForks=${DUNIT_PARALLEL_FORKS}"
fi
else
PARALLEL_DUNIT=""
DUNIT_PARALLEL_FORKS=""
fi
SET_JAVA_HOME="export JAVA_HOME=/usr/lib/jvm/java-${JAVA_BUILD_VERSION}-openjdk-amd64"
if [ -v CALL_STACK_TIMEOUT ]; then
ssh ${SSH_OPTIONS} geode@${INSTANCE_IP_ADDRESS} "${SET_JAVA_HOME} && tmux new-session -d -s callstacks; tmux send-keys ~/capture-call-stacks.sh\ ${PARALLEL_DUNIT}\ ${CALL_STACK_TIMEOUT} C-m"
fi
if [ -z "${FULL_PRODUCT_VERSION}" ] ; then
FULL_PRODUCT_VERSION="0.0.0-UndefinedVersion"
fi
GRADLE_ARGS="\
${DEFAULT_GRADLE_TASK_OPTIONS} \
${GRADLE_SKIP_TASK_OPTIONS} \
${GRADLE_GLOBAL_ARGS} \
-Pversion=${FULL_PRODUCT_VERSION} \
-PbuildId=${BUILD_ID} \
build install javadoc spotlessCheck rat checkPom resolveDependencies pmdMain -x test"
EXEC_COMMAND="mkdir -p tmp \
&& cp geode/ci/scripts/attach_sha_to_branch.sh /tmp/ \
&& /tmp/attach_sha_to_branch.sh geode ${BUILD_BRANCH} \
&& cd geode \
&& ${SET_JAVA_HOME} \
&& ./gradlew ${GRADLE_ARGS}"
echo "${EXEC_COMMAND}"
ssh ${SSH_OPTIONS} geode@${INSTANCE_IP_ADDRESS} "${EXEC_COMMAND}"
|
davebarnes97/geode
|
ci/scripts/execute_build.sh
|
Shell
|
apache-2.0
| 4,050 |
#!/usr/bin/env bash
# Generate md5sum compatible output for a complete delivery in DOMS.
# fail if any command fails - this is to avoid checking the return code of any curl call
set -e
VERBOSE=
if [ "$1" == "-v" ]
then
VERBOSE=$1
shift
fi
if [ -z $1 ]
then
echo "usage: [P=\"-u user:pass\"] $0 [-v] uuid:...dpa_roundtrip..."
exit 1
fi
# ----------------------------------------------------------------------
# xmlstarlet and curl must be on PATH
CURL=curl
XMLSTARLET=xmlstarlet
# xmlstarlet namespace declarations
N="-N rdf=http://www.w3.org/1999/02/22-rdf-syntax-ns# -N fedora=info:fedora/fedora-system:def/relations-external# -N dc=http://purl.org/dc/elements/1.1/"
ROUNDTRIP_UUID=$1
# curl -s -u fedoraAdmin:fedoraAdminPass 'http://localhost:7880/fedora/objects/uuid:627ef2a0-88d4-4423-8b15-6f37dc522e29/datastreams/RELS-EXT/content'
# curl -s $P "$O/$UUID/datastreams/RELS-EXT/content"
O=${O:-http://${FEDORA_HOST:-localhost}:7880/fedora/objects}
# For non-local servers use fedoraReadOnlyAdmin - find password in /home/doms/services/fedora/server/config/fedora-users.xml
# xmlstarlet sel -t -m '//user[@name="fedoraReadOnlyAdmin"]' -v '@password' -n /tmp/fedora-users.xml
P=${P:--u ${FEDORA_USER:-fedoraAdmin}:${FEDORA_PASSWORD:-fedoraAdminPass}}
# ----------------------------------------------------------------------
# http://localhost:7880/fedora/objects/uuid:a8072382-20db-4d48-aa3a-5ad1392a242f/datastreams/DC/content
ROUNDTRIP_DC=$($CURL -s $P "$O/$ROUNDTRIP_UUID/datastreams/DC/content")
ROUNDTRIP_CURL_EXIT_CODE=$?
if [ $ROUNDTRIP_CURL_EXIT_CODE -gt 0 ]
then
echo "curl error: $ROUNDTRIP_CURL_EXIT_CODE (No DOMS there?)"
exit 1
fi
# <oai_dc:dc xmlns:oai_dc="http://www.openarchives.org/OAI/2.0/oai_dc/" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/oai_dc/ http://www.openarchives.org/OAI/2.0/oai_dc.xsd">
# <dc:title>DPA Roundtrip</dc:title>
# <dc:identifier>uuid:a8072382-20db-4d48-aa3a-5ad1392a242f</dc:identifier>
# <dc:identifier>path:dl_20180114_rt1</dc:identifier>
# </oai_dc:dc>
if [ "DPA Roundtrip" != "$(echo $ROUNDTRIP_DC | $XMLSTARLET sel $N -t -m '//dc:title' -v '.')" ]
then
echo "Not DPA Roundtrip."
exit 1
fi
# echo "Original path: $(echo $ROUNDTRIP_DC | $XMLSTARLET sel -t -m '//dc:identifier[starts-with(text(), "path:")]' -v '.')"
# http://localhost:7880/fedora/objects/uuid:a8072382-20db-4d48-aa3a-5ad1392a242f/datastreams/RELS-EXT/content
ROUNDTRIP_RELSEXT=$($CURL -s $P "$O/$ROUNDTRIP_UUID/datastreams/RELS-EXT/content")
# <?xml version="1.0"?>
# <rdf:RDF xmlns:doms="http://doms.statsbiblioteket.dk/relations/default/0/1/#" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#">
# <rdf:Description rdf:about="info:fedora/uuid:a8072382-20db-4d48-aa3a-5ad1392a242f">
# <hasModel xmlns="info:fedora/fedora-system:def/model#" rdf:resource="info:fedora/doms:ContentModel_DPARoundTrip"/>
# <hasModel xmlns="info:fedora/fedora-system:def/model#" rdf:resource="info:fedora/doms:ContentModel_Item"/>
# <hasModel xmlns="info:fedora/fedora-system:def/model#" rdf:resource="info:fedora/doms:ContentModel_DOMS"/>
# <doms:isPartOfCollection rdf:resource="info:fedora/doms:DPA_Collection"/>
# <hasPart xmlns="info:fedora/fedora-system:def/relations-external#" rdf:resource="info:fedora/uuid:4170dbda-f215-495f-937d-52977b87fc01"/>
# <hasPart xmlns="info:fedora/fedora-system:def/relations-external#" rdf:resource="info:fedora/uuid:23a76588-97d1-4b31-a6f2-2166f0790694"/>
# ...
# </rdf:Description>
# </rdf:RDF>
PAPER_UUID_SEEN=no
for PAPER_UUID in $(echo $ROUNDTRIP_RELSEXT | $XMLSTARLET sel $N -t -m '//fedora:hasPart[starts-with(@rdf:resource, "info:fedora/")] ' -v 'substring-after(@rdf:resource, "info:fedora/")' -n); do
PAPER_UUID_SEEN=yes
# uuid:4170dbda-f215-495f-937d-52977b87fc01
PAPER_RELSEXT=$($CURL -s $P "$O/$PAPER_UUID/datastreams/RELS-EXT/content")
# <rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#">
# <rdf:Description rdf:about="info:fedora/uuid:4170dbda-f215-495f-937d-52977b87fc01">
# <isPartOfCollection xmlns="http://doms.statsbiblioteket.dk/relations/default/0/1/#" rdf:resource="info:fedora/doms_sboi_dpaCollection"/>
# <hasPart xmlns="info:fedora/fedora-system:def/relations-external#" rdf:resource="info:fedora/uuid:7bd4fb56-f268-426d-bab1-cdd8aefbde7f"/>
# <hasPart xmlns="info:fedora/fedora-system:def/relations-external#" rdf:resource="info:fedora/uuid:6131049f-21fd-467a-9c34-21b231e29940"/>
# </rdf:Description>
# </rdf:RDF>
for PAPER_PART_UUID in $(echo $PAPER_RELSEXT | $XMLSTARLET sel $N -t -m '//fedora:hasPart[starts-with(@rdf:resource, "info:fedora/")] ' -v 'substring-after(@rdf:resource, "info:fedora/")' -n); do
# articles and pages
PAPER_PART_DC=$($CURL -s $P "$O/$PAPER_PART_UUID/datastreams/DC/content")
# path:dl_20180114_rt1/aarhusstiftstidende/pages
PAPER_PART_IDENTIFIER=$(echo $PAPER_PART_DC | $XMLSTARLET sel $N -t -m '//dc:identifier[starts-with(text(), "path:")]' -v 'text()')
case $PAPER_PART_IDENTIFIER in
*pages)
# print out md5sum entry for all PDF files
if [ -n "$VERBOSE" ]
then
echo "$PAPER_PART_IDENTIFIER $PAPER_PART_UUID"
fi
PAGES_RELSEXT=$($CURL -s $P "$O/$PAPER_PART_UUID/datastreams/RELS-EXT/content")
for PAGE_UUID in $(echo $PAGES_RELSEXT | $XMLSTARLET sel $N -t -m '//fedora:hasPart[starts-with(@rdf:resource, "info:fedora/")] ' -v 'substring-after(@rdf:resource, "info:fedora/")' -n); do
# Each page node has XML datastream and a subnode to put metadata on the PDF file.
PAGE_XML_FILENAME=$($CURL -s $P "$O/$PAGE_UUID/datastreams/DC/content" | $XMLSTARLET sel -t -m '//dc:identifier[starts-with(text(), "path:")]' -v '.' | sed 's![^/]*/!!' )
$CURL -s $P "$O/$PAGE_UUID/datastreams/XML/content" | md5sum | awk "{print \$1 \" ${PAGE_XML_FILENAME}.xml\"}"
PAGE_RELSEXT=$($CURL -s $P "$O/$PAGE_UUID/datastreams/RELS-EXT/content")
for PAGE_CONTENT_UUID in $(echo $PAGE_RELSEXT | $XMLSTARLET sel $N -t -m '//fedora:hasPart[starts-with(@rdf:resource, "info:fedora/")] ' -v 'substring-after(@rdf:resource, "info:fedora/")' -n); do
# We now look at the node for the individual PDF file.
# Extract the path name from DC and strip the "path:dl_XXXX_rtY/" prefix.
PAGE_CONTENT_FILENAME=$($CURL -s $P "$O/$PAGE_CONTENT_UUID/datastreams/DC/content" | $XMLSTARLET sel -t -m '//dc:identifier[starts-with(text(), "path:")]' -v '.' | sed 's![^/]*/!!' )
# Get redirect target as returned by Fedora - https://unix.stackexchange.com/a/157219/4869
REAL_URL=$($CURL -w "%{url_effective}\n" -I -L -S -s $P "$O/$PAGE_CONTENT_UUID/datastreams/CONTENTS/content" -o /dev/null)
# and md5sum the content.
curl -s $REAL_URL | md5sum | awk "{print \$1 \" $PAGE_CONTENT_FILENAME\"}"
done
done
;;
*articles)
# print out md5sum entry for XML file. We assume an ".xml" suffix.
if [ -n "$VERBOSE" ]
then
echo "$PAPER_PART_IDENTIFIER $PAPER_PART_UUID"
fi
ARTICLES_RELSEXT=$($CURL -s $P "$O/$PAPER_PART_UUID/datastreams/RELS-EXT/content")
for ARTICLE_UUID in $(echo $ARTICLES_RELSEXT | $XMLSTARLET sel $N -t -m '//fedora:hasPart[starts-with(@rdf:resource, "info:fedora/")] ' -v 'substring-after(@rdf:resource, "info:fedora/")' -n); do
XML_CONTENT_FILENAME=$($CURL -s $P "$O/$ARTICLE_UUID/datastreams/DC/content" | $XMLSTARLET sel -t -m '//dc:identifier[starts-with(text(), "path:")]' -v '.' | sed 's![^/]*/!!' )
# Article XML is in the "XML" datastream of the node.
$CURL -s $P "$O/$ARTICLE_UUID/datastreams/XML/content" | md5sum | awk "{print \$1 \" ${XML_CONTENT_FILENAME}.xml\"}"
done
;;
esac
done
done
if [ "$PAPER_UUID_SEEN" = "no" ]
then
echo "No newspapers stored for delivery round trip"
fi
|
statsbiblioteket/digital-pligtaflevering-aviser-tools
|
bin/checksums-for-delivery-in-doms.sh
|
Shell
|
apache-2.0
| 8,279 |
#!/usr/bin/env bash
set -e -x
source bosh-cpi-release/ci/tasks/utils.sh
check_param base_os
manifest_dir=bosh-concourse-ci/pipelines/bosh-aws-cpi
echo "checking in BOSH deployment state"
cd deploy/${manifest_dir}
git add ${base_os}-director-manifest-state.json
git config --global user.email "[email protected]"
git config --global user.name "bosh-ci"
git commit -m ":airplane: Concourse auto-updating deployment state for bats pipeline, on ${base_os}"
|
ilackarms/bosh-aws-cpi-release
|
ci/tasks/save-deployment.sh
|
Shell
|
apache-2.0
| 471 |
#!/bin/bash
#
# This function checks for one or more devices connected via ADB.
# It will return the device's serial number.
# If no devices are connected, it will return '0'.
# If more than one device is connected, it will prompt the user to select one.
# In that case, it will return the selected device, or '0' if they didn't select any device.
#
# USAGE:
# Call the function as follows:
#
# selectedDevice MYVAL
#
# The device's serial number will be stored in the MYVAL variable.
#
function selectDevice() {
# Run adb devices once, in event adb hasn't been started yet
BLAH=$(adb devices)
# Grab the IDs of all the connected devices / emulators
IDS=($(adb devices | sed '1,1d' | sed '$d' | cut -f 1 | sort))
NUMIDS=${#IDS[@]}
# Check for number of connected devices / emulators
if [[ 0 -eq "$NUMIDS" ]]; then
# No IDs, return 0
eval "$1='0'"
elif [[ 1 -eq "$NUMIDS" ]]; then
# Only one device, return its ID
eval "$1='${IDS[0]}'"
else
# Multiple devices detected: gather model and OS version info, then prompt the user to pick one
# Grab the model name for each device / emulator
declare -a MODEL_NAMES
for (( x=0; x < $NUMIDS; x++ )); do
MODEL_NAMES[x]=$(adb devices | grep ${IDS[$x]} | cut -f 1 | xargs -I $ adb -s $ shell cat /system/build.prop 2> /dev/null | grep "ro.product.model" | cut -d "=" -f 2 | tr -d ' \r\t\n')
done
# Grab the platform version for each device / emulator
declare -a PLATFORM_VERSIONS
for (( x=0; x < $NUMIDS; x++ )); do
PLATFORM_VERSIONS[x]=$(adb devices | grep ${IDS[$x]} | cut -f 1 | xargs -I $ adb -s $ shell cat /system/build.prop 2> /dev/null | grep "ro.build.version.release" | cut -d "=" -f 2 | tr -d ' \r\t\n')
done
# Prompting user to select a device
echo "Multiple devices detected, please select one"
for (( x=0; x < $NUMIDS; x++ )); do
echo -e "$[x+1]: ${IDS[x]}\t\t${PLATFORM_VERSIONS[x]}\t\t${MODEL_NAMES[x]}"
done
echo -n "> "
read USER_CHOICE
# Validate user entered a number and return appropriate serial number
if [[ $USER_CHOICE =~ ^[0-9]+$ ]]; then
if [[ $USER_CHOICE -gt $NUMIDS ]]; then
eval "$1='0'"
else
eval "$1='${IDS[$USER_CHOICE-1]}'"
fi
else
eval "$1='0'"
fi
fi
}
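
# A minimal usage sketch (assumes this file is sourced and adb is on the PATH):
#
#   source adbwrapper-func.sh
#   selectDevice DEVICE
#   if [[ "$DEVICE" == "0" ]]; then
#       echo "no device selected" >&2
#   else
#       adb -s "$DEVICE" shell getprop ro.product.model
#   fi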
|
mitsnosrap/shell-scripts
|
adbwrapper-func.sh
|
Shell
|
apache-2.0
| 2,324 |
#!/bin/bash
path_abs() {
[[ $1 = /* ]] && eval "$2='$1'" || eval "$2='$PWD/${1#./}'"
}
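# e.g. `path_abs ./run.sh P` stores "$PWD/run.sh" in P; absolute paths pass through unchanged.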
if [ -z "$BASE_DIR" ]; then
path_abs $0 PATH_ABS
BASE_DIR=$(dirname $PATH_ABS)
fi
if [[ $(uname) = CYGWIN* ]]; then
NODE=node.exe
if [ -d "C:\\cygwin" ]; then
BASE_DIR=C:\\cygwin/$BASE_DIR
else
BASE_DIR=C:\\cygwin64/$BASE_DIR
fi
elif [[ $(uname) = Darwin* ]]; then
NODE=$BASE_DIR/../node/bin/node
else
NODE=/usr/local/bin/node
fi
LOG_DIR="$BASE_DIR/log"
PID_FILE="$BASE_DIR/.driver.pid"
LOCK_FILE="$BASE_DIR/thingplus_device.lock"
if [ ! -e $LOCK_FILE ]; then
trap "rm -f $LOCK_FILE; exit" INT TERM EXIT
touch $LOCK_FILE
else
echo "$0 is already running; remove $LOCK_FILE to execute."
exit 1;
fi
if [ ! -d $LOG_DIR ] ; then
mkdir -p $LOG_DIR
fi
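# check_running: returns 0 (running) only if the PID file exists and that
# process is still alive.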
check_running() {
if [ -f $PID_FILE ] ; then
PID=`cat $PID_FILE`
ps -p $PID > /dev/null 2>&1
return $?
fi
return 1;
}
start() {
#recover rsync if needed
if check_running ; then
echo "already running"
else
echo "starting... wait 5 sec"
cd $APP_DIR
$NODE $BASE_DIR/app.js >> $LOG_DIR/thingplus_device.log 2>&1 &
echo $! > $PID_FILE;
sleep 5
fi
}
stop() {
sync
if [[ $(uname) == CYGWIN* ]]; then
taskkill /F /PID `cat $PID_FILE`
else
pkill -F $PID_FILE 2> /dev/null;
fi
rm -f $PID_FILE;
}
case "$1" in
status)
if check_running; then
echo "running"
else
echo "stopped"
fi
;;
start)
start
;;
stop)
stop
;;
restart)
stop
sleep 5;
start
;;
setup)
#setup only
;;
*)
echo "Usage: $0 {start|stop|restart|setup}"
rm -f $LOCK_FILE
exit 1
esac
rm -f $LOCK_FILE
|
daliworks/openhardware
|
arduino/grove-starter-kit/thingplus_device.sh
|
Shell
|
apache-2.0
| 1,695 |
#!/bin/bash
set -eu
# Copyright 2017-Present Pivotal Software, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
desired_version=$(jq --raw-output '.Release.Version' < ./pivnet-product/metadata.json)
AVAILABLE=$(om-linux \
--skip-ssl-validation \
--username "${OPSMAN_USERNAME}" \
--password "${OPSMAN_PASSWORD}" \
--target "https://${OPSMAN_DOMAIN_OR_IP_ADDRESS}" \
curl -path /api/v0/available_products)
STAGED=$(om-linux \
--skip-ssl-validation \
--username "${OPSMAN_USERNAME}" \
--password "${OPSMAN_PASSWORD}" \
--target "https://${OPSMAN_DOMAIN_OR_IP_ADDRESS}" \
curl -path /api/v0/staged/products)
# Figure out which products are unstaged.
UNSTAGED_ALL=$(jq -n --argjson available "$AVAILABLE" --argjson staged "$STAGED" \
'$available - ($staged | map({"name": .type, "product_version": .product_version}))')
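# e.g. available [A,B] minus staged [A] leaves [B]; only products that are
# not yet staged survive the subtraction.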
UNSTAGED_PRODUCT=$(
  echo "$UNSTAGED_ALL" | jq \
    "map(select(.name == \"$PRODUCT_NAME\")) | map(select(.product_version|startswith(\"$desired_version\")))"
)
# There should be only one such unstaged product.
if [ "$(echo $UNSTAGED_PRODUCT | jq '. | length')" -ne "1" ]; then
echo "Need exactly one unstaged build for $PRODUCT_NAME version $desired_version"
jq -n "$UNSTAGED_PRODUCT"
exit 1
fi
full_version=$(echo "$UNSTAGED_PRODUCT" | jq -r '.[].product_version')
om-linux --target "https://${OPSMAN_DOMAIN_OR_IP_ADDRESS}" \
--skip-ssl-validation \
--username "${OPSMAN_USERNAME}" \
--password "${OPSMAN_PASSWORD}" \
stage-product \
--product-name "${PRODUCT_NAME}" \
--product-version "${full_version}"
|
BrianMMcClain/pcf-pipelines
|
tasks/stage-product/task.sh
|
Shell
|
apache-2.0
| 2,092 |
#!/bin/bash
mkdir -p /data/mysql/
cd services/mysql
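# Generate a throwaway root password (md5 of the current date). This is
# predictable, so it is only suitable for local development.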
PASSWORD=$(date | md5sum | awk '{print $1}')
sed -i "s/INSERT_PASSWORD/$PASSWORD/g" docker-compose.yml
docker-compose up -d
|
Romke-vd-Meulen/Beethoven
|
services/mysql/install.sh
|
Shell
|
apache-2.0
| 178 |
#!/bin/bash
UPLOAD_RUNTIME=false
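# UPLOAD_RUNTIME selects which modules doUpload publishes: the runtime module
# (built against settings.gradle.ready) or everything else (built against
# settings.gradle.before).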
doUpload() {
if [ "${UPLOAD_RUNTIME}" = "true" ];
then uploadRuntime
else uploadExceptRuntime
fi
}
upload() {
rm settings.gradle
cp settings.gradle.before settings.gradle
./gradlew clean assemble
./gradlew :core:uploadArchives
./gradlew :views:uploadArchives
./gradlew :blockcanary:uploadArchives
./gradlew :components:uploadArchives
cleanGradleCache
rm settings.gradle
cp settings.gradle.ready settings.gradle
./gradlew clean
./gradlew :runtime:uploadArchives
./gradlew :noop-java:uploadArchives
./gradlew :noop-kotlin:uploadArchives
}
uploadExceptRuntime() {
rm settings.gradle
cp settings.gradle.before settings.gradle
./gradlew clean assemble
./gradlew uploadArchives
}
uploadRuntime() {
rm settings.gradle
cp settings.gradle.ready settings.gradle
./gradlew :runtime:clean
./gradlew :runtime:uploadArchives
}
uploadPrimary() {
git checkout 1.1.x -f
doUpload
}
upload101_23() {
git checkout v23/1.1.x -f
doUpload
}
upload101_22() {
git checkout v22/1.1.x -f
doUpload
}
cleanGradleCache() {
rm -rf ~/.gradle/caches/modules-2/files-2.1/com.exyui.android
}
upload101_22
upload101_23
uploadPrimary
git checkout 1.1.x
rm settings.gradle
cp settings.gradle.before settings.gradle
|
kiruto/debug-bottle
|
upload_to_mvn.sh
|
Shell
|
apache-2.0
| 1,340 |
#!/bin/bash
# Set the output directory
outdir=${1:-gh-pages}
################################################################################
# Define list of services
################################################################################
services=(
AssistantV1
AssistantV2
CompareComplyV1
DiscoveryV1
DiscoveryV2
LanguageTranslatorV3
NaturalLanguageClassifierV1
NaturalLanguageUnderstandingV1
PersonalityInsightsV3
SpeechToTextV1
TextToSpeechV1
ToneAnalyzerV3
VisualRecognitionV3
VisualRecognitionV4
)
################################################################################
# Change directory to repository root
################################################################################
pushd `dirname $0` > /dev/null
root=`pwd`
popd > /dev/null
cd $root
cd ..
################################################################################
# Create folder for generated documentation
################################################################################
mkdir -p ${outdir}/services
################################################################################
# Run Jazzy to generate documentation
################################################################################
for service in ${services[@]}; do
mkdir ${outdir}/services/${service}
jazzy --config "Scripts/jazzy-config/${service}.jazzy.yaml"
done
################################################################################
# Generate index.html and copy supporting files
################################################################################
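# The parenthesised block below runs in a subshell; everything it prints is
# redirected once into ${outdir}/index.html at the closing paren.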
(
version=$(git describe --tags)
cat Scripts/generate-documentation-resources/index-prefix | sed "s/SDK_VERSION/$version/"
for service in ${services[@]}; do
echo "<li><a target="_blank" href="./services/${service}/index.html">${service}</a></li>"
done
echo -e " </section>\n </section>"
sed -n '/<section id="footer">/,/<\/section>/p' ${outdir}/services/${services[0]}/index.html
cat Scripts/generate-documentation-resources/index-postfix
) > ${outdir}/index.html
cp -r Scripts/generate-documentation-resources/* ${outdir}
rm ${outdir}/index-prefix ${outdir}/index-postfix
################################################################################
# Collect undocumented.json files
################################################################################
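# Merge the per-service undocumented.json files into one JSON array,
# inserting the separating commas by hand.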
declare -a undocumenteds
undocumenteds=($(ls -r ${outdir}/services/*/undocumented.json))
(
echo "["
if [ ${#undocumenteds[@]} -gt 0 ]; then
echo -e -n "\t"
cat "${undocumenteds[0]}"
unset undocumenteds[0]
for f in "${undocumenteds[@]}"; do
echo ","
echo -e -n "\t"
cat "$f"
done
fi
echo -e "\n]"
) > ${outdir}/undocumented.json
|
watson-developer-cloud/ios-sdk
|
Scripts/generate-documentation.sh
|
Shell
|
apache-2.0
| 2,790 |