code
stringlengths 2
1.05M
| repo_name
stringlengths 5
110
| path
stringlengths 3
922
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 2
1.05M
|
---|---|---|---|---|---|
#!/usr/bin/env bash

# Scaffold a new Nails application in ./www, installing the Nails CLI
# tool via composer first when it is not already on PATH.

banner() {
  echo "+--------------------------+"
  echo "| Install Framework: Nails |"
  echo "+--------------------------+"
}

banner
# --------------------------------------------------------------------------
if ! [ -x "$(command -v nails)" ]; then
  echo "... installing Nails Command Line Tool"
  composer global require "nails/command-line-tool"
fi
echo "... installing Nails"
nails new --dir="www" --no-docker
|
nailsapp/site-main
|
docker/webserver/apache-nails-php72/templates/install-framework.sh
|
Shell
|
mit
| 418 |
# __git_prompt: print a compact git status token for the prompt, e.g.
# "[master]", "[master|MERGING]", "[1a2b3c4...|BISECTING]".
# $1 (optional): printf format applied to the branch/state string;
# defaults to "[%s]". Prints nothing outside a git repository.
function __git_prompt() {
local g="$(git rev-parse --git-dir 2>/dev/null)"
if [ -n "$g" ]; then
local r
local b
# NOTE(review): .dotest / .dotest-merge are the am/rebase state dirs of
# very old git versions (pre rebase-apply/rebase-merge) -- confirm they
# still apply to the git in use.
if [ -d "$g/../.dotest" ]
then
if test -f "$g/../.dotest/rebasing"
then
r="|REBASE"
elif test -f "$g/../.dotest/applying"
then
r="|AM"
else
r="|AM/REBASE"
fi
b="$(git symbolic-ref HEAD 2>/dev/null)"
elif [ -f "$g/.dotest-merge/interactive" ]
then
r="|REBASE-i"
b="$(cat "$g/.dotest-merge/head-name")"
elif [ -d "$g/.dotest-merge" ]
then
r="|REBASE-m"
b="$(cat "$g/.dotest-merge/head-name")"
elif [ -f "$g/MERGE_HEAD" ]
then
r="|MERGING"
b="$(git symbolic-ref HEAD 2>/dev/null)"
else
if [ -f "$g/BISECT_LOG" ]
then
r="|BISECTING"
fi
# Detached HEAD: fall back to an exact tag, then to a short SHA from
# $g/HEAD. The assignment's exit status is that of the command
# substitution, so the "!" tests whether each lookup succeeded.
if ! b="$(git symbolic-ref HEAD 2>/dev/null)"
then
if ! b="tag: $(git describe --exact-match --tags HEAD 2>/dev/null)"
then
b="$(cut -c1-7 "$g/HEAD")..."
fi
fi
fi
if [ -n "$1" ]; then
printf "$1" "${b##refs/heads/}$r"
else
printf "[%s]" "${b##refs/heads/}$r"
fi
fi
}
# __shorten_path: abbreviate a path to roughly $2 characters by shrinking
# leading components to their first character (fish-shell style), e.g.
# ~/foo/bar/baz -> ~/f/b/baz. zsh-only: relies on the (s@/@) split flag
# and on $arr[i] / $str[0,1] subscript syntax.
# $1: full path; $2: soft maximum length (last component never shortened).
function __shorten_path() {
local fullpath=${1}
local maxlen=$2 # Note that maxlen will not be strictly adhered to since the
# last dir in the path will never be shortened.
local len=${#fullpath}
local parts
local result
# Collapse $HOME (and its symlink-resolved form) to "~" first.
fullpath=$(echo $fullpath | sed "s@^$HOME@~@" | sed "s@^$(realpath "$HOME")@~@")
if [ $len -gt $maxlen ]; then
parts=("${(s@/@)fullpath}")
# Shrink components left-to-right until the budget is met, skipping the
# final component entirely.
for i in {1..$(((${#parts[@]} - 1)))}; do
if [ $len -le $maxlen ]; then break; fi
local part=$parts[$i]
len=$((($len - ${#part})))
parts[$i]=$part[0,1]
done
# Re-join the components with "/" via IFS and ${parts[*]}.
IFS='/' result=${parts[*]}
else
result=$fullpath
fi
echo $result
}
# __prompt: build either the shell prompt ("prompt") or the terminal title
# ("title") from jail/nix-shell markers, user@host, a shortened cwd, and
# the current git status. Uses zsh prompt escapes (%n, %m, %F{...}, ...).
# $1: "prompt" or "title".
function __prompt() {
local colon
local git
local hostname
local curpath
local username
local jailname
local nixshell
local color_dir='%{%F{cyan}%}'
local color_git='%{%b%F{yellow}%}'
local color_jail='%{%F{8}%}'
local color_none='%{%b%f%}'
local color_prompt='%{%B%F{red}%}'
local color_user_host='%{%F{magenta}%}'
local maxpathlen=40 # max length of curpath (in characters)
# Comma-separated list of "my" usernames that need no display.
local usernames="${DOTFILES_USERNAMES:-amarshall,andrew.marshall}"
[[ -n $JAIL_NAME ]] && jailname=$(printf '⟦JAIL: %s⟧ ' $JAIL_NAME)
[[ -n $IN_NIX_SHELL ]] && nixshell=$(printf '⟦NIX: %s⟧ ' $IN_NIX_SHELL)
echo "$usernames" | grep --extended-regexp --quiet "(^|,)$USERNAME(,|$)" || username='%n' # Don't display user if it's my own
[[ -n $SSH_CONNECTION ]] && hostname=$hostname'@%m' # If running locally, we probably know what machine it is
# always display "@" so host is not mistaken for username
([ -n "$username" ] || [ -n "$hostname" ]) && colon=':' # Don't separate if nothing to separate
[[ $HOME != $(pwd) ]] && curpath=$(__shorten_path "$(pwd)" $maxpathlen) # Don't display the path if we're home
git=$(__git_prompt) # Display information about current git repo
case "$1" in
prompt)
# Unquoted args let empty segments collapse; the final token is the
# prompt character: "#" for root, "⦆" otherwise.
printf "%s" $color_jail$nixshell$jailname$color_user_host$username$hostname $color_none$colon $color_dir$curpath $color_git$git $color_prompt'%(!.#.⦆)'$color_none' '
;;
title)
# OSC 0 escape sets the terminal/tab title; print -P expands % escapes.
print -Pn "\e]0;" $username$hostname$colon "%~\a"
;;
esac
}
# Refresh the terminal title before every prompt.
function __precmd_title() {
__prompt title
}
# Register the title hook ahead of any previously registered precmd hooks.
precmd_functions=(__precmd_title $precmd_functions)
# Single quotes defer evaluation so the prompt is rebuilt on every render
# (relies on prompt_subst being enabled elsewhere -- TODO confirm).
PROMPT='$(__prompt prompt)'
RPROMPT=''
|
amarshall/dotfiles
|
lib/zsh/lib/prompt.zsh
|
Shell
|
mit
| 3,546 |
#!/bin/bash
# Block until the REST API at ${API_HOST}:${API_PORT} accepts TCP
# connections (nc -z probe every 3s), then run the chefmate unit tests.
# Required env: API_HOST, API_PORT.
echo "### Waiting for ${API_HOST}:${API_PORT} to get ready..."
# Quoted so empty/whitespace values cannot word-split into stray nc args.
while ! nc -vz "${API_HOST}" "${API_PORT}"
do
    echo "### Retry..."
    sleep 3
done
echo "### Starting unit tests."
nodeunit chefmatetests-rest.js
|
APIBrickwork/REST-API-Adapter
|
tests/testscripts-chefmate-rest/startUnitTests.sh
|
Shell
|
mit
| 220 |
#!/usr/bin/env bash
# Submit the prepared blueprint (/tmp/cluster_hdp.json) to Ambari to create
# a cluster.
# $1: Ambari server host; $2: cluster name.
# Quoted so a missing/odd argument cannot word-split the URL.
curl -H "X-Requested-By: ambari" -X POST -d @/tmp/cluster_hdp.json -u admin:admin "$1:8080/api/v1/clusters/$2"
|
tourunen/pouta-ansible-cluster
|
roles/ambari_blueprints/files/cluster_create.sh
|
Shell
|
mit
| 129 |
#!/bin/bash
# CI bootstrap: install build deps, create a python2.7 virtualenv in ~/py27,
# install this package plus its test tooling into it.
/usr/bin/sudo apt-get update
/usr/bin/sudo apt-get -y install python-virtualenv git libxslt1-dev libxml2-dev python-dev libyaml-dev lib32z1-dev
#rm -R -f ~/py27
/usr/bin/virtualenv ~/py27
source ~/py27/bin/activate
# Installs the project itself (setup.py of the current directory).
/usr/bin/env python setup.py install
pip install --upgrade pip
pip install nose
pip install coverage
pip install unittest2
# Restrict the egg cache so python does not warn about unsafe permissions.
chmod 0644 ~/.python-eggs
|
unixunion/python-libsolace
|
cicd.sh
|
Shell
|
mit
| 377 |
#!/bin/sh
# Generate the Makefile for the Ruby extension, pointing extconf at the
# rokapi headers one directory up.
ruby extconf.rb --with-rokapi-include=../
|
ncloudioj/okapi_ir_ext
|
ruby/config.sh
|
Shell
|
mit
| 53 |
#!/usr/bin/env bash
# Smoke-test the Redis image: start a container, wait until it is ready,
# verify the expected version, and exercise the flushall target.
# Required env: NAME, IMAGE, REDIS_VER. Optional: DEBUG (enables tracing).
set -e
if [[ -n "${DEBUG}" ]]; then
set -x
fi
cid="$(docker run -d -e DEBUG --name "${NAME}" "${IMAGE}")"
# Double quotes on purpose: $cid is expanded now, so the trap removes the
# right container on any exit path.
trap "docker rm -vf $cid > /dev/null" EXIT
# Run a one-off command in a throwaway container linked to the server.
redis() {
docker run --rm -i -e DEBUG --link "${NAME}" "${IMAGE}" "${@}"
}
redis make check-ready max_try=10 host="${NAME}"
echo -n "Checking Redis version... "
redis redis-server -v | grep -q "v=${REDIS_VER}"
echo "OK"
echo -n "Flushing Redis cache... "
redis make flushall host="${NAME}"
echo "OK"
|
Wodby/redis
|
tests/run.sh
|
Shell
|
mit
| 481 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for DLA-406-1
#
# Security announcement date: 2016-01-30 00:00:00 UTC
# Script generation date: 2017-01-01 21:09:07 UTC
#
# Operating System: Debian 6 (Squeeze)
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - phpmyadmin:4:3.3.7-11
#
# Last versions recommended by security team:
# - phpmyadmin:4:3.3.7-11
#
# CVE List:
# - CVE-2016-2039
# - CVE-2016-2041
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
# Upgrade (only) the already-installed phpmyadmin to the fixed version.
sudo apt-get install --only-upgrade phpmyadmin=4:3.3.7-11 -y
|
Cyberwatch/cbw-security-fixes
|
Debian_6_(Squeeze)/x86_64/2016/DLA-406-1.sh
|
Shell
|
mit
| 631 |
# configure_phantomjs: if a phantomjs install exists at ~/install/phantomjs
# with a bin/ directory, prepend that bin dir to PATH (via the external
# pathutils_add_head helper) and export PHANTOMJSPATH.
# $1: name of the caller's variable to receive the result (bash nameref):
#     0 = configured, 1 = phantomjs not found.
function configure_phantomjs() {
local -n __var=$1
PHANTOMJSPATH="${HOME}/install/phantomjs"
PHANTOMJSPATHBIN="${PHANTOMJSPATH}/bin"
if [ -d "$PHANTOMJSPATH" ] && [ -d "$PHANTOMJSPATHBIN" ]
then
pathutils_add_head PATH "${PHANTOMJSPATHBIN}"
export PHANTOMJSPATH
__var=0
return
fi
__var=1
}
# Register this plugin with the bashy framework (external helper).
register configure_phantomjs
|
veltzer/bashy
|
plugins/phantomjs.bash
|
Shell
|
mit
| 335 |
# Resolve this script's directory, then expose the sibling ../exts directory
# on FABRIC_EXTS_PATH so Fabric can find the AsyncTask extensions.
ASYNCTASK_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
FABRIC_EXTS_PATH="$FABRIC_EXTS_PATH:$ASYNCTASK_DIR/../exts"
export FABRIC_EXTS_PATH
|
tadevelopment/AsyncTask
|
Tests/environment.sh
|
Shell
|
mit
| 148 |
#!/bin/bash
# Generate an stunnel client config that wraps the local plaintext port
# LOCAL_PORT in TLS towards TARGET_ADDRESS, then exec stunnel in the
# foreground (container entrypoint).
# Required env: LOCAL_PORT, TARGET_ADDRESS.
# Optional env: DEBUG_LEVEL (default: warning), CA_FILE, VERIFY_LEVEL.
#fail fast
set -e
if [[ -z "$LOCAL_PORT" || -z "$TARGET_ADDRESS" ]] ; then
    echo "Required env variables LOCAL_PORT and/or TARGET_ADDRESS missing"
    exit 1
fi
DEBUG_LEVEL=${DEBUG_LEVEL:-warning}
cat << EOF > /etc/stunnel/plainToTLS.conf
foreground = yes
syslog = no
debug = ${DEBUG_LEVEL}
options = NO_SSLv2
socket=l:TCP_NODELAY=1
socket=r:TCP_NODELAY=1
[plainToTLS]
client = yes
accept = ${LOCAL_PORT}
connect = ${TARGET_ADDRESS}
EOF
# "[ -n ... ]" instead of "[ ! -z ... ]" (same meaning, clearer intent).
if [ -n "${CA_FILE}" ] ; then
    echo "CAfile = ${CA_FILE}" >> /etc/stunnel/plainToTLS.conf
fi
#no verify means ignoring all server/client certs
#see https://www.stunnel.org/static/stunnel.html
if [ -n "$VERIFY_LEVEL" ] ; then
    echo "verify = ${VERIFY_LEVEL}" >> /etc/stunnel/plainToTLS.conf
fi
# exec replaces the shell so stunnel receives signals directly (PID 1).
exec /usr/bin/stunnel /etc/stunnel/plainToTLS.conf
|
herzogf/docker-plaintotls
|
entrypoint.sh
|
Shell
|
mit
| 808 |
#!/usr/bin/env bash
# Provision nginx on a CentOS/RHEL guest from the official nginx repo.
# Install nginx as stated in http://wiki.nginx.org/Install
# 1. Add repo
sudo cp /vagrant/repo/nginx.repo /etc/yum.repos.d/nginx.repo
# 2. Install package
sudo yum install -y nginx
# 3. Start service
sudo systemctl start nginx
|
4BitBen/vagrant-examples
|
2_networking/scripts/installNginx.sh
|
Shell
|
mit
| 251 |
# Help/usage message fragments for the blingy CLI. The \n sequences are
# literal here and are interpreted later by echo (sh-style echo semantics).
welcome="\n ===============Welcome to blingy!===============\n"
commandslist1="setup commands: <install>, <refresh>, <run>\n"
commandslist2="create commands: <new>, <publish>, <list>\n"
commandslist3="remove commands: <unpublish>, <delete>\n"
commandslist4="misc commands: <update-template>, <watch>\n"
hr="================================================\n"
start=" \n\n ----------Getting Started: \n\n"
install=" [install] \n Sets up blingy in current directory \n\n "
refresh=" [refresh] \n After updating your site.js for the first time, run install \n\n "
run=" [run] (recommend running in a new tab) \n Run this command while you work and before you push to the outside world \n\t also lets you view your blog locally: http://127.0.0.1:9966 \n\n "
workflow="\n\n ----------Writing new articles: \n\n"
new=" [new] <articleName> \n Creates a new draft article in .md \n (located in /posts) where you type up your new post! \n\n "
publish=" [publish] <articleName> \n Publishes a draft article in html, this will make it visible on your blog \n\n "
list=" [list] Lists all articles \n "
workflow2="\n\n ----------Removing articles: \n\n"
unpublish=" [unpublish] <articleName> \n Removes article from live site and deletes the HTML article \n\n "
remove=" [delete] <articleName> \n Deletes draft article \n"
misc="\n\n ----------Misc: \n\n"
template=" [update-template] \n Run this if you make any changes to the handlebars templates \n\n"
watch=" [watchjs] \n Watches main.js for any changes and browserifies it to bundle.js \n\n"
# Dispatch the requested blingy subcommand to its backing script.
# $1: subcommand; $2: article name (for new/publish/unpublish/delete).
# "$1"/"$2" are quoted so empty or space-containing values cannot
# word-split (an absent command falls through to the usage text).
case "$1" in
  "install" )
    /usr/local/lib/node_modules/blingy/bin/client/install.sh ;;
  "refresh" )
    /usr/local/lib/node_modules/blingy/bin/client/init.sh ;;
  "unpublish" )
    /usr/local/lib/node_modules/blingy/bin/unpublish.sh "$2" ;;
  "delete" )
    /usr/local/lib/node_modules/blingy/bin/remove-post.sh "$2" ;;
  "new" )
    /usr/local/lib/node_modules/blingy/bin/draft-post.sh "$2" ;;
  "list")
    /usr/local/lib/node_modules/blingy/bin/list-posts.sh ;;
  "publish" )
    /usr/local/lib/node_modules/blingy/bin/publish-post.sh "$2" ;;
  "run" )
    /usr/local/lib/node_modules/blingy/bin/beefy.sh ;;
  "watchjs" )
    /usr/local/lib/node_modules/blingy/bin/watchify.sh ;;
  "update-template" )
    /usr/local/lib/node_modules/blingy/bin/update-template.sh ;;
  "help" )
    # Message variables stay unquoted on purpose: the script relies on
    # word-splitting/echo collapsing to format the \n-laden strings.
    echo $welcome $hr " ------> Usage 'blingy <command>' <------ \n\n" $commandslist1 $commandslist2 $commandslist3 $commandslist4 $hr $start $install $refresh $run $workflow $new $publish $list $workflow2 $unpublish $remove $misc $template $watch;;
  *)
    echo $welcome $hr " ------> Usage 'blingy <command>' <------ \n\n" $commandslist1 $commandslist2 $commandslist3 $commandslist4;;
esac
# install
# unpublish
# remove
# draft
# list
# make
# run
# watchjs
#
# "bin/client/commands.sh"
# "bin/client/install.sh"
# "bin/unpublish.sh"
# "bin/remove-post.sh"
# "bin/draft-post.sh"
# "bin/list-posts.sh"
# "bin/publish-post.sh"
# "bin/beefy.sh"
# "bin/watchify.sh"
|
zubaird/zubaird.github.io
|
bin/client/commands.sh
|
Shell
|
mit
| 3,062 |
#!/bin/sh
# Launch the bamos/openface container interactively with X11 forwarding,
# the invoking user's identity/home mapped in, and the focus project and
# webcam mounted. Exposed ports: 55555->22 (ssh), 9000, 8000.
#Title
#Description
#By David Rubinstein
#alt --user=$USER
#figure out way to get tensorflow inside f the instance
#for CPU
# export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.11.0rc0-cp27-none-linux_x86_64.whl
#for GPU
# export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.11.0rc0-cp27-none-linux_x86_64.whl
#sudo pip install --upgrade $TF_BINARY_URL
docker run -it -p 55555:22 -p 9000:9000 -p 8000:8000 \
--user $(id -u) \
--env="DISPLAY" \
--workdir="/home/$USER" \
--volume="/home/$USER:/home/$USER" \
--volume="/etc/group:/etc/group:ro" \
--volume="/etc/passwd:/etc/passwd:ro" \
--volume="/etc/shadow:/etc/shadow:ro" \
--volume="/etc/sudoers.d:/etc/sudoers.d:ro" \
--volume="/tmp/.X11-unix:/tmp/.X11-unix:rw" \
--volume="/home/yotta/david/git/focus:/mount/focus" \
--volume="/dev/video0:/dev/video0" \
--privileged \
bamos/openface \
/bin/bash
#osrf/ros:indigo-desktop-full \
#rqt
|
drubinstein/focus
|
docker_run.sh
|
Shell
|
mit
| 999 |
#!/bin/bash
# Packer/veewee provisioner: replace the distro's VirtualBox guest packages
# with the guest additions from the attached VBoxGuestAdditions.iso.
# Only runs when .vbox_version marks this as a VirtualBox build.
if test -f .vbox_version ; then
# The netboot installs the VirtualBox support (old) so we have to remove it
if test -f /etc/init.d/virtualbox-ose-guest-utils ; then
/etc/init.d/virtualbox-ose-guest-utils stop
fi
rmmod vboxguest
aptitude -y purge virtualbox-ose-guest-x11 virtualbox-ose-guest-dkms virtualbox-ose-guest-utils
# Install dkms for dynamic compiles
apt-get install -y dkms
# If libdbus is not installed, virtualbox will not autostart
apt-get -y install --no-install-recommends libdbus-1-3
# Install the VirtualBox guest additions
VBOX_ISO=VBoxGuestAdditions.iso
mount -o loop $VBOX_ISO /mnt
# "yes |" auto-confirms the installer's prompts.
yes|sh /mnt/VBoxLinuxAdditions.run
umount /mnt
# Start the newly build driver
/etc/init.d/vboxadd start
# Make a temporary mount point
mkdir /tmp/veewee-validation
# Test mount the veewee-validation
mount -t vboxsf veewee-validation /tmp/veewee-validation
rm $VBOX_ISO
fi
|
janschumann/debian-images
|
scripts/virtualbox.sh
|
Shell
|
mit
| 942 |
#!/bin/bash
#PBS -l nodes=1:ppn=1
#PBS -l walltime=47:00:00
#PBS -A bap-052-aa
#PBS -o outputfileB
#PBS -e errorfileB
#PBS -V
#PBS -N vidjilRunTCR002
# Run vidjil TCR repertoire detection on every sample listed (one name per
# line) in the file given as $1.
module add gcc/4.9.1
module add clang/3.5.0
FILENAME=$1
# Initialize explicitly: the original relied on an unset variable, so the
# final summary printed an empty count for an empty input file.
count=0
cd /gs/project/bap-052-aa/Park_data/RNAseq/vidjil-2016.03/
while read mySAMPLE
do
    count=$((count + 1))
    echo "$count $mySAMPLE"
    cd /gs/project/bap-052-aa/Park_data/RNAseq/vidjil-2016.03/
    ./vidjil -g ../vidjil-2015.12/germline/germlines-TR.data -i -w 30 -r 1 -U -y 1000 -z 1000 ../VIDJIL_SEGVDJ_FA/"${mySAMPLE}".segmented.vdj.fa > "${mySAMPLE}".stdout
done < "$FILENAME"
echo -e "\nTotal $count Lines read"
|
radiaj/rjscripts
|
rnaseq/runVidjilTCR.sh
|
Shell
|
mit
| 727 |
cd ../../;cc -g -std=c99 script/icon/OTE.res src/c/OTE.c src/c/action.c src/c/base.c src/c/editor.c src/c/text_selection.c src/c/event_delegator.c ../OTG/src/c/OTG.c ../OSAL/src/c/gpu.c ../OSAL/src/c/graphics.c ../OSAL/src/c/input.c ../OSAL/src/c/net.c ../OSAL/src/c/sound.c ../OSAL/src/c/system.c ../OSAL/src/c/util.c ../OSAL/release_windows/SDL2.lib ../OSAL/release_windows/SDL2_image.lib ../OSAL/release_windows/SDL2_mixer.lib ../OSAL/release_windows/SDL2_net.lib ../OSAL/release_windows/SDL2_ttf.lib ../OSAL/release_windows/SDL2main.lib ../OSAL/release_windows/SDL2test.lib -Wl,-subsystem,windows -o bin/OTE_mingw.exe;cd script/build;
|
oddthread/OTE
|
script/build/make_mingw.bash
|
Shell
|
mit
| 639 |
#!/bin/sh
# Bundle the browser entry point, then serve the game.
# (Shebang fixed: /usr/bin/sh does not exist on many systems; sh lives in
# /bin on Linux and macOS.)
browserify browser_setup.js > bundle.js && echo "Game has been built. Now running server." && node server/server.js
|
Coteh/MinesweeperClone
|
scripts/build-n-serve.sh
|
Shell
|
mit
| 131 |
#!/bin/sh
# Thin wrapper: forward all arguments to the weblog haxelib run-script.
haxelib run weblog "$@"
|
zasmarkany/weblog
|
bin/weblog.sh
|
Shell
|
mit
| 34 |
#!/bin/bash -e
# Deploy sqat on the ubuntu host: update the develop branch, build the
# stylechecker service and the website, then restart both under forever.
# When run under interactive mode, bashrc wont run
export NVM_DIR="/home/ubuntu/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && . "$NVM_DIR/nvm.sh" # This loads nvm
# ${PWD##/} strips only the leading "/" for comparison below.
pwd=${PWD##/}
root="/home/ubuntu/sqat"
# current working directory must be $root
if [ "$pwd" != "home/ubuntu/sqat" ]
then
echo "Changing your working directory to home/ubuntu/sqat"
cd "$root"
fi
git checkout develop
git pull
# Install style checker micro service
cd "${root}/stylechecker"
./gradlew installDist
# Install frontend server
cd "$root/website"
npm install
npm run deploy
# Stop all forever process
forever stopall
# Start frontend server
cd "$root/website"
NODE_ENV=production PORT=4000 forever start ./backend/babel_index.js
# Start stylechecker microservice
cd "${root}/stylechecker"
cd build/install/sqat-stylechecker/bin
# -c bash: the generated start script needs bash, not forever's default.
forever start -c bash sqat-stylechecker
forever list
|
Andyccs/sqat
|
deploy.sh
|
Shell
|
mit
| 872 |
# Default editor (Sublime Text) and pin Spark to the loopback interface.
export EDITOR='subl'
export SPARK_LOCAL_IP=127.0.0.1
|
abdinoor/dotfiles
|
system/env.zsh
|
Shell
|
mit
| 53 |
#!/bin/sh
# Build the day-11 Kotlin solution into a runnable jar, then execute it.
# $1 (optional): argument forwarded to the program.
OUTPUT=day11a.jar
MAINCLASS=Day11a
#kotlinc -verbose -include-runtime -d $OUTPUT *.kt
kotlinc -verbose -include-runtime -d "$OUTPUT" src_kotlin/*.kt
# Capture the compiler's status immediately: the original re-read $? after
# the [ ] test had already reset it, so "ERROR: $?" always printed 0.
status=$?
if [ $status -ne 0 ] ; then
    echo "ERROR: $status"
    exit 1
fi
#java -jar $OUTPUT
# ${1:+"$1"} forwards $1 only when set, without adding an empty argument.
java -cp "$OUTPUT" "$MAINCLASS" ${1:+"$1"}
|
kannix68/advent_of_code_2016
|
day11/run.sh
|
Shell
|
mit
| 263 |
# Provision a dev VM: Docker, docker-compose, Node 7, Yarn and the Angular
# CLI, then print the installed versions.
# Provisioning docker
wget -qO- https://get.docker.com/ | sh
usermod -aG docker vagrant
# Provisioning docker-compose
curl -L "https://github.com/docker/compose/releases/download/1.11.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
# Provisioning Node
# -y added to every apt-get install so unattended provisioning cannot hang
# on a confirmation prompt.
apt-get install -y python-software-properties
curl -sL https://deb.nodesource.com/setup_7.x | sudo -E bash -
apt-get install -y nodejs
# Provisioning Yarn
curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | sudo apt-key add -
echo "deb https://dl.yarnpkg.com/debian/ stable main" | sudo tee /etc/apt/sources.list.d/yarn.list
apt-get update && sudo apt-get install -y yarn
# Provisioning Angular cli
npm install -g @angular/cli
# Echo provisioned versions
docker --version
docker-compose --version
node --version
npm --version
yarn --version
ng --version
|
vitalibynda/webpacknow
|
provision.sh
|
Shell
|
mit
| 862 |
# Docker build step: add the PostgreSQL apt repo, then install the client
# and libpq headers. Expects PG_MAJOR in the environment.
# NOTE(review): "set +x" *disables* tracing -- confirm "-x" wasn't intended.
set +x
# Add PostgreSQL to sources list
# NOTE(review): $PG_MAJOR is appended *after* "main" in the sources line,
# which apt reads as an extra component -- looks misplaced; verify intent.
curl -sSL https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - \
&& echo 'deb http://apt.postgresql.org/pub/repos/apt/ buster-pgdg main' $PG_MAJOR > /etc/apt/sources.list.d/pgdg.list
# Install PostgreSQL client and libs
apt-get update -qq && \
DEBIAN_FRONTEND=noninteractive apt-get install -yq libpq-dev \
postgresql-client-$PG_MAJOR
|
michelson/chaskiq
|
.docker-files/pg.sh
|
Shell
|
mit
| 397 |
# Find out.* LSF logs that report "exit code 1", recover the original job
# command from line 4 of each ("Job <...>"), and echo the bsub command to
# resubmit it (dry-run; pipe the output to sh to actually submit).
names=$(grep -l "exit code 1" out.*)
for name in $names; do
    # Line 4 holds "Job <command>"; extract the text between the <>.
    job=$(sed -n '4,4p' "$name" | sed -n "s/Job <\([^>]*\)>.*/\1/p")
    # rusage[...]/span[...] quoted so the shell cannot glob-expand them.
    echo bsub -q short -n 12 -R "rusage[mem=110000]" -R "span[hosts=1]" -o "out.%J" -e "err.%J" "${job}"
done
|
schmiedc/pipeline_2.0
|
jobs_master_2.2/fusion/resubmit-failed-jobs.sh
|
Shell
|
mit
| 223 |
#!/usr/bin/env bash
##
# Sends local instance DB dump to given remote.
#
# Optionally creates a new dump before sending it over, or uses most recent
# local instance DB dump (default). Always wipes out and restores the dump on
# remote DB.
#
# @param 1 String : the remote id.
# @param 2 [optional] String : path to dump file override or 'new' to create one.
#
# @example
#   make db-sync-to my_remote_id
#   make db-sync-to my_remote_id new
#   make db-sync-to my_remote_id path/to/local/dump/file.sql.tgz
#   # Or :
#   cwt/extensions/remote/db/sync_to.sh my_remote_id
#   cwt/extensions/remote/db/sync_to.sh my_remote_id new
#   cwt/extensions/remote/db/sync_to.sh my_remote_id path/to/local/dump/file.sql.tgz
#
. cwt/bootstrap.sh
# "$@" (quoted) so a dump-file path containing spaces survives intact.
u_remote_sync_db_to "$@"
|
Paulmicha/common-web-tools
|
cwt/extensions/remote_db/db/sync_to.sh
|
Shell
|
mit
| 759 |
#!/bin/sh
# CocoaPods-generated build phase: copy the pod frameworks for
# FASlideView_Tests into the app bundle, stripping invalid architectures
# and re-signing as needed.
set -e
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
# install_framework: rsync one framework ($1, relative to the built products
# dir or an absolute path) into the app's Frameworks folder, strip archs the
# build doesn't target, re-sign, and (Xcode < 7) embed Swift runtime dylibs.
install_framework()
{
if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
local source="${BUILT_PRODUCTS_DIR}/$1"
elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
elif [ -r "$1" ]; then
local source="$1"
fi
local destination="${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
if [ -L "${source}" ]; then
echo "Symlinked..."
source="$(readlink "${source}")"
fi
# use filter instead of exclude so missing patterns dont' throw errors
echo "rsync -av --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
rsync -av --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
local basename
basename="$(basename -s .framework "$1")"
binary="${destination}/${basename}.framework/${basename}"
if ! [ -r "$binary" ]; then
binary="${destination}/${basename}"
fi
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
strip_invalid_archs "$binary"
fi
# Resign the code if required by the build settings to avoid unstable apps
code_sign_if_enabled "${destination}/$(basename "$1")"
# Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]})
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
code_sign_if_enabled "${destination}/${lib}"
done
fi
}
# Signs a framework with the provided identity
# code_sign_if_enabled: codesign $1 when the build settings provide an
# identity and code signing hasn't been disabled.
code_sign_if_enabled() {
if [ -n "${EXPANDED_CODE_SIGN_IDENTITY}" -a "${CODE_SIGNING_REQUIRED}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
# Use the current code_sign_identitiy
echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
echo "/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} --preserve-metadata=identifier,entitlements \"$1\""
/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} --preserve-metadata=identifier,entitlements "$1"
fi
}
# Strip invalid architectures
# strip_invalid_archs: lipo-remove from binary $1 every architecture not in
# VALID_ARCHS, in place; logs which ones were stripped.
strip_invalid_archs() {
binary="$1"
# Get architectures for current file
archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | rev)"
stripped=""
for arch in $archs; do
if ! [[ "${VALID_ARCHS}" == *"$arch"* ]]; then
# Strip non-valid architectures in-place
lipo -remove "$arch" -output "$binary" "$binary" || exit 1
stripped="$stripped $arch"
fi
done
if [[ "$stripped" ]]; then
echo "Stripped $binary of architectures:$stripped"
fi
}
# Install the pod frameworks for the active configuration (the generated
# lists happen to be identical for Debug and Release here).
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_framework "Pods-FASlideView_Tests/FASlideView.framework"
install_framework "Pods-FASlideView_Tests/FBSnapshotTestCase.framework"
install_framework "Pods-FASlideView_Tests/Nimble.framework"
install_framework "Pods-FASlideView_Tests/Nimble_Snapshots.framework"
install_framework "Pods-FASlideView_Tests/Quick.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_framework "Pods-FASlideView_Tests/FASlideView.framework"
install_framework "Pods-FASlideView_Tests/FBSnapshotTestCase.framework"
install_framework "Pods-FASlideView_Tests/Nimble.framework"
install_framework "Pods-FASlideView_Tests/Nimble_Snapshots.framework"
install_framework "Pods-FASlideView_Tests/Quick.framework"
fi
|
kildevaeld/FASlideView
|
Example/Pods/Target Support Files/Pods-FASlideView_Tests/Pods-FASlideView_Tests-frameworks.sh
|
Shell
|
mit
| 4,098 |
# Retry "xcrun simctl install" until the simulator accepts the app (the
# simulator may still be booting when this script starts).
# $1: simulator device id; $2: path to the .app bundle.
retcode=1
echo "Install to $1 with $2"
while [ $retcode -ne 0 ]; do
    # "sleep 1" (not "1s"): BSD/macOS sleep rejects unit suffixes.
    sleep 1
    # Quoted: the .app path commonly contains spaces.
    xcrun simctl install "$1" "$2"
    retcode=$?
done
|
tinyunit/tinyunit
|
cmake/scripts/install-ios.sh
|
Shell
|
mit
| 124 |
#!/bin/bash -eux
# CI step: run the florence audit make target.
# -e fail fast, -u error on unset vars, -x trace commands.
export cwd=$(pwd)
pushd $cwd/florence
make audit
popd
|
ONSdigital/florence
|
ci/scripts/audit.sh
|
Shell
|
mit
| 74 |
#!/bin/bash
#
# Check for new messages and notify on Slack.
#
# VERSION       :0.1.1
# DATE          :2018-02-02
# URL           :https://github.com/szepeviktor/debian-server-tools
# AUTHOR        :Viktor Szépe <[email protected]>
# LICENSE       :The MIT License (MIT)
# BASH-VERSION  :4.2+
# DEPENDS       :pip3 install slack-webhook-cli
# LOCATION      :/usr/local/bin/mail-notify.sh
# Cron example
# */10 * * * * virtual /usr/local/bin/mail-notify.sh "/var/mail/Maildir/new" "https://hooks.slack.com/services/ABC123"

readonly MAIL_FOLDER="$1"
readonly WEB_HOOK="$2"
readonly ICON_URL="https://assets.change.org/photos/7/az/tr/URAzTrefPrgDHRC-128x128-noPad.jpg"

# Any regular file under the Maildir "new" folder means unseen mail.
new_mail="$(find "$MAIL_FOLDER" -type f)"
if [ -n "$new_mail" ]; then
    /usr/local/bin/slack -w "$WEB_HOOK" -u "@$(hostname -d)" -i "$ICON_URL" "You've got mail"
fi

exit 0
|
szepeviktor/debian-server-tools
|
mail/ses-to-imap/mail-notify.sh
|
Shell
|
mit
| 798 |
#!/bin/bash
# Show a whiptail message box describing the packages of the "basic" bundle.
# (Dialog text is user-facing Portuguese; deliberately left untranslated.)
Detalhes() {
whiptail --title "Detalhes pacote básico" --msgbox "
Gimp -> Editor de fotos
Gksu -> Interface gráfica do sudo
Inkscape -> Editor de imagens vetoriais
RarUnrar -> Descompactador de arquivos
Ubuntu Restricted Extras -> Software essenciais
Unity Tweak Tools -> Utilitario de edição do Unity" 14 78
}
Detalhes
|
matheus-souza/backpack
|
app/basicos/menu/Detalhes.sh
|
Shell
|
mit
| 351 |
#!/bin/bash
# Update Go module dependencies for usql: refresh everything imported by
# ./internal, then the direct github.com/xo/usql packages (excluding
# drivers/internal), then tidy go.mod. Extra flags pass through via $@.
SRC=$(realpath $(cd -P "$(dirname "${BASH_SOURCE[0]}")" && pwd)/../)
set -e
pushd $SRC &> /dev/null
# Each (set -x; ...) subshell traces just that go command.
(set -x;
go get -u $@ $(go list -tags most -f '{{ join .Imports "\n" }}' ./internal/...)
)
PKGS=$(go list -tags most -f '{{ join .Imports "\n" }}'|grep 'github.com/xo/usql'|grep -v drivers|grep -v internal)
(set -x;
go get -u $@ $PKGS
)
(set -x;
go mod tidy
)
popd &> /dev/null
|
knq/usql
|
internal/update-deps.sh
|
Shell
|
mit
| 399 |
#!/bin/bash
# Interactive helper: for every completed lane (dir with a *.tar.gz), report
# timing, copy it to the zc706 board, run it there with user-supplied args,
# scrape results to the clipboard, then clean up on the board.
# (bash, not sh: this script uses arrays and [[ ]].)
donelanes=$(find . -name "*.tar.gz")
donedirs=()
for d in $donelanes; do
  # Strip the leading ./ and the /out_* suffix to get the lane directory.
  donedirs+=("$(echo "$d" | sed "s/\.\///g" | sed "s/\/out_.*//g")")
done
for d in "${donedirs[@]}"; do
  cd "$d" || continue
  # ">/dev/null 2>&1": the original "2>1 >/dev/null" wrote stderr to a
  # file literally named "1" instead of silencing it.
  scp "${d}.tar.gz" mattfel@holodeck-zc706: >/dev/null 2>&1
  # Any non-synth "VIOLATED" line means timing was not met.
  tmg=$(grep -r "VIOLATED" . | grep -v "synth" | wc -l)
  if [ "$tmg" != 0 ]; then tmg=fail; else tmg=pass; fi
  echo "TIMING MET? -- $tmg"
  cd ..
  yn="aeou"
  while [[ $yn != "" ]]; do
    ssh mattfel@holodeck-zc706 "mkdir ${d};tar -xvf ${d}.tar.gz -C $d;cd $d;mkdir verilog && mv accel.bit.bin verilog" >/dev/null 2>&1
    read -p "Args for ${d} ? (type \"skip\" to skip): " choice
    if [[ $choice != "skip" ]]; then
      python bin/scrape.py "$d" | xclip -selection c
      echo "Copied scraped data"
      ssh -t mattfel@holodeck-zc706 "cd ${d}; bash run.sh $choice | grep \"PASS\|Design done,\|does not exist\""
    fi
    if [[ $choice != "skip" ]]; then
      read -p "Hit enter to go to next test, or any input to redo this test: " yn
    else
      yn=""
    fi
  done
  ssh mattfel@holodeck-zc706 "rm -rf ${d}*" >/dev/null 2>&1
done
|
stanford-ppl/spatial-lang
|
utilities/zynq_scrape.sh
|
Shell
|
mit
| 1,099 |
#!/bin/bash
# Build and run the isort test image for each python version; exit non-zero
# if any build or run failed (statuses are summed into $result).
set -ux
result=0
for ver in {3.6,3.7,3.8}; do
# latest tag will override after each build, leaving only the newest python version tagged
docker build ./ --build-arg VERSION=$ver -t "isort:$ver" -t "isort:latest" && docker run "isort:$ver"
result=$(( $? + $result ))
done
exit $result
|
PyCQA/isort
|
scripts/docker.sh
|
Shell
|
mit
| 302 |
# Copyright (C) 2006,2007 Shawn O. Pearce <[email protected]>
# Copyright (C) 2016 Robert Bragg <[email protected]>
#
# Based on some code from bash/zsh completion support for core Git.
#
# Distributed under the GNU General Public License, version 2.0.
#
# Note: this depends on implementation details of the completion
# support for core Git
#
# __git_subdir_count_positionals: print how many positional (non-option)
# words appear on the completion command line after the "subdir" command
# (or after the subcommand given as $1). Reads the completion machinery's
# global $words array; the counter resets to 0 each time "subdir" or the
# subcommand itself is seen.
__git_subdir_count_positionals()
{
	local tok
	local count=0
	local idx
	# idx starts at 1 to skip "git" itself.
	for (( idx = 1; idx < ${#words[@]}; idx++ )); do
		tok="${words[idx]}"
		case "$tok" in
		-*)
			# Options never count as positionals.
			;;
		"subdir"|"$1")
			count=0
			;;
		?*)
			count=$((count + 1))
			;;
		esac
	done
	printf '%d' "$count"
}
# __git_subdir_find_subdirs: print the direct subdirectories of the cwd
# that contain a .git-subdir marker file, space-separated (most recently
# matched first, i.e. reverse glob order). Prints nothing when none exist.
__git_subdir_find_subdirs()
{
	local subdirs=''
	local marker=''
	# Iterate the glob directly instead of the original $(echo glob):
	# same expansion, but paths with spaces are no longer split apart.
	for marker in ./*/.git-subdir
	do
		if test -f "$marker"; then
			subdirs="$(dirname "$marker") $subdirs"
		fi
	done
	# Unquoted on purpose: collapses the trailing separator space.
	echo -n $subdirs
}
# _git_subdir: bash completion entry point for "git subdir". Dispatches on
# the detected subcommand and the word being completed ($cur) to offer the
# right options, subcommand names, or discovered subdir paths. Relies on
# git's completion internals: __git_find_on_cmdline, __gitcomp, $cur.
_git_subdir()
{
local subcommands='add fetch commit branch rebase push status config'
local subcommand=$(__git_find_on_cmdline "$subcommands")
local n_positionals=$(__git_subdir_count_positionals "$subcommand")
local add_opts="--branch --upstream= --upstream-branch= --pre-integrated-commit= --message= "
local fetch_opts="--progress"
local branch_opts="--branch= "
local rebase_opts="--local --onto= "
local push_opts="--upstream --dry-run "
local commit_opts="--message= --dry-run "
local config_opts="--key= --value= --unset "
# Patterns are "<subcommand>,<current word>"; empty subcommand means we are
# still completing the subcommand name itself.
case "$subcommand,$cur" in
,--*)
__gitcomp '--debug'
return
;;
,*)
# NOTE(review): this list ("init ... clone ...") differs from
# $subcommands above ("add ..."); confirm which is authoritative.
__gitcomp "init fetch commit clone branch rebase push status config"
;;
add,--*)
__gitcomp "$add_opts"
return
;;
add,*)
# Placeholders hint at the remaining positional arguments.
if test $n_positionals -eq 0; then
add_opts="$add_opts <repository> [<upstream>] <subdir> "
fi
if test $n_positionals -lt 3; then
add_opts="$add_opts [<upstream>] <subdir> "
fi
__gitcomp "$add_opts "
return
;;
fetch,--*)
__gitcomp "$fetch_opts "
return
;;
fetch,*)
__gitcomp "$fetch_opts $(__git_subdir_find_subdirs)"
return
;;
commit,--*)
__gitcomp "$commit_opts "
return
;;
commit,*)
# Only offer subdir paths while the first positional is missing.
if test $n_positionals -eq 0; then
commit_opts="$commit_opts $(__git_subdir_find_subdirs)"
fi
__gitcomp "$commit_opts"
return
;;
branch,--*)
__gitcomp "$branch_opts"
return
;;
branch,*)
if test $n_positionals -eq 0; then
branch_opts="$branch_opts $(__git_subdir_find_subdirs)"
fi
__gitcomp "$branch_opts"
return
;;
rebase,--*)
__gitcomp "$rebase_opts"
return
;;
rebase,*)
if test $n_positionals -eq 0; then
rebase_opts="$rebase_opts $(__git_subdir_find_subdirs)"
fi
__gitcomp "$rebase_opts"
return
;;
push,--*)
__gitcomp "$push_opts"
return
;;
push,*)
if test $n_positionals -eq 0; then
push_opts="$push_opts $(__git_subdir_find_subdirs)"
fi
__gitcomp "$push_opts"
return
;;
status,*)
__gitcomp "$(__git_subdir_find_subdirs)"
return
;;
config,--key=*)
# Complete the value part after "--key=" (third __gitcomp arg strips
# the prefix).
__gitcomp "upstream.url upstream.branch integration.url integration.branch" "" "${cur#*=}"
return
;;
config,--*)
__gitcomp "$config_opts"
return
;;
config,*)
if test $n_positionals -eq 0; then
config_opts="$config_opts $(__git_subdir_find_subdirs)"
fi
__gitcomp "$config_opts"
return
;;
esac
}
|
rib/git-subdir
|
completion.bash
|
Shell
|
mit
| 4,547 |
# Download all Populus trichocarpa chromosome (LG*) sequences from NCBI and
# concatenate them into a single species FASTA one directory up.
SPP=Populus_trichocarpa
PREFIX=ftp://ftp.ncbi.nih.gov/genomes/Populus_trichocarpa/
# -p: don't fail on re-runs; abort if we cannot enter the directory so the
# download/concatenation never happens in the wrong place.
mkdir -p "$SPP"
cd "$SPP" || exit 1
wget ${PREFIX}LG{I,II,III,IV,V,VI,VII,VIII,IX,X,XI,XII,XIII,XIV,XV,XVI,XVII,XVIII,XIX}/*.fna
cat *.fna > ../$SPP.fasta
|
sauloal/cnidaria
|
analysis/data/converters/external/Plants/Populus_trichocarpa.sh
|
Shell
|
mit
| 221 |
RVM_BIN_PATH="${HOME}/.rvm/bin"

# Put RVM's binaries on PATH for scripting; appended last, per RVM's own
# requirement that it be the final PATH change.
[[ -d "${RVM_BIN_PATH}" ]] && export PATH="${PATH}:${RVM_BIN_PATH}"

# Load RVM into the shell session *as a function* when it is installed.
if [[ -s "${HOME}/.rvm/scripts/rvm" ]]; then
    source "${HOME}/.rvm/scripts/rvm"
fi

# Don't leak the helper variable into the interactive environment.
unset -v RVM_BIN_PATH
|
agude/dotfiles
|
bash/bashrc.d/203.rvm.bash
|
Shell
|
mit
| 352 |
#!/bin/bash
#SBATCH --partition=mono
#SBATCH --ntasks=1
#SBATCH --time=4-0:00
#SBATCH --mem-per-cpu=8000
#SBATCH -J Deep-DAE_SDAE_7_bot_bin_CAE_relu
#SBATCH -e Deep-DAE_SDAE_7_bot_bin_CAE_relu.err.txt
#SBATCH -o Deep-DAE_SDAE_7_bot_bin_CAE_relu.out.txt
# SLURM job: run the deepFunction matlab driver for this DAE/SDAE
# configuration (layer sizes, binary-ness flags, and training params are
# passed positionally -- see deepFunction for their meaning).
source /etc/profile.modules
module load gcc
module load matlab
cd ~/deepLearn && srun ./deepFunction 7 'DAE' 'SDAE' '128 500 1500 1000 2000 250 10' '0 1 1 1 1 1 1' '7_bot_bin' 'CAE_relu' "'iteration.n_epochs', 'learning.lrate', 'use_tanh', 'noise.drop', 'noise.level', 'rica.cost', 'cae.cost'" '200 1e-3 2 0 0 0.01 0' "'iteration.n_epochs', 'use_tanh'" '200 2'
|
aciditeam/matlab-ts
|
jobs/deepJobs_DAE_SDAE_7_bot_bin_CAE_relu.sh
|
Shell
|
mit
| 632 |
#!/bin/bash
# Stage the metrics app deployables: rebuild the stage/ directory from the
# styles/, scripts/ and index.html sources, then list the result.
clear
echo Copy app deployables .....

# Project checkout root (hard-coded to this machine), hoisted so the path
# is written once instead of nine times.
METRICS_DIR="/Users/cssian/Projects/rhs/rhs-music-metrix/metrics"
STAGE_DIR="${METRICS_DIR}/stage"

echo Clearing...
# -f: unlike the original "rm -r", do not error on the very first run when
# the stage directory does not exist yet.
rm -rf "$STAGE_DIR"
mkdir -p "$STAGE_DIR"
echo Copying....
cp -a "${METRICS_DIR}/styles/." "${STAGE_DIR}/styles"
cp -a "${METRICS_DIR}/scripts/." "${STAGE_DIR}/scripts"
cp -a "${METRICS_DIR}/index.html" "$STAGE_DIR"
echo Done copying... Now listing
echo "- - - - - - - - - - - - - - "
ls "$STAGE_DIR"/*
echo "- - - - - - - - - - - - - - "
echo "###"
|
netaisllc/rhs-music-metrix
|
metrics/command/copy-deployable-set.sh
|
Shell
|
mit
| 799 |
## Command history configuration
HISTFILE=$HOME/.zsh_history
HISTSIZE=10000
SAVEHIST=10000
setopt append_history       # append rather than overwrite on shell exit
setopt extended_history     # record timestamp and duration per entry
setopt hist_expire_dups_first # drop duplicates first when trimming history
setopt hist_ignore_dups # ignore duplication command history list
setopt hist_ignore_space    # don't record commands starting with a space
setopt hist_verify          # expand history refs into the line before running
setopt inc_append_history   # write entries as commands run, not at exit
setopt share_history # share command history data
setopt interactivecomments # comments in the profile
|
ltw/dotfiles
|
shells/zsh/lib/history.zsh
|
Shell
|
mit
| 407 |
#!/usr/bin/env bash
# Written by Wu Jianxiao and CBIG under MIT license: https://github.com/ThomasYeoLab/CBIG/blob/master/LICENSE.md
# This function runs the make_xyzIndex_vol function to make index volumes for both the original MNI152 template and the
# normalised version
###########################################
#Define paths
###########################################
# Resolve ../utilities relative to this script's real (symlink-free) location.
UTILITIES_DIR=$(dirname "$(dirname "$(readlink -f "$0")")")/utilities
###########################################
#Main commands
###########################################
# Invoke the MATLAB helper that writes the x/y/z index volumes for $template
# into $output_dir; $template, $output_dir and $template_type are set by the
# argument parsing further down before main is called.
main(){
matlab -nodesktop -nosplash -nojvm -r "addpath('$UTILITIES_DIR'); CBIG_RF_make_xyzIndex_vol('$template', \
'$output_dir', '$template_type'); exit"
}
###########################################
#Function usage
###########################################
#usage
# Print the full help text to stderr and exit 1 (this function never returns).
usage() { echo "
Usage: $0 -p <template_type> -t <template> -o <output_dir>
This scripts create x/y/z index files in a volumetric template space as step 1 in RF approaches. In an index file, each
voxel is assigned value based on the x/y/z RAS coordinate of the voxel.
REQUIRED ARGUMENTS:
-p <template_type> type of volumetric template to use. Input to this option is also used as prefix of output files.
Possible options are:
'MNI152_orig': use FSL_MNI152 1mm template
'MNI152_norm': use FSL MNI152 1mm template's normalised volume generated by FreeSurfer's recon-all
process
'Colin27_orig': use SPM_Colin27 1mm template
'Colin27_norm': use SPM_Colin27 1mm template's normalised volume generated by FreeSurfer's recon-all
process
others: use user-defined volumetric template file. In this case, input to this option an be any string;
the user is expected to provide a template using "-t" option.
OPTIONAL ARGUMENTS:
-t <template> absolute path to user-defined volumetric template file
[ default: unset ]
-o <output_dir> absolute path to output directory
[ default: $(pwd)/results/index_MNI152 ]
-h display help message
OUTPUTS:
$0 will create 3 output files in the output directory, corresponding to the x/y/z index files.
For example:
MNI152_norm_x.INDEX.nii.gz
MNI152_norm_y.INDEX.nii.gz
MNI152_norm_z.INDEX.nii.gz
EXAMPLE:
$0 -p 'MNI152_orig'
$0 -p 'my_template' -t path/to/my/template.nii.gz
" 1>&2; exit 1; }
#Display help message if no argument is supplied
# (usage itself exits 1; the original's trailing "1>&2; exit 1" was a
# separate null command and never redirected anything)
if [ $# -eq 0 ]; then
usage
fi
###########################################
#Parse arguments
###########################################
#Default parameters
output_dir=$(pwd)/results/index_MNI152
#Assign arguments
while getopts "p:t:o:h" opt; do
case $opt in
p) template_type=${OPTARG} ;;
t) template=${OPTARG} ;;
o) output_dir=${OPTARG} ;;
h) usage ;;
*) usage ;;
esac
done
#Set up default type templates
# For the four known types any user-supplied -t is overridden, matching the
# documented behaviour; unknown types keep the -t value.
case "$template_type" in
MNI152_norm)
template=$CBIG_CODE_DIR/data/templates/volume/FSL_MNI152_FS4.5.0/mri/norm.nii.gz ;;
MNI152_orig)
template=$FSL_DIR/data/standard/MNI152_T1_1mm_brain.nii.gz ;;
Colin27_norm)
template=$CBIG_CODE_DIR/data/templates/volume/SPM_Colin27_FS4.5.0/mri/norm.mgz ;;
Colin27_orig)
template=$CBIG_CODE_DIR/data/templates/volume/SPM_Colin27_FS4.5.0/mri/orig/001.mgz ;;
esac
###########################################
#Check parameters
###########################################
# Fix: the original wrote these errors to stdout and then ran "1>&2" as a
# stand-alone null command; the messages now go to stderr directly.
if [ -z "$template_type" ]; then
echo "Template type not defined." 1>&2
exit 1
fi
if [ -z "$template" ]; then
echo "User-defined template is not provided." 1>&2
exit 1
fi
###########################################
#Other set-ups
###########################################
#Make sure output directory is set up
if [ ! -d "$output_dir" ]; then
echo "Output directory does not exist. Making directory now..."
mkdir -p "$output_dir"
fi
###########################################
#Implementation
###########################################
main
|
ThomasYeoLab/CBIG
|
stable_projects/registration/Wu2017_RegistrationFusion/registration_fusion/scripts_vol2surf/CBIG_RF_step1_make_xyzIndex_volTemplate.sh
|
Shell
|
mit
| 3,968 |
#!/bin/bash
# Uninstall step (run as root): remove the vpnbook installation tree entirely.
rm -rf /opt/vpnbook
|
alexdesousa/vpnbook_button
|
vpnbook/install/root-uninstall.sh
|
Shell
|
mit
| 33 |
#!/bin/bash
# Container entry point for p4search: validate Perforce connection settings
# from the environment, patch them into the search/Solr config files, then
# start Solr and the Jetty-hosted search app and tail their logs.
for config in P4PORT P4USER P4PASSWD P4TOKEN; do
if [ -z "${!config:-}" ]; then
echo FAIL: $config not defined 1>&2
exit 1
fi
done
# Trust the server when connecting over ssl
if [[ $P4PORT == "ssl"* ]]; then
echo SSL connection detected, establishing trust
if ! p4 trust -y; then
echo FAIL: Could not establish trust for $P4PORT 1>&2
exit 1
fi
fi
# Check login
if ! echo $P4PASSWD | p4 login > /dev/null; then
echo FAIL: Could not login using provided user/password
exit 1
fi
cd /opt/perforce/search
# Configure files
## Server connection details
# P4PORT may be "port", "host:port" or "protocol:host:port"; split on ':'
# and fill PROTOCOL/HOST/PORT accordingly (defaults: none / 127.0.0.1).
PROTOCOL=none
HOST=127.0.0.1
parts=(${P4PORT//:/ })
case ${#parts[@]} in
1)
PORT=${parts[0]}
;;
2)
HOST=${parts[0]}
PORT=${parts[1]}
;;
3)
PROTOCOL=${parts[0]}
HOST=${parts[1]}
PORT=${parts[2]}
;;
esac
# In-place edits of key=value pairs in search.config; \1 keeps the key.
sed -i 's/\(serverProtocol\)=.*/\1='$PROTOCOL'/' jetty/resources/search.config
sed -i 's/\(serverHost\)=.*/\1='$HOST'/' jetty/resources/search.config
sed -i 's/\(serverPort\)=.*/\1='$PORT'/' jetty/resources/search.config
sed -i 's/\(indexerUser\)=.*/\1='$P4USER'/' jetty/resources/search.config
sed -i 's/\(indexerPassword\)=.*/\1='$P4PASSWD'/' jetty/resources/search.config
if [ ! -z "$P4CHARSET" ]; then
sed -i 's/\(serverCharset\)=.*/\1='$P4CHARSET'/' jetty/resources/search.config
fi
# URL options use '>' as the sed delimiter (values contain '/') and also
# uncomment the setting if it is still commented out ("# " prefix).
if [ ! -z "$P4COMMONSURL" ]; then
sed -i 's>^\(# \|\)\(com.perforce.search.commonsURL\)=.*>\2='$P4COMMONSURL'>' jetty/resources/search.config
fi
if [ ! -z "$P4WEBURL" ]; then
sed -i 's>^\(# \|\)\(com.perforce.search.webURL\)=.*>\2='$P4WEBURL'>' jetty/resources/search.config
fi
if [ ! -z "$P4SWARMURL" ]; then
sed -i 's>^\(# \|\)\(com.perforce.search.swarmURL\)=.*>\2='$P4SWARMURL'>' jetty/resources/search.config
fi
# Token
sed -i 's/^# \(com.perforce.search.fileScannerToken\)/\1/' jetty/resources/search.config
sed -i 's/\(searchEngineToken\)=.*/\1='$P4TOKEN'/' jetty/resources/search.config
## Solr config
sed -i 's/^JPORT=.*/JPORT=8983/' solr/example/solr-control.sh
sed -i 's/^STOP=.*/STOP=8984/' solr/example/solr-control.sh
# Start solr and jetty/p4search
solr/example/solr-control.sh start
jetty/p4search-control.sh start
# Keep the container alive while Jetty runs; tail exits when the PID dies.
JETTY_PID=$(ps axf | grep jetty/start.jar | grep -v grep | awk '{print $1}')
tail --pid=$JETTY_PID -f start.log logs/solr.log
|
noonien/docker-perforce
|
perforce-search/run.sh
|
Shell
|
mit
| 2,377 |
#!/usr/bin/env bash
RESCUETIME_ENDPOINT="https://www.rescuetime.com/anapi/data?key=${RESCUETIME_API_KEY}&format=csv&rs=day&rk=productivity"
segments::rescuetime_fetch_changes() {
  # Fetch today's productivity rows (CSV, header line stripped) from the
  # RescueTime API. On transport failure, log and print nothing, but still
  # return success so the caller treats it as "no data".
  result="$(curl -s "$RESCUETIME_ENDPOINT" | grep -v '^Rank')"
  exit_code=$?
  if (( exit_code != 0 )); then
    debug::log "Could not reach rescuetime RESCUETIME_ENDPOINT"
    return 0
  fi
  printf '%s\n' "$result"
}
# Rebuild the "<pulse>;<time>" cache file at most once per refresh window
# (SEGMENTS_RESCUETIME_REFRESH_RATE seconds, default 600).
segments::rescuetime_refresh() {
refresh_rate="${SEGMENTS_RESCUETIME_REFRESH_RATE:-600}"
if [[ ! -f "$SEGMENT_CACHE" ]]; then
debug::log "No cache folder"
fi
if [[ -f "$SEGMENT_CACHE" ]]; then
# NOTE(review): 'stat -f "%m"' is the BSD/macOS form; GNU stat needs -c %Y.
last_update=$(stat -f "%m" "$SEGMENT_CACHE")
else
last_update=0
fi
local current_time
_sbp_get_current_time current_time
time_since_update=$(( current_time - last_update ))
if [[ "$time_since_update" -lt "$refresh_rate" ]]; then
return 0
fi
if [[ -z $RESCUETIME_API_KEY ]]; then
debug::log "RESCUETIME_API_KEY not set"
return 1
fi
result="$(segments::rescuetime_fetch_changes)"
if [[ -z "$result" ]]; then
# No data, so no logging of time today
command rm -f "$SEGMENT_CACHE"
return 0
fi
# Fields 2 and 4 of each CSV row are taken as seconds and productivity
# rating; the rating is shifted by +2 so the weighted sum is non-negative
# (assumes RescueTime's -2..2 scale -- TODO confirm against the API docs).
# NOTE(review): 'for line in $result' word-splits on whitespace, so rows
# whose activity names contain spaces would be split apart.
for line in $result ; do
seconds=$(cut -d ',' -f 2 <<<"$line")
total_seconds=$(( seconds + total_seconds ))
value=$(cut -d ',' -f 4 <<<"$line")
productivity_value=$(( value + 2 ))
score=$(( seconds * productivity_value ))
productive_score=$(( score + productive_score ))
done
# Pulse = achieved score as a percentage of the maximum possible score
# (all seconds at the top weight of 4).
max_score=$(( total_seconds * 4 ))
pulse="$(( productive_score * 100 / max_score ))%"
hours=$(( total_seconds / 60 / 60 ))
hour_seconds=$(( hours * 60 * 60 ))
remaining_seconds=$(( total_seconds - hour_seconds ))
minutes=$(( remaining_seconds / 60 ))
time="${hours}h:${minutes}m"
# Cache format "<pulse>;<time>", consumed by segments::rescuetime.
printf '%s;%s' "$pulse" "$time" > "$SEGMENT_CACHE"
}
segments::rescuetime() {
  # Render the cached pulse/time pair when a cache file exists, then kick
  # off a background refresh so the next prompt sees fresh data.
  if [[ -f "$SEGMENT_CACHE" ]]; then
    read -r cache < "$SEGMENT_CACHE"
    pulse="${cache%%;*}"   # text before the first ';'
    time="${cache##*;}"    # text after the last ';'
    print_themed_segment 'normal' "$pulse" "$time"
  fi
  execute::execute_nohup_function segments::rescuetime_refresh
}
|
brujoand/sbp
|
src/segments/rescuetime.bash
|
Shell
|
mit
| 2,078 |
#!/bin/sh
#
# Copyright 2015,2017 Cumulus Networks, Inc. All rights reserved.
#
# This file is licensed to You under the Eclipse Public License (EPL);
# You may not use this file except in compliance with the License. You
# may obtain a copy of the License at
# http://www.opensource.org/licenses/eclipse-1.0.php
#
# Creates nroff manpages from rst files in the rst dir in the source directory
# Add this script into the makefile for the build rule, before build_fnc
# Make sure to put manpages into rst and have the debian .manpages file
# look for manpages in /souredir/man/*
PKG_DIR="$1"
[ -d "$PKG_DIR/rst" ] || {
echo "$0 Usage: $0 <package dir>"
exit 1
}
man_dir="man"
# -p: idempotent, so re-running the build does not fail on an existing dir
# (also use $PKG_DIR consistently instead of mixing it with "$1").
mkdir -p "$PKG_DIR/$man_dir"
# printf instead of "echo -n": -n is not portable under #!/bin/sh.
printf "Generating man pages "
# Loop over .rst files in package/rst via a glob; parsing ls breaks on
# unusual file names.
for p in "$PKG_DIR"/rst/*.rst ; do
dst_file="$PKG_DIR/$man_dir/$(basename "$p" .rst)"
rst2man --halt=2 "$p" > "$dst_file" || {
echo
# (typo "genertaing" fixed)
echo "Error: problems generating man page: $p"
exit 1
}
printf "."
done
echo " done."
exit 0
|
CumulusNetworks/ptm
|
scripts/deb-rst2man.sh
|
Shell
|
epl-1.0
| 1,020 |
#!/bin/bash
# Render config.edn from its template using environment variables, verify
# nothing was left unsubstituted, then exec the bifrost JVM.
### ENVIRONMENT VARIABLES
# REQUIRED:
#
# KAFKA_ZOOKEEPER_CONNECT
# S3_BUCKET_NAME
# AWS_ACCESS_KEY_ID
# AWS_SECRET_ACCESS_KEY
#
# OPTIONAL:
#
# RIEMANN_HOST
# TOPIC_WHITELIST
# TOPIC_BLACKLIST
# setting defaults
export RIEMANN_HOST=${RIEMANN_HOST:-nil}
export TOPIC_WHITELIST=${TOPIC_WHITELIST:-nil}
export TOPIC_BLACKLIST=${TOPIC_BLACKLIST:-nil}
export CONFIG_FILE=/opt/bifrost/conf/config.edn
# replace variables in template with environment values
# (each %%NAME%% placeholder becomes $NAME when set; unset names are kept
# as-is so the check below can report them)
echo "TEMPLATE: generating configuation."
perl -pe 's/%%([A-Za-z0-9_]+)%%/defined $ENV{$1} ? $ENV{$1} : $&/eg' < ${CONFIG_FILE}.tmpl > $CONFIG_FILE
# check if all properties have been replaced
if grep -qoP '%%[^%]+%%' $CONFIG_FILE ; then
echo "ERROR: Not all variable have been resolved,"
echo "       please set the following variables in your environment:"
grep -oP '%%[^%]+%%' $CONFIG_FILE | sed 's/%//g' | sort -u
exit 1
fi
### NOTE : Using exec because if a monitor (supervisord or envconsul) tries to kill this script
### the signal won't be passed onto the java process(which runs in a subshell).
### By using exec, we avoid the java process running in a subshell.
exec java -Dlogback.configurationFile=/opt/bifrost/conf/logback.xml -server -jar /opt/bifrost/lib/bifrost-*-standalone.jar --config /opt/bifrost/conf/config.edn
|
FundingCircle/bifrost
|
docker/configure-and-start.sh
|
Shell
|
epl-1.0
| 1,349 |
#!/bin/bash
# Sync this clone with the canonical repository: register it as 'upstream'
# (the add errors harmlessly if it already exists), fetch both remotes,
# then merge upstream/master into the current branch.
git remote add upstream git://github.com/rsetienne/PBD.git
git fetch origin -v; git fetch upstream -v; git merge upstream/master
|
rsetienne/PBD
|
pull_upstream.sh
|
Shell
|
gpl-2.0
| 140 |
#!/bin/bash
# Concatenate a set of rotated, gzip-compressed logs (plus the live log)
# into a single <prefix>.perf.log.gz inside <directory>.
if [ $# -ne 2 ]; then
echo "Usage: ./perf_collect_logs.sh <directory> <log-name-prefix>"
exit 1
fi
logdir=$1
prefix=$2
# Targets are plain files, so plain -f is enough (was rm -rf).
rm -f "${logdir}${prefix}.perf.log.gz"
cd "$logdir" || exit 1
# Rotated logs are gzip data despite their .log-N names; decompress each in
# version-sorted order straight into the combined log. gunzip -c streams to
# stdout regardless of suffix, replacing the original copy/gunzip/cat/rm
# temp-file dance.
for file in $(printf '%s\n' "${prefix}".log-* | sort -V)
do
echo "Processing $file"
gunzip -c "$file" >> "${logdir}${prefix}.perf.log"
done
# Append the current (uncompressed) log last, then compress the result.
cat "${logdir}${prefix}.log" >> "${logdir}${prefix}.perf.log"
gzip "${logdir}${prefix}.perf.log"
|
thom-at-redhat/cfme_tests
|
scripts/perf_collect_logs.sh
|
Shell
|
gpl-2.0
| 468 |
#!/bin/sh
#
# $Id: test_flute_RLC.sh,v 1.2 2005/05/12 16:03:20 moi Exp $
#
# Copyright (c) 1999-2004 INRIA - All rights reserved
# (main authors: Julien Laboure - [email protected]
# Vincent Roca - [email protected]
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
# USA.
# skipped !
#echo "WARNING: test skipped !!!!!!"
#exit 0
# Argument check: a path to MAD's flute binary must be given and executable.
if [ -z $1 ]
then
echo "You must specify the flute binary file of MADs Flute implementation."
echo "Syntax: ./test_.._...sh mad_flute"
exit 1
fi
if [ ! -x $1 ]
then
echo "You specified a non executable file."
echo "You must specify the flute binary file of MADs Flute implementation."
echo "Syntax: ./test_.._...sh mad_flute"
exit 1
fi
# Select the MCL flute binary matching the build host OS.
host_name=`uname -s`
host_network_name=`uname -n`
host_ip=`hostname -i`
case ${host_name} in
Linux)
echo "Running FLUTE Linux Send/Recv Test"
flute_path="../../../bin/linux/flute"
;;
SunOS)
echo "Running FLUTE Solaris Send/Recv Test"
flute_path="../../../bin/solaris/flute"
;;
FreeBSD)
echo "Running FLUTE Solaris Send/Recv Test"
flute_path="../../../bin/freebsd/flute"
;;
# other OS???? todo
esac
#
# MCL to MAD using RSE
#
# First direction: MCL sends, MAD receives; both run in the background and
# the received tree is diffed against the sent tree.
# for debug...
#verbosity_recv='-v5' # receiver part
verbosity_recv='-stat1' # has to be at least -v1 or stat1
#verbosity_send='-v5' # sender part
verbosity_send='-stat1' # has to be at least -v1 or stat1
echo ""
echo "** MCL to MAD using RLC"
echo ""
# NOTE(review): the banner prints -fec1.5 but the command below uses
# -fec1.0 -- confirm which one is intended.
echo "$1 -A -m:225.1.2.3 -p:9991 -v:0 -w:1 -c:5 -s:${host_ip} -B:./"
echo "${flute_path} ${verbosity_recv} -send -cc1 -l5 -fec1.5 -a225.1.2.3/9991 -demux1 ./"
echo ""
recv_val=1
send_val=1
rm -Rf ../files/recv/*
cd ../files/recv
$1 -A -m:225.1.2.3 -p:9991 -v:0 -w:1 -c:5 -s:${host_ip} -B:./&
flute_recv_pid=$!
cd ../send
../${flute_path} ${verbosity_recv} -send -cc1 -l5 -fec1.0 -a225.1.2.3/9991 -demux1 ./ &
flute_send_pid=$!
wait ${flute_recv_pid}
recv_val=$?
wait ${flute_send_pid}
send_val=$?
# TODO : what if sender has finished before receiver ??
cd ../../iop
#diff -r ./files/send ./files/recv
diff ../files/send/ ../files/recv/
diff_val=$?
# MAD's receiver is expected to exit with status 253 here -- TODO confirm.
if [ ${send_val} -ne 0 ]
then
echo "FLUTE Send Failed"
exit 1
elif [ ${recv_val} -ne 253 ]
then
echo "FLUTE Recv Failed"
exit 1
elif [ ${diff_val} -ne 0 ]
then
echo "Test failed: received files do not match sent files!"
exit 1
fi
#
# MAD to MCL using RSE
#
# Second direction: MAD sends, MCL receives; received files land under a
# per-host subdirectory and are diffed against the sent tree.
# for debug...
#verbosity_recv='-v5' # receiver part
verbosity_recv='-stat1' # has to be at least -v1 or stat1
#verbosity_send='-v5' # sender part
verbosity_send='-stat1' # has to be at least -v1 or stat1
echo ""
echo "** MAD to MCL using RSE "
echo ""
echo "${flute_path} ${verbosity_recv} -recv -a225.1.2.3/9991 -demux1"
echo "$1 -S -m:225.1.2.3 -p:9991 -v:0 -x:2 -F:../"
echo ""
recv_val=1
send_val=1
rm -Rf ../files/recv/*
cd ../files/recv
../${flute_path} ${verbosity_recv} -recv -cc1 -l5 -a225.1.2.3/9991 -demux1 &
flute_recv_pid=$!
cd ..
$1 -S -m:225.1.2.3 -p:9991 -w:1 -c:5 -v:0 -F:./send &
flute_send_pid=$!
wait ${flute_recv_pid}
recv_val=$?
wait ${flute_send_pid}
send_val=$?
# TODO : what if sender has finished before receiver ??
cd ../iop
#diff -r ./files/send ./files/recv
diff ../files/send/ ../files/recv/${host_network_name}/send
diff_val=$?
if [ ${send_val} -ne 0 ]
then
echo "FLUTE Send Failed"
exit 1
elif [ ${recv_val} -ne 0 ]
then
echo "FLUTE Recv Failed"
exit 1
elif [ ${diff_val} -ne 0 ]
then
echo "Test failed: received files do not match sent files!"
exit 1
fi
|
jcable/mcl
|
check/flute/iop/test_flute_RLC.sh
|
Shell
|
gpl-2.0
| 4,115 |
#!/bin/sh
#set -o allexport
#source configs/params.conf
#set +o allexport
# Container entry point: launch the solver with flags passed via the KEYS
# environment variable ($KEYS is intentionally unquoted so it can carry
# multiple whitespace-separated arguments).
/usr/bin/TG_3D $KEYS
|
razdoburdin/TG_3D
|
docker-entrypoint.sh
|
Shell
|
gpl-2.0
| 97 |
#
# Copyright (C) 2010 OpenWrt.org
#
. /lib/ramips.sh
# Flash partition that sysupgrade writes the image to.
PART_NAME=firmware
# File copied into the upgrade ramfs so board detection still works there
# (per OpenWrt sysupgrade convention -- confirm).
RAMFS_COPY_DATA=/lib/ramips.sh
# Validate an image before sysupgrade flashes it.
# $1: path to the image file. Returns 0 when the image looks flashable for
# the detected board, non-zero otherwise.
platform_check_image() {
local board=$(ramips_board_name)
local magic="$(get_magic_word "$1")"
# Exactly one image argument is expected.
[ "$ARGC" -gt 1 ] && return 1
case "$board" in
dir-300-b1 | dir-600-b1 | dir-600-b2 | fonera20n | nw718 | rt-g32-b1 | v22rw-2x2 | whr-g300n | hw550-3g | mofi3500-3gn)
# "2705" -- presumably the leading bytes of the uImage header magic
# (0x27051956); TODO confirm.
[ "$magic" != "2705" ] && {
echo "Invalid image type."
return 1
}
return 0
;;
esac
echo "Sysupgrade is not yet supported on $board."
return 1
}
platform_do_upgrade() {
	# Every supported board currently takes the stock upgrade path; the
	# board name is still looked up so future board-specific branches can
	# be added here.
	local board
	board=$(ramips_board_name)
	default_do_upgrade "$ARGV"
}
# Stop the userspace watchdog feeder so it cannot reboot the device
# mid-flash; complain if something is still holding /dev/watchdog open.
# NOTE(review): when no watchdog process remains, the grep pipeline fails
# and the function returns non-zero even on success -- the hook runner
# appears to ignore the status; confirm.
disable_watchdog() {
killall watchdog
( ps | grep -v 'grep' | grep '/dev/watchdog' ) && {
echo 'Could not disable watchdog'
return 1
}
}
# Run the watchdog shutdown before sysupgrade writes the image.
append sysupgrade_pre_upgrade disable_watchdog
|
riedel/openwrt-wdtv
|
target/linux/ramips/base-files/lib/upgrade/platform.sh
|
Shell
|
gpl-2.0
| 855 |
#!/bin/sh
# Launch the Borealis MITRE demo: lookup server, borealis node and demo
# client each in their own xterm (staggered by sleeps), then the Java
# visualizer in the foreground.
CVS_SANDBOX=${CVS_SANDBOX:-"${HOME}"}
echo $CVS_SANDBOX
BOREALIS_HOME=${CVS_SANDBOX}/borealis
BOREALIS_SRC_HOME=${BOREALIS_HOME}/src/src
CLIENT_HOME=${BOREALIS_HOME}/demo/mitre
LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:${INSTALL_NMSTL}/lib:${INSTALL_ANTLR}/lib
PATH=${PATH}:${BOREALIS_SRC_HOME}:${BOREALIS_HOME}/tool/marshal:${BOREALIS_HOME}/tool/head
echo "CentralLookupServer"
xterm -T [email protected]:30000 -geometry 80x10+0+0 -e sh -l -c "LD_LIBRARY_PATH=$LD_LIBRARY_PATH ${BOREALIS_SRC_HOME}/CentralLookupServer -d 127.0.0.1:30000 " &
sleep 4
echo "borealis"
xterm -T [email protected]:15000 -geometry 80x10+0+200 -e sh -l -c "LD_LIBRARY_PATH=$LD_LIBRARY_PATH ${BOREALIS_SRC_HOME}/borealis -d 127.0.0.1:15000 2>&1 |tee borealis.log" &
sleep 4
echo "mitre_demo"
xterm -T client -geometry 80x10+0+400 -e sh -l -c "LD_LIBRARY_PATH=$LD_LIBRARY_PATH PATH=$PATH ./mitre_demo 2>&1 |tee mitre_demo.log" &
sleep 4
echo "client"
java -cp ${CLIENT_HOME}/visualizer.jar Visualizer
|
dhu/borealis-demo
|
mitre/run.sh
|
Shell
|
gpl-2.0
| 986 |
#!/bin/bash
# Build per-quiz-part answer files from scanned OMR output: for each answer
# sheet, extract the student login and the marked answers for that part's
# question range; login p00000 is treated as the correction key sheet.
SCRIPT_DIR=`dirname $0`
source ${SCRIPT_DIR}/quiz_common.sh
IMAGE_FILES=`ls ${QUIZ_DIR}/omr_output/ 2> /dev/null | grep "^[^(jpg)]*jpg$"`
QUIZ_PART_NB=0
echo $QUIZ_PARTS
for QUIZ_PART in ${QUIZ_PARTS}
do
echo "======================================="
echo "Creating data for '${QUIZ_PART}'"
NB_STUDENTS=0
OUTPUT_FILE=${QUIZ_DIR}/correction/${QUIZ_PART}.students_answers
echo "First question: ${QUIZ_PARTS_MIN_QUESTIONS[$QUIZ_PART_NB]}"
echo "Last question: ${QUIZ_PARTS_MAX_QUESTIONS[$QUIZ_PART_NB]}"
echo -n "" > ${OUTPUT_FILE}
for IMAGE_FILE in ${IMAGE_FILES}
do
# Prefer mmr over omr data; omr1 is a degraded fallback.
if [ -e ${QUIZ_DIR}/omr_output/${IMAGE_FILE}.mmr_data ]
then
OMR_DATA_FILE=${QUIZ_DIR}/omr_output/${IMAGE_FILE}.mmr_data
elif [ -e ${QUIZ_DIR}/omr_output/${IMAGE_FILE}.omr_data ]
then
OMR_DATA_FILE=${QUIZ_DIR}/omr_output/${IMAGE_FILE}.omr_data
elif [ -e ${QUIZ_DIR}/omr_output/${IMAGE_FILE}.omr1_data ]
then
echo "Error: No mmr_data or omr_data file found for ${IMAGE_FILE}. Failing back to omr1_data to continue." >2
OMR_DATA_FILE=${QUIZ_DIR}/omr_output/${IMAGE_FILE}.omr1_data
else
echo "Error: No mr_data found for ${IMAGE_FILE}."
exit -1
fi
BACKSLASHED_OMR_OUTPUT_DIR=`echo "${QUIZ_DIR}/omr_output/" | sed 's/\//\\\\\//g'`
SHORT_FILE=`echo ${OMR_DATA_FILE} | sed "s/${BACKSLASHED_OMR_OUTPUT_DIR}//"`
echo "	Parsing ${SHORT_FILE}."
# The awk program below reads the mark matrix: the first 5 rows encode the
# login digits, the remaining rows the answers (two 5-column blocks per
# physical row). It emits one "login;answers;..." line for questions in
# [min, max].
OUTPUT_TEXT=`awk -v min=${QUIZ_PARTS_MIN_QUESTIONS[$QUIZ_PART_NB]} -v max=${QUIZ_PARTS_MAX_QUESTIONS[$QUIZ_PART_NB]} -v file=${OMR_DATA_FILE} '
BEGIN {
FS=" "
login[1] = -1
login[2] = -1
login[3] = -1
login[4] = -1
login[5] = -1
question_number = 1
}
NR <= 5 {
for (i = 1; i <= NF; i = i + 1) {
if ($i == "1") {
if (login[NR] != -1) {
print "Error: Two marks on line ", NR, " in file ", file > "/dev/stderr"
exit 1
}
login[NR] = i - 1
}
}
}
NR == 5 {
if ((login[1] == -1) || (login[2] == -1) || (login[3] == -1) || (login[4] == -1) || (login[4] == -1)) {
print "Error: Incomplete login in file ", file > "/dev/stderr"
exit 2
}
if (login[1] == 1) {
printf "f"
} else {
printf "p"
}
printf "%d%d%d%d%d;", login[1], login[2], login[3], login[4], login[5]
}
NR > 5 {
for (i = 1; i <= NF; i = i + 1) {
if (i <= 5) {
answers[NR-5,i] = $i
} else {
answers[(NR-5)+40,i-5] = $i
}
}
nb_questions = nb_questions + 1
}
END {
for (question_number = 1; question_number <= 80; question_number = question_number + 1) {
if ((question_number >= min) && (question_number <= max)) {
first_answer=1
for (i = 1; i <= 5; i = i + 1) {
if (answers[question_number,i] == "1") {
if (first_answer == 1)
first_answer = 0
else
printf "\\\\"
printf "R%d", i
}
}
printf ";"
}
}
print ""
}
' ${OMR_DATA_FILE}`
# Sheets signed p00000 hold the reference answers; everything else is a
# student sheet.
LOGIN=`echo ${OUTPUT_TEXT} | cut -d ";" -f 1`
if [ "${LOGIN}" = "p00000" ]
then
echo ${OUTPUT_TEXT} > ${QUIZ_DIR}/correction/${QUIZ_PART}.correction_answers
else
echo ${OUTPUT_TEXT} >> ${OUTPUT_FILE}
NB_STUDENTS=$((${NB_STUDENTS} + 1))
fi
done
QUIZ_PART_NB=$((${QUIZ_PART_NB} + 1))
echo ${NB_STUDENTS} > ${OUTPUT_FILE}.nb_students
echo ""
done
echo "All done successfully."
|
CedricDinont/PaperQuiz
|
bin/prepare_correction.sh
|
Shell
|
gpl-2.0
| 3,265 |
# Simple colors and git status related functions.
# ANSI escape sequences used by the prompt; "0;9x" codes are the
# high-intensity foreground variants.
COLOR_RED="\033[0;91m"
COLOR_YELLOW="\033[0;93m"
COLOR_GREEN="\033[0;32m"
COLOR_PURPLE="\033[0;95m"
COLOR_BLUE="\033[0;94m"
COLOR_WHITE="\033[0;37m"
COLOR_RESET="\033[0m"
# Pick a prompt color from the state of the current git repo:
#   red    - dirty working tree
#   purple - clean but ahead of upstream
#   green  - clean and in sync
#   yellow - anything else (e.g. staged-only states)
function __git_color {
  local git_status="$(git status 2> /dev/null)"
  # git <= 2.8 prints "working directory clean"; git >= 2.9.1 prints
  # "working tree clean". Accept either, otherwise clean repos on modern
  # git always fell through to red.
  if [[ ! $git_status =~ "working directory clean" && ! $git_status =~ "working tree clean" ]]; then
    echo -e $COLOR_RED
  elif [[ $git_status =~ "Your branch is ahead of" ]]; then
    echo -e $COLOR_PURPLE
  elif [[ $git_status =~ "nothing to commit" ]]; then
    echo -e $COLOR_GREEN
  else
    echo -e $COLOR_YELLOW
  fi
}
# Emit "{branch}" for a checked-out branch, "{commit-ish}" for a detached
# HEAD, and nothing when the cwd is not inside a git repository.
function __git_branch {
  local git_status="$(git status 2> /dev/null)"
  local branch_re="On branch ([^${IFS}]*)"
  local detached_re="HEAD detached at ([^${IFS}]*)"
  if [[ $git_status =~ $branch_re ]]; then
    echo "{${BASH_REMATCH[1]}}"
  elif [[ $git_status =~ $detached_re ]]; then
    echo "{${BASH_REMATCH[1]}}"
  fi
}
# Set a decent default that is git friendly.
# Layout: [user] {branch} cwd$ -- the escaped \$(...) substitutions are
# re-evaluated every time the prompt is drawn.
export PS1="\[$COLOR_WHITE\][\u]\[\$(__git_color)\] \$(__git_branch) \[$COLOR_YELLOW\]\w\[$COLOR_BLUE\]\$\[$COLOR_RESET\] "
|
simesy/mine
|
bash/bashrc_prompt.sh
|
Shell
|
gpl-2.0
| 1,129 |
#!/bin/sh
# Bootstrap the autotools build system from a fresh checkout; each tool must
# succeed (short-circuit &&) before the next one runs.
aclocal && autoconf && autoheader && libtoolize && automake --add-missing && echo "OK, you can run \`./configure' now."
|
md5crew/inotify-tools
|
autogen.sh
|
Shell
|
gpl-2.0
| 132 |
# Scrub the tree back to a distributable state: build products, generated
# configure machinery, per-platform source dirs and editor backup files.
make clean
rm -rf ./df0.adz
rm -rf ./kick.rom
rm -rf ./config.log
rm -rf ./config.status
rm -rf ./configure
rm -rf ./aclocal.m4
rm -rf ./src/gfxdep
rm -rf ./src/guidep
rm -rf ./src/joydep
rm -rf ./src/machdep
rm -rf ./src/osdep
rm -rf ./src/sounddep
rm -rf ./src/threaddep
# Quote the -name patterns (the original's bare *~ could be glob-expanded by
# the shell against the current directory) and let find run the deletion so
# paths containing whitespace are handled safely; -prune stops descent into
# directories that are about to be removed.
find . -type d -name 'autom4te.cache' -prune -exec rm -rf {} +
find . -type d -name '.deps' -prune -exec rm -rf {} +
find . -type f -name 'Makefile' -exec rm -f {} +
find . -type f -name '*~' -exec rm -f {} +
|
voorhees1979/PUAE
|
release_.sh
|
Shell
|
gpl-2.0
| 430 |
#!/bin/sh
#set -e -x
usage()
{
  # Print the invocation synopsis for this comparison script.
  printf 'Usage: %s minlength inputfile\n' "$0"
}
# Run the command line passed as a single string in $1 (relying on
# word-splitting of the unquoted expansion) and abort the whole script if
# it exits non-zero.
checkerror()
{
$1
if test $? -ne 0
then
echo "failure: ${1}"
exit 1
fi
}
# In-place filter of the file named in $1: drop '#' comment lines and
# collapse runs of spaces into a single space (via a temp file in the cwd).
cleanhashlines()
{
SCRATCH=`mktemp TMP.XXXXXX` || exit 1
sed -e '/^#/d' -e 's/[ ][ ]*/ /g' "$1" > "${SCRATCH}"
mv "${SCRATCH}" "$1"
}
# In-place numeric sort of the file named in $1 (via a temp file in the cwd).
sortlines()
{
SCRATCH=`mktemp TMP.XXXXXX` || exit 1
sort -n "$1" > "${SCRATCH}"
mv "${SCRATCH}" "$1"
}
# In-place projection of the file named in $1: keep only fields 1, 3, 4, 5
# and 7 of every line, space-separated.
# NOTE(review): gawk is hard-coded to a Fink install path (/sw/bin), so this
# only works on the author's macOS setup -- confirm before reuse.
extractlines()
{
TMPFILE=`mktemp TMP.XXXXXX` || exit 1
/sw/bin/gawk '/.*/ {print $1 " " $3 " " $4 " " $5 " " $7}' $1 > ${TMPFILE}
mv ${TMPFILE} $1
}
# --- main: compare genometools repfind against the reference repfind.x ---
if test $# -ne 2
then
usage
exit 1
fi
minlength=$1
filename=$2
GTDIR=/Users/stefan/genometools
# Build the suffix array index from the input sequence, run both repeat
# finders at the same minimum length, normalise both outputs and diff them.
# NOTE(review): the original command read "-db $(unknown)", which would try
# to execute a non-existent command named 'unknown'; suffixerator's -db
# operand is the input sequence, so ${filename} is used here -- confirm.
checkerror "${GTDIR}/bin/gt suffixerator -db ${filename} -indexname sfxidx -dna -suf -tis -lcp -pl"
checkerror "${GTDIR}/bin/gt repfind -l ${minlength} -r -ii sfxidx" > result.gt
cleanhashlines result.gt
extractlines result.gt
sortlines result.gt
checkerror "/Users/stefan/bin-ops/i686-apple-darwin/repfind.x -allmax -l ${minlength} -r -noevalue -nodistance $filename" > result.rep
cleanhashlines result.rep
sortlines result.rep
checkerror "diff -w result.rep result.gt"
|
bioh4x/NeatFreq
|
lib/genometools-1.4.1/scripts/repfvsrepf.sh
|
Shell
|
gpl-2.0
| 1,129 |
#!/bin/bash
# Run a single RTEMS test (<test_group>/<test_name>) through the opal runner.
if (( $# != 2 )); then
    echo "Error - parameters missing"
    echo "Syntax : $0 test_group test_name"
    echo "example : $0 samples hello"
    exit 1
fi
./runRTEMStest-opal.sh output image.iso $1 $2 sources symbols
|
gedare/GEMS
|
support/opal-quicktest.sh
|
Shell
|
gpl-2.0
| 226 |
#!/bin/sh
# Interactive menu wrapper around sqlmap: every attack option is run with
# --tor for anonymity; the script re-executes itself ($0 $1 ...) after each
# action to redisplay the menu.
clear
ver="0.9.9"
printf "**  Automated sqlmap (asqlmap) for BackBox v. $ver  **\n"
printf "               developed by Gualty  \n"
printf "           http://github.com/Gualty  \n"
printf "\n\nEach operation will be performed using the --tor flag for your anonymity"
printf "\n      ** Check that TOR and Polipo are running **\n"
#Check if sqlmap is installed on the system
sqlmapexist=$(which sqlmap)
if [ -z "$sqlmapexist" ]; then
	echo "ERROR: sqlmap is not installed!\n\n Install it before run sqlmap"
	exit 0
fi
#Variables from the command line
# Defaults: risk (-r), level (-l) and threads (-t) all 1; each flag may
# appear as the 2nd, 4th or 6th argument, hence the repeated checks below.
l="1"
r="1"
t="1"
#If 2° element is -r
if [ "$2" = "-r" ]; then
r=$3
fi
#If 4° element is -r
if [ "$4" = "-r" ]; then
r=$5
fi
#If 6° element is -r
if [ "$6" = "-r" ]; then
r=$7
fi
#If 2° element is -l
if [ "$2" = "-l" ]; then
l=$3
fi
#If 4° element is -l
if [ "$4" = "-l" ]; then
l=$5
fi
#If 6° element is -l
if [ "$6" = "-l" ]; then
l=$7
fi
#If 2° element is -t
if [ "$2" = "-t" ]; then
t=$3
fi
#If 4° element is -t
if [ "$4" = "-t" ]; then
t=$5
fi
#If 6° element is -t
if [ "$6" = "-t" ]; then
t=$7
fi
#Google Dork
case $1 in
-g) echo "\nATTENTION: Google Dork search will not use Tor, so you will not be anonymous.\nPress ENTER to continue at your own risk or CTRL+C to close asqlmap\n";read tasto;sqlmap -g $2 --random-agent -b --dbs --table --eta --cleanup --identify-waf;echo "Google Dork search done\n\nPress any key to close asqlmap";read tasto;exit;;
esac
#Check if the user specified an URL if is not a Google Dork search
if [ -z "$1" ]
then
printf "\nNo URL specified. \nEg. ./asqlmap.sh http://www.example.com/index.php?id= \n\nPress any key to close asqlmap\n";read tasto;exit;
fi
if [ "$1" = "-h" ] || [ "$1" = "-help" ]; then
echo "USAGE:\n\t./asqlmap.sh \"URL\" [OPTIONS]\nOptions after URL:\n\t-r <risk value>\t\tRisk of test to perform (1-3, default 1)\n\t-l <level value>\tLevel of test to perform (1-5, default 1)\n\t-t <# of threads>\tNumber of threads (1-10, default 1)\nOptions without URL:\n\t-g <google dork>\tSearch for Google Dorks\n\t-purge-output\t\tSecurely erase the sqlmap output directory\n\t-h,-help\t\tShow this help\n\t-v\t\t\tShow the version of asqlmap"
exit 0
fi
if [ "$1" = "-v" ]; then
echo "\nasqlmap v. $ver\n"
exit 0
fi
if [ "$1" = "-purge-output" ]; then
printf "\nATTENTION: this operation will be irreversible.\nIf an error of sqlmap appear, update sqlmap.\nPress ENTER to continue or CTRL+C to abort.\n";read tasto;sqlmap --purge-output;printf "sqlmap output folder was securely purged\n\nPress any key to continue";read tasto;
exit 0
fi
# The options menu
printf "\nURL: "; echo $1
printf "\nWhat do you want to do?\n\n"
printf "1) Vulnerability check and information research (Databases,tables)\n"
printf "2) Users, passwords and privileges research\n"
printf "3) Open SQL Shell\n"
printf "4) Open OS Shell\n"
printf "5) Dump single table\n"
printf "6) Dump single database\n"
printf "7) Dump all databases\n"
printf "============================================================================\n"
printf "8) Update this tool\n"
printf "9) Update sqlmap\n"
printf "0) Quit\n"
printf "Choise: "
read choice
# Execute the right operation based on the choice of the user
# (each arm ends by re-invoking this script with the original arguments so
# the menu is shown again)
case "$choice" in
1) sqlmap -u $1 --random-agent --level=$l --risk=$r --threads=$t -b --dbs --table --tor --eta --cleanup --identify-waf --exclude-sysdbs;echo "Vulnerability check done\n\nPress any key to continue";read tasto;$0 $1 $2 $3 $4 $5;;
2) sqlmap -u $1 --random-agent --level=$l --risk=$r --threads=$t -b --users --passwords --privileges --tor --eta --cleanup --identify-waf --exclude-sysdbs;echo "\nRetrieving credentials and privileges done\n\nPress any key to continue";read tasto;$0 $1 $2 $3 $4 $5;;
3) sqlmap -u $1 --random-agent --level=$l --risk=$r --threads=$t -b --sql-shell --tor --eta --cleanup --identify-waf --exclude-sysdbs;echo "\nSQL Shell closed\n\nPress any key to continue";read tasto;$0 $1 $2 $3 $4 $5;;
4) sqlmap -u $1 --random-agent --level=$l --risk=$r --threads=$t -b --os-shell --tor --eta --cleanup --identify-waf --exclude-sysdbs;echo "\nOS Shell closed\n\nPress any key to continue";read tasto;$0 $1 $2 $3 $4 $5;;
5) echo "\nTable name: "; read tabella; sqlmap -u $1 --random-agent --level=$l --risk=$r --threads=$t -b --dump -T $tabella --tor --eta --cleanup --identify-waf --exclude-sysdbs;echo "\nDump of the table '$tabella' done\n\nPress any key to continuee";read tasto;$0 $1 $2 $3 $4 $5;;
6) echo "\nDatabase name: "; read database; sqlmap -u $1 --random-agent --level=$l --risk=$r --threads=$t -b --dump -D $database --tor --eta --cleanup --identify-waf --exclude-sysdbs;echo "\nDump of the database '$database' done\n\nPress any key to continue";read tasto;$0 $1 $2 $3 $4 $5;;
7) sqlmap -u $1 --random-agent --level=$l --risk=$r --threads=$t -b --dump-all --tor --eta --cleanup --identify-waf --exclude-sysdbs;echo "\nDump of all databases done\n\nPress any key to continue";read tasto;$0 $1 $2 $3 $4 $5;;
8) git pull; echo "\nasqlmap updated\nPress any key to continue";read tasto;$0 $1 $2 $3 $4 $5;;
9) sudo sqlmap --update; echo "\nsqlmap updated\nPress any key to continue";read tasto;$0 $1 $2 $3 $4 $5;;
0) echo "\nBye bye =)\n"; exit 0;;
q) echo "\nBye bye =)\n"; exit 0;;
quit) echo "\nBye bye =)\n"; exit 0;;
*) echo "\nNot valid command\nPress any key to continue";read tasto;$0 $1 $2 $3 $4 $5;;
esac
|
Gualty/asqlmap-bb
|
asqlmap.sh
|
Shell
|
gpl-2.0
| 5,500 |
#!/bin/zsh
# Presentation slide: clear the screen, draw a faux-terminal frame that
# highlights the `appcfg.py update .` deploy command, then block until the
# presenter presses Enter.
clear
echo -e "
\e[1;37m
+------------------------------------------------------------------------------------+
| malukenho@devfestne ~                                                              |
+------------------------------------------------------------------------------------+
|                                                                                    |
| $ \e[1;32mappcfg.py update .\e[1;37m                                                               |
|                                                                                    |
|                                                                                    |
|                                                                                    |
|                                                                                    |
|                                                                                    |
|                                                                                    |
|                                                                                    |
|                                                                                    |
|                                                                                    |
|                                                                                    |
|                                                                                    |
|                                                                                    |
|                                                                                    |
|                                                                                    |
|                                                                                    |
|                                                                                    |
|                                                                                    |
|                                                                                    |
|                                                                                    |
|                                                                                    |
|                                                                                    |
|                                                                                    |
+------------------------------------------------------------------------------------+
"
# Wait for a keypress before advancing to the next slide.
read slide
|
malukenho/pgae
|
slides/deploy.sh
|
Shell
|
gpl-2.0
| 3,431 |
# Simulation time step of the samples fed to the Fourier transform helper.
dt=0.004
# Transform the record in $1 (sample range 0..10000) and write the spectrum
# to $2.
cat $1 | ~/progs/fourier.py 0 10000 $dt > $2
|
zakirovandrey/DTmaxwell4
|
calc_purcell.sh
|
Shell
|
gpl-2.0
| 54 |
#!/bin/bash
# We use some bash-isms (getopts?)
# Copyright (C) 2007-2010 Red Hat, Inc. All rights reserved.
#
# This file is part of LVM2.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# lvm_dump: This script is used to collect pertinent information for
# the debugging of lvm issues.
# following external commands are used throughout the script
# echo and test are internal in bash at least
# Tool names are kept in variables so a constrained environment can point
# them elsewhere; the comments note the non-POSIX flags each one needs.
MKDIR=mkdir # need -p
TAR=tar # need czf
RM=rm # need -rf
CP=cp
TAIL=tail # we need -n
LS=ls # need -la
PS=ps # need alx
SED=sed
DD=dd
CUT=cut
DATE=date
BASENAME=basename
UDEVADM=udevadm
UNAME=uname
# user may override lvm and dmsetup location by setting LVM_BINARY
# and DMSETUP_BINARY respectively
LVM=${LVM_BINARY-lvm}
DMSETUP=${DMSETUP_BINARY-dmsetup}
die() {
	# Print a message to stderr and abort the script with the given status.
	# $1: exit status; remaining args: the message.
	local status=$1
	shift
	echo "$@" 1>&2
	exit "$status"
}
"$LVM" version >& /dev/null || die 2 "Could not run lvm binary '$LVM'"
"$DMSETUP" version >& /dev/null || DMSETUP=:
# Print the option summary on stdout and exit unsuccessfully.
usage() {
	cat <<EOF
$0 [options]
 -h print this message
 -a advanced collection - warning: if lvm is already hung,
 then this script may hang as well if -a is used
 -m gather LVM metadata from the PVs
 -d <directory> dump into a directory instead of tarball
 -c if running clvmd, gather cluster data as well
 -u gather udev info and context

EOF
	exit 1
}
# Option defaults: every optional collection pass is off until requested.
advanced=0
clustered=0
metadata=0
udev=0
while getopts :acd:hmu opt; do
	case $opt in
		# NOTE(review): 's' is not in the getopts string above, so this
		# branch is unreachable and $sysreport is never read anywhere —
		# dead code kept for reference; confirm before removing.
		s) sysreport=1 ;;
		a) advanced=1 ;;
		c) clustered=1 ;;
		d) userdir=$OPTARG ;;
		h) usage ;;
		m) metadata=1 ;;
		u) udev=1 ;;
		:) echo "$0: $OPTARG requires a value:"; usage ;;
		\?) echo "$0: unknown option $OPTARG"; usage ;;
		*) usage ;;
	esac
done
# UTC timestamp used to build a unique dump directory name; tr strips the
# space %k pads single-digit hours with.
NOW=`$DATE -u +%G%m%d%k%M%S | /usr/bin/tr -d ' '`
if test -n "$userdir"; then
	dir="$userdir"
else
	dirbase="lvmdump-$HOSTNAME-$NOW"
	dir="$HOME/$dirbase"
fi
# Fixed: quote "$dir" — $HOME or a user-supplied -d path may contain spaces,
# which made the unquoted test/mkdir operate on the wrong words.
test -e "$dir" && die 3 "Fatal: $dir already exists"
$MKDIR -p "$dir" || die 4 "Fatal: could not create $dir"
# Session log; every helper below appends to it.
log="$dir/lvmdump.log"
# Echo a message to the console and also append it to the session log.
myecho() {
	printf '%s\n' "$*"
	printf '%s\n' "$*" >> "$log"
}
# Record a command line in the session log, then execute it (via eval) in
# the current shell so redirections inside the string take effect.
log() {
	printf '%s\n' "$*" >> "$log"
	eval "$@"
}
# Emit caveats about conditions that reduce the quality of the dump:
# running unprivileged, or dmsetup having been disabled at startup.
warnings() {
	if [ "$UID" != "0" ] && [ "$EUID" != "0" ]; then
		myecho "WARNING! Running as non-privileged user, dump is likely incomplete!"
	elif [ "$DMSETUP" = ":" ]; then
		myecho "WARNING! Could not run dmsetup, dump is likely incomplete."
	fi
}
# Show warnings up front (repeated again at the end of the run).
warnings
myecho "Creating dump directory: $dir"
echo " "
# "Advanced" collection runs live LVM scan/report commands; these can block
# if LVM itself is hung, which is why they are opt-in via -a.
if (( $advanced )); then
	myecho "Gathering LVM volume info..."
	myecho " vgscan..."
	log "\"$LVM\" vgscan -vvvv >> \"$dir/vgscan\" 2>&1"
	myecho " pvscan..."
	log "\"$LVM\" pvscan -v >> \"$dir/pvscan\" 2>> \"$log\""
	myecho " lvs..."
	log "\"$LVM\" lvs -a -o +devices >> \"$dir/lvs\" 2>> \"$log\""
	myecho " pvs..."
	log "\"$LVM\" pvs -a -v >> \"$dir/pvs\" 2>> \"$log\""
	myecho " vgs..."
	log "\"$LVM\" vgs -v >> \"$dir/vgs\" 2>> \"$log\""
fi
# Cluster (clvmd) data: cman_tool summaries plus DLM lock state.  The whole
# group's stdout is appended to one file, cluster_info.
if (( $clustered )); then
	myecho "Gathering cluster info..."
	{
	# cman_tool sections with uppercased headers (nodes/status/services).
	for i in nodes status services; do
		cap_i=$(echo $i|tr a-z A-Z)
		printf "$cap_i:\n----------------------------------\n"
		log "cman_tool $i 2>> \"$log\""
		echo
	done
	echo "LOCKS:"
	echo "----------------------------------"
	# Legacy /proc interface: writing the lockspace name selects which
	# lockspace dlm_locks reports on.
	if [ -f /proc/cluster/dlm_locks ]
	then
		echo clvmd > /proc/cluster/dlm_locks
		cat /proc/cluster/dlm_locks
		echo
		echo "RESOURCE DIR:"
		cat /proc/cluster/dlm_dir
		echo
		echo "DEBUG LOG:"
		cat /proc/cluster/dlm_debug
		echo
	fi
	# Newer debugfs interface (assumes debugfs mounted at /debug).
	if [ -f /debug/dlm/clvmd ]
	then
		cat /debug/dlm/clvmd
		echo
		echo "WAITERS:"
		cat /debug/dlm/clvmd_waiters
		echo
		echo "MASTER:"
		cat /debug/dlm/clvmd_master
	fi
	} >> $dir/cluster_info
fi
# Version info for LVM, device-mapper, kernel and DM targets, all appended
# to a single "versions" file.
myecho "Gathering LVM & device-mapper version info..."
echo "LVM VERSION:" >> "$dir/versions"
"$LVM" lvs --version >> "$dir/versions" 2>> "$log"
echo "DEVICE MAPPER VERSION:" >> "$dir/versions"
"$DMSETUP" --version >> "$dir/versions" 2>> "$log"
echo "KERNEL VERSION:" >> "$dir/versions"
"$UNAME" -a >> "$dir/versions" 2>> "$log"
echo "DM TARGETS VERSIONS:" >> "$dir/versions"
"$DMSETUP" targets >> "$dir/versions" 2>> "$log"
# Current device-mapper state: table, status and dependency tree.
myecho "Gathering dmsetup info..."
log "\"$DMSETUP\" info -c >> \"$dir/dmsetup_info\" 2>> \"$log\""
log "\"$DMSETUP\" table >> \"$dir/dmsetup_table\" 2>> \"$log\""
log "\"$DMSETUP\" status >> \"$dir/dmsetup_status\" 2>> \"$log\""
# cat as workaround to avoid tty ioctl (selinux)
log "\"$DMSETUP\" ls --tree 2>> \"$log\" | cat >> \"$dir/dmsetup_ls_tree\""
myecho "Gathering process info..."
log "$PS alx >> \"$dir/ps_info\" 2>> \"$log\""
myecho "Gathering console messages..."
log "$TAIL -n 75 /var/log/messages >> \"$dir/messages\" 2>> \"$log\""
myecho "Gathering /etc/lvm info..."
log "$CP -a /etc/lvm \"$dir/lvm\" 2>> \"$log\""
myecho "Gathering /dev listing..."
log "$LS -laR /dev >> \"$dir/dev_listing\" 2>> \"$log\""
myecho "Gathering /sys/block listing..."
log "$LS -laR /sys/block >> \"$dir/sysblock_listing\" 2>> \"$log\""
log "$LS -laR /sys/devices/virtual/block >> \"$dir/sysblock_listing\" 2>> \"$log\""
# On-disk metadata capture (-m): for each PV, copy the sectors before the
# first physical extent (which hold the LVM label + metadata area).
if (( $metadata )); then
	myecho "Gathering LVM metadata from Physical Volumes..."
	log "$MKDIR -p \"$dir/metadata\""
	# One "pvname,pe_start" pair per line, sizes in 512-byte sectors,
	# leading blanks stripped.
	pvs="$("$LVM" pvs --separator , --noheadings --units s --nosuffix -o \
	name,pe_start 2>> "$log" | $SED -e 's/^ *//')"
	# NOTE(review): iterating an unquoted $pvs relies on word-splitting,
	# so PV device names must not contain whitespace — TODO confirm.
	for line in $pvs
	do
		test -z "$line" && continue
		pv="$(echo $line | $CUT -d, -f1)"
		pe_start="$(echo $line | $CUT -d, -f2)"
		name="$($BASENAME "$pv")"
		myecho " $pv"
		log "$DD if=$pv \"of=$dir/metadata/$name\" bs=512 count=$pe_start 2>> \"$log\""
	done
fi
# udev context (-u): udevadm version + full device database, plus the udev
# configuration and both rules directories.
if (( $udev )); then
	myecho "Gathering udev info..."
	udev_dir="$dir/udev"
	log "$MKDIR -p \"$udev_dir\""
	log "$UDEVADM info --version >> \"$udev_dir/version\" 2>> \"$log\""
	log "$UDEVADM info --export-db >> \"$udev_dir/db\" 2>> \"$log\""
	log "$CP -a /etc/udev/udev.conf \"$udev_dir/conf\" 2>> \"$log\""
	log "$LS -la /lib/udev >> \"$udev_dir/lib_dir\" 2>> \"$log\""
	log "$CP -aR /etc/udev/rules.d \"$udev_dir/rules_etc\" 2>> \"$log\""
	log "$CP -aR /lib/udev/rules.d \"$udev_dir/rules_lib\" 2>> \"$log\""
fi
# Unless the user chose a fixed directory (-d), pack the dump into a
# tarball under $HOME and remove the working directory.
if test -z "$userdir"; then
	lvm_dump="$dirbase.tgz"
	myecho "Creating report tarball in $HOME/$lvm_dump..."
fi
# Repeat the warnings so they are visible at the end of the output too.
warnings
if test -z "$userdir"; then
	cd "$HOME"
	"$TAR" czf "$lvm_dump" "$dirbase" 2>/dev/null
	"$RM" -rf "$dir"
fi
exit 0
|
andyvand/cyglvm2
|
scripts/lvmdump.sh
|
Shell
|
gpl-2.0
| 6,590 |
#!/bin/bash
# Create a new "gestionale" site instance from the default template: copy
# the settings skeleton, substitute site name / DB credentials into it,
# create the per-site data/template/static/log/html directories, and
# register the new settings module in gestionale/__init__.py.
set -x
# Two required args: short site name and human-readable (long) name.
if [ $# -lt 2 ]; then
	echo -e "$0" "<site_name> <site_name_log>"
	exit 1;
fi
NEW_SITE=$1
NEW_SITE_LONG=$2
SETTING_DIR="gestionale"
DEFAULT="$SETTING_DIR/default"
NEW=$SETTING_DIR/$NEW_SITE
CURRENT_DIR=`pwd`
# Random 16-character alphanumeric/underscore DB password.
DB_GEN_PASS=`tr -dc A-Za-z0-9_ < /dev/urandom | head -c 16 | xargs`
# Fixed: every expansion below is quoted — a site name or working directory
# containing spaces/glob characters previously broke cp/sed/mkdir/ln.
cp -r "$DEFAULT" "$NEW"
# Fill in the placeholders left by the template.
sed -i "s/DEFAULT/$NEW_SITE/g" "$NEW/local_settings.py"
sed -i "s/DEFAULT_LONG/$NEW_SITE_LONG/g" "$NEW/local_settings.py"
sed -i "s/DB_NAME/${NEW_SITE}_gestionale/g" "$NEW/local_settings.py"
sed -i "s/DB_USER/$NEW_SITE/g" "$NEW/local_settings.py"
sed -i "s/DB_PASS/$DB_GEN_PASS/g" "$NEW/local_settings.py"
# ',' as the sed delimiter because $CURRENT_DIR contains slashes.
sed -i "s,PWD,$CURRENT_DIR,g" "$NEW/local_settings.py"
sed -i "s/DEFAULT/$NEW_SITE/g" "$NEW/wsgi.py"
mkdir "main/data/$NEW_SITE"
mkdir "main/templates/$NEW_SITE"
mkdir "main/static/$NEW_SITE"
# Expose the Django admin static files and the shared assets inside the
# new site's static directory.
ln -s /home/asterix/venv/lib/python2.7/site-packages/Django-1.4-py2.7.egg/django/contrib/admin/static/admin/ "main/static/$NEW_SITE/admin"
ln -s ../common "main/static/$NEW_SITE/common"
mkdir "log/$NEW_SITE"
mkdir "html/$NEW_SITE"
touch "html/$NEW_SITE/favicon.ico"
# Route settings imports for this site name to the new module.
cat <<EOF >> gestionale/__init__.py
if os.environ["DJANGO_SETTINGS_MODULE"] == "gestionale.${NEW_SITE}.settings":
    from ${NEW_SITE} import local_settings
    from ${NEW_SITE} import settings
    from ${NEW_SITE}.local_settings import local_env
EOF
|
asterix24/GestionaleCaldaie
|
new_gestionale_site.sh
|
Shell
|
gpl-2.0
| 1,318 |
#!/bin/bash
# https://github.com/titouwan/CIS
# Audit a RHEL 6 host against CIS benchmark checks.
# VERB: 1 => print passing (OK) results too; 0 => only failures (set by -v).
VERB=0
# Running total of failed checks, incremented by out().
score=0
# Invert an exit status: any failure (>= 1) prints 0, success prints 1.
# Used for checks where the *absence* of something is the passing condition
# (e.g. a package that must not be installed).
inv() {
	local rc=1
	[ "$1" -ge 1 ] && rc=0
	echo "$rc"
}
# Record and optionally print the outcome of one benchmark check.
#   $1            status: 0 => OK, >0 => NOK; empty/missing aborts the run
#   $2            check id (e.g. "1.1.4"); defaults to "0.0.0" if missing
#   $3..          check title; defaults to "Something"
# A NOK result increments the global $score and is always printed; OK
# results are printed only when VERB=1.
function out {
	if [ -z "$1" ]
	then
		echo "Cannot go further"
		exit 99
	fi
	if [ "$1" -eq 0 ]
	then
		RESULT="OK"
	else
		RESULT="NOK"
		score=$((score+1))
	fi
	if [ "x$2" == "x" ]
	then
		NUM="0.0.0"
		TIT="Something"
	else
		shift
		NUM=$1
		shift
		TIT="$*"
	fi
	if [ "$RESULT" == "NOK" ] || [ "$VERB" == 1 ]
	then
		printf "%-10.9s %-90.89s %-10.9s \n" "$NUM" "$TIT" "$RESULT"
	fi
}
# Only one flag is recognised: -v enables verbose (print OK results too).
case $1 in
	"-v") VERB=1
	;;
esac
# Make sure only root can run our script
if [[ $EUID -ne 0 ]]
then
	echo "This script must be run as root" 1>&2
	exit 1
fi
# Make sure that we are on RHEL6 OS
# ("Santiago" is the RHEL 6 codename printed in /etc/redhat-release).
if [ ! -f /etc/redhat-release ]
then
	echo "This script must be run on RHEL"
	exit 2
else
	grep Santiago /etc/redhat-release > /dev/null
	if [ $? -ne 0 ]
	then
		echo "This script must be run on version 6 of RHEL"
		exit 2
	fi
fi
### 1 - Install Updates, Patches and Additional Security Software ###
echo "### 1 - Install Updates, Patches and Additional Security Software"
## 1.1 - Filesystem Configuration ##
# Every check has the same shape: $INT holds "<id> <title>", a probe runs
# whose exit status is 0 on pass, and out() reports it.  1.1.1-1.1.16
# inspect /etc/fstab partitions and mount options.
INT='1.1.1 Verify that there is a /tmp file partition in the /etc/fstab file'
grep "[[:space:]]/tmp[[:space:]]" /etc/fstab > /dev/null
out $? $INT
INT='1.1.2 Set nodev option for /tmp Partition'
grep /tmp /etc/fstab | grep nodev > /dev/null
out $? $INT
INT='1.1.3 Set nosuid option for /tmp Partition'
grep /tmp /etc/fstab | grep nosuid > /dev/null
out $? $INT
INT='1.1.4 Set noexec option for /tmp Partition'
grep /tmp /etc/fstab | grep noexec > /dev/null
out $? $INT
INT='1.1.5 Verify that there is a /var file partition in the /etc/fstab file'
grep "[[:space:]]/var[[:space:]]" /etc/fstab > /dev/null
out $? $INT
INT='1.1.6 Bind Mount the /var/tmp directory to /tmp'
#grep -e "^/tmp" /etc/fstab | grep /var/tmp > /dev/null
#grep -e "^/tmp" /etc/fstab | grep /var/tmp
grep "/tmp[[:space:]]*/var/tmp[[:space:]]*none[[:space:]]*bind" /etc/fstab > /dev/null
out $? $INT
INT='1.1.7 Create Separate Partition for /var/log'
grep /var/log /etc/fstab > /dev/null
out $? $INT
INT='1.1.8 Create Separate Partition for /var/log/audit'
grep /var/log/audit /etc/fstab > /dev/null
out $? $INT
INT='1.1.9 Create Separate Partition for /home'
grep /home /etc/fstab > /dev/null
out $? $INT
INT='1.1.10 Add nodev Option to /home'
grep /home /etc/fstab | grep nodev > /dev/null
out $? $INT
# 1.1.11-1.1.13: the grep -v pipeline enumerates fstab devices that are not
# system/pseudo filesystems (treated here as "removable media") and sums the
# per-device grep statuses in $err; any miss makes the check NOK.
INT='1.1.11 Add nodev Option to Removable Media Partitions'
err=0
for i in `grep -v -e "^#" -e mapper -e /var* -e /boot -e proc -e sysfs -e devpts -e tmpfs -e "^$" /etc/fstab |awk '{print $1}'`
do
	grep $i /etc/fstab|grep nodev > /dev/null
	let err=$err+$?
done
out $err $INT
INT='1.1.12 Add noexec Option to Removable Media Partitions'
err=0
for i in `grep -v -e "^#" -e mapper -e /var* -e /boot -e proc -e sysfs -e devpts -e tmpfs -e "^$" /etc/fstab |awk '{print $1}'`
do
	grep $i /etc/fstab|grep noexec > /dev/null
	let err=$err+$?
done
out $err $INT
INT='1.1.13 Add nosuid Option to Removable Media Partitions'
err=0
for i in `grep -v -e "^#" -e mapper -e /var* -e /boot -e proc -e sysfs -e devpts -e tmpfs -e "^$" /etc/fstab |awk '{print $1}'`
do
	grep $i /etc/fstab|grep nosuid > /dev/null
	let err=$err+$?
done
out $err $INT
INT='1.1.14 Add nodev Option to /dev/shm Partition'
grep /dev/shm /etc/fstab | grep nodev > /dev/null
out $? $INT
INT='1.1.15 Add nosuid Option to /dev/shm Partition'
grep /dev/shm /etc/fstab | grep nosuid > /dev/null
out $? $INT
INT='1.1.16 Add noexec Option to /dev/shm Partition'
grep /dev/shm /etc/fstab | grep noexec > /dev/null
out $? $INT
INT='1.1.17 Set Sticky Bit on All World-Writable Directories'
# Count world-writable directories lacking the sticky bit on local
# filesystems; a non-zero count is a finding.
lin=`df --local -P | awk {'if (NR!=1) print $6'} | xargs -I '{}' find '{}' -xdev -type d \( -perm -0002 -a ! -perm -1000 \) |wc -l 2>/dev/null`
# BUG FIX: the original passed $? (status of the assignment, always 0) to
# out, so this check could never fail — report the computed count instead.
out $lin $INT
# 1.1.18-1.1.24: filesystem kernel modules that should not be loaded; the
# probe's status is inverted, so "module present" => NOK.
INT='1.1.18 Disable Mounting of cramfs Filesystems'
/sbin/lsmod | grep cramfs > /dev/null
out $(inv $?) $INT
INT='1.1.19 Disable Mounting of freevxfs Filesystems'
/sbin/lsmod | grep freexvfs > /dev/null
out $(inv $?) $INT
INT='1.1.20 Disable Mounting of jffs2 Filesystems'
/sbin/lsmod | grep jffs2 > /dev/null
out $(inv $?) $INT
INT='1.1.21 Disable Mounting of hfs Filesystems'
/sbin/lsmod | grep hfs > /dev/null
out $(inv $?) $INT
INT='1.1.22 Disable Mounting of hfsplus Filesystems'
/sbin/lsmod | grep hfsplus > /dev/null
out $(inv $?) $INT
INT='1.1.23 Disable Mounting of squashfs Filesystems'
/sbin/lsmod | grep squashfs > /dev/null
out $(inv $?) $INT
INT='1.1.24 Disable Mounting of udf Filesystems'
/sbin/lsmod | grep udf > /dev/null
out $(inv $?) $INT
INT='1.1.25 Verify that there is a /opt file partition in the /etc/fstab file'
grep "[[:space:]]/opt[[:space:]]" /etc/fstab > /dev/null
out $? $INT
INT='1.1.26 Set nodev option for /opt Partition'
grep /opt /etc/fstab | grep nodev > /dev/null
out $? $INT
INT='1.1.27 Set nosuid option for /opt Partition'
grep /opt /etc/fstab | grep nosuid > /dev/null
out $? $INT
## 1.2 - Configure Software Updates ##
#echo " 1.2 - Configure Software Updates"
INT='1.2.1 Configure Connection to the RHN RPM Repositories'
/usr/bin/yum check-update|grep "not registered" > /dev/null
out $(inv $?) $INT
INT='1.2.2 Verify Red Hat GPG Key is Installed'
/bin/rpm -q --queryformat "%{SUMMARY}\n" gpg-pubkey > /dev/null
out $? $INT
# 1.2.3: count repo sections ($x) vs gpgcheck=1 occurrences ($y) across all
# yum config files; more sections than gpgcheck lines => NOK.
INT='1.2.3 Verify that gpgcheck is Globally Activated'
x=0
y=0
for i in /etc/yum.conf /etc/yum.repos.d/*
do
	for j in `grep "\[*\]" $i`
	do
		let x=$x+1
	done
	for k in `grep gpgcheck[[:space:]]*=[[:space:]]*1 $i`
	do
		let y=$y+1
	done
done
if [ $x -gt $y ]
then
	out "1" $INT
else
	out "0" $INT
fi
INT='1.2.4 Disable the rhnsd Daemon'
/sbin/chkconfig --list rhnsd|grep "on" > /dev/null
out $(inv $?) $INT
INT='1.2.5 Obtain Software Package Updates with yum'
ret=`/usr/bin/yum check-update |wc -l`
out $ret $INT
INT='1.2.6 Verify Package Integrity Using RPM'
/bin/rpm -qVa 2>&1| awk '$2 != "c" { print $0}' | grep "Unsatisfied dependencies"
out $(inv $?) $INT
INT='1.2.7 Is the system up-to-date ?'
nb=`/usr/bin/yum check-update |grep -v -e "Loaded plugins" -e "This system" -e "^$"|wc -l`
out $nb $INT
# 1.2.8: every baseurl must point at ftp.redhat.com; count the others.
INT='1.2.8 Only RedHat Repositories are used'
x=0
for i in `grep baseurl /etc/yum.conf /etc/yum.repos.d/*|awk -F= '{print $2}'`
do
	if [[ ! $i =~ ftp.redhat.com ]]
	then
		let x=$x+1
	fi
done
out $x $INT
## 1.3 Advanced Intrusion Detection Environment (AIDE) ##
#echo " 1.3 Advanced Intrusion Detection Environment (AIDE)"
INT='1.3.1 Install AIDE'
/bin/rpm -q aide > /dev/null
out $? $INT
INT='1.3.2 Implement Periodic Execution of File Integrity'
/usr/bin/crontab -u root -l 2>/dev/null| grep aide > /dev/null
out $? $INT
## 1.4 Configure SELinux ##
#echo " 1.4 Configure SELinux"
# 1.4.1: neither selinux=0 nor enforcing=0 may appear on kernel lines.
INT='1.4.1 Enable SELinux in /etc/grub.conf'
grep selinux=0 /etc/grub.conf > /dev/null
let x=$(inv $?)
grep enforcing=0 /etc/grub.conf > /dev/null
let x=$x+$(inv $?)
out $x $INT
INT='1.4.2 Set the SELinux State'
grep "SELINUX[[:space:]]*=[[:space:]]*enforcing" /etc/selinux/config > /dev/null
out $? $INT
INT='1.4.3 Set the SELinux Policy'
grep SELINUXTYPE[[:space:]]*=[[:space:]]*targeted /etc/selinux/config > /dev/null
out $? $INT
INT='1.4.4 Remove SETroubleshoot'
/bin/rpm -q setroubleshoot > /dev/null
out $(inv $?) $INT
INT='1.4.5 Remove MCS Translation Service (mcstrans)'
/bin/rpm -q mcstrans > /dev/null
out $(inv $?) $INT
INT='1.4.6 Check for Unconfined Daemons'
ps -eZ | grep -v "tr|ps|egrep|bash|awk"|grep initrc > /dev/null
out $(inv $?) $INT
## 1.5 Secure Boot Settings
#echo " 1.5 Secure Boot Settings"
INT='1.5.1 Set User/Group Owner on /etc/grub.conf'
stat -L -c "%u %g" /etc/grub.conf | egrep "0 0" > /dev/null
out $? $INT
INT='1.5.2 Set Permissions on /etc/grub.conf'
stat -L -c "%a" /etc/grub.conf | egrep ".00" > /dev/null
out $? $INT
INT='1.5.3 Set Boot Loader Password'
grep "^password" /etc/grub.conf > /dev/null
out $? $INT
INT='1.5.4 Require Authentication for Single-User Mode'
grep "SINGLE[[:space:]]*=[[:space:]]*[a-zA-Z/]*sulogin" /etc/sysconfig/init > /dev/null
# BUG FIX: was 'out $x' — reported a stale accumulator left over from
# section 1.4 instead of this grep's own status.
out $? $INT
INT='1.5.5 Disable Interactive Boot'
grep "PROMPT[[:space:]]*=[[:space:]]*no" /etc/sysconfig/init > /dev/null
# BUG FIX: was 'out $x' — same stale-variable problem as 1.5.4.
out $? $INT
## 1.6 Additional Process Hardening ##
#echo " 1.6 Additional Process Hardening"
# 1.6.1: both a hard core limit of 0 and fs.suid_dumpable=0 are required;
# the sysctl value itself is added into the accumulator.
INT='1.6.1 Restrict Core Dumps'
grep "hard[[:space:]]*core[[:space:]]*0" /etc/security/limits.conf > /dev/null
let x=$?
let x=$x+`sysctl fs.suid_dumpable|awk '{print $3}'`
out $x $INT
INT='1.6.2 Configure ExecShield'
x=`sysctl kernel.exec-shield|awk '{print $3}'`
out $(inv $x) $INT
INT='1.6.3 Enable Randomized Virtual Memory Region Placement'
x=`sysctl kernel.randomize_va_space|awk '{print $3}'`
out $(inv $x) $INT
# 1.6.4: compare the point release in /etc/redhat-release with the newest
# release known to this script (hard-coded).
INT='1.6.4 Use the Latest OS Release'
LATEST="6.4"
x=`cat /etc/redhat-release |awk '{print $7}'`
if [ "$LATEST" == "$x" ]
then
	out "0" $INT
else
	out "1" $INT
fi
### 2 - OS Services ###
echo "### 2 - OS Services"
## 2.1 Remove Legacy Services ##
#echo " 2.1 Remove Legacy Services"
# 2.1.1-2.1.11: legacy packages must be absent (rpm -q status inverted).
INT='2.1.1 Remove telnet-server'
/bin/rpm -q telnet-server > /dev/null
out $(inv $?) $INT
INT='2.1.2 Remove telnet Clients'
/bin/rpm -q telnet > /dev/null
out $(inv $?) $INT
INT='2.1.3 Remove rsh-server'
/bin/rpm -q rsh-server > /dev/null
out $(inv $?) $INT
INT='2.1.4 Remove rsh'
/bin/rpm -q rsh > /dev/null
out $(inv $?) $INT
INT='2.1.5 Remove NIS Client'
/bin/rpm -q ypbind > /dev/null
out $(inv $?) $INT
INT='2.1.6 Remove NIS Server'
/bin/rpm -q ypserv > /dev/null
out $(inv $?) $INT
INT='2.1.7 Remove tftp'
/bin/rpm -q tftp > /dev/null
out $(inv $?) $INT
INT='2.1.8 Remove tftp-server'
/bin/rpm -q tftp-server > /dev/null
out $(inv $?) $INT
INT='2.1.9 Remove talk'
/bin/rpm -q talk > /dev/null
out $(inv $?) $INT
INT='2.1.10 Remove talk-server'
/bin/rpm -q talk-server > /dev/null
out $(inv $?) $INT
INT='2.1.11 Remove xinetd'
/bin/rpm -q xinetd > /dev/null
out $(inv $?) $INT
# 2.1.12-2.1.18: xinetd-style services must not be chkconfig'd "on".
INT='2.1.12 Disable chargen-dgram'
/sbin/chkconfig --list chargen-dgram 2>/dev/null|grep "on" > /dev/null
out $(inv $?) $INT
INT='2.1.13 Disable chargen-stream'
/sbin/chkconfig --list chargen-stream 2>/dev/null|grep "on" > /dev/null
out $(inv $?) $INT
INT='2.1.14 Disable daytime-dgram'
/sbin/chkconfig --list daytime-dgram 2>/dev/null|grep "on" > /dev/null
out $(inv $?) $INT
INT='2.1.15 Disable daytime-stream'
/sbin/chkconfig --list daytime-stream 2>/dev/null|grep "on" > /dev/null
out $(inv $?) $INT
INT='2.1.16 Disable echo-dgram'
/sbin/chkconfig --list echo-dgram 2>/dev/null|grep "on" > /dev/null
out $(inv $?) $INT
INT='2.1.17 Disable echo-stream'
/sbin/chkconfig --list echo-stream 2>/dev/null|grep "on" > /dev/null
out $(inv $?) $INT
INT='2.1.18 Disable tcpmux-server'
/sbin/chkconfig --list tcpmux-server 2>/dev/null|grep "on" > /dev/null
out $(inv $?) $INT
### 3 - Special Purpose Services ###
echo "### 3 - Special Purpose Services"
INT='3.1 Set Daemon umask'
grep "umask[[:space:]]*027" /etc/sysconfig/init > /dev/null
out $? $INT
INT='3.2 Remove X Windows'
/bin/rpm -q xorg-x11-server > /dev/null
out $(inv $?) $INT
INT='3.3 Disable Avahi Server'
/sbin/chkconfig --list avahi-daemon 2>/dev/null|grep "on" > /dev/null
out $(inv $?) $INT
INT='3.4 Disable Print Server - CUPS'
/sbin/chkconfig --list cups 2>/dev/null|grep "on" > /dev/null
out $(inv $?) $INT
INT='3.5 Remove DHCP Server'
/bin/rpm -q dhcp > /dev/null
out $(inv $?) $INT
INT='3.6 Configure Network Time Protocol (NTP)'
grep -e "restrict" -e "default" /etc/ntp.conf > /dev/null 2>&1
out $? $INT
# 3.7-3.14: checks with multiple probes sum the statuses into $x and
# invert the total — any package present makes the sum non-zero => NOK.
INT='3.7 Remove LDAP'
/bin/rpm -q openldap-servers > /dev/null
x=$?
/bin/rpm -q openldap-clients > /dev/null
let x=$x+$?
out $(inv $x) $INT
INT='3.8 Disable NFS and RPC'
/sbin/chkconfig --list nfslock 2>/dev/null|grep "on" > /dev/null
x=$?
/sbin/chkconfig --list rpcgssd 2>/dev/null|grep "on" > /dev/null
let x=$x+$?
/sbin/chkconfig --list rpcbind 2>/dev/null|grep "on" > /dev/null
let x=$x+$?
/sbin/chkconfig --list rpcidmapd 2>/dev/null|grep "on" > /dev/null
let x=$x+$?
/sbin/chkconfig --list rpcsvcgssd 2>/dev/null|grep "on" > /dev/null
let x=$x+$?
out $(inv $x) $INT
INT='3.9 Remove DNS Server'
/bin/rpm -q bind > /dev/null
out $(inv $?) $INT
INT='3.10 Remove FTP Server'
/bin/rpm -q vsftpd > /dev/null
x=$?
/bin/rpm -q proftpd > /dev/null
let x=$x+$?
out $(inv $x) $INT
INT='3.11 Remove HTTP Server'
/bin/rpm -q httpd > /dev/null
x=$?
/bin/rpm -q nginx > /dev/null
let x=$x+$?
out $(inv $x) $INT
INT='3.12 Remove Dovecot'
/bin/rpm -q dovecot > /dev/null
out $(inv $?) $INT
INT='3.13 Remove Samba'
/bin/rpm -q samba > /dev/null
x=$?
/bin/rpm -q samba4 > /dev/null
let x=$x+$?
out $(inv $x) $INT
INT='3.14 Remove HTTP Proxy Server'
/bin/rpm -q squid > /dev/null
x=$?
/bin/rpm -q tinyproxy > /dev/null
let x=$x+$?
out $(inv $x) $INT
INT='3.15 Remove SNMP Server'
/bin/rpm -q net-snmp > /dev/null
# BUG FIX: was 'out $(inv $x)', which reused the accumulator from check
# 3.14; invert this rpm query's own exit status instead.
out $(inv $?) $INT
INT='3.16 Configure Mail Transfer Agent for Local-Only Mode'
grep "^inet_interfaces[[:space:]]*=[[:space:]]*localhost" /etc/postfix/main.cf > /dev/null
out $? $INT
INT='3.17 Set default Runlevel to 3'
rl=`runlevel|awk '{print $2}'`
if [ $rl -eq 3 ]
then
	out "0" $INT
else
	out "1" $INT
fi
### 4 - Network Configuration and Firewalls ###
echo "### 4 - Network Configuration and Firewalls"
## 4.1 - Modify Network Parameters (Host Only) ##
#echo " 4.1 - Modify Network Parameters (Host Only)"
# These sysctl checks feed the kernel value straight into out(): a value of
# 0 passes, anything else fails (the sum does, for the two-key checks).
INT='4.1.1 Disable IP Forwarding'
x=`/sbin/sysctl net.ipv4.ip_forward|awk '{print $3}'`
out $x $INT
INT='4.1.2 Disable Send Packet Redirects'
x=`/sbin/sysctl net.ipv4.conf.all.send_redirects|awk '{print $3}'`
let x=$x+`/sbin/sysctl net.ipv4.conf.default.send_redirects|awk '{print $3}'`
out $x $INT
## 4.2 - Modify Network Parameters (Host and Router) ##
#echo " 4.2 - Modify Network Parameters (Host and Router)"
INT='4.2.1 Disable Source Routed Packet Acceptance'
x=`/sbin/sysctl net.ipv4.conf.all.accept_source_route|awk '{print $3}'`
let x=$x+`/sbin/sysctl net.ipv4.conf.default.accept_source_route|awk '{print $3}'`
out $x $INT
INT='4.2.2 Disable ICMP Redirect Acceptance'
x=`/sbin/sysctl net.ipv4.conf.all.accept_redirects|awk '{print $3}'`
let x=$x+`/sbin/sysctl net.ipv4.conf.default.accept_redirects|awk '{print $3}'`
out $x $INT
# Title typo fixed: was '4.2.3 isable Secure ICMP Redirect Acceptance'.
INT='4.2.3 Disable Secure ICMP Redirect Acceptance'
x=`/sbin/sysctl net.ipv4.conf.all.secure_redirects|awk '{print $3}'`
let x=$x+`/sbin/sysctl net.ipv4.conf.default.secure_redirects|awk '{print $3}'`
out $x $INT
# 4.2.4/4.2.7: these settings must be 1, so each value is inverted before
# being summed.
INT='4.2.4 Log Suspicious Packets'
x=$(inv `/sbin/sysctl net.ipv4.conf.all.log_martians|awk '{print $3}'`)
let x=$x+$(inv `/sbin/sysctl net.ipv4.conf.default.log_martians|awk '{print $3}'`)
out $x $INT
INT='4.2.5 Enable Ignore Broadcast Requests'
x=`/sbin/sysctl net.ipv4.icmp_echo_ignore_broadcasts|awk '{print $3}'`
out $(inv $x) $INT
INT='4.2.6 Enable Bad Error Message Protection'
x=`/sbin/sysctl net.ipv4.icmp_ignore_bogus_error_responses|awk '{print $3}'`
out $(inv $x) $INT
INT='4.2.7 Enable RFC-recommended Source Route Validation'
x=$(inv `/sbin/sysctl net.ipv4.conf.all.rp_filter|awk '{print $3}'`)
let x=$x+$(inv `/sbin/sysctl net.ipv4.conf.default.rp_filter|awk '{print $3}'`)
out $x $INT
INT='4.2.8 Enable TCP SYN Cookies'
x=`/sbin/sysctl net.ipv4.tcp_syncookies|awk '{print $3}'`
out $(inv $x) $INT
## 4.3 - Wireless Networking ##
#echo " 4.3 - Wireless Networking"
INT='4.3.1 Deactivate Wireless Interfaces'
ifconfig |grep wlan
out $(inv $?) $INT
## 4.4 - Disable IPv6 ##
#echo " 4.4 - Disable IPv6"
# 4.4.1 Configure IPv6
# sysctl -e tolerates missing IPv6 keys; ${ret:-0} treats a missing key as
# a passing 0.
INT='4.4.1.1 Disable IPv6 Router Advertisements'
ret=`/sbin/sysctl -e net.ipv6.conf.all.accept_ra|awk '{print $3}'`
x=${ret:-0}
ret=`/sbin/sysctl -e net.ipv6.conf.default.accept_ra|awk '{print $3}'`
y=${ret:-0}
let x=$x+$y
out $x $INT
INT='4.4.1.2 Disable IPv6 Redirect Acceptance'
ret=`/sbin/sysctl net.ipv6.conf.all.accept_redirect 2>/dev/null|awk '{print $3}'`
x=${ret:-0}
ret=`/sbin/sysctl net.ipv6.conf.default.accept_redirect 2>/dev/null|awk '{print $3}'`
y=${ret:-0}
let x=$x+$y
out $x $INT
# 4.4.2: IPv6 must be off in the network config AND the module disabled.
INT='4.4.2 Disable IPv6'
grep "NETWORKING_IPV6[[:space:]]*=[[:space:]]*no" /etc/sysconfig/network > /dev/null
x=$?
grep "IPV6INIT[[:space:]]*=[[:space:]]*no" /etc/sysconfig/network > /dev/null
let x=$x+$?
grep "ipv6[[:space:]]*disable[[:space:]]*=[[:space:]]*1" /etc/modprobe.d/*.conf > /dev/null
let x=$x+$?
out $x $INT
## 4.5 - Install TCP Wrappers ##
#echo " 4.5 - Install TCP Wrappers"
INT='4.5.1 Install TCP Wrappers'
/bin/rpm -q tcp_wrappers > /dev/null
out $? $INT
INT='4.5.2 Create /etc/hosts.allow'
if [ -f /etc/hosts.allow ]
then
	grep "^ALL[[:space:]]*:[[:space:]]*[0-9.]*\/[0-9.]*" /etc/hosts.allow > /dev/null
	out $? $INT
else
	out "1" $INT
fi
INT='4.5.3 Verify Permissions on /etc/hosts.allow'
stat -L -c "%a" /etc/hosts.allow|grep 644 > /dev/null
out $? $INT
INT='4.5.4 Create /etc/hosts.deny'
if [ -f /etc/hosts.deny ]
then
	grep "^ALL[[:space:]]*:[[:space:]]*ALL" /etc/hosts.deny > /dev/null
	out $? $INT
else
	out "1" $INT
fi
INT='4.5.5 Verify Permissions on /etc/hosts.deny'
stat -L -c "%a" /etc/hosts.deny|grep 644 > /dev/null
out $? $INT
## 4.6 - Uncommon Network Protocols ##
#echo " 4.6 - Uncommon Network Protocols"
# Each uncommon protocol must be mapped to /bin/true in modprobe config.
INT='4.6.1 Disable DCCP'
grep "install[[:space:]]*dccp[[:space:]]/bin/true" /etc/modprobe.d/*.conf > /dev/null
out $? $INT
INT='4.6.2 Disable SCTP'
grep "install[[:space:]]*sctp[[:space:]]/bin/true" /etc/modprobe.d/*.conf > /dev/null
out $? $INT
INT='4.6.3 Disable RDS'
grep "install[[:space:]]*rds[[:space:]]/bin/true" /etc/modprobe.d/*.conf > /dev/null
out $? $INT
INT='4.6.4 Disable TIPC'
grep "install[[:space:]]*tipc[[:space:]]/bin/true" /etc/modprobe.d/*.conf > /dev/null
out $? $INT
## 4.6 - Configure IPtables
INT='4.7 Enable IPtables'
chkconfig --list iptables|grep "on" > /dev/null
out $? $INT
INT='4.8 Disable IP6tables'
chkconfig --list ip6tables|grep "on" > /dev/null
out $(inv $?) $INT
### 5 - Logging and Auditing ###
echo "### 5 - Logging and Auditing"
## 5.1 - Configure rsyslog ##
#echo " 5.1 - Configure rsyslog"
INT='5.1.1 Install the rsyslog package'
/bin/rpm -q rsyslog > /dev/null
out $? $INT
INT='5.1.2 Activate the rsyslog Service'
chkconfig --list rsyslog|grep "on" > /dev/null
out $? $INT
# 5.1.3 Configure /etc/rsyslog.conf
# Not implemented yet; reported as a passing TODO so the id still shows up
# in verbose runs.
out "0" "5.1.3" "TODO"
#x=0
#for i in "auth user kern daemon syslog lpr news uucp local"
#do
#
#done
#do
# prt=grep $i /etc/rsyslog.conf > /dev/null
# if [ $? -eq 0 ]
# then
# if [[ "$prt" =~ "$i[\.\*]*[[:space:]]/var/log/[a-z]A-Z+" ]]
# else
# let x=$x+1
# fi
#done
# 5.1.4: every rsyslog config file (main + $IncludeConfig matches) must be
# root:root with mode 600.
INT='5.1.4 Create and Set Permissions on rsyslog Log Files'
files=`grep "IncludeConfig" /etc/rsyslog.conf|awk '{print $2}'`
list="/etc/rsyslog.conf"
for i in $files
do
	if [ -f $i ]
	then
		list="$list $i"
	fi
done
x=0
for i in $list
do
	stat -L -c "%u %g" $i | egrep "0 0" > /dev/null
	let x=$x+$?
	stat -L -c "%a" $i|grep 600 > /dev/null
	let x=$x+$?
done
out $x $INT
INT='5.1.5 Configure rsyslog to Send Logs to a Remote Log Host'
grep "^*.*[^I][^I]*@" /etc/rsyslog.conf > /dev/null
out $? $INT
INT="5.1.6 Don't Accept Remote rsyslog Messages Only on Designated Log Hosts"
grep '$ModLoad[[:space:]]*imtcp.so' /etc/rsyslog.conf > /dev/null
out $(inv $?) $INT
# 5.2 Configure System Accounting (auditd)
# 5.2.1 Configure Data Retention
INT='5.2.1.1 Configure Audit Log Storage Size'
grep max_log_file /etc/audit/auditd.conf > /dev/null
out $? $INT
INT='5.2.1.2 Disable System on Audit Log Full'
grep "space_left_action[[:space:]]*=[[:space:]]*email" /etc/audit/auditd.conf > /dev/null
x=$?
grep "action_mail_acct[[:space:]]*=[[:space:]]*root" /etc/audit/auditd.conf > /dev/null
let x=$x+$?
grep "admin_space_left_action[[:space:]]*=[[:space:]]*halt" /etc/audit/auditd.conf > /dev/null
let x=$x+$?
out $x $INT
INT='5.2.1.3 Keep All Auditing Information'
grep "max_log_file_action[[:space:]]*=[[:space:]]*keep_logs" /etc/audit/auditd.conf > /dev/null
out $? $INT
INT='5.2.2 Enable auditd Service'
chkconfig --list auditd|grep "on" > /dev/null
out $? $INT
# 5.2.3: every (non-comment) kernel line in grub.conf must carry audit=1.
INT='5.2.3 Enable Auditing for Processes That Start Prior to auditd'
nbk=`grep -v "^#" /etc/grub.conf |grep "kernel"|wc -l`
nba=`grep -v "^#" /etc/grub.conf |grep "kernel.*audit=1"|wc -l`
if [ $nbk -eq $nba ]
then
	out "0" $INT
else
	out "1" $INT
fi
# 5.2.4-5.2.18: audit-rule checks not yet implemented; reported as TODO.
# 5.2.4 Record Events That Modify Date and Time Information
out "0" "5.2.4" "TODO"
# 5.2.5 Record Events That Modify User/Group Information
out "0" "5.2.5" "TODO"
# 5.2.6 Record Events That Modify the System's Network Environment
out "0" "5.2.6" "TODO"
# 5.2.7 Record Events That Modify the System's Mandatory Access
out "0" "5.2.7" "TODO"
# 5.2.8 Collect Login and Logout Events
out "0" "5.2.8" "TODO"
# 5.2.9 Collect Session Initiation Information
out "0" "5.2.9" "TODO"
# 5.2.10 Collect Discretionary Access Control Permission Modification Events
out "0" "5.2.10" "TODO"
# 5.2.11 Collect Unsuccessful Unauthorized Access Attempts to Files
out "0" "5.2.11" "TODO"
# 5.2.12 Collect Use of Privileged Commands
out "0" "5.2.12" "TODO"
# 5.2.13 Collect Successful File System Mounts
out "0" "5.2.13" "TODO"
# 5.2.14 Collect File Deletion Events by User
out "0" "5.2.14" "TODO"
# 5.2.15 Collect Changes to System Administration Scope
out "0" "5.2.15" "TODO"
# 5.2.16 Collect System Administrator Actions
out "0" "5.2.16" "TODO"
# 5.2.17 Collect Kernel Module Loading and Unloading
out "0" "5.2.17" "TODO"
# 5.2.18 Make the Audit Configuration Immutable
out "0" "5.2.18" "TODO"
INT='5.3 Configure logrotate'
grep '{' /etc/logrotate.d/syslog > /dev/null
out $? $INT
echo "### 6 System Access, Authentication and Authorization"
# 6.1 Configure cron and anacron
INT='6.1.1 Enable anacron Daemon'
/bin/rpm -q cronie-anacron > /dev/null
out $? $INT
INT='6.1.2 Enable crond Daemon'
chkconfig --list crond |grep "on" > /dev/null
out $? $INT
# 6.1.3-6.1.9: cron files/dirs must be root:root with a X00 mode (".00"
# matches any hundreds digit, no group/other bits).
INT='6.1.3 Set User/Group Owner and Permission on /etc/anacrontab'
stat -L -c "%a %u %g" /etc/anacrontab | egrep ".00 0 0" > /dev/null
out $? $INT
INT='6.1.4 Set User/Group Owner and Permission on /etc/crontab'
stat -L -c "%a %u %g" /etc/crontab | egrep ".00 0 0" > /dev/null
out $? $INT
INT='6.1.5 Set User/Group Owner and Permission on /etc/cron.hourly'
stat -L -c "%a %u %g" /etc/cron.hourly | egrep ".00 0 0" > /dev/null
out $? $INT
INT='6.1.6 Set User/Group Owner and Permission on /etc/cron.daily'
stat -L -c "%a %u %g" /etc/cron.daily | egrep ".00 0 0" > /dev/null
out $? $INT
INT='6.1.7 Set User/Group Owner and Permission on /etc/cron.weekly'
stat -L -c "%a %u %g" /etc/cron.weekly | egrep ".00 0 0" > /dev/null
out $? $INT
INT='6.1.8 Set User/Group Owner and Permission on /etc/cron.monthly'
stat -L -c "%a %u %g" /etc/cron.monthly | egrep ".00 0 0" > /dev/null
out $? $INT
INT='6.1.9 Set User/Group Owner and Permission on /etc/cron.d'
stat -L -c "%a %u %g" /etc/cron.d | egrep ".00 0 0" > /dev/null
out $? $INT
# 6.1.10: at.deny must NOT exist and at.allow must be locked down.
INT='6.1.10 Restrict at Daemon'
stat -L /etc/at.deny > /dev/null 2>&1
x=$(inv $?)
stat -L -c "%a %u %g" /etc/at.allow 2>/dev/null| egrep ".00 0 0" > /dev/null 2>&1
let x=$x+$?
out $x $INT
# 6.1.11: prefer allow-files; fall back to checking the deny-files; having
# neither is a failure.
INT='6.1.11 Restrict at/cron to Authorized Users'
x=0
if [ -f /etc/cron.allow ]
then
	stat -L -c "%a %u %g" /etc/cron.allow 2>/dev/null| egrep ".00 0 0" > /dev/null
	let x=$x+$?
else
	if [ -f /etc/cron.deny ]
	then
		stat -L -c "%a %u %g" /etc/cron.deny 2>/dev/null| egrep ".00 0 0" > /dev/null
		let x=$x+$?
	else
		let x=$x+1
	fi
fi
if [ -f /etc/at.allow ]
then
	stat -L -c "%a %u %g" /etc/at.allow 2>/dev/null| egrep ".00 0 0" > /dev/null 2>&1
	let x=$x+$?
else
	if [ -f /etc/at.deny ]
	then
		stat -L -c "%a %u %g" /etc/at.deny 2>/dev/null| egrep ".00 0 0" > /dev/null
		let x=$x+$?
	else
		let x=$x+1
	fi
fi
out $x $INT
# 6.2 Configure SSH
# Straightforward sshd_config directive checks (uncommented lines only).
INT='6.2.1 Set SSH Protocol to 2'
grep "^Protocol[[:space:]]*2" /etc/ssh/sshd_config > /dev/null
out $? $INT
INT='6.2.2 Set SSH LogLevel to INFO'
grep "^LogLevel[[:space:]]*INFO" /etc/ssh/sshd_config > /dev/null
out $? $INT
INT='6.2.3 Set Permissions on /etc/ssh/sshd_config'
stat -L -c "%a %u %g" /etc/ssh/sshd_config 2>/dev/null| egrep ".00 0 0" > /dev/null
out $? $INT
INT='6.2.4 Disable SSH X11 Forwarding'
grep "^X11Forwarding[[:space:]]*no" /etc/ssh/sshd_config > /dev/null
out $? $INT
INT='6.2.5 Set SSH MaxAuthTries to 4 or Less'
grep "^MaxAuthTries[[:space:]]*4" /etc/ssh/sshd_config > /dev/null
out $? $INT
INT='6.2.6 Set SSH IgnoreRhosts to Yes'
grep "^IgnoreRhosts[[:space:]]*yes" /etc/ssh/sshd_config > /dev/null
out $? $INT
INT='6.2.7 Set SSH HostbasedAuthentication to No'
grep "^HostbasedAuthentication[[:space:]]*no" /etc/ssh/sshd_config > /dev/null
out $? $INT
INT='6.2.8 Disable SSH Root Login'
grep "^PermitRootLogin[[:space:]]*no" /etc/ssh/sshd_config > /dev/null
out $? $INT
INT='6.2.9 Set SSH PermitEmptyPasswords to No'
grep "^PermitEmptyPasswords[[:space:]]*no" /etc/ssh/sshd_config > /dev/null
out $? $INT
INT='6.2.10 Do Not Allow Users to Set Environment Options'
grep "^PermitUserEnvironment[[:space:]]*no" /etc/ssh/sshd_config > /dev/null
out $? $INT
INT='6.2.11 Use Only Approved Cipher in Counter Mode'
grep "^Ciphers" /etc/ssh/sshd_config|grep -e aes128-ctr -e aes192-ctr -e aes256-ctr > /dev/null
out $? $INT
INT='6.2.12 Set Idle Timeout Interval for User Login'
grep "^ClientAliveInterval[[:space:]]*300" /etc/ssh/sshd_config > /dev/null
x=$?
grep "^ClientAliveCountMax[[:space:]]*0" /etc/ssh/sshd_config > /dev/null
let x=$x+$?
out $x $INT
INT='6.2.13 Limit Access via SSH'
# Pass if either AllowUsers/AllowGroups AND either DenyUsers/DenyGroups is
# configured.  BUG FIX: x was only assigned inside the first fallback
# branch, so when AllowUsers existed the stale value from check 6.2.12
# leaked into this result — initialise the accumulator explicitly.
x=0
grep "^AllowUsers" /etc/ssh/sshd_config > /dev/null
if [ $? -eq 1 ]
then
	grep "^AllowGroups" /etc/ssh/sshd_config > /dev/null
	x=$?
fi
grep "^DenyUsers" /etc/ssh/sshd_config > /dev/null
if [ $? -eq 1 ]
then
	grep "^DenyGroups" /etc/ssh/sshd_config > /dev/null
	let x=$x+$?
fi
out $x $INT
INT='6.2.14 Set SSH Banner'
grep "^Banner" /etc/ssh/sshd_config > /dev/null
out $? $INT
#echo "# 6.3 Configure PAM"
INT='6.3.1 Upgrade Password Hashing Algorithm to SHA-512'
authconfig --test | grep hashing | grep sha512 > /dev/null
out $? $INT
INT='6.3.2 Set Password Creation Requirement Parameters Using pam_cracklib'
grep -v "^#" /etc/pam.d/system-auth| grep pam_cracklib.so |grep try_first_pass |grep "retry[[:space:]]*=[[:space:]]*3" |grep "minlen[[:space:]]*=[[:space:]]*8" |grep "dcredit[[:space:]]*=[[:space:]]*-1" |grep "ucredit[[:space:]]*=[[:space:]]*-1" |grep "ocredit[[:space:]]*=[[:space:]]*-1" |grep "lcredit[[:space:]]*=[[:space:]]*-1" > /dev/null
out $? $INT
INT='6.3.3 Set Lockout for Failed Password Attempts'
# pam_faillock plus a pam_unix "success=1" line must exist in both
# password-auth and system-auth; the four statuses are summed.
grep "^auth.*pam_faillock" /etc/pam.d/password-auth > /dev/null
x=$?
# bugfix: these two pipelines were missing the > /dev/null redirect used by
# every other check, so matching lines were printed into the report output.
grep "^auth.*pam_unix.so" /etc/pam.d/password-auth | grep "success[[:space:]]*=[[:space:]]*1" > /dev/null
let x=$x+$?
grep "^auth.*pam_faillock" /etc/pam.d/system-auth > /dev/null
let x=$x+$?
grep "^auth.*pam_unix.so" /etc/pam.d/system-auth | grep "success[[:space:]]*=[[:space:]]*1" > /dev/null
let x=$x+$?
out $x $INT
INT='6.3.4 Limit Password Reuse'
grep "^password.*pam_unix" /etc/pam.d/system-auth| grep "remember[[:space:]]*=[[:space:]]*5" > /dev/null
out $? $INT
INT='6.4 Restrict root Login to System Console'
# Count of securetty entries that are not console/vc*/tty* (should be 0).
x=`grep -v -e "^console" -e "^vc" -e "^tty" /etc/securetty |wc -l`
out $x $INT
INT='6.5 Restrict Access to the su Command'
# pam_wheel must be active, and the group it names (default "wheel",
# overridden by a group= option captured via BASH_REMATCH) must exist.
ret=`grep "^auth.*pam_wheel.so" /etc/pam.d/su`
x=$?
group="wheel"
[[ "$ret" =~ group=([^ .]*)( |$) ]] && group=${BASH_REMATCH[1]}
grep "^$group" /etc/group > /dev/null
let x=$x+$?
out $x $INT
INT='6.6 Restrict root Login for ftp'
#auth required /lib/security/pam_listfile.so item=user sense=deny file=/etc/ftpusers.deny onerr=succeed
# Every ftp-related PAM file must deny via pam_listfile.
x=0
for i in `find /etc/pam.d -name *ftp*`
do
grep pam_listfile.*sense=deny.*file= $i > /dev/null
let x=$x+$?
done
out $x $INT
INT='6.7 Restrict root Login from X'
# Same pam_listfile deny requirement for the display managers (kdm/xdm/gdm).
x=0
for i in `find /etc/pam.d | grep -e kdm -e xdm -e gdm`
do
grep pam_listfile.*sense=deny.*file= $i > /dev/null
let x=$x+$?
done
out $x $INT
echo "### 7 User Accounts and Environment"
# 7.1 Set Shadow Password Suite Parameters (/etc/login.defs)
INT='7.1.1 Set Password Expiration Days'
grep "^PASS_MAX_DAYS[[:space:]]*90" /etc/login.defs > /dev/null
out $? $INT
INT='7.1.2 Set Password Change Minimum Number of Days'
grep "^PASS_MIN_DAYS[[:space:]]*7" /etc/login.defs > /dev/null
out $? $INT
INT='7.1.3 Set Password Expiring Warning Days'
grep "^PASS_WARN_AGE[[:space:]]*7" /etc/login.defs > /dev/null
out $? $INT
INT='7.2 Disable System Accounts'
# Count system accounts (uid < 500) with a real login shell; must be 0.
ret=`egrep -v "^\+" /etc/passwd | awk -F: '($1!="root" && $1!="sync" && $1!="shutdown" && $1!="halt" && $3<500 && $7!="/sbin/nologin") {print}'|wc -l`
out $ret $INT
INT='7.3 Set Default Group for root Account'
# root's primary GID is used directly as the score, so 0 (group root) passes.
ret=`grep "^root:" /etc/passwd | cut -f4 -d:`
out $ret $INT
INT='7.4 Set Default umask for Users'
grep "^umask[[:space:]]*077" /etc/bashrc > /dev/null
x=$?
grep "^umask[[:space:]]*077" /etc/profile > /dev/null
let x=$x+$?
out $x $INT
INT='7.5 Lock Inactive User Accounts'
# Extract the INACTIVE= value from useradd defaults; must be in (0, 35].
ret=`useradd -D | grep "INACTIVE"`
[[ "$ret" =~ INACTIVE=([\-0-9]*)$ ]] && age=${BASH_REMATCH[1]}
if [ $age -le 35 ] && [ $age -gt 0 ]
then
out 0 $INT
else
out 1 $INT
fi
INT='7.6 Trap SIGHUP, SIGINT, SIGQUIT and SIGTERM for console'
grep trap.*1.*2.*3.*15 /etc/profile > /dev/null
out $? $INT
echo "### 8 Warning Banners"
INT='8.1 Set Warning Banner for Standard Login Services'
# Banner files must be mode 644 and owned by root:root.
stat -L -c "%a %u %g" /etc/motd | egrep "644 0 0" > /dev/null
x=$?
stat -L -c "%a %u %g" /etc/issue | egrep "644 0 0" > /dev/null
let x=$x+$?
stat -L -c "%a %u %g" /etc/issue.net | egrep "644 0 0" > /dev/null
let x=$x+$?
out $x $INT
INT='8.2 Remove OS Information from Login Warning Banners'
# inv() inverts the grep status: finding the agetty escape sequences
# (\v \r \m \s) is a failure here, absence is a pass.
egrep '(\\v|\\r|\\m|\\s)' /etc/issue > /dev/null
x=$(inv $?)
egrep '(\\v|\\r|\\m|\\s)' /etc/motd > /dev/null
let x=$x+$(inv $?)
egrep '(\\v|\\r|\\m|\\s)' /etc/issue.net > /dev/null
let x=$x+$(inv $?)
out $x $INT
INT='8.3 Set GNOME Warning Banner'
# Only applicable when gdm is installed; otherwise the check auto-passes.
x=0
rpm -q gdm > /dev/null
if [ $? -eq 0 ]
then
ret=`gconftool-2 --get /apps/gdm/simple-greeter/banner_message_text`
[[ $ret =~ "No value set for" ]] && x=1
else
x=0
fi
out $x $INT
echo "### 9 System Maintenance"
#echo "9.1 Verify System File Permissions"
INT='9.1.1 Verify System File Permissions'
# Count files whose rpm verification differs and that are not mentioned
# in this script itself ($0 acts as a whitelist of expected deviations).
x=0
for i in `rpm -Va --nomtime --nosize --nomd5 --nolinkto|awk '{print $NF}'`
do
grep $i $0 >/dev/null
if [ $? -ne 0 ]
then
let x=$x+1
fi
done
out $x $INT
INT='9.1.2 Verify Permissions on /etc/passwd'
stat -L -c "%a" /etc/passwd | egrep "644" > /dev/null
out $? $INT
INT='9.1.3 Verify Permissions on /etc/shadow'
# Shadow files must have no permissions at all (mode 0).
stat -L -c "%a" /etc/shadow | egrep "0" > /dev/null
out $? $INT
INT='9.1.4 Verify Permissions on /etc/gshadow'
stat -L -c "%a" /etc/gshadow | egrep "0" > /dev/null
out $? $INT
INT='9.1.5 Verify Permissions on /etc/group'
stat -L -c "%a" /etc/group | egrep "644" > /dev/null
out $? $INT
# 9.1.6 - 9.1.9: the four authentication database files must be owned by
# root:root (uid 0, gid 0).
# bugfix: all four checks previously stat'ed /etc/group (copy/paste error);
# each now inspects the file named in its own title.
INT='9.1.6 Verify User/Group Ownership on /etc/passwd'
stat -L -c "%u %g" /etc/passwd | egrep "0 0" > /dev/null
out $? $INT
INT='9.1.7 Verify User/Group Ownership on /etc/shadow'
stat -L -c "%u %g" /etc/shadow | egrep "0 0" > /dev/null
out $? $INT
INT='9.1.8 Verify User/Group Ownership on /etc/gshadow'
stat -L -c "%u %g" /etc/gshadow | egrep "0 0" > /dev/null
out $? $INT
INT='9.1.9 Verify User/Group Ownership on /etc/group'
stat -L -c "%u %g" /etc/group | egrep "0 0" > /dev/null
out $? $INT
# 9.1.10 - 9.1.14: scan every local filesystem (df --local) for risky files;
# the raw count is the score, so 0 findings means pass.
INT='9.1.10 Find World Writable Files'
ret=`df --local -P|awk {'if (NR!=1) print $NF'} |xargs -I '{}' find '{}' -xdev -type f -perm -0002 2>/dev/null|wc -l`
out $ret $INT
INT='9.1.11 Find Un-owned Files and Directories'
ret=`df --local -P|awk {'if (NR!=1) print $NF'} |xargs -I '{}' find '{}' -xdev -nouser|wc -l`
out $ret $INT
INT='9.1.12 Find Un-grouped Files and Directories'
ret=`df --local -P|awk {'if (NR!=1) print $NF'} |xargs -I '{}' find '{}' -xdev -nogroup|wc -l`
out $ret $INT
INT='9.1.13 Find SUID System Executables'
ret=`df --local -P|awk {'if (NR!=1) print $NF'} |xargs -I '{}' find '{}' -xdev -type f -perm -4000 2>/dev/null|wc -l`
out $ret $INT
INT='9.1.14 Find SGID System Executables'
ret=`df --local -P|awk {'if (NR!=1) print $NF'} |xargs -I '{}' find '{}' -xdev -type f -perm -2000 2>/dev/null|wc -l`
out $ret $INT
#echo "9.2 Review User and Group Settings"
INT='9.2.1 Ensure Password Fields are Not Empty'
ret=`/bin/awk -F: '($2 == "" ) { print $1 }' /etc/shadow |wc -l`
out $ret $INT
# 9.2.2 - 9.2.4: legacy NIS "+" entries must not exist; inv() flips the
# grep status because a match is a failure.
INT='9.2.2 Verify No Legacy "+" Entries Exist in /etc/passwd File '
/bin/grep '^+:' /etc/passwd
out $(inv $?) $INT
INT='9.2.3 Verify No Legacy "+" Entries Exist in /etc/shadow File'
/bin/grep '^+:' /etc/shadow
out $(inv $?) $INT
INT='9.2.4 Verify No Legacy "+" Entries Exist in /etc/group File'
/bin/grep '^+:' /etc/group
out $(inv $?) $INT
INT='9.2.5 Verify No UID 0 Accounts Exist Other Than root'
ret=`/bin/awk -F: '($3 == 0) { print $1 }' /etc/passwd |grep -v root |wc -l`
out $ret $INT
#INT='9.2.6 Ensure root PATH Integrity'
# 9.2.6.x: sanity checks on the current PATH; each undesirable pattern is
# a failure, hence the inv() on every grep status.
INT='9.2.6.1 root PATH does not contain any empty directory'
echo $PATH |grep "::" > /dev/null
out $(inv $?) $INT
INT='9.2.6.2 root PATH does not contain trailing ":"'
echo $PATH |grep ":$" > /dev/null
out $(inv $?) $INT
INT='9.2.6.3 root PATH contains current directory'
# Matches "." as a standalone PATH component (start/end or between colons).
echo $PATH |grep -P "(^|:)\.(:|$)" > /dev/null
out $(inv $?) $INT
INT='9.2.6.4 Permissions of root PATH directories'
# Each PATH entry must exist, be a directory, not be group- or
# world-writable, and be owned by root. One point per violation.
p=`echo $PATH | /bin/sed -e 's/::/:/' -e 's/:$//' -e 's/:/ /g'`
x=0
for i in $p
do
if [ -d $i ]; then
# bugfix: permissions were read from $1 (the first PATH entry, frozen by
# the removed "set -- $p") on every iteration; inspect the current
# directory $i instead.
dirperm=`/bin/ls -ldH $i | /bin/cut -f1 -d" "`
# character 6 of ls -l output = group write bit
if [ `echo $dirperm | /bin/cut -c6 ` != "-" ]; then
let x=$x+1
fi
# character 9 = other write bit
if [ `echo $dirperm | /bin/cut -c9 ` != "-" ]; then
let x=$x+1
fi
if [ "`stat -L -c "%u" $i`" != "0" ] ; then
let x=$x+1
fi
else
let x=$x+1
fi
done
out $x $INT
INT='9.2.7 Check Permissions on User Home Directories'
# Home directories of interactive users must not be group-writable nor
# readable/writable/executable by others (ls -l chars 6, 8, 9, 10).
x=0
for dir in `egrep -v '(root|halt|sync|shutdown)' /etc/passwd |awk -F: '($7 != "/sbin/nologin") { print $6 }'`
do
dirperm=`/bin/ls -ld $dir | /bin/cut -f1 -d" "`
if [ `echo $dirperm | /bin/cut -c6 ` != "-" ]; then
let x=$x+1
fi
if [ `echo $dirperm | /bin/cut -c8 ` != "-" ]; then
let x=$x+1
fi
if [ `echo $dirperm | /bin/cut -c9 ` != "-" ]; then
let x=$x+1
fi
if [ `echo $dirperm | /bin/cut -c10 ` != "-" ]; then
let x=$x+1
fi
done
out $x $INT
INT='9.2.8 Check User Dot File Permissions'
# Regular (non-symlink) dot files in interactive users' homes must not be
# group- or world-writable.
x=0
for dir in `egrep -v '(root|halt|sync|shutdown)' /etc/passwd |awk -F: '($7 != "/sbin/nologin") { print $6 }'`
do
for file in $dir/.[A-Za-z0-9]*
do
# skip symlinks; only score real files
if [ ! -h "$file" -a -f "$file" ]
then
fileperm=`/bin/ls -ld $file | /bin/cut -f1 -d" "`
if [ `echo $fileperm | /bin/cut -c6 ` != "-" ]
then
let x=$x+1
fi
if [ `echo $fileperm | /bin/cut -c9 ` != "-" ]
then
let x=$x+1
fi
fi
done
done
out $x $INT
INT='9.2.9 Check Permissions on User .netrc'
# .netrc files, where present, must deny all group/other access
# (the ".00" pattern matches any mode ending in 00).
x=0
for file in `egrep -v '(root|sync|halt|shutdown)' /etc/passwd |awk -F: '($7 != "/sbin/nologin") { print $6 "/.netrc" }'`
do
if [ -f $file ]; then
stat -L -c "%a" $file|grep ".00" > /dev/null
let x=$x+$?
fi
done
out $x $INT
INT='9.2.10 Check for Presence of User .rhosts Files'
# Any real (non-symlink) .rhosts file is a finding.
x=0
for file in `egrep -v '(root|sync|halt|shutdown)' /etc/passwd |awk -F: '($7 != "/sbin/nologin") { print $6 "/.rhosts" }'`
do
if [ ! -h "$file" -a -f "$file" ]; then
let x=$x+1
fi
done
out $x $INT
INT='9.2.11 Check Groups in /etc/passwd'
# Every GID referenced in /etc/passwd must be defined in /etc/group.
x=0
for i in $(cut -s -d: -f4 /etc/passwd | sort -u )
do
grep -q -P "^.*?:x:$i:" /etc/group
if [ $? -ne 0 ]
then
let x=$x+1
fi
done
out $x $INT
INT='9.2.12 Check That Users Are Assigned Valid Home Directories'
# bugfix: the original piped awk into "while read", which ran the loop body
# in a subshell -- every increment of x was lost and out always saw 0.
# Feeding the loop through process substitution keeps x in this shell.
x=0
while read user uid dir; do
if [ $uid -ge 500 -a ! -d "$dir" -a $user != "nfsnobody" ]; then
let x=$x+1
fi
done < <(awk -F: '{ print $1 " " $3 " " $6 }' /etc/passwd)
out $x $INT
INT='9.2.13 Check User Home Directory Ownership'
# Same subshell bugfix as 9.2.12 above.
x=0
while read user uid dir; do
if [ $uid -ge 500 -a -d "$dir" -a $user != "nfsnobody" ]; then
owner=$(stat -L -c "%U" "$dir")
if [ "$owner" != "$user" ]; then
let x=$x+1
fi
fi
done < <(awk -F: '{ print $1 " " $3 " " $6 }' /etc/passwd)
out $x $INT
# 9.2.14 - 9.2.18: duplicate detection via "sort | uniq -c"; any count
# other than 1 is a finding, so the number of such lines is the score.
INT='9.2.14 Check for Duplicate UIDs'
ret=`cut -f3 -d: /etc/passwd|sort|uniq -c|sed 's/^[ ]*//'|grep -v ^1|wc -l`
out $ret $INT
INT='9.2.15 Check for Duplicate GIDs'
ret=`cut -f3 -d: /etc/group|sort|uniq -c|sed 's/^[ ]*//'|grep -v ^1|wc -l`
out $ret $INT
INT='9.2.16 Check That Reserved UIDs Are Assigned to System Accounts '
# Every account with uid < 500 must appear in the known system-account list.
x=0
defUsers="root bin daemon adm lp sync shutdown halt mail news uucp operator games gopher ftp nobody nscd vcsa rpc mailnull smmsp pcap ntp dbus avahi sshd rpcuser nfsnobody haldaemon avahi-autoipd distcache apache oprofile webalizer dovecot squid named xfs gdm sabayon usbmuxd rtkit abrt saslauth pulse postfix tcpdump"
for i in `/bin/awk -F: '($3 < 500) { print $1 }' /etc/passwd`
do
echo $defUsers|grep $i > /dev/null
let x=$x+$?
done
out $x $INT
INT='9.2.17 Check for Duplicate User Names'
ret=`cut -f1 -d: /etc/passwd|sort|uniq -c|sed 's/^[ ]*//'|grep -v ^1|wc -l`
out $ret $INT
INT='9.2.18 Check for Duplicate Group Names'
ret=`cut -f1 -d: /etc/group|sort|uniq -c|sed 's/^[ ]*//'|grep -v ^1|wc -l`
out $ret $INT
INT='9.2.19 Check for Presence of User .netrc Files'
# Any real .netrc file in any home directory is a finding.
x=0
for dir in `awk -F: '{ print $6 }' /etc/passwd`; do
if [ ! -h "$dir/.netrc" -a -f "$dir/.netrc" ]; then
let x=$x+1
fi
done
out $x $INT
INT='9.2.20 Check for Presence of User .forward Files'
# Any real (non-symlink) .forward file in a home directory is a finding.
x=0
for dir in `awk -F: '{ print $6 }' /etc/passwd`; do
# bugfix: the -f test previously looked for ".iforward" (typo), so an
# existing .forward file was never counted.
if [ ! -h "$dir/.forward" -a -f "$dir/.forward" ]; then
let x=$x+1
fi
done
out $x $INT
echo "### 10 OMT Specific"
INT='10.1 root account is only used from VLAN 200'
# Third octet of each source IP in "last" output for root must be 200.
x=0
for i in `last|awk {'if ($1=="root") print $3}'|cut -f3 -d.`
do
if [ "$i" != "200" ]
then
let x=$x+1
fi
done
out $x $INT
INT='10.2 Check "From" statement in authorized_keys files'
# Every authorized_keys file of an interactive user must restrict its keys
# with a leading from= clause. The file location is taken from the
# AuthorizedKeysFile directive when set (handling %u and %h tokens),
# otherwise the default ~/.ssh/authorized_keys is used.
users=`egrep -v '(root|halt|sync|shutdown)' /etc/passwd |awk -F: '($7 != "/sbin/nologin") { print $1 }'`
ret=`grep ^AuthorizedKeysFile /etc/ssh/sshd_config|awk '{print $2}'`
if [ $? -ne 0 ]
then
ret=".ssh/authorized_keys"
fi
if [[ $ret =~ (\%[a-z]) ]]
then
case ${BASH_REMATCH[1]} in
"%u") x=0
# %u token: substitute each username into the configured path
for user in $users
do
auth_file=`echo $ret| sed s/\%u/$user/`
if [ -f $auth_file ]
then
err=`grep "^from=" $auth_file`
if [ $? -ne 0 ]
then
let x=$x+1
fi
fi
done
;;
"%h") x=0
# %h token: substitute each user's home dir (slashes escaped for sed)
for user in $users
do
home1=`grep "^$user:" /etc/passwd|awk -F: '{print $6}'`
home=`echo $home1 |sed "s/\//\\\\\\\\\//g"`
auth_file=`echo $ret| sed s/\%h/$home/`
if [ -f $auth_file ]
then
err=`grep "^from=" $auth_file`
if [ $? -ne 0 ]
then
let x=$x+1
fi
fi
done
;;
*) exit 0
;;
esac
else
# No token in the directive (or directive absent): check the default
# per-user ~/.ssh/authorized_keys location.
x=0
for user in $users
do
home=`grep "^$user:" /etc/passwd|awk -F: '{print $6}'`
if [ -f $home/.ssh/authorized_keys ]
then
ret=`grep "^from=" $home/.ssh/authorized_keys`
if [ $? -ne 0 ]
then
let x=$x+1
fi
fi
done
fi
out $x $INT
#echo "10.3 Verify SNMPD configuration"
INT='10.3.1 Verify that syslocation is provided'
grep -i "^syslocation" /etc/snmp/snmpd.conf > /dev/null
out $? $INT
INT='10.3.2 Verify that syscontact is provided'
grep -i "^syscontact" /etc/snmp/snmpd.conf > /dev/null
out $? $INT
#echo "10.4 Verify NFS Server configuration"
# NFS server checks only apply when an exports file exists.
if [ -f /etc/exports ]
then
INT='10.4.1 Do not use "no_root_squash" NFS option'
grep "no_root_squash" /etc/exports > /dev/null
out $(inv $?) $INT
INT='10.4.2 Check for NFS syntax errors'
# A "(" not preceded by an alphanumeric usually means a stray space
# between host and options, which silently exports to the world.
grep "[^0-9^a-z](" /etc/exports
out $(inv $?) $INT
fi
#echo "10.5 Verify NFS CLient configuration"
# Each NFS entry in fstab must carry the nosuid / nodev mount options.
INT='10.5.1 Check for nosuid nfs client mount option in /etc/fstab'
x=0
for i in `grep nfs /etc/fstab`
do
echo $i| grep nosuid > /dev/null
let x=$x+$?
done
out $x $INT
INT='10.5.2 Check for nodev nfs client mount option in /etc/fstab'
x=0
for i in `grep nfs /etc/fstab`
do
echo $i| grep nodev > /dev/null
let x=$x+$?
done
out $x $INT
# Same two checks for autofs maps, gathered from auto.master and any
# drop-in files under /etc/auto.master.d.
if [ -f /etc/auto.master ]
then
LIST=`grep -v "^#" /etc/auto.master|grep -v "^+"|awk '{print $2}'|grep -v "^-"`
if [ -d /etc/auto.master.d ]
then
LIST=`echo $LIST; find /etc/auto.master.d -maxdepth 1 -type f`
fi
INT='10.5.3 Check for nosuid nfs client mount option in autofs'
x=0
for i in $LIST
do
for j in `grep nfs $i`
do
echo $i| grep nosuid > /dev/null
let x=$x+$?
done
done
out $x $INT
INT='10.5.4 Check for nodev nfs client mount option in autofs'
x=0
for i in $LIST
do
for j in `grep nfs $i`
do
echo $i| grep nodev > /dev/null
let x=$x+$?
done
done
out $x $INT
fi
# 10.6.x: Apache hardening; the whole section runs only when httpd is
# installed. LIST holds the main config plus every Include'd file.
/bin/rpm -q httpd > /dev/null
if [ $? -eq 0 ]
then
LIST="/etc/httpd/conf/httpd.conf"
LIST=`echo $LIST; grep "^Include" /etc/httpd/conf/httpd.conf |awk '{print "/etc/httpd/"$2}'`
INT='10.6.1 Check ServerTokens Apache directive'
grep "^ServerTokens[[:space:]]*Prod" /etc/httpd/conf/httpd.conf > /dev/null
out $? $INT
INT='10.6.2 Check ServerSignature Apache directive'
grep "^ServerSignature[[:space:]]*Off" /etc/httpd/conf/httpd.conf > /dev/null
out $? $INT
INT='10.6.3 Disable UserDir'
grep "^[[:space:]]*UserDir[[:space:]]*disable" /etc/httpd/conf/httpd.conf > /dev/null
out $? $INT
INT='10.6.4 Check for permissive AllowOverride'
# Any AllowOverride value other than None in any config file is a finding.
x=0
for i in $LIST
do
grep "^[[:space:]]*AllowOverride[[:space:]][^None]" $i > /dev/null
let x=$x+$(inv $?)
done
out $x $INT
INT='10.6.5 Apache server has its own user'
grep "^User[[:space:]][apache|www\-data|httpd]" /etc/httpd/conf/httpd.conf > /dev/null
out $? $INT
INT='10.6.6 Apache server has its own group'
grep "^Group[[:space:]][apache|www\-data|httpd]" /etc/httpd/conf/httpd.conf > /dev/null
out $? $INT
# 10.6.7 - 10.6.10: no config file may enable the risky Options values.
# bugfix: x was never reset before these loops, so findings from 10.6.4
# (and from each other) accumulated into every later result; each check
# now starts from a clean counter.
INT='10.6.7 No usage of +Indexes options'
x=0
for i in $LIST
do
grep "^[[:space:]]*Options[[:space:]]*[^-][+]*Indexes" $i > /dev/null
let x=$x+$(inv $?)
done
out $x $INT
INT='10.6.8 No usage of +Includes options'
x=0
for i in $LIST
do
grep "^[[:space:]]*Options[[:space:]]*[^-][+]*Includes" $i > /dev/null
let x=$x+$(inv $?)
done
out $x $INT
INT='10.6.9 No usage of +ExecCGI options'
x=0
for i in $LIST
do
grep "^[[:space:]]*Options[[:space:]]*[^-][+]*ExecCGI" $i > /dev/null
let x=$x+$(inv $?)
done
out $x $INT
INT='10.6.10 No usage of +FollowSymLinks options'
x=0
for i in $LIST
do
grep "^[[:space:]]*Options[[:space:]]*[^-][+]*FollowSymLinks" $i > /dev/null
let x=$x+$(inv $?)
done
out $x $INT
INT='10.6.11 Apache runs mod_security'
grep "^LoadModule.*mod_security2.so$" /etc/httpd/conf/httpd.conf > /dev/null
out $? $INT
INT='10.6.12 Timeout is under 300'
tmt=`grep "^Timeout[[:space:]]" /etc/httpd/conf/httpd.conf| awk '{print $2}'`
if [ $tmt -lt 300 ]
then
out 0 $INT
else
out 1 $INT
fi
# 10.6.13 - 10.6.22: dangerous/unneeded modules must not be loaded.
# apachectl dumps the active module list; finding a module is a failure.
INT='10.6.13 Do not load cgi module'
/usr/sbin/apachectl -t -D DUMP_MODULES 2>&1| grep cgi_module >/dev/null
out $(inv $?) $INT
INT='10.6.14 Do not load userdir module'
/usr/sbin/apachectl -t -D DUMP_MODULES 2>&1| grep userdir_module >/dev/null
out $(inv $?) $INT
INT='10.6.15 Do not load dav module'
/usr/sbin/apachectl -t -D DUMP_MODULES 2>&1| grep dav_module >/dev/null
out $(inv $?) $INT
INT='10.6.16 Do not load dav_fs module'
/usr/sbin/apachectl -t -D DUMP_MODULES 2>&1| grep dav_fs_module >/dev/null
out $(inv $?) $INT
INT='10.6.17 Do not load info module'
/usr/sbin/apachectl -t -D DUMP_MODULES 2>&1| grep info_module >/dev/null
out $(inv $?) $INT
INT='10.6.18 Do not load autoindex module'
/usr/sbin/apachectl -t -D DUMP_MODULES 2>&1| grep autoindex_module >/dev/null
out $(inv $?) $INT
INT='10.6.19 Do not load suexec module'
/usr/sbin/apachectl -t -D DUMP_MODULES 2>&1| grep suexec_module >/dev/null
out $(inv $?) $INT
INT='10.6.20 Do not load mysql_auth module'
/usr/sbin/apachectl -t -D DUMP_MODULES 2>&1| grep mysql_auth_module >/dev/null
out $(inv $?) $INT
INT='10.6.21 Do not load proxy_ftp module'
/usr/sbin/apachectl -t -D DUMP_MODULES 2>&1| grep proxy_ftp_module >/dev/null
out $(inv $?) $INT
# NOTE(review): 10.6.22 repeats the autoindex check from 10.6.18 -- looks
# like a copy/paste duplicate; confirm which module was actually intended.
INT='10.6.22 Do not load autoindex module'
/usr/sbin/apachectl -t -D DUMP_MODULES 2>&1| grep autoindex_module >/dev/null
out $(inv $?) $INT
fi
# 10.7.x: default iptables policies must be DROP; the policy appears in the
# first line of "iptables -L <chain>" output.
INT='10.7.1 DROP as default iptables INPUT policy'
iptables -L INPUT| head -1| grep DROP >/dev/null
out $? $INT
INT='10.7.2 DROP as default iptables OUTPUT policy'
iptables -L OUTPUT| head -1| grep DROP >/dev/null
out $? $INT
INT='10.7.3 DROP as default iptables FORWARD policy'
iptables -L FORWARD| head -1| grep DROP >/dev/null
out $? $INT
INT='10.8.1 Check that screen is installed'
/bin/rpm -q screen > /dev/null
out $? $INT
INT='10.9.1 Check that AD auth is used'
# Both Kerberos client packages must be present.
/bin/rpm -q krb5-workstation > /dev/null
x=$?
/bin/rpm -q pam_krb5 > /dev/null
let x=$x+$?
out $x $INT
INT='10.9.2 OMT Domain comtroller is used'
if [ -f /etc/krb5.conf ]
then
grep "CORP.OMT.LCL" /etc/krb5.conf >/dev/null
x=$?
else
x=1
fi
out $x $INT
# Final summary: score was accumulated by out() across all checks.
echo
echo " ==> Score: $score"
echo
|
titouwan/CIS
|
cis_bench.sh
|
Shell
|
gpl-2.0
| 45,378 |
#! /bin/bash
# 03/08/2014 -- A. Spiga
# install the generic model
# with gfortran on Linux env
# for teaching purposes
# updated 2021 J-B Madeleine
#
# Sequential installer: checks out the LMDZ generic climate model, builds a
# local netCDF, compiles the GCM and its initial-condition tool, then fetches
# surface datasets. All command output is appended to install.log.
#######################
# On ciclad or climserv, load a recent gfortran compiler
hostname=`hostname`
case ${hostname:0:6} in
ciclad|camelo|merlin) module load gnu/7.2.0 ;
echo Loaded gnu/7.2.0 ;;
esac
#######################
# Last assignment wins: only the 2020-2021 revision (2484) is used.
version="1359"
version="1370"
version="HEAD"
version="2233" # updated 2019-2020
version="2484" # updated 2020-2021
#######################
# usefcm=1 -> build with the FCM build system; 0 -> legacy makelmdz
usefcm=1
#######################
# model grid resolution (lon x lat x levels)
zedim="8x8x16"
#######################
zeoptall=" -d "${zedim}" -p std -arch gfortran_mod "
zeopt=${zeoptall}" -full -cpp NODYN -b 1x1 -t 3 -s 1 -io noioipsl "
#######################
useplanetoplot=0
#######################
# Directory layout: $ini = launch dir, $mod = model checkout, $net = netCDF
ini=$PWD
mod=$ini/MODELES
net=$mod/LMDZ.COMMON/netcdf/gfortran_netcdf-4.0.1
log=$ini/install.log
\rm $log > /dev/null 2> /dev/null
touch $log
##
echo "1. communicate with server"
cd $ini
rm -rf MODELES
svn co -N http://svn.lmd.jussieu.fr/Planeto/trunk MODELES >> $log 2>&1
###
echo "2. get model code (please wait)"
cd $mod
svn update -r $version LMDZ.GENERIC LMDZ.COMMON >> $log 2>&1
###
echo "3. get and compile netCDF librairies (please wait)"
cd $ini
ze_netcdf=netcdf-4.0.1
mywget="wget --no-check-certificate"
$mywget http://www.lmd.jussieu.fr/~lmdz/Distrib/$ze_netcdf.tar.gz -a $log
tar xzvf $ze_netcdf.tar.gz >> $log 2>&1
\rm $ze_netcdf.tar.gz*
# Compiler/flags used by the netCDF configure script below.
export FC=gfortran
export FFLAGS=" -O2"
export F90=gfortran
export FCFLAGS="-O2 -ffree-form"
export CPPFLAGS=""
export CC=gcc
export CFLAGS="-O2"
export CXX=g++
export CXXFLAGS="-O2"
cd $ze_netcdf
PREFIX=$PWD
./configure --prefix=${PREFIX} --enable-separate-fortran \
>> $log 2>&1 #--disable-cxx
make >> $log 2>&1
make test >> $log 2>&1
make install >> $log 2>&1
cd ..
mkdir $ini/MODELES/LMDZ.COMMON/netcdf
mv $ze_netcdf $net
# in case netcdf was compiled in 64bits:
if [ -d $net/lib64 ] && !( [ -d $net/lib ] )
then
ln -sf lib64 $net/lib
fi
####
#echo "4. get and compile IOIPSL librairies (please wait)"
##cp $ini/fix/install_ioipsl_gfortran_noksh.bash $mod/LMDZ.COMMON/ioipsl/install_ioipsl_gfortran.bash
#cd $mod/LMDZ.COMMON/ioipsl
##sed -i s+"/home/aymeric/Science/MODELES"+$mod+g install_ioipsl_gfortran.bash
#./install_ioipsl_gfortran.bash >> $log 2>&1
#ls -l $mod/LMDZ.COMMON/ioipsl/modipsl/lib
###
echo "4. customize arch files"
# Derive a local "gfortran_mod" arch from the stock gfortran arch files and
# point its environment at the freshly built netCDF.
cd $mod/LMDZ.COMMON/arch
cp arch-gfortran.fcm arch-gfortran_mod.fcm
cp arch-gfortran.path arch-gfortran_mod.path
echo NETCDF=$net > arch-gfortran_mod.env
###
echo "5. compile the model fully at least once (please wait)"
if [ $usefcm -eq 1 ] ; then
cd $mod
svn co http://forge.ipsl.jussieu.fr/fcm/svn/PATCHED/FCM_V1.2 >> $log 2>&1
fcmpath=$mod/FCM_V1.2/bin
PATH=$PATH:$fcmpath
cd $mod/LMDZ.COMMON
./makelmdz_fcm -j 2 $zeopt gcm >> $log 2>&1
else
cd $mod/LMDZ.COMMON
./makelmdz $zeopt gcm >> $log 2>&1
fi
###
echo "6. compile the program for initial condition at least once (please wait)"
cd $mod/LMDZ.COMMON
if [ $usefcm -eq 1 ] ; then
./makelmdz_fcm -j 2 $zeopt newstart >> $log 2>&1
else
./makelmdz $zeopt newstart >> $log 2>&1
fi
#### previous old local method
#cd $mod/LMDZ.GENERIC
#sed s+"/donnees/emlmd/netcdf64-4.0.1_gfortran"+$net+g makegcm_gfortran > makegcm_gfortran_local
#chmod 755 makegcm_gfortran_local
#./makegcm_gfortran_local -d 8x8x6 -debug newstart >> $log 2>&1
###
echo "7. download supplementary surface files"
# Only fetch each surface dataset if it is not already present.
cd $ini/RUN/DATAGENERIC
lmdzserv="http://www.lmd.jussieu.fr/~lmdz/planets/LMDZ.GENERIC/surfaces/"
if [[ ! (-f "surface_earth.nc") ]] ; then
$mywget "$lmdzserv/surface_earth.nc"
fi
if [[ ! (-f "surface_mars.nc") ]] ; then
$mywget "$lmdzserv/surface_mars.nc"
fi
if [[ ! (-f "surface_venus.nc") ]] ; then
$mywget "$lmdzserv/surface_venus.nc"
fi
if [[ ! (-f "surface_titan.nc") ]] ; then
$mywget "$lmdzserv/surface_titan.nc"
fi
if [[ ! (-f "surface_earth_paleo.tar.gz") ]] ; then
$mywget "$lmdzserv/surface_earth_paleo.tar.gz"
tar -xvzf surface_earth_paleo.tar.gz
fi
###
# Optional post-processing tools (disabled by default via useplanetoplot=0).
if [ $useplanetoplot -eq 1 ] ; then
echo "8. get post-processing tools"
cd $ini/TOOLS
rm -rf planetoplot
git clone https://github.com/aymeric-spiga/planetoplot >> $log 2>&1
rm -rf planets
git clone https://github.com/aymeric-spiga/planets >> $log 2>&1
fi
###
# Archive the log inside the model tree.
cd $ini
mv $log $mod/
aymeric-spiga/eduplanet
|
install.sh
|
Shell
|
gpl-2.0
| 4,339 |
#! /bin/sh
# Copyright (C) 2002-2017 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
# Check dirlist globbing support.
# Automake test: the "dirlist" file in the system acdir may contain glob
# patterns; both macros defined in the matched directories must be found.
. test-init.sh
cat > configure.ac <<EOF
AC_INIT([$me], [1.0])
AM_INIT_GUILE_MODULE
AM_FOO_BAR
EOF
# Two directories matching the "dirlist2*-test" glob, each providing one
# of the macros used in configure.ac.
mkdir dirlist21-test dirlist22-test
cat >dirlist21-test/dirlist21-check.m4 <<'END'
AC_DEFUN([AM_INIT_GUILE_MODULE],[. $srcdir/../GUILE-VERSION])
END
cat >dirlist22-test/dirlist22-check.m4 <<'END'
AC_DEFUN([AM_FOO_BAR],[
: foo bar baz
])
END
mkdir my-acdir
cat > my-acdir/dirlist <<'END'
dirlist2*-test
END
$ACLOCAL --system-acdir my-acdir
$AUTOCONF
# There should be no m4_include in aclocal.m4, even though m4/dirlist
# contains './dirlist-test' as a relative directory.  Only -I directories
# are subject to file inclusion.
grep m4_include aclocal.m4 && exit 1
# Both macro expansions must have made it into the generated configure.
grep 'GUILE-VERSION' configure
grep 'foo bar baz' configure
:
|
Starlink/automake
|
t/aclocal-dirlist-globbing.sh
|
Shell
|
gpl-2.0
| 1,457 |
#!/bin/sh
#
# Update Host Invetory - API ZABBIX
#
# Luiz Sales - [email protected]
# redhate.me - lsales.biz
#
# 10/09/15
#
# Sets a host's inventory mode via the Zabbix JSON-RPC API.
# Usage: <script> <HOSTNAME> <INVENTORY_MODE>
# VARIABLES
#HOSTNAME=''
# Zabbix API endpoint (JSON-RPC over HTTP)
API='http://localhost/api_jsonrpc.php'
# CONSTANT VARIABLES
# NOTE(review): credentials are expected to be filled in here; consider
# reading them from the environment instead of hardcoding.
ZABBIX_USER=""
ZABBIX_PASS=""
# $1 = host name as registered in Zabbix, $2 = inventory mode (-1/0/1)
HOST=$1
INV_MODE=$2
help() {
	# Print usage information: expected arguments and the three valid
	# inventory modes understood by the Zabbix API.
	printf '\n%s <HOSTNAME> <INVENTORY_MODE>\n\n' "$0"
	printf '%s\n' \
		'-1 - disabled;' \
		'0 - (default) manual;' \
		'1 - automatic.'
	printf '\n'
}
# Log in to the Zabbix API and print the session token.
# Reads: API, ZABBIX_USER, ZABBIX_PASS. Output: auth token on stdout.
authenticate()
{
wget -O- -o /dev/null $API --header 'Content-Type: application/json-rpc' --post-data "{
	\"jsonrpc\": \"2.0\",
	\"method\": \"user.login\",
	\"params\": {
	\"user\": \"$ZABBIX_USER\",
	\"password\": \"$ZABBIX_PASS\"},
	\"id\": 0}" | cut -d'"' -f8
	# NOTE(review): the token is extracted by position (8th "-delimited
	# field), which silently yields garbage on an error response -- verify.
}
AUTH_TOKEN=$(authenticate)
# Query the Zabbix API for the host whose visible name matches $HOST and
# print the raw JSON response. Reads: API, AUTH_TOKEN, HOST.
get_host_id() {
wget -O- -o /dev/null $API --header 'Content-Type: application/json-rpc' --post-data "{
	\"jsonrpc\": \"2.0\",
	\"method\": \"host.get\",
	\"params\": {
	\"output\": [
	\"hostid\",
	\"host\"
	],
	\"filter\": {
	\"name\" : [ \"$HOST\" ] }
	},
	\"auth\": \"$AUTH_TOKEN\",
	\"id\": 2 }"
}
# Pull the numeric hostid out of the JSON response with awk field surgery.
HOSTID=$(get_host_id | awk -v RS='{"' -F\" '/^hostid/ {print $3}')
# Set the inventory mode of $HOSTID via host.massupdate.
# Reads: API, AUTH_TOKEN, HOSTID, INV_MODE (-1 disabled / 0 manual / 1 auto).
add_inventory_auto_to_host() {
wget -O- -o /dev/null $API --header 'Content-Type: application/json-rpc' --post-data "{
	\"jsonrpc\": \"2.0\",
	\"method\": \"host.massupdate\",
	\"params\": {
	\"inventory_mode\": \"$INV_MODE\",
	\"hosts\": [
	{
	\"hostid\": \"$HOSTID\"
	}
	]
	},
	\"auth\": \"$AUTH_TOKEN\",
	\"id\": 1 }"
}
# Show usage when no host name was supplied, otherwise perform the update.
# bugfix: $1 is now quoted -- the unquoted [ -z $1 ] mis-parses when the
# argument is empty or contains whitespace.
if [ -z "$1" ]; then
	help;
else
	add_inventory_auto_to_host;
fi
|
lsa1es/Zabbix
|
HostInventory/HostInventory.sh
|
Shell
|
gpl-2.0
| 1,668 |
#!/usr/bin/env bash
# Travis deploy step: re-initialise the working tree as a fresh repository,
# write a deploy-specific .gitignore, and force-push the result to the
# "release" branch. Any error aborts (set -e).
set -e
# Drop the existing history so the release branch holds a single commit.
rm -rf .git
rm -r .gitignore
# Exclude build tooling and sources from the released artifact.
echo ".editorconfig
.travis.yml
README.md
.bin
bower.json
gulpfile.js
node_modules
maps
package.json
src/html
src/images
src/jade
src/shell
README.md
setting.json
bower_components
.DS_store
vendor
composer.*
codesniffer.ruleset.xml
*.zip" > .gitignore
git init
git config user.name "featherplain"
git config user.email "[email protected]"
git add .
git commit --quiet -m "Deploy from travis"
# GH_TOKEN/GH_REF come from the CI environment; output is silenced so the
# token never appears in the build log.
git push --force --quiet "https://${GH_TOKEN}@${GH_REF}" master:release > /dev/null 2>&1
|
featherplain/amethyst
|
.bin/deploy.sh
|
Shell
|
gpl-2.0
| 545 |
#!/bin/bash -x
#
# Generated - do not edit!
#
# NetBeans packaging script: copies the built binary into a staging tree and
# archives it as dist/<conf>/<platform>/package/ex29majormenormitjana.tar.
# Macros
TOP=`pwd`
CND_PLATFORM=GNU-Linux-x86
CND_CONF=Debug
CND_DISTDIR=dist
CND_BUILDDIR=build
CND_DLIB_EXT=so
NBTMPDIR=${CND_BUILDDIR}/${CND_CONF}/${CND_PLATFORM}/tmp-packaging
TMPDIRNAME=tmp-packaging
OUTPUT_PATH=${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/ex29_majormenormitjana
OUTPUT_BASENAME=ex29_majormenormitjana
PACKAGE_TOP_DIR=ex29majormenormitjana/
# Functions
# Abort the whole script if the previous command failed.
function checkReturnCode
{
rc=$?
if [ $rc != 0 ]
then
exit $rc
fi
}
function makeDirectory
# $1 directory path
# $2 permission (optional)
{
mkdir -p "$1"
checkReturnCode
if [ "$2" != "" ]
then
chmod $2 "$1"
checkReturnCode
fi
}
function copyFileToTmpDir
# $1 from-file path
# $2 to-file path
# $3 permission
{
cp "$1" "$2"
checkReturnCode
if [ "$3" != "" ]
then
chmod $3 "$2"
checkReturnCode
fi
}
# Setup
cd "${TOP}"
mkdir -p ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package
rm -rf ${NBTMPDIR}
mkdir -p ${NBTMPDIR}
# Copy files and create directories and links
cd "${TOP}"
makeDirectory "${NBTMPDIR}/ex29majormenormitjana/bin"
copyFileToTmpDir "${OUTPUT_PATH}" "${NBTMPDIR}/${PACKAGE_TOP_DIR}bin/${OUTPUT_BASENAME}" 0755
# Generate tar file
cd "${TOP}"
rm -f ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package/ex29majormenormitjana.tar
cd ${NBTMPDIR}
tar -vcf ../../../../${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package/ex29majormenormitjana.tar *
checkReturnCode
# Cleanup
cd "${TOP}"
rm -rf ${NBTMPDIR}
|
pdavila13/Program_C
|
Ex29_majorMenorMitjana/nbproject/Package-Debug.bash
|
Shell
|
gpl-2.0
| 1,529 |
#!/usr/bin/env bash
# Create a GitHub release for the current plugin version and attach the
# built jar. Skips SNAPSHOT versions. Requires GH_TOKEN in the environment.
GROUP="com/skyisland/questmanager"
PROJECT_NAME="QuestManager"
VERSION=$(ls ${HOME}/.m2/repository/${GROUP}/${PROJECT_NAME} | sed 's/maven-metadata-local.xml//' | xargs)
# bugfix: the guard pattern was misspelled "SNAPHOT", so it could never
# equal "SNAPSHOT" and snapshot builds were released too.
if [ "$(echo "$VERSION" | grep -o SNAPSHOT)" != "SNAPSHOT" ]; then
    echo "VERSION: $VERSION"
    # Create new release
    TAG_NAME="v$VERSION"
    NAME="$PROJECT_NAME v$VERSION"
    API_JSON="{\"tag_name\": \"$TAG_NAME\",\"target_commitish\": \"master\",\"name\": \"$NAME\",\"body\": \"Plugin release of version $VERSION from Travis build ${TRAVIS_BUILD_NUMBER}\",\"draft\": false,\"prerelease\": false}"
    curl --data "$API_JSON" "https://api.github.com/repos/Dove-Bren/QuestManager/releases?access_token=${GH_TOKEN}"
    # Upload assets to release
    JAR_NAME="${PROJECT_NAME}-${VERSION}.jar"
    ls ${HOME}
    ls ${HOME}/QuestManager/build/libs/
    # bugfix: the upload URL must be quoted -- the unquoted '&' backgrounded
    # curl and turned access_token=... into a separate command. Also use
    # --data-binary @file: "$(cat jar)" cannot carry the jar's binary bytes.
    # NOTE(review): the upload endpoint expects the release *id*, not the
    # version string -- confirm against the GitHub Releases API.
    curl --data-binary "@${HOME}/QuestManager/build/libs/${JAR_NAME}" "https://uploads.github.com/repos/dove-bren/QuestManager/releases/${VERSION}/assets?name=${JAR_NAME}&access_token=${GH_TOKEN}"
fi
|
Dove-Bren/QuestManager
|
scripts/plugin-release.sh
|
Shell
|
gpl-3.0
| 1,043 |
#---------------------------------------------------------------------
# Function: InstallWebServer Ubuntu 18.04
#    Install and configure Apache2, php + modules
#---------------------------------------------------------------------
# Reads: CFG_WEBSERVER (apache|nginx), CFG_PHPMYADMIN, CFG_HOSTNAME_FQDN,
#        TIME_ZONE, IP_ADDRESS; writes CFG_NGINX / CFG_APACHE.
# Relies on helpers defined elsewhere: apt_install, hide_output.
InstallWebServer() {
  if [ "$CFG_WEBSERVER" == "apache" ]; then
    CFG_NGINX=n
    CFG_APACHE=y
    echo -n "Installing Web server (Apache) and modules... "
    echo "phpmyadmin phpmyadmin/reconfigure-webserver multiselect apache2" | debconf-set-selections
    # - DISABLED DUE TO A BUG IN DBCONFIG - echo "phpmyadmin phpmyadmin/dbconfig-install boolean false" | debconf-set-selections
    echo "dbconfig-common dbconfig-common/dbconfig-install boolean false" | debconf-set-selections
    # apt_install apache2 apache2-doc apache2-utils libapache2-mod-php libapache2-mod-fcgid apache2-suexec-pristine libapache2-mod-passenger libapache2-mod-python libexpat1 ssl-cert libruby
    apt_install apache2 apache2-doc apache2-utils libapache2-mod-php libapache2-mod-fcgid apache2-suexec-pristine libruby libapache2-mod-python
    echo -e "[${green}DONE${NC}]\n"
    echo -n "Installing PHP and modules... "
    # apt_install php7.2 php7.2-common php7.2-gd php7.2-mysql php7.2-imap php7.2-cli php7.2-cgi php-pear mcrypt imagemagick libruby php7.2-curl php7.2-intl php7.2-pspell php7.2-recode php7.2-sqlite3 php7.2-tidy php7.2-xmlrpc php7.2-xsl memcached php-memcache php-imagick php-gettext php7.2-zip php7.2-mbstring php7.2-fpm php7.2-opcache php-apcu
    apt_install php7.2 php7.2-common php7.2-gd php7.2-mysql php7.2-imap php7.2-cli php7.2-cgi php-pear php7.2-curl php7.2-intl php7.2-pspell php7.2-recode php7.2-sqlite3 php7.2-tidy php7.2-xmlrpc php7.2-xsl php-memcache php-imagick php-gettext php7.2-zip php7.2-mbstring php-soap php7.2-soap
    echo -e "[${green}DONE${NC}]\n"
    echo -n "Installing Opcache and APCu... "
    apt_install php7.2-opcache php-apcu
    echo -e "[${green}DONE${NC}]\n"
    echo -n "Installing PHP-FPM... "
    apt_install php7.2-fpm
    echo -e "[${green}DONE${NC}]\n"
    echo -n "Installing needed programs for PHP and Apache (mcrypt, etc.)... "
    apt_install mcrypt imagemagick memcached curl tidy snmp
    echo -e "[${green}DONE${NC}]\n"
    if [ "$CFG_PHPMYADMIN" == "yes" ]; then
      echo "==========================================================================================="
      echo "Attention: When asked 'Configure database for phpmyadmin with dbconfig-common?' select 'NO'"
      echo "Due to a bug in dbconfig-common, this can't be automated."
      echo "==========================================================================================="
      echo "Press ENTER to continue... "
      read DUMMY
      echo -n "Installing phpMyAdmin... "
      apt-get -y install phpmyadmin
      echo -e "[${green}DONE${NC}]\n"
    fi
    # if [ "$CFG_XCACHE" == "yes" ]; then
      # echo -n "Installing XCache... "
      # apt_install php7-xcache
      # echo -e "[${green}DONE${NC}]\n"
    # fi
    echo -n "Activating Apache modules... "
    a2enmod suexec > /dev/null 2>&1
    a2enmod rewrite > /dev/null 2>&1
    a2enmod ssl > /dev/null 2>&1
    a2enmod actions > /dev/null 2>&1
    a2enmod include > /dev/null 2>&1
    a2enmod cgi > /dev/null 2>&1
    a2enmod dav_fs > /dev/null 2>&1
    a2enmod dav > /dev/null 2>&1
    a2enmod auth_digest > /dev/null 2>&1
    # NOTE(review): "fastcgi" needs libapache2-mod-fastcgi, which is not in
    # the package list above -- errors are hidden by the redirect; confirm.
    a2enmod fastcgi > /dev/null 2>&1
    a2enmod proxy_fcgi > /dev/null 2>&1
    a2enmod alias > /dev/null 2>&1
    # a2enmod fcgid > /dev/null 2>&1
    echo -e "[${green}DONE${NC}]\n"
    echo -n "Restarting Apache... "
    service apache2 restart
    echo -e "[${green}DONE${NC}]\n"
  elif [ "$CFG_WEBSERVER" == "nginx" ]; then
    CFG_NGINX=y
    CFG_APACHE=n
    echo -n "Installing Web server (nginx) and modules... "
    # Apache may ship preinstalled; stop it and remove it from the boot
    # sequence before bringing up nginx.
    service apache2 stop
    hide_output update-rc.d -f apache2 remove
    apt_install nginx
    service nginx start
    echo -e "[${green}DONE${NC}]\n"
    # apt_install php7.2 php7.2-common php7.2-gd php7.2-mysql php7.2-imap php7.2-cli php7.2-cgi php-pear php-auth php7.2-mcrypt mcrypt imagemagick libruby php7.2-curl php7.2-intl php7.2-pspell php7.2-recode php7.2-sqlite3 php7.2-tidy php7.2-xmlrpc php7.2-xsl memcached php-memcache php-imagick php-gettext php7.2-zip php7.2-mbstring php7.2-fpm php7.2-opcache php-apcu
    echo -n "Installing PHP-FPM... "
    apt_install php7.2-fpm
    echo -e "[${green}DONE${NC}]\n"
    echo -n "Installing PHP and modules... "
    apt_install php7.2 php7.2-common php7.2-gd php7.2-mysql php7.2-imap php7.2-cli php7.2-cgi php-pear mcrypt imagemagick libruby php7.2-curl php7.2-intl php7.2-pspell php7.2-recode php7.2-sqlite3 php7.2-tidy php7.2-xmlrpc php7.2-xsl memcached php-memcache php-imagick php-gettext php7.2-zip php7.2-mbstring php-soap php7.2-soap
    # Harden PHP-FPM (cgi.fix_pathinfo=0) and set the configured timezone.
    sed -i "s/;cgi.fix_pathinfo=1/cgi.fix_pathinfo=0/" /etc/php/7.2/fpm/php.ini
    TIME_ZONE=$(echo "$TIME_ZONE" | sed -n 's/ (.*)$//p')
    sed -i "s/;date.timezone =/date.timezone=\"${TIME_ZONE//\//\\/}\"/" /etc/php/7.2/fpm/php.ini
    echo -e "[${green}DONE${NC}]\n"
    echo -n "Reloading PHP-FPM... "
    # bugfix: the service is named php7.2-fpm on Ubuntu 18.04; the original
    # "service php7-fpm reload" could never reload the right unit.
    service php7.2-fpm reload
    echo -e "[${green}DONE${NC}]\n"
    echo -n "Installing fcgiwrap... "
    apt_install fcgiwrap
    echo -e "[${green}DONE${NC}]\n"
    echo "phpmyadmin phpmyadmin/reconfigure-webserver multiselect none" | debconf-set-selections
    # - DISABLED DUE TO A BUG IN DBCONFIG - echo "phpmyadmin phpmyadmin/dbconfig-install boolean false" | debconf-set-selections
    echo "dbconfig-common dbconfig-common/dbconfig-install boolean false" | debconf-set-selections
    echo -n "Installing phpMyAdmin... "
    apt-get -y install phpmyadmin
    echo "With nginx phpMyAdmin is accessibile at http://$CFG_HOSTNAME_FQDN:8081/phpmyadmin or http://${IP_ADDRESS[0]}:8081/phpmyadmin"
    echo -e "[${green}DONE${NC}]\n"
  fi
  echo -n "Installing Let's Encrypt (letsencrypt)... "
  apt_install certbot
  echo -e "[${green}DONE${NC}]\n"
}
|
servisys/ispconfig_setup
|
distros/ubuntu-18.04/install_webserver.sh
|
Shell
|
gpl-3.0
| 5,679 |
#!/bin/bash
#usage: tar7z source destination [options]
# Wrap tar in 7z so file permissions survive 7z's compression.
SOURCE=$1
DESTINATION=$2
ACTION=$3
# everything after the third argument is passed straight to 7z
shift; shift; shift
OPTIONS_7Z=$@
show_help() {
echo -e "Tar7Z - Preserve permissions, archive and compress/extract with 7z\n"
echo -e "Usage: usage: tar7z source destination action [7z_options]\n"
echo -e "ACTION:"
echo -e "-c compress"
echo -e "-x extract"
EXIT_CODE=$1
if [ -z "$EXIT_CODE" ]; then EXIT_CODE=0; fi
exit $EXIT_CODE
}
if [ "$SOURCE" = "--help" ] || [ "$SOURCE" = "-help" ] || [ "$SOURCE" = "-h" ]; then
show_help 0
fi
if [ "$ACTION" = "-c" ]; then
tar -cvpf - $SOURCE | 7z a $OPTIONS_7Z -si "$DESTINATION"
elif [ "$ACTION" = "-x" ]; then
7z x -so "$SOURCE" | tar xvpf - -C "$DESTINATION"
else
show_help 1
fi
|
alr46664/opensuse-auto
|
Tar7Z/tar7z.sh
|
Shell
|
gpl-3.0
| 767 |
#!/bin/bash
# Copyright (C) 2017 Christopher Towner
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
# Utility script for verifying invoices (pdf) billto info isn't pushed into second page.
# check dir given
if [ $# -ne 1 ]; then
        echo "Usage: $0 <dir>"; exit 1;
fi
# A well-formed invoice's second page is an intentionally-blank filler page.
# (The old pattern ended in "blank*", which only made the final 'k' optional;
# the trailing wildcard was unnecessary for an unanchored match.)
pattern="^\s*This page is intentionally left blank"
# private temp file so concurrent runs don't clobber each other
tmpfile=$(mktemp) || exit 1
trap 'rm -f "$tmpfile"' EXIT
for f in "$1"/*.pdf; do
        # extract page 2 only; pdfseparate fails for single-page PDFs -> skip
        pdfseparate -f 2 -l 2 "${f}" "$tmpfile" 2>/dev/null || continue
        if ! [[ $(ps2ascii "$tmpfile") =~ ${pattern} ]]; then
                # page 2 carries real content: report this invoice
                echo "$f"
        fi
done
|
christopwner/netsuite-utils
|
validate-invoice.sh
|
Shell
|
gpl-3.0
| 1,118 |
#!/bin/bash
# Build the weather tools and deploy them to the "surf" host via scp.
gcc client.c network.c -o weatherclient || exit
# libraries must follow the sources: linkers resolve symbols left-to-right,
# so -lm before the .c files could leave math symbols unresolved
gcc -ggdb parser.c datautils.c network.c -lm -o parser || exit
gcc conftool.c network.c -o conftool || exit
scp conftool surf:cgi-bin/
scp parser surf:bin/
scp weatherclient surf:bin/
|
surfmikko/wstools
|
scripts/mk.sh
|
Shell
|
gpl-3.0
| 246 |
#!/usr/bin/ksh
#memory calculator
# Report total/used/free physical memory on an AIX host, in MB.
# svmon -G line 2, column 3 = in-use memory; /256 assumes 4 KB pages -> MB
# (TODO confirm page size on the target AIX level)
um=`svmon -G | head -2|tail -1| awk {'print $3'}`
um=`expr $um / 256`
# lsattr realmem attribute is reported in KB; /1024 -> MB
tm=`lsattr -El sys0 -a realmem | awk {'print $2'}`
tm=`expr $tm / 1024`
fm=`expr $tm - $um`
# NOTE: ksh's builtin echo interprets \n escapes (unlike bash's default echo)
echo "\n-----------------------";
echo "System : `hostname`";
echo "-----------------------\n";
echo "Memory Information\n";
echo "total memory = $tm MB"
echo "free memory = $fm MB"
echo "used memory = $um MB"
echo "\n-----------------------\n";
|
zoftrix/OldScript
|
AIX/kmemory.sh
|
Shell
|
gpl-3.0
| 455 |
#!/bin/bash
# Look up one account in the GRX directory by sAMAccountName ($1).
# -Q: SASL quiet mode, -LLL: bare LDIF output, ldif-wrap=no: keep long lines
# unwrapped; stderr is merged into stdout for the caller.
# NOTE(review): base DN "dc = grx" contains spaces around '=' -- unusual;
# confirm the server actually accepts this form.
ldapsearch -QLLL -o ldif-wrap=no -b "dc = grx" "(samAccountName=$1)" 2>&1
|
aavidad/grx-asistencia
|
ldap/ldap.sh
|
Shell
|
gpl-3.0
| 89 |
#! /bin/bash
# Configuration section: darktable darkroom module-group layout.
# do not touch the following 5 definitions
BASIC=1
TONE=2
COLOR=3
CORRECT=4
EFFECT=5
###
# module-group order, just reorder the module-group
###
# position in this array = display order of the group tabs in darktable
module_group=(
$BASIC
$CORRECT
$TONE
$COLOR
$EFFECT
)
###
# move module from one group to another
###
# each array lists the modules (by internal name) assigned to that group
group_basic=(
'basecurve'
'clipping'
'demosaic'
'exposure'
'graduatednd'
'colorin'
'invert'
'lens'
'flip'
'colorout'
'ashift'
'rawprepare'
'rotatepixels'
'scalepixels'
'tonemap'
'profile_gamma'
'temperature'
'filmic'
'basicadj'
)
group_tone=(
'bloom'
'colisa'
'atrous'
'relight'
'globaltonemap'
'levels'
'rgblevels'
'bilat'
'shadhi'
'tonecurve'
'zonesystem'
'rgbcurve'
)
group_color=(
'channelmixer'
'colorbalance'
'colorcontrast'
'colorcorrection'
'colorchecker'
'colormapping'
'colortransfer'
'colorzones'
'colorize'
'lowlight'
'lut3d'
'monochrome'
'splittoning'
'velvia'
'vibrance'
)
group_correct=(
'cacorrect'
'colorreconstruct'
'defringe'
'bilateral'
'nlmeans'
'denoiseprofile'
'dither'
'hazeremoval'
'highlights'
'hotpixels'
'rawdenoise'
)
group_effect=(
'borders'
'grain'
'highpass'
'liquify'
'lowpass'
'retouch'
'sharpen'
'soften'
'spots'
'vignette'
'watermark'
)
FILE=$HOME/.config/darktable/darktablerc
[ ! -f $FILE ] && echo darktable configuration file 'darktablerc' does not exists && exit 1
BCK="$FILE.iop-conf-backup-$(date +%Y%m%d-%H%M%S)"
cp $FILE $BCK
echo backup will be created in:
echo $BCK
echo Do you want to continue?
select yn in "Yes" "No"; do
case $yn in
Yes ) break;;
No ) exit;;
esac
done
sed -i "/plugins\/darkroom\/group_order\//d" $FILE
pos=0
while [ "x${module_group[pos]}" != "x" ]; do
group=${module_group[pos]}
pos=$(( $pos + 1 ))
echo "plugins/darkroom/group_order/$group=$pos" >> $FILE
done
function get_group_pos()
{
    # Print the 1-based display position of group id $1 inside the global
    # module_group array. Prints nothing if the id is not present.
    local GROUP=$1
    # 'local' keeps the scan index from clobbering the caller's global 'pos'
    local pos=0
    while [ "x${module_group[pos]}" != "x" ]; do
        if [ "${module_group[pos]}" == "$GROUP" ]; then
            echo $(( pos + 1 ))
            # stop at the first hit; the old code kept scanning and would
            # print twice if a group id were listed twice
            break
        fi
        pos=$(( pos + 1 ))
    done
}
function set_iop_group()
{
    # Append "plugins/darkroom/<module>/modulegroup=<pos>" lines to $FILE for
    # every module name passed after the group id.
    # $1 = group id (translated to its tab position via get_group_pos)
    # $2.. = module names
    local GROUP_POS
    # declaration split from assignment so the command's status isn't masked
    GROUP_POS=$(get_group_pos "$1")
    shift
    local name
    for name in "$@"; do
        # the old while-loop stopped at the first empty entry; keep that
        [ -z "$name" ] && break
        echo "plugins/darkroom/$name/modulegroup=$GROUP_POS" >> "$FILE"
    done
}
# remove every per-module group assignment, then re-add them from the arrays
# defined in the configuration section above
sed -i "/plugins\/darkroom\/[^/]*\/modulegroup/d" $FILE
set_iop_group $BASIC "${group_basic[@]}"
set_iop_group $TONE "${group_tone[@]}"
set_iop_group $COLOR "${group_color[@]}"
set_iop_group $CORRECT "${group_correct[@]}"
set_iop_group $EFFECT "${group_effect[@]}"
|
edgardoh/darktable
|
tools/iop-layout.sh
|
Shell
|
gpl-3.0
| 2,912 |
#!/bin/bash
# Print the first IPv4 address assigned to wlan0, with the /prefix stripped.
# awk replaces the old grep|head pair: print the first "inet " line's
# second field, then stop reading.
ip addr show wlan0 | awk '/inet / {print $2; exit}' | cut -d/ -f1
|
HestiaPi/hestia-touch-openhab
|
home/pi/scripts/getwlan0ip.sh
|
Shell
|
gpl-3.0
| 92 |
#!/bin/bash
# SLURM job: sort bedGraph files and convert them to bigWig.
#SBATCH -J bdg2bw
#SBATCH -n 1 # Use 1 cores for the job
#SBATCH -t 0-12:00 # Runtime in D-HH:MM
#SBATCH -p serial_requeue # Partition to submit to
#SBATCH --mem=70000 # Memory pool for all cores (see also --mem-per-cpu)
#SBATCH -o bdg2bw.%A.out # File to which STDOUT will be written
#SBATCH -e bdg2bw.%A.err # File to which STDERR will be written
source new-modules.sh
module purge
module load bedtools2
# bedgraphToBigWig requires coordinate-sorted input; galGal.size maps
# chromosome names to lengths (chicken genome)
for FILE in $(ls no*pool*bdg); do bedtools sort -i $FILE > sort_${FILE}; /n/home12/pgrayson/programs/bigWig/bedgraphToBigWig sort_${FILE} galGal.size ${FILE}.bw; done
|
tsackton/ratite-genomics
|
08_atacseq/bedgraph2BW.sh
|
Shell
|
gpl-3.0
| 648 |
#!/bin/sh
# Blackbox test: verify that "smbclient ... mget" can recursively fetch a
# share directory, then clean up the downloaded files.
if [ $# -lt 6 ]; then
cat <<EOF
Usage: $0 smbclient3 server share user password directory
EOF
exit 1;
fi
# subunit.sh provides subunit_start_test / subunit_pass_test / testit / testok
incdir=`dirname $0`/../../../testprogs/blackbox
. $incdir/subunit.sh
failed=0
SMBCLIENT3="$1"; shift
SERVER="$1"; shift
SHARE="$1"; shift
USERNAME="$1"; shift
PASSWORD="$1"; shift
DIRECTORY="$1"; shift
# Can't use "testit" here -- it somehow breaks the -c command passed
# to smbclient into two, spoiling the "mget"
name="smbclient mget"
subunit_start_test "$name"
output=$("$SMBCLIENT3" //"$SERVER"/"$SHARE" \
-U"$USERNAME"%"$PASSWORD" -c "recurse;prompt;mget $DIRECTORY")
status=$?
if [ x$status = x0 ]; then
subunit_pass_test "$name"
else
echo "$output" | subunit_fail_test "$name"
fi
# mget downloaded $DIRECTORY into the cwd; remove the fetched file and dir
testit "rm foo" rm "$DIRECTORY"/foo || failed=`expr $failed + 1`
testit "rmdir $DIRECTORY" rmdir "$DIRECTORY" || failed=`expr $failed + 1`
testok $0 $failed
|
kernevil/samba
|
source3/script/tests/test_smbclient_mget.sh
|
Shell
|
gpl-3.0
| 885 |
#!/bin/bash
#
# hostInfo.sh ver 0.0.2
#
# Since the old, free version of maxminds' database has some info that
# the new version has not, it might be a good idea to query it also
# (using geoiplookup). Also, resolving the hostname of a given ip
# would be nice. This script does all this. If you have geoiplookup
# installed, it will query both the "normal" (Country) DB as well as
# the City Edition (if it is located in /usr/share/GeoIP).
# If you have host installed, host <ip> will also be called, which
# should reveal the hostname(s) for the given IP.
#
# Copyright © 2015 GGShinobi ([email protected])
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
cityEdition="/usr/share/GeoIP/GeoLiteCity.dat"
# probe once, up front, for the optional helper tools
unset geoiplookupAvailable
unset cityEditionAvailable
if geoiplookup -h &> /dev/null; then
    geoiplookupAvailable=true
    [ -f "$cityEdition" ] && cityEditionAvailable=true
fi
unset hostAvailable
host localhost &> /dev/null && hostAvailable=true
# "$@" (not $*) so arguments containing spaces stay intact
for host in "$@"; do
    if [[ $host =~ ^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then
        # the argument already is an IPv4 address
        ip=$host
        host=""
    else
        # resolve the hostname; take the last address dig reports
        ip=$(dig +short "$host" | tail -n1)
        host="$host "
    fi
    geoIP2.py "$ip"
    [ $geoiplookupAvailable ] && geoiplookup "$ip"
    [ $cityEditionAvailable ] && geoiplookup -f "$cityEdition" "$ip"
    [ $hostAvailable ] && host "$ip"
    echo "(End of info on host ${host}with ip: $ip)"
done
|
GGShinobi/GeoIP2
|
hostInfo.sh
|
Shell
|
gpl-3.0
| 1,997 |
#!/bin/sh
# Link this repository into place as the user's vim configuration.
# Expansions are quoted so paths containing spaces survive word splitting.
ln -s "$PWD/vimrc" "$HOME/.vimrc"
ln -s "$PWD" "$HOME/.vim"
|
kmcculloch/myvim
|
install.sh
|
Shell
|
gpl-3.0
| 63 |
#!/bin/sh
#-------------------------------------------------------------------
# config.sh: This file is read at the beginning of the execution of the ASGS to
# set up the runs that follow. It is reread at the beginning of every cycle,
# every time it polls the datasource for a new advisory. This gives the user
# the opportunity to edit this file mid-storm to change config parameters
# (e.g., the name of the queue to submit to, the addresses on the mailing list,
# etc)
#-------------------------------------------------------------------
#
# Copyright(C) 2020 Jason Fleming
#
# This file is part of the ADCIRC Surge Guidance System (ASGS).
#
# The ASGS is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# ASGS is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE.  See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# the ASGS.  If not, see <http://www.gnu.org/licenses/>.
#-------------------------------------------------------------------
# NOTE: $SCRIPTDIR and $si are provided by the ASGS driver that sources
# this file; they are not defined here.
#-- testing
QSCRIPTTEMPLATE="$SCRIPTDIR/qscript.template-test"
# Fundamental
INSTANCENAME=southfl_v11-1_nam_bde # "name" of this ASGS process
ASGSADMIN="[email protected]"
ACCOUNT=ASC20001
QOS=vippj_p3000 # for priority during a storm
QUEUENAME=normal # same as SLURM partition
SERQUEUE=normal
PPN=56
GROUP="G-822560"
RMQMessaging_Enable="on"
RMQMessaging_Transmit="on"
# Input files and templates
GRIDNAME="southfl_v11-1_final"
source $SCRIPTDIR/config/mesh_defaults.sh
#FTPSITE=ftp.nhc-replay.stormsurge.email
#RSSSITE=nhc-replay.stormsurge.email
# Physical forcing (defaults set in config/forcing_defaults.sh)
TIDEFAC=on # tide factor recalc
HINDCASTLENGTH=30.0 # length of initial hindcast, from cold (days)
BACKGROUNDMET=on # NAM download/forcing
FORECASTCYCLE="00,06,12,18"
TROPICALCYCLONE=off # tropical cyclone forcing
STORM=03 # storm number, e.g. 05=ernesto in 2006
YEAR=2020 # year of the storm
WAVES=off # wave forcing
REINITIALIZESWAN=no # used to bounce the wave solution
VARFLUX=off # variable river flux forcing
#STATICOFFSET=0.30
#
CYCLETIMELIMIT="99:00:00"
# Computational Resources (related defaults set in platforms.sh)
NCPU=999 # number of compute CPUs for all simulations
NCPUCAPACITY=9999
NUMWRITERS=1
# Post processing and publication
INTENDEDAUDIENCE=general # "general" | "developers-only" | "professional"
#POSTPROCESS=( accumulateMinMax.sh createMaxCSV.sh cpra_slide_deck_post.sh includeWind10m.sh createOPeNDAPFileList.sh opendap_post.sh )
POSTPROCESS=( createMaxCSV.sh includeWind10m.sh createOPeNDAPFileList.sh opendap_post.sh )
#OPENDAPNOTIFY="[email protected],[email protected],[email protected],[email protected]" #,[email protected]"
OPENDAPNOTIFY="[email protected],[email protected],[email protected],[email protected],[email protected],[email protected]" #,[email protected]"
NOTIFY_SCRIPT=ut-nam-notify.sh
TDS=( tacc_tds lsu_tds renci_tds )
# Initial state (overridden by STATEFILE after ASGS gets going)
COLDSTARTDATE=2020062700
HOTORCOLD=coldstart
LASTSUBDIR=null
#
# Scenario package
#
#PERCENT=default
SCENARIOPACKAGESIZE=2
# $si = scenario index supplied by the driver; -2/-1 are the spinup stages
case $si in
-2)
ENSTORM=hindcast
;;
-1)
# do nothing ... this is not a forecast
ENSTORM=nowcast
;;
0)
ENSTORM=namforecastWind10m
source $SCRIPTDIR/config/io_defaults.sh # sets met-only mode based on "Wind10m" suffix
;;
1)
ENSTORM=namforecast
;;
*)
echo "CONFIGRATION ERROR: Unknown ensemble member number: '$si'."
;;
esac
#
PREPPEDARCHIVE=prepped_${GRIDNAME}_${INSTANCENAME}_${NCPU}.tar.gz
HINDCASTARCHIVE=prepped_${GRIDNAME}_hc_${INSTANCENAME}_${NCPU}.tar.gz
|
jasonfleming/asgs
|
config/2020/asgs_config_southfl_v11-1_final_nam_bde-frontera.sh
|
Shell
|
gpl-3.0
| 4,187 |
#!/bin/bash
# Local Docker Builds: build the rwhales image, tag it for GCR, and push.
# Stop on the first failure so a broken build never tags/pushes a stale image.
set -e
docker build -t "rwhales:latest" .
docker tag rwhales gcr.io/api-project-773889352370/rwhales
gcloud docker -- push gcr.io/api-project-773889352370/rwhales
|
bw4sz/WhalePhys
|
Cloud/build_local.sh
|
Shell
|
gpl-3.0
| 193 |
#!/bin/bash
# Install Docker CE on Ubuntu 18.04 (bionic) from Docker's own repository,
# keeping a copy of these helper scripts under ~/docker/.
echo " install current docker easy script to ~/docker/ "
mkdir -p ~/docker   # -p: don't fail when the directory already exists
cp * ~/docker/.
sudo apt update
sudo apt install apt-transport-https ca-certificates curl software-properties-common
# add Docker's official GPG key and apt repository
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu bionic stable"
sudo apt update
# show where the docker-ce candidate comes from before installing it
apt-cache policy docker-ce
sudo apt install docker-ce
|
MasterJJ/utility
|
easy_docker/install_docker_script.sh
|
Shell
|
gpl-3.0
| 440 |
#!/bin/bash
# Auto-generated PegasusLite job wrapper (Pegasus 4.7.0): stages a scratch
# working directory, resolves the Pegasus worker package, then runs the
# task binary under pegasus-kickstart for provenance capture.
set -e
pegasus_lite_version_major="4"
pegasus_lite_version_minor="7"
pegasus_lite_version_patch="0"
pegasus_lite_enforce_strict_wp_check="true"
pegasus_lite_version_allow_wp_auto_download="true"
# pegasus-lite-common.sh supplies pegasus_lite_init, work-dir helpers and
# the signal/exit handlers installed below
. pegasus-lite-common.sh
pegasus_lite_init
# cleanup in case of failures
trap pegasus_lite_signal_int INT
trap pegasus_lite_signal_term TERM
trap pegasus_lite_exit EXIT
echo -e "\n################################ Setting up workdir ################################"  1>&2
# work dir
export pegasus_lite_work_dir=$PWD
pegasus_lite_setup_work_dir
echo -e "\n###################### figuring out the worker package to use ######################"  1>&2
# figure out the worker package to use
pegasus_lite_worker_package
echo -e "\n##################### setting the xbit for executables staged #####################"  1>&2
# set the xbit for any executables staged
/bin/chmod +x example_workflow-taskevent_4-1.0
echo -e "\n############################# executing the user tasks #############################"  1>&2
# execute the tasks
set +e
# exit code captured so the EXIT trap can report the job status
pegasus-kickstart  -n example_workflow::taskevent_4:1.0 -N ID0000010 -R condorpool  -L example_workflow -T 2016-11-09T02:49:01+00:00 ./example_workflow-taskevent_4-1.0
job_ec=$?
set -e
|
elainenaomi/sciwonc-dataflow-examples
|
dissertation2017/Experiment 1A/instances/11_2_workflow_full_10files_secondary_wmj_3sh_3rs_with_annot_with_proj_3s_hash/dags/ubuntu/pegasus/example_workflow/20161109T024901+0000/00/00/taskevent_4_ID0000010.sh
|
Shell
|
gpl-3.0
| 1,237 |
#!/bin/bash
# Auto-generated PegasusLite job wrapper (Pegasus 4.7.0): stages a scratch
# working directory, resolves the Pegasus worker package, then runs the
# task binary under pegasus-kickstart for provenance capture.
set -e
pegasus_lite_version_major="4"
pegasus_lite_version_minor="7"
pegasus_lite_version_patch="0"
pegasus_lite_enforce_strict_wp_check="true"
pegasus_lite_version_allow_wp_auto_download="true"
# pegasus-lite-common.sh supplies pegasus_lite_init, work-dir helpers and
# the signal/exit handlers installed below
. pegasus-lite-common.sh
pegasus_lite_init
# cleanup in case of failures
trap pegasus_lite_signal_int INT
trap pegasus_lite_signal_term TERM
trap pegasus_lite_exit EXIT
echo -e "\n################################ Setting up workdir ################################"  1>&2
# work dir
export pegasus_lite_work_dir=$PWD
pegasus_lite_setup_work_dir
echo -e "\n###################### figuring out the worker package to use ######################"  1>&2
# figure out the worker package to use
pegasus_lite_worker_package
echo -e "\n##################### setting the xbit for executables staged #####################"  1>&2
# set the xbit for any executables staged
/bin/chmod +x wikiflow-sessioncompute_4-1.0
echo -e "\n############################# executing the user tasks #############################"  1>&2
# execute the tasks
set +e
# exit code captured so the EXIT trap can report the job status
pegasus-kickstart  -n wikiflow::sessioncompute_4:1.0 -N ID0000007 -R condorpool  -L example_workflow -T 2017-01-24T23:15:29+00:00 ./wikiflow-sessioncompute_4-1.0
job_ec=$?
set -e
|
elainenaomi/sciwonc-dataflow-examples
|
dissertation2017/Experiment 2/logs/w-11_1/20170124T231530+0000/00/00/sessioncompute_4_ID0000007.sh
|
Shell
|
gpl-3.0
| 1,228 |
#!/usr/bin/env bash
# pid_get NAME: print the PID of the first "ps aux" entry whose line
# contains NAME, or -1 when no match (or no numeric PID field) is found.
pid_get()
{
local process_name=$1
local process_list=$(ps aux)
while read line
do
local cur_process=$line
# the line containing "PID" is the ps header row; remember its fields
if [ "${cur_process/"PID"}" != "$cur_process" ]
then
local process_header=($line)
fi
# first line mentioning the target name is taken as the match
if [ "${cur_process/$process_name}" != "$cur_process" ]
then
local process_name_found=($line)
break
fi
done <<< $process_list
local cnt="0"
# find the PID column's index within the header row
for element in "${process_header[@]}"
do
if [ $element == "PID" ]
then
break
fi
# NOTE: this builds the string "0+1+1..."; it still works because the
# array subscripts below evaluate it as an arithmetic expression
cnt=$cnt+1
done
# prefer the column under the PID header; fall back to the next column
# (handles rows whose earlier fields contain embedded spaces)
if [[ ${process_name_found[$cnt]} =~ ^-?[0-9]+$ ]]
then
echo ${process_name_found[$cnt]}
elif [[ ${process_name_found[$cnt+1]} =~ ^-?[0-9]+$ ]]
then
echo ${process_name_found[$cnt+1]}
else
echo -1
fi
}
# pid_kill NAME: terminate the process matching NAME, escalating signals:
# pass 0 sends the default signal via plain kill, pass 1 sends -15 (TERM),
# pass 2 sends -9 (KILL). The PID is re-resolved each pass, so success is
# detected by the process no longer appearing in ps.
pid_kill()
{
local cnt="0"
while [ $cnt -lt 4 ]
do
local process_id=$(pid_get $1)
if [ $process_id -lt 0 ]
then
# no match: either it never existed (cnt==0) or a kill worked
if [ $cnt -eq 0 ]
then
printf "Fail to find process of $1\n"
else
printf "Successfully kill process of $1 in loop $cnt\n"
fi
break
else
if [ $cnt -eq 0 ]
then
kill $process_id
elif [ $cnt -eq 1 ]
then
kill -15 $process_id
elif [ $cnt -eq 2 ]
then
kill -9 $process_id
else
# still alive after SIGKILL: give up
printf "Bad process, fail to kill it"
fi
fi
cnt=$[$cnt+1]
done
}
main()
{
# Entry point: kill the process whose name is the first argument.
# "$1"/"$@" are quoted so names containing spaces survive word splitting
# (the old 'main $@' / 'pid_kill $1' split such names apart).
pid_kill "$1"
}
main "$@"
|
huhumt/vimrc_huhumt
|
pid_method.sh
|
Shell
|
gpl-3.0
| 1,680 |
#!/bin/bash
# Driver for the PLTAG incremental parser with incremental SRL: collects all
# tunable settings into shell variables, then launches pltag.runtime.Parse
# with them. Run from the scripts/ directory (note the 'cd ..' before java).
# Maximum memory (m for MBs, g for GBs)
memory=7000m
# Number of threads to use. If set higher than 1 then sentences are going to be parsed in parallel
numThreads=1
# Output Directory
execDir=output/sample_output/iSRL
# Input file containing input sentences
inputPath=input/sample_input_pltag
# Input file containing input sentences in CoNLL format with SRL annotation
# (Use only in conjunction with lexiconType=oracle, oracleRoles, and fullLex=false)
inputPathConll=input/sample_input_conll
# Input type. Currently supported types: plain, posTagged, pltag, dundee
inputType=pltag
# Beam size
beamSize=400
# Nbest list size
nBest=250
# If set to true then the parser uses the provided gold POS tags (posTagged inputType and pltag), or
# predicted ones, computed using the Stanford POS tagger
goldPosTags=false
# iSRL - Argument Classifier Paths
argumentLabellerModel=data/isrl_classifiers/bilexical_no_sense_opPath.labeller.tuned.model
argumentIdentifierModel=data/isrl_classifiers/bilexical_no_sense_opPath.identifier.weight.tuned.model
featureIndexers=data/isrl_classifiers/bilexical_no_sense_syntactic_opPath.indexers
# iSRL - Lexicon model type: set to 'oracle' in order to use gold standard lexicon entries for the
# input examples and gold standard oracle semantic role labels (use ONLY in conjunction with a single test
# file that contains gold PLTAG trees and lexicon entries, and a CoNLL input file that contains gold
# SRL annotation), set to 'oracleAllRoles' to use gold standard lexicon entries only and all annotated
# SRL roles with ambiguity, set to 'parsedAllRoles (DEFAULT) to use the full lexicon and full SRL annotations.
lexiconType=parsedAllRoles
# iSRL - Apply heuristics adopted in the CoNLL 2008/9 annotation: preposition is head of a PP, infinitive marker
# (IM) is head of VPs, subordinating conjunction (IN/DT) is the head in SBARs. (DEFAULT=true)
applyConllHeuristics=true
# Use the full lexicon and not just the gold standard trees (DEFAULT=true).
# Use in conjunction with lexiconType=parsedAllRoles. For lexiconType=oracle, oracleRoles set to false.
useFullLexicon=true
## OUTPUT
# If set to true then the parser generates syntactic surprisal, verification and combined scores per word
estimateProcDifficulty=false
# Output incremental (in)-complete semantic role triples.
printIncrementalDependencies=true
# Output incremental complete only semantic role triples.
printIncrementalCompleteDependencies=false
# If set to true, then the parser operates in interactive mode and accepts input from the console. Simply, enter tokenised sentences. Note, that the file set in the inputPath parameter is bypassed.
interactiveMode=false
## EVALUATION
# If set to true then the parser computes incremental evalb F1 scores
evaluateIncrementalDependencies=true
# Path and prefix to parameter files
paramsPath=data/params/0221_noCountNoneAdj_final
# Path and prefix to lexicon files
lexiconPath=data/lexicon/Lexicon_wsj_0221_withSemantics_tracesFix_files/Lexicon_wsj_0221_withSemantics_tracesFix
# Parameter files suffix
paramsSuffix=txt.final
# run from the repository root so the relative data/ and bin/ paths resolve
cd ..
java -Xmx${memory} -cp bin/PLTAG.jar:lib/Helper.jar:lib/commons-collections4-4.1.jar:lib/stanford-corenlp-3.5.1.jar:stanford-corenlp-3.5.1-models.jar:lib/concurrentlinkedhashmap-lru-1.4.jar:lib/liblinear-1.94.jar \
pltag.runtime.Parse \
-numThreads $numThreads \
-create \
-overwriteExecDir \
-lexicon ${lexiconPath}-Freq-Parser-tag \
-predLexicon ${lexiconPath}-Freq-Parser-prediction \
-listOfFreqWords data/wordsFreqOverFive.txt \
-treeFrequencies ${paramsPath}/TreeFrequencies.${paramsSuffix} \
-wordFrequencies ${paramsPath}/WordFrequencies.${paramsSuffix} \
-superTagStruct ${paramsPath}/SuperTagStruct.${paramsSuffix} \
-superTagFringe ${paramsPath}/SuperTagFringe.${paramsSuffix} \
-beamMin ${beamSize} \
-beamEntry ${beamSize} \
-beamProp 8 \
-nBest ${nBest} \
-execDir ${execDir} \
-inputPaths ${inputPath} ${inputPathConll} \
-examplesInSingleFile \
-timeOutStage2 60000 \
-timeOutStage1 300000 \
-useSemantics \
-semanticsModel ${lexiconType} \
-fullLex ${useFullLexicon} \
-useClassifiers \
-argumentLabellerModel ${argumentLabellerModel} \
-argumentIdentifierModel ${argumentIdentifierModel} \
-featureIndexers ${featureIndexers} \
-applyConllHeuristics ${applyConllHeuristics} \
-inputType ${inputType} \
-goldPosTags ${goldPosTags} \
-estimateProcDifficulty ${estimateProcDifficulty} \
-outputIncrementalDependencies ${printIncrementalDependencies} \
-outputCompletedIncrementalDependencies ${printIncrementalCompleteDependencies} \
-interactiveMode ${interactiveMode} \
-evaluateIncrementalDependencies ${evaluateIncrementalDependencies} \
-outputExampleFreq 10 \
-outputFullPred
#-inputPaths ${inputPath} ${inputPathConll} \
|
sinantie/PLTAG
|
scripts/pltag_iSRL.sh
|
Shell
|
gpl-3.0
| 4,747 |
#!/bin/bash -x
# @link http://techbrahmana.blogspot.co.uk/2013/10/creating-wildcard-self-signed.html
# NOTE: Copies of infographic-generator.san.key/infographic-generator.san.crt have been copied to /puppet/modules/sslcerts/files
# Generates a self-signed SAN certificate: encrypted key -> plain key ->
# CSR (prompts answered via the here-doc) -> signed cert with v3 extensions.
cd "$(dirname "$0")"
# Set Params
Country=GB
State=London
City=London
Organization="Crystalline Technologies"
Section=""
FQDN=infographic.jamesmcguigan.com
[email protected]
## Generate Private Key
openssl genrsa -des3 -passout pass:foobar -out infographic-generator.san.key.password 2048
## Convert the private key to an unencrypted format
openssl rsa -passin pass:foobar -in infographic-generator.san.key.password -out infographic-generator.san.key
## Create the certificate signing request
# the here-doc feeds openssl req's interactive prompts in order; the two
# trailing '.' lines leave the challenge password and optional company blank
openssl req -new -key infographic-generator.san.key -out infographic-generator.san.csr <<EOF
$Country
$State
$City
$Organization
$Section
$FQDN
$Email
.
.
EOF
## Sign the certificate with extensions
# self-signed (-signkey); SAN entries come from generate.san.conf's v3_req
openssl x509 -req -extensions v3_req -days 365 -in infographic-generator.san.csr -signkey infographic-generator.san.key -out infographic-generator.san.crt -extfile generate.san.conf
# -CA ../rootCA/infographic.rootCA.crt -CAkey ../rootCA/infographic.rootCA.key -CAcreateserial
#
#openssl genrsa -out infographic-generator.san.key 2048
#openssl req -new -nodes -out infographic-generator.san.csr -config infographic-generator.san.conf
#openssl x509 -req -CA ../rootCA/infographic.rootCA.pem -CAkey ../rootCA/infographic.rootCA.key -CAcreateserial -in infographic-generator.san.csr -out infographic-generator.san.crt -days 3650
##end
exit 0
|
JamesMcGuigan/infographic-generator
|
sslcert/san/generate-san.sh
|
Shell
|
gpl-3.0
| 1,608 |
# ~/.bash_functions_inc.git: sourced by ~/.bash_functions
# to provide additional git-related functions
# convenience-script for redmine-links:
# print "commit:<sha7> <sha7>" for the SHA-1 given as $1
shorten_sha1() {
    local full_sha=$1
    # bash substring expansion instead of spawning echo|cut
    SHORT=${full_sha:0:7}
    echo "commit:$SHORT $SHORT"
}
|
ehrenfeu/simplify
|
configs/bash_functions_git.inc.sh
|
Shell
|
gpl-3.0
| 233 |
#!/bin/bash
if [ "$#" -eq 0 ]
then
echo Compute max. memory per CPU for Gadget-2 simulations with PLACEHIGHRESREGION enabled.
echo
echo Usage:
echo
echo "   gadgetMaxMem.sh PM box extent Ncpu"
echo
exit
fi
# PM=$1, box=$2, extent=$3, Ncpu=$4 in awk
echo $@
# total = base PM grid (12 bytes/cell) + doubled high-res grid (16 bytes/cell)
# -- byte counts presumably match Gadget-2's PM memory layout; confirm there
echo $@ |awk '{print "Total mem: ", ( $1^3*12 + (2*$1)^3*16 ) / 1024^3, "GiB"}'
# per-CPU share; "fact" is the high-res region's fraction, capped at 1
echo $@ |awk '{fact=$2/$3/$4/2; fact=(fact>1)?1:fact; print "1 CPU mem: ", ( $1^3*12/$4 + (2*$1)^3*16*fact ) / 1024^3, "GiB"}'
|
ginnungagapgroup/ginnungagap
|
tools/zoomTools/gadgetMaxMem.sh
|
Shell
|
gpl-3.0
| 489 |
#!/bin/bash
# NOTE: To see what a particular program does, check out the "Program List.docx" file.
# @author Suryakant Bharti 2013
# Prompt for a file name; if a regular file of that name exists, print it.
# (The original wrapped this header in a bare '...' multi-line string, which
# the shell then executed as a command -> "command not found" at runtime.)
echo Enter file name
read file
# quoted so file names with spaces (or an empty answer) are handled safely
if [ -f "$file" ]
then
echo file exists
cat "$file"
fi
|
Suryakant-Bharti/Operating_System_Lab_Programs
|
programs/FilePrint.sh
|
Shell
|
gpl-3.0
| 202 |
# transpose a file; https://stackoverflow.com/a/1729980/5359531
t () {
    # Load the tab-separated file into a matrix keyed by (row, column),
    # track the widest row, then emit it column-by-column with cells
    # joined by a single space.
    awk '
    BEGIN { FS = "\t" }
    {
        if (NF > max_nf) { max_nf = NF }
        for (col = 1; col <= NF; col++) {
            cell[NR, col] = $col
        }
    }
    END {
        for (col = 1; col <= max_nf; col++) {
            out = cell[1, col]
            for (row = 2; row <= NR; row++) {
                out = out " " cell[row, col]
            }
            print out
        }
    }' "${1}"
}
|
stevekm/dotfiles
|
t.sh
|
Shell
|
gpl-3.0
| 359 |
#!/bin/bash
# Auto-generated PegasusLite job wrapper (Pegasus 4.7.0): stages a scratch
# working directory, resolves the Pegasus worker package, then runs the
# task binary under pegasus-kickstart for provenance capture.
set -e
pegasus_lite_version_major="4"
pegasus_lite_version_minor="7"
pegasus_lite_version_patch="0"
pegasus_lite_enforce_strict_wp_check="true"
pegasus_lite_version_allow_wp_auto_download="true"
# pegasus-lite-common.sh supplies pegasus_lite_init, work-dir helpers and
# the signal/exit handlers installed below
. pegasus-lite-common.sh
pegasus_lite_init
# cleanup in case of failures
trap pegasus_lite_signal_int INT
trap pegasus_lite_signal_term TERM
trap pegasus_lite_exit EXIT
echo -e "\n################################ Setting up workdir ################################"  1>&2
# work dir
export pegasus_lite_work_dir=$PWD
pegasus_lite_setup_work_dir
echo -e "\n###################### figuring out the worker package to use ######################"  1>&2
# figure out the worker package to use
pegasus_lite_worker_package
echo -e "\n##################### setting the xbit for executables staged #####################"  1>&2
# set the xbit for any executables staged
/bin/chmod +x wikiflow-init_0-1.0
echo -e "\n############################# executing the user tasks #############################"  1>&2
# execute the tasks
set +e
# exit code captured so the EXIT trap can report the job status
pegasus-kickstart  -n wikiflow::init_0:1.0 -N ID0000001 -R condorpool  -L example_workflow -T 2017-01-26T03:23:09+00:00 ./wikiflow-init_0-1.0
job_ec=$?
set -e
|
elainenaomi/sciwonc-dataflow-examples
|
dissertation2017/Experiment 2/instances/9_2_wikiflow_1sh_1s_noannot_wmj/dags/ubuntu/pegasus/example_workflow/20170126T032309+0000/00/00/init_0_ID0000001.sh
|
Shell
|
gpl-3.0
| 1,198 |
#!/bin/bash
# bashlint.sh - script to check the syntax of other Bash scripts
#
# Copyright (C) 2015 Thoronador
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
# print Bash version to see which version is used for syntax check
bash --version
echo
echo
# find all .sh files and run them through Bash's syntax check (bash -n).
# Path quoted so a checkout under a directory containing spaces still works;
# -I {} is the supported spelling of the deprecated 'xargs -i'.
find "${BASH_SOURCE%/*}/../../" -name '*.sh' -print0 | xargs -0 -I {} bash -n {}
# $? is xargs' status: non-zero when any bash -n invocation failed
if [[ $? -ne 0 ]]
then
  echo "Some scripts contain syntax errors!"
  echo "You should do something about it."
  echo 'And do it "soon(TM)".'
  exit 1
else
  echo "Syntax seems to be correct."
  echo "Please take this happy smilie with you. :)"
  exit 0
fi
|
Thoronador/copy-file-stats
|
tests/script-sanity/bashlint.sh
|
Shell
|
gpl-3.0
| 1,252 |
#!/bin/bash
# Auto-generated PegasusLite job wrapper (Pegasus 4.7.0): stages a scratch
# working directory, resolves the Pegasus worker package, then runs the
# task binary under pegasus-kickstart for provenance capture.
set -e
pegasus_lite_version_major="4"
pegasus_lite_version_minor="7"
pegasus_lite_version_patch="0"
pegasus_lite_enforce_strict_wp_check="true"
pegasus_lite_version_allow_wp_auto_download="true"
# pegasus-lite-common.sh supplies pegasus_lite_init, work-dir helpers and
# the signal/exit handlers installed below
. pegasus-lite-common.sh
pegasus_lite_init
# cleanup in case of failures
trap pegasus_lite_signal_int INT
trap pegasus_lite_signal_term TERM
trap pegasus_lite_exit EXIT
echo -e "\n################################ Setting up workdir ################################"  1>&2
# work dir
export pegasus_lite_work_dir=$PWD
pegasus_lite_setup_work_dir
echo -e "\n###################### figuring out the worker package to use ######################"  1>&2
# figure out the worker package to use
pegasus_lite_worker_package
echo -e "\n##################### setting the xbit for executables staged #####################"  1>&2
# set the xbit for any executables staged
/bin/chmod +x wikiflow-sessioncompute_4-1.0
echo -e "\n############################# executing the user tasks #############################"  1>&2
# execute the tasks
set +e
# exit code captured so the EXIT trap can report the job status
pegasus-kickstart  -n wikiflow::sessioncompute_4:1.0 -N ID0000007 -R condorpool  -L example_workflow -T 2017-01-25T13:53:10+00:00 ./wikiflow-sessioncompute_4-1.0
job_ec=$?
set -e
|
elainenaomi/sciwonc-dataflow-examples
|
dissertation2017/Experiment 2/logs/w-10_1/20170125T135310+0000/00/00/sessioncompute_4_ID0000007.sh
|
Shell
|
gpl-3.0
| 1,228 |
#!/bin/bash
# $Id: fortran_debug.sh,v 1.2 2011-07-26 16:51:01 juanca Exp $
# Default Fortran language debug script for VPL
# Copyright (C) 2011 Juan Carlos Rodríguez-del-Pino. All rights reserved.
# License GNU/GPL, see LICENSE.txt or http://www.gnu.org/licenses/gpl-2.0.html
# Author Juan Carlos Rodriguez-del-Pino
#load common script and check programs
# common_script.sh provides check_program and get_source_files (sets $SOURCE_FILES)
. common_script.sh
check_program gfortran
check_program gdb
get_source_files f f77
#compile
# -g -O0: debug symbols, no optimisation, so gdb maps source lines accurately
gfortran -o program -g -O0 $SOURCE_FILES
if [ -f program ] ; then
	# vpl_execution is the script the VPL jail runs after this one finishes
	echo "#!/bin/bash" >> vpl_execution
	echo "export LC_ALL=en_US.utf8" >> vpl_execution
	echo "gdb program" >> vpl_execution
	chmod +x vpl_execution
fi
|
ecastro/moodle23ulpgc
|
mod/vpl/jail/default_scripts/fortran_debug.sh
|
Shell
|
gpl-3.0
| 666 |
#! /bin/bash -x
################################################################################
# Copyright (c) 2015 Genome Research Ltd.
#
# Author: George Hall <[email protected]>
#
# This file is part of K-mer Toolkit.
#
# K-mer Toolkit is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
################################################################################
# Usage: mask_repeats.sh <reference> <working_dir> <script_dir> <input_path>
REFERENCE=$1
WORKING_DIR=$2
CONTIG_MASK_BIN=$3"/../bin/contig_mask"
IN_PATH=$4
# strip directory and extension to get the input's base name
IN_NAME=${IN_PATH##*/}
IN_NAME=${IN_NAME%*.*}
# quoted + checked: an unquoted 'cd $WORKING_DIR' silently fell through to
# $HOME on an empty/space-containing path and masked into the wrong place
cd "$WORKING_DIR" || exit 1
if [ ! -d "Masked Repeats" ]; then
	mkdir "Masked Repeats"
fi
"$CONTIG_MASK_BIN" "$REFERENCE" "$IN_PATH" "$WORKING_DIR/Masked Repeats/${IN_NAME}_mask"
cd ..
|
george-hall/k_mer_tools
|
src/scripts/mask_repeats.sh
|
Shell
|
gpl-3.0
| 1,261 |
#!/bin/sh
# Poll until Fedora's tomcat has created its data directory, then return.
echo "waiting for fedora tomcat to start"
until [ -d "/var/www/fedora/data" ]
do
  printf "."   # one progress dot per second while waiting
  sleep 1
done
echo "tomcat started"
|
TheLanguageArchive/EasyLAT
|
docker/flat/fedora/wait.sh
|
Shell
|
gpl-3.0
| 145 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.