code
stringlengths 2
1.05M
| repo_name
stringlengths 5
110
| path
stringlengths 3
922
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 2
1.05M
|
---|---|---|---|---|---|
#!/usr/bin/env bash
set -euo pipefail
# Run the given command inside the esp-open-sdk container, mounting the
# current directory as /source and passing through the USB serial device.
# Fixes: "$(pwd)" is quoted (paths with spaces), and "$@" replaces "$*"
# so each command-line argument is passed through as a separate word.
docker run -it --rm \
  -v "$(pwd)":/source \
  -v /dev/ttyUSB0:/dev/ttyUSB0 \
  docker.cantireinnovations.com/esp-open-sdk:2 "$@"
|
CanTireInnovations/blinker
|
do-d.sh
|
Shell
|
mit
| 157 |
#!/usr/bin/env bash
echo "Install supplied collection data in an annalist site."
echo "(Assumes Python virtualenv is activated with Annalist installed.)"
echo ""
# Resolve the site directory once (it is invariant) and quote it so paths
# containing spaces do not word-split inside the rm commands.
site_dir="$(annalist-manager sitedirectory --personal)"
# Reinstall each supplied collection: remove any existing copy, then install.
for coll in Resource_defs Concept_defs Journal_defs RDF_schema_defs Annalist_schema
do
    rm -rf "${site_dir}/c/${coll}"
    annalist-manager installcoll "${coll}"
done
echo "To run Annalist:"
echo " annalist-manager runserver"
echo "or:"
echo " nohup annalist-manager runserver >annalist.out &"
# End.
|
gklyne/annalist
|
src/install_collection_data.sh
|
Shell
|
mit
| 869 |
#!/bin/bash
about(){
# Print this script's usage/help text to standard output.
cat <<'EOF'
gen_gulp_input.sh v1.0 8/28/2010 Jeff Doak [email protected]
This program reads in a gulp output file from standard input and writes a gulp
or vasp formatted input file to standard output.
Command-Line Arguments:
 -h | --help : Prints this document.
 -v | --vasp : Create a vasp-formatted input.
 -g | --gulp : Create a gulp-formatted input.
EOF
}
gulp_file(){
#This function creates a gulp formatted input file from the results
#of gulp calculations.
# NOTE(review): every sed below re-opens /dev/fd/0 (stdin). This only works
# when stdin is a seekable regular file (e.g. `gulp_file < out.got`); it will
# not work with piped input — confirm callers always redirect from a file.
local a=`sed -n "s/^ *a = *\([0-9]*[.][0-9]*\) *alpha = *[0-9]*[.][0-9]*/\1/p" /dev/fd/0`
local alpha=`sed -n "s/^ *a = *[0-9]*[.][0-9]* *alpha = *\([0-9]*[.][0-9]*\)/\1/p" /dev/fd/0`
local b=`sed -n "s/^ *b = *\([0-9]*[.][0-9]*\) *beta = *[0-9]*[.][0-9]*/\1/p" /dev/fd/0`
local beta=`sed -n "s/^ *b = *[0-9]*[.][0-9]* *beta = *\([0-9]*[.][0-9]*\)/\1/p" /dev/fd/0`
local c=`sed -n "s/^ *c = *\([0-9]*[.][0-9]*\) *gamma = *[0-9]*[.][0-9]*/\1/p" /dev/fd/0`
local gamma=`sed -n "s/^ *c = *[0-9]*[.][0-9]* *gamma = *\([0-9]*[.][0-9]*\)/\1/p" /dev/fd/0`
local num_atoms=`sed -n "s/^ *Total number atoms\/shells = *\([0-9][0-9]*\)/\1/p" /dev/fd/0`
# Emit the gulp "cell" block: lattice parameters a b c alpha beta gamma.
echo "cell"
echo "$a $b $c $alpha $beta $gamma"
#grep "Formula = " /dev/fd/0 | sed "s/ *[A-Z=][a-z]*/ /g"
echo "Fractional"
local flag=0; local count=0
# NOTE(review): 'let local temp=...' evaluates the word 'local' arithmetically,
# so temp is assigned but is NOT function-local here.
let local temp=$num_atoms+4
# The pipeline runs the while loop in a subshell; flag/count changes are lost
# after 'done', but both are only used inside the loop.
cat /dev/fd/0 |
while read line
do
# Once the coordinates header is seen, print up to temp following lines.
if echo $line | grep -q "Final fractional coordinates of atoms :"
then
flag=1
elif [ $flag -eq 1 ] && [ $count -le $temp ]
then
# Emit: species x y z (drops the index, the 'c' core flag and trailing column).
echo $line | sed -n "s/^ *[0-9][0-9]* *\([A-Z][a-z]*\) *c *\([0-9]*[.][0-9]*\) *\([0-9]*[.][0-9]*\) *\([0-9]*[.][0-9]*\) *[0-9]*[.][0-9]*/\1 \2 \3 \4/p"
(( count++ ))
fi
done
echo "output"
}
vasp_file(){
#This function creates a vasp-formatted input file from a gulp output file
#sent to standard input.
# NOTE(review): like gulp_file(), this re-opens /dev/fd/0 several times, which
# only works when stdin is a seekable regular file, not a pipe — confirm.
local num_atoms=`sed -n "s/^ *Total number atoms\/shells = *\([0-9][0-9]*\)/\1/p" /dev/fd/0`
# POSCAR header: comment line and universal scaling factor.
echo "title"
echo "1.0"
local flag=0; local count=0
let temp=3
# Extract the three lattice-vector rows following the Cartesian header.
# (Pipeline subshell: flag/count are only used inside the loop.)
cat /dev/fd/0 |
while read line
do
if echo $line | grep -q "Final Cartesian lattice vectors (Angstroms) :"
then
flag=1
elif [ $flag -eq 1 ] && [ $count -le $temp ]
then
echo $line | sed -n "s/\(-*[0-9]*[.][0-9]*\) *\(-*[0-9]*[.][0-9]*\) *\(-*[0-9]*[.][0-9]*\)/\1 \2 \3/p"
(( count++ ))
fi
done
# Species-count line: strip element symbols / '=' words, keep the numbers.
grep "Formula = " /dev/fd/0 | sed "s/ *[A-Z=][a-z]*/ /g"
echo "Direct"
flag=0; count=0
let temp=$num_atoms+4
# Extract fractional coordinates, reordered as: x y z species.
cat /dev/fd/0 |
while read line
do
if echo $line | grep -q "Final fractional coordinates of atoms :"
then
flag=1
elif [ $flag -eq 1 ] && [ $count -le $temp ]
then
echo $line | sed -n "s/^ *[0-9][0-9]* *\([A-Z][a-z]*\) *c *\([0-9]*[.][0-9]*\) *\([0-9]*[.][0-9]*\) *\([0-9]*[.][0-9]*\) *[0-9]*[.][0-9]*/\2 \3 \4 \1/p"
(( count++ ))
fi
done
}
# Dispatch on the first command-line argument (empty when none was given,
# which falls through to the usage hint, exactly as the original case did).
arg="$1"
if [ "$arg" = "-h" ] || [ "$arg" = "--help" ]
then
    about
elif [ "$arg" = "-v" ] || [ "$arg" = "--vasp" ]
then
    vasp_file
elif [ "$arg" = "-g" ] || [ "$arg" = "--gulp" ]
then
    gulp_file
else
    echo "Please put either -v or -g as a command-line argument."
fi
exit
|
jeffwdoak/vasp_scripts
|
vasp_scripts/gen_gulp_input.sh
|
Shell
|
mit
| 3,073 |
#!/bin/bash
##
# Copyright 2011-2012 Jeroen Doggen ([email protected])
#
# Script to convert png/jpg bitmap images to pdf vector images (+upscaling)
# - Usage: place images in 'images' folder, pdf version of images will be created in 'pdf' folder
# - Depends on: tree, convert, potrace, epstopdf
##
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
##################################################################
# SELECT FILES TO CONVERT #
##################################################################
# Intermediate file extension (for 'temp' files)
ext='jpg'
# Selected file extensions
EXTS[0]='JPG'
EXTS[1]='gif'
EXTS[2]='jpeg'
EXTS[3]='png'
# Last element of the file extension array that is selected (only change when adding extra extensions)
LASTONE='3'
##################################################################
# SCRIPT SETTINGS #
##################################################################
# Black/white threshold handed to potrace -k (0..1).
threshold=0.90
# Pixel size handed to 'convert -resize' before tracing (upscaling).
resize=3000
# Helper variables
# Running total of converted images, reported by printLog.
count=0
##################################################################
# FUNCTIONS #
##################################################################
function createFolders
{
  # Ensure the 'pdf' and 'temp' output folders exist in the current directory.
  # Review fixes: the original tested `tree -d | grep images` before creating
  # 'temp' (wrong directory name checked), and depended on the external
  # 'tree' utility. 'mkdir -p' is idempotent and needs no pre-check.
  mkdir -p pdf
  mkdir -p temp
}
# Convert images with various extension to .jpg images
function convertTo_jpg
{
  # Convert every selected image under images/ to an intermediate .jpg and
  # stage a copy in ../temp for later processing.
  cd images
  for ((i=0;i<=LASTONE;i++)); do
    # Review fix: iterate NUL-delimited find output instead of word-splitting
    # $(find ...), so file names with spaces survive.
    while IFS= read -r -d '' f
    do
      dest="${f%.*}"
      echo "Convert from : '$dest.${EXTS[i]}' to '$dest.$ext'"
      convert "${f}" "${dest}.jpg"
      # Review fix: the original if/else had two identical branches
      # (both copied to ../temp); a single unconditional copy suffices.
      cp "${dest}.jpg" ../temp
    done < <(find . -type f -iname "*.${EXTS[i]}" -print0)
  done
}
function convertTo_pdf
{
# Vectorize each selected image: upscale with 'convert', trace to EPS via
# mkbitmap/potrace, convert to PDF with epstopdf, then delete intermediates
# so only the final PDF remains in ../pdf.
# NOTE(review): $(find ...) word-splits — file names with spaces will break.
cd images
for ((i=0;i<=LASTONE;i++)); do
# Convert *.jpg to *.pdf (+filtering)
for f in $(find -type f -iname '*.'"${EXTS[i]}" )
do
dest=`echo ${f%.*}`
echo "Converting to pdf: $dest.${EXTS[i]}"
let count=count+1
convert -resize $resize -quality 100% "${f}" "../pdf/${dest}.jpg"
convert -resize $resize -quality 100% "${f}" "../pdf/${dest}.bmp"
mkbitmap "../pdf/${dest}.bmp" -f 10 -o "../pdf/${dest}.bmpout" #low pass filter (remove slow gradients from scans)
potrace -k $threshold -r 100 "../pdf/${dest}.bmpout" #convert to eps
# NOTE(review): assumes potrace writes its EPS next to the input as
# ${dest}.eps — confirm against the potrace version in use.
epstopdf "../pdf/${dest}.eps" #convert to pdf
rm "../pdf/${dest}.bmp"
rm "../pdf/${dest}.bmpout"
rm "../pdf/${dest}.jpg"
rm "../pdf/${dest}.eps"
done
done
}
function printLog
{
  # Report how many images were processed during this run.
  printf '%s images have been converted\n' "$count"
}
##################################################################
# "MAIN CODE" STARTS HERE #
##################################################################
createFolders # ensure pdf/ and temp/ output folders exist
# NOTE(review): convertTo_jpg is defined above but never invoked here —
# confirm whether the jpg staging step was meant to run before convertTo_pdf.
convertTo_pdf
printLog # summary of how many images were converted
|
jeroendoggen/scripts-tools-misc
|
vectorImageConverter/converter.sh
|
Shell
|
mit
| 3,830 |
#!/bin/sh
# Bootstrap the bazaar_v6 PostgreSQL database for the examples and tests.
# NOTE(review): there is no 'set -e'; each psql call runs regardless of the
# previous one failing (e.g. on re-run when the database already exists) —
# confirm that best-effort behaviour is intended.
## create a new database, will prompt for password
psql -U postgres -h localhost -p 5432 -d postgres -c "create database bazaar_v6 with owner postgres encoding 'utf8';"
## create role bazaar
psql -U postgres -h localhost -p 5432 -d postgres -c "create role bazaar with login password 'b4z44r'"
## fill the newly created database, will prompt password again
psql -U postgres -h localhost -p 5432 -d bazaar_v6 -f ./scripts/bazaar_v6_all.sql
## change the password of postgresql as what is used in the examples and tests
psql -U postgres -h localhost -p 5432 -d postgres -c "alter role postgres with password 'p0stgr3s'"
|
mtorromeo/rustorm
|
scripts/setup.sh
|
Shell
|
mit
| 634 |
#!/bin/sh
#-------------------------------------------------------------------------------
# Name: common.sh
# Purpose: Common scripting functions used by all scripts
# Website: http://ronaldbradford.com
# Author: Ronald Bradford http://ronaldbradford.com
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# Script Definition
#
COMMON_SCRIPT_VERSION="0.12 09-SEP-2011"
COMMON_SCRIPT_REVISION=""
#-------------------------------------------------------------------------------
# Constants - These values never change
#
FATAL="FATAL"
ERROR="ERROR"
# WARN/INFO carry a trailing space so all level tags are 5 characters wide
# and log columns line up.
WARN="WARN "
INFO="INFO "
DEBUG="DEBUG"
OK="OK"
LOGGING_LEVELS="${FATAL} ${ERROR} ${WARN} ${INFO} ${DEBUG}"
DEFAULT_LOG_DATE_FORMAT="+%Y%m%d %H:%M:%S %z"
LOG_EXT=".txt"
DATE_TIME_FORMAT="+%Y%m%d.%H%M"
SEP=","
DATA_EXT=".csv"
#-------------------------------------------------------------------------------
# Base Variables
#
# NOTE(review): SCRIPT_NAME is expected to be defined by the sourcing script
# before this file is included — confirm; TMP_FILE/STOP_FILE depend on it.
DATE_TIME=`date ${DATE_TIME_FORMAT}`
DATE_TIME_TZ=`date ${DATE_TIME_FORMAT}%z`
DATE=`date +%Y%m%d`
[ -z "${TMP_DIR}" ] && TMP_DIR="/tmp"
TMP_FILE="${TMP_DIR}/${SCRIPT_NAME}.tmp.$$"
STOP_FILE="${TMP_DIR}/${SCRIPT_NAME}.stop"
USER_ID=`id -u`
[ "${USER_ID}" -eq 0 ] && ROOT_USER="Y"
FULL_HOSTNAME=`hostname 2>/dev/null`
SHORT_HOSTNAME=`hostname -s 2>/dev/null`
[ -z "${LOG_DATE_FORMAT}" ] && LOG_DATE_FORMAT="${DEFAULT_LOG_DATE_FORMAT}"
DEFAULT_LOG_COMPRESS_DAYS=5
DEFAULT_LOG_DELETE_DAYS=30
DEFAULT_LOCK_TIMEOUT=60
DEFAULT_PRODUCT="mysql"
DEFAULT_MYSQL_USER="dba"
# Prefer pigz (parallel gzip) when installed; fall back to gzip.
GZIP=`which gzip 2>/dev/null`
[ ! -z `which pigz 2>/dev/null` ] && GZIP=`which pigz`
#----------------------------------------------------------------cleanup_exit --
# Exit the program, reporting exit code and clean up default tmp file
# Was named leave() by conflicts with BSD command
#
cleanup_exit() {
# Exit the script with the supplied code, releasing the service lock and
# removing the tmp file(s) (kept when USE_DEBUG is set) and any stop file.
local FUNCTION="common:cleanup_exit()"
[ $# -ne 1 ] && fatal "${FUNCTION} This function requires one argument."
local EXIT_CODE="$1"
[ -z "${EXIT_CODE}" ] && fatal "${FUNCTION} \$EXIT_CODE is not defined"
debug "${FUNCTION} exiting script with '${EXIT_CODE}'"
service_unlock
[ $EXIT_CODE -ne 0 ] && info "Exiting with status code of '${EXIT_CODE}'"
[ ! -z "${TMP_FILE}" ] && [ -z "${USE_DEBUG}" ] && rm -f ${TMP_FILE}*
[ ! -z "${STOP_FILE}" ] && rm -f ${STOP_FILE}
exit ${EXIT_CODE}
}
#------------------------------------------------------------------------ log --
# Internal function for logging various levels of output
#
log() {
# Internal: write one log line "<date> <LEVEL> [<script>] <message>" to stdout.
# Used by fatal/error/warn/info/debug; call those instead of this directly.
local FUNCTION="common:log()"
[ $# -lt 2 ] && fatal "${FUNCTION} This function requires at least two arguments."
local LEVEL="$1"; shift
local OUTPUT
OUTPUT=$*
[ -z "${LEVEL}" ] && fatal "${FUNCTION} \$LEVEL is not defined"
[ -z "${OUTPUT}" ] && fatal "${FUNCTION} \$OUTPUT is not defined"
[ -z "${LOG_DATE_FORMAT}" ] && fatal "${FUNCTION} Global \$LOG_DATE_FORMAT is not defined"
local LOG_DT
LOG_DT=`date "${LOG_DATE_FORMAT}"`
#Causes potential infinite loop
#[ `echo ${LOGGING_LEVELS} | grep ${LEVEL} | wc -l` -ne 0 ] && fatal "log() specified \$LEVEL=\"${LEVEL}\" is not valid"
echo "${LOG_DT} ${LEVEL} [${SCRIPT_NAME}] ${OUTPUT}"
return 0
}
#---------------------------------------------------------------------- fatal --
# Log a fatal message
#
fatal() {
# Log an internal error and terminate via cleanup_exit with code 100.
# The recursive fatal call in the argument check terminates because the
# recursive invocation always passes a message.
local FUNCTION="common:fatal()"
[ $# -lt 1 ] && fatal "${FUNCTION} This function requires at least one argument."
local OUTPUT
OUTPUT=$*
[ -z "${OUTPUT}" ] && fatal "${FUNCTION} \$OUTPUT is not defined"
log "${FATAL}" "INTERNAL ERROR: " ${OUTPUT}
local ERROR_CODE="100"
cleanup_exit ${ERROR_CODE}
return 0
}
#---------------------------------------------------------------------- error --
# Log an error message
#
error() {
# Log an error message and terminate via cleanup_exit with code 1.
# Unlike warn(), this does not return to the caller.
local FUNCTION="common:error()"
[ $# -lt 1 ] && fatal "${FUNCTION} This function requires at least one argument."
local OUTPUT
OUTPUT=$*
[ -z "${OUTPUT}" ] && fatal "${FUNCTION} \$OUTPUT is not defined"
log "${ERROR}" ${OUTPUT}
local ERROR_CODE="1"
cleanup_exit ${ERROR_CODE}
return 0
}
#----------------------------------------------------------------------- warn --
# Log a warning message
#
warn() {
# Log a warning message; unlike error(), execution continues afterwards.
local FUNCTION="common:warn()"
if [ $# -lt 1 ]
then
  fatal "${FUNCTION} This function requires at least one argument."
fi
local OUTPUT="$*"
if [ -z "${OUTPUT}" ]
then
  fatal "${FUNCTION} \$OUTPUT is not defined"
fi
log "${WARN}" ${OUTPUT}
return 0
}
#----------------------------------------------------------------------- info --
# Log an information message
#
info() {
# Log an informational message unless QUIET is set (then it is suppressed).
local FUNCTION="common:info()"
[ $# -lt 1 ] && fatal "${FUNCTION} This function requires at least one argument."
local OUTPUT
OUTPUT=$*
[ -z "${OUTPUT}" ] && fatal "${FUNCTION} \$OUTPUT is not defined"
[ -z "${QUIET}" ] && log "${INFO}" ${OUTPUT}
return 0
}
#---------------------------------------------------------------------- debug --
# Log a debugging message
#
debug() {
# Log a debug message only when USE_DEBUG is set; a no-op otherwise.
local FUNCTION="common:debug()"
[ $# -lt 1 ] && fatal "${FUNCTION} This function requires at least one argument."
local OUTPUT
OUTPUT=$*
[ -z "${OUTPUT}" ] && fatal "${FUNCTION} \$OUTPUT is not defined"
[ ! -z "${USE_DEBUG}" ] && log "${DEBUG}" ${OUTPUT}
return 0
}
#----------------------------------------------------------------- debug_file --
# Log a debugging message and output supplied file or tmp file
#
debug_file() {
# When USE_DEBUG is set, log a debug message and dump the given file
# ($2, defaulting to TMP_FILE) to stdout; a no-op otherwise.
local FUNCTION="common:debug_file()"
[ $# -lt 1 ] && fatal "${FUNCTION} This function requires at least one argument."
local OUTPUT="$1"
[ -z "${OUTPUT}" ] && fatal "${FUNCTION} \$OUTPUT is not defined"
if [ ! -z "${USE_DEBUG}" ]
then
local FILE_NAME="$2"
[ -z "${FILE_NAME}" ] && FILE_NAME=${TMP_FILE}
log "${DEBUG}" ${OUTPUT}
[ -f "${FILE_NAME}" ] && cat ${FILE_NAME}
fi
return 0
}
#--------------------------------------------------------------- manages_logs --
# Manage logs to compress and purge
#
manage_logs() {
# Housekeeping entry point: compress logs older than the default compress
# age, then delete compressed logs older than the default delete age.
local FUNCTION="common:manage_logs()"
[ -z "${DEFAULT_LOG_COMPRESS_DAYS}" ] && fatal "${FUNCTION} \$DEFAULT_LOG_COMPRESS_DAYS is not defined"
[ -z "${DEFAULT_LOG_DELETE_DAYS}" ] && fatal "${FUNCTION} \$DEFAULT_LOG_DELETE_DAYS is not defined"
info "Compressing and purging logs"
compress_logs ${DEFAULT_LOG_COMPRESS_DAYS}
purge_logs ${DEFAULT_LOG_DELETE_DAYS}
return 0
}
#-------------------------------------------------------------- compress_logs --
# Compress logs for a given number of days
#
compress_logs() {
# gzip uncompressed *${LOG_EXT} files in LOG_DIR older than $1 days.
# NOTE(review): this uses plain 'gzip' although the GZIP variable (which
# prefers pigz) is computed at the top of this file — confirm which is
# intended. All find output and errors are discarded.
local FUNCTION="common:compress_logs()"
[ $# -ne 1 ] && fatal "${FUNCTION} This function requires one argument."
local DAYS="$1"
[ -z "${DAYS}" ] && fatal "${FUNCTION} \$DAYS is not defined"
find ${LOG_DIR} -maxdepth 1 -type f -name "*${LOG_EXT}" -mtime +${DAYS} -print -exec gzip {} \; > /dev/null 2>&1
return 0
}
#----------------------------------------------------------------- purge_logs --
# Purge logs for a given number of days
#
purge_logs() {
# Delete compressed *${LOG_EXT}.gz files in LOG_DIR older than $1 days.
# All find output and errors are discarded.
local FUNCTION="common:purge_logs()"
[ $# -ne 1 ] && fatal "${FUNCTION} This function requires one argument."
local DAYS="$1"
[ -z "${DAYS}" ] && fatal "${FUNCTION} \$DAYS is not defined"
find ${LOG_DIR} -maxdepth 1 -type f -name "*${LOG_EXT}.gz" -mtime +${DAYS} -print -exec rm -f {} \; > /dev/null 2>&1
return 0
}
#------------------------------------------------------------------- commence --
# Commence Script Logging with starting message
#
commence() {
# Start-of-script bookkeeping: refuse to run while a stop file exists,
# record START_SEC (used by complete() for elapsed time), log startup.
local FUNCTION="common:commence()"
[ $# -ne 0 ] && fatal "${FUNCTION} This function accepts no arguments."
[ -f "${STOP_FILE}" ] && error "An existing stop file '${STOP_FILE}' exists, remove this to start processing"
START_SEC=`date +%s`
info "Script started (Version: ${SCRIPT_VERSION})"
return 0
}
#------------------------------------------------------------------- complete --
# Complete Script Logging with completed message
#
complete() {
# End-of-script bookkeeping: log completion with elapsed seconds (computed
# from START_SEC recorded by commence()), run any post_complete hook, and
# exit 0 via cleanup_exit.
local FUNCTION="common:complete()"
[ $# -ne 0 ] && fatal "${FUNCTION} This function accepts no arguments."
END_SEC=`date +%s`
[ -z "${START_SEC}" ] && warn "${FUNCTION} was not used to determine starting time of script" && TOTAL_SECS="[Unknown]"
# Review fix: this test was written '[ -z" ${TOTAL_SECS}" ]' (missing space
# after -z), a one-argument test that is always true, so expr ran even when
# START_SEC was empty and clobbered the "[Unknown]" fallback (also fixed the
# "Unknonwn" typo above).
[ -z "${TOTAL_SECS}" ] && TOTAL_SECS=`expr ${END_SEC} - ${START_SEC}`
info "Script completed successfully (${TOTAL_SECS} secs)"
# Perform any post handling, especially stdout redirection for cron logging
post_complete 2>/dev/null
cleanup_exit 0
}
#------------------------------------------------------------- set_base_paths --
# Set the essential paths for all scripts
# Scripts can be called from
# /path/to/scripts/name.sh
# [./]name.sh
# scripts/name.sh
#
set_base_paths() {
# Derive BASE_DIR from how the script was invoked (strips a trailing
# 'scripts' component from dirname $0) and establish the standard layout:
# SCRIPT_DIR, CNF_DIR (etc/), LOG_DIR (log/). Creates etc/ and log/ when
# missing, sources the optional etc/.common environment file, and defines
# the default cnf/log filename variables used by callers.
local FUNCTION="common:set_base_paths()"
[ $# -ne 0 ] && fatal "${FUNCTION} This function accepts no arguments."
[ -z "${BASE_DIR}" ] && BASE_DIR=`dirname $0 | sed -e "s/scripts//"`
if [ "${BASE_DIR}" = "." ]
then
BASE_DIR=".."
elif [ -z "${BASE_DIR}" ]
then
BASE_DIR="."
fi
[ -z "${BASE_DIR}" ] && fatal "${FUNCTION} Unable to determine BASE_DIR"
SCRIPT_DIR=${BASE_DIR}/scripts
CNF_DIR=${BASE_DIR}/etc
[ -z "${LOG_DIR}" ] && LOG_DIR="${BASE_DIR}/log"
debug "SCRIPT_DIR=${SCRIPT_DIR}"
debug "LOG_DIR=${LOG_DIR}"
debug "CNF_DIR=${CNF_DIR}"
if [ ! -d "${CNF_DIR}" ]
then
warn "The required configuration directory '${CNF_DIR}' was not found, creating"
run "Creating required configuration directory" mkdir -p ${CNF_DIR}
fi
if [ ! -d "${LOG_DIR}" ]
then
warn "The required log directory '${LOG_DIR}' was not found, creating"
run "Creating required log directory" mkdir -p ${LOG_DIR}
fi
COMMON_ENV_FILE="${CNF_DIR}/.common"
[ -f "${COMMON_ENV_FILE}" ] && . ${COMMON_ENV_FILE}
DEFAULT_CNF_FILE="${CNF_DIR}/${SCRIPT_NAME}.cnf"
DEFAULT_MY_CNF_FILE="${CNF_DIR}/${SCRIPT_NAME}.my.cnf"
DEFAULT_LOG_FILE="${LOG_DIR}/${SCRIPT_NAME}.${DATE_TIME}${LOG_EXT}"
DEFAULT_HOST_LOG_FILE="${LOG_DIR}/${SCRIPT_NAME}.${DATE_TIME}.${SHORT_HOSTNAME}${LOG_EXT}"
DEFAULT_HOST_CNF_FILE="${CNF_DIR}/${SCRIPT_NAME}.${SHORT_HOSTNAME}.cnf"
return 0
}
#-------------------------------------------------------------------- version --
# Display to stdout the Script Version details and exit
#
version() {
# Print the script name, version and revision on a single line.
local FUNCTION="common:version()"
if [ $# -ne 0 ]
then
  fatal "${FUNCTION} This function accepts no arguments."
fi
printf 'Name: %s.sh Version: %s Revision: %s\n' "${SCRIPT_NAME}" "${SCRIPT_VERSION}" "${SCRIPT_REVISION}"
return 0
}
#-------------------------------------------------------- check_for_long_args --
# Check for --long arguments
#
check_for_long_args() {
# Scan the arguments for --version/--help and act on the first match.
# Stops at the first empty argument, matching the original while/shift loop.
local ARG
for ARG in "$@"
do
  [ -z "${ARG}" ] && break
  [ "${ARG}" = "--version" ] && version && exit 0
  [ "${ARG}" = "--help" ] && help && exit 0
done
return 0
}
#------------------------------------------------------------ check_stop_file --
# Check for Stop file and exit nicely
#
check_stop_file() {
# If an operator has created the stop file, log a warning and end the
# script cleanly via complete() (which exits); otherwise return 0.
if [ -f "${STOP_FILE}" ]
then
warn "A stop file was provided to stop script processing."
complete
fi
return 0
}
#-------------------------------------------------------------- service_lock --
# Lock to the current service so only one instance can run
#
service_lock() {
# Acquire a per-script/per-type lock file in TMP_DIR so only one instance
# runs. $1 = lock type, $2 = optional staleness timeout in seconds
# (default DEFAULT_LOCK_TIMEOUT). A fresh existing lock aborts via error();
# a stale one is removed and replaced. LOCK_FILE is intentionally global —
# service_unlock() and cleanup_exit() use it.
# NOTE(review): check-then-write is not atomic; two processes starting at
# the same moment can both pass the -f test — confirm this race is
# acceptable for the cron use case.
local FUNCTION="common:service_lock()"
[ $# -lt 1 -o $# -gt 2 ] && fatal "${FUNCTION} This function accepts one or two parameters"
local LOCK_TYPE="$1"
local LOCK_TIMEOUT="$2"
# Verify parameters
[ -z "${LOCK_TYPE}" ] && fatal "${FUNCTION} \$LOCK_TYPE is not defined"
# Required system default variables
[ -z "${DEFAULT_LOCK_TIMEOUT}" ] && fatal "${FUNCTION} Global \$DEFAULT_LOCK_TIMEOUT is not defined"
[ -z "${SCRIPT_NAME}" ] && fatal "${FUNCTION} Global \$SCRIPT_NAME is not defined"
# function variables
[ -z "${LOCK_TIMEOUT}" ] && LOCK_TIMEOUT=${DEFAULT_LOCK_TIMEOUT}
LOCK_FILE="${TMP_DIR}/${SCRIPT_NAME}.${LOCK_TYPE}.lock"
local LOCK_TIME
LOCK_TIME=`date +%s`
if [ -f "${LOCK_FILE}" ]
then
warn "An existing lock file exists"
local EXISTING_LOCK_TIME
EXISTING_LOCK_TIME=`head -1 ${LOCK_FILE}`
[ `expr ${EXISTING_LOCK_TIME} + ${LOCK_TIMEOUT}` -gt ${LOCK_TIME} ] && error "Another instance of this process is running '${SCRIPT_NAME}'/'${LOCK_TYPE}'"
warn "Another instance of '${SCRIPT_NAME}'/'${LOCK_TYPE}' was found but it now stale (> '${LOCK_TIMEOUT}' secs)"
rm -f ${LOCK_FILE}
fi
echo "${LOCK_TIME}" > ${LOCK_FILE}
return 0
}
#------------------------------------------------------------ service_unlock --
# Unlock the current service allowing another process to run
#
service_unlock() {
# Release the lock taken by service_lock(): warn if the lock file has
# already vanished, remove it, and clear the global LOCK_FILE.
local FUNCTION="common:service_unlock()"
[ $# -ne 0 ] && fatal "${FUNCTION} This function accepts no arguments."
[ ! -z "${LOCK_FILE}" ] && [ ! -f "${LOCK_FILE}" ] && warn "No lock file found."
rm -f ${LOCK_FILE}
LOCK_FILE=""
return 0
}
#----------------------------------------------------------------------- run --
# Run a given command with implied error checking
#
run() {
# Run a command ($2..) described by $1, capturing stdout+stderr in TMP_FILE.
# On failure the capture is dumped when USE_DEBUG is set, then error() is
# called (which exits the script). RC is global.
# NOTE(review): ${RUN_CMD} is expanded unquoted, so arguments containing
# spaces/quotes or shell syntax (pipes, redirects) will not work — only
# simple word lists can be passed.
local FUNCTION="common:run()"
[ $# -lt 2 ] && fatal "${FUNCTION} This function requires at least two arguments."
local RUN_DESCRIPTION="$1"
shift
local RUN_CMD="$*"
debug "About to run '${RUN_DESCRIPTION}' with '${RUN_CMD}'"
${RUN_CMD} > ${TMP_FILE} 2>&1
RC=$?
[ ${RC} -ne 0 ] && [ ! -z "${USE_DEBUG}" ] && cat ${TMP_FILE}
[ ${RC} -ne 0 ] && error "Unable to run command '${RUN_DESCRIPTION}' successfully. Exit code '${RC}'"
return 0
}
#--------------------------------------------------------- verify_mysql_login --
# Check for authentication to connect to mysql
#
verify_mysql_login() {
# Ensure MYSQL_AUTHENTICATION is defined before running mysql commands;
# error() (which exits) is invoked when it is missing.
if [ -z "${MYSQL_AUTHENTICATION}" ]
then
  error "There is no MYSQL_AUTHENTICATION to execute mysql commands."
fi
return 0
}
#------------------------------------------------------------ mysql_binaries -
# Check for defined mysql binaries in the current PATH
#
mysql_binaries() {
# Locate the required mysql client binaries on PATH, setting the globals
# MYSQL, MYSQLADMIN and MYSQLDUMP; error() (which exits) is called when
# any of them is missing.
# Review fix: 'command -v' replaces the external 'which' — it is a POSIX
# builtin with the same stdout behaviour for found commands and an empty
# result when missing.
MYSQL=`command -v mysql`
MYSQLADMIN=`command -v mysqladmin`
MYSQLDUMP=`command -v mysqldump`
[ -z "${MYSQL}" ] && error "mysql client not found in path, \$MYSQL_HOME/bin should be added to PATH."
[ -z "${MYSQLADMIN}" ] && error "mysqladmin not found in path, \$MYSQL_HOME/bin should be added to PATH."
[ -z "${MYSQLDUMP}" ] && error "mysqldump not found in path, \$MYSQL_HOME/bin should be added to PATH."
return 0
}
#------------------------------------------------------------------- ec2_env --
# Check for correctly configured EC2 API tools
#
ec2_env() {
# Validate the environment required by the EC2/ELB command-line tools;
# error() (which exits) is called on the first missing item.
[ -z "${EC2_PRIVATE_KEY}" ] && [ -z "${EC2_CERT}" ] && error "EC2_PRIVATE_KEY and EC2_CERT must be specified"
[ -z "${EC2_PRIVATE_KEY}" ] && error "EC2_PRIVATE_KEY must be specified"
[ -z "${EC2_CERT}" ] && error "EC2_CERT must be specified"
[ -z "${AWS_ELB_HOME}" ] && error "AWS_ELB_HOME must be specified"
[ -z "${JAVA_HOME}" ] && error "JAVA_HOME must be specified"
[ -z `which ec2-describe-instances` ] && error "ec2-describe-instances not in path, Ensure ec2-api tools added to PATH"
[ -z `which elb-describe-lbs` ] && error "elb-describe-lbs not in path, Ensure elb-api tools added to PATH"
return 0
}
#---------------------------------------------------------------------- email --
email() {
# Send an alert email via mailx. $1 = subject suffix, $2 = recipient
# (warns and skips when empty), $3 = optional body file (defaults to
# TMP_FILE, created empty if absent). mailx errors are discarded.
local FUNCTION="common:email()"
[ $# -lt 1 ] && fatal "${FUNCTION} This function requires one - three arguments."
local MSG="$1"
[ -z "${MSG}" ] && fatal "${FUNCTION} \$MSG is not defined"
local TO_EMAIL="$2"
local MAIL_FILE="$3"
# NOTE(review): EMAIL is declared but never used in this function.
local EMAIL
if [ ! -z "${TO_EMAIL}" ]
then
info "Sending alert email to '${TO_EMAIL}'"
[ ! -f "${TMP_FILE}" ] && touch ${TMP_FILE}
[ -z "${MAIL_FILE}" ] && MAIL_FILE=${TMP_FILE}
cat ${MAIL_FILE} | mailx -s "[${SCRIPT_NAME}] ${MSG}" ${TO_EMAIL} 2>/dev/null
else
warn "No email notification recipient defined"
fi
return 0
}
# END
|
ronaldbradford/mysql
|
scripts/common.sh
|
Shell
|
mit
| 15,706 |
#!/bin/bash -eux
#===============================================================================
# vim: softtabstop=2 shiftwidth=2 expandtab fenc=utf-8 spelllang=en ft=sh
#===============================================================================
# Guest-additions version marker staged into the VM by Packer.
readonly VBOX_VERSION=$(cat /home/vagrant/.vbox_version)
function echoinfo() {
  # Print each argument on its own line, prefixed with a blue "☆ INFO" tag.
  local info_color="\033[1;34m"
  local color_reset="\033[0m"
  printf "${info_color} ☆ INFO${color_reset}: %s\n" "$@";
}
function install_virtualbox_guest_additions() {
# Mount the guest-additions ISO that Packer copied into the VM, run the
# installer, then remove the ISO, the version marker file and the build
# toolchain packages that were only needed to compile the guest modules.
echoinfo "Virtualbox Version: $VBOX_VERSION"
echoinfo "Installing VirtualBox guest additions"
mount -o loop,ro /home/vagrant/VBoxGuestAdditions_"$VBOX_VERSION".iso /mnt
/mnt/VBoxLinuxAdditions.run --nox11
# Presumably gives the installer's background steps time to finish before
# unmounting — TODO confirm whether this fixed delay is still needed.
sleep 15
echoinfo "VirtualBox guest additions installed successfully"
echoinfo "Unmounting guest additions iso"
umount /mnt
echoinfo "Cleaning up guest additions installation files"
rm -rfv /home/vagrant/VBoxGuestAdditions_"$VBOX_VERSION".iso
rm -fv /home/vagrant/.vbox_version
echoinfo "Removing packages needed for building guest tools"
yum -y remove gcc cpp kernel-devel kernel-headers perl
}
# Only install the guest additions when Packer is building a VirtualBox image.
if [[ $PACKER_BUILDER_TYPE =~ virtualbox ]]; then
install_virtualbox_guest_additions
fi
|
jrbing/ps-packer
|
scripts/oel/virtualbox.sh
|
Shell
|
mit
| 1,195 |
#!/bin/bash -x
#
# Generated - do not edit!
#
# Macros
# NOTE(review): IMAGE_TYPE and OUTPUT_SUFFIX are referenced but never set
# here — presumably supplied by the MPLAB X build environment; confirm.
TOP=`pwd`
CND_CONF=default
CND_DISTDIR=dist
TMPDIR=build/${CND_CONF}/${IMAGE_TYPE}/tmp-packaging
TMPDIRNAME=tmp-packaging
OUTPUT_PATH=dist/${CND_CONF}/${IMAGE_TYPE}/MAIN_BOARD_BIG_ROBOT_32.X.${IMAGE_TYPE}.${OUTPUT_SUFFIX}
OUTPUT_BASENAME=MAIN_BOARD_BIG_ROBOT_32.X.${IMAGE_TYPE}.${OUTPUT_SUFFIX}
PACKAGE_TOP_DIR=mainboardbigrobot32.x/
# Functions
function checkReturnCode
{
# Abort the whole script with the same status if the most recent command
# (the one executed just before this call) failed; $? is captured on entry.
rc=$?
if [ $rc != 0 ]
then
exit $rc
fi
}
function makeDirectory
# $1 directory path
# $2 permission (optional)
{
  # Create the directory (and any missing parents); abort on failure.
  mkdir -p "$1"
  checkReturnCode
  # When a permission mode was supplied, apply it to the new directory.
  if [ -n "${2-}" ]
  then
    chmod "$2" "$1"
    checkReturnCode
  fi
}
function copyFileToTmpDir
# $1 from-file path
# $2 to-file path
# $3 permission
{
  # Copy the file into the packaging tree; abort on failure.
  cp "$1" "$2"
  checkReturnCode
  # When a permission mode was supplied, apply it to the copy.
  if [ -n "${3-}" ]
  then
    chmod "$3" "$2"
    checkReturnCode
  fi
}
# Setup
cd "${TOP}"
mkdir -p ${CND_DISTDIR}/${CND_CONF}/package
rm -rf ${TMPDIR}
mkdir -p ${TMPDIR}
# Copy files and create directories and links
cd "${TOP}"
makeDirectory ${TMPDIR}/mainboardbigrobot32.x/bin
copyFileToTmpDir "${OUTPUT_PATH}" "${TMPDIR}/${PACKAGE_TOP_DIR}bin/${OUTPUT_BASENAME}" 0755
# Generate tar file
cd "${TOP}"
rm -f ${CND_DISTDIR}/${CND_CONF}/package/mainboardbigrobot32.x.tar
cd ${TMPDIR}
# The ../../../../ prefix assumes TMPDIR is exactly four levels deep
# (build/<conf>/<image-type>/tmp-packaging).
tar -vcf ../../../../${CND_DISTDIR}/${CND_CONF}/package/mainboardbigrobot32.x.tar *
checkReturnCode
# Cleanup
cd "${TOP}"
rm -rf ${TMPDIR}
|
svanacker/cen-electronic
|
main/mainBoardBigRobot/MAIN_BOARD_BIG_ROBOT_32.X/nbproject/Package-default.bash
|
Shell
|
mit
| 1,449 |
#!/bin/bash -e
################################################################################
##  File:  validate-disk-space.sh
##  Desc:  Validate free disk space
################################################################################
# Free space on / in MB; -m (1M blocks) overrides -h, column 4 = available.
availableSpaceMB=$(df / -hm | sed 1d | awk '{ print $4}')
minimumFreeSpaceMB=15000
echo "Available disk space: $availableSpaceMB MB"
# Review fix: RUN_VALIDATION may be unset; the original unquoted
# '[ $RUN_VALIDATION != "true" ]' became a malformed test in that case
# (printing an error and proceeding to validate instead of skipping).
# Quoting with a default makes an unset/empty value skip validation,
# matching the intent of the check.
if [ "${RUN_VALIDATION:-}" != "true" ]; then
  echo "Skipping validation disk space..."
  exit 0
fi
if [ "$availableSpaceMB" -le "$minimumFreeSpaceMB" ]; then
  echo "Not enough disk space on the image (minimum available space: $minimumFreeSpaceMB MB)"
  exit 1
fi
|
mattgwagner/New-Machine
|
.github-actions/images/linux/scripts/installers/validate-disk-space.sh
|
Shell
|
mit
| 647 |
#!/bin/bash
# Copyright 2012 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
# This script rebuilds the time zone files using files
# downloaded from the ICANN/IANA distribution.
# Consult http://www.iana.org/time-zones for the latest versions.
# Versions to use.
CODE=2017c
DATA=2017c
set -e
rm -rf work
mkdir work
cd work
mkdir zoneinfo
# NOTE(review): downloads over plain HTTP — consider https for integrity.
curl -L -O http://www.iana.org/time-zones/repository/releases/tzcode$CODE.tar.gz
curl -L -O http://www.iana.org/time-zones/repository/releases/tzdata$DATA.tar.gz
tar xzf tzcode$CODE.tar.gz
tar xzf tzdata$DATA.tar.gz
# Turn off 64-bit output in time zone files.
# We don't need those until 2037.
perl -p -i -e 's/pass <= 2/pass <= 1/' zic.c
make CFLAGS=-DSTD_INSPIRED AWK=awk TZDIR=zoneinfo posix_only
# America/Los_Angeles should not be bigger than 1100 bytes.
# If it is, we probably failed to disable the 64-bit output, which
# triples the size of the files.
size=$(ls -l zoneinfo/America/Los_Angeles | awk '{print $5}')
if [ $size -gt 1200 ]; then
echo 'zone file too large; 64-bit edit failed?' >&2
exit 2
fi
# Repackage the generated zone files as an uncompressed (-0) zip two levels
# up, i.e. next to this script's working directory.
cd zoneinfo
rm -f ../../zoneinfo.zip
zip -0 -r ../../zoneinfo.zip *
cd ../..
echo
# Pass -work to keep the work/ directory for inspection.
if [ "$1" = "-work" ]; then
echo Left workspace behind in work/.
else
rm -rf work
fi
echo New time zone files in zoneinfo.zip.
|
christopher-henderson/Go
|
lib/time/update.bash
|
Shell
|
mit
| 1,366 |
#!/bin/bash
# Watch the site Sass entry point and recompile jingle-styles.css on change.
sass --watch app/theme/sass/_site.scss:app/theme/css/jingle-styles.css
|
idxos/jingle-jumble-app
|
sass-start.sh
|
Shell
|
mit
| 82 |
#!/bin/bash
# Download DASH modified Azure Storage lib
newJar="dash-azure-storage-2.2.0.jar"
newJarLocation="/tmp/"
wget --no-check-certificate -O "${newJarLocation}${newJar}" "https://www.dash-update.net/client/latest/StorageSDK2.0/${newJar}"
# Pull down all the shared cache tarballs so that we can replace the Azure Storage jar
mkdir -p ~/hdp-cache
# Review fix: guard the cd so a failure does not run the loop in the wrong dir.
cd ~/hdp-cache || exit 1
tarballs="$(hadoop fs -ls /hdp/apps/*/*/*.tar.gz | tr -s ' ' | cut -d ' ' -f8)"
for tar in $tarballs
do
  hadoop fs -copyToLocal "$tar" .
  tarname=$(basename "$tar")
  tarfile="$(readlink -f "$tarname")"
  tarprefix="${tarname%%.*}"
  mkdir "$tarprefix"
  tar -xzf "$tarfile" -C "$tarprefix"
  # Review fix: the -name pattern must be quoted; unquoted azure*storage*jar
  # would be glob-expanded by the shell if a matching file exists in the cwd.
  jarfiles="$(find . -name 'azure*storage*jar')"
  for jar in $jarfiles
  do
    dir=$(dirname "$(readlink -f "$jar")")
    echo "Replacing Azure storage jar in $dir"
    cp -f "${newJarLocation}${newJar}" "$dir"
    rm -f "$jar"
  done
  # Repack the tarball with the patched jar and push it back to HDFS.
  cd "$tarprefix" || exit 1
  tar -zcf "$tarfile" *
  cd ..
  hadoop fs -copyFromLocal -p -f "$tarfile" "$tar"
done
|
MicrosoftDX/Dash
|
Deployment/HDInsight/Linux/dash-linux-fixup-cache-files.sh
|
Shell
|
mit
| 940 |
#!/bin/bash
# mikrotik-backup script
# author tenhi ([email protected])
# Default variables ( may be overrided in custom config )
#### Connection ####################################
IDL="5s" # Default idle time
#### Backup variables ##############################
# NOTE(review): default secrets are hardcoded here; they can be overridden
# by the sourced config below, but consider removing the in-script defaults.
BKP_BINPWD="NvLB37zchdor9Y4E8KSpxibWHATfjstnw" # Default password for binary backup 33cr
BKP_EXPPWD="hGAEJKptcCznB2v8RaHkoxiSTYNFZ3suW" # Default password for export 33cr
ST_RTN="30" # Default retention time
#### Storage variables #############################
ST_ROOT="/mnt/bkp_share/mikrotik" # Default storage root
SC_USER=$(whoami) # default user for using script(need to chown dir)
SC_GROUP=$(whoami) # default group
ST_MODE="755"
#######################################################################################################################
# import config
## check if it's available ($1 = per-device config file; required, readable)
if [[ ( -z ${1} ) || ( ! -r ${1} ) ]]
then
printf '%s\n' "ERR: cannot read config file"
exit 1
fi
# shellcheck source=example.cfg
source "${1}"
#######################################################################################################################
# Functions
#### Utils #############################################################################################
# Absolute paths for external tools, resolved once at startup.
CMD_FIND=$(command -v find)
CMD_MV=$(command -v mv)
CMD_GZ=$(command -v gzip)
CMD_CHO=$(command -v chown)
CMD_CHM=$(command -v chmod)
CMD_MKD="$(command -v mkdir) -p "
CMD_RM=$(command -v rm)
CMD_DATE=$(date +%Y%m%d_%H%M) # date in format YYYYMMDD_HHmm
CMD_SSL=$(command -v openssl)
CMD_SSH=$(command -v ssh)
CMD_SCP=$(command -v scp)
# NOTE(review): TGT_HOSTNAME is presumably set by the sourced config — verify.
ST_FULL="${ST_ROOT}/${TGT_HOSTNAME}/" # full path to .backup (/root_storage/hostname/)
ST_ARCH="${ST_FULL}archive/" # full path to archive (/root_storage/hostname/archive)
TGT_BKPNAME_BIN="${TGT_HOSTNAME}_${CMD_DATE}.backup"
TGT_BKPNAME_EXP="${TGT_HOSTNAME}_${CMD_DATE}.export"
#### Defining functions ################################################################################
function fn_check_log {
# Create the shared logfile with its header when it does not exist yet.
# LOG is intentionally global — fn_check_readme references it.
LOG="${ST_ROOT}/LOG.txt"
# Nothing to do when the logfile is already present and readable.
if [[ -r ${LOG} && -n ${LOG} ]]
then
return 0
fi
printf '%s\n' \
'#######' \
"# logfile for ${0}" \
'# The format is:' \
'# DATE;STATE;FILENAME' \
'# author: tenhi([email protected])' \
'#######' ' ###' ' #' ' ' > $LOG
}
function fn_check_readme {
# Create the storage README (explains directory layout) once.
# Relies on LOG being set: fn_check_log runs first in the main flow.
README="${ST_ROOT}/README.txt"
# BUG FIX: the original tested "-z ${LOG}" — a copy-paste remnant from
# fn_check_log; README is the file being created here and it was just
# assigned, so only the readability check is meaningful.
if [[ ! -r "${README}" ]]
then
printf '%s\n' \
'#######' \
"# readme for ${0}" \
'# backups located in:' \
'# hostname/..' \
'# achives located in:' \
'# hostname/archive/...' \
"# logs in ${LOG}" \
'#######' ' ###' ' #' ' ' > "${README}"
fi
}
function fn_check_directory {
# Ensure the per-host storage tree (including archive/) exists with the
# configured owner and mode; abort the whole script on any failure.
local target="${ST_FULL}archive"
# Nothing to do when the tree is already present and readable.
if [[ -d "${target}" && -r "${target}" ]]
then
return 0
fi
${CMD_MKD} "${target}" || {
printf '%s\n' "ERR: cannot create dir ${ST_FULL}archive"
exit 1
}
${CMD_CHO} "${SC_USER}":"${SC_GROUP}" "${ST_FULL}" || {
printf '%s\n' "cannot chown ${ST_FULL} to ${SC_USER}:${SC_GROUP}"
exit 1
}
${CMD_CHM} ${ST_MODE} "${ST_FULL}" || {
printf '%s\n' "ERR: cannot chmod ${ST_MODE} for ${ST_FULL}"
exit 1
}
}
function fn_mikrotik_cleanup {
# Function for cleaning up target mikrotik
# Flushes the DNS cache and clears the console history on the remote
# RouterOS device. The main flow also uses this as a cheap connectivity
# probe: its exit status tells whether ssh to TGT_HOSTNAME works.
${CMD_SSH} "${TGT_HOSTNAME}" "ip dns cache flush"
${CMD_SSH} "${TGT_HOSTNAME}" "console clear-history"
}
function fn_backup_binary {
# Function for saving binary backup
# Creates an encrypted RouterOS .backup on the device, copies it to
# ${ST_FULL} over scp, then deletes it from the device. IDL (seconds,
# from the config) paces the steps so slow devices can finish writing.
T_BKPSTR="system backup save name=${TGT_BKPNAME_BIN} dont-encrypt=no password=${BKP_BINPWD}"
T_BKPCLN="file remove [find name=${TGT_BKPNAME_BIN}]"
# Put output result exec mikrotik command to /dev/null
${CMD_SSH} "${TGT_HOSTNAME}" "${T_BKPSTR}" > /dev/null # Initializing backup
sleep ${IDL} && ${CMD_SCP} "${TGT_HOSTNAME}":/"${TGT_BKPNAME_BIN}" "${ST_FULL}" # Copy file to storage
sleep ${IDL} && ${CMD_SSH} "${TGT_HOSTNAME}" "${T_BKPCLN}" # Remove created file on mikrotik
}
function fn_backup_export {
# Function for saving exported config
# Dumps "/export" from the device into a private temp file and stores it
# des3-encrypted as ${ST_FULL}${TGT_BKPNAME_EXP}.des3.
# FIX: use mktemp instead of the predictable /tmp/$RANDOM.export name —
# a guessable name in world-writable /tmp invites symlink/clobber races.
EXP_TMP_FILE=$(mktemp /tmp/mbkp_export.XXXXXX)
sleep ${IDL} && ${CMD_SSH} "${TGT_HOSTNAME}" export > "${EXP_TMP_FILE}"
# NOTE(review): -k puts the password on the openssl command line, visible
# in `ps`; consider "-pass env:BKP_EXPPWD" when openssl version permits.
${CMD_SSL} des3 -salt \
-k "${BKP_EXPPWD}" \
-in "${EXP_TMP_FILE}" \
-out "${ST_FULL}${TGT_BKPNAME_EXP}.des3"
${CMD_RM} -f "${EXP_TMP_FILE}"
}
function fn_backup_retention {
# Function for rotating old backups
# Move files older than ST_RTN days from ${ST_FULL} into ${ST_ARCH},
# then gzip anything in the archive that is not compressed yet.
# Search old backups only one directory, not tree
${CMD_FIND} "${ST_FULL}" -maxdepth 1 -mtime +"${ST_RTN}" -type f -exec "${CMD_MV}" {} "${ST_ARCH}" \;
# Add into archive only not gz files
${CMD_FIND} "${ST_ARCH}" -not -name "*.gz" -type f -exec "${CMD_GZ}" {} \;
}
function fn_log {
# Function for recording results to logfile
# Append one "DATE;STATE;FILENAME" line per artifact to $LOG, marking it
# "okay" when the file landed in ${ST_FULL} and "fail" otherwise.
# (All expansions quoted so paths with spaces cannot break the tests.)
# log about binary backup
if [[ -r "${ST_FULL}${TGT_BKPNAME_BIN}" ]]
then
printf '%s\n' "${CMD_DATE};okay;${TGT_BKPNAME_BIN}" >> "${LOG}"
else
printf '%s\n' "${CMD_DATE};fail;${TGT_BKPNAME_BIN}" >> "${LOG}"
fi
# log about text backup
if [[ -r "${ST_FULL}${TGT_BKPNAME_EXP}.des3" ]]
then
printf '%s\n' "${CMD_DATE};okay;${TGT_BKPNAME_EXP}.des3" >> "${LOG}"
else
printf '%s\n' "${CMD_DATE};fail;${TGT_BKPNAME_EXP}.des3" >> "${LOG}"
fi
}
##
# Start Execution
##
fn_check_directory # Checking and creating dirs
fn_check_log # Checking need of creating log-file
fn_check_readme # Checking need of creating readme
fn_backup_retention # Handling old backups
# init cleanup and check ssh connection
# fn_mikrotik_cleanup doubles as the connectivity probe: on ssh failure
# we log the (missing) artifacts as "fail" and abort before backing up.
if ! fn_mikrotik_cleanup
then
fn_log
printf '%s\n' "ERR: cannot establish ssh-connection"
exit 1
fi
sleep ${IDL} && fn_backup_binary # save binary backup
sleep ${IDL} && fn_backup_export # save exported config
sleep ${IDL} && fn_mikrotik_cleanup # Clean it again to hide commands
fn_log # Recording backup results to file
|
tenhishadow/mbkp
|
mbkp.sh
|
Shell
|
mit
| 6,323 |
# Ubersicht bar widget (macOS): walk the network services in priority
# order, find the first one whose device is active, then print the
# current Wi-Fi SSID to stdout (the widget reads stdout).
services=$(networksetup -listnetworkserviceorder | grep 'Hardware Port')
# Each line looks like: (Hardware Port: Wi-Fi, Device: en0)
# FIX: quote expansions and use read -r so names survive untouched;
# feed the here-string directly instead of the useless $(echo ...).
while read -r line; do
sname=$(echo "$line" | awk -F "(, )|(: )|[)]" '{print $2}')
sdev=$(echo "$line" | awk -F "(, )|(: )|[)]" '{print $4}')
# echo "Current service: $sname, $sdev, $currentservice"
if [ -n "$sdev" ]; then
if ifconfig "$sdev" 2>/dev/null | grep 'status: active' > /dev/null 2>&1; then
currentservice="$sname"
break
fi
fi
done <<< "$services"
if [ -n "$currentservice" ] ; then
# Column 24 onward of "Current Wi-Fi Network: <SSID>" is the SSID.
networksetup -getairportnetwork en0 | cut -c 24-
else
# Message intentionally goes to stdout so the bar displays it
# (the original ">&1" redirection was a no-op).
echo "No Wi-Fi Connection"
exit 1
fi
|
mikalcallahan/.dotfiles
|
ubersicht/bar.widget/scripts/wifi.sh
|
Shell
|
mit
| 676 |
#!/bin/bash -e
# Build prep: register the Sourcefabric APT repo, Ubuntu multiverse/
# universe, and (on Debian squeeze) backports, then detect CPU width.
# NOTE(review): "-e" in the shebang is lost when run as "bash compile.sh";
# a "set -e" inside the script would be more robust.
apt-get install -y --force-yes lsb-release sudo
dist=`lsb_release -is`
code=`lsb_release -cs`
cpu=`getconf LONG_BIT`
cpuvalue=
#enable apt.sourcefabric.org source
# Append the repo line only if absent; the set +e/-e bracket lets grep's
# non-zero "not found" status through without killing the -e script.
set +e
grep -E "deb http://apt.sourcefabric.org $code main" /etc/apt/sources.list
returncode=$?
set -e
if [ "$returncode" -ne "0" ]; then
echo "deb http://apt.sourcefabric.org $code main" >> /etc/apt/sources.list
fi
apt-get update
apt-get -y --force-yes install sourcefabric-keyring
apt-get update
if [ "$dist" = "Ubuntu" ]; then
set +e
grep -E "deb http://ca.archive.ubuntu.com/ubuntu/ $code multiverse" /etc/apt/sources.list
returncode=$?
set -e
if [ "$returncode" -ne "0" ]; then
echo "deb http://ca.archive.ubuntu.com/ubuntu/ $code multiverse" >> /etc/apt/sources.list
echo "deb http://ca.archive.ubuntu.com/ubuntu/ $code universe" >> /etc/apt/sources.list
fi
fi
#enable squeeze backports to get lame packages
if [ "$dist" = "Debian" -a "$code" = "squeeze" ]; then
set +e
grep -E "deb http://backports.debian.org/debian-backports squeeze-backports main" /etc/apt/sources.list
returncode=$?
set -e
if [ "$returncode" -ne "0" ]; then
echo "deb http://backports.debian.org/debian-backports squeeze-backports main" >> /etc/apt/sources.list
fi
fi
echo "System is $cpu bit..."
# Map word size to the Debian architecture label used by package names.
if [ "$cpu" = "64" ]; then
cpuvalue="amd64"
else
cpuvalue="i386"
fi
apt-get update
# Upgrade everything, keeping locally-modified config files, then pull
# the opus libraries and all OCaml/audio build dependencies.
apt-get -y --force-yes -o Dpkg::Options::="--force-confold" upgrade
apt-get -y --force-yes install libopus0 libopus-dev libopus-dbg libopus-doc
#obsoleted code start
#apt-get -y --force-yes install wget
#rm -f libopu*
#rm -f aacplus*
#wget http://apt.sourcefabric.org/misc/libopus_1.0.1/libopus-dbg_1.0.1~$code~sfo-1_$cpuvalue.deb
#wget http://apt.sourcefabric.org/misc/libopus_1.0.1/libopus-dev_1.0.1~$code~sfo-1_$cpuvalue.deb
#wget http://apt.sourcefabric.org/misc/libopus_1.0.1/libopus0_1.0.1~$code~sfo-1_$cpuvalue.deb
#wget http://packages.medibuntu.org/pool/free/a/aacplusenc/aacplusenc_0.17.5-0.0medibuntu1_$cpuvalue.deb
#obsoleted code end
apt-get -y --force-yes install git-core ocaml-findlib libao-ocaml-dev \
libportaudio-ocaml-dev libmad-ocaml-dev libtaglib-ocaml-dev libalsa-ocaml-dev \
libvorbis-ocaml-dev libladspa-ocaml-dev libxmlplaylist-ocaml-dev libflac-dev \
libxml-dom-perl libxml-dom-xpath-perl patch autoconf libmp3lame-dev \
libcamomile-ocaml-dev libcamlimages-ocaml-dev libtool libpulse-dev camlidl \
libfaad-dev libpcre-ocaml-dev libfftw3-3 dialog
# Ubuntu lucid's repos lack libvo-aacenc; skip it there.
if [ "$code" != "lucid" ]; then
apt-get -y --force-yes install libvo-aacenc-dev
fi
#dpkg -i libopus-dbg_1.0.1~$code~sfo-1_$cpuvalue.deb libopus-dev_1.0.1~$code~sfo-1_$cpuvalue.deb libopus0_1.0.1~$code~sfo-1_$cpuvalue.deb aacplusenc_0.17.5-0.0medibuntu1_$cpuvalue.deb
#for aac+
#rm -rf libaac*
#apt-get -y --force-yes install libfftw3-dev pkg-config autoconf automake libtool unzip
#wget http://217.20.164.161/~tipok/aacplus/libaacplus-2.0.2.tar.gz
#tar -xzf libaacplus-2.0.2.tar.gz
#cd libaacplus-2.0.2
#./autogen.sh --enable-shared --enable-static
#make
#make install
#ldconfig
#cd ..
#end of aac+
# Fresh shallow clone of liquidsoap-full, then enable the optional OCaml
# bindings we need in its PACKAGES list and build.
rm -rf liquidsoap-full
git clone https://github.com/savonet/liquidsoap-full --depth 1
cd liquidsoap-full
make init
make update
#tmp
#cd liquidsoap
#git checkout ifdef-encoder
#git merge master
#cd ..
#tmp end
# Uncommenting a "#ocaml-xxx" line in PACKAGES opts that binding in.
cp PACKAGES.minimal PACKAGES
sed -i "s/#ocaml-portaudio/ocaml-portaudio/g" PACKAGES
sed -i "s/#ocaml-alsa/ocaml-alsa/g" PACKAGES
sed -i "s/#ocaml-pulseaudio/ocaml-pulseaudio/g" PACKAGES
sed -i "s/#ocaml-faad/ocaml-faad/g" PACKAGES
sed -i "s/#ocaml-opus/ocaml-opus/g" PACKAGES
#sed -i "s/#ocaml-aacplus/ocaml-aacplus/g" PACKAGES
#sed -i "s/#ocaml-shine/ocaml-shine/g" PACKAGES
if [ "$code" != "lucid" ]; then
sed -i "s/#ocaml-voaacenc/ocaml-voaacenc/g" PACKAGES
fi
./bootstrap
./configure
make
# Install the resulting binary at the image root (Docker build context).
cp /liquidsoap-full/liquidsoap/src/liquidsoap /
|
dz0ny/liquidsoap-docker
|
compile.sh
|
Shell
|
mit
| 3,865 |
#!/bin/bash
# This script can be run to prompt for bluetooth devices to connect to
# Usage: bluetooth.sh connect|disconnect
# show this prompt with colour
# FIX: quote "$1" — the original unquoted test raised "unary operator
# expected" whenever the script was invoked without an argument.
if [ "$1" = "connect" ]; then
# blue if connecting
TEXT_COLOUR="#33B5E1"
else
# otherwise red
TEXT_COLOUR="#EF5253"
fi
theme="element selected.normal { text-color: $TEXT_COLOUR; }"
# Strip the "Device " prefix for display, let the user pick via rofi,
# then keep only the first field (the MAC address) of the selection.
select=$(bluetoothctl devices | sed 's/Device \(\w\)/\1/g' | rofi -dmenu -p "$1" -theme-str "$theme" -no-custom | sed 's/\([^ ]\+\).*/\1/g')
# Nothing chosen (Escape) — exit quietly.
[ -z "$select" ] && exit 0
case "$1" in
connect)
bluetoothctl connect "$select"
;;
disconnect)
bluetoothctl disconnect "$select"
;;
esac
|
jradtilbrook/dotfiles
|
files/home/local/bin/bluetooth.sh
|
Shell
|
mit
| 620 |
#!/bin/bash
set -e
# Convert the paired-end chr03 BAM alignments of each lung sample
# (ERR315326/341/346/353) back to two fastq files with bedtools.
desc="Convert back to fastq (lung)"
echo "+++++++++++++++++++++++++++++++++++++++++++"
echo "$desc"
echo "-------------------------------------------"
samples=(326 341 346 353)
for sample in "${samples[@]}"; do
echo "+++++++++++++++++++++++++++++++++++++++++++"
echo "Now $sample"
echo "-------------------------------------------"
run="ERR315${sample}"
bedtools bamtofastq -i "${run}/tophat_out/accepted_hits_chr03.bam" -fq "${run}/${run}_chr03_1.fastq" -fq2 "${run}/${run}_chr03_2.fastq" 2>/dev/null
done
|
SilicoSciences/seminar-bioinformatics-rna-seq
|
scripts/to-fastq-lung.sh
|
Shell
|
cc0-1.0
| 495 |
#!/bin/bash
set -e
# Glob matching every tophat output directory inside a sample directory;
# deliberately left unquoted where used so the pattern expands.
tophatOut=tophat_out*
function f_doIt {
# Remove temporary/intermediate tophat artifacts (tmp/, logs/, sorted
# BAMs, .bai indexes) from every tophat_out* directory under sample
# directory $1, touching only directories that contain accepted_hits.bam.
# NOTE: ${1} and $tophatOut stay unquoted in the 'for' so the glob
# expands; when nothing matches, the -f test skips the literal pattern.
echo "dollarOne: ${1}"
for f in ${1}/$tophatOut/accepted_hits.bam
do
echo "f: ${f}"
echo "+++++++++++++++++++++++++++++++++++++++++++"
echo "Cleaning $1/$tophatOut/"
echo "-------------------------------------------"
if [[ -f $f ]]; then
# ${f%/*} strips "/accepted_hits.bam", leaving the tophat_out dir.
echo "hieroglyph: ${f%/*}"
rm -rf "${f%/*}"/tmp
rm -rf "${f%/*}"/logs
rm -f "${f%/*}"/*_sorted.bam
rm -f "${f%/*}"/*.bai
fi
done
}
# Process each sample directory given on the command line.
# FIX: the original "f_doIt $1 && shift" skipped the shift whenever
# f_doIt returned non-zero, spinning forever on the same argument; with
# set -e in effect a failure now aborts the script instead. "$1" is also
# quoted so paths containing spaces survive intact.
while [ "$1" != "" ]; do
f_doIt "$1"
shift
done
|
SilicoSciences/seminar-bioinformatics-rna-seq
|
scripts/cleanup-tophat.sh
|
Shell
|
cc0-1.0
| 487 |
#!/usr/bin/perl
# Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; version 2
# of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the Free
# Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
# MA 02110-1301, USA
# ****************************
package MySQLaccess;
#use strict;
use File::Temp qw(tempfile tmpnam);
use Fcntl;
# Configuration lives in package-level globals ('use strict' is off);
# this BEGIN block fills in static defaults, tool paths and table names.
BEGIN {
# ****************************
# static information...
$VERSION = "2.06, 20 Dec 2000";
$0 =~ m%/([^/]+)$%o;
$script = $1;
$script = 'MySQLAccess' unless $script;
$script_conf = "$script.conf";
$script_log = $ENV{'HOME'}."/$script.log";
# ****************************
# information on MariaDB
$MYSQL = '@bindir@/mysql'; # path to mysql executable
$SERVER = '3.21';
$MYSQL_OPT = ' --batch --unbuffered';
$ACCESS_DB = 'mysql'; # name of DB with grant-tables
$ACCESS_H = 'host'; #
$ACCESS_U = 'user'; #
$ACCESS_D = 'db'; #
# Add/Edit privileges
# Working copies (_tmp) and one-level-undo copies (_backup) of the grant
# tables, used by the --copy/--preview/--commit/--rollback actions.
$ACCESS_H_TMP = 'host_tmp';
$ACCESS_U_TMP = 'user_tmp';
$ACCESS_D_TMP = 'db_tmp';
$ACCESS_H_BCK = 'host_backup';
$ACCESS_U_BCK = 'user_backup';
$ACCESS_D_BCK = 'db_backup';
$DIFF = '/usr/bin/diff';
$MYSQLDUMP = '@bindir@/mysqldump';
#path to mysqldump executable
$MYSQLADMIN= 'http://foobar.com/MySQLadmin';
#URL of CGI for manipulating
#the temporary grant-tables
}
# Delete the temporary credentials file on interpreter exit (kept when
# debugging so it can be inspected afterwards).
END {
unlink $MYSQL_CNF if defined $MYSQL_CNF and not $DEBUG;
}
$INFO = <<"_INFO";
--------------------------------------------------------------------------
mysqlaccess (Version $VERSION)
~~~~~~~~~~~
Copyright (C) 1997,1998 Yves.Carlier\@rug.ac.be
University of Ghent (RUG), Belgium
Administratieve Informatieverwerking (AIV)
report the access-privileges for a USER from a HOST to a DB
Many thanks go to <monty\@mysql.com> and <psmith\@BayNetworks.COM>
for their suggestions, debugging and patches.
use `$script -?' to get more information on available options.
From version 2.0x, $script can also be used through a WEB-browser
if it is ran as a CGI-script. (See the release-notes)
--------------------------------------------------------------------------
_INFO
$OPTIONS = <<_OPTIONS;
Usage: $script [host [user [db]]] OPTIONS
-?, --help display this helpscreen and exit
-v, --version print information on the program `$script'
-u, --user=# username for logging in to the db
-p, --password=# validate password for user
-h, --host=# name or IP-number of the host
-d, --db=# name of the database
-U, --superuser=# connect as superuser
-P, --spassword=# password for superuser
-H, --rhost=# remote MariaDB-server to connect to
--old_server connect to old MariaDB-server (before v3.21) which
does not yet know how to handle full where clauses.
-b, --brief single-line tabular report
-t, --table report in table-format
--relnotes print release-notes
--plan print suggestions/ideas for future releases
--howto some examples of how to run `$script'
--debug=N enter debuglevel N (0..3)
--copy reload temporary grant-tables from original ones
--preview show differences in privileges after making
changes in (temporary) grant-tables
--commit copy grant-rules from temporary tables to grant-tables
(!don't forget to do an mysqladmin reload)
--rollback undo the last changes to the grant-tables.
Note:
At least the user and the db must be given (even with wildcards)
If no host is given, `localhost' is assumed
Wilcards (*,?,%,_) are allowed for host, user and db, but be sure
to escape them from your shell!! (ie type \\* or '*')
_OPTIONS
$RELEASE = <<'_RELEASE';
Release Notes:
-------------
0.1-beta1: internal
- first trial.
0.1-beta2: (1997-02-27)
- complete rewrite of the granting-rules, based on the documentation
found in de FAQ.
- IP-number and name for a host are equiv.
0.1-beta3: (1997-03-10)
- more information
- 'localhost' and the name/ip of the local machine are now equiv.
0.1-beta4: (1997-03-11)
- inform the user if he has not enough priv. to read the mysql db
1.0-beta1: (1997-03-12)
suggestions by Monty:
- connect as superuser with superpassword.
- mysqlaccess could also notice if all tables are empty. This means
that all user have full access!
- It would be nice if one could optionally start mysqlaccess without
any options just the arguments 'user db' or 'host user db', where
host is 'localhost' if one uses only two arguments.
1.0-beta2: (1997-03-14)
- bugfix: translation to reg.expr of \_ and \%.
- bugfix: error in matching regular expression and string given
by user which resulted in
'test_123' being matched with 'test'
1.0-beta3: (1997-03-14)
- bugfix: the user-field should not be treated as a sql-regexpr,
but as a plain string.
- bugfix: the host-table should not be used if the host isn't empty in db
or if the host isn't emty in user
(Monty)
1.0-beta4: (1997-03-14)
- bugfix: in an expression "$i = $j or $k", the '=' binds tighter than the or
which results in problems...
(by Monty)
- running mysqlaccess with "perl -w" gives less warnings... ;-)
1.0-beta5: (1997-04-04)
- bugfix: The table sorting was only being applied to the "user" table; all
the tables need to be sorted. Rewrote the sort algorithm, and
the table walk algorithm (no temp file anymore), and various
other cleanups. I believe the access calculation is 100% correct.
(by Paul D. Smith <psmith\@baynetworks.com>)
- Allow the debug level to be set on the cmd line with --debug=N.
(by Paul D. Smith <psmith\@baynetworks.com>)
- More -w cleanups; should be totally -w-clean.
(by Paul D. Smith <psmith\@baynetworks.com>)
1.1-beta1: (1997-04-xx)
1.1-beta2: (1997-04-11)
- new options:
--all_users : report access-rights for all possible users
--all_dbs : report access-rights for all possible dbs
--all_hosts : report access-rights for all possible hosts
--brief : as brief as possible, don't mention notes,warnings and rules
--password : validate password for user
- layout: long messages are wrapped on the report.
- functionality:
more descriptive notes and warnings
wildcards (*,?) are allowed in the user,host and db options
setting xxxx=* is equiv to using option --all_xxxx
note: make sure you escape your wildcards, so they don't get
interpreted by the shell. use \* or '*'
- bugfix: Fieldnames which should be skipped on the output can now have
a first capital letter.
- bugfix: any option with a '.' (eg ip-number) was interpreted as
a wildcard-expression.
- bugfix: When no entry was found in the db-table, the default accessrights are
N, instead of the faulty Y in a previous version.
1.1-beta-3 : (1997-04-xx)
1.1-beta-4 : (1997-04-xx)
1.1-beta-5 : (1997-04-xx)
1.1 : (1997-04-28)
- new options:
--rhost : name of mysql-server to connect to
--plan : print suggestions/ideas for future releases
--relnotes : display release-notes
--howto : display examples on how to use mysqlaccess
--brief : single-line tabular output
- functionality/bugfix:
* removed options --all_users,--all_dbs,--all_hosts, which
were redundant with the wildcard-expressions for the corresponding
options. They made the processing of the commandline too painful
and confusing ;-)
(suggested by psmith)
* redefined the option --brief, which now gives a single-line
tabular output
* Now we check if the right version of the mysql-client is used,
since we might use an option not yet implemented in an
older version (--unbuffered, since 3.0.18)
Also the error-messages the mysql-client reports are
better interpreted ;-)
* Wildcards can now be given following the SQL-expression
(%,_) and the Regular-expression (*,?) syntax.
- speed: we now open a bidirectional pipe to the mysql-client, and keep
it open throughout the whole run. Queries are written to,
and the answers read from the pipe.
(suggested by monty)
- bugfixes:
* the Rules were not properly reset over iterations
* when in different tables the field-names were not identical,
eg. Select_priv and select_priv, they were considered as
definitions of 2 different access-rights.
* the IP-number of a host with a name containing wildcards should
not be searched for in Name2IP and IP2Name.
* various other small things, pointed out by <monty> and <psmith>
1.2 : (1997-05-13)
- bugfix:
* Fixed bug in acl with anonymous user: Now if one gets accepted by the
user table as a empty user name, the user name is set to '' when
checking against the 'db' and 'host' tables. (Bug fixed in MySQL3.20.19)
1.2-1 : (1997-xx-xx)
- bugfix:
* hashes should be initialized with () instead of {} <psmith>
* "my" variable $name masks earlier declaration in same scope,
using perl 5.004 <????>
1.2-2 : (1997-06-10)
2.0p1-3 : (1997-10-xx)
- new
* packages
* log-file for debug-output : /tmp/mysqlaccess.log
* default values are read from a configuration file $script.conf
first this file is looked for in the current directory; if not
found it is looked for in @sysconfdir@
Note that when default-values are given, these can't get overridden
by empty (blanc) values!
* CGI-BIN version with HTML and forms interface. Simply place the
script in an ScriptAliased directory, make the configuration file
available in the that directory or in @sysconfdir@, and point your browser
to the right URL.
* copy the grant-rules to temporary tables, where you are safe to
play with them.
* preview changes in privileges after changing grant-rules,
before taking them into production
* copy the new grant-rules from the temporary tables back to the
grant-tables.
* Undo all changes made in the grant-tables (1-level undo).
-new options:
* --table : as opposite of the --brief option.
* --copy : (re)load temporary grant-tables from original ones.
* --preview : preview changes in privileges after changing
some or more entries in the grant-tables.
* --commit : copy grant-rules from temporary tables to grant-tables
(!don't forget to do an mysqladmin reload)
* --rollback: undo the last changes to the grant-tables.
- bugfix:
* if the table db is empty, mysqlaccess freezed
(by X Zhu <[email protected]>)
2.0 : (1997-10-09)
- fixed some "-w" warnings.
- complain when certain programs and paths can't be found.
2.01 : (1997-12-12)
- bugfix:
* rules for db-table where not calculated and reported correctly.
2.02 : (1998-01-xx)
- bugfix:
* Privileges of the user-table were not AND-ed properly with the
other privileges. (reported by monty)
- new option:
* --old_server: mysqlaccess will now use a full where clause when
retrieving information from the MySQL-server. If
you are connecting to an old server (before v3.21)
then use the option --old_server.
2.03 : (1998-02-27)
- bugfix:
* in Host::MatchTemplate: incorrect match if host-field was left empty.
2.04-alpha1 : (2000-02-11)
Closes vulnerability due to former implementation requiring passwords
to be passed on the command line.
- functionality
Option values for --password -p -spassword -P may now be omitted from
command line, in which case the values will be prompted for.
(fix supplied by Steve Harvey <[email protected]>)
2.05: (2000-02-17) Monty
Moved the log file from /tmp to ~
2.06: Don't print '+++USING FULL WHERE CLAUSE+++'
_RELEASE
$TODO = <<_TODO;
Plans:
-----
-a full where clause is use now. How can we handle older servers?
-add some more functionality for DNS.
-select the warnings more carefuly.
>> I think that the warnings should either be enhanced to _really_
>> understand and report real problems accurately, or restricted to
>> only printing things that it knows with 100% certainty. <psmith)
>> Why do I have both '%' and 'any_other_host' in there? Isn't that
>> the same thing? I think it's because I have an actual host '%' in
>> one of my tables. Probably the script should catch that and not
>> duplicate output. <psmith>
_TODO
# From the FAQ: the Grant-algorithm
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# The host table is mainly to maintain a list of "secure" servers.
# At TCX hosts contain a list of all machines on local network. These are granted
# all privileges.
# Technically the user grant is calculated by:
#
# 1.First sort all entries by host by putting host without wildcards first,
# after this host with wildcards and entries with host = ".
# Under each host sort user by the same criterias.
# 2.Get grant for user from the "db" table.
# 3.If hostname is "empty" for the found entry, AND the privileges with
# the privileges for the host in "host" table.
# (Remove all which is not "Y" in both)
# 4.OR (add) the privileges for the user from the "user" table.
# (add all privileges which is "Y" in "user")
#
# When matching, use the first found match.
#
# -----------------------------------------------------------------------------------
$HOWTO = <<_HOWTO;
Examples of how to call $script:
~~~~~~~~
1)Calling $script with 2 arguments:
\$ $script root mysql
->report rights of user root logged on at the local host in db mysql
Access-rights
for USER 'root', from HOST 'localhost', to DB 'mysql'
+-----------------+---+ +-----------------+---+
| select_priv | Y | | drop_priv | Y |
| insert_priv | Y | | reload_priv | Y |
| update_priv | Y | | shutdown_priv | Y |
| delete_priv | Y | | process_priv | Y |
| create_priv | Y | | file_priv | Y |
+-----------------+---+ +-----------------+---+
BEWARE: Everybody can access your DB as user 'root'
: WITHOUT supplying a password. Be very careful about it!!
The following rules are used:
db : 'No matching rule'
host : 'Not processed: host-field is not empty in db-table.'
user : 'localhost','root','','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y'
2)Calling $script with 3 arguments:
\$ $script foo.bar nobody Foo
->report rights of user root logged in at machine foobar to db Foo
Access-rights
for USER 'nobody', from HOST 'foo.bar', to DB 'Foo'
+-----------------+---+ +-----------------+---+
| select_priv | Y | | drop_priv | N |
| insert_priv | Y | | reload_priv | N |
| update_priv | Y | | shutdown_priv | N |
| delete_priv | Y | | process_priv | N |
| create_priv | N | | file_priv | N |
+-----------------+---+ +-----------------+---+
BEWARE: Everybody can access your DB as user 'nobody'
: WITHOUT supplying a password. Be very careful about it!!
The following rules are used:
db : 'foo.bar','Foo','nobody','Y','Y','Y','N','N','N'
host : 'Not processed: host-field is not empty in db-table.'
user : 'foo.bar','nobody','','N','N','N','Y','N','N','N','N','N','N'
3)Using wildcards:
\$ $script \\* nobody Foo --brief
->report access-rights of user nobody from all machines to db Foo,
and use a matrix-report.
Sel Ins Upd Del Crea Drop Reld Shut Proc File Host,User,DB
---- ---- ---- ---- ---- ---- ---- ---- ---- ---- --------------------
Y Y Y Y N N N N N N localhost,nobody,Foo
N N N N N N N N N N %,nobody,Foo
N N N N N N N N N N any_other_host,nobody,Foo
_HOWTO
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
# START OF THE PROGRAM #
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
use Getopt::Long;
use Sys::Hostname;
use IPC::Open3;
# ****************************
# debugging flag
# can be set to 0,1,2,3
# a higher value gives more info
# ! this can also be set on the command-line
$DEBUG = 0;
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++>8
# Normally nothing should be changed beneath this line
# ****************************
# no caching on STDOUT
$|=1;
# NOTE(review): tmpnam() only reserves a (predictable) name; verify the
# file is later opened with O_EXCL in WriteTempConfigFile.
$MYSQL_CNF = tmpnam();
# Option groups written to the temporary my.cnf-style credentials file,
# one section per client program.
%MYSQL_CNF = (client => { },
mysql => { },
mysqldump => { },
);
# Placeholder identifiers used when previewing privileges for entries
# that do not exist yet.
$NEW_USER = 'ANY_NEW_USER';
$NEW_DB = 'ANY_NEW_DB' ;
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% #
# mysqlaccess: #
# ~~~~~~~~~~~ #
# Lets get to it, #
# and start the program by processing the parameters #
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% #
($CMD,$CGI) = GetMode();
# ****************************
# the copyright message should
# always be printed (once)
MySQLaccess::Report::Print_Header();
# *****************************
# Read configuration-file
# Searched in the current directory first, then in the build-time
# @prefix@ and @sysconfdir@ locations (autoconf placeholders).
MySQLaccess::Debug::Print(1, "Reading configuration file...");
if (-f "./$script_conf") {
require "./$script_conf";
}
elsif (-f "@prefix@/$script_conf") {
require "@prefix@/$script_conf";
}
elsif (-f "@sysconfdir@/$script_conf") {
require "@sysconfdir@/$script_conf";
}
# ****************************
# Read in all parameters
if ($MySQLaccess::CMD) { #command-line version
# ----------------------------
# Get options from commandline
$Getopt::Long::ignorecase=0; #case sensitive options
if ( grep(/\-\?/,@ARGV) ) { MySQLaccess::Report::Print_Usage(); exit 0; }
GetOptions("help" => \$Param{'help'}
,"host|h=s" => \$Param{'host'}
,"user|u=s" => \$Param{'user'}
,"password|p:s" => \$Param{'password'}
,"db|d=s" => \$Param{'db'}
,"superuser|U=s" => \$Param{'superuser'}
,"spassword|P:s" => \$Param{'spassword'}
,"rhost|H=s" => \$Param{'rhost'}
,"old_server" => \$Param{'old_server'}
,"debug=i" => \$Param{'DEBUG'}
,"brief|b" => \$Param{'brief'}
,"table|t" => \$Param{'table'}
,"relnotes" => \$Param{'relnotes'}
,"plan" => \$Param{'plan'}
,"howto" => \$Param{'howto'}
,"version|v" => \$Param{'version'}
,"preview" => \$Param{'preview'}
,"copy" => \$Param{'copy'}
,"commit" => \$Param{'commit'}
,'rollback' => \$Param{'rollback'}
);
# -----------------------------
# set DEBUG
$DEBUG = $Param{'DEBUG'} if ($Param{'DEBUG'}>=$DEBUG);
# -----------------------------
# check for things which aren't
# declared as options:
# 2 arguments: (user,db) -> ('localhost','user','db')
if ($#ARGV == 1) {
MySQLaccess::Debug::Print(2,"$script called with 2 arguments:");
$Param{'host'} = $Param{'host'} || 'localhost';
$Param{'user'} = $ARGV[0] || $Param{'user'};
$Param{'db'} = $ARGV[1] || $Param{'db'};
}
# 3 arguments: (host,user,db)
if ($#ARGV == 2) {
MySQLaccess::Debug::Print(2,"$script called with 3 arguments:");
$Param{'host'} = $ARGV[0] || $Param{'host'};
$Param{'user'} = $ARGV[1] || $Param{'user'};
$Param{'db'} = $ARGV[2] || $Param{'db'};
}
# -------------------------------------
# prompt for user password if requested
# ("-p" with no value leaves an empty string, triggering an interactive
# prompt so the password never shows up in shell history or `ps`)
if ( defined($Param{'password'}) && length($Param{'password'}) == 0 ) {
$Param{'password'} = PromptPass(
"Password for MySQL user $Param{'user'}: ");
}
}
# CGI invocation: the same options arrive as query parameters (long or
# single-letter names); values already in %Param (from the config file)
# act as fallbacks.
if ($MySQLaccess::CGI) { #CGI-version
require CGI;
$Q = new CGI;
$Param{'help'} = $Q->param('help') ;
$Param{'host'} = $Q->param('host') || $Q->param('h') || $Param{'host'};
$Param{'user'} = $Q->param('user') || $Q->param('u') || $Param{'user'};
$Param{'db'} = $Q->param('db') || $Q->param('d') || $Param{'db'};
$Param{'password'} = $Q->param('password') || $Q->param('p') || $Param{'password'};
$Param{'superuser'} = $Q->param('superuser') || $Q->param('U') || $Param{'superuser'};
$Param{'spassword'} = $Q->param('spassword') || $Q->param('P') || $Param{'spassword'};
$Param{'rhost'} = $Q->param('rhost') || $Q->param('H') || $Param{'rhost'};
$Param{'old_server'}= $Q->param('old_server')|| $Param{'old_server'};
$Param{'debug'} = $Q->param('debug') || $Param{'debug'};
$Param{'brief'} = $Q->param('brief') || $Param{'brief'};
$Param{'table'} = $Q->param('table') || $Param{'table'};
$Param{'relnotes'} = $Q->param('relnotes');
$Param{'plan'} = $Q->param('plan');
$Param{'howto'} = $Q->param('howto');
$Param{'version'} = $Q->param('version') ? $Q->param('version') : $Q->param('v');
$Param{'edit'} = $Q->param('edit');
$Param{'preview'} = $Q->param('preview');
$Param{'copy'} = $Q->param('copy');
$Param{'commit'} = $Q->param('commit');
$Param{'rollback'} = $Q->param('rollback');
# -----------------------------
# set DEBUG
$DEBUG = $Q->param('debug') if ($Q->param('debug')>=$DEBUG);
}
# ----------------------
# brief and table-format
# exclude each-other
# table-format is prefered
if (defined($Param{'table'})) { undef($Param{'brief'}); }
# Any of the grant-table editing actions implies edit mode.
if (defined($Param{'preview'}) or
defined($Param{'copy'}) or
defined($Param{'commit'}) or
defined($Param{'rollback'}) ) { $Param{'edit'}='on'; }
# ----------------------
# if no host is given
# assume we mean 'localhost'
if (!defined($Param{'host'})) { $Param{'host'}='localhost'; }
# ----------------------
# perform some checks
# -> eliminate 'broken pipe' error
# Verify the external helper binaries exist before any pipes are opened.
push(@MySQLaccess::Grant::Error,'not_found_mysql') if !(-x $MYSQL);
push(@MySQLaccess::Grant::Error,'not_found_diff') if !(-x $DIFF);
push(@MySQLaccess::Grant::Error,'not_found_mysqldump') if !(-x $MYSQLDUMP);
if (@MySQLaccess::Grant::Error) {
MySQLaccess::Report::Print_Error_Messages() ;
exit 0;
}
#-----------------------
# get info/help if necc.
# Each informational screen prints, closes down and exits immediately.
$print_usage=1;
if ( defined($Param{'version'}) ) {
MySQLaccess::Report::Print_Version();
$print_usage=0;
MySQLaccess::Report::Print_Footer();
MySQLaccess::DB::CloseConnection();
exit 0;
# exit 0;
}
if ( defined($Param{'relnotes'}) ) {
MySQLaccess::Report::Print_Relnotes();
$print_usage=0;
MySQLaccess::Report::Print_Footer();
MySQLaccess::DB::CloseConnection();
exit 0;
# exit 0;
}
if ( defined($Param{'plan'}) ) {
MySQLaccess::Report::Print_Plans();
$print_usage=0;
MySQLaccess::Report::Print_Footer();
MySQLaccess::DB::CloseConnection();
exit 0;
# exit 0;
}
if ( defined($Param{'howto'}) ) {
MySQLaccess::Report::Print_HowTo();
$print_usage=0;
MySQLaccess::Report::Print_Footer();
MySQLaccess::DB::CloseConnection();
exit 0;
# exit 0;
}
# -----------------------------
# generate a help-screen in CMD-mode
# or a blanc form in CGI-mode
# user, host and db are all mandatory (wildcards allowed).
if ( defined($Param{'help'})
or !defined($Param{'user'})
or !defined($Param{'host'})
or !defined($Param{'db'})
) {
push(@MySQLaccess::Grant::Error,'user_required') unless defined($Param{'user'});
push(@MySQLaccess::Grant::Error,'db_required') unless defined($Param{'db'});
push(@MySQLaccess::Grant::Error,'host_required') unless defined($Param{'host'});
MySQLaccess::Report::Print_Usage() if $print_usage;
exit 0;
}
# ----------------------------
# get hostname and local-ip
# for localhost
$localhost = MySQLaccess::Host::LocalHost();
$local_ip = MySQLaccess::Host::Name2IP($localhost);
$MySQLaccess::Host::localhost = MySQLaccess::Host::LocalHost();
$MySQLaccess::Host::local_ip = MySQLaccess::Host::Name2IP($localhost);
MySQLaccess::Debug::Print(3, "localhost name=$localhost, ip=$local_ip");
#-----------------------------------
# version of MySQL-server to connect
# to determine use of full where clause
# (--old_server: pre-3.21 servers cannot handle full where clauses)
$MySQLaccess::Host::SERVER = $Param{'old_server'} ? '3.20' : $SERVER;
#---------------------------------
# create the config file for mysql and mysqldump
# to avoid passing authentication info on the command line
#
MergeConfigFiles();
die "Unsafe config file found: $unsafeConfig\n" if $unsafeConfig;
# Superuser credentials, when given, go into the temp option file for
# both client programs; an empty -P value prompts interactively.
if (defined($Param{'superuser'})) {
$MYSQL_CNF{'mysql'}{'user'} = $Param{'superuser'};
$MYSQL_CNF{'mysqldump'}{'user'} = $Param{'superuser'};
}
if (defined($Param{'spassword'})) {
if ( $CMD && length($Param{'spassword'}) == 0 ) {
$Param{'spassword'} =
PromptPass("Password for MySQL superuser $Param{'superuser'}: ");
}
if ( length($Param{'spassword'}) > 0 ) {
$MYSQL_CNF{'mysql'}{'password'} = $Param{'spassword'};
$MYSQL_CNF{'mysqldump'}{'password'} = $Param{'spassword'};
}
}
WriteTempConfigFile();
#---------------------------------
# Inform user if he has not enough
# privileges to read the access-db
if ( $nerror=MySQLaccess::DB::OpenConnection() ) {
MySQLaccess::Report::Print_Error_Access($nerror);
exit 0;
}
# -----------------------
# Read MySQL ACL-files
# Grant tables are always read; the temporary copies only in edit mode.
if ($nerror=MySQLaccess::Grant::ReadTables()) {
MySQLaccess::Report::Print_Error_Access($nerror);
exit 0;
};
if ($Param{'edit'} and $nerror=MySQLaccess::Grant::ReadTables('tmp')) {
MySQLaccess::Report::Print_Error_Access($nerror);
exit 0;
}
#---------------------------------
# reload temporay grant-tables
# with data from original ones
if ( defined($Param{'copy'}) ) {
$nerror=MySQLaccess::DB::LoadTmpTables();
if ($nerror) {
MySQLaccess::Report::Print_Error_Access($nerror);
exit 0;
}
my $msg = "The grant-rules are copied from the grant-tables to\n"
. "the temporary tables.";
MySQLaccess::Report::Print_Message([$msg]);
# MySQLaccess::Report::Print_Footer();
# MySQLaccess::DB::CloseConnection();
# exit 0;
}
#---------------------------------
# preview result of changes in the
# grant-tables
if ( defined($Param{'preview'}) ) {
$aref=MySQLaccess::Grant::Diff_Privileges();
MySQLaccess::Report::Print_Diff_ACL($aref);
# MySQLaccess::Report::Print_Footer();
# MySQLaccess::DB::CloseConnection();
# exit 0;
}
#---------------------------------
# reload grant-tables
# with data from temporary tables
if ( defined($Param{'commit'}) ) {
if ($nerror = MySQLaccess::DB::CommitGrantTables()) {
MySQLaccess::Report::Print_Error_Access($nerror);
exit 0;
}
my $msg = "The grant-rules have been copied from the temporary tables\n"
. "to the grant-tables.";
my $msg1= "Don't forget to do an 'mysqladmin reload' before these\n"
. "changes take effect.";
my $msg2= "A backup-version of your original grant-rules are saved in the\n"
. "backup-tables, so you can always perform a 1-level rollback.";
MySQLaccess::Report::Print_Message([$msg,$msg1,$msg2]);
# MySQLaccess::Report::Print_Footer();
# MySQLaccess::DB::CloseConnection();
# exit 0;
}
#---------------------------------
# restore previous grant-rules
# with data from backup tables
if ( defined($Param{'rollback'}) ) {
if ($nerror = MySQLaccess::DB::RollbackGrantTables()) {
MySQLaccess::Report::Print_Error_Access($nerror);
exit 0;
}
my $msg = "The old grant-rules have been copied back from the backup tables\n"
. "to the grant-tables.";
my $msg1= "Don't forget to do an 'mysqladmin reload' before these\n"
. "changes take effect.";
MySQLaccess::Report::Print_Message([$msg,$msg1]);
# MySQLaccess::Report::Print_Footer();
# MySQLaccess::DB::CloseConnection();
# exit 0;
}
#----------------------------------
# show edit-taskbar
# NOTE(review): both branches below are identical; the CGI/CMD split
# looks vestigial — candidate for simplification after verification.
if ( defined($Param{'edit'})) {
if ($MySQLaccess::CGI ) {
MySQLaccess::Report::Print_Edit();
$print_usage=0;
MySQLaccess::Report::Print_Footer();
MySQLaccess::DB::CloseConnection();
exit 0;
}
else {
MySQLaccess::Report::Print_Edit();
$print_usage=0;
MySQLaccess::Report::Print_Footer();
MySQLaccess::DB::CloseConnection();
exit 0;
}
}
# -----------------------------
# Build list of users,dbs,hosts
# to process...
# Wildcards in the requested names expand to all matching entries.
@all_dbs = @{MySQLaccess::DB::Get_All_dbs($Param{'db'})};
@all_users = @{MySQLaccess::DB::Get_All_users($Param{'user'})};
@all_hosts = @{MySQLaccess::DB::Get_All_hosts($Param{'host'})};
#if EDIT-mode
#@all_dbs_tmp = @{MySQLaccess::DB::Get_All_dbs($Param{'db'},'tmp')};
#@all_users_tmp = @{MySQLaccess::DB::Get_All_users($Param{'user'},'tmp')};
#@all_hosts_tmp = @{MySQLaccess::DB::Get_All_hosts($Param{'host'},'tmp')};
# -----------------------------
# Report access-rights for each
# tuple (host,user,db)
#$headers=0;
my %Access = ();
foreach $host (@all_hosts) {
foreach $user (@all_users) {
foreach $db (@all_dbs) {
MySQLaccess::Grant::Initialize();
%Access = MySQLaccess::Grant::Get_Access_Rights($host,$user,$db);
MySQLaccess::Report::Print_Access_rights($host,$user,$db,\%Access);
}
}
}
# -----------------------------
# End script
MySQLaccess::Report::Print_Footer();
MySQLaccess::DB::CloseConnection();
exit 0;
#############################################################
# FUNCTIONS #
###############
sub GetMode {
  # Decide whether the script runs from the command line or as a CGI
  # script: a CGI environment is recognised by the presence of the
  # HTTP_HOST environment variable.  Returns the pair
  # (command_line_flag, cgi_flag); exactly one of the two is 1.
  my $is_cgi = defined($ENV{'HTTP_HOST'}) ? 1 : 0;
  my $is_cmd = $is_cgi ? 0 : 1;
  return ($is_cmd,$is_cgi);
}
# ================================
# sub PromptPass
# prompt tty for a password
# ================================
sub PromptPass {
  # Ask for a password on the terminal with echoing turned off.
  # $prompt is printed verbatim; the entered line (newline stripped)
  # is returned.
  my ($prompt) = @_;
  # sanitize the environment before shelling out to stty
  $ENV{PATH}  = "/bin:/usr/bin";
  $ENV{IFS}   = " \t\n";
  $ENV{SHELL} = "/bin/sh";
  system "stty -echo";
  print $prompt;
  my $password = <STDIN>;
  chomp($password);
  print "\n";
  system "stty echo";
  return $password;
}
# =================================
# sub CheckUnsafeFile
# tell if a config file containing a password is unsafe
# =================================
sub CheckUnsafeFile {
  # A config file holding a password is "unsafe" when it is owned by
  # somebody other than the current user, when group/other can read or
  # write it, or when any directory on its path is unsafe.
  # Returns 1 for unsafe, 0 for safe.
  my ($path) = @_;
  my @st   = stat($path);
  my $mode = $st[2];
  my $uid  = $st[4];
  return 1 if $uid != $<;      # owned by somebody other than us
  return 1 if $mode & 066;     # readable or writable by group/other
  # strip the filename and vet every directory on the way up
  $path =~ s#/[^/]+$##;
  return CheckUnsafeDir($path) if (length $path) > 0;
  return 0;
}
# =================================
# sub CheckUnsafeDir
# tell if a directory is unsafe
# =================================
sub CheckUnsafeDir {
  # A directory is unsafe when it is owned by neither the current user
  # nor root, or when it is group/other writable without the sticky
  # bit set.  The test is applied recursively to all parent
  # directories.  Returns 1 for unsafe, 0 for safe.
  my ($path) = @_;
  my @st   = stat($path);
  my $mode = $st[2];
  my $uid  = $st[4];
  # not owned by me and not owned by root -> unsafe
  return 1 if ($uid != $<) && ($uid != 0);
  if ( $mode & 022 ) {               # writable by group or other...
    return 1 unless $mode & 01000;   # ...tolerated only with sticky bit
  }
  $path =~ s#/[^/]+$##;              # move one level up
  return CheckUnsafeDir($path) if (length $path) > 0;
  return 0;
}
# =================================
# sub MergeConfigFile
# merge data from .cnf file
# =================================
sub MergeConfigFile {
    # Parse one my.cnf-style option file and merge its settings into
    # the global %MYSQL_CNF hash.  Only groups that already exist as
    # keys of %MYSQL_CNF are accepted; [client] and [client-server]
    # entries are fanned out to both the mysql and mysqldump groups.
    # If the file contains a password and is world/group accessible,
    # the global $unsafeConfig is set to its name for later warning.
    my ($fname) = @_;
    my ($group, $item, $value);
    if ( open CNF, $fname ) {
	while (<CNF>) {
	    s/^\s+//;
	    next if /^[#;]/;
	    if ( /\[\s*(\w+)\s*]/ ) {
		$group = $1;
		$group =~ tr/A-Z/a-z/;
		# unknown group: ignore everything until the next header
		if ( !exists $MYSQL_CNF{$group} ) {
		    undef $group;
		}
	    } elsif ( defined $group ) {
		($item, $value) = /((?:\w|-)+)\s*=\s*(\S+)/;
		# don't unquote backslashes as we just write it back out
		if ( defined $item ) {
		    if ( $item =~ /^password$/ ) {
			if ( CheckUnsafeFile($fname) ) {
			    $unsafeConfig = $fname;
			}
		    }
		    if ( $group eq 'client' || $group eq "client-server") {
			$MYSQL_CNF{'mysql'}{$item} = $value;
			$MySQL_CNF{'mysqldump'}{$item} = $value if 0; # (unreachable placeholder removed below)
			$MYSQL_CNF{'mysqldump'}{$item} = $value;
		    } else {
			$MYSQL_CNF{$group}{$item} = $value;
		    }
		}
	    }
	}
	close(CNF);
    }
}
# =================================
# sub MergeConfigFiles
# merge options from config files
# NOTE: really should do two separate merges for each
# client to exactly duplicate order of resulting argument lists
# =================================
sub MergeConfigFiles {
    # Merge the usual MySQL option files in order of increasing
    # precedence: compile-time prefix, system config directory, then
    # the user's own ~/.my.cnf.  ("@prefix@" and "@sysconfdir@" are
    # build-time autoconf placeholders.)
    my @pw   = getpwuid $<;
    my $home = $pw[7];          # home directory field of getpwuid
    MergeConfigFile("@prefix@/my.cnf");
    MergeConfigFile("@sysconfdir@/my.cnf");
    MergeConfigFile("$home/.my.cnf");
}
# =================================
# sub WriteTempConfigFile
# write
# =================================
sub WriteTempConfigFile {
    # Dump the merged option hash %MYSQL_CNF into the private temporary
    # defaults file named by $MYSQL_CNF.  O_CREAT|O_EXCL with mode 0700
    # guarantees we never clobber an existing file and keeps the
    # (password-carrying) file unreadable to others.
    sysopen CNFFILE, $MYSQL_CNF, O_RDWR|O_CREAT|O_EXCL, 0700
	or die "sysopen $MYSQL_CNF: $!";
    # groups may be written in any order; generic groups such as
    # [client] may legitimately be empty
    foreach $group (keys %MYSQL_CNF) {
	print CNFFILE "[$group]\n";
	foreach $item (keys %{$MYSQL_CNF{$group}}) {
	    my $value = $MYSQL_CNF{$group}{$item};
	    # bare options (no value) are written without '='
	    print CNFFILE (defined($value) ? "$item=$value\n" : "$item\n");
	}
	print CNFFILE "\n";
    }
    close(CNFFILE);
}
######################################################################
package MySQLaccess::DB;
###########
BEGIN {
    # Package-local debug level; falls back to the script-wide setting.
    # NOTE(review): since $DEBUG is first set to 2 (truthy), the
    # 'unless' never fires and $MySQLaccess::DEBUG is effectively
    # ignored here — looks like a leftover; confirm intent.
    $DEBUG   = 2;
    $DEBUG   = $MySQLaccess::DEBUG unless ($DEBUG);
    # Error-messages from the MySQL client: map symbolic error tags to
    # the (regex) fragments matched against the client's output.
    %ACCESS_ERR= ('Access_denied'       => 'Access denied'
                 ,'Dbaccess_denied'     => 'Access to database denied'
                 ,'Unrecognized_option' => 'unrecognized option'
                 ,'Unknown_table'       => "Can't find file:"
                 ,'unknown_error'       => '^ERROR:'
                 );
}
# ######################################
# Connecting to the MYSQL DB
# ======================================
# sub OpenConnection
# Open an connection to the mysql-db
# questions to MYSQL_Q
# answers from MYSQL_A
# ======================================
sub OpenConnection {
  # Start the mysql command-line client as a child process and wire
  # its stdin/stdout to the global filehandles MYSQL_Q (questions we
  # write) and MYSQL_A (answers we read).  Credentials travel via the
  # temporary defaults file, never on the command line.  Returns 0 on
  # success or a symbolic %ACCESS_ERR tag on failure.
  my $pid;
  MySQLaccess::Debug::Print(2,"OpenConnection:");
  # check path to mysql-client executable
  if (! -f $MySQLaccess::MYSQL) {
     if ($MySQLaccess::CMD) { die "Could not find MySQL-client '$MySQLaccess::MYSQL'"; }
     if ($MySQLaccess::CGI) {
        print "<center>\n<font color=Red>\n";
        print "ERROR: Could not find MySQL-client '$MySQLaccess::MYSQL'";
        print "</center>\n</font>\n";
        exit 0;
     }
  }
  # path to mysql executable
  my $connect = "$MySQLaccess::MYSQL --defaults-file=$MySQLaccess::MYSQL_CNF";
  $connect .= " $MySQLaccess::MYSQL_OPT";
  # superuser, spassword transmitted via defaults-file
  if (defined($MySQLaccess::Param{'rhost'}))    { $connect .= " --host=$MySQLaccess::Param{'rhost'}"; }
  # other options??
  # grant-database
  $connect .= " $MySQLaccess::ACCESS_DB";
  # open connection (not using /bin/sh -c)
  MySQLaccess::Debug::Print(2,"Connecting to: $connect");
  $pid=IPC::Open3::open3(\*MYSQL_Q,\*MYSQL_A,"",split /\s+/,$connect);
  MySQLaccess::Debug::Print(2,"PID of open pipe: $pid");
  # check connection: a trivial query must come back, otherwise the
  # client printed one of the known error messages instead.
  print MYSQL_Q "select 'ok';\n";
  $answer = <MYSQL_A>; #answer from mysql
  MySQLaccess::Debug::Print(2,"Answer: $answer\n");
  foreach $nerror (sort(keys(%ACCESS_ERR))) {
     MySQLaccess::Debug::Print(3,"check answer for error $ACCESS_ERR{$nerror}");
     if (grep(/$ACCESS_ERR{$nerror}/i,$answer)) {
        MySQLaccess::Debug::Print(2,"Answer contain error [$nerror]");
        return $nerror;
     }
  }
  # NOTE(review): this second probe is disabled (if (0)); presumably a
  # stub for a server-version check that was never finished.
  if (0) {
  # check server-version
  print MYSQL_Q "select 'ok';\n";
  $answer = <MYSQL_A>; #answer from mysql
  MySQLaccess::Debug::Print(2,"Answer: $answer\n");
  foreach $nerror (sort(keys(%ACCESS_ERR))) {
     MySQLaccess::Debug::Print(3,"check answer for error $ACCESS_ERR{$nerror}");
     if (grep(/$ACCESS_ERR{$nerror}/i,$answer)) {
        MySQLaccess::Debug::Print(2,"Answer contain error [$nerror]");
        return $nerror;
     }
  }
  }
  # consume the remaining line of the probe's result set
  my $skip=<MYSQL_A>;
  return 0;
}
# ======================================
# sub CloseConnection
# Close the connection to the mysql-db
# ======================================
sub CloseConnection {
  # Shut down both ends of the pipe to the mysql client process.
  close(MYSQL_Q);   # question channel (our writes)
  close(MYSQL_A);   # answer channel (client's replies)
}
# ===========================================================
# sub CreateTable($table)
# Create temporary/backup table
# ===========================================================
sub CreateTable {
  # Create one of the temporary/backup grant tables as an empty clone
  # of its corresponding live table.  The live table's CREATE TABLE
  # statement is obtained via mysqldump --no-data, rewritten to the new
  # table name and replayed over the open mysql connection.  With
  # $force set, an existing table of that name is dropped first.
  # Returns 0 on success or a symbolic %ACCESS_ERR tag.
  my $pid;
  my ($table,$force) = @_;
  # map of every creatable table to the live table it mirrors
  my %tables = ( $MySQLaccess::ACCESS_U_TMP => $MySQLaccess::ACCESS_U,
                 $MySQLaccess::ACCESS_H_TMP => $MySQLaccess::ACCESS_H,
                 $MySQLaccess::ACCESS_D_TMP => $MySQLaccess::ACCESS_D,
                 $MySQLaccess::ACCESS_U_BCK => $MySQLaccess::ACCESS_U,
                 $MySQLaccess::ACCESS_H_BCK => $MySQLaccess::ACCESS_H,
                 $MySQLaccess::ACCESS_D_BCK => $MySQLaccess::ACCESS_D,
                 $MySQLaccess::ACCESS_U     => $MySQLaccess::ACCESS_U_BCK,
                 $MySQLaccess::ACCESS_H     => $MySQLaccess::ACCESS_H_BCK,
                 $MySQLaccess::ACCESS_D     => $MySQLaccess::ACCESS_D_BCK,
               );
  my $tbl;
  my $query="";
  my $delim;
  my $skip;
  my $create;
  my @known_tables=();
#  print STDERR "CreateTable($table)\n";
  MySQLaccess::Debug::Print(1,"CreateTable($table):");
  ## error-handling
  return 'Unknown_table' unless defined($tables{$table});
  ## build list of known/existing tables;
  ## if 'force' existing table is dropped first
  if (defined($force) and $force) {
     @known_tables = Show_Tables();
     if (grep(/^$table$/,@known_tables)) {
        $query = "DROP TABLE $table;";
     }
  }
  ## path to mysqldump executable
  my $connect = $MySQLaccess::MYSQLDUMP;
  $connect .= " --defaults-file=$MySQLaccess::MYSQL_CNF --no-data";
  # superuser, spassword transmitted via defaults-file
  if (defined($MySQLaccess::Param{'rhost'}))    { $connect .= " --host=$MySQLaccess::Param{'rhost'}"; }
  $connect .= " $MySQLaccess::ACCESS_DB";
  $connect .= " $tables{$table}";
  ## get creation-data for original table
  $create = '';
  my $mysqldump = $connect;
  # NOTE(review): $tbl is never assigned before this substitution, and
  # $connect contains no literal " $TABLE " — this line looks vestigial
  # and is effectively a no-op; confirm before removing.
  $mysqldump =~ s/ \$TABLE / $tbl /;
  # open connection (not using /bin/sh -c)
  MySQLaccess::Debug::Print(2,"Connecting to: $connect");
  $pid=IPC::Open3::open3(\*DONTCARE,\*CREATE,"",split /\s+/,$mysqldump);
  MySQLaccess::Debug::Print(2,"PID of open pipe: $pid");
  #open(CREATE,"$mysqldump");
  @create = <CREATE>;
  $create = "@create";
  foreach $nerror (sort(keys(%ACCESS_ERR))) {
     MySQLaccess::Debug::Print(3,"check answer for error $ACCESS_ERR{$nerror}");
     if (grep(/$ACCESS_ERR{$nerror}/i,$create)) {
        MySQLaccess::Debug::Print(2,"Answer contain error [$nerror]");
        return $nerror;
     }
  }
  close(CREATE);
  close(DONTCARE);
  ## manipulate result for creation-data for temporary table
  $create =~ s/CREATE TABLE $tables{$table} \(/CREATE TABLE $table \(/;
  ## recreate temporary table
  $query .= "$create\n";
  $query .= "select 'ok';";
  ## execute query
  print MYSQL_Q "$query\n";
#  print STDERR $query;
  $answer = <MYSQL_A>; #answer from mysql
#  print STDERR "A>",$answer;
  MySQLaccess::Debug::Print(2,"Answer: $answer\n");
  foreach $nerror (sort(keys(%ACCESS_ERR))) {
#     print STDERR "->$nerror?";
     MySQLaccess::Debug::Print(3,"check answer for error $ACCESS_ERR{$nerror}");
     if (grep(/$ACCESS_ERR{$nerror}/i,$answer)) {
#        print STDERR "Yes!";
        MySQLaccess::Debug::Print(2,"Answer contain error [$nerror]");
        return $nerror;
     }
  }
  # drain the result stream up to and including the 'ok' marker row
  $delim = <MYSQL_A>; # read header
  if ($delim ne "ok\n") {
     while (($line=<MYSQL_A>) ne "ok\n")
     { MySQLaccess::Debug::Print(3," A> $line"); }
     $skip = <MYSQL_A>; # skip result 'ok'
  }
#  print STDERR "CreateTable done\n";
  return 0;
}
# ===========================================================
# sub CopyTable()
# Copy the structure and the data of a table to another table
# ===========================================================
sub CopyTable {
  # Copy all rows of table $from into table $to over the open mysql
  # connection; with $force set, $to is (re)created from scratch first
  # (structure cloned via CreateTable).  The destination is emptied
  # before the copy.  Returns 0 on success or a symbolic error tag.
  my ($from,$to,$force) = @_;
  my @known_tables = Show_Tables();
  my $query = "";
  my $nerror= 0;
  my $skip;
#  print STDERR "CopyTable($from,$to)\n";
  MySQLaccess::Debug::Print(1,"MySQLaccess::DB::CopyTable($from,$to)");
  ## error-handling
  if (!grep(/^$from$/,@known_tables)) { return 'Unknown_table'; }
  ## copy structure
  ## if forced
  if (defined($force) and $force) {
     return $nerror if ($nerror=CreateTable($to,$force));
#     print STDERR "Structure copied\n";
  }
  ## copy data
  $query .= "DELETE FROM $to;";
  $query .= "INSERT INTO $to SELECT * FROM $from;";
  $query .= "SELECT 'ok';\n";
  MySQLaccess::Debug::Print(2,"Query: $query");
  ## execute query
  print MYSQL_Q "$query\n";
#  print STDERR $query;
  ## check for errors...
  my $answer = <MYSQL_A>; #answer from mysql
#  print STDERR $answer;
  MySQLaccess::Debug::Print(2,"Answer: $answer\n");
  foreach $nerror (sort(keys(%ACCESS_ERR))) {
     MySQLaccess::Debug::Print(3,"check answer for error $ACCESS_ERR{$nerror}");
     if (grep(/$ACCESS_ERR{$nerror}/i,$answer)) {
        MySQLaccess::Debug::Print(2,"Answer contain error [$nerror]");
        return $nerror;
     }
  }
  # drain the result stream up to and including the 'ok' marker row
  my $delim = <MYSQL_A>; # read header
#  print STDERR $delim;
  if ($delim ne "ok\n") {
     while (($line=<MYSQL_A>) ne "ok\n")
     { MySQLaccess::Debug::Print(3," A> $line"); }
     $skip = <MYSQL_A>; # skip result 'ok'
  }
  return 0;
}
# ===========================================================
# sub LoadTmpTables()
# (Re)load temporary tables with entries of ACL-tables
# ===========================================================
sub LoadTmpTables {
  # (Re)populate the temporary working tables from the live grant
  # tables (user, host, db).  Stops at the first failing copy and
  # returns its error tag; returns 0 when every copy succeeded.
  my %copy_map = ( $MySQLaccess::ACCESS_U => $MySQLaccess::ACCESS_U_TMP,
                   $MySQLaccess::ACCESS_H => $MySQLaccess::ACCESS_H_TMP,
                   $MySQLaccess::ACCESS_D => $MySQLaccess::ACCESS_D_TMP,
                 );
  MySQLaccess::Debug::Print(1,"LoadTmpTables():");
  foreach my $src (keys(%copy_map)) {
    my $dst = $copy_map{$src};
    MySQLaccess::Debug::Print(2,"Loading table $src -> $dst.");
    # 'force' recreates the destination table before copying the rows
    my $err = CopyTable($src,$dst,'force');
    return $err if $err;
  }
  return 0;
}
# ===========================================================
# sub BackupGrantTables()
# Make a backup of the original grant-tables
# ===========================================================
sub BackupGrantTables {
  # Snapshot the live grant tables into their backup counterparts so a
  # one-level rollback remains possible.  Stops at the first failing
  # copy and returns its error tag; returns 0 on full success.
  my %copy_map = ( $MySQLaccess::ACCESS_U => $MySQLaccess::ACCESS_U_BCK,
                   $MySQLaccess::ACCESS_H => $MySQLaccess::ACCESS_H_BCK,
                   $MySQLaccess::ACCESS_D => $MySQLaccess::ACCESS_D_BCK,
                 );
  MySQLaccess::Debug::Print(1,"BackupGrantTables():");
  foreach my $src (keys(%copy_map)) {
    my $dst = $copy_map{$src};
    MySQLaccess::Debug::Print(2,"Backup table $src -> $dst.");
    # 'force' recreates the backup table before copying the rows
    my $err = CopyTable($src,$dst,'force');
    return $err if $err;
  }
  return 0;
}
# ===========================================================
# sub RollbackGrantTables()
# Rollback the backup of the grant-tables
# ===========================================================
sub RollbackGrantTables {
  # Restore the live grant tables from the backup tables written by
  # BackupGrantTables.  Stops at the first failing copy and returns
  # its error tag; returns 0 on full success.
  my %copy_map = ( $MySQLaccess::ACCESS_U_BCK => $MySQLaccess::ACCESS_U,
                   $MySQLaccess::ACCESS_H_BCK => $MySQLaccess::ACCESS_H,
                   $MySQLaccess::ACCESS_D_BCK => $MySQLaccess::ACCESS_D,
                 );
  MySQLaccess::Debug::Print(1,"RollbackGrantTables():");
  foreach my $src (keys(%copy_map)) {
    my $dst = $copy_map{$src};
    MySQLaccess::Debug::Print(2,"Rollback table $src -> $dst.");
    # 'force' recreates the destination table before copying the rows
    my $err = CopyTable($src,$dst,'force');
    return $err if $err;
  }
  return 0;
}
# ===========================================================
# sub CommitGrantTables()
# Copy grant-rules from temporary tables to the ACL-tables
# ===========================================================
sub CommitGrantTables {
  # Copy the (edited) temporary tables back over the live grant tables.
  # A backup of the originals is taken first so that a one-level
  # rollback (RollbackGrantTables) stays possible.  Returns 0 on
  # success or the symbolic error tag of the first failing step.
  my %tables = ( $MySQLaccess::ACCESS_U => $MySQLaccess::ACCESS_U_TMP,
                 $MySQLaccess::ACCESS_H => $MySQLaccess::ACCESS_H_TMP,
                 $MySQLaccess::ACCESS_D => $MySQLaccess::ACCESS_D_TMP,
               );
  my $tbl;
  my $nerror;   # was an undeclared global; declared lexically like the sibling subs
#  print STDERR "CommitGrantTables()\n";
  MySQLaccess::Debug::Print(1,"CommitGrantTables():");
  ## Make backup of original grant-tables; abort if the backup fails,
  ## since without it a rollback would no longer be possible.
  MySQLaccess::Debug::Print(2,"Making backup of original grant-tables...");
  if ($nerror = BackupGrantTables()) {
     return $nerror;
  }
  ## Copy data from temporary tables to grant-tables
  foreach $tbl (keys(%tables)) {
#    print STDERR "$tbl -> $tables{$tbl}\n";
    MySQLaccess::Debug::Print(2,"Loading data $tables{$tbl} -> $tbl.");
    return $nerror if ($nerror=CopyTable($tables{$tbl},$tbl));
  }
  return 0;
}
# ===========================================================
# sub Show_Fields($table):
# return (a reference to) a hash which holds the names
# of all relevant grant-fields, with their index in the record,
# and (a reference to) an array which holds the fieldnames.
# ===========================================================
sub Show_Fields {
  # Run "show fields from $table" over the open mysql connection and
  # return (\%Struct, \@Struct): a hash mapping each relevant
  # (ucfirst'ed) privilege-column name to its record index, and an
  # array of the same names in column order.  The leading key columns
  # (host/user/db) listed in %skip are excluded per table.
  my ($table) = @_;
  my %skip = ('host' => [0,1]
             ,'user' => [0,1,2]
             ,'db'   => [0,1,2]
             );
  my %Struct = ();
  my @Struct = ();
  my $query = "show fields from $table;select 'ok';\n";
  my $i=0;
  my $line;
  #print STDERR $query;
  MySQLaccess::Debug::Print(1,"Show_Fields($table):");
  MySQLaccess::Debug::Print(2,"SQL: $query");
  print MYSQL_Q "$query";
  my $skip = <MYSQL_A>; #skip header
  while (($line=<MYSQL_A>) ne "ok\n")
  {
    #print STDERR ">",$line;
    chop($line);
    MySQLaccess::Debug::Print(2," $table>: $line");
    my ($field,$type,$null,$key,$default,$extra) = split(' ',$line);
    $field = ucfirst($field);
    MySQLaccess::Debug::Print(3, " <split: $field - $type - $null - $key - $default - $extra");
    # keep the column unless its index appears in the skip-list
    if (! grep(/$i/,@{$skip{$table}}) ){
       $Struct{$field} = $i; #hash
       push(@Struct,$field); #array
       MySQLaccess::Debug::Print(3,"  ==> added column[$i]: $field ($Struct{$field})");
    }
    else {
       MySQLaccess::Debug::Print(3,"  ==> skipped column[$i], value=[$field]");
    }
    $i++;
  }
  $skip=<MYSQL_A>;  # Get ok row (found already ok header)
  MySQLaccess::Debug::Print(2, "Array:");
  foreach $field (@Struct) { MySQLaccess::Debug::Print(2,"+ $field"); }
  MySQLaccess::Debug::Print(2,"Hash:");
  foreach $field (keys(%Struct)) { MySQLaccess::Debug::Print(2,"+ $field -> $Struct{$field}"); }
  return (\%Struct,\@Struct);
}
# ===========================================================
# sub Show_Tables():
# return (a reference to) an array which holds all
# known tables.
# ===========================================================
sub Show_Tables {
  # Run "show tables" over the open mysql connection and return the
  # list of table names in the current (grant) database.
  my @Tables = ();
  my $query  = "show tables;select 'ok';\n";
  my $i=0;
  my $line;
  MySQLaccess::Debug::Print(1,"Show_Tables():");
  MySQLaccess::Debug::Print(2,"SQL: $query");
  print MYSQL_Q "$query";
  my $skip = <MYSQL_A>; #skip header
  # read one table name per line until the 'ok' marker row
  while (($line=<MYSQL_A>) ne "ok\n")
  {
    chop($line);
    push(@Tables,$line); #array
    MySQLaccess::Debug::Print(3,"  ==> added table: $line");
  }
  $skip=<MYSQL_A>;  # Get ok row (found already ok header)
  MySQLaccess::Debug::Print(2, "Array:");
  foreach $tbl (@Tables) { MySQLaccess::Debug::Print(2,"+ $tbl"); }
  return @Tables;
}
# ======================================
# sub Validate_Password($passwd,$host,$user,$encpw)
# Validate the given password
# for user '$user'
# connecting from host '$host'
# ======================================
sub Validate_Password {
  # Check the given clear-text password for user $user connecting from
  # $host against the stored encrypted password $encpw: the query
  # matches a user-table row where PASSWORD($password) equals the
  # stored value.  Returns true when at least one row matched.
  # NOTE(review): the arguments are interpolated into the SQL string
  # unescaped — safe only as long as the inputs come from the trusted
  # operator; confirm before exposing this to untrusted input.
  my ($password,$host,$user,$encpw) = @_;
  my $valid=0;
  MySQLaccess::Debug::Print(1,"Validate_Password($password,$host,$user,$encpw)");
  my $sql = "select host,user,password from user having "
           ."host='$host' and user='$user' and password='$encpw' "
           ."and password=PASSWORD('$password');\n";
  $sql .= "select 'ok';\n";
  MySQLaccess::Debug::Print(2,"SQL = $sql");
  print MYSQL_Q "$sql";
  # if password is valid, at least 1 row returns before we read 'ok'
  while ( ($line=<MYSQL_A>) ne "ok\n") {
     MySQLaccess::Debug::Print(2," A> $line");
     $valid = defined($line);
  }
  my $skip = <MYSQL_A>; # read 'ok'
  return $valid;
}
# ==========================================================
# sub Sort_fields: (rewritten by psmith)
# Build the query for an ordered list of entries
# ==========================================================
sub Sort_fields {
  # Recursively build the text of one SELECT per combination of
  # "specificity class" of the given fields.  For each field the three
  # classes are, in order of decreasing precedence: a literal value
  # (not '%' and not empty), a value containing SQL wildcards, and the
  # empty value.  The order in which the recursion emits the queries
  # IS the sort order of the final result — do not reorder @where.
  my ($start, $end, $sofar, $this, @rest) = (@_);
  my @where = ("((FIELD not like '\\%') AND (FIELD <> ''))",
               "((FIELD like '%\\%%') OR (FIELD like '%\\_%'))",
               "(FIELD = '')");
  my $res = '';
  # no fields left: emit the accumulated clause wrapped in start/end
  $this or return ("$start $sofar $end");
  $sofar .= ' AND ' if $sofar;
  foreach $w (@where) {
    my $f = $w;
    $f =~ s/FIELD/$this/g;   # substitute the actual column name
    $res .= Sort_fields($start, $end, "$sofar$f", @rest);
  }
  return ($res);
}
# ===========================================================
# sub Sort_table: (rewritten by psmith)
# return all entries in the given table,
# in an ordered fashion
# ===========================================================
sub Sort_table {
  # Return every row of grant table $tbl as tab-separated strings,
  # ordered from most-specific to least-specific entry in the columns
  # given by @order (via the query cascade built by Sort_fields).
  my ($tbl, @order) = @_;
  my @res=();
  # as long as there's no full where clause (Distrib 3.20)...
  # use having :-(
  # NOTE: this clause WILL NOT work on 3.21, because of the
  # order of 'ORDER BY' and 'HAVING'
  my $start = "SELECT *,UCASE(host) as ucase_host FROM $tbl ";
  $start .= 'ORDER BY ' . join(',', @order) ." HAVING ";
  my $end   = ";\n";
  # server version 3.21 has a full where clause :-)
  if ($MySQLaccess::Host::SERVER >= '3.21') {
     # print "+++USING FULL WHERE CLAUSE+++\n";
     $start = "SELECT *,UCASE(host) as ucase_host FROM $tbl WHERE ";
     $end = ' ORDER BY ' . join(',', @order) . ";\n";
  }
  MySQLaccess::Debug::Print(1,"Sort_table():");
  MySQLaccess::Debug::Print(2,"Sorting table $tbl by `@order'");
  # Rewrite the pseudo-column 'ucase_host' to the real expression for
  # the WHERE/HAVING part; the ORDER BY above may keep the alias.
  # (foreach aliases $tmp to the array element, so @order is modified
  # in place before Sort_fields sees it.)
  my $tmp;
  foreach $tmp (@order)
  {
    $tmp="UCASE(host)" if ($tmp eq "ucase_host");
  }
  my $query = Sort_fields($start, $end, '', @order);
  $query .= "select 'ok';\n";
  MySQLaccess::Debug::Print(2,"Query: $query");
  print MYSQL_Q "$query\n";
  my $delim = <MYSQL_A>; # read header
  MySQLaccess::Debug::Print(3," A> $delim");
  if ($delim ne "ok\n") {
     if ($delim =~ /^ERROR/) {
        push(@MySQLaccess::Grant::Error,'use_old_server');
        MySQLaccess::Report::Print_Error_Messages() ;
        exit 1;
     }
     while (($line=<MYSQL_A>) ne "ok\n")
     {
       MySQLaccess::Debug::Print(3," A> $line");
       push(@res,$line);
     }
  }
  my $skip = <MYSQL_A>; # skip result 'ok'
  # remove columnheaders from output
  @res = grep(!/^\Q$delim\E$/, @res);
  # remove trailing \n from each returned record
  chomp(@res);
  # each record has 1 field to much : ucase_host
  @res = grep { /(.*)\t.*$/; $_ = $1; } @res;
  MySQLaccess::Debug::Print(2,"Result of sorted table $tbl:");
  foreach $line (@res) { MySQLaccess::Debug::Print(2," >>$line"); }
  return @res;
}
# ===========================================================
# sub Get_All_db(template):
# return all db the grant-tables are working on,
# which conform to the template
# ===========================================================
sub Get_All_dbs {
  # Return a reference to the list of database names mentioned in the
  # (sorted) db table that match the wildcard $template.  A template
  # without wildcards is returned as-is.  With $tmp true the temporary
  # working copy of the table is read instead of the production table.
  my ($template,$tmp) = @_;
  my @db=();
  my $aref;
  # working with temporary tables or production tables
  if (defined($tmp) and $tmp) {
     $aref = \@MySQLaccess::Grant::sorted_db_tmp_table ;
  }
  else {
     $aref = \@MySQLaccess::Grant::sorted_db_table;
  }
  MySQLaccess::Debug::Print(1," template=[$template]");
  # get all db for which access-rights can be calculated,
  # which conform to the template.
  # !! these db's don't have to exist yet, so it's not
  # enough to look which db already exist on the system
  # shell-style wildcards (*?) are first mapped to SQL ones (%_)
  $reg_expr = $template;
  if ($template =~ /[\*\?]/) {
     $reg_expr =~ tr/*?/%_/;
     #$reg_expr = MySQLaccess::Wildcards::Wild2Reg($template);
  }
  $reg_expr = MySQLaccess::Wildcards::SQL2Reg("$reg_expr");
  if ( ! ($template =~ /[\*\?%_]/) ) {
     push(@db,$template);
     return \@db;
  }
  MySQLaccess::Debug::Print(2,"#Reading db-table...");
  foreach $record (@{$aref}) { #MySQLaccess::Grant::sorted_db_table) {
    my @record=split(/\t/,$record);
    my $db = $record[1];
    MySQLaccess::Debug::Print(2,"> $db ");
    # NOTE(review): $db is used unescaped as a regex in grep(), so db
    # names containing regex metacharacters may dedupe incorrectly —
    # long-standing quirk; confirm before changing.
    if ( (!grep(/$db/i,@db)) and ($db =~/$reg_expr/i) ) {
       push(@db,$db);
       MySQLaccess::Debug::Print(2,"added");
    }
    else {
       MySQLaccess::Debug::Print(2,"skipped");
    }
  }
  # if no rule is found for a certain db in the db-table,
  # the rights of the user are used, so we should inform
  # the user for
  if (!grep(/^%$/,@db)) { push(@db,"$MySQLaccess::NEW_DB"); }
  return \@db;
}
# ===========================================================
# sub Get_All_users(template):
# return all users the grant-tables are working on,
# which conform to the template
# ===========================================================
sub Get_All_users {
  # Return a reference to the list of usernames mentioned in the
  # (sorted) user table that match the wildcard $template; a template
  # without wildcards is returned as-is.  With $tmp true the temporary
  # working copy of the table is read instead of the production table.
  # The placeholder $MySQLaccess::NEW_USER is always appended to stand
  # for any user still to be created.
  my ($template,$tmp) = @_;   # was an undeclared global assignment; declared like Get_All_dbs/Get_All_hosts
  my @user=();
  my $aref;
  # working with temporary tables or production tables
  if (defined($tmp) and $tmp) {
     $aref = \@MySQLaccess::Grant::sorted_user_tmp_table ;
  }
  else {
     $aref = \@MySQLaccess::Grant::sorted_user_table;
  }
  MySQLaccess::Debug::Print(1,"Debug Get_All_users:");
  # get all users for which access-rights can be calculated.
  # !! these users don't have to exist yet, so it's not
  # enough to look which users already exist on the system
  # shell-style wildcards (*?) are first mapped to SQL ones (%_)
  $reg_expr = $template;
  if ($template =~ /[\*\?]/) {
     $reg_expr =~ tr/*?/%_/;
     #$reg_expr = MySQLaccess::Wildcards::Wild2Reg($template);
  }
  $reg_expr = MySQLaccess::Wildcards::SQL2Reg("$reg_expr");
  if ( ! ($template =~ /[\*\?%_]/) ) {
     push(@user,$template);
     return \@user;
  }
  MySQLaccess::Debug::Print(2,"#Reading user-table...");
  foreach $record (@{$aref}) { #MySQLaccess::Grant::sorted_user_table) {
    my @record=split(/\t/,$record);
    my $user = $record[1];
    MySQLaccess::Debug::Print(2,"> $user ");
    if ( (!grep(/$user/,@user)) and ($user=~/$reg_expr/)) {
       push(@user,$user);
       MySQLaccess::Debug::Print(2, "added");
    }
    else {
       MySQLaccess::Debug::Print(2, "skipped");
    }
  }
  # Any user means also:
  #  - the 'empty' user, ie without supplying a username
  #  - any user still to be defined/created
  #push(@user,'');   #without_suplying_a_username
  push(@user,"$MySQLaccess::NEW_USER");
  #push(@Warnings,'minimum_priv');
  return \@user;
}
# ===========================================================
# sub Get_All_hosts(template):
# return all hosts the grant-tables are working on,
# which conform to the template
# ===========================================================
sub Get_All_hosts {
  # Return a reference to the sorted list of host names mentioned in
  # the (sorted) db and host tables that match the wildcard $template;
  # a template without wildcards is returned as-is.  With $tmp true
  # the temporary working copies of the tables are read.
  my ($template,$tmp) = @_;
  my @host=();
  my $aref;
  my $aref1;
  # working with temporary tables or production tables
  if (defined($tmp) and $tmp) {
     $aref = \@MySQLaccess::Grant::sorted_host_tmp_table ;
     $aref1= \@MySQLaccess::Grant::sorted_db_tmp_table ;
  }
  else {
     $aref = \@MySQLaccess::Grant::sorted_host_table;
     $aref1= \@MySQLaccess::Grant::sorted_db_table ;
  }
  MySQLaccess::Debug::Print(1, "Debug Get_All_hosts:");
  # get all hosts for which access-rights can be calculated.
  # !! these hosts don't have to exist yet, so it's not
  # enough to look which hosts already exist on the system
  # shell-style wildcards (*?) are first mapped to SQL ones (%_)
  $reg_expr = $template;
  if ($template =~ /[\*\?]/) {
     $reg_expr =~ tr/*?/%_/;
     #$reg_expr = MySQLaccess::Wildcards::Wild2Reg($template);
  }
  $reg_expr = MySQLaccess::Wildcards::SQL2Reg("$reg_expr");
  if ( ! ($template =~ /[\*\?%_]/) ) {
     push(@host,$template);
     return \@host;
  }
  # hosts named in the db table (note: collected regardless of the
  # template here; only the host-table pass below filters on it)
  MySQLaccess::Debug::Print(1, "#Reading db-table...");
  foreach $record (@{$aref1}) { #MySQLaccess::Grant::sorted_db_table) {
    my @record=split(/\t/,$record);
    my $host = $record[0];
    MySQLaccess::Debug::Print(2, "> $host ");
    if (! grep(/$host/i,@host)) {
       push(@host,$host);
       MySQLaccess::Debug::Print(2, "added");
    }
    else {
       MySQLaccess::Debug::Print(2, "skipped");
    }
  }
  MySQLaccess::Debug::Print(1, "#Reading host-table...");
  foreach $record (@{$aref}) {
    my @record=split(/\t/,$record);
    my $host = $record[0];
    MySQLaccess::Debug::Print(2, "> $host ");
    if ( (!grep(/$host/,@host)) and ($host=~/$reg_expr/)) {
       push(@host,$host);
       MySQLaccess::Debug::Print(2, "added");
    }
    else {
       MySQLaccess::Debug::Print(2, "skipped");
    }
  }
  # DOUBT:
  #print "#Reading user-table...\n" if ($DEBUG>1);
  #foreach $record (@MySQLaccess::Grant::sorted_user_table) {
  #  my @record=split(/\t/,$record);
  #  my $host = $record[0];
  #  print "> $host " if ($DEBUG>2);
  #  if ( (!grep(/$host/,@host)) and ($host=~/$reg_expr/)) {
  #     push(@host,$host);
  #     print "added\n" if ($DEBUG>2);
  #  }
  #  else {
  #     print "skipped\n" if ($DEBUG>2);
  #  }
  #}
  # Any host also means:
  #  - any host still to be defined/created
  #push(@host,"any_other_host");
  @host = sort(@host);
  return \@host;
}
##########################################################################
package MySQLaccess::Grant;
##############
BEGIN {
  # Package-local debug level; falls back to the script-wide setting
  # when not set explicitly here.
  $DEBUG   = 0;
  $DEBUG   = $MySQLaccess::DEBUG unless ($DEBUG);
}
# ===========================================================
# sub Diff_Privileges()
# Calculate diff between temporary and original grant-tables
# ===========================================================
sub Diff_Privileges {
  # Compute the difference between the privileges derived from the
  # live grant tables and those derived from the temporary (edited)
  # tables: both rule sets are rendered to sorted text files and
  # compared with the external diff program.  Returns a reference to
  # the list of '<'/'>' diff lines.
  my @before=();
  my @after =();
  my @diffs =();
  # -----------------------------
  # Build list of users,dbs,hosts
  # to process...
  my @all_dbs   = @{MySQLaccess::DB::Get_All_dbs('*')};
  my @all_users = @{MySQLaccess::DB::Get_All_users('*')};
  my @all_hosts = @{MySQLaccess::DB::Get_All_hosts('*')};
  #if EDIT-mode
  my @all_dbs_tmp   = @{MySQLaccess::DB::Get_All_dbs('*','tmp')};
  my @all_users_tmp = @{MySQLaccess::DB::Get_All_users('*','tmp')};
  my @all_hosts_tmp = @{MySQLaccess::DB::Get_All_hosts('*','tmp')};
  my %Access;
  # ------------------------------------
  # Build list of priv. for grant-tables
  foreach $host (@all_hosts) {
    foreach $user (@all_users) {
      foreach $db (@all_dbs) {
	  MySQLaccess::Grant::Initialize();
	  %Access = MySQLaccess::Grant::Get_Access_Rights($host,$user,$db);
	  push(@before,MySQLaccess::Report::Raw_Report($host,$user,$db,\%Access));
      }
    }
  }
  # ----------------------------------
  # Build list of priv. for tmp-tables
  foreach $host (@all_hosts_tmp) {
    foreach $user (@all_users_tmp) {
      foreach $db (@all_dbs_tmp) {
	  MySQLaccess::Grant::Initialize('tmp');
	  %Access = MySQLaccess::Grant::Get_Access_Rights($host,$user,$db,'tmp');
	  push(@after,MySQLaccess::Report::Raw_Report($host,$user,$db,\%Access));
      }
    }
  }
  # ----------------------------------
  # Write results to temp-file to make
  # DIFF
  @before = sort(@before);
  @after  = sort(@after);
  # tempfile() presumably comes from File::Temp, imported earlier in
  # the file — confirm.
  ($hb, $before) = tempfile("$MySQLaccess::script.XXXXXX") or
    push(@MySQLaccess::Report::Errors,"Can't create temporary file: $!");
  ($ha, $after) = tempfile("$MySQLaccess::script.XXXXXX") or
    push(@MySQLaccess::Report::Errors,"Can't create temporary file: $!");
  print $hb join("\n",@before);
  print $ha join("\n",@after);
  close $hb;
  close $ha;
  # ----------------------------------
  # compute difference
  my $cmd="$MySQLaccess::DIFF $before $after |";
  open(DIFF,"$cmd");
  @diffs = <DIFF>;
  @diffs = grep(/[<>]/,@diffs);   # keep only the changed-line markers
  chomp(@diffs);
  close(DIFF);
  # ----------------------------------
  # cleanup temp. files
  unlink($before);
  unlink($after);
  return \@diffs;
}
# ===========================================================
# sub Initialize()
#
# ===========================================================
sub Initialize {
  # Reset all per-(host,user,db) state before computing a new set of
  # access rights: deny-all default rights, empty diagnostics, and the
  # bookkeeping flags used while walking the grant tables.
  %MySQLaccess::Grant::Access   = %{Default_Access_Rights()};
  @MySQLaccess::Grant::Errors   = ();
  @MySQLaccess::Grant::Warnings = ();
  @MySQLaccess::Grant::Notes    = ();
  # -----
  # rules: "no rule matched", until proven otherwise
  my %default_rule = ( 'user' => 'no_rule_found',
                       'db'   => 'no_rule_found',
                       'host' => 'no_equiv_host',
                     );
  $MySQLaccess::Grant::Rules{$_} = $default_rule{$_} for keys %default_rule;
  $MySQLaccess::Grant::full_access        = 1;
  $MySQLaccess::Grant::process_host_table = 0;
  return 1;
}
# ===========================================================
# sub ReadTables()
#
# ===========================================================
sub ReadTables {
  # Load the layout (column name -> index) and the fully sorted
  # contents of the three grant tables (host, db, user) into the
  # package globals used by Get_Access_Rights and friends.  With $tmp
  # true the temporary working tables are read (and created first if
  # missing); otherwise the production grant tables are read.
  my ($tmp) = @_;
  my ($HOST,$DB,$USER);
  my @tables;
  # build list of available tables
  @tables = MySQLaccess::DB::Show_Tables();
  # reading production grant-tables or temporary tables?
  $tmp = (defined($tmp) and $tmp) ? 1 : 0;
  if ($tmp) { #reading temporary tables
     $HOST=$MySQLaccess::ACCESS_H_TMP;
     $DB  =$MySQLaccess::ACCESS_D_TMP;
     $USER=$MySQLaccess::ACCESS_U_TMP;
     # ----------------------------
     # do tables exist?
     if (!grep(/$HOST/,@tables)) { MySQLaccess::DB::CreateTable($HOST); }
     if (!grep(/$USER/,@tables)) { MySQLaccess::DB::CreateTable($USER); }
     if (!grep(/$DB/,@tables))   { MySQLaccess::DB::CreateTable($DB); }
     MySQLaccess::Debug::Print(1,"Finding fields in tmp-ACL files:");
     # -----------------------------
     # Get record-layout
     my ($h1,$h2) = MySQLaccess::DB::Show_Fields($HOST);
     my ($d1,$d2) = MySQLaccess::DB::Show_Fields($DB);
     my ($u1,$u2) = MySQLaccess::DB::Show_Fields($USER);

     %MySQLaccess::Grant::H_tmp = %{$h1}; @MySQLaccess::Grant::H_tmp = @{$h2};
     %MySQLaccess::Grant::D_tmp = %{$d1}; @MySQLaccess::Grant::D_tmp = @{$d2};
     %MySQLaccess::Grant::U_tmp = %{$u1}; @MySQLaccess::Grant::U_tmp = @{$u2};

#     @MySQLaccess::Grant::Privileges_tmp=@{Make_Privlist()};
     #
     MySQLaccess::Debug::Print(1, "Reading sorted temp-tables:");
     # sort user table on password too only when a password was given,
     # so password-specific rules are ordered deterministically
     @MySQLaccess::Grant::sorted_db_tmp_table  = MySQLaccess::DB::Sort_table($DB, 'ucase_host', 'user', 'db');
     @MySQLaccess::Grant::sorted_host_tmp_table= MySQLaccess::DB::Sort_table($HOST, 'ucase_host', 'db');
     @MySQLaccess::Grant::sorted_user_tmp_table= defined($MySQLaccess::Param{'password'}) ?
                             MySQLaccess::DB::Sort_table($USER, 'ucase_host', 'user', 'password'):
                             MySQLaccess::DB::Sort_table($USER, 'ucase_host', 'user');
  }
  else { #reading production grant-tables
     $HOST=$MySQLaccess::ACCESS_H;
     $DB  =$MySQLaccess::ACCESS_D;
     $USER=$MySQLaccess::ACCESS_U;
     MySQLaccess::Debug::Print(1,"Finding fields in ACL files:");
     # -----------------------------
     # Get record-layout
     my ($h1,$h2) = MySQLaccess::DB::Show_Fields($HOST);
     my ($d1,$d2) = MySQLaccess::DB::Show_Fields($DB);
     my ($u1,$u2) = MySQLaccess::DB::Show_Fields($USER);

     %MySQLaccess::Grant::H = %{$h1}; @MySQLaccess::Grant::H = @{$h2};
     %MySQLaccess::Grant::D = %{$d1}; @MySQLaccess::Grant::D = @{$d2};
     %MySQLaccess::Grant::U = %{$u1}; @MySQLaccess::Grant::U = @{$u2};

     @MySQLaccess::Grant::Privileges=@{Make_Privlist()};

     MySQLaccess::Debug::Print(1, "Reading sorted tables:");
     @MySQLaccess::Grant::sorted_db_table  = MySQLaccess::DB::Sort_table($DB, 'ucase_host', 'user', 'db');
     @MySQLaccess::Grant::sorted_host_table= MySQLaccess::DB::Sort_table($HOST, 'ucase_host', 'db');
     @MySQLaccess::Grant::sorted_user_table= defined($MySQLaccess::Param{'password'}) ?
                             MySQLaccess::DB::Sort_table($USER, 'ucase_host', 'user', 'password'):
                             MySQLaccess::DB::Sort_table($USER, 'ucase_host', 'user');
  }
  return 0;
}
# ===========================================================
# sub Get_Access_Rights(host,user,db)
# report the access_rights for the tuple ($host,$user,$db).
# ===========================================================
sub Get_Access_Rights {
  # Compute the effective access rights for the tuple ($host,$user,$db),
  # following the server's algorithm: user-table privileges are OR-ed in,
  # db-table privileges are OR-ed in, and (when the db-record's host field
  # is empty) host-table privileges are AND-ed in.
  # $tmp selects the temporary copies of the grant tables instead of the
  # production ones.  Returns the resulting %Access hash.
  # NB: 'local' (dynamic scoping) is deliberate -- the Get_grant_from_*
  # helpers read $host, $user, $db, $host_name and $host_ip directly.
  local ($host,$user,$db,$tmp) = @_;
  my $aref_user;
  my $aref_host;
  my $aref_db;
  # working with temporary tables or production tables
  if (defined($tmp) and $tmp) {
     $aref_user = \@MySQLaccess::Grant::sorted_user_tmp_table;
     $aref_host = \@MySQLaccess::Grant::sorted_host_tmp_table;
     $aref_db   = \@MySQLaccess::Grant::sorted_db_tmp_table;
  }
  else {
     $aref_user = \@MySQLaccess::Grant::sorted_user_table;
     $aref_host = \@MySQLaccess::Grant::sorted_host_table;
     $aref_db   = \@MySQLaccess::Grant::sorted_db_table;
  }
  my ($refrecord,$refgrant);
  my ($_host_,$_user_,$encpw_);
  # (removed unused 'my %_Access_;' -- the code below uses the package
  #  variable %_access_ as a scratch hash)
  MySQLaccess::Debug::Print(1, "for ($host,$user,$db):");
  # ******************************************************************************
  # Create default access-rights
  # default access-rights are no access at all!!
  # ******************************************************************************
  # get hostname for IP-address
  # get IP-address for hostname
  local $host_name = MySQLaccess::Host::IP2Name($host);
  local $host_ip   = MySQLaccess::Host::Name2IP($host);
  MySQLaccess::Debug::Print(3,"host=$host, hostname=$host_name, host-ip =$host_ip");
  MySQLaccess::Debug::Print(3,"user=$user");
  MySQLaccess::Debug::Print(3,"db  =$db");
  # ***********************************************************************
  # retrieve information on USER
  # check all records in mysql::user for matches with the tuple (host,user)
  # ***********************************************************************
  # 4.OR (add) the privileges for the user from the "user" table.
  #  (add all privileges which is "Y" in "user")
  ($refrecord,$refgrant) = Get_grant_from_user($host,$user,$aref_user);
  ($_host_,$_user_,$encpw_) = @{$refrecord};
  %_access_ = %{$refgrant};
  foreach $field (keys(%U)) { ##only priv. set in user-table
    $MySQLaccess::Grant::Access{$field} = ($MySQLaccess::Grant::Access{$field} or $_access_{$field});
  }
  if ($_user_ eq $MySQLaccess::NEW_USER) {
     push(@Warnings,'minimum_priv');
  }
  if ($_user_ ne $user) {
     $user=$_user_;
     push(@Warnings,'anonymous_access');
  }
  # *******************************************************
  # Validate password if this has been asked to do
  # *******************************************************
  if (defined($password)) {
     # BUGFIX: was $_encpw_, which is never assigned anywhere -- the
     # encrypted password of the matching user-record is held in $encpw_.
     $valid = Validate_Password($password,$_host_,$_user_,$encpw_,$aref_user);
     if (!$valid) { push(@Errors,'invalid_password'); }
     else         { push(@Notes,'valid_password'); }
  }
  # ******************************************************************************
  # retrieve information on DB
  # check all records in mysql::db for matches with the triple (host,db,user)
  # first match is used.
  # ******************************************************************************
  # 2.Get grant for user from the "db" table.
  ($refrecord,$refgrant)=Get_grant_from_db($host,$db,$user,$aref_db); #set process_host_table
  ($_host_,$_user_,$encpw_) = @{$refrecord};
  %_access_ = %{$refgrant};
  foreach $field (keys(%D)) { ##only priv. set in db-table
    $MySQLaccess::Grant::Access{$field} = ($MySQLaccess::Grant::Access{$field} or $_access_{$field});
  }
  # ***********************************************************************
  # retrieve information on HOST
  # check all records in mysql::host for matches with the tuple (host,db)
  #
  # ' The host table is mainly to maintain a list of "secure" servers. '
  # ***********************************************************************
  # 3.If hostname is "empty" for the found entry, AND the privileges with
  #   the privileges for the host in "host" table.
  #   (Remove all which is not "Y" in both)
  if ($MySQLaccess::Grant::process_host_table) {
     ($refrecord,$refgrant)=Get_grant_from_host($host,$db,$aref_host);
     ($_host_,$_user_,$encpw_) = @{$refrecord};
     %_access_ = %{$refgrant};
     foreach $field (keys(%H)) { ##only priv. set in host-table
       $MySQLaccess::Grant::Access{$field} = ($MySQLaccess::Grant::Access{$field} and $_access_{$field});
     }
  }
  MySQLaccess::Debug::Print(1,"done for ($host,$user,$db)");
  return %MySQLaccess::Grant::Access;
}
# ####################################
# FINDING THE RIGHT GRANT-RULE
# ==========================================================
# sub Get_grant_from_user:
# ==========================================================
sub Get_grant_from_user {
  # Find the FIRST record in the (pre-sorted) user table whose host pattern
  # and user name match ($host,$user).  Returns a pair of references:
  #   - the matching record split into [host,user,password] (empty if none)
  #   - a privilege hash seeded from "no access" and filled from the record.
  # Side effects: pushes onto @Warnings/@Notes, clears
  # $MySQLaccess::Grant::full_access, records the rule in %Rules.
  my ($host,$user,$aref) = @_;
  MySQLaccess::Debug::Print(1, "");
  MySQLaccess::Debug::Print(1, "(host=$host,user=$user)");
  my %Access_user = %{Default_Access_Rights()};
  my $rule_found=0;
  my @record = ();
  my $record;
  foreach $record (@{$aref}) {
    # at least one grant rule exists, so the "empty tables" case is off
    $MySQLaccess::Grant::full_access=0;
    MySQLaccess::Debug::Print(3, "Record= $record");
    @record=split(/\t/,$record);
    # check host and db
    # with possible wildcards in field
    # replace mysql-wildcards by reg-wildcards
    my $host_tpl = MySQLaccess::Wildcards::SQL2Reg($record[0]);
    my $user_tpl = $record[1]; #user field isn't pattern-matched!!
    my $passwd = $record[2];
    MySQLaccess::Debug::Print(3, "=>host_tpl : read=$record[0] -> converted=$host_tpl");
    MySQLaccess::Debug::Print(3, "=>user_tpl : read=$record[1] -> $user_tpl");
    MySQLaccess::Debug::Print(3, "=>password : read=$record[2] -> $passwd");
    if ( MySQLaccess::Host::MatchTemplate($host,$host_tpl) and
         MySQLaccess::Wildcards::MatchTemplate($user_tpl,$user)
       )
    {
       MySQLaccess::Debug::Print(2, "FOUND!!");
       if ($passwd eq '') { push(@Warnings,'insecure_user'); }
       else               { push(@Notes,'password_required'); }
       # %U maps privilege-column name -> column index in the record
       foreach $field (keys(%U)) {
         $Access_user{$field} = $MySQLaccess::Report::Answer{$record[$U{$field}]};
       }
       #print "\n" if $DEBUG;
       $MySQLaccess::Grant::Rules{'user'} = $record;
       $rule_found=1;
       last;
    }
  }
  # -------------------------------
  # setting privileges to user-priv
  MySQLaccess::Debug::Print(2, "Rights after parsing user-table..:");
  if (! $rule_found ) {
     @record=();
     MySQLaccess::Debug::Print(2, "NO record found in the user-table!!");
  }
  else {
     MySQLaccess::Debug::Print(2, "Selected record=@record");
     # NOTE(review): after the foreach, $record is restored to its pre-loop
     # (undef) value, so this debug line prints an empty value.
     MySQLaccess::Debug::Print(2, "<=?=> $record");
  }
  MySQLaccess::Debug::Print(1, "returning @record");
  return (\@record,\%Access_user); #matching record in user-table
}
# ==========================================================
# sub Get_grant_from_db:
# ==========================================================
sub Get_grant_from_db {
  # Find the FIRST record in the (pre-sorted) db table matching the triple
  # ($host,$db,$user) and return (record-ref, privilege-hash-ref).
  # Reads $host_name/$host_ip, which are dynamically scoped ('local')
  # in the caller Get_Access_Rights.
  # Side effect: sets $MySQLaccess::Grant::process_host_table when the
  # matching db-record has an empty host field (host table must be consulted).
  my ($host,$db,$user,$aref) = @_;
  MySQLaccess::Debug::Print(1, "(host=$host,user=$user,db=$db)");
  my %Access_db = %{Default_Access_Rights()};
  my $rule_found=0;
  foreach $record (@{$aref}) {
    $full_access=0;
    MySQLaccess::Debug::Print(2, "Read db: $record");
    @record=split(/\t/,$record);
    # check host and db
    # with possible wildcards in field
    # replace mysql-wildcards by reg-wildcards
    my $host_tpl = MySQLaccess::Wildcards::SQL2Reg($record[0]);
    my $db_tpl   = MySQLaccess::Wildcards::SQL2Reg($record[1]);
    my $user_tpl = $record[2]; #user field isn't pattern matched!!
    MySQLaccess::Debug::Print(3, "=>host_tpl : read=$record[0] -> converted=$host_tpl");
    MySQLaccess::Debug::Print(3, "=>db_tpl   : read=$record[1] -> $db_tpl");
    MySQLaccess::Debug::Print(3, "=>user_tpl : read=$record[2] -> $user_tpl");
    if ( ( MySQLaccess::Host::Is_localhost($host_tpl)
           or MySQLaccess::Wildcards::MatchTemplate($host_tpl,$host_name)
           or MySQLaccess::Wildcards::MatchTemplate($host_tpl,$host_ip) )
     and ( MySQLaccess::Wildcards::MatchTemplate($db_tpl,$db) )
     and ( MySQLaccess::Wildcards::MatchTemplate($user_tpl,$user) ) ) {
       # empty host field => host-table restrictions apply as well
       $MySQLaccess::Grant::process_host_table = ($record[0] eq '');
       if ($user_tpl eq '') { push(@Warnings,'public_database'); }
       # %D maps privilege-column name -> column index in the record
       foreach $field (keys(%D)) {
         $Access_db{$field} = $MySQLaccess::Report::Answer{$record[$D{$field}]};
       }
       $rule_found=1;
       $MySQLaccess::Grant::Rules{'db'} = $record;
       last;
    }
  }
  # -------------------------------
  # setting privileges to db-priv
  MySQLaccess::Debug::Print(2, "Rights after parsing db-table..:");
  if (! $rule_found ) {
     MySQLaccess::Debug::Print(2, "NO rule found in db-table => no access granted!!");
  }
  return (\@record,\%Access_db);
}
# ==========================================================
# sub Get_grant_from_host:
# ==========================================================
sub Get_grant_from_host {
  # Find the FIRST record in the (pre-sorted) host table matching
  # ($host,$db) and return (record-ref, privilege-hash-ref).
  # Only consulted when the matching db-record had an empty host field
  # (flag $MySQLaccess::Grant::process_host_table set by Get_grant_from_db);
  # otherwise an empty record with default (no-access) rights is returned.
  # Reads dynamically-scoped $host_name/$host_ip from Get_Access_Rights.
  my ($host,$db,$aref) = @_;
  MySQLaccess::Debug::Print(1, "Get_grant_from_host()");
  my %Access_host = %{Default_Access_Rights()};
  # the host-table doesn't have to be processed if the host-field
  # in the db-table isn't empty
  if (!$MySQLaccess::Grant::process_host_table) {
     MySQLaccess::Debug::Print(2, ">> Host-table doesn't have to be processed!!");
     $MySQLaccess::Grant::Rules{'host'} = 'no_equiv_host';
     return ([],\%Access_host);
  }
  my $rule_found=0;
  my @record = ();
  foreach $record (@{$aref}) {
    $full_access=0;
    MySQLaccess::Debug::Print(2, "host: $record");
    @record=split(/\t/,$record);
    # check host and db
    # with possible wildcards in field
    # replace mysql-wildcards by reg-wildcards
    my $host_tpl = MySQLaccess::Wildcards::SQL2Reg($record[0]);
    my $db_tpl   = MySQLaccess::Wildcards::SQL2Reg($record[1]);
    MySQLaccess::Debug::Print(3, "=>host_tpl : $record[0] -> $host_tpl");
    MySQLaccess::Debug::Print(3, "=>db_tpl   : $record[1] -> $db_tpl");
    if ( ( MySQLaccess::Host::Is_localhost($host_tpl)
           or MySQLaccess::Wildcards::MatchTemplate($host_tpl,$host_name)
           or MySQLaccess::Wildcards::MatchTemplate($host_tpl,$host_ip) )
     and ( MySQLaccess::Wildcards::MatchTemplate($db_tpl,$db) ) ) {
       $MySQLaccess::Grant::Rules{'host'} = $record;
       $rule_found=1;
       # %H maps privilege-column name -> column index in the record
       foreach $field (keys(%H)) {
         $Access_host{$field} = $MySQLaccess::Report::Answer{$record[$H{$field}]};
       }
       last;
    }
  }
  # -------------------------------
  # setting privileges to host-priv
  MySQLaccess::Debug::Print(2, "Rights after parsing host-table..:");
  if (! $rule_found ) {
     @record=();
     MySQLaccess::Debug::Print(2, "NO restrictions found in the host-table!!");
  }
  # --------------------------------
  # debugging access-rights in db
  return (\@record,\%Access_host); #matching record in host-table
}
# ===========================================================
# sub Default_Access_Rights():
# return (a reference to) a hash which holds all default
# priviliges currently defined in the grant-tables.
# ===========================================================
sub Default_Access_Rights {
  # Build the default privilege set: one entry per privilege column found
  # in any of the three grant-table layouts (host, db, user), each set to
  # '0' == no access.  Returns a reference to that hash.
  my %right = ();
  MySQLaccess::Debug::Print(2, "Debug Default_Access_Rights():");
  # seed a "no access" entry for every known privilege column
  foreach my $layout (\%MySQLaccess::Grant::H,
                      \%MySQLaccess::Grant::D,
                      \%MySQLaccess::Grant::U) {
    foreach my $col (keys(%{$layout})) {
      $right{$col} = '0' unless (defined($right{$col}));
    }
  }
  # --------------
  # debugging info
  foreach my $col (keys(%right)) {
    MySQLaccess::Debug::Print(3, sprintf("> %15s : %1s",$col,$right{$col}));
  }
  return \%right;
}
# ======================================
# sub Make_Privlist
# Make an ordered list of the privileges
# that should be reported
# ======================================
sub Make_Privlist {
  # Build an ordered, duplicate-free list of the privilege-column names to
  # report, walking the user-, db- and host-table layouts in that order.
  # Returns a reference to the list.
  # layout:
  #'select_priv', 'create_priv',
  #'insert_priv', 'drop_priv',
  #'update_priv', 'reload_priv',
  #'delete_priv', 'process_priv',
  #'file_priv', 'shutdown_priv');
  my $right;
  my @privlist=();
  # BUGFIX: the old test grep(/$right/,@privlist) did regexp/substring
  # matching, so a name that is a substring of an already-listed name
  # would be dropped; use exact string equality instead.
  foreach $right (@U) {
    if (! grep { $_ eq $right } @privlist) { push(@privlist,$right); }
  };
  foreach $right (@D) {
    if (! grep { $_ eq $right } @privlist) { push(@privlist,$right); }
  };
  foreach $right (@H) {
    if (! grep { $_ eq $right } @privlist) { push(@privlist,$right); }
  };
  # print "Privileges:\n";
  # foreach $field (@privlist) { print " > $field\n"; }
  return \@privlist;
}
########################################################################
# Report generation: renders the computed access rights either as plain
# text (command-line mode) or as HTML (CGI mode).
package MySQLaccess::Report;
use Exporter ();
# NOTE(review): '&Print_Header()' inside qw() yields the literal string
# '&Print_Header()', which is not a valid Exporter entry; the conventional
# form would be qw(Print_Header).  Left as-is -- all callers use fully
# qualified names, so nothing appears to rely on this export list.
@EXPORT = qw(&Print_Header());
BEGIN {
    # CGI self-reference (form target); empty when run from the command line
    $FORM = $ENV{'SCRIPT_NAME'};
    $DEBUG = 0;
    $DEBUG = $MySQLaccess::DEBUG unless ($DEBUG);
    # translation-table for poss. answers
    # maps 'Y'/'N' <-> 1/0 in both directions; unknown/empty -> '?'
    %Answer = ('Y' => 1 , 'N' => 0
              , 1  => 'Y', 0  => 'N'
              ,'?' => '?', '' => '?'
              );
    # layout state: column headers printed yet? separator between reports?
    $headers = 0;
    $separator = 0;
    # ****************************
    # Notes and warnings
    # Message catalog; \$user, \$db, \$host, \$password placeholders are
    # substituted by the report routines before printing.
    %MESSAGES = (
      'insecure_user'
       => "Everybody can access your DB as user `\$user' from host `\$host'\n"
         ."WITHOUT supplying a password.\n"
         ."Be very careful about it!!"
     ,'password_required'
       => "A password is required for user `\$user' :-("
     ,'invalid_password'
       => "The password '\$password' for user `\$user' is invalid :-P"
     , 'valid_password'
       => "You supplied the right password for user `\$user' :-)"
     ,'public_database'
       => "Any user with the appropriate permissions has access to your DB!\n"
         ."Check your users!"
     ,'full_access'
       => "All grant-tables are empty, which gives full access to ALL users !!"
     ,'no_rule_found'
       => "No matching rule"
     ,'no_equiv_host'
       => "Not processed: host-field is not empty in db-table."
     ,'least_priv'
       => "If the final priveliges of the user are more then you gave the user,\n"
         ."check the priveliges in the db-table `\$db'."
     ,'minimum_priv'
       => "The privileges for any new user are AT LEAST\n"
         ."the ones shown in the table above,\n"
         ."since these are the privileges of the db `\$db'.\n"
     ,'not_found_mysql'
       => "The MySQL client program <$MySQLaccess::MYSQL> could not be found.\n"
         ."+ Check your path, or\n"
         ."+ edit the source of this script to point \$MYSQL to the mysql client.\n"
     ,'not_found_mysqldump'
       => "The MySQL dump program <$MySQLaccess::MYSQLDUMP> could not be found.\n"
         ."+ Check your path, or\n"
         ."+ edit the source of this script to point \$MYSQLDUMP to the mysqldump program.\n"
     ,'not_found_diff'
       => "The diff program <$MySQLaccess::DIFF> could not be found.\n"
         ."+ Check your path, or\n"
         ."+ edit the source of this script to point \$DIFF to the diff program.\n"
     ,'Unrecognized_option'
       => "Sorry,\n"
         ."You are using an old version of the mysql-program,\n"
         ."which does not yet implement a neccessary option.\n"
         ."\n"
         ."You need at least Version 6.2 of the mysql-client,\n"
         ."which was build in MySQL v3.0.18, to use this version\n"
         ."of `$MySQLaccess::script'."
     ,'Access_denied'
       => "Sorry,\n"
         ."An error occurred when trying to connect to the database\n"
         ."with the grant-tables:\n"
         ."* Maybe YOU do not have READ-access to this database?\n"
         ."* If you used the -U option, you may have supplied an invalid username?\n"
         ."  for the superuser?\n"
         ."* If you used the -U option, it may be possible you have to supply\n"
         ."  a superuser-password to, with the -P option?\n"
         ."* If you used the -P option, you may have supplied an invalid password?\n"
     ,'Dbaccess_denied'
       => "Sorry,\n"
         ."An error occurred when trying to connect to the database\n"
         ."with the grant-tables. (dbaccess denied)\n"
     ,'Unknown_tmp_table'
       => "Sorry,\n"
         ."An error occurred when trying to work with the temporary tables in the database\n"
         ."with the grant-tables. (One of the temporary tables does not exist)\n"
     ,'Unknown_table'
       => "Sorry,\n"
         ."An error occurred when trying to work with some tables in the database\n"
         ."with the grant-tables. (table does not exist)\n"
     ,'use_old_server'
       => "Sorry,\n"
         ."An error occurred when executing an SQL statement.\n"
         ."You might consider altering the use of the parameter `--old_server' when \n"
         ."calling `$MySQLaccess::script'."
     ,'unknown_error'
       => "Sorry,\n"
         ."An error occurred when trying to connect to the database\n"
         ."with the grant-tables. (unknown error)\n"
     ,'anonymous_access'
       => "Accessing the db as an anonymous user.\n"
         ."Your username has no relevance\n"
     ,'user_required'
       => "You have to supply a userid."
     ,'db_required'
       => "You have to supply the name of a database."
     ,'host_required'
       => "You have to supply the name of a host."
     );
}
# =====================================
# sub Print_Header:
# print header info
# =====================================
sub Print_Header {
  # Emit the report banner: plain text in command-line mode, the HTML
  # prologue (content-type, <HEAD>, credits, taskbar) in CGI mode.
  if ($MySQLaccess::CMD) { #command-line mode
    print "$MySQLaccess::script Version $MySQLaccess::VERSION\n";
    print "By RUG-AIV, by Yves Carlier (Yves.Carlier\@rug.ac.be)\n";
    print "Changes by Steve Harvey (sgh\@vex.net)\n";
    print "This software comes with ABSOLUTELY NO WARRANTY.\n";
  }
  if ($MySQLaccess::CGI) { #CGI-BIN mode
    print "content-type: text/html\n\n";
    print "<HTML>\n<HEAD>\n<TITLE>MySQLaccess</TITLE>\n</HEAD>\n<BODY>\n";
    print "<H1>$MySQLaccess::script Version $MySQLaccess::VERSION</H1>\n";
    print "<CENTER>\n<ADDRESS>\n";
    print "By RUG-AIV, by Yves Carlier (<a href=mailto:Yves.Carlier\@rug.ac.be>Yves.Carlier\@rug.ac.be</a>)<BR>\n";
    print "Changes by Steve Harvey (<a href=mailto:sgh\@vex.net>sgh\@vex.net</a>)<BR>\n";
    print "This software comes with ABSOLUTELY NO WARRANTY.<BR>\n";
    print "</ADDRESS>\n</CENTER>\n";
    print "<HR>\n";
    Print_Taskbar();
    print "<HR>\n";
  }
  return 1;
}
# =====================================
# sub Print_Footer:
# print footer info
# =====================================
sub Print_Footer {
  # Emit the closing part of the report (bug-report pointer; HTML epilogue
  # in CGI mode).
  if ($MySQLaccess::CMD) { #command-line mode
    print "\nBUGs can be reported at https://jira.mariadb.org\n";
  }
  if ($MySQLaccess::CGI) { #CGI-BIN mode
    # the brief (matrix) report leaves its <table> element open
    print "</table>\n" if ($MySQLaccess::Param{'brief'});
    print "<HR>\n<ADDRESS>\n";
    print "BUGs can be reported at <a href=\"https://jira.mariadb.org\">MariaDB JIRA</a><BR>\n";
    print "</ADDRESS>\n</BODY>\n</HTML>\n";
  }
  return 1;
}
# =====================================
# sub Print_Taskbar:
# print taskbar on STDOUT
# =====================================
sub Print_Taskbar {
  # Navigation bar for CGI mode: one link per available action,
  # each pointing back at this script with the action flag switched on.
  my @actions = ( ['relnotes','Release Notes']
                , ['version' ,'Version']
                , ['plan'    ,'Future Plans']
                , ['howto'   ,'Examples']
                , ['help'    ,'New check']
                , ['edit'    ,'Change/edit ACL']
                );
  print "<CENTER>\n";
  foreach my $action (@actions) {
    my ($query,$label) = @{$action};
    print "[<a href=$FORM?$query=on>$label</a>] \n";
  }
  print "</CENTER>\n";
  return 1;
}
# =====================================
# sub Print_Form:
# print CGI-form
# =====================================
sub Print_Form {
  # Render the CGI query form: server connection info (host, superuser,
  # password), the triple to check (user, password, db, host) and the
  # report-type submit buttons.  The heredoc interpolates $FORM and the
  # previously submitted %MySQLaccess::Param values.
  print <<EOForm;
<center>
<!-- Quering -->
<FORM method=POST action=$FORM>
<table border width="100%" >
<tr>
 <th>MySQL server</th>
 <th>User information</th>
 <th>Reports</th>
</tr>
<tr>
 <td valign=top>
 <table>
 <tr>
  <td halign=right><b>Host</b><br><font size=-2>(Host on which MySQL-server resides.)</font></td>
  <td valign=top><INPUT name=rhost type=text size=15 maxlength=15 value="$MySQLaccess::Param{'rhost'}"></td>
 </tr>
 <tr>
  <td halign=right><b>Superuser</b><br><font size=-2>(User which has <font color="Red">read-access</font> to grant-tables.)</font></td>
  <td valign=top><INPUT name=superuser type=text size=15 maxlength=15 value="$MySQLaccess::Param{'superuser'}"></td>
 </tr>
 <tr>
  <td halign=right><b>Password</b><br><font size=-2>(of Superuser.)</font></td>
  <td valign=top><INPUT name=spassword type=password size=15 maxlength=15 value="$MySQLaccess::Param{'spassword'}"></td>
 </tr>
 </table>
 </td>
 <td valign=top>
 <table>
 <tr>
  <td halign=right><b><font color=Red>User</font></b><br><font size=-2>(Userid used to connect to MySQL-database.)</font></td>
  <td halign=top><INPUT name=user type=text size=15 maxlength=15 value="$MySQLaccess::Param{'user'}"></td>
 </tr>
 <tr>
  <td halign=right><b>Password</b><br><font size=-2>(Password user has to give to get access to MySQL-database.)</font></td>
  <td valign=top><INPUT name=password type=password size=15 maxlength=15 value="$MySQLaccess::Param{'password'}"></td>
 </tr>
 <tr>
  <td halign=right><b><font color=Red>Database</font></b><br><font size=-2>(Name of MySQL-database user tries to connect to.</font><br><font size=-2>Wildcards <font color="Green">(*,?,%,_)</font> are allowed.)</font></td>
  <td valign=top><INPUT name=db type=text size=15 maxlength=15 value="$MySQLaccess::Param{'db'}"></td>
 </tr>
 <tr>
  <td halign=right><b>Host</b><br><font size=-2>(Host from where the user is trying to connect to MySQL-database.</font><br><font size=-2>Wildcards <font color="Green">(*,?,%,_)</font> are allowed.)</font></td>
  <td valign=top><INPUT name=host type=text size=15 maxlength=15 value="$MySQLaccess::Param{'host'}"></td>
 </tr>
 </table>
 </td>
 <td valign=center>
 <table cellspacing=5 cellpadding=2 cols=1 height="100%">
 <tr align=center>
  <td halign=right><INPUT type=submit name=brief value="Brief"><br>
                   <INPUT type=submit name=table value="Tabular"></td>
 </tr>
 <tr align=center>
  <td></td>
 </tr>
 <tr align=center>
  <td halign=right><INPUT type=reset value="Clear"></td>
 </tr>
 </table>
 </td>
</tr>
</table>
</form>
</BODY>
</HTML>
EOForm
  return 1;
}
# =====================================
# sub Print_Usage:
# print some information on STDOUT
# =====================================
sub Print_Usage {
  # Show any queued error messages, then the usage information that fits
  # the current mode (option summary for CMD, query form for CGI).
  Print_Error_Messages();
  Print_Options() if ($MySQLaccess::CMD); #command-line mode
  Print_Form()    if ($MySQLaccess::CGI); #CGI-BIN mode
  return 1;
}
# ======================================
# sub Print_Version:
# ======================================
sub Print_Version {
  # Dump the version/info text; wrapped in <PRE> for browser output.
  print $MySQLaccess::INFO if ($MySQLaccess::CMD);
  if ($MySQLaccess::CGI) {
    print "<PRE>\n", $MySQLaccess::INFO, "</PRE>\n";
  }
  return 1;
}
# ======================================
# sub Print_Relnotes:
# ======================================
sub Print_Relnotes {
  # Dump the release notes; wrapped in <PRE> for browser output.
  print $MySQLaccess::RELEASE if ($MySQLaccess::CMD);
  if ($MySQLaccess::CGI) {
    print "<PRE>\n", $MySQLaccess::RELEASE, "</PRE>\n";
  }
  return 1;
}
# ======================================
# sub Print_Plans:
# ======================================
sub Print_Plans {
  # Dump the future-plans (TODO) text; wrapped in <PRE> for browser output.
  print $MySQLaccess::TODO if ($MySQLaccess::CMD);
  if ($MySQLaccess::CGI) {
    print "<PRE>\n", $MySQLaccess::TODO, "</PRE>\n";
  }
  return 1;
}
# ======================================
# sub Print_HowTo:
# ======================================
sub Print_HowTo {
  # Dump the usage examples; wrapped in <PRE> for browser output.
  print $MySQLaccess::HOWTO if ($MySQLaccess::CMD);
  if ($MySQLaccess::CGI) {
    print "<PRE>\n", $MySQLaccess::HOWTO, "</PRE>\n";
  }
  return 1;
}
# ======================================
# sub Print_Options:
# ======================================
sub Print_Options {
  # Dump the command-line option summary; wrapped in <PRE> for CGI mode.
  print "<PRE>\n" if ($MySQLaccess::CGI);
  print $MySQLaccess::OPTIONS;
  print "</PRE>\n" if ($MySQLaccess::CGI);
  return 1;
}
# ======================================
# sub Print_Error_Access:
# ======================================
sub Print_Error_Access {
  # Print a single fatal access-error message from %MESSAGES,
  # highlighted in red when rendering for a browser.
  my ($error) = @_;
  print "\n";
  print "<font color=Red>\n<PRE>\n" if ($MySQLaccess::CGI);
  print $MESSAGES{$error};
  print "</PRE>\n</font>\n" if ($MySQLaccess::CGI);
  print "\n";
  return 1;
}
# ======================================
# sub Print_Error_Messages:
# ======================================
sub Print_Error_Messages {
# my ($error) = @_;
  # Print every queued error message; centered and red in CGI mode.
  # NOTE(review): this iterates @MySQLaccess::Grant::Error (singular),
  # while the grant-checking code pushes onto @Errors, i.e.
  # @MySQLaccess::Grant::Errors (plural) -- verify which array is actually
  # populated elsewhere in the file.
  print "\n";
  if ($MySQLaccess::CGI) { print "<font color=Red>\n<center>\n"; }
  foreach $error (@MySQLaccess::Grant::Error) {
    print $MESSAGES{$error};
    print $MySQLaccess::CGI ? "<br>\n" : "\n";
  }
  if ($MySQLaccess::CGI) { print "</center>\n</font>\n"; }
  print "\n";
  return 1;
}
# ======================================
# sub Print_Message:
# ======================================
sub Print_Message {
  # Print an arbitrary list of informational messages (array-ref),
  # centered and dark-green in CGI mode.
  my ($aref) = @_;
  print "\n";
  print "<font color=DarkGreen>\n<center>\n" if ($MySQLaccess::CGI);
  foreach my $text (@{$aref}) {
    print $text;
    print $MySQLaccess::CGI ? "<br>\n" : "\n";
  }
  print "</center>\n</font>\n" if ($MySQLaccess::CGI);
  print "\n";
  return 1;
}
# ======================================
# sub Print_Edit:
# ======================================
sub Print_Edit {
  # Render the CGI form for maintaining the temporary copies of the
  # grant-tables (copy / edit externally / preview / commit / rollback).
  # Only available in CGI mode; returns 0 in command-line mode.
  print "\n";
  if (!$MySQLaccess::CGI) {
     print "Note: Editing the temporary tables is NOT supported in CMD-line mode!\n";
     return 0;
  }
  print "<CENTER>\n"
       ."<form action=$FORM method=GET>\n"
       ."<table width=90% border>\n"
       ."<tr>\n"
       ." <td><input type=checkbox name=copy value=on> Copy grant-rules to temporary tables<br></td>\n"
       ." <td rowspan=5 align=center valign=center><input type=submit value=Go></td>\n"
       ."</tr>\n"
       ."<tr>\n"
       ." <td> Edit temporary tables with external application:<br>"
       ."      <a href=\"$MySQLaccess::MYSQLADMIN\">$MySQLaccess::MYSQLADMIN</a></td>\n"
       ."</tr>\n"
       ."<tr>\n"
       ." <td><input type=checkbox name=preview value=on> Preview changes made in temporary tables</td>\n"
       ."</tr>\n"
       ."<tr>\n"
       ." <td><input type=checkbox name=commit value=on> Make changes permanent</td>\n"
       ."</tr>\n"
       ."<tr>\n"
       ." <td><input type=checkbox name=rollback value=on> Restore previous grand-rules</td>\n"
       ."</tr>\n"
       ."<tr>\n"
       ." <td colspan=2 align=center><font size=-2 color=Red>You need write,delete and drop-privileges to perform the above actions</font></td>\n"
       ."</tr>\n"
       ."</table>\n"
       ."</form>\n"
       ."</CENTER>\n";
  return 1;
}
# ======================================
# sub Print_Access_rights:
# print the access-rights on STDOUT
# ======================================
sub Print_Access_rights {
  # Dispatch to the requested report layout for one (host,user,db) result:
  # one-line matrix when the 'brief' option is set, full table otherwise.
  my ($host,$user,$db,$refhash) = @_;
  if (!defined($MySQLaccess::Param{'brief'})) {
    Tabular_Report($host,$user,$db,$refhash);
    # subsequent tabular reports are visually separated from each other
    $MySQLaccess::Report::separator = $MySQLaccess::CGI ? "<hr>" : "-"x80;
  }
  else {
    Matrix_Report($host,$user,$db,$refhash);
  }
  return 1;
}
# ======================================
# sub Print_Diff_ACL:
# print the diff. in the grants before and after
# ======================================
sub Print_Diff_ACL {
  # Print the differences in access rights before ('<' lines, "Before")
  # and after ('>' lines, "After") changes to the grant tables.
  # $aref is a list of diff lines of the form
  #   <host,user,db,priv1,priv2,...   or   >host,user,db,priv1,priv2,...
  my ($aref) = @_;
  my @diffs = @{$aref};
  my %block = ( '<' => 'Before',
                '>' => 'After',
              );
  my %color = ( '<' => 'Green',
                '>' => 'Red',
              );
  my $curblock = '';
  # -----------------------------
  # create column-headers
  foreach $field (@MySQLaccess::Grant::Privileges) {
    push(@headers,substr($field,0,4));
  }
  if ($MySQLaccess::CMD) {
    print "\n";
    print "Differences in access-rights BEFORE and AFTER changes in grant-tables\n";
#    print "---------------------------------------------------------------------\n";
    my $line1="";
    my $line2="";
    $line1 .= sprintf("| %-30s|",'Host,User,DB');
    $line2 .= sprintf("+-%-30s+",'-' x 30);
    foreach $header (@headers) {
      $line1 .= sprintf("%-4s|",$header);
      $line2 .= sprintf("%s+",'----');
    }
    print "$line2\n";
    print "$line1\n";
    print "$line2\n";
    # Build and eval a format so 'write' below can render each diff line.
    # NOTE(review): the format references $host_user_db, which is not
    # assigned anywhere in this sub before 'write' is called -- verify
    # where it is set (presumably elsewhere in the file).
    $format  = "format STDOUT = \n"
             . "^<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< " . " @|||" x 10 ."\n"
             . '$host_user_db,@priv' . "\n"
             . ".\n";
    #print $format;
    eval $format;
  }
  if ($MySQLaccess::CGI) {
    print "<table border width=100%>\n";
    print "<tr>\n";
    print "<th colspan=11>";
    print "Differences in access-rights <font color=$color{'<'}>BEFORE</font> "
         ."and <font color=$color{'>'}>AFTER</font> changes to grant-tables</font>\n";
    print "</th>";
    print "</tr>\n";
    print "<tr>\n";
    $line1 .= sprintf("<th>%-20s</th>",'Host, User, DB');
    foreach $header (@headers) {
      $line1 .= sprintf("<th>%-4s</th>",$header);
    }
    print "$line1</tr>\n";
  }
  foreach $line (@diffs) {
    # first character encodes which side of the diff the line belongs to
    $type = substr($line,0,1);
    $line = substr($line,1);
    ($host,$user,$db,@priv) = split(/,/,$line);
    if ($MySQLaccess::CMD) {
      if ($type ne $curblock) {
        $curblock = $type;
        print $block{$curblock},":\n";
      }
      #print "$line\n";
      write;
    }
    if ($MySQLaccess::CGI) {
      if ($type ne $curblock) {
        $curblock = $type;
        print "<tr><td><b>$block{$curblock}<b></td></tr>\n";
      }
      $line1="<td><font color=$color{$type}>$host, $user, $db</font></td>";
      foreach $field (@priv) {
        $line1 .= sprintf("<td align=center><font color=$color{$type}>%-4s</font></td>",$field);
      }
      print "<tr>$line1</tr>\n";
    }
  }
  print "\n";
  if ($MySQLaccess::CMD) {
    print "---------------------------------------------------------------------\n";
  }
  if ($MySQLaccess::CGI) {
    print "</table><br>";
  }
  return 1;
}
# ======================================
# sub Tabular_Report
# Tabular report,
# suitable for 1 triple (host,db,user)
# ======================================
sub Tabular_Report {
  # Full (two-column) report for ONE triple (host,user,db): the privilege
  # table, followed by the accumulated notes, warnings and errors (with
  # $user/$db/$host/$password placeholders substituted), and finally the
  # grant-rules that produced the result.  $a is the privilege hash-ref.
  my ($host,$user,$db,$a) = @_;
  my $column=2;
  # -----------------------------
  # separator
  if ($MySQLaccess::Report::separator) { print "$MySQLaccess::Report::separator\n"; }
  # -----------------------------
  # print table of access-rights
  # lay the privilege list out in two columns, filled top-to-bottom:
  # even slots take the first half of the list, odd slots the second half
  my $rows = int(@MySQLaccess::Grant::Privileges/2); #round up
  my @table=();
  $j=0;
  for $i (0 .. $rows-1) {
      $table[$j]=$MySQLaccess::Grant::Privileges[$i];
      $j = $j+2;
  }
  $j=1;
  for $i ($rows .. $#MySQLaccess::Grant::Privileges) {
      $table[$j]=$MySQLaccess::Grant::Privileges[$i];
      $j = $j+2;
  }
  if ($MySQLaccess::CMD) {
    print "\n";
    print "Access-rights\n";
    print "for USER '$user', from HOST '$host', to DB '$db'\n";
  }
  if ($MySQLaccess::CGI) {
    print "<table border width=100%>\n";
    print "<tr>\n";
  }
  if ($MySQLaccess::CGI) {
    print "<th colspan=5>";
    print "<font color=Red>Access-rights</font>\n";
    print "for USER '<font color=Green>$user</font>', from HOST '<font color=Green>$host</font>', to DB '<font color=Green>$db</font>'\n";
    print "</th>";
    print "</tr>\n";
    print "<tr>\n";
  }
  if ($MySQLaccess::CMD) {
    print "\t+-----------------+---+\t+-----------------+---+";
  }
  foreach $field (@table) {
    if ($MySQLaccess::CMD) {
      if ($column==2) { print "\n\t"; $column=1;}
      else            { print "\t"; $column=2;}
      printf "| %-15s | %s |",$field,$Answer{$a->{$field}};
    }
    if ($MySQLaccess::CGI) {
      if ($column==2) { print "</tr>\n<tr>\n"; $column=1;}
      else            { print "<td width=10%></td>"; $column=2;}
      # NOTE(review): '%>' after 'width=35%' is not a valid printf
      # conversion; Perl passes invalid conversions through literally,
      # which appears to be relied on here -- verify output if changed.
      printf " <td width=35%><b>%-15s</b></td><td width=10%>%s</td>\n",$field,$Answer{$a->{$field}};
    }
  }
  print "\n";
  if ($MySQLaccess::CMD) {
    print "\t+-----------------+---+\t+-----------------+---+\n";
  }
  if ($MySQLaccess::CGI) {
    print "</tr>\n</table><br>";
  }
  # ---------------
  # print notes:
  foreach $note (@MySQLaccess::Grant::Notes) {
    my $message = $MESSAGES{$note};
    $message =~ s/\$user/$user/g;
    $message =~ s/\$db/$db/g;
    $message =~ s/\$host/$host/g;
    $message =~ s/\$password/$password/g;
    $PREFIX='NOTE';
    if ($MySQLaccess::CMD) {
      my @lines = split(/\n/,$message);
      foreach $line (@lines) {
        print "$PREFIX:\t $line\n";
        $PREFIX='    ';
      }
    }
    if ($MySQLaccess::CGI) {
      print "<b>$PREFIX:</b> $message<br>\n";
    }
  }
  # ---------------
  # print warnings:
  foreach $warning (@MySQLaccess::Grant::Warnings) {
    my $message = $MESSAGES{$warning};
    $message =~ s/\$user/$user/g;
    $message =~ s/\$db/$db/g;
    $message =~ s/\$host/$host/g;
    $message =~ s/\$password/$password/g;
    $PREFIX='BEWARE';
    if ($MySQLaccess::CMD) {
      my @lines = split(/\n/,$message);
      foreach $line (@lines) {
        print "$PREFIX:\t $line\n";
        $PREFIX='    ';
      }
    }
    if ($MySQLaccess::CGI) {
      print "<b>$PREFIX:</b> $message<br>\n";
    }
  }
  # ---------------
  # print errors:
  foreach $error (@MySQLaccess::Grant::Errors) {
    my $message = $MESSAGES{$error};
    $message =~ s/\$user/$user/g;
    $message =~ s/\$db/$db/g;
    $message =~ s/\$host/$host/g;
    $message =~ s/\$password/$password/g;
    $PREFIX='ERROR';
    if ($MySQLaccess::CMD) {
      my @lines = split(/\n/,$message);
      foreach $line (@lines) {
        print "$PREFIX:\t $line\n";
        $PREFIX='    ';
      }
    }
    if ($MySQLaccess::CGI) {
      print "<b>$PREFIX:</b> $message<br>\n";
    }
  }
  # ---------------
  # inform if there are no rules ==> full access for everyone.
  if ($MySQLaccess::Grant::full_access) { print "$MESSAGES{'full_access'}\n"; }
  # ---------------
  # print the rules used
  print "\n";
  if ($MySQLaccess::CMD) {
    print "The following rules are used:\n";
    foreach $field (sort(keys(%MySQLaccess::Grant::Rules))) {
      my $rule = (defined($MESSAGES{$MySQLaccess::Grant::Rules{$field}}) ? $MESSAGES{$MySQLaccess::Grant::Rules{$field}} : $MySQLaccess::Grant::Rules{$field});
      $rule =~ s/\t/','/g;
      printf " %-5s : '%s'\n",$field,$rule;
    }
  }
  if ($MySQLaccess::CGI) {
    print "<br>\n";
    print "<table border width=100%>\n";
    print "<tr><th colspan=2>The following rules are used:</th></tr>\n";
    foreach $field (sort(keys(%MySQLaccess::Grant::Rules))) {
      my $rule = (defined($MESSAGES{$MySQLaccess::Grant::Rules{$field}}) ? $MESSAGES{$MySQLaccess::Grant::Rules{$field}} : $MySQLaccess::Grant::Rules{$field});
      $rule =~ s/\t/','/g;
      printf "<tr><th>%-5s</th><td>'%s'</td></tr>\n",$field,$rule;
    }
    print "</table>\n";
  }
  return 1;
}
# ======================================
# sub Matrix_Report:
# single-line output foreach triple,
# no notes,warnings,...
# ======================================
sub Matrix_Report {
  # One-line ("brief") report for a triple (host,user,db): a row of Y/N/?
  # answers, one per privilege, followed by the triple itself.  Column
  # headers are printed only once per run, guarded by the package flag
  # $MySQLaccess::Report::headers (note: distinct from the lexical
  # @headers array below).
  my ($host,$user,$db,$a) = @_;
  my @headers = ();
  if (! $headers) {
     # -----------------------------
     # create column-headers (first 4 chars of each privilege name)
     foreach $field (@MySQLaccess::Grant::Privileges) {
       push(@headers,substr($field,0,4));
     }
     # -----------------------------
     # print column-headers
     print "\n";
     if ($MySQLaccess::CMD) {
       my $line1="";
       my $line2="";
       foreach $header (@headers) {
         $line1 .= sprintf("%-4s ",$header);
         $line2 .= sprintf("%s ",'----');
       }
       $line1 .= sprintf("| %-20s",'Host,User,DB');
       $line2 .= sprintf("+ %-20s",'-' x 20);
       print "$line1\n";
       print "$line2\n";
     }
     if ($MySQLaccess::CGI) {
       print "<table width=100% border>\n";
       my $line1="<tr>";
       foreach $header (@headers) {
         $line1 .= sprintf("<th>%-4s</th>",$header);
       }
       $line1 .= sprintf("<th>%-20s</th>",'Host, User, DB');
       print "$line1</tr>\n";
     }
     # ----------------------------
     # column-headers should only be
     # printed once.
     $MySQLaccess::Report::headers=1;
  }
  # ------------------------
  # print access-information
  if ($MySQLaccess::CMD) {
    foreach $field (@MySQLaccess::Grant::Privileges) {
      printf " %-2s  ",$Answer{$a->{$field}};
    }
    printf "| %-20s",join(',',$host,$user,$db);
    print "\n";
  }
  if ($MySQLaccess::CGI) {
    print "<tr>";
    foreach $field (@MySQLaccess::Grant::Privileges) {
      printf "<td align=center>%-2s</td>",$Answer{$a->{$field}};
    }
    printf "<td><b>%-20s</b></td>",join(', ',$host,$user,$db);
    print "</tr>\n";
  }
  return 1;
}
# ======================================
# sub Raw_Report:
# single-line output foreach triple,
# no notes,warnings,...
# ======================================
sub Raw_Report {
  # Render one access-check result as a single comma-separated record:
  #   "host,user,db,answer1,answer2,...," (note the trailing comma).
  # $a is the privilege hash-ref; answers are translated through %Answer.
  my ($host,$user,$db,$a) = @_;
  my @answers = map { $Answer{$a->{$_}} } @MySQLaccess::Grant::Privileges;
  return join(',', $host, $user, $db, @answers) . ',';
}
#######################################################################
# Helpers for converting SQL wildcards and shell wildcards into Perl
# regular expressions.
package MySQLaccess::Wildcards;
BEGIN {
$DEBUG = 0;
$DEBUG = $MySQLaccess::DEBUG unless ($DEBUG);
}
# ############################################
# SQL, WILDCARDS and REGULAR EXPRESSIONS
# ============================================
# translage SQL-expressions to Reg-expressions
# ============================================
# The \002 byte is used as a temporary placeholder so that escaped
# wildcards (\% and \_) survive the substitution of the unescaped ones.
sub SQL2Reg {
my ($expr) = @_;
my $expr_o = $expr;
$expr =~ s/\./\\./g;
$expr =~ s/\\%/\002/g;
$expr =~ s/%/.*/g;
$expr =~ s/\002/%/g;
$expr =~ s/\\_/\002/g;
# NOTE(review): SQL '_' matches exactly one character, so '.' would be
# the exact translation; '.+' (one or more) is what this code has
# always done -- confirm before changing long-standing behaviour.
$expr =~ s/_/.+/g;
$expr =~ s/\002/_/g;
MySQLaccess::Debug::Print(2,"$expr_o --> $expr");
return $expr;
}
# translage WILDcards to Reg-expressions
# ============================================
# Same placeholder trick as SQL2Reg, for shell-style '*' and '?'.
sub Wild2Reg {
my ($expr) = @_;
my $expr_o = $expr;
$expr =~ s/\./\\./g;
$expr =~ s/\\\*/\002/g;
$expr =~ s/\*/.*/g;
$expr =~ s/\002/*/g;
$expr =~ s/\\\?/\002/g;
# NOTE(review): '?' conventionally matches exactly one character;
# '.+' matches one-or-more -- kept as-is, verify intent.
$expr =~ s/\?/.+/g;
$expr =~ s/\002/?/g;
MySQLaccess::Debug::Print(2,"$expr_o --> $expr");
return $expr;
}
# =============================================
# Match a string against a regexp template.
# An empty template matches everything.
# Returns 1 on a match, 0 otherwise.
# =============================================
sub MatchTemplate {
my ($tpl, $str) = @_;
my $hit = ($tpl eq '' || $str =~ /^$tpl$/) ? 1 : 0;
MySQLaccess::Debug::Print(2,"($tpl,$str) --> $hit");
return $hit;
}
#######################################################################
# Hostname / IP resolution helpers.
package MySQLaccess::Host;
BEGIN {
$localhost = undef;
# NOTE(review): $DEBUG is hard-wired to 2 here (a truthy value), so the
# "unless ($DEBUG)" fallback never fires -- looks like a leftover
# debugging setting; confirm whether 0 was intended.
$DEBUG = 2;
$DEBUG = $MySQLaccess::DEBUG unless ($DEBUG);
}
# ======================================
# sub IP2Name
# return the Name with the corr. IP-nmbr
# (no aliases yet!!)
# Strings that do not look like dotted-quad IPs are returned unchanged;
# on reverse-lookup failure the original IP is returned.
# ======================================
sub IP2Name {
my ($ip) = @_;
my $ip_o = $ip;
if ($ip !~ /([0-9]+)\.([0-9]+)\.([0-9]+)\.([0-9]+)/o) {
MySQLaccess::Debug::Print(3,"'$ip' is not an ip-number, returning IP=$ip");
return $ip;
}
MySQLaccess::Debug::Print(4,"IP=$ip split up => $1.$2.$3.$4");
# Pack the four octets into the binary form gethostbyaddr expects.
$ip = pack "C4",$1,$2,$3,$4;
MySQLaccess::Debug::Print(4,"IP packed -> >>$ip<<\n");
# NOTE(review): AF_INET is a bareword here; it resolves only if Socket
# (or an equivalent constant) is loaded elsewhere in the script -- verify.
my ($name,$aliases,$addrtype,$length,@addrs) = gethostbyaddr($ip, AF_INET);
MySQLaccess::Debug::Print(3,"IP=$ip_o => hostname=$name");
MySQLaccess::Debug::Print(4,"aliases=$aliases");
MySQLaccess::Debug::Print(4,"addrtype=$addrtype - length=$length");
return ($name || $ip);
#return ($name || undef);
}
# ======================================
# sub Name2IP
# return the IP-number of the host
# Names containing SQL wildcards (% or _) are returned unchanged, since
# they cannot be resolved; on lookup failure the name is returned.
# ======================================
sub Name2IP {
my ($name) = @_;
if ($name =~ /[%_]/) {
MySQLaccess::Debug::Print(3,"'$name' contains SQL-wildcards, returning name=$name");
return $name;
}
my ($_name,$aliases,$addrtype,$length,@addrs) = gethostbyname($name);
# Unpack the first address into dotted-quad form.
my ($a,$b,$c,$d) = unpack('C4',$addrs[0]);
my $ip = "$a.$b.$c.$d";
MySQLaccess::Debug::Print(3,"hostname=$name => IP=$ip");
MySQLaccess::Debug::Print(4,"aliases=$aliases");
MySQLaccess::Debug::Print(4,"addrtype=$addrtype - length=$length");
#if ($ip ne "") { return "$ip"; }
#else           { return undef; }
return ($ip || $name);
}
# ========================================
# sub LocalHost
# some special action has to be taken for
# the localhost
# Lazily caches the local hostname in the package variable $localhost
# on first call and returns it.
# ========================================
sub LocalHost {
if (!defined($MySQLaccess::Host::localhost)) {
$MySQLaccess::Host::localhost = Sys::Hostname::hostname();
MySQLaccess::Debug::Print(3,"Setting package variable \$localhost=$MySQLaccess::Host::localhost");
}
my $host = $localhost;
MySQLaccess::Debug::Print(3,"localhost = $host");
return $host;
}
# ========================================
# check if the given hostname (or ip)
# corresponds with the localhost
# Only meaningful when the host being checked IS the local machine;
# otherwise 0 is returned without testing the template.
# ========================================
sub Is_localhost {
my ($host_tpl) = @_;
my $isit = 0;
# NOTE(review): $local_ip is read here but never assigned in this
# package as far as visible -- presumably set by the caller; verify.
if (($MySQLaccess::host_name eq $localhost) or ($MySQLaccess::host_ip eq $local_ip)) {
MySQLaccess::Debug::Print(2,"Checking for localhost");
MySQLaccess::Debug::Print(3,"because ($MySQLaccess::host_name EQ $localhost) AND ($MySQLaccess::host_ip EQ $local_ip)");
# Does the template match the literal string 'localhost'?
$isit = ( 'localhost' =~ /$host_tpl/ ) ? 1 : 0;
MySQLaccess::Debug::Print(3," 'localhost' =?= $host_tpl -> $isit");
return $isit;
}
else {
MySQLaccess::Debug::Print(4,"Not checking for localhost");
MySQLaccess::Debug::Print(4,"because ($MySQLaccess::host_name != $localhost) AND ($MySQLaccess::host_ip != $local_ip)");
return 0;
}
}
# =========================================
# check if host (IP or name) can be matched
# on the template.
# The host is resolved in both directions (name and IP) and the
# template is tried against each form; returns 1/0 (via the 'or').
# =========================================
sub MatchTemplate {
my ($host,$tpl) = @_;
my $match = 0;
MySQLaccess::Debug::Print(1, "($host) =?= ($tpl)");
my $host_name = IP2Name($host);
my $host_ip = Name2IP($host);
MySQLaccess::Debug::Print(2, "name=$host_name ; ip=$host_ip");
$match = (MySQLaccess::Wildcards::MatchTemplate($tpl,$host_name) or
MySQLaccess::Wildcards::MatchTemplate($tpl,$host_ip));
# BUGFIX: this trace used to interpolate $ncount, a variable that is
# never assigned anywhere, so the result was always logged as empty.
MySQLaccess::Debug::Print(2, "($host_name,$host_ip) =?= ($tpl): $match");
return $match;
}
########################################################################
# Debug logging: all trace output is appended to the file named by
# $MySQLaccess::script_log (opened once at compile time).
package MySQLaccess::Debug;
BEGIN {
my $dbg_file = "$MySQLaccess::script_log";
open(DEBUG,"> $dbg_file") or warn "Could not open outputfile $dbg_file for debugging-info\n";
# Make the DEBUG handle unbuffered, then restore STDOUT as default.
select DEBUG;
$| = 1;
select STDOUT;
}
# =========================================
# Print debugging information on STDERR
# (NOTE(review): despite the comment above, output actually goes to the
#  DEBUG log file opened in BEGIN, not to STDERR.)
# =========================================
sub Print {
my ($level,$mesg) = @_;
# Identify the calling sub so its package's $DEBUG level can be used;
# falls back to the global $MySQLaccess::DEBUG.
my ($pack,$file,$line,$subname,$hasargs,$wantarray) = caller(1);
my ($PACK) = split('::',$subname);
my $DEBUG = ${$PACK."::DEBUG"} ? ${$PACK."::DEBUG"} : $MySQLaccess::DEBUG ;
my ($sec,$min,$hour) = localtime();
print DEBUG "[$hour:$min:$sec $subname] $mesg\n" if ($DEBUG>=$level);
}
|
chidelmun/server
|
scripts/mysqlaccess.sh
|
Shell
|
gpl-2.0
| 111,618 |
#!/bin/sh
# Open a graphical console (VNC or SPICE) for a libvirt domain.
#
# Usage: vmdisplay.sh <domain-name>
#
# The viewer is chosen from the URI reported by "virsh domdisplay".
#host=192.168.1.123
#kvmurl="qemu+ssh://$host/system"
host=localhost
kvmurl="qemu:///system"

if [ $# -ne 1 ]; then
    echo "usage: $0 <domain-name>" >&2
    exit 2
fi

# Ask libvirt for the domain's display URI (e.g. vnc://... or spice://...).
display=$(virsh -c "$kvmurl" domdisplay "$1")
# echo $display ; exit
case "$display" in
    vnc*)
        # Strip the vnc:// scheme; vncviewer accepts host:display directly.
        vncviewer "$(echo "$display" | sed -e 's,vnc://,,g')" &
        ;;
    spice*)
        # Rewrite spice://localhost:PORT into "--host HOST --port PORT".
        # Word-splitting of the sed output into separate arguments is
        # intentional here, so $(...) is deliberately left unquoted.
        spicec $(echo "$display" | sed -e s/localhost/$host/g -e 's,spice://,--host ,g' -e 's,:, --port ,g') &
        ;;
    *)
        echo "Unknown display type or VM not running" >&2
        ;;
esac
|
TimJDFletcher/personal-scripts
|
KVM/vmdisplay.sh
|
Shell
|
gpl-2.0
| 442 |
#!/bin/bash
# Install Docker CE from the official Fedora repository unless the
# "docker" binary is already on $PATH.
#
# Expects the dotfiles environment to provide the colour variables
# ${bakgrn}/${bakcyn}/${txtrst} and $DOTFILE_DEFAULT_USER (the login
# that should be added to the docker group).
#
# Note: shebang changed to bash -- the original used #!/bin/sh while
# relying on [[ ]] and echo -e, which are bashisms.
if command -v docker >/dev/null 2>&1; then
    # 'command -v' is the portable installed-binary test; the previous
    # version parsed the stderr of "docker -v", which is fragile.
    echo -e "${bakgrn}[installed][Docker]${txtrst} already installed ;)" ;
else
    echo -e "${bakcyn}[Docker] Start Install ${txtrst}";
    # wget -qO- https://get.docker.com/ | sh;
    # service docker start ;
    # usermod -aG docker $DOTFILE_DEFAULT_USER ;
    # chkconfig docker on ;
    dnf -y install dnf-plugins-core
    dnf config-manager --add-repo https://download.docker.com/linux/fedora/docker-ce.repo
    dnf config-manager --set-enabled docker-ce-edge
    dnf config-manager --set-enabled docker-ce-test
    dnf install -y docker-ce
    service docker start
    usermod -aG docker "$DOTFILE_DEFAULT_USER"
    chkconfig docker on
    echo -e "${bakgrn}[Docker] Finish Install ${txtrst}";
fi
|
rtancman/dotfiles
|
sh/fedora/scripts/docker.sh
|
Shell
|
gpl-2.0
| 784 |
#!/bin/sh -e
# Checks for the Apache user being in cachegrp
# Without this Apache can't see Trak web content
# Relies on helpers from functions.sh: listunion, preflightargs,
# checkfieldquit and osspecific, plus $STAGE/$FUNCTIONS set by them.
. ./functions.sh
# SLES variant: Apache runs as "wwwrun".
check_SLES() {
APACHEUSER=wwwrun
check_LinuxGen
return $?
}
# RHEL variant: Apache runs as "apache".
check_RHEL() {
APACHEUSER=apache
check_LinuxGen
return $?
}
# Shared check: turn "groups" output into a comma-separated list and
# test whether cachegrp is among them.
check_LinuxGen() {
apachegroups=`groups $APACHEUSER | cut -d: -f2 | sed 's/^ \+//' | sed 's/ /,/g'`
if ! listunion cachegrp $apachegroups; then
echo "=ALERT - Apache user \"$APACHEUSER\" not in group \"cachegrp\""
else
echo "=OK - Apache user \"$APACHEUSER\" found in group \"cachegrp\""
fi
}
# get on with the job
echo "*CHECK - Apache Groups"
preflightargs $@
# Only run during these stages / when these functions are configured;
# checkfieldquit exits the script otherwise.
checkfieldquit CacheBuild,TrakUpgrade,TrakBuild,GoLive $STAGE
checkfieldquit web,analytics $FUNCTIONS
# would have bailed above if no match
# osspecific dispatches to check_SLES or check_RHEL per platform.
osspecific check
|
casep/isc_coding
|
trakautomation/preflightchecks.d/ApacheGroups.sh
|
Shell
|
gpl-2.0
| 786 |
#!/bin/bash
# _3v is as _3t but decreasing the --num-jesus-blocks from 100 to 50.
# I stopped it early after likelihoods were not promising:
# on iter 90, train prob was -0.1226->-0.1240, valid -0.1304->-0.1340.
# _3t is as _3s but using slightly wider context. Dumping our own egs.
# _3s is as _3r but reducing jesus-forward-input-dim from 500 to 400.
# _3r is as _3p but reducing the number of parameters as it seemed to be
# overtraining (despite already being quite a small model): [600,1800 ->
# 500,1500]. Also in the interim there was a script change to
# nnet3/chain/train_tdnn.sh to, on mix-up iters, apply half the max-change.
# [changing it right now from 1/2 to 1/sqrt(2) which is more consistent
# with the halving of the minibatch size.]
# _3p is the same as 3o, but after a code and script change so we can use
# natural gradient for the RepeatedAffineComponent.
# [natural gradient was helpful, based on logs;
# also made a change to use positive bias for the jesus-component affine parts.]
# _3o is as _3n but filling in the first splice-indexes from -1,2 to -1,0,1,2.
# _3n is as _3d (a non-recurrent setup), but using the more recent scripts that support
# recurrence, with improvements to the learning of the jesus layers.
# _3g is as _3f but using 100 blocks instead of 200, as in d->e 200 groups was found
# to be worse.
# It's maybe a little better than the baseline 2y; and better than 3d [-> I guess recurrence
# is helpful.]
#./show_wer.sh 3g
#%WER 17.05 [ 8387 / 49204, 905 ins, 2386 del, 5096 sub ] exp/chain/tdnn_3g_sp/decode_train_dev_sw1_tg/wer_11_0.0
#%WER 15.67 [ 7712 / 49204, 882 ins, 2250 del, 4580 sub ] exp/chain/tdnn_3g_sp/decode_train_dev_sw1_fsh_fg/wer_11_0.0
#%WER 18.7 | 4459 42989 | 83.5 11.1 5.3 2.2 18.7 56.2 | exp/chain/tdnn_3g_sp/decode_eval2000_sw1_tg/score_10_0.0/eval2000_hires.ctm.filt.sys
#%WER 16.8 | 4459 42989 | 85.1 9.9 5.0 2.0 16.8 53.7 | exp/chain/tdnn_3g_sp/decode_eval2000_sw1_fsh_fg/score_10_0.5/eval2000_hires.ctm.filt.sys
#a03:s5c: ./show_wer.sh 2y
#%WER 16.99 [ 8358 / 49204, 973 ins, 2193 del, 5192 sub ] exp/chain/tdnn_2y_sp/decode_train_dev_sw1_tg/wer_11_0.0
#%WER 15.86 [ 7803 / 49204, 959 ins, 2105 del, 4739 sub ] exp/chain/tdnn_2y_sp/decode_train_dev_sw1_fsh_fg/wer_11_0.0
#%WER 18.9 | 4459 42989 | 83.4 11.3 5.3 2.3 18.9 56.3 | exp/chain/tdnn_2y_sp/decode_eval2000_sw1_tg/score_10_0.0/eval2000_hires.ctm.filt.sys
#%WER 17.0 | 4459 42989 | 85.1 10.1 4.8 2.1 17.0 53.5 | exp/chain/tdnn_2y_sp/decode_eval2000_sw1_fsh_fg/score_10_0.0/eval2000_hires.ctm.filt.sys
#a03:s5c: ./show_wer.sh 3d
#%WER 17.35 [ 8539 / 49204, 1023 ins, 2155 del, 5361 sub ] exp/chain/tdnn_3d_sp/decode_train_dev_sw1_tg/wer_10_0.0
#%WER 16.09 [ 7919 / 49204, 1012 ins, 2071 del, 4836 sub ] exp/chain/tdnn_3d_sp/decode_train_dev_sw1_fsh_fg/wer_10_0.0
#%WER 18.9 | 4459 42989 | 83.2 11.2 5.6 2.1 18.9 56.6 | exp/chain/tdnn_3d_sp/decode_eval2000_sw1_tg/score_10_0.0/eval2000_hires.ctm.filt.sys
#%WER 17.0 | 4459 42989 | 85.0 9.8 5.2 2.0 17.0 53.6 | exp/chain/tdnn_3d_sp/decode_eval2000_sw1_fsh_fg/score_10_0.0/eval2000_hires.ctm.filt.sys
# _3f is as _3e, but modifying the splicing setup to add (left) recurrence:
# added the :3's in --splice-indexes "-2,-1,0,1,2 -1,2 -3,0,3:-3 -6,-3,0,3:-3 -6,-3,0,3:-3"
# Therefore it's
# no longer really a tdnn, more like an RNN combined with TDNN. BTW, I'm not re-dumping egs with extra
# context, and this isn't really ideal - I want to see if this seems promising first.
# _3e is as _3d, but increasing the --num-jesus-blocks from 100 (the default)
# to 200 in order to reduce computation in the Jesus layer.
# _3d is as _2y, and re-using the egs, but using --jesus-opts and
# configs from make_jesus_configs.py.
# --jesus-opts "--affine-output-dim 600 --jesus-output-dim 1800 --jesus-hidden-dim 15000" \
# --splice-indexes "-2,-1,0,1,2 -1,2 -3,0,3 -6,-3,0,3 -6,-3,0,3"
# _2y is as _2o, but increasing the --frames-per-iter by a factor of 1.5, from
# 800k to 1.2 million. The aim is to avoid some of the per-job overhead
# (model-averaging, etc.), since each iteration takes only a minute or so.
# I added the results to the table below. It seems the same on average-
# which is good. We'll probably keep this configuration.
# _2o is as _2m, but going back to our original 2-state topology, which it turns
# out that I never tested to WER.
# hm--- it's about the same, or maybe slightly better!
# caution: accidentally overwrote most of this dir, but kept the key stuff.
# note: when I compare with the rerun of 2o (not shown), this run is actually
# better.
# WER on 2m 2o 2y [ now comparing 2o->2y:]
# train_dev,tg 17.22 17.24 16.99 0.2% better
# train_dev,fg 15.87 15.93 15.86 0.1% better
# eval2000,tg 18.7 18.7 18.9 0.2% worse
# eval2000,fg 17.0 16.9 17.0 0.1% worse
# train-prob,final -0.0803 -0.0835
# valid-prob,final -0.0116 -0.0122
# _2m is as _2k, but setting --leftmost-questions-truncate=-1, i.e. disabling
# that mechanism.
# _2k is as _2i, but doing the same change as in _s -> _2e, in which we
# set --apply-deriv-weights false and --frames-overlap-per-eg 0.
# _2i is as _2d but with a new set of code for estimating the LM, in which we compute
# the log-like change when deciding which states to back off. The code is not the same
# as the one in 2{f,g,h}. We have only the options --num-extra-lm-states=2000. By
# default it estimates a 4-gram, with 3-gram as the no-prune order. So the configuration
# is quite similar to 2d, except new/more-exact code is used.
# _2d is as _2c but with different LM options:
# --lm-opts "--ngram-order=4 --leftmost-context-questions=/dev/null --num-extra-states=2000"
# ... this gives us a kind of pruned 4-gram language model, instead of a 3-gram.
# the --leftmost-context-questions=/dev/null option overrides the leftmost-context-questions
# provided from the tree-building, and effectively puts the leftmost context position as a single
# set.
# This seems definitely helpful: on train_dev, with tg improvement is 18.12->17.55 and with fg
# from 16.73->16.14; and on eval2000, with tg from 19.8->19.5 and with fg from 17.8->17.6.
# _2c is as _2a but after a code change in which we start using transition-scale
# and self-loop-scale of 1 instead of zero in training; we change the options to
# mkgraph used in testing, to set the scale to 1.0. This shouldn't affect
# results at all; it's is mainly for convenience in pushing weights in graphs,
# and checking that graphs are stochastic.
# _2a is as _z but setting --lm-opts "--num-extra-states=8000".
# _z is as _x but setting --lm-opts "--num-extra-states=2000".
# (see also y, which has --num-extra-states=500).
# _x is as _s but setting --lm-opts "--num-extra-states=0".
# this is a kind of repeat of the u->v experiment, where it seemed to make things
# worse, but there were other factors involved in that so I want to be sure.
# _s is as _q but setting pdf-boundary-penalty to 0.0
# This is helpful: 19.8->18.0 after fg rescoring on all of eval2000,
# and 18.07 -> 16.96 on train_dev, after fg rescoring.
# _q is as _p except making the same change as from n->o, which
# reduces the parameters to try to reduce over-training. We reduce
# relu-dim from 1024 to 850, and target num-states from 12k to 9k,
# and modify the splicing setup.
# note: I don't rerun the tree-building, I just use the '5o' treedir.
# _p is as _m except with a code change in which we switch to a different, more
# exact mechanism to deal with the edges of the egs, and correspondingly
# different script options... we now dump weights with the egs, and apply the
# weights to the derivative w.r.t. the output instead of using the
# --min-deriv-time and --max-deriv-time options. Increased the frames-overlap
# to 30 also. This wil. give 10 frames on each side with zero derivs, then
# ramping up to a weight of 1.0 over 10 frames.
# _m is as _k but after a code change that makes the denominator FST more
# compact. I am rerunning in order to verify that the WER is not changed (since
# it's possible in principle that due to edge effects related to weight-pushing,
# the results could be a bit different).
# The results are inconsistently different but broadly the same. On all of eval2000,
# the change k->m is 20.7->20.9 with tg LM and 18.9->18.6 after rescoring.
# On the train_dev data, the change is 19.3->18.9 with tg LM and 17.6->17.6 after rescoring.
# _k is as _i but reverting the g->h change, removing the --scale-max-param-change
# option and setting max-param-change to 1.. Using the same egs.
# _i is as _h but longer egs: 150 frames instead of 75, and
# 128 elements per minibatch instead of 256.
# _h is as _g but different application of max-param-change (use --scale-max-param-change true)
# _g is as _f but more splicing at last layer.
# _f is as _e but with 30 as the number of left phone classes instead
# of 10.
# _e is as _d but making it more similar in configuration to _b.
# (turns out b was better than a after all-- the egs' likelihoods had to
# be corrected before comparing them).
# the changes (vs. d) are: change num-pdfs target from 8k to 12k,
# multiply learning rates by 5, and set final-layer-normalize-target to 0.5.
# _d is as _c but with a modified topology (with 4 distinct states per phone
# instead of 2), and a slightly larger num-states (8000) to compensate for the
# different topology, which has more states.
# _c is as _a but getting rid of the final-layer-normalize-target (making it 1.0
# as the default) as it's not clear that it was helpful; using the old learning-rates;
# and modifying the target-num-states to 7000.
# _b is as as _a except for configuration changes: using 12k num-leaves instead of
# 5k; using 5 times larger learning rate, and --final-layer-normalize-target=0.5,
# which will make the final layer learn less fast compared with other layers.
# Kaldi 'chain' TDNN training recipe (Switchboard s5c, experiment 3v).
# Stages 1-8 (iVectors/features) live in run_ivector_common.sh; stages
# 9-14 below cover lattice generation, topology, tree building, chain
# training and decoding.  Resume a partial run with --stage N.
set -e
# configs for 'chain'
stage=12
train_stage=-10
get_egs_stage=-10
speed_perturb=true
dir=exp/chain/tdnn_3v # Note: _sp will get added to this if $speed_perturb == true.
# training options
num_epochs=4
initial_effective_lrate=0.001
final_effective_lrate=0.0001
leftmost_questions_truncate=-1
max_param_change=1.0
final_layer_normalize_target=0.5
num_jobs_initial=3
num_jobs_final=16
minibatch_size=128
frames_per_eg=150
remove_egs=false
# End configuration section.
echo "$0 $@"  # Print the command line for logging
# cmd.sh/path.sh define $train_cmd/$decode_cmd and the Kaldi PATH;
# parse_options.sh lets any variable above be overridden via --flags.
. cmd.sh
. ./path.sh
. ./utils/parse_options.sh
if ! cuda-compiled; then
cat <<EOF && exit 1
This script is intended to be used with GPUs but you have not compiled Kaldi with CUDA
If you want to use GPUs (and have them), go to src/, and configure and make on a machine
where "nvcc" is installed.
EOF
fi
# The iVector-extraction and feature-dumping parts are the same as the standard
# nnet3 setup, and you can skip them by setting "--stage 8" if you have already
# run those things.
suffix=
if [ "$speed_perturb" == "true" ]; then
suffix=_sp
fi
dir=${dir}$suffix
train_set=train_nodup$suffix
ali_dir=exp/tri4_ali_nodup$suffix
treedir=exp/chain/tri5_2y_tree$suffix
lang=data/lang_chain_2y
# if we are using the speed-perturbed data we need to generate
# alignments for it.
local/nnet3/run_ivector_common.sh --stage $stage \
--speed-perturb $speed_perturb \
--generate-alignments $speed_perturb || exit 1;
if [ $stage -le 9 ]; then
# Get the alignments as lattices (gives the CTC training more freedom).
# use the same num-jobs as the alignments
nj=$(cat exp/tri4_ali_nodup$suffix/num_jobs) || exit 1;
steps/align_fmllr_lats.sh --nj $nj --cmd "$train_cmd" data/$train_set \
data/lang exp/tri4 exp/tri4_lats_nodup$suffix
rm exp/tri4_lats_nodup$suffix/fsts.*.gz # save space
fi
if [ $stage -le 10 ]; then
# Create a version of the lang/ directory that has one state per phone in the
# topo file. [note, it really has two states.. the first one is only repeated
# once, the second one has zero or more repeats.]
rm -rf $lang
cp -r data/lang $lang
silphonelist=$(cat $lang/phones/silence.csl) || exit 1;
nonsilphonelist=$(cat $lang/phones/nonsilence.csl) || exit 1;
# Use our special topology... note that later on may have to tune this
# topology.
steps/nnet3/chain/gen_topo.py $nonsilphonelist $silphonelist >$lang/topo
fi
if [ $stage -le 11 ]; then
# Build a tree using our new topology.
steps/nnet3/chain/build_tree.sh --frame-subsampling-factor 3 \
--leftmost-questions-truncate $leftmost_questions_truncate \
--cmd "$train_cmd" 9000 data/$train_set $lang $ali_dir $treedir
fi
if [ $stage -le 12 ]; then
# On the CLSP grid, spread the (large) egs directory across disks.
if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! -d $dir/egs/storage ]; then
utils/create_split_dir.pl \
/export/b0{1,2,3,4}/$USER/kaldi-data/egs/swbd-$(date +'%m_%d_%H_%M')/s5c/$dir/egs/storage $dir/egs/storage
fi
touch $dir/egs/.nodelete # keep egs around when that run dies.
# Main chain-model training.  Note: reuses the egs dumped by the 3t
# experiment (--egs-dir) rather than re-extracting them.
steps/nnet3/chain/train_tdnn.sh --stage $train_stage \
--egs-dir exp/chain/tdnn_3t_sp/egs \
--jesus-opts "--jesus-forward-input-dim 400 --num-jesus-blocks 50 --jesus-forward-output-dim 1500 --jesus-hidden-dim 15000 --jesus-stddev-scale 0.2 --final-layer-learning-rate-factor 0.25" \
--splice-indexes "-2,-1,0,1,2 -3,-2,-1,0,1,2,3 -3,0,3 -6,-3,0,3,6 -6,-3,0,3,6" \
--apply-deriv-weights false \
--frames-per-iter 1200000 \
--lm-opts "--num-extra-lm-states=2000" \
--get-egs-stage $get_egs_stage \
--minibatch-size $minibatch_size \
--egs-opts "--frames-overlap-per-eg 0" \
--frames-per-eg $frames_per_eg \
--num-epochs $num_epochs --num-jobs-initial $num_jobs_initial --num-jobs-final $num_jobs_final \
--feat-type raw \
--online-ivector-dir exp/nnet3/ivectors_${train_set} \
--cmvn-opts "--norm-means=false --norm-vars=false" \
--initial-effective-lrate $initial_effective_lrate --final-effective-lrate $final_effective_lrate \
--max-param-change $max_param_change \
--final-layer-normalize-target $final_layer_normalize_target \
--relu-dim 850 \
--cmd "$decode_cmd" \
--remove-egs $remove_egs \
data/${train_set}_hires $treedir exp/tri4_lats_nodup$suffix $dir || exit 1;
fi
if [ $stage -le 13 ]; then
# Note: it might appear that this $lang directory is mismatched, and it is as
# far as the 'topo' is concerned, but this script doesn't read the 'topo' from
# the lang directory.
utils/mkgraph.sh --self-loop-scale 1.0 data/lang_sw1_tg $dir $dir/graph_sw1_tg
fi
decode_suff=sw1_tg
graph_dir=$dir/graph_sw1_tg
if [ $stage -le 14 ]; then
# Decode both evaluation sets in parallel background subshells.
for decode_set in train_dev eval2000; do
(
steps/nnet3/decode.sh --acwt 1.0 --post-decode-acwt 10.0 \
--extra-left-context 20 \
--nj 50 --cmd "$decode_cmd" \
--online-ivector-dir exp/nnet3/ivectors_${decode_set} \
$graph_dir data/${decode_set}_hires $dir/decode_${decode_set}_${decode_suff} || exit 1;
# NOTE(review): $has_fisher is never assigned in this script; an empty
# expansion makes this "if" succeed, so the Fisher LM rescoring always
# runs -- confirm that is intended (other recipes set has_fisher=true).
if $has_fisher; then
steps/lmrescore_const_arpa.sh --cmd "$decode_cmd" \
data/lang_sw1_{tg,fsh_fg} data/${decode_set}_hires \
$dir/decode_${decode_set}_sw1_{tg,fsh_fg} || exit 1;
fi
) &
done
fi
wait;
exit 0;
|
michellemorales/OpenMM
|
kaldi/egs/swbd/s5c/local/chain/tuning/run_tdnn_3v.sh
|
Shell
|
gpl-2.0
| 15,199 |
#!/bin/sh
# Regenerate series60-remote.pro so the Qt tools (lupdate etc.) see
# every Python source, Designer form, resource collection and
# translation file in the tree.
PRO="series60-remote.pro"

# Write the whole file through a single redirection block instead of
# re-opening it with '>>' for every line, and stream the find results
# through while-read so file names containing spaces stay intact
# (the old "for file in $(find ...)" word-split them).
{
    echo "# Generated using generate-pro.sh"
    echo "# "
    echo "# Created: " "$(date -R)"
    echo "# WARNING! All changes made in this file will be lost!"
    echo ""
    find devices/ lib/ widget/ window/ -name "*.py" | while read -r file; do
        echo "SOURCES += $file"
    done
    find ui/ -name "*.ui" | while read -r file; do
        echo "FORMS += $file"
    done
    find ui/ -name "*.rc" | while read -r file; do
        echo "RESOURCES += $file"
    done
    find lang/ -name "app_*.ts" | while read -r file; do
        echo "TRANSLATIONS += $file"
    done
} > "$PRO"
|
ypid/series60-remote
|
pc/generate-pro.sh
|
Shell
|
gpl-2.0
| 767 |
#!/bin/bash
# Verify Block Storage (cinder) operation on an OpenStack controller.
# Must run as root because it appends to the openrc credential files.
#
# Note: shebang changed to bash -- the original declared #!/bin/sh but
# used the 'function' keyword and [[ ]], which are bashisms.

# Abort unless the effective user is root (id -u prints 0 for root).
assert_superuser() {
    if [[ "$(id -u)" != "0" ]]; then
        echo "You need to be 'root' dude." 1>&2
        exit 1
    fi
}

# Sanity-check the service as the admin tenant.
verify_operation_admin() {
    source "/home/openstack/Documentos/openstackUECE/controller-node/admin-demo/admin-openrc.sh"
    cinder service-list
}

# Create a 1 GB test volume as the demo tenant.
verify_operation_demo() {
    source "/home/openstack/Documentos/openstackUECE/controller-node/admin-demo/demo-openrc.sh"
    cinder create --name demo-volume1 1
}

main() {
    assert_superuser
    # Pin both credential files to the v2 volume API.
    echo "export OS_VOLUME_API_VERSION=2" | tee -a admin-openrc.sh demo-openrc.sh
    #verify_operation_admin
    # verify_operation_demo
}

main
|
marcialf/openstackUECE
|
controller-node/13_verify_cinder_operation.sh
|
Shell
|
gpl-2.0
| 622 |
#!/usr/bin/env bash
# Build the Minion container and run one or more instances in place.
set -e
# NOTE(review): the ( ... exit 1 ) runs in a subshell, so the exit only
# terminates the subshell; the script still stops because the subshell's
# status 1 trips 'set -e'.  Works, but only by that interaction.
test -d repository || (echo "This command must be ran from the features/minion directory" && exit 1)
# Inclue the bundled Maven in the $PATH
MYDIR=$(dirname "$0")
MYDIR=$(cd "$MYDIR"; pwd)
export PATH="$MYDIR/../../bin:$MYDIR/../../maven/bin:$PATH"
# Location of the Minion container module, used by all functions below.
export CONTAINERDIR="${MYDIR}/../container/minion"
# Kill any running Minion instances, wipe their extracted directories
# (possibly root-owned), and rebuild the feature + container modules.
# $1 - 1 to prefix kill/rm with sudo, anything else to run unprivileged.
cleanup_and_build() {
should_use_sudo=$1
cmd_prefix=""
if [[ $should_use_sudo -eq 1 ]]; then
cmd_prefix="sudo "
fi
# Kill off any existing instances
did_kill_at_least_one_pid=0
for pid_file in $(find "${CONTAINERDIR}/target" -name karaf.pid); do
pid=$(cat "$pid_file")
if [[ ! -z $pid ]]; then
# Ignore failures: the recorded PID may already be gone.
$cmd_prefix kill -9 "$pid" 2>/dev/null && did_kill_at_least_one_pid=1
fi
done
# If we killed a container, wait a few seconds before cleaning up
if [[ $did_kill_at_least_one_pid -eq 1 ]]; then
sleep 2
fi
# Delete files owned by root
$cmd_prefix rm -rf "${CONTAINERDIR}"/target/minion-karaf-*
# Rebuild - we've already verified that we're in the right folder
mvn clean install && \
(cd "${CONTAINERDIR}"; mvn clean install)
}
# Rewrite the per-instance Karaf configuration so multiple Minions can
# run side by side: every listening port is offset by (instance - 1),
# and a well-known Minion id is assigned for instances 1-3.
# $1 - extracted Minion home directory, $2 - 1-based instance index.
set_instance_specific_configuration() {
MINION_HOME="$1"
idx=$2
offset=$((idx - 1))
# Here's a commented list of ports on which a default instance of Minion is listening:
#$ sudo netstat -lnp | grep 23306
#### JVM Debug - Used when 'debug' flag in passed to the 'karaf' command, configured via the $JAVA_DEBUG_PORT env. var.
#tcp 0 0 0.0.0.0:5005 0.0.0.0:* LISTEN 23306/java
#### SSH Set in 'etc/org.apache.karaf.shell.cfg' via sshPort=8201
#tcp6 0 0 127.0.0.1:8201 :::* LISTEN 23306/java
#### Random RMI port?
#tcp6 0 0 :::38287 :::* LISTEN 23306/java
#### RMI Registry - Set in 'etc/org.apache.karaf.management.cfg' via rmiRegistryPort=1299 and the serviceUrl
#tcp6 0 0 127.0.0.1:1299 :::* LISTEN 23306/java
#### Jetty - Set in 'etc/org.ops4j.pax.web.cfg' via :org.osgi.service.http.port=8181
#tcp6 0 0 :::8181 :::* LISTEN 23306/java
#### Random port use for Karaf management - Stored in 'data/port'
#tcp6 0 0 127.0.0.1:34947 :::* LISTEN 23306/java
#### RMI Server - Set in 'etc/org.apache.karaf.management.cfg' via rmiServerPort=45444 and the serviceUrl
#tcp6 0 0 127.0.0.1:45444 :::* LISTEN 23306/java
#### Trap listener - Set in 'etc/org.opennms.netmgt.trapd.cfg' via trapd.listen.port=1162
#udp6 0 0 127.0.0.1:1162 :::* 23306/java
#### Syslog listener - Set in 'etc/org.opennms.netmgt.syslog.cfg' via syslog.listen.port=1514
#udp6 0 0 :::1514 :::* 23306/java
JAVA_DEBUG_PORT=$((5005 + offset))
JETTY_PORT=$((8181 + offset))
RMI_REGISTRY_PORT=$((1299 + offset))
RMI_SERVER_PORT=$((45444 + offset))
SNMP_TRAP_PORT=$((1162 + offset))
SSH_PORT=$((8201 + offset))
SYSLOG_PORT=$((1514 + offset))
# No need to write this one anywhere, just export it
export JAVA_DEBUG_PORT
# Jetty
#perl -pi -e "s|org.osgi.service.http.port.*|org.osgi.service.http.port = $JETTY_PORT|g" "$MINION_HOME/etc/org.ops4j.pax.web.cfg"
echo "org.osgi.service.http.port = $JETTY_PORT" > "$MINION_HOME/etc/org.ops4j.pax.web.cfg"
# RMI
perl -pi -e "s|rmiRegistryPort.*|rmiRegistryPort = $RMI_REGISTRY_PORT|g" "$MINION_HOME/etc/org.apache.karaf.management.cfg"
perl -pi -e "s|rmiServerPort.*|rmiServerPort = $RMI_SERVER_PORT|g" "$MINION_HOME/etc/org.apache.karaf.management.cfg"
perl -pi -e "s|serviceUrl.*|serviceUrl = service:jmx:rmi://127.0.0.1:$RMI_SERVER_PORT/jndi/rmi://127.0.0.1:$RMI_REGISTRY_PORT/karaf-minion|g" "$MINION_HOME/etc/org.apache.karaf.management.cfg"
# SNMP Traps
echo "trapd.listen.port = $SNMP_TRAP_PORT" > "$MINION_HOME/etc/org.opennms.netmgt.trapd.cfg"
# SSH
perl -pi -e "s|sshPort.*|sshPort = $SSH_PORT|g" "$MINION_HOME/etc/org.apache.karaf.shell.cfg"
# Syslog
echo "syslog.listen.port = $SYSLOG_PORT" > "$MINION_HOME/etc/org.opennms.netmgt.syslog.cfg"
# Use some fixed ids when the idx <= 3
MINION_ID="00000000-0000-0000-0000-000000000000"
case $idx in
1 ) MINION_ID="00000000-0000-0000-0000-000000ddba11"
;;
2 ) MINION_ID="00000000-0000-0000-0000-000000bad222"
;;
3 ) MINION_ID="00000000-0000-0000-0000-000000d3c0d3"
;;
* ) MINION_ID="test-$idx"
esac
echo "id=$MINION_ID" > "$MINION_HOME/etc/org.opennms.minion.controller.cfg"
}
# Extract a fresh Minion container + repositories into its own home
# directory, configure it, and start Karaf.
# $1 - instance index, $2 - 1 to run detached (daemon + nohup),
# $3 - 1 to launch under sudo.
spawn_minion() {
idx=$1
detached=$2
should_use_sudo=$3
MINION_HOME="${CONTAINERDIR}/target/minion-karaf-$idx"
echo "Extracting container for Minion #$idx..."
# Extract the container
pushd "${CONTAINERDIR}"/target > /dev/null
mkdir -p "$MINION_HOME"
tar zxvf minion-*.tar.gz -C "$MINION_HOME" --strip-components 1 > /dev/null
popd > /dev/null
# Extract the core repository
pushd core/repository/target > /dev/null
mkdir -p "$MINION_HOME/repositories/core"
tar zxvf core-repository-*-repo.tar.gz -C "$MINION_HOME/repositories/core" > /dev/null
popd > /dev/null
# Extract the default repository
pushd repository/target > /dev/null
mkdir -p "$MINION_HOME/repositories/default"
tar zxvf repository-*-repo.tar.gz -C "$MINION_HOME/repositories/default" > /dev/null
popd > /dev/null
echo "Updating configuration for Minion #$idx..."
# Enable Hawtio
echo 'hawtio-offline' > "$MINION_HOME/etc/featuresBoot.d/hawtio.boot"
# Instance specific configuration
set_instance_specific_configuration "$MINION_HOME" "$idx"
echo "Starting Minion #$idx (detached=$detached)..."
# 'debug' makes Karaf listen on $JAVA_DEBUG_PORT (exported earlier).
KARAF_ARGS="debug"
cmd_prefix=""
pushd "$MINION_HOME" > /dev/null
if [[ $detached -eq 1 ]]; then
# shellcheck disable=SC2086
# shellcheck disable=SC2024
if [[ $should_use_sudo -eq 1 ]]; then
cmd_prefix="sudo -E -b "
fi
$cmd_prefix nohup ./bin/karaf daemon $KARAF_ARGS &> "$MINION_HOME/output.log" &
else
# shellcheck disable=SC2086
if [[ $should_use_sudo -eq 1 ]]; then
cmd_prefix="sudo -E "
fi
# Foreground: attaches the Karaf console to this terminal.
$cmd_prefix ./bin/karaf $KARAF_ARGS
fi
popd > /dev/null
}
# Clean + rebuild once, then spawn the requested number of Minions.
# $1 - instance count, $2 - detach flag for instance 1, $3 - sudo flag.
spawn_minions() {
total=$1
detach_last=$2
with_sudo=$3
cleanup_and_build "$with_sudo"
# Count down so instance 1 starts last: it is the only one that may
# stay attached to the terminal (unless detaching everything).
i=$total
while [[ $i -ge 1 ]]; do
if [[ $i -eq 1 ]]; then
spawn_minion "$i" "$detach_last" "$with_sudo"
else
spawn_minion "$i" 1 "$with_sudo"
fi
i=$((i - 1))
done
}
# Print the command-line synopsis on stdout.
usage() {
printf '%s\n' "usage: runInPlace.sh [[[-n num_instances] [-d] [-s]] | [-h]]"
}
# Defaults: one attached, non-sudo instance.
NUM_INSTANCES=1
DETACHED=0
SUDO=0
# Hand-rolled option loop ("$1" is empty once the args are exhausted).
while [ "$1" != "" ]; do
case $1 in
-n | --num-instances ) shift
NUM_INSTANCES=$1
;;
-d | --detached ) DETACHED=1
;;
-s | --sudo ) SUDO=1
;;
-h | --help ) usage
exit
;;
* ) usage
exit 1
esac
shift
done
# Validate: must be a positive integer.
NUMBER_RE='^[0-9]+$'
if ! [[ $NUM_INSTANCES =~ $NUMBER_RE ]] ; then
echo "Number of instances is not a number: $NUM_INSTANCES" >&2; exit 1
fi
if [ "$NUM_INSTANCES" -lt 1 ]; then
echo "Number of instances must be strictly positive: ${NUM_INSTANCES}" >&2; exit 1
fi
spawn_minions "$NUM_INSTANCES" "$DETACHED" "$SUDO"
|
jeffgdotorg/opennms
|
features/minion/runInPlace.sh
|
Shell
|
gpl-2.0
| 7,710 |
#!/bin/bash
# Join the PDF documents given as arguments into one file and rotate
# every page 90 degrees clockwise ("east"); the result is output.pdf.
#
# Usage: BoogieJoinRot.sh file1.pdf [file2.pdf ...]
# Requires: pdftk

# "$@" (not $*) keeps file names containing spaces as single arguments.
pdftk "$@" cat output tmp.pdf
pdftk tmp.pdf cat 1-endeast output output.pdf
# Remove the intermediate concatenation; it was previously left behind.
rm -f tmp.pdf
|
nmoran/BoogieStuff
|
scripts/BoogieJoinRot.sh
|
Shell
|
gpl-2.0
| 164 |
## - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
##
## wdb - weather and water data storage
##
## Copyright (C) 2007 met.no
##
##  Contact information:
##  Norwegian Meteorological Institute
##  Box 43 Blindern
##  0313 OSLO
##  NORWAY
##  E-mail: [email protected]
##
##  This is free software; you can redistribute it and/or modify
##  it under the terms of the GNU General Public License as published by
##  the Free Software Foundation; either version 2 of the License, or
##  (at your option) any later version.
##
## - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#!/bin/sh
# Test teardown template: the __WDB_LOGDIR__ and __WDB_ADMIN__ tokens
# are substituted at build/install time (this is a .in file).
# NOTE(review): $DB_CONF is expected to be exported by the test harness
# -- confirm against the calling test scripts.
mkdir -p __WDB_LOGDIR__
# Remove Test Data
__WDB_ADMIN__ testclean ${DB_CONF} --logfile __WDB_LOGDIR__/gribLoad_testclean.log > __WDB_LOGDIR__/gribLoad_testclean.out
|
metno/wdb-gribload
|
test/install/tearDown.in.sh
|
Shell
|
gpl-2.0
| 788 |
#!/bin/bash
# Lock the screen with i3lock, using a random image from the lock-screen
# image directory as the background.

IMGDIR="/home/dwalton/lockscreenimages"

# Pick one file at random.  find + shuf replaces the old
# "for i in $(ls ...)" approach, which word-split file names.
PICT=$(find "$IMGDIR" -maxdepth 1 -type f | shuf -n 1)

# now to lock the screen (-i: background image, -t: tile it).
i3lock -i "$PICT" -t
|
plainenough/linux_tools
|
lock.sh
|
Shell
|
gpl-2.0
| 183 |
#! /bin/sh -e
# tup - A file-based build system
#
# Copyright (C) 2013-2022 Mike Shal <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# Make sure we can remove a group after it's been used as an input.
# Load the test harness (provides update, eotup, etc.).
. ./tup.sh
# The !cc macro compiles a C file and also publishes the object into the
# <objs> group at the project root ($ is escaped so tup, not this shell,
# expands the variables).
cat > Tuprules.tup << HERE
!cc = |> gcc -c %f -o %o |> %B.o | \$(MY_ROOT)/<objs>
MY_ROOT = \$(TUP_CWD)
HERE
mkdir foo
mkdir bar
mkdir sub
cat > foo/Tupfile << HERE
include_rules
: foreach *.c |> !cc |>
HERE
cat > bar/Tupfile << HERE
include_rules
: foreach *.c |> !cc |>
HERE
cat > foo/main.c << HERE
int bar(void);
int main(void)
{
	return bar();
}
HERE
cat > bar/bar.c << HERE
int bar(void) {return 0;}
HERE
# First build: link the program using the <objs> group as input.
cat > Tupfile << HERE
: <objs> |> gcc %<objs> -o %o |> myprog.exe
HERE
update
# Now link from the explicit object files instead, and remove the group
# from the !cc macro; the rebuild must succeed with the group gone.
cat > Tupfile << HERE
: foo/*.o bar/*.o |> gcc %f -o %o |> myprog.exe
HERE
cat > Tuprules.tup << HERE
!cc = |> gcc -c %f -o %o |> %B.o
MY_ROOT = \$(TUP_CWD)
HERE
update
eotup
|
gittup/tup
|
test/t3072-group-input2.sh
|
Shell
|
gpl-2.0
| 1,496 |
#!/bin/bash
# Simple Bash script for chroot from a live distribution (I used ubuntu)
# in case you need to recovery your installation.
# Copyright (c) 2013 Gabriele Baldoni gabriele.baldoni(_at_)gmail.com
#
# Usage: sudo ./chroot_from_live.sh <mount folder> <device>
# Mounts /dev/<device> under /mnt/<mount folder>, binds the pseudo
# filesystems plus resolv.conf, then drops into a chroot shell.
if [ "$(id -u)" -eq 0 ]; then
	if [ $# -ne 2 ]; then
		printf "Usage: sudo ./chroot_from_live.sh [mount folder] [device]\n"
		printf "The script will create itself the mount directory into /mnt/\n"
		exit 1
	else
		mountpoint=$1
		device=$2
		# Recreate the mount point from scratch; ignore the error if it
		# did not exist yet (the original printed a spurious rmdir error).
		rmdir "/mnt/$mountpoint" 2>/dev/null
		mkdir "/mnt/$mountpoint"
		# Root filesystem first, then the pseudo filesystems the
		# chrooted environment needs. All paths are quoted so mount
		# points containing spaces do not word-split.
		mount "/dev/$device" "/mnt/$mountpoint"
		mount -t sysfs none "/mnt/$mountpoint/sys"
		mount -t proc none "/mnt/$mountpoint/proc"
		mount --bind /dev/ "/mnt/$mountpoint/dev"
		# Working DNS inside the chroot.
		mount --bind /etc/resolv.conf "/mnt/$mountpoint/etc/resolv.conf"
		chroot "/mnt/$mountpoint"
		exit 0
	fi
else
	printf "Run this as root!!!\n"
	exit 1
fi
|
gabrik/recovery-chroot
|
chroot_from_live.sh
|
Shell
|
gpl-2.0
| 879 |
#!/bin/bash
# Compare this host's current IPv4/IPv6 addresses against the expected
# ones, logging the result and optionally mailing a report.
# NOTE(review): BASEDIR, PIDFILE, LOGFILE, ERRORLOG, CURRIP4/6, MYIP4/6,
# CONFDIR, STARTDATE/STARTTIME and all MAIL*/SENDMAIL* variables are
# expected to come from the sourced file below -- confirm there.
PROG=checkip
PROGPID=$(echo $$)
source /etc/sysconfig/system-scripts.sh
#
if [ ! -d $BASEDIR/$PROG ]; then mkdir -p $BASEDIR/$PROG ; fi
# Simple PID-file lock: if a previous run is still registered, mail a
# notice and bail out with status 2.
if [ -f $PIDFILE ] ; then
echo -e "$PROG is Already Runnning on $HOSTNAME with $PROGPID" | mail -r $MAILRECIP -s "$MAILSUB" $MAILFROM
echo "exit 2" >$LOGFILE 2>$ERRORLOG
exit 2
fi
echo $PROGPID > $PIDFILE
echo -e "$PROG started on $STARTDATE at $STARTTIME" >$LOGFILE 2>$ERRORLOG
# IPv4 check: a mismatch is also written to the error log, which later
# triggers the error e-mail.
if [ "$CURRIP4" == "$MYIP4" ]; then
echo -e "The IP4 Address for $HOSTNAME is correct\nCurrent IP4 is $CURRIP4" >>$LOGFILE 2>>$ERRORLOG
else
echo -e "The IP4 Address for $HOSTNAME is not right\nCurrent IP4 is $CURRIP4\nThe IP4 should be $MYIP4" >>$LOGFILE 2>>$ERRORLOG
echo -e "The IP4 Address for $HOSTNAME is not right\nCurrent IP4 is $CURRIP4\nThe IP4 should be $MYIP4" >> $ERRORLOG
fi
# IPv6 check, same pattern as above.
if [ "$CURRIP6" == "$MYIP6" ]; then
echo -e "The IP6 Address for $HOSTNAME is correct\nCurrent IP6 is $CURRIP6" >>$LOGFILE 2>>$ERRORLOG
else
echo -e "The IP6 Address for $HOSTNAME is not right\nCurrent IP6 is $CURRIP6\nThe IP6 should be $MYIP6" >>$LOGFILE 2>>$ERRORLOG
echo -e "The IP6 Address for $HOSTNAME is not right\nCurrent IP6 is $CURRIP6\nThe IP6 should be $MYIP6" >> $ERRORLOG
fi
# Source any drop-in extension scripts for this program.
if [ ! -d $CONFDIR/include/$PROG ]; then mkdir -p $CONFDIR/include/$PROG ; fi
INCLUDESCRIPTS=$(ls $CONFDIR/include/$PROG/*.sh 2> /dev/null | wc -l)
if [ "$INCLUDESCRIPTS" != "0" ]; then
for file in $(ls $CONFDIR/include/$PROG/*.sh); do
source $file
done
fi
ENDDATE=$(date +"%m-%d-%Y")
ENDTIME=$(date +"%r")
# No errors recorded: send the normal status mail (if enabled).
if [ ! -s $ERRORLOG ] ; then
if [ $SENDMAIL = "yes" ] && [ $EMAILcheckip = "yes" ]; then
echo -e "
$MAILHEADER\n
$PROG started on $STARTDATE at $STARTTIME\n
$MAILMESS1
$MAILMESS2
$MAILMESS3
$PROG completed on $ENDDATE at $ENDTIME\n
$MAILFOOTER\n"| mail -r $MAILFROM -s "$MAILSUB" $MAILRECIP
fi
else
# Errors recorded: embed the error log in the mail body (if enabled).
if [ -s $ERRORLOG ] && [ -f $ERRORLOG ] && [ $SENDMAILONERROR == "yes" ]; then
MAILMESS3="$(echo -e "Errors were reported and they are as follows:\n""$(cat $ERRORLOG)")"
echo -e "
$MAILHEADER\n
$PROG started on $STARTDATE at $STARTTIME\n
$MAILMESS1
$MAILMESS2
$MAILMESS3
$PROG completed on $ENDDATE at $ENDTIME\n
$MAILFOOTER\n"| mail -r $MAILFROM -s "$MAILSUB" $MAILRECIP
fi
rm -f $PIDFILE
fi
# Append any captured errors to the main log before cleaning up.
if [ -s $ERRORLOG ]; then
echo "Any errors from the error log are reported below" >> $LOGFILE
cat $ERRORLOG >> $LOGFILE
echo "End of error log file" >> $LOGFILE
fi
ENDDATE=$(date +"%m-%d-%Y")
ENDTIME=$(date +"%r")
echo -e "$PROG completed on $ENDDATE at $ENDTIME" >>$LOGFILE 2>>$ERRORLOG
echo -e "Total log Size is $(ls -lh $LOGFILE | awk '{print $5}')" >>$LOGFILE 2>>$ERRORLOG
rm -f $ERRORLOG
rm -f $PIDFILE
echo "exit = $?" >>$LOGFILE
# NOTE(review): $? here is the exit status of the echo above, so the
# script effectively always exits 0 at this point.
exit $?
|
casjay/system-scripts
|
usr/share/system-scripts/checkip.sh
|
Shell
|
gpl-2.0
| 2,662 |
#!/bin/bash
##
## Start up script for Icinga2 on CentOS docker container
##
## Initialise any variables being called:
# Set the correct timezone for PHP
PHP_TZ=${TZ:-UTC}
# Split "Continent/City" into its two parts for the php.ini edit below.
PHP_TZ_CONT=`echo $PHP_TZ | awk 'BEGIN { FS="/" } { print $1 }'`
PHP_TZ_CITY=`echo $PHP_TZ | awk 'BEGIN { FS="/" } { print $2 }'`
setup=/config/.setup
## The remaining initialisation is contained in an if condition. When the initialisation completes an empty /etc/icinga2/.setup file is created. If this exists the initialisation is skipped. By deleting this file, the initialisation can be restarted.
if [ ! -f "${setup}" ]; then
  ## Set up basic Icinga2 configuration/features
  # Enable feature: ido-mysql
  if [[ -L /etc/icinga2/features-enabled/ido-mysql.conf ]]; then
    echo "Symlink for /etc/icinga2/features-enabled/ido-mysql.conf exists already...skipping"
  else
    ln -s /etc/icinga2/features-available/ido-mysql.conf /etc/icinga2/features-enabled/ido-mysql.conf
  fi
  # Enable feature: checker
  if [[ -L /etc/icinga2/features-enabled/checker.conf ]]; then
    echo "Symlink for /etc/icinga2/features-enabled/checker.conf exists already... skipping"
  else
    ln -s /etc/icinga2/features-available/checker.conf /etc/icinga2/features-enabled/checker.conf
  fi
  # Enable feature: mainlog
  if [[ -L /etc/icinga2/features-enabled/mainlog.conf ]]; then
    echo "Symlink for /etc/icinga2/features-enabled/mainlog.conf exists already... skipping"
  else
    ln -s /etc/icinga2/features-available/mainlog.conf /etc/icinga2/features-enabled/mainlog.conf
  fi
  # Enable feature: command >> /dev/null
  if [[ -L /etc/icinga2/features-enabled/command.conf ]]; then
    echo "Symlink for /etc/icinga2/features-enabled/command.conf exists already...skipping"
  else
    ln -s /etc/icinga2/features-available/command.conf /etc/icinga2/features-enabled/command.conf
  fi
  # Enable feature: livestatus >> /dev/null
  if [[ -L /etc/icinga2/features-enabled/livestatus.conf ]]; then
    echo "Symlink for /etc/icinga2/features-enabled/livestatus.conf exists already...skipping"
  else
    ln -s /etc/icinga2/features-available/livestatus.conf /etc/icinga2/features-enabled/livestatus.conf
  fi
  ## The mariadb instance is installed and empty directories are created as part of the container. This section performs the mysql_secure_installation steps.
  # Start up the mariadb instance:
  mysqld_safe --basedir=/usr --nowatch
  # NOTE(review): fixed sleep as a crude wait for mysqld to accept
  # connections; may be fragile on slow hosts.
  sleep 10
  # Make sure that NOBODY can access the server without a password - to be updated with a variable for a password ***
  #mysql -e "UPDATE mysql.user SET Password = PASSWORD('CHANGEME') WHERE User = 'root'"
  # Kill the anonymous users
  mysql -e "DROP USER ''@'localhost'"
  # Because our hostname varies we'll use some Bash magic here.
  mysql -e "DROP USER ''@'$(hostname)'"
  # Kill off the demo database
  mysql -e "DROP DATABASE test"
  # Setting up the icinga database - need to change the icinga user password to use a variable at some point ***
  # The grouped echos below feed the SQL statements to a single mysql
  # client invocation via the pipe.
  (
    echo "CREATE DATABASE IF NOT EXISTS icinga;"
    echo "GRANT SELECT, INSERT, UPDATE, DELETE, DROP, CREATE VIEW, INDEX, EXECUTE ON icinga.* TO 'icinga'@'localhost' IDENTIFIED BY 'icinga';"
    echo "quit"
  ) |
  mysql
  mysql -f icinga < /usr/share/icinga2-ido-mysql/schema/mysql.sql
  # Make our changes take effect
  mysql -e "FLUSH PRIVILEGES"
  # Any subsequent tries to run queries this way will get access denied because lack of usr/pwd param
  # Stop the MariaDB, as it will be controlled via supervisord
  kill `pgrep mysqld`
  ## Initialising the icingaweb2 configuration
  # if [[ -L /etc/icingaweb2 ]]; then
  #   echo "Icinga2 web configuration directory already exists...skipping"
  # else
  #   cd /usr/share/icingaweb2
  #   icingacli setup config directory
  #   icingacli setup token create
  # fi
  # Configure the PHP timezone correctly:
  # A timezone without a "/" (e.g. plain UTC) has no city part.
  if [ "$PHP_TZ_CITY" = "" ]; then
    sed -i "s/;date.timezone =/date.timezone = ${PHP_TZ_CONT}/" /etc/php.ini
  else
    sed -i "s/;date.timezone =/date.timezone = ${PHP_TZ_CONT}\/${PHP_TZ_CITY}/" /etc/php.ini
  fi
  # Mark the setup as complete
  touch /config/.setup
fi
## Start up icinga2 and apache web server daemons via supervisord
/usr/bin/supervisord -n -c /etc/supervisord.conf
|
jervine/docker-centos-icinga2
|
start.sh
|
Shell
|
gpl-2.0
| 4,238 |
#!/bin/bash
# Burst the supplied PDF into one file per page, named
# gez.geez.exp<page>.pdf, for Tesseract training.
usage() {
	echo "Usage: $0 <PDF File to split>"
	exit 1
}
# Require at least one argument.
[[ $# -eq 0 ]] && usage
# Quote the filename so paths containing spaces survive word splitting.
pdftk "$1" burst output gez.geez.exp%d.pdf
|
Deepfreeze32/Ethiopic-Tesseract-Training-Scripts
|
build.sh
|
Shell
|
gpl-2.0
| 173 |
#!/bin/sh
# Release helper for iec16022: verifies the NEWS file, builds and signs
# the release tarball, tags the release, and builds the Windows binary.
# Version string taken from the AC_INIT(...,[VER],...) line of configure.ac.
VER=$(grep AC_INIT configure.ac | sed -e 's/.*,\[\(.*\)\],.*/\1/')
# Refuse to release without a NEWS line for exactly this version.
if ! grep -q "^$VER$" NEWS ; then
echo "Please add NEWS entry for version $VER!"
exit 1
fi
# NEWS must contain a line starting with today's date
# (--iso-8601 is a GNU date option).
if ! grep -q "^$(date --iso-8601=date)" NEWS ; then
echo "Expect today's date in NEWS!"
exit 1
fi
# Clean rebuild of the distribution tarball.
make distclean
./autogen.sh
make dist
# Detached ASCII-armored GPG signature for the tarball.
gpg -a --detach-sign iec16022-$VER.tar.xz
# Signed annotated git tag, e.g. v1.0.
git tag -s v${VER}
# Cross-build the Windows executable and ship a signed, versioned copy.
./winbuild.sh
strip .libs/iec16022.exe
make iec16022.signed.exe
cp iec16022.signed.exe iec16022-$VER.exe
|
rdoeffinger/iec16022
|
release.sh
|
Shell
|
gpl-2.0
| 486 |
#!/bin/bash
#
# normalize.sh
# Should be run from 'hisvak'
#
# Splits dataset/HISVAK.xml into HISVAK_BRON.xml and HISVAK_LID.xml
# via the split.groovy helper.
p=$(pwd)
s="$p/src/main/groovy/split.groovy"
# Bail out early when not run from the project root (the helper would
# not be found). Paths are quoted so a checkout under a directory
# containing spaces still works.
if [ ! -f "$s" ] ; then
	echo "Cannot find $s"
	echo "normalize.sh should be run from folder 'hisvak'"
	exit -1
fi
d="$p/dataset"
# mkdir -p only creates the import folder when it is missing
# (replaces the original test-then-mkdir pair).
mkdir -p "$d/import"
cd "$d" || exit 1
groovy "$s" HISVAK.xml HISVAK_BRON.xml HISVAK_LID.xml
|
IISH/hisvak
|
src/main/normalize.sh
|
Shell
|
gpl-2.0
| 348 |
# ----------------------------------------------------------------------------
# Faz cálculos com horários.
# A opção -r torna o cálculo relativo à primeira data, por exemplo:
# 02:00 - 03:30 = -01:30 (sem -r) e 22:30 (com -r)
#
# Uso: zzhora [-r] hh:mm [+|- hh:mm] ...
# Ex.: zzhora 8:30 + 17:25 # preciso somar dois horários
# zzhora 12:00 - agora # quando falta para o almoço?
# zzhora -12:00 + -5:00 # horas negativas!
# zzhora 1000 # quanto é 1000 minutos?
# zzhora -r 5:30 - 8:00 # que horas ir dormir para acordar às 5:30?
# zzhora -r agora + 57:00 # e daqui 57 horas, será quando?
# zzhora 1:00 + 2:00 + 3:00 - 4:00 - 0:30 # cálculos múltiplos
#
# Autor: Aurelio Marinho Jargas, www.aurelio.net
# Desde: 2000-02-22
# Versão: 4
# Licença: GPL
# ----------------------------------------------------------------------------
zzhora ()
{
	# Do arithmetic on hh:mm times; see the header comment above for usage.
	zzzz -h hora "$1" && return
	local hhmm1 hhmm2 operacao hhmm1_orig hhmm2_orig
	local hh1 mm1 hh2 mm2 n1 n2 resultado parcial exitcode negativo
	local horas minutos dias horas_do_dia hh mm hh_dia extra
	local relativo=0
	local neg1=0
	local neg2=0
	# Command-line options
	if test "$1" = '-r'
	then
		relativo=1
		shift
	fi
	# Parameter validation
	test -n "$1" || { zztool -e uso hora; return 1; }
	# Multiple calculations? Example: 1:00 + 2:00 + 3:00 - 4:00
	if test $# -gt 3
	then
		if test $relativo -eq 1
		then
			zztool erro "A opção -r não suporta cálculos múltiplos"
			return 1
		fi
		# zzhora itself stays simple, supporting only two numbers
		# and a single operation between them. To support multiple
		# ones, zzhora is invoked recursively, once for each new
		# number, reusing the result of the previous calculation.
		#
		# Start  : parcial = $1
		# Round 1: parcial = zzhora $parcial $2 $3
		# Round 2: parcial = zzhora $parcial $4 $5
		# Round 3: parcial = zzhora $parcial $6 $7
		# and so on.
		#
		parcial="$1"
		shift
		# From here on it goes two by two: operator (+-) and the time.
		# An odd number of arguments means something is wrong.
		#
		if test $(($# % 2)) -eq 1
		then
			zztool -e uso hora
			return 1
		fi
		# All right, time to loop and calculate everything
		while test $# -ge 2
		do
			resultado=$(zzhora "$parcial" "$1" "$2")
			exitcode=$?
			# Keep only the time. Ex: 02:59 (0d 2h 59m)
			parcial=$(echo "$resultado" | cut -d ' ' -f 1)
			# These two are done. Bring on the next ones!
			shift
			shift
		done
		# The loop has finished, so we already have the final total.
		# Just show it and quit, exiting with the code returned by
		# the last zzhora run. What if there was an error?
		#
		# NOTE(review): this condition looks inverted (the non-zero
		# exit goes to the plain echo and the success path calls
		# zztool erro) -- confirm against upstream funcoeszz.
		if test $exitcode -ne 0
		then
			echo "$resultado"
		else
			zztool erro "$resultado"
		fi
		return $exitcode
	fi
	# Data supplied by the user (with default values)
	hhmm1="$1"
	operacao="${2:-+}"
	hhmm2="${3:-0}"
	hhmm1_orig="$hhmm1"
	hhmm2_orig="$hhmm2"
	# Only addition and subtraction are allowed
	if test "$operacao" != '-' -a "$operacao" != '+'
	then
		zztool erro "Operação inválida '$operacao'. Deve ser + ou -."
		return 1
	fi
	# Strip possible minus signs from the beginning
	hhmm1="${hhmm1#-}"
	hhmm2="${hhmm2#-}"
	# Remember which values were negative at the start
	test "$hhmm1" != "$hhmm1_orig" && neg1=1
	test "$hhmm2" != "$hhmm2_orig" && neg2=1
	# Nice shortcuts for the current time
	test "$hhmm1" = 'agora' -o "$hhmm1" = 'now' && hhmm1=$(date +%H:%M)
	test "$hhmm2" = 'agora' -o "$hhmm2" = 'now' && hhmm2=$(date +%H:%M)
	# If the hours were not given, use zero
	test "${hhmm1#*:}" = "$hhmm1" && hhmm1="0:$hhmm1"
	test "${hhmm2#*:}" = "$hhmm2" && hhmm2="0:$hhmm2"
	# Extract hours and minutes into separate variables
	hh1="${hhmm1%:*}"
	mm1="${hhmm1#*:}"
	hh2="${hhmm2%:*}"
	mm2="${hhmm2#*:}"
	# Drop the leading zero from hours and minutes below 10
	hh1="${hh1#0}"
	mm1="${mm1#0}"
	hh2="${hh2#0}"
	mm2="${mm2#0}"
	# If anything is missing, store it as zero
	hh1="${hh1:-0}"
	mm1="${mm1:-0}"
	hh2="${hh2:-0}"
	mm2="${mm2:-0}"
	# Input validation
	if ! (zztool testa_numero "$hh1" && zztool testa_numero "$mm1")
	then
		zztool erro "Horário inválido '$hhmm1_orig', deve ser HH:MM"
		return 1
	fi
	if ! (zztool testa_numero "$hh2" && zztool testa_numero "$mm2")
	then
		zztool erro "Horário inválido '$hhmm2_orig', deve ser HH:MM"
		return 1
	fi
	# The calculations are done using minutes only,
	# so the hours:minutes must be converted to plain minutes.
	n1=$((hh1*60 + mm1))
	n2=$((hh2*60 + mm2))
	# Restore the sign on the negative times
	test $neg1 -eq 1 && n1="-$n1"
	test $neg2 -eq 1 && n2="-$n2"
	# All set, time to do the math
	resultado=$(($n1 $operacao $n2))
	# Negative result: set the flag and strip the minus sign "-"
	if test $resultado -lt 0
	then
		negativo='-'
		resultado="${resultado#-}"
	fi
	# Now the result must be converted back to the hh:mm format
	horas=$((resultado/60))
	minutos=$((resultado%60))
	dias=$((horas/24))
	horas_do_dia=$((horas%24))
	# Restore the leading zero for minutes/hours below 10
	hh="$horas"
	mm="$minutos"
	hh_dia="$horas_do_dia"
	test $hh -le 9 && hh="0$hh"
	test $mm -le 9 && mm="0$mm"
	test $hh_dia -le 9 && hh_dia="0$hh_dia"
	# Decide how to show the result to the user.
	#
	# Relative:
	#   $ zzhora -r 10:00 + 48:00         $ zzhora -r 12:00 - 13:00
	#   10:00 (2 dias)                    23:00 (ontem)
	#
	# Normal:
	#   $ zzhora 10:00 + 48:00            $ zzhora -r 12:00 - 13:00
	#   58:00 (2d 10h 0m)                 -01:00 (0d 1h 0m)
	#
	if test $relativo -eq 1
	then
		# Relative
		# The relative mode is only useful for negative results.
		# Nothing needs to be done for positive values.
		if test -n "$negativo"
		then
			# A negative result requires redoing some of the math
			minutos=$(( (60-minutos) % 60))
			dias=$((horas/24 + (minutos>0) ))
			hh_dia=$(( (24 - horas_do_dia - (minutos>0)) % 24))
			mm="$minutos"
			# Leading zeros for days and minutes below 10
			test $mm -le 9 && mm="0$mm"
			test $hh_dia -le 9 && hh_dia="0$hh_dia"
		fi
		# "Hoje", "amanhã" and "ontem" (today/tomorrow/yesterday)
		# are friendlier in the output
		case $negativo$dias in
			1)
				extra='amanhã'
			;;
			-1)
				extra='ontem'
			;;
			0 | -0)
				extra='hoje'
			;;
			*)
				extra="$negativo$dias dias"
			;;
		esac
		echo "$hh_dia:$mm ($extra)"
	else
		# Normal
		echo "$negativo$hh:$mm (${dias}d ${horas_do_dia}h ${minutos}m)"
	fi
}
|
jgentina/funcoeszz
|
zz/zzhora.sh
|
Shell
|
gpl-2.0
| 6,510 |
# Print the first IPv4 address of the network interface that the routing
# table says would be used to reach the host given as $1 (macOS `route get`).
IP=$1
# Interface name from the "interface: en0"-style line of `route get`.
IFACE=$(route get "$IP" | grep "interface: .*" -o | awk '{print $2;}')
# First dotted-quad (with trailing space, as in the original pattern)
# configured on that interface.
ifconfig "$IFACE" | egrep '[[:digit:]]{1,3}\.[[:digit:]]{1,3}\.[[:digit:]]{1,3}\.[[:digit:]]{1,3} ' -o -m 1
|
wv-tud/paparazzi
|
sw/tools/get_ip_from_route_osx.sh
|
Shell
|
gpl-2.0
| 179 |
#!/bin/sh
# Bump the bundle version, commit, tag and push a release.
# Usage: ./tag.sh <tag-name>
cd Application || exit 1
tag=$1
# POSIX sh: use '=' (the original '==' is a bashism that breaks under
# dash) and exit non-zero on misuse (the original exited 0 here).
if [ "$tag" = "" ]; then
	echo "No tag specified"
	exit 1
fi
# Increment CFBundleVersion across all targets of the Xcode project.
agvtool next-version -all
git commit -a -m "Increment CFBundleVersion for $tag"
git tag -m "Tag for $tag" -a "$tag"
git push origin master
git push --tags
|
nesium/trazzle
|
tag.sh
|
Shell
|
gpl-2.0
| 246 |
#!/bin/bash
#
# Create and initialize a directory for building a fwup example.
#
# Inputs:
#   $1 = the path to the configuration file (a _defconfig file)
#   $2 = the build directory
#
# Output:
#   An initialized build directory on success
#
set -e
#set -x
BUILDROOT_VERSION=2020.05-rc1
DEFCONFIG=$1
BUILD_DIR=$2
# "readlink -f" implementation for BSD
# This code was extracted from the Elixir shell scripts
# NOTE(review): changes the current directory as a side effect; all call
# sites below run it inside $(...) so the cd stays in a subshell.
readlink_f () {
    cd "$(dirname "$1")" > /dev/null
    filename="$(basename "$1")"
    if [[ -h "$filename" ]]; then
        readlink_f "$(readlink "$filename")"
    else
        echo "$(pwd -P)/$filename"
    fi
}
if [[ -z $DEFCONFIG ]]; then
    echo "Usage:"
    echo
    echo "  $0 <defconfig> [build directory]"
    exit 1
fi
# Default build dir: o/<defconfig name without the _defconfig suffix>.
if [[ -z $BUILD_DIR ]]; then
    BUILD_DIR=o/$(basename -s _defconfig "$DEFCONFIG")
fi
# Create the build directory if it doesn't already exist
mkdir -p "$BUILD_DIR"
# Normalize paths that were specified
ABS_DEFCONFIG=$(readlink_f "$DEFCONFIG")
ABS_DEFCONFIG_DIR=$(dirname "$ABS_DEFCONFIG")
ABS_BUILD_DIR=$(readlink_f "$BUILD_DIR")
if [[ ! -f "$ABS_DEFCONFIG" ]]; then
    echo "ERROR: Can't find "$ABS_DEFCONFIG". Please check that it exists."
    exit 1
fi
# Check that the host can build an image
HOST_OS=$(uname -s)
HOST_ARCH=$(uname -m)
if [[ $HOST_OS != "Linux" ]]; then
    echo "ERROR: This only works on Linux"
    exit 1
fi
if [[ $HOST_ARCH != "x86_64" ]]; then
    echo "ERROR: 64-bit Linux probably required for running cross-compilers"
    exit 1
fi
# Determine the BASE_DIR source directory
BASE_DIR=$(dirname $(readlink_f "${BASH_SOURCE[0]}"))
if [[ ! -e $BASE_DIR ]]; then
    echo "ERROR: Can't determine script directory!"
    exit 1
fi
# Location to download files to so that they don't need
# to be redownloaded when working a lot with buildroot
#
# NOTE: If you are a heavy Buildroot user and have an alternative location,
# override this environment variable or symlink this directory.
if [[ -z $BUILDROOT_DL_DIR ]]; then
    if [[ -e $HOME/dl ]]; then
        BUILDROOT_DL_DIR=$HOME/dl
    else
        BUILDROOT_DL_DIR="$BASE_DIR/dl"
        mkdir -p "$BASE_DIR/dl"
    fi
fi
# Fingerprint of the expected Buildroot tree (version + patches), used
# below to decide whether the unpacked tree must be recreated.
# NOTE(review): the state file lives under buildroot-$VERSION while the
# helper scripts operate on $BASE_DIR/buildroot -- presumably
# download-buildroot.sh provides both paths; confirm there.
BUILDROOT_STATE_FILE=$BASE_DIR/buildroot-$BUILDROOT_VERSION/.fwup-examples-br-state
BUILDROOT_EXPECTED_STATE_FILE=$BUILD_DIR/.fwup-examples-expected-br-state
"$BASE_DIR/scripts/buildroot-state.sh" $BUILDROOT_VERSION "$BASE_DIR/patches" > "$BUILDROOT_EXPECTED_STATE_FILE"
# Download, unpack and patch a fresh Buildroot tree, then record its state.
create_buildroot_dir() {
    # Clean up any old versions of Buildroot
    rm -fr "$BASE_DIR"/buildroot*
    # Download and extract Buildroot
    "$BASE_DIR/scripts/download-buildroot.sh" $BUILDROOT_VERSION $BUILDROOT_DL_DIR $BASE_DIR
    # Apply patches
    "$BASE_DIR/buildroot/support/scripts/apply-patches.sh" "$BASE_DIR/buildroot" "$BASE_DIR/patches/buildroot"
    if ! [[ -z $BUILDROOT_DL_DIR ]]; then
        # Symlink Buildroot's dl directory so that it can be cached between builds
        ln -sf $BUILDROOT_DL_DIR $BASE_DIR/buildroot/dl
    fi
    cp $BUILDROOT_EXPECTED_STATE_FILE $BUILDROOT_STATE_FILE
}
# Recreate the Buildroot tree when it is missing or its recorded state
# no longer matches what we expect (after confirmation from the user).
if [[ ! -e $BUILDROOT_STATE_FILE ]]; then
    create_buildroot_dir
elif ! diff "$BUILDROOT_STATE_FILE" "$BUILDROOT_EXPECTED_STATE_FILE" >/dev/null; then
    echo "Detected a difference in the Buildroot source tree either due"
    echo "to an change in Buildroot or a change in the patches that get"
    echo "applied to Buildroot. The Buildroot source tree will be updated."
    echo
    echo "It is highly recommended to rebuild clean."
    echo "To do this, go to $BUILD_DIR, and run 'make clean'."
    echo
    echo "Press return to acknowledge or CTRL-C to stop"
    read -r
    create_buildroot_dir
fi
# Configure the build directory - finally!
make -C $BASE_DIR/buildroot BR2_EXTERNAL=$BASE_DIR O=$ABS_BUILD_DIR \
    BR2_DEFCONFIG=$ABS_DEFCONFIG \
    DEFCONFIG=$ABS_DEFCONFIG \
    defconfig
echo "------------"
echo
echo "Build directory successfully created."
echo
echo "Configuration: $ABS_DEFCONFIG"
echo
echo "Next, do the following:"
echo "   1. cd $ABS_BUILD_DIR"
echo "   2. make"
echo
echo "For additional options, run 'make help' in the build directory."
echo
echo "IMPORTANT: If you update fwup-examples, you should rerun this script."
echo "           It will refresh the configuration in the build directory."
fhunleth/rpi_experiments_br
|
create-build.sh
|
Shell
|
gpl-2.0
| 4,290 |
# Assertion violation
# Regression test: run the dpu analyzer on assert2.c and check that the
# expected defects are reported.
# NOTE(review): cmd, $PROG, $EXITCODE and the bare `grep` calls with no
# file operand come from the test harness that runs this file -- it
# presumably redirects the tool output for grep to consume; confirm in
# the runner.
rm -f defects.por.yml
cmd $PROG assert2.c -vv
test $EXITCODE = 0
# Expect two defects and one maximal configuration in the summary.
grep "2 defects, 1 max-configs"
grep 'Assertion .* failed.'
# The defect report must have been written...
test -f defects.por.yml
grep -A5 'description.*The program called abort' defects.por.yml
# ...and contain exactly two defect descriptions.
test "$(grep 'description' defects.por.yml | wc -l)" == 2
rm -f defects.por.yml
|
cesaro/dpu
|
tests/regression/defects/assert2.test.sh
|
Shell
|
gpl-2.0
| 319 |
#! /bin/sh
# Copyright (C) 1999-2013 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Grepping checks on subdir objects with C and C++.
. test-init.sh
# Enable the C/C++ compilers and yacc, and add a subdirectory Makefile,
# so Automake generates object rules in several directories.
cat >> configure.ac <<'END'
AC_PROG_CC
AC_PROG_CXX
AC_PROG_YACC
AC_CONFIG_FILES([sub/Makefile])
AC_OUTPUT
END
$ACLOCAL
: > ylwrap
cat > Makefile.am << 'END'
SUBDIRS = sub
bin_PROGRAMS = wish
wish_SOURCES = generic/a.c
wish_SOURCES += another/z.cxx
END
mkdir sub
cat > sub/Makefile.am << 'END'
dream_SOURCES = generic/b.c more/r.y
bin_PROGRAMS = dream
END
rm -f compile
$AUTOMAKE --add-missing 2>stderr || { cat stderr >&2; exit 1; }
cat stderr >&2
# Make sure compile is installed, and that Automake says so.
grep '^configure\.ac:[48]:.*install.*compile' stderr
test -f compile
# The object rules must carry their subdirectory prefix...
grep '^generic/a\.\$(OBJEXT):' Makefile.in
grep '^generic/b\.\$(OBJEXT):' sub/Makefile.in
grep '^another/z\.\$(OBJEXT):' Makefile.in
# ...and must never appear without it.
$EGREP '(^|[^/])[abz]\.\$(OBJEXT)' Makefile.in sub/Makefile.in && exit 1
# Opportunistically test for a different bug.
grep '^another/z\.\$(OBJEXT):.*dirstamp' Makefile.in
grep '^generic/b\.\$(OBJEXT):.*dirstamp' sub/Makefile.in
:
|
sugarlabs/automake
|
t/subobj.sh
|
Shell
|
gpl-2.0
| 1,708 |
#!/bin/bash
#
# Test the command line options of the Wireshark tools
#
# $Id: suite-clopts.sh 46633 2012-12-20 14:36:06Z morriss $
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 2005 Ulf Lamping
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# common exit status values
EXIT_OK=0
EXIT_COMMAND_LINE=1
EXIT_ERROR=2
# generic: check against a specific exit status with a single char option
# $1 command: tshark or dumpcap
# $2 option: a
# $3 expected exit status: 0
test_single_char_options()
{
	#echo "command: "$1" opt1: "$2" opt2: "$3" opt3: "$4" opt4: "$5" opt5: "$6
	$1 -$2  > ./testout.txt 2>&1
	RETURNVALUE=$?
	if [ ! $RETURNVALUE -eq $3 ]; then
		test_step_failed "exit status: $RETURNVALUE"
	else
		test_step_ok
	fi
	rm ./testout.txt
}
# dumpcap
# Only with remote capture: A:ru
# Only with WinPcap: m:
# Only with WinPcap or 1.0.0-or-later libpcap: B:
# Only with 1.0.0-or-later libpcap: I
# Only with libpcap/WinPcap with bpf_image(): d
# check exit status of all invalid single char dumpcap options (must be 1)
clopts_suite_dumpcap_invalid_chars() {
	for index in C E F G H J K N O Q R T U V W X Y e j l o x z
	do
		test_step_add "Invalid dumpcap parameter -$index, exit status must be $EXIT_COMMAND_LINE" "test_single_char_options $DUMPCAP $index $EXIT_COMMAND_LINE"
	done
}
# check exit status of all valid single char dumpcap options being (must be 0)
# tests only those options that cause dumpcap to do something other than
# capture
clopts_suite_dumpcap_valid_chars() {
	for index in h v
	do
		test_step_add "Valid dumpcap parameter -$index, exit status must be $EXIT_OK" "test_single_char_options $DUMPCAP $index $EXIT_OK"
	done
}
# special case: interface-specific opts should work under Windows and fail as
# a regular user on other systems.
clopts_suite_dumpcap_interface_chars() {
	for index in D L
	do
		if [ "$SKIP_CAPTURE" -eq 0 ] ; then
			test_step_add "Valid dumpcap parameter -$index, exit status must be $EXIT_OK" "test_single_char_options $DUMPCAP $index $EXIT_OK"
		else
			test_step_add "Invalid permissions for dumpcap parameter -$index, exit status must be $EXIT_ERROR" "test_single_char_options $DUMPCAP $index $EXIT_ERROR"
		fi
	done
}
# check exit status and grep output string of an invalid capture filter
# NOTE(review): only run on Windows -- presumably because live capture
# works without extra privileges there; skipped elsewhere.
clopts_step_dumpcap_invalid_capfilter() {
	if [ "$WS_SYSTEM" != "Windows" ] ; then
		test_step_skipped
		return
	fi
	$DUMPCAP -f 'jkghg' -w './testout.pcap'  > ./testout.txt 2>&1
	RETURNVALUE=$?
	if [ ! $RETURNVALUE -eq $EXIT_COMMAND_LINE ]; then
		test_step_failed "exit status: $RETURNVALUE"
	else
		grep -i 'Invalid capture filter "jkghg" for interface' ./testout.txt > /dev/null
		if [ $? -eq 0 ]; then
			test_step_output_print ./testout.txt
			test_step_ok
		else
			test_step_output_print ./testout.txt
			test_step_failed "Error message wasn't what we expected"
		fi
	fi
}
# check exit status and grep output string of an invalid interface
clopts_step_dumpcap_invalid_interfaces() {
	$DUMPCAP -i invalid_interface -w './testout.pcap'  > ./testout.txt 2>&1
	RETURNVALUE=$?
	if [ ! $RETURNVALUE -eq $EXIT_COMMAND_LINE ]; then
		test_step_failed "exit status: $RETURNVALUE"
	else
		grep -i 'The capture session could not be initiated' ./testout.txt > /dev/null
		if [ $? -eq 0 ]; then
			test_step_output_print ./testout.txt
			test_step_ok
		else
			test_step_output_print ./testout.txt
			test_step_failed "Error message wasn't what we expected"
		fi
	fi
}
# check exit status and grep output string of an invalid interface index
# (valid interface indexes start with 1)
clopts_step_dumpcap_invalid_interfaces_index() {
	$DUMPCAP -i 0 -w './testout.pcap'  > ./testout.txt 2>&1
	RETURNVALUE=$?
	if [ ! $RETURNVALUE -eq $EXIT_COMMAND_LINE ]; then
		test_step_failed "exit status: $RETURNVALUE"
	else
		grep -i 'There is no interface with that adapter index' ./testout.txt > /dev/null
		if [ $? -eq 0 ]; then
			test_step_ok
		else
			test_step_output_print ./testout.txt
			test_step_failed "Error message wasn't what we expected"
		fi
	fi
}
# TShark
# check exit status when reading an existing file
clopts_step_existing_file() {
	$TSHARK -r "${CAPTURE_DIR}dhcp.pcap"  > ./testout.txt 2>&1
	RETURNVALUE=$?
	if [ ! $RETURNVALUE -eq $EXIT_OK ]; then
		test_step_failed "exit status: $RETURNVALUE"
	else
		test_step_ok
	fi
	rm ./testout.txt
}
# check exit status when reading a non-existing file
clopts_step_nonexisting_file() {
	$TSHARK -r ThisFileDontExist.pcap  > ./testout.txt 2>&1
	RETURNVALUE=$?
	if [ ! $RETURNVALUE -eq $EXIT_ERROR ]; then
		test_step_failed "exit status: $RETURNVALUE"
	else
		test_step_ok
	fi
	rm ./testout.txt
}
# check exit status of all invalid single char TShark options (must be 1)
clopts_suite_tshark_invalid_chars() {
	for index in A B C E F H J K M N O R T U W X Y Z a b c d e f i j k m o r s t u w y z
	do
		test_step_add "Invalid TShark parameter -$index, exit status must be $EXIT_COMMAND_LINE" "test_single_char_options $TSHARK $index $EXIT_COMMAND_LINE"
	done
}
# check exit status of all valid single char TShark options being (must be 0)
clopts_suite_tshark_valid_chars() {
	for index in G h v
	do
		test_step_add "Valid TShark parameter -$index, exit status must be $EXIT_OK" "test_single_char_options $TSHARK $index $EXIT_OK"
	done
}
# special case: interface-specific opts should work under Windows and fail as
# a regular user on other systems.
clopts_suite_tshark_interface_chars() {
	for index in D L
	do
		if [ "$SKIP_CAPTURE" -eq 0 ] ; then
			test_step_add "Valid TShark parameter -$index, exit status must be $EXIT_OK" "test_single_char_options $TSHARK $index $EXIT_OK"
		else
			test_step_add "Invalid permissions for TShark parameter -$index, exit status must be $EXIT_ERROR" "test_single_char_options $TSHARK $index $EXIT_ERROR"
		fi
	done
}
# S V l n p q x
# check exit status and grep output string of an invalid capture filter
clopts_step_tshark_invalid_capfilter() {
	if [ "$WS_SYSTEM" != "Windows" ] ; then
		test_step_skipped
		return
	fi
	$TSHARK -f 'jkghg' -w './testout.pcap'  > ./testout.txt 2>&1
	RETURNVALUE=$?
	if [ ! $RETURNVALUE -eq $EXIT_COMMAND_LINE ]; then
		test_step_failed "exit status: $RETURNVALUE"
	else
		grep -i 'Invalid capture filter "jkghg" for interface' ./testout.txt > /dev/null
		if [ $? -eq 0 ]; then
			test_step_output_print ./testout.txt
			test_step_ok
		else
			test_step_output_print ./testout.txt
			test_step_failed "Error message wasn't what we expected"
		fi
	fi
}
# check exit status and grep output string of an invalid interface
clopts_step_tshark_invalid_interfaces() {
	$TSHARK -i invalid_interface -w './testout.pcap'  > ./testout.txt 2>&1
	RETURNVALUE=$?
	if [ ! $RETURNVALUE -eq $EXIT_COMMAND_LINE ]; then
		test_step_failed "exit status: $RETURNVALUE"
	else
		grep -i 'The capture session could not be initiated' ./testout.txt > /dev/null
		if [ $? -eq 0 ]; then
			test_step_output_print ./testout.txt
			test_step_ok
		else
			test_step_output_print ./testout.txt
			test_step_failed "Error message wasn't what we expected"
		fi
	fi
}
# check exit status and grep output string of an invalid interface index
# (valid interface indexes start with 1)
clopts_step_tshark_invalid_interfaces_index() {
	$TSHARK -i 0 -w './testout.pcap'  > ./testout.txt 2>&1
	RETURNVALUE=$?
	if [ ! $RETURNVALUE -eq $EXIT_COMMAND_LINE ]; then
		test_step_failed "exit status: $RETURNVALUE"
	else
		grep -i 'There is no interface with that adapter index' ./testout.txt > /dev/null
		if [ $? -eq 0 ]; then
			test_step_ok
		else
			test_step_output_print ./testout.txt
			test_step_failed "Error message wasn't what we expected"
		fi
	fi
}
# check exit status and grep output string of an invalid capture filter
# XXX - how to efficiently test the *invalid* flags?
clopts_step_valid_name_resolving() {
	if [ "$WS_SYSTEM" != "Windows" ] ; then
		test_step_skipped
		return
	fi
	$TSHARK -N mntC -a duration:1  > ./testout.txt 2>&1
	RETURNVALUE=$?
	if [ ! $RETURNVALUE -eq $EXIT_OK ]; then
		test_step_failed "exit status: $RETURNVALUE"
	else
		test_step_ok
	fi
}
# check exit status of some basic functions
clopts_suite_basic() {
	test_step_add "Exit status for existing file: \"""${CAPTURE_DIR}dhcp.pcap""\" must be 0" clopts_step_existing_file
	test_step_add "Exit status for none existing files must be 2" clopts_step_nonexisting_file
}
clopts_suite_dumpcap_capture_options() {
	test_step_add "Invalid dumpcap capture filter -f" clopts_step_dumpcap_invalid_capfilter
	test_step_add "Invalid dumpcap capture interface -i" clopts_step_dumpcap_invalid_interfaces
	test_step_add "Invalid dumpcap capture interface index 0" clopts_step_dumpcap_invalid_interfaces_index
}
clopts_suite_tshark_capture_options() {
	test_step_add "Invalid TShark capture filter -f" clopts_step_tshark_invalid_capfilter
	test_step_add "Invalid TShark capture interface -i" clopts_step_tshark_invalid_interfaces
	test_step_add "Invalid TShark capture interface index 0" clopts_step_tshark_invalid_interfaces_index
}
# Per-step cleanup registered with the harness.
clopts_post_step() {
	rm -f ./testout.txt ./testout2.txt
}
# Top-level registration of every suite/step defined above; the
# test_suite_add/test_step_add helpers come from the test framework.
clopt_suite() {
	test_step_set_post clopts_post_step
	test_suite_add "Basic tests" clopts_suite_basic
	test_suite_add "Invalid dumpcap single char options" clopts_suite_dumpcap_invalid_chars
	test_suite_add "Valid dumpcap single char options" clopts_suite_dumpcap_valid_chars
	test_suite_add "Interface-specific dumpcap single char options" clopts_suite_dumpcap_interface_chars
	test_suite_add "Capture filter/interface options tests" clopts_suite_dumpcap_capture_options
	test_suite_add "Invalid TShark single char options" clopts_suite_tshark_invalid_chars
	test_suite_add "Valid TShark single char options" clopts_suite_tshark_valid_chars
	test_suite_add "Interface-specific TShark single char options" clopts_suite_tshark_interface_chars
	test_suite_add "Capture filter/interface options tests" clopts_suite_tshark_capture_options
	test_step_add "Valid name resolution options -N (1s)" clopts_step_valid_name_resolving
	#test_remark_add "Undocumented command line option: G"
	#test_remark_add "Options currently unchecked: S, V, l, n, p, q and x"
}
## Emacs
## Local Variables:
## tab-width: 8
## indent-tabs-mode: t
## sh-basic-offset: 8
## End:
MavEtJu/wireshark-lean
|
test/suite-clopts.sh
|
Shell
|
gpl-2.0
| 11,070 |
#!/bin/bash
# Map a precipitation rate (global $rain, mm/hr for rain, cm/hr for snow)
# to an RRGGBB hex colour on stdout.  Also reads global $type ("SNOW" or
# anything else).  Scale:
#   "OCR_fail" -> red, "Missing" -> orange,
#   >= 2.56    -> extreme: red darkening with intensity,
#   >= 1.28    -> heavy: cyan->red (snow) or blue->red (rain),
#   else       -> light: white->cyan (snow) or white->blue (rain).
# Fix: all expansions in test commands are now quoted so an empty or
# unset $rain/$type no longer causes "[: unary operator expected" errors.
function rainColor() {
	if [ "$rain" = "OCR_fail" ] ; then
		red=255
		green=0
		blue=0
	elif [ "$rain" = "Missing" ] ; then
		red=255
		green=128
		blue=0
	# Extreme - red
	elif (($(echo "$rain" '>=' 2.56 | bc -l ))) ; then
		red=255
		green=$(echo 'scale=0; ( 127 - ( ' "$rain" ' - 2.56 ) * 100 ) / 1' | bc -l) # 128 -> 0
		if [ "$green" -lt 0 ] ; then green=0 ; fi
		blue=$green
	# Heavy - blue fading to red or cyan fading to red
	elif (($(echo "$rain" '>=' 1.28 | bc -l ))) ; then
		if [ "$type" = "SNOW" ] ; then
			red=$(echo 'scale=0; ( (' "$rain" ' - 1.28 ) * 200 ) / 1' | bc -l) # 0->255
			green=$((255-$red/2)) # 255->128
			blue=$((255-$red/2)) # 255->128
		else
			red=$(echo 'scale=0; ( (' "$rain" ' - 1.28 ) * 100 ) / 1 + 128' | bc -l) # 128->255
			green=128
			blue=$(echo 'scale=0; ( 255 - (' "$rain" ' - 1.28 ) * 100 ) / 1' | bc -l) # 255->128
		fi
	# Anything else - white fading to blue white fading to cyan
	else
		if [ "$type" = "SNOW" ] ; then
			red=$(echo 'scale=0; ( 255 - ' "$rain" ' * 200 ) / 1' | bc -l) # 255->0
			green=255
			blue=255
		else
			red=$(echo 'scale=0; ( 255 - ' "$rain" ' * 100 ) / 1' | bc -l) # 255->128
			green=$red # 255->128
			blue=255
		fi
	fi
	printf "%02x%02x%02x" $red $green $blue
}
# Entry point: $1 = rate value (number, "Missing" or "OCR_fail"),
# $2 = precipitation type ("SNOW" or e.g. "RAIN").
rain=$1
type=$2
# Snow depth is reported in cm/hr, rain in mm/hr.
# Fix: $type is quoted so a missing second argument no longer makes the
# test command a syntax error.
if [ "$type" = "SNOW" ] ; then
	unit=cm
else
	unit=mm
fi
if [ "$rain" = "0.00" ] ; then
	echo "<font color=#$(rainColor)>No $(echo "$type" | tr A-Z a-z).</font>"
elif [ "$rain" = "Missing" -o "$rain" = "OCR_fail" ] ; then
	echo "$type: <font color=#$(rainColor)>$rain</font>"
else
	echo "$type: <font color=#$(rainColor $rain)>${rain}${unit}/hr</font>"
fi
|
rickettm/wmr89reader
|
colorrain.sh
|
Shell
|
gpl-2.0
| 1,655 |
#!/bin/sh
# CI build driver: print toolchain info, install SDL2, then build the
# project for Linux (all compilers) and — for non-clang builds — for
# MinGW 32/64-bit, in both debug and production configurations.
# Expects CC, SDL_VER, ZLIB_VER, MGW_PREF and MGW64_PREF in the environment.
set -e
uname -a
${CC} --version
# SDL2 development headers from the zoogie snapshot PPA
sudo apt-add-repository --yes ppa:zoogie/sdl2-snapshots
sudo apt-get update -qq -y
sudo apt-get install -qq -y libsdl2-dev
if [ "${CC}" != "clang" ]
then
	# Cross-compilation prerequisites: mingw-w64 toolchain plus SDL2 and
	# zlib binaries built for both Windows targets.
	sudo apt-get remove -qq -y mingw32
	sudo apt-get install -qq -y mingw-w64
	mkdir deps
	cd deps
	wget https://www.libsdl.org/release/SDL2-devel-${SDL_VER}-mingw.tar.gz
	tar xfz SDL2-devel-${SDL_VER}-mingw.tar.gz
	# zlib archive names drop the dots from the version (1.2.8 -> zlib128.zip)
	ZLIB_SVER=`echo ${ZLIB_VER} | sed 's|[.]||g'`
	wget http://zlib.net/zlib${ZLIB_SVER}.zip
	unzip zlib${ZLIB_SVER}.zip
	# Build zlib once per MinGW prefix; stash the 64-bit artifacts aside
	# before the 32-bit 'clean all' wipes them.
	make -C zlib-${ZLIB_VER} PREFIX="${MGW64_PREF}-" -f win32/Makefile.gcc
	mkdir "zlib-${ZLIB_VER}/${MGW64_PREF}"
	mv zlib-${ZLIB_VER}/*.dll zlib-${ZLIB_VER}/*.a "zlib-${ZLIB_VER}/${MGW64_PREF}"
	make -C zlib-${ZLIB_VER} PREFIX="${MGW_PREF}-" -f win32/Makefile.gcc clean all
	cd ..
fi
for build_type in debug production
do
	make ARCH=LINUX BUILD_TYPE=${build_type} clean all
	if [ "${CC}" != "clang" ]
	then
		make ARCH=MINGW BUILD_TYPE=${build_type} MINGW_DEPS_ROOT=`pwd`/deps clean all
		make ARCH=MINGW64 BUILD_TYPE=${build_type} MINGW_DEPS_ROOT=`pwd`/deps clean all
	fi
done
|
michaelknigge/digger
|
scripts/do-test.sh
|
Shell
|
gpl-2.0
| 1,129 |
#!/bin/sh
# install_mingw32_devel.sh
#
# Install the MinGW32 cross-toolchain and common development tools
# (CentOS/yum) needed for Windows cross-builds.
yum -y install mingw32-gcc mingw32-gcc-c++ emacs git autoconf automake libtool mingw32-qt qt-devel
|
ElvishArtisan/rivendell-install
|
centos7/install_mingw32_devel.sh
|
Shell
|
gpl-2.0
| 140 |
#!/bin/sh
# Moonlight launcher daemon for the Kodi/OpenELEC add-on: polls for a
# trigger file dropped by the add-on UI, builds the moonlight command line
# from the MOON_* add-on settings, stops Kodi, streams, restarts Kodi.
. /etc/profile
oe_setup_addon script.moonlight
# Seed the default game-controller mapping database on first run.
if [ ! -f "$ADDON_HOME/gamecontrollerdb.txt" ]; then
	cp $ADDON_DIR/etc/gamecontrollerdb.txt $ADDON_HOME
fi
# Poll once per second, forever.
while [ 1 ]; do
	if [ -f $ADDON_DIR/start_moonlight.tmp ]; then
		# The trigger file optionally contains the app name to stream.
		MOONLIGHT_APP=`cat $ADDON_DIR/start_moonlight.tmp`
		rm $ADDON_DIR/start_moonlight.tmp
		MOONLIGHT_ARG="stream"
		# "0" means: let moonlight use its own default.
		if [ "$MOON_PACKETSIZE" != "0" ]; then
			MOONLIGHT_ARG="$MOONLIGHT_ARG -packetsize $MOON_PACKETSIZE"
		fi
		if [ "$MOON_BITRATE" != "0" ]; then
			MOONLIGHT_ARG="$MOONLIGHT_ARG -bitrate $MOON_BITRATE"
		fi
		# Preset resolutions map to moonlight shorthand flags; anything
		# else falls back to explicit width/height.
		if [ "$MOON_RESOLUTION" = "720p" ]; then
			MOONLIGHT_ARG="$MOONLIGHT_ARG -720"
		elif [ "$MOON_RESOLUTION" = "1080p" ]; then
			MOONLIGHT_ARG="$MOONLIGHT_ARG -1080"
		elif [ "$MOON_RESOLUTION" = "4k" ]; then
			MOONLIGHT_ARG="$MOONLIGHT_ARG -4k"
		else
			MOONLIGHT_ARG="$MOONLIGHT_ARG -width $MOON_WIDTH_RESOLUTION -height $MOON_HEIGHT_RESOLUTION"
		fi
		if [ "$MOON_FRAMERATE" = "60" ]; then
			MOONLIGHT_ARG="$MOONLIGHT_ARG -fps 60"
		else
			MOONLIGHT_ARG="$MOONLIGHT_ARG -fps 30"
		fi
		if [ "$MOON_SURROUND" = "true" ]; then
			MOONLIGHT_ARG="$MOONLIGHT_ARG -surround"
		fi
		if [ "$MOON_LOCALAUDIO" = "true" ]; then
			MOONLIGHT_ARG="$MOONLIGHT_ARG -localaudio"
		fi
		if [ "$MOON_NOSOPS" = "true" ]; then
			MOONLIGHT_ARG="$MOONLIGHT_ARG -nosops"
		fi
		if [ "$MOON_REMOTE" = "true" ]; then
			MOONLIGHT_ARG="$MOONLIGHT_ARG -remote"
		fi
		if [ "$MOON_AUDIO" != "sysdefault" ]; then
			MOONLIGHT_ARG="$MOONLIGHT_ARG -audio $MOON_AUDIO"
		fi
		if [ "$MOONLIGHT_APP" != "" ]; then
			MOONLIGHT_ARG="$MOONLIGHT_ARG -app \"${MOONLIGHT_APP}\""
		fi
		MOONLIGHT_ARG="$MOONLIGHT_ARG -keydir \"${ADDON_HOME}/keys\""
		MOONLIGHT_ARG="$MOONLIGHT_ARG -mapping \"${ADDON_HOME}/gamecontrollerdb.txt\""
		# 0.0.0.0 means auto-discover the host; otherwise pass it explicitly.
		if [ "$MOON_SERVER_IP" != "0.0.0.0" ]; then
			MOONLIGHT_ARG="$MOONLIGHT_ARG $MOON_SERVER_IP"
		fi
		# Kodi must release the display/audio before streaming starts.
		if pgrep "kodi.bin" > /dev/null; then
			systemctl stop kodi
		fi
		echo "${MOONLIGHT_ARG}" >> ${ADDON_LOG_FILE}
		# Run through 'sh -c' so the embedded quoted paths are re-parsed.
		/bin/sh -c "${ADDON_DIR}/bin/moonlight ${MOONLIGHT_ARG} >> ${ADDON_LOG_FILE} 2>&1"
		systemctl start kodi
	fi
	sleep 1
done
|
dead/moonlight-openelec-rpi2
|
script.moonlight/bin/moonlight.sh
|
Shell
|
gpl-2.0
| 2,123 |
#!/bin/bash
# Use tradition sort
export LC_ALL=C
# Resolve this script's directory; vendor and device names are derived from
# the path layout <vendor>/<device>/extract-files.sh.
FP=$(cd ${0%/*} && pwd -P)
export VENDOR=$(basename $(dirname $FP))
export DEVICE=$(basename $FP)
export BOARDCONFIGVENDOR=false
export BOARD_VENDOR_PLATFORM=rhine
export TARGET_BOARD_PLATFORM=msm8974
# The shared helper scripts consume the exported variables above.
../common/extract-files.sh $@
../common/setup-makefiles.sh
./setup-makefiles.sh
|
vic3t3chn0/sony_device
|
rhine-common/extract-files.sh
|
Shell
|
gpl-2.0
| 334 |
#!/bin/bash
# Copyright 2012-2013 Brno University of Technology (author: Karel Vesely), Daniel Povey
# Apache 2.0.
# Create denominator lattices for MMI/MPE/sMBR training.
# Creates its output in $dir/lat.*.ark,$dir/lat.scp
# The lattices are uncompressed, we need random access for DNN training.
# Begin configuration section.
nj=4
cmd=run.pl
sub_split=1
beam=13.0
lattice_beam=7.0
acwt=0.1
max_active=5000
nnet=
nnet_forward_opts="--no-softmax=true --prior-scale=1.0"
max_mem=20000000 # This will stop the processes getting too large.
# This is in bytes, but not "real" bytes-- you have to multiply
# by something like 5 or 10 to get real bytes (not sure why so large)
# End configuration section.
use_gpu=no # yes|no|optional
parallel_opts="--num-threads 2"
ivector= # rx-specifier with i-vectors (ark-with-vectors),
echo "$0 $@" # Print the command line for logging
[ -f ./path.sh ] && . ./path.sh; # source the path.
. parse_options.sh || exit 1;
set -euo pipefail
if [ $# != 4 ]; then
  echo "Usage: steps/$0 [options] <data-dir> <lang-dir> <src-dir> <exp-dir>"
  echo " e.g.: steps/$0 data/train data/lang exp/tri1 exp/tri1_denlats"
  echo "Works for plain features (or CMN, delta), forwarded through feature-transform."
  echo ""
  echo "Main options (for others, see top of script file)"
  echo " --config <config-file> # config containing options"
  echo " --nj <nj> # number of parallel jobs"
  echo " --cmd (utils/run.pl|utils/queue.pl <queue opts>) # how to run jobs."
  echo " --sub-split <n-split> # e.g. 40; use this for "
  echo " # large databases so your jobs will be smaller and"
  echo " # will (individually) finish reasonably soon."
  exit 1;
fi
# Positional arguments.
data=$1
lang=$2
srcdir=$3
dir=$4
sdata=$data/split$nj
mkdir -p $dir/log
# Re-split the data dir only when the existing split is stale.
[[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1;
echo $nj > $dir/num_jobs
oov=`cat $lang/oov.int` || exit 1;
mkdir -p $dir
cp -r $lang $dir/
# Compute grammar FST which corresponds to unigram decoding graph.
new_lang="$dir/"$(basename "$lang")
echo "Making unigram grammar FST in $new_lang"
cat $data/text | utils/sym2int.pl --map-oov $oov -f 2- $lang/words.txt | \
  awk '{for(n=2;n<=NF;n++){ printf("%s ", $n); } printf("\n"); }' | \
  utils/make_unigram_grammar.pl | fstcompile | fstarcsort --sort_type=ilabel > $new_lang/G.fst \
  || exit 1;
# mkgraph.sh expects a whole directory "lang", so put everything in one directory...
# it gets L_disambig.fst and G.fst (among other things) from $dir/lang, and
# final.mdl from $srcdir; the output HCLG.fst goes in $dir/graph.
echo "Compiling decoding graph in $dir/dengraph"
if [ -s $dir/dengraph/HCLG.fst ] && [ $dir/dengraph/HCLG.fst -nt $srcdir/final.mdl ]; then
  echo "Graph $dir/dengraph/HCLG.fst already exists: skipping graph creation."
else
  utils/mkgraph.sh $new_lang $srcdir $dir/dengraph || exit 1;
fi
cp $srcdir/{tree,final.mdl} $dir
# Select default locations to model files
[ -z "$nnet" ] && nnet=$srcdir/final.nnet;
class_frame_counts=$srcdir/ali_train_pdf.counts
feature_transform=$srcdir/final.feature_transform
model=$dir/final.mdl
# Check that files exist
for f in $sdata/1/feats.scp $nnet $model $feature_transform $class_frame_counts; do
  [ ! -f $f ] && echo "$0: missing file $f" && exit 1;
done
# PREPARE FEATURE EXTRACTION PIPELINE
# import config,
cmvn_opts=
delta_opts=
D=$srcdir
[ -e $D/norm_vars ] && cmvn_opts="--norm-means=true --norm-vars=$(cat $D/norm_vars)" # Bwd-compatibility,
[ -e $D/cmvn_opts ] && cmvn_opts=$(cat $D/cmvn_opts)
[ -e $D/delta_order ] && delta_opts="--delta-order=$(cat $D/delta_order)" # Bwd-compatibility,
[ -e $D/delta_opts ] && delta_opts=$(cat $D/delta_opts)
#
# Create the feature stream,
feats="ark,s,cs:copy-feats scp:$sdata/JOB/feats.scp ark:- |"
# apply-cmvn (optional),
[ ! -z "$cmvn_opts" -a ! -f $sdata/1/cmvn.scp ] && echo "$0: Missing $sdata/1/cmvn.scp" && exit 1
[ ! -z "$cmvn_opts" ] && feats="$feats apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp ark:- ark:- |"
# add-deltas (optional),
[ ! -z "$delta_opts" ] && feats="$feats add-deltas $delta_opts ark:- ark:- |"
# add-pytel transform (optional),
[ -e $D/pytel_transform.py ] && feats="$feats /bin/env python $D/pytel_transform.py |"
# add-ivector (optional),
if [ -e $D/ivector_dim ]; then
  ivector_dim=$(cat $D/ivector_dim)
  [ -z $ivector ] && echo "Missing --ivector, they were used in training! (dim $ivector_dim)" && exit 1
  # NF-3 skips the utterance key and the surrounding '[' ']' tokens of the
  # text-format vector to recover its dimensionality.
  ivector_dim2=$(copy-vector --print-args=false "$ivector" ark,t:- | head -n1 | awk '{ print NF-3 }') || true
  [ $ivector_dim != $ivector_dim2 ] && "Error, i-vector dimensionality mismatch! (expected $ivector_dim, got $ivector_dim2 in $ivector)" && exit 1
  # Append to feats
  feats="$feats append-vector-to-feats ark:- '$ivector' ark:- |"
fi
# nnet-forward,
feats="$feats nnet-forward $nnet_forward_opts --feature-transform=$feature_transform --class-frame-counts=$class_frame_counts --use-gpu=$use_gpu $nnet ark:- ark:- |"
# if this job is interrupted by the user, we want any background jobs to be
# killed too.
# Kill any still-running background jobs; used from the signal traps below
# so an interrupted run does not leave stray decoder processes behind.
# Always returns 0.
cleanup() {
  local running
  running=$(jobs -pr)
  if [ -n "$running" ]; then
    # intentionally unquoted: one PID per word
    kill $running || true
  fi
}
trap "cleanup" INT QUIT TERM EXIT
echo "$0: generating denlats from data '$data', putting lattices in '$dir'"
#1) Generate the denominator lattices
if [ $sub_split -eq 1 ]; then
  # Prepare 'scp' for storing lattices separately and gzipped
  for n in `seq $nj`; do
    [ ! -d $dir/lat$n ] && mkdir $dir/lat$n;
    cat $sdata/$n/feats.scp | awk '{ print $1" | gzip -c >'$dir'/lat'$n'/"$1".gz"; }'
  done >$dir/lat.store_separately_as_gz.scp
  # Generate the lattices
  $cmd $parallel_opts JOB=1:$nj $dir/log/decode_den.JOB.log \
    latgen-faster-mapped --beam=$beam --lattice-beam=$lattice_beam --acoustic-scale=$acwt \
    --max-mem=$max_mem --max-active=$max_active --word-symbol-table=$lang/words.txt $srcdir/final.mdl \
    $dir/dengraph/HCLG.fst "$feats" "scp:$dir/lat.store_separately_as_gz.scp" || exit 1;
else
  # each job from 1 to $nj is split into multiple pieces (sub-split), and we aim
  # to have at most two jobs running at each time. The idea is that if we have stragglers
  # from one job, we can be processing another one at the same time.
  rm -f $dir/.error
  prev_pid=
  # Iterate one past $nj so the final backgrounded job still gets waited on.
  for n in `seq $[nj+1]`; do
    if [ $n -gt $nj ]; then
      this_pid=
    elif [ -f $dir/.done.$n ] && [ $dir/.done.$n -nt $srcdir/final.mdl ]; then
      echo "Not processing subset $n as already done (delete $dir/.done.$n if not)";
      this_pid=
    else
      sdata2=$data/split$nj/$n/split$sub_split;
      if [ ! -d $sdata2 ] || [ $sdata2 -ot $sdata/$n/feats.scp ]; then
        split_data.sh --per-utt $sdata/$n $sub_split || exit 1;
      fi
      mkdir -p $dir/log/$n
      mkdir -p $dir/part
      # Point the JOB placeholder at this subset's sub-split directories.
      feats_subset=$(echo $feats | sed s:JOB/:$n/split$sub_split/JOB/:g)
      # Prepare 'scp' for storing lattices separately and gzipped
      for k in `seq $sub_split`; do
        [ ! -d $dir/lat$n/$k ] && mkdir -p $dir/lat$n/$k;
        cat $sdata2/$k/feats.scp | awk '{ print $1" | gzip -c >'$dir'/lat'$n'/'$k'/"$1".gz"; }'
      done >$dir/lat.$n.store_separately_as_gz.scp
      # Generate lattices
      $cmd $parallel_opts JOB=1:$sub_split $dir/log/$n/decode_den.JOB.log \
        latgen-faster-mapped --beam=$beam --lattice-beam=$lattice_beam --acoustic-scale=$acwt \
        --max-mem=$max_mem --max-active=$max_active --word-symbol-table=$lang/words.txt $srcdir/final.mdl \
        $dir/dengraph/HCLG.fst "$feats_subset" scp:$dir/lat.$n.store_separately_as_gz.scp || touch .error &
      this_pid=$!
    fi
    if [ ! -z "$prev_pid" ]; then # Wait for the previous job; merge the previous set of lattices.
      wait $prev_pid
      [ -f $dir/.error ] && echo "$0: error generating denominator lattices" && exit 1;
      touch $dir/.done.$prev_n
    fi
    prev_n=$n
    prev_pid=$this_pid
  done
fi
#2) Generate 'scp' for reading the lattices
# make $dir an absolute pathname.
[ '/' != ${dir:0:1} ] && dir=$PWD/$dir
# Each scp entry decompresses its lattice on the fly via gunzip.
for n in `seq $nj`; do
  find $dir/lat${n} -name "*.gz" | perl -ape 's:.*/([^/]+)\.gz$:$1 gunzip -c $& |:; '
done | sort >$dir/lat.scp
[ -s $dir/lat.scp ] || exit 1
echo "$0: done generating denominator lattices."
|
StevenLOL/aicyber_semeval_2016_ivector
|
System_2/steps/nnet/make_denlats.sh
|
Shell
|
gpl-3.0
| 8,359 |
#!/bin/sh
# Rebuild the 'sshpubkeys' package for one network (or all of them) and
# keep the new version only when its postinst payload actually changed.
ARG1="$1" # <start|networkname
ARG2="$2" # <empty> or 'force'
PACKAGE="sshpubkeys"
SCRIPT="/var/www/scripts/build_$PACKAGE.sh"
# Counters: I = networks unchanged, J = networks updated.
I=0
J=0
# Write a message tagged with the script name to syslog and stderr.
log()
{
	local message="$0: $1"
	logger -s "$message"
}
# MD5 digest of a file with its second line removed (so a version line on
# line 2 does not affect the hash).
filehash()
{
	local digest
	digest=$(sed '2d' "$1" | md5sum)
	printf '%s\n' "${digest%% *}"
}
# Print the name of every network that has a meshrdf/recent file, by
# stripping the fixed prefix and suffix from each matching path
# /var/www/networks/<name>/meshrdf/recent.
list_networks()
{
	local head="/var/www/networks/"
	local tail="/meshrdf/recent"
	find /var/www/networks/ -name recent \
		| grep "meshrdf/recent$" \
		| sed -e "s|$head||" -e "s|$tail||"
}
# Dispatch on the first argument: no arg -> usage, 'start' -> all networks,
# anything else -> that single network.
case "$ARG1" in
	'')
		echo "Usage: $0 <start|networkname>"
		echo
		echo "loops over:"
		list_networks
		exit 1
	;;
	start)
		LIST_NETWORKS="$( list_networks )"
	;;
	*)
		LIST_NETWORKS="$ARG1"
	;;
esac
for NETWORK in $LIST_NETWORKS; do {
	PACKAGE_BASE="/var/www/networks/$NETWORK/packages"
	if [ -d "$PACKAGE_BASE" ]; then
		cd "$PACKAGE_BASE" || exit
	else
		# log "dir '$PACKAGE_BASE' not found, omitting network"
		continue
	fi
	# Ask the build script for the current version ('?') and build a new
	# candidate bumped by 0.1.
	VERSION_NOW="$( $SCRIPT "$NETWORK" "?" )"
	VERSION_NEW="$( $SCRIPT "$NETWORK" "+0.1" )"
	[ -n "$VERSION_NOW" ] || {
		# log "not found any $PACKAGE file, omitting network"
		continue
	}
	# log "[START] making new version $VERSION_NOW -> $VERSION_NEW"
	$SCRIPT "$NETWORK" "$VERSION_NEW" >/dev/null
	# log "[READY] new version"
	F1="$PACKAGE_BASE/${PACKAGE}_${VERSION_NOW}" # expanded later
	F2="$PACKAGE_BASE/${PACKAGE}_${VERSION_NEW}"
	# Extract the postinst script from each .ipk (tar-in-tar) and compare
	# their version-line-insensitive hashes.
	# log "[START] checking hash tar1 = ${PACKAGE}_${VERSION_NOW}*"
	tar xzf "${PACKAGE}_${VERSION_NOW}"* "./control.tar.gz"
	tar xzf "control.tar.gz" "./postinst"
	HASH1="$( filehash "./postinst" )"
	rm "./control.tar.gz"
	rm "./postinst"
	# log "[READY] check hash1"
	# log "[START] checking hash tar2 = ${PACKAGE}_${VERSION_NEW}*"
	tar xzf "${PACKAGE}_${VERSION_NEW}"* "./control.tar.gz"
	tar xzf "control.tar.gz" "./postinst"
	HASH2="$( filehash "./postinst" )"
	rm "./control.tar.gz"
	rm "./postinst"
	# log "[READY] check hash2"
	if [ "$HASH1" = "$HASH2" -a "$ARG2" != 'force' ]; then
		# Payload unchanged: drop the candidate and keep the old version.
		# log "[OK] same hash for network $NETWORK - nothing to do, staying at version $VERSION_NOW"
		I=$(( I + 1 ))
		rm "$F2"* 2>/dev/null
	else
		log "[OK] hash differs, leaving new package v$VERSION_NEW, deleting old, regen index"
		J=$(( J + 1 ))
		rm "$F1"* 2>/dev/null
		/var/www/scripts/gen_package_list.sh start
	fi
	# log "[READY] $NETWORK"
} done
log "[OK] $(( I + J )) overall, $I unchanged, $J updated"
|
bittorf/kalua
|
openwrt-monitoring/build_sshpubkeys_automagic.sh
|
Shell
|
gpl-3.0
| 2,312 |
#!/bin/bash
# Amalgamate the listed framework headers into a single-header release
# (tpp.hpp): strip per-file includes/comments, hoist all #include lines to
# the top, prepend the license, wrap in an include guard, and clang-format.
set -eo pipefail
# Headers in dependency order; concatenation order matters.
FILES="../include/version.hpp
../include/cpp_meta.hpp
../include/omp.hpp
../include/traits.hpp
../include/duration.hpp
../include/regex.hpp
../include/stringify.hpp
../include/assert/loc.hpp
../include/assert/assertion_failure.hpp
../include/assert/assert.hpp
../include/assert/ordering.hpp
../include/assert/equality.hpp
../include/assert/range.hpp
../include/assert/regex.hpp
../include/test/testcase.hpp
../include/test/streambuf_proxy.hpp
../include/test/statistic.hpp
../include/test/testsuite.hpp
../include/test/testsuite_parallel.hpp
../include/report/reporter.hpp
../include/report/xml_reporter.hpp
../include/report/console_reporter.hpp
../include/report/markdown_reporter.hpp
../include/report/json_reporter.hpp
../include/report/reporter_factory.hpp
../include/config.hpp
../include/cmdline_parser.hpp
../include/runner.hpp
../include/api.hpp
../include/tpp.hpp"
TARGET="tpp.hpp"
COPYRIGHT="/*
Copyright (C) 2017 Jarthianur
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
*/"
# Concatenate all headers into a scratch file.
echo "" > $TARGET.tmp
for f in $FILES; do
	cat $f >> $TARGET.tmp
done
# Collect every #include, then strip includes and comments from the body.
grep '#include' $TARGET.tmp > .includes
perl -0pi -e 's/#include [<"].*[">]//g' $TARGET.tmp
perl -0pi -e 's%//[/<]*? .*|/\*[\w\W\n]*?\*/%%g' $TARGET.tmp
# Assemble: license, include guard, hoisted includes, body, guard close.
echo "$COPYRIGHT" > $TARGET
echo "#ifndef TPP_RELEASE_TPP_HPP" >> $TARGET
echo "#define TPP_RELEASE_TPP_HPP" >> $TARGET
cat .includes >> $TARGET
cat $TARGET.tmp >> $TARGET
echo "" >> $TARGET
echo "#endif" >> $TARGET
# Drop any remaining project-local (quoted) includes.
perl -0pi -e 's/#include ".*"//g' $TARGET
# Prefer the pinned clang-format-10 when available.
if ! command -v clang-format-10 &> /dev/null
then
	clang-format -style=file -i $TARGET
else
	clang-format-10 -style=file -i $TARGET
fi
rm $TARGET.tmp
rm .includes
|
Jarthianur/simple-cpp-test-framework
|
release/build_header.sh
|
Shell
|
gpl-3.0
| 2,287 |
#!/bin/bash
# Shop System Extensions:
# - Terms of Use can be found under:
# https://github.com/wirecard/magento2-ee/blob/master/_TERMS_OF_USE
# - License can be found under:
# https://github.com/wirecard/magento2-ee/blob/master/LICENSE
# Download ngrok and jq, then open an authenticated HTTP tunnel to port 80
# on the subdomain given via SUBDOMAIN=<name>.  Requires NGROK_TOKEN in the
# environment.
set -e # Exit with nonzero exit code if anything fails
# Parse KEY=VALUE style arguments; only SUBDOMAIN is recognised.
for ARGUMENT in "$@"; do
	KEY=$(echo "${ARGUMENT}" | cut -f1 -d=)
	VALUE=$(echo "${ARGUMENT}" | cut -f2 -d=)
	case "${KEY}" in
	SUBDOMAIN) SUBDOMAIN=${VALUE} ;;
	*) ;;
	esac
done
NGROK_ARCHIVE_LINK="https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-linux-amd64.zip"
JQ_LINK="http://stedolan.github.io/jq/download/linux64/jq"
# download and install ngrok
curl -s "${NGROK_ARCHIVE_LINK}" >ngrok.zip
unzip ngrok.zip
chmod +x $PWD/ngrok
# Download json parser for determining ngrok tunnel
curl -sO ${JQ_LINK}
chmod +x "${PWD}"/jq
echo "SUBDOMAIN=${SUBDOMAIN}"
# Open ngrok tunnel
"${PWD}"/ngrok authtoken "${NGROK_TOKEN}"
"${PWD}"/ngrok http 80 -subdomain="${SUBDOMAIN}" >/dev/null &
# Query the local ngrok API for the tunnel's public URL.
NGROK_URL_HTTPS=$(curl -s localhost:4040/api/tunnels/command_line | jq --raw-output .public_url)
# allow ngrok to initialize
# Retry once per second; the counter 'c' caps the wait at 50 attempts.
while [ ! "${NGROK_URL_HTTPS}" ] || [ "${NGROK_URL_HTTPS}" = 'null' ]; do
	echo "Waiting for ngrok to initialize"
	NGROK_URL_HTTPS=$(curl -s localhost:4040/api/tunnels/command_line | jq --raw-output .public_url)
	((c++)) && ((c == 50)) && break
	sleep 1
done
|
wirecard/magento2-ee
|
.bin/start-ngrok.sh
|
Shell
|
gpl-3.0
| 1,374 |
#!/bin/bash -x
# Push the node-dev image under both its floating and major-version tags.
docker push arthurmilliken/node-dev:latest
docker push arthurmilliken/node-dev:8
|
arthurmilliken/docker-dev
|
node-dev/v8/docker-push.sh
|
Shell
|
gpl-3.0
| 96 |
#!/bin/bash -i
# Install apt and pip package lists for the ITSEC setup: upgrades the
# system, installs essentials and security tools, purges unwanted packages,
# and disables services that should not run on a pentest box.
bold=$(tput bold)
normal=$(tput sgr0)
# Package-list locations (one package name per line, '#' comments allowed).
APTLSTDIR=/opt/ownsec/0.Initial/lst/apt
PIP2LSTDIR=/opt/ownsec/0.Initial/lst/pip
APPSAPTLSTDIR=/opt/ownsec/0.Initial/src/PT2/3.UsrApp_Install
WIFIAPTLSTDIR=/opt/ownsec/0.Initial/src/PT2/2.Firmware_Install/1.Wifi
echo "${bold}
 ____   ____ _  _______ __  __  ____ ____       _     ___ ____ _____ ____
|  _ \ / ___| |/ /_   _|  \/  |/ ___|  _ \     | |   |_ _/ ___|_   _/ ___|
| |_) | |   | ' /  | | | |\/| | |  _| |_) |____| |    | |\___ \ | | \___ \
|  __/| |___| . \  | | | |  | | |_| |  _ <_____| |___ | | ___) || |  ___) |
|_|    \____|_|\_\ |_| |_|  |_|\____|_| \_\    |_____|___|____/ |_| |____/
INSTALL Package Manager Lists to satisfy Deps & base apps
${normal}"
echo "${bold}
Installing the apt-get lists - go get a coffee, will take a while ...
${normal}"
sudo apt-get update
sudo apt-get upgrade
# awk filters out comment/blank lines; xargs -r skips empty lists.
xargs -a <(awk '/^\s*[^#]/' "$APTLSTDIR/essential.txt") -r -- sudo apt-get install -y
xargs -a <(awk '/^\s*[^#]/' "$APTLSTDIR/itsec-tools.txt") -r -- sudo apt-get install -y
xargs -a <(awk '/^\s*[^#]/' "$APTLSTDIR/remove-initial.txt") -r -- sudo apt-get purge --remove -y
xargs -a <(awk '/^\s*[^#]/' "$PIP2LSTDIR/essential_pip2.txt") -r -- sudo -H pip2 install
# Disable services
sudo service cups stop
sudo systemctl disable cups.service
sudo service cups-browsed stop
sudo systemctl disable cups-browsed.service
sudo service saned stop
sudo systemctl disable saned.service
sudo service dnsmasq stop
sudo systemctl disable dnsmasq.service
# end disable services
sudo apt-get autoremove -y
sudo apt-get install -y linux-image-extra-$(uname -r)
#sudo apt-get install -y linux-image-extra-virtual-$(uname -r)
# Refresh file/library caches and enable PHP modules used later.
sudo updatedb
sudo ldconfig
sudo phpenmod mcrypt
sudo phpenmod mbstring
|
alphaaurigae/ITSEC-Install-Scripts
|
0.Initial/src/PT2/1.Deps_Install/1.Package-Mgr_Lists_Install.sh
|
Shell
|
gpl-3.0
| 1,789 |
#!/bin/bash
#
# Unattended installer for OpenStack Juno on CentOS 7
# Reynaldo R. Martinez P.
# E-Mail: [email protected]
# October 2014
#
# Installation and preparation script for Heat (orchestration service)
#
PATH=$PATH:/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin
# Load the main installer configuration; abort if it is missing.
if [ -f ./configs/main-config.rc ]
then
	source ./configs/main-config.rc
	mkdir -p /etc/openstack-control-script-config
else
	echo "No puedo acceder a mi archivo de configuración"
	echo "Revise que esté ejecutando el instalador/módulos en el directorio correcto"
	echo "Abortando !!!!."
	echo ""
	exit 0
fi
# This module requires the database module to have completed successfully.
if [ -f /etc/openstack-control-script-config/db-installed ]
then
	echo ""
	echo "Proceso de BD verificado - continuando"
	echo ""
else
	echo ""
	echo "Este módulo depende de que el proceso de base de datos"
	echo "haya sido exitoso, pero aparentemente no lo fue"
	echo "Abortando el módulo"
	echo ""
	exit 0
fi
# ... and the Keystone module as well.
if [ -f /etc/openstack-control-script-config/keystone-installed ]
then
	echo ""
	echo "Proceso principal de Keystone verificado - continuando"
	echo ""
else
	echo ""
	echo "Este módulo depende del proceso principal de keystone"
	echo "pero no se pudo verificar que dicho proceso haya sido"
	echo "completado exitosamente - se abortará el proceso"
	echo ""
	exit 0
fi
# Idempotency guard: skip if this module already ran.
if [ -f /etc/openstack-control-script-config/heat-installed ]
then
	echo ""
	echo "Este módulo ya fue ejecutado de manera exitosa - saliendo"
	echo ""
	exit 0
fi
echo ""
echo "Instalando paquetes para Heat"
yum install -y openstack-heat-api \
	openstack-heat-api-cfn \
	openstack-heat-common \
	python-heatclient \
	openstack-heat-engine \
	openstack-utils \
	openstack-selinux
echo "Listo"
echo ""
# Ship our bundled openstack-config helper and load admin credentials.
cat ./libs/openstack-config > /usr/bin/openstack-config
source $keystone_admin_rc_file
echo ""
echo "Configurando Heat"
echo ""
chown -R heat.heat /etc/heat
# Database connection string depends on the configured backend flavor.
case $dbflavor in
"mysql")
	openstack-config --set /etc/heat/heat.conf database connection mysql://$heatdbuser:$heatdbpass@$dbbackendhost:$mysqldbport/$heatdbname
	;;
"postgres")
	openstack-config --set /etc/heat/heat.conf database connection postgresql://$heatdbuser:$heatdbpass@$dbbackendhost:$psqldbport/$heatdbname
	;;
esac
openstack-config --set /etc/heat/heat.conf database retry_interval 10
openstack-config --set /etc/heat/heat.conf database idle_timeout 3600
openstack-config --set /etc/heat/heat.conf database min_pool_size 1
openstack-config --set /etc/heat/heat.conf database max_pool_size 10
openstack-config --set /etc/heat/heat.conf database max_retries 100
openstack-config --set /etc/heat/heat.conf database pool_timeout 10
openstack-config --set /etc/heat/heat.conf DEFAULT host $heathost
openstack-config --set /etc/heat/heat.conf DEFAULT debug false
openstack-config --set /etc/heat/heat.conf DEFAULT verbose false
openstack-config --set /etc/heat/heat.conf DEFAULT log_dir /var/log/heat
# New for Juno
openstack-config --set /etc/heat/heat.conf DEFAULT heat_metadata_server_url http://$heathost:8000
openstack-config --set /etc/heat/heat.conf DEFAULT heat_waitcondition_server_url http://$heathost:8000/v1/waitcondition
openstack-config --set /etc/heat/heat.conf DEFAULT heat_watch_server_url http://$heathost:8003
openstack-config --set /etc/heat/heat.conf DEFAULT heat_stack_user_role heat_stack_user
openstack-config --set /etc/heat/heat.conf DEFAULT auth_encryption_key $heatencriptionkey
openstack-config --set /etc/heat/heat.conf DEFAULT use_syslog False
openstack-config --set /etc/heat/heat.conf DEFAULT heat_api_cloudwatch bind_host 0.0.0.0
openstack-config --set /etc/heat/heat.conf DEFAULT heat_api_cloudwatch bind_port 8003
# Keystone authentication for the Heat service user.
openstack-config --set /etc/heat/heat.conf keystone_authtoken admin_tenant_name $keystoneservicestenant
openstack-config --set /etc/heat/heat.conf keystone_authtoken admin_user $heatuser
openstack-config --set /etc/heat/heat.conf keystone_authtoken admin_password $heatpass
openstack-config --set /etc/heat/heat.conf keystone_authtoken auth_host $keystonehost
openstack-config --set /etc/heat/heat.conf keystone_authtoken auth_port 35357
openstack-config --set /etc/heat/heat.conf keystone_authtoken auth_protocol http
openstack-config --set /etc/heat/heat.conf keystone_authtoken auth_uri http://$keystonehost:5000/v2.0/
openstack-config --set /etc/heat/heat.conf keystone_authtoken identity_uri http://$keystonehost:35357
openstack-config --set /etc/heat/heat.conf keystone_authtoken signing_dir /tmp/keystone-signing-heat
openstack-config --set /etc/heat/heat.conf ec2authtoken auth_uri http://$keystonehost:5000/v2.0/
openstack-config --set /etc/heat/heat.conf DEFAULT control_exchange openstack
# Message broker (RPC) settings depend on the configured broker flavor.
case $brokerflavor in
"qpid")
	openstack-config --set /etc/heat/heat.conf DEFAULT rpc_backend heat.openstack.common.rpc.impl_qpid
	openstack-config --set /etc/heat/heat.conf DEFAULT qpid_reconnect_interval_min 0
	openstack-config --set /etc/heat/heat.conf DEFAULT qpid_username $brokeruser
	openstack-config --set /etc/heat/heat.conf DEFAULT qpid_tcp_nodelay True
	openstack-config --set /etc/heat/heat.conf DEFAULT qpid_protocol tcp
	openstack-config --set /etc/heat/heat.conf DEFAULT qpid_hostname $messagebrokerhost
	openstack-config --set /etc/heat/heat.conf DEFAULT qpid_password $brokerpass
	openstack-config --set /etc/heat/heat.conf DEFAULT qpid_port 5672
	openstack-config --set /etc/heat/heat.conf DEFAULT qpid_topology_version 1
	;;
"rabbitmq")
	openstack-config --set /etc/heat/heat.conf DEFAULT rpc_backend heat.openstack.common.rpc.impl_kombu
	openstack-config --set /etc/heat/heat.conf DEFAULT rabbit_host $messagebrokerhost
	openstack-config --set /etc/heat/heat.conf DEFAULT rabbit_userid $brokeruser
	openstack-config --set /etc/heat/heat.conf DEFAULT rabbit_password $brokerpass
	openstack-config --set /etc/heat/heat.conf DEFAULT rabbit_port 5672
	openstack-config --set /etc/heat/heat.conf DEFAULT rabbit_use_ssl false
	openstack-config --set /etc/heat/heat.conf DEFAULT rabbit_virtual_host $brokervhost
	;;
esac
echo ""
echo "Heat Configurado"
echo ""
#
# Provision/initialize the Heat database
echo ""
echo "Aprovisionando/inicializando BD de HEAT"
echo ""
chown -R heat.heat /var/log/heat
heat-manage db_sync
chown -R heat.heat /etc/heat /var/log/heat
echo ""
echo "Listo"
echo ""
echo ""
echo "Aplicando reglas de IPTABLES"
# Heat API (8004) and CloudFormation API (8000) ports.
iptables -A INPUT -p tcp -m multiport --dports 8000,8004 -j ACCEPT
service iptables save
echo "Listo"
echo ""
echo "Activando Servicios"
echo ""
service openstack-heat-api start
service openstack-heat-api-cfn start
service openstack-heat-engine start
chkconfig openstack-heat-api on
chkconfig openstack-heat-api-cfn on
chkconfig openstack-heat-engine on
# Verify the install actually happened; record completion markers.
testheat=`rpm -qi openstack-heat-common|grep -ci "is not installed"`
if [ $testheat == "1" ]
then
	echo ""
	echo "Falló la instalación de heat - abortando el resto de la instalación"
	echo ""
	exit 0
else
	date > /etc/openstack-control-script-config/heat-installed
	date > /etc/openstack-control-script-config/heat
fi
echo ""
echo "Heat Instalado"
echo ""
|
tigerlinux/openstack-juno-installer-centos7
|
modules/heatinstall.sh
|
Shell
|
gpl-3.0
| 7,098 |
#!/bin/bash
# Generated PegasusLite wrapper: prepares a scratch working directory,
# resolves the Pegasus worker package, then runs the wrapped user task
# under pegasus-kickstart, capturing its exit code.
set -e
pegasus_lite_version_major="4"
pegasus_lite_version_minor="7"
pegasus_lite_version_patch="0"
pegasus_lite_enforce_strict_wp_check="true"
pegasus_lite_version_allow_wp_auto_download="true"
# Helper functions (pegasus_lite_*) come from this common library.
. pegasus-lite-common.sh
pegasus_lite_init
# cleanup in case of failures
trap pegasus_lite_signal_int INT
trap pegasus_lite_signal_term TERM
trap pegasus_lite_exit EXIT
echo -e "\n################################ Setting up workdir ################################" 1>&2
# work dir
export pegasus_lite_work_dir=$PWD
pegasus_lite_setup_work_dir
echo -e "\n###################### figuring out the worker package to use ######################" 1>&2
# figure out the worker package to use
pegasus_lite_worker_package
echo -e "\n##################### setting the xbit for executables staged #####################" 1>&2
# set the xbit for any executables staged
/bin/chmod +x example_workflow-medianmemory_0-1.0
echo -e "\n############################# executing the user tasks #############################" 1>&2
# execute the tasks
# Errexit is suspended so the task's status can be captured in job_ec.
set +e
pegasus-kickstart -n example_workflow::medianmemory_0:1.0 -N ID0000005 -R condorpool -L example_workflow -T 2016-11-07T08:05:01+00:00 ./example_workflow-medianmemory_0-1.0
job_ec=$?
set -e
|
elainenaomi/sciwonc-dataflow-examples
|
dissertation2017/Experiment 1A/instances/10_1_workflow_full_10files_secondary_w1_3sh_3rs_with_annot_with_proj_3s_range/dags/ubuntu/pegasus/example_workflow/20161107T080502+0000/00/00/medianmemory_0_ID0000005.sh
|
Shell
|
gpl-3.0
| 1,246 |
#!/bin/bash
# Generated PegasusLite wrapper (Pegasus 4.6): sets up a scratch working
# directory, resolves the worker package, then runs the wrapped user task
# under pegasus-kickstart, capturing its exit code.
set -e
pegasus_lite_version_major="4"
pegasus_lite_version_minor="6"
pegasus_lite_version_patch="0"
# Helper functions (pegasus_lite_*) come from this common library.
. pegasus-lite-common.sh
pegasus_lite_init
# cleanup in case of failures
trap pegasus_lite_exit INT TERM EXIT
echo -e "\n################################ Setting up workdir ################################" 1>&2
# work dir
export pegasus_lite_work_dir=$PWD
pegasus_lite_setup_work_dir
echo -e "\n###################### figuring out the worker package to use ######################" 1>&2
# figure out the worker package to use
pegasus_lite_worker_package
echo -e "\n##################### setting the xbit for executables staged #####################" 1>&2
# set the xbit for any executables staged
/bin/chmod +x example_workflow-averagecpu_0-1.0
echo -e "\n############################# executing the user tasks #############################" 1>&2
# execute the tasks
# Errexit is suspended so the task's status can be captured in job_ec.
set +e
pegasus-kickstart -n example_workflow::averagecpu_0:1.0 -N ID0000004 -R condorpool -L example_workflow -T 2016-05-12T11:11:05+00:00 ./example_workflow-averagecpu_0-1.0
job_ec=$?
set -e
|
elainenaomi/sciwonc-dataflow-examples
|
sbbd2016/data/google-cloud/w-03/dags/elaine.n.watanabe/pegasus/example_workflow/20160512T111106+0000/averagecpu_0_ID0000004.sh
|
Shell
|
gpl-3.0
| 1,086 |
#!/bin/sh
# Build TissueStack and package it as an RPM. Detects the distro family
# (SUSE / CentOS / Fedora) to pick the right spec file, builds either the
# server or clients target, then invokes rpmbuild.
# NOTE(review): 'exit -1' below is non-standard; bash maps it to status 255.
clear
echo "Setting up development tree in $HOME"
# Recreate ~/rpmbuild from scratch via rpmdevtools.
rm -rf $HOME/rpmbuild
rpmdev-setuptree
if [ $? -ne 0 ]; then
echo "Install the rpm development tools first!"
exit -1
fi
export TISSUESTACK_BUILD_VERSION=2.3
export IS_RELEASE=1
SPEC_FILE=tissuestack.spec
TARGET=server
# Distro detection: a non-zero character count means the pattern matched.
IS_SUSE=`cat /etc/*-release | grep -i "suse"| wc -c`
IS_CENTOS=`cat /etc/*-release | grep -i "centos"| wc -c`
IS_CENTOS_6_X=`cat /etc/*-release | grep -i "centos release 6."| wc -c`
PACKAGE_MANAGER="dnf"
if [ $IS_CENTOS -gt 0 ]; then
PACKAGE_MANAGER="yum"
fi;
REDHAT_RPM_CONFIG=`"$PACKAGE_MANAGER" list installed | grep "redhat-rpm-config" | wc -l`
if [ $IS_SUSE -eq 0 ] && [ $REDHAT_RPM_CONFIG -eq 0 ]; then
echo "Install redhat-rpm-config first!"
exit -1
fi;
# CentOS 6 needs the devtoolset-1.1 toolchain for C++11 support.
if [ $IS_CENTOS_6_X -gt 0 ]; then
CENTOS_DEVTOOLS_1_1=`yum list installed | grep "devtoolset-1.1" | wc -c`
if [ $CENTOS_DEVTOOLS_1_1 -eq 0 ]; then
echo "Install devtools 1.1 first. CentOS 6.X needs it to compile C++ V11 code."
echo "Execute: 'cd /etc/yum.repos.d;wget http://people.centos.org/tru/devtools-1.1/devtools-1.1.repo';"
echo "Then run: 'yum install devtoolset-1.1-gcc devtoolset-1.1-gcc-c++'"
exit -1
fi;
echo -e "\nIMPORTANT:\n"
echo "if the build fails with a message similar to these below, execute the line 'scl enable devtoolset-1.1 bash' prior to running this script"
echo "cc1plus: error: unrecognized command line option -std=c++11"
echo -e "cc1plus: error: unrecognized command line option -std=gnu++11\n"
fi;
CURRENT_DIR=`pwd`
IS_FEDORA=`cat /etc/*-release | grep -i "fedora"| wc -c`
if [ $IS_CENTOS -ne 0 ] || [ $IS_FEDORA -ne 0 ]; then
export IS_CENTOS_OR_FEDORA=1
fi
# systemd-based distros (CentOS 7+, Fedora, SUSE) use a different spec.
# NOTE(review): 'if ( ... )' runs the test in a subshell; it works but the
# parentheses are not grouping operators here.
if ( [ $IS_CENTOS -ne 0 ] && [ $IS_CENTOS_6_X -eq 0 ]) || [ $IS_FEDORA -ne 0 ]; then
SPEC_FILE=tissuestack_sysctl.spec
export USES_SYSTEMCTL=1
fi
if [ $IS_SUSE -ne 0 ]; then
SPEC_FILE=tissuestack_suse.spec
export USES_SYSTEMCTL=1
fi;
# Any first argument other than "CLIENTS" is treated as an Apache port
# number for a PKI build (substituted into the spec below).
PKI_BUILD=0
if [ $# -ne 0 ]; then
if [ "$1" == "CLIENTS" ]; then
TARGET=clients
SPEC_FILE=tissuestack_clients.spec
else
PKI_BUILD=1
fi;
fi;
echo "Calling TissueStack make with target $TARGET"
cd $CURRENT_DIR/src/c++;make $TARGET
if [ $? -ne 0 ]; then
echo "Build was NOT successful"
exit -1
fi;
echo "Copying spec file and source tar"
cp -f $CURRENT_DIR/rpm/$SPEC_FILE $HOME/rpmbuild/SPECS
cd $HOME/rpmbuild/SPECS/
# alter the standard apache port for PKI build
if [ $PKI_BUILD -ne 0 ]; then
sed -i "s/APACHE_PORT=80/APACHE_PORT=$1/g" $SPEC_FILE
fi
echo "Calling RPM build now ..."
rpmbuild -bb $SPEC_FILE
|
NIF-au/TissueStack
|
BUILD_ME_LIKE_THIS_FOR_RPM.sh
|
Shell
|
gpl-3.0
| 2,589 |
#!/bin/sh
#
# Common aliases
#
# Colorize ls/grep output when the terminal supports it.
alias ls='ls --color=auto'
alias grep='grep --color=auto'
# Force a plain xterm TERM for ssh so remote hosts without this terminal's
# terminfo entry still render correctly.
alias ssh="TERM=xterm ssh"
|
NeoMorfeo/dotfiles
|
common/alias.sh
|
Shell
|
gpl-3.0
| 116 |
#!/bin/bash
# Losslessly optimize every PNG/JPEG under the WordPress content tree,
# remembering already-processed files in a persistent cache so each image
# is optimized at most once across runs.
IMAGE_FILES="/var/www/html/wp-content/"
CACHE_FILE="$HOME/.optimized-image-cache.sh"
# Load the Cache File or Create a New Array
# (the cache file is a 'declare -p' dump, so sourcing it recreates the
# OPTIMIZED_IMAGES associative array with all previously handled paths)
if [ -f "$CACHE_FILE" ]; then
source -- "$CACHE_FILE"
else
declare -A OPTIMIZED_IMAGES
fi
# Add $1 to the Cache Array
# Record the path given as $1 in the cache so later runs skip it.
addToCache() {
  local key=$1
  OPTIMIZED_IMAGES["$key"]=1
}
# Check the cache for existence of $1
# Print "1" when the path in $1 has NOT been processed yet; print nothing
# (and return success) when it is already cached.
notInCache() {
  local cached=${OPTIMIZED_IMAGES[$1]:-}
  [ -n "$cached" ] || echo 1
}
# Optimize PNGs
function optimizePNG {
NOT_IN_CACHE=$(notInCache "$1")
if [[ $NOT_IN_CACHE ]]; then
addToCache "$1"
optipng -quiet -o7 "$1" > /dev/null
fi
}
# Optimize JPGs
# Losslessly optimize the JPEG at $1 (strip metadata, make progressive)
# unless it was already processed, recording it in the cache.
optimizeJPG() {
  if [ "$(notInCache "$1")" ]; then
    addToCache "$1"
    # BUG FIX: the original redirection was "2&>1", which passed a literal
    # "2" argument to jpegtran and sent all output to a file named "1".
    # "2>&1" is the intended stderr-to-stdout duplication.
    jpegtran -copy none -progressive -optimize -outfile "$1" "$1" > /dev/null 2>&1
  fi
}
# Optimize the Images: walk the content tree once per image family.
# IFS= and read -r keep paths with leading blanks or backslashes intact
# (the original bare 'read' mangled both).
while IFS= read -r file; do
optimizePNG "$file"
done < <(find "$IMAGE_FILES" -type f -iname "*.png")
# BUG FIX: find's -o binds looser than the implicit -a, so the original
# "-type f -iname '*.jpg' -o -iname '*.jpeg'" also matched non-regular
# entries named *.jpeg; the parentheses group the name tests correctly.
while IFS= read -r file; do
optimizeJPG "$file"
done < <(find "$IMAGE_FILES" -type f \( -iname "*.jpg" -o -iname "*.jpeg" \))
# Save the Cache File ('declare -p' output recreates the array on source);
# quoted in case $HOME ever contains whitespace.
declare -p OPTIMIZED_IMAGES > "$CACHE_FILE"
|
Fellowship-Of-Intentional-Community/Debian-Webserver-Playbook
|
playbook/roles/common/files/optimize_images.sh
|
Shell
|
gpl-3.0
| 1,189 |
#!/bin/sh
# Integration tests for the OSX x86 payloads: each test starts a payload
# (or a listener), drives it over TCP with nc, and greps for the expected
# arithmetic result. NOTE(review): the shebang says /bin/sh but the file
# uses the 'function' keyword below - presumably always run under bash.
# TCP port shared by every bind/reverse payload in this suite.
LPORT=13330
# Launch the payload binary given as $1 under test_component in the
# background, then give it one second to come up before the test connects.
function run_payload()
{
# Quote the path so word splitting cannot break it (the original passed
# $1 unquoted).
./test_component "$1" &
sleep 1
}
# Start the bind-shell payload, connect with nc, and check that the shell
# evaluates "expr 1234 + 5678" to 6912.
function test_single_bind_tcp_shell()
{
run_payload ../../bin/single_bind_tcp_shell.bin
echo "expr 1234 + 5678" | nc -4 -vv -w 5 localhost $LPORT | grep "6912"
wait
# NOTE(review): $? here is the status of 'wait' (the backgrounded payload),
# not of the grep above - presumably intentional, but verify.
return $?
}
# Listen for the reverse shell, run the payload, and check that the shell
# evaluates "expr 1234 + 5678" to 6912.
function test_single_reverse_tcp_shell()
{
# BUG FIX: the original assigned RESULT inside the backgrounded subshell,
# where it is invisible to this shell, so 'return $RESULT' returned an
# empty value. Track the listener's PID instead and recover the grep
# status via 'wait <pid>'.
(echo "expr 1234 + 5678" | nc -4 -vv -w 5 -l localhost $LPORT | grep "6912") &
listener_pid=$!
sleep 1
(./test_component ../../bin/single_reverse_tcp_shell.bin)
# wait <pid> yields that job's exit status: 0 iff "6912" was seen.
wait $listener_pid
return $?
}
# Start the bind stager, feed it the shell stage (length-prefixed by
# write_size_and_data.rb) followed by a command, and check the result.
function test_staged_bind_tcp_shell()
{
run_payload ../../bin/stager_bind_tcp.bin
(./write_size_and_data.rb ../../bin/stage_shell.bin ; echo "expr 1234 + 5678" ) | nc -4 -vv -w 5 localhost $LPORT | grep "6912"
wait
# NOTE(review): $? is the status of 'wait' (the backgrounded payload),
# not of the grep above - presumably intentional, but verify.
return $?
}
# Listen, serve the shell stage to the connecting reverse stager, then send
# a command and check the result.
function test_staged_reverse_tcp_shell()
{
# BUG FIX: the original assigned RESULT inside the backgrounded subshell
# where the parent never sees it; capture the listener PID and take its
# exit status (the grep result) from 'wait <pid>' instead.
((./write_size_and_data.rb ../../bin/stage_shell.bin; echo "expr 1234 + 5678" ) | nc -4 -vv -w 5 -l localhost $LPORT | grep "6912") &
listener_pid=$!
sleep 1
./test_component ../../bin/stager_reverse_tcp.bin
wait $listener_pid
return $?
}
# Start the bind stager, inject the isight bundle, and verify the payload
# streams back a JPEG snapshot (first 4 bytes are a length prefix).
function test_staged_bind_tcp_bundleinject()
{
# Setup
run_payload ../../bin/stager_bind_tcp.bin
# Test
TMPFILE=`mktemp isightXXXXXX` || exit 1
# dd skips the 4-byte length header so TMPFILE holds the raw image.
( ./write_size_and_data.rb ../../bin/stage_bundleinject.bin ; ./write_size_and_data.rb ../../../../bundles/isight/isight.bundle ) | nc -4 -vv -w 5 localhost $LPORT | (dd bs=1 skip=4 of=$TMPFILE)
# Verify
file $TMPFILE | grep JPEG
RESULT=$?
# Cleanup
rm $TMPFILE
wait
return $RESULT
}
# Reverse variant of the bundleinject test: listen, serve the bundle stage
# to the connecting stager, and verify a JPEG comes back.
function test_staged_reverse_tcp_bundleinject()
{
# Setup
TMPFILE=`mktemp isightXXXXXX` || exit 1
(( ./write_size_and_data.rb ../../bin/stage_bundleinject.bin ; ./write_size_and_data.rb ../../../../bundles/isight/isight.bundle ) | nc -4 -vv -l -w 5 localhost $LPORT | dd bs=1 skip=4 of=$TMPFILE) &
sleep 1
run_payload ../../bin/stager_reverse_tcp.bin
wait
# Verify
file $TMPFILE | grep JPEG
RESULT=$?
# Cleanup only on success: a failing run leaves TMPFILE behind for
# inspection (presumably deliberate - verify).
if [ $RESULT -eq 0 ]; then
# Cleanup
rm $TMPFILE
fi
return $RESULT
}
# Pause between tests - presumably to let the previous connection's
# TIME_WAIT on $LPORT expire before the port is reused (TODO confirm).
SLEEP=65
echo "==> Testing single_reverse_tcp_shell..."
test_single_reverse_tcp_shell || exit 1
echo "Sleeping $SLEEP seconds..."
sleep $SLEEP
echo "==> Testing single_bind_tcp_shell..."
test_single_bind_tcp_shell || exit 1
echo "Sleeping $SLEEP seconds..."
sleep $SLEEP
echo "==> Testing stager_bind_tcp + stage_shell..."
test_staged_bind_tcp_shell || exit 1
echo "Sleeping $SLEEP seconds..."
sleep $SLEEP
echo "==> Testing stager_reverse_tcp + stage_shell..."
test_staged_reverse_tcp_shell || exit 1
echo "Sleeping $SLEEP seconds..."
sleep $SLEEP
echo "==> Testing stager_bind_tcp + bundleinject + isight.bundle..."
test_staged_bind_tcp_bundleinject || exit 1
echo "Sleeping $SLEEP seconds..."
sleep $SLEEP
echo "==> Testing stager_reverse_tcp + bundleinject + isight.bundle..."
test_staged_reverse_tcp_bundleinject || exit 1
echo "Sleeping $SLEEP seconds..."
echo
echo "==> All tests passed successfully!"
echo
|
cSploit/android.MSF
|
external/source/osx/x86/src/test/run_tests.sh
|
Shell
|
gpl-3.0
| 3,047 |
#!/bin/bash
# copy binaries to release folder
# Recreate the per-ABI release directories (32-bit ABIs under j32, 64-bit
# under j64), clear any stale binaries, and copy the freshly built
# libraries over from the Android build output.
jgit=..
jplatform=android
for abi in x86 armeabi armeabi-v7a; do
  dest="$jgit/release/$jplatform/j32/$abi"
  mkdir -p "$dest"
  rm -f "$dest"/*
  cp "$jgit/android/libs/$abi"/* "$dest"/.
done
for abi in x86_64 arm64-v8a; do
  dest="$jgit/release/$jplatform/j64/$abi"
  mkdir -p "$dest"
  rm -f "$dest"/*
  cp "$jgit/android/libs/$abi"/* "$dest"/.
done
|
PlanetAPL/j-language
|
android/release-android.sh
|
Shell
|
gpl-3.0
| 907 |
#! /bin/bash
# For every chip??/* directory, flatten FITS files to their first HDU and
# expand .fits.fz (fpack-compressed) files, keeping whichever form is
# smaller than the ~18.3 MB threshold.
# NOTE(review): imcopy is presumably the CFITSIO utility and "$i[1]"
# selects HDU 1 - confirm; the backslash in \mv / \rm bypasses aliases.
# Optional $1 is a filename prefix filter.
pref=""
if [ $# -ge 1 ]; then
pref=$1
shift
fi
for d in chip??/*; do
if [ -d $d ]; then
cd $d
# Replace each FITS file with a copy of just its first extension.
for i in ${pref}*.fits; do
imcopy $i[1] bid
\mv bid $i
done
# Expand compressed files, then keep only one of the pair based on size.
for i in ${pref}*.fits.fz; do
j=`echo $i | sed "s/.fz//"`
if [ ! -f $j ]; then
imcopy $i[1] $j
fi
if [ -f $j ]; then
size=`wc -c $j | cut -d " " -f 1`
# Expanded file is full-size: drop the compressed copy; otherwise the
# expansion is suspect, so drop it and keep the .fz original.
if [ ${size} -gt 18300000 ]; then
\rm -f $i
# \mv $j D/
else
\rm -f $j
fi
fi
done
cd ../../
fi
done
exit
|
OSSOS/MOP
|
src/jmp/check_scripts/ExtractImage.sh
|
Shell
|
gpl-3.0
| 559 |
#! /bin/bash
# Sync the BeagleBone's clock, then mount its root filesystem locally over
# sshfs (reconnecting automatically if the link drops).
./set_beagle_clock.sh
sshfs -o reconnect [email protected]:/ /media/beagle
|
rvega/libbbb_pruio
|
scripts/run-on-computer/mount_beagle.sh
|
Shell
|
gpl-3.0
| 87 |
#!/bin/bash
# Written on 15 June 2016.
# sudo tail -n 1000 /var/log/messages | grep -c "BLOCKED"
# Count today's firewall "BLOCKED" entries in the syslog.
CurMonth=$(date +%b)
CurDay=$(date +%d)
# Filter and number the Incoming but blocked requests within today.
# Syslog writes the day without a leading zero, so strip one if present
# (the original did this in an if whose two branches ran the same grep).
CurDay=${CurDay#0}
grep 'BLOCKED' /var/log/messages | grep -c "$CurMonth $CurDay"
echo
exit
|
mdrights/Myscripts
|
iptables-scripts/iptables-blocked-numbers.sh
|
Shell
|
gpl-3.0
| 451 |
#!/bin/bash
# Rewrite the vendored AWS SDK namespace from "Aws" to "ILAB_Aws" in-place
# so it cannot collide with another copy of the SDK loaded by WordPress.
# NOTE(review): "sed -i ''" is the BSD/macOS form; GNU sed would treat ''
# as a script - this script presumably only runs on macOS.
# Each pass covers a different syntactic context: use statements,
# namespace declarations, double-quoted, single-quoted, and fully
# qualified "\Aws\" references.
find vendor/aws/aws-sdk-php/src/ -type f -exec sed -i '' 's#use Aws\\#use ILAB_Aws\\#g' {} +
find vendor/aws/aws-sdk-php/src/ -type f -exec sed -i '' 's#namespace Aws#namespace ILAB_Aws#g' {} +
find vendor/aws/aws-sdk-php/src/ -type f -exec sed -i '' 's#"Aws\\#"ILAB_Aws\\#g' {} +
find vendor/aws/aws-sdk-php/src/ -type f -exec sed -i '' 's#'"'"'Aws\\#'"'"'ILAB_Aws\\#g' {} +
find vendor/aws/aws-sdk-php/src/ -type f -exec sed -i '' 's#\\Aws\\#\\ILAB_Aws\\#g' {} +
|
jawngee/ILab-Media-Tools
|
prefix-aws.sh
|
Shell
|
gpl-3.0
| 478 |
#!/bin/bash
# Provision an OpenVPN-management Django app: install system packages,
# Python dependencies, run migrations and load the initial fixtures.
# NOTE(review): "update" in this list makes the loop run
# "apt-get install -y update", which fails - presumably an
# "apt-get update" was intended before the loop; verify.
packages="update bridge-utils openvpn libssl-dev openssl easy-rsa python-pip libpq-dev python-dev libjpeg-dev libpng-dev git"
pippack="Django==1.10 django-allauth"
for package in $packages
do
echo "install packet $package"
apt-get install -y $package
done
for pack in $pippack
do
pip install "$pack"
done
pip install -r requirements.txt
# Create and apply migrations for the vpn app, then the rest of the site.
python manage.py makemigrations vpn
python manage.py migrate vpn
python manage.py migrate
#python manage.py migrate --fake vpn
# Seed the database with the shipped fixtures.
python manage.py loaddata ./vpn/fixtures/initial_data.json
python manage.py loaddata ./vpn/fixtures/users.json
|
Oleh-Hrebchuk/OpenVPN-TryFalse
|
configure.sh
|
Shell
|
gpl-3.0
| 596 |
#!/bin/bash
# Get and compile ffmpeg
# Builds x264 from source first (static, no OpenCL), then ffmpeg with
# libx264, libmp3lame and the Raspberry Pi OMX hardware encoder enabled.
apt-get -y install git gcc make binutils
cd /usr/src
git clone git://git.videolan.org/x264
cd x264
# Cross-target configure for ARM; static lib only.
./configure --host=arm-unknown-linux-gnueabi --enable-static --disable-opencl
make
make install
cd ..
git clone git://source.ffmpeg.org/ffmpeg.git
cd ffmpeg
apt-get -y install libmp3lame-dev libomxil-bellagio-dev
# --enable-omx-rpi uses the Pi's hardware H.264 encoder; GPL+nonfree are
# required for the x264/omx combination.
./configure --arch=armel --target-os=linux --enable-gpl --enable-libx264 --enable-libmp3lame --enable-omx-rpi --disable-debug --enable-version3 --enable-nonfree
make -j4
make install
|
joaquinromero/raspberry
|
minibian/3.install_ffmpeg.sh
|
Shell
|
gpl-3.0
| 533 |
#!/bin/bash
#
# note, uses & deletes test indices 7xx (models), and 8xx (outputs)
# to really clean all tests, remove test_07?? and test_08?? dirs, plus tiles in sim input dirs (uncomment below)
# by default, expects training data in sim 3000, and applies to 3001 and 3002 (shortened data sets)
#
# Each test trains a model (out 0) into a test_07xx dir, then applies it
# (out 1) to a smaller data set, writing results into test_08xx.
BPATH=../data/
SAFEDEL='rm -r'
SCENE_TRAIN=3000
SCENE_NP=3001
SCENE_UNI=3002
SHOW_INPUTS=1
EPOS=1000
echo Using data path ${BPATH}
# --- numpy npz file tests ---
# optionally, clear all tiles
#find ${BPATH}/sim_${SCENE_TRAIN}/ -iname "tiles*" -exec rm -fr \{\} \;
#find ${BPATH}/sim_${SCENE_NP}/ -iname "tiles*" -exec rm -fr \{\} \;
# remove old output dir
echo
echo "************** Test 1 (npz) **************"
${SAFEDEL} ${BPATH}test_0700
${SAFEDEL} ${BPATH}test_0800
# train a model
python tf_train.py out 0 basePath ${BPATH} useVelocities 0 trainingEpochs ${EPOS} alwaysSave 1 testPathStartNo 700 fromSim ${SCENE_TRAIN} simSizeLow 48
# and apply to small data set
python tf_train.py out 1 basePath ${BPATH} useVelocities 0 testPathStartNo 800 fromSim ${SCENE_NP} loadModelTest 700 simSizeLow 96 outInputs ${SHOW_INPUTS}
# same for a model using velocities
echo
echo "************** Test 2 (npz,vel) **************"
${SAFEDEL} ${BPATH}test_0710
${SAFEDEL} ${BPATH}test_0810
python tf_train.py out 0 basePath ${BPATH} useVelocities 1 trainingEpochs ${EPOS} alwaysSave 1 testPathStartNo 710 fromSim ${SCENE_TRAIN} simSizeLow 48
python tf_train.py out 1 basePath ${BPATH} useVelocities 1 testPathStartNo 810 fromSim ${SCENE_NP} loadModelTest 710 simSizeLow 96 outInputs ${SHOW_INPUTS}
# --- keras version tests ---
echo
echo "************** Test 3 (keras) **************"
${SAFEDEL} ${BPATH}test_0720
${SAFEDEL} ${BPATH}test_0820
python tf_train_keras.py out 0 basePath ${BPATH} useVelocities 0 trainingEpochs ${EPOS} testPathStartNo 720 fromSim ${SCENE_TRAIN} simSizeLow 48
python tf_train_keras.py out 1 basePath ${BPATH} useVelocities 0 testPathStartNo 820 fromSim ${SCENE_NP} loadModelTest 720 simSizeLow 96 outInputs ${SHOW_INPUTS}
|
CoderDuan/mantaflow
|
tensorflow/example1_smoke_tiled/runTests.sh
|
Shell
|
gpl-3.0
| 2,117 |
#!/bin/sh
# Phoronix Test Suite install hook: unpack the Stockfish Windows binary
# and generate the 'stockfish' wrapper the harness invokes.
unzip -o stockfish_12_win_x64_avx2.zip
# The multi-line echo writes a two-line wrapper script; the escaped \$
# defers expansion of NUM_CPU_CORES/LOG_FILE to benchmark run time.
echo "#!/bin/sh
./stockfish_20090216_x64_avx2.exe bench 128 \$NUM_CPU_CORES 24 default depth > \$LOG_FILE 2>&1" > stockfish
chmod +x stockfish
|
phoronix-test-suite/phoronix-test-suite
|
ob-cache/test-profiles/pts/stockfish-1.2.0/install_windows.sh
|
Shell
|
gpl-3.0
| 194 |
:
# ------------------------------------
# $APPASERVER_HOME/utility/ufw_deny.sh
# ------------------------------------
# Insert a top-priority ufw rule denying all traffic from the given
# address. Usage: ufw_deny.sh ip_address
if [ "$#" -ne 1 ]
then
echo "Usage: $0 ip_address" 1>&2
exit 1
fi
# Quote the address so a malformed argument cannot word-split into extra
# ufw tokens (the original passed $1 unquoted).
sudo ufw insert 1 deny from "$1"
exit 0
|
timhriley/appaserver
|
utility/ufw_deny.sh
|
Shell
|
gpl-3.0
| 228 |
#!/bin/bash
# Launcher for Visual Netkit: derive the install directory from this
# script's resolved location, export the plugin search path, and start
# the VisualNetkit binary that lives alongside it.
# the visual netkit plugin paths (separed by ":")
APP=`which $0`
# Strip the trailing "/visualnetkit.sh" to get the install directory.
APP_PATH=${APP/\/visualnetkit.sh/}
export VISUAL_NETKIT_PLUGINS="$APP_PATH/plugins:$HOME/.visualnetkit/plugins"
$APP_PATH/VisualNetkit
|
slux83/visual-netkit
|
bin/visualnetkit.sh
|
Shell
|
gpl-3.0
| 215 |
#!/bin/sh
# Revert to laptop-only display: disable HDMI and virtual outputs and set
# the internal panel (eDP1) to its native 1366x768 mode.
xrandr --output HDMI1 --off --output VIRTUAL1 --off --output eDP1 --mode 1366x768 --pos 0x0 --rotate normal
|
luffy1012/Scripts
|
normal_screen.sh
|
Shell
|
gpl-3.0
| 118 |
#!/bin/bash
# Recover from APT "BADSIG" errors by wiping the cached package lists and
# forcing a clean re-download of the repository metadata.
sudo apt-get clean
cd /var/lib/apt
# Keep the previous lists as lists.old in case the refresh fails.
sudo rm -rf lists.old
sudo mv lists lists.old
sudo mkdir -p lists/partial
sudo apt-get clean
sudo apt-get update
|
Fbonazzi/Scripts
|
badsig.sh
|
Shell
|
gpl-3.0
| 161 |
#!/bin/bash
# -*- mode: shell-script; fill-column: 80; -*-
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
#
# Copyright (c) 2017, Joyent, Inc.
#
# Timestamp + file:line prefix for the xtrace output enabled below.
PS4='[\D{%FT%TZ}] ${BASH_SOURCE}:${LINENO}: ${FUNCNAME[0]:+${FUNCNAME[0]}(): }'
export PS4
set -o xtrace
#
# Disable the protection against RST reflection denial-of-service attacks.
# In order for system liveliness when PostgreSQL is not running, we need to
# be able to send a RST for every inbound connection to a closed port. This
# is only safe because we run Manatee on an isolated network.
#
# The long-term stability of this interface is not completely clear, so we
# ignore the exit status of ndd(1M). To do otherwise may unintentionally
# create a flag day with future platform versions.
#
/usr/sbin/ndd -set /dev/tcp tcp_rst_sent_rate_enabled 0
# set shared_buffers to 1/4 provisoned RSS
set -o errexit
set -o pipefail
# prtconf -m prints the zone's memory size in megabytes (SmartOS).
shared_buffers="$(( $(prtconf -m) / 4 ))MB"
# maintenance_work_mem should be 1/128th of the zone's dram.
maintenance_work_mem="$(( $(prtconf -m) / 128 ))MB"
# Render postgresql.sdc.conf in the given config directory by substituting
# the memory-tuning placeholders in the pristine .in template.
# Globals: shared_buffers, maintenance_work_mem (read).
# Arguments: $1 - directory containing postgresql.sdc.conf
function expandPgConfig() {
    local conf_dir=$1
    local conf="$conf_dir/postgresql.sdc.conf"
    local template="$conf.in"
    # Preserve the pristine config as the template on first run only.
    [[ -f $template ]] || cp "$conf" "$template"
    sed -e "s#@@SHARED_BUFFERS@@#$shared_buffers#g" \
        -e "s#@@MAINTENANCE_WORK_MEM@@#$maintenance_work_mem#g" \
        "$template" > "$conf"
}
# Tune both supported PostgreSQL versions' configs.
expandPgConfig /opt/smartdc/manatee/etc/9.2
expandPgConfig /opt/smartdc/manatee/etc/9.6
# The remainder is best-effort: do not abort boot on failures.
set +o errexit
set +o pipefail
# For SDC we want to check if we should enable or disable the sitter on each boot.
svccfg import /opt/smartdc/manatee/smf/manifests/sitter.xml
disableSitter=$(json disableSitter < /opt/smartdc/manatee/etc/sitter.json)
if [[ -n ${disableSitter} && ${disableSitter} == "true" ]]; then
# HEAD-1327 we want to be able to disable the sitter on the 2nd manatee we
# create as part of the dance required to go from 1 -> 2+ nodes. This should
# only ever be set for the 2nd manatee.
echo "Disabing sitter per /opt/smartdc/manatee/etc/sitter.json"
svcadm disable manatee-sitter
else
echo "Starting sitter"
svcadm enable manatee-sitter
fi
exit 0
|
joyent/sdc-manatee
|
boot/configure.sh
|
Shell
|
mpl-2.0
| 2,405 |
#!/bin/bash
# Try to change into the directory given as $1 and report the outcome.
dir=$1
# Quote "$dir" so paths containing spaces work. Note this also fixes the
# no-argument case: the original unquoted form collapsed to a bare "cd",
# silently changing to $HOME and reporting success.
if cd "$dir" 2>/dev/null; then
echo "now in $dir"
else
echo "can't change to $dir"
fi
|
cxsjabc/basic
|
bash/_basic/if3.sh
|
Shell
|
agpl-3.0
| 109 |
#!/bin/sh
# Launcher for the WildFly Elytron command-line tool: resolves JBOSS_HOME
# and the JVM, then runs org.wildfly.security.tool.ElytronTool.
DIRNAME=`dirname "$0"`
PROGNAME=`basename "$0"`
GREP="grep"
# Use the maximum available, or set MAX_FD != -1 to use that
MAX_FD="maximum"
#
# Helper to complain.
#
# Print a diagnostic message prefixed with the launcher's name.
warn() {
    printf '%s: %s\n' "$PROGNAME" "$*"
}
#
# Helper to puke.
#
# Print the message via warn() and abort with status 1.
# NOTE(review): $* is deliberately left unquoted here to match the
# original behavior (arguments are re-split before warn joins them).
die() {
warn $*
exit 1
}
# OS specific support (must be 'true' or 'false').
cygwin=false;
darwin=false;
case "`uname`" in
CYGWIN*)
cygwin=true
;;
Darwin*)
darwin=true
;;
esac
# For Cygwin, ensure paths are in UNIX format before anything is touched
if $cygwin ; then
[ -n "$JBOSS_HOME" ] &&
JBOSS_HOME=`cygpath --unix "$JBOSS_HOME"`
[ -n "$JAVA_HOME" ] &&
JAVA_HOME=`cygpath --unix "$JAVA_HOME"`
[ -n "$JAVAC_JAR" ] &&
JAVAC_JAR=`cygpath --unix "$JAVAC_JAR"`
fi
# Setup JBOSS_HOME: default to this script's parent directory; otherwise
# warn when a caller-supplied JBOSS_HOME points somewhere else.
RESOLVED_JBOSS_HOME=`cd "$DIRNAME/.."; pwd`
if [ "x$JBOSS_HOME" = "x" ]; then
# get the full path (without any relative bits)
JBOSS_HOME=$RESOLVED_JBOSS_HOME
else
SANITIZED_JBOSS_HOME=`cd "$JBOSS_HOME/.."; pwd`
# BUG FIX: the original compared against the undefined $RESOLVED_JBOSS
# (typo for $RESOLVED_JBOSS_HOME), so the warning fired whenever
# JBOSS_HOME was set at all.
# NOTE(review): the comparison still contrasts this script's parent with
# JBOSS_HOME's parent - confirm that asymmetry is intended upstream.
if [ "$RESOLVED_JBOSS_HOME" != "$SANITIZED_JBOSS_HOME" ]; then
echo "WARNING JBOSS_HOME may be pointing to a different installation - unpredictable results may occur."
echo ""
fi
fi
export JBOSS_HOME
# Setup the JVM: prefer $JAVA, then $JAVA_HOME/bin/java, then PATH lookup.
if [ "x$JAVA" = "x" ]; then
if [ "x$JAVA_HOME" != "x" ]; then
JAVA="$JAVA_HOME/bin/java"
else
JAVA="java"
fi
fi
# For Cygwin, switch paths to Windows format before running java
if $cygwin; then
JBOSS_HOME=`cygpath --path --windows "$JBOSS_HOME"`
fi
# Extra classpath entries may be appended via ELYTRON_TOOL_ADDONS.
if [ "x$ELYTRON_TOOL_ADDONS" != "x" ]; then
SEP=:
fi
# NOTE(review): the final single-quoted argument '{"$0"}"$@"' looks
# suspicious (upstream launchers pass '"$@"'); left untouched here.
eval \"$JAVA\" $JAVA_OPTS -cp \""$JBOSS_HOME"/bin/wildfly-elytron-tool.jar$SEP$ELYTRON_TOOL_ADDONS\" \
org.wildfly.security.tool.ElytronTool \
'{"$0"}"$@"'
|
JiriOndrusek/wildfly-core
|
core-feature-pack/src/main/resources/content/bin/elytron-tool.sh
|
Shell
|
lgpl-2.1
| 1,764 |
#!/bin/bash
# Note that JHBUILD_SOURCES should be defined to contain the path to the root
# of the jhbuild sources. The script assumes that it resides in the
# tools/gen_scripts/ directory and the XML file will be placed in pango/src.
if [ -z "$JHBUILD_SOURCES" ]; then
echo -e "JHBUILD_SOURCES must contain the path to the jhbuild sources."
exit 1;
fi
PREFIX="$JHBUILD_SOURCES"
ROOT_DIR="$(dirname "$0")/../.."
OUT_DIR="$ROOT_DIR/pango/src"
# Build a "-s <dir>" source argument for each header directory.
# NOTE(review): the glob currently names a single fixed directory, so the
# loop runs once - presumably kept as a loop for symmetry with sibling
# generators.
for dir in "$PREFIX"/pango/pango; do
PARAMS="$PARAMS -s $dir"
done
# Extract the gtk-doc comments into XML consumed by the binding generator.
DOCEXTRACT_TO_XML_PY="$JHBUILD_SOURCES/glibmm/tools/defs_gen/docextract_to_xml.py"
$DOCEXTRACT_TO_XML_PY $PARAMS > "$OUT_DIR/pango_docs.xml"
|
johne53/MB3Pangomm
|
tools/gen_scripts/pango_generate_docs.sh
|
Shell
|
lgpl-2.1
| 661 |
#!/bin/bash
# Site setup helper: link the app entry point, create a writable photos
# directory two levels up (in public_html), and seed it with the bundled
# assets.
if [ ! -f main ]; then
ln -s src/main main
fi
mkdir ../../photos
if [ ! -d ../../photos ]; then
echo "Cannot make photos directory in public_html. Make it manually and give it write permissions";
fi
#Check if we can write in the photos direcly
# Probe write access as the web-server user; fall back to chmod 777 if
# the probe file never appeared.
sudo -u www-data touch ../../photos/somefile
if [ ! -f ../../photos/somefile ]; then
sudo chmod -R 777 ../../photos/
else
rm -f ../../photos/somefile
fi
#Now we have write and read permissions. Place the files form the asset folder in the assets folder
cp assets/* ../../photos/
sudo chmod -R 777 ../`basename $(pwd)`
|
GreatDevelopers/CivilCoding
|
untitled.sh
|
Shell
|
lgpl-3.0
| 596 |
email=$1
password=$2
# Log in to series.ly and capture the session cookie into cookie.txt.
# BUG FIX: --post-data was single-quoted, so the literal strings "$email"
# and "$password" were posted instead of the credentials; double quotes
# let the shell expand them.
wget --load-cookies cookie.txt --save-cookies cookie.txt --post-data="lg_login=$email&lg_pass=$password&paso1ok=Entrar" http://series.ly/scripts/login/login.php
cookieName='PHPSESSID'
# Scan the Netscape-format cookie jar: field 6 is the cookie name and
# field 7 its value; keep the PHPSESSID session id.
while read line
do
set -- $line
echo "$6 -> $7"
if [ "PHPSESSID" == "$6" ] ; then cookie=$7; fi
done < cookie.txt
# wget saved the response body as login.php; it is not needed.
rm login.php
echo "Cookie: $cookie"
# DONE
# bash docus.sh
# bash series.sh
# bash tv.sh
# bash movies.sh
# # The heavy part
# bash links.sh $cookie linksSeason.py 4 tvShows # DONE
# bash links.sh $cookie linksNoSeason.py 3 docus
bash links.sh $cookie linksSeason.py 1 series #GOING
# bash links.sh $cookie linksNoSeason.py 2 movies
|
javipolo/seriesLyks
|
dev/run.sh
|
Shell
|
lgpl-3.0
| 686 |
#!/bin/bash
## build system required files
# Generate the framework's PHP autoload file by scanning the source tree.
PHP=`which php`
FW_HOME=`pwd`
AUTOLOAD_PATH_FW="$FW_HOME"
# create project autoload files
$PHP $FW_HOME/project/build_includes.php $AUTOLOAD_PATH_FW $FW_HOME/Loader.php "fw:autoload:application"
#generate cscope files
#root=$FW_HOME
#cd $root
#find $root/ -type f -name "*.inc" > project/cscope.files
#find $root/ -type f -name "*.php" >> project/cscope.files
#cd project
#cscope -b
|
sbxjoy/framework
|
project/autoload_builder.sh
|
Shell
|
lgpl-3.0
| 429 |
#!/bin/bash
# Regenerate every DiracSimplify / DiracTrace test family, in the same
# order as before; failures of individual generators are not fatal.
for generator in \
    diracsimplify_generate1.sh \
    diracsimplify_generate2.sh \
    diracsimplify_generate3.sh \
    diracsimplify_generate4.sh \
    diracsimplify_generate5.sh \
    diracsimplify_slashes_generate1.sh \
    diracsimplify_slashes_generate2.sh \
    diracsimplify_slashes_generate3.sh \
    diractrace_generate1.sh \
    diractrace_generate2.sh \
    diractrace_generate3.sh \
    diractrace_generate4.sh \
    diractrace_generate5.sh \
    diractrace_slashes_generate1.sh \
    diractrace_slashes_generate2.sh \
    diractrace_slashes_generate3.sh
do
    ./"$generator"
done
|
FeynCalc/pyfeyncalctester
|
generate_tests.sh
|
Shell
|
lgpl-3.0
| 504 |
#!/bin/bash
# Run the python-dbcli container to pull course notebooks from a
# Databricks workspace and refresh the course site, then fix up file
# ownership of everything the root-running container wrote.
set -x -v
# Export every variable defined in env.list into this shell.
set -o allexport
source env.list
set +o allexport
echo ${dbProfile0}
echo ${localgitdbcDIRPATH}
echo ${localgitsiteDIRPATH}
echo ${localgitdbcDIR}
echo "done echoing env variables in use inside docker"
## to download the source and html files of all notebook from a course module directory
## in a databricks workspace and have them update course site
docker run --rm -it --name=python-dbcli --env-file env.list --mount type=bind,readonly,source=${HOME}/.databrickscfg,destination=/root/.databrickscfg --mount type=bind,source=${HOME}/all/git,destination=/root/GIT lamastex/python-dbcli:latest /bin/bash /root/GIT/lamastex/scalable-data-science/books/latest/db.sh
## docker runs as root, so we need to reown it
sudo chown -R $USER ../../dbcArchives/${localgitdbcDIR}
sudo chgrp -R $USER ../../dbcArchives/${localgitdbcDIR}
sudo chown -R $USER ../../_sds/3/x/db
sudo chgrp -R $USER ../../_sds/3/x/db
|
lamastex/scalable-data-science
|
books/latest/docker-run-db.sh
|
Shell
|
unlicense
| 928 |
# Build the HTML resume: disable fontspec (htlatex cannot use it), run
# htlatex, then post-process the generated HTML.
# NOTE(review): GNU sed -i syntax is assumed throughout; on BSD/macOS
# "sed -i" requires a suffix argument - verify the target platform.
sed -i "/\RequirePackage{fontspec}/s/^/%/" ./simpleresumecv.cls
htlatex resume.tex "xhtml, charset=utf-8" " -cunihtf -utf8"
# Replace the Gamma glyph htlatex emits for list markers with a bullet
# (relies on bash echo -e understanding \u escapes - TODO confirm).
sed -i s/$(echo -e "\\u0393")/$(echo -e "\\u2022")/ ./resume.html
# Drop the first two lines (the XML/doctype preamble).
sed -i '1d' ./resume.html
sed -i '1d' ./resume.html
|
josephvoss/resume
|
build_html.sh
|
Shell
|
unlicense
| 243 |
#!/usr/bin/ksh
# 21.sh
# - also bash compatible
# https://github.com/kshji
# License, read https://github.com/kshji/ksh
#
# Karjalan ATK-Awot Oy 2020, Jukka Inkeri
# 2020-04-16
#
# Modulus 10, weight 21
# https://fi.wikipedia.org/wiki/Luhnin_algoritmi
# https://en.wikipedia.org/wiki/Luhn_algorithm
#
# Appends the modulus-10 (Luhn, weights 2,1,2,1,... from the rightmost
# digit) check digit to the reference number given as $1 and prints the
# full payment reference number.
refsrc="$1"
PRG="$0"
######################################################################
usage()
{
	echo "usage:$PRG laskunro ">&2
	echo "usage:$PRG ReferenceNumber ">&2
	echo "max.len 24 - maksimpituus 24 ">&2
}
######################################################################
# MAIN
######################################################################
[ "$refsrc" = "" ] && usage && exit 1
# array 1st element index is 0
# Weight for position i (counted from the rightmost digit) is mask[i].
mask="212121212121212121212121212121212121212121212121212"
sum=0
i=0
len=${#refsrc}
# max 24 numbers - maksimipituus 24
(( len > 24 )) && usage && exit 2
(( loc=len-1 ))
while (( i<len ))
do
	factor=${mask:$i:1}
	number=${refsrc:$loc:1}
	(( loc=loc-1 ))
	(( i=i+1 ))
	(( result=factor*number ))
	# Two-digit product: add its digits, i.e. 1 + (result - 10).
	if ((result>9)) ; then
		(( sum=sum+1 ))
		((result=result-10))
	fi
	(( sum=sum+result ))
done
# Round the digit sum up to the next multiple of ten; the check digit is
# the difference (0 when the sum is already a multiple of ten).
# BUG FIX (latent): the original 'dec=sum' stored the literal string
# "sum" and only worked because (( dec-sum )) re-evaluated it
# arithmetically; assign the numeric value explicitly.
dec=$sum
(( mod=sum%10 ))
if ((mod != 0)) ; then
	(( dec=(sum/10+1)*10 ))
fi
(( checksum = dec-sum ))
# Output Payment reference Number
echo "$refsrc$checksum"
|
kshji/ksh
|
Sh/21.sh
|
Shell
|
unlicense
| 1,487 |
#!/bin/bash -x
#
# Generated - do not edit!
#
# NetBeans packaging step: stages the Release build output into a temp
# layout and tars it under dist/Release/GNU-Linux/package/.
# Macros
TOP=`pwd`
CND_PLATFORM=GNU-Linux
CND_CONF=Release
CND_DISTDIR=dist
CND_BUILDDIR=build
CND_DLIB_EXT=so
NBTMPDIR=${CND_BUILDDIR}/${CND_CONF}/${CND_PLATFORM}/tmp-packaging
TMPDIRNAME=tmp-packaging
OUTPUT_PATH=${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/mockexchange
OUTPUT_BASENAME=mockexchange
PACKAGE_TOP_DIR=mockexchange/
# Functions
# Abort the whole script, propagating the status, if the previous command
# failed; no-op on success.
function checkReturnCode
{
    local rc=$?
    [ "$rc" -eq 0 ] || exit "$rc"
}
# Create a directory (with parents), optionally applying a permission
# mode; aborts the script via checkReturnCode on any failure.
function makeDirectory
# $1 directory path
# $2 permission (optional)
{
mkdir -p "$1"
checkReturnCode
if [ "$2" != "" ]
then
chmod $2 "$1"
checkReturnCode
fi
}
# Copy a file into the staging tree, optionally applying a permission
# mode; aborts the script via checkReturnCode on any failure.
function copyFileToTmpDir
# $1 from-file path
# $2 to-file path
# $3 permission
{
cp "$1" "$2"
checkReturnCode
if [ "$3" != "" ]
then
chmod $3 "$2"
checkReturnCode
fi
}
# Setup
# Recreate the staging area and the package output directory.
cd "${TOP}"
mkdir -p ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package
rm -rf ${NBTMPDIR}
mkdir -p ${NBTMPDIR}
# Copy files and create directories and links
cd "${TOP}"
makeDirectory "${NBTMPDIR}/mockexchange/bin"
copyFileToTmpDir "${OUTPUT_PATH}" "${NBTMPDIR}/${PACKAGE_TOP_DIR}bin/${OUTPUT_BASENAME}" 0755
# Generate tar file
cd "${TOP}"
rm -f ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package/mockexchange.tar
# Tar from inside the staging dir so the archive is rooted at
# mockexchange/ (the ../../../../ prefix climbs back to ${TOP}).
cd ${NBTMPDIR}
tar -vcf ../../../../${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package/mockexchange.tar *
checkReturnCode
# Cleanup
cd "${TOP}"
rm -rf ${NBTMPDIR}
|
matsbror/MockExchange
|
nbproject/Package-Release.bash
|
Shell
|
apache-2.0
| 1,471 |
#!/bin/bash
# Post-install setup for Kali Linux: switch to Chinese mirrors, install an
# input method, flash, fonts and zsh, tweak vim/NetworkManager/Wireshark,
# then clean up and prompt the user to finish input-method setup.
# Switch the APT sources to the USTC and Aliyun Kali mirrors.
echo "deb http://mirrors.ustc.edu.cn/kali kali main non-free contrib" > /etc/apt/sources.list
echo "deb-src http://mirrors.ustc.edu.cn/kali kali main non-free contrib" >> /etc/apt/sources.list
echo "deb http://mirrors.ustc.edu.cn/kali-security kali/updates main contrib non-free" >> /etc/apt/sources.list
echo "deb http://mirrors.aliyun.com/kali kali main non-free contrib" >> /etc/apt/sources.list
echo "deb-src http://mirrors.aliyun.com/kali kali main non-free contrib" >> /etc/apt/sources.list
echo "deb http://mirrors.aliyun.com/kali-security kali/updates main contrib non-free" >> /etc/apt/sources.list
# Refresh the package lists and install fcitx, Google Pinyin and Flash.
yes|apt-get update
yes|apt-get install fcitx fcitx-googlepinyin
yes|apt-get install flashplugin-nonfree
yes|update-flashplugin-nonfree --install
# Enable the Metasploit database services at boot and start them now.
update-rc.d postgresql enable
update-rc.d metasploit enable
service postgresql start
service metasploit start
# Install the WenQuanYi Micro Hei CJK font.
yes|apt-get install ttf-wqy-microhei
# Vim polish: enable filetype indenting, a color scheme and syntax
# highlighting system-wide.
echo "filetype indent on" >> /etc/vim/vimrc
echo "colorscheme murphy" >> /etc/vim/vimrc
echo "syntax enable" >> /etc/vim/vimrc
# Fix the "device not managed" NetworkManager issue that breaks
# networking: replace the last line of the config with managed=true.
sed -i '$d' /etc/NetworkManager/NetworkManager.conf
echo "managed=true" >> /etc/NetworkManager/NetworkManager.conf
## Optional: replace VLC and remove gedit/leafpad, keeping only vim.
## Do not enable these commands unless you understand them.
#yes|apt-get purge vlc
#yes|apt-get purge gedit
#yes|apt-get purge leafpad
#yes|apt-get install smplayer
# Work around the Wireshark startup (init.lua) error by commenting out
# the last two dofile lines.
# NOTE(review): the inner double quotes end the string early, so the
# appended lines lose the quotes around console.lua/dtd_gen.lua - verify
# the written output is what Wireshark expects.
sed -i '$d' /usr/share/wireshark/init.lua
sed -i '$d' /usr/share/wireshark/init.lua
echo "--dofile(DATA_DIR.."console.lua")" >> /usr/share/wireshark/init.lua
echo "--dofile(DATA_DIR.."dtd_gen.lua")" >> /usr/share/wireshark/init.lua
## Install and auto-configure Zsh with oh-my-zsh; if unwanted, delete the
## commands from here down to the next ## marker.
yes|apt-get install zsh
git clone git://github.com/robbyrussell/oh-my-zsh.git ~/.oh-my-zsh
cp ~/.oh-my-zsh/templates/zshrc.zsh-template ~/.zshrc
chsh -s /bin/zsh
## Delete down to here.
# Upgrade the system and clean caches, orphaned packages, removed-package
# configs, non-man files under /usr/share/man, logs and /tmp.
yes|apt-get upgrade
reset
yes|apt-get autoremove
yes|apt-get clean
yes|apt-get autoclean
dpkg -l |grep "^rc"|awk '{print $2}' |xargs aptitude -y purge
cd /usr/share/man && rm -rf `ls | grep -v "man"` > /dev/null 2>&1
rm -rf /var/log/*
rm -rf /tmp/*
reset
read -n1 -s -p "恭喜您完成了Kali快速设置,现在请您先重启然后手动设置输入法。设置的方法是右键点击右上方的小键盘,然后选择更改设置并将Google拼音移动到最上方以设为默认(如果您不需要则没有必要),顺便建议您将皮肤设置为十分大气的dark(黑色,尽显高端)。感谢您使用此脚本。" va
|
Arthur2e5/MobileConstructionVehicle
|
Kali/KaliLinuxShell.sh
|
Shell
|
apache-2.0
| 2,837 |
#!/bin/bash
# Automatic monitor configuration script.
# xrandr -q          | lists all available outputs and modes
# cvt 1920 1080      # computes a modeline for a new resolution
# xrandr --newmode "1920x1080" 173.00 1920 2048 2248 2576 1080 1083 1088 1120 -HSync +VSync
# xrandr --addmode VGA1 "1920x1080"
# For details see: http://forums.opensuse.org/english/get-technical-help-here/hardware/458654-xrandr-does-not-detect-external-monitor-native-resolution.html
# Parameters
# $1 = SINGLE or DUAL (default DUAL)
# $2 = IZQ places the external monitor on the left, DER on the right (default IZQ)
# $3 = VGA output name (default: first connected VGA reported by xrandr)
# $4 = Resolution for the VGA output (default: its maximum resolution)
# String manipulation reference: http://tldp.org/LDP/abs/html/string-manipulation.html
echo "Usage: configure_monitor.sh SINGLE|DUAL IZQ|DER VGAN RESXxRESy "
DUAL="dual"
SINGLE="single"
PLACE=$1
POS=$2
VGA=$3
RES=$4
if [[ $1 == "" ]]
then
PLACE=$DUAL
fi
if [[ $2 == "" ]]
then
POS="IZQ"
fi
if [[ $3 == "" ]]
then
VGA=`xrandr | grep "VGA[0-9]" | cut -d " " -f1`
fi
if [[ $4 == "" ]]
then
# First mode line after the output's header is its preferred/max mode.
RES=`xrandr | grep "$VGA" -A 1 | tail -n 1 | tr -s " " | cut -d " " -f2`
fi
# Split WIDTHxHEIGHT into its components.
X=${RES%x*}
Y=${RES#*x}
# Position offset: the connected monitor is always primary; IZQ shifts it
# left of the laptop panel by its own width.
# BUG FIX: the original 'OFFSET="$Xx0"' expanded the undefined variable
# "Xx0" (the parser takes the longest identifier), yielding an empty
# offset for the DER case; braces delimit the name correctly.
# NOTE(review): for DER the intended offset may be the laptop panel's
# width (1366) rather than the VGA width - confirm.
OFFSET="${X}x0"
if [[ "$POS" == "IZQ" ]]
then
OFFSET="-${X}x0"
fi
echo
echo "#####################"
echo "Display: $VGA"
echo "Resolución: $RES"
echo "Tipo: " $PLACE
echo "Pos: $OFFSET"
echo "#####################"
echo
if [[ $PLACE != $SINGLE ]]
then
xrandr --output LVDS1 --mode 1366x768 --pos 0x0 --auto --output $VGA --mode $RES --pos "$OFFSET" --primary --auto
else
xrandr --output LVDS1 --mode 1366x768 --auto --primary
fi
|
aVolpe/scripts
|
configure_monitor.sh
|
Shell
|
apache-2.0
| 1,828 |
#!/bin/bash
#
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Resolve the directory this script lives in (works when sourced or exec'd).
basedir=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
# Loading OpenStack credentials
source /home/jenkins-slave/tools/keystonerc_admin
# Loading all the needed functions
source $basedir/library.sh
set -e
# Devstack VM name derived from the gerrit change/patchset; debug jobs get a
# "-dbg" suffix so they are distinguishable in the cloud.
NAME="nov-dvs-$ZUUL_CHANGE-$ZUUL_PATCHSET"
if [[ ! -z $IS_DEBUG_JOB ]] && [[ $IS_DEBUG_JOB == "yes" ]]; then
    NAME="$NAME-dbg"
fi
export NAME=$NAME
# Persist every job parameter so later pipeline stages can source/read them.
echo NAME=$NAME | tee -a /home/jenkins-slave/runs/devstack_params.$ZUUL_UUID.txt
echo ZUUL_PROJECT=$ZUUL_PROJECT | tee -a /home/jenkins-slave/runs/devstack_params.$ZUUL_UUID.txt
echo ZUUL_BRANCH=$ZUUL_BRANCH | tee -a /home/jenkins-slave/runs/devstack_params.$ZUUL_UUID.txt
echo ZUUL_CHANGE=$ZUUL_CHANGE | tee -a /home/jenkins-slave/runs/devstack_params.$ZUUL_UUID.txt
echo ZUUL_PATCHSET=$ZUUL_PATCHSET | tee -a /home/jenkins-slave/runs/devstack_params.$ZUUL_UUID.txt
echo ZUUL_UUID=$ZUUL_UUID | tee -a /home/jenkins-slave/runs/devstack_params.$ZUUL_UUID.txt
echo IS_DEBUG_JOB=$IS_DEBUG_JOB | tee -a /home/jenkins-slave/runs/devstack_params.$ZUUL_UUID.txt
# NOTE(review): strips the last two characters of ZUUL_URL (presumably a
# trailing "/p") to obtain the gerrit/zuul site URL — confirm against config.
ZUUL_SITE=`echo "$ZUUL_URL" |sed 's/.\{2\}$//'`
echo ZUUL_SITE=$ZUUL_SITE | tee -a /home/jenkins-slave/runs/devstack_params.$ZUUL_UUID.txt
# Id of the "private" network; used for both NICs of the devstack VM below.
NET_ID=$(nova net-list | grep private| awk '{print $2}')
echo NET_ID=$NET_ID | tee -a /home/jenkins-slave/runs/devstack_params.$ZUUL_UUID.txt
devstack_image="devstack-82v1"
echo "devstack_image=$devstack_image" | tee -a /home/jenkins-slave/runs/devstack_params.$ZUUL_UUID.txt
#FLOATING_IP=$(nova floating-ip-create public | awk '{print $2}'|sed '/^$/d' | tail -n 1) || echo `date -u +%H:%M:%S` "Failed to alocate floating IP"
#if [ -z "$FLOATING_IP" ]; then
#    exit 1
#fi
echo "Deploying devstack $NAME"
# VM_OK flips to 0 once a booted VM answers on ssh; the loop below retries
# boot+probe until then.
VM_OK=1
while [ $VM_OK -ne 0 ]; do
    # Boot the devstack VM. Capture nova's exit code *before* extracting the id
    # with awk: in the previous form `VMID=$(nova boot ... | awk ...)` the $?
    # checked below was awk's exit status (always 0), so a failed boot could
    # never be detected.
    set +e
    BOOT_OUTPUT=$(nova boot --config-drive true --flavor devstack.xxl --image $devstack_image --key-name default --security-groups devstack --nic net-id="$NET_ID" --nic net-id="$NET_ID" "$NAME" --poll)
    NOVABOOT_EXIT=$?
    set -e
    # NOTE(review): assumes the instance id sits on row 21 of the `nova boot`
    # table — fragile against client formatting changes; confirm on upgrades.
    VMID=$(echo "$BOOT_OUTPUT" | awk '{if (NR == 21) {print $4}}')
    export VMID=$VMID
    if [ $NOVABOOT_EXIT -ne 0 ]; then
        echo "Failed to create devstack VM: $VMID"
        nova show "$VMID"
        exit 1
    fi
    echo "Showing details of the new created instance: $VMID"
    nova show "$VMID"
    echo "Fetching devstack VM fixed IP address"
    FIP=$(nova show "$VMID" | grep "private network" | awk '{print $6}')
    FIP=${FIP//,}
    # NOTE(review): waits when column 6 does not look like a table separator,
    # i.e. the second fixed IP has apparently not been assigned yet — confirm.
    if [[ ! $FIP =~ .*\|.* ]]; then
        sleep 30
    fi
    #exec_with_retry "nova add-floating-ip $VMID $FLOATING_IP" 15 5 || { echo "nova show $VMID:"; nova show "$VMID"; echo "nova console-log $VMID:"; nova console-log "$VMID"; exit 1; }
    sleep 10
    FIXED_IP=$(nova show "$VMID" | grep "private network" | awk '{print $5}')
    export FIXED_IP="${FIXED_IP//,}"
    # Poll nova show for up to ~10 tries; after that fall back to the instance
    # console-log and neutron port-list, accepting the IP only if both agree.
    COUNT=1
    while [ -z "$FIXED_IP" ]; do
        if [ $COUNT -lt 10 ]; then
            sleep 15
            FIXED_IP=$(nova show "$VMID" | grep "private network" | awk '{print $5}')
            export FIXED_IP="${FIXED_IP//,}"
            COUNT=$(($COUNT + 1))
        else
            echo "Failed to get fixed IP using nova show $VMID"
            echo "Trying to get the IP from console-log and port-list"
            FIXED_IP1=`nova console-log $VMID | grep "ci-info" | grep "eth0" | grep "True" | awk '{print $7}'`
            echo "From console-log we got IP: $FIXED_IP1"
            FIXED_IP2=`neutron port-list -D -c device_id -c fixed_ips | grep $VMID | awk '{print $7}' | tr -d \" | tr -d }`
            echo "From neutron port-list we got IP: $FIXED_IP2"
            if [[ -z "$FIXED_IP1" || -z "$FIXED_IP2" || "$FIXED_IP1" != "$FIXED_IP2" ]]; then
                echo "Failed to get fixed IP"
                echo "nova show output:"
                nova show "$VMID"
                echo "nova console-log output:"
                nova console-log "$VMID"
                echo "neutron port-list output:"
                neutron port-list -D -c device_id -c fixed_ips | grep $VMID
                exit 1
            else
                export FIXED_IP=$FIXED_IP1
            fi
        fi
    done
    echo "nova show $VMID:"
    nova show "$VMID"
    sleep 60
    echo "Probing for connectivity on IP $FIXED_IP"
    # wait_for_listening_port (from library.sh) returns non-zero on timeout;
    # suspend -e so the status can be inspected instead of aborting.
    set +e
    wait_for_listening_port $FIXED_IP 22 30
    status=$?
    set -e
    if [ $status -eq 0 ]; then
        VM_OK=0
        echo "VM connectivity OK"
    else
        # One reboot attempt before giving up on this instance.
        echo "VM connectivity NOT OK, rebooting VM"
        nova reboot "$VMID"
        sleep 120
        set +e
        wait_for_listening_port $FIXED_IP 22 30
        status=$?
        set -e
        if [ $status -eq 0 ]; then
            VM_OK=0
            echo "VM connectivity OK"
        else
            #exec_with_retry "nova floating-ip-disassociate $VMID $FLOATING_IP" 15 5
            echo "nova console-log $VMID:"; nova console-log "$VMID"; echo "Failed listening for ssh port on devstack"
            # Delete the broken VM and loop around to boot a fresh one.
            echo "Deleting VM $VMID"
            nova delete $VMID
        fi
    fi
done
# No real floating IP is allocated (see commented-out code above); the fixed
# IP doubles as the "floating" address for the rest of the pipeline.
FLOATING_IP=$FIXED_IP
echo FLOATING_IP=$FLOATING_IP | tee -a /home/jenkins-slave/runs/devstack_params.$ZUUL_UUID.txt
echo FIXED_IP=$FIXED_IP | tee -a /home/jenkins-slave/runs/devstack_params.$ZUUL_UUID.txt
echo VMID=$VMID | tee -a /home/jenkins-slave/runs/devstack_params.$ZUUL_UUID.txt
echo "adding $NAME to /etc/hosts"
run_ssh_cmd_with_retry ubuntu@$FLOATING_IP $DEVSTACK_SSH_KEY 'VMNAME=$(hostname); sudo sed -i "s/127.0.0.1 localhost/127.0.0.1 localhost $VMNAME/g" /etc/hosts' 1
echo "adding apt-cacher-ng:"
run_ssh_cmd_with_retry ubuntu@$FLOATING_IP $DEVSTACK_SSH_KEY 'echo "Acquire::http { Proxy \"http://10.20.1.36:8000/\" };" | sudo tee --append /etc/apt/apt.conf.d/90-apt-proxy.conf' 1
echo "clean any apt files:"
run_ssh_cmd_with_retry ubuntu@$FLOATING_IP $DEVSTACK_SSH_KEY "sudo rm -rf /var/lib/apt/lists/*" 1
echo "apt-get update:"
run_ssh_cmd_with_retry ubuntu@$FLOATING_IP $DEVSTACK_SSH_KEY "sudo apt-get update -y" 1
echo "apt-get upgrade:"
run_ssh_cmd_with_retry ubuntu@$FLOATING_IP $DEVSTACK_SSH_KEY 'DEBIAN_FRONTEND=noninteractive && DEBIAN_PRIORITY=critical && sudo apt-get -q -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" upgrade' 1
run_ssh_cmd_with_retry ubuntu@$FLOATING_IP $DEVSTACK_SSH_KEY 'DEBIAN_FRONTEND=noninteractive && DEBIAN_PRIORITY=critical && sudo apt-get -q -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" autoremove' 1
# set timezone to UTC
run_ssh_cmd_with_retry ubuntu@$FLOATING_IP $DEVSTACK_SSH_KEY "sudo ln -fs /usr/share/zoneinfo/UTC /etc/localtime" 1
# copy files to devstack
scp -v -r -o "StrictHostKeyChecking no" -o "UserKnownHostsFile /dev/null" -i $DEVSTACK_SSH_KEY $basedir/../devstack_vm/* ubuntu@$FLOATING_IP:/home/ubuntu/
# On stable branches, skip the server-rescue tempest suites.
if [ "$ZUUL_BRANCH" != "master" ]; then
    run_ssh_cmd_with_retry ubuntu@$FLOATING_IP $DEVSTACK_SSH_KEY 'echo -e "tempest.api.compute.servers.test_server_rescue.ServerRescueTestJSON.\ntempest.api.compute.servers.test_server_rescue_negative.ServerRescueNegativeTestJSON." >> /home/ubuntu/bin/excluded-tests.txt'
fi
#disable n-crt on master branch
if [ "$ZUUL_BRANCH" == "master" ]; then
    run_ssh_cmd_with_retry ubuntu@$FLOATING_IP $DEVSTACK_SSH_KEY "sed -i 's/^enable_service n-crt/disable_service n-crt/' /home/ubuntu/devstack/local.conf" 1
fi
# Reserve a VLAN range for this VM; best-effort (errors tolerated via set +e).
set +e
VLAN_RANGE=`$basedir/../vlan_allocation.py -a $VMID`
if [ ! -z "$VLAN_RANGE" ]; then
    run_ssh_cmd_with_retry ubuntu@$FLOATING_IP $DEVSTACK_SSH_KEY "sed -i 's/TENANT_VLAN_RANGE.*/TENANT_VLAN_RANGE='$VLAN_RANGE'/g' /home/ubuntu/devstack/local.conf" 3
fi
set -e
# Point the keystonerc on the VM at the local identity endpoint.
run_ssh_cmd_with_retry ubuntu@$FLOATING_IP $DEVSTACK_SSH_KEY "sed -i 's/export OS_AUTH_URL.*/export OS_AUTH_URL=http:\/\/127.0.0.1\/identity/g' /home/ubuntu/keystonerc" 3
# update repos
run_ssh_cmd_with_retry ubuntu@$FLOATING_IP $DEVSTACK_SSH_KEY "/home/ubuntu/bin/update_devstack_repos.sh --branch $ZUUL_BRANCH --build-for $ZUUL_PROJECT" 1
# gerrit-git-prep
run_ssh_cmd_with_retry ubuntu@$FLOATING_IP $DEVSTACK_SSH_KEY "/home/ubuntu/bin/gerrit_git_prep.sh --zuul-site $ZUUL_SITE --gerrit-site $ZUUL_SITE --zuul-ref $ZUUL_REF --zuul-change $ZUUL_CHANGE --zuul-project $ZUUL_PROJECT" 1
# get locally the vhdx files used by tempest
run_ssh_cmd_with_retry ubuntu@$FLOATING_IP $DEVSTACK_SSH_KEY "mkdir -p /home/ubuntu/devstack/files/images"
run_ssh_cmd_with_retry ubuntu@$FLOATING_IP $DEVSTACK_SSH_KEY "wget http://10.20.1.14:8080/cirros-0.3.3-x86_64.vhdx -O /home/ubuntu/devstack/files/images/cirros-0.3.3-x86_64.vhdx"
run_ssh_cmd_with_retry ubuntu@$FLOATING_IP $DEVSTACK_SSH_KEY "wget http://10.20.1.14:8080/Fedora-x86_64-20-20140618-sda.vhdx.gz -O /home/ubuntu/devstack/files/images/Fedora-x86_64-20-20140618-sda.vhdx.gz"
# install neutron pip package as it is external
# run_ssh_cmd_with_retry ubuntu@$FLOATING_IP $DEVSTACK_SSH_KEY "sudo pip install -U networking-hyperv --pre"
# make local.sh executable
run_ssh_cmd_with_retry ubuntu@$FLOATING_IP $DEVSTACK_SSH_KEY "chmod a+x /home/ubuntu/devstack/local.sh"
# Preparing share for HyperV logs
run_ssh_cmd_with_retry ubuntu@$FLOATING_IP $DEVSTACK_SSH_KEY 'mkdir -p /openstack/logs; chmod 777 /openstack/logs; sudo chown nobody:nogroup /openstack/logs'
# Unzip Fedora image
echo `date -u +%H:%M:%S` "Started to unzip Fedora image.."
run_ssh_cmd_with_retry ubuntu@$FLOATING_IP $DEVSTACK_SSH_KEY "gzip --decompress --force /home/ubuntu/devstack/files/images/Fedora-x86_64-20-20140618-sda.vhdx.gz"
#Get IP addresses of the two Hyper-V hosts
set +e
#IFS='' read -r -d '' PSCODE <<'_EOF'
#$NetIPAddr = Get-NetIPAddress | Where-Object {$_.InterfaceAlias -like "*br100*" -and $_.AddressFamily -like "IPv4"}
#$IPAddr = $NetIPAddr.IPAddress
#Write-Host $IPAddr
#_EOF
#HYPERV_GET_DATA_IP=`echo "$PSCODE" | iconv -f ascii -t utf16le | base64 -w0`
# Query each Hyper-V host (via WSMan) for its data-network IPv4 on the br100
# interface; only addresses matching the 10.250.x.y data subnet are accepted.
hyperv01_ip=`run_wsman_cmd $hyperv01 $WIN_USER $WIN_PASS 'powershell -ExecutionPolicy RemoteSigned (Get-NetIPAddress -InterfaceAlias "*br100*" -AddressFamily "IPv4").IPAddress' 2>&1 | grep -E -o '10\.250\.[0-9]{1,2}\.[0-9]{1,3}'`
hyperv02_ip=`run_wsman_cmd $hyperv02 $WIN_USER $WIN_PASS 'powershell -ExecutionPolicy RemoteSigned (Get-NetIPAddress -InterfaceAlias "*br100*" -AddressFamily "IPv4").IPAddress' 2>&1 | grep -E -o '10\.250\.[0-9]{1,2}\.[0-9]{1,3}'`
set -e
echo `date -u +%H:%M:%S` "Data IP of $hyperv01 is $hyperv01_ip"
echo `date -u +%H:%M:%S` "Data IP of $hyperv02 is $hyperv02_ip"
if [[ ! $hyperv01_ip =~ ^10\.250\.[0-9]{1,2}\.[0-9]{1,3} ]]; then
    echo "Did not receive a good IP for Hyper-V host $hyperv01 : $hyperv01_ip"
    exit 1
fi
if [[ ! $hyperv02_ip =~ ^10\.250\.[0-9]{1,2}\.[0-9]{1,3} ]]; then
    echo "Did not receive a good IP for Hyper-V host $hyperv02 : $hyperv02_ip"
    exit 1
fi
# Building devstack as a threaded job
echo `date -u +%H:%M:%S` "Started to build devstack as a threaded job"
nohup $basedir/build_devstack.sh $hyperv01_ip $hyperv02_ip > /home/jenkins-slave/logs/devstack-build-log-$ZUUL_UUID.log 2>&1 &
pid_devstack=$!
# Building and joining HyperV nodes
echo `date -u +%H:%M:%S` "Started building & joining Hyper-V node: $hyperv01"
nohup $basedir/build_hyperv.sh $hyperv01 > /home/jenkins-slave/logs/hyperv-build-log-$ZUUL_UUID-$hyperv01.log 2>&1 &
pid_hv01=$!
echo `date -u +%H:%M:%S` "Started building & joining Hyper-V node: $hyperv02"
nohup $basedir/build_hyperv.sh $hyperv02 > /home/jenkins-slave/logs/hyperv-build-log-$ZUUL_UUID-$hyperv02.log 2>&1 &
pid_hv02=$!
TIME_COUNT=0
PROC_COUNT=3
echo `date -u +%H:%M:%S` "Start waiting for parallel init jobs."
finished_devstack=0;
finished_hv01=0;
finished_hv02=0;
while [[ $TIME_COUNT -lt 60 ]] && [[ $PROC_COUNT -gt 0 ]]; do
if [[ $finished_devstack -eq 0 ]]; then
ps -p $pid_devstack > /dev/null 2>&1 || finished_devstack=$?
[[ $finished_devstack -ne 0 ]] && PROC_COUNT=$(( $PROC_COUNT - 1 )) && echo `date -u +%H:%M:%S` "Finished building devstack"
fi
if [[ $finished_hv01 -eq 0 ]]; then
ps -p $pid_hv01 > /dev/null 2>&1 || finished_hv01=$?
[[ $finished_hv01 -ne 0 ]] && PROC_COUNT=$(( $PROC_COUNT - 1 )) && echo `date -u +%H:%M:%S` "Finished building $hyperv01"
fi
if [[ $finished_hv02 -eq 0 ]]; then
ps -p $pid_hv02 > /dev/null 2>&1 || finished_hv02=$?
[[ $finished_hv02 -ne 0 ]] && PROC_COUNT=$(( $PROC_COUNT - 1 )) && echo `date -u +%H:%M:%S` "Finished building $hyperv02"
fi
if [[ $PROC_COUNT -gt 0 ]]; then
sleep 1m
TIME_COUNT=$(( $TIME_COUNT +1 ))
fi
done
echo `date -u +%H:%M:%S` "Finished waiting for the parallel init jobs."
echo `date -u +%H:%M:%S` "We looped $TIME_COUNT times, and when finishing we have $PROC_COUNT threads still active"
OSTACK_PROJECT=`echo "$ZUUL_PROJECT" | cut -d/ -f2`
if [[ ! -z $IS_DEBUG_JOB ]] && [[ $IS_DEBUG_JOB == "yes" ]]
then
echo "All build logs can be found in http://64.119.130.115/debug/$OSTACK_PROJECT/$ZUUL_CHANGE/$ZUUL_PATCHSET/"
else
echo "All build logs can be found in http://64.119.130.115/$OSTACK_PROJECT/$ZUUL_CHANGE/$ZUUL_PATCHSET/"
fi
if [[ $PROC_COUNT -gt 0 ]]; then
kill -9 $pid_devstack > /dev/null 2>&1
kill -9 $pid_hv01 > /dev/null 2>&1
kill -9 $pid_hv02 > /dev/null 2>&1
echo "Not all build threads finished in time, initialization process failed."
exit 1
fi
# HyperV post-build services restart
post_build_restart_hyperv_services $hyperv01 $WIN_USER $WIN_PASS
post_build_restart_hyperv_services $hyperv02 $WIN_USER $WIN_PASS
# Check for nova join (must equal 2)
run_ssh_cmd_with_retry ubuntu@$FLOATING_IP $DEVSTACK_SSH_KEY 'source /home/ubuntu/keystonerc; NOVA_COUNT=$(nova service-list | grep nova-compute | grep -c -w up); if [ "$NOVA_COUNT" != 2 ];then nova service-list; exit 1;fi' 12
run_ssh_cmd_with_retry ubuntu@$FLOATING_IP $DEVSTACK_SSH_KEY 'source /home/ubuntu/keystonerc; nova service-list' 1
# Check for neutron join (must equal 2)
run_ssh_cmd_with_retry ubuntu@$FLOATING_IP $DEVSTACK_SSH_KEY 'source /home/ubuntu/keystonerc; NEUTRON_COUNT=$(neutron agent-list | grep -c "HyperV agent.*:-)"); if [ "$NEUTRON_COUNT" != 2 ];then neutron agent-list; exit 1;fi' 12
run_ssh_cmd_with_retry ubuntu@$FLOATING_IP $DEVSTACK_SSH_KEY 'source /home/ubuntu/keystonerc; neutron agent-list' 1
# Call create_cell after init phase is done
if [[ "$ZUUL_BRANCH" == "master" ]] || [[ "$ZUUL_BRANCH" == "stable/ocata" ]]; then
run_ssh_cmd_with_retry ubuntu@$FLOATING_IP $DEVSTACK_SSH_KEY "url=\$(grep transport_url /etc/nova/nova-dhcpbridge.conf | head -1 | awk '{print \$3}'); nova-manage cell_v2 simple_cell_setup --transport-url \$url >> /opt/stack/logs/screen/create_cell.log"
fi
# restart nova services to refresh cached cells (some tests fail because the cell is created before the compute nodes join)
run_ssh_cmd_with_retry ubuntu@$FLOATING_IP $DEVSTACK_SSH_KEY "/home/ubuntu/bin/restart_nova_services.sh"
|
cloudbase/nova-ci
|
jobs/run_initialize.sh
|
Shell
|
apache-2.0
| 15,347 |
#!/bin/sh
##########################################################################
# If not stated otherwise in this file or this component's Licenses.txt
# file the following copyright and licenses apply:
#
# Copyright 2016 RDK Management
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
#
. /etc/device.properties
export SNMP_BIN_DIR=/mnt/nfs/bin/target-snmp/bin
export MIBS=ALL
export MIBDIRS=$SNMP_BIN_DIR/../share/snmp/mibs:/usr/share/snmp/mibs
export PATH=$PATH:$SNMP_BIN_DIR:
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/mnt/nfs/bin/target-snmp/lib:/mnt/nfs/usr/lib
LOG_OUTPUT=rf_statistics_log.txt
# Refresh LOG_PATH: while in standby the logs go to the temporary path.
update_log_path()
{
    if [ -f /tmp/.standby ]; then
        LOG_PATH=$TEMP_LOG_PATH
    else
        LOG_PATH=/opt/logs
    fi
}
# Read the SNMP community string (4th field of the first line of snmpd.conf).
get_community()
{
    head -n 1 /tmp/snmpd.conf | awk '{print $4}'
}
# Query one OID on the cable modem and append a timestamped line to the log.
# $1 = human-readable label, $2 = OID.
log_rf_stat()
{
    echo "$(date) $1: $(snmpwalk -OQ -v 2c -c "$snmpCommunityVal" 192.168.100.1 "$2")" >> "$LOG_PATH/$LOG_OUTPUT"
}
update_log_path
# adding sleep of 180 sec to reduce high load condition during bootup. It is expected, The snmp commands will be executed after the AV is up with this delay.
if [ ! -f /etc/os-release ]; then
    sleep 180
fi
# Block until snmpd has written its configuration file.
while [ ! -f /tmp/snmpd.conf ]; do
    sleep 15
done
snmpCommunityVal=$(get_community)
while [ ! "$snmpCommunityVal" ]; do
    sleep 20
    snmpCommunityVal=$(get_community)
done
# The default "public" community means provisioning has not completed yet.
while [ "$snmpCommunityVal" = "public" ]; do
    update_log_path
    echo "Waiting for the Community string for SNMP communication..!" > "$LOG_PATH/$LOG_OUTPUT"
    sleep 60
    snmpCommunityVal=$(get_community)
done
echo "Dump the RF Statistics" >> "$LOG_PATH/$LOG_OUTPUT"
# Main loop: dump the DOCSIS RF statistics every 30 minutes.
while true; do
    update_log_path
    snmpCommunityVal=$(get_community)
    log_rf_stat "DownStream Channel Center Freq" DOCS-IF-MIB::docsIfDownChannelFrequency.3
    log_rf_stat "DownStream Channel Power" DOCS-IF-MIB::docsIfDownChannelPower.3
    log_rf_stat "DownStream Channel Modn" DOCS-IF-MIB::docsIfDownChannelModulation.3
    log_rf_stat "DownStream Channel Width" DOCS-IF-MIB::docsIfDownChannelWidth.3
    log_rf_stat "UpStream Channel Type" DOCS-IF-MIB::docsIfUpChannelType.4
    log_rf_stat "CM Modulation Type Status" DOCS-IF-MIB::docsIfCmStatusModulationType.2
    log_rf_stat "UpStream Channel Center Freq" DOCS-IF-MIB::docsIfUpChannelFrequency.4
    log_rf_stat "CM Tx Power" DOCS-IF-MIB::docsIfCmStatusTxPower.2
    log_rf_stat "Signal/Noise Ratio" DOCS-IF-MIB::docsIfSigQSignalNoise.3
    log_rf_stat "Modulation Error Ratio" DOCS-IF3-MIB::docsIf3SignalQualityExtRxMER.3
    sleep 1800
done
|
rdkcmf/rdk-sysint
|
lib/rdk/rfStatisticsCheck.sh
|
Shell
|
apache-2.0
| 3,944 |
#!/bin/bash
# CI build: run the Gradle build, then the Maven package (tests skipped),
# removing the root-owned build directories between/after steps.

# Run from the script's own directory; quoted so paths with spaces work,
# and checked explicitly because `set -e` is only enabled afterwards.
cd "$(dirname "$0")" || exit 1
set -e
./gradlew clean build
sudo rm -rf build
mvn clean package -Dmaven.test.skip=true
sudo rm -rf target
|
leandrocgsi/erudio-api-oauth2
|
ci.sh
|
Shell
|
apache-2.0
| 139 |
#!/bin/bash
#
# tjx@20181122
#
# Package this vim config directory, upload the tarball with qshell, and make
# sure the auto-version pre-commit hook is installed.
realpath=$(realpath "$0")
cd "$(dirname "$(dirname "$realpath")")"
tar czvf /tmp/vim-vide.tgz "$(basename "$(dirname "$realpath")")"
# NOTE(review): the remote key carries a hard-coded date stamp while the local
# archive name does not — presumably an intentionally versioned upload; confirm.
qshell rput -w bvcstatic vim-vide-20200513.tgz /tmp/vim-vide.tgz
#
# git-hooks
# Automatic version numbers of your git repository using git hooks
# https://gist.github.com/sg-s/2ddd0fe91f6037ffb1bce28be0e74d4e
#
# Install the hook only when a .git directory exists and no hook is present yet.
test -e .git && test ! -e .git/hooks/pre-commit && {
    wget -O .git/hooks/pre-commit 'http://bvcstatic.acgvideo.com/pre-commit-autoversion' && \
    chmod +x .git/hooks/pre-commit
}
|
mrytsr/vide
|
deploy.sh
|
Shell
|
apache-2.0
| 553 |
#!/usr/bin/env bash
###############################################################################
# Copyright 2017 Intuit
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
# Start the Wasabi server in the background under the JaCoCo agent so that
# functional-test coverage is written to /vagrant/target/jacoco-it.exec.
# NOTE: ${jacoco.version} is substituted by Maven resource filtering before
# this script is executed (the file lives under resources-filtered).
# FIX: the shebang was previously below the license block; a shebang is only
# honored on the first line of the file.
APPLICATION_INSTRUMENT="-javaagent:/vagrant/target/org.jacoco.agent-${jacoco.version}-runtime.jar=destfile=/vagrant/target/jacoco-it.exec,append=false"
CONSOLE_LOG=wasabi-os-console.log
MAIN_JAR=/vagrant/target/wasabi-main-*-all.jar
JAVA_OPTIONS="-server -Xmx4096m \
${APPLICATION_INSTRUMENT} \
-Dlogback.configurationFile=./logback.xml \
-Djava.util.logging.config.file=./logging.properties"
# JAVA_OPTIONS and MAIN_JAR are intentionally unquoted: the options string
# must word-split and the jar path must glob-expand.
java ${JAVA_OPTIONS} -jar ${MAIN_JAR} 1>>${CONSOLE_LOG} 2>&1 &
|
intuit/wasabi
|
modules/functional-test/src/main/resources-filtered/server-start.sh
|
Shell
|
apache-2.0
| 1,220 |
#!/usr/bin/env bash
# Wipe all epnoi data by issuing a DELETE against every API collection.
# The loop preserves the original request order (domains ... topics).
base_url="http://drinventor.dia.fi.upm.es:8080/api/0.1"
for resource in domains sources documents items parts words topics; do
    curl -X DELETE "${base_url}/${resource}"
done
|
oeg-upm/epnoi
|
vms/src/main/curl/deleteall.sh
|
Shell
|
apache-2.0
| 505 |
# Habitat plan for Elasticsearch Curator, installed from PyPI (not from a
# source tarball), so the download/verify/unpack callbacks are no-ops.
pkg_name=curator4
pkg_origin=core
pkg_version=4.1.2
pkg_description="Elasticsearch Curator helps you curate, or manage your indices."
pkg_upstream_url=https://github.com/elastic/curator
pkg_maintainer="The Habitat Maintainers <[email protected]>"
pkg_license=('Apache-2.0')
# Placeholder: nothing is actually downloaded (see do_download below).
pkg_source=nosuchfile.tgz
pkg_deps=(
  lilian/python2
)
pkg_build_deps=(
  lilian/virtualenv
)
pkg_bin_dirs=(bin)
# No source archive to fetch.
do_download() {
  return 0
}
# Nothing downloaded, so nothing to checksum.
do_verify() {
  return 0
}
# Nothing downloaded, so nothing to unpack.
do_unpack() {
  return 0
}
do_prepare() {
  # pip needs a UTF-8 locale to install some packages on minimal images.
  localedef -i en_US -f UTF-8 en_US.UTF-8
  export LC_ALL=en_US.UTF-8 LANG=en_US.UTF-8
  # Install into a virtualenv rooted at the package prefix.
  virtualenv "$pkg_prefix"
  # shellcheck source=/dev/null
  source "$pkg_prefix/bin/activate"
}
# Build happens implicitly via pip in do_install.
do_build() {
  return 0
}
do_install() {
  pip install "elasticsearch-curator==$pkg_version"
  # Write out versions of all pip packages to package
  pip freeze > "$pkg_prefix/requirements.txt"
}
|
be-plans/be
|
curator4/plan.sh
|
Shell
|
apache-2.0
| 871 |
#!/bin/bash
# Download Tomcat, build TEAM Engine from the configured tag/branch, install
# all tests listed in the CSV, and report where everything ended up.
start=$SECONDS
# change this value with the tag or branch you want to build
tag_or_branch=master
tomcat_url=http://apache.mirrors.pair.com/tomcat/tomcat-7/v7.0.62/bin/apache-tomcat-7.0.62.zip
temp_zip_name=temp.zip
temp=temp
# Resolve a directory to an absolute path. Runs in a subshell at every call
# site ($(realpath ...)), so the caller's working directory is unaffected.
realpath(){
   thedir=$1
   cd "$thedir" || exit 1
   pwd
}
rm -rf "$temp_zip_name"
wget "$tomcat_url" -O "$temp_zip_name"
unzip -oq "$temp_zip_name"
rm "$temp_zip_name"
dir_to_build=te-build
./build_te.sh --tomcat "$temp" --base-folder "$dir_to_build" --tag-or-branch "$tag_or_branch"
## Warning: catalina_base and teamengine folder are created by build_te.sh.
dir_to_build=$(realpath "$dir_to_build")
CATALINA_BASE=$dir_to_build/catalina_base
TE_BASE=$CATALINA_BASE/TE_BASE/
TE=$CATALINA_BASE/webapps/teamengine
CSV_FILE=tests_to_build.csv
##start tomcat required to build teamengine folder
"$CATALINA_BASE/bin/catalina.sh" start
sleep 5
"$CATALINA_BASE/bin/catalina.sh" stop
./install-all-tests.sh "$TE_BASE" "$TE" "$CSV_FILE"
duration=$(( SECONDS - start ))
echo "[INFO] Full installations of TEAM Engine and tests have been completed"
echo "[INFO] Time to build in seconds: $duration"
echo "[INFO] CATALINA_BASE: $CATALINA_BASE"
echo "[INFO] TE_BASE: $TE_BASE"
echo "[INFO] TE: $TE"
echo ""
echo "[INFO] to start tomcat run: $CATALINA_BASE/bin/catalina.sh start"
# FIX: removed a stray apostrophe that previously appeared in this message.
echo "[INFO] to stop tomcat run: $CATALINA_BASE/bin/catalina.sh stop"
echo ""
echo "[INFO] More information: https://github.com/opengeospatial/teamengine-builder/"
|
opengeospatial/teamengine-builder
|
te_simple_build.sh
|
Shell
|
apache-2.0
| 1,442 |
#!/usr/bin/env bash
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
set -e
# Repo root, resolved via git so the script works from any checkout location.
ROOT=$(cd "$(dirname "${BASH_SOURCE[0]}")" && cd "$(git rev-parse --show-toplevel)" && pwd)
# curl wrapper that fails loudly: runs the real curl with --fail -SL and
# exits the whole script, echoing the offending arguments, on any error.
function safe_curl() {
  real_curl="$(command -v curl)"
  set +e
  "${real_curl}" --fail -SL "$@"
  exit_code=$?
  set -e
  if [[ "${exit_code}" -ne 0 ]]; then
    echo >&2 "Curl failed with args: $*"
    exit 1
  fi
}
# shellcheck source=build-support/common.sh
source "${ROOT}/build-support/common.sh"
# Note we allow the user to predefine this value so that they may point to a specific interpreter.
export PY="${PY:-python3.6}"
interpreter_constraint="CPython==3.6.*"
# Fail fast unless a CPython 3.6 interpreter is available (die from common.sh).
if ! command -v "${PY}" >/dev/null; then
  die "Python interpreter ${PY} not discoverable on your PATH."
fi
py_major_minor=$(${PY} -c 'import sys; print(".".join(map(str, sys.version_info[0:2])))')
if [[ "${py_major_minor}" != "3.6" ]]; then
  die "Invalid interpreter. The release script requires Python 3.6 (you are using ${py_major_minor})."
fi
export PANTS_PYTHON_SETUP_INTERPRETER_CONSTRAINTS="['${interpreter_constraint}']"
# Run the pants binary from this checkout, forwarding all arguments.
function run_local_pants() {
  "${ROOT}/pants" "$@"
}
# NB: Pants core does not have the ability to change its own version, so we compute the
# suffix here and mutate the VERSION_FILE to affect the current version.
readonly VERSION_FILE="${ROOT}/src/python/pants/VERSION"
PANTS_STABLE_VERSION="$(cat "${VERSION_FILE}")"
HEAD_SHA=$(git rev-parse --verify HEAD)
# We add a non-numeric prefix 'git' before the sha in order to avoid a hex sha which happens to
# contain only [0-9] being parsed as a number -- see #7399.
# TODO(#7399): mix in the timestamp before the sha instead of 'git' to get monotonic ordering!
readonly PANTS_UNSTABLE_VERSION="${PANTS_STABLE_VERSION}+git${HEAD_SHA:0:8}"
readonly DEPLOY_DIR="${ROOT}/dist/deploy"
readonly DEPLOY_3RDPARTY_WHEELS_PATH="wheels/3rdparty/${HEAD_SHA}"
readonly DEPLOY_PANTS_WHEELS_PATH="wheels/pantsbuild.pants/${HEAD_SHA}"
readonly DEPLOY_3RDPARTY_WHEEL_DIR="${DEPLOY_DIR}/${DEPLOY_3RDPARTY_WHEELS_PATH}"
readonly DEPLOY_PANTS_WHEEL_DIR="${DEPLOY_DIR}/${DEPLOY_PANTS_WHEELS_PATH}"
# A space-separated list of pants packages to include in any pexes that are built: by default,
# only pants core is included.
: "${PANTS_PEX_PACKAGES:="pantsbuild.pants"}"
# URL from which pex release binaries can be downloaded.
: "${PEX_DOWNLOAD_PREFIX:="https://github.com/pantsbuild/pex/releases/download"}"
# shellcheck source=contrib/release_packages.sh
source "${ROOT}/contrib/release_packages.sh"
# Print the pinned requirement line for package $1 from requirements.txt;
# dies when the package is not pinned there.
function requirement() {
  package="$1"
  grep "^${package}[^A-Za-z0-9]" "${ROOT}/3rdparty/python/requirements.txt" || die "Could not find requirement for ${package}"
}
# Download the pinned pex release binary into a throwaway directory and run it
# with ${PY}, forwarding all arguments. Runs in a subshell so the trap-based
# cleanup of the temp dir cannot clobber the caller's EXIT trap.
function run_pex() {
  # TODO: Cache this in case we run pex multiple times
  (
    PEX_VERSION="$(requirement pex | sed -e "s|pex==||")"
    pexdir="$(mktemp -d -t build_pex.XXXXX)"
    trap 'rm -rf "${pexdir}"' EXIT
    pex="${pexdir}/pex"
    safe_curl -s "${PEX_DOWNLOAD_PREFIX}/v${PEX_VERSION}/pex" > "${pex}"
    "${PY}" "${pex}" "$@"
  )
}
# Run the repo's releases/packages.py helper under pex (with beautifulsoup4
# available) from the repo root.
function run_packages_script() {
  (
    cd "${ROOT}"
    run_pex "$(requirement beautifulsoup4)" -- "${ROOT}/src/python/pants/releases/packages.py" "$@"
  )
}
# Locate the built wheel for package $1 at version $2 under directory $3.
function find_pkg() {
  local -r pkg_name=$1
  local -r version=$2
  local -r search_dir=$3
  find "${search_dir}" -type f -name "${pkg_name}-${version}-*.whl"
}
# Install pantsbuild.pants==$1 via pip (remaining args are extra pip flags)
# and smoke-test it: 'pants list src::' must succeed and --version must match.
function pkg_pants_install_test() {
  local version=$1
  shift
  local PIP_ARGS=("$@")
  pip install "${PIP_ARGS[@]}" "pantsbuild.pants==${version}" || \
    die "pip install of pantsbuild.pants failed!"
  execute_packaged_pants_with_internal_backends list src:: || \
    die "'pants list src::' failed in venv!"
  [[ "$(execute_packaged_pants_with_internal_backends --version 2>/dev/null)" \
     == "${version}" ]] || die "Installed version of pants does not match requested version!"
}
# Install pantsbuild.pants.testutil==$1 and verify the module imports.
function pkg_testutil_install_test() {
  local version=$1
  shift
  local PIP_ARGS=("$@")
  pip install "${PIP_ARGS[@]}" "pantsbuild.pants.testutil==${version}" && \
    python -c "import pants.testutil"
}
#
# End of package declarations.
#
# Requirements files whose pins are turned into 3rdparty wheels below.
REQUIREMENTS_3RDPARTY_FILES=(
  "3rdparty/python/requirements.txt"
  "3rdparty/python/twitter/commons/requirements.txt"
  "contrib/python/src/python/pants/contrib/python/checks/checker/3rdparty/requirements.txt"
)
# When we do (dry-run) testing, we need to run the packaged pants.
# It doesn't have internal backend plugins so when we execute it
# at the repo build root, the root pants.toml will ask it to load
# internal backend packages and their dependencies which it doesn't have,
# and it'll fail. To solve that problem, we load the internal backend package
# dependencies into the pantsbuild.pants venv.
function execute_packaged_pants_with_internal_backends() {
  pip install --ignore-installed \
    -r pants-plugins/3rdparty/python/requirements.txt &> /dev/null && \
  pants \
    --no-verify-config \
    --pythonpath="['pants-plugins/src/python']" \
    --backend-packages="[\
        'pants.backend.codegen',\
        'pants.backend.docgen',\
        'pants.backend.graph_info',\
        'pants.backend.jvm',\
        'pants.backend.native',\
        'pants.backend.project_info',\
        'pants.backend.python',\
        'pants.cache',\
        'internal_backend.repositories',\
        'internal_backend.sitegen',\
        'internal_backend.utilities',\
      ]" \
    "$@"
}
# Restore VERSION to its committed content and clear the override env var.
# Installed as an EXIT trap by pants_version_set.
function pants_version_reset() {
  pushd "${ROOT}" > /dev/null
    git checkout -- "${VERSION_FILE}"
  popd > /dev/null
  unset _PANTS_VERSION_OVERRIDE
}
function pants_version_set() {
  # Set the version in the wheels we build by mutating `src/python/pants/VERSION` to temporarily
  # override it. Sets a `trap` to restore to HEAD on exit.
  local version=$1
  trap pants_version_reset EXIT
  echo "${version}" > "${VERSION_FILE}"
  # Also set the version reported by the prebuilt pant.pex we use to build the wheels.
  # This is so that we pass the sanity-check that verifies that the built wheels have the same
  # version as the pants version used to build them.
  # TODO: Do we actually need that sanity check?
  export _PANTS_VERSION_OVERRIDE=${version}
}
# Build wheels for every pin in REQUIREMENTS_3RDPARTY_FILES into the versioned
# 3rdparty deploy directory, inside a throwaway virtualenv.
function build_3rdparty_packages() {
  # Builds whls for 3rdparty dependencies of pants.
  local version=$1
  rm -rf "${DEPLOY_3RDPARTY_WHEEL_DIR}"
  mkdir -p "${DEPLOY_3RDPARTY_WHEEL_DIR}/${version}"
  local req_args=()
  for req_file in "${REQUIREMENTS_3RDPARTY_FILES[@]}"; do
    req_args=("${req_args[@]}" -r "${ROOT}/$req_file")
  done
  start_travis_section "3rdparty" "Building 3rdparty whls from ${REQUIREMENTS_3RDPARTY_FILES[*]}"
  activate_tmp_venv
  pip wheel --wheel-dir="${DEPLOY_3RDPARTY_WHEEL_DIR}/${version}" "${req_args[@]}"
  deactivate
  end_travis_section
}
# Build all pantsbuild.pants.* wheels at the given version (via the packages
# script) and copy them into the versioned pants deploy directory.
function build_pants_packages() {
  local version=$1
  rm -rf "${DEPLOY_PANTS_WHEEL_DIR}"
  mkdir -p "${DEPLOY_PANTS_WHEEL_DIR}/${version}"
  pants_version_set "${version}"
  start_travis_section "${NAME}" "Building packages"
  # WONTFIX: fixing the array expansion is too difficult to be worth it. See https://github.com/koalaman/shellcheck/wiki/SC2207.
  # shellcheck disable=SC2207
  packages=(
    $(run_packages_script build_and_print "${version}")
  ) || die "Failed to build packages at ${version}!"
  for package in "${packages[@]}"
  do
    (
      wheel=$(find_pkg "${package}" "${version}" "${ROOT}/dist") && \
      cp -p "${wheel}" "${DEPLOY_PANTS_WHEEL_DIR}/${version}"
    ) || die "Failed to find package ${package}-${version}!"
  done
  end_travis_section
  pants_version_reset
}
function build_fs_util() {
  start_travis_section "fs_util" "Building fs_util binary"
  # fs_util is a standalone tool which can be used to inspect and manipulate
  # Pants's engine's file store, and interact with content addressable storage
  # services which implement the Bazel remote execution API.
  # It is a useful standalone tool which people may want to consume, for
  # instance when debugging pants issues, or if they're implementing a remote
  # execution API. Accordingly, we include it in our releases.
  (
    set -e
    RUST_BACKTRACE=1 "${ROOT}/build-support/bin/native/cargo" build --release \
      --manifest-path="${ROOT}/src/rust/engine/Cargo.toml" -p fs_util
    dst_dir="${DEPLOY_DIR}/bin/fs_util/$("${ROOT}/build-support/bin/get_os.sh")/${PANTS_UNSTABLE_VERSION}"
    mkdir -p "${dst_dir}"
    cp "${ROOT}/src/rust/engine/target/release/fs_util" "${dst_dir}/"
  ) || die "Failed to build fs_util"
  end_travis_section
}
# Create a fresh virtualenv in a temp dir and activate it in the current
# shell; VENV_DIR is left set for post_install to reference.
function activate_tmp_venv() {
  # Because the venv/bin/activate script's location is dynamic and not located in a fixed
  # place, Shellcheck will not be able to find it so we tell Shellcheck to ignore the file.
  # shellcheck source=/dev/null
  VENV_DIR=$(mktemp -d -t pants.XXXXX) && \
    "${ROOT}/build-support/virtualenv" "$VENV_DIR" && \
    source "$VENV_DIR/bin/activate"
}
function pre_install() {
  start_travis_section "SetupVenv" "Setting up virtualenv"
  activate_tmp_venv
  end_travis_section
}
function post_install() {
  # this assume pre_install is called and a new temp venv activation has been done.
  if [[ "${pause_after_venv_creation}" == "true" ]]; then
    cat <<EOM
If you want to poke around with the new version of pants that has been built
and installed in a temporary virtualenv, fire up another shell window and type:
  source ${VENV_DIR}/bin/activate
  cd ${ROOT}
From there, you can run 'pants' (not './pants') to do some testing.
When you're done testing, press enter to continue.
EOM
    read -r
  fi
  deactivate
}
function install_and_test_packages() {
local VERSION=$1
shift
local PIP_ARGS=(
"${VERSION}"
"$@"
--quiet
# Prefer remote or `--find-links` packages to cache contents.
--no-cache-dir
)
export PANTS_PYTHON_REPOS_REPOS="${DEPLOY_PANTS_WHEEL_DIR}/${VERSION}"
start_travis_section "wheel_check" "Validating ${VERSION} pantsbuild.pants wheels"
activate_twine
twine check "${PANTS_PYTHON_REPOS_REPOS}"/*.whl || die "Failed to validate wheels."
deactivate
end_travis_section
pre_install || die "Failed to setup virtualenv while testing ${NAME}-${VERSION}!"
# Avoid caching plugin installs.
PANTS_PLUGIN_CACHE_DIR=$(mktemp -d -t plugins_cache.XXXXX)
export PANTS_PLUGIN_CACHE_DIR
trap 'rm -rf "${PANTS_PLUGIN_CACHE_DIR}"' EXIT
# WONTFIX: fixing the array expansion is too difficult to be worth it. See https://github.com/koalaman/shellcheck/wiki/SC2207.
# shellcheck disable=SC2207
packages=(
$(run_packages_script list | grep '.' | awk '{print $1}')
) || die "Failed to list packages!"
for package in "${packages[@]}"
do
start_travis_section "${package}" "Installing and testing package ${package}-${VERSION}"
# shellcheck disable=SC2086
eval pkg_${package##*\.}_install_test "${PIP_ARGS[@]}" || \
die "Failed to install and test package ${package}-${VERSION}!"
end_travis_section
done
unset PANTS_PYTHON_REPOS_REPOS
post_install || die "Failed to deactivate virtual env while testing ${NAME}-${VERSION}!"
}
function dry_run_install() {
  # Build a complete set of whls, and then ensure that we can install pants
  # using only whls (no source builds, no PyPI).
  local VERSION="${PANTS_UNSTABLE_VERSION}"
  build_pants_packages "${VERSION}" || return
  build_3rdparty_packages "${VERSION}" || return
  install_and_test_packages "${VERSION}" \
    --only-binary=:all: \
    -f "${DEPLOY_3RDPARTY_WHEEL_DIR}/${VERSION}" -f "${DEPLOY_PANTS_WHEEL_DIR}/${VERSION}"
}
function get_branch() {
  # Print the currently checked out branch: the line `git branch` marks with '* '.
  git branch | sed -n 's/^\* //p'
}
function check_clean_branch() {
  # Refuse to release unless we are on master or an N.M.x release branch
  # with no uncommitted changes.
  banner "Checking for a clean branch"
  pattern="^(master)|([0-9]+\.[0-9]+\.x)$"
  branch=$(get_branch)
  if [[ -n "$(git status --porcelain)" ]] || ! [[ $branch =~ $pattern ]]; then
    die "You are not on a clean branch."
  fi
}
function check_pgp() {
  # Ensure a pgp signing key is configured in git, show it, and have the
  # operator confirm it before release signing proceeds.
  banner "Checking pgp setup"
  msg=$(cat << EOM
You must configure your release signing pgp key.
You can configure the key by running:
  git config --add user.signingkey [key id]
Key id should be the id of the pgp key you have registered with pypi.
EOM
)
  # Die early if no signing key is configured at all.
  get_pgp_keyid &> /dev/null || die "${msg}"
  echo "Found the following key for release signing:"
  "$(get_pgp_program)" -k "$(get_pgp_keyid)"
  # Empty answer defaults to yes; anything not matching y/yes aborts.
  read -rp "Is this the correct key? [Yn]: " answer
  [[ "${answer:-y}" =~ [Yy]([Ee][Ss])? ]] || die "${msg}"
}
function get_pgp_keyid() {
  # Print the pgp key id configured in git for release signing; non-zero
  # status (and no output) if none is configured.
  local keyid
  keyid="$(git config --get user.signingkey)" || return
  echo "${keyid}"
}
function get_pgp_program() {
  # Print the gpg program configured in git, defaulting to plain "gpg".
  if ! git config --get gpg.program; then
    echo "gpg"
  fi
}
function tag_release() {
  # Create (force-moving if it exists) a pgp-signed git tag for the stable
  # release version and force-push it to the canonical pantsbuild/pants repo.
  release_version="${PANTS_STABLE_VERSION}" && \
  tag_name="release_${release_version}" && \
  git tag -f \
    "--local-user=$(get_pgp_keyid)" \
    -m "pantsbuild.pants release ${release_version}" \
    "${tag_name}" && \
  git push -f [email protected]:pantsbuild/pants.git "${tag_name}"
}
function publish_docs_if_master() {
  # Publish the docsite only when releasing from master; on any other branch
  # just log that publishing was skipped.
  branch=$(get_branch)
  case "${branch}" in
    master)
      "${ROOT}/build-support/bin/publish_docs.sh" -p -y
      ;;
    *)
      echo "Skipping docsite publishing on non-master branch (${branch})."
      ;;
  esac
}
function check_owners() {
  # Verify that the current user has ownership of all released packages.
  run_packages_script "check-my-ownership"
}
function reversion_whls() {
  # Reversions all whls from an input directory to an output directory.
  # Adds one pants-specific glob to match the `VERSION` file in `pantsbuild.pants`.
  # Args:
  #   $1 - directory containing the input .whl files
  #   $2 - directory to write reversioned wheels into
  #   $3 - version string to stamp into the output wheels
  local src_dir=$1
  local dest_dir=$2
  local output_version=$3
  for whl in "${src_dir}"/*.whl; do
    run_local_pants -q run src/python/pants/releases:reversion -- \
      --glob='pants/VERSION' \
      "${whl}" "${dest_dir}" "${output_version}" \
      || die "Could not reversion whl ${whl} to ${output_version}"
  done
}
# Base URL of the S3 bucket that holds prebuilt pants wheels.
readonly BINARY_BASE_URL=https://binaries.pantsbuild.org
function list_prebuilt_wheels() {
  # List prebuilt wheels as tab-separated tuples of filename and URL-encoded name.
  # Fetches the S3 bucket listing for both wheel prefixes and parses the XML
  # with an embedded python snippet (compatible with python 2 and 3).
  wheel_listing="$(mktemp -t pants.wheels.XXXXX)"
  trap 'rm -f "${wheel_listing}"' RETURN
  for wheels_path in "${DEPLOY_PANTS_WHEELS_PATH}" "${DEPLOY_3RDPARTY_WHEELS_PATH}"; do
    safe_curl -s "${BINARY_BASE_URL}/?prefix=${wheels_path}" > "${wheel_listing}"
    "${PY}" << EOF
from __future__ import print_function
import sys
import urllib
import xml.etree.ElementTree as ET
try:
  from urllib.parse import quote_plus
except ImportError:
  from urllib import quote_plus
root = ET.parse("${wheel_listing}")
ns = {'s3': 'http://s3.amazonaws.com/doc/2006-03-01/'}
for key in root.findall('s3:Contents/s3:Key', ns):
  # Because filenames may contain characters that have different meanings
  # in URLs (namely '+'), # print the key both as url-encoded and as a file path.
  print('{}\t{}'.format(key.text, quote_plus(key.text)))
EOF
  done
}
function fetch_prebuilt_wheels() {
  # Download every prebuilt wheel listed in S3 into the given directory,
  # preserving each S3 key as the relative file path under it.
  # Args:
  #   $1 - destination directory.
  # NOTE(review): after `cd "${to_dir}"`, dest is built as "${to_dir}/${file_path}";
  # if to_dir were a relative path this would resolve relative to to_dir itself.
  # Callers in this file appear to pass absolute paths — confirm before reuse.
  local -r to_dir="$1"
  banner "Fetching prebuilt wheels for ${PANTS_UNSTABLE_VERSION}"
  (
    cd "${to_dir}"
    list_prebuilt_wheels | {
      while read -r path_tuple
      do
        # First tab-separated field is the file path, second the URL-encoded key.
        local file_path
        file_path=$(echo "$path_tuple" | awk -F'\t' '{print $1}')
        local url_path
        url_path=$(echo "$path_tuple" | awk -F'\t' '{print $2}')
        echo "${BINARY_BASE_URL}/${url_path}:"
        local dest="${to_dir}/${file_path}"
        mkdir -p "$(dirname "${dest}")"
        safe_curl --progress-bar -o "${dest}" "${BINARY_BASE_URL}/${url_path}" \
          || die "Could not fetch ${dest}."
      done
    }
  )
}
function fetch_and_check_prebuilt_wheels() {
  # Fetches wheels from S3 into subdirectories of the given directory and
  # verifies every released package has the expected set of wheels.
  # Args:
  #   $1 - (optional) directory to download into; a temp dir is used if omitted.
  # Default to empty rather than erroring when invoked with no argument
  # (as the -e flag does), even if `set -u` is in effect.
  local check_dir="${1:-}"
  if [[ -z "${check_dir}" ]]
  then
    check_dir=$(mktemp -d -t pants.wheel_check.XXXXX)
    trap 'rm -rf "${check_dir}"' RETURN
  fi
  banner "Checking prebuilt wheels for ${PANTS_UNSTABLE_VERSION}"
  fetch_prebuilt_wheels "${check_dir}"
  local missing=()
  # WONTFIX: fixing the array expansion is too difficult to be worth it. See https://github.com/koalaman/shellcheck/wiki/SC2207.
  # shellcheck disable=SC2207
  RELEASE_PACKAGES=(
    $(run_packages_script list | grep '.' | awk '{print $1}')
  ) || die "Failed to get a list of packages to release!"
  for PACKAGE in "${RELEASE_PACKAGES[@]}"; do
    # WONTFIX: fixing the array expansion is too difficult to be worth it. See https://github.com/koalaman/shellcheck/wiki/SC2207.
    # shellcheck disable=SC2207
    packages=($(find_pkg "${PACKAGE}" "${PANTS_UNSTABLE_VERSION}" "${check_dir}"))
    if [ ${#packages[@]} -eq 0 ]; then
      missing+=("${PACKAGE}")
      continue
    fi
    # Confirm that if the package is not cross platform that we have whls for two platforms.
    local cross_platform=""
    for package in "${packages[@]}"; do
      if [[ "${package}" =~ -none-any.whl ]]
      then
        cross_platform="true"
      fi
    done
    # N.B. For platform-specific wheels, we expect 2 wheels: {linux,osx} * {abi3,}.
    if [ "${cross_platform}" != "true" ] && [ ${#packages[@]} -ne 2 ]; then
      # Use ${packages[*]} so the wheel list is joined into this single message
      # element; "${packages[@]}" inside the string (SC2145) split the message
      # into several array entries.
      missing+=("${PACKAGE} (expected whls for each platform: had only ${packages[*]})")
      continue
    fi
  done
  if (( ${#missing[@]} > 0 ))
  then
    echo "Failed to find prebuilt packages for:"
    for package in "${missing[@]}"
    do
      echo "  ${package}"
    done
    die
  fi
}
function adjust_wheel_platform() {
  # Renames wheels to adjust their tag from a src platform to a dst platform.
  # Args: $1 - source platform tag, $2 - destination tag, $3 - directory to scan.
  local from_tag="$1"
  local to_tag="$2"
  local search_dir="$3"
  local whl
  while IFS= read -r whl; do
    mv -f "${whl}" "${whl/${from_tag}/${to_tag}}"
  done < <(find "${search_dir}" -type f -name "*${from_tag}.whl")
}
function activate_twine() {
  # Build a fresh virtualenv containing twine and activate it in the current
  # shell. Callers are expected to run `deactivate` when finished.
  local -r venv_dir="${ROOT}/build-support/twine-deps.venv"
  # Always start from scratch so we pick up the current twine release.
  rm -rf "${venv_dir}"
  "${ROOT}/build-support/virtualenv" "${venv_dir}"
  # Because the venv/bin/activate script's location is dynamic and not located in a fixed
  # place, Shellcheck will not be able to find it so we tell Shellcheck to ignore the file.
  # shellcheck source=/dev/null
  source "${venv_dir}/bin/activate"
  pip install twine
}
function execute_pex() {
  # Run pex restricted to the locally built wheel dirs: never build from
  # source, never reach PyPI, never use a cache. Extra args are forwarded.
  local pex_args=(
    --no-build
    --no-pypi
    --disable-cache
    -f "${DEPLOY_PANTS_WHEEL_DIR}/${PANTS_UNSTABLE_VERSION}"
    -f "${DEPLOY_3RDPARTY_WHEEL_DIR}/${PANTS_UNSTABLE_VERSION}"
  )
  run_pex "${pex_args[@]}" "$@"
}
function build_pex() {
  # Builds a pex from the current UNSTABLE version.
  # If $1 == "build", builds a pex just for this platform, from source.
  # If $1 == "fetch", fetches the linux and OSX wheels which were built on travis.
  local mode="$1"
  local linux_platform_noabi="linux_x86_64"
  local osx_platform_noabi="macosx_10.11_x86_64"
  # NOTE(review): dest_suffix is not declared `local` — it leaks to the global
  # scope; looks unintentional but is harmless within this script. Confirm.
  dest_suffix="py36.pex"
  case "${mode}" in
    build)
      case "$(uname)" in
        # NB: When building locally, we use a platform that does not refer to the ABI version, to
        # avoid needing to introspect the python ABI for this machine.
        Darwin)
          local platform="${osx_platform_noabi}"
          ;;
        Linux)
          local platform="${linux_platform_noabi}"
          ;;
        *)
          echo >&2 "Unknown uname"
          exit 1
          ;;
      esac
      local platforms=("${platform}")
      local dest="${ROOT}/dist/pants.${PANTS_UNSTABLE_VERSION}.${platform}.${dest_suffix}"
      local stable_dest="${DEPLOY_DIR}/pex/pants.${PANTS_STABLE_VERSION}.${platform}.${dest_suffix}"
      ;;
    fetch)
      local platforms=()
      # TODO: once we add Python 3.7 PEX support, which requires first building Py37 wheels,
      # we'll want to release one big flexible Pex that works with Python 3.6+.
      abis=("cp-36-m")
      # Cross product of {linux,osx} platforms and supported ABIs.
      for platform in "${linux_platform_noabi}" "${osx_platform_noabi}"; do
        for abi in "${abis[@]}"; do
          platforms=("${platforms[@]}" "${platform}-${abi}")
        done
      done
      local dest="${ROOT}/dist/pants.${PANTS_UNSTABLE_VERSION}.${dest_suffix}"
      local stable_dest="${DEPLOY_DIR}/pex/pants.${PANTS_STABLE_VERSION}.${dest_suffix}"
      ;;
    *)
      echo >&2 "Bad build_pex mode ${mode}"
      exit 1
      ;;
  esac
  # Start from a clean deploy directory.
  rm -rf "${DEPLOY_DIR}"
  mkdir -p "${DEPLOY_DIR}"
  if [[ "${mode}" == "fetch" ]]; then
    fetch_and_check_prebuilt_wheels "${DEPLOY_DIR}"
  else
    build_pants_packages "${PANTS_UNSTABLE_VERSION}"
    build_3rdparty_packages "${PANTS_UNSTABLE_VERSION}"
  fi
  # Pin each pex-bundled package to the unstable version being built.
  local requirements=()
  for pkg_name in $PANTS_PEX_PACKAGES; do
    requirements=("${requirements[@]}" "${pkg_name}==${PANTS_UNSTABLE_VERSION}")
  done
  local platform_flags=()
  for platform in "${platforms[@]}"; do
    platform_flags=("${platform_flags[@]}" "--platform=${platform}")
  done
  # Pants depends on twitter.common libraries that trigger pex warnings for not properly declaring
  # their dependency on setuptools (for namespace package support). To prevent these known warnings
  # from polluting stderr we pass `--no-emit-warnings`.
  # (interpreter_constraint is defined at file scope, outside this function.)
  execute_pex \
    -o "${dest}" \
    --no-emit-warnings \
    --script=pants \
    --interpreter-constraint="${interpreter_constraint}" \
    "${platform_flags[@]}" \
    "${requirements[@]}"
  if [[ "${PANTS_PEX_RELEASE}" == "stable" ]]; then
    mkdir -p "$(dirname "${stable_dest}")"
    cp "${dest}" "${stable_dest}"
  fi
  banner "Successfully built ${dest}"
}
function publish_packages() {
  # Publish the stable-release wheels to PyPI: fetch the prebuilt unstable
  # wheels, retag linux wheels as manylinux, reversion them to the stable
  # version, then sign and upload everything with twine.
  rm -rf "${DEPLOY_PANTS_WHEEL_DIR}"
  mkdir -p "${DEPLOY_PANTS_WHEEL_DIR}/${PANTS_STABLE_VERSION}"
  start_travis_section "Publishing" "Publishing packages for ${PANTS_STABLE_VERSION}"
  # Fetch unstable wheels, rename any linux whls to manylinux, and reversion them
  # from PANTS_UNSTABLE_VERSION to PANTS_STABLE_VERSION
  fetch_and_check_prebuilt_wheels "${DEPLOY_DIR}"
  adjust_wheel_platform "linux_x86_64" "manylinux1_x86_64" \
    "${DEPLOY_PANTS_WHEEL_DIR}/${PANTS_UNSTABLE_VERSION}"
  reversion_whls \
    "${DEPLOY_PANTS_WHEEL_DIR}/${PANTS_UNSTABLE_VERSION}" \
    "${DEPLOY_PANTS_WHEEL_DIR}/${PANTS_STABLE_VERSION}" \
    "${PANTS_STABLE_VERSION}"
  activate_twine
  # Ensure the twine venv is deactivated however this function exits.
  trap deactivate RETURN
  twine upload --sign "--sign-with=$(get_pgp_program)" "--identity=$(get_pgp_keyid)" \
    "${DEPLOY_PANTS_WHEEL_DIR}/${PANTS_STABLE_VERSION}"/*.whl
  end_travis_section
}
# The full set of single-letter flags accepted by this script (getopts spec).
_OPTS="dhnftlowepq"
function usage() {
  # Print the help text. Called with no args it exits 0; called with args it
  # dies, using them as the error message (for invalid-option reporting).
  echo "With no options all packages are built, smoke tested and published to"
  echo "PyPI.  Credentials are needed for this as described in the"
  echo "release docs: http://pantsbuild.org/release.html"
  echo
  echo "Usage: $0 [-d] (-h|-n|-f|-t|-l|-o|-w|-e|-p|-q)"
  echo " -d  Enables debug mode (verbose output, script pauses after venv creation)"
  echo " -h  Prints out this help message."
  echo " -n  Performs a release dry run."
  echo "       All package distributions will be built, installed locally in"
  echo "       an ephemeral virtualenv and exercised to validate basic"
  echo "       functioning."
  echo " -f  Build the fs_util binary."
  echo " -t  Tests a live release."
  echo "       Ensures the latest packages have been propagated to PyPI"
  echo "       and can be installed in an ephemeral virtualenv."
  echo " -l  Lists all pantsbuild packages that this script releases."
  echo " -o  Lists all pantsbuild package owners."
  echo " -w  List pre-built wheels for this release."
  echo " -e  Check that wheels are prebuilt for this release."
  echo " -p  Build a pex from prebuilt wheels for this release."
  echo " -q  Build a pex which only works on the host platform, using the code as exists on disk."
  echo
  echo "All options (except for '-d') are mutually exclusive."
  if (( $# > 0 )); then
    die "$@"
  else
    exit 0
  fi
}
# Parse command line flags. Most flags perform a single action and exit
# immediately; -d/-n/-t only set mode variables consumed by the dispatch below.
while getopts ":${_OPTS}" opt; do
  case ${opt} in
    h) usage ;;
    d) debug="true" ;;
    n) dry_run="true" ;;
    f) build_fs_util ; exit $? ;;
    t) test_release="true" ;;
    l) run_packages_script list ; exit $? ;;
    o) run_packages_script list-owners ; exit $? ;;
    w) list_prebuilt_wheels ; exit $? ;;
    e) fetch_and_check_prebuilt_wheels ; exit $? ;;
    p) build_pex fetch ; exit $? ;;
    q) build_pex build ; exit $? ;;
    *) usage "Invalid option: -${OPTARG}" ;;
  esac
done
# Debug mode: trace every command and pause after the temporary venv is built.
if [[ "${debug}" == "true" ]]; then
  set -x
  pause_after_venv_creation="true"
fi
# Dispatch on the selected mode: dry run, live-release test, or full publish.
if [[ "${dry_run}" == "true" && "${test_release}" == "true" ]]; then
  usage "The dry run and test options are mutually exclusive, pick one."
elif [[ "${dry_run}" == "true" ]]; then
  banner "Performing a dry run release"
  (
    dry_run_install && \
    banner "Dry run release succeeded"
  ) || die "Dry run release failed."
elif [[ "${test_release}" == "true" ]]; then
  banner "Installing and testing the latest released packages"
  (
    install_and_test_packages "${PANTS_STABLE_VERSION}" && \
    banner "Successfully installed and tested the latest released packages"
  ) || die "Failed to install and test the latest released packages."
else
  # Default: the real release — requires a clean branch, pgp key, and ownership.
  banner "Releasing packages to PyPI"
  (
    check_clean_branch && check_pgp && check_owners && \
    publish_packages && tag_release && publish_docs_if_master && \
    banner "Successfully released packages to PyPI"
  ) || die "Failed to release packages to PyPI."
fi
|
wisechengyi/pants
|
build-support/bin/release.sh
|
Shell
|
apache-2.0
| 24,983 |
#!/bin/bash -e
# -----------------------------------------------------------------------------
#
# Package : yargs
# Version : v12.0.5
# Source repo : https://github.com/yargs/yargs
# Tested on : UBI8.5
# Language : Node
# Travis-Check : True
# Script License: Apache License, Version 2 or later
# Maintainer : Nailusha Potnuru <[email protected]>
#
# Disclaimer: This script has been tested in root mode on given
# ========== platform using the mentioned version of the package.
# It may not work as expected with newer versions of the
# package and/or distribution. In such case, please
# contact "Maintainer" of this script.
#
# ----------------------------------------------------------------------------
#Exit immediately if a command exits with a non-zero status.
set -e
PACKAGE_NAME=yargs
#PACKAGE_VERSION is configurable can be passed as an argument.
PACKAGE_VERSION=${1:-v12.0.5}
PACKAGE_URL=https://github.com/yargs/yargs
yum -y update && yum install -y npm git gcc jq
# Read the distro name straight from /etc/os-release (no `cat |` needed).
OS_NAME=$(grep ^PRETTY_NAME /etc/os-release | cut -d= -f2)
#Check if package exists
if [ -d "$PACKAGE_NAME" ] ; then
    rm -rf "$PACKAGE_NAME"
    echo "$PACKAGE_NAME | $PACKAGE_VERSION | $OS_NAME | GitHub | Removed existing package if any"
fi
if ! git clone "$PACKAGE_URL" "$PACKAGE_NAME"; then
    echo "------------------$PACKAGE_NAME:clone_fails---------------------------------------"
    echo "$PACKAGE_URL $PACKAGE_NAME"
    echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | GitHub | Fail | Clone_Fails"
    exit 0
fi
cd "$PACKAGE_NAME"
git checkout "$PACKAGE_VERSION"
PACKAGE_VERSION=$(jq -r ".version" package.json)
# run the test command from test.sh
# Group the whole install/audit sequence so that `!` negates the combined
# result. Previously `!` applied only to `npm install`, so a failed install
# followed by a successful `npm audit fix` skipped this error branch entirely.
if ! (npm install && npm audit fix && npm audit fix --force); then
    echo "------------------$PACKAGE_NAME:install_fails-------------------------------------"
    echo "$PACKAGE_URL $PACKAGE_NAME"
    echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | GitHub | Fail | Install_Fails"
    exit 1
fi
if ! npm test; then
    echo "------------------$PACKAGE_NAME:install_success_but_test_fails---------------------"
    echo "$PACKAGE_URL $PACKAGE_NAME"
    echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | GitHub | Fail | Install_success_but_test_Fails"
    exit 1
else
    echo "------------------$PACKAGE_NAME:install_&_test_both_success-------------------------"
    echo "$PACKAGE_URL $PACKAGE_NAME"
    echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | GitHub | Pass | Both_Install_and_Test_Success"
    exit 0
fi
|
ppc64le/build-scripts
|
y/yargs/yargs_v12.0.5_ubi8.5.sh
|
Shell
|
apache-2.0
| 2,680 |
#!/bin/bash
# Configure Wordpress against a remote MySQL node, start Apache, and run the
# Wordpress web installer. Expects AUDREY_VAR_http_* variables in the environment.

# Dump the working environment to a log file (useful for debugging)
env > /var/log/audrey_environment.log
# Add the MySQL node's settings to the Wordpress configuration file
sed -i -e "s/database_name_here/${AUDREY_VAR_http_wp_name}/" /etc/wordpress/wp-config.php
sed -i -e "s/username_here/${AUDREY_VAR_http_wp_user}/" /etc/wordpress/wp-config.php
sed -i -e "s/password_here/${AUDREY_VAR_http_wp_pw}/" /etc/wordpress/wp-config.php
sed -i -e "s/localhost/${AUDREY_VAR_http_mysql_ip}/" /etc/wordpress/wp-config.php
# Let Apache use remote databases (an SELinux permission)
/usr/sbin/setsebool -P httpd_can_network_connect_db 1
# Start the Apache http daemon
/sbin/service httpd start
# Figure out which virtualisation platform we're running on
if [ -f /etc/sysconfig/cloud-info ]
then
  source /etc/sysconfig/cloud-info
fi
# Retrieve an IP address people can connect to
if [ "$CLOUD_TYPE" = "ec2" ]
then
  # We're running in EC2, so get the public address
  HOSTADDRESS=$(/usr/bin/facter ec2_public_hostname)
else
  # We're not running in EC2, so just grab any ip address
  HOSTADDRESS=$(/usr/bin/facter ipaddress)
fi
# Run the Wordpress installer, passing all the values it needs
curl -d "weblog_title=AudreyFTW&user_name=admin&admin_password=admin&admin_password2=admin&[email protected]&blog_public=0" "http://${HOSTADDRESS}/wordpress/wp-admin/install.php?step=2" > /var/log/audrey_curl.log
# Print useful info to the Audrey log (quoted so the URL is never word-split)
echo "Wordpress should now be available at http://${HOSTADDRESS}/wordpress"
|
aeolusproject/audrey
|
examples/wordpress/wordpress-http.sh
|
Shell
|
apache-2.0
| 1,535 |
function fetch() {
  # Check out the webrtc source via gclient, syncing twice: first configured
  # for mac only, then with ios added. The target_os line is swapped between
  # syncs by deleting the last line of .gclient and appending a new one.
  echo "-- fetching webrtc"
  gclient config --name src http://webrtc.googlecode.com/svn/trunk/
  echo "target_os = ['mac']" >> .gclient
  gclient sync
  # BSD sed in-place (empty backup suffix): delete the last line of .gclient.
  sed -i "" '$d' .gclient
  echo "target_os = ['ios', 'mac']" >> .gclient
  gclient sync
  echo "-- webrtc has been sucessfully fetched"
}
function wrbase() {
  # Base GYP environment shared by all webrtc build flavours.
  # NOTE(review): "build_with_libjinglth_chromium=0" looks garbled — it was
  # presumably meant to be two defines along the lines of
  # "build_with_libjingle=1 build_with_chromium=0"; confirm against the
  # webrtc build documentation before relying on this value.
  export GYP_DEFINES="build_with_libjinglth_chromium=0 libjingle_objc=1"
  export GYP_GENERATORS="ninja"
}
function wrios() {
  # GYP environment for a cross-compiled armv7 iOS device build; extends the
  # base settings from wrbase and directs output to out_ios.
  wrbase
  export GYP_DEFINES="$GYP_DEFINES OS=ios target_arch=armv7"
  export GYP_GENERATOR_FLAGS="$GYP_GENERATOR_FLAGS output_dir=out_ios"
  export GYP_CROSSCOMPILE=1
}
function buildios() {
  # Build the AppRTCDemo target for iOS (Debug-iphoneos) with ninja, after
  # refreshing the GYP environment (wrios) and regenerating build files.
  echo "-- building webrtc ios"
  pushd src
  wrios
  gclient runhooks
  ninja -v -j 4 -C out_ios/Debug-iphoneos AppRTCDemo
  popd
  echo "-- webrtc has been sucessfully built"
}
function fail() {
  # Report a failed build and abort the whole script with a non-zero status.
  printf '%s\n' "*** webrtc build failed"
  exit 1
}
# Top-level driver: fetch sources, set up the iOS build environment, then
# build. (buildios calls wrios again itself, so the standalone wrios call is
# redundant but harmless.)
fetch || fail
wrios || fail
buildios || fail
|
limtbk/WebRTC_demoapp
|
makeall-iosdevice.sh
|
Shell
|
apache-2.0
| 897 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.