code
stringlengths 2
1.05M
| repo_name
stringlengths 5
110
| path
stringlengths 3
922
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 2
1.05M
|
---|---|---|---|---|---|
# UPDATED FOR UBUNTU 18.04
# Post-install setup: system update, dev tools, desktop tweaks, dotfiles,
# powerline fonts, zsh + oh-my-zsh, Vundle vim plugins, and ack.
sudo apt update
sudo apt upgrade
# REBOOT THE VM
# INSTALL SOME TOOLS
sudo apt install curl git vim tmux
# OPEN TERMINAL
# ACTION: REMOVE DOCK ITEMS
# ACTION: LOCK TERMINAL TO LAUNCHER
# LOCK SCREEN time in seconds
gsettings set org.gnome.desktop.session idle-delay 100000
# FIX: the schema is org.gnome.desktop.interface -- the original said
# "interfacer", which fails with "No such schema".
gsettings set org.gnome.desktop.interface enable-animations false
# CHANGE TIME ZONE
# NOTE(review): "EST" is a fixed-offset zone with no DST handling; a region
# zone such as America/New_York is usually what is wanted -- confirm intent.
sudo timedatectl set-timezone EST
# update system
sudo apt update
sudo apt upgrade -y
# Download configuration files for vim, tmux and bash
cd ~/Downloads
git clone https://github.com/jpwco/tools_initial_config.git
mkdir ~/.alias ~/.functions
cd tools_initial_config
cp ~/Downloads/tools_initial_config/alias/.alias ~/.alias/.alias
cp ~/Downloads/tools_initial_config/vim/.vimrc ~/.vimrc
cp ~/Downloads/tools_initial_config/tmux/.tmux.conf ~/.tmux.conf
cp ~/Downloads/tools_initial_config/functions/.functions ~/.functions/.functions
echo 'source ~/.alias/.alias' >> ~/.bashrc
echo 'source ~/.functions/.functions' >> ~/.bashrc
# FONTS
cd ~/Downloads/
git clone https://github.com/powerline/fonts.git --depth=1
# install
cd fonts
./install.sh
# clean-up a bit
cd ..
rm -rf fonts
fc-cache -vf ~/.fonts/
# CHANGE TERMINAL FONT
# ZSH
sudo apt install -y zsh
# NOTE(review): the oh-my-zsh repo moved from robbyrussell/ to ohmyzsh/;
# GitHub redirects, so this URL still works.
wget https://github.com/robbyrussell/oh-my-zsh/raw/master/tools/install.sh -O - | zsh
cp ~/Downloads/tools_initial_config/zshrc/.zshrc ~/.zshrc
# ON UBUNTU, CHANGE zsh directory from 'Users' to 'home'
echo 'source ~/.alias/.alias' >> ~/.zshrc
echo 'source ~/.functions/.functions' >> ~/.zshrc
chsh -s $(which zsh)
# Setup Vundle
git clone https://github.com/VundleVim/Vundle.vim.git ~/.vim/bundle/Vundle.vim
# Install all vim plugins
vim +PluginInstall +qall
# ACK
mkdir ~/bin
curl https://beyondgrep.com/ack-v3.3.1 > ~/bin/ack && chmod 0755 ~/bin/ack
|
jpwco/tools_initial_config
|
setup.sh
|
Shell
|
mit
| 1,822 |
# Install the bbcp file-copy tool into /usr/local/bin (abort on failure).
chmod +x bbcp && \
cp bbcp /usr/local/bin/ || exit 1
# Unpack nmon, install the RHEL 7.2 binary, and point the "nmon" symlink at it.
tar xf nmon16g_x86.tar.gz
chmod +x nmon16g_x86_rhel72 && \
cp nmon16g_x86_rhel72 /usr/local/bin/ && \
rm -f /usr/local/bin/nmon && \
ln -s /usr/local/bin/nmon16g_x86_rhel72 /usr/local/bin/nmon || exit 1
# Offline install of diagnostic/network tools plus any local *.rpm files;
# epel/softwarecollections repos are disabled so only base repos are used.
yum --disablerepo=epel,softwarecollections -y install \
screen hdparm dstat sysstat procps \
tcpdump net-tools arpwatch iproute ethtool traceroute iptstate nmap nc \
lsof psacct strace \
*.rpm
|
Zor-X-L/offline-utils
|
centos7-utils/install.sh
|
Shell
|
mit
| 455 |
#!/bin/sh
# Regenerate the GNU autotools build system (run after a fresh checkout or
# when configure.ac / Makefile.am change); produces ./configure and
# Makefile.in templates.
# You need autoconf 2.5x, preferably 2.57 or later
# You need automake 1.7 or later. 1.6 might work.
set -e
aclocal # -I m4
autoheader
automake --gnu --add-missing --copy
autoconf
|
jgarzik/pgdb
|
autogen.sh
|
Shell
|
mit
| 193 |
# Slackware post-install script: register the gksu GConf schemas inside the
# package root ("chroot .").
GCONF_CONFIG_SOURCE="xml::etc/gconf/gconf.xml.defaults" \
chroot . gconftool-2 --makefile-install-rule \
/etc/gconf/schemas/gksu.schemas \
1>/dev/null 2>/dev/null
# Refresh the desktop and MIME caches when the tools are present.
# FIX: "&>" is a bash-ism; doinst.sh is executed by /bin/sh, where
# "cmd &> file" parses as "cmd & > file" (backgrounds the command and
# truncates the file). Use POSIX redirection instead.
if [ -x /usr/bin/update-desktop-database ]; then
/usr/bin/update-desktop-database > /dev/null 2>&1
fi
if [ -x /usr/bin/update-mime-database ]; then
/usr/bin/update-mime-database /usr/share/mime > /dev/null 2>&1
fi
|
panosmdma/SlackOnly-SlackBuilds
|
libraries/libgksu/doinst.sh
|
Shell
|
mit
| 384 |
#!/bin/bash
# Start a MongoDB node for a single-node replica set inside a container.
# Pause briefly, initiate the replica set, then run mongod in the
# foreground; exec makes mongod take over this shell's PID so container
# signals (SIGTERM on stop) reach it directly.
sleep 2
./replInit.sh
sleep 2
# FIX: the original built the command in a string and ran `exec $cmd`;
# the unquoted expansion is subject to word splitting and globbing.
# Exec the command directly instead.
exec mongod --replSet rs0 --storageEngine wiredTiger
|
dan335/docker-mongodb-meteor
|
mongo.sh
|
Shell
|
mit
| 107 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for DSA-2899-1
#
# Security announcement date: 2014-04-09 00:00:00 UTC
# Script generation date: 2017-01-01 21:06:53 UTC
#
# Operating System: Debian 7 (Wheezy)
# Architecture: i386
#
# Vulnerable packages fix on version:
# - openafs:1.6.1-3+deb7u2
#
# Last versions recommended by security team:
# - openafs:1.6.1-3+deb7u7
#
# CVE List:
# - CVE-2014-0159
# - CVE-2014-2852
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
# --only-upgrade: update openafs to the fixed version only if it is already installed.
sudo apt-get install --only-upgrade openafs=1.6.1-3+deb7u7 -y
|
Cyberwatch/cbw-security-fixes
|
Debian_7_(Wheezy)/i386/2014/DSA-2899-1.sh
|
Shell
|
mit
| 632 |
#!/bin/bash
# Open a local SOCKS proxy on port 1223 via SSH dynamic port forwarding.
# Flags: -q quiet, -T no TTY, -f background after auth, -n stdin from
# /dev/null, -N no remote command, -D 1223 local SOCKS listen port.
# Alternative hosts are kept commented out for quick switching:
#ssh -qTfnN -D 1223 mtx@2001:cc0:202c:37:2e0:4cff:fe00:af2a
#ssh -qTfnN -D 1223 maotx@nova_v6 #nova
#ssh -qTfnN -D 1223 mtx@2001:cc0:2020:3002:ba27:ebff:feb7:772c # Pi
ssh -qTfnN -D 1223 maotx@ccg_hp
#ssh -qTfnN -D 1223 mtx@aliyun -p 2223
|
POFK/utilTool
|
src/proxy_ssh.sh
|
Shell
|
mit
| 253 |
#!/usr/bin/env bash
# pyenv installer: clone pyenv and its standard plugins into $PYENV_ROOT.
set -e
[ -n "$PYENV_DEBUG" ] && set -x

# Default install location unless the caller overrides PYENV_ROOT.
if [ -z "$PYENV_ROOT" ]; then
  export PYENV_ROOT="${HOME}/.pyenv"
fi

# colorize SGR-CODE TEXT: wrap TEXT in an ANSI color only when stdout is a TTY.
colorize() {
  if [ -t 1 ]; then printf "\e[%sm%s\e[m" "$1" "$2"
  else echo -n "$2"
  fi
}

# Refuse to install over an existing $PYENV_ROOT.
if [ -d "${PYENV_ROOT}" ]; then
  { echo
    colorize 1 "WARNING"
    echo ": Can not proceed with installation. Kindly remove the '${PYENV_ROOT}' directory first."
    echo
  } >&2
  exit 1
fi

failed_checkout() {
  echo "Failed to git clone $1"
  # FIX: "exit -1" is not a valid exit status (POSIX requires 0-255) and is
  # rejected outright by some shells; use 1.
  exit 1
}

# checkout URL DIR: shallow-clone URL into DIR unless DIR already exists.
checkout() {
  [ -d "$2" ] || git clone --depth 1 "$1" "$2" || failed_checkout "$1"
}

if ! command -v git 1>/dev/null 2>&1; then
  echo "pyenv: Git is not installed, can't continue." >&2
  exit 1
fi

# NOTE(review): the unauthenticated git:// protocol has been disabled by
# GitHub; USE_GIT_URI is kept only for backward compatibility.
if [ -n "${USE_GIT_URI}" ]; then
  GITHUB="git://github.com"
else
  GITHUB="https://github.com"
fi

checkout "${GITHUB}/pyenv/pyenv.git"            "${PYENV_ROOT}"
checkout "${GITHUB}/pyenv/pyenv-doctor.git"     "${PYENV_ROOT}/plugins/pyenv-doctor"
checkout "${GITHUB}/pyenv/pyenv-installer.git"  "${PYENV_ROOT}/plugins/pyenv-installer"
checkout "${GITHUB}/pyenv/pyenv-update.git"     "${PYENV_ROOT}/plugins/pyenv-update"
checkout "${GITHUB}/pyenv/pyenv-virtualenv.git" "${PYENV_ROOT}/plugins/pyenv-virtualenv"
checkout "${GITHUB}/pyenv/pyenv-which-ext.git"  "${PYENV_ROOT}/plugins/pyenv-which-ext"

# If pyenv is not yet on PATH, print the shell-integration help.
if ! command -v pyenv 1>/dev/null; then
  { echo
    colorize 1 "WARNING"
    echo ": seems you still have not added 'pyenv' to the load path."
    echo
  } >&2
  { # Without args, `init` commands print installation help
    "${PYENV_ROOT}/bin/pyenv" init || true
    "${PYENV_ROOT}/bin/pyenv" virtualenv-init || true
  } >&2
fi
|
allanbreyes/dotfiles
|
python/install.sh
|
Shell
|
mit
| 1,687 |
# Build a universal (fat) static FayeCpp+wolfSSL framework for iOS.
rm -rf FayeCpp.framework
rm -f *.a

# build_arch ARCH SDK [EXTRA-XCODEBUILD-ARGS...]
# One clean Release build per architecture; every resulting .o is archived
# into all-ARCH.a, then the intermediate build dirs are removed.
# (Refactor: replaces five copy-pasted, identical build stanzas.)
build_arch() {
  arch="$1"
  sdk="$2"
  shift 2
  xcodebuild -configuration Release -project fayecpp.xcodeproj -target fayecpp_wolfssl -arch "$arch" -sdk "$sdk" OBJROOT=obj SYMROOT=sym "$@" clean build
  cd obj
  find . -type f -iregex '.*\.o$' | xargs -I @ ar -q -v "../all-$arch.a" @
  cd ..
  rm -rf obj
  rm -rf sym
}

build_arch i386   iphonesimulator
build_arch x86_64 iphonesimulator
build_arch armv7  iphoneos
build_arch armv7s iphoneos
build_arch arm64  iphoneos IPHONEOS_DEPLOYMENT_TARGET=7.0.0

# Merge the per-arch archives into one universal static library.
lipo -create all-i386.a all-x86_64.a all-armv7.a all-armv7s.a all-arm64.a -output all.a
rm -f all-i386.a
rm -f all-x86_64.a
rm -f all-armv7.a
rm -f all-armv7s.a
rm -f all-arm64.a

# Assemble the versioned framework bundle layout around the fat library.
rm -rf FayeCpp.framework
mkdir -p FayeCpp.framework/Versions/A/Headers
cp ../../fayecpp.h FayeCpp.framework/Versions/A/Headers/
cp ../../contrib/objc/FayeCppClient.h FayeCpp.framework/Versions/A/Headers/
mkdir -p FayeCpp.framework/Versions/A/Resources
cp all.a FayeCpp
cp FayeCpp FayeCpp.framework/Versions/A
ln -s A FayeCpp.framework/Versions/Current
ln -s Versions/Current/Headers FayeCpp.framework/Headers
ln -s Versions/Current/Resources FayeCpp.framework/Resources
ln -s Versions/Current/FayeCpp FayeCpp.framework/FayeCpp
rm -f FayeCpp
rm -f all.a
|
OlehKulykov/FayeCpp
|
builds/ios/build_ios_framework_ssl.sh
|
Shell
|
mit
| 2,094 |
#!/bin/sh
# Build step: copy the Cloud Foundry manifest into the dist directory,
# logging entry and exit markers.
printf '%s\n' "Entered"
cp "manifest.yml" "dist"
printf '%s\n' "Exited"
|
ajaykIoT/wcs-chat
|
.build.sh
|
Shell
|
mit
| 63 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for USN-2814-1
#
# Security announcement date: 2015-11-18 00:00:00 UTC
# Script generation date: 2017-01-01 21:04:56 UTC
#
# Operating System: Ubuntu 12.04 LTS
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - nvidia-331-updates:340.96-0ubuntu0.12.04.1
# - nvidia-304:304.131-0ubuntu0.12.04.1
# - nvidia-304-updates:304.131-0ubuntu0.12.04.1
# - nvidia-340-updates:340.96-0ubuntu0.12.04.1
# - nvidia-340:340.96-0ubuntu0.12.04.1
#
# Last versions recommended by security team:
# - nvidia-331-updates:340.96-0ubuntu0.12.04.1
# - nvidia-304:304.131-0ubuntu0.12.04.1
# - nvidia-304-updates:304.131-0ubuntu0.12.04.1
# - nvidia-340-updates:340.96-0ubuntu0.12.04.1
# - nvidia-340:340.96-0ubuntu0.12.04.1
#
# CVE List:
# - CVE-2015-7869
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
# --only-upgrade: each package is updated only if it is already installed.
sudo apt-get install --only-upgrade nvidia-331-updates=340.96-0ubuntu0.12.04.1 -y
sudo apt-get install --only-upgrade nvidia-304=304.131-0ubuntu0.12.04.1 -y
sudo apt-get install --only-upgrade nvidia-304-updates=304.131-0ubuntu0.12.04.1 -y
sudo apt-get install --only-upgrade nvidia-340-updates=340.96-0ubuntu0.12.04.1 -y
sudo apt-get install --only-upgrade nvidia-340=340.96-0ubuntu0.12.04.1 -y
|
Cyberwatch/cbw-security-fixes
|
Ubuntu_12.04_LTS/x86_64/2015/USN-2814-1.sh
|
Shell
|
mit
| 1,351 |
#!/bin/bash
# Build the static site with Harp, then restore the output files that must
# never change (GitHub Pages custom-domain CNAME and the .nojekyll marker),
# since `harp compile` would otherwise clobber them.
# compile
harp compile
# discard CNAME and .nojekyll
git checkout -- www/CNAME
git checkout -- www/.nojekyll
|
fedosejev/fedosejev.com
|
compile.sh
|
Shell
|
mit
| 122 |
# Run the MATLAB-Runtime-backed "wave" Java class (from WaveReq.jar) with a
# literal test request: the id 'test', a long list of zero-valued wave
# parameters, and trailing run settings.
# NOTE(review): the meaning of each positional argument is defined inside
# WaveReq.jar and is not visible here -- confirm against its source before
# editing any value.
java -classpath .:"/usr/local/MATLAB/MATLAB_Runtime/v901/toolbox/javabuilder/jar/javabuilder.jar":./WaveReq.jar wave \
'test' 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 6.0 6.0 0 0 550 256 20 0 1 0 0 0 0
|
vicpark/icuc
|
run0.sh
|
Shell
|
mit
| 423 |
#!/bin/bash
# Container start script for the MPContribs API.
# Stagger startup by $DEPLOYMENT minutes (presumably a per-deployment index
# set by the orchestrator -- TODO confirm) so instances don't start at once.
zzz=$(($DEPLOYMENT*60))
echo "$SUPERVISOR_PROCESS_NAME: waiting for $zzz seconds before start..."
sleep $zzz
# Ensure a ~/.pmgrc.yaml exists; the dummy key presumably satisfies a
# pymatgen-style config check -- verify against the app's imports.
pmgrc=$HOME/.pmgrc.yaml
[[ ! -e $pmgrc ]] && echo "PMG_DUMMY_VAR: dummy" > $pmgrc
# Block until the Jupyter gateway is reachable (50 s timeout), then exec
# gunicorn under Datadog tracing so it receives container signals directly.
exec wait-for-it.sh $JUPYTER_GATEWAY_HOST -q -t 50 -- ddtrace-run gunicorn "mpcontribs.api:create_app()"
|
materialsproject/MPContribs
|
mpcontribs-api/scripts/start.sh
|
Shell
|
mit
| 311 |
#!/bin/bash
# Smoke-test the article API on localhost: list, create one article, list again.
PORT=3000

# Issue a JSON request against the given URL (plus any extra curl args).
api() {
  curl -H 'Content-Type: application/json' "$@"
}

echo "GET /"
api http://localhost:${PORT}
echo ""
echo "GET /articles"
api http://localhost:${PORT}/articles
echo ""
echo "POST /article"
api http://localhost:${PORT}/article -d "{ \"body\":\"This is my new article! $(date)\" }"
echo ""
echo "GET /articles"
api http://localhost:${PORT}/articles
echo ""
|
yusong-shen/comp531-web-development
|
inclass-16/test.sh
|
Shell
|
mit
| 478 |
#!/bin/bash
# Travis CI dependency-install step, branching on the build OS.
if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then
# Linux: install GCC 5 and register it as the default gcc/g++ (priority 90).
sudo apt-get install -y g++-5
sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-5 90
sudo update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-5 90
elif [[ "$TRAVIS_OS_NAME" == "osx" ]]; then
# Apply compilation fixes for OS X.
git apply hacks/civetweb_compilation_fix.patch
fi
|
Cryptyc/Sc2LadderServer
|
.travis/install.sh
|
Shell
|
mit
| 377 |
#!/usr/bin/env bash
# Spin up a local MSSQL 2019 container and prepare the Sequelize test database.
set -Eeuxo pipefail # https://vaneyckt.io/posts/safer_bash_scripts_with_set_euxo_pipefail/
cd -P -- "$(dirname -- "${BASH_SOURCE[0]}")" # https://stackoverflow.com/a/17744637
# Recreate the container from scratch, dropping anything left over.
docker-compose -p sequelize-mssql-2019 down --remove-orphans
docker-compose -p sequelize-mssql-2019 up -d
./../../wait-until-healthy.sh sequelize-mssql-2019
# Create the test database once the server reports healthy.
docker exec sequelize-mssql-2019 \
/opt/mssql-tools/bin/sqlcmd -S localhost -U SA -P "Password12!" -Q "CREATE DATABASE sequelize_test; ALTER DATABASE sequelize_test SET READ_COMMITTED_SNAPSHOT ON;"
# Sanity-check connectivity using the mssql dialect before declaring success.
DIALECT=mssql node check.js
echo "Local MSSQL-2019 instance is ready for Sequelize tests."
|
sequelize/sequelize
|
dev/mssql/2019/start.sh
|
Shell
|
mit
| 649 |
#!/bin/bash
# Configure and build BlockGame with CMake (MSVC, run from Git-Bash), then
# launch the resulting executable.
set -e
# Absolute directory of this script; the sed rewrites "/c/..." into "c:/..."
# so native Windows tools (CMake/MSVC) understand the path.
RootDir="$(cd "$(dirname "${BASH_SOURCE[0]}" )" && pwd)"
# FIX: quote the here-string so a path containing spaces survives intact.
RootDir=$(sed 's@^/\([a-z]\{1\}\)@\1:@' <<< "$RootDir")
# OTHER SCRIPTS
source "$RootDir/timing.sh"
# PROJECT DIRECTORIES
SourceDir="$RootDir/src"
LibDir="$RootDir/lib"
IncludeDir="$RootDir/include"
BuildDir="$RootDir/build"
TimerBegin
cmake -H"$SourceDir" -B"$BuildDir" -G"Visual Studio 15 2017 Win64"
cmake --build "$BuildDir" -- -nologo
TimerEnd
# FIX: quote the executable path; unquoted it word-splits on spaces.
"$BuildDir/Debug/BlockGame.exe"
exit 0
#################################################################################
# The remainder of this script is from before I had a CMake build setup and was #
# building from the command line; it's just here for posterity.                 #
# NOTE(review): everything below is dead code -- "exit 0" above always fires.  #
#################################################################################
# GLOBAL BUILD VARIABLES
DebugMode=1
PlatformEntry="win32_main"
GameEntry="blockgame_main"
# COMPILER FLAGS
# Emit the MSVC output-location flags (exe/obj/pdb) for the given target name.
function OutputFlags()
{
echo -n " -Fe:$BuildDir/$1 " # Create a .exe file
echo -n " -Fo:$BuildDir/$1 " # Create a .obj file
echo -n " -Fd:$BuildDir/$1 " # Create a .pdb file
}
WarningsFlags=(
"-WX" # Treat warnings as errors
"-W4" # Enable warnings up to 'informational' level
"-wd4100" # Disable 'unreferenced formal parameter'
"-wd4189" # Disable 'unreferenced local variable'
"-wd4505" # Disable 'unreferenced local function'
"-wd4201" # Allow 'nameless struct/union'
"-wd4458" # Allow parameters to elide class members
"-wd4723" # Disable 'potential divide by 0'
)
OptimisationFlags=(
"-fp:fast" # Enable fast floating point (sacrifices accuracy)
"-fp:except-" # Disable floating point exceptions
"-GR-" # Disable runtime type information
"-Oi" # Generate intrinsic functions
"-MTd" # Use a static multithreaded runtime library
"-EHsc" # Disable async exceptions, and "extern C" may throw
)
CompilerDefines=(
"-DDEBUG_MODE=$DebugMode" # Preprocessor #define DEBUG_MODE
"-D_CRT_SECURE_NO_WARNINGS=1"
)
IncludeFlags=(
"-I$IncludeDir"
)
MiscFlags=(
"-nologo" # Disable the MSVC logo
"-Gm-" # Disable minimal rebuild, i.e. force full rebuild
)
DebugFlags=(
"-Zo" # Output enhanced debug info
"-FC" # Output full source file paths
"-Z7" # Produce a .obj with full debug symbols
)
# NOTE(review): [ $DebugMode ] is true for ANY non-empty value, including 0;
# if this code is ever revived, use [ "$DebugMode" -ne 0 ] instead.
if [ $DebugMode ]; then
DebugFlags+=("-Od") # Disable optimisations
else
DebugFlags+=("-O2") # Enable optimisations
fi
CompilerFlags="${WarningsFlags[@]} ${OptimisationFlags[@]} ${CompilerDefines[@]}
${IncludeFlags[@]} ${MiscFlags[@]} ${DebugFlags[@]}"
# LINKER FLAGS
LinkerOptimisations=(
"-incremental:no" # Disable incremental linking
"-opt:ref" # Remove functions and data that aren't referenced
"-subsystem:windows,5.02" # Create a Windows app (no console), min version 5.02
)
LinkerCommonLibs=(
"$LibDir/glew/glew32s.lib" # GLEW static library
)
LinkerFlags="${LinkerOptimisations[@]} ${LinkerCommonLibs[@]}"
# CLEAN BUILD
if [ "$1" == "--clean" ]; then
echo "Cleaning up..."
rm -f $BuildDir/*.exe
rm -f $BuildDir/*.dll
rm -f $BuildDir/*.obj
rm -f $BuildDir/*.lib
rm -f $BuildDir/*.pdb
rm -f $BuildDir/*.exp
exit 0;
fi
# COMPILATION
TimerBegin
echo "Entering directory $RootDir"
pushd $RootDir > /dev/null
if [ ! -d $BuildDir ]; then
mkdir $BuildDir
fi
echo -e "\nCompiling game DLL..."
# Remove stale randomly-suffixed PDBs from earlier hot-reload builds.
find $BuildDir -type f -name "$GameEntry-*.pdb" -exec rm {} \;
cl $CompilerFlags $(OutputFlags $GameEntry) "$SourceDir/blockgame/$GameEntry.cpp" -LD \
-link $LinkerFlags -PDB:"$BuildDir/$GameEntry-$RANDOM.pdb"
echo -e "\nCompiling platform code..."
cl $CompilerFlags $(OutputFlags $PlatformEntry) "$SourceDir/$PlatformEntry.cpp" \
-DGAME_DLL_FILENAME=\"$GameEntry.dll\" \
-link $LinkerFlags "gdi32.lib" "user32.lib" "winmm.lib" "opengl32.lib"
echo -e "\nLeaving directory $RootDir"
popd > /dev/null
TimerEnd
|
georgefrost123/3d_demo
|
build.sh
|
Shell
|
mit
| 4,601 |
#! /bin/bash
##########################################################################
# #
# NETHINKS OpenNMS Docker environment #
# Container Generator Container #
# prestart.sh #
# #
# [email protected] #
# #
##########################################################################
# Intentionally empty: this container needs no pre-start work, but the
# hook must exist for the shared container entrypoint.
# nothing to do here
|
NETHINKS/opennms-docker-env
|
images/containergenerator/scripts/prestart.sh
|
Shell
|
mit
| 710 |
#!/bin/bash
# Launch the ToDoListApp Django project under Gunicorn on a unix socket.
NAME="ToDoListApp"
BIND_ADDRESS=unix:/home/localuser/comp204p/app.sock
NUM_WORKERS=5
DJANGO_WSGI_MODULE=COMP204P.wsgi

echo "Starting $NAME as $(whoami)"

# exec replaces this shell so the supervisor signals gunicorn directly.
exec gunicorn "${DJANGO_WSGI_MODULE}:application" \
  --pythonpath '/home/localuser/comp204p/app' \
  --name "$NAME" \
  --workers "$NUM_WORKERS" \
  --bind="$BIND_ADDRESS"
|
javaburger/comp204p
|
bin/gunicorn_start.bash
|
Shell
|
mit
| 315 |
#!/bin/bash
# Demo: time-based blind SQL injection detection against WAVSEP using the
# http-mwu tool (Mann-Whitney U test over response times). MySQL
# BENCHMARK(n, ...) is the timing payload: a large n delays the response.
# SAMPLE_SIZE --
# SAMPLE_SIZE*2 + THROWAWAYS*2 = total number of HTTP requests
SAMPLE_SIZE=12
THROWAWAYS=1
BASE_URL="http://wavsep.local/wavsep"
# BENCHMARK iteration counts: MARK_POS = slow (injected), MARK_NEG = baseline.
MARK_POS=50000
MARK_NEG=5
MWU_APP=./http-mwu
# Positive case: x (fast payload) vs y (slow payload) should differ => p < alpha.
echo "expecting positive SQL injection detection (p < alpha)"
echo "======================================================="
${MWU_APP} \
-throwaways="${THROWAWAYS}" \
-x-url="${BASE_URL}/active/SQL-Injection/SInjection-Detection-Evaluation-GET-200Identical/Case01-InjectionInView-Numeric-Blind-200ValidResponseWithDefaultOnException.jsp?transactionId=1%20and%201%20in%20(select%20BENCHMARK(${MARK_NEG},MD5(CHAR(97)))%20)%20--%20" \
-y-url="${BASE_URL}/active/SQL-Injection/SInjection-Detection-Evaluation-GET-200Identical/Case01-InjectionInView-Numeric-Blind-200ValidResponseWithDefaultOnException.jsp?transactionId=1%20and%201%20in%20(select%20BENCHMARK(${MARK_POS},MD5(CHAR(97)))%20)%20--%20" \
-sample-size=${SAMPLE_SIZE}
echo
# Negative case: both URLs use the fast payload => no detectable difference.
echo "expected negative SQL injection detection (p > alpha)"
echo "====================================================="
${MWU_APP} \
-throwaways="${THROWAWAYS}" \
-x-url="${BASE_URL}/active/SQL-Injection/SInjection-Detection-Evaluation-GET-200Identical/Case01-InjectionInView-Numeric-Blind-200ValidResponseWithDefaultOnException.jsp?transactionId=1%20and%201%20in%20(select%20BENCHMARK(${MARK_NEG},MD5(CHAR(97)))%20)%20--%20" \
-y-url="${BASE_URL}/active/SQL-Injection/SInjection-Detection-Evaluation-GET-200Identical/Case01-InjectionInView-Numeric-Blind-200ValidResponseWithDefaultOnException.jsp?transactionId=1%20and%201%20in%20(select%20BENCHMARK(${MARK_NEG},MD5(CHAR(97)))%20)%20--%20" \
-sample-size=${SAMPLE_SIZE}
|
sebcat/http-mwu
|
wavsep-demo.sh
|
Shell
|
mit
| 1,685 |
# Docker shell helpers (zsh aliases file).
alias dc="docker-compose"
# Remove all containers (running or not), dangling "<none>" images, and
# dangling volumes.
# NOTE(review): each sub-command runs even when its list is empty, so docker
# prints a usage error in that case; harmless but noisy.
docker-cleanup() {
docker ps -q -a | xargs docker rm
docker rmi $(docker images | grep "^<none>" | awk '{print $3}')
docker volume rm $(docker volume ls -qf dangling=true)
}
alias dspec="docker-compose run web bundle exec rspec"
|
BrunoAssis/dotfiles
|
docker/aliases.zsh
|
Shell
|
mit
| 269 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for DSA-3068-1
#
# Security announcement date: 2014-11-07 00:00:00 UTC
# Script generation date: 2017-01-01 21:07:05 UTC
#
# Operating System: Debian 7 (Wheezy)
# Architecture: i386
#
# Vulnerable packages fix on version:
# - konversation:1.4-1+deb7u1
# - konversation-data:1.4-1+deb7u1
# - konversation-dbg:1.4-1+deb7u1
#
# Last versions recommended by security team:
# - konversation:1.4-1+deb7u1
# - konversation-data:1.4-1+deb7u1
# - konversation-dbg:1.4-1+deb7u1
#
# CVE List:
# - CVE-2014-8483
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
# --only-upgrade: each package is updated only if it is already installed.
sudo apt-get install --only-upgrade konversation=1.4-1+deb7u1 -y
sudo apt-get install --only-upgrade konversation-data=1.4-1+deb7u1 -y
sudo apt-get install --only-upgrade konversation-dbg=1.4-1+deb7u1 -y
|
Cyberwatch/cbw-security-fixes
|
Debian_7_(Wheezy)/i386/2014/DSA-3068-1.sh
|
Shell
|
mit
| 906 |
#!/bin/sh
# install.sh -- install pi-networkDiagnostics and register it to start with
# the LXDE desktop session on a Raspberry Pi.
cd /home/pi
sudo apt-get -y install python3-tk fping
cd ./pi-networkDiagnostics/
# Generate launcher.sh fresh each run. (FIX: the original appended with
# "echo >>", which duplicated every line when the installer was re-run.)
cat > launcher.sh <<'EOF'
#!/bin/sh
# launcher.sh
cd /
cd home/pi/pi-networkDiagnostics
python3 willrr.py
cd /
EOF
# FIX: the launcher was never made executable.
chmod +x launcher.sh
# FIX: sed without -i only printed the edited autostart file to stdout and
# modified nothing; edit in place, and only insert the entry once.
if ! grep -q 'pi-networkDiagnostics/launcher.sh' /home/pi/.config/lxsession/LXDE-pi/autostart; then
sed -i '3i@/home/pi/pi-networkDiagnostics/launcher.sh' /home/pi/.config/lxsession/LXDE-pi/autostart
fi
echo "Installed"
|
willrr/pi-networkDiagnostics
|
install.sh
|
Shell
|
mit
| 454 |
#! /bin/bash
# Environment setup for Balrog/GalSim runs on the DES grid: load the EUPS
# product cache, `setup` the pinned software stack, then point
# PYTHONPATH/PATH at the shared Balrog installation.
# NOTE(review): this script must be source'd (it only exports variables).
source /cvmfs/des.opensciencegrid.org/eeups/startupcachejob21i.sh
setup galsim 1.5.1 # Brian's updated build
setup easyaccess 1.2.0+2
setup swarp 2.36.2+3
setup mofpsfex 0.3.2
setup psycopg2 2.4.6+7
setup despyastro 0.3.9+2
setup pil 1.1.7+13
setup fitsverify
setup pathos
setup healpy
setup esutil 0.6.2rc1+1
setup meds 0.9.3rc2
setup pixcorrect 0.5.3+12
setup sextractor 2.23.2+4
setup despydb
setup IntegrationUtils 2.0.9+1
setup ngmix
setup covmatrix 0.9.0+1
# Paths into the shared Balrog base installation (order matters: later
# PYTHONPATH prepends take precedence over earlier entries).
export PYTHONPATH=/data/des61.a/data/severett/Balrog-GalSim/balrog:${PYTHONPATH}
export BALROG_BASE=/data/des71.a/data/kuropat/balrog-base
export PYTHONPATH=$PYTHONPATH:/data/des71.a/data/kuropat/balrog-base/lib/python2.7/site-packages/
export DESMEDS_CONFIG_DIR=${BALROG_BASE}/desmeds-config/
export MEDS_DATA=/data/des71.a/data/kuropat/meds_test
export DESDATA=${BALROG_BASE}/DESDATA
export PYTHONPATH=${BALROG_BASE}/mof/ngmixer/y3v0.9.4a+1/python:$PYTHONPATH
export PATH=${BALROG_BASE}/mof/ngmixer/y3v0.9.4a+1/bin:$PATH
export PATH=${BALROG_BASE}/bin:$PATH
export medsconf="y3v02"
|
sweverett/Balrog-GalSim
|
setup_balrog.sh
|
Shell
|
mit
| 1,061 |
#!/bin/sh
# Verified Composer installer (per getcomposer.org): download the installer,
# check its SHA-384 hash against the published signature, then install the
# `composer` binary into /usr/local/bin. Exits non-zero on any failure.
EXPECTED_SIGNATURE="$(wget -q -O - https://composer.github.io/installer.sig)"
php -r "copy('https://getcomposer.org/installer', 'composer-setup.php');"
ACTUAL_SIGNATURE="$(php -r "echo hash_file('sha384', 'composer-setup.php');")"
# Print both hashes for the build log.
echo "$EXPECTED_SIGNATURE"
# FIX: `echo "\n"` is non-portable -- bash prints a literal backslash-n
# while dash prints a blank line; printf behaves identically everywhere.
printf '\n'
echo "$ACTUAL_SIGNATURE"
if [ "$EXPECTED_SIGNATURE" != "$ACTUAL_SIGNATURE" ]
then
>&2 echo 'ERROR: Invalid installer signature'
rm composer-setup.php
exit 1
fi
php composer-setup.php --quiet --install-dir=/usr/local/bin --filename=composer
RESULT=$?
rm composer-setup.php
exit $RESULT
|
B0ulzy/BoulzyManagerBundle
|
docker/tools/composer-install.sh
|
Shell
|
mit
| 575 |
#!/bin/bash
# Submit one HTCondor connectograph job per volume id listed in volids.txt.
# The Condor submission itself (reading the exported variables below) is
# done by NonLocalCondorEcho from CondorFunctions.sh.
source ../Utilities/CondorFunctions.sh
for volid in `cat volids.txt`
do
# initializing condor variables (exported so CondorFunctions can read them)
export studyDir=/path/to/fod/and/atlasindwi/and/5TTdata
export mrtrix3=/path/to/mrtrix3StaticCompile/mrtrix3
export job=${volid}_connectograph
export executable=$(pwd)/Connectograph_Gordon_NonLocalCondor.sh
export args=$volid
export numCPUs="1"
export RAM="16 Gb"
export disk="500 Gb"
export initialDir=NonLocalCondorLogs_Gordon_Connectograph
mkdir -p $initialDir
# Comma-separated input/output manifests for Condor file transfer.
export transferInputFiles="$studyDir/GordonReg_${volid}/${volid}_Gordon_regions_in_DWI.nii.gz,$studyDir/wmfod_${volid}_norm.mif,$studyDir/${volid}_mprageInDWI_5TT.mif,$mrtrix3"
export transferOutputFiles="mu_Gordon_${volid}.txt,connectome_Gordon_${volid}.csv,meanlength_Gordon_${volid}.csv,exemplars_${volid}.tck,nodes_${volid}_smooth.obj"
NonLocalCondorEcho
done
|
nadluru/NeuroImgMatlabCondor
|
ConnectographyExample/NonLocalCondorCall.sh
|
Shell
|
mit
| 846 |
#!/bin/sh -e
# Add the local tup checkout to PATH. `prepend_path` is presumably defined
# by the profile framework that sources this snippet -- TODO confirm.
prepend_path ~/repo/tup
|
ghub/etc
|
profile.d/50_tup.sh
|
Shell
|
mit
| 38 |
#!/usr/bin/env bash
# Helpers around `xcrun ditto` for copying build artifacts (error/info come
# from bin/log.sh).
source bin/log.sh

# Copy SOURCE to TARGET with ditto; on failure, log diagnostics and abort.
function ditto_or_exit {
  local src="${1}"
  local dst="${2}"
  if ! xcrun ditto "${src}" "${dst}"; then
    error "Could not copy:"
    error "  source: ${src}"
    error "  target: ${dst}"
    if [ ! -e "${src}" ]; then
      error "The source file does not exist"
      error "Did a previous xcodebuild step fail?"
    fi
    error "Exiting 1"
    exit 1
  fi
}

# Copy and log the installed destination on success.
function install_with_ditto {
  ditto_or_exit "${1}" "${2}"
  info "Installed ${2}"
}

# Zip SOURCE into TARGET, preserving resource forks and the parent directory.
function ditto_to_zip {
  xcrun ditto \
    -ck --rsrc --sequesterRsrc --keepParent \
    "${1}" \
    "${2}"
}
|
calabash/ios-smoke-test-app
|
CalSmokeApp/bin/ditto.sh
|
Shell
|
mit
| 566 |
# Deploy locally-built Python dists to one or more remote hosts over SSH and
# (re)install them into a pew-managed virtualenv on each host.
#
# Requirements:
# Create all conf files you want "conf-*.json"
# Default: the script takes all confs, or you can give a conf file name in params
# install jq:
# * sudo apt-get install jq
# install pipsi and pew and then run a pew command to init it:
# * sudo pip install pipsi
# * sudo pipsi install pew
# * pew ls
# Vars to set:
packagePrefix="hj" # TODO use package_prefix of the workspace conf
# We get the conf file:
if [ -z "$1" ]
then
allConfs=$(find ./ -name "conf-*.json" -print0 | xargs -0 ls)
else
allConfs=$1
fi
# For all conf file:
for confName in $allConfs
do
# Getting the current dir:
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# Getting the conf path:
confFile=$DIR"/$confName"
# Getting the server address in the conf:
# NOTE(review): defining getJson inside the loop re-defines it on every
# iteration; harmless, but it could live at top level.
function getJson()
{
data=$(jq ."$1" $2)
data=${data#"\""}
data=${data%"\""}
echo $data
}
# Getting all addresses (addresses are separated by a space):
addresses=($(jq -r '.address' $confFile))
# Getting the venv name in the conf:
venv=$(getJson "venv" $confFile)
# Getting the user name in the conf:
user=$(getJson "user" $confFile)
# Getting the project name in the conf:
project=$(getJson "project" $confFile)
# Getting the path name in the conf:
path=$(getJson "path" $confFile)
# Getting the python path in the conf:
pythonPath=$(getJson "pythonPath" $confFile)
# Getting the port in the conf:
port=$(getJson "port" $confFile)
# Getting the removeTargetDist in the conf:
removeTargetDist=$(getJson "removeTargetDist" $confFile)
# We delete the target dist package because we don't want to install it (just all dependencies):
if [ "$removeTargetDist" == "null" ] || [ $removeTargetDist == true ]
then
# Warning, if your target package doesn't match this pattern, you have to make your own:
targetDistPattern="*"$(echo "$project" | tr '[:upper:]' '[:lower:]')"-*.tar.gz"
rm $DIR/$targetDistPattern 2> /dev/null
fi
# For each address:
for var in "${addresses[@]}"
do
# Getting the current address:
address="${var}"
# We first check if the host is reachable:
status=$(ssh -o BatchMode=yes -o ConnectTimeout=10 -p "$port" "$user"@"$address" echo "_CONNEXION_SUCCESS_" 2>&1)
if [[ $status = *"_CONNEXION_SUCCESS_"* ]]
then
echo "==> The host $address is reachable <=="
# We set the default path:
if [ "$path" == "null" ]
then
path="/home/"$user
fi
# Echo infos:
echo "rsyncing dists at "$address" in "$venv
# Create the staging directory on the remote host:
wmDistTmp=$path"/wm-dist-tmp/"$project
ssh -p $port "$user"@$address mkdir -p $wmDistTmp
# We delete all tar.gz:
ssh -p $port "$user"@$address rm $wmDistTmp/*-*.tar.gz
# Rsync all:
rsync -e "ssh -p $port" -a $DIR/* $user@$address:$wmDistTmp
# Check whether workspacemanager is installed:
# regex='workspacemanager'
# sshResult=$(ssh -p $port "$user"@$address 'pip freeze')
# if ! [[ $sshResult =~ $regex ]]
# then
# echo "Installing workspacemanager on the remote server..."
# ssh -p $port -t $user@$address 'sudo pip install workspacemanager'
# fi
# Check whether the venv exists, creating it if missing:
sshResult=$(ssh -p $port "$user"@$address 'pew ls')
if ! [[ $sshResult =~ $venv ]]
then
echo "Creating the venv..."
if [ "$pythonPath" == "null" ]
then
ssh -p $port $user@$address "pew new -d $venv"
else
ssh -p $port $user@$address "pew new -p $pythonPath -d $venv"
fi
fi
# Install all synced sdists: uninstall the old version first, then install.
for current in $DIR/*.gz; do
bName=$(basename $current)
current=$wmDistTmp"/"$bName
# Pew is not found by ssh, so we need the full path, you can edit this line if pew is at an other place
# Strip the "-<version>.tar.gz" suffix to recover the package name:
package=$(echo $bName | perl -nle 'm/(.*)-(?:\d+.)+\d+.tar.gz/; print $1')
echo "==> Uninstalling $package... <=="
uninstallResult=$(ssh -p $port $user@$address "/usr/bin/yes | pew in $venv pip uninstall $package" 2>&1)
if [[ $uninstallResult = *"Cannot uninstall"* ]]; then
ssh -p $port $user@$address "/usr/bin/yes | pew in $venv pip uninstall $packagePrefix$package"
fi
ssh -p $port $user@$address "pew in $venv pip install $current"
done
else
echo "==> The host $address is unreachable <=="
fi
done # End for each address
done
|
hayj/WorkspaceManager
|
workspacemanager/dist-templates/rsync-all.sh
|
Shell
|
mit
| 4,914 |
#!/bin/sh
# Test driver: install dependencies, then run the local doker binary
# against the sample documentation sources.
npm install
../bin/doker ../doc/doc
|
dokerjs/doker
|
test/doc.sh
|
Shell
|
mit
| 46 |
#!/usr/bin/env bash
# Spec: running `mx` against a project with a .tmux file creates the tmux
# session that file describes. All expect_*/run_mx/tap_test helpers come
# from harness.sh.
source "$( dirname "${BASH_SOURCE[0]}" )/../harness.sh"
function creating_a_session_with_a_tmux_file() {
run_mx good_conf
# Session is created with the configured name, window and working dir.
expect_invocation_to_have_argument new-session -s wahoo
expect_invocation_to_have_argument new-session -n first
expect_invocation_to_have_argument new-session -c "$PROJECTS/good_conf"
expect_invocation_to_have_argument new-window -n second
# Each window receives its configured command via send-keys.
expect_invocation_to_have_argument "'send-keys' '-t' 'wahoo:1'" 'echo Wahoo!' C-m
expect_invocation_to_have_argument "'send-keys' '-t' 'wahoo:2'" 'echo Heya.' C-m
expect_successful_run
}
# 7 = number of TAP assertions in the function above.
tap_test 7 creating_a_session_with_a_tmux_file
|
demands/mx
|
test/specs/creating_a_session_with_a_tmux_file.sh
|
Shell
|
mit
| 638 |
#!/usr/bin/awk -f
# mysql2sqlite: translate a mysqldump SQL stream into SQLite-compatible SQL.
# Authors: @esperlu, @artemyk, @gkuenning, @dumblob
# FIXME detect empty input file and issue a warning
# Print a message to stderr (awk has no direct stderr stream, so pipe to cat).
function printerr( s ){ print s | "cat >&2" }
BEGIN {
if( ARGC != 2 ){
printerr( \
"USAGE:\n"\
" mysql2sqlite dump_mysql.sql > dump_sqlite3.sql\n" \
" OR\n" \
" mysql2sqlite dump_mysql.sql | sqlite3 sqlite.db\n" \
"\n" \
"NOTES:\n" \
" Dash in filename is not supported, because dash (-) means stdin." )
no_END = 1
exit 1
}
# Find INT_MAX supported by both this AWK (usually an ISO C signed int)
# and SQlite.
# On non-8bit-based architectures, the additional bits are safely ignored.
# 8bit (lower precision should not exist)
s="127"
# "63" + 0 avoids potential parser misbehavior
if( (s + 0) "" == s ){ INT_MAX_HALF = "63" + 0 }
# 16bit
s="32767"
if( (s + 0) "" == s ){ INT_MAX_HALF = "16383" + 0 }
# 32bit
s="2147483647"
if( (s + 0) "" == s ){ INT_MAX_HALF = "1073741823" + 0 }
# 64bit (as INTEGER in SQlite3)
s="9223372036854775807"
if( (s + 0) "" == s ){ INT_MAX_HALF = "4611686018427387904" + 0 }
# # 128bit
# s="170141183460469231731687303715884105728"
# if( (s + 0) "" == s ){ INT_MAX_HALF = "85070591730234615865843651857942052864" + 0 }
# # 256bit
# s="57896044618658097711785492504343953926634992332820282019728792003956564819968"
# if( (s + 0) "" == s ){ INT_MAX_HALF = "28948022309329048855892746252171976963317496166410141009864396001978282409984" + 0 }
# # 512bit
# s="6703903964971298549787012499102923063739682910296196688861780721860882015036773488400937149083451713845015929093243025426876941405973284973216824503042048"
# if( (s + 0) "" == s ){ INT_MAX_HALF = "3351951982485649274893506249551461531869841455148098344430890360930441007518386744200468574541725856922507964546621512713438470702986642486608412251521024" + 0 }
# # 1024bit
# s="89884656743115795386465259539451236680898848947115328636715040578866337902750481566354238661203768010560056939935696678829394884407208311246423715319737062188883946712432742638151109800623047059726541476042502884419075341171231440736956555270413618581675255342293149119973622969239858152417678164812112068608"
# if( (s + 0) "" == s ){ INT_MAX_HALF = "44942328371557897693232629769725618340449424473557664318357520289433168951375240783177119330601884005280028469967848339414697442203604155623211857659868531094441973356216371319075554900311523529863270738021251442209537670585615720368478277635206809290837627671146574559986811484619929076208839082406056034304" + 0 }
# # higher precision probably not needed
FS=",$"
print "PRAGMA synchronous = OFF;"
print "PRAGMA journal_mode = MEMORY;"
print "BEGIN TRANSACTION;"
}
# historically 3 spaces separate non-argument local variables
function bit_to_int( str_bit, powtwo, i, res, bit, overflow ){
powtwo = 1
overflow = 0
# 011101 = 1*2^0 + 0*2^1 + 1*2^2 ...
for( i = length( str_bit ); i > 0; --i ){
bit = substr( str_bit, i, 1 )
if( overflow || ( bit == 1 && res > INT_MAX_HALF ) ){
printerr( \
NR ": WARN Bit field overflow, number truncated (LSBs saved, MSBs ignored)." )
break
}
res = res + bit * powtwo
# no warning here as it might be the last iteration
if( powtwo > INT_MAX_HALF ){ overflow = 1; continue }
powtwo = powtwo * 2
}
return res
}
# ---------------------------------------------------------------------------
# Pattern/action rules translating mysqldump output into sqlite3 SQL.
# Rule order matters: trigger/view/comment filters run before the generic
# INSERT and CREATE TABLE rewriting below.
# ---------------------------------------------------------------------------
# CREATE TRIGGER statements have funny commenting. Remember we are in trigger.
/^\/\*.*(CREATE.*TRIGGER|create.*trigger)/ {
gsub( /^.*(TRIGGER|trigger)/, "CREATE TRIGGER" )
print
inTrigger = 1
next
}
# The end of CREATE TRIGGER has a stray comment terminator
/(END|end) \*\/;;/ { gsub( /\*\//, "" ); print; inTrigger = 0; next }
# The rest of triggers just get passed through
inTrigger != 0 { print; next }
# CREATE VIEW looks like a TABLE in comments
/^\/\*.*(CREATE.*TABLE|create.*table)/ {
inView = 1
next
}
# end of CREATE VIEW
/^(\).*(ENGINE|engine).*\*\/;)/ {
inView = 0
next
}
# content of CREATE VIEW
inView != 0 { next }
# skip comments
/^\/\*/ { next }
# skip PARTITION statements
/^ *[(]?(PARTITION|partition) +[^ ]+/ { next }
# print all INSERT lines
( /^ *\(/ && /\) *[,;] *$/ ) || /^(INSERT|insert|REPLACE|replace)/ {
prev = ""
# first replace \\ by \_ that mysqldump never generates to deal with
# sequnces like \\n that should be translated into \n, not \<LF>.
# After we convert all escapes we replace \_ by backslashes.
gsub( /\\\\/, "\\_" )
# single quotes are escaped by another single quote
gsub( /\\'/, "''" )
gsub( /\\n/, "\n" )
gsub( /\\r/, "\r" )
gsub( /\\"/, "\"" )
gsub( /\\\032/, "\032" ) # substitute char
gsub( /\\_/, "\\" )
# sqlite3 is limited to 16 significant digits of precision
# Repeatedly drop the last hex digit while the literal is 17+ digits long.
while( match( $0, /0x[0-9a-fA-F]{17}/ ) ){
hexIssue = 1
sub( /0x[0-9a-fA-F]+/, substr( $0, RSTART, RLENGTH-1 ), $0 )
}
if( hexIssue ){
printerr( \
NR ": WARN Hex number trimmed (length longer than 16 chars)." )
hexIssue = 0
}
print
next
}
# CREATE DATABASE is not supported
/^(CREATE DATABASE|create database)/ { next }
# print the CREATE line as is and capture the table name
/^(CREATE|create)/ {
if( $0 ~ /IF NOT EXISTS|if not exists/ || $0 ~ /TEMPORARY|temporary/ ){
caseIssue = 1
printerr( \
NR ": WARN Potential case sensitivity issues with table/column naming\n" \
"     (see INFO at the end)." )
}
if( match( $0, /`[^`]+/ ) ){
tableName = substr( $0, RSTART+1, RLENGTH-1 )
}
aInc = 0
prev = ""
firstInTable = 1
print
next
}
# Replace `FULLTEXT KEY` (probably other `XXXXX KEY`)
/^  (FULLTEXT KEY|fulltext key)/ { gsub( /[A-Za-z ]+(KEY|key)/, " KEY" ) }
# Get rid of field lengths in KEY lines
/ (PRIMARY |primary )?(KEY|key)/ { gsub( /\([0-9]+\)/, "" ) }
aInc == 1 && /PRIMARY KEY|primary key/ { next }
# Replace COLLATE xxx_xxxx_xx statements with COLLATE BINARY
/ (COLLATE|collate) [a-z0-9_]*/ { gsub( /(COLLATE|collate) [a-z0-9_]*/, "COLLATE BINARY" ) }
# Print all fields definition lines except the `KEY` lines.
/^  / && !/^(  (KEY|key)|\);)/ {
if( match( $0, /[^"`]AUTO_INCREMENT|auto_increment[^"`]/) ){
aInc = 1
gsub( /AUTO_INCREMENT|auto_increment/, "PRIMARY KEY AUTOINCREMENT" )
}
gsub( /(UNIQUE KEY|unique key) (`.*`|".*") /, "UNIQUE " )
gsub( /(CHARACTER SET|character set) [^ ]+[ ,]/, "" )
# FIXME
# CREATE TRIGGER [UpdateLastTime]
# AFTER UPDATE
# ON Package
# FOR EACH ROW
# BEGIN
# UPDATE Package SET LastUpdate = CURRENT_TIMESTAMP WHERE ActionId = old.ActionId;
# END
gsub( /(ON|on) (UPDATE|update) (CURRENT_TIMESTAMP|current_timestamp)(\(\))?/, "" )
gsub( /(DEFAULT|default) (CURRENT_TIMESTAMP|current_timestamp)(\(\))?/, "DEFAULT current_timestamp")
gsub( /(COLLATE|collate) [^ ]+ /, "" )
gsub( /(ENUM|enum)[^)]+\)/, "text " )
gsub( /(SET|set)\([^)]+\)/, "text " )
gsub( /UNSIGNED|unsigned/, "" )
gsub( /_utf8mb3/, "" )
gsub( /` [^ ]*(INT|int|BIT|bit)[^ ]*/, "` integer" )
gsub( /" [^ ]*(INT|int|BIT|bit)[^ ]*/, "\" integer" )
# MySQL bit literals (b'0101') become plain integers via bit_to_int().
ere_bit_field = "[bB]'[10]+'"
if( match($0, ere_bit_field) ){
sub( ere_bit_field, bit_to_int( substr( $0, RSTART +2, RLENGTH -2 -1 ) ) )
}
# remove USING BTREE and other suffixes for USING, for example: "UNIQUE KEY
# `hostname_domain` (`hostname`,`domain`) USING BTREE,"
gsub( / USING [^, ]+/, "" )
# field comments are not supported
gsub( / (COMMENT|comment).+$/, "" )
# Get commas off end of line
gsub( /,.?$/, "" )
# Lines are printed one iteration late (buffered in 'prev') so the comma
# separator can be prepended to all but the first column definition.
if( prev ){
if( firstInTable ){
print prev
firstInTable = 0
}
else {
print "," prev
}
}
else {
# FIXME check if this is correct in all cases
if( match( $1,
/(CONSTRAINT|constraint) ["].*["] (FOREIGN KEY|foreign key)/ ) ){
print ","
}
}
prev = $1
}
/ ENGINE| engine/ {
if( prev ){
if( firstInTable ){
print prev
firstInTable = 0
}
else {
print "," prev
}
}
prev=""
print ");"
next
}
# `KEY` lines are extracted from the `CREATE` block and stored in array for later print
# in a separate `CREATE KEY` command. The index name is prefixed by the table name to
# avoid a sqlite error for duplicate index name.
/^(  (KEY|key)|\);)/ {
if( prev ){
if( firstInTable ){
print prev
firstInTable = 0
}
else {
print "," prev
}
}
prev = ""
if( $0 == ");" ){
print
}
else {
if( match( $0, /`[^`]+/ ) ){
indexName = substr( $0, RSTART+1, RLENGTH-1 )
}
if( match( $0, /\([^()]+/ ) ){
indexKey = substr( $0, RSTART+1, RLENGTH-1 )
}
# idx_ prefix to avoid name clashes (they really happen!)
key[tableName] = key[tableName] "CREATE INDEX \"idx_" \
tableName "_" indexName "\" ON \"" tableName "\" (" indexKey ");\n"
}
}
END {
if( no_END ){ exit 1}
# print all KEY creation lines.
for( table in key ){ printf key[table] }
print "END TRANSACTION;"
if( caseIssue ){
printerr( \
"INFO Pure sqlite identifiers are case insensitive (even if quoted\n" \
"     or if ASCII) and doesnt cross-check TABLE and TEMPORARY TABLE\n" \
"     identifiers. Thus expect errors like \"table T has no column named F\".")
}
}
|
Hucaru/Valhalla
|
generateDrops/mysql2sqlite.sh
|
Shell
|
mit
| 9,229 |
#!/bin/bash
# Print the word count of the .text field of every raw JSON file.
# find -print0 / read -d '' survives filenames containing spaces or
# newlines; the original `for fn in \`find ...\`` word-split the list,
# and `cat $fn | jq` was a useless use of cat.
find data/json/raw -type f -print0 |
while IFS= read -r -d '' fn; do
  jq .text "$fn" | wc -w
done
|
sunlightlabs/fcc-net-neutrality-comments
|
scripts/count_words.sh
|
Shell
|
mit
| 89 |
#!/bin/bash
# Display IP address
ip -f inet addr show eth0
# Abort instead of launching the server from the wrong directory.
cd /data/arma3 || exit 1
# Prepend the JamVM JRE path to LD_LIBRARY_PATH and launch the dedicated
# server as user 'arma'. BUGFIX: the original contained a literal ^C
# control character ($^C_LIBRARY_PATH) where $LD_LIBRARY_PATH was intended.
sudo su -c "LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/lib/jvm/java-7-openjdk-i386/jre/lib/i386/jamvm ./arma3server -netlog" arma
|
orlandov/arma3-dedicated-linux-server-setup
|
bin/start.sh
|
Shell
|
mit
| 202 |
#!/bin/bash
# Run the Twitter-avatar fixer inside the project's virtualenv.
source /var/www/dumteedum/env/bin/activate
# Abort if the project directory is missing rather than running the
# script from whatever the current working directory happens to be.
cd /var/www/dumteedum || exit 1
python agent_fix_all_twitter_avatars.py
|
CornerstoneLabs/twittermap
|
search/check_avatars.sh
|
Shell
|
mit
| 118 |
#!/bin/bash
# Show a fortune cookie, piped through cowsay's tux figure when cowsay is
# installed; do nothing at all when fortune itself is missing.
if type fortune > /dev/null 2>&1; then
  # Prefer the personal dictionary; fall back to -s (short fortunes).
  # Place your dictionaries here or change the dir path.
  FILE=$HOME/sources/dict/fortunes
  if [ -r "$FILE" ]; then
    ARG="$FILE"
  else
    ARG="-s"
  fi
  fortune "$ARG" | ( command -v cowsay > /dev/null 2>&1 && cowsay -f tux || cat )
fi
|
iiey/dotfiles
|
conky/fortune.sh
|
Shell
|
mit
| 372 |
#!/bin/sh
# Create a login-less account whose username and GECOS field are both $1.
# BUGFIX: --disabled-password and --gecos are options of Debian's
# adduser(8); useradd(8) does not know them, so the original always failed.
adduser --disabled-password --gecos "$1" "$1"
|
joelhelbling/anjou
|
scripts/create-user.sh
|
Shell
|
mit
| 55 |
#!/bin/sh
# Disassemble a raw (headerless) shellcode blob as 64-bit x86 code:
# -b binary treats the input as a flat binary, -m selects the architecture.
objdump -D -b binary -m i386:x86-64 shellcode_linux_x86-64.bin
|
j123123/my-shellcodes
|
x86-64_GNU_Linux/disasm_hex.sh
|
Shell
|
cc0-1.0
| 73 |
# Cheat-sheet of debugging commands (not meant to be executed as a script).
# Find the ARM GCC toolchain directory
locate arm-none-eabi-gcc
# e.g. @ /home/imr/ti/ccsv6/tools/compiler/gcc-arm-none-eabi-4_8-2014q3/arm-none-eabi/
# Location of assembler (as), linker(ld), etc.
/usr/arm-linux-gnueabi/bin
# GDB
# basix @ http://www.thegeekstuff.com/2010/03/debug-c-program-using-gdb/
# 1. Call a program with $ "gdb /path/to/program.c"
# 2. Set breakpoints
# 3. Run program with command line arguments w/ $ "run -m /path/to/model.bsm" etc
# step into
s
# next
n
# set breakpoint
b filename.ext:line#
# delete breakpoint, cf. http://www.delorie.com/gnu/docs/gdb/gdb_32.html
clear filename.ext:line#
# print variable value
p VAR
# examine contents of address
x 0x7FFFFFF
# show current breakpoints
info breakpoints
# Segmentation fault diagnosis
valgrind <cmd_that_segfaults>
valgrind --leak-check=full <cmd_that_segfaults>
# Heap memory usage
# BUGFIX: the massif profiler is selected with --tool=massif, not -massif.
valgrind --tool=massif <cmd_that_segfaults>
|
yunque/thinkingaboutthinking
|
tips/dbg.sh
|
Shell
|
cc0-1.0
| 896 |
#! /bin/sh
# Copyright (C) 2006-2017 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.
# Make sure different subpackages may share files and directories.
. test-init.sh
# Top-level package distributes a file that lives inside the subpackage tree.
cat >Makefile.am <<'END'
SUBDIRS = subpkg
EXTRA_DIST = subpkg/foobar
END
cat >>configure.ac <<'END'
AC_CONFIG_SUBDIRS([subpkg])
AC_OUTPUT
END
# Minimal subpackage with its own configure/Makefile.
mkdir subpkg
echo foobar >subpkg/foobar
echo SUBDIRS = >subpkg/Makefile.am
cat >subpkg/configure.ac <<'END'
AC_INIT([subpkg], [1.0])
AM_INIT_AUTOMAKE
AC_CONFIG_FILES([Makefile])
AC_OUTPUT
END
# Bootstrap both packages, then verify the shared file survives distcheck.
$ACLOCAL
$AUTOMAKE
$AUTOCONF
cd subpkg
$ACLOCAL
$AUTOMAKE
$AUTOCONF
cd ..
./configure
$MAKE distcheck
|
pylam/automake
|
t/subpkg3.sh
|
Shell
|
gpl-2.0
| 1,219 |
#!/bin/sh
#
# dcmd: expand file lists of .dsc/.changes files in the command line
#
# Copyright (C) 2008 Romain Francoise <[email protected]>
# Copyright (C) 2008 Christoph Berg <[email protected]>
# Copyright (C) 2008 Adam D. Barratt <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# Usage:
#
# dcmd replaces any reference to a .dsc or .changes file in the command
# line with the list of files in its 'Files' section, plus the
# .dsc/.changes file itself.
#
# $ dcmd sha1sum rcs_5.7-23_amd64.changes
# f61254e2b61e483c0de2fc163321399bbbeb43f1 rcs_5.7-23.dsc
# 7a2b283b4c505d8272a756b230486a9232376771 rcs_5.7-23.diff.gz
# e3bac970a57a6b0b41c28c615f2919c931a6cb68 rcs_5.7-23_amd64.deb
# c531310b18773d943249cfaa8b539a9b6e14b8f4 rcs_5.7-23_amd64.changes
# $
# Program name without the leading path, used in all messages.
PROGNAME=$(basename "$0")
version () {
    printf '%s\n' \
"This is $PROGNAME, from the Debian devscripts package, version ###VERSION###
This code is copyright 2008 by Romain Francoise, all rights reserved.
This program comes with ABSOLUTELY NO WARRANTY.
You are free to redistribute this code under the terms of the
GNU General Public License, version 2 or later."
}
# Print a one-line usage summary to stdout.
usage()
{
    printf 'Usage: %s [options] [command] [dsc or changes file] [...]\n' "$PROGNAME"
}
# endswith STRING SUFFIX -> status 0 (true) when STRING ends with SUFFIX.
endswith()
{
    case $1 in
    *$2)
        return 0
        ;;
    *)
        return 1
        ;;
    esac
}
# Instead of parsing the file completely as the previous Python
# implementation did (using python-debian), let's just select lines
# that look like they might be part of the file list.
# Capture group \5 of RE is the bare file name at the end of a 'Files:'
# checksum line (md5 size [section priority] name).
RE="^ [0-9a-f]{32} [0-9]+ ((([a-zA-Z0-9_.-]+/)?[a-zA-Z0-9_.-]+|-) ([a-zA-Z]+|-) )?(.*)$"
# maybe_expand FILE: when FILE exists and is a .dsc/.changes, print the
# files it references (prefixed with FILE's directory); otherwise print
# nothing, which the caller treats as "not expandable".
maybe_expand()
{
    local dir
    local sedre
    if [ -e "$1" ] && (endswith "$1" .changes || endswith "$1" .dsc); then
	# Need to escape whatever separator is being used in sed expression so
	# it doesn't prematurely end the s command
	dir=$(dirname "$1" | sed 's/,/\\,/g')
	# Strip a leading "./" only when the argument itself was not
	# given with an explicit "./" prefix.
	if [ "$(echo "$1" | cut -b1-2)" != "./" ]; then
	    sedre="\."
	fi
	sed --regexp-extended -n "s,$RE,$dir/\5,p" <"$1" | sed "s,^$sedre/,,"
    fi
}
# Default: every file type referenced by a .dsc/.changes is emitted.
DSC=1; BCHANGES=1; SCHANGES=1; ARCHDEB=1; INDEPDEB=1; TARBALL=1; DIFF=1
CHANGES=1; DEB=1; ARCHUDEB=1; INDEPUDEB=1; UDEB=1;
FILTERED=0; FAIL_MISSING=1
EXTRACT_PACKAGE_NAME=0
SORT=0
TAC=0
# Option parsing. --foo selects only type foo (FILTERED=1); --no-foo drops
# type foo from the default set (FILTERED=-1); the two modes cannot be mixed.
while [ $# -gt 0 ]; do
    TYPE=""
    case "$1" in
	--version|-v) version; exit 0;;
	--help|-h) usage; exit 0;;
	--no-fail-on-missing|-r) FAIL_MISSING=0;;
	--fail-on-missing) FAIL_MISSING=1;;
	--package|-p) EXTRACT_PACKAGE_NAME=1;;
	--sort|-s) SORT=1;;
	--tac|-t) TAC=1;;
	--) shift; break;;
	--no-*)
	    TYPE=${1#--no-}
	    case "$FILTERED" in
		1) echo "$PROGNAME: Can't combine --foo and --no-foo options" >&2;
		   exit 1;;
		0) FILTERED=-1;;
	    esac;;
	--**)
	    TYPE=${1#--}
	    case "$FILTERED" in
		-1) echo "$PROGNAME: Can't combine --foo and --no-foo options" >&2;
		    exit 1;;
		0) FILTERED=1; DSC=0; BCHANGES=0; SCHANGES=0; CHANGES=0
		   ARCHDEB=0; INDEPDEB=0; DEB=0; ARCHUDEB=0; INDEPUDEB=0
		   UDEB=0; TARBALL=0; DIFF=0;;
	    esac;;
	*) break;;
    esac
    case "$TYPE" in
	"") ;;
	dsc) [ "$FILTERED" = "1" ] && DSC=1 || DSC=0;;
	changes) [ "$FILTERED" = "1" ] &&
	    { BCHANGES=1; SCHANGES=1; CHANGES=1; } ||
	    { BCHANGES=0; SCHANGES=0; CHANGES=0; } ;;
	bchanges) [ "$FILTERED" = "1" ] && BCHANGES=1 || BCHANGES=0;;
	# BUGFIX: the disabling branch used to set SCHANGES=1 as well,
	# which made --no-schanges a silent no-op.
	schanges) [ "$FILTERED" = "1" ] && SCHANGES=1 || SCHANGES=0;;
	deb) [ "$FILTERED" = "1" ] &&
	    { ARCHDEB=1; INDEPDEB=1; DEB=1; } ||
	    { ARCHDEB=0; INDEPDEB=0; DEB=0; };;
	archdeb) [ "$FILTERED" = "1" ] && ARCHDEB=1 || ARCHDEB=0;;
	indepdeb) [ "$FILTERED" = "1" ] && INDEPDEB=1 || INDEPDEB=0;;
	udeb) [ "$FILTERED" = "1" ] &&
	    { ARCHUDEB=1; INDEPUDEB=1; UDEB=1; } ||
	    { ARCHUDEB=0; INDEPUDEB=0; UDEB=0; };;
	archudeb) [ "$FILTERED" = "1" ] && ARCHUDEB=1 || ARCHUDEB=0;;
	indepudeb) [ "$FILTERED" = "1" ] && INDEPUDEB=1 || INDEPUDEB=0;;
	tar|orig) [ "$FILTERED" = "1" ] && TARBALL=1 || TARBALL=0;;
	diff|debtar) [ "$FILTERED" = "1" ] && DIFF=1 || DIFF=0;;
	*) echo "$PROGNAME: Unknown option '$1'" >&2; exit 1;;
    esac
    shift
done
cmd=
args=""
# Walk the remaining arguments: the first non-.dsc/.changes argument becomes
# the command to run; every .dsc/.changes argument is replaced by the
# (filtered) list of files it references.
while [ $# -gt 0 ]; do
    arg="$1"
    shift
    temparg="$(maybe_expand "$arg")"
    if [ -z "$temparg" ]; then
	if [ -z "$cmd" ]; then
	    cmd="$arg"
	    continue
	fi
	# Not expanded, so simply add to argument list
	args="$args
$arg"
    else
	SEEN_INDEPDEB=0; SEEN_ARCHDEB=0; SEEN_SCHANGES=0; SEEN_BCHANGES=0
	SEEN_INDEPUDEB=0; SEEN_ARCHUDEB=0; SEEN_UDEB=0;
	SEEN_TARBALL=0; SEEN_DIFF=0; SEEN_DSC=0
	MISSING=0
	newarg=""
	# Output those items from the expanded list which were
	# requested, and record which files are contained in the list.
	# The inner loop runs in a subshell (pipeline), so it emits shell
	# assignments that the eval applies in the current shell.
	eval "$(echo "$temparg" | while read THISARG; do
	    if [ -z "$THISARG" ]; then
		# Skip
		:
	    elif endswith "$THISARG" _all.deb; then
		[ "$INDEPDEB" = "0" ] || echo "newarg=\"\$newarg
$THISARG\";"
		echo "SEEN_INDEPDEB=1;"
	    elif endswith "$THISARG" .deb; then
		[ "$ARCHDEB" = "0" ] || echo "newarg=\"\$newarg
$THISARG\";"
		echo "SEEN_ARCHDEB=1;"
	    elif endswith "$THISARG" _all.udeb; then
		[ "$INDEPUDEB" = "0" ] || echo "newarg=\"\$newarg
$THISARG\";"
		echo "SEEN_INDEPUDEB=1;"
	    elif endswith "$THISARG" .udeb; then
		[ "$ARCHUDEB" = "0" ] || echo "newarg=\"\$newarg
$THISARG\";"
		echo "SEEN_ARCHUDEB=1;"
	    elif endswith "$THISARG" .debian.tar.gz || \
		 endswith "$THISARG" .debian.tar.xz || \
		 endswith "$THISARG" .debian.tar.bz2; then
		[ "$DIFF" = "0" ] || echo "newarg=\"\$newarg
$THISARG\";"
		echo "SEEN_DIFF=1;"
	    elif endswith "$THISARG" .tar.gz || \
		 endswith "$THISARG" .tar.xz || \
		 endswith "$THISARG" .tar.lzma || \
		 endswith "$THISARG" .tar.bz2; then
		[ "$TARBALL" = "0" ] || echo "newarg=\"\$newarg
$THISARG\";"
		echo "SEEN_TARBALL=1;"
	    elif endswith "$THISARG" _source.changes; then
		[ "$SCHANGES" = "0" ] || echo "newarg=\"\$newarg
$THISARG\";"
		echo "SEEN_SCHANGES=1;"
	    elif endswith "$THISARG" .changes; then
		# BUGFIX: the '=' was missing here ('newarg"$newarg...'),
		# so the eval'd code tried to run a command named
		# "newarg<file>" instead of appending the entry.
		[ "$BCHANGES" = "0" ] || echo "newarg=\"\$newarg
$THISARG\";"
		echo "SEEN_BCHANGES=1;"
	    elif endswith "$THISARG" .dsc; then
		[ "$DSC" = "0" ] || echo "newarg=\"\$newarg
$THISARG\";"
		echo "SEEN_DSC=1;"
	    elif endswith "$THISARG" .diff.gz; then
		[ "$DIFF" = "0" ] || echo "newarg=\"\$newarg
$THISARG\";"
		echo "SEEN_DIFF=1;"
	    elif [ "$FILTERED" != "1" ]; then
		# What is it? Output anyway
		echo "newarg=\"\$newarg
$THISARG\";"
	    fi
	done)"
	INCLUDEARG=1
	if endswith "$arg" _source.changes; then
	    [ "$SCHANGES" = "1" ] || INCLUDEARG=0
	    SEEN_SCHANGES=1
	elif endswith "$arg" .changes; then
	    [ "$BCHANGES" = "1" ] || INCLUDEARG=0
	    SEEN_BCHANGES=1
	elif endswith "$arg" .dsc; then
	    [ "$DSC" = "1" ] || INCLUDEARG=0
	    SEEN_DSC=1
	fi
	# With --fail-on-missing in filtering mode, every requested file
	# type must actually be present in the expanded list.
	if [ "$FAIL_MISSING" = "1" ] && [ "$FILTERED" = "1" ]; then
	    if [ "$CHANGES" = "1" ]; then
		if [ "$SEEN_SCHANGES" = "0" ] && [ "$SEEN_BCHANGES" = "0" ]; then
		    MISSING=1; echo "$arg: .changes file not found" >&2
		fi
	    else
		if [ "$SCHANGES" = "1" ] && [ "$SEEN_SCHANGES" = "0" ]; then
		    MISSING=1; echo "$arg: source .changes file not found" >&2
		fi
		if [ "$BCHANGES" = "1" ] && [ "$SEEN_BCHANGES" = "0" ]; then
		    MISSING=1; echo "$arg: binary .changes file not found" >&2
		fi
	    fi
	    if [ "$DEB" = "1" ]; then
		if [ "$SEEN_INDEPDEB" = "0" ] && [ "$SEEN_ARCHDEB" = "0" ]; then
		    MISSING=1; echo "$arg: binary packages not found" >&2
		fi
	    else
		if [ "$INDEPDEB" = "1" ] && [ "$SEEN_INDEPDEB" = "0" ]; then
		    MISSING=1; echo "$arg: arch-indep packages not found" >&2
		fi
		if [ "$ARCHDEB" = "1" ] && [ "$SEEN_ARCHDEB" = "0" ]; then
		    MISSING=1; echo "$arg: arch-dep packages not found" >&2
		fi
	    fi
	    if [ "$UDEB" = "1" ]; then
		if [ "$SEEN_INDEPUDEB" = "0" ] && [ "$SEEN_ARCHUDEB" = "0" ]; then
		    MISSING=1; echo "$arg: udeb packages not found" >&2
		fi
	    else
		if [ "$INDEPUDEB" = "1" ] && [ "$SEEN_INDEPUDEB" = "0" ]; then
		    MISSING=1; echo "$arg: arch-indep udeb packages not found" >&2
		fi
		if [ "$ARCHUDEB" = "1" ] && [ "$SEEN_ARCHUDEB" = "0" ]; then
		    MISSING=1; echo "$arg: arch-dep udeb packages not found" >&2
		fi
	    fi
	    if [ "$DSC" = "1" ] && [ "$SEEN_DSC" = "0" ]; then
		MISSING=1; echo "$arg: .dsc file not found" >&2
	    fi
	    if [ "$TARBALL" = "1" ] && [ "$SEEN_TARBALL" = "0" ]; then
		MISSING=1; echo "$arg: upstream tar not found" >&2
	    fi
	    if [ "$DIFF" = "1" ] && [ "$SEEN_DIFF" = "0" ]; then
		MISSING=1; echo "$arg: Debian debian.tar/diff not found" >&2
	    fi
	    [ "$MISSING" = "0" ] || exit 1
	fi
	args="$args
$newarg"
	[ "$INCLUDEARG" = "0" ] || args="$args
$arg"
    fi
done
# The accumulated list is newline-separated; split only on newlines.
IFS='
'
if [ "$EXTRACT_PACKAGE_NAME" = "1" ]; then
    # Keep only the package name: strip everything from the first '_' on.
    packages=""
    for arg in $args; do
	packages="$packages
$(echo "$arg" |sed s/_.*//)"
    done
    args="$packages"
fi
if [ "$SORT" = "1" ]; then
    args="$(echo "$args"| sort -)"
fi
if [ "$TAC" = "1" ]; then
    args="$(echo "$args"| tac -)"
fi
if [ -z "$cmd" ]; then
    # No command given: just list the files. BUGFIX: quote "$arg" so that
    # glob characters in file names are not pathname-expanded by the shell.
    for arg in $args; do
	echo "$arg"
    done
    exit 0
fi
exec $cmd $args
|
dancn/devscripts
|
scripts/dcmd.sh
|
Shell
|
gpl-2.0
| 9,585 |
#!/bin/bash
# Script to generate po files outside of the normal build process
# Author: Pr2 for OpenPLi Team
# Version: 1.3
#
# This script is derivated from updateallpo.sh it is intended to all you
# create the updated version of the po files on different environment:
# For Windows, please download and install the following program:
# Python:
# https://www.python.org/
# GitForWindows:
# https://gitforwindows.org/
#
# Pre-requisite for Windows:
# -> install python on your PC
# -> install Git for Windows, you can keep all default installation settings.
# -> Start the installed: git-bash you will see a command prompt.
# -> At the git-bash command prompt we will clone OpenPLi repository (see below):
#
# For Mac OSX download and install homebrew following explanation from:
# https://brew.sh/
#
# For Mac OSX with homebrew and also Linux users:
# The following tools must be installed on your system and accessible from path:
# gawk, find, gettext, gnu-sed, python
# Start and terminal and clone OpenPLi repository (see below):
#
# On All platforms please download and install:
#
# PoEdit: https://poedit.net/
#
# You then need to clone the OpenPLi repository with the following command:
# -------------------------------------------------------------------------------------
# git clone https://github.com/OpenPLi/enigma2.git
# cd enigma2/po
# -------------------------------------------------------------------------------------
# Run this script from within the po folder.
#
# Defaults; each of these can be overridden on the command line.
remote="origin"       # git remote to check
branch="master"       # branch to translate
python="python"       # python interpreter name
localgsed="sed"       # GNU sed command (gsed on Mac OSX homebrew)
xml2po="xml2po.py"    # helper script extracting strings from XML files
findoptions=""        # extra find options (set for Mac OSX below)
delete=1              # 1 = remove intermediate .pot files afterwards
# Print usage / option help for this script. Always returns 0.
function this_help () {
	# A heredoc keeps the help text readable; the output matches the
	# original printf sequence line for line.
	cat <<EOF
Possible options are:
 -r | --remote to specify the remote git to use, default[origin]
 -b | --branch to specify the branch to translate, default[develop]
 -p | --python to specify the python runtime name, default[python]
 -n | --nodelete to keep the .pot files, useful to find where a message came from
 -h | --help this text

To translate for the develop branch simply run this script without any option.
To translate for the rc branch simply specify:
$0 -branch rc 
or
$0 -b rc


Pre-requisites:

Please read the OpenPLi translators wiki page:
https://wiki.openpli.org/Information_for_Translators
EOF
	return 0
}
# Parse command-line options; unknown options print help and abort.
while [ "$1" != "" ]; do
	case "$1" in
	-b|--branch)
		shift
		branch="$1"
		;;
	-r|--remote)
		shift
		remote="$1"
		;;
	-p|--python)
		shift
		python="$1"
		;;
	# BUGFIX: the long form was misspelled "--nodelte"; accept the
	# documented --nodelete (and keep the old typo for compatibility).
	-n|--nodelete|--nodelte)
		delete=0
		;;
	-h|--help)
		this_help
		exit 0
		;;
	*)
		printf "Error: unknown parameter [%s]\n\n" "$1"
		this_help
		exit 1
	esac
	shift
done
#
# Checking if defined remote exist
#
(git remote -v | grep -q "$remote\s") \
&& { printf "Remote git : [%s]\n" "$remote"; } \
|| { printf "Sorry this remote doesn't exist: [%s]\n Valid remotes are:\n" "$remote"; \
git remote -v ; exit 1; }
#
# Checking if remote branch exist on the defined remote
#
(git branch -r | grep -q "$remote/""$branch""$") \
&& { printf "Remote branch : [%s]\n" "$branch"; } \
|| { printf "Sorry this branch doesn't exist: [%s]\n Valid branches are:\n" "$branch"; \
git branch -r | grep "$remote" | sed "s,$remote/,," ; exit 1; }
# BUGFIX (last line above): the original used sed 's/"$remote"\///' — single
# quotes prevent expansion, so it literally matched '"$remote"/' and never
# stripped the remote prefix from the branch list.
#
# Checking for Python version number to select the right python script to use
#
command -v "$python" >/dev/null 2>&1 || { printf >&2 "Script requires python but it's not installed. Aborting."; \
printf "Please download latest version and install it from: https://www.python.org/\n"; exit 1; }
printf "Python used [%s] script used [%s]: " "$python" "$xml2po"
"$python" --version
#
# Checking for gettext component
#
# BUGFIX: 'command -v xgettext --version' passes '--version' as a second
# name to look up, which fails; only the tool name must be given.
command -v xgettext >/dev/null 2>&1 || { printf "Please install gettext package on your system. Aborting.\n"; exit 1; }
command -v msguniq >/dev/null 2>&1 || { printf "Please install gettext package on your system. Aborting.\n"; exit 1; }
#
# On Mac OSX find option are specific
#
if [[ "$OSTYPE" == "darwin"* ]]
then
# Mac OSX
printf "Script running on Mac OSX [%s]\n" "$OSTYPE"
findoptions=" -s -X "
fi
#
# Script only run with sed but on some distro normal sed is already sed so checking it.
#
sed --version 2> /dev/null | grep -q "GNU"
if [ $? -eq 0 ]; then
localgsed="sed"
else
"$localgsed" --version | grep -q "GNU"
if [ $? -eq 0 ]; then
printf "GNU sed found: [%s]\n" $localgsed
fi
fi
#
# Needed when run in git-bash for Windows
#
export PYTHONIOENCODING=utf-8
#
# To fix the LF (Linux, Mac) and CRLF (Windows) conflict
#
git config core.eol lf
git config core.autocrlf input
git config core.safecrlf true
#
# Git commands to sync with origin.
#
git pull
#
# Retrieve languages from Makefile.am LANGS variable for backward compatibility
#
printf "Po files update/creation from script starting.\n"
languages=($(gawk ' BEGIN { FS=" " }
/^LANGS/ {
for (i=3; i<=NF; i++)
printf "%s ", $i
} ' Makefile.am ))
# If you want to define the language locally in this script uncomment and defined languages
#languages=("ar" "bg" "ca" "cs" "da" "de" "el" "en" "es" "et" "fa" "fi" "fr" "fy" "he" "hk" "hr" "hu" "id" "is" "it" "ku" "lt" "lv" "nl" "nb" "nn" "pl" "pt" "pt_BR" "ro" "ru" "sk" "sl" "sr" "sv" "th" "tr" "uk" "zh")
#
# Arguments to generate the pot and po files are not retrieved from the Makefile.
# So if parameters are changed in Makefile please report the same changes in this script.
#
# Extract translatable strings from Python sources, then from XML files,
# and merge both into a deduplicated enigma2.pot.
printf "Creating temporary file enigma2-py.pot\n"
find $findoptions .. -name "*.py" -exec xgettext --no-wrap -L Python --from-code=UTF-8 -kpgettext:1c,2 --add-comments="TRANSLATORS:" -d enigma2 -s -o enigma2-py.pot {} \+
"$localgsed" --in-place enigma2-py.pot --expression=s/CHARSET/UTF-8/
printf "Creating temporary file enigma2-xml.pot\n"
find $findoptions .. -name "*.xml" -exec "$python" "$xml2po" {} \+ > enigma2-xml.pot
printf "Merging pot files to create: enigma2.pot\n"
cat enigma2-py.pot enigma2-xml.pot | msguniq --no-wrap --no-location -o enigma2.pot -
OLDIFS=$IFS
IFS=" "
# Update each existing <lang>.po against the new template, or create a
# fresh one with msginit; compile a .mo either way.
for lang in "${languages[@]}" ; do
if [ -f $lang.po ]; then \
printf "Updating existing translation file %s.po\n" $lang
msgmerge --backup=none --no-wrap --no-location -s -U $lang.po enigma2.pot && touch $lang.po; \
msgattrib --no-wrap --no-obsolete $lang.po -o $lang.po; \
msgfmt -o $lang.mo $lang.po; \
else \
printf "New file created: %s.po, please add it to github before commit\n" $lang
msginit -l $lang.po -o $lang.po -i enigma2.pot --no-translator; \
msgfmt -o $lang.mo $lang.po; \
fi
done
if [ $delete -eq 1 ]; then \
rm enigma2-py.pot enigma2-xml.pot enigma2.pot; \
fi
IFS=$OLDIFS
printf "Po files update/creation from script finished!\n"
printf "Edit with PoEdit the po file that you want to translate located in:\n\n"
# cygpath exists only under Windows/git-bash; print a native path there.
command -v cygpath > /dev/null && { cygpath -w "$PWD"; } || { pwd; }
printf "\nthen post it back to OpenPLi forum:\n\n"
printf "https://forums.openpli.org/forum/55-en-enduser-support/\n\n"
printf "Please always specify that it is for branch [%s]\n\n" $branch
printf "PoEdit: https://poedit.net/\n"
printf "IMPORTANT: in PoEdit go into Files-Preferences menu select the advanced tab\n"
printf " 1) select Unix(recommended) for carriage return\n"
printf " 2) unselect wrap text\n"
printf " 3) unselect keep original file format\n"
printf "You only need to do this once in PoEdit.\n\n"
printf "Please read the translators wiki page:\n"
printf "\nhttps://wiki.openpli.org/Information_for_Translators\n"
# .mo files are build artifacts; only the .po sources are kept.
rm -rf *.mo
chmod 644 *.po
|
TwolDE/vix-core
|
po/updateallpo-multiOS.sh
|
Shell
|
gpl-2.0
| 7,629 |
#!/bin/bash
stage=0
train_stage=-100
# This trains only unadapted (just cepstral mean normalized) features,
# and uses various combinations of VTLN warping factor and time-warping
# factor to artificially expand the amount of data.
. cmd.sh
. utils/parse_options.sh # to parse the --stage option, if given
[ $# != 0 ] && echo "Usage: local/run_4b.sh [--stage <stage> --train-stage <train-stage>]" && exit 1;
set -e
if [ $stage -le 0 ]; then
  # Create the training data (fbank and mfcc perturbations in parallel).
  featdir=`pwd`/mfcc/nnet5b; mkdir -p $featdir
  fbank_conf=conf/fbank_40.conf
  echo "--num-mel-bins=40" > $fbank_conf
  steps/nnet2/get_perturbed_feats.sh --cmd "$train_cmd" \
    $fbank_conf $featdir exp/perturbed_fbanks_si284 data/train_si284 data/train_si284_perturbed_fbank &
  steps/nnet2/get_perturbed_feats.sh --cmd "$train_cmd" --feature-type mfcc \
    conf/mfcc.conf $featdir exp/perturbed_mfcc_si284 data/train_si284 data/train_si284_perturbed_mfcc &
  wait
fi
if [ $stage -le 1 ]; then
  steps/align_fmllr.sh --nj 30 --cmd "$train_cmd" \
    data/train_si284_perturbed_mfcc data/lang exp/tri4b exp/tri4b_ali_si284_perturbed_mfcc
fi
if [ $stage -le 2 ]; then
  steps/nnet2/train_block.sh --stage "$train_stage" \
    --cleanup false \
    --initial-learning-rate 0.01 --final-learning-rate 0.001 \
    --num-epochs 10 --num-epochs-extra 5 \
    --cmd "$decode_cmd" \
    --hidden-layer-dim 1536 \
    --num-block-layers 3 --num-normal-layers 3 \
    data/train_si284_perturbed_fbank data/lang exp/tri4b_ali_si284_perturbed_mfcc exp/nnet5b || exit 1
fi
if [ $stage -le 3 ]; then # create testing fbank data.
  featdir=`pwd`/mfcc
  fbank_conf=conf/fbank_40.conf
  for x in test_eval92 test_eval93 test_dev93; do
    # BUGFIX: plain 'rm -r' aborts the whole script under 'set -e' on the
    # first run, when data/${x}_fbank does not exist yet; use -f.
    rm -rf data/${x}_fbank
    cp -r data/$x data/${x}_fbank
    # BUGFIX: the split directories live under data/; the old path
    # '${x}_fbank/split*' never matched anything.
    rm -rf data/${x}_fbank/split* || true
    steps/make_fbank.sh --fbank-config "$fbank_conf" --nj 8 \
      --cmd "$train_cmd" data/${x}_fbank exp/make_fbank/$x $featdir || exit 1;
    steps/compute_cmvn_stats.sh data/${x}_fbank exp/make_fbank/$x $featdir || exit 1;
  done
fi
if [ $stage -le 4 ]; then
  steps/nnet2/decode.sh --cmd "$decode_cmd" --nj 10 \
    exp/tri4b/graph_bd_tgpr data/test_dev93_fbank exp/nnet5b/decode_bd_tgpr_dev93
  steps/nnet2/decode.sh --cmd "$decode_cmd" --nj 8 \
    exp/tri4b/graph_bd_tgpr data/test_eval92_fbank exp/nnet5b/decode_bd_tgpr_eval92
fi
exit 0;
|
michellemorales/OpenMM
|
kaldi/egs/wsj/s5/local/nnet2/run_5b.sh
|
Shell
|
gpl-2.0
| 2,370 |
# Keep asking until the user commits to cleaning (yes) or aborts (no).
while true; do
  read -p "Are you sure you want to clean both logfile and config files? <yes/no> " yn
  case "$yn" in
    [Yy]* )
      rm logfile.txt
      rm config.xml
      break
      ;;
    [Nn]* )
      exit
      ;;
    * )
      echo "Please answer yes or no."
      ;;
  esac
done
|
RamRawRDinosauR/STCRRDRMOD
|
build/clean.sh
|
Shell
|
gpl-2.0
| 256 |
# Trim scanned page margins in place: -crop WxH+X+Y cuts the region and
# +repage resets the canvas/offset metadata so the output has clean geometry.
convert images/OCS-167-A.png -crop 1499x4534+51+343 +repage images/OCS-167-A.png
#
#
#/OCS-167.png
convert images/OCS-167-B.png -crop 1573x4522+0+343 +repage images/OCS-167-B.png
#
#
#/OCS-167.png
|
jonnymwalker/Staroslavjanskij-Slovar
|
scripts/cropedges.OCS-167.sh
|
Shell
|
gpl-2.0
| 197 |
#!/bin/bash
# Rewrite the OSG/OpenThreads install names in every file of the current
# directory so they resolve inside the app bundle's Frameworks folder,
# then dump each file's final linkage with otool -L.
# BUGFIX: the original 'FILES = *.so' line was an invalid assignment
# (spaces around '=' make the shell run a command named FILES) and the
# variable was never used; it has been removed. "$f" is now quoted so
# filenames with spaces survive.
libs=(osg osgSim osgAnimation osgTerrain osgDB osgText osgFX osgUtil osgGA \
      osgViewer osgManipulator osgVolume osgParticle osgWidget \
      osgPresentation osgQt osgShadow)
for f in *
do
  for lib in "${libs[@]}"; do
    install_name_tool -change "lib${lib}.95.dylib" "@executable_path/../Frameworks/lib${lib}.95.dylib" "$f"
  done
  install_name_tool -change libOpenThreads.12.dylib @executable_path/../Frameworks/libOpenThreads.12.dylib "$f"
  otool -L "$f"
done
|
fgx/fgx
|
src/deploy/osx/scripts/deploy-osg-plugins.3.1.4.sh
|
Shell
|
gpl-2.0
| 1,996 |
#!/bin/bash
# Reset the watch_category working files so a fresh run starts clean.
echo "Cleaning up"
# -f: succeed silently when a file is absent (e.g. on the first run).
# The original mixed bare rm (noisy errors) with ad-hoc 2>/dev/null
# redirections; -f handles all cases uniformly.
rm -f api.php*
rm -f result.txt
rm -f prev.txt
rm -f diff.txt
touch result.txt
|
Smile4ever/wikitools
|
watch_category/init.sh
|
Shell
|
gpl-2.0
| 122 |
#!/bin/bash
#
# cleanup.sh of perf mem test
# Author: Michael Petlan <[email protected]>
#
#
# include working environment
. ../common/init.sh
. ./settings.sh
# When the whole perftool suite drives the run it keeps the results for
# itself; skip cleanup in that mode.
if [ -n "$PERFSUITE_RUN_DIR" ]; then
print_overall_skipped
exit 0
fi
make -s -C examples clean
# Remove per-test log and error files, if any (xargs -r: skip when empty).
find . -name \*.log | xargs -r rm
find . -name \*.err | xargs -r rm
rm -f perf.data*
RM_EXIT_CODE=$?
# Report the rm result; print_overall_results returns the code it printed,
# which becomes the script's exit status.
print_overall_results $RM_EXIT_CODE
exit $?
|
rfmvh/perftool-testsuite
|
base_mem/cleanup.sh
|
Shell
|
gpl-2.0
| 408 |
#!/bin/bash
# this is a script to install all necessary programs
# Copyright Chris Maier
# Package definitions (leading space lets the lists concatenate cleanly).
TOOLS=" mc wget curl git git-core unzip pwgen exuberant-ctags silversearcher-ag xsel autojump"
# BUGFIX: the Debian package is 'doxygen-doc' (was 'doxygen-docs').
DEV=" clang cmake doxygen doxygen-doc graphviz mc exuberant-ctags ksh g++ subversion"
YOCTO=" gawk git-core diffstat unzip texinfo gcc-multilib build-essential chrpath socat libsdl1.2-dev xterm"
DESKTOP=" revelation pdftk texlive-full ffmpeg"
EMACS=" emacs-snapshot"
BROWSER=" google-chrome-stable"
VIM=" vim-gtk"
NEOVIM=" neovim python-dev python-pip python3-dev python3-pip python-setuptools python3-setuptools"
ZSH=" zsh zsh-common zsh-doc"
TRUECRYPT=" truecrypt"
VIRTUALBOX=" virtualbox virtualbox-qt"
# BUGFIX: mutt's helper browser package is 'w3m' (was misspelled 'm3w').
MUTT=" mutt msmtp msmtp-mta urlview w3m"
# option flags
OPT_TOOLS=true
OPT_DEV=false
OPT_YOCTO=false
OPT_DESKTOP=false
OPT_EMACS=false
OPT_BROWSER=false
OPT_VIM=false
OPT_NEOVIM=false
OPT_ZSH=false
# BUGFIX: was declared as OPT_TRUECRPYT, but parse_args and the installer
# read/write OPT_TRUECRYPT, so the real flag started out unset.
OPT_TRUECRYPT=false
OPT_VIRTUALBOX=false
OPT_MUTT=false
OPT_PRINTER=false
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
SCRIPT_USER=$SUDO_USER
APT_GET_OPTIONS="--yes --show-progress --auto-remove"
#APT_GET_OPTIONS="-s"
#
# Logging
#
LOGFILE=$(date +"%Y-%m-%d").log
# this function prints to STDERR
echoerr() { printf "%s\n" "$*" >&2; }
echodebug()
{
if [ -n ${VERBOSE+x} ]; then
printf "%s\n" "$*" >&2
fi
}
#
# Function definitions
#
function usage (){
    # Print a synopsis to stderr ($1: the program name) and abort.
    # BUG FIX: "+broswer" was a typo for "+browser".
    echoerr "$1 [+dev] [+truecrypt] [+yocto] [+desktop] [+emacs] [+vim] [+virtualbox] [+neovim] [+browser] [+zsh] [-v|--verbose]"
    exit 1
}
function parse_args (){
    # Map "+feature" style command line switches onto the OPT_* flags.
    # BUG FIX: `[[ $# > 0 ]]` is a lexicographic string comparison inside
    # [[ ]]; use the numeric -gt operator.
    while [[ $# -gt 0 ]]
    do
        case $1 in
            +dev)
                OPT_DEV=true
                ;;
            +yocto)
                OPT_YOCTO=true
                ;;
            +desktop)
                # desktop pulls in the whole workstation bundle
                OPT_DESKTOP=true
                OPT_TRUECRYPT=true
                OPT_VIRTUALBOX=true
                OPT_BROWSER=true
                OPT_ZSH=true
                OPT_PRINTER=true
                OPT_MUTT=true
                ;;
            +emacs)
                OPT_EMACS=true
                ;;
            +neovim)
                OPT_NEOVIM=true
                ;;
            +vim)
                OPT_VIM=true
                ;;
            +browser)
                OPT_BROWSER=true
                ;;
            +zsh)
                OPT_ZSH=true
                ;;
            +truecrypt)
                OPT_TRUECRYPT=true
                ;;
            +virtualbox)
                OPT_VIRTUALBOX=true
                ;;
            -v|--verbose)
                VERBOSE=true
                ;;
            *)
                # BUG FIX: usage prints its first argument as the program
                # name; it used to be called with no argument at all.
                usage "$0"
                ;;
        esac
        shift
    done
}
function check_sudo ()
{
    # Refuse to continue unless we are running with root privileges (uid 0).
    local uid
    uid=$(id -u)
    if [ "$uid" -ne 0 ]; then
        echoerr "This script must be run as root"
        exit 1
    fi
    echoerr "sudo successful"
}
function install_browser ()
{
# Add Google's signing key and apt repository, then queue Chrome for install.
echoerr "Installing Google chrome ..."
# wget runs as the invoking (non-root) user; apt-key still runs as root
sudo -u $SCRIPT_USER wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add -
echo "deb [arch=amd64] http://dl.google.com/linux/chrome/deb/ stable main" > /etc/apt/sources.list.d/google.list
PACKAGES+=$BROWSER
}
function post_install_browser ()
{
    # Register the freshly installed browser as the system-wide
    # x-www-browser alternative.
    echoerr "Configuring Google Chrome ..."
    local browser_path
    browser_path=$(which $BROWSER)
    echodebug "Browser bin: $browser_path"
    update-alternatives --install /usr/bin/x-www-browser x-www-browser $browser_path 60
}
function install_printer ()
{
    # Run the Brother vendor installer that ships next to this script.
    # BUG FIX: the old `sh -c '$SCRIPT_DIR/...'` used single quotes, so
    # $SCRIPT_DIR was expanded by the child sh, where it is unset (it is not
    # exported) — the installer path collapsed to /linux-brprinter-....
    echoerr "Install Brother MFC-L2700DW"
    "$SCRIPT_DIR/linux-brprinter-installer-2.1.1-1" MFC-L2700DW
}
function install_emacs ()
{
# Register the ubuntu-elisp PPA and queue the emacs snapshot package.
echoerr "Installing Emacs ..."
add-apt-repository -y ppa:ubuntu-elisp
PACKAGES+=$EMACS
}
function post_install_emacs (){
    # Link the repository's .emacs.d into the home directory once emacs exists.
    echoerr "Configuring Emacs ..."
    local EMACS_BIN
    EMACS_BIN=$(which emacs)
    echodebug "Emacs bin: $EMACS_BIN"
    # post install emacs
    # BUG FIX: the test must be quoted — unquoted `[ -n $EMACS_BIN ]`
    # collapses to the always-true one-argument form when the variable is
    # empty, so the symlink was created even without emacs installed.
    if [ -n "$EMACS_BIN" ]; then
        # NOTE(review): under sudo, ~ expands to root's home while the link
        # is created as $SCRIPT_USER — confirm the intended target home.
        sudo -u $SCRIPT_USER ln -fs $SCRIPT_DIR/../src/.emacs.d ~/.emacs.d
    fi
}
function install_neovim (){
# Register the neovim stable PPA and queue neovim plus its Python host deps.
echoerr "Installing Neovim ..."
add-apt-repository -y ppa:neovim-ppa/stable
PACKAGES+=$NEOVIM
}
function post_install_neovim (){
# Install the Python neovim client packages, link the nvim config into
# ~/.config and register nvim as the default editor alternative.
echoerr "Configuring Neovim ..."
local NEOVIM_BIN=$(which nvim)
local EDITOR_BIN=$(which editor)
echodebug "Neovim bin: $NEOVIM_BIN"
echodebug "Editor bin: $EDITOR_BIN"
# upgrade pip itself, then add the neovim host packages for py2 and py3
sudo -u $SCRIPT_USER pip install --upgrade pip
sudo -u $SCRIPT_USER pip3 install --upgrade pip
sudo -u $SCRIPT_USER pip install --user neovim
sudo -u $SCRIPT_USER pip3 install --user neovim
# link the config files to ~/.config/neovim/
sudo -u $SCRIPT_USER ln -fs $SCRIPT_DIR/../src/.config/nvim ~/.config/
# install as default editor
# NOTE(review): update-alternatives expects the *link* path as its first
# argument; using $(which editor) only works if the editor link already
# exists — confirm.
update-alternatives --install $EDITOR_BIN editor $NEOVIM_BIN 60
}
function install_truecrypt (){
# Register the community truecrypt PPA and queue the package.
echoerr "Installing Truecrypt ..."
add-apt-repository -y ppa:stefansundin/truecrypt
PACKAGES+=$TRUECRYPT
}
function install_virtualbox ()
{
# Add Oracle's signing key and apt repository, then queue the VirtualBox packages.
echoerr "Installing Virtualbox ..."
sudo -u $SCRIPT_USER wget -q -O - http://download.virtualbox.org/virtualbox/debian/oracle_vbox_2016.asc | apt-key add -
# NOTE(review): the release name "yakkety" is hard-coded — confirm it
# matches the running distribution before use.
sh -c 'echo "deb http://download.virtualbox.org/virtualbox/debian yakkety non-free contrib" > /etc/apt/sources.list.d/virtualbox.org.list'
PACKAGES+=$VIRTUALBOX
}
function install_zsh ()
{
    # Queue the zsh packages; the actual setup happens in post_install_zsh.
    echoerr "Installing Zsh ..."
    PACKAGES+="$ZSH"
}
function post_install_zsh ()
{
    # Switch the invoking user's login shell to zsh and install oh-my-zsh.
    echoerr "Configuring Zsh ..."
    local ZSH_BIN
    ZSH_BIN=$(which zsh)
    echodebug "Zsh bin: $ZSH_BIN"
    # post install zsh
    # BUG FIX: the test must be quoted — unquoted `[ -n $ZSH_BIN ]` is the
    # always-true one-argument form when the variable is empty, so this block
    # ran (chsh to an empty shell) even when zsh was not installed.
    if [ -n "$ZSH_BIN" ]; then
        # change login shell of current user, not root
        chsh -s $ZSH_BIN $SCRIPT_USER
        # download and install oh-my-zsh
        sudo -u $SCRIPT_USER sh -c "$(wget https://raw.githubusercontent.com/robbyrussell/oh-my-zsh/master/tools/install.sh -O -)"
        # NOTE(review): under sudo, ~ is root's home while the link is created
        # as $SCRIPT_USER — confirm the intended target home.
        sudo -u $SCRIPT_USER ln -fs $SCRIPT_DIR/../src/.zshrc ~/.zshrc
    fi
}
function install_packages ()
{
# Assemble $PACKAGES from the selected OPT_* groups (letting each feature
# hook register extra apt sources first), then install everything at once.
if $OPT_BROWSER; then
# NOTE(review): $BROWSER is appended here and again inside install_browser;
# harmless because the list is de-duplicated below, but likely unintended.
PACKAGES+=$BROWSER
install_browser
fi
if $OPT_DESKTOP; then
PACKAGES+=$DESKTOP
fi
if $OPT_DEV; then
PACKAGES+=$DEV
fi
if $OPT_EMACS; then
install_emacs
fi
if $OPT_NEOVIM; then
install_neovim
fi
if $OPT_TOOLS; then
PACKAGES+=$TOOLS
fi
if $OPT_VIM; then
PACKAGES+=$VIM
fi
if $OPT_YOCTO; then
PACKAGES+=$YOCTO
fi
if $OPT_ZSH; then
install_zsh
fi
if $OPT_TRUECRYPT; then
install_truecrypt
fi
if $OPT_VIRTUALBOX; then
install_virtualbox
fi
if $OPT_PRINTER; then
install_printer
fi
# remove duplicates
PACKAGES=$(printf '%s\n' $PACKAGES | sort -u)
# Install tools
# $PACKAGES and $APT_GET_OPTIONS are intentionally unquoted so each word
# becomes a separate apt-get argument
apt-get update
apt-get install $PACKAGES $APT_GET_OPTIONS
apt-get upgrade --yes
}
function post_install ()
{
    # Create a `cmd` shortcut for the preferred terminal emulator, run the
    # per-feature post-install hooks and link the shared config files.
    local MATE GNOME
    MATE=$(which mate-terminal)
    GNOME=$(which gnome-terminal)
    echodebug "Mate: $MATE"
    echodebug "GNOME: $GNOME"
    # BUG FIX: the tests were unquoted (`[ -n $MATE ]`), which is always true
    # even for an empty variable, so the GNOME and else branches were
    # unreachable and `cmd` could become a dangling symlink.
    if [ -n "$MATE" ]; then
        ln -fs $MATE /usr/bin/cmd
    elif [ -n "$GNOME" ]; then
        ln -fs $GNOME /usr/bin/cmd
    else
        echoerr "No terminal shortcut set"
    fi
    if $OPT_BROWSER; then
        post_install_browser
    fi
    if $OPT_EMACS; then
        post_install_emacs
    fi
    if $OPT_NEOVIM; then
        post_install_neovim
    fi
    if $OPT_ZSH; then
        post_install_zsh
    fi
    # link the Midnight commander config files
    # NOTE(review): under sudo, ~ expands to root's home while the links are
    # created as $SCRIPT_USER — confirm the intended target home.
    sudo -u $SCRIPT_USER ln -fs $SCRIPT_DIR/../src/.config/mc ~/.config/
    # link git config
    sudo -u $SCRIPT_USER ln -fs $SCRIPT_DIR/../src/.config/git ~/.config/
    # link the xmodmap file
    sudo -u $SCRIPT_USER ln -fs $SCRIPT_DIR/../src/.Xmodmap ~/.Xmodmap
}
#
# Main starts here: require root, parse the "+feature" switches, install the
# selected package groups, then run the post-install configuration.
check_sudo
# BUG FIX: use "$@" — $* re-splits arguments and loses word boundaries.
parse_args "$@"
install_packages
post_install
|
chris-maier/dotfiles
|
bin/install-linux.sh
|
Shell
|
gpl-2.0
| 6,900 |
#!/bin/sh
### LICENSE
# Author: Vlad Dubovskiy, November 2014.
# License: Copyright (c) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version.
# Export $PATH to work with crontab if you need to, example:
# export PATH="/bin/s3cmd-1.5.0-rc1:/usr/local/pgsql/bin"
##########
## ALL COMMENTED VARIABLES STORED IN HEROKU ENVIRONMENT
##########
# SOURCE DB (Postgres)
#PGHOST=
#PGUSER=
#PGPW=
#DBSCHEMA=public # source schema on your postgres DB. Public is default
# TARGET DB (Redshift)
#RSHOST=your_instance_name.redshift.amazonaws.com
#RSHOSTPORT=5439
#RSADMIN=your_superuser
#RSNAME=your_db_name
#RSKEY=redshift_api_key
#RSSECRET=redshift_api_secret
#RSUSER=your_user_name # name of the non-superuser! who will get read/write access to your schemas and tables. It's critical that you create this user that is not sudo to avoid concurrent connection limits
#RSPW=password for Redshift DB
#RSSCHEMA=public # target schema on your redshift cluster. You could change this, but public is the default schema.
#TMPSCHEMA=temp_refresh
# DIRECTORIES
#PGSQL_BIN=path_to_your_pgsql_bin # your postgres bin directory. Tested with psql 9.3.1
#PYTHONBIN=path_to_your_python_bin # location of your python 2.7.8 executable. Other python version will likely work as well. We install anaconda distribution. Quick and easy
#SCRPTDIR=path_to_your_script_directory where p2r_* scripts live
#SCHEMADIR=path were *.sql schemas should be dumped before cleaning/uploading
#DATADIR=path_to_where_to_dump_db_tables # a place to store table dumps. Make sure it's larger than all the DB tables of interest
#S3BUCKET=name_of_s3_bucket # S3 bucket to which your machine has API read/write privileges to. Must install s3cmd and configure it
# LOGGING
# Scratch locations for stderr/stdout capture and the single-run lock file.
STDERR=/tmp/p2r.err
STDOUT=/tmp/p2r.out
LOCKFILE=/tmp/p2r.lock
# do not add views or functions to redshift. These are actual names of tables in your Postgres database
# (one name per line inside a single quoted string; consumers word-split it)
TABLES='active_cart_counts
adjustments
affiliates
audits
beacons
boost_competitions
boost_competition_results
building_groups
buildings
campus_payment_cards
canonicalized_json_menus
canonicalized_menus
cart_coupons
cart_item_options
cart_items
cart_participants
carts
cohort_memberships
cohort_service_cohorts
cohort_services
cohorts
content
coupons
credit_batches
credit_cards
credit_items
customer_addresses
customer_campus_cards
customer_coupon_uses
customer_information_requests
customer_phones
customers
daily_order_counts
deliveries
deliveries_hours
delivery_comments
delivery_estimates
delivery_log_entries
delivery_service_health_features
delivery_service_health_models
delivery_service_health_scores
delivery_service_random_forests
delivery_services
delivery_sign_ups
delivery_status_updates
delivery_steps
delivery_zones
devices
dispatches
driver_availabilities
driver_availability_blocks
driver_broadcasts
driver_messages
driver_points
driver_restaurant_bans
driver_work_hours
drivers
estimation_model_feature_values
estimation_model_features
estimation_models
franchise_contacts
gift_cards
goal_types
goal_values
hosted_sites
jobs
loyalty_cash_transactions
market_campus_payment_cards
market_cities
market_dispatch_notes
market_scorecards
market_weather_hours
markets
menu_categories
menu_category_option_group_option_prices
menu_category_option_group_options
menu_category_option_groups
menu_category_sizes
menu_descriptors
menu_item_descriptors
menu_item_option_group_option_prices
menu_item_option_group_options
menu_item_option_groups
menu_item_sizes
menu_items
menu_options
menu_sizes
menu_updates
monthly_order_counts
newbie_codes
notification_schedule_changes
numbers
option_group_option_prices
option_group_options
option_groups
order_coupons
order_notifications
orders
orders_payments
pay_period_account_entries
payments
pex_transactions
print_menus
promo_codes
receipts
referral_codes
referrals
reliability_score_events
restaurant_campus_payment_cards
restaurant_categories
restaurant_categorizations
restaurant_contacts
restaurant_drive_times
restaurant_hours
restaurant_requests
restaurant_temporary_hours
restaurant_users
restaurants
scorecards
settings
shift_assignment_delivery_service_changes
shift_assignments
shift_predictions
shift_templates
shifts
shutdown_group_restaurants
shutdown_groups
shutdown_messages
sign_up_links
sms_messages
sms_number_reservations
sms_numbers
specials
surveys
temporary_shutdowns
users
voice_calls
wait_pool_entries
work_segments'
# Custom Tables [CT] (some tables are huge due to text data, so you can define custom SQL to either munge your tables or only select certain columns for migration)
# The names of the variables must match actual table names in the schema. Order commands inside CTSQL list and table names inside CTNAMES list so the indexes of the list match.
# Custom tables must have all the same columns as defined in schema, or you'll have to define a dummy table in your DB or adjust python schema part of the script to accommodate your new table structures
# If you are just dropping columns (like me), then fill them in with something
## declare an array variable
# CTSQL[i] holds the custom SQL for the table named in CTNAMES[i]; both are empty by default.
declare -a CTSQL=( )
CTNAMES=( )
|
orderup/open-data-science
|
postgres2redshift/p2r_settings.sh
|
Shell
|
gpl-2.0
| 5,260 |
#!/usr/bin/env bash
# shellcheck disable=SC2016
# Exercises `systemd-analyze verify` (--recursive-errors modes, --root) and
# `systemd-analyze security` (--offline, --threshold, --security-policy).
set -eux
systemd-analyze log-level debug
# Build a minimal image root: the service's ExecStart target exists inside
# it, but verification of the unit is still expected to fail (see below).
mkdir -p /tmp/img/usr/lib/systemd/system/
mkdir -p /tmp/img/opt/
touch /tmp/img/opt/script0.sh
chmod +x /tmp/img/opt/script0.sh
cat <<EOF >/tmp/img/usr/lib/systemd/system/testfile.service
[Service]
ExecStart = /opt/script0.sh
EOF
# The next three verify invocations are expected to fail, so suspend errexit.
set +e
# Default behaviour is to recurse through all dependencies when unit is loaded
systemd-analyze verify --root=/tmp/img/ testfile.service \
&& { echo 'unexpected success'; exit 1; }
# As above, recurses through all dependencies when unit is loaded
systemd-analyze verify --recursive-errors=yes --root=/tmp/img/ testfile.service \
&& { echo 'unexpected success'; exit 1; }
# Recurses through unit file and its direct dependencies when unit is loaded
systemd-analyze verify --recursive-errors=one --root=/tmp/img/ testfile.service \
&& { echo 'unexpected success'; exit 1; }
set -e
# zero exit status since dependencies are ignored when unit is loaded
systemd-analyze verify --recursive-errors=no --root=/tmp/img/ testfile.service
rm /tmp/img/usr/lib/systemd/system/testfile.service
# testfile.service carries an unknown [Unit] key; testfile2.service Requires= it.
cat <<EOF >/tmp/testfile.service
[Unit]
foo = bar
[Service]
ExecStart = echo hello
EOF
cat <<EOF >/tmp/testfile2.service
[Unit]
Requires = testfile.service
[Service]
ExecStart = echo hello
EOF
# Zero exit status since no additional dependencies are recursively loaded when the unit file is loaded
systemd-analyze verify --recursive-errors=no /tmp/testfile2.service
set +e
# Non-zero exit status since all associated dependencies are recursively loaded when the unit file is loaded
systemd-analyze verify --recursive-errors=yes /tmp/testfile2.service \
&& { echo 'unexpected success'; exit 1; }
set -e
rm /tmp/testfile.service
rm /tmp/testfile2.service
cat <<EOF >/tmp/testfile.service
[Service]
ExecStart = echo hello
EOF
# Prevent regression from #13380 and #20859 where we can't verify hidden files
cp /tmp/testfile.service /tmp/.testfile.service
systemd-analyze verify /tmp/.testfile.service
rm /tmp/.testfile.service
# Zero exit status since the value used for comparison determining exposure to security threats is by default 100
systemd-analyze security --offline=true /tmp/testfile.service
set +e
#The overall exposure level assigned to the unit is greater than the set threshold
systemd-analyze security --threshold=90 --offline=true /tmp/testfile.service \
&& { echo 'unexpected success'; exit 1; }
set -e
rm /tmp/testfile.service
# A sandboxed unit inside the image root; the Private* settings lower its
# exposure level below the thresholds used below.
cat <<EOF >/tmp/img/usr/lib/systemd/system/testfile.service
[Service]
ExecStart = echo hello
PrivateNetwork = yes
PrivateDevices = yes
PrivateUsers = yes
EOF
# The new overall exposure level assigned to the unit is less than the set thresholds
# Verifies that the --offline= option works with --root=
systemd-analyze security --threshold=90 --offline=true --root=/tmp/img/ testfile.service
# Added an additional "INVALID_ID" id to the .json to verify that nothing breaks when input is malformed
# The PrivateNetwork id description and weight was changed to verify that 'security' is actually reading in
# values from the .json file when required. The default weight for "PrivateNetwork" is 2500, and the new weight
# assigned to that id in the .json file is 6000. This increased weight means that when the "PrivateNetwork" key is
# set to 'yes' (as above in the case of testfile.service) in the content of the unit file, the overall exposure
# level for the unit file should decrease to account for that increased weight.
cat <<EOF >/tmp/testfile.json
{"User_Or_DynamicUser":
{"description_bad": "Service runs as root user",
"weight": 2000,
"range": 10
},
"SupplementaryGroups":
{"description_good": "Service has no supplementary groups",
"description_bad": "Service runs with supplementary groups",
"description_na": "Service runs as root, option does not matter",
"weight": 200,
"range": 1
},
"PrivateDevices":
{"description_good": "Service has no access to hardware devices",
"description_bad": "Service potentially has access to hardware devices",
"weight": 1000,
"range": 1
},
"PrivateMounts":
{"description_good": "Service cannot install system mounts",
"description_bad": "Service may install system mounts",
"weight": 1000,
"range": 1
},
"PrivateNetwork":
{"description_good": "Service doesn't have access to the host's network",
"description_bad": "Service has access to the host's network",
"weight": 6000,
"range": 1
},
"PrivateTmp":
{"description_good": "Service has no access to other software's temporary files",
"description_bad": "Service has access to other software's temporary files",
"weight": 1000,
"range": 1
},
"PrivateUsers":
{"description_good": "Service does not have access to other users",
"description_bad": "Service has access to other users",
"weight": 1000,
"range": 1
},
"ProtectControlGroups":
{"description_good": "Service cannot modify the control group file system",
"description_bad": "Service may modify the control group file system",
"weight": 1000,
"range": 1
},
"ProtectKernelModules":
{"description_good": "Service cannot load or read kernel modules",
"description_bad": "Service may load or read kernel modules",
"weight": 1000,
"range": 1
},
"ProtectKernelTunables":
{"description_good": "Service cannot alter kernel tunables (/proc/sys, …)",
"description_bad": "Service may alter kernel tunables",
"weight": 1000,
"range": 1
},
"ProtectKernelLogs":
{"description_good": "Service cannot read from or write to the kernel log ring buffer",
"description_bad": "Service may read from or write to the kernel log ring buffer",
"weight": 1000,
"range": 1
},
"ProtectClock":
{"description_good": "Service cannot write to the hardware clock or system clock",
"description_bad": "Service may write to the hardware clock or system clock",
"weight": 1000,
"range": 1
},
"ProtectHome":
{"weight": 1000,
"range": 10
},
"ProtectHostname":
{"description_good": "Service cannot change system host/domainname",
"description_bad": "Service may change system host/domainname",
"weight": 50,
"range": 1
},
"ProtectSystem":
{"weight": 1000,
"range": 10
},
"RootDirectory_Or_RootImage":
{"description_good": "Service has its own root directory/image",
"description_bad": "Service runs within the host's root directory",
"weight": 200,
"range": 1
},
"LockPersonality":
{"description_good": "Service cannot change ABI personality",
"description_bad": "Service may change ABI personality",
"weight": 100,
"range": 1
},
"MemoryDenyWriteExecute":
{"description_good": "Service cannot create writable executable memory mappings",
"description_bad": "Service may create writable executable memory mappings",
"weight": 100,
"range": 1
},
"NoNewPrivileges":
{"description_good": "Service processes cannot acquire new privileges",
"description_bad": "Service processes may acquire new privileges",
"weight": 1000,
"range": 1
},
"CapabilityBoundingSet_CAP_SYS_ADMIN":
{"description_good": "Service has no administrator privileges",
"description_bad": "Service has administrator privileges",
"weight": 1500,
"range": 1
},
"CapabilityBoundingSet_CAP_SET_UID_GID_PCAP":
{"description_good": "Service cannot change UID/GID identities/capabilities",
"description_bad": "Service may change UID/GID identities/capabilities",
"weight": 1500,
"range": 1
},
"CapabilityBoundingSet_CAP_SYS_PTRACE":
{"description_good": "Service has no ptrace() debugging abilities",
"description_bad": "Service has ptrace() debugging abilities",
"weight": 1500,
"range": 1
},
"CapabilityBoundingSet_CAP_SYS_TIME":
{"description_good": "Service processes cannot change the system clock",
"description_bad": "Service processes may change the system clock",
"weight": 1000,
"range": 1
},
"CapabilityBoundingSet_CAP_NET_ADMIN":
{"description_good": "Service has no network configuration privileges",
"description_bad": "Service has network configuration privileges",
"weight": 1000,
"range": 1
},
"CapabilityBoundingSet_CAP_SYS_RAWIO":
{"description_good": "Service has no raw I/O access",
"description_bad": "Service has raw I/O access",
"weight": 1000,
"range": 1
},
"CapabilityBoundingSet_CAP_SYS_MODULE":
{"description_good": "Service cannot load kernel modules",
"description_bad": "Service may load kernel modules",
"weight": 1000,
"range": 1
},
"CapabilityBoundingSet_CAP_AUDIT":
{"description_good": "Service has no audit subsystem access",
"description_bad": "Service has audit subsystem access",
"weight": 500,
"range": 1
},
"CapabilityBoundingSet_CAP_SYSLOG":
{"description_good": "Service has no access to kernel logging",
"description_bad": "Service has access to kernel logging",
"weight": 500,
"range": 1
},
"CapabilityBoundingSet_CAP_SYS_NICE_RESOURCE":
{"description_good": "Service has no privileges to change resource use parameters",
"description_bad": "Service has privileges to change resource use parameters",
"weight": 500,
"range": 1
},
"CapabilityBoundingSet_CAP_MKNOD":
{"description_good": "Service cannot create device nodes",
"description_bad": "Service may create device nodes",
"weight": 500,
"range": 1
},
"CapabilityBoundingSet_CAP_CHOWN_FSETID_SETFCAP":
{"description_good": "Service cannot change file ownership/access mode/capabilities",
"description_bad": "Service may change file ownership/access mode/capabilities unrestricted",
"weight": 1000,
"range": 1
},
"CapabilityBoundingSet_CAP_DAC_FOWNER_IPC_OWNER":
{"description_good": "Service cannot override UNIX file/IPC permission checks",
"description_bad": "Service may override UNIX file/IPC permission checks",
"weight": 1000,
"range": 1
},
"CapabilityBoundingSet_CAP_KILL":
{"description_good": "Service cannot send UNIX signals to arbitrary processes",
"description_bad": "Service may send UNIX signals to arbitrary processes",
"weight": 500,
"range": 1
},
"CapabilityBoundingSet_CAP_NET_BIND_SERVICE_BROADCAST_RAW":
{"description_good": "Service has no elevated networking privileges",
"description_bad": "Service has elevated networking privileges",
"weight": 500,
"range": 1
},
"CapabilityBoundingSet_CAP_SYS_BOOT":
{"description_good": "Service cannot issue reboot()",
"description_bad": "Service may issue reboot()",
"weight": 100,
"range": 1
},
"CapabilityBoundingSet_CAP_MAC":
{"description_good": "Service cannot adjust SMACK MAC",
"description_bad": "Service may adjust SMACK MAC",
"weight": 100,
"range": 1
},
"CapabilityBoundingSet_CAP_LINUX_IMMUTABLE":
{"description_good": "Service cannot mark files immutable",
"description_bad": "Service may mark files immutable",
"weight": 75,
"range": 1
},
"CapabilityBoundingSet_CAP_IPC_LOCK":
{"description_good": "Service cannot lock memory into RAM",
"description_bad": "Service may lock memory into RAM",
"weight": 50,
"range": 1
},
"CapabilityBoundingSet_CAP_SYS_CHROOT":
{"description_good": "Service cannot issue chroot()",
"description_bad": "Service may issue chroot()",
"weight": 50,
"range": 1
},
"CapabilityBoundingSet_CAP_BLOCK_SUSPEND":
{"description_good": "Service cannot establish wake locks",
"description_bad": "Service may establish wake locks",
"weight": 25,
"range": 1
},
"CapabilityBoundingSet_CAP_WAKE_ALARM":
{"description_good": "Service cannot program timers that wake up the system",
"description_bad": "Service may program timers that wake up the system",
"weight": 25,
"range": 1
},
"CapabilityBoundingSet_CAP_LEASE":
{"description_good": "Service cannot create file leases",
"description_bad": "Service may create file leases",
"weight": 25,
"range": 1
},
"CapabilityBoundingSet_CAP_SYS_TTY_CONFIG":
{"description_good": "Service cannot issue vhangup()",
"description_bad": "Service may issue vhangup()",
"weight": 25,
"range": 1
},
"CapabilityBoundingSet_CAP_SYS_PACCT":
{"description_good": "Service cannot use acct()",
"description_bad": "Service may use acct()",
"weight": 25,
"range": 1
},
"UMask":
{"weight": 100,
"range": 10
},
"KeyringMode":
{"description_good": "Service doesn't share key material with other services",
"description_bad": "Service shares key material with other service",
"weight": 1000,
"range": 1
},
"ProtectProc":
{"description_good": "Service has restricted access to process tree(/proc hidepid=)",
"description_bad": "Service has full access to process tree(/proc hidepid=)",
"weight": 1000,
"range": 3
},
"ProcSubset":
{"description_good": "Service has no access to non-process/proc files(/proc subset=)",
"description_bad": "Service has full access to non-process/proc files(/proc subset=)",
"weight": 10,
"range": 1
},
"NotifyAccess":
{"description_good": "Service child processes cannot alter service state",
"description_bad": "Service child processes may alter service state",
"weight": 1000,
"range": 1
},
"RemoveIPC":
{"description_good": "Service user cannot leave SysV IPC objects around",
"description_bad": "Service user may leave SysV IPC objects around",
"description_na": "Service runs as root, option does not apply",
"weight": 100,
"range": 1
},
"Delegate":
{"description_good": "Service does not maintain its own delegated control group subtree",
"description_bad": "Service maintains its own delegated control group subtree",
"weight": 100,
"range": 1
},
"RestrictRealtime":
{"description_good": "Service realtime scheduling access is restricted",
"description_bad": "Service may acquire realtime scheduling",
"weight": 500,
"range": 1
},
"RestrictSUIDSGID":
{"description_good": "SUID/SGID file creation by service is restricted",
"description_bad": "Service may create SUID/SGID files",
"weight": 1000,
"range": 1
},
"RestrictNamespaces_user":
{"description_good": "Service cannot create user namespaces",
"description_bad": "Service may create user namespaces",
"weight": 1500,
"range": 1
},
"RestrictNamespaces_mnt":
{"description_good": "Service cannot create file system namespaces",
"description_bad": "Service may create file system namespaces",
"weight": 500,
"range": 1
},
"RestrictNamespaces_ipc":
{"description_good": "Service cannot create IPC namespaces",
"description_bad": "Service may create IPC namespaces",
"weight": 500,
"range": 1
},
"RestrictNamespaces_pid":
{"description_good": "Service cannot create process namespaces",
"description_bad": "Service may create process namespaces",
"weight": 500,
"range": 1
},
"RestrictNamespaces_cgroup":
{"description_good": "Service cannot create cgroup namespaces",
"description_bad": "Service may create cgroup namespaces",
"weight": 500,
"range": 1
},
"RestrictNamespaces_net":
{"description_good": "Service cannot create network namespaces",
"description_bad": "Service may create network namespaces",
"weight": 500,
"range": 1
},
"RestrictNamespaces_uts":
{"description_good": "Service cannot create hostname namespaces",
"description_bad": "Service may create hostname namespaces",
"weight": 100,
"range": 1
},
"RestrictAddressFamilies_AF_INET_INET6":
{"description_good": "Service cannot allocate Internet sockets",
"description_bad": "Service may allocate Internet sockets",
"weight": 1500,
"range": 1
},
"RestrictAddressFamilies_AF_UNIX":
{"description_good": "Service cannot allocate local sockets",
"description_bad": "Service may allocate local sockets",
"weight": 25,
"range": 1
},
"RestrictAddressFamilies_AF_NETLINK":
{"description_good": "Service cannot allocate netlink sockets",
"description_bad": "Service may allocate netlink sockets",
"weight": 200,
"range": 1
},
"RestrictAddressFamilies_AF_PACKET":
{"description_good": "Service cannot allocate packet sockets",
"description_bad": "Service may allocate packet sockets",
"weight": 1000,
"range": 1
},
"RestrictAddressFamilies_OTHER":
{"description_good": "Service cannot allocate exotic sockets",
"description_bad": "Service may allocate exotic sockets",
"weight": 1250,
"range": 1
},
"SystemCallArchitectures":
{"weight": 1000,
"range": 10
},
"SystemCallFilter_swap":
{"weight": 1000,
"range": 10
},
"SystemCallFilter_obsolete":
{"weight": 250,
"range": 10
},
"SystemCallFilter_clock":
{"weight": 1000,
"range": 10
},
"SystemCallFilter_cpu_emulation":
{"weight": 250,
"range": 10
},
"SystemCallFilter_debug":
{"weight": 1000,
"range": 10
},
"SystemCallFilter_mount":
{"weight": 1000,
"range": 10
},
"SystemCallFilter_module":
{"weight": 1000,
"range": 10
},
"SystemCallFilter_raw_io":
{"weight": 1000,
"range": 10
},
"SystemCallFilter_reboot":
{"weight": 1000,
"range": 10
},
"SystemCallFilter_privileged":
{"weight": 700,
"range": 10
},
"SystemCallFilter_resources":
{"weight": 700,
"range": 10
},
"IPAddressDeny":
{"weight": 1000,
"range": 10
},
"DeviceAllow":
{"weight": 1000,
"range": 10
},
"AmbientCapabilities":
{"description_good": "Service process does not receive ambient capabilities",
"description_bad": "Service process receives ambient capabilities",
"weight": 500,
"range": 1
},
"INVALID_ID":
{"weight": 1000,
"range": 10
}
}
EOF
# Reads in custom security requirements from the parsed .json file and uses these for comparison
systemd-analyze security --threshold=90 --offline=true \
--security-policy=/tmp/testfile.json \
--root=/tmp/img/ testfile.service
set +e
# With the stricter threshold the same unit is expected to exceed the limit
systemd-analyze security --threshold=50 --offline=true \
--security-policy=/tmp/testfile.json \
--root=/tmp/img/ testfile.service \
&& { echo 'unexpected success'; exit 1; }
set -e
rm /tmp/img/usr/lib/systemd/system/testfile.service
systemd-analyze log-level info
# signal overall success to the test harness
echo OK >/testok
exit 0
|
mxre/systemd
|
test/units/testsuite-65.sh
|
Shell
|
gpl-2.0
| 18,780 |
#!/bin/bash
###############################################################################
#
# Kernel Build Script
#
###############################################################################
# 2011-10-24 effectivesky : modified
# 2010-12-29 allydrop : created
###############################################################################
##############################################################################
# set toolchain
##############################################################################
# export PATH=$(pwd)/$(your tool chain path)/bin:$PATH
# export CROSS_COMPILE=$(your compiler prefix)
#export PATH=$(pwd)/../../toolchain_arm-eabi-4.6/arm-eabi-4.6/bin:$PATH
export ARCH=arm
# NOTE(review): assumes a prebuilt arm-eabi-4.6 toolchain four directories
# above this script — confirm the path for your checkout layout.
export PATH=$(pwd)/../../../../arm-eabi-4.6/bin:$PATH
export CROSS_COMPILE=arm-eabi-
##############################################################################
# make zImage
##############################################################################
# Build out-of-tree in ./obj/KERNEL_OBJ so the source tree stays clean.
mkdir -p ./obj/KERNEL_OBJ/
#make O=./obj/KERNEL_OBJ/
# Configure with msm8960_ef52k_tp20_defconfig, then build with 8 jobs,
# teeing all compiler output to kernel_log.txt.
make ARCH=arm O=./obj/KERNEL_OBJ/ msm8960_ef52k_tp20_defconfig
make -j8 ARCH=arm O=./obj/KERNEL_OBJ/ 2>&1 | tee kernel_log.txt
##############################################################################
# Copy Kernel Image
##############################################################################
# NOTE(review): runs even if the build failed, so zImage may be stale — confirm.
cp -f ./obj/KERNEL_OBJ/arch/arm/boot/zImage .
|
CryToCry96/android_kernel_ef52
|
build_kernel.sh
|
Shell
|
gpl-2.0
| 1,407 |
#! /bin/sh -e
# tup - A file-based build system
#
# Copyright (C) 2013-2022 Mike Shal <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# Try to hit the file-system limit, and make sure we can close the last
# fd and then create a new file and write to it.
# Load the tup test harness helpers (update, eotup, check_no_windows, ...).
. ./tup.sh
# The probe below uses mmap; skip this test on Windows.
check_no_windows mmap
# Generate the C probe: open ok.c repeatedly until EMFILE/ENFILE, close two
# saved fds, then create/write/read (and mmap) new.txt — checking that file
# access still works at the descriptor limit.
cat > ok.c << HERE
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <errno.h>
#include <sys/mman.h>
#include <unistd.h>
int main(void)
{
int fd;
int lastfd;
int lastfd2;
void *map;
while(1) {
fd = open("ok.c", O_RDONLY);
if(fd < 0) {
if(errno == EMFILE || errno == ENFILE) {
char buf[4];
if(close(lastfd) < 0) {
perror("close");
return -1;
}
if(close(lastfd2) < 0) {
perror("close2");
return -1;
}
fd = creat("new.txt", 0666);
if(fd < 0) {
perror("new.txt");
return -1;
}
write(fd, "hey\\n", 4);
close(fd);
/* Open twice to trigger the mapping
* logic.
*/
fd = open("new.txt", O_RDONLY);
fd = open("new.txt", O_RDONLY);
if(fd < 0) {
perror("new.txt - read");
return -1;
}
if(read(fd, buf, 4) < 0) {
perror("read");
return -1;
}
if(memcmp(buf, "hey\\n", 4) != 0) {
fprintf(stderr, "Expected 'hey'\\n");
}
break;
}
perror("ok.c");
fprintf(stderr, "Can't open ok.c\\n");
return -1;
}
lastfd2 = lastfd;
lastfd = fd;
map = mmap(NULL, 5, PROT_READ, MAP_SHARED, fd, 0);
}
return 0;
}
HERE
cat > Tupfile << HERE
: |> gcc ok.c -o %o |> prog.exe
: prog.exe |> ./%f |> new.txt
HERE
update
echo "hey" | diff - new.txt
eotup
|
gittup/tup
|
test/t4152-maxfiles3.sh
|
Shell
|
gpl-2.0
| 2,215 |
#!/usr/bin/env bash
## Test that the patch index passes its self-test on a repository
## with a rename.
##
## Copyright (C) 2014 Ganesh Sittampalam
##
## Permission is hereby granted, free of charge, to any person
## obtaining a copy of this software and associated documentation
## files (the "Software"), to deal in the Software without
## restriction, including without limitation the rights to use, copy,
## modify, merge, publish, distribute, sublicense, and/or sell copies
## of the Software, and to permit persons to whom the Software is
## furnished to do so, subject to the following conditions:
##
## The above copyright notice and this permission notice shall be
## included in all copies or substantial portions of the Software.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
## EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
## MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
## NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
## BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
## ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
## CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
## SOFTWARE.
# Shared harness for darcs shell tests.
. lib
# Fresh repository with the patch index enabled.
mkdir R
cd R
darcs init --with-patch-index
# Record an initial file, rename it, record the rename...
echo initial contents > file1
darcs rec -lam "initial"
darcs mv file1 file2
darcs rec -am "move"
# ...then run darcs' built-in patch-index consistency check.
darcs show patch-index-test
|
DavidAlphaFox/darcs
|
tests/patch-index-rename.sh
|
Shell
|
gpl-2.0
| 1,407 |
#!/bin/bash
# Copyright 2017 Andrey Rodchenko, School of Computer Science, The University of Manchester
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
#
# executeExitOnFail runs its arguments and aborts the script on failure.
. ./scripts/executeExitOnFail
set -x
# Run the mx checkstyle pass (-f forces a full re-check) over both the
# maxine and graal source trees; any style violation fails the script.
executeExitOnFail cd maxine
executeExitOnFail ../graal/mxtool/mx -v checkstyle -f
# NOTE(review): after 'cd ../graal' the ../graal/mxtool path resolves back
# to the same tool (equivalent to ./mxtool/mx) — redundant but harmless.
executeExitOnFail cd ../graal
executeExitOnFail ../graal/mxtool/mx -v checkstyle -f
|
arodchen/MaxSim
|
scripts/checkStyle.sh
|
Shell
|
gpl-2.0
| 965 |
#! /bin/bash
# Install-Script, run as root
echo "Don't forget to add /mnt to your fstab or change the paths in the scripts."
echo "Installing.."
# Install the init script and the recorder/server scripts to /mnt.
cp dashcam_init /etc/init.d/dashcam
chmod 755 /etc/init.d/dashcam
cp dashcam.sh /mnt/dashcam
cp server.sh /mnt/server
chmod +x /mnt/dashcam /mnt/server
# Register the dashcam service to start on boot.
update-rc.d dashcam defaults
# Install the default configuration and source it so the recording
# directories ($VIDEO_PATH / $PERMANENT_PATH) can be created below.
cp conf.sample.cfg /etc/dashcam.cfg
source /etc/dashcam.cfg
# NOTE(review): mkdir errors if the directories already exist — presumably
# a fresh install is assumed; 'mkdir -p' would make re-installs idempotent.
mkdir "$VIDEO_PATH" "$PERMANENT_PATH"
echo "All done. Check /mnt for the scripts and /etc/dashcam.cfg for configuration."
echo "Keep in mind: Your Raspberry Pi will start recording on boot. You can stop the service anytime running /etc/init.d/dashcam stop"
|
KarimGeiger/RaspberryPiDashcam
|
scripts/install.sh
|
Shell
|
gpl-2.0
| 647 |
#!/bin/bash
######################################################################
#
# idFORGE Framework - Manage identity manuals in community
# Copyright © 2015 The CentOS Artwork SIG
#
# idFORGE Framework is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# idFORGE Framework is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with idFORGE Framework; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
# Alain Reguera Delgado <[email protected]>
# 39 Street No. 4426 Cienfuegos, Cuba.
#
######################################################################
# Localization render-hook for shell-script sources: extracts translatable
# strings from the input scripts into a POT template, then hands the result
# to the sibling 'po' and 'mo' modules.
function sh {
# Exactly one PO file and one MO file may be supplied.
[[ ${#RENDER_FROM_PO[*]} -gt 1 ]] \
&& idforge_printMessage "`gettext "Only one PO file must be provided."`" --as-error-line
[[ ${#RENDER_FROM_MO[*]} -gt 1 ]] \
&& idforge_printMessage "`gettext "Only one MO file must be provided."`" --as-error-line
# All inputs must be shell scripts (checked by MIME type).
idforge_checkFiles -efi 'text/x-shellscript' "${RENDER_FROM[*]}"
LOCALE_PO_TEMPLATES[0]=$(idforge_printTemporalFile ${RENDER_FROM_PO[0]})
# Retrieve translatable strings from shell script files and create
# the portable object template (.pot) from them.
xgettext --output=${LOCALE_PO_TEMPLATES[0]} \
--strict \
--msgid-bugs-address="[email protected]" \
--copyright-holder="The CentOS Artwork SIG" \
--width=70 --no-location \
${RENDER_FROM[*]}
# Fix charset information in the POT file based on LANG
# environment variable.
local LANG_CHARSET=$(echo ${LANG} | cut -d. -f2)
sed -i -r "/^\"Content-Type:/{s/CHARSET/${LANG_CHARSET:-UTF-8}/}" ${LOCALE_PO_TEMPLATES[0]}
# Only run the po/mo sibling modules if the template was created.
[[ -f ${LOCALE_PO_TEMPLATES[0]} ]] \
&& idforge_setModuleEnvironment -m 'po' -t 'sibling'
[[ -f ${LOCALE_PO_TEMPLATES[0]} ]] \
&& idforge_setModuleEnvironment -m 'mo' -t 'sibling'
}
|
areguera/idforge
|
Library/Modules/Locale/Modules/Update/Modules/Sh/sh.sh
|
Shell
|
gpl-2.0
| 2,331 |
#
# Copyright (C) 2010 OpenWrt.org
#
# Board helpers (ramips_board_name) used by the sysupgrade hooks below.
. /lib/ramips.sh
# mtd partition that sysupgrade writes the new firmware image to.
PART_NAME=firmware
# Keep the board helpers available inside the upgrade ramfs.
RAMFS_COPY_DATA=/lib/ramips.sh
# Validate a sysupgrade image for the current ramips board: read the
# image's magic number and compare it against the value expected for the
# board's firmware format. $1 is the image path; returns 0 when valid.
platform_check_image() {
local board=$(ramips_board_name)
local magic="$(get_magic_long "$1")"
# Exactly one image argument is expected.
[ "$#" -gt 1 ] && return 1
case "$board" in
3g-6200n | \
3g-6200nl | \
3g150b | \
3g300m | \
a5-v11 | \
air3gii | \
ai-br100 |\
all0239-3g | \
all0256n | \
all5002 | \
all5003 | \
ar725w | \
asl26555 | \
awapn2403 | \
awm002-evb | \
awm003-evb | \
bc2 | \
broadway | \
carambola | \
cf-wr800n | \
d105 | \
dap-1350 | \
dcs-930 | \
dcs-930l-b1 | \
dir-300-b1 | \
dir-300-b7 | \
dir-320-b1 | \
dir-600-b1 | \
dir-600-b2 | \
dir-615-d | \
dir-615-h1 | \
dir-620-a1 | \
dir-620-d1 | \
dir-810l | \
e1700 | \
esr-9753 | \
f7c027 | \
fonera20n | \
freestation5 | \
firewrt |\
pbr-m1 |\
hg255d | \
hc5*61 | \
hc5661a | \
hlk-rm04 | \
ht-tm02 | \
hw550-3g | \
ip2202 | \
linkits7688 | \
linkits7688d | \
m2m | \
m3 | \
m4 | \
microwrt | \
mlw221 | \
mlwg2 | \
mofi3500-3gn | \
mpr-a1 | \
mpr-a2 | \
mr-102n | \
mzk-w300nh2 | \
nbg-419n | \
nw718 | \
omni-emb | \
omni-emb-hpm | \
omni-plug | \
olinuxino-rt5350f | \
olinuxino-rt5350f-evb | \
psr-680w | \
px4885 | \
re6500 | \
rp-n53 | \
rt-g32-b1 | \
rt-n10-plus | \
rt-n13u | \
rt-n14u | \
rt-n15 | \
rt-n56u | \
rut5xx | \
sl-r7205 | \
tew-691gr | \
tew-692gr | \
ur-326n4g |\
ur-336un |\
v22rw-2x2 | \
vocore | \
w150m | \
w306r-v20 |\
w502u |\
whr-g300n |\
whr-300hp2 |\
whr-600d |\
whr-1166d |\
wizfi630a |\
wsr-600 |\
wl-330n | \
wl-330n3g | \
wl-351 | \
wl341v3 | \
wli-tx4-ag300n | \
wzr-agl300nh | \
wmr300 |\
wnce2001 | \
wr512-3gn |\
wr6202 |\
wr8305rt |\
wrtnode |\
wt1520 |\
wt3020 |\
x5 |\
x8 |\
xiaomi-miwifi-mini |\
y1 |\
y1s |\
zbt-wa05 |\
zbt-wg2626 |\
zte-q7)
# Magic expected for this (largest) board family.
[ "$magic" != "27051956" ] && {
echo "Invalid image type."
return 1
}
return 0
;;
wsr-1166)
# 0x48445230 = ASCII "HDR0"
[ "$magic" != "48445230" ] && {
echo "Invalid image type."
return 1
}
return 0
;;
ar670w)
[ "$magic" != "6d000080" ] && {
echo "Invalid image type."
return 1
}
return 0
;;
cy-swr1100 |\
dir-610-a1 |\
dir-645 |\
dir-860l-b1)
[ "$magic" != "5ea3a417" ] && {
echo "Invalid image type."
return 1
}
return 0
;;
br-6475nd)
# 0x43535953 = ASCII "CSYS"
[ "$magic" != "43535953" ] && {
echo "Invalid image type."
return 1
}
return 0
;;
c20i)
[ "$magic" != "03000000" ] && {
echo "Invalid image type."
return 1
}
return 0
;;
esac
# Unknown board: refuse the upgrade rather than risk a brick.
echo "Sysupgrade is not yet supported on $board."
return 1
}
# Write the validated image. Every ramips board currently takes the
# default upgrade path; the board name is looked up for parity with
# platform_check_image.
platform_do_upgrade() {
	local board
	board=$(ramips_board_name)

	case "$board" in
	*)
		default_do_upgrade "$ARGV"
		;;
	esac
}
# Stop the watchdog daemon so nothing can reboot the board mid-flash.
disable_watchdog() {
killall watchdog
# If something still holds /dev/watchdog open, refuse to continue.
# NOTE(review): when no holder is found, the function's status is the
# failed grep's (non-zero); the pre-upgrade hook machinery appears to
# tolerate this — confirm before relying on the return value.
( ps | grep -v 'grep' | grep '/dev/watchdog' ) && {
echo 'Could not disable watchdog'
return 1
}
}
# Switch the board LED into the "upgrade in progress" state via diag.sh.
blink_led() {
	. /etc/diag.sh
	set_state upgrade
}
# Register hooks that run before sysupgrade writes the new image.
append sysupgrade_pre_upgrade disable_watchdog
append sysupgrade_pre_upgrade blink_led
|
zhuanshicong/myopenwrt
|
target/linux/ramips/base-files/lib/upgrade/platform.sh
|
Shell
|
gpl-2.0
| 2,997 |
#!/bin/bash
#
#
# Bash Labyrinth version 1.0
# This game is based on Joe Wingbermuehle's random maze generator,
# found at https://raw.github.com/joewing/maze/master/maze.sh
# Movement and collision handling were added on top to make it a game.
# A compiled language would naturally be more efficient for this kind of
# program, but this little game shows the often unrecognized power of
# shell script.
#
#
# References and code used:
# https://raw.github.com/joewing/maze/master/maze.sh
# http://lendscripts.blogspot.com.br/2012/10/licao-3-programacao-de-jogos-em.html
#
# Written by Fernando Bolonhezi Giannasi - June/2013
# Validate the Bash version: the game needs Bash 4.0+.
# The awk program prints "0" when the major version is >= 4, "1" otherwise.
if [ $(echo $BASH_VERSION | awk -F"." '{ if ( ($1 >= 4) ) {print "0"} else {print "1"}}') -ne "0" ]; then
echo "Este jogo só funciona com Bash 4.0 ou posterior"
echo "Sua versão é $BASH_VERSION"
exit 1
fi
# Main menu: loop until the player picks a difficulty or quits.
setterm -cursor off
while true; do
clear
echo -e '\033[01;33m'
cat << EOF
Labirinto Bash versão 1.0
*******************************************
Ajude a bolota vermelha a encontrar a saída
*******************************************
Comandos:
a -> move a esquerda
s -> move abaixo
d -> move a direita
w -> move acima
q -> sai do jogo (a qualquer momento)
*******************************************
EOF
tput sgr0
echo -e '\033[01;31m'
cat << EOF
Selecione a dificuldade desejada:
1-) Fácil
2-) Médio
3-) Difícil
q-) Sair do Jogo
EOF
tput sgr0
read -n1 -s DIFICULDADE
# The chosen difficulty sets the maze dimensions (cells, width x height).
case "$DIFICULDADE" in
"1")
MAZE_WIDTH="39"
MAZE_HEIGHT="21"
break
;;
"2")
MAZE_WIDTH="49"
MAZE_HEIGHT="31"
break
;;
"3")
MAZE_WIDTH="59"
MAZE_HEIGHT="41"
break
;;
"q")
exit 0
;;
esac
done
# Maze generator:
# Initialize the maze array: border cells become walls (1), every interior
# cell starts solid (0). The grid is stored row-major in the global 'maze'
# array, indexed as maze[y * MAZE_WIDTH + x]. The loop variables y/x are
# intentionally left global, matching the rest of the script.
function init_maze {
for ((y=0; y<MAZE_HEIGHT; y++)) ; do
for ((x=0; x<MAZE_WIDTH; x++)) ; do
if (( y == 0 || y == MAZE_HEIGHT - 1 || x == 0 || x == MAZE_WIDTH - 1 )) ; then
maze[$((y * MAZE_WIDTH + x))]=1
else
maze[$((y * MAZE_WIDTH + x))]=0
fi
done
done
}
# Display the maze array: cells still equal to 0 print as "[]" blocks,
# everything else prints as two spaces, one maze row per output line.
function print_maze {
for ((y=0; y<MAZE_HEIGHT; y++)) ; do
local line=""
for ((x = 0; x < MAZE_WIDTH; x++ )) ; do
if (( maze[y * MAZE_WIDTH + x] == 0 )) ; then
line="${line}[]"
else
line="${line}  "
fi
done
printf '%s\n' "$line"
done
}
# Carve the maze starting at the specified offset.
# Recursive random walk: from 'index', try the four directions in a random
# rotation. A step is only taken when both the adjacent cell and the cell
# behind it are still solid (0), which keeps corridors one cell wide and
# prevents them from merging. Direction order comes from $RANDOM.
function carve_maze {
local index=$1
local dir=$RANDOM
local i=0
maze[$index]=1
while [ $i -le 4 ] ; do
local offset=0
# dir % 4 selects: 0=right, 1=left, 2=down, 3=up (row-major offsets).
case $((dir % 4)) in
0) offset=1 ;;
1) offset=-1 ;;
2) offset=$MAZE_WIDTH ;;
3) offset=$((-$MAZE_WIDTH)) ;;
esac
local index2=$((index + offset))
if [[ maze[$index2] -eq 0 ]] ; then
local nindex=$((index2 + offset))
if [[ maze[$nindex] -eq 0 ]] ; then
# Open the wall cell and recurse from the cell behind it.
maze[$index2]=1
carve_maze $nindex
i=0
dir=$RANDOM
index=$nindex
fi
fi
i=$((i + 1))
dir=$((dir + 1))
done
}
# Generate a maze:
TMP="/tmp"
if [ ! -d "$TMP" ]; then
mkdir "$TMP"
fi
init_maze
# Carve starting from cell (2,2), then open an entrance near the top-left
# and an exit near the bottom-right corner.
carve_maze $((2 * MAZE_WIDTH + 2))
maze[$((MAZE_WIDTH + 2))]=1
maze[$(((MAZE_HEIGHT - 2) * MAZE_WIDTH + MAZE_WIDTH - 3))]=1
# Render the maze to a file; drop the first line and one leading space so
# screen coordinates line up with the file's line/column numbers.
print_maze > $TMP/maze.txt
sed -i '1d' $TMP/maze.txt
sed -i 's/^ //g' $TMP/maze.txt
# Initialize game state
INPUT="0" # last keypress read
m="0" # probe column for the collision test
n="1" # probe line for the collision test
C="0" # character found at the probe position
x="3" # ball X (screen column)
y="0" # ball Y (screen line)
counter="1" # move counter
GANHA="$(echo $MAZE_HEIGHT - 3 | bc)" # row that marks the exit
# Helpers to draw the labyrinth and the ball.
# Draw the pre-rendered maze plus a status footer with the current
# coordinates and move count (reads globals TMP, x, y, counter).
function cat_maze() {
echo -ne '\033[01;32m'
cat $TMP/maze.txt
tput sgr0
echo "Coordenada X = $x"
echo "Coordenada Y = $y"
echo "Movimentos = $counter"
}
# Draw the ball: a red "O" at the current cursor position, then reset
# terminal attributes.
function cat_ball() {
printf '\033[01;31mO'
tput sgr0
}
# Initial position: draw the maze and place the ball at the entrance.
clear
tput cup 0 0
cat_maze
tput cup $y $x
cat_ball

# Ball movement. For every keypress we compute the screen cell
# (column m, line n) the ball would enter and read that character from
# maze.txt: a blank (or out-of-range) cell is free, anything else is a
# wall and the move is refused (counter is compensated because it is
# always incremented at the bottom of the loop).
#
# Fix: all test operands are quoted. The original unquoted tests
# ([ $INPUT != "q" ], [ $INPUT = a ], [ -z $C ]) made 'test' fail with a
# syntax error on an empty keystroke (e.g. Enter), which silently ended
# the game. The collision probe strips blanks (${C// /}) to preserve the
# original rule that a space character means "free cell".
while [ "$INPUT" != "q" ]; do
	read -n1 -s INPUT

	if [ "$INPUT" = "a" ]; then
		let "m = x"
		let "n = y + 1"
		C=$(sed -n "$n"p $TMP/maze.txt 2> /dev/null | cut -c"$m" 2> /dev/null)
		if [ -z "${C// /}" ]; then
			let "x = x - 1"
		else
			let counter--
		fi
	fi

	if [ "$INPUT" = "d" ]; then
		let "m = x + 2"
		let "n = y + 1"
		C=$(sed -n "$n"p $TMP/maze.txt 2> /dev/null | cut -c"$m" 2> /dev/null)
		if [ -z "${C// /}" ]; then
			let "x = x + 1"
		else
			let counter--
		fi
	fi

	if [ "$INPUT" = "w" ]; then
		let "m = x + 1"
		let "n = y"
		C=$(sed -n "$n"p $TMP/maze.txt 2> /dev/null | cut -c"$m" 2> /dev/null)
		if [ -z "${C// /}" ]; then
			let "y = y - 1"
		else
			let counter--
		fi
	fi

	if [ "$INPUT" = "s" ]; then
		let "m = x + 1"
		let "n = y + 2"
		C=$(sed -n "$n"p $TMP/maze.txt 2> /dev/null | cut -c"$m" 2> /dev/null)
		if [ -z "${C// /}" ]; then
			let "y = y + 1"
		else
			let counter--
		fi
	fi

	# Never let the ball escape through the top entrance.
	if [ "$y" -lt "0" ]; then y=0; let counter--; fi

	# Victory: the ball moved past the exit row at the bottom of the maze.
	if [ "$y" -gt "$GANHA" ]; then
		tput cup $(echo $MAZE_HEIGHT + 3 | bc) 0
		echo -e '\033[01;31m'
		echo GANHOU!!!!!
		echo "Score: $counter movimentos realizados"
		tput sgr0
		echo
		setterm -cursor on
		exit 0
	fi

	# Redraw the maze and the ball at its (possibly unchanged) position.
	clear
	cat_maze
	tput cup $y $x
	cat_ball
	let counter++
done
clear
# End of script
|
ronaldfalcao/shellscripts
|
labirinto/labirin.sh
|
Shell
|
gpl-2.0
| 5,747 |
#!/bin/sh
# Shared harness for logrotate tests (preptest, cleanup, $RLR, ...).
. ./test-common.sh

cleanup 68

# ------------------------------- Test 68 ------------------------------------
# Old state file entries should be removed when not used. Logrotate should
# not freeze on big state file.

preptest test.log 68 1 0

cat > state << EOF
logrotate state -- version 1
"$PWD/test.log" 2000-1-1
EOF

# Pad the state file with 200000 entries for files that no longer exist.
for i in $(seq 1 200000)
do
	echo "\"$PWD/removed.log$i\" 2000-1-1" >> state
done

$RLR test-config.68 --force || exit 23

# Grep the state file directly (no extra 'cat') and branch on grep's exit
# status instead of inspecting $? afterwards.
if ! grep test.log state >/dev/null; then
	echo "state file should contain 'test.log'"
	exit 3
fi

# All stale 'removed.log*' entries must have been purged.
if grep removed.log state >/dev/null; then
	echo "state file should not contain 'removed.log'"
	exit 3
fi
|
logrotate/logrotate
|
test/test-0068.sh
|
Shell
|
gpl-2.0
| 692 |
#!/bin/bash
# Uninstall the Katana MIDI bridge: service user, programs, Python support
# modules, init script and udev rules. Run as root.

BINDIR=/usr/local/bin
LIBDIR=/usr/local/share/katana
INITDIR=/etc/init.d
UDEVDIR=/etc/udev/rules.d

# Fix: branch on grep's exit status directly. The old form executed grep
# inside backticks and ran its (empty) output as a command, which only
# worked by accident.
if grep -q katana-user /etc/passwd; then
    echo "Remove non-privileged user for MIDI bridge"
    userdel -r katana-user
fi

echo "Remove program and support scripts from $BINDIR"
rm -f $BINDIR/katana_bridge_start
rm -f $BINDIR/katana_bridge_stop
rm -f $BINDIR/katana_bridge_app

echo "Remove Python modules from $LIBDIR"
rm -rf $LIBDIR

echo "Remove init script from $INITDIR"
rm -f $INITDIR/katana_bridge
update-rc.d katana_bridge remove

echo "Remove udev rules from $UDEVDIR and refresh system"
rm -f $UDEVDIR/50-katana.rules
rm -f $UDEVDIR/60-controller.rules
udevadm control --reload

echo "Done!"
|
snhirsch/katana-midi-bridge
|
uninstall.sh
|
Shell
|
gpl-2.0
| 724 |
# Check Blackberry NDK
BB_OS=`cat ${QNX_TARGET}/etc/qversion 2>/dev/null`
if [ -z "$BB_OS" ]; then
echo "Could not find your Blackberry NDK. Please source bbndk-env.sh"
exit 1
fi
# Strict errors. Any non-zero return exits this script
set -e
echo "Building for Blackberry ${BB_OS}"
# Optional first argument switches the build to the simulator target.
if [[ "$1" == "--simulator" ]]; then
SIM="-DSIMULATOR=ON"
fi
# grep filters cmake's "-- ..." progress chatter; '|| true' keeps the
# pipeline from failing under 'set -e' when there is nothing else printed.
cmake ${SIM} -DCMAKE_TOOLCHAIN_FILE=bb.toolchain.cmake -DBLACKBERRY=${BB_OS} .. | (grep -v "^-- " || true)
# Compile and create unsigned PPSSPP.bar with debugtoken
make -j4
if [[ "$1" != "--no-package" ]]; then
DEBUG="-devMode -debugToken ${HOME}/debugtoken.bar"
blackberry-nativepackager -package PPSSPP.bar bar-descriptor.xml $DEBUG
fi
|
metalex10/PPSSPP-X360
|
Blackberry/build.sh
|
Shell
|
gpl-2.0
| 695 |
#!/bin/sh
#
# Copyright (c) Linux Test Project, 2014-2017
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Written by Cyril Hrubis <[email protected]>
#
# This is a LTP test library for shell.
#
# Per-run result counters, one per result type; summed up in tst_do_exit.
export TST_PASS=0
export TST_FAIL=0
export TST_BROK=0
export TST_WARN=0
export TST_CONF=0
# Index of the currently executing test function invocation.
export TST_COUNT=1
# How many times the test function(s) are run (-i option).
export TST_ITERATIONS=1
# Set to 1 when a temporary directory was created on the remote host.
export TST_TMPDIR_RHOST=0
# Color helpers: tst_color_enabled / tst_print_colored.
. tst_ansi_color.sh
# Run cleanup, release acquired resources, print a summary and exit with a
# bitmask encoding the overall result: 1=failures, 2=broken, 4=warnings,
# 32=skipped (matches LTP's C library exit codes).
tst_do_exit()
{
local ret=0
# Run the test's cleanup callback only when setup actually started.
if [ -n "$TST_SETUP_STARTED" -a -n "$TST_CLEANUP" -a \
-z "$TST_NO_CLEANUP" ]; then
$TST_CLEANUP
fi
# Release the block device acquired in tst_run.
if [ "$TST_NEEDS_DEVICE" = 1 -a "$TST_DEVICE_FLAG" = 1 ]; then
if ! tst_device release "$TST_DEVICE"; then
tst_res TWARN "Failed to release device '$TST_DEVICE'"
fi
fi
# Remove the temporary directory (leave it first, we may be inside).
if [ "$TST_NEEDS_TMPDIR" = 1 -a -n "$TST_TMPDIR" ]; then
cd "$LTPROOT"
rm -r "$TST_TMPDIR"
[ "$TST_TMPDIR_RHOST" = 1 ] && tst_cleanup_rhost
fi
if [ $TST_FAIL -gt 0 ]; then
ret=$((ret|1))
fi
if [ $TST_BROK -gt 0 ]; then
ret=$((ret|2))
fi
if [ $TST_WARN -gt 0 ]; then
ret=$((ret|4))
fi
if [ $TST_CONF -gt 0 ]; then
ret=$((ret|32))
fi
echo
echo "Summary:"
echo "passed   $TST_PASS"
echo "failed   $TST_FAIL"
echo "skipped  $TST_CONF"
echo "warnings $TST_WARN"
exit $ret
}
# Increment the global counter matching result type $1; unknown types
# abort the test run via tst_brk.
tst_inc_res()
{
	local restype="$1"

	case "$restype" in
	TPASS) : $((TST_PASS += 1));;
	TFAIL) : $((TST_FAIL += 1));;
	TBROK) : $((TST_BROK += 1));;
	TWARN) : $((TST_WARN += 1));;
	TCONF) : $((TST_CONF += 1));;
	TINFO) ;;
	*) tst_brk TBROK "Invalid resm type '$restype'";;
	esac
}
# Record and print one result line: "<TCID> <count> <TYPE>: message",
# with the result type colorized when the terminal supports it.
tst_res()
{
	local restype="$1"
	shift

	tst_color_enabled
	local color=$?	# queried for parity; coloring happens in tst_print_colored

	tst_inc_res "$restype"

	printf '%s %s ' "$TCID" "$TST_COUNT"
	tst_print_colored "$restype" "$restype: "
	echo "$@"
}
# Report a final result and terminate the whole test run immediately.
tst_brk()
{
	local restype="$1"
	shift

	tst_res "$restype" "$@"
	tst_do_exit
}
# Run a command with all output discarded; abort the test (TBROK) when it
# exits non-zero.
# Fix: pass the command as "$@" — the previous unquoted $@ re-split any
# argument containing whitespace (ROD already quoted it correctly).
ROD_SILENT()
{
	tst_rod "$@" > /dev/null 2>&1

	if [ $? -ne 0 ]; then
		tst_brk TBROK "$@ failed"
	fi
}
# Run a command; any non-zero exit aborts the test with TBROK.
ROD()
{
	if ! tst_rod "$@"; then
		tst_brk TBROK "$@ failed"
	fi
}
# Run a command that is expected to succeed; report TPASS when it does and
# TFAIL otherwise.
EXPECT_PASS()
{
	if tst_rod "$@"; then
		tst_res TPASS "$@ passed as expected"
	else
		tst_res TFAIL "$@ failed unexpectedly"
	fi
}
# Run a command that is expected to fail; report TPASS when it does fail
# and TFAIL when it unexpectedly succeeds.
EXPECT_FAIL()
{
	# stderr is discarded since the command is expected to complain
	if tst_rod "$@" 2> /dev/null; then
		tst_res TFAIL "$@ passed unexpectedly"
	else
		tst_res TPASS "$@ failed as expected"
	fi
}
# Unmount $1, retrying for up to ~5 seconds (50 x 100ms) because desktop
# daemons such as gvfsd-trash may briefly hold freshly mounted filesystems
# open. Does nothing when the device is not mounted.
tst_umount()
{
local device="$1"
local i=0
if ! grep -q "$device" /proc/mounts; then
tst_res TINFO "The $device is not mounted, skipping umount"
return
fi
while [ "$i" -lt 50 ]; do
if umount "$device" > /dev/null; then
return
fi
i=$((i+1))
tst_res TINFO "umount($device) failed, try $i ..."
tst_res TINFO "Likely gvfsd-trash is probing newly mounted "\
"fs, kill it to speed up tests."
tst_sleep 100ms
done
tst_res TWARN "Failed to umount($device) after 50 retries"
}
# Format device $2 with filesystem $1; any remaining arguments are passed
# through to mkfs. Both the fs type and the device are mandatory; output
# is discarded and a failed mkfs aborts the test (via ROD_SILENT).
tst_mkfs()
{
	local fstype="$1"
	local dev="$2"
	shift 2
	local opts="$*"

	[ -n "$fstype" ] || tst_brk TBROK "No fs_type specified"
	[ -n "$dev" ] || tst_brk TBROK "No device specified"

	tst_res TINFO "Formatting $dev with $fstype extra opts='$opts'"

	ROD_SILENT mkfs.$fstype $opts $dev
}
# Verify that each command named in the arguments is available in PATH;
# a missing command ends the test with TCONF.
tst_check_cmds()
{
	local c
	for c in $*; do
		command -v "$c" > /dev/null 2>&1 || tst_brk TCONF "'$c' not found"
	done
}
# Return 0 when $1 is a valid (possibly signed) integer, non-zero
# otherwise. Relies on test's numeric comparison: non-numeric input makes
# [ -eq ] fail, and its error message is discarded.
tst_is_int()
{
	[ "$1" -eq "$1" ] 2>/dev/null
}
# Print usage information: the test's own $TST_USAGE callback when set,
# otherwise a generic header — always followed by the options every test
# understands.
tst_usage()
{
	if [ -z "$TST_USAGE" ]; then
		echo "usage: $0"
		echo "OPTIONS"
	else
		$TST_USAGE
	fi

	echo "-h Prints this help"
	echo "-i n Execute test n times"
}
# Emit a fingerprint of the current result counters; tst_rescmp uses it to
# detect whether a test function reported anything at all.
tst_resstr()
{
	printf '%s%s%s\n' "$TST_PASS" "$TST_FAIL" "$TST_CONF"
}
# Compare the current counter fingerprint against $1 (taken before the
# test function ran) and break the run when nothing was reported.
tst_rescmp()
{
	local snapshot
	snapshot=$(tst_resstr)

	if [ "$1" = "$snapshot" ]; then
		tst_brk TBROK "Test didn't report any results"
	fi
}
# Main test driver: validates reserved TST_* variables, parses the common
# options, prepares resources (tmpdir, device, kernel module), runs the
# setup callback, then executes $TST_TESTFUNC for the requested number of
# iterations and finally exits through tst_do_exit.
tst_run()
{
local tst_i
# Warn about TST_* variables in the test script that are not part of the
# library API (likely typos or accidental clobbering).
if [ -n "$TST_TEST_PATH" ]; then
for tst_i in $(grep TST_ "$TST_TEST_PATH" | sed 's/.*TST_//; s/[="} \t\/:`].*//'); do
case "$tst_i" in
SETUP|CLEANUP|TESTFUNC|ID|CNT);;
OPTS|USAGE|PARSE_ARGS|POS_ARGS);;
NEEDS_ROOT|NEEDS_TMPDIR|NEEDS_DEVICE|DEVICE);;
NEEDS_CMDS|NEEDS_MODULE|MODPATH|DATAROOT);;
*) tst_res TWARN "Reserved variable TST_$tst_i used!";;
esac
done
fi
local name
OPTIND=1
# Common options (-h help, -i iterations) plus the test's own $TST_OPTS;
# unknown flags of the test go to its $TST_PARSE_ARGS callback.
while getopts "hi:$TST_OPTS" name $TST_ARGS; do
case $name in
'h') tst_usage; exit 0;;
'i') TST_ITERATIONS=$OPTARG;;
'?') tst_usage; exit 2;;
*) $TST_PARSE_ARGS "$name" "$OPTARG";;
esac
done
if ! tst_is_int "$TST_ITERATIONS"; then
tst_brk TBROK "Expected number (-i) not '$TST_ITERATIONS'"
fi
if [ "$TST_ITERATIONS" -le 0 ]; then
tst_brk TBROK "Number of iterations (-i) must be > 0"
fi
if [ "$TST_NEEDS_ROOT" = 1 ]; then
if [ "$(id -ru)" != 0 ]; then
tst_brk TCONF "Must be super/root for this test!"
fi
fi
tst_check_cmds $TST_NEEDS_CMDS
# Create and enter a private temporary directory when requested.
if [ "$TST_NEEDS_TMPDIR" = 1 ]; then
if [ -z "$TMPDIR" ]; then
export TMPDIR="/tmp"
fi
TST_TMPDIR=$(mktemp -d "$TMPDIR/LTP_$TST_ID.XXXXXXXXXX")
chmod 777 "$TST_TMPDIR"
TST_STARTWD=$(pwd)
cd "$TST_TMPDIR"
fi
# Acquire a scratch block device; released again in tst_do_exit.
if [ "$TST_NEEDS_DEVICE" = 1 ]; then
if [ -z ${TST_TMPDIR} ]; then
tst_brk "Use TST_NEEDS_TMPDIR must be set for TST_NEEDS_DEVICE"
fi
TST_DEVICE=$(tst_device acquire)
if [ -z "$TST_DEVICE" ]; then
tst_brk "Failed to acquire device"
fi
TST_DEVICE_FLAG=1
fi
# Locate a required kernel module next to the test, in the installed LTP
# bin directory, or in the directory the test was started from.
if [ -n "$TST_NEEDS_MODULE" ]; then
for tst_module in "$TST_NEEDS_MODULE" \
"$LTPROOT/testcases/bin/$TST_NEEDS_MODULE" \
"$TST_STARTWD/$TST_NEEDS_MODULE"; do
if [ -f "$tst_module" ]; then
TST_MODPATH="$tst_module"
break
fi
done
if [ -z "$TST_MODPATH" ]; then
tst_brk TCONF "Failed to find module '$TST_NEEDS_MODULE'"
else
tst_res TINFO "Found module at '$TST_MODPATH'"
fi
fi
if [ -n "$TST_SETUP" ]; then
TST_SETUP_STARTED=1
$TST_SETUP
fi
#TODO check that test reports some results for each test function call
while [ $TST_ITERATIONS -gt 0 ]; do
if [ -n "$TST_CNT" ]; then
# With TST_CNT set there are two calling conventions: numbered
# functions ($TST_TESTFUNC1..N, detected via 'type test1') are called
# directly, otherwise the index is passed as the first argument.
if type test1 > /dev/null 2>&1; then
for tst_i in $(seq $TST_CNT); do
local res=$(tst_resstr)
$TST_TESTFUNC$tst_i
tst_rescmp "$res"
TST_COUNT=$((TST_COUNT+1))
done
else
for tst_i in $(seq $TST_CNT); do
local res=$(tst_resstr)
$TST_TESTFUNC $tst_i
tst_rescmp "$res"
TST_COUNT=$((TST_COUNT+1))
done
fi
else
local res=$(tst_resstr)
$TST_TESTFUNC
tst_rescmp "$res"
TST_COUNT=$((TST_COUNT+1))
fi
TST_ITERATIONS=$((TST_ITERATIONS-1))
done
tst_do_exit
}
# --- Library bootstrap: runs when this file is sourced by a test ---
# Sanity check: every test sourcing this library must call tst_run.
if TST_TEST_PATH=$(which $0) 2>/dev/null; then
if ! grep -q tst_run "$TST_TEST_PATH"; then
tst_brk TBROK "Test $0 must call tst_run!"
fi
fi
# Derive the test ID from the script name unless the test set one itself.
if [ -z "$TST_ID" ]; then
filename=$(basename $0)
TST_ID=${filename%%.*}
fi
export TST_ID="$TST_ID"
if [ -z "$TST_TESTFUNC" ]; then
tst_brk TBROK "TST_TESTFUNC is not defined"
fi
# Validate the optional numeric knobs declared by the test.
if [ -n "$TST_CNT" ]; then
if ! tst_is_int "$TST_CNT"; then
tst_brk TBROK "TST_CNT must be integer"
fi
if [ "$TST_CNT" -le 0 ]; then
tst_brk TBROK "TST_CNT must be > 0"
fi
fi
if [ -n "$TST_POS_ARGS" ]; then
if ! tst_is_int "$TST_POS_ARGS"; then
tst_brk TBROK "TST_POS_ARGS must be integer"
fi
if [ "$TST_POS_ARGS" -le 0 ]; then
tst_brk TBROK "TST_POS_ARGS must be > 0"
fi
fi
# Pick the data directory both for in-tree and installed runs.
if [ -z "$LTPROOT" ]; then
export LTPROOT="$PWD"
export TST_DATAROOT="$LTPROOT/datafiles"
else
export TST_DATAROOT="$LTPROOT/testcases/data/$TST_ID"
fi
TST_ARGS="$@"
# Pre-scan the options only to detect -h, so the positional-argument
# validation below is skipped when the user just wants help.
while getopts ":hi:$TST_OPTS" tst_name; do
case $tst_name in
'h') TST_PRINT_HELP=1;;
*);;
esac
done
shift $((OPTIND - 1))
if [ -n "$TST_POS_ARGS" ]; then
if [ -z "$TST_PRINT_HELP" -a $# -ne "$TST_POS_ARGS" ]; then
tst_brk TBROK "Invalid number of positional paramters:"\
"have ($@) $#, expected ${TST_POS_ARGS}"
fi
else
if [ -z "$TST_PRINT_HELP" -a $# -ne 0 ]; then
tst_brk TBROK "Unexpected positional arguments '$@'"
fi
fi
|
richiejp/ltp
|
testcases/lib/tst_test.sh
|
Shell
|
gpl-2.0
| 8,395 |
#!/bin/bash
set -u
# Directory layout: this script lives next to ../scripts; SLURM logs and
# per-run launcher parameter files are written below $BIN.
BIN="$( readlink -f -- "${0%/*}" )"
SCRIPTS=$BIN/../scripts
OUT_DIR=$SCRATCH/kmer-binner/binned
# Job name is the script's own filename without the .sh suffix.
PROG=$(basename $0 '.sh')
PARAMS_DIR=$BIN/params/$PROG
SLURM_OUT=$BIN/out/$PROG
SLURM_EMAIL="--mail-type=BEGIN,END [email protected]"
COMMON=$SCRIPTS/common.sh
REF_DIR=/scratch/03137/kyclark/data/reference
IN_DIRS="$REF_DIR/mouse $REF_DIR/wheat $REF_DIR/yeast $REF_DIR/zea_mays $REF_DIR/glycine_max $REF_DIR/medicago_truncatula"
FILES_LIST=$(mktemp)
# Shared helpers (init_dirs, lc) come from common.sh.
if [ -e $COMMON ]; then
source $COMMON
else
echo COMMON \"$COMMON\" not found
exit 1
fi
if [[ ! -d $OUT_DIR ]]; then
mkdir -p $OUT_DIR
fi
init_dirs "$SLURM_OUT" "$PARAMS_DIR"
# Collect every reference file to be binned.
find $IN_DIRS -type f > $FILES_LIST
NUM_FILES=$(lc $FILES_LIST)
echo Found \"$NUM_FILES\" files
if [ $NUM_FILES -lt 1 ]; then
echo Nothing to do
exit 1
fi
# Parameter file for the launcher: one binner.pl command per input file.
PARAMS_FILE=$PARAMS_DIR/$$
if [ -e $PARAMS_FILE ]; then
rm $PARAMS_FILE
fi
while read FILE; do
# Output subdirectory is named after the input file's parent directory.
BASE_DIR=$(dirname $FILE)
BASE_DIR=$(basename $BASE_DIR)
DIR=$OUT_DIR/$BASE_DIR
if [ -e $DIR ]; then
rm -rf $DIR/*
fi
echo "$SCRIPTS/binner.pl -q -f $FILE -o $DIR" >> $PARAMS_FILE
done < $FILES_LIST
# Submit one SLURM task per input file via the parametric launcher.
sbatch -J binner -o "$SLURM_OUT/%j.out" -e "$SLURM_OUT/%j.err" \
-n ${NUM_FILES:=1} ${SLURM_EMAIL:=""} \
$BIN/launcher.sh $PARAMS_FILE
|
kyclark/kmer-binner
|
slurm/01-binner.sh
|
Shell
|
gpl-2.0
| 1,296 |
#!/bin/bash
# Build all ParHIP binaries in sequence; stop at the first failing target.
for program in parhip edge_list_to_metis_graph friendster_list_to_metis_graph graph2binary graph2binary_external readbgf toolbox; do
	scons program=$program variant=optimized -j 16
	if [ "$?" -ne "0" ]; then
		echo "compile error in $program. exiting."
		# Fix: propagate the failure. A bare 'exit' returned the status
		# of the preceding echo (0), so callers saw broken builds as OK.
		exit 1
	fi
done

# -f: do not complain when scons produced no config.log
rm -f config.log
|
peterwake/KaHIP
|
parallel/parallel_src/compile_all.sh
|
Shell
|
gpl-2.0
| 309 |
# Install system and Python dependencies for the Jeedom MiFlora plugin.
# Progress (0-100) is reported through a temp file that the Jeedom UI polls.
touch /tmp/dependancy_MiFlora_in_progress
echo 0 > /tmp/dependancy_MiFlora_in_progress
sudo apt-get update
echo 25 > /tmp/dependancy_MiFlora_in_progress
# sudo apt-get --yes upgrade
echo "Launch install of MiFlora dependancy"
echo 50 > /tmp/dependancy_MiFlora_in_progress
# Build tooling plus Bluetooth/crypto libraries needed by the BLE stack.
sudo apt-get install -y python-pip python3-pip python-dev build-essential python-requests bluetooth libffi-dev libssl-dev
sudo apt-get install python-pip libglib2.0-dev
echo 70 > /tmp/dependancy_MiFlora_in_progress
sudo pip install pyudev
sudo pip install pyserial
sudo pip install requests
echo 80 > /tmp/dependancy_MiFlora_in_progress
sudo pip install cryptography
echo 90 > /tmp/dependancy_MiFlora_in_progress
sudo pip install pycrypto
# bluepy is installed for both Python 2 and Python 3.
sudo pip install bluepy
sudo python3 -m pip install bluepy
echo 95 > /tmp/dependancy_MiFlora_in_progress
# Bring up Bluetooth adapters; failures are ignored on hosts without them.
sudo connmanctl enable bluetooth >/dev/null 2>&1
sudo hciconfig hci0 up >/dev/null 2>&1
sudo hciconfig hci1 up >/dev/null 2>&1
echo 100 > /tmp/dependancy_MiFlora_in_progress
echo "Everything is successfully installed!"
# Removing the progress file signals completion to the UI.
rm /tmp/dependancy_MiFlora_in_progress
|
rjullien/jeedom_MiFlora
|
resources/install_remote.sh
|
Shell
|
gpl-2.0
| 1,073 |
# Build the QHexEdit example out-of-tree in ./build.
# Fix: abort when a step fails instead of running qmake/make in the wrong
# directory (or reporting success after a failed build).
cd build || exit 1
qmake ../example/qhexedit.pro || exit 1
make || exit 1
cd ..
|
biluna/biluna
|
thrd/qhexedit2/build-example.sh
|
Shell
|
gpl-2.0
| 50 |
#!/bin/bash
# Copyright (C) 2010 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Device identity consumed by the shared yukon extractor sourced below.
DEVICE=flamingo
MANUFACTURER=sony
STOCK=18.4.C.2.12
# Shared extraction logic for the yukon platform (defines copy_files).
. ../yukon/extract-files.sh
# Modem firmware blobs to copy out of the stock firmware image.
COMMON_FIRMWARE="
modem.b00
modem.b01
modem.b02
modem.b03
modem.b04
modem.b05
modem.b08
modem.b10
modem.b11
modem.b13
modem.b14
modem.b15
modem.b16
modem.b17
modem.b18
modem.b19
modem.b20
modem.b21
modem.b22
modem.b25
modem.b26
modem.b27
modem.mdt
"
copy_files "$COMMON_FIRMWARE" "system/etc/firmware" "etc/firmware"
|
vic3t3chn0/sony_device
|
flamingo/extract-files.sh
|
Shell
|
gpl-2.0
| 1,037 |
#!/bin/bash
# Copyright (C) 2016 Intevation GmbH
#
# This file is part of GPG4Win.
#
# GPG4Win is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# GPG4Win is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
# Grab the version information for KDE Frameworks and generate a text block
# that can be copy and pasted into packages.current.
#
# Ideally KDE will PGP Sign their releases soon.
set -e

if [ -z "$1" ]; then
    echo "Usage $0 <Version> > snippet"
    exit 1
fi

FRAMEWORKS="extra-cmake-modules
kconfig
ki18n
kwidgetsaddons
kcompletion
kwindowsystem
kcoreaddons
kcodecs
kconfigwidgets
kxmlgui
kguiaddons
kitemviews
kitemmodels
kiconthemes
breeze-icons
karchive
kcrash"

tmpdir=$(mktemp -d)
# Fix: take the first two version components with cut instead of
# 'head -c 4', which produced a trailing dot (and thus a broken download
# URL) for versions with a single-digit minor, e.g. "5.9.0" -> "5.9.".
majorversion=$(echo "$1" | cut -d. -f1,2)
curdate=$(date +%Y-%m-%d)
KEYRING=$(dirname $0)/kde-release-key.gpg

for fw in $FRAMEWORKS; do
    # Download packages over https now and verify that the signature matches
    curl -L -s "https://download.kde.org/stable/frameworks/$majorversion/$fw-$1.tar.xz" > "$tmpdir/$fw-$1.tar.xz"
    curl -L -s "https://download.kde.org/stable/frameworks/$majorversion/$fw-$1.tar.xz.sig" > "$tmpdir/$fw-$1.tar.xz.sig"
    # Check the signature
    if ! gpgv --keyring "$KEYRING" "$tmpdir/$fw-$1.tar.xz.sig" "$tmpdir/$fw-$1.tar.xz"; then
        echo "Signature for $tmpdir/$fw-$1.tar.xz! does not match!"
        exit 1
    fi
    sha2=$(sha256sum $tmpdir/$fw-$1.tar.xz | cut -d ' ' -f 1)
    # Emit the packages.current snippet for this framework.
    echo "# $fw"
    echo "# last changed: $curdate"
    echo "# by: ah"
    echo "# verified: PGP Signed by ./kde-release-key.gpg (created by gen-frameworks.sh)"
    echo "file $majorversion/$fw-$1.tar.xz"
    echo "chk $sha2"
    echo ""
done

# Quoted in case mktemp returned a path containing spaces.
rm -r "$tmpdir"
|
gpg/gpg4win
|
packages/gen-frameworks.sh
|
Shell
|
gpl-2.0
| 2,311 |
#!/bin/bash
# called by dracut
# Module prerequisites: sed and grep must exist on the host. The module is
# never auto-included; 255 tells dracut "only on explicit request".
check() {
    if ! require_binaries sed grep; then
        return 1
    fi

    return 255
}
# called by dracut
# This module depends on no other dracut modules.
depends() {
    :
}
# called by dracut
# Nothing kernel-specific needs to be installed for this module.
installkernel() {
    :
}
# called by dracut
# Install NetworkManager, its initrd generator and supporting tools/rules
# into the initramfs image being built.
install() {
local _nm_version

_nm_version=${NM_VERSION:-$(NetworkManager --version)}

# We don't need `ip` but having it is *really* useful for people debugging
# in an emergency shell.
inst_multiple ip sed grep

inst NetworkManager
inst /usr/libexec/nm-initrd-generator
# Optional extras: team support and the classic dhclient.
inst_multiple -o teamd dhclient
# Hooks: translate kernel cmdline to NM config, then run NM once settled.
inst_hook cmdline 99 "$moddir/nm-config.sh"
if dracut_module_included "systemd"; then
inst_simple "${moddir}/nm-run.service" "${systemdsystemunitdir}/nm-run.service"
$SYSTEMCTL -q --root "$initdir" enable nm-run.service
fi
inst_hook initqueue/settled 99 "$moddir/nm-run.sh"
inst_rules 85-nm-unmanaged.rules
inst_libdir_file "NetworkManager/$_nm_version/libnm-device-plugin-team.so"
inst_simple "$moddir/nm-lib.sh" "/lib/nm-lib.sh"

if [[ -x "$initdir/usr/sbin/dhclient" ]]; then
inst /usr/libexec/nm-dhcp-helper
elif ! [[ -e "$initdir/etc/machine-id" ]]; then
# The internal DHCP client silently fails if we
# have no machine-id
systemd-machine-id-setup --root="$initdir"
fi

# We don't install the ifcfg files from the host automatically.
# But the user might choose to include them, so we pull in the machinery to read them.
inst_libdir_file "NetworkManager/$_nm_version/libnm-settings-plugin-ifcfg-rh.so"

# NSS libraries so in-initrd name resolution works.
_arch=${DRACUT_ARCH:-$(uname -m)}

inst_libdir_file {"tls/$_arch/",tls/,"$_arch/",}"libnss_dns.so.*" \
{"tls/$_arch/",tls/,"$_arch/",}"libnss_mdns4_minimal.so.*"
}
|
haraldh/dracut
|
modules.d/35network-manager/module-setup.sh
|
Shell
|
gpl-2.0
| 1,789 |
# Extract strings/constants from a smali file for imitation analysis:
# - print .method/.class/.super declarations prefixed with '#'
# - print lines containing const* instructions
# Arguments: one or more smali files (reads stdin when none given).
# BUG FIX: the filename was passed as unquoted $1, which broke on paths
# containing spaces or glob characters. "$@" passes every argument intact
# and still falls back to stdin when no argument is supplied.
sed -n \
  -e 's/\.method /#.method /p' \
  -e 's/\.class /#.class /p' \
  -e 's/\.super /#.super /p' \
  -e '/const[ -/]/p' "$@"
|
sslab-gatech/avpass
|
src/modules/imitation/extract_string.sh
|
Shell
|
gpl-2.0
| 136 |
#!/usr/bin/env sh
# Extract translatable strings from the eZ Publish kernel into the
# EzPublishCoreBundle translation files, then normalise file references.
echo 'Translation extraction';
# Run from the repository root (three levels up from this script).
cd ../../..;
# Extract string for default locale
echo '# Extract Kernel : EzPublishCoreBundle';
# BUG FIX: '--keep' previously ended the command (missing trailing
# backslash), so the next line executed "$@" as a stand-alone command.
# Extra script arguments are now forwarded to translation:extract.
./app/console translation:extract en -v \
  --dir=./vendor/ezsystems/ezpublish-kernel/eZ \
  --exclude-dir=Bundle/PlatformBehatBundle \
  --exclude-dir=Tests \
  --exclude-dir=Features \
  --exclude-dir=Publish/Core/REST/Client \
  --exclude-dir=tests \
  --output-dir=./vendor/ezsystems/ezpublish-kernel/eZ/Bundle/EzPublishCoreBundle/Resources/translations \
  --enable-extractor=ez_fieldtypes \
  --keep \
  "$@"
echo '# Clean file references';
# Strip the relative-path prefix the extractor records in the .xlf files.
sed -i "s|/../../../../.././vendor/ezsystems/ezpublish-kernel/|/|g" ./vendor/ezsystems/ezpublish-kernel/eZ/Bundle/EzPublishCoreBundle/Resources/translations/*.xlf
echo 'Translation extraction done';
|
flovntp/ezpublish-kernel
|
bin/extract-translations.sh
|
Shell
|
gpl-2.0
| 791 |
#!/usr/bin/env bash
#===============================================================================
#
#          FILE: rnnlm.ex002.s200.hidden_50.sh
#
#         USAGE: ./rnnlm.ex002.s200.hidden_50.sh <win|unix>
#
#   DESCRIPTION: Trains an RNNLM (50 hidden units) on PTB and evaluates it
#                on the PTB test set, logging everything under expts/.
#
#         NOTES: ---
#        AUTHOR: Hao Fang, [email protected]
#       CREATED: 03/03/2015 17:32
#      REVISION: ---
#===============================================================================
set -o nounset                              # Treat unset variables as an error
set -e
exid="rnnlm.ex002"
step="s200"
# Remove artifacts of a previous run (-f: ignore if none exist).
rm expts/${exid}.${step}.* -f
# Platform selector: "win" or "unix". Default to empty so we can print a
# helpful usage message instead of tripping 'nounset'.
sys=${1:-}
exe_dir=../bin
if [[ ${sys} == "win" ]]; then
  executable=${exe_dir}/rnnlm.exe
elif [[ ${sys} == "unix" ]]; then
  executable=${exe_dir}/rnnlm
else
  # BUG FIX: previously an unknown/missing platform left $executable unset,
  # causing a confusing "unbound variable" failure further down.
  echo "Usage: $0 win|unix" >&2
  exit 1
fi
cfg_datadir="./data/ptb_raw"
cfg_trainfiles="${cfg_datadir}/ptb.train.txt"
cfg_validationfile="${cfg_datadir}/ptb.valid.txt"
# Train; the braces let 'time' cover the whole command while tee logs it.
{ time ${executable} \
  -train ${cfg_trainfiles} \
  -class 10000 \
  -rnnlm expts/${exid}.${step}.rnnlm \
  -valid ${cfg_validationfile} \
  -alpha 0.1 \
  -beta 0 \
  -hidden 50 \
  -compression 0 \
  -direct 0 -direct-order 0 \
  -bptt 1 -bptt-block 1 \
  -min-improvement 1 \
  -gradient-cutoff 15; } \
  2>&1 | tee expts/${exid}.${step}.log
cfg_testfile="${cfg_datadir}/ptb.test.txt"
cfg_rnnlm="expts/${exid}.${step}.rnnlm"
# Evaluate on the test set, appending to the same log.
{ time ${executable} \
  -rnnlm ${cfg_rnnlm} \
  -test ${cfg_testfile}; } \
  2>&1 | tee -a expts/${exid}.${step}.log
|
hao-fang/UWNeuralNetLMRepo
|
examples/rnnlm.ex002.s200.hidden_50.sh
|
Shell
|
gpl-2.0
| 1,430 |
# Paths to the benchmark data and the three tkf91 implementations compared
# by the reproduction scripts.
BENCH_DATA="../../../stamatakis/benchMark_data"
TEAM1="../../bioinf2015/implementation-team1/build/mlalign"
TEAM2="../../bioinf2015/implementation-team2/bin/TKFLOG_CACHING_ROUND_UP"
REF="../../../stamatakis/tkf91_scaling/tkf91"

# Print each reproduction command before running it (same behaviour as the
# original CMD=...; echo $CMD; $CMD pattern, factored into one helper).
run_step() {
  echo "$@"
  "$@"
}

run_step python repro.py --ref-exe=$REF
run_step python repro2.py --exe=$TEAM2 --bench-data=$BENCH_DATA
run_step python repro3.py --exe=$TEAM1 --bench-data=$BENCH_DATA
run_step python repro4.py --precision=float --bench-data=$BENCH_DATA
run_step python bench-analysis.py --precision=float --samples=10 --bench-data=$BENCH_DATA
|
argriffing/arbtkf91
|
repro/run.sh
|
Shell
|
gpl-2.0
| 620 |
#! /bin/sh
# Copyright (C) 2009-2018 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.
# Check parallel-tests features:
# - concurrent parallel execution
# FIXME: we should factor out (into am-test-lib.sh?) the code to determine
# how to run make in parallel mode ...
. test-init.sh
# A pre-set -j in $MAKE would interfere with this test's own -j probing.
case $MAKE in
*\ -j*) skip_ "\$MAKE contains '-j'";;
esac
if using_gmake; then
j=-j
else
# Non-GNU make: probe which "-j" spelling works, using a Makefile whose
# targets only succeed when built concurrently ('one' waits to see 'two').
unindent > Makefile <<END
all: one two
one:
$sleep && test -f two
two:
echo ok > \$@
END
for j in "-j" "-j " NONE; do
if test x"$j" = xNONE; then
skip_ "can't run make in parallel mode"
fi
run_make -M -- ${j}2 all || continue
$EGREP -i "(warning|error):|-j[\"\'\` ]" output && continue
break
done
rm -f one output Makefile
fi
cat >> configure.ac << 'END'
AC_OUTPUT
END
cat > Makefile.am << 'END'
TESTS =
END
# Dummy test script: sleeps briefly so the serial run takes measurable time.
cat > x <<'END'
#! /bin/sh
echo "this is $0"
# Creative quoting below to please maintainer-check.
sleep '1'
exit 0
END
chmod a+x ./x
mkdir sub
# Register 12 copies of the dummy test, covering both naming styles
# (.test suffix and none) in both the top level and a subdirectory.
for i in 1 2 3; do
echo "TESTS += foo$i.test" >> Makefile.am
cp x foo$i.test
echo "TESTS += zap$i" >> Makefile.am
cp x zap$i
echo "TESTS += sub/bar$i.test" >> Makefile.am
cp x sub/bar$i.test
echo "TESTS += sub/mu$i" >> Makefile.am
cp x sub/mu$i
done
$ACLOCAL
$AUTOCONF
$AUTOMAKE -a
# Configure and build the same tree twice, once per test run mode.
for build in serial parallel; do
mkdir $build
cd $build
../configure
$MAKE
cd ..
done
cd serial
# Do *not* use "make -j1" here; apparently, some make implementations
# (e.g., AIX 7.1) interpret it as a synonym of "make -j" :-(
$MAKE check &
cd ..
cd parallel
$sleep
run_make -O -- ${j}4 check
cd ..
# Ensure the tests are really being run in parallel mode: if this is
# the case, the serial run of the dummy testsuite started above should
# still be ongoing when the parallel one has terminated.
kill -0 $!
cat parallel/stdout
test $(grep -c '^PASS:' parallel/stdout) -eq 12
# Wait long enough so that there are no open files any more when the
# post-test cleanup runs.  But exit after we've waited for two minutes
# or more, to avoid testsuite hangs in unusual situations (this has
# already happened).
i=1
while test ! -f serial/test-suite.log && test $i -le 120; do
i=$(($i + 1))
sleep '1' # Extra quoting to please maintainer-check.
done
$sleep
:
|
komh/automake-os2
|
t/parallel-tests-concurrency.sh
|
Shell
|
gpl-2.0
| 2,864 |
#!/usr/bin/env bash
# Run a single compiled test (and/or its companion .sh driver), capture its
# output, and diff it against the committed reference log.
#   $1 - path to the test executable (name ends in .exe)
#   $2 - file that receives this script's own progress output
set -e
exe=$1
out=$2
echo pwd=$(pwd)
echo exe=$exe
echo out=$out
# From here on, all progress messages go to $out.
exec >$out
echo -n start:
date
# Derived log paths: fresh output, committed reference, captured stderr.
lognew=${exe%.exe}.log.new
logref=${exe%.exe}.log.ref
logerr=${exe%.exe}.log.err
if [[ ${LIBUV_DIR-x} != x ]]; then
export LD_LIBRARY_PATH=$LIBUV_DIR/lib:${LD_LIBRARY_PATH-}
fi
if [ -f ${exe%.exe}.c ]; then
(
cd $(dirname $exe)
exe=$(basename $exe)
case $exe in
test_server*)
# Server tests get a private XDG config dir with vault and log settings.
export XDG_CONFIG_HOME=$(pwd)/${exe%.exe}-conf.d
mkdir -p $XDG_CONFIG_HOME/circus
cat > $XDG_CONFIG_HOME/circus/server.conf <<EOF
{
"vault": {
"filename": "vault"
},
"log": {
"level": "pii",
"filename": "${exe%.exe}-server.log"
}
}
EOF
;;
esac
# Prefer a .sh driver when one exists; otherwise run the binary directly.
if [ -f ${exe%.exe}.sh ]; then
chmod u+x ${exe%.exe}.sh
exec ./${exe%.exe}.sh ./$exe
else
# DETAILED VALGRIND REPORTS:
#exec valgrind --leak-check=full --trace-children=yes --read-var-info=yes --fair-sched=no --track-origins=yes --malloc-fill=0A --free-fill=DE \
#     --xml=yes --xml-file=${exe%.exe}.log.valgrind.xml --log-file=${exe%.exe}.log.valgrind ./$exe
#
# CONCISE VALGRIND REPORTS:
#exec valgrind --trace-children=yes --log-file=${exe%.exe}.log.valgrind ./$exe
#
# RAW EXECUTION: (fastest)
exec ./$exe
#
fi
) >$lognew 2>$logerr || {
echo "**** Exited with status $?" >>$lognew
cat $logerr >&2
exit 1
}
elif [ -f ${exe%.exe}.sh ]; then
# No .c source: the .sh script *is* the test.
chmod u+x ${exe%.exe}.sh
./${exe%.exe}.sh >$lognew 2>$logerr || {
echo "**** Exited with status $?" >>$lognew
cat $logerr >&2
exit 1
}
fi
# Compare the new log against the committed reference, if present.
if [ -r $logref ]; then
diff -u $logref $lognew >&2
else
echo "There is no $logref file. Please check and copy $lognew" >&2
exit 1
fi
echo -n end:
date
|
cadrian/circus
|
src/tst/run_test.sh
|
Shell
|
gpl-3.0
| 1,987 |
#!/bin/sh
# PBS batch job: subset every *.dose.vcf.gz in the working directory to a
# fixed sample list and convert each to plink bed/bim/fam format.
#Job parameters:
#PBS -l walltime=10:00:00
#PBS -l select=1:ncpus=16:mem=10gb
#PBS -q med-bio
#Use current working directory
##$ -cwd
#Save standard error and out to files:
#PBS -e plink_loop_stderr.file
#PBS -o plink_loop_stdout.file
#Module commands:
#INFILE="chr22.dose.vcf.gz"
#OUTFILE="chr22_Airwave_CPMG_Plasma"
# Sample list (FID/IID pairs) used by plink --keep.
SAMPLE_IDs="Airwave_CPMG_Plasma.txt_sample_names_FID_IID_2.txt"
SLOTS="16"
#File management:
# Stage the sample list into node-local scratch ($TMPDIR).
cp $PBS_O_WORKDIR/$SAMPLE_IDs $TMPDIR
##cp $PBS_O_WORKDIR/$CONFIG $TMPDIR
#Command:
# For each VCF: stage it, subset + convert with plink, copy results back.
for f in *.dose.vcf.gz; do
cp $PBS_O_WORKDIR/$f $TMPDIR
/home/aberlang/bin/plink_1.90_3.37_16_May.dir/plink --vcf $f \
--double-id \
--make-bed \
--keep $SAMPLE_IDs \
--out ${f}.subset
cp *.subset* $PBS_O_WORKDIR
done
#Notify by email:
# NOTE(review): PBS only honours directives that appear before the first
# executable line, so this one would be ignored even if uncommented ('##').
##PBS -m abe
#File management:
#cp *.subset* $PBS_O_WORKDIR
|
AntonioJBT/genotype_tools
|
legacy/PBS_scripts.dir/PBS_plink_subset_loop.sh
|
Shell
|
gpl-3.0
| 1,026 |
#!/bin/sh
# Perform processing of the .sip file to add docstrings
# Abort on the first failure so a broken mantidqt.sip is never fed into the
# second (stripping) step and silently shipped.
set -e
echo ""
python ../Build/doxygen_to_sip.py -i mantidqt.in.sip -o mantidqt.sip -d mantidqt_dir.txt
echo ""
# NOTE(review): produces a docstring-stripped .rhel5 variant of the file —
# presumably for an older sip on RHEL5; confirm before changing.
python ../Build/sip_strip_docstring.py -i mantidqt.sip -o mantidqt.rhel5.sip -w mantidqt_wiki.txt
echo ""
|
wdzhou/mantid
|
qt/python/process_sip.sh
|
Shell
|
gpl-3.0
| 278 |
#!/usr/bin/env bash
#
# Set up data folders.
#
# Detect a first-time install *before* creating anything: the flag is set
# only when ./data does not exist yet (same semantics as before).
if [ ! -d data ]; then
  IS_INSTALLING="true"
fi

# Create the whole data directory tree in one idempotent pass. 'mkdir -p'
# creates missing parents and never fails on existing directories, which
# replaces the long chain of 'if [ ! -d X ]; then mkdir X; fi' blocks.
mkdir -p \
  data/csv \
  data/archives \
  data/client/releases/prod/apks \
  data/client/releases/prod/pwas \
  data/client/releases/prod/dat \
  data/client/releases/qa/apks \
  data/client/releases/qa/pwas \
  data/client/releases/qa/dat \
  data/dat-output

# Seed placeholder files only when absent so existing data is never
# clobbered on re-runs.
if [ ! -f data/id_rsa ]; then
  echo '' > data/id_rsa
fi
if [ ! -f data/id_rsa.pub ]; then
  echo '' > data/id_rsa.pub
fi
if [ ! -f data/reporting-worker-state.json ]; then
  echo '{}' > data/reporting-worker-state.json
fi
if [ ! -f data/paid-worker-state.json ]; then
  echo '{}' > data/paid-worker-state.json
fi
#
# Load config.
#
# config.defaults.sh supplies every setting; config.sh must exist and
# override at least the default passwords.
source ./config.defaults.sh
if [ -f "./config.sh" ]; then
source ./config.sh
else
echo "You have no config.sh. Copy config.defaults.sh to config.sh, change the passwords and try again." && exit 1;
fi
# Optional MySQL module: start the container, give it time to come up,
# then run the one-time schema/user setup.
if echo "$T_MODULES" | grep mysql; then
./mysql-start.sh
echo "Waiting 60 seconds for myql to start..."
sleep 60
./mysql-setup.sh
fi
if echo "$T_MYSQL_PHPMYADMIN" | grep "TRUE"; then
echo "Starting phpmyadmin..."
./phpmyadmin-start.sh
fi
#
# Get software and shut down existing containers if they exist.
#
# Allow to specify Tangerine Version as parameter in ./start.sh, other wise use the most recent tag.
# Precedence: $1 argument > $T_TAG from config > newest git tag.
if [ "$1" = "" ]; then
if [ "$T_TAG" = "" ]; then
T_TAG=$(git describe --tags --abbrev=0)
else
T_TAG="$T_TAG"
fi
else
T_TAG="$1"
fi
echo "Pulling $T_TAG"
docker pull tangerine/tangerine:$T_TAG
# Remove any previous Tangerine container so the new one can take its name.
echo "Stopping $T_CONTAINER_NAME"
docker stop $T_CONTAINER_NAME > /dev/null
echo "Removing $T_CONTAINER_NAME"
docker rm $T_CONTAINER_NAME > /dev/null
#
# Set up couchdb
#
# Connection URL the Tangerine container will use (via the docker link).
T_COUCHDB_ENDPOINT="http://$T_COUCHDB_USER_ADMIN_NAME:$T_COUCHDB_USER_ADMIN_PASS@couchdb:5984/"
if [ ! -d data/couchdb ]; then
mkdir data/couchdb
fi
if [ ! -d data/couchdb/data ]; then
mkdir data/couchdb/data
fi
if [ ! -d data/couchdb/local.d ]; then
mkdir data/couchdb/local.d
fi
# Write a default local.ini once: listen on all interfaces and require
# authenticated requests.
if [ ! -f data/couchdb/local.d/local.ini ]; then
echo "
[chttpd]
bind_address = any
[httpd]
bind_address = any
[couch_httpd_auth]
require_valid_user = true
[chttpd]
require_valid_user = true
" > data/couchdb/local.d/local.ini
fi
# Replace any existing CouchDB container, then start couchdb:2 with data
# and config bind-mounted from ./data/couchdb.
[ "$(docker ps | grep $T_COUCHDB_CONTAINER_NAME)" ] && docker stop $T_COUCHDB_CONTAINER_NAME
[ "$(docker ps -a | grep $T_COUCHDB_CONTAINER_NAME)" ] && docker rm $T_COUCHDB_CONTAINER_NAME
CMD="docker run -d \
--restart on-failure \
-e COUCHDB_USER=\"$T_COUCHDB_USER_ADMIN_NAME\" \
-e COUCHDB_PASSWORD=\"$T_COUCHDB_USER_ADMIN_PASS\" \
$T_COUCHDB_PORT_MAPPING \
-v $(pwd)/data/couchdb/data:/opt/couchdb/data \
-v $(pwd)/data/couchdb/local.d:/opt/couchdb/etc/local.d \
--name \"$T_COUCHDB_CONTAINER_NAME\" \
couchdb:2
"
echo $CMD
eval "$CMD"
# Give CouchDB a moment to come up before Tangerine connects.
sleep 10
#
# Start Tangerine.
#
# Assemble the docker run options for the main Tangerine container. Inner
# quotes are escaped (\") so they survive into the string and protect each
# value when $CMD is eval'd later. NOTE: no comments can be placed inside
# the quoted block below — they would become part of the string.
RUN_OPTIONS="
   --link $T_COUCHDB_CONTAINER_NAME:couchdb \
   --name $T_CONTAINER_NAME \
   --restart unless-stopped \
   --env \"NODE_ENV=development\" \
   --env \"T_VERSION=$T_TAG\" \
   --env \"T_PROTOCOL=$T_PROTOCOL\" \
   --env \"T_USER1=$T_USER1\" \
   --env \"T_USER1_PASSWORD=$T_USER1_PASSWORD\" \
   --env \"T_HOST_NAME=$T_HOST_NAME\" \
   --env \"T_UPLOAD_TOKEN=$T_UPLOAD_TOKEN\" \
   --env \"T_COUCHDB_ENDPOINT=$T_COUCHDB_ENDPOINT\" \
   --env \"T_COUCHDB_USER_ADMIN_NAME=$T_COUCHDB_USER_ADMIN_NAME\" \
   --env \"T_COUCHDB_USER_ADMIN_PASS=$T_COUCHDB_USER_ADMIN_PASS\" \
   --env \"T_USER1_MANAGED_SERVER_USERS=$T_USER1_MANAGED_SERVER_USERS\" \
   --env \"T_HIDE_PROFILE=$T_HIDE_PROFILE\" \
   --env \"T_AUTO_COMMIT=$T_AUTO_COMMIT\" \
   --env \"T_AUTO_COMMIT_FREQUENCY=$T_AUTO_COMMIT_FREQUENCY\" \
   --env \"T_CSV_BATCH_SIZE=$T_CSV_BATCH_SIZE\" \
   --env \"T_REPORTING_DELAY=$T_REPORTING_DELAY\" \
   --env \"T_MODULES=$T_MODULES\" \
   --env \"T_CORS_ALLOWED_ORIGINS=$T_CORS_ALLOWED_ORIGINS\" \
   --env \"T_PAID_ALLOWANCE=$T_PAID_ALLOWANCE\" \
   --env \"T_PAID_MODE=$T_PAID_MODE\" \
   --env \"T_CATEGORIES=$T_CATEGORIES\" \
   --env \"T_LEGACY=$T_LEGACY\" \
   --env \"T_REGISTRATION_REQUIRES_SERVER_USER=$T_REGISTRATION_REQUIRES_SERVER_USER\" \
   --env \"T_CENTRALLY_MANAGED_USER_PROFILE=$T_CENTRALLY_MANAGED_USER_PROFILE\" \
   --env \"T_ORIENTATION=$T_ORIENTATION\" \
   --env \"T_REPORTING_MARK_DISABLED_OR_HIDDEN_WITH=$T_REPORTING_MARK_DISABLED_OR_HIDDEN_WITH\" \
   --env \"T_REPORTING_MARK_SKIPPED_WITH=$T_REPORTING_MARK_SKIPPED_WITH\" \
   --env \"T_REPORTING_MARK_UNDEFINED_WITH=$T_REPORTING_MARK_UNDEFINED_WITH\" \
   --env \"T_HIDE_SKIP_IF=$T_HIDE_SKIP_IF\" \
   --env \"T_ARCHIVE_APKS_TO_DISK=$T_ARCHIVE_APKS_TO_DISK\" \
   --env \"T_ARCHIVE_PWAS_TO_DISK=$T_ARCHIVE_PWAS_TO_DISK\" \
   --env \"T_PASSWORD_POLICY=$T_PASSWORD_POLICY\" \
   --env \"T_PASSWORD_RECIPE=$T_PASSWORD_RECIPE\" \
   --env \"T_CUSTOM_LOGIN_MARKUP=$T_CUSTOM_LOGIN_MARKUP\" \
   --env \"T_JWT_ISSUER=$T_JWT_ISSUER\" \
   --env \"T_JWT_EXPIRES_IN=$T_JWT_EXPIRES_IN\" \
   --volume $(pwd)/content-sets:/tangerine/content-sets:delegated \
   --volume $(pwd)/data/dat-output:/dat-output/ \
   --volume $(pwd)/data/reporting-worker-state.json:/reporting-worker-state.json \
   --volume $(pwd)/data/paid-worker-state.json:/paid-worker-state.json \
   --volume $(pwd)/data/id_rsa:/root/.ssh/id_rsa:delegated \
   --volume $(pwd)/data/id_rsa.pub:/root/.ssh/id_rsa.pub:delegated \
   --volume $(pwd)/data/client/releases:/tangerine/client/releases/ \
   --volume $(pwd)/data/csv:/csv/ \
   --volume $(pwd)/data/archives:/archives/ \
   --volume $(pwd)/data/groups:/tangerine/groups/ \
   --volume $(pwd)/data/client/content/groups:/tangerine/client/content/groups \
"
# Disable Tangerine claiming a port as it will be proxied by nginx.
if [ $SSL_RUNNING ]; then
  # BUG FIX: these -e values were wrapped in *unescaped* quotes, which were
  # consumed when RUN_OPTIONS was assigned, leaving the env assignments
  # unquoted at eval time. Escape them (\") like every other --env entry in
  # this script so the values survive word-splitting.
  RUN_OPTIONS="
  $RUN_OPTIONS \
  -e \"LETSENCRYPT_HOST=$T_HOST_NAME\" \
  -e \"VIRTUAL_HOST=$T_HOST_NAME\" \
  -e \"LETSENCRYPT_EMAIL=$T_MAINTAINER_EMAIL\" \
  "
else
  # No SSL proxy: expose the configured port mapping directly.
  RUN_OPTIONS="
  $RUN_OPTIONS \
  $T_PORT_MAPPING \
  "
fi
# MySQL module: link the MySQL container and pass its credentials through.
if echo "$T_MODULES" | grep mysql; then
  RUN_OPTIONS="
  --link $T_MYSQL_CONTAINER_NAME:mysql \
  --env \"T_MYSQL_CONTAINER_NAME=$T_MYSQL_CONTAINER_NAME\" \
  --env \"T_MYSQL_USER=$T_MYSQL_USER\" \
  --env \"T_MYSQL_PASSWORD=$T_MYSQL_PASSWORD\" \
  --env \"T_MYSQL_MULTI_PARTICIPANT_SCHEMA=$T_MYSQL_MULTI_PARTICIPANT_SCHEMA\" \
  --volume $(pwd)/data/mysql/state:/mysql-module-state:delegated \
  $RUN_OPTIONS
  "
fi
CMD="docker run -d $RUN_OPTIONS tangerine/tangerine:$T_TAG"
echo "Running $T_CONTAINER_NAME at version $T_TAG"
echo "$CMD"
# eval so the escaped quotes assembled in RUN_OPTIONS are honoured.
eval ${CMD}
# Post-start fix-up inside the container (see the echo below).
echo "Installing missing plugin..."
docker exec ${T_CONTAINER_NAME} bash -c "cd /tangerine/client/builds/apk/ && cordova --no-telemetry plugin add cordova-plugin-whitelist --save"
echo ""
echo ""
echo ""
echo "🍊 Woohoo! Tangerine is running! 🍊"
echo ""
echo "Run 'docker exec tangerine info' to get a list of commands."
echo ""
|
Tangerine-Community/Tangerine
|
start.sh
|
Shell
|
gpl-3.0
| 7,490 |
#!/bin/bash
#You just need to create the folder VPN and copy all configuration files as well as certificates and psdd.conf file into it.
cd ~/Documents/VPN
clear
printf "Here, I'll run the IranSec VPN but whitch version?\033[1m\n\t\e[3$(( $RANDOM * 6 / 32767 + 1 ))m1- UK1 - V1\n\t\e[3$(( $RANDOM * 7 / 32767 + 1 ))m2- US1 - V1\n\t\e[3$(( $RANDOM * 8 / 32767 + 1 ))m3- US2 - V1\033[0m"
# -r keeps any backslash the user types literal.
read -r option
# BUG FIX: $option is now quoted. With the unquoted form, pressing Enter on
# its own (empty reply) made every '[' test fail with "unary operator
# expected" instead of reaching the catch-all else branch.
if [ "$option" = "1" ]; then
sudo openvpn --config UK1-IST-V1-Linux.ovpn
elif [ "$option" = "2" ]; then
sudo openvpn --config US1-IST-V1-Linux.ovpn
elif [ "$option" = "3" ]; then
sudo openvpn --config US2-IST-V1-Linux.ovpn
else
printf "\n\nJust press 1, 2, or 3. I can't understand other charachters!"
sleep 1
fi
cd /
#generated by http://patorjk.com/software/taag/#p=testall&h=0&v=0&c=bash&f=Isometric1&t=IranSec
iransec[0]=" ### ##### "
iransec[1]=" # ##### ## # # # # ###### #### "
iransec[2]=" # # # # # ## # # # # # "
iransec[3]=" # # # # # # # # ##### ##### # "
iransec[4]=" # ##### ###### # # # # # # "
iransec[5]=" # # # # # # ## # # # # # "
iransec[6]=" ### # # # # # # ##### ###### #### "
iransec[7]=" A S h i e l d f o r D e f e n d e r s "
iransec[8]=" [email protected] "
clear
#for aligning it center and randomize its color for each run || read more: http://www.linuxquestions.org/questions/linux-newbie-8/bash-script-center-text-on-screen-4175417274/ and http://www.commandlinefu.com/commands/view/12548/generate-a-random-text-color-in-bash
printf "\e[3$(( $RANDOM * 6 / 32767 + 1 ))m"
y=$[($(tput cols)-${#iransec[0]})/2]
x=$[($(tput lines)-${#iransec[@]})/2]
for i in "${!iransec[@]}"
do
tput cup $x $y
if [ "$i" = "7" ]; then
printf "\033[1m\e[100m\e[39m ${iransec[$i]} \033[0m"
else
printf "\e[3$(( $RANDOM * $i / 32767 + 1 ))m ${iransec[$i]} \033[0m"
fi
x=$[x+1]
done
echo ""
echo ""
echo ""
echo ""
|
namnamir/IranSec-VPN-Linux
|
VPN.sh
|
Shell
|
gpl-3.0
| 2,038 |
#!/bin/sh
# Build llcbench's cachebench and emit a small wrapper script that forwards
# its arguments and writes results to $LOG_FILE at run time.
tar -zxvf llcbench-20170104.tar.gz
cd llcbench/
make linux-mpich
make cache-bench
# Record the build's exit status where the harness expects it.
echo $? > ~/install-exit-status
cd ..
# Generate the run-time wrapper (note: \$ keeps $@ / $LOG_FILE unexpanded).
echo "#!/bin/sh
cd llcbench/cachebench/
./cachebench \$@ > \$LOG_FILE" > cachebench
chmod +x cachebench
|
phoronix-test-suite/phoronix-test-suite
|
ob-cache/test-profiles/pts/cachebench-1.1.0/install.sh
|
Shell
|
gpl-3.0
| 238 |
#!/usr/bin/env bash
#Title : NoTrack Installer
#Description : This script will install NoTrack and then configure dnsmasq and lighttpd
#Authors : QuidsUp, floturcocantsee, rchard2scout, fernfrost
#Usage : bash install.sh
#Version : 20.12
#######################################
# User Configurable Settings
#######################################
INSTALL_LOCATION="" #define custom installation path
#NOTRACK_REPO="https://github.com/quidsup/notrack.git"
NOTRACK_REPO="https://gitlab.com/quidsup/notrack.git"
HOSTNAME="" #leave blank to auto-detect (see get_hostname below)
NETWORK_DEVICE="" #leave blank to prompt (see prompt_network_device below)
WEB_USER="" #leave blank to auto-detect (see find_web_user below)
WEB_FOLDER="/var/www/html"
SERVERIP1="1.1.1.1" #upstream DNS servers written into the dnsmasq config
SERVERIP2="1.0.0.1"
LISTENIP="127.0.0.1"
#######################################
# Constants
#######################################
readonly VERSION="20.12"
#######################################
# Global Variables
#######################################
DBUSER="ntrk" #MariaDB credentials/name used by NoTrack
DBPASSWORD="ntrkpass"
DBNAME="ntrkdb"
SUDO_REQUIRED=false #true if installing to /opt
#######################################
# Copy
# Copies either a file or directory
#
# Globals:
#   None
# Arguments:
#   $1: Source
#   $2: Destination
# Returns:
#   None
#######################################
function copy() {
  if [ -d "$1" ]; then                 #Directory source?
    echo "Copying folder $1 to $2"
    sudo cp -r "$1" "$2"
  elif [ -f "$1" ]; then               #Plain file source?
    echo "Copying $1 to $2"
    sudo cp "$1" "$2"
  else                                 #Source missing - warn but continue
    echo "WARNING: Unable to find $1 :-("
  fi
}
#######################################
# Create File
# Checks if a file exists and creates it
#
# Globals:
#   None
# Arguments:
#   #$1 File to create
# Returns:
#   None
#######################################
function create_file() {
  [ -e "$1" ] && return                #Nothing to do if it already exists
  echo "Creating file: $1"
  sudo touch "$1"                      #Create the empty file
  sudo chmod 664 "$1"                  #RW RW R permissions
}
#######################################
# Create Folder
# Creates a folder if it doesn't exist
# Globals:
#   None
# Arguments:
#   $1 - Folder to create
# Returns:
#   None
#######################################
function create_folder() {
  [ -d "$1" ] && return                #Already present - nothing to do
  echo "Creating folder: $1"           #Tell user folder being created
  sudo mkdir "$1"                      #Create folder
}
#######################################
# Exit script with exit code
# Globals:
#   None
# Arguments:
#   $1 Error Message
#   $2 Exit Code
# Returns:
#   Exit Code
#######################################
error_exit() {
  # Diagnostics go to stderr so they are not swallowed by $(...) captures
  # or output redirections of the installer.
  printf 'Error :-( %s\n' "$1" >&2
  printf 'Aborting\n' >&2
  exit "$2"
}
#######################################
# Rename File
# Renames Source file to Destination
# Set permissions to -rwxr-xr-x
#
# Globals:
#   None
# Arguments:
#   $1: Source
#   $2: Destination
# Returns:
#   None
#######################################
function rename_file() {
  if [ ! -e "$1" ]; then               #Guard: warn and bail if source absent
    echo "WARNING: Unable to rename file $1 :-("
    return
  fi
  sudo mv "$1" "$2"
  sudo chmod 755 "$2"                  #-rwxr-xr-x
}
#######################################
# Set Ownership of either a file or folder
#
# Globals:
#   None
# Arguments:
#   $1 File or Folder
#   $2 User
#   $3 Group
# Returns:
#   None
#######################################
function set_ownership() {
  local target="$1" user="$2" group="$3"
  if [ ! -d "$target" ] && [ ! -e "$target" ]; then
    echo "Set_Ownership: Error - $target is missing"
    return
  fi
  if [ -d "$target" ]; then            #Folders are chowned recursively
    echo "Setting ownership of folder $target to $user:$group"
    sudo chown -hR "$user":"$group" "$target"
  else                                 #Single file
    echo "Setting ownership of file $target to $user:$group"
    sudo chown "$user":"$group" "$target"
  fi
}
#######################################
# Set Permissions of either a file or folder
#
# Globals:
#   None
# Arguments:
#   $1 File or Folder
#   $2 Permissions
# Returns:
#   None
#######################################
function set_permissions() {
  local target="$1" perms="$2"
  if [ ! -d "$target" ] && [ ! -e "$target" ]; then
    echo "Set_Permissions: Error - $target is missing"
    return
  fi
  if [ -d "$target" ]; then            #Folders are chmodded recursively
    echo "Setting permissions of folder $target to $perms"
    sudo chmod -R "$perms" "$target"
  else                                 #Single file
    echo "Setting permissions of file $target to $perms"
    sudo chmod "$perms" "$target"
  fi
}
#######################################
# Restart service
# with either systemd or sysvinit or runit
#
# Globals:
#   None
# Arguments:
#   $1 Service name (no-op when empty)
# Returns:
#   None
#######################################
function service_restart() {
if [[ -n $1 ]]; then
echo "Restarting $1"
# Detect the init system by which supervisor binary is available.
if [ "$(command -v systemctl)" ]; then #systemd
sudo systemctl restart "$1"
elif [ "$(command -v service)" ]; then #sysvinit
sudo service "$1" restart
elif [ "$(command -v sv)" ]; then #runit
sudo sv restart "$1"
else
error_exit "Unable to restart services. Unknown service supervisor" "21"
fi
fi
}
#######################################
# Start service
# Start and Enable systemd based services
# TODO complete for sv and sysvinit
#
# Globals:
#   None
# Arguments:
#   $1 Service name (no-op when empty)
# Returns:
#   None
#######################################
function service_start() {
if [[ -n $1 ]]; then
echo "Starting $1"
# Only systemd is handled here (see TODO above); on other init systems
# this function silently does nothing.
if [ "$(command -v systemctl)" ]; then #systemd
sudo systemctl enable "$1"
sudo systemctl start "$1"
fi
fi
}
#######################################
# Draw prompt menu
#   1. Clear Screen
#   2. Draw menu
#   3. Read single character of user input
#   4. Evaluate user input
#     4a. Check if value is between 0-9
#     4b. Check if value is between 1 and menu size. Return out of function if sucessful
#     4c. Check if user pressed the up key (ending A), Move highlighted point
#     4d. Check if user pressed the up key (ending B), Move highlighted point
#     4e. Check if user pressed Enter key, Return out of function
#     4f. Check if user pressed Q or q, Exit out with error code 1
#   5. User failed to input valid selection. Loop back to #2
#
# Globals:
#   None
# Arguments:
#   $1 = Title, $2, $3... Option 1, 2
# Returns:
#   $? = Choice user made
#######################################
function menu() {
local choice
local highlight
local menu_size
highlight=1
menu_size=0
clear
while true; do
# Redraw: $1 is the title, every later argument is a numbered option;
# the currently highlighted one is marked with '*'.
for i in "$@"; do
if [ $menu_size == 0 ]; then #$1 Is Title
echo -e "$1"
echo
else
if [ $highlight == $menu_size ]; then
echo " * $menu_size: $i"
else
echo " $menu_size: $i"
fi
fi
((menu_size++))
done
# Arrow keys arrive as an escape sequence; reading one character at a
# time means the final byte ('A' = up, 'B' = down) is what we see here.
read -r -sn1 choice;
echo "$choice"
if [[ $choice =~ ^[0-9]+$ ]]; then #Has the user chosen 0-9
if [[ $choice -ge 1 ]] && [[ $choice -lt $menu_size ]]; then
return "$choice"
fi
elif [[ $choice == "A" ]]; then #Up
if [ $highlight -le 1 ]; then #Loop around list
highlight=$((menu_size-1))
echo
else
((highlight--))
fi
elif [[ $choice == "B" ]]; then #Down
if [ $highlight -ge $((menu_size-1)) ]; then #Loop around list
highlight=1
echo
else
((highlight++))
fi
elif [[ $choice == "" ]]; then #Enter
return "$highlight" #Return Highlighted value
elif [[ $choice == "q" ]] || [[ $choice == "Q" ]]; then
exit 1
fi
#C Right, D Left
menu_size=0
clear
done
}
#######################################
# Prompt for Install Location
# Globals:
#   INSTALL_LOCATION
# Arguments:
#   None
# Returns:
#   None
#######################################
function prompt_installloc() {
if [[ -n $INSTALL_LOCATION ]]; then #Preset by user config - nothing to ask
return
fi
local homefolder="${HOME}"
#Find users home folder if installer was run as root
if [[ $homefolder == "/root" ]]; then
homefolder="$(getent passwd | grep /home | grep -v syslog | cut -d: -f6)"
if [ "$(wc -w <<< "$homefolder")" -gt 1 ]; then #How many users found?
echo "Unable to estabilish which Home folder to install to"
echo "Either run this installer without using sudo / root, or manually set the \$INSTALL_LOCATION variable"
echo "\$INSTALL_LOCATION=\"/home/you/NoTrack\""
exit 15
fi
fi
#menu returns the number of the chosen entry in $?
menu "Select Install Folder" "Home $homefolder" "Opt /opt" "Cancel"
case $? in
1)
INSTALL_LOCATION="$homefolder/notrack"
;;
2)
INSTALL_LOCATION="/opt/notrack"
SUDO_REQUIRED=true #writing to /opt needs root
;;
3)
error_exit "Aborting Install" 1
;;
esac
if [[ $INSTALL_LOCATION == "" ]]; then
error_exit "Install folder not set" 15
fi
}
#######################################
# Prompt for network device
# Picks the device automatically when only one exists, shows a menu for
# 2-9 devices, and falls back to a typed prompt for 10 or more.
# Globals:
#   NETWORK_DEVICE
# Arguments:
#   None
# Returns:
#   None
#######################################
function prompt_network_device() {
  local count_net_dev=0
  local device=""
  local -a device_list
  local menu_choice
  if [[ -n $NETWORK_DEVICE ]]; then              #Check if NETWORK_DEVICE is set
    return
  fi
  if [ ! -d /sys/class/net ]; then               #Check net devices folder exists
    echo "Error. Unable to find list of Network Devices"
    echo "Edit user customisable setting \$NETWORK_DEVICE with the name of your Network Device"
    echo "e.g. \$NETWORK_DEVICE=\"eth0\""
    exit 11
  fi
  for device in /sys/class/net/*; do             #Read list of net devices
    device="${device:15}"                        #Trim "/sys/class/net/" off
    if [[ $device != "lo" ]]; then               #Exclude loopback
      device_list[$count_net_dev]="$device"
      ((count_net_dev++))
    fi
  done
  if [ "$count_net_dev" -eq 0 ]; then            #None found
    echo "Error. No Network Devices found"
    echo "Edit user customisable setting \$NETWORK_DEVICE with the name of your Network Device"
    echo "e.g. \$NETWORK_DEVICE=\"eth0\""
    exit 11
  elif [ "$count_net_dev" -eq 1 ]; then          #1 Device
    NETWORK_DEVICE=${device_list[0]}             #Simple, just set it
  elif [ "$count_net_dev" -le 9 ]; then          #2-9 devices: use the menu
    # BUG FIX: pass each device as its own argument ("${device_list[@]}").
    # The original "${device_list[*]}" joined every device into a single
    # menu entry, making it impossible to pick one.
    menu "Select Network Device" "${device_list[@]}"
    menu_choice=$?
    NETWORK_DEVICE=${device_list[$((menu_choice-1))]}
  else                                           #10 or more use bash prompt
    # BUG FIX: this branch was previously unreachable because the earlier
    # '-gt 0' test captured every count of 2 or more.
    clear
    echo "Network Devices detected: ${device_list[*]}"
    echo -n "Select Network Device to use for DNS queries: "
    read -r choice
    NETWORK_DEVICE=$choice
    echo
  fi
  if [[ -z $NETWORK_DEVICE ]]; then              #Final confirmation
    error_exit "Network Device not entered, unable to proceed" 11
  fi
}
#######################################
# Attempt to find hostname of system
# Leaves HOSTNAME untouched when it is already set; otherwise reads it from
# /etc/sysconfig/network (Red Hat style) or /etc/hostname.
#
# Globals:
#   HOSTNAME
# Arguments:
#   None
# Returns:
#   None
#######################################
function get_hostname() {
  if [[ -n $HOSTNAME ]]; then                    #Check if HOSTNAME is not null
    return
  fi
  if [ -e /etc/sysconfig/network ]; then         #Get first entry for localhosts
    # BUG FIX: quote the tr operand. The unquoted [[:space:]] was subject
    # to filename globbing, and the doubled brackets also made tr delete
    # literal '[' and ']' characters from the hostname.
    HOSTNAME=$(grep "HOSTNAME" /etc/sysconfig/network | cut -d "=" -f 2 | tr -d '[:space:]')
  elif [ -e /etc/hostname ]; then
    HOSTNAME=$(cat /etc/hostname)
  else
    echo "get_hostname: WARNING - Unable to find hostname"
  fi
}
#######################################
# Disable Dnsmasq Stub
# Disable Stub Listener in Dnsmasq systemd services
#
# Globals:
#   None
# Arguments:
#   None
# Returns:
#   None
#
#######################################
function disable_dnsmasq_stub() {
local resolveconf="/etc/systemd/resolved.conf"
if [ "$(command -v systemctl)" ]; then #Only relevant for systemd
if [ -e "$resolveconf" ]; then #Does resolve.conf file exist?
echo "Disabling Systemd DNS stub resolver"
echo "Setting DNSStubListener=no in $resolveconf"
# Only flips the commented-out default; a manually-set value is left as-is.
sudo sed -i "s/#DNSStubListener=yes/DNSStubListener=no/" "$resolveconf" &> /dev/null
service_restart "systemd-resolved.service"
service_restart "dnsmasq.service"
fi
fi
echo "========================================================="
}
#######################################
# Installs deb packages using apt for Ubuntu / Debian based systems
# Note: the 'sleep 2s' pauses give the user a moment to read each heading.
#
# Globals:
#   None
# Arguments:
#   None
# Returns:
#   None
#######################################
function install_deb() {
echo "Refreshing apt"
sudo apt update
echo
echo "Preparing to install Deb packages..."
sleep 2s
echo "Installing dependencies"
sleep 2s
sudo apt -y install git unzip
echo
echo "Installing DNS Server Dnsmasq"
sleep 2s
sudo apt -y install dnsmasq
echo
echo "Installing MariaDB"
sleep 2s
sudo apt -y install mariadb-server
echo
echo "Installing Webserver Nginx"
sleep 2s
sudo apt -y install nginx
echo
# Self-signed certificate so HTTPS works out of the box.
echo "Creating snakeoil SSL cert"
sudo apt -y install ssl-cert
sudo make-ssl-cert generate-default-snakeoil
echo
echo "Installing PHP"
sleep 2s
sudo apt -y install memcached php-memcache php php-fpm php-curl php-mysql
echo
echo "Installing Python3"
sleep 2s
sudo apt -y install python3 python3-mysql.connector
echo "Finished installing Deb packages"
echo "========================================================="
echo
}
#######################################
# Git Clone
# Clone NoTrack using Git
# Globals:
#   INSTALL_LOCATION, NOTRACK_REPO, SUDO_REQUIRED
# Arguments:
#   None
# Returns:
#   None
#######################################
function git_clone() {
  local -a runner=()
  echo "Downloading NoTrack using Git"
  #Prefix the clone with sudo only when installing to a root-owned path.
  if [ "$SUDO_REQUIRED" == "true" ]; then
    runner=(sudo)
  fi
  "${runner[@]}" git clone --depth=1 "$NOTRACK_REPO" "$INSTALL_LOCATION"
  echo
}
#######################################
# Find the service name for the webserver
#
# Globals:
#   WEB_USER
# Arguments:
#   None
# Returns:
#   None
#######################################
function find_web_user() {
  local candidate
  if [[ -n $WEB_USER ]]; then                    #Check if WEB_USER is not null
    echo "Web service user already set to: $WEB_USER"
    return
  fi
  #Try the account names used by the supported distros, in the same order
  #as before: Ubuntu (www-data), Redhat (nginx), Void (_nginx), Arch (http).
  for candidate in www-data nginx _nginx http; do
    if getent passwd "$candidate" &> /dev/null; then
      WEB_USER="$candidate"
      echo "Web service is using $WEB_USER account"
      return
    fi
  done
  echo "Unable to find account for web service :-("
  echo "Check /etc/passwd for the web service account and then ammend \$WEB_USER value in this installer"
  exit 9
}
#######################################
# Setup LocalHosts
# Create initial entry in /etc/localhosts.list
# Globals:
# INSTALL_LOCATION
# Arguments:
# None
# Returns:
# None
#######################################
function setup_localhosts() {
  local hostsfile="/etc/localhosts.list"

  create_file "$hostsfile"                               #Local host IPs

  # Nothing to record unless a hostname was detected earlier
  if [[ -z $HOSTNAME ]]; then
    return
  fi

  echo "Setting up your /etc/localhosts.list for Local Hosts"
  printf '127.0.0.1\t%s\n' "$HOSTNAME" | sudo tee -a "$hostsfile" &> /dev/null
}
#######################################
# Setup Dnsmasq
# Copy custom config settings into dnsmasq.conf and create log file
# Create initial entry in /etc/localhosts.list
# Globals:
# INSTALL_LOCATION, LISTENIP, SERVERIP1, SERVERIP2, NETWORK_DEVICE
# Arguments:
# None
# Returns:
# None
#######################################
function setup_dnsmasq() {
  local dnsmasqconf="/etc/dnsmasq.conf"
  local serversconf="/etc/dnsmasq.d/servers.conf"
  local setting

  echo "Configuring Dnsmasq"

  copy "$dnsmasqconf" "$dnsmasqconf.old"                 #Backup old config
  create_folder "/etc/dnsmasq.d"                         #Issue #94 folder not created
  create_file "/var/log/notrack.log"                     #DNS logs storage
  set_ownership "/var/log/notrack.log" "dnsmasq" "root"
  set_permissions "/var/log/notrack.log" "664"

  #Copy config files modified for NoTrack
  echo "Copying Dnsmasq config files from $INSTALL_LOCATION to /etc/conf"
  copy "$INSTALL_LOCATION/conf/dnsmasq.conf" "$dnsmasqconf"

  #Create initial Server Config. Note settings can be changed later via web admin
  echo "Creating DNS Server Config $serversconf"
  create_file "$serversconf"                             #DNS Server Config
  for setting in "server=$SERVERIP1" "server=$SERVERIP2" \
                 "interface=$NETWORK_DEVICE" "listen-address=$LISTENIP"; do
    echo "$setting" | sudo tee -a "$serversconf" &> /dev/null
  done

  service_start "dnsmasq"

  echo "Setup of Dnsmasq complete"
  echo "========================================================="
  echo
  sleep 2s
}
#######################################
# Setup nginx config files
# Find web service account
# Copy NoTrack nginx config to /etc/nginx/sites-available/default
# Find the version of PHP
# Add PHP Version to the nginx config
#
# Globals:
# INSTALL_LOCATION
# Arguments:
# None
# Returns:
# None
#######################################
function setup_nginx() {
  local version_output=""
  local php_version=""

  echo
  echo "Setting up nginx"
  find_web_user

  #Backup the old nginx default config
  rename_file "/etc/nginx/sites-available/default" "/etc/nginx/sites-available/default.old"

  #Replace the default nginx config
  copy "$INSTALL_LOCATION/conf/nginx.conf" "/etc/nginx/sites-available/nginx.conf"
  rename_file "/etc/nginx/sites-available/nginx.conf" "/etc/nginx/sites-available/default"

  #FastCGI server needs to contain the current PHP version
  echo "Finding version of PHP"
  version_output="$(php --version)"

  #Extract the major.minor version number from the "PHP x.y.z" banner
  if [[ $version_output =~ ^PHP[[:space:]]([0-9]{1,2}\.[0-9]{1,2}) ]]; then
    php_version="${BASH_REMATCH[1]}"
    echo "Found PHP version $php_version"
    sudo sed -i "s/%phpver%/$php_version/" /etc/nginx/sites-available/default
  else
    echo "I can't find the PHP version :-( You will have to replace %phpver% in /etc/nginx/sites-available/default"
    sleep 8s
  fi

  service_start "php$php_version-fpm"
  service_start "nginx"

  echo "Setup of nginx complete"
  echo "========================================================="
  echo
  sleep 2s
}
#######################################
# Setup MariaDB
# Setup user account and password (TODO) for Maria DB
# Globals:
# DBUSER, DBPASSWORD, DBNAME
# Arguments:
# None
# Returns:
# None
#######################################
function setup_mariadb() {
  #local dbconfig="$INSTALL_LOCATION/admin/settings/dbconfig.php" FUTURE FEATURE
  local rootpass=""

  echo "Setting up MariaDB"

  #Create a random password
  #DBPASSWORD="$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 40 | head -n 1)"

  service_start "mariadb"

  echo "Creating User $DBUSER:"
  sudo mysql --user=root --password="$rootpass" -e "CREATE USER '$DBUSER'@'localhost' IDENTIFIED BY '$DBPASSWORD';"

  #Check to see if the new user has been added.
  #The user list is expected to contain "$DBUSER ... root" on success
  if [[ ! $(sudo mysql -sN --user=root --password="$rootpass" -e "SELECT User FROM mysql.user") =~ $DBUSER[[:space:]]root ]]; then
    error_exit "MariaDB command failed, have you entered incorrect root password?" "35"
  fi

  echo "Creating Database $DBNAME:"
  sudo mysql --user=root --password="$rootpass" -e "CREATE DATABASE $DBNAME;"

  #The account was previously hard-coded as 'ntrk' in the grants below;
  #use $DBUSER so the grants always match the account created above
  echo "Setting privileges for $DBUSER user"
  sudo mysql --user=root --password="$rootpass" -e "GRANT ALL PRIVILEGES ON $DBNAME.* TO '$DBUSER'@'localhost';"
  sudo mysql --user=root --password="$rootpass" -e "GRANT FILE ON *.* TO '$DBUSER'@'localhost';"
  #GRANT INSERT, SELECT, DELETE, UPDATE ON database.* TO 'user'@'localhost' IDENTIFIED BY 'password';
  sudo mysql --user=root --password="$rootpass" -e "FLUSH PRIVILEGES;"

  # NOTE This feature will be enabled in NoTrack 0.9.7
  #add password to local dbconfig.php
  #touch "$dbconfig"
  #echo "<?php" > "$dbconfig"
  #echo "//Local MariaDB password generated at install" >> "$dbconfig"
  #echo "\$dbconfig->password = '$dbpassword';" >> "$dbconfig"
  #echo "?>" >> "$dbconfig"
  echo "========================================================="
  echo
}
#######################################
# Copy NoTrack web admin files
#
# Globals:
# INSTALL_LOCATION, WEB_FOLDER, WEB_USER, HOSTNAME
# Arguments:
# None
# Returns:
# None
#######################################
function setup_webadmin() {
  # Copy the web admin and sinkhole pages, then hand ownership of the
  # web root to the web service account found by find_web_user.
  # (Removed unused locals phpinfo/phpver left over from setup_nginx.)
  echo "Copying webadmin files to $WEB_FOLDER"
  copy "$INSTALL_LOCATION/admin" "$WEB_FOLDER/admin"
  copy "$INSTALL_LOCATION/sink" "$WEB_FOLDER/sink"

  echo "$WEB_USER taking over $WEB_FOLDER"
  sudo chown "$WEB_USER":"$WEB_USER" -hR "$WEB_FOLDER"
  echo
}
#######################################
# Setup NoTrack
# 1. Create systemd service using template notrack.service
# 2. Initial run of blockparser
#
# Globals:
# INSTALL_LOCATION, IP_VERSION, NETWORK_DEVICE
# Arguments:
# None
# Returns:
# None
#######################################
function setup_notrack() {
  local unitfile="/etc/systemd/system/notrack.service"

  # Install the systemd unit from the template and point it at the
  # chosen install directory, then enable and start it
  copy "$INSTALL_LOCATION/init-scripts/notrack.service" "/etc/systemd/system"
  sudo sed -i "s:%install_location%:$INSTALL_LOCATION:g" "$unitfile"
  sudo systemctl enable --now notrack.service

  # First download and parse of the blocklists
  echo "Downloading and parsing blocklists"
  sleep 2s
  sudo python3 "$INSTALL_LOCATION/src/blockparser.py"
  echo
}
#######################################
# Welcome Screen
# Globals:
# None
# Arguments:
# None
# Returns:
# None
#######################################
function show_welcome() {
  # Banner emitted via a heredoc; $VERSION is expanded inside it
  cat << EOF
Welcome to NoTrack v$VERSION

This installer will transform your system into a network-wide Tracker Blocker
Install Guides: https://youtu.be/MHsrdGT5DzE
                https://github.com/quidsup/notrack/wiki


Press any key to continue...
EOF
  read -rn1
}
#######################################
# Finish Screen
# Globals:
# INSTALL_LOCATION, REBOOT_REQUIRED, HOSTNAME
# Arguments:
# None
# Returns:
# None
#######################################
function show_finish() {
  echo "========================================================="
  echo
  echo -e "NoTrack Install Complete :-)"
  echo "Access the admin console at: http://$HOSTNAME/admin"
  echo
  # \u2022 is rendered as a bullet point by bash's echo -e (bash 4.2+)
  echo "Post Install Checklist:"
  echo -e "\t\u2022 Secure MariaDB Installation"
  echo -e "\t  Run: /usr/bin/mysql_secure_installation"
  echo
  echo -e "\t\u2022 Reboot System"
  echo -e "\t  PHP and Nginx services don't always restart cleanly"
  echo
  echo -e "\t\u2022 Enable DHCP"
  echo -e "\t  http://$HOSTNAME/dhcp"
  echo
  echo
  echo "========================================================="
  echo
}
#######################################
# Main
#######################################
# Abort early if sudo is missing; the rest of the installer relies on it
if ! command -v sudo &> /dev/null; then                  #Is sudo available?
  error_exit "NoTrack requires Sudo to be installed for Admin functionality" "10"
fi

show_welcome
get_hostname
prompt_installloc
prompt_network_device

clear
echo "Installing to : $INSTALL_LOCATION" #Final report before Installing
echo "Hostname : $HOSTNAME"
echo "Network Device: $NETWORK_DEVICE"
echo "Primary DNS : $SERVERIP1"
echo "Secondary DNS : $SERVERIP2"
echo "Listening IP : $LISTENIP"
echo
echo "Note: Primary and Secondary DNS can be changed later with the admin config"
echo

# Short countdown so the user can abort with Ctrl+C before anything is installed
seconds=6
while [ "$seconds" -gt 0 ]; do
  echo -ne "$seconds\033[0K\r"
  sleep 1
  seconds=$((seconds - 1))
done

install_deb
git_clone
setup_localhosts
setup_dnsmasq
disable_dnsmasq_stub
setup_nginx
setup_mariadb
setup_webadmin
setup_notrack
show_finish
|
quidsup/notrack
|
install-ubuntu.sh
|
Shell
|
gpl-3.0
| 24,092 |
#!/bin/bash
# Clone the kanelonesv3 repository into the current directory.
# made 02/2017 (original comment: "hecho 02/2017")
# cd kanelones
git clone https://github.com/googolplex/kanelonesv3.git
|
googolplex/kanelones
|
gitfeliz/clonar_kanelonesv3.sh
|
Shell
|
gpl-3.0
| 100 |
#!/bin/sh
# Regenerate the autotools build system (aclocal / libtoolize / autoheader /
# automake / autoconf) and then run configure unless NOCONFIGURE is set.

# Quote every expansion so the script works from a path containing spaces
bs_dir="$(dirname "$(readlink -f "$0")")"
rm -rf "${bs_dir}"/autom4te.cache
rm -f "${bs_dir}"/aclocal.m4 "${bs_dir}"/ltmain.sh

echo 'Running autoreconf -if...'
aclocal --force -I m4
libtoolize --install --copy --force
autoheader --force
automake --add-missing --copy --force-missing
autoconf --force

# Skip configure when NOCONFIGURE is non-empty (common autogen convention)
if test -z "$NOCONFIGURE" ; then
	echo 'Configuring...'
	"$bs_dir"/configure "$@"
fi
|
davembg/cgminer-dualminer
|
autogen.sh
|
Shell
|
gpl-3.0
| 392 |
#!/bin/bash
# Start the core babun plugins in a fixed order.
# -e: abort on error; -f: disable globbing; -o pipefail: fail whole pipelines
set -e -f -o pipefail

# Pull in the babun environment and plugin helper functions
source "/usr/local/etc/babun.instance"
source "$babun_tools/script.sh"
source "$babun_tools/plugins.sh"

# start plugins
# Order matters: git/core must come up before the cygwin-specific plugins
plugin_start "git"
plugin_start "core"
plugin_start "cygdrive"
plugin_start "cygfix"

# Automatic start disabled for now as we have to control the order of plugin starts
#
# for startFile in $(find "$babun_plugins" -name 'start.sh');
# do
#	bash "$startFile" || echo "Could not start $startFile"
# done
|
justathoughtor2/atomicApe
|
cygwin/usr/local/etc/babun/source/babun-core/plugins/start.sh
|
Shell
|
gpl-3.0
| 460 |
#!/bin/bash
# Build a signed release APK with Gradle.
# Edit the S_* values below to match your keystore before running.
echo "Setting env variables";
export S_KEYSTORE="../keystore.jks"   # path to the signing keystore
export S_ALIAS="alias_here"           # key alias inside the keystore
export S_KEYPASS="keypass_here"       # key password
export BUILD_NUMBER="0" # > 0 for non local build
# export CRASHLYTICS_API_KEY="" # if wanted
echo "Starting assembly"
./gradlew assembleRelease
echo "Done"
|
PTCE-Public/popcorn-android
|
build_release.sh
|
Shell
|
gpl-3.0
| 298 |
#!/bin/sh
# Build staticdata.sqlite from the Fuzzwork EVE SDE sqlite dump:
# download, verify, extract, dump the needed tables, import, clean up.

# Pick whichever downloader is installed
if command -v curl > /dev/null 2>&1; then
    get="curl -LO"
elif command -v wget > /dev/null 2>&1; then
    get="wget"
else
    echo "Failed to locate network downloader" >&2
    exit 1
fi

$get https://www.fuzzwork.co.uk/dump/sqlite-latest.sqlite.bz2
$get https://www.fuzzwork.co.uk/dump/sqlite-latest.sqlite.bz2.md5

echo "***"
echo "MD5 check"
echo "***"
if ! md5sum -c sqlite-latest.sqlite.bz2.md5; then
    rm sqlite-latest.sqlite.bz2 sqlite-latest.sqlite.bz2.md5
    exit 1
fi

echo "***"
echo "Extracting archive"
echo "***"
bunzip2 -v sqlite-latest.sqlite.bz2

echo "***"
echo "Creating new database"
echo "***"
# Dump only the tables the bot needs, then import them in one pass
: > dump.sql
for table in chrFactions mapRegions mapConstellations mapSolarSystems invNames invTypes; do
    sqlite3 sqlite-latest.sqlite ".dump $table" >> dump.sql
done
sqlite3 staticdata.sqlite < dump.sql

echo "***"
echo "Deleting temporary/downloaded files"
echo "***"
rm dump.sql sqlite-latest.sqlite sqlite-latest.sqlite.bz2.md5

echo "***"
echo "Finished"
echo "***"
|
Hijacker/vmbot
|
tools/create_staticdata.sh
|
Shell
|
gpl-3.0
| 1,203 |
# Build the chamah binary with valac from this script's own directory.
path=$(dirname "${BASH_SOURCE[0]}")
# Quote the path and bail out if the cd fails rather than compiling in the
# wrong directory
cd "$path" || exit 1

# Note: the original ended with a trailing "\" after "-o chamah", which
# would have swallowed whatever line followed; removed.
valac -X -lm \
    --pkg gee-0.8 \
    --pkg glib-2.0 \
    --pkg gio-2.0 \
    Chamah.vala \
    src/pluie/global.vala \
    src/pluie/bin.vala \
    src/pluie/crypt.Chamah.vala \
    src/pluie/crypt.Sbox.vala \
    src/pluie/crypt.MatrixBytes.vala \
    src/pluie/crypt.Permutation.vala \
    src/pluie/crypt.PseudoRandom.vala \
    src/pluie/crypt.KeyPermuter.vala \
    src/pluie/crypt.Movment.vala \
    src/pluie/io.Bytes.vala \
    src/pluie/io.InputChunkStream.vala \
    -o chamah
|
pluie-org/chamah
|
build.sh
|
Shell
|
gpl-3.0
| 473 |
#!/bin/bash
# Remove test output directories from previous runs.
# Fixed shebang: was "#!bin/bash" (missing leading slash), which points at a
# non-existent relative interpreter path.
# -f keeps the cleanup quiet when a directory does not exist.
rm -rf testlenbias
rm -rf testlenbiasz
rm -rf test1
rm -rf test2
|
franticspider/frontierra
|
tests/cleanup.sh
|
Shell
|
gpl-3.0
| 73 |
#!/bin/sh
#-------------------------------------------------------------------
# config.sh: This file is read at the beginning of the execution of the ASGS to
# set up the runs that follow. It is reread at the beginning of every cycle,
# every time it polls the datasource for a new advisory. This gives the user
# the opportunity to edit this file mid-storm to change config parameters
# (e.g., the name of the queue to submit to, the addresses on the mailing list,
# etc)
#-------------------------------------------------------------------
#
# Copyright(C) 2020 Jason Fleming
#
# This file is part of the ADCIRC Surge Guidance System (ASGS).
#
# The ASGS is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# ASGS is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# the ASGS. If not, see <http://www.gnu.org/licenses/>.
#-------------------------------------------------------------------
# Fundamental
INSTANCENAME=LAv20a_al282020_bde # "name" of this ASGS process
ACCOUNT=ADCIRC
QOS=vip7000 # for priority during a storm
QUEUENAME=normal # same as SLURM partition
SERQUEUE=normal # queue for serial jobs
PPN=24 # processors per node -- NOTE(review): confirm for target platform
GROUP="G-803086"
ASGSADMIN="[email protected]"
# Input files and templates
GRIDNAME=LAv20a
source $SCRIPTDIR/config/mesh_defaults.sh
# Initial state (overridden by STATEFILE after ASGS gets going)
COLDSTARTDATE=auto
HOTORCOLD=hotstart
LASTSUBDIR=https://fortytwo.cct.lsu.edu/thredds/fileServer/2020/nam/2020102712/LA_v20a-WithUpperAtch_chk/supermic.hpc.lsu.edu/LAv20a_nam_akheir/namforecast
# RabbitMQ status messaging on/off switches
RMQMessaging_Enable="on"
RMQMessaging_Transmit="on"
#FTPSITE=ftp.nhc-replay.stormsurge.email
#RSSSITE=nhc-replay.stormsurge.email
# Physical forcing (defaults set in config/forcing_defaults.sh)
TIDEFAC=on # tide factor recalc
HINDCASTLENGTH=30.0 # length of initial hindcast, from cold (days)
BACKGROUNDMET=off # NAM download/forcing
FORECASTCYCLE="00,06,12,18"
TROPICALCYCLONE=on # tropical cyclone forcing
STORM=28 # storm number, e.g. 05=ernesto in 2006
YEAR=2020 # year of the storm
WAVES=on # wave forcing
REINITIALIZESWAN=no # used to bounce the wave solution
VARFLUX=off # variable river flux forcing
#STATICOFFSET=0.30
#
CYCLETIMELIMIT="99:00:00"
# Computational Resources (related defaults set in platforms.sh)
NCPU=2015 # number of compute CPUs for all simulations
NCPUCAPACITY=9999
NUMWRITERS=1
# Post processing and publication
INTENDEDAUDIENCE=general # "general" | "developers-only" | "professional"
POSTPROCESS=( createMaxCSV.sh includeWind10m.sh createOPeNDAPFileList.sh opendap_post.sh )
OPENDAPNOTIFY="[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected]"
NOTIFY_SCRIPT=ut-nam-notify.sh
TDS=( tacc_tds lsu_tds )
#
# Scenario package
SCENARIOPACKAGESIZE=6
# $si is the scenario index, presumably set by the ASGS driver before this
# config is sourced -- verify against asgs_main.sh
case $si in
-2)
ENSTORM=hindcast
;;
-1)
# do nothing ... this is not a forecast
ENSTORM=nowcast
;;
0)
ENSTORM=veerRight100Wind10m
PERCENT=100
;;
1)
ENSTORM=veerRight100
PERCENT=100
;;
2)
ENSTORM=veerLeft100Wind10m
PERCENT=-100
;;
3)
ENSTORM=veerLeft100
PERCENT=-100
;;
4)
ENSTORM=nhcConsensusWind10m
;;
5)
ENSTORM=nhcConsensus
;;
*)
# NOTE(review): falls through without exiting on an unknown member number
echo "CONFIGURATION ERROR: Unknown ensemble member number: '$si'."
;;
esac
source $SCRIPTDIR/config/io_defaults.sh # sets met-only mode based on "Wind10m" suffix
#
PREPPEDARCHIVE=prepped_${GRIDNAME}_${INSTANCENAME}_${NCPU}.tar.gz
HINDCASTARCHIVE=prepped_${GRIDNAME}_hc_${INSTANCENAME}_${NCPU}.tar.gz
|
jasonfleming/asgs
|
config/2020/LAv20a_al282020_bde.sh
|
Shell
|
gpl-3.0
| 4,205 |
#!/bin/sh
# Convert a ULog file to CSV and dump its parameters in Octave format.
# Usage: ulog2octave.sh <file.ulg>

# Fail early with a usage message instead of passing an empty argument on
if [ -z "$1" ]; then
    echo "Usage: $0 <ulog-file>" >&2
    exit 1
fi

# Quote the filename so paths with spaces survive
ulog2csv "$1"
ulog_params -o "$1" parameters.octave
|
kd0aij/impulse-response
|
src/preprocess/ulog2octave.sh
|
Shell
|
gpl-3.0
| 58 |
#!/bin/bash
# force indendation settings
# vim: ts=4 shiftwidth=4 expandtab
########################################################################
########################################################################
########################################################################
# Copyright (C) 2017 Tomasz Wisniewski aka
# DAGON <[email protected]>
#
# http://github.com/dagon666
# http://pcarduino.blogspot.co.uk
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
########################################################################
########################################################################
########################################################################
# module dependencies
. ../../libs/libnapi_assoc.sh
. ../../libs/libnapi_retvals.sh
# fakes/mocks
. fake/libnapi_logging_fake.sh
# module under test
. ../../libs/libnapi_sysconf.sh
#
# tests env setup
#
setUp() {
    # Reset the sysconf key=value store before each test case
    ___g_sysconf_configuration=()
}
#
# tests env tear down
#
tearDown() {
    # Clear the sysconf key=value store so state never leaks between tests
    ___g_sysconf_configuration=()
}
test_sysconf_getKey_SO_returnsValueForExistingKey() {
    local configKey="some_key"
    local configValue="some value"

    # Seed the store directly, then read the value back
    ___g_sysconf_configuration=( "${configKey}=${configValue}" )

    assertEquals 'check the key value' \
        "$configValue" "$(sysconf_getKey_SO "$configKey")"
    assertEquals 'check return status on success' \
        0 $?
}
test_sysconf_getKey_SO_returnsValueStartingWithDelimiter() {
    local configKey="some_key"
    # Value deliberately starts with '=' to exercise delimiter handling
    local configValue="===some value"

    ___g_sysconf_configuration=( "${configKey}=${configValue}" )

    assertEquals 'check the key value' \
        "$configValue" "$(sysconf_getKey_SO "$configKey")"
    assertEquals 'check return status on success' \
        0 $?
}
test_sysconf_getKey_SO_failsForNonExistingKey() {
    local configKey="some_key"
    local configValue="some value"
    local lookupResult=

    ___g_sysconf_configuration=( "${configKey}=${configValue}" )

    # Assignment is kept separate from the declaration so $? below reflects
    # the command substitution, not 'local'
    lookupResult="$(sysconf_getKey_SO "non-existingKey")"
    assertEquals 'check return status on failure' \
        "$G_RETFAIL" $?
    assertNotEquals 'check the key value' \
        "$configValue" "$lookupResult"
}
test_sysconf_setKey_GV_addsValuesWithWhiteCharacters() {
    local configKey="someKey"
    local configValue="some value with white characters"

    sysconf_setKey_GV "$configKey" "$configValue"
    assertEquals 0 $?

    # The store should hold exactly one "key=value" entry
    assertEquals \
        "${configKey}=${configValue}" "${___g_sysconf_configuration[*]}"
    assertEquals 'check the key value' \
        "$configValue" "$(sysconf_getKey_SO "$configKey")"
}
test_sysconf_setKey_GV_modifiesAlreadyExistingKey() {
    local configKey="someKey"
    local initialValue="original-value"
    local updatedValue="some value with white characters"

    # Set the key twice; the second write must replace the first
    sysconf_setKey_GV "$configKey" "$initialValue"
    sysconf_setKey_GV "$configKey" "$updatedValue"
    assertEquals 0 $?

    assertNotEquals \
        "$initialValue" "$(sysconf_getKey_SO "$configKey")"
    assertEquals \
        "${configKey}=${updatedValue}" "${___g_sysconf_configuration[*]}"
    assertEquals 'check the key value' \
        "$updatedValue" "$(sysconf_getKey_SO "$configKey")"
}
# shunit call
. shunit2
|
dagon666/napi
|
tests/unit_tests/libnapi_sysconf_test.sh
|
Shell
|
gpl-3.0
| 3,615 |
# Test-harness hook: configure the 60.nfs eventscript so CTDB manages NFS.
setup ()
{
# Heredoc body must stay unindented so the option line is passed verbatim
setup_script_options "service" "60.nfs" <<EOF
CTDB_MANAGES_NFS="yes"
EOF
}
|
sathieu/samba
|
ctdb/tests/eventscripts/scripts/06.nfs.sh
|
Shell
|
gpl-3.0
| 87 |
#!/bin/bash
#
# Copyright (C) 2019 Nicolas Bertin
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# Emit an HTML5 application-cache manifest on stdout, listing every asset
# under the css/js/lib/lua/img directories plus import.css.
echo "CACHE MANIFEST"
echo ""
# $( ) instead of legacy backticks for the date stamp
echo "# date: $(date +'%Y-%m-%d %H:%M:%S')"
echo ""
echo "CACHE:"
find import.css
find css -type f -print
find js -type f -print
find lib -type f -print
find lua -type f -print
find img -type f -print
echo ""
echo "NETWORK:"
echo "*"
echo ""
echo "FALLBACK:"
nbertin/webwigo
|
tools/mkmanifest.bash
|
Shell
|
gpl-3.0
| 1,013 |
# (c) Copyright 2009 - 2010 Xilinx, Inc. All rights reserved.
#
# This file contains confidential and proprietary information
# of Xilinx, Inc. and is protected under U.S. and
# international copyright and other intellectual property
# laws.
#
# DISCLAIMER
# This disclaimer is not a license and does not grant any
# rights to the materials distributed herewith. Except as
# otherwise provided in a valid license issued to you by
# Xilinx, and to the maximum extent permitted by applicable
# law: (1) THESE MATERIALS ARE MADE AVAILABLE "AS IS" AND
# WITH ALL FAULTS, AND XILINX HEREBY DISCLAIMS ALL WARRANTIES
# AND CONDITIONS, EXPRESS, IMPLIED, OR STATUTORY, INCLUDING
# BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, NON-
# INFRINGEMENT, OR FITNESS FOR ANY PARTICULAR PURPOSE; and
# (2) Xilinx shall not be liable (whether in contract or tort,
# including negligence, or under any other theory of
# liability) for any loss or damage of any kind or nature
# related to, arising under or in connection with these
# materials, including for any direct, or any indirect,
# special, incidental, or consequential loss or damage
# (including loss of data, profits, goodwill, or any type of
# loss or damage suffered as a result of any action brought
# by a third party) even if such damage or loss was
# reasonably foreseeable or Xilinx had been advised of the
# possibility of the same.
#
# CRITICAL APPLICATIONS
# Xilinx products are not designed or intended to be fail-
# safe, or for use in any application requiring fail-safe
# performance, such as life-support or safety devices or
# systems, Class III medical devices, nuclear facilities,
# applications related to the deployment of airbags, or any
# other applications that could lead to death, personal
# injury, or severe property or environmental damage
# (individually and collectively, "Critical
# Applications"). Customer assumes the sole risk and
# liability of any use of Xilinx products in Critical
# Applications, subject only to applicable laws and
# regulations governing limitations on product liability.
#
# THIS COPYRIGHT NOTICE AND DISCLAIMER MUST BE RETAINED AS
# PART OF THIS FILE AT ALL TIMES.
#--------------------------------------------------------------------------------
#!/bin/sh
# NOTE(review): this shebang is not on line 1 of the generated file (a vendor
# copyright header precedes it), so it is effectively a comment.
# Timing simulation flow for the ben_mem block RAM core using Synopsys VCS.
cp ../../../ben_mem.mif .
# Remove artifacts from any previous simulation run
rm -rf simv* csrc DVEfiles AN.DB
echo "Compiling Core Verilog UNISIM/Behavioral model"
vlogan +v2k ../../implement/results/routed.v
echo "Compiling Test Bench Files"
vhdlan ../bmg_tb_pkg.vhd
vhdlan ../addr_gen.vhd
vhdlan ../bmg_stim_gen.vhd
vhdlan ../ben_mem_synth.vhd
vhdlan ../ben_mem_tb.vhd
echo "Elaborating Design"
vcs +neg_tchk +vcs+lic+wait -debug ben_mem_tb glbl
echo "Simulating Design"
./simv -ucli -i ucli_commands.key
dve -session vcs_session.tcl
|
Vadman97/ImageAES
|
vga/ipcore_dir/ben_mem/simulation/timing/simulate_vcs.sh
|
Shell
|
gpl-3.0
| 2,841 |
#!/bin/bash
#######################################
# Recursively count lines in *.c and *.h files under a directory.
# Accumulates into the global 'lines' variable and echoes progress.
# Fixes: no longer parses `ls` output (breaks on spaces/specials), quotes
# all paths, and only matches real .c/.h extensions (the old
# ${file##*.} check also matched files named exactly "c" or "h").
# Globals:   lines (read/written)
# Arguments: $1 - directory to scan
#######################################
function counter_code_lines() {
    local entry
    echo "$1"
    for entry in "$1"/*; do
        [ -e "$entry" ] || continue   # empty dir: glob stays literal
        if [ -d "$entry" ]; then
            counter_code_lines "$entry"
        else
            case "$entry" in
                *.c|*.h)
                    lines=$((lines + $(wc -l < "$entry")))
                    echo "lines $lines"
                    ;;
            esac
        fi
    done
}
# Running total of C source lines; updated globally by counter_code_lines
lines=0
echo $lines
# Root of the tree to scan
# NOTE(review): odd capitalisation "pATH" kept as-is; unquoted use is safe
# only because the value contains no whitespace
pATH="/home/"
counter_code_lines $pATH
echo $lines
|
yy152193/counter_code_lines
|
count.sh
|
Shell
|
gpl-3.0
| 466 |
# Interactive Android build helper: verifies toolchain versions and the
# Android SDK with the user, then runs the ionic/cordova build.
echo ''
echo '--- BUILD FRAMEWORKS ----------'
echo ''
echo "NODE VERSION should be >=6.3 and is"
node --version
echo "CORDOVA VERSION should be >=6.3 and is"
cordova -version
echo "IONIC VERSION should >2.0 and is"
ionic -version
# Fixed typo in the prompt: "corerct" -> "correct"
read -p "--> all versions correct? yes=ENTER / no=CTRL+c"
echo ''
echo '--- ANDROID SDK ---------------'
echo ''
echo "ANDROID SDK --> open another terminal and call 'android'"
echo "do you have the following packages installed:"
echo "- Tools / Android SDK Tools"
echo "- Tools / Android SDK Platform-tools"
echo "- Android 6.0 (API23) / SDK platform"
echo "- Android 4.1.2 (API16) / SDK platform"
echo "- Extras / Android Support Repository"
echo "- Extras / Google Repository"
echo ''
read -p "Android SDK and required packages are installed yes=ENTER / no=CTRL+c"
echo ''
echo '--- BUILDING ------------------'
echo ''
npm install
ionic state reset
ionic resources
ionic state reset
npm install -g bower
bower install
# Android resources must be in place before the platform build
cp -r ./res/* ./platforms/android/res
ionic build android
rm -r ./res
echo ''
echo '--- FURTHER OPTIONS -----------'
echo ''
echo 'For Release Build with check private KeePassX for --> Konfetti Android Build Notes'
echo 'For further Development Builds just call --> ionic build android'
echo ''
rootzoll/konfetti-app
|
_buildAndroid.sh
|
Shell
|
gpl-3.0
| 1,247 |
#! /bin/bash
# Remove generated log and image files from the current directory.
# -f keeps rm quiet when no matching files exist.
rm -f *.log
rm -f *.png
|
emersonmx/ippl
|
clean_tests.sh
|
Shell
|
gpl-3.0
| 38 |
#!/bin/bash
# Symlink dotfiles from ~/conffile into the home directory.
# Fixed shebang: "# /bin/bash" (with a space) is just a comment, so the
# script previously ran under whatever shell invoked it.
#first version
# NOTE(review): ln -s fails if the target already exists; consider -f if
# re-running is expected
cd ~/ || exit 1
ln -s ~/conffile/.tmux.conf .tmux.conf
ln -s ~/conffile/.viminfo .viminfo
ln -s ~/conffile/.vimrc .vimrc
ln -sr ~/conffile/vim vim
cd -
|
flouis1/conffile
|
cfg_copy.sh
|
Shell
|
gpl-3.0
| 171 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.