code | repo_name | path | language | license | size
---|---|---|---|---|---
set +x
set +e
echo ""
echo ""
echo "---"
echo "Now starting POST-BUILD steps"
echo "---"
echo ""
echo INFO: Pointing to $DOCKER_HOST
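# A leading '!' ignores a command's failure so the cleanup steps always continue.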
if [ ! $(docker ps -aq | wc -l) -eq 0 ]; then
echo INFO: Removing containers...
! docker rm -vf $(docker ps -aq)
fi
# Remove all images which don't have docker or ubuntu in the name
if [ ! $(docker images | sed -n '1!p' | grep -v 'docker' | grep -v 'ubuntu' | awk '{ print $3 }' | wc -l) -eq 0 ]; then
echo INFO: Removing images...
! docker rmi -f $(docker images | sed -n '1!p' | grep -v 'docker' | grep -v 'ubuntu' | awk '{ print $3 }')
fi
# Kill off any instances of git, go and docker, just in case
! taskkill -F -IM git.exe -T >& /dev/null
! taskkill -F -IM go.exe -T >& /dev/null
! taskkill -F -IM docker.exe -T >& /dev/null
# Remove everything
! cd /c/jenkins/gopath/src/github.com/docker/docker
! rm -rfd * >& /dev/null
! rm -rfd .* >& /dev/null
echo INFO: Cleanup complete
exit 0
| rawlingsj/gofabric8 | vendor/github.com/docker/docker/hack/Jenkins/W2L/postbuild.sh | Shell | apache-2.0 | 935 |
#!/bin/sh
# Copyright (c) 2005, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ---
# Author: Craig Silverstein
#
# Runs the heap-profiler unittest and makes sure the profile looks appropriate.
#
# We run under the assumption that if $HEAP_PROFILER is run with --help,
# it prints a usage line of the form
# USAGE: <actual executable being run> [...]
#
# This is because libtool sometimes turns the 'executable' into a
# shell script which runs an actual binary somewhere else.
# We expect BINDIR and PPROF_PATH to be set in the environment.
# If not, we set them to some reasonable values
BINDIR="${BINDIR:-.}"
PPROF_PATH="${PPROF_PATH:-$BINDIR/src/pprof}"
if [ "x$1" = "x-h" -o "x$1" = "x--help" ]; then
echo "USAGE: $0 [unittest dir] [path to pprof]"
echo " By default, unittest_dir=$BINDIR, pprof_path=$PPROF_PATH"
exit 1
fi
HEAP_PROFILER="${1:-$BINDIR/heap-profiler_unittest}"
PPROF="${2:-$PPROF_PATH}"
TEST_TMPDIR=/tmp/heap_profile_info
# It's meaningful to the profiler, so make sure we know its state
unset HEAPPROFILE
rm -rf "$TEST_TMPDIR"
mkdir "$TEST_TMPDIR" || exit 2
num_failures=0
# Given one profile (to check the contents of that profile) or two
# profiles (to check the diff between the profiles), and a function
# name, verify that the function name takes up at least 90% of the
# allocated memory. The function name is actually specified first.
VerifyMemFunction() {
function="$1"
shift
# get program name. Note we have to unset HEAPPROFILE so running
# help doesn't overwrite existing profiles.
exec=`unset HEAPPROFILE; $HEAP_PROFILER --help | awk '{print $2; exit;}'`
if [ $# = 2 ]; then
[ -f "$1" ] || { echo "Profile not found: $1"; exit 1; }
[ -f "$2" ] || { echo "Profile not found: $2"; exit 1; }
$PPROF --base="$1" $exec "$2" >"$TEST_TMPDIR/output.pprof" 2>&1
else
[ -f "$1" ] || { echo "Profile not found: $1"; exit 1; }
$PPROF $exec "$1" >"$TEST_TMPDIR/output.pprof" 2>&1
fi
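# awk exits 1 only when $function accounts for >90% of the memory, so an exit status of 1 below means the check passed.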
cat "$TEST_TMPDIR/output.pprof" \
| tr -d % | awk '$6 ~ /^'$function'$/ && $2 > 90 {exit 1;}'
if [ $? != 1 ]; then
echo
echo "--- Test failed for $function: didn't account for 90% of executable memory"
echo "--- Program output:"
cat "$TEST_TMPDIR/output"
echo "--- pprof output:"
cat "$TEST_TMPDIR/output.pprof"
echo "---"
num_failures=`expr $num_failures + 1`
fi
}
VerifyOutputContains() {
text="$1"
if ! grep "$text" "$TEST_TMPDIR/output" >/dev/null 2>&1; then
echo "--- Test failed: output does not contain '$text'"
echo "--- Program output:"
cat "$TEST_TMPDIR/output"
echo "---"
num_failures=`expr $num_failures + 1`
fi
}
HEAPPROFILE="$TEST_TMPDIR/test"
HEAP_PROFILE_INUSE_INTERVAL="10240" # need this to be 10Kb
HEAP_PROFILE_ALLOCATION_INTERVAL="$HEAP_PROFILE_INUSE_INTERVAL"
HEAP_PROFILE_DEALLOCATION_INTERVAL="$HEAP_PROFILE_INUSE_INTERVAL"
export HEAPPROFILE
export HEAP_PROFILE_INUSE_INTERVAL
export HEAP_PROFILE_ALLOCATION_INTERVAL
export HEAP_PROFILE_DEALLOCATION_INTERVAL
# We make the unittest run a child process, to test that the child
# process doesn't try to write a heap profile as well and step on the
# parent's toes. If it does, we expect the parent-test to fail.
$HEAP_PROFILER 1 >$TEST_TMPDIR/output 2>&1 # run program, with 1 child proc
VerifyMemFunction Allocate2 "$HEAPPROFILE.1329.heap"
VerifyMemFunction Allocate "$HEAPPROFILE.1448.heap" "$HEAPPROFILE.1548.heap"
# Check the child process got to emit its own profile as well.
VerifyMemFunction Allocate2 "$HEAPPROFILE"_*.1329.heap
VerifyMemFunction Allocate "$HEAPPROFILE"_*.1448.heap "$HEAPPROFILE"_*.1548.heap
# Make sure we logged both about allocating and deallocating memory
VerifyOutputContains "62 MB allocated"
VerifyOutputContains "62 MB freed"
# Now try running without --heap_profile specified, to allow
# testing of the HeapProfileStart/Stop functionality.
$HEAP_PROFILER >"$TEST_TMPDIR/output2" 2>&1
rm -rf "$TEST_TMPDIR" # clean up
if [ $num_failures = 0 ]; then
echo "PASS"
else
echo "Tests finished with $num_failures failures"
fi
exit $num_failures
| yaozongyou/common | third_party/gperftools/src/tests/heap-profiler_unittest.sh | Shell | mit | 5,567 |
# Test the password_file parameter from the config file.
use_config simple
echo "set password_file fake_gpg_home/.mdp/alternative" >> test.config
rm -f fake_gpg_home/.mdp/passwords
rm -f fake_gpg_home/.mdp/alternative
run_mdp edit
assert_file_exists fake_gpg_home/.mdp/alternative
| tamentis/mdp | tests/functional/test_custom_password_file.sh | Shell | isc | 285 |
#!/bin/bash
sudo apt-get install darktable
| dartmedved/ubuntu-postinstall-v2 | graphics/install-darktable.sh | Shell | mit | 42 |
#!/bin/bash
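# Deploy: refresh Composer dependencies, reload the sarasoft database, clear caches, and normalize permissions.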
php /bin/composer.phar clear-cache;
php /bin/composer.phar install;
mysql -uroot -p sarasoft < sarasoft.sql;
rm -rf var/cache/prod/*;
php bin/console cache:clear --env prod;
find * -type d -exec chmod 770 {} \;
find * -type f -exec chmod 660 {} \;
chown -R apache:apache .;
php bin/console security:check
chmod 770 deploy.sh
| RicardoSaracino/sarasoft | deploy.sh | Shell | mit | 344 |
# core things
# environment variables
[ -d ${HOME}/bin ] && export PATH="${PATH}:${HOME}/bin"
export MC_COLOR_TABLE=editnormal=:normal=
export LESSCHARSET=utf-8
export GREP_COLOR="1;33"
# unlimited shell history
export HISTSIZE=-1
export HISTFILESIZE=-1
# aliases
alias grep='grep --color=auto'
alias ls='ls -G'
alias ll='ls -lh'
alias la='ll -a'
alias lll='ll -@'
alias pstree='pstree -g 2'
alias top='top -o cpu -O rsize'
alias scr='screen -r'
alias cdc='cd;clear'
alias grephash='grep -v -e "^\s*#"'
alias grephashempty='grep -v -e "^\s*#" -e "^$"'
alias 7zPPMd='7z a -t7z -mx=9 -m0=PPMd'
alias 7zUltra='7z a -t7z -m0=lzma -mx=9 -mfb=64 -md=32m -ms=on'
alias openPorts='lsof -P -iTCP -sTCP:LISTEN'
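# normalize: reset ownership to the current user and apply standard permissions (dirs 755, files 644).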
alias normalize='chown -R $USER:$MAIN_GROUP *; find . -type d -exec chmod 755 {} \;; find . -type f -exec chmod 644 {} \;'
alias make_sh_executable='find . -type f -iname "*.sh" -exec chmod 755 {} \;'
if [ "$USER" = "root" ]; then
alias rm='rm -i'
alias cp='cp -i'
alias mv='mv -i'
fi
alias last5='ls -t | head -n 5'
alias last10='ls -t | head -n 10'
alias last15='ls -t | head -n 15'
alias lastx='ls -t | head -n'
# functions
bullet() {
if [ -t 1 ]; then
case "$1" in
red)
echo -e -n "\033[1;31m"
;;
green)
echo -e -n "\033[1;32m"
;;
yellow)
echo -e -n "\033[1;33m"
;;
esac
echo -e -n "●\033[0m "
else
echo -n "* "
fi
}
bullet_err() {
if [ -t 2 ]; then
case "$1" in
red)
echo -e -n "\033[1;31m"
;;
green)
echo -e -n "\033[1;32m"
;;
yellow)
echo -e -n "\033[1;33m"
;;
esac
echo -e -n "●\033[0m "
else
echo -n "* "
fi
}
hr() {
cols=$(($(tput cols)-1))
for ((i=0; i<$cols; i++)) {
echo -n "―"
}
echo
}
dus() {
du -sh "$@" | sort -h
}
sortedless() {
sort "$1" | less
}
ex() {
if [[ -f $1 ]]; then
case $1 in
*.tar.bz2) tar xvjf $1;;
*.tar.gz) tar xvzf $1;;
*.tar.xz) tar xvJf $1;;
*.tar.lzma) tar --lzma xvf $1;;
*.bz2) bunzip2 $1;;
*.rar) unrar x $1;;
*.gz) gunzip $1;;
*.tar) tar xvf $1;;
*.tbz2) tar xvjf $1;;
*.tgz) tar xvzf $1;;
*.zip) unzip $1;;
*.Z) uncompress $1;;
*.7z) 7z x $1;;
*.dmg) hdiutil mount $1;; # mount OS X disk images
*) echo "'$1' cannot be extracted via >ex<";;
esac
else
echo "'$1' is not a valid file"
fi
}
mcd() {
mkdir -p "$1" && cd "$1";
}
md5() {
echo -n "$1" | openssl md5 /dev/stdin
}
sha1() {
echo -n "$1" | openssl sha1 /dev/stdin
}
sha256() {
echo -n "$1" | openssl dgst -sha256 /dev/stdin
}
sha512() {
echo -n "$1" | openssl dgst -sha512 /dev/stdin
}
rot13() {
echo "$1" | tr "A-Za-z" "N-ZA-Mn-za-m"
}
rot47() {
echo "$1" | tr "\!-~" "P-~\!-O"
}
urlencode() {
python3 -c "import sys, urllib.parse as ul; print(ul.quote_plus(sys.argv[1]))" "$1"
}
urldecode() {
python3 -c "import sys, urllib.parse as ul; print(ul.unquote_plus(sys.argv[1]))" "$1"
}
millis() {
python -c "import time; print(int(time.time()*1000))"
}
nanos() {
python -c "import time; print(int(time.time()*1000000000))"
}
weather() {
wget -q -O- http://wttr.in/Budapest
}
findAll() {
eval "find \"$1\" -iname '$2'"
}
findFile() {
eval "find \"$1\" -type f -iname '$2'"
}
findDir() {
eval "find \"$1\" -type d -iname '$2'"
}
withoutExt() {
echo "${1%.*}"
}
backup() {
cp "${1}" "${1}.bck-$(date '+%Y%m%d%H%M%S')"
}
# [-d days|-b rsa-bits] <path-base (will be appended by .key and .crt)>
create-openssl-key-and-cert() {
if [ $# -lt 1 ]; then
echo "Usage: create-openssl-key-and-cert [-d days|-b rsa-bits] <path-base (will be appended by .key and .crt)>"
return 1
fi
days=365; bits=2048; pathbase=""
while [ ! -z "$1" ]; do
param="$1"; shift
case "${param}" in
-d)
if [ -z "$1" ]; then echo "parameter -d (days) requires a value" >&2; return 2; fi
days=$1; shift
;;
-b)
if [ -z "$1" ]; then echo "parameter -b (rsa bits) requires a value" >&2; return 2; fi
bits=$1; shift
;;
*)
pathbase="${param}"
;;
esac
done
if [ -z "$pathbase" ]; then echo "base path required" >&2; return 3; fi
openssl req -x509 -nodes -days $days -newkey rsa:$bits -keyout "${pathbase}.key" -out "${pathbase}.crt"
echo ""
echo "nginx config example: "
echo -e "\tssl_certificate ${pathbase}.crt;"
echo -e "\tssl_certificate_key ${pathbase}.key;"
echo -e "\tssl_dhparam /etc/ssl/certs/dhparam.pem;"
echo -e "\t"
echo -e "\tssl on;"
echo -e "\tssl_session_cache builtin:1000 shared:SSL:10m;"
echo -e "\tssl_protocols TLSv1 TLSv1.1 TLSv1.2;"
echo -e "\tssl_ciphers HIGH:!aNULL:!eNULL:!EXPORT:!CAMELLIA:!DES:!MD5:!PSK:!RC4;"
echo -e "\tssl_prefer_server_ciphers on;"
echo ""
}
create-openssl-dh() {
if [ $# -lt 1 ]; then
echo "Usage: create-openssl-dh [-b bits] <path (ex: dh2048.pem)>"
return 1
fi
bits=2048; pathbase=""
while [ ! -z "$1" ]; do
param="$1"; shift
case "${param}" in
-b)
if [ -z "$1" ]; then echo "parameter -b (rsa bits) requires a value" >&2; return 2; fi
bits=$1; shift
;;
*)
pathbase="${param}"
;;
esac
done
if [ -z "$pathbase" ]; then echo "path required" >&2; return 3; fi
openssl dhparam -out "${pathbase}" $bits
echo ""
echo "nginx config example: "
echo -e "\tssl_dhparam\t\t\t${pathbase};"
echo ""
}
run-in() {
if [ "_$1" == "_-v" ]; then
verbose=1
shift
else
verbose=0
fi
target_dir="$1"
shift
run_command="$@"
if [ -z "$run_command" ]; then
echo "Run command has to be specified" >&2
return 2
fi
if [ ! -d "$target_dir" ]; then
echo "Target dir \"${target_dir}\" doestn't exists" >&2
return 1
fi
curdir="$(pwd)"
for run_in_dir in $(find "$target_dir" -maxdepth 1 -type d -not -name '.*'); do
cd "$run_in_dir"
if [ $? -ne 0 ]; then
cd "$curdir"
echo "Could not enter directory \"${run_in_dir}\"" >&2
return 1
else
[ ${verbose} -eq 1 ] && echo "Running in \"${run_in_dir}\"... "
eval ${run_command}
fi
cd "$curdir"
done
}
set-session-title() {
export SESSION_TITLE="$@"
}
unset-session-title() {
export SESSION_TITLE=""
}
link-rc-scripts() {
for RC in ${SCRIPT_BASE_DIR}/rc/*rc; do
HOMERCNAME="${HOME}/.$(basename ${RC})"
echo "Linking $RC to $HOMERCNAME ..."
if [ -f "${HOMERCNAME}" ]; then
rm -f "${HOMERCNAME}" 2>&1 >/dev/null
fi
ln -s "${RC}" "${HOMERCNAME}"
done
}
update-shellrc() {
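# Scrape release version tags from the GitHub releases page and sort them newest-first.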
VERSIONS="$(wget --no-cache -q -O- 'https://github.com/majk1/shellrc/releases' | grep "/majk1/shellrc/archive/.*\.tar\.gz" | sed '/\/majk1\/shellrc\/archive\/refs\/tags\/.*\.tar\.gz/s/.*href="\/majk1\/shellrc\/archive\/refs\/tags\/\([0-9\.]*\)\.tar\.gz".*/\1/' | sort -Vr)"
LATEST="$(echo "$VERSIONS" | head -n 1)"
if [ "$LATEST" != "$SHELLRC_VERSION" ]; then
echo -n "Upgrade shellrc-scripts to version ${LATEST} [y/n]? "
read ANSWER
if [ "$ANSWER" != "y" ]; then
echo "Stopping upgrade. Bye"
else
wget --no-cache -q -O- "https://majk1.github.io/shellrc/installer.sh" | bash -s -- -u
fi
else
if [ ! -z "$1" ]; then
if [ "$1" == "--force" ]; then
echo -n "Reinstall shellrc-scripts version ${LATEST} [y/n]? "
read ANSWER
if [ "$ANSWER" != "y" ]; then
echo "Stopping reinstall. Bye"
else
wget --no-cache -q -O- "https://majk1.github.io/shellrc/installer.sh" | bash -s -- -u
fi
fi
else
echo "You have the latest version (${LATEST})."
echo "To force reinstall, run update-shellrc --force"
fi
fi
}
if type -p source-highlight >/dev/null 2>&1; then
alias colorcat='source-highlight -n -f esc -i '
fi
# includes
source $SCRIPT_BASE_DIR/idea.sh
source $SCRIPT_BASE_DIR/git.sh
source $SCRIPT_BASE_DIR/java.sh
source $SCRIPT_BASE_DIR/mvn.sh
source $SCRIPT_BASE_DIR/docker.sh
if [ -f ${HOME}/.customshellrc ]; then
source ${HOME}/.customshellrc
fi
| majk1/shellrc | core.sh | Shell | mit | 8,909 |
#!/bin/sh
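# Fail if any JS file under src, test or crossdock (excluding thrift_gen) lacks a license/generated marker in its first three lines.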
licRes=$(
for file in $(find src test crossdock -type f -iname '*.js' ! -path '*/thrift_gen/*'); do
head -n3 "${file}" | grep -Eq "(Copyright|generated|GENERATED)" || echo " ${file}"
done;)
if [ -n "${licRes}" ]; then
echo "license header check failed:\n${licRes}"
exit 255
fi
| uber/jaeger-client-node | scripts/check-license.sh | Shell | mit | 292 |
#!/usr/bin/env bash
docker build -t screwdriver-docker-proxy .
| tco/screwdriver-docker-proxy | build.sh | Shell | mit | 63 |
#!/bin/bash -e
clear
echo "============================================"
echo "Proton Template Install Script"
echo "============================================"
echo "Menu: "
echo " i: Install templates"
echo " u: Uninstall templates"
echo ""
echo "Enter a command: "
read -e command
if [ "$command" == i ] ; then
echo "============================================"
echo "Installing templates..."
echo "============================================"
mkdir -p ~/Library/Developer/Xcode/Templates/File\ Templates/Source
cp -rf ./Proton\ Page.xctemplate ~/Library/Developer/Xcode/Templates/File\ Templates/Source
echo "Done."
else
echo "============================================"
echo "Uninstalling templates"
echo "============================================"
rm -rf ~/Library/Developer/Xcode/Templates/File\ Templates/Source/Proton\ Page.xctemplate
echo "Done."
fi
| IMcD23/Proton | Templates/install_templates.command | Shell | mit | 882 |
#!/bin/bash
echo "Cloning dotfiles"
git clone --depth=1 https://github.com/wyze/dotfiles.git "$HOME/.dotfiles"
cd "$HOME/.dotfiles"
# Ask for the administrator password upfront
sudo -v &> /dev/null
# Update existing `sudo` time stamp until this script has finished
# https://gist.github.com/cowboy/3118588
while true; do
sudo -n true
sleep 60
kill -0 "$$" || exit
done &> /dev/null &
echo "Installing dotfiles"
source install/link.sh
if [ "$(uname)" == "Darwin" ]; then
echo "Running OSX installation"
echo "Brewing it up"
source install/brew.sh
echo "Setting defaults on OSX"
source osx/set-defaults.sh
echo "Installing Prezto"
source install/prezto.sh
echo "Installing Node via nvm"
source install/node.sh
echo "Installing applications"
source install/apps.sh
echo "Installing Fira Code"
source install/firacode.sh
fi
# iTerm 2 settings
cp com.googlecode.iterm2.plist ~/Library/Preferences/
ZSH="$(which zsh)"
SHELLS="/etc/shells"
# Check to make sure we can set the shell
if ! grep -qs $ZSH $SHELLS
then
echo "Adding zsh to $SHELLS"
echo $ZSH | sudo tee -a $SHELLS
fi
echo "Configuring zsh as default shell"
chsh -s $(which zsh)
echo "Completed"
echo "Sleeping 10 seconds before killing applications"
sleep 10
###############################################################################
# Kill affected applications                                                  #
###############################################################################
for app in "Activity Monitor" "Address Book" "Calendar" "Contacts" "cfprefsd" \
"Dock" "Finder" "Google Chrome" "Google Chrome Canary" "Mail" "Messages" \
"Opera" "Safari" "SystemUIServer" "Terminal" "Twitter" "iCal" "Spotlight"; do
sudo killall "${app}" > /dev/null 2>&1
done
| wyze/dotfiles | install.sh | Shell | mit | 1,799 |
#!/bin/sh
whenever -w
cron
tail -f /var/log/whenever.log
| policygenius/seal | start-cron.sh | Shell | mit | 57 |
# added by travis gem
[ ! -s /Users/amasa.amos/.travis/travis.sh ] || source /Users/amasa.amos/.travis/travis.sh
| asamasoma/dotfiles | travis/.zsh/travis.zsh | Shell | mit | 112 |
txtblk='\[\e[0;30m\]' # Black - Regular
txtred='\[\e[0;31m\]' # Red
txtgrn='\[\e[0;32m\]' # Green
txtylw='\[\e[0;33m\]' # Yellow
txtblu='\[\e[0;34m\]' # Blue
txtpur='\[\e[0;35m\]' # Purple
txtcyn='\[\e[0;36m\]' # Cyan
txtwht='\[\e[0;37m\]' # White
bldblk='\[\e[1;30m\]' # Black - Bold
bldred='\[\e[1;31m\]' # Red
bldgrn='\[\e[1;32m\]' # Green
bldylw='\[\e[1;33m\]' # Yellow
bldblu='\[\e[1;34m\]' # Blue
bldpur='\[\e[1;35m\]' # Purple
bldcyn='\[\e[1;36m\]' # Cyan
bldwht='\[\e[1;37m\]' # White
unkblk='\[\e[4;30m\]' # Black - Underline
undred='\[\e[4;31m\]' # Red
undgrn='\[\e[4;32m\]' # Green
undylw='\[\e[4;33m\]' # Yellow
undblu='\[\e[4;34m\]' # Blue
undpur='\[\e[4;35m\]' # Purple
undcyn='\[\e[4;36m\]' # Cyan
undwht='\[\e[4;37m\]' # White
bakblk='\[\e[40m\]' # Black - Background
bakred='\[\e[41m\]' # Red
bakgrn='\[\e[42m\]' # Green
bakylw='\[\e[43m\]' # Yellow
bakblu='\[\e[44m\]' # Blue
bakpur='\[\e[45m\]' # Purple
bakcyn='\[\e[46m\]' # Cyan
bakwht='\[\e[47m\]' # White
txtrst='\[\e[0m\]' # Text Reset
| bootstraps/dotfiles | bash/colors.sh | Shell | mit | 1,029 |
#!/bin/bash
#Uncomment out this if you want full debug output
#set -xe
# This script can be used to setup the Kubeadm All-in-One environment on Ubuntu 16.04.
# This script should not be run as root but as a different user. Create a new user
# and give it root privileges if required.
if [[ $EUID -eq 0 ]]; then echo "This script should not be run using sudo or as the root user"; exit 1; fi
### Declare colors to use during the running of this script:
declare -r GREEN="\033[0;32m"
declare -r RED="\033[0;31m"
declare -r YELLOW="\033[0;33m"
function echo_green {
echo -e "${GREEN}$1"; tput sgr0
}
function echo_red {
echo -e "${RED}$1"; tput sgr0
}
function echo_yellow {
echo -e "${YELLOW}$1"; tput sgr0
}
source os-helm-env
cd ~/
echo_green "\n Sourcing os-helm-env variables"
echo_green "\n Installing OS Helm Tiller Kubernetes Infrastructure"
source ~/os-helm-aio-installer/os-helm-kube-infrastructure.sh
echo_green "\n Installing OpenStack"
read -p "Use FQDN settings for Helm Charts installer? y/n " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]
then
source ~/os-helm-aio-installer/os-helm-wFQDN.sh
else
source ~/os-helm-aio-installer/os-helm-only.sh
fi
# Assuming OS-Helm installed first with Ingress
echo_green "\n Installing Kubernetes Infra Weave w Ingress"
source ~/os-helm-aio-installer/kube-weavescope-fqdn.sh
echo_green "\n Installing Kubernetes Infra Dashboard w Ingress"
source ~/os-helm-aio-installer/kube-dashboard-fqdn.sh
| lukepatrick/os-helm-aio-installer | os-helm-all-together.sh | Shell | mit | 1,456 |
#!/bin/bash
bash update.sh
bash ../renewPerms.sh
service apache2 restart
| lcdi/Inventory | restart.sh | Shell | mit | 72 |
#!/bin/sh
### direct2cache head START ###
_echo2cache() { echo "$@" 2>/dev/null >&9 || true; }
_cat2cache() { cat "$@" 2>/dev/null >&9 || true; }
#### direct2cache head END ####
. "$HOME/.execfunc.sh"
| YoungCatChen/home-rc | profile.d/02-tocache.sh | Shell | mit | 206 |
#!/bin/bash
#
# backs up the entire system, excluding various directories
# that are inappropriate to backup (e.g. proc-fs, dev tree,
# etc.), using rsync
#
include=/backup/include.files
exclude=/backup/exclude.files
lastbup=/backup/backup.log
lockbup=/backup/.backup.lock
rotates=/backup/rotate.sh
# comment out to actually perform backup
#rsyncdryrun=--dry-run
function readablesec
{
if [[ $# -gt 0 ]]
then
seconds=$1
if [[ $seconds -gt 86400 ]] # seconds in a day
then
printf "%d days " $(( seconds / 86400 ))
fi
date -d "1970-01-01 + $seconds seconds" "+%H hrs %M min %S sec"
fi
}
function logline
{
if [[ $# -gt 0 ]]
then
file=$1
if [[ -f "${file}" ]]
then
echo "[backup]file=${file};md5="$( md5sum "${file}" | \grep -oP '^\S+' )";"
fi
fi
}
if [[ $# -ge 2 ]]
then
# ---------------------------------------------------------------------------
# verify arguments
# ---------------------------------------------------------------------------
source=$1
target=$2
if [[ -e "${lockbup}" ]]
then
currbup="$( cat ${lockbup} | tr -d '\n' )"
echo "error: backup currently in progress: ${currbup}"
exit 1
fi
if [[ ! -d "${source}" ]]
then
echo "error: invalid source directory: ${source}"
exit 2
fi
if [[ ! -d "${target}" ]]
then
echo "error: invalid target directory: ${target}"
exit 3
fi
# ---------------------------------------------------------------------------
# configure filenames
# ---------------------------------------------------------------------------
realdate=$( date ) # use standard formatting with rotation script
datetime=$( date "+%Y-%m-%d__%H-%M-%S" -d "${realdate}" )
oskernel=$( printf "%s (%s)" \
"$( lsb_release -d | \grep -oP '(?<=^Description:\t).+$' )" \
"$( uname -r )" \
| tr ' ' '_' )
backupname="${oskernel}__${datetime}.tbz"
targetname="${oskernel}"
# ---------------------------------------------------------------------------
# configure tool options
# ---------------------------------------------------------------------------
if [[ -z ${rsyncdryrun} ]]
then
rsyncprogress=--info=progress2
else
rsyncprogress=-v
fi
# ---------------------------------------------------------------------------
# rotate the old backup files
# ---------------------------------------------------------------------------
rotatetime=$( date "+%s" )
echo "===================================================================="
echo " [+] rotating backup files ..."
echo
"${rotates}" "${target}" "${realdate}"
rotateduration=$(( $( date "+%s" ) - ${rotatetime} ))
echo
echo " [+] done ($( readablesec ${rotateduration} ))"
echo "===================================================================="
echo
# ---------------------------------------------------------------------------
# perform the backup
# ---------------------------------------------------------------------------
backuppath="${target}/${backupname}"
targetpath="${target}/${targetname}"
# add a file indicating we are performing a backup
echo "${backuppath}" > "${lockbup}"
rsynctime=$( date "+%s" )
echo "===================================================================="
echo " [+] syncing filesystem with backup ..."
echo
# copy all files from root fs to the backup fs
rsync ${rsyncdryrun} -axHAWXS \
--numeric-ids \
${rsyncprogress} \
--delete \
--exclude-from="${exclude}" \
--include-from="${include}" \
"${source}/" \
"${targetpath}"
rsyncduration=$(( $( date "+%s" ) - ${rsynctime} ))
echo
echo " [+] done ($( readablesec ${rsyncduration} ))"
echo "===================================================================="
echo
tarballtime=$( date "+%s" )
echo "===================================================================="
echo " [+] compressing backup filesystem ..."
echo
if [[ -z ${rsyncdryrun} ]]
then
# create a tarball using the SMP-aware bzip2 utility (to take advantage of
# all 16 CPU cores we have) with highest+slowest compression ratio (-9)
tar -c "${targetpath}" | pbzip2 -m2000 -p16 -9 -kcvz > "${backuppath}"
fi
tarballduration=$(( $( date "+%s" ) - ${tarballtime} ))
echo
echo " [+] done ($( readablesec ${tarballduration} ))"
echo "===================================================================="
echo
if [[ -z ${rsyncdryrun} ]]
then
# log the backup
logline "${backuppath}" >> "${lastbup}"
fi
# remove the backup lock file
rm -f "${lockbup}"
echo "===================================================================="
echo " [+] backup completed in $( readablesec $(( ${rsyncduration} + ${tarballduration} )) )"
echo " [+] current mirror: ${targetpath}/"
echo " [+] current snapshot: ${backuppath}"
echo "===================================================================="
else
cat <<USAGE
usage:
bash $0 <source-dir> <target-dir>
USAGE
exit 255
fi
| ardnew/shutils | sysbackup/backup.sh | Shell | mit | 5,070 |
#!/bin/bash
# directory navigation functions & aliases
alias ls='ls --color=auto'
alias ll='ls -lahF'
alias ...='.. 2'
alias ....='.. 3'
alias .....='.. 4'
alias ......='.. 5'
function ..() {
typeset levels up="" i
echo $1 | grep -E "^[0-9]+$" > /dev/null && levels=$1 || levels=1
for ((i=0; i<$levels; i++)) do
up="${up}../"
done
cd $up
}
function ..l() { .. $1 && ll; }
function cdl() { cd $1 && ll; }
function mang() {
man $1 | grep --color=auto --line-number --context=${3:-2} $2
}
function manl() {
man $1 | less -p $2
}
alias tailf='tail -f'
alias du='du -sh'
alias df='df -h'
alias bash-stats="history | awk '{print \$2}' | sort | uniq -c | sort -rn | head -n20"
#
# History
#
# export HISTCONTROL="ignoredups" # ignore duplicates(store command once)
# export HISTIGNORE="&:ls:ll:[bf]g:exit:%[0-9]" # ignore simple commands
export HISTFILESIZE=10000 # history file size
#export GREP_OPTIONS='--color=auto --exclude=tags --exclude=TAGS --exclude-dir=.git'
# Local bin takes precedence
[ -d ~/bin ] && export PATH=~/bin:$PATH
# Learn something new every day
if command -v whatis >/dev/null 2>&1; then
echo "Did you know that:"; whatis $(ls /bin | shuf -n 1)
fi
| avishefi/dotfiles | profiles/bash/plugins/common.plugin.sh | Shell | mit | 1,228 |
#!/bin/bash
pushd .
cd build
make && ./RunTests
popd
| spajus/sdl2-snake | scripts/test.sh | Shell | mit | 51 |
#!/bin/sh
set -e
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
install_framework()
{
if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
local source="${BUILT_PRODUCTS_DIR}/$1"
elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
elif [ -r "$1" ]; then
local source="$1"
fi
local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
if [ -L "${source}" ]; then
echo "Symlinked..."
source="$(readlink "${source}")"
fi
# use filter instead of exclude so missing patterns don't throw errors
echo "rsync -av --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
rsync -av --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
local basename
basename="$(basename -s .framework "$1")"
binary="${destination}/${basename}.framework/${basename}"
if ! [ -r "$binary" ]; then
binary="${destination}/${basename}"
fi
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
strip_invalid_archs "$binary"
fi
# Resign the code if required by the build settings to avoid unstable apps
code_sign_if_enabled "${destination}/$(basename "$1")"
# Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]})
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
code_sign_if_enabled "${destination}/${lib}"
done
fi
}
# Signs a framework with the provided identity
code_sign_if_enabled() {
if [ -n "${EXPANDED_CODE_SIGN_IDENTITY}" -a "${CODE_SIGNING_REQUIRED}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
# Use the current code_sign_identity
echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
echo "/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS} --preserve-metadata=identifier,entitlements \"$1\""
/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS} --preserve-metadata=identifier,entitlements "$1"
fi
}
# Strip invalid architectures
strip_invalid_archs() {
binary="$1"
# Get architectures for current file
archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | rev)"
stripped=""
for arch in $archs; do
if ! [[ "${VALID_ARCHS}" == *"$arch"* ]]; then
# Strip non-valid architectures in-place
lipo -remove "$arch" -output "$binary" "$binary" || exit 1
stripped="$stripped $arch"
fi
done
if [[ "$stripped" ]]; then
echo "Stripped $binary of architectures:$stripped"
fi
}
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_framework "$BUILT_PRODUCTS_DIR/Bolts/Bolts.framework"
install_framework "$BUILT_PRODUCTS_DIR/FBSDKCoreKit/FBSDKCoreKit.framework"
install_framework "$BUILT_PRODUCTS_DIR/GAFBCustomTitleButton/GAFBCustomTitleButton.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_framework "$BUILT_PRODUCTS_DIR/Bolts/Bolts.framework"
install_framework "$BUILT_PRODUCTS_DIR/FBSDKCoreKit/FBSDKCoreKit.framework"
install_framework "$BUILT_PRODUCTS_DIR/GAFBCustomTitleButton/GAFBCustomTitleButton.framework"
fi
| george-gw/GAFBCustomTitleButton | Example/Pods/Target Support Files/Pods-GAFBCustomTitleButton_Tests/Pods-GAFBCustomTitleButton_Tests-frameworks.sh | Shell | mit | 3,945 |
#!/bin/bash
if [ -z "$GOPATH" ]; then
echo "Need to set GOPATH see http://golang.org for more info"
exit 1
fi
# Install third party packages
echo "Installing Dependencies"
go get github.com/gorilla/mux
# Compile and install
echo "Building source..."
go install github.com/sjhitchner/annotator
echo "Run $GOPATH/bin/annotator"
| sjhitchner/annotator | build.sh | Shell | mit | 338 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for USN-3137-1
#
# Security announcement date: 2016-11-23 00:00:00 UTC
# Script generation date: 2017-01-01 21:05:45 UTC
#
# Operating System: Ubuntu 16.10
# Architecture: i386
#
# Vulnerable packages fixed in version:
# - python-moinmoin:1.9.8-1ubuntu1.16.10.1
#
# Last versions recommended by security team:
# - python-moinmoin:1.9.8-1ubuntu1.16.10.1
#
# CVE List:
# - CVE-2016-7146
# - CVE-2016-7148
# - CVE-2016-9119
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade python-moinmoin=1.9.8-1ubuntu1.16.10.1 -y
| Cyberwatch/cbw-security-fixes | Ubuntu_16.10/i386/2016/USN-3137-1.sh | Shell | mit | 695 |
#!/bin/bash
set -e # Exit with nonzero exit code if anything fails
function doCompile {
./_compile.sh
}
# Pull requests and commits to other branches shouldn't try to deploy, just build to verify
if [ "$TRAVIS_PULL_REQUEST" != "false" -o "$TRAVIS_BRANCH" != "$SOURCE_BRANCH" ]; then
echo "Skipping deploy; just doing a build."
doCompile
exit 0
fi
# Save some useful information
REPO=`git config remote.origin.url`
SSH_REPO=${REPO/https:\/\/github.com\//[email protected]:}
SHA=`git rev-parse --verify HEAD`
# Clone the existing gh-pages for this repo into out/
# Create a new empty branch if gh-pages doesn't exist yet (should only happen on first deploy)
git clone $REPO out
cd out
git checkout $TARGET_BRANCH || git checkout --orphan $TARGET_BRANCH
cd ..
# Clean out existing contents
rm -rf out/* || exit 0
# Run our compile script
doCompile
# Now let's go have some fun with the cloned repo
cd out
git config user.name "Travis CI"
git config user.email "$COMMIT_AUTHOR_EMAIL"
# If there are no changes to the compiled out (e.g. this is a README update) then just bail.
# THIS DOES NOT WORK WITH NEW FILES
#if [ -z `git diff --exit-code` ]; then
# echo "No changes to the output on this push; exiting."
# exit 0
#fi
# Commit the "changes", i.e. the new version.
# The delta will show diffs between new and old versions.
git add -A
git commit -m "Deploy to GitHub Pages: ${SHA}"
# Get the deploy key by using Travis's stored variables to decrypt _deploy_key.enc
ENCRYPTED_KEY_VAR="encrypted_${ENCRYPTION_LABEL}_key"
ENCRYPTED_IV_VAR="encrypted_${ENCRYPTION_LABEL}_iv"
ENCRYPTED_KEY=${!ENCRYPTED_KEY_VAR}
ENCRYPTED_IV=${!ENCRYPTED_IV_VAR}
openssl aes-256-cbc -K $ENCRYPTED_KEY -iv $ENCRYPTED_IV -in ../_deploy_key.enc -out ../_deploy_key -d
chmod 600 ../_deploy_key
eval `ssh-agent -s`
ssh-add ../_deploy_key
# Now that we're all set up, we can push.
git push $SSH_REPO $TARGET_BRANCH
| idealabasu/asurobotics | _deploy.sh | Shell | mit | 1,913 |
#!/bin/sh
POSTGRES_USER=postgres
POSTGRES_DB=postgres
POSTGIS_INSTANCE=${1:-"osmdb"}
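# The password defaults to 12 random characters drawn from /dev/urandom.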
POSTGIS_PASSWORD=${2:-$(cat /dev/urandom | tr -dc _A-Z-a-z-0-9 | head -c12)}
docker run --name ${POSTGIS_INSTANCE} \
-e POSTGRES_PASSWORD=${POSTGIS_PASSWORD} \
-e POSTGRES_USER=${POSTGRES_USER} \
-e POSTGRES_DB=${POSTGRES_DB} \
-d osmtw/postgis
| OsmHackTW/osm2pgsql-docker | postgis.sh | Shell | mit | 345 |
#!/bin/bash
set -e -x
source .github/scripts/retry.sh
brew install pkg-config meson
# General note:
# Apple guarantees forward, but not backward ABI compatibility unless
# the deployment target is set for the oldest supported OS.
# (https://trac.macports.org/ticket/54332#comment:2)
# Used by CMake, clang, and Python's distutils
export MACOSX_DEPLOYMENT_TARGET=$MACOS_MIN_VERSION
# The Python variant to install, see exception below.
export PYTHON_INSTALLER_MACOS_VERSION=$MACOS_MIN_VERSION
# Work-around issue building on newer XCode versions.
# https://github.com/pandas-dev/pandas/issues/23424#issuecomment-446393981
if [ $PYTHON_VERSION == "3.5" ]; then
# No 10.9 installer available, use 10.6.
# The resulting wheel platform tags still have 10.6 (=target of Python itself),
# even though technically the wheel should only be run on 10.9 upwards.
# This is fixed manually below by renaming the wheel.
# See https://github.com/pypa/wheel/issues/312.
export PYTHON_INSTALLER_MACOS_VERSION=10.6
fi
# Install Python
# Note: The GitHub Actions supplied Python versions are not used
# as they are built without MACOSX_DEPLOYMENT_TARGET/-mmacosx-version-min
# being set to an older target for widest wheel compatibility.
# Instead we install python.org binaries which are built with 10.6/10.9 target
# and hence provide wider compatibility for the wheels we create.
# See https://github.com/actions/setup-python/issues/26.
git clone https://github.com/matthew-brett/multibuild.git
pushd multibuild
set +x # reduce noise
source osx_utils.sh
get_macpython_environment $PYTHON_VERSION venv $PYTHON_INSTALLER_MACOS_VERSION
source venv/bin/activate
set -x
popd
# Install dependencies
retry pip install numpy==$NUMPY_VERSION cython wheel delocate
# List installed packages
pip freeze
# Shared library dependencies are built from source to respect MACOSX_DEPLOYMENT_TARGET.
# Bottles from Homebrew cannot be used as they always have a target that
# matches the host OS. Unfortunately, building from source with Homebrew
# is also not an option as the MACOSX_DEPLOYMENT_TARGET env var cannot
# be forwarded to the build (Homebrew cleans the environment).
# See https://discourse.brew.sh/t/it-is-possible-to-build-packages-that-are-compatible-with-older-macos-versions/4421
LIB_INSTALL_PREFIX=$(pwd)/external/libs
export PKG_CONFIG_PATH=$LIB_INSTALL_PREFIX/lib/pkgconfig
export LIBRARY_PATH=$LIB_INSTALL_PREFIX/lib
export PATH=$LIB_INSTALL_PREFIX/bin:$PATH
# Install libffi (glib dependency)
curl -L --retry 3 https://sourceware.org/pub/libffi/libffi-3.2.1.tar.gz | tar xz
pushd libffi-3.2.1
./configure --prefix=$LIB_INSTALL_PREFIX --disable-debug
make install -j
popd
# Install gettext (glib dependency)
curl -L --retry 3 https://ftp.gnu.org/gnu/gettext/gettext-0.20.1.tar.xz | tar xz
pushd gettext-0.20.1
./configure --prefix=$LIB_INSTALL_PREFIX \
--disable-debug \
--disable-java --disable-csharp \
--without-git --without-cvs --without-xz
make -j
make install
popd
# Install glib (lensfun dependency)
curl -L --retry 3 https://download.gnome.org/sources/glib/2.69/glib-2.69.2.tar.xz | tar xz
pushd glib-2.69.2
mkdir build
cd build
meson --prefix=$LIB_INSTALL_PREFIX \
-Dinternal_pcre=true \
-Dselinux=disabled \
-Ddtrace=false \
-Dman=false \
-Dgtk_doc=false \
..
ninja install
popd
ls -al $LIB_INSTALL_PREFIX/lib
ls -al $LIB_INSTALL_PREFIX/lib/pkgconfig
export CC=clang
export CXX=clang++
export CFLAGS="-arch x86_64"
export CXXFLAGS=$CFLAGS
export LDFLAGS=$CFLAGS
export ARCHFLAGS=$CFLAGS
# Build wheel
export CMAKE_PREFIX_PATH=$LIB_INSTALL_PREFIX
python setup.py bdist_wheel
# Fix wheel platform tag, see above for details.
if [ $PYTHON_VERSION == "3.5" ]; then
filename=$(ls dist/*.whl)
mv -v "$filename" "${filename/macosx_10_6_intel/macosx_10_9_x86_64}"
fi
# List direct and indirect library dependencies
mkdir tmp_wheel
pushd tmp_wheel
unzip ../dist/*.whl
python ../.github/scripts/otooltree.py lensfunpy/*.so
popd
rm -rf tmp_wheel
delocate-listdeps --all dist/*.whl # lists direct library dependencies
delocate-wheel --require-archs=x86_64 dist/*.whl # copies library dependencies into wheel
delocate-listdeps --all dist/*.whl # verify
# Dump target versions of dependend libraries.
# Currently, delocate does not support checking those.
# See https://github.com/matthew-brett/delocate/issues/56.
set +x # reduce noise
echo "Dumping LC_VERSION_MIN_MACOSX (pre-10.14) & LC_BUILD_VERSION"
mkdir tmp_wheel
pushd tmp_wheel
unzip ../dist/*.whl
echo lensfunpy/*.so
otool -l lensfunpy/*.so | grep -A 3 LC_VERSION_MIN_MACOSX || true
otool -l lensfunpy/*.so | grep -A 4 LC_BUILD_VERSION || true
for file in lensfunpy/.dylibs/*.dylib; do
echo $file
otool -l $file | grep -A 3 LC_VERSION_MIN_MACOSX || true
otool -l $file | grep -A 4 LC_BUILD_VERSION || true
done
popd
set -x
# Install lensfunpy
pip install dist/*.whl
# Test installed lensfunpy
retry pip install numpy -U # scipy should trigger an update, but that doesn't happen
retry pip install -r dev-requirements.txt
# make sure it's working without any required libraries installed
rm -rf $LIB_INSTALL_PREFIX
mkdir tmp_for_test
pushd tmp_for_test
nosetests --verbosity=3 --nocapture ../test
popd
| neothemachine/lensfunpy | .github/scripts/build-macos.sh | Shell | mit | 5,236 |
#!/bin/bash -e
#-------------------------------------------------------------------------------
#
# cron/brew_update.sh
#
#-------------------------------------------------------------------------------
# Prime the environment first
# shellcheck source=/Volumes/ThunderBay/Users/phatblat
source "$HOME/.dotfiles/cron/cron.env"
# This can only be run by an admin user
if ! user_is_admin; then
exit 0
fi
brew_path=$(command -v brew)
$brew_path update > /dev/null && $brew_path upgrade > /dev/null
# firewall_allow_nginx
# nginx_path=`brew list nginx | head -n 1`
# sudo /usr/libexec/ApplicationFirewall/socketfilterfw --add ${nginx_path}
# sudo /usr/libexec/ApplicationFirewall/socketfilterfw --unblockapp ${nginx_path}
| phatblat/dotfiles | .dotfiles/cron/brew_update.sh | Shell | mit | 724 |
#!/bin/sh
#watch -n 1 --differences smem
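# Derive the log file name from this script's own name (e.g. smem_t.sh -> smem_t.log).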
fnm=${0##*/}
nlog=${fnm%.*}.log
echo monitor start
#rm $0.log
ps aux |grep USER|grep -v grep >$nlog
while true
do
ps aux|grep [g]it |grep -v time >>$nlog
# sleep 1
# usleep 100000 #0.1s
sleepenh 0.1 > /dev/null
done
echo monitor end
| kotaro-dev/ubuntu-shell | smem_t.sh | Shell | mit | 282 |
#!/bin/sh
set -e
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
# This protects against multiple targets copying the same framework dependency at the same time. The solution
# was originally proposed here: https://lists.samba.org/archive/rsync/2008-February/020158.html
RSYNC_PROTECT_TMP_FILES=(--filter "P .*.??????")
install_framework()
{
if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
local source="${BUILT_PRODUCTS_DIR}/$1"
elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
elif [ -r "$1" ]; then
local source="$1"
fi
local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
if [ -L "${source}" ]; then
echo "Symlinked..."
source="$(readlink "${source}")"
fi
# Use filter instead of exclude so missing patterns don't throw errors.
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
local basename
basename="$(basename -s .framework "$1")"
binary="${destination}/${basename}.framework/${basename}"
if ! [ -r "$binary" ]; then
binary="${destination}/${basename}"
fi
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
strip_invalid_archs "$binary"
fi
# Resign the code if required by the build settings to avoid unstable apps
code_sign_if_enabled "${destination}/$(basename "$1")"
# Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]})
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
code_sign_if_enabled "${destination}/${lib}"
done
fi
}
# Copies the dSYM of a vendored framework
install_dsym() {
local source="$1"
if [ -r "$source" ]; then
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${DWARF_DSYM_FOLDER_PATH}\""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${DWARF_DSYM_FOLDER_PATH}"
fi
}
# Signs a framework with the provided identity
code_sign_if_enabled() {
if [ -n "${EXPANDED_CODE_SIGN_IDENTITY}" -a "${CODE_SIGNING_REQUIRED}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
# Use the current code_sign_identity
echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
local code_sign_cmd="/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS} --preserve-metadata=identifier,entitlements '$1'"
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
code_sign_cmd="$code_sign_cmd &"
fi
echo "$code_sign_cmd"
eval "$code_sign_cmd"
fi
}
# Strip invalid architectures
strip_invalid_archs() {
binary="$1"
# Get architectures for current file
archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | rev)"
stripped=""
for arch in $archs; do
if ! [[ "${ARCHS}" == *"$arch"* ]]; then
# Strip non-valid architectures in-place
lipo -remove "$arch" -output "$binary" "$binary" || exit 1
stripped="$stripped $arch"
fi
done
if [[ "$stripped" ]]; then
echo "Stripped $binary of architectures:$stripped"
fi
}
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_framework "${BUILT_PRODUCTS_DIR}/VuforiaAR/VuforiaAR.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_framework "${BUILT_PRODUCTS_DIR}/VuforiaAR/VuforiaAR.framework"
fi
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
wait
fi
| JeanVinge/VuforiaAR | Example/Pods/Target Support Files/Pods-VuforiaAR_Example/Pods-VuforiaAR_Example-frameworks.sh | Shell | mit | 4,666 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for USN-2993-1
#
# Security announcement date: 2016-06-09 00:00:00 UTC
# Script generation date: 2017-02-06 21:05:33 UTC
#
# Operating System: Ubuntu 12.04 LTS
# Architecture: x86_64
#
# Vulnerable packages fixed in version:
# - firefox:47.0+build3-0ubuntu0.12.04.1
#
# Last versions recommended by security team:
# - firefox:51.0.1+build2-0ubuntu0.12.04.2
#
# CVE List:
# - CVE-2016-2815
# - CVE-2016-2818
# - CVE-2016-2819
# - CVE-2016-2821
# - CVE-2016-2822
# - CVE-2016-2825
# - CVE-2016-2828
# - CVE-2016-2829
# - CVE-2016-2831
# - CVE-2016-2832
# - CVE-2016-2833
# - CVE-2016-2834
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade firefox=51.0.1+build2-0ubuntu0.12.04.2 -y
| Cyberwatch/cbw-security-fixes | Ubuntu_12.04_LTS/x86_64/2016/USN-2993-1.sh | Shell | mit | 879 |
java -Duser.home=`pwd` -jar alloy41/alloy41.jar &
| nishio/learning_alloy | run41.sh | Shell | mit | 50 |
#!/bin/bash
set +h # disable hashall
shopt -s -o pipefail
set -e # Exit on error
PKG_NAME="bzip2"
PKG_VERSION="1.0.6"
TARBALL="${PKG_NAME}-${PKG_VERSION}.tar.gz"
SRC_DIR="${PKG_NAME}-${PKG_VERSION}"
function prepare() {
ln -sv "../../source/$TARBALL" "$TARBALL"
}
function unpack() {
tar xf ${TARBALL}
}
function build() {
make $MAKE_PARALLEL
}
function check() {
echo " "
}
function instal() {
make $MAKE_PARALLEL PREFIX=/tools install
}
function clean() {
rm -rf "${SRC_DIR}" "$TARBALL"
}
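# Build pipeline: clean, link tarball, unpack, build, optionally run checks, install into /tools, then clean up.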
clean;prepare;unpack;pushd ${SRC_DIR};build;[[ $MAKE_CHECK = TRUE ]] && check;instal;popd;clean
| PandaLinux/pandaOS | phase1/bzip2/build.sh | Shell | mit | 622 |
BARE_PROMPT="$PROMPT"
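# timeprompt prefixes the prompt with a clock and enables REPORTTIME; notimeprompt restores the saved prompt.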
timeprompt() {
export PROMPT="\$(date \"+%H:%M:%S\") $BARE_PROMPT"
export REPORTTIME=0
}
notimeprompt() {
export PROMPT="$BARE_PROMPT"
unset REPORTTIME
}
| kjhaber/dotfiles | zsh/bin/timeprompt.zsh | Shell | mit | 185 |
#!/bin/bash
#remember to run from git root!
if [ ! -d plots ]; then
echo "Plots directory not found. Are you running from git root?"
exit 1
fi
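# Each run preloads io_inst.so to trace the checker's reads/writes into plots/<fs>-fsck.{read,write}.out.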
echo EXT2 FSCK
IO_INST_READFILE=plots/ext2-fsck.read.out \
IO_INST_WRITEFILE=plots/ext2-fsck.write.out \
IO_INST_TRACKFD=5 \
LD_PRELOAD=`pwd`/lib/io_inst/io_inst.so \
/sbin/fsck.ext2 -f img/ext2-postmark.img
echo VFAT FSCK
IO_INST_READFILE=plots/vfat-fsck.read.out \
IO_INST_WRITEFILE=plots/vfat-fsck.write.out \
IO_INST_TRACKFD=5 \
LD_PRELOAD=`pwd`/lib/io_inst/io_inst.so \
/sbin/fsck.vfat -f img/vfat-postmark.img
echo MINIX FSCK
IO_INST_READFILE=plots/minix-fsck.read.out \
IO_INST_WRITEFILE=plots/minix-fsck.write.out \
IO_INST_TRACKFD=5 \
LD_PRELOAD=`pwd`/lib/io_inst/io_inst.so \
/sbin/fsck.minix -f img/minix-many.img
echo XFS FSCK
IO_INST_READFILE=plots/xfs-fsck.read.out \
IO_INST_WRITEFILE=plots/xfs-fsck.write.out \
IO_INST_TRACKFD=5 \
LD_PRELOAD=`pwd`/lib/io_inst/io_inst.so \
/usr/sbin/xfs_db -f -c "check" -f img/xfs-postmark.img
echo REISER FSCK
IO_INST_READFILE=plots/reiserfs-fsck.read.out \
IO_INST_WRITEFILE=plots/reiserfs-fsck.write.out \
IO_INST_TRACKFD=5 \
LD_PRELOAD=`pwd`/lib/io_inst/io_inst.so \
/sbin/reiserfsck -f -y img/reiserfs-postmark.img
echo BTRFS FSCK
IO_INST_READFILE=plots/btrfs-fsck.read.out \
IO_INST_WRITEFILE=plots/btrfs-fsck.write.out \
IO_INST_TRACKFD=6 \
LD_PRELOAD=`pwd`/lib/io_inst/io_inst.so \
/sbin/btrfsck img/btrfs-postmark.img
| chzchzchz/fsl | util/fsck_io.sh | Shell | mit | 1,452 |
#!/bin/sh
set -e
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
install_framework()
{
if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
local source="${BUILT_PRODUCTS_DIR}/$1"
elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
elif [ -r "$1" ]; then
local source="$1"
fi
local destination="${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
if [ -L "${source}" ]; then
echo "Symlinked..."
source="$(readlink "${source}")"
fi
# use filter instead of exclude so missing patterns don't throw errors
echo "rsync -av --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
rsync -av --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
local basename
basename="$(basename -s .framework "$1")"
binary="${destination}/${basename}.framework/${basename}"
if ! [ -r "$binary" ]; then
binary="${destination}/${basename}"
fi
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
strip_invalid_archs "$binary"
fi
# Resign the code if required by the build settings to avoid unstable apps
code_sign_if_enabled "${destination}/$(basename "$1")"
# Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]})
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
code_sign_if_enabled "${destination}/${lib}"
done
fi
}
# Signs a framework with the provided identity
code_sign_if_enabled() {
if [ -n "${EXPANDED_CODE_SIGN_IDENTITY}" -a "${CODE_SIGNING_REQUIRED}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
# Use the current code_sign_identity
echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
echo "/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} --preserve-metadata=identifier,entitlements \"$1\""
/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} --preserve-metadata=identifier,entitlements "$1"
fi
}
# Strip invalid architectures
strip_invalid_archs() {
binary="$1"
# Get architectures for current file
archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | rev)"
stripped=""
for arch in $archs; do
if ! [[ "${VALID_ARCHS}" == *"$arch"* ]]; then
# Strip non-valid architectures in-place
lipo -remove "$arch" -output "$binary" "$binary" || exit 1
stripped="$stripped $arch"
fi
done
if [[ "$stripped" ]]; then
echo "Stripped $binary of architectures:$stripped"
fi
}
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_framework "Pods-WikipediaUnitTests/FBSnapshotTestCase.framework"
install_framework "Pods-WikipediaUnitTests/Nimble.framework"
install_framework "Pods-WikipediaUnitTests/Nocilla.framework"
install_framework "Pods-WikipediaUnitTests/OCHamcrest.framework"
install_framework "Pods-WikipediaUnitTests/OCMockito.framework"
install_framework "Pods-WikipediaUnitTests/Quick.framework"
fi
if [[ "$CONFIGURATION" == "AdHoc" ]]; then
install_framework "Pods-WikipediaUnitTests/FBSnapshotTestCase.framework"
install_framework "Pods-WikipediaUnitTests/Nimble.framework"
install_framework "Pods-WikipediaUnitTests/Nocilla.framework"
install_framework "Pods-WikipediaUnitTests/OCHamcrest.framework"
install_framework "Pods-WikipediaUnitTests/OCMockito.framework"
install_framework "Pods-WikipediaUnitTests/Quick.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_framework "Pods-WikipediaUnitTests/FBSnapshotTestCase.framework"
install_framework "Pods-WikipediaUnitTests/Nimble.framework"
install_framework "Pods-WikipediaUnitTests/Nocilla.framework"
install_framework "Pods-WikipediaUnitTests/OCHamcrest.framework"
install_framework "Pods-WikipediaUnitTests/OCMockito.framework"
install_framework "Pods-WikipediaUnitTests/Quick.framework"
fi
if [[ "$CONFIGURATION" == "Alpha" ]]; then
install_framework "Pods-WikipediaUnitTests/FBSnapshotTestCase.framework"
install_framework "Pods-WikipediaUnitTests/Nimble.framework"
install_framework "Pods-WikipediaUnitTests/Nocilla.framework"
install_framework "Pods-WikipediaUnitTests/OCHamcrest.framework"
install_framework "Pods-WikipediaUnitTests/OCMockito.framework"
install_framework "Pods-WikipediaUnitTests/Quick.framework"
fi
if [[ "$CONFIGURATION" == "Beta" ]]; then
install_framework "Pods-WikipediaUnitTests/FBSnapshotTestCase.framework"
install_framework "Pods-WikipediaUnitTests/Nimble.framework"
install_framework "Pods-WikipediaUnitTests/Nocilla.framework"
install_framework "Pods-WikipediaUnitTests/OCHamcrest.framework"
install_framework "Pods-WikipediaUnitTests/OCMockito.framework"
install_framework "Pods-WikipediaUnitTests/Quick.framework"
fi
| jindulys/Wikipedia | Pods/Target Support Files/Pods-WikipediaUnitTests/Pods-WikipediaUnitTests-frameworks.sh | Shell | mit | 5,548 |
#!/usr/bin/env bash
# This runs a terminal timer that counts down to zero
# from a specified amount of minutes and then
# lets the window manager draw attention to it in its native way.
# based on script and explanation from: https://erikwinter.nl/notes/2020/basic-pomodoro-timer-in-bash-with-termdown-and-wmctrl/
# keywords: pomodoro, timer, stopwatch, wmctrl
set -eou pipefail
usage()
{
echo "usage: pomodoro.sh -t [normal/break]"
}
no_args="true"
while getopts ":t:" arg; do
case $arg in
t) TYPE=$OPTARG;;
esac
no_args="false"
done
[[ "$no_args" == "true" ]] && { usage; exit 1; }
if [ "$TYPE" == 'normal' ]; then
MINUTES=25
elif [ "$TYPE" == 'break' ]; then
MINUTES=5
else
echo -e 'UNKNOWN TYPE. :( \nKnown types are: normal, break.'
exit 1
fi
# wmctrl handles the part of drawing the attention.
# The trick here is to make sure that attention is drawn to the right window.
# This is done setting the window title of the active window, the one we run the script in, to something known beforehand.
# This way, we can find it back later at the end of the countdown:
#
# -N "Pomodoro" sets the title of the window to "Pomodoro".
# -r :ACTIVE: selects the currently active window.
wmctrl -N "Pomodoro" -r :ACTIVE:
# below sets the timer
termdown --no-seconds -B --font univers -b -q 5 -t 'TIME IS UP!' ${MINUTES}m
# After the countdown, we let wmctrl draw the attention:
# -b add,demands_attention adds the property demands_attention to the window.
# -r "Pomodoro" selects the window that has "Pomodoro" as title.
wmctrl -b add,demands_attention -r "Pomodoro"
| tiagoprn/devops | bin/pomodoro.sh | Shell | mit | 1,593 |
#!/bin/sh
# Test comment
echo "^ This guy"
# Another comment + harvesting search results
wget -P files -i urls.log --rejected-log error.log --no-verbose
#EOF
| lee-dohm/language-generic-config | spec/fixtures/exec.sh | Shell | mit | 161 |
#! /usr/bin/env bash
set -e
#############################################################################
## Helpers & Config
#############################################################################
msg() {
tput setab 2 # green bg
tput setaf 7 # white text
echo ">>> $1"
tput sgr 0
sleep 1
}
tmp_dir="/tmp/fsi-$(date +%s)"
#############################################################################
## Main Script
#############################################################################
main() {
msg "Entering temporary directory..."
mkdir "$tmp_dir"
cd "$tmp_dir"
msg "Making a copy of resources.zip..."
cp /opt/spotify/spotify-client/Data/resources.zip resources_old.zip
unzip resources_old.zip -d resources_old/
msg "Downloading icons..."
icon_base_url="https://raw.githubusercontent.com/rhoconlinux/fix-spotify-icon-elementary/master/src/images"
wget -O spotify_icon.ico "$icon_base_url/spotify_icon.ico"
for size in 16 22 24 32 48 64 128 256 512; do
wget -O "spotify-linux-$size.png" "$icon_base_url/spotify-linux-$size.png"
done
msg "Replacing the icons..."
cp spotify_icon.ico resources_old/_linux/spotify_icon.ico
for size in 16 22 24 32 48 64 128 256 512; do
cp "spotify-linux-$size.png" "resources_old/_linux/spotify-linux-$size.png"
done
msg "Packaging resources.zip..."
cd resources_old/
zip -r resources_patched.zip .
cd ..
mv resources_old/resources_patched.zip .
msg "Replacing current resources.zip..."
sudo cp resources_patched.zip /opt/spotify/spotify-client/Data/resources.zip
msg "Cleaning up..."
rm -rf "$tmp_dir"
msg "The Spotify icon has been replaced successfuly!"
msg "Start Spotify again to check it out."
sleep 2
}
#############################################################################
## Bootstrap it
#############################################################################
main "$@"
|
rhoconlinux/fix-spotify-icon-elementary
|
src/fix-spotify-icon.sh
|
Shell
|
mit
| 3,629 |
#!/bin/bash
port=8000
for i in `seq 1 4`
do
#rm -rf $i
#mkdir $i
ip='0.0.0.0'
# auto-close terminal
gnome-terminal --tab -e "/bin/bash -c '../main.py -o stakeholders/$i/data -cli -ip $ip -p $port -bp localhost:8000'"
# don't auto-close terminal
#gnome-terminal --tab -e "/bin/bash -c '../main.py -o stakeholders/$i/data -cli -ip $ip -p $port -bp localhost:8000; exec /bin/bash -i'"
port=$((port+2))
done
|
ceddie/bptc_wallet
|
demo_setup/start_stakeholders.sh
|
Shell
|
mit
| 442 |
if test ! $(which pyenv); then
echo "Installing pyenv..."
brew install pyenv
export PYTHON_CONFIGURE_OPTS="--enable-framework"
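# --enable-framework builds CPython as a macOS framework build, which some
# GUI toolkits expect (descriptive note).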
pyenv install 2.7.8
pyenv global 2.7.8
fi
if test ! $(which http); then
echo "Installing httpie..."
pip install httpie
fi
|
redhotvengeance/dotfiles
|
python/install.sh
|
Shell
|
mit
| 264 |
#!/usr/bin/env bash
set -ex
for package in ./deploy/*
do
npm publish ${package}
done
|
scttcper/ng2-bs-dropdown
|
enact-release.sh
|
Shell
|
mit
| 89 |
export SRC_ROOT=`pwd`
# Install Postgres, etc
$SRC_ROOT/../install.sh
# Pl/v8 stuff
git clone git://github.com/v8/v8.git v8 && cd v8
export GYPFLAGS="-D OS=freebsd"
make dependencies
make native.check -j 4 library=shared strictaliasing=off console=readline
cp v8.so /usr/lib/v8.so
cd $SRC_ROOT
# TODO: Add pg_config to your $PATH. Normally pg_config exists in $PGHOME/bin.
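# For example (illustration, assuming $PGHOME points at your Postgres install):
# export PATH="$PGHOME/bin:$PATH"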
git clone https://code.google.com/p/plv8js/ && cd plv8js
make && make install
cd $SRC_ROOT
|
danieltahara/sinew
|
benchmark/setup.sh
|
Shell
|
mit
| 468 |
#!/bin/sh -xe
if [ "$CI_ARCH" = "amd64" ]; then
export PATH=/mingw64/bin:$PATH
else
export PATH=/mingw32/bin:$PATH
fi
7za | head -2
gcc -v
cppcheck --error-exitcode=1 src
export ISOLATE_VERSION=head
if [ -n "$CI_BUILD_TAG" ]; then
export ISOLATE_VERSION=$CI_BUILD_TAG
fi
export CI_VERSION=$CI_BUILD_REF_NAME
export ISOLATE_CFLAGS="-DISOLATE_VERSION=\\\"$ISOLATE_VERSION\\\""
make
file isolate.exe
export CI_OS="windows"
# sign (win)
if [ "$CI_OS" = "windows" ]; then
scripts/ci-sign.sh "isolate.exe"
fi
# package
7za a isolate.7z isolate.exe
# set up a file hierarchy that ibrew can consume, ie:
#
# - dl.itch.ovh
# - isolate
# - windows-amd64
# - LATEST
# - v0.3.0
# - isolate.7z
# - isolate.exe
# - SHA1SUMS
BINARIES_DIR="binaries/$CI_OS-$CI_ARCH"
mkdir -p $BINARIES_DIR/$CI_VERSION
mv isolate.7z $BINARIES_DIR/$CI_VERSION
mv isolate.exe $BINARIES_DIR/$CI_VERSION
(cd $BINARIES_DIR/$CI_VERSION && sha1sum * > SHA1SUMS && sha256sum * > SHA256SUMS)
if [ -n "$CI_BUILD_TAG" ]; then
echo $CI_BUILD_TAG > $BINARIES_DIR/LATEST
fi
|
itchio/isolate
|
scripts/ci-build.sh
|
Shell
|
mit
| 1,088 |
#!/bin/bash
# v1.0.0
#------------------------------------------------------------------------------
# print the help for the create-full-7z-package functionality
#------------------------------------------------------------------------------
doHelpCreateFull7zPackage(){
doLog "INFO ::: START ::: create-full-7z-package.help" ;
cat doc/txt/pgsql-runner/helps/pckg/create-full-7z-package.help.txt
sleep $sleep_interval
printf "\033[2J";printf "\033[0;0H"
doLog "INFO ::: STOP ::: create-full-7z-package.help" ;
}
#eof help doCreateFullPackage
|
YordanGeorgiev/pgsql-runner
|
src/bash/pgsql-runner/helps/pckg/create-full-7z-package.help.sh
|
Shell
|
mit
| 558 |
#!/bin/sh
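# On an NVIDIA Jetson (aarch64), repoint the libGL.so symlink at the
# Tegra-specific implementation (descriptive note).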
cd /usr/lib/aarch64-linux-gnu
sudo rm libGL.so
sudo ln -s /usr/lib/aarch64-linux-gnu/tegra/libGL.so libGL.so
|
jetsonhacks/installLibfreenect2
|
libGLFix.sh
|
Shell
|
mit
| 119 |
#!/bin/sh
export COMPOSER_NO_INTERACTION=1
composer self-update
if [ -n "${MIN_STABILITY:-}" ]; then
sed -i -e "s/\"minimum-stability\": \"stable\"/\"minimum-stability\": \"${MIN_STABILITY}\"/" composer.json
fi
composer require --no-update satooshi/php-coveralls:"~0.6@stable" guzzle/guzzle:">=3.0.4@stable"
composer remove --no-update symfony/form symfony/http-kernel symfony/translation symfony/yaml
if [ -n "${SYMFONY_VERSION:-}" ]; then
composer require --no-update --dev symfony/symfony:"${SYMFONY_VERSION}"
fi
if [ "${USE_DEPS:-}" = "lowest" ]; then
COMPOSER_UPDATE_ARGS="--prefer-lowest"
fi
composer update ${COMPOSER_UPDATE_ARGS:-}
|
IUQOL/Capacitation
|
vendor/craue/formflow-bundle/travis_install_dependencies.sh
|
Shell
|
mit
| 649 |
#!/bin/bash
CP=/usr/local/Cellar/tomcat/7.0.47/libexec/lib/servlet-api.jar:/usr/local/Cellar/tomcat/7.0.47/libexec/lib/mysql-connector-java-5.1.27-bin.jar:/usr/local/Cellar/tomcat/7.0.47/libexec/webapps/SEng/WEB-INF/classes/
cd Entidades
javac -d ../WEB-INF/classes/ *.java -classpath "$CP"
echo "entidades compilado"
cd ..
cd Controles
javac -d ../WEB-INF/classes/ *.java -classpath "$CP"
echo "controles compilado"
cd ..
cd Interfaces
javac -d ../WEB-INF/classes/ *.java -classpath "$CP"
echo "interfaces compilado"
|
acrogenesis/SEng-Bytes
|
compilar.sh
|
Shell
|
mit
| 917 |
#!/bin/bash
trap 'exit' ERR
APP_NAME="stormy-hollows-9630"
echo "-----> Deploying to Heroku"
./prepare_deploy_directory.sh
VERSION=`git rev-parse HEAD | perl -pe 'chomp'`
echo "-----> Deploying application version $VERSION"
echo " Creating build tarball...."
DEPLOY_FILENAME="deploy-$VERSION.tgz"
pushd deploy
tar -czf ../$DEPLOY_FILENAME .
popd
echo " Requesting application specific source endpoint..."
SOURCE_ENDPOINT="$(curl -s -n \
-X POST "https://api.heroku.com/apps/$APP_NAME/sources" \
-H "Accept: application/vnd.heroku+json; version=3.streaming-build-output")"
PUT_URL=`echo $SOURCE_ENDPOINT | jsawk "return this.source_blob.put_url"`
echo " Received blob endpoint: $PUT_URL"
GET_URL=`echo $SOURCE_ENDPOINT | jsawk "return this.source_blob.get_url"`
echo " Received deploy endpoint: $GET_URL"
echo " Upload app blob"
curl -s "$PUT_URL" -X PUT -H 'Content-Type:' --data-binary @$DEPLOY_FILENAME
echo " Deploy application"
DEPLOY_RESULT="$(curl -n -X POST "https://api.heroku.com/apps/$APP_NAME/builds"\
-d "{\"source_blob\":{\"url\":\"$GET_URL\", \"version\": \"$VERSION\"}}" \
-H "Accept: application/vnd.heroku+json; version=3.streaming-build-output" \
-H "Content-Type: application/json")"
log_url=`echo "$DEPLOY_RESULT" | jsawk "return this.output_stream_url"`
echo " Received log endpoint: $log_url"
curl "$log_url"
|
shageman/cbra_book_code
|
c2s12/sportsball/deploy_to_heroku.sh
|
Shell
|
mit
| 1,404 |
#!/bin/bash
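# Stop any previously started worker via its pid file, start a new one for
# group "md15" (localhost:6379 suggests redis -- an assumption), then follow
# its log.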
mkdir -p log
mkdir -p output
kill `cat ./log/crawler-worker.pid`
node lib/crawler-worker.js -g "md15" -p 6379 -h localhost -o output/ > log/crawler-worker.log 2>&1 &
echo $!>./log/crawler-worker.pid
tail -f ./log/crawler-worker.log
|
yi/node-crawler-worker
|
start-md15.sh
|
Shell
|
mit
| 246 |
#!/usr/bin/env bash
# Disable analytics
brew analytics off
# Update the formulae and Homebrew itself
brew update
# Upgrade everything already installed
brew upgrade
# Install main GNU packages
brew install coreutils
brew install findutils
brew install moreutils
# Install other useful GNU packages
brew install autoconf
brew install automake
brew install gnu-sed
brew install gnupg
brew install grep
brew install libtool
brew install readline
brew install screen
brew install wget --with-iri
# Install other useful packages
#brew install ack
#brew install asdf # Use git clone install for now.
brew install bat
brew install bluetoothconnector
brew install fd
brew install ffmpeg
brew install fzf
brew install ghostscript
brew install git
brew install git-lfs
brew install graphicsmagick
brew install heroku
brew install htop
brew install imagemagick --with-webp
brew install jdupes
#brew install libvpx # When webm is needed
brew install libxslt
brew install libyaml
#brew install lua
brew install md5deep
brew install ncdu
brew install openssh
brew install openssl
brew install pandoc
#brew install postgresql
brew install rename
brew install ripgrep
#brew install rlwrap
brew install rsync
brew install ruby-build
brew install smartmontools
brew install sqlite
brew install ssh-copy-id
brew install tldr
brew install tmux
brew install tree
#brew install tvnamer
brew install unixodbc # Driver for sql servers, used by asdf plugins
brew install vim --with-override-system-vi
brew install webkit2png
brew install xz
brew install yarn
brew install youtube-dl
brew install zsh
brew install zsh-completions
# Uninstall old versions
brew cleanup
|
michaelx/dotfiles
|
brew.sh
|
Shell
|
mit
| 1,649 |
#!/bin/bash
#SBATCH -o DiscSimu.%j.%N.out
##SBATCH -D
#SBATCH -J DiscSimu_N03_0
#SBATCH --ntasks=1
#SBATCH --mail-type=end
#SBATCH [email protected]
#SBATCH --time=99:00:00
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=1
#SBATCH --share
#SBATCH --gres=gpu:1
#SBATCH --nodelist=gpu03
cd ../
srun --gres=gpu:1 ./bin/runDiscSimulation_M -slurm N03_0
|
laosunhust/SceCells
|
scripts/discN03G00.sh
|
Shell
|
mit
| 355 |
#!/usr/bin/env bash
# Install dependencies
apt-get update && apt-get install -y gcc make linux-headers-$(uname -r)
# Mount Guest Additions ISO file
mkdir /mnt/vbox
mount -t iso9660 -o loop /root/VBoxGuestAdditions.iso /mnt/vbox
# Execute the installer
/mnt/vbox/VBoxLinuxAdditions.run
# Display installation logs before deleting them
cat /var/log/vboxadd-install.log
rm -f /var/log/vboxadd-install*.log
rm -f /var/log/VBoxGuestAdditions.log
# Unmount ISO file
umount /mnt/vbox
rmdir /mnt/vbox
# Delete ISO file
rm -f /root/VBoxGuestAdditions.iso
|
SkypLabs/packer-debian
|
scripts/vbox-tools.sh
|
Shell
|
mit
| 552 |
#!/usr/bin/env bash
RELEASE=0.13.1
dist=dist
bin=imladris
function build {
GOOS=$1 GOARCH=$2 go build -o $bin
package=$bin-$RELEASE-$1-$2.tar.gz
tar cvzf $package $bin
mv $package $dist
rm $bin
}
mkdir -p $dist
go generate
build darwin amd64
build linux amd64
|
anduintransaction/imladris
|
build.sh
|
Shell
|
mit
| 283 |
#!/bin/bash
filename=PKGBUILD
if [[ $1 != '' ]]; then
filename="$1"
fi
if [[ ! -f $filename ]]; then
echo "Can't find $filename"
exit 1
fi
cd $(dirname "$filename")
filename=$(basename "$filename")
# Update the pkgver
bumpver "$filename"
# Update the hash sums
updpkgsums "$filename"
|
xyproto/getver
|
scripts/vup.sh
|
Shell
|
mit
| 295 |
echo "Trying valid user on unallowed resource."
curl -u policia:password -X GET 'http://localhost:9000/v1/cap/transaction/1'
echo ""
echo "Trying invalid user."
curl -u policia2:password -X GET 'http://localhost:9000/v1/cap/' -i
echo ""
echo "Trying transaction. Checking entities."
curl -u dev:password -X GET 'http://localhost:9000/v1/cap/transaction/1'
echo ""
echo "Test root resource"
curl -u policia:password -X GET 'http://localhost:9000/v1/cap/test' -i
|
mindware/prgov_gmq_cap_api
|
test/systems/bash/go.sh
|
Shell
|
mit
| 462 |
#!/bin/bash
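# Flash the firmware image to a NodeMCU (ESP8266) on /dev/ttyUSB0 at
# 57600 baud, writing from flash offset 0x00000 (descriptive note).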
~/.arduino15/packages/esp8266/tools/esptool/0.4.9/esptool -vv -cd nodemcu -cb 57600 -ca 0x00000 -cp /dev/ttyUSB0 -cf latest-20170317.bin
|
kiu/feinstaubnode
|
firmware/flash.sh
|
Shell
|
mit
| 149 |
#!/bin/bash
#Source config file
. "../bin/sebal-automation.conf"
#Commands to filter process of crawler and scheduler
check_running_crawler="pgrep -f crawler"
check_running_scheduler="pgrep -f scheduler"
#SSH access and execution of commands
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no $crawler_user_name@$crawler_ip ${check_running_crawler} > /dev/null
crawler_output=$?
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no $scheduler_user_name@$scheduler_ip ${check_running_scheduler} > /dev/null
scheduler_output=$?
#Conditions to print the state of each one of the two services
if [ $crawler_output == 1 ]
then
echo "Crawler is down... try to restart it."
elif [ $crawler_output == 0 ]
then
echo "Crawler is running."
fi
if [ $scheduler_output == 1 ]
then
echo "Scheduler is down... try to restart it."
elif [ $scheduler_output == 0 ]
then
echo "Scheduler is running."
fi
|
thiagomanel/SEBAL-automation
|
scripts/check-running-services.sh
|
Shell
|
mit
| 933 |
#!/bin/bash
set -e
export RAILS_ENV=test
export BUNDLE_WITHOUT=development
if [ -x "$(which bower)" ]; then
bower cache clean
fi
rm -rf vendor/assets/bower_components
bin/setup
bin/rake
|
guidance-guarantee-programme/summary_generator
|
test.sh
|
Shell
|
mit
| 192 |
#!/bin/bash
#
# SPDX-License-Identifier: MIT
# Copyright © 2021 Apolo Pena
#
# update-composer.sh
# Description:
# Programmatically updates composer to the latest version.
LOG='/var/log/workspace-image.log'
# BEGIN: update composer to the latest version
echo "BEGIN: update composer" | tee -a $LOG
echo " Purging existing version of composer: $(composer --version)" | tee -a $LOG
sudo apt-get --assume-yes purge composer
COMP_PURGE=$?
if [ $COMP_PURGE -ne 0 ]; then
>&2 echo " ERROR: failed to purge existing version of composer." | tee -a $LOG
else
echo " SUCCESS: purged existing version of composer." | tee -a $LOG
fi
echo " Installing latest version of composer" | tee -a $LOG
EXPECTED_CHECKSUM="$(wget -q -O - https://composer.github.io/installer.sig)"
sudo php -r "copy('https://getcomposer.org/installer', 'composer-setup.php');"
ACTUAL_CHECKSUM="$(php -r "echo hash_file('sha384', 'composer-setup.php');")"
if [ "$EXPECTED_CHECKSUM" != "$ACTUAL_CHECKSUM" ]; then
>&2 echo " ERROR: Invalid installer checksum - failed to install latest version of composer." | tee -a $LOG
sudo rm composer-setup.php
else
sudo php composer-setup.php --install-dir=/usr/bin --filename=composer
COMP_VAL=$?
if [ $COMP_VAL -ne 0 ]; then
>&2 echo " ERROR $COMP_VAL: Failed to install latest version of composer." | tee -a $LOG
else
echo " SUCCESS: latest version of composer installed: $(composer --version)" | tee -a $LOG
fi
sudo rm composer-setup.php
fi
echo "END: update composer" | tee -a $LOG
# END: update composer to the latest version
|
concrete5/concrete5
|
.gp/bash/update-composer.sh
|
Shell
|
mit
| 1,571 |
#! /bin/bash
function read_dir(){
for file in `ls $1` # Note: these are backticks, which run a command and substitute its output
do
if [ -d $1"/"$file ] # Note: the spaces inside [ ] are required, otherwise an error is raised
then
read_dir $1"/"$file
else
echo $1"/"$file # Process the file here
mv $1"/"$file /Users/jerry/Desktop/1
fi
done
}
# Read the first argument
read_dir $1
|
niasand/cool-config
|
shell_learn/get_images.sh
|
Shell
|
mit
| 447 |
#!/bin/sh
#
# nginx - this script starts and stops the nginx daemon
#
# chkconfig: - 85 15
# description: NGINX is an HTTP(S) server, HTTP(S) reverse \
# proxy and IMAP/POP3 proxy server
# processname: nginx
# config: /etc/nginx/nginx.conf
# config: /etc/sysconfig/nginx
# pidfile: /var/run/nginx.pid
# Source networking configuration.
. /etc/sysconfig/network
# Check that networking is up.
[ "$NETWORKING" = "no" ] && exit 0
nginx="/usr/sbin/nginx"
prog=$(basename $nginx)
NGINX_CONF_FILE="/etc/nginx/nginx.conf"
NGINX_PID_FILE="/run/nginx.pid"
[ -f /etc/sysconfig/nginx ] && . /etc/sysconfig/nginx
lockfile=/var/lock/subsys/nginx
make_dirs() {
# make required directories
user=`$nginx -V 2>&1 | grep "configure arguments:.*--user=" | sed 's/[^*]*--user=\([^ ]*\).*/\1/g' -`
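# e.g. "configure arguments: ... --user=nginx ..." yields user=nginx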
if [ -n "$user" ]; then
if [ -z "`grep $user /etc/passwd`" ]; then
useradd -M -s /bin/nologin $user
fi
options=`$nginx -V 2>&1 | grep 'configure arguments:'`
for opt in $options; do
if [ `echo $opt | grep '.*-temp-path'` ]; then
value=`echo $opt | cut -d "=" -f 2`
if [ ! -d "$value" ]; then
# echo "creating" $value
mkdir -p $value && chown -R $user $value
fi
fi
done
fi
}
start() {
[ -x $nginx ] || exit 5
[ -f $NGINX_CONF_FILE ] || exit 6
make_dirs
echo -n $"Starting $prog: "
$nginx -c $NGINX_CONF_FILE -g "daemon off;"
retval=$?
echo
[ $retval -eq 0 ] && touch $lockfile
return $retval
}
stop() {
echo -n $"Stopping $prog: "
killproc
retval=$?
echo
[ $retval -eq 0 ] && rm $lockfile
return $retval
}
configtest() {
$nginx -t -c $NGINX_CONF_FILE
}
killproc() {
PIDS=$(ps ax | grep -v grep | grep -e "nginx:.*process" | awk '{printf $1 "\n"}')
for pid in $PIDS; do
kill -9 $pid
done
wait $PIDS 2>/dev/null
}
case "$1" in
start)
$1
;;
stop)
$1
;;
restart|configtest)
$1
;;
*)
echo $"Usage: $0 {start|stop|configtest}"
exit 2
esac
|
Novitoll/devops-linux-mentoring-program
|
Docker/web/nginx.sh
|
Shell
|
mit
| 2,076 |
#!/bin/bash
set -x
set -e
# setup
apt-get update
apt-get install -y --no-install-recommends ca-certificates-java
ES_PKG_NAME=elasticsearch-7.10.2
cd /tmp/
wget -nv -t5 -O es.tar.gz https://artifacts.elastic.co/downloads/elasticsearch/$ES_PKG_NAME-linux-x86_64.tar.gz
tar xzf es.tar.gz
rm -f es.tar.gz
mv /tmp/$ES_PKG_NAME /elasticsearch
cd -
# elasticsearch plugins
es_plugin_install() {
for i in {1..5}
do
/elasticsearch/bin/elasticsearch-plugin install $1 && break || sleep 1
done
}
es_plugin_install analysis-icu
chown -R 1000:1000 /elasticsearch
cp -frv /build/files/* / || true
source /usr/local/build_scripts/cleanup_apt.sh
|
nfq-technologies/docker-images
|
elasticsearch710/build/setup_docker.sh
|
Shell
|
mit
| 663 |
#!/bin/bash
# to maintain cask ....
# brew update && brew upgrade brew-cask && brew cleanup && brew cask cleanup
# Install native apps.
brew install caskroom/cask/brew-cask
brew tap caskroom/versions
# browsers
brew cask install google-chrome-canary
brew cask install firefox-nightly
brew cask install firefox
# dev
brew cask install atom
brew cask install dash2
brew cask install filezilla
brew cask install github-desktop
brew cask install imagemin
brew cask install imageoptim
brew cask install poedit
brew cask install sourcetree
brew cask install spectacle
brew cask install sublime-text3
brew cask install virtualbox
brew cask install vagrant
brew cask install vagrant-manager
# others
brew cask install appcleaner
brew cask install deluge
brew cask install hubic
brew cask install libreoffice
brew cask install skype
brew cask install slack
brew cask install sonos
brew cask install spotify
brew cask install teamviewer
brew cask install the-unarchiver
brew cask install vlc
# quick look plugins
brew cask install qlcolorcode
brew cask install qlstephen
brew cask install qlmarkdown
brew cask install quicklook-json
brew cask install qlprettypatch
brew cask install quicklook-csv
brew cask install betterzipql
brew cask install qlimagesize
brew cask install webpquicklook
|
antleblanc/setup
|
shell/brew-cask.sh
|
Shell
|
mit
| 1,290 |
# Change the current directory to the extension folder
cd /var/ilx/workspaces/Common/ilxws_DEMO/extensions/ilxex_DEMO/
# Install the Node.js request module (https://www.npmjs.com/package/request)
npm install request --save
# Install the Node.js string module (https://www.npmjs.com/package/string)
npm install string --save
|
ArtiomL/f5networks
|
iruleslx/tutorial/part3/tm.sh
|
Shell
|
mit
| 326 |
#!/usr/bin/env bash
yum -y update
yum groupinstall 'Development Tools'
curl https://setup.ius.io/ | bash -
yum -y install php70u nginx curl vim php70u-fpm-nginx php70u-cli php70u-mysqlnd
curl --silent --location https://rpm.nodesource.com/setup_6.x | bash -
yum -y install nodejs
cat << EOF > /etc/yum.repos.d/mongodb-org-3.2.repo
[mongodb-org-3.2]
name=MongoDB Repository
baseurl=https://repo.mongodb.org/yum/redhat/7/mongodb-org/3.2/x86_64/
gpgcheck=1
enabled=1
gpgkey=https://www.mongodb.org/static/pgp/server-3.2.asc
EOF
yum install -y mongodb-org
|
sercril/ACTimer
|
vagrant/packages.sh
|
Shell
|
gpl-2.0
| 552 |
#! /bin/sh
# Copyright (C) 2011-2022 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# Check that automake complains about unknown warnings.
. test-init.sh
# We want (almost) complete control over automake options.
AUTOMAKE="$am_original_AUTOMAKE --foreign -Werror"
cat > configure.ac <<END
AC_INIT([$me], [1.0])
AM_INIT_AUTOMAKE([-Wno-zardoz])
AC_CONFIG_FILES([Makefile])
END
cat > Makefile.am <<END
AUTOMAKE_OPTIONS = -Wfoobar
END
$ACLOCAL
AUTOMAKE_fails -Wbazquux
grep '^configure\.ac:2:.* unknown warning category.*zardoz' stderr
grep '^Makefile\.am:1:.* unknown warning category.*foobar' stderr
grep "^automake-$APIVERSION:.* unknown warning category.*bazquux" stderr
# Check that we can override warnings about unknown warning categories.
$AUTOMAKE -Wno-unsupported -Wbazquux
:
|
autotools-mirror/automake
|
t/warnings-unknown.sh
|
Shell
|
gpl-2.0
| 1,398 |
#!/usr/bin/env bash
. ./lib
# This demonstrates a bug that happens if you revert followed by
# a partial unrevert and a full unrevert. It requires that
# the second unrevert is working with patches whose contents need
# to be modified by the commute in the first unrevert.
rm -rf temp1
mkdir temp1
cd temp1
darcs init
echo line1 >> A
echo line2 >> A
echo line3 >> A
echo line4 >> A
echo line5 >> A
echo line6 >> A
darcs add A
darcs record -am A
sed 's/line2/Line2/' A > A1; rm A; mv A1 A
sed '4d' A > A1; rm A; mv A1 A
sed 's/line6/Line6/' A > A1; rm A; mv A1 A
darcs revert -a
echo nyny | darcs unrev
darcs unrev -a
cd ..
rm -rf temp1
|
DavidAlphaFox/darcs
|
tests/double-unrevert.sh
|
Shell
|
gpl-2.0
| 641 |
#!/bin/bash
RESTORE=$(echo -en '\033[0m')
BOLD=$(echo -en '\033[1m')
RED=$(echo -en '\033[00;31m')
GREEN=$(echo -en '\033[00;32m')
YELLOW=$(echo -en '\033[00;33m')
BLUE=$(echo -en '\033[00;34m')
MAGENTA=$(echo -en '\033[00;35m')
PURPLE=$(echo -en '\033[00;35m')
CYAN=$(echo -en '\033[00;36m')
LIGHTGRAY=$(echo -en '\033[00;37m')
LRED=$(echo -en '\033[01;31m')
LGREEN=$(echo -en '\033[01;32m')
LYELLOW=$(echo -en '\033[01;33m')
LBLUE=$(echo -en '\033[01;34m')
LMAGENTA=$(echo -en '\033[01;35m')
LPURPLE=$(echo -en '\033[01;35m')
LCYAN=$(echo -en '\033[01;36m')
WHITE=$(echo -en '\033[01;37m')
# Run test cases through CoNLL-U validator.
#set -u
VALIDATOR="python validate.py --lang=testsuite"
VALID_DIR="test-cases/valid"
NONVALID_DIR="test-cases/nonvalid"
silent=false
success=0
failure=0
for validf in true false; do
if [ "$validf" = true ]; then
d="$VALID_DIR"
else
d="$NONVALID_DIR";
fi
for f in $d/*.conllu; do
OUTP=$($VALIDATOR $f 2>&1)
if [ $? -eq 0 ]; then
validv=true
else
validv=false
fi
if [ "$validf" = "$validv" ]; then
success=$((success+1))
echo ${LGREEN}${BOLD}PASS $f${RESTORE}
else
failure=$((failure+1))
echo ${LRED}${BOLD}FAIL "$f valid: $validf validated: $validv" ${RESTORE}
fi
if [[ "$1" == "-v" ]]
then
echo -en "$OUTP" | egrep -v ' PASSED ' | egrep -v ' FAILED ' | egrep -v 'errors: [0-9]'
echo
fi
done
done
# Test the multiple ID thing over several files
OUTP=$($VALIDATOR $VALID_DIR/id_test_part*.conllu 2>&1)
if [ $? -eq 0 ]; then
echo ${LRED}${BOLD}FAIL "Several files with id duplication across files not caught" ${RESTORE}
failure=$((failure+1))
else
echo ${LGREEN}${BOLD}PASS "Several files with id duplication across files" ${RESTORE}
success=$((success+1))
if [[ "$1" == "-v" ]]
then
echo -en "$OUTP" | egrep -v ' PASSED ' | egrep -v ' FAILED ' | egrep -v 'errors: [0-9]'
echo
fi
fi
echo "passed $success/$((success+failure)) tests."
|
UniversalDependencies/tools
|
runtests.sh
|
Shell
|
gpl-2.0
| 1,988 |
#!/bin/bash
echo -e "\033[0;32mDeploying updates to GitHub...\033[0m"
# Build the project.
hugo --theme=lanyon --buildDrafts # if using a theme, replace by `hugo -t <yourtheme>`
# Go To Public folder
cd public
# Add changes to git.
git add -A
# Commit changes.
msg="rebuilding site `date`"
if [ $# -eq 1 ]
then msg="$1"
fi
git commit -m "$msg"
# Push source and build repos.
#git push origin master
git push
# Come Back
cd ..
|
ezobn/hugo-blog-source
|
deploy.sh
|
Shell
|
gpl-2.0
| 435 |
#!/bin/bash
WEBPERF="${BUILD_DIR}/bin/webperf"
FT_COMMON="webperf/ft/common"
TMP="${BUILD_DIR}/ft/webperf"
PHANTOMJS="${CACHE_DIR}/bin/phantomjs"
TEST_DIR=$(dirname "${BASH_SOURCE[1]}")
mkdir -p "${TMP}"
update_urls()
{
if [ $# -ne 2 ]; then
echo "Usage: update_urls.sh <URL> <output>" 1>&2
return 1
fi
local url="$1"
local file="$2"
local max_age=3600
if [ -f "$file" ]; then
local update=0
if [ "$(find $file -mmin +60 | wc -l)" -eq 1 ]
then
echo "Updating URLs because cached file is too old."
update=1
fi
if [ "$(cat $file | wc -l)" -eq 0 ]
then
echo "Updating URLs because cached file is empty."
update=1
fi
else
echo "Updating URLs because cached file was not found."
update=1
fi
if [ $update -eq 1 ]; then
"$PHANTOMJS" --ignore-ssl-errors=true "${FT_COMMON}/get_urls.js" "$url" > "$file" 2>/dev/null
fi
}
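# Example usage (illustration; any URL and cache file path work):
# update_urls "https://example.com/" "${TMP}/urls.txt"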
|
wwwperf/webperf
|
webperf/ft/common/ft.bash
|
Shell
|
gpl-2.0
| 999 |
#!/bin/bash
#exit when command fails (use || true when a command can fail)
set -o errexit
#exit when your script tries to use undeclared variables
set -o nounset
# in scripts to catch mysqldump fails
set -o pipefail
# Set magic variables for current file & dir
__dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" # Dir of the script
__root="$(cd "$(dirname "${__dir}")" && pwd)" # Dir of the dir of the script
__file="${__dir}/$(basename "${BASH_SOURCE[0]}")" # Full path of the script
__base="$(basename ${__file})" # Name of the script
ts=`date +'%Y%m%d-%H%M%S'`
ds=`date +'%Y%m%d'`
pid=`ps -ef | grep ${__base} | grep -v 'vi ' | head -n1 | awk ' {print $2;} '`
formerDir=`pwd`
echo Checking and waiting until git finishes.....
untilDone.bash -s git -e "$__base"
### Commits everything, pulls and, if the pull is successful, pushes
git remote -v
echo
reportPath=''
if hash diffReport.bash 2>/dev/null; then
reportPath=`diffReport.bash logOnly`
fi
echo Git commit...
git commit -am "$1" || true
echo
echo Submodule check...
cd `git rev-parse --show-toplevel`
git submodule init
git submodule update
cd -
echo
echo 'git pull & git push'
git pull && echo && git push
set +x
echo
git-branch-status.bash || true
git submodule foreach --recursive git-branch-status.bash || true
if [[ ! -z "$reportPath" ]] ; then
echo; echo; echo
echo Diff report in
echo $reportPath
echo; echo; echo
fi
#echo 'Repacking.... this will eventually be in a separate term window'
#git repack -a -d --depth=250 --window=250
|
cyclingzealot/bin
|
gitTools/push.bash
|
Shell
|
gpl-2.0
| 1,568 |
#!/usr/bin/env bash
# coding=utf-8
[ -z "$UHP_HOME" ] && {
echo "UHP_HOME not set."
exit 1
}
#echo "UHP_HOME=$UHP_HOME"
app=`basename "$0" .sh`
app=${app#*-} # app=worker
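# e.g. invoked as "stop-worker.sh": basename gives "stop-worker", and the
# ${app#*-} expansion strips everything up to the first "-", so app="worker".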
echo "------------------------------"
echo "- STOP $app"
echo "------------------------------"
pid=$(cat ${UHP_HOME}/pids/${app}/*.pid 2>/dev/null)
[ -n "$pid" ] && kill $pid
ok="false"
for ((i=0;i<10;i++)); do
pidfile=$(ls ${UHP_HOME}/pids/${app}/*.pid 2>/dev/null)
[ -z "$pidfile" ] && {
ok="true"
break
}
sleep 1
done
[ "$ok" == "true" ] && {
echo "Stop OK"
} || {
echo "kill -9 $pid"
kill -9 $pid
rm -rf ${UHP_HOME}/pids/${app}/*.pid
sleep 1
}
echo "------------------------------"
|
uhp/uhp
|
bin/stop-app.sh
|
Shell
|
gpl-2.0
| 724 |
#!/bin/sh
#
# ADTPro - *nix startup shell script
#
# Note:
# Invoke with the name of the communications button to push
# in order to start with that mode active (i.e. './adtpro.sh ethernet')
#
# You can set two variables here:
# 1. $MY_JAVA_HOME - to pick a particular java to run under
# 2. $ADTPRO_HOME - to say where you installed ADTPro
#
# Set default ADTPRO_HOME to be the fully qualified
# current working directory.
export ADTPRO_HOME="`dirname \"$0\"`"
cd "$ADTPRO_HOME"
export ADTPRO_HOME="`pwd`/"
# Uncomment and modify one or both of the lines below if you
# want to specify a particular location for Java or ADTPro.
# Note: They must have a trailing backslash as in the examples!
#
# export MY_JAVA_HOME=/usr/local/java/bin/
# export ADTPRO_HOME=~/myuser/adtpro/
OS=`uname`
OS_ARCH=`uname -p`
if [ "$1x" = "headlessx" ]; then
shift
if [ "$1x" = "x" ] || [ ! -f /usr/bin/xvfb-run ]; then
if [ ! -f /usr/bin/xvfb-run ]; then
echo "Headless operation requires xvfb."
else
echo "usage: adtpro.sh [ headless ] [ serial | ethernet | audio | localhost ]"
fi
exit 1
else
HEADLESS="xvfb-run --auto-servernum "
fi
fi
$HEADLESS"$MY_JAVA_HOME"java -Xms256m -Xmx512m -cp "$ADTPRO_HOME"lib/%ADTPRO_VERSION%:"$ADTPRO_HOME"lib/AppleCommander/AppleCommander-%AC_VERSION%.jar:"$ADTPRO_HOME"lib/jssc/jssc-%JSSC_VERSION%.jar:"$ADTPRO_HOME"lib/jssc/slf4j-nop-%SLF4J_VERSION%.jar org.adtpro.ADTPro $*
|
ADTPro/ADTPro
|
build/adtprobase.sh
|
Shell
|
gpl-2.0
| 1,444 |
#!/bin/bash
# this script is used for comparing decoding results between systems.
# e.g. local/chain/compare_wer_general.sh exp/chain_cleaned/tdnn_{c,d}_sp
# For use with discriminatively trained systems you specify the epochs after a colon:
# for instance,
# local/chain/compare_wer_general.sh exp/chain_cleaned/tdnn_c_sp exp/chain_cleaned/tdnn_c_sp_smbr:{1,2,3}
echo "# $0 $*"
include_looped=false
if [ "$1" == "--looped" ]; then
include_looped=true
shift
fi
used_epochs=false
# this function set_names is used to separate the epoch-related parts of the name
# [for discriminative training] and the regular parts of the name.
# If called with a colon-free directory name, like:
# set_names exp/chain_cleaned/tdnn_lstm1e_sp_bi_smbr
# it will set dir=exp/chain_cleaned/tdnn_lstm1e_sp_bi_smbr and epoch_infix=""
# If called with something like:
# set_names exp/chain_cleaned/tdnn_d_sp_smbr:3
# it will set dir=exp/chain_cleaned/tdnn_d_sp_smbr and epoch_infix="_epoch3"
set_names() {
if [ $# != 1 ]; then
echo "compare_wer_general.sh: internal error"
exit 1 # exit the program
fi
dirname=$(echo $1 | cut -d: -f1)
epoch=$(echo $1 | cut -s -d: -f2)
if [ -z $epoch ]; then
epoch_infix=""
else
used_epochs=true
epoch_infix=_epoch${epoch}
fi
}
echo -n "# System "
for x in $*; do printf "% 10s" " $(basename $x)"; done
echo
strings=("# WER on dev " "# WER on test ")
for n in 0 1; do
echo -n "${strings[$n]}"
for x in $*; do
set_names $x # sets $dirname and $epoch_infix
decode_names=(dev${epoch_infix} test${epoch_infix})
wer=$(grep WER $dirname/decode_${decode_names[$n]}/wer* | utils/best_wer.sh | awk '{print $2}')
printf "% 10s" $wer
done
echo
if $include_looped; then
echo -n "# [looped:] "
for x in $*; do
set_names $x # sets $dirname and $epoch_infix
decode_names=(dev${epoch_infix} test${epoch_infix})
wer=$(grep WER $dirname/decode_looped_${decode_names[$n]}/wer* | utils/best_wer.sh | awk '{print $2}')
printf "% 10s" $wer
done
echo
fi
done
if $used_epochs; then
exit 0; # the diagnostics aren't comparable between regular and discriminatively trained systems.
fi
echo -n "# Final train prob "
for x in $*; do
prob=$(grep Overall $x/log/compute_prob_train.final.log | grep -v xent | awk '{printf("%.4f", $8)}')
printf "% 10s" $prob
done
echo
echo -n "# Final valid prob "
for x in $*; do
prob=$(grep Overall $x/log/compute_prob_valid.final.log | grep -v xent | awk '{printf("%.4f", $8)}')
printf "% 10s" $prob
done
echo
echo -n "# Final train prob (xent)"
for x in $*; do
prob=$(grep Overall $x/log/compute_prob_train.final.log | grep -w xent | awk '{printf("%.4f", $8)}')
printf "% 10s" $prob
done
echo
echo -n "# Final valid prob (xent)"
for x in $*; do
prob=$(grep Overall $x/log/compute_prob_valid.final.log | grep -w xent | awk '{printf("%.4f", $8)}')
printf "% 10s" $prob
done
echo
|
michellemorales/OpenMM
|
kaldi/egs/fisher_english/s5/local/chain/compare_wer_general.sh
|
Shell
|
gpl-2.0
| 3,004 |
#!/bin/sh
## java env
export JAVA_HOME=/usr/local/java/jdk1.7.0_72
export JRE_HOME=$JAVA_HOME/jre
## you just need to change this param name
APP_NAME=fee
SERVICE_DIR=/home/wusc/edu/service/$APP_NAME
SERVICE_NAME=pay-service-$APP_NAME
JAR_NAME=$SERVICE_NAME\.jar
PID=$SERVICE_NAME\.pid
cd $SERVICE_DIR
case "$1" in
start)
nohup $JRE_HOME/bin/java -Xms128m -Xmx512m -jar $JAR_NAME >/dev/null 2>&1 &
echo $! > $SERVICE_DIR/$PID
echo "=== start $SERVICE_NAME"
;;
stop)
kill `cat $SERVICE_DIR/$PID`
rm -rf $SERVICE_DIR/$PID
echo "=== stop $SERVICE_NAME"
sleep 5
P_ID=`ps -ef | grep -w "$SERVICE_NAME" | grep -v "grep" | awk '{print $2}'`
if [ "$P_ID" == "" ]; then
echo "=== $SERVICE_NAME process not exists or stop success"
else
echo "=== $SERVICE_NAME process pid is:$P_ID"
echo "=== begin kill $SERVICE_NAME process, pid is:$P_ID"
kill -9 $P_ID
fi
;;
restart)
$0 stop
sleep 2
$0 start
echo "=== restart $SERVICE_NAME"
;;
*)
## restart
$0 stop
sleep 2
$0 start
;;
esac
exit 0
|
piaolinzhi/fight
|
dubbo/pay/应用部署脚本/service-fee.sh
|
Shell
|
gpl-2.0
| 1,232 |
#!/bin/sh
#
# $Id: xhbgtmk.sh 9279 2011-02-14 18:06:32Z druzus $
#
# ---------------------------------------------------------------
# Copyright 2003 Przemyslaw Czerpak <[email protected]>
# This script checks you have all tools to build xHarbour binaries
# installed then takes current xHarbour sources from SourceForge CVS
# and build binary RPMs at your local host
#
# See doc/license.txt for licensing terms.
# ---------------------------------------------------------------
# ssh is not necessary for anonymous access on SourceForge
# export CVS_RSH=ssh
export CVSROOT=":pserver:[email protected]:/cvsroot/xharbour"
export PROJECT=xharbour
test_reqrpm()
{
rpm -q --whatprovides "$1" &> /dev/null
}
TOINST_LST=""
for i in cvs make gcc binutils bison bash ncurses ncurses-devel
do
test_reqrpm "$i" || TOINST_LST="${TOINST_LST} $i"
done
_cvs_RSH="${CVS_RSH}"
[ -n "${_cvs_RSH}" ] || _cvs_RSH="rsh"
if ! which ${_cvs_RSH} &>/dev/null
then
if [ "${_cvs_RSH}" = "ssh" ]
then
TOINST_LST="${TOINST_LST} [open]ssh-clients"
else
TOINST_LST="${TOINST_LST} ${_cvs_RSH}"
fi
fi
if [ -z "${TOINST_LST}" ] || [ "$1" = "--force" ]
then
cd
mkdir -p CVS
cd CVS
if cvs -z3 co "${PROJECT}"; then
cd "${PROJECT}"
./make_rpm.sh "$*"
fi
else
echo "If you want to build xHarbour compilers"
echo "you have to install the folowing RPM files:"
echo "${TOINST_LST}"
echo ""
echo "If you want to force installation run this script with --force paramter:"
echo "$0 --force"
fi
|
xharbour/core
|
xhbgtmk.sh
|
Shell
|
gpl-2.0
| 1,551 |
#!/bin/bash
build_compile()
{
make
}
build_pack()
{
make install \
prefix=$BUILD_PACK/usr \
MANDIR=$BUILD_PACK/usr/share/man/man1
}
|
phyglos/phyglos
|
bundles/phyglos-deck.dir/tree-1.7.0.sh
|
Shell
|
gpl-2.0
| 146 |
# ----------------------------------------------------------------------------
# Speaks the correct pronunciation of an English word.
# Usage: zzpronuncia word
# Ex.: zzpronuncia apple
#
# Author: Thobias Salazar Trevisan, www.thobias.org
# Since: 2002-04-10
# Version: 4
# License: GPL
# Requirements: zzplay
# ----------------------------------------------------------------------------
zzpronuncia ()
{
zzzz -h pronuncia "$1" && return
local audio_file audio_dir
local palavra=$1
local cache=$(zztool cache pronuncia "$palavra.mp3")
local url='http://www.merriam-webster.com/dictionary'
local url2='http://media.merriam-webster.com/audio/prons/en/us/mp3'
# Parameter validation
test -n "$1" || { zztool -e uso pronuncia; return 1; }
# 'say' is a Mac OS X command, so there is nothing to download
if test -x /usr/bin/say
then
say $*
return
fi
# Fetch the MP3 file from the Internet if it is not in the cache
if ! test -f "$cache"
then
# Extract the audio file name from the dictionary site
audio_file=$(
$ZZWWWHTML "$url/$palavra" |
sed -n "/data-file=\"[^\"]*$palavra[^\"]*\"/{s/.*data-file=\"//;s/\".*//;p;}" |
uniq)
# Oops, nothing was extracted
if test -z "$audio_file"
then
zztool erro "$palavra: palavra não encontrada"
return 1
else
audio_file="${audio_file}.mp3"
fi
# The folder name is the first letter of the file (/a/apple001.mp3)
# Or "number" if it starts with a digit (/number/9while01.mp3)
audio_dir=$(echo $audio_file | cut -c1)
echo $audio_dir | grep '[0-9]' >/dev/null && audio_dir='number'
# Build the file URL and save it locally (cache)
$ZZWWWHTML "$url2/$audio_dir/$audio_file" > "$cache"
fi
# Speak, I'm listening
zzplay "$cache"
}
|
gmgall/funcoeszz
|
zz/zzpronuncia.sh
|
Shell
|
gpl-2.0
| 1,735 |
#!/bin/bash
#
# Allow all ICMP traffic.
#
set -e # Errors are fatal
iptables -D OUTPUT -p icmp -j DROP
echo "#"
echo "# Now allowing all ICMP traffic"
echo "#"
|
dakiri/splunk-network-monitor
|
bin/icmp_allow.sh
|
Shell
|
gpl-2.0
| 164 |
# ----------------------------------------------------------------------------
# Tests whether a number is valid for the selected category.
# Nothing is echoed to standard output; only the return code should be checked.
# The error output can be echoed by using the -e option before the category.
#
# Categories:
#  ano => Valid year
#  ano_bissexto | bissexto => Leap year
#  exp | exponencial => Number in scientific notation
#  numero | numero_natural => Natural number ( positive integer )
#  numero_sinal | inteiro => Integer ( positive or negative )
#  numero_fracionario => Fractional number ( decimal places )
#  numero_real => Real number ( decimal places allowed )
#  complexo => Complex number ( a+bi )
#  dinheiro => Monetary format ( 2 decimal places )
#  bin | binario => Binary number ( only 0 and 1 )
#  octal | octadecimal => Octal number ( 0 to 7 )
#  hexa | hexadecimal => Hexadecimal number ( 0 to 9 and A to F )
#  ip => IPv4 network address
#  ip6 | ipv6 => IPv6 network address
#  mac => Valid MAC address
#  data => Date in a valid format ( dd/mm/yyyy )
#  hora => Time in a valid format ( hh:mm )
#
# Note: ano, ano_bissexto and the natural, integer and real numbers
# take no thousands separator.
#
# Usage: zztestar [-e] category number
# Ex.: zztestar ano 1999
#      zztestar ip 192.168.1.1
#      zztestar hexa 4ca9
#      zztestar numero_real -45,678
#
# Author: Itamar <itamarnet (a) yahoo com br>
# Since: 2016-03-14
# Version: 2
# License: GPL
# Tags: number, test
# ----------------------------------------------------------------------------
zztestar ()
{
zzzz -h testar "$1" && return
local erro
# Should the error message be shown?
test '-e' = "$1" && erro=1 && shift
# Parameter validation
test -n "$1" || { zztool -e uso testar; return 1; }
case "$1" in
ano) zztool ${erro:+-e} testa_ano "$2" ;;
ano_bissexto | bissexto)
# Tests if $2 is a leap year
#
# A year is a leap year if it is evenly divisible by 4
# ...but not if it's evenly divisible by 100
# ...unless it's also evenly divisible by 400
# http://timeanddate.com
# http://www.delorie.com/gnu/docs/gcal/gcal_34.html
# http://en.wikipedia.org/wiki/Leap_year
#
local y=$2
test $((y%4)) -eq 0 && test $((y%100)) -ne 0 || test $((y%400)) -eq 0
test $? -eq 0 && return 0
test -n "$erro" && zztool erro "Ano bissexto inválido '$2'"
return 1
;;
exp | exponencial)
# Tests if $2 is a number in scientific notation
echo "$2" | sed 's/^-\([.,]\)/-0\1/;s/^\([.,]\)/0\1/' |
grep '^[+-]\{0,1\}[0-9]\{1,\}\([,.][0-9]\{1,\}\)\{0,1\}[eE][+-]\{0,1\}[0-9]\{1,\}$' >/dev/null && return 0
test -n "$erro" && zztool erro "Número exponencial inválido '$2'"
return 1
;;
numero | numero_natural) zztool ${erro:+-e} testa_numero "$2" ;;
numero_sinal | inteiro)
# Tests if $2 is a number (may have a sign: -2 +2)
echo "$2" | grep '^[+-]\{0,1\}[0-9]\{1,\}$' >/dev/null && return 0
test -n "$erro" && zztool erro "Número inteiro inválido '$2'"
return 1
;;
numero_fracionario)
# Tests if $2 is a fractional number (1.234 or 1,234)
# regex: \d+[,.]\d+
echo "$2" | grep '^[0-9]\{1,\}[,.][0-9]\{1,\}$' >/dev/null && return 0
test -n "$erro" && zztool erro "Número fracionário inválido '$2'"
return 1
;;
numero_real)
# Tests if $2 is a real number (1.234; 1,234; -56.789; 123)
# regex: [+-]?\d+([,.]\d+)?
echo "$2" | sed 's/^-\([.,]\)/-0\1/;s/^\([.,]\)/0\1/' |
grep '^[+-]\{0,1\}[0-9]\{1,\}\([,.][0-9]\{1,\}\)\{0,1\}$' >/dev/null && return 0
test -n "$erro" && zztool erro "Número real inválido '$2'"
return 1
;;
complexo)
# Tests if $2 is a complex number (3+5i or -9i)
# regex: ((\d+([,.]\d+)?)?[+-])?\d+([,.]\d+)?i
echo "$2" | sed 's/^-\([.,]\)/-0\1/;s/^\([.,]\)/0\1/' |
grep '^\(\([+-]\{0,1\}[0-9]\{1,\}\([,.][0-9]\{1,\}\)\{0,1\}\)\{0,1\}[+-]\)\{0,1\}[0-9]\{1,\}\([,.][0-9]\{1,\}\)\{0,1\}i$' >/dev/null && return 0
test -n "$erro" && zztool erro "Número complexo inválido '$2'"
return 1
;;
dinheiro)
# Tests if $2 is a monetary value (1.234,56 or 1234,56)
# regex: ( \d{1,3}(\.\d\d\d)+ | \d+ ),\d\d
echo "$2" | grep '^[+-]\{0,1\}\([0-9]\{1,3\}\(\.[0-9][0-9][0-9]\)\{1,\}\|[0-9]\{1,\}\),[0-9][0-9]$' >/dev/null && return 0
test -n "$erro" && zztool erro "Valor inválido '$2'"
return 1
;;
bin | binario)
# Tests if $2 is a binary number
echo "$2" | grep '^[01]\{1,\}$' >/dev/null && return 0
test -n "$erro" && zztool erro "Número binário inválido '$2'"
return 1
;;
octal | octadecimal)
# Tests if $2 is an octal number
echo "$2" | grep '^[0-7]\{1,\}$' >/dev/null && return 0
test -n "$erro" && zztool erro "Número octal inválido '$2'"
return 1
;;
hexa | hexadecimal)
# Tests if $2 is a hexadecimal number
echo "$2" | grep '^[0-9A-Fa-f]\{1,\}$' >/dev/null && return 0
test -n "$erro" && zztool erro "Número hexadecimal inválido '$2'"
return 1
;;
ip)
# Tests if $2 is an IPv4 address (nnn.nnn.nnn.nnn)
local nnn="\([0-9]\|[1-9][0-9]\|1[0-9][0-9]\|2[0-4][0-9]\|25[0-5]\)" # 0-255
echo "$2" | grep "^$nnn\.$nnn\.$nnn\.$nnn$" >/dev/null && return 0
test -n "$erro" && zztool erro "Número IP inválido '$2'"
return 1
;;
ip6 | ipv6)
# Tests if $2 is an IPv6 address (hhhh:hhhh:hhhh:hhhh:hhhh:hhhh:hhhh:hhhh)
echo "$2" |
awk -F : '
{
if ( $0 ~ /^:[^:]/ ) { exit 1 }
if ( $0 ~ /:::/ ) { exit 1 }
if ( $0 ~ /:$/ ) { exit 1 }
if ( NF<8 && $0 !~ /::/ ) { exit 1 }
if ( NF>8 ) { exit 1 }
if ( NF<=8 ) {
for (i=1; i<=NF; i++) {
if (length($i)>4) { exit 1 }
if (length($i)>0 && $i !~ /^[0-9A-Fa-f]+$/) { exit 1 }
}
}
}' && return 0
test -n "$erro" && zztool erro "Número IPV6 inválido '$2'"
return 1
;;
mac)
# Tests if $2 has a valid MAC address format
# The MAC may be in the formats 00:00:00:00:00:00, 00-00-00-00-00-00 or 0000.0000.0000
echo "$2" | egrep '^([0-9A-Fa-f]{2}-){5}[0-9A-Fa-f]{2}$' >/dev/null && return 0
echo "$2" | egrep '^([0-9A-Fa-f]{2}:){5}[0-9A-Fa-f]{2}$' >/dev/null && return 0
echo "$2" | egrep '^([0-9A-Fa-f]{4}\.){2}[0-9A-Fa-f]{4}$' >/dev/null && return 0
test -n "$erro" && zztool erro "MAC address inválido '$2'"
return 1
;;
data) zztool ${erro:+-e} testa_data "$2" ;;
hora)
# Tests if $2 is a time (hh:mm)
echo "$2" | grep "^\(0\{0,1\}[0-9]\|1[0-9]\|2[0-3]\):[0-5][0-9]$" >/dev/null && return 0
test -n "$erro" && zztool erro "Hora inválida '$2'"
return 1
;;
*)
# Any other option returns an error
test -n "$erro" && zztool erro "Opção '$1' inválida"
return 1
;;
esac
}
|
faustovaz/funcoeszz
|
zz/zztestar.sh
|
Shell
|
gpl-2.0
| 7,078 |
#!/bin/bash
set -e
# Installs modules as defined in environment variables
source "${EJABBERD_HOME}/docker/lib/base_config.sh"
source "${EJABBERD_HOME}/docker/lib/config.sh"
source "${EJABBERD_HOME}/docker/lib/base_functions.sh"
source "${EJABBERD_HOME}/docker/lib/functions.sh"
install_module_from_source() {
local module_name=$1
local module_source_path=${EJABBERD_HOME}/module_source/${module_name}
local module_install_folder=${EJABBERD_HOME}/.ejabberd-modules/sources/${module_name}
log "Analyzing module ${module_name} for installation"
# Make sure that the module exists in the source folder before attempting a copy
if [ ! -d ${module_source_path} ]; then
log "Error: Module ${module_name} not found in ${EJABBERD_HOME}/module_source"
log "Please use a shared volume to populate your module in ${EJABBERD_HOME}/module_source"
return 1;
fi
# Check to see if the module is already installed
local install_count=$(${EJABBERDCTL} modules_installed | grep -ce "^${module_name}[[:space:]]")
if [ $install_count -gt 0 ]; then
log "Error: Module already installed: ${module_name}"
return 1;
fi
# Copy the module into the shared folder
log "Copying module to ejabberd folder ${module_install_folder}"
mkdir -p ${module_install_folder}
cp -R ${module_source_path} ${module_install_folder}
# Run the ejabberdctl module_check on the module
log "Running module_check on ${module_name}"
${EJABBERDCTL} module_check ${module_name}
if [ $? -ne 0 ]; then
log "Module check failed for ${module_name}"
return 1;
fi
log "Module check succeeded for ${module_name}"
# Install the module
log "Running module_install on ${module_name}"
${EJABBERDCTL} module_install ${module_name}
if [ $? -ne 0 ]; then
log "Module installation failed for ${module_name}"
return 1;
fi
log "Module installation succeeded for ${module_name}"
return 0;
}
install_module_from_ejabberd_contrib() {
local module_name=$1
# Check to see if the module is already installed
local install_count=$(${EJABBERDCTL} modules_installed | grep -ce "^${module_name}[[:space:]]")
if [ $install_count -gt 0 ]; then
log "Error: Module already installed: ejabberd_contrib ${module_name}"
return 1;
fi
# Install the module
log "Running module_install on ejabberd_contrib ${module_name}"
${EJABBERDCTL} module_install ${module_name}
if [ $? -ne 0 ]; then
log "Module installation failed for ejabberd_contrib ${module_name}"
return 1;
fi
log "Module installation succeeded for ejabberd_contrib ${module_name}"
return 0;
}
enable_custom_auth_module_override() {
module_name=$1;
# When using custom authentication modules, the module name must be
# in the following pattern: ejabberd_auth_foo, where foo is the
# value you will use for your auth_method yml configuration.
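# For example (illustration): module_name=ejabberd_auth_foo passes the
# prefix check and results in "auth_method: [foo]" being appended below.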
required_prefix="ejabberd_auth_"
if [[ "${module_name}" != "${required_prefix}"* ]]; then
log "Error: module_name must begin with ${required_prefix}"
exit 1;
fi
log "Checking custom auth module: ${module_name}"
# Make sure the auth module is installed
local install_count=$(${EJABBERDCTL} modules_installed | grep -ce "^${module_name}[[:space:]]")
if [ $install_count -eq 0 ]; then
log "Error: custom auth_module not installed: ${module_name}"
return 1;
fi
custom_auth_method=${module_name#$required_prefix}
echo -e "\nauth_method: [${custom_auth_method}]" >> ${CONFIGFILE}
log "Custom auth module ${module_name} configuration complete."
}
file_exist ${FIRST_START_DONE_FILE} \
&& exit 0
is_restart_needed=0;
if [ -n "${EJABBERD_SOURCE_MODULES}" ]; then
for module_name in ${EJABBERD_SOURCE_MODULES} ; do
install_module_from_source ${module_name}
done
is_restart_needed=1;
fi
# Check the EJABBERD_CONTRIB_MODULES variable for any ejabberd_contrib modules
if [ -n "${EJABBERD_CONTRIB_MODULES}" ]; then
for module_name in ${EJABBERD_CONTRIB_MODULES} ; do
install_module_from_ejabberd_contrib ${module_name}
done
is_restart_needed=1;
fi
# If a custom module was defined for handling auth, we need to override
# the pre-defined auth methods in the config.
if [ -n "${EJABBERD_CUSTOM_AUTH_MODULE_OVERRIDE}" ]; then
enable_custom_auth_module_override "${EJABBERD_CUSTOM_AUTH_MODULE_OVERRIDE}"
is_restart_needed=1;
fi
# If any modules were installed, restart the server, if the option is enabled
if [ ${is_restart_needed} -eq 1 ]; then
if is_true ${EJABBERD_RESTART_AFTER_MODULE_INSTALL} ; then
log "Restarting ejabberd after successful module installation(s)"
${EJABBERDCTL} restart
child=$!
${EJABBERDCTL} "started"
wait $child
fi
fi
exit 0
|
lemenkov/ejabberd
|
docker/post/11_ejabberd_install_modules.sh
|
Shell
|
gpl-2.0
| 4,929 |
#!/bin/bash
## begin license ##
#
# "Meresco Components" are components to build searchengines, repositories
# and archives, based on "Meresco Core".
#
# Copyright (C) 2010 Seek You Too (CQ2) http://www.cq2.nl
# Copyright (C) 2012-2013, 2015-2016, 2020 Seecr (Seek You Too B.V.) https://seecr.nl
# Copyright (C) 2015 Koninklijke Bibliotheek (KB) http://www.kb.nl
# Copyright (C) 2016 SURFmarket https://surf.nl
# Copyright (C) 2020 Data Archiving and Network Services https://dans.knaw.nl
# Copyright (C) 2020 SURF https://www.surf.nl
# Copyright (C) 2020 Stichting Kennisnet https://www.kennisnet.nl
# Copyright (C) 2020 The Netherlands Institute for Sound and Vision https://beeldengeluid.nl
#
# This file is part of "Meresco Components"
#
# "Meresco Components" is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# "Meresco Components" is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with "Meresco Components"; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
## end license ##
export LANG=en_US.UTF-8
export PYTHONPATH=.:"$PYTHONPATH"
export WEIGHTLESS_COMPOSE_TEST="PYTHON"
export TESTMODE=true
export PYTHONWARNINGS=default
python3 ./_alltests.py "$@"
|
seecr/meresco-components
|
test/alltests.sh
|
Shell
|
gpl-2.0
| 1,666 |
#!/bin/bash
#mkdir -p /var/www/html/myhvacshop.com/wp-content/themes
#mkdir -p /var/www/html/myhvacshop.com/wp-content/plugins
echo ok
|
Doap/myhvacshop.com
|
scripts/makedirs.sh
|
Shell
|
gpl-2.0
| 136 |
PDC_COLS=74
PDC_LINES=48
PDC_FONT=fonts/pdcfont-tiles.bmp
#./wtfgame ./game/start-fixed-iso.js
gdb --args ./wtfgame ./game/start-fixed-iso.js
|
blamarche/wtfgame
|
release/start game-debug.sh
|
Shell
|
gpl-2.0
| 145 |
#! /bin/bash
function myecho()
{
echo ====================== BUDDYBOX MAGIC IN PROGRESS ===================================
}
# sudo is unnecessary here: the redirection runs as the invoking user anyway
echo -e "[user]\n\tname = EasyEngine\n\temail = [email protected]" > ~/.gitconfig
myecho
wget -qO ee rt.cx/ee && sudo bash ee stable || exit 1
myecho
sudo ee stack install || exit 1
myecho
sudo ee site create buddy.box --wp || exit 1
myecho
cd /var/www/buddy.box/ || exit 1
myecho
wp plugin install buddypress || exit 1
myecho
|
BoweFrankema/buddybox
|
buddybox.sh
|
Shell
|
gpl-2.0
| 472 |
#!/bin/bash
mkdir -p data/local
local=`pwd`/local
scripts=`pwd`/scripts
export PATH=$PATH:`pwd`/../../../tools/irstlm/bin
echo "Preparing train and test data"
train_base_name=train_yesno
test_base_name=test_yesno
waves_dir=$1
ls -1 $waves_dir > data/local/waves_all.list
cd data/local
../../local/create_yesno_waves_test_train.pl waves_all.list waves.test waves.train
../../local/create_yesno_wav_scp.pl ${waves_dir} waves.test > ${test_base_name}_wav.scp
../../local/create_yesno_wav_scp.pl ${waves_dir} waves.train > ${train_base_name}_wav.scp
../../local/create_yesno_txt.pl waves.test > ${test_base_name}.txt
../../local/create_yesno_txt.pl waves.train > ${train_base_name}.txt
cp ../../input/task.arpabo lm_tg.arpa
cd ../..
# This stage was copied from WSJ example
for x in train_yesno test_yesno; do
mkdir -p data/$x
cp data/local/${x}_wav.scp data/$x/wav.scp
cp data/local/$x.txt data/$x/text
cat data/$x/text | awk '{printf("%s global\n", $1);}' > data/$x/utt2spk
utils/utt2spk_to_spk2utt.pl <data/$x/utt2spk >data/$x/spk2utt
done
|
michellemorales/OpenMM
|
kaldi/egs/yesno/s5/local/prepare_data.sh
|
Shell
|
gpl-2.0
| 1,065 |
#!/bin/sh
# Copyright 2009 Enno Ruijters
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of version 2 of the GNU General
# Public License as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# Tests if a single file of two 1024-byte non-adjacent blocks on a tiny, full
# ext2 filesystem is correctly left unchanged on improvement request.
# This is a regression test for a bug which locked up the program in this case.
. ./test-lib.sh
test_begin "t1111-full"
load_image full
infra_cmd "echo \"dump_inode <12> before\nquit\n\" | debugfs full.img \
> /dev/null"
test_and_stop_on_error "defragmenting full ext2 disk with fragmented file" \
"echo \"i12\n0\" | e2defrag -i full.img > /dev/null"
test_and_continue "resulting image should not have file system errors" \
"e2fsck -f -y full.img 2>/dev/null > fsckout"
test_and_continue "file in image should be unchanged" \
"echo \"dump_inode <12> after\nquit\n\" \
| debugfs full.img \
> /dev/null 2>/dev/null && cmp before after"
test_end
|
ennoruijters/e2defrag
|
tests/t1111-full.sh
|
Shell
|
gpl-2.0
| 1,597 |
export PATH=/work/workspace/toolschain/gcc-3.4.5-glibc-2.3.6/bin:$PATH
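# Cross-compile U-Boot for the 100ask24x0 board using the toolchain above.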
#make distclean
make 100ask24x0_config
make
|
andyjhf/uboot
|
build.sh
|
Shell
|
gpl-2.0
| 116 |
#!/bin/bash
# Add the sample assets
start_date=$(date '+%FT%H:%M:%SZ')
end_date=$(date '+%FT%H:%M:%SZ' -d '+6 years')
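# e.g. start_date=2024-01-01T12:00:00Z, with end_date the same moment six
# years later (illustrative values).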
curl --header "Content-Type: application/json" \
--request POST \
--data "{
\"name\": \"Screenly Weather Widget\",
\"uri\": \"https://weather.srly.io\",
\"mimetype\": \"webpage\",
\"start_date\": \"$start_date\",
\"end_date\": \"$end_date\",
\"play_order\": 0,
\"is_enabled\": 1,
\"skip_asset_check\": 0
}" \
http://127.0.0.1:8080/api/v1.2/assets
curl --header "Content-Type: application/json" \
--request POST \
--data "{
\"name\": \"Screenly Clock Widget\",
\"uri\": \"https://clock.srly.io\",
\"mimetype\": \"webpage\",
\"start_date\": \"$start_date\",
\"end_date\": \"$end_date\",
\"play_order\": 1,
\"is_enabled\": 1,
\"skip_asset_check\": 0
}" \
http://127.0.0.1:8080/api/v1.2/assets
curl --header "Content-Type: application/json" \
--request POST \
--data "{
\"name\": \"Hacker News\",
\"uri\": \"https://news.ycombinator.com\",
\"mimetype\": \"webpage\",
\"start_date\": \"$start_date\",
\"end_date\": \"$end_date\",
\"play_order\": 2,
\"is_enabled\": 1,
\"skip_asset_check\": 0
}" \
http://127.0.0.1:8080/api/v1.2/assets
|
GreatFruitOmsk/screenly-ose
|
bin/prepare_device_for_imaging.sh
|
Shell
|
gpl-2.0
| 1,272 |
#!/bin/sh
DIRNAME=`dirname $1`
FILENAME=`basename $1`
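# Extract every incremental archive (GNU tar -G) found next to $1, in sorted
# order, into $2 if given (descriptive note, inferred from the tar usage
# below). Note: as written, archives sorting before $1's own name are
# extracted twice, once by each block in the loop.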
ls $DIRNAME/*.tar.xz | sort | while read fname; do
WNAME=`basename $fname`
if [ "$WNAME" \< "$FILENAME" ]
then
if [ "$2" \= "" ]
then
tar -xJGf "$fname"
else
mkdir "$2"
tar -C "$2" -xJGf "$fname"
fi
fi
if [ "$2" \= "" ]
then
tar -xJGf "$fname"
else
tar -C "$2" -xJGf "$fname"
fi
done
|
roandbox/backuptar
|
extract.sh
|
Shell
|
gpl-2.0
| 425 |
#!/bin/sh
# Copyright (C) 2012 Red Hat, Inc. All rights reserved.
#
# This file is part of LVM2.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
. lib/test
# If you change this, change the unit test case too.
aux prepare_pvs 6
#Locate the python binding library to use.
python_lib=`find $abs_top_builddir -name lvm.so`
# Unable to test python bindings if library not available
test -z "$python_lib" && skip
export PYTHONPATH=`dirname $python_lib`:$PYTHONPATH
skip
# skipped until fixed
# FIXME - script must ONLY use $(cat DEVICES) as PVs
# it must NOT create/modify/remove volumes from other places
python_lvm_unit.py -v
# nemiver python ../api/python_lvm_unit.py -v -f
|
Distrotech/LVM2
|
test/api/pytest.sh
|
Shell
|
gpl-2.0
| 1,021 |
#!/bin/bash
#Crosslink Copyright (C) 2016 NIAB EMR see included NOTICE file for details
#
# build crosslink_viewer and crosslink_graphical
#
set -eu
TYPE="-Wall -Wextra -O3"
#TYPE="-Wall -Wextra -g"
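# (uncomment the -g line above, and comment out the -O3 line, for a debug build)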
AUX=""
#uncomment to select alternative colour scheme
#AUX="-DALTCOLSCHEME"
gcc ${TYPE} ${AUX}\
-o crosslink_viewer\
crosslink_utils.c\
crosslink_ga.c\
crosslink_gibbs.c\
crosslink_group.c\
crosslink_viewer.c\
crosslink_viewer_main.c\
rjvparser.c\
-lSDL2 -lm
gcc ${TYPE} ${AUX}\
-o crosslink_graphical\
crosslink_utils.c\
crosslink_ga.c\
crosslink_gibbs.c\
crosslink_group.c\
crosslink_viewer.c\
crosslink_graphical.c\
crosslink_graphical_main.c\
rjvparser.c\
-lSDL2 -lm
|
eastmallingresearch/crosslink
|
scripts/viewer_make.sh
|
Shell
|
gpl-2.0
| 753 |
#!/bin/sh
set -e
set +u
# Avoid recursively calling this script.
if [ -n "$SF_MASTER_SCRIPT_RUNNING" ]
then
exit 0
fi
set -u
export SF_MASTER_SCRIPT_RUNNING=1
BUILD_ROOT=/tmp/PebbleVendor_build/build
SF_TARGET_NAME=PebbleVendor
SF_EXECUTABLE_PATH=libPebbleVendor.a
SF_WRAPPER_NAME=PebbleVendor.framework
SF_IPHONE_BUILT_PRODUCTS_DIR="$BUILD_ROOT/$CONFIGURATION-iphoneos"
SF_SIMULATOR_BUILT_PRODUCTS_DIR="$BUILD_ROOT/$CONFIGURATION-iphonesimulator"
SF_FAT_BUILT_PRODUCTS_DIR="$BUILD_ROOT/$CONFIGURATION-iphoneos+iphonesimulator"
SF_IPHONE_SDK_NAME=iphoneos7.0
SF_SIMULATOR_SDK_NAME=iphonesimulator7.0
# Build for the iphone platform: armv6 + armv7 + armv7s
xcodebuild -project "${PROJECT_FILE_PATH}" -target "${SF_TARGET_NAME}" -configuration "Release" -sdk ${SF_IPHONE_SDK_NAME} BUILD_DIR="${BUILD_ROOT}" OBJROOT="${OBJROOT}" BUILD_ROOT="${BUILD_ROOT}" ARCHS="armv6 armv7 armv7s" SYMROOT="${SYMROOT}" $ACTION
# Build the simulator platform i386
xcodebuild -project "${PROJECT_FILE_PATH}" -target "${SF_TARGET_NAME}" -configuration "Release" -sdk ${SF_SIMULATOR_SDK_NAME} BUILD_DIR="${BUILD_ROOT}" OBJROOT="${OBJROOT}" BUILD_ROOT="${BUILD_ROOT}" ARCHS="i386" SYMROOT="${SYMROOT}" $ACTION
# Smash the two static libraries into one fat binary and store it in the .framework
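# The .framework wrapper directory (Versions/A/...) is assumed to already
# exist, presumably created by an earlier build phase of this target.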
lipo -create "${SF_IPHONE_BUILT_PRODUCTS_DIR}/${SF_EXECUTABLE_PATH}" "${SF_SIMULATOR_BUILT_PRODUCTS_DIR}/${SF_EXECUTABLE_PATH}" -output "${SF_FAT_BUILT_PRODUCTS_DIR}/${SF_WRAPPER_NAME}/Versions/A/${SF_TARGET_NAME}"
|
sdeyerle/pebble_fun
|
PebbleSDK-2.0-BETA7/PebbleKit-iOS/PebbleVendor/build_framework.sh
|
Shell
|
gpl-2.0
| 1,478 |
#!/bin/bash
export PATH=$PATH:"/home/svtuser/Projects/dhARMa/toolchain/arm-2014.05/bin"
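# Assemble, then strip the ELF to a raw binary for the boot ROM and keep a
# disassembly listing for inspection.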
arm-none-eabi-as $1.s -o $1.o
arm-none-eabi-objcopy $1.o $1.bin -O binary
arm-none-eabi-objdump -SD $1.o > $1.obj
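# signgp prepends the GP-device image header (load address and size) that the
# TI boot ROM expects; MLO is the filename the ROM looks for when booting.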
../../bin/signgp ./$1.bin
mv $1.bin.ift MLO
|
mutex023/dhARMa
|
src/5-MLO-LedBlink-RTCInterrupt-RelocatedIntVec/build.sh
|
Shell
|
gpl-2.0
| 251 |
#!/bin/sh
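# noninteractive keeps debconf from prompting during the apt commands below.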
export DEBIAN_FRONTEND=noninteractive
apt-get update -qq
apt-get upgrade -qq
apt-get install -qq \
vim most screen htop pv \
curl wget jq net-tools netcat \
make git etckeeper
# set up git
git config --global user.name "${GIT_AUTHOR_NAME:-Markus Rudy}"
git config --global user.email "${GIT_AUTHOR_EMAIL:-[email protected]}"
|
burgerdev/devops
|
ubuntu-basic-packages.sh
|
Shell
|
gpl-2.0
| 355 |
python queryK.py -k 9 -K 30 -s 15 -b ../data/multipleK/boxquery30 -o ../data/multipleK/result30
|
updownlife/multipleK
|
src/queryK.sh
|
Shell
|
gpl-2.0
| 96 |
#!/bin/bash
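# Compile the German .po message catalogs into the binary .mo file that
# gettext loads at runtime.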
msgfmt -o /usr/share/locale/de/LC_MESSAGES/HTML-FormEngine.mo ../../../../HTML-FormEngine/locale/de/LC_MESSAGES/HTML-FormEngine.po HTML-FormEngine-DBSQL.po
|
gitpan/HTML-FormEngine-DBSQL
|
locale/de/LC_MESSAGES/create.sh
|
Shell
|
gpl-2.0
| 170 |
#!/bin/bash
cd /glusterfs/netapp/homes1/BOCONNOR/gitroot/pancancer-sandbox/bionimbus_monitor
# checks and turns off nodes it can't ssh to
perl test.pl --verbose --use-nova --glob-target 'fleet_master' --cluster-json /glusterfs/netapp/homes1/BOCONNOR/gitroot/decider/decider-bwa-pancancer/pdc_cluster.2.json --ssh-pem /glusterfs/netapp/homes1/BOCONNOR/.ssh/brian-pdc-3.pem --ssh-username BOCONNOR
|
ICGC-TCGA-PanCancer/pancancer-sandbox
|
bionimbus_monitor/test_nova.sh
|
Shell
|
gpl-2.0
| 397 |
#!/bin/bash
#
# Script: run_backup.sh
#
#
# Purpose: Backup an Oracle database using rman
#
# Usage: run_backup.sh sid
# where sid is the sid of the database to be backed up
# Logfiles created in /backup/backup_scripts/logs
#
# also sends a second backup to $DEST
#
#
# number of days to keep files
# used in find +mtime
#
case $# in
1) export SID=$1
;;
*) echo 'Usage: run_backup.sh ORACLE_SID '
echo ' '
exit 1;
;;
esac
# Days to keep log files
NDAYS=14
fra_top=/orafra
SID=$1
#Remote backup info
DEST=/mnt/orabackup/$SID
export PASSWD=ic3mann4#
MOUNTCMD="mount /mnt/orabackup"
#MOUNTCMD="echo mount .. $DEST"
REMOTEDAYS=1
DATE=`date +%Y%m%d`; export DATE
DT=`date '+%Y%m%d_%H%M'` ; export DT
export PATH=/usr/local/bin:$PATH
. $HOME/.bash_profile
LOG_DIR=$HOME/logs
export logfile=$LOG_DIR/backup_${SID}_${DT}.log
export errfile=$LOG_DIR/backup_${SID}_${DT}.err
touch $logfile
{
echo "Backup of database $SID started at `date` "
echo " "
if [ `egrep "^${SID}:" /etc/oratab|wc -l` -lt 1 ]; then
echo "Invalid SID entered $SID"
exit
fi
export ORAENV_ASK=NO
export ORACLE_SID=$SID
. oraenv > /dev/null 2> /dev/null
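# oraenv sets ORACLE_HOME and PATH for the chosen SID; ORAENV_ASK=NO
# suppresses its interactive prompt.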
# FOR COLD BACKUPS ONLY:
#sqlplus -S / as sysdba <<EOF
#shutdown immediate
#startup mount
#EOF
export NLS_DATE_FORMAT="dd-mon-yyyy hh24:mi:ss"
export TAG=Daily_${DT}
export TAGCF=Ctrl_File_${DT}
echo " "
rman target / <<EOF
run {
backup database plus archivelog tag $TAG;
backup current controlfile tag $TAGCF;
#delete noprompt obsolete;
}
EOF
rman target / <<EOF
report unrecoverable ;
EOF
# FOR COLD BACKUPS ONLY:
#sqlplus -S / as sysdba <<EOF
#alter database open;
#EOF
# Now try to backup to $DEST
if [ ! -d $DEST ]; then
# mount if directory is not available
$MOUNTCMD
if [ ! -d $DEST ]; then
echo "ERROR: Backup destination $DEST does not exist!"
fi
fi
if [ -d $DEST ]; then
# Starting remote BACKUP TO $DEST
rman target=/ <<EOF
run {
# remove references to files removed manually
delete noprompt expired backupset;
# copy backups (last 1 days) from FRA to $DEST (will be copied to tape)
backup backupset completed after 'SYSDATE-$REMOTEDAYS' to destination '$DEST' tag='REMOTE';
# delete old backups from $DEST
delete backupset tag='REMOTE' completed before 'SYSDATE-$REMOTEDAYS';
}
exit
EOF
else
echo "ERROR: Skipping remote backup to $DEST "
fi
} >> $logfile
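# NOTE: the exit below disables the error-reporting and FRA purge blocks
# that follow.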
exit 0
{
if [ `grep -i "ora-" $logfile| wc -l` -gt 0 -o `grep -i "rman-" $logfile| wc -l` -gt 0 ]; then
echo " "
echo "Errors in backup "
echo " "
grep -i "ora-" $logfile
grep -i "rman-" $logfile
echo " "
echo "LOGILE = " $logfile >> $errfile
echo "Errors in backup " >> $errfile
echo " ">> $errfile
grep -i "ora-" $logfile >> $errfile
grep -i "rman-" $logfile >> $errfile
echo " " >> $errfile
# ./send_email.sh "$ORACLE_SID Backup errors" $errfile email_list.txt
chmod 664 $errfile
fi
} >> $logfile
exit 0
# Purge old FRA files and crosscheck the rman catalog; unreachable while the
# exit above is in place.
{
echo " "
echo "Ensure no old files remain unpurged in the FRA"
cd $fra_top/$ORACLE_SID/archivelog
echo "Archive logs to be purged"
find . -name "*.arc" -mtime +$NDAYS -ls
find . -name "*.arc" -mtime +$NDAYS -exec rm {} \;
echo " "
echo "Backupsets in the FRA to be purged"
cd $fra_top/$ORACLE_SID/backupset
find . -name "*.bkp" -mtime +$NDAYS -ls
find . -name "*.bkp" -mtime +$NDAYS -exec rm {} \;
echo " "
echo "Autobackups in the FRA to be purged"
cd $fra_top/$ORACLE_SID/autobackup
find . -name "*.bkp" -mtime +$NDAYS -ls
find . -name "*.bkp" -mtime +$NDAYS -exec rm {} \;
echo " "
echo "Cross check archivelogs and backupsets in rman"
echo " "
rman target / <<eof
run {
crosscheck archivelog all;
delete noprompt expired archivelog all;
crosscheck backup;
delete noprompt expired backup;
}
eof
echo " "
echo "Backup of database $SID completed at `date` "
} >> $logfile
# Set permissions on logfile and backups to 664 so that anyone in the DBA group
# can purge them.
#
chmod 664 $logfile
#end of script
|
acmyonghua/oracle-1
|
rman_backup.sh
|
Shell
|
gpl-2.0
| 3,956 |
#!/usr/bin/ksh
echo "TYPE: paths with lspath " > ../DATA/$0.`uname -n`.command
lspath | grep -v Enabled > ../DATA/$0.`uname -n`.txt
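# Any MPIO path not in the Enabled state survives the grep and flags the
# check as BAD below.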
RC=$?
if [ -s "../DATA/$0.`uname -n`.txt" ] ; then
echo BAD > ../DATA/$0.`uname -n`.error
else
echo OK > ../DATA/$0.`uname -n`.error
fi
|
camp8chris/SYSTEMS
|
PLUGINS/Check_aix_lspaths.sh
|
Shell
|
gpl-2.0
| 292 |
#!/bin/bash
configfile='/root/.backup_config/backup'
configfile_secured='/tmp/backup'
if egrep -q -v '^#|^[^ ]*=[^;]*' "$configfile"; then
echo "Config file is unclean, cleaning it..." >&2
egrep '^#|^[^ ]*=[^;&]*' "$configfile" > "$configfile_secured"
configfile="$configfile_secured"
fi
source "$configfile"
_mon="$(date +'%m')"
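# Backups are keyed by month number, giving a rolling twelve-month rotation.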
cd /home/
for f in *; do
if [[ -d $f ]]; then
		backupfile="$backupdir/monthly/$f-$_mon.tar.gz"
		echo "$backupfile"
		tar -zcf "$backupfile" "$f/public_html" 2>>/var/log/backup-log/monthly/tar-error-$(date +"%d-%m-%Y").log
		s3File="s3://$awsbucketname$backupfile"
		echo "$s3File"
		aws s3 cp "$backupfile" "$s3File" >> /var/log/backup-log/monthly/s3upload-$(date +"%d-%m-%Y").log
		rm -f "$backupfile"
fi
done
|
muhilvannan/linux-backup-scripts
|
s3-monthly-backup.sh
|
Shell
|
gpl-2.0
| 741 |