| code (string, 2 to 1.05M chars) | repo_name (string, 5 to 110 chars) | path (string, 3 to 922 chars) | language (1 class) | license (15 classes) | size (int64, 2 to 1.05M) |
|---|---|---|---|---|---|
#!/bin/sh
. ~/shell_enhancements/helpers/git.sh
. ~/shell_enhancements/helpers/redirects.sh
# These commands help with repositories managed with gitflow semantics.
#
# If the user has `git-flow` installed (`brew install git-flow`) and is in a
# repo that has been set up with `git flow init`, the commands will attempt to
# use the `git-flow` names the user has set up.
#
# For general information on gitflow:
#
# * http://danielkummer.github.io/git-flow-cheatsheet/
# * http://nvie.com/posts/a-successful-git-branching-model/
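# Returns 0 when the repo's `git config` contains any `gitflow.*` keys, i.e.
# when `git flow init` has been run in this repo.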
gitflow_initialized () {
redirect_to_null git config --get-regexp gitflow
}
# Gives the default branch prefixes according to gitflow semantics.
gitflow_default_prefix () {
local branch_type
branch_type=$1
if [ -z "$branch_type" ] ; then
echo_error "Specify a branch type (e.g. hotfix, release)"
return 1
fi
local prefix
case "$branch_type" in
develop|master ) prefix="$branch_type" ;;
feature|hotfix|release ) prefix="$branch_type/" ;;
* ) prefix="" ;;
esac
if [ -z "$prefix" ] ; then
echo_error "Branch type doesn’t match gitflow semantics"
return 1
else
echo "$prefix"
fi
}
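# Example invocations (hypothetical):
#   gitflow_default_prefix "feature"   # -> "feature/"
#   gitflow_default_prefix "master"    # -> "master"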
# `git-flow` stores the following general info in each initialized repo’s
# `git-config`:
#
# * `gitflow.branch.master`
# * `gitflow.branch.develop`
# * `gitflow.prefix.feature`
# * `gitflow.prefix.release`
# * `gitflow.prefix.hotfix`
# * `gitflow.prefix.support`
# * `gitflow.prefix.versiontag`
#
# `gitflow_prefix` attempts to look these up; if they’re not available (i.e.
# `git-flow` has not been set up for the repo) it falls back to
# `gitflow_default_prefix`.
gitflow_prefix () {
local branch_type
branch_type=$1
if [ -z "$branch_type" ] ; then
echo_error "Specify a branch type (e.g. hotfix, release)"
return 1
fi
local prefix
local match
match=$(git config --get "gitflow.prefix.$branch_type")
if [ -n "$match" ] ; then
prefix="$match"
# `develop` and `master` are under `gitflow.branch` instead of
# `gitflow.prefix`
elif [ "$branch_type" = "develop" ] || [ "$branch_type" = "master" ] ; then
local branch
branch=$(git config --get "gitflow.branch.$branch_type")
if [ -z "$branch" ] ; then
prefix="$branch"
fi
fi
# `git-flow` doesn’t have anything; try defaults
if [ -z "$prefix" ] ; then
# TODO this gets around `echo_error` showing the same message twice if the
# branch doesn’t match, but calling a function twice isn’t great either.
redirect_to_null gitflow_default_prefix "$branch_type"
if [ "$?" -eq "0" ] ; then
prefix=$(gitflow_default_prefix "$branch_type")
fi
fi
# defaults don’t work either; give up
if [ -z "$prefix" ] ; then
echo_error "Branch type doesn’t match gitflow semantics"
return 1
else
echo "$prefix"
fi
}
# If a branch is set up with `git-flow`, `git-config` will have information
# about what the base branch: which branch this one was forked from and should
# merge to. This is stored under `gitflow.branch.*.base`, e.g.
# `gitflow.branch.hotfix/brains.base`.
#
# If the branch *wasn’t* set up with `git-flow` (the `gitflow.branch` key is
# missing), `gitflow_branch_base` guesses the base branch according to general
# gitflow semantics.
gitflow_branch_base () {
local branch=$1
if [ -z "$branch" ] ; then
echo_error "Specify a branch name"
return 1
fi
local base
local match
match=$(git config --get "gitflow.branch.$branch.base")
# gitflow has recorded the base
if [ -n "$match" ] ; then
base="$match"
# use defaults
else
local develop
develop=$(gitflow_prefix "develop")
local hotfix
hotfix=$(gitflow_prefix "hotfix")
case "$branch" in
$hotfix* ) base="master" ;;
* ) base="$develop" ;;
esac
fi
echo "$base"
}
# Determine base remote from available remotes using branch name.
#
# If 1) there’s a remote named `"upstream"` and 2) the branch name is
# `"develop"` or `"master"`, or is a hotfix branch, `"upstream"` will be used.
# Otherwise, `"origin"` will be.
gitflow_remote () {
local branch=$1
if [ -z "$branch" ] ; then
echo_error "Specify a branch name"
return 1
fi
local remote
git_in_initialized_repo || return 1
git_remote_exists "upstream"
if [ "$?" -eq "0" ] ; then
local develop
develop=$(gitflow_prefix "develop")
case "$branch" in
$develop|master ) remote="upstream" ;;
* ) remote="origin" ;;
esac
else
remote="origin"
fi
echo $remote
}
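# Rough usage sketch (assumes these helpers are sourced in a shell inside a
# gitflow-style repo; the branch names are hypothetical):
#
#   gitflow_prefix "hotfix"               # -> "hotfix/", or the configured gitflow.prefix.hotfix
#   gitflow_branch_base "hotfix/brains"   # -> "master", unless git-flow recorded another base
#   gitflow_remote "develop"              # -> "upstream" if that remote exists, otherwise "origin"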
| stilist/shell_enhancements | helpers/gitflow.sh | Shell | mit | 4,433 |
#!/bin/bash
set -e
cd `dirname $0`
[ $# -lt 1 ] && echo 'usage ./start.sh $MYID' && exit 1
chmod +x idgen_mon run.sh
mkdir -p log
export MYID=$1
./idgen_mon -l log/mon.log -d -m log/idgen_mon.pid ./run.sh
| eLong-INF/idgen | support-files/start.sh | Shell | mit | 211 |
#!/bin/bash
# wrapper script to launch mrMeshPy from MatLab TODO implement this later
# Mark Hymers, Andre Gouws 2017
unset LD_LIBRARY_PATH
exec /usr/bin/python "$@"
| andregouws/mrMeshPy | matlabRoutines/launchMeshPy.sh | Shell | mit | 175 |
#!/bin/bash
# bash script custom here
| huytbt/mydevtool-docker-node | script/custom.sh | Shell | mit | 38 |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
PACKAGES="ruby"
| hhvm/packaging | aws/ondemand/hhvm/user-documentation/packages.inc.sh | Shell | mit | 211 |
#!/bin/bash
cd "$(dirname "$0")"
java -cp ./bin:./PacManVsGhosts6.2.jar:./gson-2.2.2.jar pacman.experimentclient.ExperimentClient $1 $2
| stewartml/4thYearProject | agent/run.sh | Shell | mit | 136 |
#!/bin/bash
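# Mount the Pi SD card's boot (FAT32) and root (ext4) partitions, install the
# freshly cross-compiled kernel modules, back up the current kernel image, and
# copy in the new kernel, device tree blobs and overlays. Assumes $CCPREFIX and
# $KERNEL are already set in the environment.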
sudo mkdir -p /tmp/mnt/fat32
sudo mkdir -p /tmp/mnt/ext4
sudo mount /dev/mmcblk0p1 /tmp/mnt/fat32/
sudo mount /dev/mmcblk0p2 /tmp/mnt/ext4/
sudo make ARCH=arm CROSS_COMPILE=$CCPREFIX INSTALL_MOD_PATH=/tmp/mnt/ext4 modules_install
sudo cp /tmp/mnt/fat32/$KERNEL.img /tmp/mnt/fat32/$KERNEL-backup.img
sudo scripts/mkknlimg arch/arm/boot/zImage /tmp/mnt/fat32/$KERNEL.img
sudo cp arch/arm/boot/dts/*.dtb /tmp/mnt/fat32/
sudo cp arch/arm/boot/dts/overlays/*.dtb* /tmp/mnt/fat32/overlays/
sudo cp arch/arm/boot/dts/overlays/README /tmp/mnt/fat32/overlays/
sudo umount /tmp/mnt/fat32
sudo umount /tmp/mnt/ext4
| rvu95/rpi | wheezy/tools/kernel-compilation/install-new-kernel.sh | Shell | mit | 617 |
sudo apt-get install xfsprogs
| UedaTakeyuki/gc_setups | xfs.setup.sh | Shell | mit | 30 |
#!/bin/bash
# Specifying the icon(s) in the script
# This allows us to change its appearance conditionally
icon=""
player_status=$(playerctl status 2> /dev/null)
if [[ $? -eq 0 ]]; then
metadata="$(playerctl metadata artist) - $(playerctl metadata title)"
fi
# Foreground color formatting tags are optional
if [[ $player_status = "Playing" ]]; then
echo "%{F#D08770}$icon $metadata" # Orange when playing
elif [[ $player_status = "Paused" ]]; then
echo "%{F#65737E}$icon $metadata" # Greyed out info when paused
else
echo "%{F#65737E}$icon" # Greyed out icon when stopped
fi
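# One way to wire this into polybar (a sketch; assumes the script is installed
# at ~/.config/polybar/mpris.sh, adjust the path to your setup):
#
#   [module/mpris]
#   type = custom/script
#   exec = ~/.config/polybar/mpris.sh
#   interval = 1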
| TJuberg/dotfiles | polybar/.config/polybar/mpris.sh | Shell | mit | 625 |
#!/bin/bash
max_t_cost="$1"
max_m_cost="$2"
max_lanes="$3"
if [ -z "$max_t_cost" ]; then
echo "ERROR: Maximum time cost must be specified!" 1>&2
exit 1
fi
if [ -z "$max_m_cost" ]; then
echo "ERROR: Maximum memory cost must be specified!" 1>&2
exit 1
fi
if [ -z "$max_lanes" ]; then
echo "ERROR: Maximum number of lanes must be specified!" 1>&2
exit 1
fi
dirname="$(dirname "$0")"
cd "$dirname/.." || exit 1
echo "t_cost,m_cost,lanes,ms_i,ms_d,ms_id"
stdbuf -oL ./argon2-bench2 $max_t_cost $max_m_cost $max_lanes |
stdbuf -oL tail -n +2 |
while read line; do
print_comma=0
for x in $line; do
if [ $print_comma -eq 1 ]; then
echo -n ","
else
print_comma=1
fi
echo -n "$x"
done
echo
done
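# Hypothetical example: a benchmark line "2 65536 4 12.31 11.87 12.05" from
# argon2-bench2 is re-emitted as "2,65536,4,12.31,11.87,12.05" under the CSV
# header printed above.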
| WOnder93/argon2 | scripts/run-benchmark.sh | Shell | mit | 788 |
#!/usr/bin/env bash
echo "Running core provision script..."
sudo apt-get update
# Install build tools
sudo apt-get install -y make g++ libcairo2-dev libav-tools nfs-common ssh build-essential libssl-dev htop python-software-properties python
| weberamaral/ng4-developer | vagrant/provision/core.sh | Shell | mit | 244 |
# If example.rc.lua is missing, make a default one.
rc_lua=$PWD/example.rc.lua
test -f $rc_lua || /bin/cp /etc/xdg/awesome/rc.lua $rc_lua
# Just in case we're not running from /usr/bin
awesome=`which awesome`
xephyr=`which Xephyr`
pidof=`which pidof`
test -x $awesome || { echo "Awesome executable not found. Please install Awesome"; exit 1; }
test -x $xephyr || { echo "Xephyr executable not found. Please install Xephyr"; exit 1; }
function usage()
{
cat <<USAGE
awesome_test start|stop|restart|run
start Start nested Awesome in Xephyr
stop Stop Xephyr
restart Reload nested Awesome configuration
run Run command in nested Awesome
USAGE
exit 0
}
# WARNING: the following two functions expect that you only run one instance
# of Xephyr and the last launched Awesome runs in it
function awesome_pid()
{
$pidof awesome | cut -d\ -f1
}
function xephyr_pid()
{
$pidof Xephyr | cut -d\ -f1
}
[ $# -lt 1 ] && usage
case "$1" in
start)
$xephyr -ac -br -noreset -screen 800x600 :1 &
sleep 1
DISPLAY=:1.0 $awesome -c $rc_lua &
sleep 1
echo "Awesome ready for tests. PID is $(awesome_pid)"
;;
stop)
echo -n "Stopping Nested Awesome... "
if [ -z $(xephyr_pid) ]; then
echo "Not running: not stopped :)"
exit 0
else
kill $(xephyr_pid)
echo "Done."
fi
;;
restart)
echo -n "Restarting Awesome... "
kill -s SIGHUP $(awesome_pid)
;;
run)
shift
DISPLAY=:1.0 "$@" &
;;
*)
usage
;;
esac
| dr-slump/bajawa | conf/desktop-manager/awesome/tyrannical/utils/xephyr.sh | Shell | mit | 1,522 |
#!/bin/bash
# run parsec on a target host, results are generated under log/$BENCH-$TAG-$TIMESTAMP.runtime
# pthreads: whole benchmark suite; <name>: single benchmark (e.g. dedup)
BENCH="fluidanimate"
#BENCH="pthread"
# output file name tag, change it for each experiment
TAG="x1-3k-ple-test"
# default parameters
ESX="esx"
HOST="ubuntu1"
NR_THREADS=6
LOOP=3
PARSEC_PATH="~/parsec-2.1-inplace"
# -k: run in place, do not decompress from the input.tar, first run does not work with -k
# -c: build type (do not change)
CMD="cd $PARSEC_PATH; source env.sh; parsecmgmt -k -a run -p $BENCH -i native -n $NR_THREADS -c gcc-pthreads.pre"
# auto generated parameters
TIMESTAMP=`date +%m%d%y-%H%M`
LOG="$BENCH-$TAG-$TIMESTAMP.log"
# body
mkdir -p log
echo > log/$LOG
date | tee -a log/$LOG
for i in `seq 1 $LOOP`; do
# reset if you need sched-stat data
# ssh root@$ESX "sched-stats -r; sched-stats -s 1" | tee -a log/$LOG
# costop-3k
bash ./set-costop-3k.sh | tee -a log/$LOG
(time fab -H $HOST cmd:"$CMD") 2>&1 | tee -a log/$LOG
# costop-0
# bash ./set-costop-0.sh | tee -a log/$LOG
# (time fab -H $HOST cmd:"$CMD") 2>&1 | tee -a log/$LOG
# dump sched-stats
# ssh root@$ESX "vsi_traverse -o /vmfs/volumes/datastore1/log/$BENCH.log" | tee -a log/$LOG
done
date | tee -a log/$LOG
| jnouyang/esx-parsec-bench | run-parsec.sh | Shell | mit | 1,302 |
#!/bin/bash
# This script displays the message "This script will exit with
# a 0 exit status" and then does in fact exit with a 0 exit
# status. Confirm the same by typing "$?" in the CLI after
# running this script. "0: command not found" should be the
# result.
echo "This script will exit with a 0 exit status."
exit 0
| iPar/ShellScripting | Exercises/02-ExitStatuses-ReturnCodes/01-exitStatus0.sh | Shell | mit | 341 |
#!/bin/sh
set -e
export VSINSTALLDIR="C:\Program Files (x86)\Microsoft Visual Studio\2019\Community"
export VisualStudioVersion="16.0"
docfx ./docs/docfx.json
SOURCE_DIR=$PWD
TEMP_REPO_DIR=$PWD/../my-project-gh-pages
echo "Removing temporary doc directory $TEMP_REPO_DIR"
rm -rf $TEMP_REPO_DIR
mkdir $TEMP_REPO_DIR
echo "Cloning the repo with the gh-pages branch"
git clone https://github.com/markvincze/Stubbery --branch gh-pages $TEMP_REPO_DIR
echo "Clear repo directory"
cd $TEMP_REPO_DIR
git rm -r *
echo "Copy documentation into the repo"
cp -r $SOURCE_DIR/docs/_site/* .
if [ "$APPVEYOR_REPO_BRANCH" == "master" ]
then
echo "Push the new docs to the remote branch"
git add . -A
git commit --allow-empty -m "Update generated documentation"
git push origin gh-pages
else
echo "Not on master, skipping pushing docs"
fi
| markvincze/Stubbery | releaseDocs.sh | Shell | mit | 849 |
docker build -t neuralensemble/simulationx --no-cache .
| NeuralEnsemble/neuralensemble-docker | simulationx/regenerate.sh | Shell | mit | 56 |
#!/bin/bash
USER=hungpham2511
IMAGE=toppra-dep
VERSION=0.0.3
echo "Building docker image: $USER/$IMAGE:$VERSION"
docker build -t ${IMAGE} .
docker tag ${IMAGE} ${USER}/${IMAGE}:${VERSION}
docker tag ${IMAGE} ${USER}/${IMAGE}:latest
| hungpham2511/toppra | dockerfiles/build.sh | Shell | mit | 234 |
#!/bin/bash
# inspired by https://github.com/benjie/dotfiles
git pull
function setLink {
src="$1"
target="$2"
if [ -h "$target" ]
then
return;
fi
ln -s "$src" "$target"
}
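# Note: setLink only creates the symlink when the target is not already a
# symlink; it never overwrites an existing regular file (ln -s simply fails).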
function gitClone {
giturl="$1"
target="$2"
if [ -d "$target" ]
then
#TODO: test that it is a git copy?
return
fi
git clone "$giturl" "$target"
}
setLink "$PWD/ssh_config" ~/.ssh/config
setLink "$PWD/vimrc" ~/.vimrc
setLink "$PWD/bashrc" ~/.bashrc
setLink "$PWD/gitconfig" ~/.gitconfig
setLink "$PWD/ctags" ~/.ctags
setLink "$PWD/perltidyrc" ~/.perltidyrc
setLink "$PWD/dir_colors.solarized" ~/.dir_colors.solarized
setLink "$PWD/editorconfig" ~/.editorconfig
#vim plugins
mkdir -p ~/.vim/bundle
gitClone git://github.com/altercation/vim-colors-solarized.git ~/.vim/bundle/vim-colors-solarized
gitClone https://github.com/kchmck/vim-coffee-script.git ~/.vim/bundle/vim-coffee-script
gitClone git://github.com/digitaltoad/vim-jade.git ~/.vim/bundle/vim-jade
gitClone https://github.com/plasticboy/vim-markdown.git ~/.vim/bundle/vim-markdown
gitClone https://github.com/will133/vim-dirdiff.git ~/.vim/bundle/vim-dirdiff
gitClone https://github.com/majutsushi/tagbar.git ~/.vim/bundle/tagbar
gitClone https://github.com/kien/ctrlp.vim.git ~/.vim/bundle/ctrlp.vim
gitClone https://github.com/editorconfig/editorconfig-vim.git ~/.vim/bundle/editorconfig-vim
mkdir -p ~/.vim/plugin
setLink "$PWD/dotfiles/vim_tagbar" ~/.vim/plugin/tagbar.vim
mkdir -p ~/.vim/autoload
setLink "$PWD/vim_pathogen" ~/.vim/autoload/pathogen.vim
setLink "$PWD/vim_tagbar_autoload" ~/.vim/autoload/tagbar.vim
mkdir -p ~/.vim/syntax
setLink "$PWD/vim_tagbar_syntax" ~/.vim/syntax/tagbar.vim
# liquidprompt - best bash/zsh PS1 I've seen!
gitClone https://github.com/nojhan/liquidprompt.git ~/liquidprompt
echo all done
| antonpiatek/dotfiles | install.sh | Shell | mit | 1,972 |
#!/bin/bash
# This is used to clear up all the junk which gets left behind after slurm has run a job
rm worker_*
rm std*
| dcf21/4most-4gp-scripts | src/lunarc/delete_junk.sh | Shell | mit | 124 |
#!/usr/bin/env bash
##
# Deploy stack update to remote instance.
#
# @example
# # Deploy target defaults to the 'prod' remote instance.
# make stack-deploy
# # Or :
# cwt/extensions/remote/stack/deploy.sh
#
# # Deploy to the 'dev' remote instance.
# make stack-deploy 'dev'
# # Or :
# cwt/extensions/remote/stack/deploy.sh 'dev'
#
p_remote_id="$1"
if [[ -z "$p_remote_id" ]]; then
p_remote_id='prod'
fi
# TODO (wip) Detect containers that may need to be rebuilt ?
# Turn this into an abstract entry point ?
cwt/extensions/remote/remote/exec.sh "$p_remote_id" \
'git pull && cwt/instance/reinit.sh && cwt/instance/restart.sh'
| Paulmicha/common-web-tools | cwt/extensions/remote/stack/deploy.sh | Shell | mit | 649 |
#!/bin/sh -eux
# Add repo
cat <<EOT >> /etc/yum.repos.d/MariaDB.repo
# MariaDB 10.3 CentOS repository list - created 2018-09-18 11:39 UTC
# http://downloads.mariadb.org/mariadb/repositories/
[mariadb]
name = MariaDB
baseurl = http://yum.mariadb.org/10.3/centos7-amd64
gpgkey=https://yum.mariadb.org/RPM-GPG-KEY-MariaDB
gpgcheck=1
EOT
# Install MariaDB
yum -y install MariaDB-server MariaDB-client
systemctl enable mariadb;
systemctl start mariadb;
| robbaier/vagrant-boxes | centos/scripts/7/mariadb.sh | Shell | mit | 451 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for DLA-146-1
#
# Security announcement date: 2015-02-06 00:00:00 UTC
# Script generation date: 2017-01-01 21:08:52 UTC
#
# Operating System: Debian 6 (Squeeze)
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - krb5:1.8.3+dfsg-4squeeze9
#
# Last versions recommended by security team:
# - krb5:1.8.3+dfsg-4squeeze11
#
# CVE List:
# - CVE-2014-5352
# - CVE-2014-9421
# - CVE-2014-9422
# - CVE-2014-9423
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade krb5=1.8.3+dfsg-4squeeze11 -y
| Cyberwatch/cbw-security-fixes | Debian_6_(Squeeze)/x86_64/2015/DLA-146-1.sh | Shell | mit | 685 |
#!/bin/bash
## Stop and restart frontend from Git sources
docker stop web-dev
docker rm web-dev
cd /home/gossart/camomile-web-frontend
git pull
docker rmi klm8/camomile-web-frontend-dev
docker build -t klm8/camomile-web-frontend-dev .
docker run -d --restart=always -p 8080:8070 -e CAMOMILE_API=http://vmjoker.limsi.fr:32781 -e CAMOMILE_LOGIN=admin -e CAMOMILE_PASSWORD=p455w0rd --name web-dev klm8/camomile-web-frontend-dev
| kLm8/scripts | ex.sh | Shell | mit | 428 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for RHSA-2013:0120
#
# Security announcement date: 2013-01-08 06:42:24 UTC
# Script generation date: 2017-01-01 21:14:20 UTC
#
# Operating System: Red Hat 5
# Architecture: i386
#
# Vulnerable packages fix on version:
# - quota.i386:3.13-8.el5
# - quota-debuginfo.i386:3.13-8.el5
#
# Last versions recommended by security team:
# - quota.i386:3.13-8.el5
# - quota-debuginfo.i386:3.13-8.el5
#
# CVE List:
# - CVE-2012-3417
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo yum install quota.i386-3.13 -y
sudo yum install quota-debuginfo.i386-3.13 -y
| Cyberwatch/cbw-security-fixes | Red_Hat_5/i386/2013/RHSA-2013:0120.sh | Shell | mit | 704 |
#!/usr/bin/env bash
# Use python's argparse module in shell scripts
#
# The function `argparse` parses its arguments using
# argparse.ArgumentParser; the parser is defined in the function's
# stdin.
#
# Executing ``argparse.bash`` (as opposed to sourcing it) prints a
# script template.
#
# https://github.com/nhoffman/argparse-bash
# MIT License - Copyright (c) 2015 Noah Hoffman
argparse(){
argparser=$(mktemp 2>/dev/null || mktemp -t argparser)
cat > "$argparser" <<EOF
from __future__ import print_function
import sys
import argparse
import os
class MyArgumentParser(argparse.ArgumentParser):
def print_help(self, file=None):
"""Print help and exit with error"""
super(MyArgumentParser, self).print_help(file=file)
sys.exit(1)
parser = MyArgumentParser(prog=os.path.basename("$0"),
description="""$ARGPARSE_DESCRIPTION""")
EOF
# stdin to this function should contain the parser definition
cat >> "$argparser"
cat >> "$argparser" <<EOF
args = parser.parse_args()
for arg in [a for a in dir(args) if not a.startswith('_')]:
key = arg.upper()
value = getattr(args, arg, None)
if isinstance(value, bool) or value is None:
print('{0}="{1}";'.format(key, 'yes' if value else ''))
elif isinstance(value, list):
print('{0}=({1});'.format(key, ' '.join('"{0}"'.format(s) for s in value)))
else:
print('{0}="{1}";'.format(key, value))
EOF
# Define variables corresponding to the options if the args can be
# parsed without errors; otherwise, print the text of the error
# message.
if python "$argparser" "$@" &> /dev/null; then
eval $(python "$argparser" "$@")
retval=0
else
python "$argparser" "$@"
retval=1
fi
rm "$argparser"
return $retval
}
# print a script template when this script is executed
if [[ $0 == *argparse.bash ]]; then
cat <<FOO
#!/usr/bin/env bash
source \$(dirname \$0)/argparse.bash || exit 1
argparse "\$@" <<EOF || exit 1
parser.add_argument('infile')
parser.add_argument('-o', '--outfile')
EOF
echo "INFILE: \${INFILE}"
echo "OUTFILE: \${OUTFILE}"
FOO
fi
| nhoffman/argparse-bash | argparse.bash | Shell | mit | 2,158 |
#! /bin/sh
export KSROOT=/koolshare
source $KSROOT/scripts/base.sh
eval `dbus export webrecord_`
cp -rf /tmp/webrecord/init.d/* $KSROOT/init.d/
cp -rf /tmp/webrecord/scripts/* $KSROOT/scripts/
cp -rf /tmp/webrecord/webs/* $KSROOT/webs/
cp /tmp/webrecord/uninstall.sh $KSROOT/scripts/uninstall_webrecord.sh
chmod +x $KSROOT/scripts/webrecord_*
dbus set softcenter_module_webrecord_description=查看网址和搜索记录 # "view visited URLs and search history"
dbus set softcenter_module_webrecord_install=1
dbus set softcenter_module_webrecord_name=webrecord
dbus set softcenter_module_webrecord_title="上网记录" # "browsing history"
dbus set softcenter_module_webrecord_version=0.1
sleep 1
rm -rf /tmp/webrecord >/dev/null 2>&1
| koolshare/ledesoft | webrecord/webrecord/install.sh | Shell | mit | 677 |
#!/usr/bin/env bash
# Launches all components of the OpenOCR service
#
# How to run this script for generic docker
#
# https://github.com/tleyden/open-ocr/blob/master/README.md
#
# How to run this script for Orchard docker PAAS
#
# https://github.com/tleyden/open-ocr/wiki/Installation-on-Orchard
#
RABBITMQ_HOST=rabbitmq
RABBITMQ_PASS=aasderljcwei3jlbc93
HTTP_PORT=20080
OPENOCR_ACCESS_NETWORK="openocr" # public access via "bridge"
DOCKER=docker
export AMQP_URI=amqp://admin:${RABBITMQ_PASS}@${RABBITMQ_HOST}/
$DOCKER network create rabbitmq
$DOCKER network create openocr
$DOCKER run -d -p 5672:5672 -p 15672:15672 --name="rabbitmq" --net="rabbitmq" -e RABBITMQ_PASS=${RABBITMQ_PASS} tutum/rabbitmq
echo "Waiting 30s for rabbit MQ to startup .."
sleep 30 # workaround for startup race condition issue
$DOCKER run -d -p ${HTTP_PORT}:${HTTP_PORT} --name="openocr" --net="$OPENOCR_ACCESS_NETWORK" tleyden5iwx/open-ocr open-ocr-httpd -amqp_uri "${AMQP_URI}" -http_port ${HTTP_PORT}
$DOCKER network connect rabbitmq openocr
$DOCKER run -d --name="openocr-worker" --net="rabbitmq" tleyden5iwx/open-ocr open-ocr-worker -amqp_uri "${AMQP_URI}"
| rori-dev/lunchbox | backend-play-akka-scala/scripts/docker-run-openocr.sh | Shell | mit | 1,153 |
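# Enable direnv's bash hook only when the direnv binary is actually installed.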
type direnv &>/dev/null \
&& eval "$(direnv hook bash)"
| sax/dotfiles | bash_it/direnv.bash | Shell | mit | 58 |
#!/bin/bash
export PATH=/usr/local/bin:/bin:/sbin:/usr/bin:/usr/sbin
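# Derive the Django project name from the DJANGO_SETTINGS_MODULE line in
# manage.py (e.g. "myproject.settings" -> "myproject"); it becomes the Celery
# app name passed to -A below.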
MOD=$(awk '/DJANGO_SETTINGS_MODULE/{ gsub("\"|[.].*", "", $2); print $2}' manage.py)
export NEW_RELIC_CONFIG_FILE=/www/config/production/newrelic.ini
export NEW_RELIC_ENVIRONMENT=production
exec 2>&1
exec newrelic-admin run-program celery -A $MOD worker $*
| ArabellaTech/aa-docker-tools | production/pycommon/celery-newrelic.sh | Shell | mit | 331 |
#!/bin/bash
DATE_STAMP=` date +%d-%m-%Y-%T`
echo ${DATE_STAMP}
/home/pi/filebot/opt/share/filebot/bin/filebot.sh -rename /media/usbstick/Torrent_complete/* --format "/media/usbstick/Film_rename/{n}/Season {s}/{n} - {sxe} - {t}" --db thetvdb -non-strict
| collegboi/Raspi-Media | tvrenamer.sh | Shell | mit | 265 |
#!/bin/bash
#
# The Unix toolset used in the script is installed along with
# MSysGit/Git for Windows.
#
CURDIR=$(dirname "$0")
cd $CURDIR/..
cd lib
SYSNAME=$(uname -s)
if [[ "${SYSNAME:0:5}" == "MINGW" ]]; then
MSBUILD_PATH=$(reg.exe query \
"HKLM\\SOFTWARE\\Microsoft\\MSBuild\\ToolsVersions\\12.0" \
//v MSBuildToolsPath \
| grep REG_SZ | sed 's/.*REG_SZ[[:space:]]\+//')
[[ -z "$MSBUILD_PATH" ]] && { echo "FAIL: MSBuild v12 not installed"; exit; }
"${MSBUILD_PATH}MSBuild.exe" *.sln
else
make
fi
| mrts/ioc-cpp | scripts/build.sh | Shell | mit | 514 |
#!/bin/bash
for conf in $@; do
sudo systemd-tmpfiles --create $conf 2>&1 | sed 's/^/ /'
test ${PIPESTATUS[1]} -ne 0 && exit 1
echo " VALID: $conf"
done
| Undeterminant/config-etc | test/tmpfiles.sh | Shell | cc0-1.0 | 157 |
#!/bin/bash
# mvn clean install
# mkdir -p target/results
# mkdir -p target/result-logs
export VALUE_FACTORY_FACTORY="VF_PDB_PERSISTENT_CURRENT,VF_SCALA,VF_CLOJURE,VF_PDB_PERSISTENT_MEMOIZED_LAZY"
######
export AGGREGATED_SETTINGS="-jvmArgsPrepend -Xms4g -jvmArgsPrepend -Xmx4g -wi 10 -i 20 -f 1 -r 1 -gc true -rf csv -v NORMAL -foe true -bm avgt -p valueFactoryFactory=$VALUE_FACTORY_FACTORY -p sampleDataSelection=MATCH -p producer=PDB_INTEGER"
export SET_BENCHMARKS="nl.cwi.swat.jmh_dscg_benchmarks.JmhSetBenchmarks.(timeContainsKey|timeContainsKeyNotContained|timeInsert|timeInsertContained|timeRemoveKey|timeRemoveKeyNotContained|timeIteration|timeEqualsRealDuplicate|timeEqualsDeltaDuplicate)$"
export MAP_BENCHMARKS="nl.cwi.swat.jmh_dscg_benchmarks.JmhMapBenchmarks.(timeContainsKey|timeContainsKeyNotContained|timeInsert|timeInsertContained|timeRemoveKey|timeRemoveKeyNotContained|timeIteration|timeEntryIteration|timeEqualsRealDuplicate|timeEqualsDeltaDuplicate)$"
LD_LIBRARY_PATH=~/lib/ java -jar target/benchmarks.jar $SET_BENCHMARKS $AGGREGATED_SETTINGS -p run=0 -rff ./target/results/results.JmhSetBenchmarks.run0.log # 1>./target/result-logs/results.std-console.JmhSetBenchmarks.run0.log 2>./target/result-logs/results.err-console.JmhSetBenchmarks.run0.log
LD_LIBRARY_PATH=~/lib/ java -jar target/benchmarks.jar $SET_BENCHMARKS $AGGREGATED_SETTINGS -p run=1 -rff ./target/results/results.JmhSetBenchmarks.run1.log # 1>./target/result-logs/results.std-console.JmhSetBenchmarks.run1.log 2>./target/result-logs/results.err-console.JmhSetBenchmarks.run1.log
LD_LIBRARY_PATH=~/lib/ java -jar target/benchmarks.jar $SET_BENCHMARKS $AGGREGATED_SETTINGS -p run=2 -rff ./target/results/results.JmhSetBenchmarks.run2.log # 1>./target/result-logs/results.std-console.JmhSetBenchmarks.run2.log 2>./target/result-logs/results.err-console.JmhSetBenchmarks.run2.log
LD_LIBRARY_PATH=~/lib/ java -jar target/benchmarks.jar $SET_BENCHMARKS $AGGREGATED_SETTINGS -p run=3 -rff ./target/results/results.JmhSetBenchmarks.run3.log # 1>./target/result-logs/results.std-console.JmhSetBenchmarks.run3.log 2>./target/result-logs/results.err-console.JmhSetBenchmarks.run3.log
LD_LIBRARY_PATH=~/lib/ java -jar target/benchmarks.jar $SET_BENCHMARKS $AGGREGATED_SETTINGS -p run=4 -rff ./target/results/results.JmhSetBenchmarks.run4.log # 1>./target/result-logs/results.std-console.JmhSetBenchmarks.run4.log 2>./target/result-logs/results.err-console.JmhSetBenchmarks.run4.log
LD_LIBRARY_PATH=~/lib/ java -jar target/benchmarks.jar $MAP_BENCHMARKS $AGGREGATED_SETTINGS -p run=0 -rff ./target/results/results.JmhMapBenchmarks.run0.log # 1>./target/result-logs/results.std-console.JmhMapBenchmarks.run0.log 2>./target/result-logs/results.err-console.JmhMapBenchmarks.run0.log
LD_LIBRARY_PATH=~/lib/ java -jar target/benchmarks.jar $MAP_BENCHMARKS $AGGREGATED_SETTINGS -p run=1 -rff ./target/results/results.JmhMapBenchmarks.run1.log # 1>./target/result-logs/results.std-console.JmhMapBenchmarks.run1.log 2>./target/result-logs/results.err-console.JmhMapBenchmarks.run1.log
LD_LIBRARY_PATH=~/lib/ java -jar target/benchmarks.jar $MAP_BENCHMARKS $AGGREGATED_SETTINGS -p run=2 -rff ./target/results/results.JmhMapBenchmarks.run2.log # 1>./target/result-logs/results.std-console.JmhMapBenchmarks.run2.log 2>./target/result-logs/results.err-console.JmhMapBenchmarks.run2.log
LD_LIBRARY_PATH=~/lib/ java -jar target/benchmarks.jar $MAP_BENCHMARKS $AGGREGATED_SETTINGS -p run=3 -rff ./target/results/results.JmhMapBenchmarks.run3.log # 1>./target/result-logs/results.std-console.JmhMapBenchmarks.run3.log 2>./target/result-logs/results.err-console.JmhMapBenchmarks.run3.log
LD_LIBRARY_PATH=~/lib/ java -jar target/benchmarks.jar $MAP_BENCHMARKS $AGGREGATED_SETTINGS -p run=4 -rff ./target/results/results.JmhMapBenchmarks.run4.log # 1>./target/result-logs/results.std-console.JmhMapBenchmarks.run4.log 2>./target/result-logs/results.err-console.JmhMapBenchmarks.run4.log
######
TIMESTAMP=`date +"%Y%m%d_%H%M"`
echo $TIMESTAMP > LAST_TIMESTAMP_MICROBENCHMARKS.txt
INPUT_FILES=target/results/results.Jmh*.log
RESULTS_FILE=target/results/results.all-$TIMESTAMP.log
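# Merge the per-run CSV files: keep a single header line (taken from the first
# file) and append every file's data rows with their individual headers stripped.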
RESULT_HEADER=`echo $INPUT_FILES | xargs -n 1 head -n 1 | head -n 1`
{
for f in $INPUT_FILES
do
tail -n +2 $f
done
} | cat <(echo $RESULT_HEADER) - > $RESULTS_FILE
STD_CONSOLE_LOG_FILES=target/result-logs/results.std-console.*.log
PERF_STAT_LOG_FILES=target/result-logs/results.perf-stat.*.log
# RESULTS_FILE_PERF_STAT=target/results/results.all-$TIMESTAMP.perf-stat.log
# PERF_HEADER=`echo $PERF_STAT_LOG_FILES | xargs -n 1 head -n 1 | head -n 1 | sed -e 's/^/benchmark,/'`
# {
# for f in $PERF_STAT_LOG_FILES
# do
# CURRENT_BENCHMARK=`echo "$f" | sed 's/.*\.time\([^.]*\)\(.*\)/\1/'`
# tail -n +2 $f | sed -e "s/^/$CURRENT_BENCHMARK,/"
# done
# } | cat <(echo $PERF_HEADER) - | xz -9 > $RESULTS_FILE_PERF_STAT.xz
# java -Xmx12G -XX:+UseCompressedOops -javaagent:`echo $(cd $(dirname ~); pwd)/$(basename ~)`/.m2/repository/com/google/memory-measurer/1.0-SNAPSHOT/memory-measurer-1.0-SNAPSHOT.jar -cp target/benchmarks.jar nl.cwi.swat.jmh_dscg_benchmarks.CalculateFootprints && mv map-sizes-and-statistics.csv target/map-sizes-and-statistics-32bit-$TIMESTAMP.csv
# java -Xmx12G -XX:-UseCompressedOops -javaagent:`echo $(cd $(dirname ~); pwd)/$(basename ~)`/.m2/repository/com/google/memory-measurer/1.0-SNAPSHOT/memory-measurer-1.0-SNAPSHOT.jar -cp target/benchmarks.jar nl.cwi.swat.jmh_dscg_benchmarks.CalculateFootprints && mv map-sizes-and-statistics.csv target/map-sizes-and-statistics-64bit-$TIMESTAMP.csv
# create empty placeholders
touch target/map-sizes-and-statistics-32bit-$TIMESTAMP.csv
touch target/map-sizes-and-statistics-64bit-$TIMESTAMP.csv
java -Xms4g -Xmx4g -XX:+UseCompressedOops -javaagent:`pwd`/lib/memory-measurer.jar -cp target/benchmarks.jar nl.cwi.swat.jmh_dscg_benchmarks.CalculateFootprints && mv map-sizes-and-statistics.csv target/map-sizes-and-statistics-32bit-$TIMESTAMP.csv
java -Xms4g -Xmx4g -XX:-UseCompressedOops -javaagent:`pwd`/lib/memory-measurer.jar -cp target/benchmarks.jar nl.cwi.swat.jmh_dscg_benchmarks.CalculateFootprints && mv map-sizes-and-statistics.csv target/map-sizes-and-statistics-64bit-$TIMESTAMP.csv
# clean temporary file (if cancelled)
rm -f map-sizes-and-statistics.csv
ARCHIVE_PATH=`pwd`/resources/r
ARCHIVE_NAME=$ARCHIVE_PATH/hamt-benchmark-results-$TIMESTAMP.tgz
echo "Current Working Directory: `pwd`"
echo "Archive Path: $ARCHIVE_PATH"
RESULTS_FILES=`pwd`/target/results/results.all-$TIMESTAMP*
FOOTPRINT_FILES=`pwd`/target/map-sizes-and-statistics-*.csv
cp $RESULTS_FILES $FOOTPRINT_FILES $ARCHIVE_PATH
(cd target && tar -czf $ARCHIVE_NAME results result-logs $RESULTS_FILES $FOOTPRINT_FILES)
| msteindorfer/oopsla15-artifact | oopsla15-benchmarks/runMicrobenchmarks.sh | Shell | epl-1.0 | 6,725 |
# export AWS_ACCESS_KEY="Your-Access-Key"
# export AWS_SECRET_KEY="Your-Secret-Key"
today=`date +"%d-%m-%Y","%T"`
logfile="/awslog/ec2-access.log"
# Grab all Security Groups IDs for ALLOW action and export the IDs to a text file
sudo aws ec2 describe-security-groups --filters Name=tag:open-allports-time,Values=21-00 Name=tag:bash-profile,Values=wd --query SecurityGroups[].[GroupId] --output text > ~/tmp/allowall_wd_info.txt 2>&1
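# allowall_wd_info.txt now holds one security group ID per line, e.g. (made-up IDs):
#   sg-0123456789abcdef0
#   sg-0a1b2c3d4e5f67890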
# Take list of changing security groups
for group_id in $(cat ~/tmp/allowall_wd_info.txt)
do
# Change rules in security group
sudo aws ec2 authorize-security-group-ingress --group-id $group_id --protocol all --port all --cidr 0.0.0.0/0
# Put info into log file
echo Attempt $today allow access to instances with attached group $group_id for all ports >> $logfile
done
| STARTSPACE/aws-access-to-ec2-by-timetable | all/allow-wd/all-allow-wd-21.sh | Shell | gpl-2.0 | 807 |
# Test the vector and matrix addition methods
cleanup ()
{
rm -rf $TEMPDIR
rm -f a.out
true
}
fail ()
{
cleanup
exit 1
}
pass ()
{
cleanup
exit 0
}
include_path='-I '`echo $search_path | sed -e 's/:/ -I /g'`
if [ $? -ne 0 ] ; then fail ; fi
here=`pwd`
if [ $? -ne 0 ] ; then fail ; fi
TEMPDIR=/tmp/$$
mkdir -p $TEMPDIR
if [ $? -ne 0 ] ; then fail ; fi
cd $TEMPDIR
if [ $? -ne 0 ] ; then fail ; fi
cat << EOF > $TEMPDIR/test.cc
#line $[$LINENO+2] "$0"
#include <ann/matrix.h>
#include <iostream>
#include <math.h>
using namespace std;
bool
fcmp(float f1, float f2, float tolerance)
{
float diff = f1 -f2;
if ( fabs(diff) > tolerance) return false;
return true;
}
int
main(int argc, char **argv)
{
/* First test the vectors */
ann::vector v1(5);
v1[0]=-12.09; v1[1]=4.09; v1[2]=-1.098; v1[3]=9.093;v1[4]=43.09;
ann::vector v2(5);
v2[0]=-121.09; v2[1]=41.09; v2[2]=-21.098; v2[3]=29.093;v2[4]=243.09;
ann::vector v3 = v1 + v2;
assert ( fcmp( v3[0], v1[0] + v2[0],0.0001 ));
assert ( fcmp( v3[1], v1[1] + v2[1],0.0001 ));
assert ( fcmp( v3[2], v1[2] + v2[2],0.0001 ));
assert ( fcmp( v3[3], v1[3] + v2[3],0.0001 ));
assert ( fcmp( v3[4], v1[4] + v2[4],0.0001 ));
/* now test the matrices */
const float m100=-09.207;
const float m110=-909.207;
const float m200=97.22307;
const float m210=89.7;
ann::matrix m1(2,1);
m1.set(0,0,m100);
m1.set(1,0,m110);
ann::matrix m2(2,1);
m2.set(0,0,m200);
m2.set(1,0,m210);
ann::matrix m3(2,1);
m3.set(0,0,m200+ m100);
m3.set(1,0,m210 + m110);
ofstream ofs1("$TEMPDIR/foo1");
ofs1 << m3;
m1+=m2;
ofstream ofs2("$TEMPDIR/foo2");
ofs2 << m1 ;
return 0;
}
EOF
if [ $? -ne 0 ] ; then fail ; fi
$CXX $TEMPDIR/test.cc $include_path -L$builddir -lann
if [ $? -ne 0 ] ; then fail ; fi
LD_LIBRARY_PATH=$builddir ./a.out
if [ $? -ne 0 ] ; then fail ; fi
diff $TEMPDIR/foo1 $TEMPDIR/foo2
if [ $? -ne 0 ] ; then fail ; fi
pass
| chrisoei/Libann | test/00/t0013a.sh | Shell | gpl-2.0 | 1,988 |
#!/bin/bash
# programs with acyclic state spaces
nidhuggc -- -sc stf.c &> log/stf.log
nidhuggc -- -sc stf_bug.c &> log/stf_bug.log
nidhuggc -- -sc spin08.c &> log/spin08.log
nidhuggc -- -sc fib.c &> log/fib.log
nidhuggc -- -sc fib_bug.c &> log/fib_bug.log
nidhuggc -- -sc ccnf9.c &> log/ccnf9.log
nidhuggc -- -sc ccnf17.c &> log/ccnf17.log
nidhuggc -- -sc ccnf19.c &> log/ccnf19.log
nidhuggc -- -sc ssb.c &> log/ssb.log
nidhuggc -- -sc ssb1.c &> log/ssb1.log
nidhuggc -- -sc ssb3.c &> log/ssb3.log
nidhuggc -- -sc ssb4.c &> log/ssb4.log
nidhuggc -- -sc ssb8.c &> log/ssb8.log
# programs with non-acyclic state spaces
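# The -unroll=N flag bounds loop unrolling so that exploration of these cyclic
# state spaces terminates.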
nidhuggc -- -sc szymanski.c &> log/szymanski.log
nidhuggc -unroll=10 -- -sc dekker.c &> log/dekker.log
nidhuggc -unroll=10 -- -sc lamport.c &> log/lamport.log
nidhuggc -unroll=10 -- -sc peterson.c &> log/peterson.log
nidhuggc -unroll=10 -- -sc pgsql.c &> log/pgsql.log
nidhuggc -unroll=10 -- -sc rwlock.c &> log/rwlock.log
nidhuggc -unroll=2 -- -sc rwlock2.c &> log/rwlock2.log
nidhuggc -unroll=5 -- -sc prodcons.c &> log/prodcons.log
nidhuggc -unroll=5 -- -sc prodcons2.c &> log/prodcons2.log
| marcelosousa/poet | benchmarks/concur15/nidhugg/run.sh | Shell | gpl-2.0 | 1,113 |
#!/bin/bash
# initialize
#cwd=$(pwd)
#cuda_deb=http://developer.download.nvidia.com/compute/cuda/repos/ubuntu1404/x86_64/cuda-repo-ubuntu1404_7.5-18_amd64.deb
# download & install cuda
#wget -O cuda.deb $cuda_deb
ln -s /media/ogawak/ssd21/cuda_related/cuda-repo-ubuntu1404-7-0-local_7.0-28_amd64.deb cuda.deb
sudo dpkg -i cuda.deb
#sudo apt-get update
sudo apt-get install -y --force-yes cuda
sudo apt-get install -y --force-yes nvidia-prime
# setup vars
cp ~/.bashrc ~/.bashrc_bak
echo "export CUDA_HOME=/usr/local/cuda" >> ~/.bashrc
echo "export LD_LIBRARY_PATH=\$CUDA_HOME/lib64" >> ~/.bashrc
echo "export PATH=\$CUDA_HOME/bin:\$PATH" >> ~/.bashrc
source ~/.bashrc
| topazS50/setup_linux | install/cuda_install.sh | Shell | gpl-2.0 | 674 |
#!/bin/bash
if [ ! -e odoors ]
then
git clone https://github.com/apamment/odoors
fi
cd odoors
make
cd ..
gcc -c main.c -o main.o -I./odoors/ -I/usr/include/lua5.3
gcc -c interbbs2.c -o interbbs2.o
gcc -c inih/ini.c -o inih/ini.o
gcc -o GalacticDynasty main.o interbbs2.o inih/ini.o odoors/libs-`uname -s`/libODoors.a -lsqlite3 -llua5.3
| apamment/galactic-dynasty | build.sh | Shell | gpl-2.0 | 345 |
#! /bin/bash
set -x
originalDirectory=$(pwd)
cd ..
wget https://github.com/wikimedia/mediawiki/archive/$MW.tar.gz
tar -zxf $MW.tar.gz
mv mediawiki-$MW phase3
cd phase3
git checkout $MW
composer install --prefer-source
mysql -e 'create database its_a_mw;'
php maintenance/install.php --dbtype $DBTYPE --dbuser root --dbname its_a_mw --dbpath $(pwd) --pass nyan TravisWiki admin
cd extensions
cp -r $originalDirectory GitHub
cd GitHub
composer install --prefer-source
cd ../..
echo 'require_once( __DIR__ . "/extensions/GitHub/GitHub.php" );' >> LocalSettings.php
echo 'error_reporting(E_ALL| E_STRICT);' >> LocalSettings.php
echo 'ini_set("display_errors", 1);' >> LocalSettings.php
echo '$wgShowExceptionDetails = true;' >> LocalSettings.php
echo '$wgDevelopmentWarnings = true;' >> LocalSettings.php
echo "putenv( 'MW_INSTALL_PATH=$(pwd)' );" >> LocalSettings.php
php maintenance/update.php --quick
| BrotherPhil/GitHub | build/travis/before_script.sh | Shell | gpl-2.0 | 915 |
#!/bin/sh
# sdr101-java
# Simple software-defined radio for Java.
#
# (c) Karl-Martin Skontorp <[email protected]> ~ http://22pf.org/
# Licensed under the GNU GPL 2.0 or later.
java -Xprof=help -cp /home/kms/.m2/repository/junit/junit/4.4/junit-4.4.jar:target/classes:target/test-classes org.junit.runner.JUnitCore org.picofarad.sdr101.$1
| kms/sdr101-java | profile-single-test.sh | Shell | gpl-2.0 | 340 |
#!/bin/bash
if [[ "$TERM" != *xterm* ]]; then
if [ ! -z "`which konsole`" ]; then
konsole -e "$0" "keep"
else
xterm -e "$0" "keep"
fi
exit
fi
mkdir -p .build5
cd .build5
if ! cmake -DWITH_QT5=ON -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_BUILD_TYPE=Release ..; then
echo "CONFIGURATION FAILED :-("
exit
fi
echo "BUILDING..."
if ! make; then
echo "BUILD FAILED :-("
exit
fi
echo "... INSTALL:"
if sudo make install; then
echo
echo "... SUCCESS!"
fi
if [ "$1" = "keep" ]; then
echo "======================================"
echo "Press ENTER to quit ..."
read foo
fi
| luebking/virtuality | buildQt5.sh | Shell | gpl-2.0 | 629 |
#!/bin/bash
# Copyright 2012 Johns Hopkins University (Author: Daniel Povey).
# 2013 Xiaohui Zhang
# 2013 Guoguo Chen
# 2014 Vimal Manohar
# Apache 2.0.
# This script trains neural network with pnorm nonlinearities.
# The difference with train_tanh.sh is that, instead of setting
# hidden_layer_size, you should set pnorm_input_dim and pnorm_output_dim.
# Also the P value (the order of the p-norm) should be set.
#
# [Vimal Manohar - Oct 2014]
# The script now supports realignment during training, which can be done by
# specifying realign_epochs.
# Begin configuration section.
cmd=run.pl
num_epochs=15 # Number of epochs during which we reduce
# the learning rate; number of iteration is worked out from this.
num_epochs_extra=5 # Number of epochs after we stop reducing
# the learning rate.
num_iters_final=20 # Maximum number of final iterations to give to the
# optimization over the validation set.
initial_learning_rate=0.04
final_learning_rate=0.004
bias_stddev=0.5
softmax_learning_rate_factor=1.0 # In the default setting keep the same learning rate.
combine_regularizer=1.0e-14 # Small regularizer so that parameters won't go crazy.
pnorm_input_dim=3000
pnorm_output_dim=300
p=2
minibatch_size=128 # by default use a smallish minibatch size for neural net
# training; this controls instability which would otherwise
# be a problem with multi-threaded update. Note: it also
# interacts with the "preconditioned" update which generally
# works better with larger minibatch size, so it's not
# completely cost free.
samples_per_iter=200000 # each iteration of training, see this many samples
# per job. This option is passed to get_egs.sh
num_jobs_nnet=16 # Number of neural net jobs to run in parallel. This option
# is passed to get_egs.sh.
get_egs_stage=0
shuffle_buffer_size=5000 # This "buffer_size" variable controls randomization of the samples
# on each iter. You could set it to 0 or to a large value for complete
# randomization, but this would both consume memory and cause spikes in
# disk I/O. Smaller is easier on disk and memory but less random. It's
# not a huge deal though, as samples are anyway randomized right at the start.
add_layers_period=2 # by default, add new layers every 2 iterations.
num_hidden_layers=3
stage=-5
io_opts="--max-jobs-run 5" # for jobs with a lot of I/O, limits the number running at one time.
splice_width=4 # meaning +- 4 frames on each side for second LDA
randprune=4.0 # speeds up LDA.
alpha=4.0
max_change=10.0
mix_up=0 # Number of components to mix up to (should be > #tree leaves, if
# specified.)
num_threads=16
parallel_opts="--num-threads 16 --mem 1G" # by default we use 16 threads; this lets the queue know.
# note: parallel_opts doesn't automatically get adjusted if you adjust num-threads.
cleanup=true
egs_dir=
lda_opts=
lda_dim=
egs_opts=
transform_dir= # If supplied, overrides alidir
cmvn_opts= # will be passed to get_lda.sh and get_egs.sh, if supplied.
# only relevant for "raw" features, not lda.
feat_type= # Can be used to force "raw" features.
prior_subset_size=10000 # 10k samples per job, for computing priors. Should be
# more than enough.
align_cmd= # The cmd that is passed to steps/nnet2/align.sh
align_use_gpu= # Passed to use_gpu in steps/nnet2/align.sh [yes/no]
realign_epochs= # List of epochs, the beginning of which realignment is done
num_jobs_align=30 # Number of jobs for realignment
# End configuration section.
echo "$0 $@" # Print the command line for logging
if [ -f path.sh ]; then . ./path.sh; fi
. parse_options.sh || exit 1;
if [ $# != 4 ]; then
echo "Usage: $0 [opts] <data> <lang> <ali-dir> <exp-dir>"
echo " e.g.: $0 data/train data/lang exp/tri3_ali exp/tri4_nnet"
echo ""
echo "Main options (for others, see top of script file)"
echo " --config <config-file> # config file containing options"
echo " --cmd (utils/run.pl|utils/queue.pl <queue opts>) # how to run jobs."
echo " --num-epochs <#epochs|15> # Number of epochs of main training"
echo " # while reducing learning rate (determines #iterations, together"
echo " # with --samples-per-iter and --num-jobs-nnet)"
echo " --num-epochs-extra <#epochs-extra|5> # Number of extra epochs of training"
echo " # after learning rate fully reduced"
echo " --initial-learning-rate <initial-learning-rate|0.02> # Learning rate at start of training, e.g. 0.02 for small"
echo " # data, 0.01 for large data"
echo " --final-learning-rate <final-learning-rate|0.004> # Learning rate at end of training, e.g. 0.004 for small"
echo " # data, 0.001 for large data"
echo " --num-hidden-layers <#hidden-layers|2> # Number of hidden layers, e.g. 2 for 3 hours of data, 4 for 100hrs"
echo " --add-layers-period <#iters|2> # Number of iterations between adding hidden layers"
echo " --mix-up <#pseudo-gaussians|0> # Can be used to have multiple targets in final output layer,"
echo " # per context-dependent state. Try a number several times #states."
echo " --num-jobs-nnet <num-jobs|8> # Number of parallel jobs to use for main neural net"
echo " # training (will affect results as well as speed; try 8, 16)"
echo " # Note: if you increase this, you may want to also increase"
echo " # the learning rate."
echo " --num-threads <num-threads|16> # Number of parallel threads per job (will affect results"
echo " # as well as speed; may interact with batch size; if you increase"
echo " # this, you may want to decrease the batch size."
echo " --parallel-opts <opts|\"--num-threads 16 --mem 1G\"> # extra options to pass to e.g. queue.pl for processes that"
echo " # use multiple threads... "
echo " --io-opts <opts|\"--max-jobs-run 10\"> # Options given to e.g. queue.pl for jobs that do a lot of I/O."
echo " --minibatch-size <minibatch-size|128> # Size of minibatch to process (note: product with --num-threads"
echo " # should not get too large, e.g. >2k)."
echo " --samples-per-iter <#samples|400000> # Number of samples of data to process per iteration, per"
echo " # process."
echo " --splice-width <width|4> # Number of frames on each side to append for feature input"
echo " # (note: we splice processed, typically 40-dimensional frames"
echo " --lda-dim <dim|250> # Dimension to reduce spliced features to with LDA"
echo " --num-iters-final <#iters|20> # Number of final iterations to give to nnet-combine-fast to "
echo " # interpolate parameters (the weights are learned with a validation set)"
echo " --egs-opts <opts> # Extra options to pass to get_egs.sh"
echo " --lda-opts <opts> # Extra options to pass to get_lda.sh"
echo " --realign-epochs <list-of-epochs|\"\"> # A list of space-separated epoch indices the beginning of which"
echo " # realignment is to be done"
echo " --align-cmd (utils/run.pl|utils/queue.pl <queue opts>) # passed to align.sh"
echo " --align-use-gpu (yes/no) # specify is gpu is to be used for realignment"
echo " --num-jobs-align <#njobs|30> # Number of jobs to perform realignment"
echo " --stage <stage|-9> # Used to run a partially-completed training process from somewhere in"
echo " # the middle."
exit 1;
fi
data=$1
lang=$2
alidir=$3
dir=$4
if [ ! -z "$realign_epochs" ]; then
[ -z "$align_cmd" ] && echo "$0: realign_epochs specified but align_cmd not specified" && exit 1
[ -z "$align_use_gpu" ] && echo "$0: realign_epochs specified but align_use_gpu not specified" && exit 1
fi
# Check some files.
for f in $data/feats.scp $lang/L.fst $alidir/ali.1.gz $alidir/final.mdl $alidir/tree; do
[ ! -f $f ] && echo "$0: no such file $f" && exit 1;
done
# Set some variables.
num_leaves=`tree-info $alidir/tree 2>/dev/null | grep num-pdfs | awk '{print $2}'` || exit 1
[ -z $num_leaves ] && echo "\$num_leaves is unset" && exit 1
[ "$num_leaves" -eq "0" ] && echo "\$num_leaves is 0" && exit 1
nj=`cat $alidir/num_jobs` || exit 1; # number of jobs in alignment dir...
# in this dir we'll have just one job.
sdata=$data/split$nj
utils/split_data.sh $data $nj
mkdir -p $dir/log
echo $nj > $dir/num_jobs
cp $alidir/tree $dir
utils/lang/check_phones_compatible.sh $lang/phones.txt $alidir/phones.txt || exit 1;
cp $lang/phones.txt $dir || exit 1;
extra_opts=()
[ ! -z "$cmvn_opts" ] && extra_opts+=(--cmvn-opts "$cmvn_opts")
[ ! -z "$feat_type" ] && extra_opts+=(--feat-type $feat_type)
[ ! -z "$online_ivector_dir" ] && extra_opts+=(--online-ivector-dir $online_ivector_dir)
[ -z "$transform_dir" ] && transform_dir=$alidir
extra_opts+=(--transform-dir $transform_dir)
extra_opts+=(--splice-width $splice_width)
if [ $stage -le -4 ]; then
echo "$0: calling get_lda.sh"
steps/nnet2/get_lda.sh $lda_opts "${extra_opts[@]}" --cmd "$cmd" $data $lang $alidir $dir || exit 1;
fi
# these files will have been written by get_lda.sh
feat_dim=`cat $dir/feat_dim` || exit 1;
lda_dim=`cat $dir/lda_dim` || exit 1;
if [ $stage -le -3 ] && [ -z "$egs_dir" ]; then
echo "$0: calling get_egs.sh"
steps/nnet2/get_egs.sh $egs_opts "${extra_opts[@]}" \
--samples-per-iter $samples_per_iter \
--num-jobs-nnet $num_jobs_nnet --stage $get_egs_stage \
--cmd "$cmd" $egs_opts --io-opts "$io_opts" \
$data $lang $alidir $dir || exit 1;
fi
if [ -z $egs_dir ]; then
egs_dir=$dir/egs
fi
iters_per_epoch=`cat $egs_dir/iters_per_epoch` || exit 1;
! [ $num_jobs_nnet -eq `cat $egs_dir/num_jobs_nnet` ] && \
echo "$0: Warning: using --num-jobs-nnet=`cat $egs_dir/num_jobs_nnet` from $egs_dir"
num_jobs_nnet=`cat $egs_dir/num_jobs_nnet` || exit 1;
if ! [ $num_hidden_layers -ge 1 ]; then
echo "Invalid num-hidden-layers $num_hidden_layers"
exit 1
fi
if [ $stage -le -2 ]; then
echo "$0: initializing neural net";
lda_mat=$dir/lda.mat
ext_lda_dim=$lda_dim
ext_feat_dim=$feat_dim
stddev=`perl -e "print 1.0/sqrt($pnorm_input_dim);"`
cat >$dir/nnet.config <<EOF
SpliceComponent input-dim=$ext_feat_dim left-context=$splice_width right-context=$splice_width
FixedAffineComponent matrix=$lda_mat
AffineComponentPreconditioned input-dim=$ext_lda_dim output-dim=$pnorm_input_dim alpha=$alpha max-change=$max_change learning-rate=$initial_learning_rate param-stddev=$stddev bias-stddev=$bias_stddev
PnormComponent input-dim=$pnorm_input_dim output-dim=$pnorm_output_dim p=$p
NormalizeComponent dim=$pnorm_output_dim
AffineComponentPreconditioned input-dim=$pnorm_output_dim output-dim=$num_leaves alpha=$alpha max-change=$max_change learning-rate=$initial_learning_rate param-stddev=0 bias-stddev=0
SoftmaxComponent dim=$num_leaves
EOF
# to hidden.config it will write the part of the config corresponding to a
# single hidden layer; we need this to add new layers.
cat >$dir/hidden.config <<EOF
AffineComponentPreconditioned input-dim=$pnorm_output_dim output-dim=$pnorm_input_dim alpha=$alpha max-change=$max_change learning-rate=$initial_learning_rate param-stddev=$stddev bias-stddev=$bias_stddev
PnormComponent input-dim=$pnorm_input_dim output-dim=$pnorm_output_dim p=$p
NormalizeComponent dim=$pnorm_output_dim
EOF
$cmd $dir/log/nnet_init.log \
nnet-am-init $alidir/tree $lang/topo "nnet-init $dir/nnet.config -|" \
$dir/0.mdl || exit 1;
fi
if [ $stage -le -1 ]; then
echo "Training transition probabilities and setting priors"
$cmd $dir/log/train_trans.log \
nnet-train-transitions $dir/0.mdl "ark:gunzip -c $alidir/ali.*.gz|" $dir/0.mdl \
|| exit 1;
fi
num_iters_reduce=$[$num_epochs * $iters_per_epoch];
num_iters_extra=$[$num_epochs_extra * $iters_per_epoch];
num_iters=$[$num_iters_reduce+$num_iters_extra]
echo "$0: Will train for $num_epochs + $num_epochs_extra epochs, equalling "
echo "$0: $num_iters_reduce + $num_iters_extra = $num_iters iterations, "
echo "$0: (while reducing learning rate) + (with constant learning rate)."
# This is when we decide to mix up from: halfway between when we've finished
# adding the hidden layers and the end of training.
finish_add_layers_iter=$[$num_hidden_layers * $add_layers_period]
mix_up_iter=$[($num_iters + $finish_add_layers_iter)/2]
if [ $num_threads -eq 1 ]; then
train_suffix="-simple" # this enables us to use GPU code if
# we have just one thread.
if ! cuda-compiled; then
echo "$0: WARNING: you are running with one thread but you have not compiled"
echo " for CUDA. You may be running a setup optimized for GPUs. If you have"
echo " GPUs and have nvcc installed, go to src/ and do ./configure; make"
fi
else
train_suffix="-parallel --num-threads=$num_threads"
fi
x=0
for realign_epoch in $realign_epochs; do
realign_iter=`perl -e 'print int($ARGV[0] * $ARGV[1]);' $realign_epoch $iters_per_epoch`
realign_this_iter[$realign_iter]=$realign_epoch
done
cur_egs_dir=$egs_dir
while [ $x -lt $num_iters ]; do
if [ ! -z "${realign_this_iter[$x]}" ]; then
prev_egs_dir=$cur_egs_dir
cur_egs_dir=$dir/egs_${realign_this_iter[$x]}
fi
if [ $x -ge 0 ] && [ $stage -le $x ]; then
if [ ! -z "${realign_this_iter[$x]}" ]; then
epoch=${realign_this_iter[$x]}
echo "Getting average posterior for purposes of adjusting the priors."
# Note: this just uses CPUs, using a smallish subset of data.
rm $dir/post.*.vec 2>/dev/null
$cmd JOB=1:$num_jobs_nnet $dir/log/get_post.JOB.log \
nnet-subset-egs --n=$prior_subset_size ark:$prev_egs_dir/egs.JOB.0.ark ark:- \| \
nnet-compute-from-egs "nnet-to-raw-nnet $dir/$x.mdl -|" ark:- ark:- \| \
matrix-sum-rows ark:- ark:- \| vector-sum ark:- $dir/post.JOB.vec || exit 1;
sleep 3; # make sure there is time for $dir/post.*.vec to appear.
$cmd $dir/log/vector_sum.log \
vector-sum $dir/post.*.vec $dir/post.vec || exit 1;
rm $dir/post.*.vec;
echo "Re-adjusting priors based on computed posteriors"
$cmd $dir/log/adjust_priors.$x.log \
nnet-adjust-priors $dir/$x.mdl $dir/post.vec $dir/$x.mdl || exit 1;
sleep 2
steps/nnet2/align.sh --nj $num_jobs_align --cmd "$align_cmd" --use-gpu $align_use_gpu \
--transform-dir "$transform_dir" \
--iter $x $data $lang $dir $dir/ali_$epoch || exit 1
steps/nnet2/relabel_egs.sh --cmd "$cmd" --iter $x $dir/ali_$epoch \
$prev_egs_dir $cur_egs_dir || exit 1
if $cleanup && [[ $prev_egs_dir =~ $dir/egs* ]]; then
steps/nnet2/remove_egs.sh $prev_egs_dir
fi
fi
# Set off jobs doing some diagnostics, in the background.
# Use the egs dir from the previous iteration for the diagnostics
$cmd $dir/log/compute_prob_valid.$x.log \
nnet-compute-prob $dir/$x.mdl ark:$cur_egs_dir/valid_diagnostic.egs &
$cmd $dir/log/compute_prob_train.$x.log \
nnet-compute-prob $dir/$x.mdl ark:$cur_egs_dir/train_diagnostic.egs &
if [ $x -gt 0 ] && [ ! -f $dir/log/mix_up.$[$x-1].log ]; then
$cmd $dir/log/progress.$x.log \
nnet-show-progress --use-gpu=no $dir/$[$x-1].mdl $dir/$x.mdl \
ark:$cur_egs_dir/train_diagnostic.egs '&&' \
nnet-am-info $dir/$x.mdl &
fi
echo "Training neural net (pass $x)"
if [ $x -gt 0 ] && \
[ $x -le $[($num_hidden_layers-1)*$add_layers_period] ] && \
[ $[($x-1) % $add_layers_period] -eq 0 ]; then
mdl="nnet-init --srand=$x $dir/hidden.config - | nnet-insert $dir/$x.mdl - - |"
else
mdl=$dir/$x.mdl
fi
$cmd $parallel_opts JOB=1:$num_jobs_nnet $dir/log/train.$x.JOB.log \
nnet-shuffle-egs --buffer-size=$shuffle_buffer_size --srand=$x \
ark:$cur_egs_dir/egs.JOB.$[$x%$iters_per_epoch].ark ark:- \| \
nnet-train$train_suffix \
--minibatch-size=$minibatch_size --srand=$x "$mdl" \
ark:- $dir/$[$x+1].JOB.mdl \
|| exit 1;
nnets_list=
for n in `seq 1 $num_jobs_nnet`; do
nnets_list="$nnets_list $dir/$[$x+1].$n.mdl"
done
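# Exponentially decay the learning rate from $initial_learning_rate to
# $final_learning_rate over the first $num_iters_reduce iterations,
# i.e. lr(x) = initial * (final/initial)^(x/num_iters_reduce), then hold it constant.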
learning_rate=`perl -e '($x,$n,$i,$f)=@ARGV; print ($x >= $n ? $f : $i*exp($x*log($f/$i)/$n));' $[$x+1] $num_iters_reduce $initial_learning_rate $final_learning_rate`;
softmax_learning_rate=`perl -e "print $learning_rate * $softmax_learning_rate_factor;"`;
nnet-am-info $dir/$[$x+1].1.mdl > $dir/foo 2>/dev/null || exit 1
nu=`cat $dir/foo | grep num-updatable-components | awk '{print $2}'`
na=`cat $dir/foo | grep -v Fixed | grep AffineComponent | wc -l`
# na is number of last updatable AffineComponent layer [one-based, counting only
# updatable components.]
lr_string="$learning_rate"
for n in `seq 2 $nu`; do
if [ $n -eq $na ] || [ $n -eq $[$na-1] ]; then lr=$softmax_learning_rate;
else lr=$learning_rate; fi
lr_string="$lr_string:$lr"
done
$cmd $dir/log/average.$x.log \
nnet-am-average $nnets_list - \| \
nnet-am-copy --learning-rates=$lr_string - $dir/$[$x+1].mdl || exit 1;
if [ "$mix_up" -gt 0 ] && [ $x -eq $mix_up_iter ]; then
# mix up.
echo Mixing up from $num_leaves to $mix_up components
$cmd $dir/log/mix_up.$x.log \
nnet-am-mixup --min-count=10 --num-mixtures=$mix_up \
$dir/$[$x+1].mdl $dir/$[$x+1].mdl || exit 1;
fi
rm $nnets_list
fi
x=$[$x+1]
done
# Now do combination.
# At the end, final.mdl will be a combination of the last e.g. 10 models.
nnets_list=()
if [ $num_iters_final -gt $num_iters_extra ]; then
echo "Setting num_iters_final=$num_iters_extra"
fi
start=$[$num_iters-$num_iters_final+1]
for x in `seq $start $num_iters`; do
idx=$[$x-$start]
if [ $x -gt $mix_up_iter ]; then
nnets_list[$idx]=$dir/$x.mdl # "nnet-am-copy --remove-dropout=true $dir/$x.mdl - |"
fi
done
if [ $stage -le $num_iters ]; then
echo "Doing final combination to produce final.mdl"
# Below, use --use-gpu=no to disable nnet-combine-fast from using a GPU, as
# if there are many models it can give out-of-memory error; set num-threads to 8
# to speed it up (this isn't ideal...)
this_num_threads=$num_threads
[ $this_num_threads -lt 8 ] && this_num_threads=8
num_egs=`nnet-copy-egs ark:$cur_egs_dir/combine.egs ark:/dev/null 2>&1 | tail -n 1 | awk '{print $NF}'`
mb=$[($num_egs+$this_num_threads-1)/$this_num_threads]
[ $mb -gt 512 ] && mb=512
# Setting --initial-model to a large value makes it initialize the combination
# with the average of all the models. It's important not to start with a
# single model, or, due to the invariance to scaling that these nonlinearities
# give us, we get zero diagonal entries in the fisher matrix that
# nnet-combine-fast uses for scaling, which after flooring and inversion, has
# the effect that the initial model chosen gets much higher learning rates
# than the others. This prevents the optimization from working well.
$cmd $parallel_opts $dir/log/combine.log \
nnet-combine-fast --initial-model=100000 --num-lbfgs-iters=40 --use-gpu=no \
--num-threads=$this_num_threads --regularizer=$combine_regularizer \
--verbose=3 --minibatch-size=$mb "${nnets_list[@]}" ark:$cur_egs_dir/combine.egs \
$dir/final.mdl || exit 1;
# Normalize stddev for affine or block affine layers that are followed by a
# pnorm layer and then a normalize layer.
$cmd $parallel_opts $dir/log/normalize.log \
nnet-normalize-stddev $dir/final.mdl $dir/final.mdl || exit 1;
# Compute the probability of the final, combined model with
# the same subset we used for the previous compute_probs, as the
# different subsets will lead to different probs.
$cmd $dir/log/compute_prob_valid.final.log \
nnet-compute-prob $dir/final.mdl ark:$cur_egs_dir/valid_diagnostic.egs &
$cmd $dir/log/compute_prob_train.final.log \
nnet-compute-prob $dir/final.mdl ark:$cur_egs_dir/train_diagnostic.egs &
fi
if [ $stage -le $[$num_iters+1] ]; then
echo "Getting average posterior for purposes of adjusting the priors."
# Note: this just uses CPUs, using a smallish subset of data.
rm $dir/post.*.vec 2>/dev/null
$cmd JOB=1:$num_jobs_nnet $dir/log/get_post.JOB.log \
nnet-subset-egs --n=$prior_subset_size ark:$cur_egs_dir/egs.JOB.0.ark ark:- \| \
nnet-compute-from-egs "nnet-to-raw-nnet $dir/final.mdl -|" ark:- ark:- \| \
matrix-sum-rows ark:- ark:- \| vector-sum ark:- $dir/post.JOB.vec || exit 1;
sleep 3; # make sure there is time for $dir/post.*.vec to appear.
$cmd $dir/log/vector_sum.log \
vector-sum $dir/post.*.vec $dir/post.vec || exit 1;
rm $dir/post.*.vec;
echo "Re-adjusting priors based on computed posteriors"
$cmd $dir/log/adjust_priors.final.log \
nnet-adjust-priors $dir/final.mdl $dir/post.vec $dir/final.mdl || exit 1;
fi
sleep 2
echo Done
if $cleanup; then
echo Cleaning up data
if [[ $cur_egs_dir =~ $dir/egs* ]]; then
steps/nnet2/remove_egs.sh $cur_egs_dir
fi
echo Removing most of the models
for x in `seq 0 $num_iters`; do
if [ $[$x%100] -ne 0 ] && [ $x -lt $[$num_iters-$num_iters_final+1] ]; then
# delete all but every 100th model; don't delete the ones which combine to form the final model.
rm $dir/$x.mdl
fi
done
fi
|
michellemorales/OpenMM
|
kaldi/egs/wsj/s5/steps/nnet2/train_pnorm.sh
|
Shell
|
gpl-2.0
| 22,627 |
#! /bin/sh
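# Run the Nessi simulation controller: put the repository root on PYTHONPATH
# and start simcon.py from the nessi/ directory.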
export PYTHONPATH=./:$PYTHONPATH
cd nessi/
python simcon.py
|
jehrensb/Nessi
|
nessi.sh
|
Shell
|
gpl-2.0
| 72 |
#!/bin/sh
# Copyright (C) 2004-2017 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# Check for _DEPENDENCIES definition with conditional _LDADD.
# Report from Elena A. Vengerova.
. test-init.sh
cat >>configure.ac <<'EOF'
AM_CONDITIONAL([TWO], [test -n "$two"])
AC_OUTPUT
EOF
cat > Makefile.am <<'EOF'
OBJEXT=z
CC=false
AUTOMAKE_OPTIONS=no-dependencies
bin_PROGRAMS = test1 test2
if TWO
test1_LDADD = two.$(OBJEXT)
test2_LDADD = two.$(OBJEXT)
test2_DEPENDENCIES = $(test2_LDADD) somethingelse.a
else !TWO
test1_LDADD = one.$(OBJEXT)
test2_LDADD = three.$(OBJEXT)
endif !TWO
test1_DEPENDENCIES = $(test1_LDADD) somethingelse.a
.PHONY: dep-test1 dep-test2
dep-test1:
echo BEG: $(test1_DEPENDENCIES) :END
dep-test2:
echo BEG: $(test2_DEPENDENCIES) :END
EOF
$ACLOCAL
$AUTOCONF
$AUTOMAKE
./configure
run_make -O dep-test1
$FGREP 'BEG: one.z somethingelse.a :END' stdout
run_make -O dep-test2
$FGREP 'BEG: three.z :END' stdout
./configure two=2
run_make -O dep-test1
$FGREP 'BEG: two.z somethingelse.a :END' stdout
run_make -O dep-test2
$FGREP 'BEG: two.z somethingelse.a :END' stdout
:
|
pylam/automake
|
t/cond34.sh
|
Shell
|
gpl-2.0
| 1,713 |
#!/bin/bash
# Source this file to get DebugEcho and more
# $Revision$ $Author$ $Date$
##############################################################################
# Copyright (C) 2004 Kurt Schwehr
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
###############################################################################
######################################################################
# Debugging
######################################################################
declare -ri EXIT_FAILURE=1
declare -ri EXIT_SUCCESS=0
declare -ri TERSE=1
declare -ri TRACE=4
declare -ri VERBOSE=8
declare -ri BOMBASTIC=16
if [ -z "$VERBOSITY" ]; then
declare -i debugLevel=4
else
declare -i debugLevel=$VERBOSITY
fi
# Twisted way to get down to the fundamental script name.
tmp=${0##/*/}
tmp=${tmp%%.bash}
tmp=${tmp##*.}
tmp=${tmp##*/}
declare -r SCRIPT_NAME=$tmp
# $1 is the level to compare against debugLevel
# $2 is line number
# $3 is the string to echo to stdout.
DebugEcho()
{
declare -ir val=$1
if [ "$debugLevel" -ge "$val" ]; then
#echo $2
echo "${SCRIPT_NAME}.bash:$2: (`printf "%02d" $1`) $3"
fi
}
#DebugEcho $TERSE "Terse is on"
#DebugEcho $TRACE "Trace is on"
DebugEcho $VERBOSE $LINENO "Verbose is on"
DebugEcho $BOMBASTIC $LINENO "Bombastic is on"
DebugEcho $TERSE $LINENO "debugLevel = $debugLevel"
######################################################################
# Like perl's die command
######################################################################
die()
{
declare -ir line=$1
echo "ERROR: Command failed at line $line"
exit $EXIT_FAILURE
}
|
schwehr/density
|
debug.bash
|
Shell
|
gpl-2.0
| 2,356 |
#!/bin/bash
nodemon -e js,html,json,less,swig app.js
|
oliverdain/CodeTeacher
|
start.sh
|
Shell
|
gpl-2.0
| 55 |
#!/bin/bash
# http://www.support.code-red-tech.com/CodeRedWiki/HardwareDebugConnections
# Pin # CN2 # Designation
# 1 VDD_TARGET # VDD from application # ( = VDD, DNC when powered from Pi)
# 2 SWCLK # SWD clock # ( = JTCK / PA14)
# 3 GND # Ground # ( = GND)
# 4 SWDIO # SWD data input/output # ( = TMS / PA13)
# 5 NRST # RESET of target MCU # ( = NRST)
# 6 SWO # Reserved # ( = JTDO / PB3 (optional))
#sudo apt-get install autoconf
#sudo apt-get install libusb-1.0-0-dev
git clone https://github.com/texane/stlink
cd stlink
./autogen.sh
./configure
make
sudo make install
sudo cp 49-stlinkv2.rules /etc/udev/rules.d/
sudo udevadm control --reload-rules
# unplug + replug st-link v2 usb device
#For flashing with an ST-Link V2 or Fx-Discovery.
#st-flash --reset write build/leds.bin 0x08000000
#For debugging with an ST-Link V2 or Fx-Discovery.
#st-util
#In another terminal:
#arm-none-eabi-gdb
#(gdb) target extended-remote :4242
#(gdb) load build/leds.elf
#(gdb) r
|
ARMinARM/arminarm
|
src/update_stlink.sh
|
Shell
|
gpl-2.0
| 976 |
#!/bin/sh
#
# Copyright (c) 2009, 2010, 2012, 2013 David Aguilar
#
test_description='git-difftool
Testing basic diff tool invocation
'
. ./test-lib.sh
difftool_test_setup ()
{
test_config diff.tool test-tool &&
test_config difftool.test-tool.cmd 'cat "$LOCAL"' &&
test_config difftool.bogus-tool.cmd false
}
prompt_given ()
{
prompt="$1"
test "$prompt" = "Launch 'test-tool' [Y/n]? branch"
}
test_expect_success 'basic usage requires no repo' '
test_expect_code 129 git difftool -h >output &&
grep ^usage: output &&
# create a ceiling directory to prevent Git from finding a repo
mkdir -p not/repo &&
test_when_finished rm -r not &&
test_expect_code 129 \
env GIT_CEILING_DIRECTORIES="$(pwd)/not" \
git -C not/repo difftool -h >output &&
grep ^usage: output
'
# Create a file on master and change it on branch
test_expect_success 'setup' '
echo master >file &&
git add file &&
git commit -m "added file" &&
git checkout -b branch master &&
echo branch >file &&
git commit -a -m "branch changed file" &&
git checkout master
'
# Configure a custom difftool.<tool>.cmd and use it
test_expect_success 'custom commands' '
difftool_test_setup &&
test_config difftool.test-tool.cmd "cat \"\$REMOTE\"" &&
echo master >expect &&
git difftool --no-prompt branch >actual &&
test_cmp expect actual &&
test_config difftool.test-tool.cmd "cat \"\$LOCAL\"" &&
echo branch >expect &&
git difftool --no-prompt branch >actual &&
test_cmp expect actual
'
test_expect_success 'custom tool commands override built-ins' '
test_config difftool.vimdiff.cmd "cat \"\$REMOTE\"" &&
echo master >expect &&
git difftool --tool vimdiff --no-prompt branch >actual &&
test_cmp expect actual
'
test_expect_success 'difftool ignores bad --tool values' '
: >expect &&
test_must_fail \
git difftool --no-prompt --tool=bad-tool branch >actual &&
test_cmp expect actual
'
test_expect_success 'difftool forwards arguments to diff' '
difftool_test_setup &&
>for-diff &&
git add for-diff &&
echo changes>for-diff &&
git add for-diff &&
: >expect &&
git difftool --cached --no-prompt -- for-diff >actual &&
test_cmp expect actual &&
git reset -- for-diff &&
rm for-diff
'
test_expect_success 'difftool ignores exit code' '
test_config difftool.error.cmd false &&
git difftool -y -t error branch
'
test_expect_success 'difftool forwards exit code with --trust-exit-code' '
test_config difftool.error.cmd false &&
test_must_fail git difftool -y --trust-exit-code -t error branch
'
test_expect_success 'difftool forwards exit code with --trust-exit-code for built-ins' '
test_config difftool.vimdiff.path false &&
test_must_fail git difftool -y --trust-exit-code -t vimdiff branch
'
test_expect_success 'difftool honors difftool.trustExitCode = true' '
test_config difftool.error.cmd false &&
test_config difftool.trustExitCode true &&
test_must_fail git difftool -y -t error branch
'
test_expect_success 'difftool honors difftool.trustExitCode = false' '
test_config difftool.error.cmd false &&
test_config difftool.trustExitCode false &&
git difftool -y -t error branch
'
test_expect_success 'difftool ignores exit code with --no-trust-exit-code' '
test_config difftool.error.cmd false &&
test_config difftool.trustExitCode true &&
git difftool -y --no-trust-exit-code -t error branch
'
test_expect_success 'difftool stops on error with --trust-exit-code' '
test_when_finished "rm -f for-diff .git/fail-right-file" &&
test_when_finished "git reset -- for-diff" &&
write_script .git/fail-right-file <<-\EOF &&
echo "$2"
exit 1
EOF
>for-diff &&
git add for-diff &&
echo file >expect &&
test_must_fail git difftool -y --trust-exit-code \
--extcmd .git/fail-right-file branch >actual &&
test_cmp expect actual
'
test_expect_success 'difftool honors exit status if command not found' '
test_config difftool.nonexistent.cmd i-dont-exist &&
test_config difftool.trustExitCode false &&
test_must_fail git difftool -y -t nonexistent branch
'
test_expect_success 'difftool honors --gui' '
difftool_test_setup &&
test_config merge.tool bogus-tool &&
test_config diff.tool bogus-tool &&
test_config diff.guitool test-tool &&
echo branch >expect &&
git difftool --no-prompt --gui branch >actual &&
test_cmp expect actual
'
test_expect_success 'difftool --gui last setting wins' '
difftool_test_setup &&
: >expect &&
git difftool --no-prompt --gui --no-gui >actual &&
test_cmp expect actual &&
test_config merge.tool bogus-tool &&
test_config diff.tool bogus-tool &&
test_config diff.guitool test-tool &&
echo branch >expect &&
git difftool --no-prompt --no-gui --gui branch >actual &&
test_cmp expect actual
'
test_expect_success 'difftool --gui works without configured diff.guitool' '
difftool_test_setup &&
echo branch >expect &&
git difftool --no-prompt --gui branch >actual &&
test_cmp expect actual
'
# Specify the diff tool using $GIT_DIFF_TOOL
test_expect_success 'GIT_DIFF_TOOL variable' '
difftool_test_setup &&
git config --unset diff.tool &&
echo branch >expect &&
GIT_DIFF_TOOL=test-tool git difftool --no-prompt branch >actual &&
test_cmp expect actual
'
# Test the $GIT_*_TOOL variables and ensure
# that $GIT_DIFF_TOOL always wins unless --tool is specified
test_expect_success 'GIT_DIFF_TOOL overrides' '
difftool_test_setup &&
test_config diff.tool bogus-tool &&
test_config merge.tool bogus-tool &&
echo branch >expect &&
GIT_DIFF_TOOL=test-tool git difftool --no-prompt branch >actual &&
test_cmp expect actual &&
test_config diff.tool bogus-tool &&
test_config merge.tool bogus-tool &&
GIT_DIFF_TOOL=bogus-tool \
git difftool --no-prompt --tool=test-tool branch >actual &&
test_cmp expect actual
'
# Test that we don't have to pass --no-prompt to difftool
# when $GIT_DIFFTOOL_NO_PROMPT is true
test_expect_success 'GIT_DIFFTOOL_NO_PROMPT variable' '
difftool_test_setup &&
echo branch >expect &&
GIT_DIFFTOOL_NO_PROMPT=true git difftool branch >actual &&
test_cmp expect actual
'
# git-difftool supports the difftool.prompt variable.
# Test that GIT_DIFFTOOL_PROMPT can override difftool.prompt = false
test_expect_success 'GIT_DIFFTOOL_PROMPT variable' '
difftool_test_setup &&
test_config difftool.prompt false &&
echo >input &&
GIT_DIFFTOOL_PROMPT=true git difftool branch <input >output &&
prompt=$(tail -1 <output) &&
prompt_given "$prompt"
'
# Test that we don't have to pass --no-prompt when difftool.prompt is false
test_expect_success 'difftool.prompt config variable is false' '
difftool_test_setup &&
test_config difftool.prompt false &&
echo branch >expect &&
git difftool branch >actual &&
test_cmp expect actual
'
# Test that we don't have to pass --no-prompt when mergetool.prompt is false
test_expect_success 'difftool merge.prompt = false' '
difftool_test_setup &&
test_might_fail git config --unset difftool.prompt &&
test_config mergetool.prompt false &&
echo branch >expect &&
git difftool branch >actual &&
test_cmp expect actual
'
# Test that the -y flag can override difftool.prompt = true
test_expect_success 'difftool.prompt can overridden with -y' '
difftool_test_setup &&
test_config difftool.prompt true &&
echo branch >expect &&
git difftool -y branch >actual &&
test_cmp expect actual
'
# Test that the --prompt flag can override difftool.prompt = false
test_expect_success 'difftool.prompt can overridden with --prompt' '
difftool_test_setup &&
test_config difftool.prompt false &&
echo >input &&
git difftool --prompt branch <input >output &&
prompt=$(tail -1 <output) &&
prompt_given "$prompt"
'
# Test that the last flag passed on the command-line wins
test_expect_success 'difftool last flag wins' '
difftool_test_setup &&
echo branch >expect &&
git difftool --prompt --no-prompt branch >actual &&
test_cmp expect actual &&
echo >input &&
git difftool --no-prompt --prompt branch <input >output &&
prompt=$(tail -1 <output) &&
prompt_given "$prompt"
'
# git-difftool falls back to git-mergetool config variables
# so test that behavior here
test_expect_success 'difftool + mergetool config variables' '
test_config merge.tool test-tool &&
test_config mergetool.test-tool.cmd "cat \$LOCAL" &&
echo branch >expect &&
git difftool --no-prompt branch >actual &&
test_cmp expect actual &&
# set merge.tool to something bogus, diff.tool to test-tool
test_config merge.tool bogus-tool &&
test_config diff.tool test-tool &&
git difftool --no-prompt branch >actual &&
test_cmp expect actual
'
test_expect_success 'difftool.<tool>.path' '
test_config difftool.tkdiff.path echo &&
git difftool --tool=tkdiff --no-prompt branch >output &&
grep file output >grep-output &&
test_line_count = 1 grep-output
'
test_expect_success 'difftool --extcmd=cat' '
echo branch >expect &&
echo master >>expect &&
git difftool --no-prompt --extcmd=cat branch >actual &&
test_cmp expect actual
'
test_expect_success 'difftool --extcmd cat' '
echo branch >expect &&
echo master >>expect &&
git difftool --no-prompt --extcmd=cat branch >actual &&
test_cmp expect actual
'
test_expect_success 'difftool -x cat' '
echo branch >expect &&
echo master >>expect &&
git difftool --no-prompt -x cat branch >actual &&
test_cmp expect actual
'
test_expect_success 'difftool --extcmd echo arg1' '
echo file >expect &&
git difftool --no-prompt \
--extcmd sh\ -c\ \"echo\ \$1\" branch >actual &&
test_cmp expect actual
'
test_expect_success 'difftool --extcmd cat arg1' '
echo master >expect &&
git difftool --no-prompt \
--extcmd sh\ -c\ \"cat\ \$1\" branch >actual &&
test_cmp expect actual
'
test_expect_success 'difftool --extcmd cat arg2' '
echo branch >expect &&
git difftool --no-prompt \
--extcmd sh\ -c\ \"cat\ \\\"\$2\\\"\" branch >actual &&
test_cmp expect actual
'
# Create a second file on master and a different version on branch
test_expect_success 'setup with 2 files different' '
echo m2 >file2 &&
git add file2 &&
git commit -m "added file2" &&
git checkout branch &&
echo br2 >file2 &&
git add file2 &&
git commit -a -m "branch changed file2" &&
git checkout master
'
test_expect_success 'say no to the first file' '
(echo n && echo) >input &&
git difftool -x cat branch <input >output &&
grep m2 output &&
grep br2 output &&
! grep master output &&
! grep branch output
'
test_expect_success 'say no to the second file' '
(echo && echo n) >input &&
git difftool -x cat branch <input >output &&
grep master output &&
grep branch output &&
! grep m2 output &&
! grep br2 output
'
test_expect_success 'ending prompt input with EOF' '
git difftool -x cat branch </dev/null >output &&
! grep master output &&
! grep branch output &&
! grep m2 output &&
! grep br2 output
'
test_expect_success 'difftool --tool-help' '
git difftool --tool-help >output &&
grep tool output
'
test_expect_success 'setup change in subdirectory' '
git checkout master &&
mkdir sub &&
echo master >sub/sub &&
git add sub/sub &&
git commit -m "added sub/sub" &&
git tag v1 &&
echo test >>file &&
echo test >>sub/sub &&
git add file sub/sub &&
git commit -m "modified both"
'
run_dir_diff_test () {
test_expect_success "$1 --no-symlinks" "
symlinks=--no-symlinks &&
$2
"
test_expect_success SYMLINKS "$1 --symlinks" "
symlinks=--symlinks &&
$2
"
}
run_dir_diff_test 'difftool -d' '
git difftool -d $symlinks --extcmd ls branch >output &&
grep sub output &&
grep file output
'
run_dir_diff_test 'difftool --dir-diff' '
git difftool --dir-diff $symlinks --extcmd ls branch >output &&
grep sub output &&
grep file output
'
run_dir_diff_test 'difftool --dir-diff ignores --prompt' '
git difftool --dir-diff $symlinks --prompt --extcmd ls branch >output &&
grep sub output &&
grep file output
'
run_dir_diff_test 'difftool --dir-diff branch from subdirectory' '
(
cd sub &&
git difftool --dir-diff $symlinks --extcmd ls branch >output &&
# "sub" must only exist in "right"
# "file" and "file2" must be listed in both "left" and "right"
grep sub output > sub-output &&
test_line_count = 1 sub-output &&
grep file"$" output >file-output &&
test_line_count = 2 file-output &&
grep file2 output >file2-output &&
test_line_count = 2 file2-output
)
'
run_dir_diff_test 'difftool --dir-diff v1 from subdirectory' '
(
cd sub &&
git difftool --dir-diff $symlinks --extcmd ls v1 >output &&
# "sub" and "file" exist in both v1 and HEAD.
# "file2" is unchanged.
grep sub output >sub-output &&
test_line_count = 2 sub-output &&
grep file output >file-output &&
test_line_count = 2 file-output &&
! grep file2 output
)
'
run_dir_diff_test 'difftool --dir-diff branch from subdirectory w/ pathspec' '
(
cd sub &&
git difftool --dir-diff $symlinks --extcmd ls branch -- .>output &&
# "sub" only exists in "right"
# "file" and "file2" must not be listed
grep sub output >sub-output &&
test_line_count = 1 sub-output &&
! grep file output
)
'
run_dir_diff_test 'difftool --dir-diff v1 from subdirectory w/ pathspec' '
(
cd sub &&
git difftool --dir-diff $symlinks --extcmd ls v1 -- .>output &&
# "sub" exists in v1 and HEAD
# "file" is filtered out by the pathspec
grep sub output >sub-output &&
test_line_count = 2 sub-output &&
! grep file output
)
'
run_dir_diff_test 'difftool --dir-diff from subdirectory with GIT_DIR set' '
(
GIT_DIR=$(pwd)/.git &&
export GIT_DIR &&
GIT_WORK_TREE=$(pwd) &&
export GIT_WORK_TREE &&
cd sub &&
git difftool --dir-diff $symlinks --extcmd ls \
branch -- sub >output &&
grep sub output &&
! grep file output
)
'
run_dir_diff_test 'difftool --dir-diff when worktree file is missing' '
test_when_finished git reset --hard &&
rm file2 &&
git difftool --dir-diff $symlinks --extcmd ls branch master >output &&
grep file2 output
'
run_dir_diff_test 'difftool --dir-diff with unmerged files' '
test_when_finished git reset --hard &&
test_config difftool.echo.cmd "echo ok" &&
git checkout -B conflict-a &&
git checkout -B conflict-b &&
git checkout conflict-a &&
echo a >>file &&
git add file &&
git commit -m conflict-a &&
git checkout conflict-b &&
echo b >>file &&
git add file &&
git commit -m conflict-b &&
git checkout master &&
git merge conflict-a &&
test_must_fail git merge conflict-b &&
cat >expect <<-EOF &&
ok
EOF
git difftool --dir-diff $symlinks -t echo >actual &&
test_cmp expect actual
'
write_script .git/CHECK_SYMLINKS <<\EOF
for f in file file2 sub/sub
do
echo "$f"
ls -ld "$2/$f" | sed -e 's/.* -> //'
done >actual
EOF
test_expect_success SYMLINKS 'difftool --dir-diff --symlink without unstaged changes' '
cat >expect <<-EOF &&
file
$PWD/file
file2
$PWD/file2
sub/sub
$PWD/sub/sub
EOF
git difftool --dir-diff --symlink \
--extcmd "./.git/CHECK_SYMLINKS" branch HEAD &&
test_cmp actual expect
'
write_script modify-right-file <<\EOF
echo "new content" >"$2/file"
EOF
run_dir_diff_test 'difftool --dir-diff syncs worktree with unstaged change' '
test_when_finished git reset --hard &&
echo "orig content" >file &&
git difftool -d $symlinks --extcmd "$PWD/modify-right-file" branch &&
echo "new content" >expect &&
test_cmp expect file
'
run_dir_diff_test 'difftool --dir-diff syncs worktree without unstaged change' '
test_when_finished git reset --hard &&
git difftool -d $symlinks --extcmd "$PWD/modify-right-file" branch &&
echo "new content" >expect &&
test_cmp expect file
'
write_script modify-file <<\EOF
echo "new content" >file
EOF
test_expect_success 'difftool --no-symlinks does not overwrite working tree file ' '
echo "orig content" >file &&
git difftool --dir-diff --no-symlinks --extcmd "$PWD/modify-file" branch &&
echo "new content" >expect &&
test_cmp expect file
'
write_script modify-both-files <<\EOF
echo "wt content" >file &&
echo "tmp content" >"$2/file" &&
echo "$2" >tmpdir
EOF
test_expect_success 'difftool --no-symlinks detects conflict ' '
(
TMPDIR=$TRASH_DIRECTORY &&
export TMPDIR &&
echo "orig content" >file &&
test_must_fail git difftool --dir-diff --no-symlinks --extcmd "$PWD/modify-both-files" branch &&
echo "wt content" >expect &&
test_cmp expect file &&
echo "tmp content" >expect &&
test_cmp expect "$(cat tmpdir)/file"
)
'
test_expect_success 'difftool properly honors gitlink and core.worktree' '
git submodule add ./. submod/ule &&
test_config -C submod/ule diff.tool checktrees &&
test_config -C submod/ule difftool.checktrees.cmd '\''
test -d "$LOCAL" && test -d "$REMOTE" && echo good
'\'' &&
(
cd submod/ule &&
echo good >expect &&
git difftool --tool=checktrees --dir-diff HEAD~ >actual &&
test_cmp expect actual
)
'
test_expect_success SYMLINKS 'difftool --dir-diff symlinked directories' '
git init dirlinks &&
(
cd dirlinks &&
git config diff.tool checktrees &&
git config difftool.checktrees.cmd "echo good" &&
mkdir foo &&
: >foo/bar &&
git add foo/bar &&
test_commit symlink-one &&
ln -s foo link &&
git add link &&
test_commit symlink-two &&
echo good >expect &&
git difftool --tool=checktrees --dir-diff HEAD~ >actual &&
test_cmp expect actual
)
'
test_done
|
pzhaoyang/git
|
t/t7800-difftool.sh
|
Shell
|
gpl-2.0
| 17,132 |
#!/bin/bash
mic=ihm
ngram_order=4 # if this option is used, the rescoring binary makes an approximation
# to merge the states of the FST generated from the RNNLM, e.g. if ngram-order = 4
# then any histories that share the last 3 words are merged into one state
stage=1
weight=0.5 # when we do lattice-rescoring, instead of replacing the LM weights
# in the lattice with the RNNLM weights, we usually take a linear combination of
# the two, and the $weight variable gives the weight of the RNNLM scores
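# For illustration only (an assumption based on the comment above, not the exact
# binary behaviour): with weight=0.5 the combined LM score is roughly
# 0.5 * lattice_lm_score + 0.5 * rnnlm_score.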
. ./utils/parse_options.sh
. ./cmd.sh
. ./path.sh
set -e
dir=data/tensorflow_lstm
mkdir -p $dir
steps/tfrnnlm/check_tensorflow_installed.sh
if [ $stage -le 1 ]; then
local/tfrnnlm/rnnlm_data_prep.sh $dir
fi
mkdir -p $dir
if [ $stage -le 2 ]; then
# the following script uses TensorFlow. You could use tools/extras/install_tensorflow_py.sh to install it
$cuda_cmd $dir/train_rnnlm.log utils/parallel/limit_num_gpus.sh \
python steps/tfrnnlm/lstm.py --data-path=$dir --save-path=$dir/rnnlm --vocab-path=$dir/wordlist.rnn.final
fi
final_lm=ami_fsh.o3g.kn
LM=$final_lm.pr1-7
if [ $stage -le 3 ]; then
for decode_set in dev eval; do
basedir=exp/$mic/nnet3/tdnn_sp/
decode_dir=${basedir}/decode_${decode_set}
# Lattice rescoring
steps/lmrescore_rnnlm_lat.sh \
--cmd "$tfrnnlm_cmd --mem 16G" \
--rnnlm-ver tensorflow --weight $weight --max-ngram-order $ngram_order \
data/lang_$LM $dir \
data/$mic/${decode_set}_hires ${decode_dir} \
${decode_dir}.tfrnnlm.lat.${ngram_order}gram.$weight &
done
fi
wait
|
michellemorales/OpenMM
|
kaldi/egs/ami/s5/local/tfrnnlm/run_lstm.sh
|
Shell
|
gpl-2.0
| 1,578 |
#! /bin/sh
# Copyright (C) 2011-2013 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# TAP support:
# - numbered and unnumbered test results can coexist
# - tests without explicit number get automatically numbered in the
# testsuite progress output on console
. test-init.sh
. tap-setup.sh
cat > all.test <<'END'
1..7
ok 1 foo
ok 2 - foo2
ok - bar
not ok baz # TODO
not ok 5 - asd # TODO
ok 6 # SKIP
ok zardoz
END
$MAKE check >stdout || { cat stdout; exit 1; }
cat stdout
count_test_results total=7 pass=4 fail=0 xpass=0 xfail=2 skip=1 error=0
grep '^PASS: all\.test 1 foo$' stdout
grep '^PASS: all\.test 2 - foo2$' stdout
grep '^PASS: all\.test 3 - bar$' stdout
grep '^XFAIL: all\.test 4 baz # TODO$' stdout
grep '^XFAIL: all\.test 5 - asd # TODO$' stdout
grep '^SKIP: all\.test 6 # SKIP$' stdout
grep '^PASS: all\.test 7 zardoz$' stdout
:
|
GavinSmith0123/automake-amplain
|
t/tap-with-and-without-number.sh
|
Shell
|
gpl-2.0
| 1,457 |
#!/bin/sh
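# Install the runlights init script into /etc/init.d and register it to start
# at the default runlevels.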
sudo cp runlights /etc/init.d/
sudo update-rc.d runlights defaults
# uninstall
#sudo update-rc.d -f runlights remove
|
dmtaub/APA102_Pi
|
sysadmin/install.sh
|
Shell
|
gpl-2.0
| 129 |
#! /bin/bash
MYNAME=$(basename $0)
MYBASENAME=$(basename $0 .sh)
MYDIR=$(dirname $0)
STDOUT_FILE=ft_errors_stdout
VERBOSE=""
VERSION="1.0.0"
LOG_OPTION="--wait"
DEBUG_OPTION=""
CLUSTER_NAME="${MYBASENAME}_$$"
CLUSTER_ID=""
OPTION_INSTALL=""
PIP_CONTAINER_CREATE=$(which "pip-container-create")
CONTAINER_SERVER=""
PROVIDER_VERSION="5.6"
OPTION_VENDOR="percona"
# The IP of the node we added first and last. Empty if we did not.
FIRST_ADDED_NODE=""
LAST_ADDED_NODE=""
export SSH="ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o LogLevel=quiet"
cd $MYDIR
source ./include.sh
#
# Prints usage information and exits.
#
function printHelpAndExit()
{
cat << EOF
Usage:
$MYNAME [OPTION]... [TESTNAME]
$MYNAME - Test script for s9s to check Galera clusters.
-h, --help Print this help and exit.
--verbose Print more messages.
--log Print the logs while waiting for the job to be ended.
--server=SERVER The name of the server that will hold the containers.
--print-commands Do not print unit test info, print the executed commands.
--install Just install the cluster and exit.
--reset-config Remove and re-generate the ~/.s9s directory.
--vendor=STRING Use the given Galera vendor.
--provider-version=STRING The SQL server provider version.
--leave-nodes Do not destroy the nodes at exit.
SUPPORTED TESTS
o testCreateCluster Creates a cluster.
o testCreateClusterFail1 Fails to create a cluster by re-using nodes.
o testCreateClusterDupl1 Creates cluster with duplicate name.
o testRemoveClusterFail Fails to remove cluster 0.
EXAMPLE
./$MYNAME --print-commands --server=core1 --reset-config --install
EOF
exit 1
}
ARGS=$(\
getopt -o h \
-l "help,verbose,log,server:,print-commands,install,reset-config,\
provider-version:,vendor:,leave-nodes" \
-- "$@")
if [ $? -ne 0 ]; then
exit 6
fi
eval set -- "$ARGS"
while true; do
case "$1" in
-h|--help)
shift
printHelpAndExit
;;
--verbose)
shift
VERBOSE="true"
;;
--log)
shift
LOG_OPTION="--log"
DEBUG_OPTION="--debug"
;;
--server)
shift
CONTAINER_SERVER="$1"
shift
;;
--print-commands)
shift
DONT_PRINT_TEST_MESSAGES="true"
PRINT_COMMANDS="true"
;;
--install)
shift
OPTION_INSTALL="--install"
;;
--reset-config)
shift
OPTION_RESET_CONFIG="true"
;;
--provider-version)
shift
PROVIDER_VERSION="$1"
shift
;;
--vendor)
shift
OPTION_VENDOR="$1"
shift
;;
--leave-nodes)
shift
OPTION_LEAVE_NODES="true"
;;
--)
shift
break
;;
esac
done
#
# This test will allocate a few nodes and install a new cluster.
#
function testCreateCluster()
{
local node1="ft_galera_new_001"
local node2="ft_galera_new_002"
local nodes
local nodeName
local exitCode
local command_line
#
# Creating a Galera cluster.
#
print_title "Creating a Galera Cluster by Re-using the Nodes"
cat <<EOF
In this test we first create a cluster, then immediately drop it and so make
the nodes available for creating a new cluster on them. After this we create a
new cluster re-using the nodes, driving the controller into a situation where
it needs to uninstall the software that the first cluster creation installed.
EOF
begin_verbatim
echo "Creating node #0"
nodeName=$(create_node --autodestroy $node1)
nodes+="$nodeName;"
FIRST_ADDED_NODE=$nodeName
echo "Creating node #1"
nodeName=$(create_node --autodestroy $node2)
nodes+="$nodeName;"
LAST_ADDED_NODE="$nodeName"
mys9s cluster \
--create \
--cluster-type=galera \
--nodes="$nodes" \
--vendor="$OPTION_VENDOR" \
--cluster-name="$CLUSTER_NAME" \
--provider-version=$PROVIDER_VERSION \
$LOG_OPTION \
$DEBUG_OPTION
check_exit_code $?
CLUSTER_ID=$(find_cluster_id $CLUSTER_NAME)
if [ "$CLUSTER_ID" -gt 0 ]; then
success " o Cluster ID is $CLUSTER_ID, ok"
else
failure "Cluster ID '$CLUSTER_ID' is invalid"
fi
wait_for_cluster_started "$CLUSTER_NAME"
if [ $? -ne 0 ]; then
end_verbatim
return 1
fi
#
# Dropping the cluster we just created.
#
print_subtitle "Dropping the Galera Cluster"
mys9s cluster --drop \
--cluster-id=$CLUSTER_ID \
$LOG_OPTION \
$DEBUG_OPTION
check_exit_code $?
# Need to stop the daemon...
command_line="sudo killall -KILL mysql mysqld_safe mysqld"
echo "$FIRST_ADDED_NODE@$USER# $command_line"
$SSH $FIRST_ADDED_NODE -- "$command_line"
echo "$LAST_ADDED_NODE@$USER# $command_line"
$SSH $LAST_ADDED_NODE -- "$command_line"
#
# Creating a new cluster using the same name and the same nodes.
#
print_subtitle "Creating a Cluster Using the Same Nodes"
mys9s cluster \
--create \
--cluster-type=galera \
--nodes="$nodes" \
--vendor="$OPTION_VENDOR" \
--cluster-name="$CLUSTER_NAME" \
--provider-version=$PROVIDER_VERSION \
$LOG_OPTION \
$DEBUG_OPTION
check_exit_code $?
CLUSTER_ID=$(find_cluster_id $CLUSTER_NAME)
if [ "$CLUSTER_ID" -gt 0 ]; then
success " o Cluster ID is $CLUSTER_ID, ok"
else
failure "Cluster ID '$CLUSTER_ID' is invalid"
fi
wait_for_cluster_started "$CLUSTER_NAME"
end_verbatim
}
function testCreateClusterFail1()
{
local exitCode
#
#
#
print_title "Creating Cluster with Failure"
cat <<EOF
This test will try to create a MySQL replication cluster by re-using a node
that is part of an already existing cluster. This should fail; there is no way
to co-locate MySQL servers on the same computer.
EOF
begin_verbatim
mys9s cluster \
--create \
--cluster-type=mysqlreplication \
--nodes="$LAST_ADDED_NODE" \
--vendor="percona" \
--provider-version="5.6" \
$LOG_OPTION \
$DEBUG_OPTION
exitCode=$?
if [ $exitCode -eq 0 ]; then
failure "Re-using node in a new cluster should have failed."
else
success " o Cluster that re-using a node failed, ok."
fi
end_verbatim
}
function testCreateClusterDupl1()
{
local node1="ft_galera_new_011"
local newClusterName="${CLUSTER_NAME}~1"
local newClusterId
local nodes
local nodeName
local exitCode
print_title "Creating a Galera Cluster with Same Name"
cat <<EOF
This test will try to create a new cluster with the name that is already used
by a previously created cluster. In this case the cluster should be created,
but renamed to CLUSTERNAME~1 on the fly.
EOF
begin_verbatim
echo "Creating node #1"
nodeName=$(create_node --autodestroy $node1)
nodes+="$nodeName;"
#
# Creating a Galera cluster.
#
mys9s cluster \
--create \
--cluster-type=galera \
--nodes="$nodes" \
--vendor="$OPTION_VENDOR" \
--cluster-name="$CLUSTER_NAME" \
--provider-version=$PROVIDER_VERSION \
$LOG_OPTION \
$DEBUG_OPTION
check_exit_code $?
mys9s cluster --list --long
mys9s node --list --long
newClusterId=$(find_cluster_id $newClusterName)
if [ "$newClusterId" -gt 0 ]; then
success " o Cluster ID is $newClusterId, ok"
else
failure "Cluster ID '$newClusterId' is invalid"
fi
wait_for_cluster_started "$newClusterName"
end_verbatim
}
function testCreateClusterDupl2()
{
local node1="ft_galera_new_021"
local newClusterName="${CLUSTER_NAME}~2"
local newClusterId
local nodes
local nodeName
local exitCode
print_title "Creating a Galera Cluster with Same Name"
cat <<EOF
Yet another cluster with the same name. This should be renamed to
CLUSTERNAME~2 of course.
EOF
begin_verbatim
echo "Creating node #2"
nodeName=$(create_node --autodestroy $node1)
nodes+="$nodeName;"
#
# Creating a Galera cluster.
#
mys9s cluster \
--create \
--cluster-type=galera \
--nodes="$nodes" \
--vendor="$OPTION_VENDOR" \
--cluster-name="$CLUSTER_NAME" \
--provider-version=$PROVIDER_VERSION \
$LOG_OPTION \
$DEBUG_OPTION
check_exit_code $?
mys9s cluster --list --long
mys9s node --list --long
newClusterId=$(find_cluster_id $newClusterName)
if [ "$newClusterId" -gt 0 ]; then
success " o Cluster ID is $newClusterId, ok"
else
failure "Cluster ID '$newClusterId' is invalid"
fi
wait_for_cluster_started "$newClusterName"
end_verbatim
}
function testRemoveClusterFail()
{
local exitCode
print_title "Removing Cluster 0"
cat <<EOF
This test tries to remove cluster 0 and checks that this actually fails.
EOF
begin_verbatim
mys9s cluster \
--drop \
--cluster-id=0 \
$LOG_OPTION \
$DEBUG_OPTION
exitCode=$?
if [ $exitCode -eq 0 ]; then
failure "Removing the cluster with ID 0 should have failed."
else
success " o Removing cluster 0 failed, ok."
fi
end_verbatim
}
#
# Running the requested tests.
#
startTests
reset_config
grant_user
if [ "$OPTION_INSTALL" ]; then
runFunctionalTest testCreateCluster
elif [ "$1" ]; then
for testName in $*; do
runFunctionalTest "$testName"
done
else
runFunctionalTest testCreateCluster
runFunctionalTest testCreateClusterFail1
runFunctionalTest testCreateClusterDupl1
runFunctionalTest testCreateClusterDupl2
runFunctionalTest testRemoveClusterFail
fi
endTests
|
severalnines/s9s-tools
|
tests/ft_galera_new.sh
|
Shell
|
gpl-2.0
| 10,218 |
touch /data/logger/kernel.log
chmod 666 /data/logger/kernel.log
cat /proc/kmsg >> /data/logger/kernel.log
|
laufersteppenwolf/android_kernel_lge_d680
|
mediatek/config/mt6577/save_kernel_log.sh
|
Shell
|
gpl-2.0
| 106 |
#!/bin/sh
test_description='apply same filename'
. ./test-lib.sh
test_expect_success 'setup' '
mkdir -p some/sub/dir &&
echo Hello > some/sub/dir/file &&
git add some/sub/dir/file &&
git commit -m initial &&
git tag initial
'
cat > patch << EOF
diff a/bla/blub/dir/file b/bla/blub/dir/file
--- a/bla/blub/dir/file
+++ b/bla/blub/dir/file
@@ -1,1 +1,1 @@
-Hello
+Bello
EOF
test_expect_success 'apply --directory -p (1)' '
git apply --directory=some/sub -p3 --index patch &&
test Bello = $(git show :some/sub/dir/file) &&
test Bello = $(cat some/sub/dir/file)
'
test_expect_success 'apply --directory -p (2) ' '
git reset --hard initial &&
git apply --directory=some/sub/ -p3 --index patch &&
test Bello = $(git show :some/sub/dir/file) &&
test Bello = $(cat some/sub/dir/file)
'
test_done
|
vmiklos/gsoc2008
|
t/t4128-apply-root.sh
|
Shell
|
gpl-2.0
| 813 |
#!/bin/bash
uname=$(uname)
if [[ $uname == "Darwin" ]]; then
date_seconds="-r"
else
date_seconds="-d@"
fi
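# Fetch the current KR WoW token quote as JSON from the Battle.net API
# (jq just pretty-prints the response).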
function wowTokenCurrentPrice() {
jq . <(curl -s "https://kr.api.battle.net/data/wow/token/?namespace=dynamic-kr&locale=ko_KR&access_token=cr5na62qkm64vgk5c5w28nyg")
}
function telegramMsg() {
local tgCli="/snap/bin/telegram-cli"
local profile="$1"; shift
local peerId="$1"; shift
local msgText="$*"
if [[ -f $tgCli ]]; then
$tgCli -p $profile -D -W -e "msg $peerId \"$msgText\"" > /dev/null
else
echo $tgCli -p $profile -D -W -e "msg $peerId \"$msgText\"" > /dev/null
fi
}
function watchPrice() {
local sleepTime=$((60 * $1))
while true; do
korea=$(wowTokenCurrentPrice)
if [[ $korea && $koreaLast ]]; then
price=$(jq .price <(echo $korea))
price=$((price / 10000))
priceLast=$(jq .price <(echo $koreaLast))
priceLast=$((priceLast / 10000))
priceChange=$((price - priceLast))
if [[ $priceChange -ne 0 ]]; then
local msgPrice="$((price/10000)).$(((price%10000)/1000))"
[[ $koreaLast ]] && local msgPrice="$msgPrice $priceChange"
if [[ $((price/10000)) -lt 38 ]]; then
telegramMsg "floret" "Token_Low" $msgPrice &
else
telegramMsg "changmin" "Token" $msgPrice &
fi
echo "$(date ${date_seconds}$(($(jq .last_updated_timestamp <(echo $korea)) / 1000 ))) $msgPrice"
fi
fi
koreaLast=$korea
sleep $sleepTime
done
}
clear
watchPrice 1
|
mcm811/bin.osx
|
bash/watch_wow_token_price.sh
|
Shell
|
gpl-2.0
| 1,664 |
#!/bin/sh
# This script executes some teragens. It is convenient for putting
# data into your file system for some tests.
source ${HAMSTER_SCRIPTS_HOME}/lib/hamster-lib-hadoop-helper
cd ${HADOOP_HOME}
if Hamster_hadoop_is 1
then
terasortexamples="hadoop-examples-$HADOOP_VERSION.jar"
elif Hamster_hadoop_is 2
then
terasortexamples="share/hadoop/mapreduce/hadoop-mapreduce-examples-$HADOOP_VERSION.jar"
elif Hamster_hadoop_is 3
then
terasortexamples="share/hadoop/mapreduce/hadoop-mapreduce-examples-$HADOOP_VERSION.jar"
fi
command="bin/hadoop jar ${terasortexamples} teragen 50000000 teragen-1"
echo "Running $command" >&2
$command
command="bin/hadoop jar ${terasortexamples} teragen 50000000 teragen-2"
echo "Running $command" >&2
$command
command="bin/hadoop fs -ls"
echo "Running $command" >&2
$command
exit 0
|
DwyaneShi/Hamster
|
scripts/job/examples/hadoop-create-files-script.sh
|
Shell
|
gpl-2.0
| 833 |
#!/bin/bash
# NetHunter kernel for Asus Google Nexus 7 (2013) build script by jcadduono
################### BEFORE STARTING ################
#
# download a working toolchain and extract it somewhere and configure this
# file to point to the toolchain's root directory.
#
# once you've set up the config section how you like it, you can simply run
# DEVICE=[DEVICE] ./build.sh [VARIANT]
#
###################### CONFIG ######################
# root directory of NetHunter flo git repo (default is this script's location)
RDIR=$(pwd)
[ "$VER" ] ||
# version number
VER=$(cat "$RDIR/VERSION")
# directory containing cross-compile arm toolchain
TOOLCHAIN=$HOME/build/toolchain/gcc-linaro-4.9-2016.02-x86_64_arm-linux-gnueabihf
# amount of cpu threads to use in kernel make process
THREADS=5
############## SCARY NO-TOUCHY STUFF ###############
export ARCH=arm
export CROSS_COMPILE=$TOOLCHAIN/bin/arm-linux-gnueabihf-
[ "$1" ] && DEVICE=$1
[ "$DEVICE" ] || DEVICE=flo
[ "$TARGET" ] || TARGET=nethunter
DEFCONFIG=${TARGET}_${DEVICE}_defconfig
ABORT()
{
echo "Error: $*"
exit 1
}
[ -f "$RDIR/arch/$ARCH/configs/${DEFCONFIG}" ] ||
abort "Config $DEFCONFIG not found in $ARCH configs!"
export LOCALVERSION=$TARGET-$DEVICE-$VER
KDIR=$RDIR/build/arch/$ARCH/boot
CLEAN_BUILD()
{
echo "Cleaning build..."
cd "$RDIR"
rm -rf build
}
SETUP_BUILD()
{
echo "Creating kernel config for $LOCALVERSION..."
cd "$RDIR"
mkdir -p build
make -C "$RDIR" O=build "$DEFCONFIG" \
|| ABORT "Failed to set up build"
}
BUILD_KERNEL()
{
echo "Starting build for $LOCALVERSION..."
while ! make -C "$RDIR" O=build -j"$THREADS"; do
read -p "Build failed. Retry? " do_retry
case $do_retry in
Y|y) continue ;;
*) return 1 ;;
esac
done
}
CLEAN_BUILD && SETUP_BUILD && BUILD_KERNEL && echo "Finished building $LOCALVERSION!"
|
jcadduono/nethunter_kernel_mako_flo
|
build.sh
|
Shell
|
gpl-2.0
| 1,826 |
#!/bin/sh
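# Sketch of what this entrypoint does: if running as root, exec the command
# directly; otherwise create a user/group matching USER_ID/GROUP_ID (defaulting
# to the invoking uid/gid), give it ownership of the app files, and exec the
# command as that user via sudo.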
[[ "z${USER_ID}" == "z" ]] && USER_ID=$(id -u)
[[ "z${GROUP_ID}" == "z" ]] && GROUP_ID=$(id -g)
if [[ ${USER_ID} -eq 0 ]];then
exec "$@"
else
RUN_USER=runuser
RUN_GROUP=$(getent group|awk -F: '$3=='${GROUP_ID}'{print $1}')
if [ "z${RUN_GROUP}" == "z" ];then
RUN_GROUP=rungroup
addgroup -g ${GROUP_ID} ${RUN_GROUP}
fi
adduser -s /bin/sh -u ${USER_ID} -G ${RUN_GROUP} -D -S ${RUN_USER}
# Give user write access to app files
chown ${RUN_USER}:${RUN_GROUP} -R .
exec sudo -E -u "${RUN_USER}" "$@"
fi
|
ughjobs/ughjobs
|
docker_entrypoint.sh
|
Shell
|
gpl-3.0
| 565 |
echo "INFO, test als_als"
i2c_add=44
thresh_low=20
thresh_bright=100
echo "INFO, Scanning for ALS"
found=`i2cdetect -y 9 | grep " $i2c_add "`
if [ -z "$found" ]; then
echo "FAIL, Device 0x$i2c_add NOT found"
exit 1
else
echo "PASS, Device 0x$i2c_add found"
fi
#set mode to ALS continuous
i2cset -y 9 0x$i2c_add 0x00 0xa0
echo "INFO, Reading Bright Light"
echo "INPT, Please set ambient light to bright (press any key to continue)..."
read keyboard
#read data
value=`i2cget -y 9 0x$i2c_add 0x03`
value_dec=$((0x${value:2}))
if [ "$value_dec" -gt "$thresh_bright" ]; then
echo "PASS, Bright Light detected, $value_dec"
else
echo "FAIL, Bright Light NOT detected, $value_dec"
fi
echo "INFO, Reading Low Light"
echo "INPT, Please set ambient light to low (press any key to continue)..."
read keyboard
#read data
value=`i2cget -y 9 0x$i2c_add 0x03`
value_dec=$((0x${value:2}))
if [ "$value_dec" -lt "$thresh_low" ]; then
echo "PASS, Low Light detected, $value_dec"
else
echo "FAIL, Low Light NOT detected, $value_dec"
fi
|
auspbro/CodeSnippets
|
Shell/Prj_QF7/als_als.sh
|
Shell
|
gpl-3.0
| 1,036 |
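# Install this vimrc plus Vundle and YouCompleteMe (built with the clang completer).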
cp -f my.vimrc ~/.vimrc
git clone https://github.com/VundleVim/Vundle.vim.git ~/.vim/bundle/Vundle.vim
git clone https://github.com/Valloric/YouCompleteMe ~/.vim/bundle/YouCompleteMe
cd ~/.vim/bundle/YouCompleteMe
git submodule update --init --recursive
python install.py --clang-completer
cd ~
|
Rjerk/snippets
|
vim/install.sh
|
Shell
|
gpl-3.0
| 301 |
#!/bin/bash
######################################
# Cache polulate tool for HLS stream #
# using curl #
# #
# [email protected] #
# #
# v.0.03 #
######################################
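# Assumed usage: ./hls_populate_cache.sh <stream-name>
# $1 is substituted into the .smil DVR URL below; the two bitrate chunklists are
# then fetched with curl to warm the cache.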
URL="http://94.156.44.142:8080/dvr/$1.smil"
PLAYLIST="$URL/playlist.m3u8?DVR"
IFS=$'\r\n' GLOBIGNORE='*' command eval "BITRATE_PLAYLISTS=($(curl -s -sH 'Accept-encoding: gzip' --compressed $PLAYLIST |grep -v '#'))"
echo ""
echo "${BITRATE_PLAYLISTS[0]}"
echo "${BITRATE_PLAYLISTS[1]}"
IFS=$'\r\n' GLOBIGNORE='*' command eval "CHUNKLIST1=($(curl -s -sH 'Accept-encoding: gzip' --compressed $URL/${BITRATE_PLAYLISTS[0]} |grep -v '#'))"
IFS=$'\r\n' GLOBIGNORE='*' command eval "CHUNKLIST2=($(curl -s -sH 'Accept-encoding: gzip' --compressed $URL/${BITRATE_PLAYLISTS[1]} |grep -v '#'))"
echo "$PLAYLIST"
echo "${CHUNKLIST1[0]}"
echo "${CHUNKLIST2[0]}"
echo "${CHUNKLIST1[-1]}"
echo "${CHUNKLIST2[-1]}"
FIRSTCH1=0
FIRSTCH2=0
LASTCH1=$((${#CHUNKLIST1[@]} - 1))
LASTCH2=$((${#CHUNKLIST2[@]} - 1))
for (( c=$FIRSTCH1; c<=$LASTCH1; c++ ))
do
CHUNKS1="$CHUNKS1 $URL/${CHUNKLIST1[$c]}"
done
for (( c=$FIRSTCH2; c<=$LASTCH2; c++ ))
do
CHUNKS2="$CHUNKS2 $URL/${CHUNKLIST2[$c]}"
done
curl -s $CHUNKS1 >/dev/null 2>&1
curl -s $CHUNKS2 >/dev/null 2>&1
|
jivco/wowza-nginx-cdn
|
hls-stress-tools/hls_populate_cache.sh
|
Shell
|
gpl-3.0
| 1,368 |
#!/bin/bash -e
. ../test-lib.sh 2>/dev/null || { echo "Must run in script directory!" ; exit 1 ; }
# Smoke test to see if all output formats are not crashing
run_bob show something-invalid
run_bob show root --format=yaml --indent=2
run_bob show root --format=json --no-indent
run_bob show root --format=flat
run_bob show "//*" --format=flat
run_bob show --sandbox root/tool root/sandbox --format=diff
# The diff format expects two packages. Otherwise it must fail
expect_fail run_bob show root --format=diff
# Normally common lines are suppressed in diff format. Can be enabled
# selectively, though.
run_bob show --format diff root/dep/ root/sandbox/ | expect_fail grep -q scriptLanguage
run_bob show --format diff --show-common root/dep/ root/sandbox/ | grep -q scriptLanguage
# Verify that empty properties are hidden by default but can be activated
run_bob show root --no-indent | python3 -c '
import sys, yaml
d = yaml.load(sys.stdin.read(), Loader=yaml.Loader)
assert "checkoutTools" not in d
'
run_bob show root --show-empty | python3 -c '
import sys, yaml
d = yaml.load(sys.stdin.read(), Loader=yaml.Loader)
assert d["checkoutTools"] == {}
'
# Verify that filtering works as expected
run_bob show root -f buildVars -f packageVars | python3 -c '
import sys, yaml
d = yaml.load(sys.stdin.read(), Loader=yaml.Loader)
assert set(d.keys()) == {"buildVars", "packageVars"}
assert set(d["buildVars"].keys()) == {"FOO", "BAR"}
assert set(d["packageVars"].keys()) == {"FOO", "BAR", "META"}
'
|
BobBuildTool/bob
|
test/show/run.sh
|
Shell
|
gpl-3.0
| 1,497 |
#!/bin/bash
################################################################################
# Author: Kais FRIKHA #
# Script: This script is dedicated for making snapshot of database : MySQL #
# Script: This script is dedicated for making snapshot of database : MariaDB #
# date version Author #
# ---------------------------------------------- #
# 15/03/2016 1.0.0 Kais FRIKHA #
################################################################################
db_host=""
db_login=""
db_password=""
date_backup=`date +%Y%m%d`
backup_dir=""
help_message() {
clear
echo " $0 [-h] [-s] <DB Server> [-u] <DB Login> [-p] <user password> [-d] <path of backup directory>"
echo " where:"
echo " -h : show help message"
echo " -s : put the db address"
echo " -u : put the login of db user"
echo " -p : put the password of user"
echo " -d : put the path of backup directory"
}
verif_arg (){
if [ ! -e "$4" ]; then
logging "ERR"
echo " This path does not exist!"
exit 2
fi
if [ ! -d "$4" ]; then
logging "ERR"
echo " This path does not represnt a directory!"
exit 2
fi
if [ ! -r "$4" ]; then
logging "ERR"
echo " This User ( ${USER} ) does not have permission to access to this directory $1"
exit 2
fi
}
logging (){
log_date=`date +%Y/%m/%d`
log_time=`date +%H:%M:%S`
case "$1" in
SRS)
echo ${log_date}" "${log_time}" Start backup of MySQL database"
;;
STS)
echo ${log_date}" "${log_time}" End backup of MySQL database"
;;
SRD)
echo ${log_date}" "${log_time}" Start safeguard of MySQL database"
;;
STD)
echo ${log_date}" "${log_time}" End safeguard of MySQL database"
;;
SRP)
echo ${log_date}" "${log_time}" Starting purge of backup directory"
;;
STP)
echo ${log_date}" "${log_time}" End purge of backup directory"
;;
*)
echo ${log_date}" "${log_time}" ERROR : Verfiy logs files and the script "
;;
esac
}
#######################################Main############################################
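# Example invocation (hypothetical values):
# ./dbbackup.sh -s 127.0.0.1 -u backupuser -p 'secret' -d /var/backups/mysql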
logging "SRS"
if [ $# == 0 ]; then
logging "ERR"
help_message
exit 1
fi
OPTIND=1
while getopts ':s:u:p:d:h' option
do
case "$option" in
s)
db_host=$OPTARG
;;
u)
db_login=$OPTARG
;;
p)
db_password=$OPTARG
;;
d)
backup_dir=$OPTARG
;;
:)
printf "missing argument for -%s\n" "$OPTARG" >&2
logging "ERR"
help_message
exit 1
;;
\?)
printf "illegal option: -%s\n" "$OPTARG" >&2
logging "ERR"
help_message
exit 1
;;
*)
logging "ERR"
help_message
exit 1
;;
esac
done
shift $((OPTIND-1))
verif_arg $db_host $db_login $db_password $backup_dir
logging "SRD"
DB_LIST=`echo 'show databases' | mysql --host=${db_host} --user=${db_login} --password=${db_password}`
for db in $DB_LIST; do
if [ $db != "information_schema" ] && [ $db != "mysql" ] && [ $db != "performance_schema" ] && [ $db != "root" ] && [ $db != "phpmyadmin" ] && [ $db != "Database" ] && [ $db != "admin" ]; then
mysqldump --host=${db_host} --user=${db_login} --password=${db_password} ${db} | gzip > ${backup_dir}/db_backup_${db}_${date_backup}.sql.gz
fi
done
logging "STD"
logging "SRP"
find ${backup_dir} -type f -name '*.gz' -mtime +30 -exec rm -f {} \; 2>/dev/null
logging "STP"
#######################################################################################
logging "STS"
exit 0
|
kaissfr/Tools
|
dbbackup.sh
|
Shell
|
gpl-3.0
| 3,533 |
#!/bin/bash
## Ref:
## https://stackoverflow.com/questions/14894605/shell-script-to-create-folder-daily-with-time-stamp-and-push-time-stamp-generate
## https://crunchify.com/shell-script-append-timestamp-to-file-name/
foldername=$(date +%Y%m%d)
echo $foldername
filename=$(date +%Y%m%d%H%M%S)
echo $filename
current_time=$(date "+%Y.%m.%d-%H.%M.%S")
echo "Current Time : $current_time"
# /home/app/logs/$(date +%Y%m%d)/test$(date +%Y%m%d%H%M%S).log
current_path_without_full_path=${PWD##*/}
echo $current_path_without_full_path
CURRENT=`pwd`
BASENAME=`basename "$CURRENT"` ##basename external dependency
echo "$BASENAME"
## Ref:
## http://mywiki.wooledge.org/BashFAQ/028
## https://stackoverflow.com/questions/4774054/reliable-way-for-a-bash-script-to-get-the-full-path-to-itself
SCRIPT_PATH=$(dirname $(realpath -s $0)) ##realpath external dependency
echo $SCRIPT_PATH
SCRIPT_PATH=$(cd `dirname "${BASH_SOURCE[0]}"` && pwd)
echo $SCRIPT_PATH
SCRIPT_PATH="$(cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd)" ##SCRIPT_PATH="$(cd $(dirname ${BASH_SOURCE[0]}) && pwd)"
echo $SCRIPT_PATH
ABSOLUTE_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/$(basename "${BASH_SOURCE[0]}")"
echo $ABSOLUTE_PATH
## Ref:
## https://tecadmin.net/how-to-extract-filename-extension-in-shell-script/
# fullfilename=$1
fullfilename=$0
filename=$(basename "$fullfilename")
fname="${filename%.*}"
ext="${filename##*.}"
echo "Input File: $fullfilename"
echo "Filename without Path: $filename"
echo "Filename without Extension: $fname"
echo "File Extension without Name: $ext"
## Ref:
## https://stackoverflow.com/questions/2352380/how-to-get-extension-of-a-file-in-shell-script
file_ext=$(echo $filename |awk -F . '{if (NF>1) {print $NF}}')
echo $file_ext
|
mangalbhaskar/linuxscripts
|
util-cmd.sh
|
Shell
|
gpl-3.0
| 1,748 |
#!/usr/bin/env bash
mkdir -p ./results/boscastle50m_72_u/
mkdir -p ./results/boscastle50m_72_u_flipped_lr/
# Boscastle test 1
# 50m resolution, 72 hours rainfall
../bin/HAIL-CAESAR.exe ./input_data/boscastle/boscastle_input_data/ boscastle_test_72hr_50m_u.params
../bin/HAIL-CAESAR.exe ./input_data/boscastle/boscastle_input_data/ boscastle_test_72hr_50m_u_flipped_lr.params
|
dvalters/HAIL-CAESAR
|
test/run_tests.sh
|
Shell
|
gpl-3.0
| 375 |
#!/bin/bash
## Copyright (C) 2016 D S Pavan Kumar
## dspavankumar [at] gmail [dot] com
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
set -e
nj=4
. cmd.sh
. path.sh
## Configurable directories
train=data/train
test=data/test
lang=data/lang
gmm=exp/tri2b
exp=exp/dnn_5b
## Split training data into train and cross-validation sets
[ -d ${train}_tr95 ] || utils/subset_data_dir_tr_cv.sh --cv-spk-percent 5 $train ${train}_tr95 ${train}_cv05
## Align data using GMM
for dset in cv05 tr95; do
[ -f ${gmm}_ali_$dset/ali.1.gz ] || steps/align_si.sh --nj $nj --cmd "$train_cmd" \
${train}_$dset $lang $gmm ${gmm}_ali_$dset
done
## Train
[ -f $exp/dnn.nnet.h5 ] || python3 steps_kt/train_LSTM.py ${train}_cv05 ${gmm}_ali_cv05 ${train}_tr95 ${gmm}_ali_tr95 $gmm $exp
## Make graph
[ -f $gmm/graph/HCLG.fst ] || utils/mkgraph.sh ${lang}_test_bg $gmm $gmm/graph
## Decode
[ -f $exp/decode/wer_11 ] || bash steps_kt/decode_seq.sh --nj $nj \
--add-deltas "true" --norm-vars "true" --splice-size "11" \
$test $gmm/graph $exp $exp/decode
#### Align
## [ -f ${exp}_ali ] || steps_kt/align_seq.sh --nj $nj --cmd "$train_cmd" \
## --add-deltas "true" --norm-vars "true" --splice-opts "11" \
## $train $lang $exp ${exp}_ali
|
dspavankumar/keras-kaldi
|
run_kt_LSTM.sh
|
Shell
|
gpl-3.0
| 1,863 |
#!/bin/sh
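# Regenerate the autotools build system; set NOCONFIGURE to a non-empty value
# to skip running ./configure.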
aclocal
autoconf
autoheader
automake --add-missing
if test x$NOCONFIGURE = x; then
./configure $*
fi
|
cykerway/ncmpcr
|
autogen.sh
|
Shell
|
gpl-3.0
| 117 |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor Boston, MA 02110-1301, USA
name=nam_conus
fullname="12km Resolution CONUS NAM Forecast Model"
description='Data from <a href=\"http://www.ncep.noaa.gov/\">
National Centers for Environmental Prediction</a>
generated four times daily is contoured and converted to kml
for forecast hours 0-96 in 3 hour increments.'
levels="200 200 mb
300 300 mb
500 500 mb
700 700 mb
850 850 mb
925 925 mb
1000 1000 mb"
products200="wind Wind
t Temperature
rh Relative Humidity
hgt Heights"
products300="wind Wind
t Temperature
rh Relative Humidity
hgt Heights"
products500="wind Wind
t Temperature
rh Relative Humidity
hgt Heights
thk Thickness"
products700="wind Wind
t Temperature
rh Relative Humidity
hgt Heights
thk Thickness"
products850="wind Wind
t Temperature
rh Relative Humidity
hgt Heights
thk Thickness"
products925="wind Wind
t Temperature
rh Relative Humidity
hgt Heights
thk Thickness"
products1000="wind Wind
t Temperature
rh Relative Humidity
hgt Heights"
levels2="0 0 - none"
products2="pwat Perceptible Water
pmsl Mean Sea Level Pressure
pcp 3 Hour Total Precipitation
rain 3 Hour Total Rain Precipitation
snow 3 Hour Total Snow Precipitation
frez 3 Hour Total Freezing Precipitation
pellet 3 Hour Total Pellet Precipitation
pop Probability of Precipitation
popz Probability of Frozen Precipitation
popf Probability of Freezing Precipitation
tstm Probability of Thunderstorm
cape Convective Available Potential Energy"
times="latest Latest
00 00Z
06 06Z
12 12Z
18 18Z"
runs="latest Latest
00 00Z
06 06Z
12 12Z
18 18Z"
prodmap=" plot, HGHT, hgt, 20, 84 3, -H,
diffplot, HGHT, thk, 20, 84 3, , HGHT
plot, TMPK, t, 1, 84 3, ,
plot, RELH, rh, 10, 84 3, ,
windplot, UREL, wind, 5, 84 3, , VREL
plot, CAPE, cape, 200, 84 3, ,
plot, PWTR, pwat, 4, 84 3, -I,
plot, P03M, pcp, 4, 84 3, -I,
andplot, WXTS, snow, 4, 84 3, -I, P03M
andplot, WXTZ, frez, 4, 84 3, -I, P03M
andplot, WXTR, rain, 4, 84 3, -I, P03M
andplot, WXTP, pellet, 4, 84 3, -I, P03M
plot, PMSL, pmsl, 200, 84 3, -H,
plot, POPF03, popf, 10, 84 3, ,
plot, TSTM03, tstm, 10, 84 3, ,
plot, POP03, pop, 10, 84 3, ,
plot, POPZ03, popz, 10, 84 3, ,"
|
winkey/grib_contour
|
share/nam_conus.config.sh
|
Shell
|
gpl-3.0
| 3,297 |
#!/bin/bash
#
# AppOS(R) Build Script (ABS)
# Copyright (c) 2002-2008 Spliced Networks LLC
#
# initialize some basic values
SNVER_ABS="7.0.2"
SNVER_IBE="4.0.0.0"
SNVER_APPOS="4.0.0.0"
SNVER_COPYSTART="2002"
SNVER_COPYEND="2008"
export SNVER_ABS SNVER_IBE SNVER_APPOS SNVER_COPYSTART SNVER_COPYEND
# set the current directory to the build root
SNBLD=`pwd`
export SNBLD
echo ""
echo ""
echo "ABS version $SNVER_ABS"
echo "Copyright (c) $SNVER_COPYSTART-$SNVER_COPYEND Spliced Networks LLC"
echo ""
SNPLAT=`uname -a | grep x86_64`
export SNPLAT
if [ "$SNPLAT" ]; then
echo ""
echo "Building x86_64 platform, skipping x86"
echo ""
./build-x86_64.sh
echo "Starting hardened AppOS build.."
./hbuild-x86_64.sh
else
echo ""
echo "Building x86 platform, skipping x86_64"
echo ""
./build-x86.sh
echo "Starting hardened AppOS build.."
./hbuild-x86.sh
fi
#
# TODO:
#
# The script now needs to do a couple of things
#
# 1. extract the development chroot
# 2. extract the core AppOS
# 3. create the cpio for AppOS
# 4. build kernels for all platforms
#
|
buswellj/code
|
AppOS-Linux/abs-7.0.2/bldsys/build.sh
|
Shell
|
gpl-3.0
| 1,068 |
#!/bin/bash
clear
echo -e "Traktor v1.8\nTor will be automatically installed and configured…\n\n"
function acceptance_agreement()
{
echo "This script is going to install these applications:"
echo "-------------------------------------------"
# Applications list
echo -e " * Tor\n * Obfs4proxy\n * dnscrypt-proxy\n * torbrowser-launcher\n * apt-transport-tor"
echo "-------------------------------------------"
echo "Do you agree ?(y/n)"
read answer
answer=${answer:-'y'} # set default value as yes
case $answer in
y|Y)
clear
echo "Start installation..."
;;
n|N)
echo "Cancel installation and exit..."
exit 2
;;
*)
echo "Wrong answer!"
echo "Exiting..."
exit
;;
esac
}
acceptance_agreement
# Install Packages
sudo dnf install -y \
tor \
privoxy \
dnscrypt-proxy \
torbrowser-launcher
sudo dnf install -y \
make \
automake \
gcc \
python-pip \
python-devel \
libyaml-devel \
redhat-rpm-config
# sudo pip install obfsproxy
sudo dnf install -y obfs4
if [ -f "/etc/tor/torrc" ]; then
echo "Backing up the old torrc to '/etc/tor/torrc.traktor-backup'..."
sudo cp /etc/tor/torrc /etc/tor/torrc.traktor-backup
fi
#configuring dnscrypt-proxy
sudo wget https://ubuntu-ir.github.io/traktor/dnscrypt-proxy.service-fedora -O /etc/systemd/system/dnscrypt.service > /dev/null
sudo systemctl daemon-reload
echo "nameserver 127.0.0.1" | sudo tee /etc/resolv.conf >/dev/null
#sudo chattr +i /etc/resolv.conf
sudo systemctl enable dnscrypt.service
sudo systemctl start dnscrypt.service
# Write Bridge
sudo wget https://ubuntu-ir.github.io/traktor/torrcV3 -O /etc/tor/torrc > /dev/null
# Change tor log file owner
sudo touch /var/log/tor/log
sudo chown toranon:toranon /var/log/tor/log
# Write Privoxy config
sudo perl -i -pe 's/^listen-address/#$&/' /etc/privoxy/config
echo 'logdir /var/log/privoxy
listen-address 0.0.0.0:8118
forward-socks5t / 127.0.0.1:9050 .
forward 192.168.*.*/ .
forward 10.*.*.*/ .
forward 127.*.*.*/ .
forward localhost/ .' | sudo tee -a /etc/privoxy/config > /dev/null
sudo systemctl enable privoxy
sudo systemctl restart privoxy.service
# Set IP and Port on HTTP and SOCKS
gsettings set org.gnome.system.proxy mode 'manual'
gsettings set org.gnome.system.proxy.http host 127.0.0.1
gsettings set org.gnome.system.proxy.http port 8118
gsettings set org.gnome.system.proxy.socks host 127.0.0.1
gsettings set org.gnome.system.proxy.socks port 9050
gsettings set org.gnome.system.proxy ignore-hosts "['localhost', '127.0.0.0/8', '::1', '192.168.0.0/16', '10.0.0.0/8', '172.16.0.0/12']"
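# To undo these proxy settings later (not part of this script, shown for reference):
# gsettings set org.gnome.system.proxy mode 'none'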
# Installation finished
echo "Installation finished successfully…"
# Wait for tor to establish connection
echo "Tor is trying to establish a connection. This may take a few minutes. Please wait…" | sudo tee /var/log/tor/log
bootstraped='n'
sudo systemctl enable tor.service
sudo systemctl restart tor.service
while [ "$bootstraped" = 'n' ]; do
if sudo grep "Bootstrapped 100%: Done" /var/log/tor/log; then
bootstraped='y'
else
sleep 1
fi
done
# update finished
echo "Congratulations!!! Your computer is using Tor. may run torbrowser-launcher now."
|
ubuntu-ir/traktor
|
traktor_fedora.sh
|
Shell
|
gpl-3.0
| 3,416 |
#! /bin/sh
set -e
# Impurity scattering with various doping profiles
#
# This script is part of the QWWAD software suite. Any use of this code
# or its derivatives in published work must be accompanied by a citation
# of:
# P. Harrison and A. Valavanis, Quantum Wells, Wires and Dots, 4th ed.
# Chichester, U.K.: J. Wiley, 2015, ch.2
#
# (c) Copyright 1996-2014
# Alex Valavanis <[email protected]>
#
# QWWAD is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# QWWAD is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with QWWAD. If not, see <http://www.gnu.org/licenses/>.
# Initialise files
outfile=imp-profiles.dat
rm -f $outfile
# Generate square well with delta doping in middle of well
# Set volume doping to give sheet density of 1e10 cm^{-2} in each level
cat > s.r << EOF
200 0.15 0.0
195 0.0 0.0
10 0.0 1e17
195 0.0 0.0
200 0.15 0.0
EOF
nz=801
find_heterostructure --nz-1per $nz
efxv
# Solve Schroedinger equation
efss --nst 2
# Define subband populations in file `N.r'
densityinput --type even
# Define required rate
echo "2 1" > rrp.r
for T in `seq 10 10 300`; do
# Calculate distribution function
sbp --Te $T
# Find impurity scattering WITH screening
imp --temperature $T
rate=`awk '{print $3}' imp-avg.dat`
printf "%d %e\n" $T $rate >> $outfile
done
printf "\n" >> $outfile
# Generate square well with constant doping in entire well
# Set volume doping to give sheet density of 1e10 cm^{-2} in each level
cat > s.r << EOF
200 0.15 0.0
400 0.0 2.5e15
200 0.15 0.0
EOF
nz=801
find_heterostructure --nz-1per $nz
efxv
# Solve Schroedinger equation
efss --nst 2
# Define subband populations in file `N.r'
densityinput --type even
# Define required rate
echo "2 1" > rrp.r
for T in `seq 10 10 300`; do
# Calculate distribution function
sbp --Te $T
# Find impurity scattering WITH screening
imp --temperature $T
rate=`awk '{print $3}' imp-avg.dat`
printf "%d %e\n" $T $rate >> $outfile
done
printf "\n" >> $outfile
# Generate square well with modulation doping in barriers
# Set volume doping to give sheet density of 1e10 cm^{-2} in each level
cat > s.r << EOF
200 0.15 2.5e15
400 0.0 0.0
200 0.15 2.5e15
EOF
nz=801
find_heterostructure --nz-1per $nz
efxv
# Solve Schroedinger equation
efss --nst 2
# Define subband populations in file `N.r'
densityinput --type even
# Define required rate
echo "2 1" > rrp.r
for T in `seq 10 10 300`; do
# Calculate distribution function
sbp --Te $T
# Find impurity scattering WITH screening
imp --temperature $T
rate=`awk '{print $3}' imp-avg.dat`
printf "%d %e\n" $T $rate >> $outfile
done
cat << EOF
Results have been written to $outfile in the format:
COLUMN 1 - Temperature [K]
COLUMN 2 - |2> -> |1> scattering rate [s^{-1}]
The file contains 3 data sets:
SET 1 - Delta-doping in centre of well
SET 2 - Constant doping throughout well
SET 3 - Constant doping in barriers
This script is part of the QWWAD software suite.
(c) Copyright 1996-2015
Alex Valavanis <[email protected]>
Paul Harrison <[email protected]>
Report bugs to https://bugs.launchpad.net/qwwad
EOF
# Clean up workspace
# rm -f *.r
|
QWWAD/qwwad
|
examples/carrier-scattering/imp-profiles.sh
|
Shell
|
gpl-3.0
| 3,650 |
#!/bin/bash
# create multiresolution windows icon
ICON_SRC=../../src/qt/res/icons/Paris.png
ICON_DST=../../src/qt/res/icons/Paris.ico
convert ${ICON_SRC} -resize 16x16 Paris-16.png
convert ${ICON_SRC} -resize 32x32 Paris-32.png
convert ${ICON_SRC} -resize 48x48 Paris-48.png
convert Paris-16.png Paris-32.png Paris-48.png ${ICON_DST}
|
ParisCoin/Paris
|
share/qt/make_windows_icon.sh
|
Shell
|
gpl-3.0
| 335 |
#!/bin/sh
while read line; do
echo 'http://i.huffpost.com/gen/1548397/images/o-NIC-CAGE-facebook.jpg'
done
|
smikims/arpspoof
|
cage.sh
|
Shell
|
gpl-3.0
| 108 |
#!/bin/bash
echo -e "Traktor v1.3\nTor will be automatically uinstalled ...\n\n"
sudo apt remove -y \
tor \
obfs4proxy \
polipo \
dnscrypt-proxy \
torbrowser-launcher \
apt-transport-tor
sudo rm -f /etc/tor/torrc \
/etc/apparmor.d/abstractions/tor \
/etc/apparmor.d/system_tor &> /dev/null
gsettings set org.gnome.system.proxy mode 'auto'
sudo rm -f /etc/apt/sources.list.d/tor.list &> /dev/null
sudo rm -f /usr/share/applications/traktor_gui_panel.desktop > /dev/null
sudo rm -f ~/.traktor_gui_panel
gpg --delete-keys A3C4F0F979CAA22CDBA8F512EE8CBC9E886DDD89
# User should enter 'y' to delete the public key from keyring.
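# A non-interactive equivalent (assumption, not used by the original script):
# gpg --batch --yes --delete-keys A3C4F0F979CAA22CDBA8F512EE8CBC9E886DDD89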
echo "Uninstalling Finished Successfully."
|
AmirrezaFiroozi/traktor
|
uninstall_debian.sh
|
Shell
|
gpl-3.0
| 683 |
#!/bin/sh
tar -xf sqlite-330-for-speedtest.tar.gz
cd sqlite
./configure
if [ "$OS_TYPE" = "BSD" ]
then
gmake speedtest1
else
make speedtest1
fi
echo $? > ~/install-exit-status
cd ~
echo "#!/bin/sh
cd sqlite
./speedtest1 \$@ > \$LOG_FILE 2>&1
echo \$? > ~/test-exit-status" > sqlite-speedtest
chmod +x sqlite-speedtest
|
phoronix-test-suite/phoronix-test-suite
|
ob-cache/test-profiles/pts/sqlite-speedtest-1.0.1/install.sh
|
Shell
|
gpl-3.0
| 321 |
conda create --copy --name hls4ml-env python=3.6
source activate hls4ml-env
## CPU version of pytorch for now
#conda install pytorch torchvision -c pytorch
conda install pytorch-cpu torchvision-cpu -c pytorch
conda install -c anaconda scikit-learn h5py pyyaml
|
hls-fpga-machine-learning/hls-fpga-machine-learning
|
install.sh
|
Shell
|
gpl-3.0
| 263 |
#!/bin/sh
# Removal script for kernel module "module2.ko"
KO_PATH=$1
rmmod "$KO_PATH"
|
legatoproject/legato-af
|
apps/test/framework/mk/system/kernelmodule/module/files/module2/scripts/remove.sh
|
Shell
|
mpl-2.0
| 85 |
rm -v *.aux *.lof *.log *.bbl *.blg *.toc *.synctex.gz
|
aureooms-ulb-2010-2015/2012-2013-infof302-project
|
Rapport/clear.sh
|
Shell
|
agpl-3.0
| 54 |
#!/bin/bash
. $(dirname $0)/config.inc
rsync -aO $WORKINGDIR"/web/generation/" $COUCHDISTANTHOST":"$WORKINGDIR"/web/generation"
rsync -aO $WORKINGDIR"/"$EXPORTDIR"/" $COUCHDISTANTHOST":"$WORKINGDIR"/"$EXPORTDIR
|
24eme/AVA
|
project/bin/sync_instances.sh
|
Shell
|
agpl-3.0
| 213 |
#!/bin/bash -e
#===============================================================================
# FILE: kaltura-drop-db.sh
# USAGE: ./kaltura-drop-db.sh
# DESCRIPTION:
# OPTIONS: ---
# LICENSE: AGPLv3+
# REQUIREMENTS: ---
# BUGS: ---
# NOTES: ---
# AUTHOR: Jess Portnoy (), <[email protected]>
# ORGANIZATION: Kaltura, inc.
# CREATED: 01/24/14 12:50:13 EST
# REVISION: ---
#===============================================================================
#set -o nounset # Treat unset variables as an error
if [ ! -r /opt/kaltura/bin/db_actions.rc ];then
echo "I can't drop without /opt/kaltura/bin/db_actions.rc"
exit 1
fi
. /opt/kaltura/bin/db_actions.rc
. /opt/kaltura/bin/colors.sh
RC_FILE=/etc/kaltura.d/system.ini
if [ ! -r "$RC_FILE" ];then
echo "Could not find $RC_FILE so, exiting.."
exit 1
fi
. $RC_FILE
echo -en "${CYAN}This will drop the following DBs:
$DBS
and remove users:
$DB_USERS
on $DB1_HOST
${NORMAL}
"
if [ -n "$1" ];then
DBPASSWD=$1
else
echo -en "${BRIGHT_RED}
NOTE: this is not reversible.
It is recommended you also back up the current data using mysqldump before continuing.
You can use /opt/kaltura/bin/kaltura-export-db.sh to export the data.
Are you absolutely certain you want this? [n/Y]
${NORMAL}
"
read AN
if [ "$AN" != 'Y' ];then
echo "Aborting. To remove hit UPPER CASED 'Y'"
exit 1
fi
echo "root DB passwd:"
read -s DBPASSWD
fi
for i in $DB_USERS;do echo "drop user $i" | mysql -u$SUPER_USER -h$DB1_HOST -p$DBPASSWD -P$DB1_PORT;done
for i in $DBS;do
echo -en "${CYAN}Removing $i..${NORMAL}"
echo "drop database $i" | mysql -u$SUPER_USER -h$DB1_HOST -p$DBPASSWD -P$DB1_PORT ;
done
|
doubleshot/platform-install-packages
|
RPM/scripts/postinst/kaltura-drop-db.sh
|
Shell
|
agpl-3.0
| 1,745 |
#!/bin/bash
# Copyright 2013 Telefonica Investigación y Desarrollo, S.A.U
#
# This file is part of FI-WARE LiveDemo App
#
# FI-WARE LiveDemo App is free software: you can redistribute it and/or modify it under the terms
# of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# FI-WARE LiveDemo App is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License
# for more details.
#
# You should have received a copy of the GNU Affero General Public License along with FI-WARE LiveDemo App. If not,
# see http://www.gnu.org/licenses/.
#
# For those usages not covered by the GNU Affero General Public License please contact with fermin at tid dot es
curl ${CEP_HOST}:${CEP_PORT}/ProtonOnWebServerAdmin/resources/instances/ProtonOnWebServer -X PUT -s -S --header 'Content-Type: application/json' -d @- <<EOF
{"action":"ChangeState","state":"start"}
EOF
|
telefonicaid/fiware-livedemoapp
|
scripts/cep-start.sh
|
Shell
|
agpl-3.0
| 1,114 |
#!/bin/bash -xe
sudo docker exec -it $(sudo docker ps | grep 'kafka_destination' | awk '{print $1}') /kafka/bin/kafka-console-consumer.sh --zookeeper localhost:2181 --topic test --from-beginning
|
ihrwein/syslog-ng-docker
|
playground/syslog-ng-ose-with-kafka-destination/check_logs_on_kafka.sh
|
Shell
|
lgpl-2.1
| 195 |
#!/bin/sh
set -u
set -e
set -x
if [ $# != 1 ]; then
echo "Usage: $0 VERSION"
echo " e.g.: $0 1.2.6"
exit 1
fi
new_nginx_version="$1"
base_dir="$(dirname "$0")"
top_src_dir="${base_dir}/.."
nginx_version_file="${top_src_dir}/nginx_version"
current_nginx_version=$(cat "${nginx_version_file}")
current_nginx_dir="${base_dir}/nginx-${current_nginx_version}"
new_nginx_base_name="nginx-${new_nginx_version}"
new_nginx_tar_gz="${new_nginx_base_name}.tar.gz"
wget "http://nginx.org/download/${new_nginx_tar_gz}"
tar xzf "${new_nginx_tar_gz}"
rm "${new_nginx_tar_gz}"
echo "${new_nginx_version}" > "${nginx_version_file}"
git add "${new_nginx_base_name}"
git rm -rf "${current_nginx_dir}" || :
|
myokoym/groonga
|
vendor/update_nginx.sh
|
Shell
|
lgpl-2.1
| 709 |
#!/bin/bash
################################################################################
#
# Copyright (c) 2000-2017 Liferay, Inc. All rights reserved.
#
# This library is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
################################################################################
if [[ -z "$1" ]] || [[ -z "$2" ]]; then
echo "Usage: gist-diff.sh [file|url] [file|url]"
fi
FILE1="$1"
if [[ "$1" == http* ]]; then
FILE1='<(curl -s "$1")'
fi
FILE2="$2"
if [[ "$2" == http* ]]; then
FILE2='<(curl -s "$2")'
fi
gist.sh <(eval diff $FILE1 $FILE2 -u | sed "1iLeft: $1\nRight: $2\n") -f diff.diff
|
stiemannkj1/liferay-tools
|
Tools/my-tools/gist-diff.sh
|
Shell
|
lgpl-2.1
| 1,066 |
#!/bin/sh -x
#
# Written by George Milescu
# see LICENSE.txt for license information
#
# We should run the tests in a separate Python interpreter to prevent
# problems with our singleton classes, e.g. SuperPeerDB, etc.
#
# WARNING: this shell script must use \n as end-of-line, Windows
# \r\n gives problems running this on Linux
PYTHONPATH=../..:"$PYTHONPATH"
export PYTHONPATH
python test_proxyservice.py singtest_good_2fast
#python test_proxyservice.py singtest_bad_2fast_dlhelp
#python test_proxyservice.py singtest_bad_2fast_metadata_not_bdecodable
#python test_proxyservice.py singtest_bad_2fast_metadata_not_dict1
#python test_proxyservice.py singtest_bad_2fast_metadata_not_dict2
#python test_proxyservice.py singtest_bad_2fast_metadata_empty_dict
#python test_proxyservice.py singtest_bad_2fast_metadata_wrong_dict_keys
#python test_proxyservice.py singtest_bad_2fast_metadata_bad_torrent1
#python test_proxyservice.py singtest_bad_2fast_metadata_bad_torrent2
#python test_proxyservice.py singtest_bad_2fast_metadata_bad_torrent3
|
egbertbouman/tribler-g
|
Tribler/Test/test_proxyservice.sh
|
Shell
|
lgpl-2.1
| 1,042 |
#! /bin/sh
set -ev
SOLVER_FILES=$(ls ../../core/*.h ../../core/*.cpp | \
grep -v "non_mini_solver\|naxos.h")
# Ensure that Naxos Solver can be considered as a "mini-solver".
# According to the First International XCSP3 Competition: "A
# mini-solver is a solver whose code must be composed of
# less than 8,000 lines of at most 160 characters (while
# discarding code for parsing XCSP3, comments and code of
# standard libraries)."
SLOC=$(cat $SOLVER_FILES | grep -v "^$" | grep -v "^ *//" | wc -l)
echo "$SLOC pure source lines of code"
test $SLOC -lt 8000
# Ensure that the maximum line width limit isn't exceeded
(! grep ".\{161\}" $SOLVER_FILES)
# Check coding style in Travis CI
if [ "$CONTINUOUS_INTEGRATION" = "true" ]
then
cd -
# Fix coding style of all source files
find \( -iname "*.h" -or -iname "*.cpp" \) \
-exec clang-format-5.0 -i {} +
# List the files that may need reformatting
git ls-files -m
cd -
fi
# Memory check tool
MEM_CHECK="valgrind -q"
# Default Traveling Salesman Problem instance
$MEM_CHECK ./naxos-xcsp3 parser/src/XCSP3-CPP-Parser/instances/tsp-25-843.xml
# Default Constraint Optimisation (COP) instance
$MEM_CHECK ./naxos-xcsp3 parser/src/XCSP3-CPP-Parser/instances/obj.xml
# Limit the available time to 10s for searching a solution
timeout --preserve-status --kill-after=1s 10s \
$MEM_CHECK ./naxos-xcsp3 verification/without_solutions/AllConstraints.xml
# Reduce the available time to 5s, while not testing memory
timeout --preserve-status --kill-after=1s 5s \
./naxos-xcsp3 verification/without_solutions/AllConstraintsFormatted.xml
# For each Mini-solver Competition's requirement, solve a CSP
for INSTANCE in verification/*.xml
do
# Set the stored solution file name
SOLUTION="verification/$(basename $INSTANCE .xml).sol"
# Compare the stored solution with the solver's one
./naxos-xcsp3 $INSTANCE | cmp $SOLUTION
done
|
bftjoe/naxos
|
apps/XCSP3/verification/test.sh
|
Shell
|
lgpl-3.0
| 1,936 |
#!/bin/sh
dd if=/dev/zero of=btrfs-drone bs=1 count=0 seek=4G
mkfs.btrfs btrfs-drone
mount btrfs-drone /mnt
btrfs quota enable /mnt
btrfs qgroup limit -e 4G /mnt
mkdir -p /etc/cobalt
mount sample/config1 /etc/cobalt
|
PressLabs/cobalt
|
drone.sh
|
Shell
|
apache-2.0
| 224 |
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# shellcheck source=scripts/ci/libraries/_script_init.sh
. "$( dirname "${BASH_SOURCE[0]}" )/../libraries/_script_init.sh"
# Builds or waits for the CI image in the CI environment
# Depending on "USE_GITHUB_REGISTRY" and "GITHUB_REGISTRY_WAIT_FOR_IMAGE" setting
function build_ci_image_on_ci() {
build_images::prepare_ci_build
start_end::group_start "Prepare CI image ${AIRFLOW_CI_IMAGE}"
rm -rf "${BUILD_CACHE_DIR}"
mkdir -pv "${BUILD_CACHE_DIR}"
if [[ ${USE_GITHUB_REGISTRY} == "true" && ${GITHUB_REGISTRY_WAIT_FOR_IMAGE} == "true" ]]; then
# Pretend that the image was built. We already have an image with the right sources baked in!
md5sum::calculate_md5sum_for_all_files
# Tries to wait for the images indefinitely
# skips further image checks - since we already have the target image
local python_tag_suffix=""
if [[ ${GITHUB_REGISTRY_PULL_IMAGE_TAG} != "latest" ]]; then
python_tag_suffix="-${GITHUB_REGISTRY_PULL_IMAGE_TAG}"
fi
# first we pull base python image. We will need it to re-push it after main build
# Becoming the new "latest" image for other builds
build_images::wait_for_image_tag "${GITHUB_REGISTRY_PYTHON_BASE_IMAGE}" \
"${python_tag_suffix}" "${AIRFLOW_PYTHON_BASE_IMAGE}"
# And then the actual image
build_images::wait_for_image_tag "${GITHUB_REGISTRY_AIRFLOW_CI_IMAGE}" \
":${GITHUB_REGISTRY_PULL_IMAGE_TAG}" "${AIRFLOW_CI_IMAGE}"
md5sum::update_all_md5_with_group
else
build_images::rebuild_ci_image_if_needed
fi
# Disable the force pulling forced above; this is needed for the subsequent scripts so that
# they do not try to pull/build images again.
unset FORCE_PULL_IMAGES
unset FORCE_BUILD
# Skip the image check entirely for the rest of the script
export CHECK_IMAGE_FOR_REBUILD="false"
start_end::group_end
}
build_ci_image_on_ci
|
sekikn/incubator-airflow
|
scripts/ci/images/ci_prepare_ci_image_on_ci.sh
|
Shell
|
apache-2.0
| 2,772 |
#!/bin/bash
function mos_ec2_template() {
## create disk layout
if_create_disk_layout
## create file system layout
if_create_fs_layout
## create swap space
if_create_swap_space
## add mOS repositories
if_add_mos_main_repos
if_add_mos_s3_repos
## install mOS base system
if_install_mos_base
if_install_mos_ec2_kernel
## disable root password
if_disable_mos_root_password
## add mos-operator account
if_create_mos_operator
## create custom paths
if_create_custom_paths
## create default fstab
if_create_fstab
## mount devices as bind
if_mount_bind_devices
## customize mos services
if_customize_mos_services
## generate ec2 boot configuration
if_generate_ec2_boot_conf
## generate particularities
if_generate_btrfs_layout
## cleanup
if_cleanup_installation
}
function mos_default_template() {
## create disk layout
if_create_disk_layout
## create file system layout
if_create_fs_layout
## create swap space
if_create_swap_space
## add mOS repositories
if_add_mos_main_repos
if_add_mos_default_repos
## install mOS base system
if_install_mos_base
if_install_mos_default_kernel
## disable root password
if_disable_mos_root_password
## add mos-operator account
if_create_mos_operator
## create custom paths
if_create_custom_paths
## create default fstab
if_create_fstab
## mount devices as bind
if_mount_bind_devices
## customize mos services
if_customize_mos_services
## generate default boot configuration
if_generate_default_boot_conf
## generate particularities
if_generate_btrfs_layout
## cleanup
if_cleanup_installation
}
|
ieat/mosaic-mos-image-builder
|
lib/mos-templates.sh
|
Shell
|
apache-2.0
| 1,610 |
#!/bin/bash
# TF_XLA_FLAGS=--tf_xla_auto_jit=2
python3 bigbird/summarization/run_summarization.py \
--data_dir="tfds://scientific_papers/pubmed" \
--output_dir="$GCP_EXP_BUCKET"summarization/pubmed \
--attention_type=block_sparse \
--couple_encoder_decoder=True \
--max_encoder_length=3072 \
--max_decoder_length=256 \
--num_attention_heads=12 \
--num_hidden_layers=12 \
--hidden_size=768 \
--intermediate_size=3072 \
--block_size=64 \
--train_batch_size=4 \
--eval_batch_size=4 \
--do_train=True \
--do_eval=False \
--use_tpu=True \
--tpu_name=bigbird \
--tpu_zone=europe-west4-a \
--gcp_project="$GCP_PROJECT_NAME" \
--num_tpu_cores=64 \
--init_checkpoint=gs://bigbird-transformer/pretrain/bigbr_base/model.ckpt-0
|
google-research/bigbird
|
bigbird/summarization/roberta_base.sh
|
Shell
|
apache-2.0
| 759 |
#!/bin/bash
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Load the latest WEAVE AMIs
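# (The README contains a two-column markdown table of "| <region> | <ami-id> |" rows;
#  the sed below turns each row into a "<region>:<ami-id>" entry.)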
WEAVE_ECS_AMIS=( $(curl -L -s https://raw.githubusercontent.com/weaveworks/integrations/master/aws/ecs/README.md | sed -n -e 's/^| *\([^| ]*\) *| *\(ami-[^| ]*\) *|$/\1:\2/p' ) )
SCOPE_AAS_PROBE_TOKEN="$1"
function usage(){
echo "usage: $(basename $0) [scope-aas-probe-token]"
echo " where [scope-aas-probe-token] is an optional Scope as a Service probe token."
echo " When provided, the Scope probes in your ECS instances will report to your app"
echo " at http://scope.weave.works/"
}
# Mimic associative arrays using ":" to compose keys and values,
# to make them work in bash v3
function key(){
echo ${1%%:*}
}
function value(){
echo ${1#*:}
}
# Access is O(N) but .. we are mimicking maps with arrays
function get(){
KEY=$1
shift
for I in $@; do
if [ $(key $I) = "$KEY" ]; then
echo $(value $I)
return
fi
done
}
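# Example (hypothetical values): given an entry such as "us-east-1:ami-0123abcd",
#   get us-east-1 "${WEAVE_ECS_AMIS[@]}"    # -> ami-0123abcd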
REGIONS=""
for I in ${WEAVE_ECS_AMIS[@]}; do
REGIONS="$REGIONS $(key $I)"
done
# Check that we have everything we need
if [ \( "$#" -gt 1 \) -o \( "$1" = "--help" \) ]; then
usage
exit 1
fi
if [ -z "$(which aws)" ]; then
echo "error: Cannot find AWS-CLI, please make sure it's installed"
exit 1
fi
REGION=$(aws configure list 2> /dev/null | grep region | awk '{ print $2 }')
if [ -z "$REGION" ]; then
echo "error: Region not set, please make sure to run 'aws configure'"
exit 1
fi
AMI="$(get $REGION ${WEAVE_ECS_AMIS[@]})"
if [ -z "$AMI" ]; then
echo "error: AWS-CLI is using '$REGION', which doesn't offer ECS yet, please set it to one from: ${REGIONS}"
exit 1
fi
# Check that setup wasn't already run
CLUSTER_STATUS=$(aws ecs describe-clusters --clusters weave-ecs-demo-cluster --query 'clusters[0].status' --output text)
if [ "$CLUSTER_STATUS" != "None" -a "$CLUSTER_STATUS" != "INACTIVE" ]; then
echo "error: ECS cluster weave-ecs-demo-cluster is active, run cleanup.sh first"
exit 1
fi
set -euo pipefail
# Cluster
echo -n "Creating ECS cluster (weave-ecs-demo-cluster) .. "
aws ecs create-cluster --cluster-name weave-ecs-demo-cluster > /dev/null
echo "done"
# VPC
echo -n "Creating VPC (weave-ecs-demo-vpc) .. "
VPC_ID=$(aws ec2 create-vpc --cidr-block 172.31.0.0/28 --query 'Vpc.VpcId' --output text)
aws ec2 modify-vpc-attribute --vpc-id $VPC_ID --enable-dns-support
aws ec2 modify-vpc-attribute --vpc-id $VPC_ID --enable-dns-hostnames
# tag it for later deletion
aws ec2 create-tags --resources $VPC_ID --tag Key=Name,Value=weave-ecs-demo-vpc
echo "done"
# Subnet
echo -n "Creating Subnet (weave-ecs-demo-subnet) .. "
SUBNET_ID=$(aws ec2 create-subnet --vpc-id $VPC_ID --cidr-block 172.31.0.0/28 --query 'Subnet.SubnetId' --output text)
# tag it for later deletion
aws ec2 create-tags --resources $SUBNET_ID --tag Key=Name,Value=weave-ecs-demo-subnet
echo "done"
# Internet Gateway
echo -n "Creating Internet Gateway (weave-ecs-demo) .. "
GW_ID=$(aws ec2 create-internet-gateway --query 'InternetGateway.InternetGatewayId' --output text)
# tag it for later deletion
aws ec2 create-tags --resources $GW_ID --tag Key=Name,Value=weave-ecs-demo
aws ec2 attach-internet-gateway --internet-gateway-id $GW_ID --vpc-id $VPC_ID
TABLE_ID=$(aws ec2 describe-route-tables --query 'RouteTables[?VpcId==`'$VPC_ID'`].RouteTableId' --output text)
aws ec2 create-route --route-table-id $TABLE_ID --destination-cidr-block 0.0.0.0/0 --gateway-id $GW_ID > /dev/null
echo "done"
# Security group
echo -n "Creating Security Group (weave-ecs-demo) .. "
SECURITY_GROUP_ID=$(aws ec2 create-security-group --group-name weave-ecs-demo --vpc-id $VPC_ID --description 'Weave ECS Demo' --query 'GroupId' --output text)
# Wait for the group to get associated with the VPC
sleep 5
aws ec2 authorize-security-group-ingress --group-id $SECURITY_GROUP_ID --protocol tcp --port 22 --cidr 0.0.0.0/0
aws ec2 authorize-security-group-ingress --group-id $SECURITY_GROUP_ID --protocol tcp --port 80 --cidr 0.0.0.0/0
aws ec2 authorize-security-group-ingress --group-id $SECURITY_GROUP_ID --protocol tcp --port 4040 --cidr 0.0.0.0/0
# Weave
aws ec2 authorize-security-group-ingress --group-id $SECURITY_GROUP_ID --protocol tcp --port 6783 --source-group $SECURITY_GROUP_ID
aws ec2 authorize-security-group-ingress --group-id $SECURITY_GROUP_ID --protocol udp --port 6783 --source-group $SECURITY_GROUP_ID
aws ec2 authorize-security-group-ingress --group-id $SECURITY_GROUP_ID --protocol udp --port 6784 --source-group $SECURITY_GROUP_ID
# Scope
aws ec2 authorize-security-group-ingress --group-id $SECURITY_GROUP_ID --protocol tcp --port 4040 --source-group $SECURITY_GROUP_ID
# Kubernetes
aws ec2 authorize-security-group-ingress --group-id $SECURITY_GROUP_ID --protocol tcp --port 30000-32767 --cidr 0.0.0.0/0
aws ec2 authorize-security-group-ingress --group-id $SECURITY_GROUP_ID --protocol udp --port 30000-32767 --cidr 0.0.0.0/0
echo "done"
# Key pair
echo -n "Creating Key Pair (weave-ecs-demo, file weave-ecs-demo-key.pem) .. "
aws ec2 create-key-pair --key-name weave-ecs-demo-key --query 'KeyMaterial' --output text > weave-ecs-demo-key.pem
chmod 600 weave-ecs-demo-key.pem
echo "done"
# IAM role
echo -n "Creating IAM role (weave-ecs-role) .. "
aws iam create-role --role-name weave-ecs-role --assume-role-policy-document file://data/weave-ecs-role.json > /dev/null
aws iam put-role-policy --role-name weave-ecs-role --policy-name weave-ecs-policy --policy-document file://data/weave-ecs-policy.json
aws iam create-instance-profile --instance-profile-name weave-ecs-instance-profile > /dev/null
# Wait for the instance profile to be ready, otherwise we get an error when trying to use it
while ! aws iam get-instance-profile --instance-profile-name weave-ecs-instance-profile > /dev/null 2>&1; do
sleep 2
done
aws iam add-role-to-instance-profile --instance-profile-name weave-ecs-instance-profile --role-name weave-ecs-role
echo "done"
# Launch configuration
echo -n "Creating Launch Configuration (weave-ecs-launch-configuration) .. "
# Wait for the role to be ready, otherwise we get:
# A client error (ValidationError) occurred when calling the CreateLaunchConfiguration operation: You are not authorized to perform this operation.
# Unfortunately even if you can list the profile, "aws autoscaling create-launch-configuration" barks about it not existing so lets sleep instead
# while [ "$(aws iam list-instance-profiles-for-role --role-name weave-ecs-role --query 'InstanceProfiles[?InstanceProfileName==`weave-ecs-instance-profile`].InstanceProfileName' --output text 2>/dev/null || true)" != weave-ecs-instance-profile ]; do
# sleep 2
# done
sleep 15
TMP_USER_DATA_FILE=$(mktemp /tmp/weave-ecs-demo-user-data-XXXX)
trap 'rm $TMP_USER_DATA_FILE' EXIT
cp data/set-ecs-cluster-name.sh $TMP_USER_DATA_FILE
if [ -n "$SCOPE_AAS_PROBE_TOKEN" ]; then
echo "echo SERVICE_TOKEN=$SCOPE_AAS_PROBE_TOKEN >> /etc/weave/scope.config" >> $TMP_USER_DATA_FILE
fi
echo "sed \"s/\(succeed_or_die weave launch-proxy --hostname-from-label 'com.amazonaws.ecs.container-name'\)/\1 --rewrite-inspect/\" -i /etc/weave/run.sh" >> $TMP_USER_DATA_FILE
aws autoscaling create-launch-configuration --image-id $AMI --launch-configuration-name weave-ecs-launch-configuration --key-name weave-ecs-demo-key --security-groups $SECURITY_GROUP_ID --instance-type m3.xlarge --user-data file://$TMP_USER_DATA_FILE --iam-instance-profile weave-ecs-instance-profile --associate-public-ip-address --instance-monitoring Enabled=false
echo "done"
# Auto Scaling Group
echo -n "Creating Auto Scaling Group (weave-ecs-demo-group) with 3 instances .. "
aws autoscaling create-auto-scaling-group --auto-scaling-group-name weave-ecs-demo-group --launch-configuration-name weave-ecs-launch-configuration --min-size 3 --max-size 3 --desired-capacity 3 --vpc-zone-identifier $SUBNET_ID
echo "done"
# Wait for instances to join the cluster
echo -n "Waiting for instances to join the cluster (this may take a few minutes) .. "
while [ "$(aws ecs describe-clusters --clusters weave-ecs-demo-cluster --query 'clusters[0].registeredContainerInstancesCount' --output text)" != 3 ]; do
sleep 2
done
echo "done"
# Print out the public hostnames of the instances we created
INSTANCE_IDS=$(aws autoscaling describe-auto-scaling-groups --auto-scaling-group-names weave-ecs-demo-group --query 'AutoScalingGroups[0].Instances[*].InstanceId' --output text)
DNS_NAMES=$(aws ec2 describe-instances --instance-ids $INSTANCE_IDS --query 'Reservations[0].Instances[*].PublicDnsName' --output text)
SSH_FLAGS="-o Compression=yes -o LogLevel=FATAL -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -i weave-ecs-demo-key.pem"
for i in $DNS_NAMES
do echo $i:
ssh $SSH_FLAGS ec2-user@$i \
bash -c 'weave\ hide\ \;\ weave\ expose\ -h\ `hostname`.weave.local\ \;'
ssh $SSH_FLAGS ec2-user@$i \
docker -H unix:///var/run/weave/weave.sock run \
--volume="/:/rootfs" \
--volume="/var/run/weave/weave.sock:/docker.sock" \
weaveworks/kubernetes-anywhere:toolbox \
setup-kubelet-volumes
ssh $SSH_FLAGS ec2-user@$i \
docker -H unix:///var/run/weave/weave.sock run -d -l com.amazonaws.ecs.container-name=kubelet \
--net=host --pid=host --privileged=true \
--volumes-from=kubelet-volumes \
weaveworks/kubernetes-anywhere:kubelet
ssh $SSH_FLAGS ec2-user@$i \
docker -H unix:///var/run/weave/weave.sock run -d -l com.amazonaws.ecs.container-name=kube-proxy \
--net=host --pid=host --privileged=true \
weaveworks/kubernetes-anywhere:proxy
done
echo "Setup is ready!"
echo "Open your browser and go to any of these URLs:"
for NAME in $DNS_NAMES; do
echo " http://$NAME"
done
|
errordeveloper/kubernetes-anywhere
|
phase1/aws-ecs/create-cluster.sh
|
Shell
|
apache-2.0
| 10,302 |
pkg_origin=core
pkg_name=numactl
pkg_version=2.0.9
pkg_maintainer="The Habitat Maintainers <[email protected]>"
pkg_license=('GPL-2.0' 'LGPL-2.1')
pkg_source=https://github.com/numactl/numactl/archive/v${pkg_version}.tar.gz
pkg_shasum=3e893f41e601eac3100eefd659dbead8c75a89b9b73bc01c8387966181d9320c
pkg_deps=(core/glibc)
pkg_build_deps=(lilian/gcc lilian/make)
pkg_bin_dirs=(bin)
pkg_include_dirs=(include)
pkg_lib_dirs=(lib64)
pkg_upstream_url=https://github.com/numactl/numactl
pkg_description="NUMA support for Linux http://oss.sgi.com/projects/libnuma/"
do_build () {
make PREFIX="$pkg_prefix"
}
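# do_install() is left to the default callback here; an explicit equivalent
# (an assumption mirroring do_build above, not taken from the original plan) would be:
#
# do_install () {
#   make PREFIX="$pkg_prefix" install
# }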
|
be-plans/be
|
numactl/plan.sh
|
Shell
|
apache-2.0
| 604 |
#!/usr/bin/env bash
cp /dragonflow/doc/source/multi-node-conf/compute_node_local_controller.conf ~/devstack/local.conf
if [ "$1" != "" ]; then
sed -i -e 's/<IP address of host running everything else>/'$1'/g' ~/devstack/local.conf
fi
# Get the IP address
ipaddress=$(/sbin/ifconfig eth1 | grep 'inet addr' | awk -F' ' '{print $2}' | awk -F':' '{print $2}')
# Adjust some things in local.conf
cat << DEVSTACKEOF >> ~/devstack/local.conf
# Set this to the address of the main DevStack host running the rest of the
# OpenStack services.
Q_HOST=$1
HOST_IP=$ipaddress
HOSTNAME=$(hostname)
DEVSTACKEOF
~/devstack/stack.sh
|
FrankDuan/df_code
|
vagrant/provisioning/setup-compute.sh
|
Shell
|
apache-2.0
| 622 |
#!/bin/bash
set -o errexit -o nounset
rev=$(git rev-parse --short HEAD)
cd target/doc
git init
git config --global user.name "doc bot"
git config --global user.email "docbot@travis"
git remote add upstream "https://[email protected]/huonw/spellck.git"
git fetch upstream
git reset upstream/gh-pages
touch .
git add -A .
git commit -m "rebuild pages at ${rev}"
git push upstream HEAD:gh-pages
|
huonw/spellck
|
deploy-docs.sh
|
Shell
|
apache-2.0
| 401 |
PYXB_ROOT=${PYXB_ROOT:-/mnt/devel/pyxb}
PYTHONPATH=${PYXB_ROOT}${PYTHONPATH:+:${PYTHONPATH}}
PATH="${PYXB_ROOT}/scripts:${PYXB_ROOT}/bin:${PATH}"
export PYXB_ROOT PYTHONPATH PATH
|
pabigot/pyxb
|
maintainer/usepyxb.sh
|
Shell
|
apache-2.0
| 180 |
#!/bin/bash
wget http://download.virtualbox.org/virtualbox/5.0.24/VBoxGuestAdditions_5.0.24.iso -P /tmp
sudo mount -o loop /tmp/VBoxGuestAdditions_5.0.24.iso /mnt
sudo sh -x /mnt/VBoxLinuxAdditions.run # --keep
sudo modprobe vboxsf
sudo /opt/VBoxGuestAdditions*/init/vboxadd setup
sudo reboot
|
CiscoCloud/vaquero-vagrant
|
provision_scripts/vb_guest.sh
|
Shell
|
apache-2.0
| 299 |
#!/bin/bash -eu
#
# Copyright 2019-2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
if [ "$#" -ne 1 ]; then
echo "Usage: $0 [ --trigger-bucket ]" >&2
exit 1
fi
DIR=`dirname "$BASH_SOURCE"`
pushd $DIR
FUNCTION_SHARED="../shared"
function finish {
if [ -d "${FUNCTION_SHARED}" ]; then
echo "Removing shared directory from function directory"
rm -R "${FUNCTION_SHARED}"
fi
if [ -f ../package.json.bak ]; then
echo "Restoring original package.json"
mv -f ../package.json.bak ../package.json
fi
popd
}
trap finish EXIT
BUCKET_NAME=""
for i in "$@"; do
case $i in
-t=* | --trigger-bucket=*)
BUCKET_NAME="${i#*=}"
shift # past argument=value
;;
*)
# unknown option
echo "Unknown option ${i}"
;;
esac
done
if [ -z "$BUCKET_NAME" ]; then
echo "--trigger-bucket must be supplied"
exit 2
fi
if [[ $BUCKET_NAME != gs://* ]]; then
BUCKET_NAME="gs://${BUCKET_NAME}"
echo "Updated --trigger-bucket to '${BUCKET_NAME}'"
fi
BUCKET_REGION=$(gsutil ls -L -b ${BUCKET_NAME} | grep "Location constraint:" | awk 'END {print tolower($3)}')
if [ $? -ne 0 ] || [ -z "$BUCKET_REGION" ]; then
echo "Failed to find bucket location"
exit 3
fi
ENABLED_SERVICE_LIST=$(gcloud services list)
if [ $? -ne 0 ]; then
echo "Failed to get active services list"
exit 4
fi
if [[ "$ENABLED_SERVICE_LIST" != *"cloudfunctions.googleapis.com"* ]]; then
echo "Enabling cloudfunctions.googleapis.com api"
gcloud services enable cloudfunctions.googleapis.com
if [ $? -ne 0 ]; then
echo "Failed to enable cloudfunctions.googleapis.com api"
exit 5
fi
else
echo "cloudfunctions.googleapis.com api is enabled"
fi
echo "Bucket name: ${BUCKET_NAME}"
echo "Bucket region: ${BUCKET_REGION}"
# https://cloud.google.com/functions/docs/locations
AVAILABLE_FUNCTION_REGIONS=$(gcloud functions regions list)
if [ $? -ne 0 ] || [ -z "$AVAILABLE_FUNCTION_REGIONS" ]; then
echo "Unable to get available functions region list"
exit 6
fi
FUNCTION_REGION=""
for i in $(basename -- "$AVAILABLE_FUNCTION_REGIONS" | grep -v NAME); do
if [ "$i" == "${BUCKET_REGION}" ]; then
FUNCTION_REGION=${i}
break
fi
done
if [ -z "$FUNCTION_REGION" ]; then
MAIN_REGION=$(echo ${BUCKET_REGION} | awk -F"-" '{print $1}')
echo "Main region: ${MAIN_REGION}"
# https://cloud.google.com/storage/docs/locations
case ${MAIN_REGION} in
"northamerica" | "us" | "southamerica" | "australia" | "nam4") FUNCTION_REGION="us-central1" ;;
"europe" | "eu" | "eur4") FUNCTION_REGION="europe-west1" ;;
"asia") FUNCTION_REGION="asia-east2" ;;
*) FUNCTION_REGION="us-central1" ;;
esac
fi
if [ -z "$FUNCTION_REGION" ]; then
echo "Function region could not be determined, exiting."
exit 7
else
echo "Function region: ${FUNCTION_REGION}"
if [ -d "${FUNCTION_SHARED}" ]; then
rm -R "${FUNCTION_SHARED}"
fi
# Symlinks do not work; we have to physically copy the directory
echo "Copying shared module into function directory"
cp -R "../../../shared/" "${FUNCTION_SHARED}/"
echo "Creating backup of package.json"
cp ../package.json ../package.json.bak
UNAME=$(uname | awk '{print tolower($0)}')
if [ "$UNAME" == "darwin" ]; then
# macOS
echo 'Running on macOS, performing package.json replacement for cds-shared module'
sed -i '' -E 's/(file:)(\.\.\/\.\.\/)(shared)/\1\3/g' ../../batch/package.json
else
# linux
echo 'Running on linux, performing package.json replacement for cds-shared module'
sed -i -E 's/(file:)(\.\.\/\.\.\/)(shared)/\1\3/g' ../../batch/package.json
fi
gcloud functions deploy ${FUNCTION_NAME:-processUpload} --region=${FUNCTION_REGION} --memory=256MB --source=../../batch --runtime=nodejs16 --entry-point=processEvent --timeout=540s --trigger-event=providers/cloud.storage/eventTypes/object.change --trigger-resource="${BUCKET_NAME}" --quiet --set-env-vars=VERBOSE_MODE=true,ARCHIVE_FILES=false
echo "Restoring original package.json"
mv -f ../package.json.bak ../package.json
echo "Removing shared directory from function directory"
rm -R "${FUNCTION_SHARED}"
exit 0
fi
|
GoogleCloudPlatform/datashare-toolkit
|
ingestion/batch/bin/deploy.sh
|
Shell
|
apache-2.0
| 4,812 |
#!/bin/bash
# Note that the sensor specified in $OBSERVATIONS_POST_FILE must be registered in the system beforehand
BASE_URI="http://54.72.170.113:8080/webapp/sos/rest"
N=$1
C=$2
STEPS=$3
CONTENT_TYPE="application/gml+xml"
ACCEPT_ENCODING="gzip, deflate"
OBSERVATIONS_POST_FILE="1st_scenario_post_data.xml"
for (( i=1; i<=$STEPS; i++ )); do
FILENAME="observations_n${N}c${C}_$i"
DATA_PATH="data/${FILENAME}.txt"
OUTPUT_PATH="output/${FILENAME}.txt"
/usr/sbin/ab -n $N -c $C -p $OBSERVATIONS_POST_FILE -T $CONTENT_TYPE -H "Accept: $CONTENT_TYPE" -H "Accept-Encoding: $ACCEPT_ENCODING" -g $DATA_PATH $BASE_URI/observations > $OUTPUT_PATH
echo "\nOutput stored in '$OUTPUT_PATH'"
# Plot the resulting data
./plot.sh $FILENAME $N $C $RESOURCE
sleep 50
done
echo "Executed $STEPS times"
|
sauloperez/sos
|
load_testing/first_scenario.sh
|
Shell
|
apache-2.0
| 810 |
echo $(date) >> version.html
echo "<br>" >> version.html
echo $(git rev-parse HEAD) >> version.html
echo "<br><br>" >> version.html
git add .
git commit -am "elbv2"
git push origin master
aws deploy create-deployment --application-name CD-Demo-Prod --deployment-group-name CD-Demo-Prod-Group --github-location repository=hub714/cdeploy,commitId=$(git rev-parse HEAD) --region us-east-2
#aws deploy create-deployment --application-name SF-Demo --deployment-group-name SF-Demo-Group --github-location repository=hub714/cdeploy,commitId=$(git rev-parse HEAD) --region us-east-2
|
hub714/cdeploy
|
deploy.sh
|
Shell
|
apache-2.0
| 578 |
# Use functions instead of aliases so we can export them.
reallocateReads() {
"$thispath/../scripts/plugins/realloc-reads/reallocateReads" "$@"
}
export -f reallocateReads
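# Exported functions stay visible in child shells, e.g. (hypothetical invocation):
#   bash -c 'reallocateReads --help'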
|
klmr/trna-chip-pipeline
|
tools-setup.sh
|
Shell
|
apache-2.0
| 178 |