code
stringlengths 2
1.05M
| repo_name
stringlengths 5
110
| path
stringlengths 3
922
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 2
1.05M
|
---|---|---|---|---|---|
#!/bin/bash
# Build and install Apache CouchDB 1.6.1 from source (Docker image build step).
set -e
# buildconfig supplies $minimal_apt_get_install (apt-get wrapper).
source /build/docker-couchdb/buildconfig
set -x
# Erlang build/runtime deps, SpiderMonkey (libmozjs185) and ICU headers,
# plus the autotools/compiler toolchain needed to build from a git checkout.
$minimal_apt_get_install erlang-base-hipe erlang-crypto erlang-eunit \
erlang-inets erlang-os-mon erlang-public-key erlang-ssl \
erlang-syntax-tools erlang-tools erlang-xmerl erlang-dev libicu-dev \
libmozjs185-dev erlang-asn1 make g++ libtool pkg-config git \
automake autoconf autoconf-archive
# Fetch the 1.6.1 release tag and build into /couchdb.
git clone https://github.com/apache/couchdb.git /tmp/couchdb
cd /tmp/couchdb
git checkout tags/1.6.1
./bootstrap
./configure --prefix=/couchdb && make && make install
# Service account whose home is couchdb's lib directory.
useradd -d /couchdb/lib/couchdb couchdb
# Register a runit service (/etc/service layout) and an init hook.
mkdir /etc/service/couchdb
cp /build/docker-couchdb/runit/couchdb.sh /etc/service/couchdb/run
cp /build/docker-couchdb/50_enforce_couchdb_permissions.sh /etc/my_init.d/
# copy local.ini, overriding default settings
# Should do some sed replacements for auth related things
cp /build/docker-couchdb/local.ini /couchdb/etc/couchdb/local.ini
|
verdverm/starterKit
|
dockers/databases/couchdb/build/install_couchdb.sh
|
Shell
|
mit
| 926 |
#! /bin/sh
# GHDL testsuite case: analyze the reproducer and simulate its entity.
# testenv.sh supplies the analyze/elab_simulate/clean helpers.
. ../../testenv.sh
analyze repro.vhdl
elab_simulate repro_ent
# Second reproducer kept for reference but currently disabled.
#analyze repro1.vhdl
#elab_simulate repro1_ent
clean
echo "Test successful"
|
tgingold/ghdl
|
testsuite/gna/bug018/testsuite.sh
|
Shell
|
gpl-2.0
| 154 |
# Wipe any existing partition data and write a fresh GPT disklabel.
# $1 - block device to (re)label, e.g. /dev/sda
# Returns sgdisk's exit status.
create_disklabel() {
    local device=$1
    debug create_disklabel "creating new gpt disklabel"
    # -Z zaps existing partition structures, -g converts/creates a new GPT.
    # Quote the device path so an empty/space-containing value cannot split.
    sgdisk -Z -g "${device}"
    # add bios boot partition for good measure
    # (partition 128, last 32MiB, type ef02 for GRUB embedding on BIOS/GPT)
    sgdisk -n "128:-32M:" -t "128:ef02" -c "128:BIOS boot partition" "${device}"
    return $?
}
# Append one partition to the GPT disklabel.
# $1 - block device, $2 - partition number, $3 - sgdisk type code (e.g. 8300),
# $4 - size with unit suffix (e.g. 512M)
# Returns sgdisk's exit status.
add_partition() {
    local device=$1
    local minor=$2
    local type=$3
    local size=$4
    # Quote all expansions so empty or unusual values cannot word-split.
    sgdisk -n "${minor}::+${size}" -t "${minor}:${type}" -c "${minor}:Linux filesystem" "${device}"
    return $?
}
|
fr34k8/quickstart
|
modules/partition.sh
|
Shell
|
gpl-2.0
| 454 |
#!/bin/sh
# Copyright (C) 2007-2014 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# Exercise lvresize/lvextend/lvreduce usage on striped and linear LVs.
# lib/inittest supplies the harness ($vg, $lv, $dev1..., aux, get, not, skip).
. lib/inittest
test -e LOCAL_LVMPOLLD && skip
aux prepare_vg 2 80
# Striped LV across both PVs; grow it by 4 extents.
lvcreate -L 10M -n lv -i2 $vg
lvresize -l +4 $vg/lv
lvremove -ff $vg
# NOTE(review): this lvcreate uses the harness variable $lv while the one
# above used the literal name "lv" — presumably both are fine here; confirm.
lvcreate -L 64M -n $lv -i2 $vg
# Resizing an LV in a nonexistent VG must fail.
not lvresize -v -l +4 xxx/$lv
# Check stripe size is reduced to extent size when it's bigger
ESIZE=$(get vg_field $vg vg_extent_size --units b)
lvextend -L+64m -i 2 -I$(( ${ESIZE%%B} * 2 ))B $vg/$lv 2>&1 | tee err
grep "Reducing stripe size" err
lvremove -ff $vg
# Linear LV on $dev1, then extend it onto $dev2.
lvcreate -L 10M -n lv $vg "$dev1"
lvextend -L +10M $vg/lv "$dev2"
# Attempt to reduce with lvextend and vice versa:
not lvextend -L 16M $vg/lv
not lvreduce -L 32M $vg/lv
lvremove -ff $vg
|
jh80chung/lvm2
|
test/shell/lvresize-usage.sh
|
Shell
|
gpl-2.0
| 1,091 |
#!/bin/bash
# Launch a browser tab under a dedicated per-tab uid.
#
# Usage: tab.sh <arg1> <arg2> <arg3>   (all three are forwarded to tab_exec)
#
# tabno.tmp holds the tab counter between invocations.
tabno=0
if [ -f "tabno.tmp" ]; then
    tabno=$(cat tabno.tmp)
    tabno=$((tabno + 1))
    # NOTE(review): the incremented value is never written back to tabno.tmp,
    # so every run after the first computes the same tab number — confirm
    # whether the counter is persisted elsewhere before "fixing" this.
else
    echo 0 > tabno.tmp
fi

# Resolve the uid of the per-tab user (e.g. "tab1") and run the tab helper
# with that uid plus the caller's arguments. Expansions are quoted so
# arguments containing spaces are passed through intact.
suid=$(id -u "tab$tabno")
~/vcr/python-browser-8/tab_exec "$suid" "$1" "$2" "$3"
|
raoariel/quark
|
kernel/tab.sh
|
Shell
|
gpl-2.0
| 600 |
#!/bin/bash
# Install the "changepasswd" helper script system-wide.
# for use of smbpasswd we need package samba-common-bin
apt-get -y install samba-common-bin

FILENAME=changepasswd
BINPATH=/usr/local/bin

# Copy the helper into place and lock down ownership/permissions.
# Expansions are quoted so unexpected whitespace cannot word-split.
cp -v "files/$FILENAME" "$BINPATH"
chown root:root "$BINPATH/$FILENAME"
chmod 755 "$BINPATH/$FILENAME"
|
edvapp/autoinstall
|
laus/scriptsForClasses/ZZZARCHIVE/APP/APP1604/450-addChangeSMBPasswordScript.sh
|
Shell
|
gpl-2.0
| 248 |
#!/bin/sh
# Bootstrap the build system: prefer gnome-autogen.sh when it is on PATH,
# otherwise fall back to plain autoreconf + configure.
if command -v gnome-autogen.sh >/dev/null 2>&1; then
    # gnome-autogen.sh reads the source directory from the exported $srcdir.
    # The original ran "$srcdir gnome-autogen.sh ...", which tries to execute
    # the directory itself; invoke the script found on PATH instead.
    srcdir=`dirname "$0"`
    export srcdir
    gnome-autogen.sh "$@"
else
    autoreconf --install
    ./configure "$@"
    echo "Now run 'make && make install'"
fi
|
PeterSkovoroda/silver-rain
|
autogen.sh
|
Shell
|
gpl-2.0
| 185 |
#!/bin/bash
# Generate comparison screenshots for tyrutils' "light" tool options
# (dirt, sunlight, phong, bounce) by rendering lit maps in QuakeSpasm.
set -v
set -x
# Local build of the light tool, the Quake data directory, and the engine.
LIGHT_EXE=~/dev/tyrutils/build-ninja/light/light
QUAKEDIR=~/quake
QUAKE_EXE=~/Library/Developer/Xcode/DerivedData/QuakeSpasm-alpgyufxkvrsawhefxaskvlihpyd/Build/Products/Debug/QuakeSpasm-SDL2.app/Contents/MacOS/QuakeSpasm-SDL2
# Light a map, render it in-engine, and save a screenshot.
# $1 - output image name (jpg)
# $2 - .map file
# $3 - extra parameters for the light tool
# $4 - "setpos" view position string
# $5 - value for r_lightmap (1 = render the lightmap only)
make_screenshot () {
imagename="$1"
mapname="$2"
params="$3"
viewpos="$4"
showlightmap="$5"
#strip off .map suffix
map_basename=$(basename $mapname .map)
# Run the lighting tool (-lit writes a .lit colour lightmap file).
$LIGHT_EXE -lit -extra4 $params $mapname
# Recreate a throwaway mod directory holding only this map.
rm -fdr $QUAKEDIR/tyrutils-screenshots
mkdir $QUAKEDIR/tyrutils-screenshots
mkdir $QUAKEDIR/tyrutils-screenshots/maps
#copy over the map
cp $map_basename.{bsp,lit} $QUAKEDIR/tyrutils-screenshots/maps
#write an autoexec.cfg that will take the screenshot
# (the repeated "wait" commands give the engine frames to settle before
# setpos runs and again before the screenshot is taken)
cat << EOF > $QUAKEDIR/tyrutils-screenshots/autoexec.cfg
scr_conspeed 100000
scr_centertime 0
con_notifytime 0
map $map_basename
wait
wait
wait
wait
wait
setpos $viewpos
fog 0
r_lightmap $showlightmap
r_drawviewmodel 0
r_drawentities 0
viewsize 120
fov 110
gamma 1
wait
wait
wait
wait
screenshot
quit
EOF
#launch quake
$QUAKE_EXE -basedir $QUAKEDIR -nolauncher -window -width 1024 -height 768 -fsaa 8 -game tyrutils-screenshots
#convert the screenshot to jpg
convert $QUAKEDIR/tyrutils-screenshots/spasm0000.tga $imagename
}
#dirt
# Ambient-occlusion comparisons; -dirtdebug renders the dirtmapping only.
DIRT_VIEWPOS="-1904 -871 847 4 38 0"
make_screenshot dirtdefault.jpg jam2_sock.map "-dirt -dirtdebug" "$DIRT_VIEWPOS" 1
make_screenshot dirtdepth_256.jpg jam2_sock.map "-dirt -dirtdebug -dirtdepth 256" "$DIRT_VIEWPOS" 1
make_screenshot dirtdepth_512.jpg jam2_sock.map "-dirt -dirtdebug -dirtdepth 512" "$DIRT_VIEWPOS" 1
make_screenshot dirtdepth_1024.jpg jam2_sock.map "-dirt -dirtdebug -dirtdepth 1024" "$DIRT_VIEWPOS" 1
make_screenshot dirtgain_0.75.jpg jam2_sock.map "-dirt -dirtdebug -dirtgain 0.75" "$DIRT_VIEWPOS" 1
make_screenshot dirtgain_0.5.jpg jam2_sock.map "-dirt -dirtdebug -dirtgain 0.5" "$DIRT_VIEWPOS" 1
make_screenshot dirtmode_1_dirtgain_0.5.jpg jam2_sock.map "-dirt -dirtdebug -dirtgain 0.5 -dirtmode 1" "$DIRT_VIEWPOS" 1
make_screenshot dirtscale_1.5.jpg jam2_sock.map "-dirt -dirtdebug -dirtscale 1.5" "$DIRT_VIEWPOS" 1
make_screenshot dirtscale_2.0.jpg jam2_sock.map "-dirt -dirtdebug -dirtscale 2.0" "$DIRT_VIEWPOS" 1
#sunlight
# Two viewpoints (A/B) for each sunlight map variant.
SUN_POS_A="$DIRT_VIEWPOS"
SUN_POS_B="-1851 499 1057 1 329 0"
#TODO: make light support -sunlight flags on command line so these can use the same map file
make_screenshot a_sunlight.jpg sunlight.map "" "$SUN_POS_A" 1
make_screenshot b_sunlight.jpg sunlight.map "" "$SUN_POS_B" 1
make_screenshot a_sunlight2.jpg sunlight2.map "" "$SUN_POS_A" 1
make_screenshot b_sunlight2.jpg sunlight2.map "" "$SUN_POS_B" 1
make_screenshot a_sunlight_plus_sunlight2.jpg sunlight_plus_sunlight2.map "" "$SUN_POS_A" 1
make_screenshot b_sunlight_plus_sunlight2.jpg sunlight_plus_sunlight2.map "" "$SUN_POS_B" 1
#phong
# Phong on/off, for both the lightmap and the -phongdebug normals view.
PHONG_POS="893 887 -252 7 293 0"
PHONG_MAP="ad_crucial.map"
make_screenshot phong_1_lightmap.jpg "$PHONG_MAP" "" "$PHONG_POS" 1
make_screenshot phong_0_lightmap.jpg "$PHONG_MAP" "-phong 0" "$PHONG_POS" 1
make_screenshot phong_1_normals.jpg "$PHONG_MAP" "-phongdebug" "$PHONG_POS" 1
make_screenshot phong_0_normals.jpg "$PHONG_MAP" "-phongdebug -phong 0" "$PHONG_POS" 1
# bounce
# Bounced lighting at increasing -bouncescale values.
BOUNCE_POS="1043 -1704 2282 12 134 0"
BOUNCE_MAP="ad_azad.map"
make_screenshot bouncescale0.0.jpg "$BOUNCE_MAP" "" "$BOUNCE_POS" 1
make_screenshot bouncescale0.5.jpg "$BOUNCE_MAP" "-bounce -bouncescale 0.5" "$BOUNCE_POS" 1
make_screenshot bouncescale1.0.jpg "$BOUNCE_MAP" "-bounce -bouncescale 1" "$BOUNCE_POS" 1
make_screenshot bouncescale2.0.jpg "$BOUNCE_MAP" "-bounce -bouncescale 2" "$BOUNCE_POS" 1
|
ericwa/tyrutils
|
testmaps/deprecated/make_screenshots.sh
|
Shell
|
gpl-2.0
| 3,662 |
#! /bin/sh
#
# Report test for GRAMPS: Generate det_descendant_report testing
# different option combinations.
# $Id: det_descendant_report.sh 5544 2005-12-13 02:07:16Z rshura $
REP="det_ancestor_report"
FMT="txt"
TOP_DIR=`dirname $PWD`
TEST_DIR=$TOP_DIR/test
SRC_DIR=$TOP_DIR/src
PRG="python gramps.py"
EXAMPLE_XML=$TOP_DIR/example/gramps/example.gramps
REP_DIR=$TEST_DIR/reports/$REP
mkdir -p $REP_DIR
DATA_DIR=$TEST_DIR/data
mkdir -p $DATA_DIR
if [ -f $DATA_DIR/example.grdb ]; then
rm $DATA_DIR/example.grdb
fi
echo ""
echo "+--------------------------------------------------------------"
echo "| Import XML, write GRDB"
echo "+--------------------------------------------------------------"
OPTS="-i $EXAMPLE_XML -o $DATA_DIR/example.grdb"
(cd $SRC_DIR; $PRG $OPTS)
OPTS="-O $DATA_DIR/example.grdb"
echo ""
echo "+--------------------------------------------------------------"
echo "| Text Report: "$REP
echo "| Text Format: "$FMT
echo "+--------------------------------------------------------------"
for desref in {0,1}; do
for incphotos in {0,1}; do
for omitda in {0,1}; do
for incsources in {0,1}; do
for usenick in {0,1}; do
for fulldates in {0,1}; do
for incnotes in {0,1}; do
for repplace in {0,1}; do
for repdate in {0,1}; do
for computeage in {0,1}; do
for incnames in {0,1}; do
for incevents in {0,1}; do
for listc in {0,1}; do
output="$desref$incphotos$omitda$incsources$usenick$fulldates$incnotes$repplace$repdate$computeage$incnames$incevents$listc"
action="-a report -p name=$REP,id=I44,off=$FMT,of=$REP_DIR/$output.$FMT,desref=$desref,incphotos=$incphotos,omitda=$omitda,incsources=$incsources,usenick=$usenick,fulldates=$fulldates,incnotes=$incnotes,repplace=$repplace,repdate=$repdate,computeage=$computeage,incnames=$incnames,incevents=$incevents,listc=$listc"
(cd $SRC_DIR; $PRG $OPTS $action)
done
done
done
done
done
done
done
done
done
done
done
done
done
|
rdp/legado
|
test/det_descendant_report.sh
|
Shell
|
gpl-2.0
| 1,977 |
#!/bin/sh
# Pre-commit hook: run make so a broken build can never be committed.
# Install by copying to .git/hooks/pre-commit and making it executable.
set -e

# Whizzytex leaves *.dvi files behind; remove them so the build exercises
# the dvi->pdf rules from scratch instead of only testing stale output.
rm -f debianmeetingresume*.dvi

# Build once; set -e aborts the commit if make fails.
printf '%s\n' 'Run pre-commit make testing'
make
printf '%s\n' 'done pre-commit make testing'
|
kenhys/tokyodebian-monthly-report
|
git-pre-commit.sh
|
Shell
|
gpl-2.0
| 414 |
#!/bin/sh
# Copyright (C) 2008 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# 'Exercise some lvcreate diagnostics'
# lib/inittest supplies the harness ($vg, $lv1..., aux, not, die, dm_table).
. lib/inittest
test -e LOCAL_LVMPOLLD && skip
# Remove all LVs and verify lvremove left no stale device-mapper mappings.
cleanup_lvs() {
lvremove -ff $vg
(dm_table | not grep $vg) || \
die "ERROR: lvremove did leave some some mappings in DM behind!"
}
aux prepare_pvs 2
aux pvcreate --metadatacopies 0 "$dev1"
aux vgcreate $vg $(cat DEVICES)
# ---
# Create snapshots of LVs on --metadatacopies 0 PV (bz450651)
lvcreate -aey -n$lv1 -l4 $vg "$dev1"
lvcreate -n$lv2 -l4 -s $vg/$lv1
lvcreate -n$lv3 -l4 --permission r -s $vg/$lv1
cleanup_lvs
# Skip the rest for cluster
test -e LOCAL_CLVMD && exit 0
# ---
# Create mirror on two devices with mirrored log using --alloc anywhere
lvcreate --type mirror -m 1 -l4 -n $lv1 --mirrorlog mirrored $vg --alloc anywhere "$dev1" "$dev2"
cleanup_lvs
# --
# Create mirror on one dev with mirrored log using --alloc anywhere, should fail
not lvcreate --type mirror -m 1 -l4 -n $lv1 --mirrorlog mirrored $vg --alloc anywhere "$dev1"
cleanup_lvs
vgremove -ff $vg
|
jh80chung/lvm2
|
test/shell/lvcreate-operation.sh
|
Shell
|
gpl-2.0
| 1,431 |
# SGE batch job: compute limits for one signal grid point.
# Assumes the submitting wrapper provides: $grid, $background, $signal,
# $lumi, $output (job parameters) plus $TMPDIR and $SGE_TASK_ID (from SGE)
# — TODO(review): confirm where these are exported.
shopt -s expand_aliases
pushd /eliza18/atlas/mhance/D3PDReaderAnalyses_8TeV
source setup.sh MultiLepton
popd
DelphesReaderDir=/eliza18/atlas/mhance/Snowmass/DelphesReader
cd $DelphesReaderDir
# Stage scripts and input files into the job-local scratch directory.
cp -a scripts $TMPDIR
cp -a bin $TMPDIR
cp -a share $TMPDIR
cp -a ${grid}/${background} $TMPDIR
cp -a ${grid}/${signal} $TMPDIR
cd $TMPDIR
pwd
/bin/ls -ltrh
# Read the signal list (one "<name> <events>" pair per line) into two
# parallel space-separated strings.
allevents=""
allsignal=""
while read signal events; do
let gridpoints=$((gridpoints+1))
if [[ $allevents == "" ]]; then
allevents=$events
allsignal=$signal
else
allevents="$allevents $events"
allsignal="$allsignal $signal"
fi
done < ${signal}
# Select the entry for this array-job index (awk fields are 1-based).
taskindex=$(($SGE_TASK_ID))
sig=$(echo $allsignal | awk -v taskindex=$taskindex '{print $taskindex}')
evt=$(echo $allevents | awk -v taskindex=$taskindex '{print $taskindex}')
echo "$sig $evt" > signal.txt
cat signal.txt
# Append this point's limit to the shared output file in the grid directory.
./scripts/make_limits.py \
--background ${background} \
--signal signal.txt \
--lumi=${lumi} >> $DelphesReaderDir/${grid}/${output}
echo "Printing setup.log"
cat tmp_limits/setup.log
echo "Printing limit.log"
cat tmp_limits/limit.log
|
mhance/FCC
|
Snowmass/DelphesReader/scripts/batch_limits.sh
|
Shell
|
gpl-2.0
| 1,096 |
#!/bin/bash
set -e
#
# Post create filesystem hook
#
# Usage: post-createfs.sh <BR images directory> <Path to fwup.conf> <Base firmware name>
if [ $# -ne 3 ]; then
    echo "Usage: $0 <BR images directory> <Path to fwup.conf> <Base firmware name>"
    exit 1
fi
NERVES_SDK_IMAGES=$1
FWUP_CONFIG=$2
BASE_FW_NAME=$3
[ ! -f "$FWUP_CONFIG" ] && { echo "Error: $FWUP_CONFIG not found"; exit 1; }
TARGETDIR=$NERVES_SDK_IMAGES/../target
NERVES_ROOT=$NERVES_SDK_IMAGES/../../..
# NOTE(review): NERVES_SDK_ROOT is not used below — presumably kept for
# sourced scripts or future use; confirm before removing.
NERVES_SDK_ROOT=$NERVES_SDK_IMAGES/../host
# Link the fwup config to the images directory so that
# it can be used to create images based on this one.
ln -sf "$FWUP_CONFIG" "$NERVES_SDK_IMAGES"
# Use the rel2fw.sh tool to create the demo images. The work runs in a
# subshell so the cd and sourced environment do not leak into this script.
# The original reported failure via "exit 1" inside another subshell, which
# only terminated that subshell — the hook still exited 0 on failure.
OLD_DIR=`pwd`
if ! (cd "$NERVES_SDK_IMAGES" && \
    source "$NERVES_ROOT/scripts/nerves-env-helper.sh" "$NERVES_ROOT" && \
    "$NERVES_ROOT/scripts/rel2fw.sh" "$TARGETDIR/srv/erlang" "${BASE_FW_NAME}.fw" "${BASE_FW_NAME}.img" && \
    mv _images/*.fw . && \
    mv _images/*.img . && \
    rm -fr _images); then
    cd "$OLD_DIR"
    echo rel2fw.sh failed >&2
    exit 1
fi
cd "$OLD_DIR"
|
fraxu/nerves-sdk
|
board/nerves-common/post-createfs.sh
|
Shell
|
gpl-2.0
| 963 |
# Run the parallel NetCDF-Fortran F90 examples under MPI with 4 ranks:
# the writer first, then the reader that consumes its output.
mpiexec -n 4 ./simple_xy_par_wr
mpiexec -n 4 ./simple_xy_par_rd
|
qingu/WRF-Libraries
|
src/netcdf-fortran-4.2/examples/F90/run_f90_par_examples.sh
|
Shell
|
gpl-2.0
| 64 |
#!/bin/bash
# One-step build: regenerate the autotools build system and compile.
# Change into the project directory (where this script lives) FIRST so the
# m4 directory is created in the project, not in the caller's cwd as the
# original ordering did.
cd "`dirname $BASH_SOURCE`"
# autotools expects the m4 macro directory to exist.
mkdir -p m4
./autogen.sh
./configure
make
|
aalex/jasm
|
one_step_build.sh
|
Shell
|
gpl-3.0
| 84 |
#!/bin/bash
# -- extract and convert documentation from Emacs Lisp files
# Author: Andreas Röhler <[email protected]>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Commentary:
# tests Emacs python-mode
#
# Code:
# Edit the vars pointing to the directories/files
# holding your python-mode for test
# assumes python-mode files in current directory
# the path
# needs being in `test' directory
PDIR=`pwd`
# write PATH-TO-EMACS source code directory here
# EMACS_SOURCE_DIR="$HOME/emacs-20110426"
EMACS_SOURCE_DIR=$HOME/emacs-23.4
# python-mode file to load
# (prefer the components-based python-mode when present)
if [ -s "../python-components-mode.el" ];
then
PYTHONMODE="../python-components-mode.el"
else
PYTHONMODE="../python-mode.el"
fi
# NOTE(review): EINGABE is captured but never used below — confirm intent.
if [ $1 ]; then
EINGABE=$1
fi
if [ $EMACS_SOURCE_DIR ]; then
EMACS="${EMACS_SOURCE_DIR}/src/emacs"
# else
# EMACS=emacs
# when installed Emacs shall be used, CCCMDS must be set
# CCCMDS="${EMACS_SOURCE_DIR}/lisp/progmodes/cc-cmds.el"
# ERG=$(echo $LOGNAME | sed 's/^s\(.*\)/m/')
# if [ $ERG == "m" ]; then
# EMACS_SOURCE_DIR="$HOME/emacs-20110426"
# else
# EMACS_SOURCE_DIR="~/emacs-20110426"
# fi
# Emacs-lisp prerequisites loaded before python-mode itself.
HIGHL="../highlight-indentation.el"
CLMACS="${EMACS_SOURCE_DIR}/lisp/emacs-lisp/cl-macs.el"
BYTECOMP="${EMACS_SOURCE_DIR}/lisp/emacs-lisp/bytecomp.el"
CUSTOM="${EMACS_SOURCE_DIR}/lisp/custom.el"
ANSICOLOR="${EMACS_SOURCE_DIR}/lisp/ansi-color.el"
COMINT="${EMACS_SOURCE_DIR}/lisp/comint.el"
CCCMDS="${EMACS_SOURCE_DIR}/lisp/progmodes/cc-cmds.el"
SHELL="${EMACS_SOURCE_DIR}/lisp/shell.el"
PYMACS="../pymacs.el"
# file holding the tests
TESTFILE="py-bug-numbered-tests.el"
TESTFILE2="python-mode-test.el"
DOCU=/tools/fundocu2infoformats.el
echo "\$PYMACS: $PYMACS"
# Batch-mode Emacs: load prerequisites plus python-mode, then run the
# doc-extraction function.
# NOTE(review): the command below ends in a backslash continuation after
# "--funcall finds-from-programm", so the following "else" line is parsed
# as an argument to $EMACS rather than as the shell keyword — confirm the
# intended structure before reformatting.
$EMACS -Q --batch --eval "(message (emacs-version))" --eval "(when (featurep 'python-mode)(unload-feature 'python-mode t))" --eval "(add-to-list 'load-path \"$PDIR/\")" --eval "(setq py-install-directory \".\")" -load "$PYMACS" -load $CCCMDS -load $COMINT -load $SHELL -load $ANSICOLOR -load $CLMACS -load $BYTECOMP -load $CUSTOM -load $PYTHONMODE \
--funcall finds-from-programm \
else
cat <<EOF
usage: ${0##*/} EMACS_SOURCE_DIR
Runs from tools dir
This script write commands-lists from python-mode files.
EOF
fi
|
emacsmirror/python-mode
|
devel/fundocu2infoformats.sh
|
Shell
|
gpl-3.0
| 2,813 |
# Copyright (c) 2015 Anchor Systems Pty Ltd <[email protected]>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# Tear down the kpartx partition mappings for $BLOCK_DEVICE
# (registered below via register_cleanup so it runs on exit paths).
cleanup_refresh_partitions() {
kpartx -d "$BLOCK_DEVICE"
# Give udev time to react.
sleep 1
}
register_cleanup "cleanup_refresh_partitions"
# Create device-mapper mappings for each partition on $BLOCK_DEVICE.
kpartx -a "$BLOCK_DEVICE"
# Give udev time to react.
sleep 1
# kpartx has a nasty habit of putting all its partitions under /dev/mapper,
# which, of course, the partitioner isn't expected to know. So, we need to
# manually mangle the partition names to correspond to the kpartx-created
# names.
for partname in "${!PARTITIONS[@]}"; do
debug "Converting $partname (${PARTITIONS[$partname]}) to kpartx-created device name"
PARTITIONS[$partname]="/dev/mapper/$(basename "${PARTITIONS[$partname]}")"
done
|
anchor/pcib
|
plugins/partitioner/full-disk/tasks/06-refresh-partitions.sh
|
Shell
|
gpl-3.0
| 1,436 |
#!/bin/bash
# Merge four per-tool benchmark CSVs (';'-separated, key in column 1) and
# derive summary files:
#   all.csv    - joined rows plus the row minimum as a sixth column
#   amba.csv / load.csv / genbuf.csv - rows split by benchmark family
#   more30.csv - rows whose minimum exceeds 30

# join(1) requires its inputs sorted on the join field.
sort -t';' comp0.csv > sc0.csv
sort -t';' comp1.csv > sc1.csv
sort -t';' comp2.csv > sc2.csv
sort -t';' comp3.csv > sc3.csv
join -t';' sc0.csv sc1.csv > temp0.csv
join -t';' temp0.csv sc2.csv > temp1.csv
join -t';' temp1.csv sc3.csv > temp2.csv
# Append the minimum of columns 2..NF as a sixth column.
awk -F';' '{m=$2;for(i=2;i<=NF;i++){if($i<m)m=$i;}print $1";"$2";"$3";"$4";"$5";"m}' temp2.csv > all.csv
# Split by benchmark-name prefix. The original patterns ended in a literal
# '*' ("amba*", "load*", "gb_*"); in a regex that makes the preceding
# character optional (e.g. 'gb_*' also matches plain "gb"), so match the
# literal prefixes instead.
grep -i 'amba' all.csv > amba.csv
grep -i 'load' all.csv > load.csv
grep -i 'gb_' all.csv > genbuf.csv
# Rows whose minimum (column 6) exceeds 30.
awk -F';' '{if($6>30)print $1";"$2";"$3";"$4";"$5";"$6}' all.csv > more30.csv
|
gaperez64/AbsSynthe
|
scripts/gen4csv.sh
|
Shell
|
gpl-3.0
| 549 |
#!/bin/bash
: <<'END'
This software was created by United States Government employees at
The Center for the Information Systems Studies and Research (CISR)
at the Naval Postgraduate School NPS. Please note that within the
United States, copyright protection is not available for any works
created by United States Government employees, pursuant to Title 17
United States Code Section 105. This software is in the public
domain and is not subject to copyright.
END
# Tag a local docker image and push it to a Labtainers registry.
usage(){
echo "publish_image.sh image [-t]"
echo "use -t to push to the testregistry:5000 registry for testing"
exit
}
if [ $# -eq 0 ]; then
usage
fi
# Default (single-arg) mode pushes to the public "mfthomps" registry and
# logs in once; DOCKER_LOGIN acts as the already-logged-in marker.
if [[ $# -eq 1 ]]; then
export LABTAINER_REGISTRY="mfthomps"
if [ -z "$DOCKER_LOGIN" ]; then
docker login
DOCKER_LOGIN=YES
fi
elif [[ "$2" == -t ]]; then
export LABTAINER_REGISTRY="testregistry:5000"
else
usage
fi
echo "Using registry $LABTAINER_REGISTRY"
image=$1
docker tag $image $LABTAINER_REGISTRY/$image
docker push $LABTAINER_REGISTRY/$image
|
cliffe/SecGen
|
modules/utilities/unix/labtainers/files/Labtainers-master/scripts/designer/bin/publish_image.sh
|
Shell
|
gpl-3.0
| 1,026 |
#!/usr/bin/env bash
# EDGE host-removal regression test driver.
set -e
# Absolute directory containing this script.
rootdir=$( cd $(dirname $0) ; pwd -P )
# Default EDGE_HOME to the repository root unless the caller exported it.
if [ -z ${EDGE_HOME+x} ]; then
EDGE_HOME="$rootdir/../../"
fi
# Compare the pipeline's hostclean stats against the expected copy and
# record the outcome as a marker file (test.success / test.fail) under
# $rootdir/TestOutput. Reads $rootdir from the enclosing script.
test_result(){
    local actual="$rootdir/TestOutput/HostRemoval/hostclean.stats.txt"
    local expected="$rootdir/hostclean.stats.txt"
    local testName="EDGE Human Host Removal test"
    # cmp -s: silent byte-wise comparison.
    if cmp -s "$actual" "$expected"; then
        echo "$testName passed!"
        touch "$rootdir/TestOutput/test.success"
    else
        echo "$testName failed!"
        touch "$rootdir/TestOutput/test.fail"
    fi
}
cd $rootdir
echo "Working Dir: $rootdir";
echo "EDGE HOME Dir: $EDGE_HOME";
# Re-run from scratch unless a previous run already succeeded.
if [ ! -f "$rootdir/TestOutput/test.success" ]
then
rm -rf $rootdir/TestOutput
fi
# "|| true" keeps set -e from aborting so test_result can report the outcome.
perl $EDGE_HOME/runPipeline -c $rootdir/config.txt -o $rootdir/TestOutput -cpu 4 -noColorLog -p $rootdir/../Ecoli_10x.1.fastq $rootdir/../Ecoli_10x.2.fastq || true
# Drop bulky intermediate reads before checking results.
rm -rf $rootdir/TestOutput/QcReads
test_result;
|
LANL-Bioinformatics/edge
|
testData/runHostRemovalTest/runTest.sh
|
Shell
|
gpl-3.0
| 844 |
#!/bin/sh
# Interactive wrapper: reads commands from stdin and drives run.sh.
# Protocol: "start" followed by <algo> and <dataset> lines launches a run;
# "ping" answers "pong"; "exit" matches no branch (and does NOT terminate
# the loop); anything else reports an error on stdout and stderr.
cd ..
while true; do
read com
# NOTE(review): read's exit status is ignored, so at EOF $com stays empty
# and the loop prints "error" forever — confirm the caller keeps stdin open
# and drives the session explicitly.
if [ "$com" = "start" ]; then
read algo
read dataset
./run.sh InteractiveRecom $algo $dataset
elif [ "$com" = "ping" ]; then
echo pong
elif [ "$com" != "exit" ]; then
echo error
1>&2 echo Erreur : $com
fi
done
|
PFgimenez/thesis
|
demonstrateur/wrapper.sh
|
Shell
|
gpl-3.0
| 311 |
timeout_set '90 seconds'
# Test if sessions are properly supported after promoting the shadow server.
# Setup: two master servers (0 = active, 1 = shadow), two mounts
# (mount 0 read-only on the root export, mount 1 read-write).
MOUNTS=2 \
MASTERSERVERS=2 \
CHUNKSERVERS=1 \
USE_RAMDISK=YES \
MOUNT_EXTRA_CONFIG="mfscachemode=NEVER,mfsacl" \
MOUNT_0_EXTRA_EXPORTS="ro,allcanchangequota" \
MOUNT_1_EXTRA_EXPORTS="rw,alldirs,allcanchangequota,maxtrashtime=1234567,mapall=lizardfstest_6:lizardfstest_4" \
setup_local_empty_lizardfs info
# Remount mount 1 onto a subdirectory and start the shadow master.
mkdir "${info[mount1]}/subdir"
echo 'mfssubfolder=/subdir' >>"${info[mount1_config]}"
lizardfs_mount_unmount 1
lizardfs_mount_start 1
lizardfs_master_n 1 start
# Populate the filesystem with generated metadata (uid/gid cases skipped).
cd "${info[mount1]}"
for generator in $(metadata_get_all_generators |grep -v metadata_generate_uids_gids); do
eval "$generator"
done
metadata_validate_files
# Check if using removed files works as expected:
# hold fd 11 open on an unlinked file and keep writing through it.
echo "ala ma kota" > removed_file
mfssettrashtime 0 removed_file
exec 11<> removed_file
rm removed_file
echo -n "u huhu" >&11
assert_equals "u huhu kota" "$(cat /proc/$$/fd/11)"
cd
# Snapshot both mounts' metadata before switching masters.
mount1meta=$(metadata_print "${info[mount1]}")
mount0meta=$(metadata_print "${info[mount0]}")
sleep 3
# Kill the active master and promote the shadow (master 1) in its place.
lizardfs_master_daemon kill
lizardfs_make_conf_for_master 1
lizardfs_master_daemon reload
lizardfs_wait_for_all_ready_chunkservers
# check restored filesystem
assert_no_diff "$mount0meta" "$(metadata_print "${info[mount0]}")"
assert_failure touch "${info[mount0]}"/newfile
cd "${info[mount1]}"
assert_no_diff "$mount1meta" "$(metadata_print)"
assert_success touch newfile
# maxtrashtime=1234567 from the export options must still be enforced.
touch nowaythiswilleverwork
assert_failure mfssettrashtime 12345678 nowaythiswilleverwork
# Check if using removed files works as expected after promotion:
echo -n " prrrrrr" >&11
assert_equals "u huhu prrrrrr" "$(cat /proc/$$/fd/11)"
exec 11>&- # close the descriptor to allow clean umount
metadata_validate_files
|
cloudweavers/lizardfs
|
tests/test_suites/ShortSystemTests/test_shadow_sessions.sh
|
Shell
|
gpl-3.0
| 1,768 |
# Smoke test: run the rop-tool "s" (search) command against the Alpha bash
# ELF sample. NOTE(review): -a8 presumably selects an address/alignment
# width of 8 — confirm against rop-tool's help output.
./rop-tool-Linux-x86-64 s ./binary-samples/elf-Linux-Alpha-bash -a8
|
ADemonisis/rop-tool
|
test/test13.sh
|
Shell
|
gpl-3.0
| 68 |
#!/bin/bash
# Build the OpenPixelControl gl_server and run it as the LED simulator.
set -e
# Work from the directory containing this script (quoted so a path with
# spaces cannot word-split).
cd "$(dirname "$0")"
# Assume OPC is checked out to the same top-level directory as the soma repo
OPC_DIR=../../../openpixelcontrol
set -x
make -C "$OPC_DIR"
# Replace this shell with the simulator process.
exec "$OPC_DIR/bin/gl_server" points.json 7890 model.stl
|
mct/soma
|
pier14/opc-client/run-simulator.sh
|
Shell
|
apache-2.0
| 229 |
#!/bin/sh
# Fetch and patch GNU binutils 2.23.2 sources into ./dist.
# Make sure we're in our directory (i.e., where this shell script is)
echo $0
cd `dirname $0`
# Configure fetch method
URL="http://www.minix3.org/pkgsrc/distfiles/minix/3.3.0/binutils-2.23.2.tar.bz2"
BACKUP_URL="http://ftp.gnu.org/gnu/binutils/binutils-2.23.2.tar.bz2"
# Prefer curl when installed, otherwise fall back to ftp.
FETCH=ftp
which curl >/dev/null
if [ $? -eq 0 ]; then
FETCH="curl -O -f"
fi
# Fetch sources if not available
if [ ! -d dist ];
then
if [ ! -f binutils-2.23.2.tar.bz2 ]; then
$FETCH $URL
# Fall back to the GNU mirror when the primary download fails.
if [ $? -ne 0 ]; then
$FETCH $BACKUP_URL
fi
fi
# Unpack, apply local patches, drop in pre-generated gold parser files,
# and remove a doc file; the && chain stops at the first failure.
tar -oxjf binutils-2.23.2.tar.bz2 && \
mv binutils-2.23.2 dist && \
cd dist && \
cat ../patches/* | patch -p1 && \
cp ../files/yyscript.h gold && \
cp ../files/yyscript.c gold && \
rm -f ld/configdoc.texi
fi
|
ayyucedemirbas/Minix-Source-Code
|
minix-master/external/gpl3/binutils/fetch.sh
|
Shell
|
apache-2.0
| 744 |
#!/bin/sh -e
#
# Copyright (C) 2012, 2014 Internet Systems Consortium, Inc. ("ISC")
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
# Sign the root zone for the dsdigest system test and distribute a
# trusted-keys config ($KEYGEN/$SIGNER/$RANDFILE/$PERL come from conf.sh).
SYSTEMTESTTOP=../..
. $SYSTEMTESTTOP/conf.sh
zone=.
infile=root.db.in
zonefile=root.db
# Sign the child zones first and pull in their DS sets.
(cd ../ns2 && $SHELL sign.sh)
cp ../ns2/dsset-good. .
cp ../ns2/dsset-bad. .
# ZSK (1024-bit) and KSK (2048-bit) for the root zone.
key1=`$KEYGEN -q -r $RANDFILE -a RSASHA1 -b 1024 -n zone $zone`
key2=`$KEYGEN -q -r $RANDFILE -a RSASHA1 -b 2048 -n zone -f KSK $zone`
cat $infile $key1.key $key2.key > $zonefile
$SIGNER -P -g -r $RANDFILE -o $zone $zonefile > /dev/null
# Configure the resolving server with a trusted key.
# (Strip comment lines from the KSK file and emit a trusted-keys block.)
cat $key2.key | grep -v '^; ' | $PERL -n -e '
local ($dn, $class, $type, $flags, $proto, $alg, @rest) = split;
local $key = join("", @rest);
print <<EOF
trusted-keys {
"$dn" $flags $proto $alg "$key";
};
EOF
' > trusted.conf
cp trusted.conf ../ns2/trusted.conf
cp trusted.conf ../ns3/trusted.conf
cp trusted.conf ../ns4/trusted.conf
|
execunix/vinos
|
external/bsd/bind/dist/bin/tests/system/dsdigest/ns1/sign.sh
|
Shell
|
apache-2.0
| 1,592 |
#!/system/bin/sh
# Recreate the modem config directory, copy the firmware's modem config
# files into it, then raise the copy_complete flag for waiting consumers.
MODEM_CFG_DIR=/data/misc/radio/modem_config

rm -rf "$MODEM_CFG_DIR"
mkdir -m 770 "$MODEM_CFG_DIR"
cp -r /firmware/image/modem_pr/mcfg/configs/* "$MODEM_CFG_DIR"
echo 1 > /data/misc/radio/copy_complete
|
libnijunior/patchrom_angler
|
stockrom/system/bin/init.mcfg.sh
|
Shell
|
apache-2.0
| 281 |
# Environment setup (meant to be sourced): load git and cmake through the
# cluster's module system.
module load git
module load cmake
|
tskisner/pytoast
|
external/conf/tacc-knl-intel.sh
|
Shell
|
bsd-2-clause
| 34 |
#!/bin/bash
# Copyright 2019 The Flutter Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# List of commands to be run once per PR before running dart and flutter
# tests.
# -e: stop at the first failure; -x: echo each command for CI logs.
set -ex
pub global activate tuneup
# Track the master channel and bring the Flutter SDK up to date.
flutter channel master
flutter upgrade
|
flutter/cocoon
|
test_utilities/bin/prepare_environment.sh
|
Shell
|
bsd-3-clause
| 332 |
# Build LuaJIT as a static library for Android armeabi-v7a, then rebuild
# the JNI wrapper with ndk-build.
rm ./libluajit.a
echo APP_ABI := armeabi-v7a>./Application.mk
cd luajit/src
# NOTE(review): the NDK path is hardcoded to one developer's machine
# (ndk-r9, darwin-x86_64 toolchain) — adjust before using elsewhere.
NDK=/Users/luzexi/Desktop/work/_Environment/android-ndk-r9
NFK=$NDK/toolchains/arm-linux-androideabi-4.6/prebuilt/darwin-x86_64/bin/arm-linux-androideabi-
make clean
# Cross-compile: 32-bit host compiler, armv7-a target against android-14.
make HOST_CC="gcc -m32 -ffast-math -O3" \
CROSS=$NFK \
TARGET_SYS=Linux \
TARGET_FLAGS="--sysroot $NDK/platforms/android-14/arch-arm -march=armv7-a -Wl,--fix-cortex-a8"
cp ./libluajit.a ../../libluajit.a
cd ../../../
ndk-build clean
ndk-build
|
luzexi/slua-3rd-lib
|
build/android/jni/build_arm_v7.sh
|
Shell
|
mit
| 487 |
#!/usr/bin/env bash
# Forward local TCP port 4444 to the device's abstract adb-hub socket and
# connect adb to it (requires $ANDROID_HOME to point at the Android SDK).
$ANDROID_HOME/platform-tools/adb forward tcp:4444 localabstract:/adb-hub
$ANDROID_HOME/platform-tools/adb connect 127.0.0.1:4444
|
vndly/shoppingcart
|
bluetooth.sh
|
Shell
|
mit
| 149 |
#! /bin/bash
#
# Terminal colour definitions, exported for use by other scripts.
#
# http://en.wikipedia.org/wiki/ANSI_escape_sequences#Colors
#
# Two palettes are exported for every colour NAME:
#   OTHER_NAME - escape code wrapped in \[ \] (readline zero-width markers),
#                suitable for embedding in bash prompts (PS1)
#   NAME       - the bare escape code for general output
# Values are stored literally; backslashes are not interpreted here.

# Prompt-safe palette.
while IFS='=' read -r _colour _code; do
    export "OTHER_${_colour}=\[\033[${_code}m\]"
done <<'EOF'
NO_COLOUR=0
RED=0;31
LIGHT_RED=1;31
GREEN=0;32
LIGHT_GREEN=1;32
BROWN=0;33
YELLOW=1;33
BLUE=0;34
LIGHT_BLUE=1;34
MAGENTA=0;35
LIGHT_MAGENTA=1;35
CYAN=0;36
LIGHT_CYAN=1;36
WHITE=1;37
LIGHT_GRAY=0;37
GRAY=1;38
GRAY_TOO=0;38
EOF

# Bare palette. NOTE(review): MAGENTA / LIGHT_MAGENTA carry the opposite
# bold bits from the OTHER_ palette — preserved from the original file;
# confirm whether that swap is intentional.
while IFS='=' read -r _colour _code; do
    export "${_colour}=\033[${_code}m"
done <<'EOF'
NO_COLOUR=0
RED=0;31
LIGHT_RED=1;31
GREEN=0;32
LIGHT_GREEN=1;32
BROWN=0;33
YELLOW=1;33
BLUE=0;34
LIGHT_BLUE=1;34
MAGENTA=1;35
LIGHT_MAGENTA=0;35
CYAN=0;36
LIGHT_CYAN=1;36
WHITE=1;37
LIGHT_GRAY=0;37
GRAY=1;38
GRAY_TOO=0;38
EOF
unset _colour _code
|
jalanb/jab
|
environ.d/colour.sh
|
Shell
|
mit
| 1,514 |
#!/usr/bin/env bash
# Create the application's PostgreSQL role and database if they do not yet
# exist.  All settings come from the .env file loaded below; the script is
# idempotent (safe to re-run).
set -x
source $(dirname $0)/load.env.include

# require_env NAME — abort with exit code 42 (original contract) when the
# named mandatory setting is unset or empty.
require_env() {
    local name=$1
    if [ -z "${!name}" ]
    then
        echo "Please add ${name} in .env file as it is mandatory"
        exit 42
    fi
}

for var in DBHOST DBROOTUSER DBAPPNAME DBAPPUSER DBAPPPASSWORD
do
    require_env "$var"
done

# NOTE(review): DBAPPUSER/DBAPPNAME/DBAPPPASSWORD are interpolated into SQL;
# the .env file is assumed to be trusted operator input.

# Create the application role only if it is not already present.
userexist=$(psql -qt -w -h "${DBHOST}" -U "${DBROOTUSER}" -c "SELECT rolname FROM pg_catalog.pg_roles WHERE rolname = '${DBAPPUSER}';"|sed -e s/' '//g)
if [ -z "${userexist}" ]
then
    psql -w -h "${DBHOST}" -c "CREATE USER ${DBAPPUSER} WITH LOGIN PASSWORD '${DBAPPPASSWORD}';" -U "${DBROOTUSER}"
fi

# Create the application database only if it is not already present.
dbexist=$(psql -qt -w -h "${DBHOST}" -U "${DBROOTUSER}" -c "SELECT datname FROM pg_catalog.pg_database WHERE datname = '${DBAPPNAME}';"|sed -e s/' '//g)
if [ -z "${dbexist}" ]
then
    psql -w -h "${DBHOST}" -c "CREATE DATABASE ${DBAPPNAME} OWNER ${DBAPPUSER};" -U "${DBROOTUSER}"
fi
|
Fabrice-li/e-venement
|
bin/ci-scripts/create_database.sh
|
Shell
|
gpl-2.0
| 1,190 |
#!/bin/sh
# Helper probes for Lantiq SoC boards; sourced by other base-files scripts.
# Succeeds (and prints the module path) when the ADSL CPE API kernel module
# is installed, i.e. the image was built with ADSL support.
lantiq_soc_has_adsl() {
	ls /lib/modules/*/drv_dsl_cpe_api.ko
}
# Print the SoC name: the "system type" value from /proc/cpuinfo.
lantiq_soc_name() {
	grep ^system /proc/cpuinfo | sed "s/system type.*: \(.*\)/\1/g"
}
# Print the board name: the "machine" value from /proc/cpuinfo with any
# " - ..." variant suffix stripped.
lantiq_board_name() {
	grep ^machine /proc/cpuinfo | sed "s/machine.*: \(.*\)/\1/g" | sed "s/\(.*\) - .*/\1/g"
}
|
TheSkorm/openwrt-metarouter-cjdns
|
target/linux/lantiq/base-files/lib/lantiq.sh
|
Shell
|
gpl-2.0
| 277 |
#!/bin/sh
# This script is highly based on GIMP's autogen.sh
# This script does all the magic calls to automake/autoconf and friends
# that are needed to configure a Subversion checkout. As described in
# the file HACKING you need a couple of extra tools to run this script
# successfully.
#
# If you are compiling from a released tarball you don't need these
# tools and you shouldn't use this script. Just call ./configure
# directly.
# Tool names; each may be overridden from the environment
# (e.g. AUTOMAKE=automake-1.11 ./autogen.sh).
ACLOCAL=${ACLOCAL-aclocal-1.9}
AUTOCONF=${AUTOCONF-autoconf}
AUTOHEADER=${AUTOHEADER-autoheader}
AUTOMAKE=${AUTOMAKE-automake-1.9}
LIBTOOLIZE=${LIBTOOLIZE-libtoolize}
# Minimum tool versions required to regenerate the build system.
AUTOCONF_REQUIRED_VERSION=2.54
AUTOMAKE_REQUIRED_VERSION=1.9.6
INTLTOOL_REQUIRED_VERSION=0.35.5
LIBTOOL_REQUIRED_VERSION=1.5
PROJECT="Ario Music Player"
# Sanity marker checked later: the top-level source dir must contain plugins/.
TEST_TYPE=-d
FILE=plugins
srcdir=`dirname $0`
test -z "$srcdir" && srcdir=.
ORIGDIR=`pwd`
cd $srcdir
# check_version ACTUAL REQUIRED
# Compare two dotted version strings and print a verdict:
#   "yes (version A)", "Too old (version A)", or
#   "Major version might be too new (A)".
# Sets the global DIE=1 when ACTUAL does not satisfy REQUIRED.
# Missing components are padded with zeros.
check_version ()
{
    actual=$1
    required=$2
    ifs_backup="$IFS"
    IFS=.
    # "set dummy X 0 0 0" splits X on dots into $2 $3 $4, zero-padded.
    set dummy $actual 0 0 0
    act_major=$2 act_minor=$3 act_micro=$4
    set dummy $required 0 0 0
    req_major=$2 req_minor=$3 req_micro=$4
    IFS="$ifs_backup"
    if expr "$act_major" = "$req_major" > /dev/null; then
        if expr "$act_minor" \> "$req_minor" > /dev/null; then
            echo "yes (version $actual)"
        elif expr "$act_minor" = "$req_minor" > /dev/null; then
            if expr "$act_micro" \>= "$req_micro" > /dev/null; then
                echo "yes (version $actual)"
            else
                echo "Too old (version $actual)"
                DIE=1
            fi
        else
            echo "Too old (version $actual)"
            DIE=1
        fi
    elif expr "$act_major" \> "$req_major" > /dev/null; then
        echo "Major version might be too new ($actual)"
    else
        echo "Too old (version $actual)"
        DIE=1
    fi
}
echo
echo "I am testing that you have the tools required to build the"
echo "$PROJECT from Subversion. This test is not foolproof,"
echo "so if anything goes wrong, see the file HACKING for more information..."
echo
# DIE is set to 1 by check_version or the blocks below whenever a tool is
# missing or too old; we only bail out after all checks ran, so the user
# sees the full list of problems at once.
DIE=0
echo -n "checking for libtool >= $LIBTOOL_REQUIRED_VERSION ... "
# Accept either libtoolize or glibtoolize (its name on macOS / Fink).
if ($LIBTOOLIZE --version) < /dev/null > /dev/null 2>&1; then
LIBTOOLIZE=$LIBTOOLIZE
elif (glibtoolize --version) < /dev/null > /dev/null 2>&1; then
LIBTOOLIZE=glibtoolize
else
echo
echo " You must have libtool installed to compile $PROJECT."
echo " Install the appropriate package for your distribution,"
echo " or get the source tarball at ftp://ftp.gnu.org/pub/gnu/"
echo
DIE=1
fi
if test x$LIBTOOLIZE != x; then
VER=`$LIBTOOLIZE --version \
| grep libtool | sed "s/.* \([0-9.]*\)[-a-z0-9]*$/\1/"`
check_version $VER $LIBTOOL_REQUIRED_VERSION
fi
echo -n "checking for autoconf >= $AUTOCONF_REQUIRED_VERSION ... "
if ($AUTOCONF --version) < /dev/null > /dev/null 2>&1; then
VER=`$AUTOCONF --version | head -n 1 \
| grep -iw autoconf | sed "s/.* \([0-9.]*\)[-a-z0-9]*$/\1/"`
check_version $VER $AUTOCONF_REQUIRED_VERSION
else
echo
echo " You must have autoconf installed to compile $PROJECT."
echo " Download the appropriate package for your distribution,"
echo " or get the source tarball at ftp://ftp.gnu.org/pub/gnu/autoconf/"
echo
DIE=1;
fi
echo -n "checking for automake >= $AUTOMAKE_REQUIRED_VERSION ... "
# Try the preferred automake first, then progressively older versioned names.
if ($AUTOMAKE --version) < /dev/null > /dev/null 2>&1; then
AUTOMAKE=$AUTOMAKE
ACLOCAL=$ACLOCAL
elif (automake-1.11 --version) < /dev/null > /dev/null 2>&1; then
AUTOMAKE=automake-1.11
ACLOCAL=aclocal-1.11
elif (automake-1.10 --version) < /dev/null > /dev/null 2>&1; then
AUTOMAKE=automake-1.10
ACLOCAL=aclocal-1.10
elif (automake-1.9 --version) < /dev/null > /dev/null 2>&1; then
AUTOMAKE=automake-1.9
ACLOCAL=aclocal-1.9
else
echo
echo " You must have automake $AUTOMAKE_REQUIRED_VERSION or newer installed to compile $PROJECT."
echo " Download the appropriate package for your distribution,"
echo " or get the source tarball at ftp://ftp.gnu.org/pub/gnu/automake/"
echo
DIE=1
fi
if test x$AUTOMAKE != x; then
VER=`$AUTOMAKE --version \
| grep automake | sed "s/.* \([0-9.]*\)[-a-z0-9]*$/\1/"`
check_version $VER $AUTOMAKE_REQUIRED_VERSION
fi
echo -n "checking for intltool >= $INTLTOOL_REQUIRED_VERSION ... "
if (intltoolize --version) < /dev/null > /dev/null 2>&1; then
VER=`intltoolize --version \
| grep intltoolize | sed "s/.* \([0-9.]*\)/\1/"`
check_version $VER $INTLTOOL_REQUIRED_VERSION
else
echo
echo " You must have intltool installed to compile $PROJECT."
echo " Get the latest version from"
echo " ftp://ftp.gnome.org/pub/GNOME/sources/intltool/"
echo
DIE=1
fi
if test "$DIE" -eq 1; then
echo
echo "Please install/upgrade the missing tools and call me again."
echo
exit 1
fi
# TEST_TYPE/FILE were set above (-d plugins): we must be in the top-level dir.
test $TEST_TYPE $FILE || {
echo
echo "You must run this script in the top-level $PROJECT directory."
echo
exit 1
}
if test -z "$*"; then
echo "If you wish to pass additional arguments, please specify them "
echo "on the $0 command line or set the AUTOGEN_CONFIGURE_ARGS "
echo "environment variable."
echo
else
echo
echo "I am going to run ./configure with the following arguments:"
echo
echo "$AUTOGEN_CONFIGURE_ARGS $@"
echo
fi
# Warn (but do not fail) when aclocal's system macro dir is missing m4 files
# that the build needs; the user can point at them via ACLOCAL_FLAGS.
if test -z "$ACLOCAL_FLAGS"; then
acdir=`$ACLOCAL --print-ac-dir`
m4list="glib-2.0.m4 glib-gettext.m4 gtk-2.0.m4 intltool.m4 pkg.m4"
for file in $m4list
do
if [ ! -f "$acdir/$file" ]; then
echo
echo "WARNING: aclocal's directory is $acdir, but..."
echo " no file $acdir/$file"
echo " You may see fatal macro warnings below."
echo " If these files are installed in /some/dir, set the "
echo " ACLOCAL_FLAGS environment variable to \"-I /some/dir\""
echo " or install $acdir/$file."
echo
fi
done
fi
# Regenerate the build system: aclocal -> libtoolize -> autoheader ->
# automake -> autoconf -> intltoolize, then run configure.
rm -rf autom4te.cache
$ACLOCAL $ACLOCAL_FLAGS
RC=$?
if test $RC -ne 0; then
echo "$ACLOCAL gave errors. Please fix the error conditions and try again."
exit $RC
fi
$LIBTOOLIZE --force || exit $?
# optionally feature autoheader
($AUTOHEADER --version) < /dev/null > /dev/null 2>&1 && $AUTOHEADER || exit 1
$AUTOMAKE --add-missing || exit $?
$AUTOCONF || exit $?
intltoolize --automake || exit $?
cd $ORIGDIR
$srcdir/configure $AUTOGEN_CONFIGURE_ARGS "$@"
RC=$?
if test $RC -ne 0; then
echo
echo "Configure failed or did not finish!"
exit $RC
fi
echo
echo "Now type 'make' to compile the $PROJECT."
|
Lokke/ario-git
|
autogen.sh
|
Shell
|
gpl-2.0
| 7,209 |
#! /bin/sh
# Copyright (C) 2011-2014 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
# The strictness specified in Makefile.am:AUTOMAKE_OPTIONS should
# override that specified in configure.ac:AM_INIT_AUTOMAKE, and both
# should override the strictness specified on the command line.
# NOTE: the current semantics might not be the best one (even if it has
# been in place for quite a long time); see also Automake bug #7673.
# Update this test if the semantics are changed.
. test-init.sh
# We want complete control over automake options.
# (test-init.sh normally adds flags of its own; use the pristine binary.)
AUTOMAKE=$am_original_AUTOMAKE
cat > Makefile.am <<'END'
AUTOMAKE_OPTIONS =
END
# set_strictness LEVEL FILE
# Rewrite FILE in place, forcing its AUTOMAKE_OPTIONS assignment and/or
# AM_INIT_AUTOMAKE invocation (whichever it contains) to LEVEL.  Shell
# tracing is suspended around the edit; the result is dumped for the log.
set_strictness ()
{
  set +x
  _level=$1
  _file=$2
  sed -e "s|^\\(AUTOMAKE_OPTIONS\\) *=.*|\\1 = $_level|" \
      -e "s|^\\(AM_INIT_AUTOMAKE\\).*|\\1([$_level])|" \
      <"$_file" >"${_file}-t"
  mv -f "${_file}-t" "$_file"
  set -x
  cat "$_file"
}
# ok [FLAGS..] — automake must succeed with FLAGS (warnings made fatal).
ok ()
{
  $AUTOMAKE -Werror $*
}
# ko [FLAGS..] — automake must fail (via the test framework's
# AUTOMAKE_fails), complaining about the missing README on stderr.
ko ()
{
  AUTOMAKE_fails $*
  grep 'required file.*README' stderr
}
$ACLOCAL
# Leave out only one of the required files, to avoid too much
# repetition in the error messages.
touch INSTALL NEWS AUTHORS ChangeLog COPYING
# Exercise every combination of strictness in Makefile.am and configure.ac
# against each command-line strictness: Makefile.am overrides configure.ac,
# and both override the command line.  'gnu' strictness requires README
# (so ko), 'foreign' does not (so ok).  The aclocal cache is removed before
# each stanza so the edited configure.ac is re-read.
rm -rf autom4te*.cache
set_strictness '' Makefile.am
set_strictness '' configure.ac
ko --gnu
ko
ok --foreign
rm -rf autom4te*.cache
set_strictness 'gnu' Makefile.am
set_strictness '' configure.ac
ko --gnu
ko
ko --foreign
rm -rf autom4te*.cache
set_strictness '' Makefile.am
set_strictness 'gnu' configure.ac
ko --gnu
ko
ko --foreign
rm -rf autom4te*.cache
set_strictness 'foreign' Makefile.am
set_strictness '' configure.ac
ok --gnu
ok
ok --foreign
rm -rf autom4te*.cache
set_strictness '' Makefile.am
set_strictness 'foreign' configure.ac
ok --gnu
ok
ok --foreign
rm -rf autom4te*.cache
set_strictness 'gnu' Makefile.am
set_strictness 'gnu' configure.ac
ko --gnu
ko
ko --foreign
rm -rf autom4te*.cache
set_strictness 'foreign' Makefile.am
set_strictness 'foreign' configure.ac
ok --gnu
ok
ok --foreign
rm -rf autom4te*.cache
# Conflicting settings: Makefile.am ('foreign') beats configure.ac ('gnu')...
set_strictness 'foreign' Makefile.am
set_strictness 'gnu' configure.ac
ok --gnu
ok
ok --foreign
rm -rf autom4te*.cache
# ...and Makefile.am ('gnu') beats configure.ac ('foreign').
set_strictness 'gnu' Makefile.am
set_strictness 'foreign' configure.ac
ko --gnu
ko
ko --foreign
:
|
kuym/openocd
|
tools/automake-1.15/t/strictness-override.sh
|
Shell
|
gpl-2.0
| 2,727 |
#!/bin/sh
# Eventscript unit test: when NATGW is not configured, the "ipreallocate"
# event must be a silent no-op and must not install any routes.
. "${TEST_SCRIPTS_DIR}/unit.sh"
define_test "not configured"
setup
# Expect no output at all from the event handler.
ok_null
simple_test_event "ipreallocate"
# And no NATGW routes should have been created.
check_routes 0
|
sathieu/samba
|
ctdb/tests/eventscripts/11.natgw.001.sh
|
Shell
|
gpl-3.0
| 138 |
#!/usr/bin/env bash
# Copyright 2011 Red Hat Inc., Durham, North Carolina.
# All Rights Reserved.
# Regression test for the OVAL "fileextendedattribute" probe: evaluate the
# bundled definition against files that carry (or lack) a user xattr and
# verify the expected definition/test results.
. $builddir/tests/test_common.sh

# Test Cases.

function test_probes_fileextendedattribute {
    return 255 # TODO: implement xattr support check

    probecheck "fileextendedattribute" || return 255

    local ret_val=0;
    local DEFFILE="$srcdir/test_probes_fileextendedattribute.xml"
    local RESFILE="results.xml"

    [ -f $RESFILE ] && rm -f $RESFILE

    # Fixtures: one file with an attribute value, one with an empty
    # attribute, and one with no attribute at all.
    touch /tmp/xattr_with_val
    setfattr -n user.fooattr -v foo /tmp/xattr_with_val
    touch /tmp/xattr_without_val
    setfattr -n user.fooattr /tmp/xattr_without_val
    touch /tmp/xattr_noattr

    $OSCAP oval eval --results $RESFILE $DEFFILE

    if [ -f $RESFILE ]; then
        # Fixed: previously passed $DF/$RF, which are never defined; the
        # intended arguments are the locals above.
        verify_results "def" $DEFFILE $RESFILE 1 && verify_results "tst" $DEFFILE $RESFILE 3
        ret_val=$?
    else
        ret_val=1
    fi

    return $ret_val
}

# Testing.

test_init

# Fixed: the display label previously said "test_probes_fileattribute".
test_run "test_probes_fileextendedattribute" test_probes_fileextendedattribute

test_exit
|
jan-cerny/openscap
|
tests/probes/fileextendedattribute/test_probes_fileextendedattribute.sh
|
Shell
|
lgpl-2.1
| 984 |
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

# This is an example docker build script. It is not intended for PRODUCTION use
set -euo pipefail
AIRFLOW_SOURCES="$(cd "$(dirname "${BASH_SOURCE[0]}")/../../../../" && pwd)"
cd "${AIRFLOW_SOURCES}"

# [START build]
export AIRFLOW_VERSION=2.2.2
# Note: a stray "--build \" flag was removed here — "docker build" accepts
# "--build-arg" but has no "--build" option, so the invocation failed.
docker build . \
    --build-arg PYTHON_BASE_IMAGE="python:3.7-slim-buster" \
    --build-arg AIRFLOW_VERSION="${AIRFLOW_VERSION}" \
    --tag "my-pypi-selected-version:0.0.1"
# [END build]
docker rmi --force "my-pypi-selected-version:0.0.1"
|
bolkedebruin/airflow
|
docs/docker-stack/docker-examples/customizing/pypi-selected-version.sh
|
Shell
|
apache-2.0
| 1,308 |
#!/bin/bash
# -------------------------------------------------------------------------- #
# Copyright 2002-2012, OpenNebula Project Leads (OpenNebula.org)             #
#                                                                            #
# Licensed under the Apache License, Version 2.0 (the "License"); you may    #
# not use this file except in compliance with the License. You may obtain    #
# a copy of the License at                                                   #
#                                                                            #
# http://www.apache.org/licenses/LICENSE-2.0                                 #
#                                                                            #
# Unless required by applicable law or agreed to in writing, software        #
# distributed under the License is distributed on an "AS IS" BASIS,          #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
# See the License for the specific language governing permissions and        #
# limitations under the License.                                             #
#--------------------------------------------------------------------------- #

# Run every RSpec file under ./spec against a fresh ozones-test.db,
# aborting on the first failing spec.
for j in ./spec/*_spec.rb ; do
    # Start each spec from a clean test database; ignore "no such file".
    rm ozones-test.db &> /dev/null

    # Iterate over the glob directly instead of parsing `ls` output, and
    # quote the path so unusual filenames cannot break the invocation.
    spec "$j" -f s

    CODE=$?

    if [ $CODE != 0 ] ; then
        exit 1
    fi
done
|
bcec/opennebula3.4.1
|
src/ozones/Server/lib/test/test_all.sh
|
Shell
|
apache-2.0
| 1,361 |
#!/bin/sh
PLATFORM=$(uname -s)
if [ "$1" ]; then VERSION="$1"; else VERSION="2.1devel"; fi
set -e
# Portable "ggrep -A" replacement: print each line matching PATTERN plus the
# next POST_MATCH_LINES lines.  Input is truncated to 2500 columns to stay
# under the Solaris awk record limit of 2559 bytes.
# usage: contextgrep PATTERN POST_MATCH_LINES
contextgrep() {
    cut -c -2500 | awk -v extra="$2" "/$1/ { count = extra + 1 } count > 0 { count--; print }"
}
# Run the Lua test suite from tests/ against whichever cjson module is on
# the Lua path, printing only failures (with 3 lines of context) and the
# summary; PASS lines are filtered out and lines clipped to 150 columns.
do_tests() {
    echo
    cd tests
    lua -e 'print("Testing Lua CJSON version " .. require("cjson")._VERSION)'
    ./test.lua | contextgrep 'FAIL|Summary' 3 | grep -v PASS | cut -c -150
    cd ..
}
echo "===== Setting LuaRocks PATH ====="
eval "`luarocks path`"
echo "===== Building UTF-8 test data ====="
( cd tests && ./genutf8.pl; )
echo "===== Cleaning old build data ====="
make clean
rm -f tests/cjson.so
# A pre-installed cjson would shadow the locally built module and make the
# three build tests below meaningless, so refuse to continue.
echo "===== Verifying cjson.so is not installed ====="
cd tests
if lua -e 'require "cjson"' 2>/dev/null
then
    cat <<EOT
Please ensure you do not have the Lua CJSON module installed before
running these tests.
EOT
    exit
fi
cd ..
# Build and test the module three ways: LuaRocks, plain Makefile, and CMake.
echo "===== Testing LuaRocks build ====="
luarocks make --local
do_tests
luarocks remove --local lua-cjson
make clean
echo "===== Testing Makefile build ====="
make
cp -r lua/cjson cjson.so tests
do_tests
make clean
# NOTE(review): the {,.so} brace expansions below are a bashism; under a
# strictly POSIX /bin/sh they are not expanded — confirm the target shells.
rm -rf tests/cjson{,.so}
echo "===== Testing Cmake build ====="
mkdir build
cd build
cmake ..
make
cd ..
cp -r lua/cjson build/cjson.so tests
do_tests
rm -rf build tests/cjson{,.so}
# vi:ai et sw=4 ts=4:
|
LomoX-Offical/nginx-openresty-windows
|
src/lua-cjson-2.1.0.5/runtests.sh
|
Shell
|
bsd-2-clause
| 1,392 |
#!/bin/bash
# Conda post-link script: download the primeviewprobe data tarball from the
# first mirror whose payload matches the expected md5, then install it into
# the bundled R library.
FN="primeviewprobe_2.18.0.tar.gz"
URLS=(
  "https://bioconductor.org/packages/3.10/data/annotation/src/contrib/primeviewprobe_2.18.0.tar.gz"
  "https://bioarchive.galaxyproject.org/primeviewprobe_2.18.0.tar.gz"
  "https://depot.galaxyproject.org/software/bioconductor-primeviewprobe/bioconductor-primeviewprobe_2.18.0_src_all.tar.gz"
  "https://depot.galaxyproject.org/software/bioconductor-primeviewprobe/bioconductor-primeviewprobe_2.18.0_src_all.tar.gz"
)
MD5="87cc56a8da4fb742ab1558ce1d3d26cc"

# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.  Paths are quoted throughout: $PREFIX may contain spaces.
STAGING="$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM"
mkdir -p "$STAGING"
TARBALL="$STAGING/$FN"
SUCCESS=0
for URL in "${URLS[@]}"; do
  curl "$URL" > "$TARBALL" || continue
  # Platform-specific md5sum checks.
  if [[ $(uname -s) == "Linux" ]]; then
    if md5sum -c <<<"$MD5 $TARBALL"; then
      SUCCESS=1
      break
    fi
  elif [[ $(uname -s) == "Darwin" ]]; then
    if [[ $(md5 "$TARBALL" | cut -f4 -d " ") == "$MD5" ]]; then
      SUCCESS=1
      break
    fi
  fi
done
if [[ $SUCCESS != 1 ]]; then
  echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
  printf '%s\n' "${URLS[@]}"
  exit 1
fi

# Install and clean up
R CMD INSTALL --library="$PREFIX/lib/R/library" "$TARBALL"
rm "$TARBALL"
rmdir "$STAGING"
|
Luobiny/bioconda-recipes
|
recipes/bioconductor-primeviewprobe/post-link.sh
|
Shell
|
mit
| 1,445 |
# Download and unpack the openssl-1.0.0t source into CachedSource/, verifying
# the tarball against a known md5 before trusting it.
EXPECTED_MD5="7b7e9f6039a97e4a453b596055912435"
TARBALL="openssl-1.0.0t.tar.gz"
# Fail early if the download itself does not complete.
curl -L -O "https://www.openssl.org/source/$TARBALL" || exit 1
# macOS "md5 -q" prints just the digest.
if [[ $(md5 -q "$TARBALL") != "$EXPECTED_MD5" ]]; then
    echo "Unexpected source hash" >&2
    # exit takes 0-255; the former "exit -1" is non-portable (maps to 255).
    exit 1
fi
tar zxvf "$TARBALL"
mv openssl-1.0.0t CachedSource
|
ensemblr/llvm-project-boilerplate
|
include/llvm/projects/test-suite/ClangAnalyzer/openssl-1.0.0t/download_project.sh
|
Shell
|
mit
| 286 |
#!/bin/bash
# Verify that every commit on this branch that is not yet on master carries
# a Signed-off-by trailer; print the offending commits and fail if any.
missing=0
for sha in $(git cherry master | cut -d " " -f 2); do
    if ! git show -s "$sha" | grep -q 'Signed-off-by:'; then
        echo "Commit $sha doesn't have a Signed-off-by"
        git show "$sha"
        missing=$((missing + 1))
    fi
done

if [ "$missing" -eq 0 ]; then
    echo "All commits have a Signed-off-by"
else
    echo "Some commits do not have a Signed-off-by, failing..."
    echo "Fix is probably (git commit --amend -s)"
    exit 1
fi
|
jamielennox/hoist
|
tests/test-signed-off-by.sh
|
Shell
|
apache-2.0
| 483 |
#!/bin/sh
# Select a build target: copies confs/<TARGET>.conf to dsss.conf.

# Print usage plus the list of available targets (one per confs/*.conf file).
render_usage()
{
	echo "USAGE: ./configure [TARGET]"
	echo ""
	echo "TARGET - one of the following values: "
	for item in confs/*.conf;
	do
		echo "$item" | sed -e 's/confs\/\(.*\)\.conf/\1/';
	done
}

if [ $# -ne 1 ]
then
	render_usage
else
	FILE="confs/$1.conf"
	# Fail clearly when the requested target has no configuration file,
	# instead of letting cp produce its own error.
	if [ ! -f "$FILE" ]
	then
		echo "Unknown target: $1" >&2
		render_usage
		exit 1
	fi
	echo "Targetting $1"
	cp "$FILE" dsss.conf
fi
|
greenmanspirit/xomb
|
build/configure.sh
|
Shell
|
bsd-3-clause
| 319 |
#!/bin/bash -eux
# Copyright 2014 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Regression test: "futility sign" must produce output bit-identical to the
# legacy "futility vbutil_keyblock --pack" invocations for plain, unsigned,
# PEM-signed, and externally-signed keyblocks (cmp fails otherwise, and
# -eux aborts the script on the first mismatch).
me=${0##*/}
TMP="$me.tmp"
# Work in scratch directory
cd "$OUTDIR"
# some stuff we'll need
DEVKEYS=${SRCDIR}/tests/devkeys
TESTKEYS=${SRCDIR}/tests/testkeys
SIGNER=${SRCDIR}/tests/external_rsa_signer.sh
# Create a copy of an existing keyblock, using the old way
${FUTILITY} vbutil_keyblock --pack ${TMP}.keyblock0 \
--datapubkey ${DEVKEYS}/firmware_data_key.vbpubk \
--flags 7 \
--signprivate ${DEVKEYS}/root_key.vbprivk
# Check it.
${FUTILITY} vbutil_keyblock --unpack ${TMP}.keyblock0 \
--signpubkey ${DEVKEYS}/root_key.vbpubk
# It should be the same as the dev-key firmware keyblock
cmp ${DEVKEYS}/firmware.keyblock ${TMP}.keyblock0
# Now create it the new way
${FUTILITY} --debug sign \
--datapubkey ${DEVKEYS}/firmware_data_key.vbpubk \
--flags 7 \
--signprivate ${DEVKEYS}/root_key.vbprivk \
--outfile ${TMP}.keyblock1
# It should be the same too.
cmp ${DEVKEYS}/firmware.keyblock ${TMP}.keyblock1
# Create a keyblock without signing it.
# old way
${FUTILITY} vbutil_keyblock --pack ${TMP}.keyblock0 \
--datapubkey ${DEVKEYS}/firmware_data_key.vbpubk \
--flags 14
# new way
${FUTILITY} --debug sign \
--flags 14 \
${DEVKEYS}/firmware_data_key.vbpubk \
${TMP}.keyblock1
cmp ${TMP}.keyblock0 ${TMP}.keyblock1
# Create one using PEM args
# old way
${FUTILITY} vbutil_keyblock --pack ${TMP}.keyblock2 \
--datapubkey ${DEVKEYS}/firmware_data_key.vbpubk \
--signprivate_pem ${TESTKEYS}/key_rsa4096.pem \
--pem_algorithm 8 \
--flags 9
# verify it
${FUTILITY} vbutil_keyblock --unpack ${TMP}.keyblock2 \
--signpubkey ${TESTKEYS}/key_rsa4096.sha512.vbpubk
# new way
${FUTILITY} --debug sign \
--pem_signpriv ${TESTKEYS}/key_rsa4096.pem \
--pem_algo 8 \
--flags 9 \
${DEVKEYS}/firmware_data_key.vbpubk \
${TMP}.keyblock3
cmp ${TMP}.keyblock2 ${TMP}.keyblock3
# Try it with an external signer
# old way
${FUTILITY} vbutil_keyblock --pack ${TMP}.keyblock4 \
--datapubkey ${DEVKEYS}/firmware_data_key.vbpubk \
--signprivate_pem ${TESTKEYS}/key_rsa4096.pem \
--pem_algorithm 8 \
--flags 19 \
--externalsigner ${SIGNER}
# verify it
${FUTILITY} vbutil_keyblock --unpack ${TMP}.keyblock4 \
--signpubkey ${TESTKEYS}/key_rsa4096.sha512.vbpubk
# new way
${FUTILITY} --debug sign \
--pem_signpriv ${TESTKEYS}/key_rsa4096.pem \
--pem_algo 8 \
--pem_external ${SIGNER} \
--flags 19 \
${DEVKEYS}/firmware_data_key.vbpubk \
${TMP}.keyblock5
cmp ${TMP}.keyblock4 ${TMP}.keyblock5
# cleanup
rm -rf ${TMP}*
exit 0
|
acorn-marvell/vboot_reference
|
tests/futility/test_sign_keyblocks.sh
|
Shell
|
bsd-3-clause
| 2,672 |
#!/bin/bash
# Conda post-link script: download the pd.rhegene.1.0.st data tarball from
# the first mirror whose payload matches the expected md5, then install it
# into the bundled R library.
FN="pd.rhegene.1.0.st_3.12.0.tar.gz"
URLS=(
  "https://bioconductor.org/packages/3.13/data/annotation/src/contrib/pd.rhegene.1.0.st_3.12.0.tar.gz"
  "https://bioarchive.galaxyproject.org/pd.rhegene.1.0.st_3.12.0.tar.gz"
  "https://depot.galaxyproject.org/software/bioconductor-pd.rhegene.1.0.st/bioconductor-pd.rhegene.1.0.st_3.12.0_src_all.tar.gz"
  "https://depot.galaxyproject.org/software/bioconductor-pd.rhegene.1.0.st/bioconductor-pd.rhegene.1.0.st_3.12.0_src_all.tar.gz"
)
MD5="6469add6928e663529df4df98fcdd7a8"

# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.  Paths are quoted throughout: $PREFIX may contain spaces.
STAGING="$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM"
mkdir -p "$STAGING"
TARBALL="$STAGING/$FN"
SUCCESS=0
for URL in "${URLS[@]}"; do
  curl "$URL" > "$TARBALL" || continue
  # Platform-specific md5sum checks.
  if [[ $(uname -s) == "Linux" ]]; then
    if md5sum -c <<<"$MD5 $TARBALL"; then
      SUCCESS=1
      break
    fi
  elif [[ $(uname -s) == "Darwin" ]]; then
    if [[ $(md5 "$TARBALL" | cut -f4 -d " ") == "$MD5" ]]; then
      SUCCESS=1
      break
    fi
  fi
done
if [[ $SUCCESS != 1 ]]; then
  echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
  printf '%s\n' "${URLS[@]}"
  exit 1
fi

# Install and clean up
R CMD INSTALL --library="$PREFIX/lib/R/library" "$TARBALL"
rm "$TARBALL"
rmdir "$STAGING"
|
phac-nml/bioconda-recipes
|
recipes/bioconductor-pd.rhegene.1.0.st/post-link.sh
|
Shell
|
mit
| 1,466 |
#!/bin/bash
################################################################################
## ##
## Copyright (c) Dan Carpenter., 2004 ##
## ##
## This program is free software; you can redistribute it and#or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation; either version 2 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, but ##
## WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY ##
## or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ##
## for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program; if not, write to the Free Software ##
## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ##
## ##
################################################################################
# Filesystem race stressor: forever pick a random slot in 0..MAX-1 and try
# to create both a symlink and a hard link named after the next slot inside
# DIR, discarding the failures expected from concurrent racer scripts.
target_dir=$1
slots=$2

while : ; do
    src=$(( RANDOM % slots ))
    dst=$(( (src + 1) % slots ))
    ln -s "$src" "$target_dir/$dst" 2> /dev/null
    ln "$src" "$target_dir/$dst" 2> /dev/null
done
|
chenhuacai/ltp
|
testcases/kernel/fs/racer/fs_racer_file_link.sh
|
Shell
|
gpl-2.0
| 1,740 |
#!/usr/bin/env bash
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
# End-to-end HA test: run the all-round DataStream job on a per-job
# standalone cluster, repeatedly kill the JobManager, and verify that each
# replacement JM recovers the job from ZooKeeper and resumes checkpointing.
source "$(dirname "$0")"/common.sh
source "$(dirname "$0")"/common_ha.sh
TEST_PROGRAM_JAR_NAME=DataStreamAllroundTestProgram.jar
TEST_PROGRAM_JAR=${END_TO_END_DIR}/flink-datastream-allround-test/target/${TEST_PROGRAM_JAR_NAME}
FLINK_LIB_DIR=${FLINK_DIR}/lib
# Per-job standalone clusters always use this fixed, all-zero job id.
JOB_ID="00000000000000000000000000000000"
# Exit hook: stop the JM/TM watchdogs and any remaining cluster entrypoints.
function ha_cleanup() {
  stop_watchdogs
  kill_all 'StandaloneJobClusterEntryPoint'
}
on_exit ha_cleanup
# run_job PARALLELISM BACKEND ASYNC INCREM
# Launch the test job as a standalone per-job cluster entrypoint, configured
# to self-inject failures so checkpoint recovery is exercised.
function run_job() {
    local PARALLELISM=$1
    local BACKEND=$2
    local ASYNC=$3
    local INCREM=$4
    local CHECKPOINT_DIR="${TEST_DATA_DIR}/checkpoints/"
    ${FLINK_DIR}/bin/standalone-job.sh start \
        --job-classname org.apache.flink.streaming.tests.DataStreamAllroundTestProgram \
        --environment.parallelism ${PARALLELISM} \
        --test.semantics exactly-once \
        --test.simulate_failure true \
        --test.simulate_failure.num_records 200 \
        --test.simulate_failure.num_checkpoints 1 \
        --test.simulate_failure.max_failures 20 \
        --state_backend ${BACKEND} \
        --state_backend.checkpoint_directory "file://${CHECKPOINT_DIR}" \
        --state_backend.file.async ${ASYNC} \
        --state_backend.rocks.incremental ${INCREM} \
        --sequence_generator_source.sleep_time 15 \
        --sequence_generator_source.sleep_after_elements 1
}
# verify_logs_per_job JM_FAILURES
# Inspect the collected logs: no alerts, exactly one fresh job start, and
# every JM incarnation (original + JM_FAILURES takeovers) recovered from
# ZooKeeper and completed checkpoints.
function verify_logs_per_job() {
    local JM_FAILURES=$1
    local EXIT_CODE=0
    # verify that we have no alerts
    if ! check_logs_for_non_empty_out_files; then
        echo "FAILURE: Alerts found at the general purpose job."
        EXIT_CODE=1
    fi
    # checks that all apart from the first JM recover the failed jobgraph.
    if ! verify_num_occurences_in_logs 'standalonejob' 'Found 0 checkpoints in ZooKeeper' 1; then
        echo "FAILURE: A JM did not take over, but started new job."
        EXIT_CODE=1
    fi
    if ! verify_num_occurences_in_logs 'standalonejob' 'Found [[:digit:]]\+ checkpoints in ZooKeeper' $((JM_FAILURES + 1)); then
        echo "FAILURE: A JM did not take over."
        EXIT_CODE=1
    fi
    # search the logs for JMs that log completed checkpoints
    if ! verify_num_occurences_in_logs 'standalonejob' 'Completed checkpoint' $((JM_FAILURES + 1)); then
        echo "FAILURE: A JM did not execute the job."
        EXIT_CODE=1
    fi
    if [[ $EXIT_CODE != 0 ]]; then
        echo "One or more tests FAILED."
        exit $EXIT_CODE
    fi
}
# run_ha_test PARALLELISM BACKEND ASYNC INCREM
# Full scenario: start HA cluster + job, keep JM/TM counts stable via
# watchdogs, kill the JM JM_KILLS times, then verify log evidence of
# recovery after each takeover.
function run_ha_test() {
    local PARALLELISM=$1
    local BACKEND=$2
    local ASYNC=$3
    local INCREM=$4
    local JM_KILLS=3
    CLEARED=0
    # add job jar to cluster classpath
    cp ${TEST_PROGRAM_JAR} ${FLINK_LIB_DIR}
    # start the cluster on HA mode
    create_ha_config
    # change the pid dir to start log files always from 0, this is important for checks in the
    # jm killing loop
    set_config_key "env.pid.dir" "${TEST_DATA_DIR}"
    start_local_zk
    echo "Running on HA mode: parallelism=${PARALLELISM}, backend=${BACKEND}, asyncSnapshots=${ASYNC}, and incremSnapshots=${INCREM}."
    # submit a job in detached mode and let it run
    run_job ${PARALLELISM} ${BACKEND} ${ASYNC} ${INCREM}
    # divide parallelism by slots per tm with rounding up
    local neededTaskmanagers=$(( (${PARALLELISM} + ${TASK_SLOTS_PER_TM_HA} - 1) / ${TASK_SLOTS_PER_TM_HA} ))
    start_taskmanagers ${neededTaskmanagers}
    wait_job_running ${JOB_ID}
    # start the watchdog that keeps the number of JMs stable
    start_ha_jm_watchdog 1 "StandaloneJobClusterEntryPoint" run_job ${PARALLELISM} ${BACKEND} ${ASYNC} ${INCREM}
    # start the watchdog that keeps the number of TMs stable
    start_ha_tm_watchdog ${JOB_ID} ${neededTaskmanagers}
    # let the job run for a while to take some checkpoints
    wait_num_of_occurence_in_logs "Completed checkpoint [1-9]* for job ${JOB_ID}" 2 "standalonejob"
    for (( c=1; c<=${JM_KILLS}; c++ )); do
        # kill the JM and wait for watchdog to
        # create a new one which will take over
        kill_single 'StandaloneJobClusterEntryPoint'
        # let the job start and take some checkpoints
        wait_num_of_occurence_in_logs "Completed checkpoint [1-9]* for job ${JOB_ID}" 2 "standalonejob-${c}"
    done
    # verify checkpoints in the logs
    verify_logs_per_job ${JM_KILLS}
}
# Optional positional parameters: backend type, file-backend async flag,
# RocksDB incremental flag.
STATE_BACKEND_TYPE=${1:-file}
STATE_BACKEND_FILE_ASYNC=${2:-true}
STATE_BACKEND_ROCKS_INCREMENTAL=${3:-false}
run_ha_test 4 ${STATE_BACKEND_TYPE} ${STATE_BACKEND_FILE_ASYNC} ${STATE_BACKEND_ROCKS_INCREMENTAL}
|
shaoxuan-wang/flink
|
flink-end-to-end-tests/test-scripts/test_ha_per_job_cluster_datastream.sh
|
Shell
|
apache-2.0
| 5,453 |
#!/bin/bash
# Copyright 2013 The Flutter Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Build the Android (x64) and host engine variants if missing, compile the
# scenario app, and run the Android scenario tests.  Optional $1 names the
# engine output directory (default: android_debug_unopt_x64).

set -e

# Needed because if it is set, cd may print the path it changed to.
unset CDPATH

# On Mac OS, readlink -f doesn't work, so follow_links traverses the path one
# link at a time, and then cds into the link destination and find out where it
# ends up.
#
# The function is enclosed in a subshell to avoid changing the working directory
# of the caller.
function follow_links() (
  cd -P "$(dirname -- "$1")"
  file="$PWD/$(basename -- "$1")"
  while [[ -h "$file" ]]; do
    cd -P "$(dirname -- "$file")"
    file="$(readlink -- "$file")"
    cd -P "$(dirname -- "$file")"
    file="$PWD/$(basename -- "$file")"
  done
  echo "$file"
)

SCRIPT_DIR=$(follow_links "$(dirname -- "${BASH_SOURCE[0]}")")
# Engine source root is three levels above this script.
SRC_DIR="$(cd "$SCRIPT_DIR/../../.."; pwd -P)"
GN="$SRC_DIR/flutter/tools/gn"
FLUTTER_ENGINE=android_debug_unopt_x64
export ANDROID_HOME="$SRC_DIR/third_party/android_tools/sdk"

if [[ $# -eq 1 ]]; then
  FLUTTER_ENGINE="$1"
fi

# Generate GN build files for both the Android and host variants only when
# the requested output directory does not exist yet.
if [[ ! -d "$SRC_DIR/out/$FLUTTER_ENGINE" ]]; then
  "$GN" --android --unoptimized --android-cpu x64 --runtime-mode debug
  "$GN" --unoptimized --runtime-mode debug
fi

autoninja -C "$SRC_DIR/out/$FLUTTER_ENGINE"
autoninja -C "$SRC_DIR/out/host_debug_unopt"
"$SCRIPT_DIR/compile_android_jit.sh" "$SRC_DIR/out/host_debug_unopt" "$SRC_DIR/out/$FLUTTER_ENGINE/clang_x64"
"$SCRIPT_DIR/run_android_tests.sh" "$FLUTTER_ENGINE"
|
chinmaygarde/flutter_engine
|
testing/scenario_app/build_and_run_android_tests.sh
|
Shell
|
bsd-3-clause
| 1,531 |
#!/bin/sh
# -----------------------------------------------------------------------------
# Start Script for the CATALINA Server
#
# $Id: startup.sh 467182 2006-10-23 23:47:06Z markt $
# -----------------------------------------------------------------------------
# Better OS/400 detection: see Bugzilla 31132
# Initialise every platform flag before the uname dispatch.  `cygwin` was
# previously the only flag left uninitialised, inconsistent with os400/darwin.
cygwin=false
os400=false
darwin=false
case "`uname`" in
CYGWIN*) cygwin=true;;
OS400*) os400=true;;
Darwin*) darwin=true;;
esac

# resolve links - $0 may be a softlink
PRG="$0"

while [ -h "$PRG" ] ; do
  ls=`ls -ld "$PRG"`
  link=`expr "$ls" : '.*-> \(.*\)$'`
  if expr "$link" : '/.*' > /dev/null; then
    PRG="$link"
  else
    PRG=`dirname "$PRG"`/"$link"
  fi
done

PRGDIR=`dirname "$PRG"`
EXECUTABLE=catalina.sh

# Check that target executable exists
if $os400; then
  # -x will Only work on the os400 if the files are:
  # 1. owned by the user
  # 2. owned by the PRIMARY group of the user
  # this will not work if the user belongs in secondary groups
  eval
else
  if [ ! -x "$PRGDIR"/"$EXECUTABLE" ]; then
    echo "Cannot find $PRGDIR/$EXECUTABLE"
    echo "This file is needed to run this program"
    exit 1
  fi
fi

# Hand off to catalina.sh, forwarding any extra arguments after "start".
exec "$PRGDIR"/"$EXECUTABLE" start "$@"
#
|
feiyue/maven-framework-project
|
spring-flex-testdrive/tomcat/bin/startup.sh
|
Shell
|
mit
| 1,175 |
#!/bin/sh
# ensure that "rm -rf DIR-with-many-entries" is not O(N^2)
# Copyright (C) 2008-2013 Free Software Foundation, Inc.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
. "${srcdir=.}/tests/init.sh"; path_prepend_ ./src
print_ver_ rm
very_expensive_
# Using rm -rf to remove a 400k-entry directory takes:
# - 9 seconds with the patch, on a 2-yr-old system
# - 350 seconds without the patch, on a high-end system (disk 20-30% faster)
threshold_seconds=60
# The number of entries in our test directory.
n=400000
# Choose a value that is large enough to ensure an accidentally
# regressed rm would require much longer than $threshold_seconds to remove
# the directory.  With n=400k, pre-patch GNU rm would require about 350
# seconds even on a fast disk.  On a relatively modern system, the
# patched version of rm requires about 10 seconds, so even if you
# choose to enable very expensive tests with a disk that is much slower,
# the test should still succeed.
# Skip unless "." is on an ext[34] file system.
# FIXME-maybe: try to find a suitable file system or allow
# the user to specify it via an envvar.
df -T -t ext3 -t ext4dev -t ext4 . \
  || skip_ 'this test runs only on an ext3 or ext4 file system'
# Skip if there are too few inodes free.  Require some slack.
free_inodes=$(stat -f --format=%d .) || framework_failure_
# Require 20% headroom over the n inodes the test will consume (12n/10).
min_free_inodes=$(expr 12 \* $n / 10)
test $min_free_inodes -lt $free_inodes \
  || skip_ "too few free inodes on '.': $free_inodes;" \
      "this test requires at least $min_free_inodes"
# Create the n-entry directory; $ok records whether every step of the
# && chain succeeded so a partial setup is reported as framework failure.
ok=0
start=$(date +%s)
mkdir d &&
cd d &&
seq $n | xargs touch &&
test -f 1 &&
test -f $n &&
cd .. &&
ok=1
test $ok = 1 || framework_failure_
setup_duration=$(expr $(date +%s) - $start)
echo creating a $n-entry directory took $setup_duration seconds
# If set-up took longer than the default $threshold_seconds,
# use the longer set-up duration as the limit.
test $threshold_seconds -lt $setup_duration \
  && threshold_seconds=$setup_duration
start=$(date +%s)
# Run the removal under timeout(1); exit status 124 means the time limit
# was exceeded, i.e. the O(N^2) regression is back.
timeout ${threshold_seconds}s rm -rf d; err=$?
duration=$(expr $(date +%s) - $start)
case $err in
  124) fail=1; echo rm took longer than $threshold_seconds seconds;;
  0) ;;
  *) fail=1;;
esac
echo removing a $n-entry directory took $duration seconds
Exit $fail
|
duythanhphan/coreutils
|
tests/rm/ext3-perf.sh
|
Shell
|
gpl-3.0
| 2,864 |
#!/bin/sh
: ==== start ====
# Reset the UML guest: stop any running IPsec stack and remount the
# scratch filesystems to a clean state.
ipsec setup stop
umount /var/tmp; mount /var/tmp
umount /usr/local; mount /usr/local
# Give this guest its "road warrior" identity: hostname, static address,
# and a default route through the 192.1.3.254 gateway.
hostname road.uml.freeswan.org
ifconfig eth0 inet 192.1.3.194
route delete -net default
route add -net default gw 192.1.3.254
netstat -rn
# Per-test environment, then bring up pluto and load the test connection.
export TESTNAME=xauth-pluto-08
source /testing/pluto/bin/roadlocal.sh
ipsec setup start
ipsec auto --add modecfg-road--eastnet-psk
echo done
|
mcr/bluerose
|
testing/pluto/xauth-pluto-08/roadinit.sh
|
Shell
|
gpl-2.0
| 402 |
#!/bin/sh
test_description='split index mode tests'
GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
. ./test-lib.sh
# We need total control of index splitting here
sane_unset GIT_TEST_SPLIT_INDEX
# Testing a hard coded SHA against an index with an extension
# that can vary from run to run is problematic so we disable
# those extensions.
sane_unset GIT_TEST_FSMONITOR
sane_unset GIT_TEST_INDEX_THREADS
# Create a file named as $1 with content read from stdin.
# Set the file's mtime to a few seconds in the past to avoid racy situations.
create_non_racy_file () {
	# Write stdin to "$1", then backdate the mtime by 5 seconds so the
	# file cannot hit git's racy-timestamp handling (per the helper's
	# header comment above).
	cat >"$1" &&
	test-tool chmtime =-5 "$1"
}
test_expect_success 'setup' '
test_oid_cache <<-EOF
own_v3 sha1:8299b0bcd1ac364e5f1d7768efb62fa2da79a339
own_v3 sha256:38a6d2925e3eceec33ad7b34cbff4e0086caa0daf28f31e51f5bd94b4a7af86b
base_v3 sha1:39d890139ee5356c7ef572216cebcd27aa41f9df
base_v3 sha256:c9baeadf905112bf6c17aefbd7d02267afd70ded613c30cafed2d40cb506e1ed
own_v4 sha1:432ef4b63f32193984f339431fd50ca796493569
own_v4 sha256:6738ac6319c25b694afa7bcc313deb182d1a59b68bf7a47b4296de83478c0420
base_v4 sha1:508851a7f0dfa8691e9f69c7f055865389012491
base_v4 sha256:3177d4adfdd4b6904f7e921d91d715a471c0dde7cf6a4bba574927f02b699508
EOF
'
test_expect_success 'enable split index' '
git config splitIndex.maxPercentChange 100 &&
git update-index --split-index &&
test-tool dump-split-index .git/index >actual &&
indexversion=$(test-tool index-version <.git/index) &&
# NEEDSWORK: Stop hard-coding checksums.
if test "$indexversion" = "4"
then
own=$(test_oid own_v4)
base=$(test_oid base_v4)
else
own=$(test_oid own_v3)
base=$(test_oid base_v3)
fi &&
cat >expect <<-EOF &&
own $own
base $base
replacements:
deletions:
EOF
test_cmp expect actual
'
test_expect_success 'add one file' '
create_non_racy_file one &&
git update-index --add one &&
git ls-files --stage >ls-files.actual &&
cat >ls-files.expect <<-EOF &&
100644 $EMPTY_BLOB 0 one
EOF
test_cmp ls-files.expect ls-files.actual &&
test-tool dump-split-index .git/index | sed "/^own/d" >actual &&
cat >expect <<-EOF &&
base $base
100644 $EMPTY_BLOB 0 one
replacements:
deletions:
EOF
test_cmp expect actual
'
test_expect_success 'disable split index' '
git update-index --no-split-index &&
git ls-files --stage >ls-files.actual &&
cat >ls-files.expect <<-EOF &&
100644 $EMPTY_BLOB 0 one
EOF
test_cmp ls-files.expect ls-files.actual &&
BASE=$(test-tool dump-split-index .git/index | sed -n "s/^own/base/p") &&
test-tool dump-split-index .git/index | sed "/^own/d" >actual &&
cat >expect <<-EOF &&
not a split index
EOF
test_cmp expect actual
'
test_expect_success 'enable split index again, "one" now belongs to base index"' '
git update-index --split-index &&
git ls-files --stage >ls-files.actual &&
cat >ls-files.expect <<-EOF &&
100644 $EMPTY_BLOB 0 one
EOF
test_cmp ls-files.expect ls-files.actual &&
test-tool dump-split-index .git/index | sed "/^own/d" >actual &&
cat >expect <<-EOF &&
$BASE
replacements:
deletions:
EOF
test_cmp expect actual
'
test_expect_success 'modify original file, base index untouched' '
echo modified | create_non_racy_file one &&
file1_blob=$(git hash-object one) &&
git update-index one &&
git ls-files --stage >ls-files.actual &&
cat >ls-files.expect <<-EOF &&
100644 $file1_blob 0 one
EOF
test_cmp ls-files.expect ls-files.actual &&
test-tool dump-split-index .git/index | sed "/^own/d" >actual &&
q_to_tab >expect <<-EOF &&
$BASE
100644 $file1_blob 0Q
replacements: 0
deletions:
EOF
test_cmp expect actual
'
test_expect_success 'add another file, which stays index' '
create_non_racy_file two &&
git update-index --add two &&
git ls-files --stage >ls-files.actual &&
cat >ls-files.expect <<-EOF &&
100644 $file1_blob 0 one
100644 $EMPTY_BLOB 0 two
EOF
test_cmp ls-files.expect ls-files.actual &&
test-tool dump-split-index .git/index | sed "/^own/d" >actual &&
q_to_tab >expect <<-EOF &&
$BASE
100644 $file1_blob 0Q
100644 $EMPTY_BLOB 0 two
replacements: 0
deletions:
EOF
test_cmp expect actual
'
test_expect_success 'remove file not in base index' '
git update-index --force-remove two &&
git ls-files --stage >ls-files.actual &&
cat >ls-files.expect <<-EOF &&
100644 $file1_blob 0 one
EOF
test_cmp ls-files.expect ls-files.actual &&
test-tool dump-split-index .git/index | sed "/^own/d" >actual &&
q_to_tab >expect <<-EOF &&
$BASE
100644 $file1_blob 0Q
replacements: 0
deletions:
EOF
test_cmp expect actual
'
test_expect_success 'remove file in base index' '
git update-index --force-remove one &&
git ls-files --stage >ls-files.actual &&
test_must_be_empty ls-files.actual &&
test-tool dump-split-index .git/index | sed "/^own/d" >actual &&
cat >expect <<-EOF &&
$BASE
replacements:
deletions: 0
EOF
test_cmp expect actual
'
test_expect_success 'add original file back' '
create_non_racy_file one &&
git update-index --add one &&
git ls-files --stage >ls-files.actual &&
cat >ls-files.expect <<-EOF &&
100644 $EMPTY_BLOB 0 one
EOF
test_cmp ls-files.expect ls-files.actual &&
test-tool dump-split-index .git/index | sed "/^own/d" >actual &&
cat >expect <<-EOF &&
$BASE
100644 $EMPTY_BLOB 0 one
replacements:
deletions: 0
EOF
test_cmp expect actual
'
test_expect_success 'add new file' '
create_non_racy_file two &&
git update-index --add two &&
git ls-files --stage >actual &&
cat >expect <<-EOF &&
100644 $EMPTY_BLOB 0 one
100644 $EMPTY_BLOB 0 two
EOF
test_cmp expect actual
'
test_expect_success 'unify index, two files remain' '
git update-index --no-split-index &&
git ls-files --stage >ls-files.actual &&
cat >ls-files.expect <<-EOF &&
100644 $EMPTY_BLOB 0 one
100644 $EMPTY_BLOB 0 two
EOF
test_cmp ls-files.expect ls-files.actual &&
test-tool dump-split-index .git/index | sed "/^own/d" >actual &&
cat >expect <<-EOF &&
not a split index
EOF
test_cmp expect actual
'
test_expect_success 'rev-parse --shared-index-path' '
test_create_repo split-index &&
(
cd split-index &&
git update-index --split-index &&
echo .git/sharedindex* >expect &&
git rev-parse --shared-index-path >actual &&
test_cmp expect actual &&
mkdir subdirectory &&
cd subdirectory &&
echo ../.git/sharedindex* >expect &&
git rev-parse --shared-index-path >actual &&
test_cmp expect actual
)
'
test_expect_success 'set core.splitIndex config variable to true' '
git config core.splitIndex true &&
create_non_racy_file three &&
git update-index --add three &&
git ls-files --stage >ls-files.actual &&
cat >ls-files.expect <<-EOF &&
100644 $EMPTY_BLOB 0 one
100644 $EMPTY_BLOB 0 three
100644 $EMPTY_BLOB 0 two
EOF
test_cmp ls-files.expect ls-files.actual &&
BASE=$(test-tool dump-split-index .git/index | grep "^base") &&
test-tool dump-split-index .git/index | sed "/^own/d" >actual &&
cat >expect <<-EOF &&
$BASE
replacements:
deletions:
EOF
test_cmp expect actual
'
test_expect_success 'set core.splitIndex config variable to false' '
git config core.splitIndex false &&
git update-index --force-remove three &&
git ls-files --stage >ls-files.actual &&
cat >ls-files.expect <<-EOF &&
100644 $EMPTY_BLOB 0 one
100644 $EMPTY_BLOB 0 two
EOF
test_cmp ls-files.expect ls-files.actual &&
test-tool dump-split-index .git/index | sed "/^own/d" >actual &&
cat >expect <<-EOF &&
not a split index
EOF
test_cmp expect actual
'
test_expect_success 'set core.splitIndex config variable back to true' '
git config core.splitIndex true &&
create_non_racy_file three &&
git update-index --add three &&
BASE=$(test-tool dump-split-index .git/index | grep "^base") &&
test-tool dump-split-index .git/index | sed "/^own/d" >actual &&
cat >expect <<-EOF &&
$BASE
replacements:
deletions:
EOF
test_cmp expect actual &&
create_non_racy_file four &&
git update-index --add four &&
test-tool dump-split-index .git/index | sed "/^own/d" >actual &&
cat >expect <<-EOF &&
$BASE
100644 $EMPTY_BLOB 0 four
replacements:
deletions:
EOF
test_cmp expect actual
'
test_expect_success 'check behavior with splitIndex.maxPercentChange unset' '
git config --unset splitIndex.maxPercentChange &&
create_non_racy_file five &&
git update-index --add five &&
BASE=$(test-tool dump-split-index .git/index | grep "^base") &&
test-tool dump-split-index .git/index | sed "/^own/d" >actual &&
cat >expect <<-EOF &&
$BASE
replacements:
deletions:
EOF
test_cmp expect actual &&
create_non_racy_file six &&
git update-index --add six &&
test-tool dump-split-index .git/index | sed "/^own/d" >actual &&
cat >expect <<-EOF &&
$BASE
100644 $EMPTY_BLOB 0 six
replacements:
deletions:
EOF
test_cmp expect actual
'
test_expect_success 'check splitIndex.maxPercentChange set to 0' '
git config splitIndex.maxPercentChange 0 &&
create_non_racy_file seven &&
git update-index --add seven &&
BASE=$(test-tool dump-split-index .git/index | grep "^base") &&
test-tool dump-split-index .git/index | sed "/^own/d" >actual &&
cat >expect <<-EOF &&
$BASE
replacements:
deletions:
EOF
test_cmp expect actual &&
create_non_racy_file eight &&
git update-index --add eight &&
BASE=$(test-tool dump-split-index .git/index | grep "^base") &&
test-tool dump-split-index .git/index | sed "/^own/d" >actual &&
cat >expect <<-EOF &&
$BASE
replacements:
deletions:
EOF
test_cmp expect actual
'
test_expect_success 'shared index files expire after 2 weeks by default' '
create_non_racy_file ten &&
git update-index --add ten &&
test $(ls .git/sharedindex.* | wc -l) -gt 2 &&
just_under_2_weeks_ago=$((5-14*86400)) &&
test-tool chmtime =$just_under_2_weeks_ago .git/sharedindex.* &&
create_non_racy_file eleven &&
git update-index --add eleven &&
test $(ls .git/sharedindex.* | wc -l) -gt 2 &&
just_over_2_weeks_ago=$((-1-14*86400)) &&
test-tool chmtime =$just_over_2_weeks_ago .git/sharedindex.* &&
create_non_racy_file twelve &&
git update-index --add twelve &&
test $(ls .git/sharedindex.* | wc -l) -le 2
'
test_expect_success 'check splitIndex.sharedIndexExpire set to 16 days' '
git config splitIndex.sharedIndexExpire "16.days.ago" &&
test-tool chmtime =$just_over_2_weeks_ago .git/sharedindex.* &&
create_non_racy_file thirteen &&
git update-index --add thirteen &&
test $(ls .git/sharedindex.* | wc -l) -gt 2 &&
just_over_16_days_ago=$((-1-16*86400)) &&
test-tool chmtime =$just_over_16_days_ago .git/sharedindex.* &&
create_non_racy_file fourteen &&
git update-index --add fourteen &&
test $(ls .git/sharedindex.* | wc -l) -le 2
'
test_expect_success 'check splitIndex.sharedIndexExpire set to "never" and "now"' '
git config splitIndex.sharedIndexExpire never &&
just_10_years_ago=$((-365*10*86400)) &&
test-tool chmtime =$just_10_years_ago .git/sharedindex.* &&
create_non_racy_file fifteen &&
git update-index --add fifteen &&
test $(ls .git/sharedindex.* | wc -l) -gt 2 &&
git config splitIndex.sharedIndexExpire now &&
just_1_second_ago=-1 &&
test-tool chmtime =$just_1_second_ago .git/sharedindex.* &&
create_non_racy_file sixteen &&
git update-index --add sixteen &&
test $(ls .git/sharedindex.* | wc -l) -le 2
'
test_expect_success POSIXPERM 'same mode for index & split index' '
git init same-mode &&
(
cd same-mode &&
test_commit A &&
test_modebits .git/index >index_mode &&
test_must_fail git config core.sharedRepository &&
git -c core.splitIndex=true status &&
shared=$(ls .git/sharedindex.*) &&
case "$shared" in
*" "*)
# we have more than one???
false ;;
*)
test_modebits "$shared" >split_index_mode &&
test_cmp index_mode split_index_mode ;;
esac
)
'
while read -r mode modebits
do
test_expect_success POSIXPERM "split index respects core.sharedrepository $mode" '
# Remove existing shared index files
git config core.splitIndex false &&
git update-index --force-remove one &&
rm -f .git/sharedindex.* &&
# Create one new shared index file
git config core.sharedrepository "$mode" &&
git config core.splitIndex true &&
create_non_racy_file one &&
git update-index --add one &&
echo "$modebits" >expect &&
test_modebits .git/index >actual &&
test_cmp expect actual &&
shared=$(ls .git/sharedindex.*) &&
case "$shared" in
*" "*)
# we have more than one???
false ;;
*)
test_modebits "$shared" >actual &&
test_cmp expect actual ;;
esac
'
done <<\EOF
0666 -rw-rw-rw-
0642 -rw-r---w-
EOF
test_expect_success POSIXPERM,SANITY 'graceful handling when splitting index is not allowed' '
test_create_repo ro &&
(
cd ro &&
test_commit initial &&
git update-index --split-index &&
test -f .git/sharedindex.*
) &&
cp ro/.git/index new-index &&
test_when_finished "chmod u+w ro/.git" &&
chmod u-w ro/.git &&
GIT_INDEX_FILE="$(pwd)/new-index" git -C ro update-index --split-index &&
chmod u+w ro/.git &&
rm ro/.git/sharedindex.* &&
GIT_INDEX_FILE=new-index git ls-files >actual &&
echo initial.t >expected &&
test_cmp expected actual
'
test_expect_success 'writing split index with null sha1 does not write cache tree' '
git config core.splitIndex true &&
git config splitIndex.maxPercentChange 0 &&
git commit -m "commit" &&
{
git ls-tree HEAD &&
printf "160000 commit $ZERO_OID\\tbroken\\n"
} >broken-tree &&
echo "add broken entry" >msg &&
tree=$(git mktree <broken-tree) &&
test_tick &&
commit=$(git commit-tree $tree -p HEAD <msg) &&
git update-ref HEAD "$commit" &&
GIT_ALLOW_NULL_SHA1=1 git reset --hard &&
test_might_fail test-tool dump-cache-tree >cache-tree.out &&
test_line_count = 0 cache-tree.out
'
test_expect_success 'do not refresh null base index' '
test_create_repo merge &&
(
cd merge &&
test_commit initial &&
git checkout -b side-branch &&
test_commit extra &&
git checkout main &&
git update-index --split-index &&
test_commit more &&
# must not write a new shareindex, or we wont catch the problem
git -c splitIndex.maxPercentChange=100 merge --no-edit side-branch 2>err &&
# i.e. do not expect warnings like
# could not freshen shared index .../shareindex.00000...
test_must_be_empty err
)
'
test_done
|
Osse/git
|
t/t1700-split-index.sh
|
Shell
|
gpl-2.0
| 14,136 |
#Copyright 2009,2010 Alex Graves
#
# This file is part of RNNLIB.
#
# RNNLIB is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RNNLIB is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with RNNLIB. If not, see <http://www.gnu.org/licenses/>.
#! /bin/bash
# NOTE(review): UTILS is never used below -- TODO confirm it can be removed.
UTILS=/home/alex/code/neural_net_console-1.0/utils
PL=plot_errors.py

if [ $# = 4 ]
then
	# Quote all expansions so save-file paths or error names containing
	# spaces/glob characters no longer break the pipeline.
	FILE="$1_$2_$3_$4"
	# Take the $4 lines following the "<data_set> errors" header, keep
	# only the requested error type, and strip the leading label column.
	grep -A "$4" "$2 errors" "$1" | grep "$3" | cut -f2 -d" " > "$FILE"
	$PL "$FILE"
#	rm $FILE
else
	echo "usage: plot_errors.sh save_file data_set(train|test|validation) error_type(labelErrorRate,ctcMlError...) num_error_types"
fi
|
j-briggs/rnnlib
|
utils/plot_errors.sh
|
Shell
|
gpl-3.0
| 1,028 |
#! /bin/bash
set -x
set -e
# Record environment details in the CI log.
date
uname -a
echo $HOSTNAME
mkdir $HOME/bin || true
# Fetch a pinned CMake binary distribution (macOS layout) next to the
# checkout and put its bin dir -- plus ~/bin and ~/lib -- on the paths.
CMAKE_BASE=cmake-3.3.0-Darwin-x86_64
cd ..
CMAKE_BIN_DIR=`pwd`/$CMAKE_BASE/CMake.app/Contents/bin
export PATH=$CMAKE_BIN_DIR:$HOME/bin:$PATH
export LD_LIBRARY_PATH=$HOME/lib:$LD_LIBRARY_PATH
wget http://www.cmake.org/files/v3.3/$CMAKE_BASE.tar.gz
tar xfz $CMAKE_BASE.tar.gz
which cmake
cmake --version
# Build and install the third-party dependencies into $HOME:
# double-conversion, gflags (google namespace, shared), glog, and a
# folly checkout (cloned only; presumably built later -- TODO confirm).
git clone https://github.com/floitsch/double-conversion.git
(cd double-conversion; cmake -DCMAKE_INSTALL_PREFIX=$HOME .; make -j 4 && make install)
git clone https://github.com/schuhschuh/gflags.git
(mkdir gflags/build; cd gflags/build; cmake -DCMAKE_INSTALL_PREFIX=$HOME -D GFLAGS_NAMESPACE=google -D BUILD_SHARED_LIBS=on .. && make -j 4 && make install)
svn checkout http://google-glog.googlecode.com/svn/trunk/ glog
( cd glog && ./configure --with-gflags=$HOME --prefix=$HOME && make -j 4 && make install )
git clone https://github.com/facebook/folly.git
pwd ; ls -l
# Return to the wdt checkout; stop tracing before the remaining steps.
cd wdt
#set +e
set +x
|
XiaosongWei/wdt
|
travis_osx.sh
|
Shell
|
bsd-3-clause
| 981 |
#!/bin/bash
# configs/ekk-lm3s9b96/otest/setenv.sh
#
# Copyright (C) 2012 Gregory Nutt. All rights reserved.
# Authors: Gregory Nutt <[email protected]>
# Jose Pablo Rojas V. <[email protected]>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# 3. Neither the name NuttX nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Heuristic sourced-vs-executed check: when run as a program, $_ matches $0.
if [ "$_" = "$0" ] ; then
  echo "You must source this script, not run it!" 1>&2
  exit 1
fi
WD=`pwd`
if [ ! -x "setenv.sh" ]; then
  echo "This script must be executed from the top-level NuttX build directory"
  # NOTE(review): since this file is sourced, `exit 1` here terminates the
  # user's interactive shell; `return 1` would be gentler -- confirm before
  # changing.
  exit 1
fi
# Remember the pristine PATH so re-sourcing does not keep prepending to it.
if [ -z "${PATH_ORIG}" ]; then
  export PATH_ORIG="${PATH}"
fi
# This is the Cygwin path to the location where I installed the CodeSourcery
# toolchain under windows. You will also have to edit this if you install
# the CodeSourcery toolchain in any other location
# export TOOLCHAIN_BIN="/cygdrive/c/Program Files (x86)/CodeSourcery/Sourcery G++ Lite/bin"
# This is the Cygwin path to the location where I build the buildroot
# toolchain.
export TOOLCHAIN_BIN="${WD}/../misc/buildroot/build_arm_nofpu/staging_dir/bin"
# Add the path to the toolchain to the PATH variable
export PATH="${TOOLCHAIN_BIN}:/sbin:/usr/sbin:${PATH_ORIG}"
echo "PATH : ${PATH}"
|
gcds/project_xxx
|
nuttx/configs/ekk-lm3s9b96/ostest/setenv.sh
|
Shell
|
bsd-3-clause
| 2,581 |
#/bin/sh
# takes a -n argument to disable the s3 download for faster testing
# Ensure that all your children are truly dead when you yourself are killed.
# http://www.davidpashley.com/articles/writing-robust-shell-scripts/#id2382181
# trap "kill -- -$BASHPID" INT TERM EXIT
# leave out EXIT for now
trap "kill -- -$BASHPID" INT TERM
echo "BASHPID: $BASHPID"
echo "current PID: $$"
set -o pipefail # trace ERR through pipes
set -o errtrace # trace ERR through 'time command' and other functions
set -o nounset ## set -u : exit the script if you try to use an uninitialised variable
set -o errexit ## set -e : exit the script if any statement returns a non-true return value
# remove any test*xml or TEST*xml in the current dir
rm -f test.*xml
# This gets the h2o.jar
source ./runner_setup.sh "$@"
rm -f h2o-nodes.json
if [[ $HOSTNAME == "lg1" || $HOSTNAME == "ch-63" ]]
then
# in sm land. clean up!
# pssh -h /home/0xdiag/hosts_minus_9_22 -i 'rm -f -r /home/0xdiag/ice*'
python ../four_hour_cloud.py -v -cj pytest_config-jenkins-sm2.json &
CLOUD_IP=10.71.0.100
CLOUD_PORT=54355
else
if [[ $USER == "jenkins" ]]
then
# clean out old ice roots from 0xcust.** (assuming we're going to run as 0xcust..
# only do this if you're jenksin
echo "If we use more machines, expand this cleaning list."
echo "The possibilities should be relatively static over time"
echo "Could be problems if other threads also using that user on these machines at same time"
echo "Could make the rm pattern match a "sourcing job", not just 0xcustomer"
echo "Also: Touch all the 0xcustomer-datasets mnt points, to get autofs to mount them."
echo "Permission rights extend to the top level now, so only 0xcustomer can automount them"
echo "okay to ls the top level here...no secret info..do all the machines we might be using"
for mr in 164 180
do
ssh -i ~/.0xcustomer/0xcustomer_id_rsa [email protected].$mr \
'find /home/0xcustomer/ice* -ctime +3 | xargs rm -rf; cd /mnt/0xcustomer-datasets'
done
python ../four_hour_cloud.py -cj pytest_config-jenkins-174.json &
# make sure this matches what's in the json!
CLOUD_IP=172.16.2.174
CLOUD_PORT=54474
else
if [[ $USER == "kevin" ]]
then
python ../four_hour_cloud.py -cj pytest_config-kevin.json &
# make sure this matches what's in the json!
CLOUD_IP=127.0.0.1
CLOUD_PORT=54355
else
python ../four_hour_cloud.py &
# make sure this matches what the four_hour_cloud.py does!
CLOUD_IP=127.0.0.1
CLOUD_PORT=54321
fi
fi
fi
CLOUD_PID=$!
jobs -l
echo ""
echo "Have to wait until h2o-nodes.json is available from the cloud build. Deleted it above."
echo "spin loop here waiting for it. Since the h2o.jar copy slows each node creation"
echo "it might be 12 secs per node"
while [ ! -f ./h2o-nodes.json ]
do
sleep 5
done
ls -lt ./h2o-nodes.json
# We now have the h2o-nodes.json, that means we started the jvms
# Shouldn't need to wait for h2o cloud here..
# the test should do the normal cloud-stabilize before it does anything.
# n0.doit uses nosetests so the xml gets created on completion. (n0.doit is a single test thing)
# A little '|| true' hack to make sure we don't fail out if this subtest fails
# test_c1_rel has 1 subtest
# This could be a runner, that loops thru a list of tests.
echo "If it exists, pytest_config-<username>.json in this dir will be used"
echo "i.e. pytest_config-jenkins.json"
echo "Used to run as 0xcust.., with multi-node targets (possibly)"
#******************************************************
mySetup() {
    # Prepare the local R environment for the run:
    #   - Rsetup.sh sets up .Renviron, removes any stale local library,
    #     recreates the user library dir, and emits /tmp/libPaths.$USER.cmd
    #   - that generated R script is then run against the cloud at
    #     $CLOUD_IP:$CLOUD_PORT, wrapped by sh2junit.py so this slow,
    #     install-heavy step still produces a JUnit XML result.
    rm -f /tmp/libPaths.$USER.cmd
    ./Rsetup.sh
    cmd="R -f /tmp/libPaths.$USER.cmd --args $CLOUD_IP:$CLOUD_PORT"
    echo "Running this cmd:"
    echo $cmd
    # it's gotten long now because of all the installs
    python ./sh2junit.py -name 'libPaths' -timeout 1800 -- $cmd
}
myR() {
    # Run one RUnit test against the already-running cloud, wrapped by
    # sh2junit.py so it yields a JUnit XML and is killed after a timeout.
    #
    # Arguments: $1 - test path relative to $H2O_R_HOME/tests, WITHOUT the
    #                 .R suffix (also used as the JUnit test name)
    #            $2 - timeout in seconds (defaults to 30)
    # NOTE(review): the script runs under `set -o nounset`, so calling myR
    # with a single argument would abort on the bare $2 below; every current
    # call site passes both arguments.
    #
    # we change dir, but return it to what it was, on the return
    pushd .
    # these are hardwired in the config json used above for the cloud
    # CLOUD_IP=
    # CLOUD_PORT=
    # get_s3_jar.sh now downloads it. We need to tell anqi's wrapper where to find it.
    # with an environment variable
    if [[ -z $2 ]];
    then
        timeout=30 # default to 30
    else
        timeout=$2
    fi
    which R
    R --version
    H2O_R_HOME=../../R
    H2O_PYTHON_HOME=../../py
    # first test will cause an install
    # this is where we downloaded to.
    # notice no version number
    # ../../h2o-1.6.0.1/R/h2oWrapper_1.0.tar.gz
    echo "FIX! we don't need H2OWrapperDir stuff any more???"
    # export H2OWrapperDir="$PWD/../../h2o-downloaded/R"
    # echo "H2OWrapperDir should be $H2OWrapperDir"
    # ls $H2OWrapperDir/h2o*.tar.gz
    # we want $1 used for -name below, to not have .R suffix
    # test paths are always relative to tests
    testDir=$(dirname $1)
    shdir=$H2O_R_HOME/tests/$testDir
    testName=$(basename $1)
    rScript=$testName.R
    echo $rScript
    echo "Will run this cmd in $shdir"
    cmd="R -f $rScript --args $CLOUD_IP:$CLOUD_PORT"
    echo $cmd
    # don't fail on errors, since we want to check the logs in case that has more info!
    set +e
    # executes the $cmd in the target dir, but the logs stay in sandbox here
    # -dir is optional
    python ./sh2junit.py -shdir $shdir -name $testName -timeout $timeout -- $cmd || true
    set -e
    popd
}
H2O_R_HOME=../../R
echo "Okay to run h2oWrapper.R every time for now"
#***********************************************************************
# This is the list of tests
#***********************************************************************
mySetup
# can be slow if it had to reinstall all packages?
# export H2OWrapperDir="$PWD/../../h2o-downloaded/R"
# FIX! if we assume we're always running with a local build, we shouldn't load from here
# echo "Showing the H2OWrapperDir env. variable. Is it .../../h2o-downloaded/R?"
# printenv | grep H2OWrapperDir
#autoGen RUnits
myR ../../R/tests/Utils/runnerSetupPackage 300
# myR ../../R/tests/testdir_munging/histograms/runit_histograms 1200
# these are used to run a single test (from the command line -d -t)
if [[ $TEST == "" ]] || [[ $TESTDIR == "" ]]
then
# have to ignore the Rsandbox dirs that got created in the tests directory
for test in $(find ../../R/tests/ | egrep -v 'Utils|Rsandbox|/results/' | grep 'runit.*\.[rR]' | sed -e 's!\.[rR]$!!');
do
myR $test 300
done
else
myR $TESTDIR/$TEST 300
fi
# airlines is failing summary. put it last
#myR $single/runit_libR_airlines 120
# If this one fals, fail this script so the bash dies
# We don't want to hang waiting for the cloud to terminate.
# produces xml too!
../testdir_single_jvm/n0.doit shutdown/test_shutdown.py
#***********************************************************************
# End of list of tests
#***********************************************************************
if ps -p $CLOUD_PID > /dev/null
then
echo "$CLOUD_PID is still running after shutdown. Will kill"
kill $CLOUD_PID
fi
ps aux | grep four_hour_cloud
jobs -l
echo ""
echo "You can stop this jenkins job now if you want. It's all done"
|
h2oai/h2o
|
py/testdir_release/runner_Runit.sh
|
Shell
|
apache-2.0
| 7,519 |
#!/bin/bash
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Delegate to the bundled AarGeneratorAction binary: use $TEST_SRCDIR when
# set (Bazel test runfiles), otherwise fall back to <script>.runfiles.
exec ${TEST_SRCDIR-$0.runfiles}/src/tools/android/java/com/google/devtools/build/android/AarGeneratorAction "$@"
|
mzhaom/trunk
|
tools/android/aar_generator.sh
|
Shell
|
apache-2.0
| 720 |
#/bin/sh
#
# See: http://boinc.berkeley.edu/trac/wiki/AndroidBuildClient#
#
# Script to compile everything BOINC needs for Android
# Build the BOINC client for each supported Android ABI in turn.
./build_boinc_arm.sh
./build_boinc_x86.sh
./build_boinc_mips.sh
|
AltroCoin/altrocoin
|
android/build_boinc_all.sh
|
Shell
|
gpl-3.0
| 198 |
#!/usr/bin/env bash
set -e

CONF_DIR="/etc/cjdns"

# Generate a config on first run.  Write it with printf rather than the
# previous `echo $conf`: the unquoted expansion word-split the JSON (losing
# all line structure) and exposed it to glob expansion, so any pattern
# characters in the generated config could corrupt the stored file.
if [ ! -f "$CONF_DIR/cjdroute.conf" ]; then
    echo "generate $CONF_DIR/cjdroute.conf"
    conf=$(cjdroute --genconf | cjdroute --cleanconf)
    printf '%s\n' "$conf" > "$CONF_DIR/cjdroute.conf"
fi

# Run in the foreground (required for a container entrypoint) and propagate
# cjdroute's exit status.
cjdroute --nobg < "$CONF_DIR/cjdroute.conf"
exit $?
|
madafoo/cjdns
|
contrib/docker/entrypoint.sh
|
Shell
|
gpl-3.0
| 287 |
#! /bin/sh
# Start from a clean output directory, then run (and time) the LSTM training
# script with its YAML config, teeing stdout+stderr to log.txt.
rm -rf data
(time python3 -u ../lstm.py param.yaml) 2>&1 | tee log.txt
|
nayutaya/tensorflow-rnn-sin
|
ex2/lstm_seq70/run.sh
|
Shell
|
mit
| 82 |
source common.sh
clearStore
# Length of the chain of store paths to register.
max=500
reference=$NIX_STORE_DIR/abcdef
touch $reference
# Register $reference as valid (empty second field, zero references).
(echo $reference && echo && echo 0) | nix-store --register-validity
echo "making registration..."
set +x
# Build a registration list of $max empty store paths 0..max-1 where path N
# lists two references: $reference and path N+1 (the last path references
# $reference twice), forming one long referrer chain.
for ((n = 0; n < $max; n++)); do
    storePath=$NIX_STORE_DIR/$n
    echo -n > $storePath
    ref2=$NIX_STORE_DIR/$((n+1))
    if test $((n+1)) = $max; then
        ref2=$reference
    fi
    echo $storePath; echo; echo 2; echo $reference; echo $ref2
done > $TEST_ROOT/reg_info
set -x
echo "registering..."
nix-store --register-validity < $TEST_ROOT/reg_info
echo "collecting garbage..."
# Root only $reference; every numbered path is then garbage.
ln -sfn $reference "$NIX_STATE_DIR"/gcroots/ref
nix-store --gc
# After GC the Refs table must be empty: the deleted paths' referrer edges
# (including those pointing at the surviving $reference) must be gone.
if [ -n "$(type -p sqlite3)" -a "$(sqlite3 ./test-tmp/db/db.sqlite 'select count(*) from Refs')" -ne 0 ]; then
    echo "referrers not cleaned up"
    exit 1
fi
|
acowley/nix
|
tests/referrers.sh
|
Shell
|
lgpl-2.1
| 812 |
#!/bin/bash
# Collect all run directories whose names start with this script's parent
# directory name, then post-process each: copy data, histogram, render
# PNGs and gnuplot plots.
dir="$(pwd)"
target="$(basename "$dir")"
#target=$1

# Rebuild folderlist.txt from scratch each run; the previous version only
# appended, so re-running the script accumulated duplicate entries.
rm -f folderlist.txt
touch folderlist.txt

# Glob directly instead of parsing `ls` output; with nullglob a missing
# match yields an empty list rather than the literal pattern.
shopt -s nullglob
for run in ../../test/output/runs/"$target"*; do
    basename "$run" >> folderlist.txt
done
shopt -u nullglob

names="$(cat folderlist.txt)"

for name in $names; do
    ./copy_data_date.sh "$name"
done

for name in $names; do
    python vCOMhistogramMass.py "$name"
done

./create_png.sh

gnuplot -e "\
files=system('ls *.gnuplot');
do for [ i in files ] {
load i
}"

./create_plot.sh
|
homeslike/OpticalTweezer
|
scripts/test_p0.5_at0.1/init.sh
|
Shell
|
mit
| 451 |
#!/bin/bash
# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A library of helper functions and constant for the local config.
# Use the config file specified in $KUBE_CONFIG_FILE, or default to
# config-default.sh.
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/cluster/azure/${KUBE_CONFIG_FILE-"config-default.sh"}"
source "${KUBE_ROOT}/cluster/common.sh"
# Invoke the `azure` CLI, retrying (up to 10 times) on transient DNS
# failures ("getaddrinfo ENOTFOUND"). stdout passes straight through via
# fd 3; stderr is captured so it can be replayed only on real failure.
# Returns the CLI's exit status.
function azure_call {
  local -a params=()
  local param
  # the '... in "$@"' is implicit on a for, so doesn't need to be stated.
  for param; do
    params+=("${param}")
  done
  local rc=0
  local stderr
  local count=0
  while [[ count -lt 10 ]]; do
    # 2>&1 >&3: capture stderr in $stderr while stdout escapes to fd 3,
    # which the `done 3>&1` below wires back to the caller's stdout.
    stderr=$(azure "${params[@]}" 2>&1 >&3) && break
    rc=$?
    # Only retry the transient DNS error; any other failure is final.
    if [[ "${stderr}" != *"getaddrinfo ENOTFOUND"* ]]; then
      break
    fi
    count=$(($count + 1))
  done 3>&1
  if [[ "${rc}" -ne 0 ]]; then
    echo "${stderr}" >&2
    return "${rc}"
  fi
}
# Extract a value from JSON on stdin using a Python (2) index expression,
# e.g.: azure ... --json | json_val '["primaryKey"]'.
# NOTE(review): uses Python 2 `print` statement syntax — requires a python2
# interpreter on PATH as `python`.
function json_val () {
	python -c 'import json,sys;obj=json.load(sys.stdin);print obj'$1'';
}
# Verify prereqs: the azure CLI must be installed and a default account
# selected. Also derives names unique to the current account:
#
# Vars set:
#   AZ_HSH - 7-char hash of the account line (used to namespace resources)
#   AZ_STG - storage account name ("kube" + hash)
#   AZ_CS  - cloud service name ($AZ_CS_PREFIX + hash)
#   CONTAINER - blob container name ("kube-" + $TAG)
function verify-prereqs {
  if [[ -z "$(which azure)" ]]; then
    echo "Couldn't find azure in PATH"
    echo "  please install with 'npm install azure-cli'"
    exit 1
  fi

  if [[ -z "$(azure_call account list | grep true)" ]]; then
    echo "Default azure account not set"
    echo "  please set with 'azure account set'"
    exit 1
  fi

  account=$(azure_call account list | grep true)
  # Hash the account line; `md5 -q -s` on macOS, `md5sum` elsewhere.
  # (md5sum appends "  -", but only the first 7 chars are used anyway.)
  if which md5 > /dev/null 2>&1; then
    AZ_HSH=$(md5 -q -s "$account")
  else
    AZ_HSH=$(echo -n "$account" | md5sum)
  fi

  AZ_HSH=${AZ_HSH:0:7}
  AZ_STG=kube$AZ_HSH
  echo "==> AZ_STG: $AZ_STG"

  AZ_CS="$AZ_CS_PREFIX-$AZ_HSH"
  echo "==> AZ_CS: $AZ_CS"

  CONTAINER=kube-$TAG
  echo "==> CONTAINER: $CONTAINER"
}
# Lazily create a scratch directory for this session; it is removed
# automatically when the shell exits. Idempotent: a second call reuses the
# directory created by the first.
#
# Vars set:
#   KUBE_TEMP
function ensure-temp-dir {
  [[ -n "${KUBE_TEMP-}" ]] && return
  KUBE_TEMP=$(mktemp -d -t kubernetes.XXXXXX)
  trap 'rm -rf "${KUBE_TEMP}"' EXIT
}
# Locate the release tarballs to ship to the server, preferring the
# pre-packaged ./server/ location and falling back to a local build under
# _output/release-tars. Exits non-zero if either tarball is missing.
#
# Vars set:
#   SERVER_BINARY_TAR
#   SALT_TAR
function find-release-tars {
  SERVER_BINARY_TAR="${KUBE_ROOT}/server/kubernetes-server-linux-amd64.tar.gz"
  [[ -f "$SERVER_BINARY_TAR" ]] || \
    SERVER_BINARY_TAR="${KUBE_ROOT}/_output/release-tars/kubernetes-server-linux-amd64.tar.gz"
  if [[ ! -f "$SERVER_BINARY_TAR" ]]; then
    echo "!!! Cannot find kubernetes-server-linux-amd64.tar.gz"
    exit 1
  fi

  SALT_TAR="${KUBE_ROOT}/server/kubernetes-salt.tar.gz"
  [[ -f "$SALT_TAR" ]] || \
    SALT_TAR="${KUBE_ROOT}/_output/release-tars/kubernetes-salt.tar.gz"
  if [[ ! -f "$SALT_TAR" ]]; then
    echo "!!! Cannot find kubernetes-salt.tar.gz"
    exit 1
  fi
}
# Take the local tar files and upload them to Azure Storage. They will then be
# downloaded by the master as part of the start up script for the master.
#
# Creates the storage account and blob container on first use, and deletes
# any stale blob of the same name before uploading.
#
# Assumed vars:
#   SERVER_BINARY_TAR
#   SALT_TAR
#   AZ_STG, AZ_LOCATION, CONTAINER (from verify-prereqs / config)
# Vars set:
#   SERVER_BINARY_TAR_URL
#   SALT_TAR_URL
function upload-server-tars() {
  SERVER_BINARY_TAR_URL=
  SALT_TAR_URL=

  echo "==> SERVER_BINARY_TAR: $SERVER_BINARY_TAR"
  echo "==> SALT_TAR: $SALT_TAR"

  echo "+++ Staging server tars to Azure Storage: $AZ_STG"
  # Blob names are just the tarball basenames.
  local server_binary_url="${SERVER_BINARY_TAR##*/}"
  local salt_url="${SALT_TAR##*/}"

  SERVER_BINARY_TAR_URL="https://${AZ_STG}.blob.core.windows.net/$CONTAINER/$server_binary_url"
  SALT_TAR_URL="https://${AZ_STG}.blob.core.windows.net/$CONTAINER/$salt_url"

  echo "==> SERVER_BINARY_TAR_URL: $SERVER_BINARY_TAR_URL"
  echo "==> SALT_TAR_URL: $SALT_TAR_URL"

  echo "--> Checking storage exists..."
  # "grep data" detects a non-empty CLI response, i.e. the resource exists.
  if [[ -z "$(azure_call storage account show $AZ_STG 2>/dev/null | \
    grep data)" ]]; then
    echo "--> Creating storage..."
    azure_call storage account create -l "$AZ_LOCATION" $AZ_STG --type LRS
  fi

  echo "--> Getting storage key..."
  stg_key=$(azure_call storage account keys list $AZ_STG --json | \
    json_val '["primaryKey"]')

  echo "--> Checking storage container exists..."
  if [[ -z "$(azure_call storage container show -a $AZ_STG -k "$stg_key" \
    $CONTAINER 2>/dev/null | grep data)" ]]; then
    echo "--> Creating storage container..."
    # -p Blob: public read access for blobs, so the VMs can download them.
    azure_call storage container create \
      -a $AZ_STG \
      -k "$stg_key" \
      -p Blob \
      $CONTAINER
  fi

  echo "--> Checking server binary exists in the container..."
  if [[ -n "$(azure_call storage blob show -a $AZ_STG -k "$stg_key" \
    $CONTAINER $server_binary_url 2>/dev/null | grep data)" ]]; then
    echo "--> Deleting server binary in the container..."
    azure_call storage blob delete \
      -a $AZ_STG \
      -k "$stg_key" \
      $CONTAINER \
      $server_binary_url
  fi

  echo "--> Uploading server binary to the container..."
  azure_call storage blob upload \
    -a $AZ_STG \
    -k "$stg_key" \
    $SERVER_BINARY_TAR \
    $CONTAINER \
    $server_binary_url

  echo "--> Checking salt data exists in the container..."
  if [[ -n "$(azure_call storage blob show -a $AZ_STG -k "$stg_key" \
    $CONTAINER $salt_url 2>/dev/null | grep data)" ]]; then
    echo "--> Deleting salt data in the container..."
    azure_call storage blob delete \
      -a $AZ_STG \
      -k "$stg_key" \
      $CONTAINER \
      $salt_url
  fi

  echo "--> Uploading salt data to the container..."
  azure_call storage blob upload \
    -a $AZ_STG \
    -k "$stg_key" \
    $SALT_TAR \
    $CONTAINER \
    $salt_url
}
# Detect the information about the minions
#
# Resolves each minion's FQDN by SSHing through the cloud service's per-VM
# port forwards (22001, 22002, ...; the master uses 22000).
#
# Assumed vars:
#   MINION_NAMES
#   NUM_MINIONS, AZ_SSH_KEY, AZ_CS
# Vars set:
#   MINION_NAMES (each entry replaced with the VM's reported hostname -f)
function detect-minions () {
  if [[ -z "$AZ_CS" ]]; then
    verify-prereqs
  fi
  # Brace expansion via eval builds the port list 22001..2200$NUM_MINIONS.
  ssh_ports=($(eval echo "2200{1..$NUM_MINIONS}"))
  for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
    MINION_NAMES[$i]=$(ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p ${ssh_ports[$i]} $AZ_CS.cloudapp.net hostname -f)
  done
}
# Resolve the master's name and public address from the cloud service DNS.
#
# Assumed vars:
#   MASTER_NAME, AZ_CS
# Vars set:
#   KUBE_MASTER
#   KUBE_MASTER_IP
function detect-master () {
  # AZ_CS is derived in verify-prereqs; compute it if we don't have it yet.
  [[ -n "$AZ_CS" ]] || verify-prereqs
  KUBE_MASTER="${MASTER_NAME}"
  KUBE_MASTER_IP="${AZ_CS}.cloudapp.net"
  echo "Using master: $KUBE_MASTER (external IP: $KUBE_MASTER_IP)"
}
# Ensure that we have a password created for validating to the master. Will
# read from kubeconfig current-context if available.
#
# Falls back to user "admin" with a random 16-char alphanumeric password
# when the kubeconfig has no basic-auth credentials.
#
# Vars set:
#   KUBE_USER
#   KUBE_PASSWORD
function get-password {
  get-kubeconfig-basicauth
  if [[ -z "${KUBE_USER}" || -z "${KUBE_PASSWORD}" ]]; then
    KUBE_USER=admin
    # NOTE(review): Python 2 print-statement syntax; needs python2 on PATH.
    KUBE_PASSWORD=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))')
  fi
}
# Instantiate a kubernetes cluster
#
# End-to-end provisioning: stage release tars, generate credentials and
# OpenVPN certs, boot the master and minion VMs with generated start-up
# scripts, then poll until the API server and docker on each minion are up.
#
# Assumed vars
#   KUBE_ROOT
#   <Various vars set in config file>
function kube-up {
  # Make sure we have the tar files staged on Azure Storage
  find-release-tars
  upload-server-tars

  ensure-temp-dir

  get-password
  # htpasswd file grants basic-auth access to the API server.
  python "${KUBE_ROOT}/third_party/htpasswd/htpasswd.py" \
    -b -c "${KUBE_TEMP}/htpasswd" "$KUBE_USER" "$KUBE_PASSWORD"
  local htpasswd
  htpasswd=$(cat "${KUBE_TEMP}/htpasswd")

  # Generate openvpn certs: a throwaway CA, a server cert for the master,
  # and one client cert per minion.
  echo "--> Generating openvpn certs"
  echo 01 > ${KUBE_TEMP}/ca.srl
  openssl genrsa -out ${KUBE_TEMP}/ca.key
  openssl req -new -x509 -days 1095 \
    -key ${KUBE_TEMP}/ca.key \
    -out ${KUBE_TEMP}/ca.crt \
    -subj "/CN=openvpn-ca"

  openssl genrsa -out ${KUBE_TEMP}/server.key
  openssl req -new \
    -key ${KUBE_TEMP}/server.key \
    -out ${KUBE_TEMP}/server.csr \
    -subj "/CN=server"
  openssl x509 -req -days 1095 \
    -in ${KUBE_TEMP}/server.csr \
    -CA ${KUBE_TEMP}/ca.crt \
    -CAkey ${KUBE_TEMP}/ca.key \
    -CAserial ${KUBE_TEMP}/ca.srl \
    -out ${KUBE_TEMP}/server.crt

  for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
    openssl genrsa -out ${KUBE_TEMP}/${MINION_NAMES[$i]}.key
    openssl req -new \
      -key ${KUBE_TEMP}/${MINION_NAMES[$i]}.key \
      -out ${KUBE_TEMP}/${MINION_NAMES[$i]}.csr \
      -subj "/CN=${MINION_NAMES[$i]}"
    openssl x509 -req -days 1095 \
      -in ${KUBE_TEMP}/${MINION_NAMES[$i]}.csr \
      -CA ${KUBE_TEMP}/ca.crt \
      -CAkey ${KUBE_TEMP}/ca.key \
      -CAserial ${KUBE_TEMP}/ca.srl \
      -out ${KUBE_TEMP}/${MINION_NAMES[$i]}.crt
  done

  # Build up start up script for master: inline the certs and config as
  # variables, then append the (comment-stripped) provisioning templates.
  echo "--> Building up start up script for master"
  (
    echo "#!/bin/bash"
    echo "CA_CRT=\"$(cat ${KUBE_TEMP}/ca.crt)\""
    echo "SERVER_CRT=\"$(cat ${KUBE_TEMP}/server.crt)\""
    echo "SERVER_KEY=\"$(cat ${KUBE_TEMP}/server.key)\""
    echo "mkdir -p /var/cache/kubernetes-install"
    echo "cd /var/cache/kubernetes-install"
    echo "readonly MASTER_NAME='${MASTER_NAME}'"
    echo "readonly INSTANCE_PREFIX='${INSTANCE_PREFIX}'"
    echo "readonly NODE_INSTANCE_PREFIX='${INSTANCE_PREFIX}-minion'"
    echo "readonly SERVER_BINARY_TAR_URL='${SERVER_BINARY_TAR_URL}'"
    echo "readonly SALT_TAR_URL='${SALT_TAR_URL}'"
    echo "readonly MASTER_HTPASSWD='${htpasswd}'"
    echo "readonly SERVICE_CLUSTER_IP_RANGE='${SERVICE_CLUSTER_IP_RANGE}'"
    echo "readonly ADMISSION_CONTROL='${ADMISSION_CONTROL:-}'"
    grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/common.sh"
    grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/create-dynamic-salt-files.sh"
    grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/download-release.sh"
    grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/salt-master.sh"
  ) > "${KUBE_TEMP}/master-start.sh"

  # SSH key pair plus a self-signed cert (Azure wants the cert form).
  if [[ ! -f $AZ_SSH_KEY ]]; then
    ssh-keygen -f $AZ_SSH_KEY -N ''
  fi

  if [[ ! -f $AZ_SSH_CERT ]]; then
    openssl req -new -x509 -days 1095 -key $AZ_SSH_KEY -out $AZ_SSH_CERT \
      -subj "/CN=azure-ssh-key"
  fi

  # The virtual network must already exist; this script does not create it.
  if [[ -z "$(azure_call network vnet show "$AZ_VNET" 2>/dev/null | grep data)" ]]; then
    echo error create vnet $AZ_VNET with subnet $AZ_SUBNET
    exit 1
  fi

  echo "--> Starting VM"
  azure_call vm create \
    -w "$AZ_VNET" \
    -n $MASTER_NAME \
    -l "$AZ_LOCATION" \
    -t $AZ_SSH_CERT \
    -e 22000 -P \
    -d ${KUBE_TEMP}/master-start.sh \
    -b $AZ_SUBNET \
    $AZ_CS $AZ_IMAGE $USER

  # Minions get SSH forwarded on 22001..2200N (master is on 22000).
  ssh_ports=($(eval echo "2200{1..$NUM_MINIONS}"))

  #Build up start up script for minions
  echo "--> Building up start up script for minions"
  for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
    (
      echo "#!/bin/bash"
      echo "MASTER_NAME='${MASTER_NAME}'"
      echo "CA_CRT=\"$(cat ${KUBE_TEMP}/ca.crt)\""
      echo "CLIENT_CRT=\"$(cat ${KUBE_TEMP}/${MINION_NAMES[$i]}.crt)\""
      echo "CLIENT_KEY=\"$(cat ${KUBE_TEMP}/${MINION_NAMES[$i]}.key)\""
      echo "MINION_IP_RANGE='${MINION_IP_RANGES[$i]}'"
      grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/common.sh"
      grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/salt-minion.sh"
    ) > "${KUBE_TEMP}/minion-start-${i}.sh"

    echo "--> Starting VM"
    # -c: connect to the existing cloud service created for the master.
    azure_call vm create \
      -c -w "$AZ_VNET" \
      -n ${MINION_NAMES[$i]} \
      -l "$AZ_LOCATION" \
      -t $AZ_SSH_CERT \
      -e ${ssh_ports[$i]} -P \
      -d ${KUBE_TEMP}/minion-start-${i}.sh \
      -b $AZ_SUBNET \
      $AZ_CS $AZ_IMAGE $USER
  done

  # Expose the API server (HTTPS) through the cloud service.
  echo "--> Creating endpoint"
  azure_call vm endpoint create $MASTER_NAME 443

  detect-master > /dev/null

  echo "==> KUBE_MASTER_IP: ${KUBE_MASTER_IP}"

  echo "Waiting for cluster initialization."
  echo
  echo "  This will continually check to see if the API for kubernetes is reachable."
  echo "  This might loop forever if there was some uncaught error during start"
  echo "  up."
  echo

  until curl --insecure --user "${KUBE_USER}:${KUBE_PASSWORD}" --max-time 5 \
    --fail --output /dev/null --silent "https://${KUBE_MASTER_IP}/healthz"; do
    printf "."
    sleep 2
  done

  printf "\n"
  echo "Kubernetes cluster created."

  export KUBE_CERT="/tmp/$RANDOM-kubecfg.crt"
  export KUBE_KEY="/tmp/$RANDOM-kubecfg.key"
  export CA_CERT="/tmp/$RANDOM-kubernetes.ca.crt"
  export CONTEXT="azure_${INSTANCE_PREFIX}"

  # TODO: generate ADMIN (and KUBELET) tokens and put those in the master's
  # config file. Distribute the same way the htpasswd is done.
  # Fetch the client credentials from the master (umask keeps them 0600).
  (umask 077
   ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p 22000 $AZ_CS.cloudapp.net \
     sudo cat /srv/kubernetes/kubecfg.crt >"${KUBE_CERT}" 2>/dev/null
   ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p 22000 $AZ_CS.cloudapp.net \
     sudo cat /srv/kubernetes/kubecfg.key >"${KUBE_KEY}" 2>/dev/null
   ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p 22000 $AZ_CS.cloudapp.net \
     sudo cat /srv/kubernetes/ca.crt >"${CA_CERT}" 2>/dev/null

   create-kubeconfig
  )

  echo "Sanity checking cluster..."
  echo
  echo "  This will continually check the minions to ensure docker is"
  echo "  installed. This is usually a good indicator that salt has"
  echo "  successfully provisioned. This might loop forever if there was"
  echo "  some uncaught error during start up."
  echo

  # Basic sanity checking
  for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
    # Make sure docker is installed
    echo "--> Making sure docker is installed on ${MINION_NAMES[$i]}."
    until ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p ${ssh_ports[$i]} \
      $AZ_CS.cloudapp.net which docker > /dev/null 2>&1; do
      printf "."
      sleep 2
    done
  done

  echo
  echo "Kubernetes cluster is running.  The master is running at:"
  echo
  echo "  https://${KUBE_MASTER_IP}"
  echo
  echo "The user name and password to use is located in ${KUBECONFIG}."
  echo
}
# Tear down the cluster: delete the master VM, then every minion VM,
# including their attached disks (-b) without prompting (-q). Errors are
# tolerated (set +e) so a partially-created cluster can still be removed.
function kube-down {
  echo "Bringing down cluster"
  set +e
  azure_call vm delete $MASTER_NAME -b -q
  local node
  for node in "${MINION_NAMES[@]}"; do
    azure_call vm delete ${node} -b -q
  done
  wait
}
# Update a kubernetes cluster with latest source
#function kube-push {
# detect-project
# detect-master
# Make sure we have the tar files staged on Azure Storage
# find-release-tars
# upload-server-tars
# (
# echo "#! /bin/bash"
# echo "mkdir -p /var/cache/kubernetes-install"
# echo "cd /var/cache/kubernetes-install"
# echo "readonly SERVER_BINARY_TAR_URL='${SERVER_BINARY_TAR_URL}'"
# echo "readonly SALT_TAR_URL='${SALT_TAR_URL}'"
# grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/common.sh"
# grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/download-release.sh"
# echo "echo Executing configuration"
# echo "sudo salt '*' mine.update"
# echo "sudo salt --force-color '*' state.highstate"
# ) | gcutil ssh --project "$PROJECT" --zone "$ZONE" "$KUBE_MASTER" sudo bash
# get-password
# echo
# echo "Kubernetes cluster is running. The master is running at:"
# echo
# echo " https://${KUBE_MASTER_IP}"
# echo
# echo "The user name and password to use is located in ~/.kubernetes_auth."
# echo
#}
# -----------------------------------------------------------------------------
# Cluster specific test helpers used from hack/e2e-test.sh

# Execute prior to running tests to build a release if required for env.
#
# Delegates entirely to the top-level release script.
#
# Assumed Vars:
#   KUBE_ROOT
function test-build-release {
  # Make a release
  "${KUBE_ROOT}/build/release.sh"
}
# SSH to a node by name ($1) and run a command ($2).
function ssh-to-node {
  local node="$1"
  local cmd="$2"
  # Fix: plain ssh has no "--ssh_arg" flag (that option belongs to gcutil);
  # pass the client option directly with -o instead.
  ssh -o LogLevel=quiet "${node}" "${cmd}"
}
# Restart the kube-proxy daemon on node $1 via its SysV init script.
function restart-kube-proxy {
  local node="$1"
  ssh-to-node "${node}" "sudo /etc/init.d/kube-proxy restart"
}
# Restart the kube-apiserver on the master node ($1) via its init script.
# (The original comment said "kube-proxy on the master", which was a
# copy-paste slip — the command below restarts kube-apiserver.)
function restart-apiserver {
  ssh-to-node "$1" "sudo /etc/init.d/kube-apiserver restart"
}
|
hunt/kubernetes
|
cluster/azure/util.sh
|
Shell
|
apache-2.0
| 17,056 |
#!/bin/sh
#
# Show only interfaces that have an IPv4 address
#
# [email protected]
#
# sed logic: stash each interface header line ("N: ifname ...") in the hold
# space (h); when an "inet a.b.c.d" line appears, swap in the held header
# (x), print it, swap back (x), and print the inet line — so each interface
# header is shown together with its IPv4 address line.
ip addr | sed -nre '{/^[0-9]+:/h;/inet ([0-9]+\.){3}[0-9]+/{x;p;x;p}}'
|
VIRL-Open/virl-utils
|
ipv4.sh
|
Shell
|
isc
| 161 |
#!/usr/bin/bash
# Development environment launcher for the "elementar pkg" project: selects
# the DB2/WebSphere modules, points NGI at the packaging classes, then opens
# an interactive zsh in the project directory.
export NGI_MODULES_DBMS=db2
export NGI_MODULES_APP=was
export NGI_PKG_HOME=/d/elementar/workspace/de.bit.elementar.vertrag.system/pkg/target/classes
# Load the NGI helper environment (requires NGI_HOME set by the caller).
source $NGI_HOME/bin/ngi.sh
# Set the terminal window title via the xterm OSC escape sequence.
echo -ne "\e]0;elementar pkg\a"
cd /d/elementar/workspace/de.bit.elementar.vertrag.system/pkg
zsh
|
jrrsmaug/dotfiles
|
env/.env/elementar-pkg.sh
|
Shell
|
mit
| 293 |
#!/bin/bash
# Cross-compile the TA_SDL game for 64-bit Windows using MinGW-w64.
# Debug build: -ggdb symbols plus the project's __CSGL_DEBUG__ flag.
# Library order matters for MinGW linking — do not reorder the -l flags.
CXX=x86_64-w64-mingw32-g++
SRC="ta_sdl_platform.cpp ta_sdl_game.cpp Logging.cpp"
LIBS="-lmingw32 -lSDL2main -lglew32 -lglu32 -lopengl32 -lSDL2 -lpng -lz"
$CXX -fdiagnostics-color -std=c++11 $SRC -mwindows -mconsole $LIBS -ggdb -D__CSGL_DEBUG__ -o ta_sdl_64.exe
|
Christof-Sigel/TA_SDL
|
build.w64.sh
|
Shell
|
mit
| 242 |
#!/bin/bash
# SGE batch script: run the WPS geogrid preprocessor under MPI.
. /etc/profile.d/modules.sh

# Module stuff — load the I/O libraries, scheduler, MPI and NCL/NCO tools.
module load netcdf
module load szip
module load sge
module load openmpi
module add ncl
module add nco
# Use the non-OPeNDAP NCL build on compute nodes.
module switch ncl/opendap ncl/nodap

#
# Active comments for SGE
# (the "#$" lines below are parsed by qsub as job options — do not edit
# them casually)
#
#$ -S /bin/bash
#$ -N geogrid
#$ -v MPI_HOME
#$ -v LD_LIBRARY_PATH
#$ -cwd
#$ -q all.q
#$ -pe ompi 1
#$ -j yes

# Launch geogrid.exe under mpirun (single slot; see "-pe ompi 1" above).
CMD="$MPI_HOME/bin/mpirun geogrid.exe"
echo $CMD
$CMD
|
samwisehawkins/wrftools
|
scripts/geogrid/geogrid.sh
|
Shell
|
mit
| 400 |
# Run the bogo example model for each input variant in turn.
for version in 1.0 1.1 1.2; do
  run_mod.sh "$version"
done
|
NMFS-toolbox/AMAK
|
examples/bogo/runall.sh
|
Shell
|
mit
| 45 |
#!/bin/zsh
echo 'GROK: run_topic_installs'

###############################################################################
# DEFINE INSTALL FUNCTIONS
#

# Print a banner naming the topic about to be installed.
log_header() {
  echo ''
  echo '==============================================================================='
  echo 'INSTALL TOPIC:' $*
  echo '==============================================================================='
}

###############################################################################
# RUN TOPIC INSTALL SCRIPTS
#
# Run all `install.sh` files in `\topics`.
#

# find the installers and run them iteratively
# (sourced, not executed, so installers can modify this shell's environment;
# $GROK_TOPICS must be set by the caller)
for installer in $(find $GROK_TOPICS -name install.sh); do
  # Strip the trailing /install.sh so the banner shows the topic directory.
  log_header $(basename ${installer/\/install.sh});
  source $installer;
done
|
levithomason/grok
|
lib/run_topic_installs.zsh
|
Shell
|
mit
| 728 |
#!/usr/bin/env ruby
# Shared configuration and helper utilities for the Codeup laptop setup
# script. `extend self` makes every instance method callable as a module
# method (Support.foo).
module Support
  extend self

  # Where the Vagrant LAMP project will be checked out.
  @@repo_path = "~/vagrant-lamp"
  # Zip archive of the Vagrant setup repository.
  @@repo_url = "https://github.com/gocodeup/Codeup-Vagrant-Setup/archive/master.zip"
  # Ordered list of setup steps executed by the driver at the bottom of
  # the file (each maps to a method in Steps).
  @@steps = ["start", "xcode", "homebrew", "vagrant_lamp", "git", "sublime", "final"]

  def steps
    @@steps
  end

  def repo_path
    @@repo_path
  end

  def repo_url
    @@repo_url
  end

  # Download the repository zip and unpack it to repo_path.
  # NOTE(review): the local_path parameter is ignored; the unzipped folder
  # is moved to the module-level repo_path instead.
  def git_download(repo_url, local_path)
    system "curl -L --progress-bar -o /tmp/vagrant_lamp.zip " + repo_url
    system "unzip /tmp/vagrant_lamp.zip -d /tmp"
    system "mv /tmp/Codeup-Vagrant-Setup-master " + repo_path
  end

  # Download Sublime Text's Package Control into the given packages folder.
  def subl_pkg_install(package_path)
    system "curl -L --progress-bar -o \"#{package_path}/Package Control.sublime-package\" https://sublime.wbond.net/Package%20Control.sublime-package"
  end

  # Install a Homebrew formula unless it is already present.
  def brew_install(package, *options)
    output = `brew list #{package}`
    return unless output.empty?

    system "brew install #{package} #{options.join ' '}"
  end

  # Install a Homebrew cask unless it is already installed.
  def brew_cask_install(package, *options)
    output = `brew cask info #{package}`
    return unless output.include? 'Not installed'

    system "brew cask install #{package} #{options.join ' '}"
  end

  # Return the install path of a Mac application, checking both the user
  # and system /Applications folders; nil if not found.
  def app_path(name)
    path = "/Applications/#{name}.app"
    ["~#{path}", path].each do |full_path|
      return full_path if File.directory? full_path
    end

    return nil
  end

  # True if the named application is installed.
  def app?(name)
    !self.app_path(name).nil?
  end

  # True if the Xcode command line tools are installed.
  def xcode?
    `xcode-select --print-path 2>&1`

    $?.success?
  end
end
module Steps
extend self
def heading(description)
description = "-- #{description} "
description = description.ljust(80, '-')
puts
puts "\e[32m#{description}\e[0m"
end
def block(description)
line = ''
description.split(/ /).each do |word|
if line.length + word.length > 76
puts " #{line}"
line = ''
end
line += "#{word} "
end
puts " #{line}"
puts "\n Press 'Return' to continue."
gets
end
def do_step(name)
case name
when "start"
self.heading "Welcome!"
when "xcode"
self.heading "Setting up Xcode Commandline Tools"
when "homebrew"
self.heading "Setting up Homebrew"
when "vagrant"
self.heading "Setting up Vagrant Box"
when "vagrant_lamp"
self.heading "Checking Out Vagrant LAMP Repository"
when "git"
self.heading "Configuring SSH Keys for Git"
when "final"
self.heading "Final Configuration Steps"
when "sublime"
self.heading "Setting up the Sublime Text editor"
else
raise "Unknown step #{name}"
end
self.send name
end
def start
description = "This script will go through and make sure you have all the tools you need to get started as a Codeup student. "
description+= "At several points through this process, you may be asked for a password; this is normal. "
description+= "Enter the password you use to log in to your computer or otherwise install software normally."
self.block description
end
def xcode
if Support.xcode?
self.block "Xcode commandline tool are already installed, moving on."
else
description = "We need to install some commandline tools for Xcode. When you press 'Return', a dialog will pop up "
description+= "with several options. Click the 'Install' button and wait. Once the process completes, come back here "
description+= "and we will proceed with the next step."
self.block description
system "xcode-select --install"
while !Support.xcode?
sleep 1
end
end
end
def homebrew
`which brew`
if $?.success?
description = "Homebrew is already installed. We will check to make sure our other utilities--including Ansible, Vagrant, "
description+= "and VirutalBox--are also set up."
self.block description
else
description = "We will now install a tool called 'Homebrew'. This is a package manager we will use to install several "
description+= "other utilities we will be using in the course, including Ansible, Vagrant, and VirtualBox. "
description+= "You will probably be asked for your password a couple of times through this process; "
description+= "when you type it in, your password will not be displayed on the screen. This is normal."
self.block description
system 'ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"'
end
# Install brew cask
system('brew tap | grep caskroom/cask > /dev/null') || Support.brew_install('caskroom/cask/brew-cask')
# Install ansible
Support.brew_install 'ansible'
# Install Virtual Box
Support.brew_cask_install "virtualbox" unless Support.app? "VirtualBox"
# Install Vagrant
Support.brew_cask_install "vagrant"
end
def vagrant_lamp
full_repo_path = File.expand_path Support.repo_path
if File.directory?(full_repo_path)
self.block "Looks like our project directory has already been checked out. On to the next step."
else
description = "We will now use Git to download our project directory. This project will set up your Vagrant "
description+= "environment. All of your development in the class will be done inside this Vagrant environment."
self.block description
Support.git_download(Support.repo_url, full_repo_path)
end
system "sudo /usr/bin/easy_install passlib"
puts # add an extra line after the output
description = "We're going to start up the vagrant box with the command 'vagrant up'. If the box hasn't already been downloaded "
description+= "this will grab it and configure the internal settings for it. This could take some considerable time so please "
description+= "be patient. Otherwise, it will simply boot up the box and make sure everything is running."
self.block description
system "cd #{full_repo_path} && vagrant up"
end
def git
key_path = File.expand_path "~/.ssh/id_rsa"
unless File.exists?(key_path) && File.exists?("#{key_path}.pub")
description = "We're now going to generate an SSH public/private key pair. This key is like a fingerprint for you "
description+= "on your laptop. We'll use this key for connecting into GitHub without having to enter a password, and "
description+= "when you ultimately deploy your website to a third party server."
self.block description
description = "We will be putting a comment in the SSH key pair as well. Comments can be used to keep track of different "
description+= "keys on different servers. The comment will be formatted as [your name]@codeup."
self.block description
name = ''
while name.empty?
print " Please type in your name and press 'Return'. "
name = gets.chomp
end
system "ssh-keygen -trsa -b2048 -C '#{name}@codeup' -f #{key_path}"
end
system "pbcopy < #{key_path}.pub"
puts " The following is your new SSH key:\n"
puts IO.read(key_path + ".pub")
puts
description = "We've already copied it to the clipboard for you. Now, we are going to take you to the GitHub website "
description+= "where you will add it as one of your keys by clicking the \"Add SSH key\" button and pasting "
description+= "the contents in there."
self.block description
system "open https://github.com/settings/ssh"
self.block "We'll continue once you're done."
end
def sublime
app_path = Support.app_path("Sublime Text") || Support.app_path("Sublime Text 2")
if app_path.nil?
self.block "Looks like Sublime Text hasn't been installed yet. You'll need to take care of that before class starts."
return
end
`which subl`
system "ln -s \"#{app_path}/Contents/SharedSupport/bin/subl\" /usr/local/bin/subl" unless $?.success?
system "git config --global core.editor \"subl -n -w\""
description = "We're going to install the Sublime Text Package Manager. This is a plugin for Sublime that makes "
description+= "it incredibly easy to install other plugins and add functionality to Sublime."
self.block description
support_dir = app_path[/Sublime Text 2/] || "Sublime Text 3"
package_dir = File.expand_path "~/Library/Application Support/#{support_dir}/Installed Packages"
system "mkdir -p \"#{package_dir}\""
Support.subl_pkg_install package_dir
end
def final
if IO.readlines("/etc/hosts").grep(/192\.168\.77\.77\s+codeup\.dev/).empty?
description = "We need to add an entry to your hosts file so we can easily connect to sites in your Vagrant environment. "
description+= "The hosts file is a shortcut for DNS lookup. We are going to put the domain name 'codeup.dev' in the "
description+= "hosts file and point it into your Vagrant environment, allowing you to connect into it without "
description+= "having to memorize IP addresses or ports. This will require you to again put in your password."
self.block description
system "sudo sh -c \"echo '\n192.168.77.77\tcodeup.dev' >> /etc/hosts\""
end
description = "Now that everything has been configured, we are going to load the codeup.dev site. "
description+= "This is the site landing page running in YOUR vagrant box inside YOUR OWN computer! "
description+= "You should see the Codeup logo as well as some information about PHP. Don't worry too "
description+= "much about what it says for now, we just want to verify that everything is running correctly."
self.block description
system "open http://codeup.dev"
description = "Ok! We've gotten everything setup and you should be ready to go! Thanks for taking the time to "
description+= "get your laptop configured and good luck in the class."
self.block description
puts " _____ _____ _ _ "
puts " | __ \\ / __ \\ | | | |"
puts " | | \\/ ___ | / \\/ ___ __| | ___ _ _ _ __ | |"
puts " | | __ / _ \\ | | / _ \\ / _` |/ _ \\ | | | '_ \\| |"
puts " | |_\\ \\ (_) | | \\__/\\ (_) | (_| | __/ |_| | |_) |_|"
puts " \\____/\\___/ \\____/\\___/ \\__,_|\\___|\\__,_| .__/(_)"
puts " | | "
puts " |_| "
end
end
# Drive every setup step in order; if anything raises, print support
# instructions plus the error details instead of a raw stack trace.
begin
  Support.steps.each { |step| Steps.do_step(step) }
rescue => err
  [
    "Oh no! Looks like something has gone wrong in the process.",
    "Please copy the contents of this window and paste them into an",
    "eMail to <[email protected]>.",
    "We're sorry about the inconvenience; we'll get the error resolved as quickly",
    "as we can and let you know when you may re-run the setup process.",
    "Error: " + err.message,
  ].each { |line| puts line }
  puts err.backtrace.join("\n")
end
|
bbatsche/LAMP-Setup-Script
|
lamp-setup.command
|
Shell
|
mit
| 10,941 |
# Generate a random Maven master password and write settings-security.xml.
# The password seed is the current epoch time hashed and truncated.
password=$(date +%s | shasum -a 256 | base64 | head -c 32)
# Keep only the "{...}" encrypted blob from mvn's output.
master_password=$(mvn --encrypt-master-password "$password" | grep -o '{.*}')
# Fix: the original used `echo "...\n..."`, but plain echo does not expand
# \n in bash, producing a single-line file with literal backslashes. printf
# emits real newlines portably.
printf '<settingsSecurity>\n<master>%s</master>\n</settingsSecurity>\n' "$master_password" > ~/.m2/settings-security.xml
|
FortitudeSolutions/puppet-maven
|
files/create_settings_security.sh
|
Shell
|
mit
| 250 |
#!/bin/zsh
# Bootstrap a self-contained LuaJIT + LuaRocks + moonscript environment for
# smartplayer. Builds into .root via a /tmp symlink (the prefix baked into
# the binaries), with resumable stages tracked in .continue_stage.
export SMPL_PATH="$(dirname $(readlink -f $0))"
export SMPL_REAL_ROOT="$SMPL_PATH/.root"
export SMPL_SRC="$SMPL_PATH/.src"
# Random /tmp path used as the install prefix; symlinked to .root below.
export SMPL_ROOT="/tmp/.smpl.$(uuidgen -t)-$(uuidgen -r)"
continue_stage=n
if [ -f "$SMPL_PATH/.continue_stage" ]
then continue_stage=$(cat "$SMPL_PATH/.continue_stage")
fi
if [ -f "$SMPL_PATH/.continue_root" ]
then SMPL_ROOT=$(cat "$SMPL_PATH/.continue_root")
fi
# Each case arm ends with ";&" (zsh/bash fall-through), so execution resumes
# at the saved stage and then runs every later stage in order.
case $continue_stage in
	n)
		rm -f "$SMPL_PATH/.continue_stage"
		rm -rf "$SMPL_ROOT" "$SMPL_SRC" "$SMPL_REAL_ROOT"
		mkdir -p "$SMPL_REAL_ROOT" "$SMPL_SRC"
		ln -s "$SMPL_REAL_ROOT" "$SMPL_ROOT"
		echo "$SMPL_ROOT" > "$SMPL_PATH/.continue_root"
		;&
	luajit)
		# Build LuaJIT v2.1 from upstream git into the prefix.
		echo "luajit" > "$SMPL_PATH/.continue_stage"
		cd $SMPL_SRC
		git clone http://luajit.org/git/luajit-2.0.git luajit || exit
		cd luajit
		git checkout v2.1
		git pull
		make amalg PREFIX=$SMPL_ROOT CPATH=$SMPL_ROOT/include LIBRARY_PATH=$SMPL_ROOT/lib && \
		make install PREFIX=$SMPL_ROOT || exit
		ln -sf luajit-2.1.0-alpha $SMPL_ROOT/bin/luajit
		;&
	luarocks)
		# Build LuaRocks against the freshly built LuaJIT.
		echo "luarocks" > "$SMPL_PATH/.continue_stage"
		cd $SMPL_SRC
		git clone git://github.com/keplerproject/luarocks.git || exit
		cd luarocks
		./configure --prefix=$SMPL_ROOT \
			--lua-version=5.1 \
			--lua-suffix=jit \
			--with-lua=$SMPL_ROOT \
			--with-lua-include=$SMPL_ROOT/include/luajit-2.1 \
			--with-lua-lib=$SMPL_ROOT/lib/lua/5.1 \
			--force-config && \
		make build && make install || exit
		;&
	moonscript)
		echo "moonscript" > "$SMPL_PATH/.continue_stage"
		$SMPL_ROOT/bin/luarocks install moonscript
		;&
	luafilesystem)
		echo "luafilesystem" > "$SMPL_PATH/.continue_stage"
		$SMPL_ROOT/bin/luarocks install luafilesystem
		;&
	lgi)
		echo "lgi" > "$SMPL_PATH/.continue_stage"
		$SMPL_ROOT/bin/luarocks install lgi
		;&
	wrappers)
		echo "wrappers" > "$SMPL_PATH/.continue_stage"
		# wrappers: generate the .run launcher, which recreates the /tmp
		# prefix symlink and exports Lua/Moon search paths before exec'ing
		# either its argv or the command named by the symlink used to call
		# it. (Heredoc: "\$" escapes are expanded at *run* time, not now.)
		cat > $SMPL_PATH/.run <<END
#!/bin/zsh
export SMPL_PATH="\$(dirname "\$(readlink -f "\$0")")"
export SMPL_REAL_ROOT="\$SMPL_PATH/.root"
export SMPL_ROOT="$SMPL_ROOT"
[ -e "\$SMPL_ROOT" ] || ln -s "\$SMPL_PATH/.root" \$SMPL_ROOT
export PATH="\$SMPL_ROOT/bin:\$PATH"
export LUA_PATH="./custom_?.lua;\$SMPL_PATH/custom_?.lua;./?.lua;./?/init.lua;\$SMPL_PATH/src/?/init.lua;\$SMPL_PATH/src/?.lua;\$SMPL_PATH/?.lua;\$LUA_PATH;\$SMPL_ROOT/lualib/?.lua;\$SMPL_ROOT/share/luajit-2.1.0-alpha/?.lua;\$SMPL_ROOT/share/lua/5.1/?.lua;\$SMPL_ROOT/share/lua/5.1/?/init.lua"
export LUA_CPATH="./custom_?.so;\$SMPL_PATH/custom_?.so;./?.so;./?/init.so;\$SMPL_PATH/src/?/init.so;\$SMPL_PATH/src/?.so;\$SMPL_PATH/?.so;\$LUA_CPATH;\$SMPL_ROOT/lualib/?.so;\$SMPL_ROOT/share/luajit-2.1.0-alpha/?.so;\$SMPL_ROOT/share/lua/5.1/?.so;\$SMPL_ROOT/share/lua/5.1/?/init.so"
export MOON_PATH="./custom_?.moon;\$SMPL_PATH/custom_?.moon;./?.moon;./?/init.moon;\$SMPL_PATH/src/?/init.moon;\$SMPL_PATH/src/?.moon;\$SMPL_PATH/?.moon;\$MOON_PATH;\$SMPL_ROOT/lualib/?.moon;\$SMPL_ROOT/share/luajit-2.1.0-alpha/?.moon;\$SMPL_ROOT/share/lua/5.1/?.moon;\$SMPL_ROOT/share/lua/5.1/?/init.moon"
export LD_LIBRARY_PATH="\$SMPL_ROOT/lib:\$LD_LIBRARY_PATH"
fn=\$(basename \$0)
if [ "\$fn" = ".run" ]
then exec "\$@"
else
	exec \$fn "\$@"
fi
END
		chmod a+rx $SMPL_PATH/.run
		ln -sf .run $SMPL_PATH/moon
		;&
esac
# cleanup: drop sources, the /tmp symlink and the stage markers.
rm -rf "$SMPL_SRC"
rm -f "$SMPL_ROOT" "$SMPL_PATH/.continue_stage" "$SMPL_PATH/.continue_root"
|
nonchip/smartplayer
|
setup.zsh
|
Shell
|
mit
| 3,489 |
# Run the dotnet build script from the scripts directory.
# Fix: guard the cd — if it fails, the original would have executed
# ./dotnet-build.sh from the wrong directory (or a wrong script entirely).
cd scripts || exit 1
./dotnet-build.sh
|
domino46/Fibon
|
travis-build.sh
|
Shell
|
mit
| 28 |
#!/bin/bash
# Install the hotfix API's runtime: Python 3.6 (via upstream installer
# script) plus the pip requirements. Must run as root.
role=`id -u`
if test $role -ne 0
then
    echo "Operation not permitted"
    exit 1
fi

# Fix: use curl -fsSL so a failed download exits non-zero instead of
# silently piping an error page (or nothing) into bash.
curl -fsSL https://raw.githubusercontent.com/WALL-E/static/master/setup/redhat/install_python36 | bash
pip3.6 install -r requirements.txt
|
hotfix-project/hotfix-api
|
install.sh
|
Shell
|
mit
| 232 |
#!/bin/bash -e
# Container entrypoint: determine the service name from $CMD, argv[1], or
# the container hostname; run the matching init script if present; then run
# supervisord in the foreground with the service's config.
if test -z "$CMD"; then
    if test -n "$1"; then
	CMD="$1"
    else
	CMD="$(hostname)"
    fi
fi

SVD="/usr/bin/supervisord"
SVD_CONF="/etc/supervisor/conf.d/$CMD.conf"
# -n: stay in the foreground so the container keeps running.
SVD_OPTS="-nc $SVD_CONF"
INIT_SCRIPT="/etc/provisioning/$CMD.init.sh"

# A missing supervisor config means the command name is unknown.
if ! test -f "$SVD_CONF"; then
    echo "Unknown command; exiting" >&2; exit 1
fi

if test -f "$INIT_SCRIPT"; then
    # Found an init script; run it
    bash -xe $INIT_SCRIPT
fi

$SVD $SVD_OPTS
|
zultron/docker-provisioning
|
common/entrypoint.sh
|
Shell
|
mit
| 452 |
#!/bin/sh
# Greet the user on startup.
printf '%s\n' "Hello Master Falindir"
|
Falindir/AI
|
SHELL/src/ai.sh
|
Shell
|
mit
| 38 |
# Regenerate the Go protobuf bindings (gogo plugin) for each zigbee service
# definition, writing output next to each .proto file.
protoc --gogo_out=. gateway/gateway.proto
protoc --gogo_out=. nwkmgr/nwkmgr.proto
protoc --gogo_out=. otasrvr/otasrvr.proto
|
ninjasphere/go-zigbee
|
generate.sh
|
Shell
|
mit
| 124 |
#!/bin/bash -e
# Launches the TableTop service as a Unix process
#
# Usage: tableTopService.sh [-j] [-d] start|stop|restart|status [yaml-file]
# The running instance is identified by whichever process is listening on
# $httpPort; stop/status work by port, not by pidfile.
here=$(dirname $0)
memOpts="-Xmx512M"
httpPort=9640
dbgPort=9602
jmxPort=9603
ymlConfigFile="$here/tableTopService.yml"
jar=$here/tableTop-1.0-fat.jar
out=logs/tableTop-service.out

usage()
{
    echo "Usage: $0 [-jmx] [-dbg] start | stop | restart | status [Yaml-file]"
    echo "       -j  enable Java management support (JMX)"
    echo "       -d  enable debugging support (JPDA)"
    exit 1
}

# Start the JVM detached (nohup), optionally with JPDA debugging and an
# unauthenticated JMX listener, then show the resulting process.
function startService
{
    if [ "$DBG" ]; then
        dbgOpts="-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=$dbgPort"
    fi

    # Configure JMX (via unsecured listener on port 1099)
    if [ "$JMX" ]; then
        inetAddr=`ifconfig | grep 'inet addr' | awk '{print $2}' | cut -d: -f2 | grep -v 127.0.0.1`
        jmx=com.sun.management.jmxremote
        jmxOpts="-D$jmx -D$jmx.port=$jmxPort -D$jmx.ssl=false -D$jmx.authenticate=false -Djava.rmi.server.hostname=$inetAddr"
    fi

    mkdir -p $(dirname $out)
    nohup java $memOpts $dbgOpts $jmxOpts -jar $jar server $ymlConfigFile > $out 2>&1 &
    sleep 3
    ps -ef |grep $ymlConfigFile |grep -v grep
}

# Stop the service by TERM-ing whatever PID listens on $httpPort.
function stopService
{
    pid=$(lsof -i tcp:$httpPort -s tcp:LISTEN | awk 'NR!=1 {print $2}')
    if [ "$pid" != "" ]; then
        kill $pid
        sleep 1
    fi
}

# Report whether anything is listening on $httpPort.
function serviceStatus
{
    pid=$(lsof -i tcp:$httpPort -s tcp:LISTEN | awk 'NR!=1 {print $2}')
    name=$(basename $0 .sh)
    if [ "$pid" = "" ]; then
        echo "$name is not running."
    else
        echo "$name is running (pid=$pid)."
    fi
    ps -ef |grep "$ymlConfigFile" |grep -v grep
}

# First pass over the arguments: collect flags and an optional config file;
# anything unrecognized that is not an existing file triggers usage().
for opt in $*; do
    case $opt in
    -j*) JMX=1 ;;
    -d*) DBG=1 ;;
    start | stop | status | restart) ;;
    *)  if [ -f $opt ]; then ymlConfigFile=$opt
        else usage
        fi ;;
    esac
done

# Second pass: execute the action verbs in the order given.
for opt in $*; do
    case $opt in
    start|restart)
        stopService
        startService ;;
    stop)
        stopService
        serviceStatus ;;
    status)
        serviceStatus ;;
    esac
done
|
fooqotsa/tableTop
|
artefacts/tableTopService.sh
|
Shell
|
mit
| 1,925 |
#!/usr/bin/env bash
# Self-test suite for bash-spec, written in bash-spec itself.
# Resolve the directory containing this script (falling back to $PWD
# when BASH_SOURCE has no directory component) and source the library.
DIR="${BASH_SOURCE%/*}"
if [[ ! -d "$DIR" ]]; then DIR="$PWD"; fi
. "$DIR/../bash-spec.sh"
describe "The bash version test" "$(
context "When using bash (especially on MacOS X)" "$(
it "Version must be 4+" "$(
[[ "${BASH_VERSINFO[0]}" > "3" ]]
should_succeed
)"
)"
)"
describe "Should_succeed" "$(
context "When testing conditional" "$(
it "reports success as pass" "$(
[[ "1" = "1" ]]
should_succeed
)"
)"
context "When testing condition fails" "$(
result="$(
[[ "1" = "2" ]]
should_succeed
)"
it "Reports the actual and expected correctly" "$(
expect "$result" to_be "**** FAIL - expected: '0(success)' | actual: '1(failed)'"
)"
)"
)"
describe "Should_fail" "$(
context "When testing conditional" "$(
it "reports fail as pass" "$(
[[ "1" = "2" ]]
should_fail
)"
)"
context "When testing condition fails" "$(
result="$(
[[ "1" = "1" ]]
should_fail
)"
it "Reports the actual and expected correctly" "$(
expect "$result" to_be "**** FAIL - expected: 'NOT 0(fail)' | actual: '0(succeeded)'"
)"
)"
)"
describe "The equality test" "$(
context "When a single value is passed" "$(
it "Reports two scalar values are equal" "$(
one="1"
expect $one to_be 1
)"
)"
context "When a single value is passed (by ref)" "$(
it "Reports two scalar values are equal" "$(
one="1"
expect_var one to_be 1
)"
)"
context "When a multi word value is passed" "$(
it "Reports two scalar values are equal" "$(
string="This is a string."
expect "$string" to_be "This is a string."
)"
)"
context "When a multi word (multi-line) value is passed" "$(
it "Reports two scalar values are equal" "$(
string=$'This is\na multi-line\noutput string.'
expect "$string" to_be "This is" \
"a multi-line" \
"output string."
)"
)"
context "When there is a failure" "$(
result="$(
expect "Test text" to_be "Something else"
)"
it "Reports the actual and expected correctly" "$(
expect "$result" to_be "**** FAIL - expected: 'Something else' | actual: 'Test text'"
)"
)"
)"
describe "The inequality test" "$(
it "Reports two scalar values are unequal" "$(
one="1"
expect $one not to_be 2
)"
context "When there is a failure" "$(
result="$(
expect "1" not to_be "1"
)"
it "Reports the actual and expected correctly" "$(
expect "$result" to_be "**** FAIL - expected: NOT '1' | actual: '1'"
)"
)"
)"
describe "The regex matcher" "$(
str="one fine day"
it "Reports a regex match" "$(
expect "$str" to_match day$
)"
context "When there is a failure" "$(
result="$(
expect "$str" to_match wibble$
)"
it "Reports the actual and expected correctly" "$(
expect "$result" to_be "**** FAIL - expected: 'wibble$' | actual: 'one fine day'"
)"
)"
)"
describe "The regex non-matcher" "$(
str="one fine night"
it "Reports regex mismatch" "$(
expect "$str" not to_match day$
)"
context "When there is a failure" "$(
result="$(
expect "$str" not to_match night$
)"
it "Reports the actual and expected correctly" "$(
expect "$result" to_be "**** FAIL - expected: NOT 'night$' | actual: 'one fine night'"
)"
)"
)"
describe "The array matcher" "$(
declare -a arr=(1 2 3 4)
it "Reports an array contains a given value" "$(
expect "${arr[@]}" to_contain 3
)"
context "When there is a failure" "$(
result="$(
expect "${arr[@]}" to_contain 5
)"
it "Reports the actual and expected correctly" "$(
expect "$result" to_be "**** FAIL - expected: '5' | actual: '1 2 3 4'"
)"
)"
)"
describe "The array non-matcher" "$(
declare -a arr=(1 2 3 4)
it "Reports an array does not contain a given value" "$(
expect "${arr[@]}" not to_contain 5
)"
context "When there is a failure" "$(
result="$(
expect "${arr[@]}" not to_contain 4
)"
it "Reports the actual and expected correctly" "$(
expect "$result" to_be "**** FAIL - expected: NOT '4' | actual: '1 2 3 4'"
)"
)"
)"
describe "The array (passed by reference) matcher" "$(
declare -a arr=(1 2 3 4)
it "Reports an array contains a given value" "$(
expect_array arr to_contain 3
)"
context "When there is a failure" "$(
result="$(
expect_array arr to_contain 5
)"
it "Reports the actual and expected correctly" "$(
expect "$result" to_be "**** FAIL - expected: '5' | actual: '1 2 3 4'"
)"
)"
)"
describe "The array (passed by reference) non-matcher" "$(
declare -a arr=(1 2 3 4)
it "Reports an array does not contain a given value" "$(
expect_array arr not to_contain 5
)"
context "When there is a failure" "$(
result="$(
expect_array arr not to_contain 4
)"
it "Reports the actual and expected correctly" "$(
expect "$result" to_be "**** FAIL - expected: NOT '4' | actual: '1 2 3 4'"
)"
)"
)"
describe "The file existence matcher" "$(
echo 'test' > tempfile
it "Reports a file exists" "$(
expect tempfile to_exist
)"
context "When there is a failure" "$(
rm -f tempfile
result="$(
expect tempfile to_exist
)"
it "Reports the actual and expected correctly" "$(
expect "$result" to_be "**** FAIL - expected: 'tempfile EXISTS' | actual: 'File not found'"
)"
)"
rm -f tempfile
)"
describe "The file non-existence matcher" "$(
it "Reports a file does not exist" "$(
rm -f tempfile
expect tempfile not to_exist
)"
context "When there is a failure" "$(
echo 'test' > tempfile
result="$(
expect tempfile not to_exist
)"
it "Reports the actual and expected correctly" "$(
expect "$result" to_be "**** FAIL - expected: NOT 'tempfile EXISTS' | actual: 'tempfile'"
)"
)"
rm -f tempfile
)"
describe "The file mode matcher" "$(
touch tempfile
chmod u=rw,g=r,o=x tempfile
it "Reports a file has the given mode" "$(
expect tempfile to_have_mode -rw-r----x
)"
context "When there is a failure" "$(
result="$(
expect tempfile to_have_mode -rw-rw-rwx
)"
it "Reports the actual and expected correctly" "$(
expect "$result" to_be "**** FAIL - expected: '-rw-rw-rwx' | actual: '-rw-r----x'"
)"
)"
rm -f tempfile
)"
describe "The file mode non-matcher" "$(
touch tempfile
chmod u=rw,g=r,o=x tempfile
it "Reports a file does not have the given mode" "$(
expect tempfile not to_have_mode -rw-rw-rwx
)"
context "When there is a failure" "$(
result="$(
expect tempfile not to_have_mode -rw-r----x
)"
it "Reports the actual and expected correctly" "$(
expect "$result" to_be "**** FAIL - expected: NOT '-rw-r----x' | actual: '-rw-r----x'"
)"
)"
rm -f tempfile
)"
describe "The exit mode matcher" "$(
function return_boolean {
if [[ $1 == "true" ]]; then
return 0
fi
return 1
}
it "Reports truth when the exit code of the following command is 0" "$(
expect to_be_true return_boolean true
)"
context "When there is a failure" "$(
result="$(
expect to_be_true return_boolean false
)"
it "Reports the actual and expected correctly" "$(
expect "$result" to_be "**** FAIL - expected: 'return_boolean false IS TRUE' | actual: 'return_boolean false IS FALSE'"
)"
)"
)"
describe "The exit mode non matcher" "$(
function return_boolean {
if [[ $1 == "true" ]]; then
return 0
fi
return 1
}
it "Reports false when the exit code of the following command is 1" "$(
expect not to_be_true return_boolean false
)"
context "When there is a failure" "$(
result="$(
expect not to_be_true return_boolean true
)"
it "Reports the actual and expected correctly" "$(
expect "$result" to_be "**** FAIL - expected: NOT 'return_boolean true IS TRUE' | actual: 'return_boolean true IS TRUE'"
)"
)"
)"
describe "Setting variables when nesting" "$(
test_var="first value"
it "Pulls a value into an it from the outer level" "$(
expect "$test_var" to_be "first value"
)"
context "When there is a nested context" "$(
it "Pulls a value into the inner it from the very outer level" "$(
expect "$test_var" to_be "first value"
)"
)"
context "When the context overwrites the value" "$(
test_var="second value"
it "Pulls the value into the inner it from the next level out" "$(
expect "$test_var" to_be "second value"
)"
)"
it "Does not get affected by values set in inner nesting earlier on" "$(
expect "$test_var" to_be "first value"
)"
)"
describe "The array matcher counts occurrences" "$(
declare -a arr=(1 2 3 3)
it "Reports an array contains a given value (any number of times)" "$(
expect "${arr[@]}" to_contain 3
)"
it "Reports an array contains a given value (2 times)" "$(
expect "${arr[@]}" to_contain 3 occurring 2 times
)"
context "When there is a failure" "$(
result="$(
expect "${arr[@]}" to_contain 3 occurring 1 time
)"
it "Reports the actual and expected correctly" "$(
expect "$result" to_be "**** FAIL - expected: '3 (x1 found x2)' | actual: '1 2 3 3'"
)"
)"
)"
|
realestate-com-au/bash-spec-2
|
tests/test_bash-spec.sh
|
Shell
|
mit
| 9,510 |
#!/bin/bash
#
# bmacs - bogus again
#
#
# THIS IS A JOKE.
#
# (not a very funny one, either)
# doc NAME DESCRIPTION
# Register DESCRIPTION as the online documentation for command NAME.
# Appends to the parallel arrays func[] / documentation[] and advances
# the shared counter max_func.  Uses $(( )) arithmetic instead of the
# obsolete $[ ] form.
doc()
{
    func[$max_func]="$1"
    documentation[$max_func]="$2"
    max_func=$((max_func + 1))
}
###########################################################################
# Screen drawing functions
###########################################################################
draw_modeline()
{
local mod
if [ $modified = 1 ]; then
mod="*"
else
mod="-"
fi
local ro
if [ $read_only = 1 ]; then
ro="%"
else
ro="$mod"
fi
local percent="[$col $line] ($x $y) $lines" # @@@
# Modeline format
local modeline_template
if [ "x$modeline_format" = x ] ; then
modeline_template="$default_modeline_format"
else
modeline_template="$modeline_format"
fi
local i=0
local c
local expanded_modeline
while [ $i -lt ${#modeline_template} ]
do
c=${modeline_template:$i:1}
if [ "$c" = '%' ] ; then
i=$[i + 1]
c=${modeline_template:$i:1}
case "$c" in
\*) expanded_modeline="${expanded_modeline}${mod}" ;;
\+) expanded_modeline="${expanded_modeline}${ro}" ;;
h) expanded_modeline="${expanded_modeline}${host}" ;;
b) expanded_modeline="${expanded_modeline}${buf_name}" ;;
m) expanded_modeline="${expanded_modeline}${mode[*]}" ;;
p) expanded_modeline="${expanded_modeline}${percent}" ;;
-)
local dash_len=$[screen_cols - ${#expanded_modeline}]
local blanks=$(printf "%${dash_len}s" ' ')
local dashes=${blanks// /-}
expanded_modeline="${expanded_modeline}${dashes}"
;;
esac
else
expanded_modeline="${expanded_modeline}${c}"
fi
i=$[i + 1]
done
moveto $win_height 0 # goto the bottom of the window
echo -n "$t_sgr0"
if [ $has_color = 1 ] ; then
bg_blue
fg_green
fi
echo -n "${expanded_modeline}${t_sgr0}"
}
doc toggle-update-modeline "Toggle updating the modeline."
toggle-update-modeline()
{
debug "$FUNCNAME"
update_modeline=$[! update_modeline]
}
doc toggle-debug "Toggle debugging."
toggle-debug()
{
debug "$FUNCNAME"
debug_flag=$[! debug_flag]
}
doc toggle-line-numbers "Toggle displaying line numbers."
toggle-line-numbers()
{
debug "$FUNCNAME"
line_numbers=$[! line_numbers]
}
line_view_len()
{
declare -i bi=${bix[$1]}
local l="${buf[$bi]}"
line_view_col $1 ${#l}
return $?
}
line_view_col()
{
local ln=$1 # the line
local cl=$2 # target col
declare -i bi=${bix[$ln]}
local l="${buf[$bi]}"
local s="${l// }" # Check for any tab
if [ ${#l} -eq ${#s} ]; then
return $cl
fi
local len=${#l}
local total=0
local i=0
while [ $i -lt $len -a $i -lt $cl ]
do
if [ "${l:$i:1}" = ' ' ]; then
total=$[total + (8 - (total % 8))]
else
total=$[total + 1]
fi
i=$[i + 1]
done
return $total
}
old_line_view_col()
{
local ln=$1
local cl=$1
declare -i bi=${bix[$ln]}
local l="${buf[$bi]}"
local s="${l// }"
if [ ${#l} -eq ${#s} ]; then
return ${#buf[$bi]}
fi
local len=${#s}
local total=0
local tt
while [ $len != 0 ]
do
# output consecutive tabs
while [ "${l:0:1}" = ' ' ]
do
tt=$[8 - (total % 8)]
l="${l/?([ ])/}" # snip off one tab
total=$[total + $tt]
done
s="${l/+([ ]*)/}" # beginning without tabs
len=${#s}
if [ ${#s} -ne ${#l} ]; then
total=$[total + len]
tt=$[8 - (total % 8)]
total=$[total + tt]
else # no more tabs
total=$[total + ${#l}]
return $total
fi
l="${l##+([^ ])}" # the rest
l="${l/?([ ])/}" # snip off one tab
done
shopt -u extglob
return $total
}
# render a line with tabs
render_line()
{
shopt -s extglob
local l="$*"
local s="${l// }"
# no tabs in the line
if [ ${#l} -eq ${#s} ]; then
if [ "$colorizer" ]; then
$colorizer "$l"
l="$syntax_line"
fi
echo -n "$l"
return
fi
# s="${l/+([ ]*)/}" # beginning without tabs
local len=${#s}
local total=0
local tt=0
local fixed_line=""
# printf ".......T.......T.......T.......T.......T\n"
# fg_red
while [ $len != 0 ]
do
# output consecutive tabs
while [ "${l:0:1}" = ' ' ]
do
tt=$[8 - (total % 8)]
fixed_line="$fixed_line""$(printf '%*.*s' $tt $tt ' ')"
l="${l/?([ ])/}" # snip off one tab
total=$[total + $tt]
done
s="${l/+([ ]*)/}" # beginning without tabs
len=${#s}
if [ ${#s} -ne ${#l} ]; then
total=$[total + len]
tt=$[8 - (total % 8)]
# echo "tt=$tt s=$s"
fixed_line="$fixed_line""$(printf '%s%*.*s' "$s" $tt $tt ' ')"
total=$[total + tt]
else
# echo -n "$l"
fixed_line="$fixed_line""$l"
if [ "$colorizer" ]; then
$colorizer "$fixed_line"
fixed_line="$syntax_line"
fi
echo -n "$fixed_line"
# fg_green
return
fi
l="${l##+([^ ])}" # the rest
l="${l/?([ ])/}" # snip off one tab
done
if [ "$colorizer" ]; then
$colorizer "$fixed_line"
fixed_line="$syntax_line"
fi
echo -n "$fixed_line"
shopt -u extglob
# fg_green
}
## redraw the given screen line
redraw_line()
{
moveto $1 0
echo -n "${t_sgr0}${t_rmir}${t_el}" # no attr, no insert, erase line
declare -i l=$[win_start + $1]
declare -i bi=${bix[$l]}
if [ "${buf[$bi]}" ]; then
# echo -n "${buf[$bi]:0:$screen_cols}"
render_line "${buf[$bi]:0:$screen_cols}"
fi
echo -n "$t_enter_insert_mode" # insert mode on
}
redraw()
{
if [ $line_to_redraw != x ]; then
redraw_line "$line_to_redraw"
moveto $y $x # go back to the cursor location
line_to_redraw=x
need_to_redraw=0
return
fi
# clear, no attributes, insert mode off
tput csr 0 $screen_rows
echo -n "${t_sgr0}${t_rmir}${t_clear}"
# display the lines in the buffer
local i=$win_start
local win_end=$[win_start + (win_height - 1)]
declare -i bi
declare -i j=0
while [ $i -lt $win_end -a $i -lt $lines ]; do
bi=${bix[$i]}
if [ $line_numbers = 1 ]; then
moveto $j 76 ; fg_blue ; printf "%3d " $bi ; fg_green ; moveto $j 0
# line_view_len $i ; lnln=$?
# moveto $j 76 ; fg_blue ; printf "%3d " $lnln ; fg_green ; moveto $j 0
fi
if [ "${buf[$bi]}" ]; then
# printf "%-${screen_cols}.${screen_cols}s\n" "${buf[$bi]}"
# echo "${buf[$bi]:0:$screen_cols}"
render_line "${buf[$bi]:0:$screen_cols}"
echo
else
echo
fi
i=$[i + 1]
j=$[j + 1]
done
# can't put a newline on the last one, because of the scrolling region
if [ $i = $win_end ]; then
bi=${bix[$i]}
if [ "${buf[$bi]}" ]; then
# echo -n "${buf[$bi]:0:$screen_cols}"
render_line "${buf[$bi]:0:$screen_cols}"
fi
fi
draw_modeline
tput csr 0 $[win_height - 1]
moveto $y $x # goto the cursor location
echo -n "$t_enter_insert_mode" # insert mode on
need_to_redraw=0
}
typeout()
{
bg_green
fg_black
echo -n "${t_rmir}"
local wid=$[screen_cols - 10]
eval "$1" | (
local nana
local i=2
while read nana
do
moveto $i 5
printf "%-${wid}.${wid}s\n" "$nana"
i=$[i + 1]
done
moveto $i 5
)
printf "%-${wid}.${wid}s\n" " "
tput cuf 5
printf "%-${wid}.${wid}s" "---Press the ANY key to continue---"
read -sr -d '' -n 1
bg_black
fg_green
echo -n "${t_enter_insert_mode}"
need_to_redraw=1
}
# Query the terminal driver for the current size via "$stty size"
# (prints "rows cols") and update the screen_rows / screen_cols
# globals only when the query succeeds.
# Fixes: the original captured the output with `local sizes=$(...)`
# and then tested $? -- but $? was the status of `local` (always 0),
# so a failed stty still fell through, and `set $sizes` with empty
# output dumped every shell variable instead of setting parameters.
get_screen_size()
{
    local sizes
    if sizes=$($stty size); then
        # Intentional word-split of "rows cols" into $1 / $2.
        set -- $sizes
        screen_rows="$1"
        screen_cols="$2"
    fi
    debug "screen_rows=$screen_rows screen_cols=$screen_cols"
}
init_term_vars()
{
# Screen size
screen_cols=80
screen_rows=24
get_screen_size
# Color
if [ "$(tput colors 2> /dev/null)" -ge 8 ] ; then
has_color=1
else
has_color=0
fi
# Bells
t_bell=$(tput bel 2>/dev/null)
t_flash=$(tput flash 2>/dev/null)
if [ "x$t_bell" = x ]; then
t_bell=""
fi
if [ "x$t_flash" = x ] ; then
t_flash=$t_bell
fi
# Movement
t_cuf1=$(tput cuf1 2>/dev/null)
if [ $? != 0 ] ; then
fatal "Terminal description is not fully functional. Lacking cuf1."
fi
t_cub1=$(tput cub1 2>/dev/null)
if [ $? != 0 ] ; then
fatal "Terminal description is not fully functional. Lacking cub1."
fi
t_cud1="$(tput cud1 2>/dev/null)"
if [ $? != 0 ] ; then
fatal "Terminal description is not fully functional. Lacking cud1."
fi
t_cuu1=$(tput cuu1 2>/dev/null)
if [ $? != 0 ] ; then
fatal "Terminal description is not fully functional. Lacking cuu1."
fi
# Alteration
t_dch1=$(tput dch1 2>/dev/null)
if [ $? != 0 ] ; then
fatal "Terminal description is not fully functional. Lacking dch1."
fi
t_clear=$(tput clear 2>/dev/null)
if [ $? != 0 ] ; then
fatal "Terminal description is not fully functional. Lacking clear."
fi
t_el=$(tput el 2>/dev/null)
if [ $? != 0 ] ; then
fatal "Terminal description is not fully functional. Lacking el."
fi
t_il1=$(tput il1 2>/dev/null)
if [ $? != 0 ] ; then
fatal "Terminal description is not fully functional. Lacking il1."
fi
t_dl1=$(tput dl1 2>/dev/null)
if [ $? != 0 ] ; then
fatal "Terminal description is not fully functional. Lacking dl1."
fi
# Modes
t_enter_insert_mode=$(tput smir 2>/dev/null)
if [ $? != 0 ] ; then
fatal "Terminal description is not fully functional. \
Lacking enter_insert_mode."
fi
t_rmir=$(tput rmir 2>/dev/null)
if [ $? != 0 ] ; then
fatal "Terminal description is not fully functional. Lacking rmir."
fi
t_sgr0=$(tput sgr0 2>/dev/null)
if [ $? != 0 ] ; then
fatal "Terminal description is not fully functional. Lacking sgr0."
fi
}
# for speed
# Move the cursor to screen row $1, column $2 (both zero-based).
# Emits the ANSI cursor-position (CUP) sequence directly rather than
# forking `tput cup`.
moveto()
{
    printf '\033[%d;%dH' "$(( $1 + 1 ))" "$(( $2 + 1 ))"
}
# for speed: emit the ANSI SGR color escapes directly instead of
# forking tput for every color change (fg_* = foreground 30-37,
# bg_* = background 40-47).
fg_black() { echo -ne '\033[30m' ; } # tput setaf 0
fg_red() { echo -ne '\033[31m' ; } # tput setaf 1
fg_green() { echo -ne '\033[32m' ; } # tput setaf 2
fg_yellow() { echo -ne '\033[33m' ; } # tput setaf 3
fg_blue() { echo -ne '\033[34m' ; } # tput setaf 4
fg_magenta() { echo -ne '\033[35m' ; } # tput setaf 5
fg_cyan() { echo -ne '\033[36m' ; } # tput setaf 6
fg_white() { echo -ne '\033[37m' ; } # tput setaf 7
bg_black() { echo -ne '\033[40m' ; } # tput setab 0
bg_red() { echo -ne '\033[41m' ; } # tput setab 1
bg_green() { echo -ne '\033[42m' ; } # tput setab 2
bg_yellow() { echo -ne '\033[43m' ; } # tput setab 3
bg_blue() { echo -ne '\033[44m' ; } # tput setab 4
bg_magenta() { echo -ne '\033[45m' ; } # tput setab 5
bg_cyan() { echo -ne '\033[46m' ; } # tput setab 6
bg_white() { echo -ne '\033[47m' ; } # tput setab 7
init_term()
{
saved_tty_modes=$(stty -g)
stty -echo
stty susp '^-'
# stty intr '^-'
# stty intr '^g'
# tput init
tput is1
echo -n "$t_enter_insert_mode"
tput csr 0 $[win_height - 1]
}
reset_term()
{
stty echo
stty "$saved_tty_modes"
tput rmir
# This can screw up xterms
# tput reset
# so instead do:
tput csr 0 $screen_rows
tput sgr0
}
message()
{
moveto $screen_rows 0
echo -n "$t_el"
echo -n "$@"
moveto $y $x
}
prompt()
{
tput csr 0 $screen_rows
moveto $screen_rows 0
echo -n "${t_el}${t_rmir}"
stty echo -opost
read -e -p "$@" response
local stat=$?
stty -echo opost
echo -n "${t_enter_insert_mode}"
tput csr 0 $[win_height - 1]
return $stat
}
minibuf-self-insert()
{
echo -n "$c"
response="${response}${c}"
}
minibuf-accept()
{
minibuf_done_flag=1
}
minibuf-abort()
{
minibuf_done_flag=1
minibuf_abort_flag=1
}
new_prompt()
{
local saved_x=$x
local saved_y=$y
moveto $screen_rows 0
echo -n "${t_el}${t_rmir}"
echo -n "$@"
minibuf_done_flag=0
minibuf_abort_flag=0
response=""
until [ $minibuf_done_flag = 1 ]; do
IFS=: read -sr -d '' -n 1 c
if [ $? != 0 ]; then
# eof, error or timeout
:
fi
char_to_int "$c"
key=$?
if [ ! -z ${mini_map[$key]} ]; then
t=$(type -t ${global_map[$key]})
if [ ! -z "$t" ] ; then
if [ "$t" = 'function' ]; then
${mini_map[$key]}
else
case "$t" in
alias|keyword|builtin) ${mini_map[$key]} ;;
esac
fi
fi
fi
done
local stat=0
moveto $screen_rows 0
echo -n "$t_el"
echo -n "${t_enter_insert_mode}"
if [ $minibuf_abort_flag = 1 ]; then
response=""
stat=1
echo -n "Aborted."
fi
moveto $saved_y $saved_x
return $stat
}
# Give the user feedback: flash the screen when the visible bell is
# selected, otherwise sound the terminal's audible bell.
ring_bell()
{
    case "$visible_bell" in
        1) echo -n $t_flash ;;
        *) echo -n $t_bell ;;
    esac
}
# Save buffer position, eval each arg, and restore position.
# Returns the last result.
save_excursion()
{
local o_x=$x
local o_y=$y
local o_col=$col
local o_line=$line
local o_do_update=$do_update
set_update 0
local stat=0
for z in "$@"
do
eval "$z" # oh the crapness
stat=$?
done
x=$o_x
y=$o_y
col=$o_col
line=$o_line
set_update $o_do_update
return $stat
}
set_update()
{
if [ $1 = 0 ]; then
if [ $do_update != 0 ]; then
no_update=1
update_off
fi
else
if [ $do_update != 0 ]; then
no_update=0
update_on
fi
fi
}
# Suppress screen drawing: shadow the output primitives (echo, printf,
# tput) with no-op shell functions, so any drawing code that runs while
# updates are disabled silently does nothing.  Reversed by update_on.
update_off()
{
echo()
{
:
}
printf()
{
:
}
tput()
{
:
}
}
# Re-enable screen drawing by removing the no-op shadows installed by
# update_off, restoring the builtin echo/printf and the real tput.
# NOTE(review): plain `unset` removes a variable of that name first if
# one exists; `unset -f` would target the functions unambiguously.
update_on()
{
unset echo
unset printf
unset tput
}
###########################################################################
# Variables and initialization
#
save_buf_vars()
{
eval "${buf_name}_modified=$modified"
eval "${buf_name}_read_only=$read_only"
eval "${buf_name}_buf_name=$buf_name"
eval "${buf_name}_buf_filename=$buf_filename"
eval "${buf_name}_unsaved=$unsaved"
eval "${buf_name}_mode=(${mode[*]})"
eval "${buf_name}_modeline_format=$modeline_format"
eval "${buf_name}_line=$line"
eval "${buf_name}_col=$col"
eval "${buf_name}_lines=$lines"
eval "${buf_name}_buf_size=$buf_size"
eval "${buf_name}_mark[0]=${mark[0]}"
eval "${buf_name}_mark[1]=${mark[1]}"
eval "${buf_name}_buf=(${buf[@]})"
eval "${buf_name}_bix=(${bix[@]})"
}
load_buf_vars()
{
buf_name="${1:-$buf_name}"
local d="\$"
eval modified=$d${buf_name}_modified
eval read_only=$d${buf_name}_read_only
eval buf_name=$d${buf_name}_buf_name
eval buf_filename=$d${buf_name}_buf_filename
eval unsaved=$d${buf_name}_unsaved
eval mode=($d{${buf_name}_mode[*]})
eval modeline_format=$d${buf_name}_modeline_format
eval line=$d${buf_name}_line
eval col=$d${buf_name}_col
eval lines=$d${buf_name}_lines
eval buf_size=$d${buf_name}_buf_size
eval mark[0]=$d${buf_name}_mark[0]
eval mark[1]=$d${buf_name}_mark[1]
eval buf=($d{${buf_name}_buf[@]})
eval bix=($d{${buf_name}_bix[@]})
typeout doodle_buf
}
# @@@ tmp for debugging
doodle_buf()
{
local i=0
for l in ${buf[@]}
do
echo "buf[$i] = ($l)"
i=$[i + 1]
done
}
# buffer_exists NAME
# Return 0 if NAME is one of the known buffers, 1 otherwise.
# Iterates the buffers[] array directly instead of maintaining an
# index counter with the obsolete $[ ] arithmetic; also avoids the
# original's `local buf=...`, which shadowed the global buf[] array
# name used for buffer text elsewhere in this file.
buffer_exists()
{
    local candidate="$1"
    local b
    for b in "${buffers[@]}"
    do
        if [ "$b" = "$candidate" ]; then
            return 0
        fi
    done
    return 1
}
list-buffers()
{
typeout list_buffers
}
list_buffers()
{
local i=0
local bufs=${#buffers[*]}
local b
local bl
local d="\$"
printf "%-20.20s %8.8s\n" "Name" "Lines"
while [ $i -lt $bufs ]
do
b="${buffers[$i]}"
if [ "$b" = "$buf_name" ]; then
bl=$lines
else
bl=$(eval echo $d${buffers[$i]}_lines)
fi
printf "%-20.20s %8d\n" "$b" "$bl"
i=$[i + 1]
done
}
new_buffer()
{
local n=${#buffers[*]}
local name="$1"
buffers[$n]="$name"
init_buf_vars
buf_name="$name"
}
switch_to_buffer()
{
local t_buf="$1"
save_buf_vars
if buffer_exists "$t_buf"; then
load_buf_vars "$t_buf"
else
new_buffer "$t_buf"
fi
}
init_buf_vars()
{
# buffer vars
modified=0
read_only=0
# buf_name='*scratch*'
buf_name='scratch'
buf_filename=
unsaved=1 # if never saved
mode[0]='Fundamental'
modeline_format= # current modeline format
line=0 # current line in buffer
col=0 # current column in buffer
lines=0 # number of lines in the buffer (index)
buf_size=1 # size of the buffer text heap
mark[0]=x # mark line
mark[1]=x # mark col
# window vars
win_start=0 # starting buffer line of the window
win_height=$[screen_rows - 2]
x=0 # x position on screen
y=0 # y position on screen
}
init_global_vars()
{
# System vars
host=$(uname -n 2> /dev/null)
host=${host//.*/} # get rid of anything past first dot
if [ ! "$host" ] ; then
host=Bmacs
fi
debug_flag=0
current_buffer=0
clipboard=''
need_to_gc_buffer=0
buffer_gc_threshold=30
declare -a documentation # documentation for functions
declare -a func # function names
max_func=0 # index into the above two
# display vars
need_to_redraw=0 # true if screen need to be redrawn
do_update=1 # suppress immediate output of editing commands
line_to_redraw=x # screen line number to redraw if not x
# User preferences
visible_bell=0
default_modeline_format='--%+%*-%h: %b (%m) %p %-'
update_modeline=0
saved_modeline=
next_screen_context_lines=2
scroll_step=0 # zero for centering
timeout=60 # second for timeout
line_numbers=0
init_term_vars
# init_code_chars # way too slow
}
load_init_file()
{
if [ -f ~/.bmacs ]; then
# @@@ check that file is writable only by you
message "Reading ~/.bmacs..."
source ~/.bmacs # Security hole @@@
message "Reading ~/.bmacs...done"
fi
}
# conferr MESSAGE...
# Report a fatal configuration problem and terminate with status 1.
# Diagnostics go to stderr so they survive stdout redirection and do
# not get mixed into the editor's screen output.
conferr()
{
    echo "Configuration error: " >&2
    echo "$*" >&2
    exit 1
}
# required_cmd NAME
# Abort via conferr unless NAME resolves to an external executable.
# The command substitution is quoted: with the original unquoted form
# a missing command expanded to nothing and `[` raised a syntax error
# instead of taking the branch, so missing commands went undetected.
required_cmd()
{
    if [ "$(type -t "$1")" != "file" ]; then
        conferr "The required command $1 is missing."
    fi
}
configure()
{
if [ "$BASH_VERSINFO" -lt 2 ] ; then
conferr "I'm sorry. You need at least bash version 2.05 for this to work."
fi
if [ "$BASH_VERSINFO" -eq 2 -a "${BASH_VERSINFO[1]%[^0-9]*}" -lt 5 ] ; then
conferr "I'm sorry. You need at least bash version 2.05 for this to work."
fi
for cmd in stty tput uname
do
required_cmd $cmd
done
# use berkeley stty if available
if [ -x /usr/ucb/stty ]; then
stty=/usr/ucb/stty
else
stty=stty
fi
}
initialize()
{
configure
init_global_vars
# init_buf_vars
new_buffer "scratch"
init_term
init_keymap
redraw
load_init_file
}
parse_args()
{
if [ $# -eq 1 ]; then
# debugp "visiting file $1"
visit-file "$1"
need_to_redraw=1
fi
}
###########################################################################
# Editing functions
###########################################################################
doc self-insert "Insert the character that was just typed."
self-insert()
{
insert "$c"
}
# $1 - string to insert
insert()
{
debug "insert(\"$1\")"
if [ $line = $lines ]; then
lines=$[lines + 1]
# Take a line from the end of the heap
bix[$line]=$buf_size
buf[$buf_size]=''
buf_size=$[buf_size + 1]
fi
local str="$1"
declare -i len=${#str}
declare -i bi=${bix[$line]}
buf[$bi]="${buf[$bi]:0:$col}${str}${buf[$bi]:$col}"
x=$[x + $len]
col=$[col + $len]
echo -n "$1" # assuming we're in insert mode
if [ $modified = 0 ]; then
modified=1
draw_modeline # To show modified flag
moveto $y $x # goto the cursor location
fi
}
newline()
{
debug "$FUNCNAME"
insert_line $[line + 1]
declare -i bi=${bix[$line]}
if [ $col != ${#buf[$bi]} ]; then # not EOL
# copy tail of old line to new line
buf[${bix[$line + 1]}]=${buf[$bi]:$col}
# snip the tail of the old line
buf[$bi]=${buf[$bi]:0:$col}
fi
line=$[line + 1]
col=0
y=$[y + 1]
x=0
# @@@ what about when we get to the bottom of the window?
if [ $line -le $[lines - 1] ]; then # not at the EOB
bi=${bix[$line]}
echo "${t_el}"
# echo -n "${t_il1}${buf[$bi]}"
echo -n "${t_il1}"
render_line "${buf[$bi]}"
moveto $y $x
else
echo # a newline
fi
}
doc delete-backward-char "Deletes the previous character."
delete-backward-char()
{
debug "$FUNCNAME"
if [ $col -eq 0 ]; then
if [ $line = 0 ] ; then
message "Beginning of buffer"
ring_bell
return
fi
# back to the end of the above line
line=$[line - 1]
declare -i bi=${bix[$line]}
x=${#buf[$bi]}
col=$x
y=$[y - 1]
# append the old line to the above line, and delete the old line
local ol=${buf[${bix[$[line + 1]]}]}
buf[$bi]="${buf[$bi]}${ol}"
delete_line $[line + 1]
echo -n "${t_dl1}"
if [ $modified = 0 ]; then
modified=1
draw_modeline # To show modified flag
fi
# fill in the empty line at the bottom of the window
moveto $[win_height - 1] 0
bi=${bix[$win_start + $[win_height - 1]]}
# echo -n "${buf[$bi]}"
render_line "${buf[$bi]}"
moveto $y $x # goto the cursor location
# echo -n "$ol" # write
render_line "$ol" # write
moveto $y $x # go back to the beginning of the line
return
fi
declare -i bi=${bix[$line]}
buf[$bi]="${buf[$bi]:0:$[col - 1]}${buf[$bi]:$col}"
x=$[x - 1]
col=$[col - 1]
echo -n "$t_cub1$t_dch1"
if [ $modified = 0 ]; then
modified=1
draw_modeline # To show modified flag
moveto $y $x # goto the cursor location
fi
}
doc delete-char "Deletes the character the cursor is on."
delete-char()
{
debug "$FUNCNAME"
declare -i bi=${bix[$line]}
if [ $col -eq ${#buf[$bi]} ] ; then
if [ $line -eq $lines ]; then
message "End of buffer"
ring_bell
return
fi
# join lines
local ol=${buf[${bix[$[line + 1]]}]} # following line
buf[$bi]="${buf[$bi]}${ol}" # append to current
delete_line $[line + 1]
# update the "view"
# echo "$ol" # append the following line and go down
render_line "$ol" # append the following line and go down
echo -n "${t_dl1}" # delete the following line
if [ $lines -ge $[win_start + win_height] ]; then
# fill in the empty line at the bottom of the window
moveto $[win_height - 1] 0
bi=${bix[$win_start + $[win_height - 1]]}
# echo -n "${buf[$bi]}"
render_line "${buf[$bi]}"
fi
moveto $y $x # go back to where we were
return
fi
buf[$bi]="${buf[$bi]:0:$col}${buf[$bi]:$[col + 1]}"
echo -n "$t_dch1"
if [ $modified = 0 ]; then
modified=1
draw_modeline # To show modified flag
moveto $y $x # goto the cursor location
fi
}
doc kill-word "Kills until the end of the next word."
kill-word()
{
debug "$FUNCNAME"
set $(save_excursion forward-word 'command echo "$line $col"')
# kill_region $1 $2 $line $col
delete_region $line $col $1 $2
}
doc kill-line "Kill to the end of the line."
kill-line()
{
debug "$FUNCNAME"
declare -i bi=${bix[$line]}
if [ -z "${buf[$bi]:$col}" ] ; then
buf[$bi]="${buf[$bi]}${buf[${bix[$line + 1]}]}"
delete_line $line
echo -n "${t_dl1}"
# fill in the empty line at the bottom of the window
moveto $[win_height - 1] 0
bi=${bix[$win_start + $[win_height - 1]]}
# echo -n "${buf[$bi]}"
render_line "${buf[$bi]}"
moveto $y $x # goto the cursor location
else
if [ $line = $lines -a $col = ${#buf[$bi]} ]; then
ring_bell
message "End of buffer"
return
fi
clipboard=${buf[$bi]:$col}
buf[$bi]=${buf[$bi]:0:$col}
echo -n "$t_el"
if [ $modified = 0 ]; then
modified=1
draw_modeline # To show modified flag
moveto $y $x # goto the cursor location
fi
fi
}
doc copy-region "Copy the region to the clipboard."
copy-region()
{
# clipboard=$(buffer_substring $line $col $mark[0] $mark[1])
buffer_substring $line $col $mark[0] $mark[1]
clipboard="$result"
}
yank()
{
insert "$clipboard"
}
# Record the current point as the mark: line number in mark[0],
# column in mark[1].
set_mark()
{
    mark=("$line" "$col")
}
doc set-mark "Set the mark to the current position (the point)."
set-mark()
{
set_mark
message "Mark set"
}
# set line and col and update (or set for update) the screen accordingly
set_point()
{
line=$1
col=$2
if [ $line -lt $win_start -o $line -ge $[win_start + win_height] ] ; then
win_start=$[line - (win_height / 2)]
if [ $win_start -lt 0 ]; then
win_start=0
fi
need_to_redraw=1
fi
y=$[line - win_start]
line_view_col $line $col
x=$?
if [ $need_to_redraw = 0 ]; then
moveto $y $x
fi
}
exchange-point-and-mark()
{
if [ x${mark[0]} = x ]; then
message "No mark set in this buffer."
ring_bell
return 1
fi
new_mark_line=$line
new_mark_col=$col
set_point ${mark[0]} ${mark[1]}
mark[0]=$new_mark_line
mark[1]=$new_mark_col
}
#
# Movement
#
doc beginning-of-line "Move point to the beginning of the current line."
beginning-of-line()
{
debug "$FUNCNAME"
x=0
col=0
moveto $y 0
}
doc beginning-of-line "Move point to the end of the current line."
end-of-line()
{
debug "$FUNCNAME"
declare -i bi=${bix[$line]}
line_view_len $line
x=$?
col=${#buf[$bi]}
moveto $y $x
}
doc next-line "Move the cursor down one line"
next-line()
{
debug "$FUNCNAME"
# End of buffer
if [ $line -ge $lines ]; then
ring_bell
return 1
fi
line=$[line + 1]
declare -i bi=${bix[$line]}
local old_col=$col
if [ $col -gt ${#buf[$bi]} ]; then # col past end of text
col=${#buf[$bi]}
fi
y=$[y + 1]
# If the cursor is past the bottom of window, adjust the window
if [ $y -ge $win_height ]; then
local ss
if [ $scroll_step = 0 ]; then
ss=$[win_height / 2]
else
ss=$scroll_step
fi
win_start=$[win_start + ss];
y=$[line - win_start]
x=$col
need_to_redraw=1
elif [ $col != $old_col ] ; then # if we had to adjust the column
# move the cursor
line_view_len $line
x=$?
moveto $y $x
else # we're just going down
# This doesn't work because at least "stty onlcr" is on
# echo -n "$t_cud1"
# echo
tput cud 1
fi
}
doc previous-line "Move the cursor up one line"
previous-line()
{
debug "$FUNCNAME"
if [ $line -eq 0 ]; then
ring_bell
return 1
fi
line=$[line - 1]
declare -i bi=${bix[$line]}
local old_col=$col
if [ $col -gt ${#buf[$bi]} ]; then
col=${#buf[$bi]}
fi
y=$[y - 1]
if [ $y -lt 0 ]; then
local ss
if [ $scroll_step = 0 ]; then
ss=$[win_height / 2]
else
ss=$scroll_step
fi
win_start=$[win_start - $ss];
y=$[line - win_start]
x=$col
need_to_redraw=1
elif [ $old_col != $col ] ; then
line_view_len $line
x=$?
moveto $y $x
else
# x=$col
echo -n $t_cuu1
fi
}
doc forward-char "Move the cursor forward one character."
forward-char()
{
# debug "$FUNCNAME"
declare -i bi=${bix[$line]}
if [ $col -lt ${#buf[$bi]} ]; then
if [ "${buf[$bi]:$col:1}" = ' ' ]; then
x=$[x + (8 - (x % 8))]
col=$[col + 1]
moveto $y $x
else
x=$[x + 1]
col=$[col + 1]
echo -n $t_cuf1
fi
else
if [ $line -lt $lines ] ; then
line=$[line + 1]
y=$[y + 1]
x=0
col=0
moveto $y $x
else
message "End of buffer"
ring_bell
return
fi
fi
}
doc backward-char "Move the cursor backward one character."
backward-char()
{
debug "$FUNCNAME"
if [ $col = 0 ]; then
if [ $line -gt 0 ] ; then
line=$[line - 1]
y=$[y - 1]
declare -i bi=${bix[$line]}
line_view_len $line
x=$?
# message "x=$x"
col=${#buf[$bi]}
moveto $y $x
else
message "Beginning of buffer"
ring_bell
return
fi
else
col=$[col - 1]
declare -i bi=${bix[$line]}
if [ "${buf[$bi]:$col:1}" = ' ' ]; then
x=$[x - (8 - (x % 8))]
moveto $y $x
else
x=$[x - 1]
echo -n $t_cub1
fi
fi
}
doc forward-word "Move the cursor forward one word."
forward-word()
{
# debug "$FUNCNAME"
# Make sure extended pattern matching is on
shopt -s extglob
# if we're at the EOL or the line has no more words
local ok=0
declare -i bi
while [ $ok = 0 ]
do
bi=${bix[$line]}
local l=${buf[$bi]:$col} # remaining part of line
if [ $col -ge ${#buf[$bi]} ] ; then
if [ $line = $lines ]; then
message "End of Buffer"
ring_bell
return 1
fi
line=$[line + 1]
y=$[y + 1]
x=0
col=0
elif [ "${l##*([^A-Za-z0-9_-])}" = '' ] ; then
if [ $line = $lines ]; then
col=0
x=0
moveto $y 0
message "End of buffer"
ring_bell
return 1
fi
line=$[line + 1]
y=$[y + 1]
x=0
col=0
else
ok=1
fi
done
bi=${bix[$line]}
local l=${buf[$bi]:$col} # remaining part of line
local z=${l##*([^A-Za-z0-9_-])+([A-Za-z0-9_-])} # end of the next word
col=$[col + ( ${#l} - ${#z} )] # add the difference
tput cuf $[col - x] # forward that many
x=$col
moveto $y $x # @@@ perhaps not totally necessary, but
# Turn off extended pattern matching so we don't accidentally use it
shopt -u extglob
return 0
}
doc backward-word "Move the cursor backward one word."
backward-word()
{
debug "$FUNCNAME"
# Make sure extended pattern matching is on
shopt -s extglob
# Go backwards to the first line that has a word or return
local ok=0
declare -i bi
while [ $ok = 0 ]
do
bi=${bix[$line]}
local l=${buf[$bi]:0:$col} # beginning part of line
if [ $col = 0 ] ; then # At EOL
if [ $line = 0 ]; then
message "Beginning of buffer"
ring_bell
return 1
fi
line=$[line - 1]
y=$[y - 1]
bi=${bix[$line]}
x=${#buf[$bi]}
col=$x
elif [ "${l##*([^A-Za-z0-9_-])}" = '' ] ; then
if [ $line = 0 ]; then
col=0
x=0
moveto $y 0
message "Beginning of buffer"
ring_bell
return 1
fi
line=$[line - 1]
y=$[y - 1]
bi=${bix[$line]}
x=${#buf[$bi]}
col=$x
else
ok=1
fi
done
bi=${bix[$line]}
local l=${buf[$bi]:0:$col} # beginning part of line
local z=${l%%+([A-Za-z0-9_-])*([^A-Za-z0-9_-])} # line minus last word
col=$[col - ( ${#l} - ${#z} )] # add the difference
# tput cub $[x - col] # backward that many
x=$col
moveto $y $x
# Turn off extended pattern matching so we don't accidentally use it
shopt -u extglob
}
doc beginning-of-buffer "Move the cursor to the beginning of the buffer."
beginning-of-buffer()
{
    # Move point to line 0, column 0.
    debug "$FUNCNAME"
    x=0
    y=0
    col=0
    line=0
    if [ $win_start != 0 ]; then
        # The buffer top is scrolled off screen: jump there and redraw.
        win_start=0
        need_to_redraw=1
    else
        moveto 0 0
    fi
}
doc end-of-buffer "Move the cursor to the end of the buffer."
end-of-buffer()
{
    # Move point past the last character of the buffer's last line.
    debug "$FUNCNAME"
    line=$lines
    declare -i bi=${bix[$line]}
    x=${#buf[$bi]}
    col=$x
    if [ $[line - win_start] -gt $win_height ]; then
        # End of buffer is below the window: recenter on it and redraw.
        win_start=$[line - (win_height / 2)]
        y=$[line - win_start]
        need_to_redraw=1
    else
        y=$[line - win_start]
        moveto $y $x
    fi
}
doc scroll-up "Scroll the window almost a screenful up."
scroll-up()
{
debug "$FUNCNAME"
local new_top=$[win_start + (win_height - next_screen_context_lines)]
if [ $new_top -gt $lines ]; then
ring_bell
return
fi
if [ $new_top -gt $[lines - next_screen_context_lines] ] ; then
new_top=$[lines - next_screen_context_lines]
if [ $new_top -lt 0 ]; then # is this possible?
new_top=0
fi
fi
win_start=$new_top
line=$win_start
y=0
x=0
col=0
need_to_redraw=1
}
doc scroll-down "Scroll the window almost a screenful down."
scroll-down()
{
debug "$FUNCNAME"
if [ $win_start = 0 ]; then
ring_bell
return
fi
local new_top=$[win_start - (win_height - next_screen_context_lines)]
if [ $new_top -lt 0 ]; then
new_top=0
fi
win_start=$new_top
line=$[win_start + (win_height - 1)]
y=$[win_height - 1]
x=0
col=0
need_to_redraw=1
}
up-a-line()
{
if [ $[win_start + 1] -ge $lines ]; then
ring_bell
return
fi
win_start=$[win_start + 1]
y=$[y - 1]
moveto 0 0
echo -n "${t_dl1}"
redraw_line $[win_height - 1]
if [ $win_start -gt $line ]; then
line=$win_start
y=0
x=0
col=0
fi
moveto $y $x
}
down-a-line()
{
if [ $[win_start - 1] -lt 0 ]; then
ring_bell
return
fi
win_start=$[win_start - 1]
y=$[y + 1]
moveto 0 0
echo -n "${t_il1}"
redraw_line 0
if [ $line -ge $[win_start + win_height] ]; then
line=$[win_start + (win_height - 1)]
y=$[win_height - 1]
x=0
col=0
fi
moveto $y $x
}
last_search=""
doc search-forward "Search for the next occurance of a string."
search-forward()
{
if new_prompt "Search [$last_search]: " ; then : ;
else
return 1
fi
local str="$response"
local l
local first_time=1
local i=$line
if [ -z "$str" ]; then
str="$last_search"
else
last_search="$str"
fi
while [ $i -lt $lines ]
do
bi=${bix[$i]}
if [ $first_time = 1 ]; then
l="${buf[$bi]:$col}"
first_time=0
else
l="${buf[$bi]}"
fi
if [ ${#l} != 0 -a -z "${l##*${str}*}" ]; then
line=$i
l=${l%${str}*}
col=$[${#l} + ${#str}]
x=$col
if [ $line -lt $win_start -o \
$line -ge $[win_start + win_height] ]; then
need_to_redraw=1
else
y=$[line - win_start]
moveto $y $x
fi
set_point $line $col
return
fi
i=$[i + 1]
done
message "Not found."
}
###########################################################################
# Buffer functions
###########################################################################
kill-buffer()
{
    # Prompt for a buffer name and discard the buffer contents.
    # @@@ this is so fake: the name is read but only the current
    # buffer is ever killed.
    debug "$FUNCNAME"   # was "$funcname" (wrong case) — always empty
    local bufname
    prompt "Kill buffer: (default ${buf_name}) "
    bufname="$response"
    init_buf_vars
    unset buf
    unset bix
    need_to_redraw=1
}
switch-to-buffer()
{
    # Prompt for a buffer name and make it the current buffer.
    # An empty response keeps the current buffer (the default).
    debug "$FUNCNAME"   # was "$funcname" (wrong case) — always empty
    local bufname
    prompt "Switch to buffer: (default ${buf_name}) "
    if [ "x$response" = "x" ]; then
        bufname="$buf_name"
    else
        bufname="$response"
    fi
    switch_to_buffer "$bufname"
    need_to_redraw=1
}
gc_buffer()
{
    # Compact the buffer heap: copy the live lines (in bix[] order)
    # into a fresh array so deleted/stale cells are dropped.  Only
    # runs after enough edits have accumulated.
    # NOTE(review): buf_size and new_buf are not reset afterwards —
    # confirm whether the stale heap tail is intentional.
    if [ $need_to_gc_buffer -lt $buffer_gc_threshold ]; then
        return
    fi
    message "Garbage collecting..."
    # This is stupid
    local i
    # for (( i=0 ; i < lines; i=$[i + 1] ))
    # do
    #     new_buf[$[i + 1]]=${buf[${bix[$i]}]}
    # done
    i=0
    while [ $i -lt $lines ]
    do
        new_buf[$[i + 1]]=${buf[${bix[$i]}]}
        i=$[i + 1]
    done
    new_buf[0]=''
    buf=("${new_buf[@]}")
    # Rebuild the line index as the identity-plus-one mapping.
    # for (( i=0 ; i < lines; i=$[i + 1] ))
    # do
    #     bix[$i]=$[i + 1]
    # done
    i=0
    while [ $i -lt $lines ]
    do
        bix[$i]=$[i + 1]
        i=$[i + 1]
    done
    need_to_gc_buffer=0
    message "Garbage collecting...done"
}
# before after
# 1 1
# 2 2
# > 3 4 4
# 4 5 5
# 5 5 0
delete_line()
{
    # Remove line number $1 from the buffer by shifting the line
    # index entries up and shrinking the line count.  The line's text
    # stays in buf[] until the next garbage collection.
    declare -i at="$1"
    declare -i j=$at
    while [ $j -lt $[lines - 1] ]
    do
        bix[$j]=${bix[j + 1]}
        j=$[j + 1]
    done
    bix[$j]=0                   # clear the magic last line marker
    lines=$[lines - 1]
    need_to_gc_buffer=$[need_to_gc_buffer + 1]
}
insert_line()
{
    # Open a new empty line at index $1: shift the line index entries
    # down, grow the line count, and take a fresh cell from the end of
    # the buffer heap for the new line's text.
    declare -i at="$1"
    declare -i j
    if [ $at != $lines ]; then
        j=$lines
        while [ $j -gt $at ]
        do
            bix[$j]=${bix[$j - 1]}
            j=$[j - 1]
        done
    fi
    lines=$[lines + 1]
    bix[$lines]=0               # magic last line
    bix[$at]=$buf_size          # new line lives at the end of the heap
    buf[$buf_size]=''
    buf_size=$[buf_size + 1]
    need_to_gc_buffer=$[need_to_gc_buffer + 1]
}
delete_region()
{
    # Delete the text between (beg_line,beg_col) and (end_line,end_col),
    # inclusive of the end column.  Only the single-line case actually
    # removes text; multi-line deletes just force a redraw.
    # NOTE(review): the multi-line branches look unfinished — confirm.
    beg_line=$1
    beg_col=$2
    end_line=$3
    end_col=$4
    declare -i bi
    if [ $beg_line = $end_line ]; then # easy case: on one line
        bi=${bix[$beg_line]}
        buf[$bi]="${buf[$bi]:0:$beg_col}${buf[$bi]:$[end_col + 1]}"
        # Repaint the tail of the line if it is visible in the window.
        if [ $beg_line -ge $win_start -a \
             $beg_line -lt $[win_start + win_height] ]; then
            moveto $[beg_line - win_start] $beg_col
            # echo -n "${t_el}${buf[$bi]:$beg_col}"
            echo -n "${t_el}"
            render_line "${buf[$bi]:$beg_col}"
            moveto $y $x
        fi
    elif [ $beg_line -lt $end_line ]; then
        need_to_redraw=1
    else
        need_to_redraw=1
    fi
    if [ $modified = 0 ]; then
        modified=1
    fi
}
buffer_substring()
{
    # Return (in the global $result) the buffer text between
    # (beg_line,beg_col) and (end_line,end_col), inclusive of the
    # end column.
    # The whole computation had been commented out, so $result was
    # always empty and copy-region copied nothing.  Reinstated, with
    # two fixes over the old commented code: the substring length is
    # (end_col - beg_col + 1) rather than (end_col + 1), and the
    # middle-line loop starts at beg_line + 1 so the first line is
    # not appended twice.
    beg_line=$1
    beg_col=$2
    end_line=$3
    end_col=$4
    debugp "buffer_substring $1 $2 $3 $4"
    local str
    declare -i bi                       # buffer index
    if [ $beg_line = $end_line ]; then  # easy case: on one line
        bi=${bix[$beg_line]}
        str=${buf[$bi]:$beg_col:$[(end_col - beg_col) + 1]}
    else                                # multiple lines
        # the rest of the first line
        bi=${bix[$beg_line]}
        str=${buf[$bi]:$beg_col}
        # the whole middle lines
        local i=$[beg_line + 1]
        while [ $i -lt $end_line ]
        do
            bi=${bix[$i]}
            str=${str}${buf[$bi]}
            i=$[i + 1]
        done
        # the head of the last line
        bi=${bix[$end_line]}
        str=${str}${buf[$bi]:0:$[end_col + 1]}
    fi
    result="$str"
    return 0
}
###########################################################################
# File IO
###########################################################################
# Read a file into the buffer
read-file()
{
    # Load the file named by $1 into the buffer arrays.
    # buf[0] is reserved as the empty string; bix[] maps display line
    # numbers to buf[] cells, with bix[lines]=0 as the magic last line.
    if [ $# != 1 ]; then
        return 1
    fi
    local filename="$1"
    local ln
    declare -i i=0
    unset buf
    unset bix
    {
        # IFS= preserves leading/trailing whitespace on each line.
        # NOTE(review): read without -r mangles backslashes — confirm.
        while IFS= read ln
        do
            bix[$i]=$[i + 1]
            buf[$i + 1]="$ln"
            i=$[i + 1]
        done
    } < $filename
    lines=$i
    buf_size=$[lines + 2]
    buf[0]=''
    bix[$lines]=0
    return 0
}
write-buffer()
{
    # Write the buffer to the file named by $1, one line per entry,
    # following the bix[] line index.
    if [ $# != 1 ]; then
        return 1
    fi
    local filename="$1"
    local ln
    declare -i i=0
    declare -i bi=0
    {
        # for (( i=0; i < lines; i++ ))
        # do
        #     bi=${bix[$i]}
        #     echo "${buf[$bi]}"
        # done
        i=0
        while [ $i -lt $lines ]
        do
            bi=${bix[$i]}
            echo "${buf[$bi]}"
            i=$[i + 1]
        done
    } > $filename
    return 0
}
# Foreshortened substition doc:
#
# P:-W If P is null, use W is else P.
# P:=W If P is null, assign P=W. (can't do special params)
# P:?W If P is null, write W to stderr and exit
# P:+W If P is null, use NOTHING else W
# P:O P starting at offset O. Negative offset must be: ${P: -O}
# P:O:L P starting at offset O for length L. Negative L is from end.
# !P* Variables beginning with P
# !N[*] Indexes in array N. Use @ instead of * for doublequoted words.
# #P Length of P. If P[*] then array element count.
# P#W Clip shortest head of P matching W. ## is longest head of P.
# P%W Clip shortest tail of P matching W. %% is longest tail of P.
# P/E/S Replace first E in P with S. S can be omitted to delete.
# P//E/S Replace all E in P with S.
# P/#E/S Replace E beginning P with S.
# P/%E/S Replace E ending P with S.
#
# "[:" class ":]"
# where class is one of:
# alnum alpha ascii blank cntrl digit graph lower
# print punct space upper word xdigit
#
# extglob:
# PATTERN-LIST [pattern [| pattern]]
# `?(PATTERN-LIST)' zero or one
# `*(PATTERN-LIST)' zero or more
# `+(PATTERN-LIST)' one or more
# `@(PATTERN-LIST)' one
# `!(PATTERN-LIST)' anything except one
colorize_keyword()
{
# magenta back to green
syntax_line=${syntax_line// "$1" /'[35m' ${1} '[32m'}
syntax_line=${syntax_line/#"$1" /'[35m'${1} '[32m'}
syntax_line=${syntax_line/%"$1"/'[35m'${1}'[32m'}
}
colorize_builtin()
{
# blue back to green
syntax_line=${syntax_line// "$1" /'[34m' ${1} '[32m'}
syntax_line=${syntax_line/#"$1" /'[34m'${1} '[32m'}
syntax_line=${syntax_line/%"$1"/'[34m'${1}'[32m'}
}
colorize_var()
{
: # syntax_line=$
}
colorize_comment()
{
local s="$syntax_line"
# cyan back to green
syntax_line=${s/%"$1"*/'[36m'#${s#*#}'[32m'}
if [ ${#s} != ${#syntax_line} ]; then
return 0
else
return 1
fi
}
colorize_string()
{
local s="$syntax_line"
# cyan back to green
local inside="${s#*[^$1]$1}"
inside="${inside%\"*([^$1])}"
syntax_line=${s//$1*[^$1]\"/'[37m'$1$inside$1'[32m'}
}
bash_keywords="if fi then else elif while do done for case esac return echo read true exit break"
bash_builtins="alias bg cd declare dirs echo eval export jobs kill let local nohup printf pushd pwd read set shift shopt source suspend time typeset ulimit unalias unset wait"
# Colorizer for bash-mode
bash-mode-colorize()
{
syntax_line="$1"
if [ "$syntax_line" ]; then
if colorize_comment "#" ; then
:
else
for k in $bash_keywords
do
colorize_keyword "$k"
done
for k in $bash_builtins
do
colorize_builtin "$k"
done
colorize_string '"'
colorize_string "'"
fi
fi
}
# Mode for editing bash code
bash-mode()
{
colorizer=bash-mode-colorize
mode[0]='Bash'
}
# set a file's mode based on file name or shebang
set-file-mode()
{
# shebang
if [ "${buf[1]:0:3}" = '#!/' -a "${buf[1]: -4}" = 'bash' ] ; then
bash-mode
elif [ -z "${buf[1]##*-\*- * -\*-*}" ]; then
# mode string
local mode=${buf[1]#*-\*- }
mode=${mode% -\*-*}
case "$mode" in
ksh|sh-mode|sh-script|bash) bash-mode ;;
esac
fi
}
visit-file()
{
local filename="$1"
local b_name="${filename##*/}"
switch_to_buffer "$b_name"
read-file "$filename"
set-file-mode
modified=0
buf_filename="$filename"
buf_name="$b_name"
line=0
col=0
x=0
y=0
win_start=0
}
find-file()
{
local filename
prompt "Find file: "
filename=$(eval "echo $response") # eval so we can expand ~ $var etc..
if [ -f "$filename" ]; then
visit-file "$filename"
need_to_redraw=1
else
local b_name="${filename##*/}"
new_buffer "$b_name"
buf_filename="$filename"
buf_name="$b_name"
redraw
message '(New file)'
fi
}
save-buffer()
{
    # Save the current buffer to its file, prompting for a file name
    # if it does not have one, and making a one-time ~ backup of any
    # existing file the first time it is saved.
    if [ $modified = 0 ]; then
        message "(No changes need be saved)"
        return
    fi
    if [ -z "$buf_filename" ] ; then
        if prompt "Write file: " ; then
            if [ ! "$response" ]; then
                message "Not saving"
                return
            else
                buf_filename="$response"
            fi
        else
            return # eof or error
        fi
    fi
    # make a backup the first time saving
    if [ -f "$buf_filename" -a $unsaved = 1 ]; then
        mv "$buf_filename" "$buf_filename"~
    fi
    write-buffer "$buf_filename"
    message "Wrote $buf_filename"
    modified=0
    unsaved=0
}
###########################################################################
# Interrupts, cleanup and exiting
###########################################################################
keyboard-quit()
{
debug "$FUNCNAME"
ring_bell
}
suspend-bmacs()
{
echo -n "$t_clear"
moveto $screen_rows 0 # goto to bottom of the screen
reset_term
# why doesn't "suspend" work
kill -TSTP $$
init_term
need_to_redraw=1
}
fatal()
{
    # Restore the terminal, print the message (with escapes honored,
    # plus a blank line) and die with status 1.
    reset_term
    printf '%b\n\n' "$*"
    exit 1
}
kill-bmacs()
{
quit
}
quit()
{
    # Clear the screen, restore terminal modes and exit successfully.
    echo -n "$t_clear"
    moveto $screen_rows 0 # goto to bottom of the screen
    reset_term
    exit 0
}
interrupt()
{
trap INT # remove interrupt signal handler
echo 'Interrupt!'
quit
}
winch()
{
#trap WINCH # remove interrupt signal handler
get_screen_size
need_to_redraw=1
# echo 'Winch!'
}
garbage_collect()
{
gc_buffer
}
###########################################################################
# Debugging
###########################################################################
debug()
{
    # Show a message in the echo area, but only when debugging is on.
    if [ "x$debug_flag" = x1 ] ; then
        message "$*"
    fi
}
debugp() # with pause
{
if [ x$debug_flag = x1 ] ; then
message "$*"
read -sr -d '' -n 1
fi
}
###########################################################################
# Keymaps
###########################################################################
# This is so meta characters below won't get interpreted as multi-byte
if [ x$LANG != x ]; then
saved_LANG=$LANG
LANG=C
fi
if [ x$LC_CTYPE != x ]; then
saved_LC_CTYPE=$LC_CTYPE
LC_CTYPE=C
fi
# This is way too slow, but how else can we do it?
init_code_chars()
{
local i=0
while [ $i -lt 256 ]
do
v_code_char[$i]=$(echo -e "\\"$(printf "%03o" $i))
let "i++"
done
}
# Outputs the character for the given integer code
code_char()
{
echo ${v_code_char[$1]}
}
# @@@ Is there a better way to do this without using an external program?
char_to_int()
{
case "$1" in
# Control chars
^@) return 0 ;; # special case
) return 1 ;; ) return 2 ;; ) return 3 ;;
) return 4 ;; ) return 5 ;; ) return 6 ;; ) return 7 ;;
) return 8 ;;
# Ye olde whitespace
" ") return 9 ;;
"
") return 10 ;;
# more controls
) return 11 ;; ) return 12 ;;
) return 13 ;; ) return 14 ;;
) return 15 ;; ) return 16 ;; ) return 17 ;; ) return 18 ;;
) return 19 ;; ) return 20 ;; ) return 21 ;; ) return 22 ;;
) return 23 ;; ) return 24 ;; ) return 25 ;; ) return 26 ;;
) return 27 ;; ) return 28 ;; ) return 29 ;; ) return 30 ;;
) return 31 ;;
# Space
" ") return 32 ;;
# Pucntuation
\!) return 33 ;; \") return 34 ;; \#) return 35 ;; \$) return 36 ;;
\%) return 37 ;; \&) return 38 ;; \') return 39 ;; \() return 40 ;;
\)) return 41 ;; \*) return 42 ;; \+) return 43 ;; \,) return 44 ;;
-) return 45 ;; .) return 46 ;; /) return 47 ;;
# Numbers
0) return 48 ;; 1) return 49 ;; 2) return 50 ;; 3) return 51 ;;
4) return 52 ;; 5) return 53 ;; 6) return 54 ;; 7) return 55 ;;
8) return 56 ;; 9) return 57 ;;
# More pucntuation
:) return 58 ;; \;) return 59 ;; \<) return 60 ;; =) return 61 ;;
\>) return 62 ;; \?) return 63 ;; @) return 64 ;;
# Capitol letters
A) return 65 ;; B) return 66 ;; C) return 67 ;; D) return 68 ;;
E) return 69 ;; F) return 70 ;; G) return 71 ;; H) return 72 ;;
I) return 73 ;; J) return 74 ;; K) return 75 ;; L) return 76 ;;
M) return 77 ;; N) return 78 ;; O) return 79 ;; P) return 80 ;;
Q) return 81 ;; R) return 82 ;; S) return 83 ;; T) return 84 ;;
U) return 85 ;; V) return 86 ;; W) return 87 ;; X) return 88 ;;
Y) return 89 ;; Z) return 90 ;;
# Even more pucntuation
\[) return 91 ;; \\) return 92 ;; \]) return 93 ;; \^) return 94 ;;
_) return 95 ;; \`) return 96 ;;
# Lowercase letters
a) return 97 ;; b) return 98 ;; c) return 99 ;; d) return 100 ;;
e) return 101 ;; f) return 102 ;; g) return 103 ;; h) return 104 ;;
i) return 105 ;; j) return 106 ;; k) return 107 ;; l) return 108 ;;
m) return 109 ;; n) return 110 ;; o) return 111 ;; p) return 112 ;;
q) return 113 ;; r) return 114 ;; s) return 115 ;; t) return 116 ;;
u) return 117 ;; v) return 118 ;; w) return 119 ;; x) return 120 ;;
y) return 121 ;; z) return 122 ;;
# Yet more pucntuation
\{) return 123 ;; \|) return 124 ;; \}) return 125 ;; \~) return 126 ;;
# Delete
) return 127 ;;
# Hibittypiddles
# Problem: If LC_CTYPE is not C (or equivalent) these can have
# multibyte interpretations, which we don't want.
) return 128 ;; ) return 129 ;; ) return 130 ;;
) return 131 ;; ) return 132 ;;
) return 133 ;;
) return 134 ;; ) return 135 ;; ) return 136 ;;
) return 137 ;; ) return 138 ;; ) return 139 ;;
) return 140 ;; ) return 141 ;; ) return 142 ;;
) return 143 ;; ) return 144 ;; ) return 145 ;;
) return 146 ;; ) return 147 ;; ) return 148 ;;
) return 149 ;; ) return 150 ;; ) return 151 ;;
) return 152 ;; ) return 153 ;; ) return 154 ;;
) return 155 ;; ) return 156 ;; ) return 157 ;;
) return 158 ;; ) return 159 ;;
# latin1 foozers
) return 160 ;; ¡) return 161 ;; ¢) return 162 ;; £) return 163 ;;
¤) return 164 ;; ¥) return 165 ;; ¦) return 166 ;; §) return 167 ;;
¨) return 168 ;; ©) return 169 ;; ª) return 170 ;; «) return 171 ;;
¬) return 172 ;; ) return 173 ;; ®) return 174 ;; ¯) return 175 ;;
°) return 176 ;; ±) return 177 ;; ²) return 178 ;; ³) return 179 ;;
´) return 180 ;; µ) return 181 ;; ¶) return 182 ;; ·) return 183 ;;
¸) return 184 ;; ¹) return 185 ;; º) return 186 ;; ») return 187 ;;
¼) return 188 ;; ½) return 189 ;; ¾) return 190 ;; ¿) return 191 ;;
À) return 192 ;; Á) return 193 ;; Â) return 194 ;; Ã) return 195 ;;
Ä) return 196 ;; Å) return 197 ;; Æ) return 198 ;; Ç) return 199 ;;
È) return 200 ;; É) return 201 ;; Ê) return 202 ;; Ë) return 203 ;;
Ì) return 204 ;; Í) return 205 ;; Î) return 206 ;; Ï) return 207 ;;
Ð) return 208 ;; Ñ) return 209 ;; Ò) return 210 ;; Ó) return 211 ;;
Ô) return 212 ;; Õ) return 213 ;; Ö) return 214 ;; ×) return 215 ;;
Ø) return 216 ;; Ù) return 217 ;; Ú) return 218 ;; Û) return 219 ;;
Ü) return 220 ;; Ý) return 221 ;; Þ) return 222 ;; ß) return 223 ;;
à) return 224 ;; á) return 225 ;; â) return 226 ;; ã) return 227 ;;
ä) return 228 ;; å) return 229 ;; æ) return 230 ;; ç) return 231 ;;
è) return 232 ;; é) return 233 ;; ê) return 234 ;; ë) return 235 ;;
ì) return 236 ;; í) return 237 ;; î) return 238 ;; ï) return 239 ;;
ð) return 240 ;; ñ) return 241 ;; ò) return 242 ;; ó) return 243 ;;
ô) return 244 ;; õ) return 245 ;; ö) return 246 ;; ÷) return 247 ;;
ø) return 248 ;; ù) return 249 ;; ú) return 250 ;; û) return 251 ;;
ü) return 252 ;; ý) return 253 ;; þ) return 254 ;; ÿ) return 255 ;;
esac
return -1; # @@@ Bad! XXX
}
if [ x$saved_LANG != x ]; then
LANG=$saved_LANG
fi
if [ x$saved_LC_CTYPE != x ]; then
LC_CTYPE=$saved_LC_CTYPE
fi
# test_char_to_int()
# {
# until false
# do
# IFS=: read -sr -d '' -n 1 c
# char_to_int "$c"
# echo $?
# done
# }
# int to character description
# this really stupid
char_desc()
{
local ce="command echo"
case "$1" in
# Control chars
0) $ce '^@' ;; 1) $ce '^A' ;; 2) $ce '^B' ;; 3) $ce '^C' ;;
4) $ce '^D' ;; 5) $ce '^E' ;; 6) $ce '^F' ;; 7) $ce '^G' ;;
8) $ce '^H' ;;
# Ye olde whitespace
9) $ce '^I' ;;
10) $ce '^J' ;;
# more controls
11) $ce '^K' ;; 12) $ce '^L' ;; 13) $ce '^M' ;; 14) $ce '^N' ;;
15) $ce '^O' ;; 16) $ce '^P' ;; 17) $ce '^Q' ;; 18) $ce '^R' ;;
19) $ce '^S' ;; 20) $ce '^T' ;; 21) $ce '^U' ;; 22) $ce '^V' ;;
23) $ce '^W' ;; 24) $ce '^X' ;; 25) $ce '^Y' ;; 26) $ce '^Z' ;;
27) $ce '^[' ;; 28) $ce '^\\' ;; 29) $ce '^]' ;; 30) $ce '^^';;
31) $ce '^_' ;;
# Space
32) $ce 'Space' ;;
# Pucntuation
33) $ce '!';; 34) $ce '"';; 35) $ce '#';; 36) $ce "\$";;
37) $ce '%';; 38) $ce '&';; 39) $ce "\'";; 40) $ce '(';;
41) $ce ')';; 42) $ce '*';; 43) $ce '+';; 44) $ce ',';;
45) $ce '-';; 46) $ce '.';; 47) $ce '/';;
# Numbers
48) $ce '0';; 49) $ce '1';; 50) $ce '2';; 51) $ce '3';;
52) $ce '4';; 53) $ce '5';; 54) $ce '6';; 55) $ce '7';;
56) $ce '8';; 57) $ce '9';;
# More pucntuation
58) $ce ':';; 59) $ce ';' ;; 60) $ce '<' ;; 61) $ce '=' ;;
62) $ce '>' ;; 63) $ce '?' ;; 64) $ce '@' ;;
# Capitol letters
65) $ce 'A';; 66) $ce 'B';; 67) $ce 'C';; 68) $ce 'D';;
69) $ce 'E';; 70) $ce 'F';; 71) $ce 'G';; 72) $ce 'H';;
73) $ce 'I';; 74) $ce 'J';; 75) $ce 'K';; 76) $ce 'L';;
77) $ce 'M';; 78) $ce 'N';; 79) $ce 'O';; 80) $ce 'P';;
81) $ce 'Q';; 82) $ce 'R';; 83) $ce 'S';; 84) $ce 'T';;
85) $ce 'U';; 86) $ce 'V';; 87) $ce 'W';; 88) $ce 'X';;
89) $ce 'Y';; 90) $ce 'Z';;
# Even more pucntuation
91) $ce '[' ;; 92) $ce '\\';; 93) $ce ']';; 94) $ce '^';;
95) $ce '_' ;; 96) $ce '`' ;;
# Lowercase letters
97) $ce 'a';; 98) $ce 'b';; 99) $ce 'c';; 100) $ce 'd';;
101) $ce 'e';; 102) $ce 'f';; 103) $ce 'g';; 104) $ce 'h';;
105) $ce 'i';; 106) $ce 'j';; 107) $ce 'k';; 108) $ce 'l';;
109) $ce 'm';; 110) $ce 'n';; 111) $ce 'o';; 112) $ce 'p';;
113) $ce 'q';; 114) $ce 'r';; 115) $ce 's';; 116) $ce 't';;
117) $ce 'u';; 118) $ce 'v';; 119) $ce 'w';; 120) $ce 'x';;
121) $ce 'y';; 122) $ce 'z';;
# Yet more pucntuation
123) $ce '{';; 124) $ce '|';; 125) $ce '}' ;; 126) $ce '~';;
# Delete
127) $ce 'Delete' ;;
# Hibittypiddles
128) $ce 'C-M-@' ;; 129) $ce 'C-M-a' ;; 130) $ce 'C-M-b' ;;
131) $ce 'C-M-c' ;; 132) $ce 'C-M-d' ;; 133) $ce 'C-M-e' ;;
134) $ce 'C-M-f' ;; 135) $ce 'C-M-g' ;; 136) $ce 'C-M-h' ;;
137) $ce 'M-TAB' ;; 138) $ce 'M-LFD' ;; 139) $ce 'C-M-k' ;;
140) $ce 'C-M-l' ;; 141) $ce 'M-RET' ;; 142) $ce 'C-M-n' ;;
143) $ce 'C-M-o' ;; 144) $ce 'C-M-p' ;; 145) $ce 'C-M-q' ;;
146) $ce 'C-M-r' ;; 147) $ce 'C-M-s' ;; 148) $ce 'C-M-t' ;;
149) $ce 'C-M-u' ;; 150) $ce 'C-M-v' ;; 151) $ce 'C-M-w' ;;
152) $ce 'C-M-x' ;; 153) $ce 'C-M-y' ;; 154) $ce 'C-M-z' ;;
155) $ce 'M-ESC' ;; 156) $ce 'C-M-\\' ;; 157) $ce 'C-M-]' ;;
158) $ce 'C-M-^' ;; 159) $ce 'C-M-_' ;;
# latin1 foozers
160) $ce 'M-SPC';; 161) $ce 'M-!';; 162) $ce 'M-"';;
163) $ce 'M-#';; 164) $ce "M-\$";; 165) $ce 'M-%';;
166) $ce 'M-&';; 167) $ce "M-'";; 168) $ce 'M-(';;
169) $ce 'M-)';; 170) $ce 'M-*';; 171) $ce 'M-+';;
172) $ce 'M-,';; 173) $ce 'M--';; 174) $ce 'M-.';;
175) $ce 'M-/';; 176) $ce 'M-0';; 177) $ce 'M-1';;
178) $ce 'M-2';; 179) $ce 'M-3';; 180) $ce 'M-4';;
181) $ce 'M-5';; 182) $ce 'M-6';; 183) $ce 'M-7';;
184) $ce 'M-8';; 185) $ce 'M-9';; 186) $ce 'M-:';;
187) $ce 'M-;';; 188) $ce 'M-<';; 189) $ce 'M-=';;
190) $ce 'M->';; 191) $ce 'M-?';; 192) $ce 'M-@';;
193) $ce 'M-A';; 194) $ce 'M-B';; 195) $ce 'M-C';;
196) $ce 'M-D';; 197) $ce 'M-E';; 198) $ce 'M-F';;
199) $ce 'M-G';; 200) $ce 'M-H';; 201) $ce 'M-I';;
202) $ce 'M-J';; 203) $ce 'M-K';; 204) $ce 'M-L';;
205) $ce 'M-M';; 206) $ce 'M-N';; 207) $ce 'M-O';;
208) $ce 'M-P';; 209) $ce 'M-Q';; 210) $ce 'M-R';;
211) $ce 'M-S';; 212) $ce 'M-T';; 213) $ce 'M-U';;
214) $ce 'M-V';; 215) $ce 'M-W';; 216) $ce 'M-X';;
217) $ce 'M-Y';; 218) $ce 'M-Z';; 219) $ce 'M-[';;
220) $ce 'M-\\';; 221) $ce 'M-]';; 222) $ce 'M-^';;
223) $ce 'M-_';; 224) $ce 'M-`';; 225) $ce 'M-a';;
226) $ce 'M-b';; 227) $ce 'M-c';; 228) $ce 'M-d';;
229) $ce 'M-e';; 230) $ce 'M-f';; 231) $ce 'M-g';;
232) $ce 'M-h';; 233) $ce 'M-i';; 234) $ce 'M-j';;
235) $ce 'M-k';; 236) $ce 'M-l';; 237) $ce 'M-m';;
238) $ce 'M-n';; 239) $ce 'M-o';; 240) $ce 'M-p';;
241) $ce 'M-q';; 242) $ce 'M-r';; 243) $ce 'M-s';;
244) $ce 'M-t';; 245) $ce 'M-u';; 246) $ce 'M-v';;
247) $ce 'M-w';; 248) $ce 'M-x';; 249) $ce 'M-y';;
250) $ce 'M-z';; 251) $ce 'M-{';; 252) $ce 'M-|';;
253) $ce 'M-}';; 254) $ce 'M-~';; 255) $ce 'M-DEL';;
# whatever?
*) $ce "Unknown";;
esac
}
init_keymap()
{
global_map[0]='set-mark'
global_map[1]='beginning-of-line'
global_map[2]='backward-char'
global_map[3]='control-fucking-c-dude' # @@@
global_map[4]='delete-char'
global_map[5]='end-of-line'
global_map[6]='forward-char'
global_map[7]='keyboard-quit' # @@@ what should this do?
# global_map[8]='delete-backward-char' # not help!
global_map[8]='help-command' # @@@ should be based on stty erase
global_map[9]='self-insert' # not indent!
global_map[10]='newline'
global_map[11]='kill-line'
global_map[12]='redraw'
global_map[13]='newline'
global_map[14]='next-line'
# i never use this, so i've allways wanted to change it into (^O)ops! (undo)?
global_map[15]='open-line'
global_map[16]='previous-line'
global_map[17]='quote-char'
global_map[18]='search-backward'
global_map[19]='search-forward'
global_map[20]='transpose-chars' # i don't use this
global_map[21]='universal-oneness'
global_map[22]='scroll-up'
global_map[23]='kill-region' # wipe my ass
global_map[24]='ctrl-x-map'
global_map[25]='yank'
global_map[26]='up-a-line' # i am rocksor
global_map[27]='esc-map'
global_map[28]='' # C-\ what a waste!
global_map[29]='' # @@ no recursive edit y0!
global_map[30]='' # wastage!
global_map[31]='undo'
local i
# for (( i=32 ; i < 126 ; i++ ))
# do
# global_map[$i]='self-insert'
# done
i=32
while [ $i -lt 126 ]
do
global_map[$i]='self-insert'
i=$[i + 1]
done
global_map[127]='delete-backward-char'
# Meta chars
global_map[160]='set-mark' # meta-space (non-standard)
global_map[188]='beginning-of-buffer' # meta-<
global_map[189]='describe-key-briefly' # meta-=
global_map[190]='end-of-buffer' # meta->
global_map[196]='toggle-debug' # meta-D
global_map[226]='backward-word' # meta-b
global_map[228]='kill-word' # meta-d
global_map[230]='forward-word' # meta-f
global_map[246]='scroll-down' # meta-v
global_map[236]='toggle-line-numbers' # meta-l
global_map[237]='toggle-update-modeline' # meta-m
global_map[247]='copy-region' # meta-w
global_map[248]='execute-extended-command' # meta-x
global_map[250]='down-a-line' # meta-z
global_map[255]='set-mark' # meta-?
# Control-X map
ctrl_x_map[2]='list-buffers' # C-x C-b
ctrl_x_map[3]='kill-bmacs' # C-x C-c
ctrl_x_map[6]='find-file' # C-x C-f
ctrl_x_map[19]='save-buffer' # C-x C-s
ctrl_x_map[24]='exchange-point-and-mark' # C-x C-x
ctrl_x_map[26]='suspend-bmacs' # C-x C-z
ctrl_x_map[98]='switch-to-buffer' # C-x b
ctrl_x_map[107]='kill-buffer' # C-x k
# ESC map
esc_map[32]='set-mark' # ESC space (non-standard)
esc_map[60]='beginning-of-buffer' # ESC <
esc_map[61]='describe-key-briefly' # ESC =
esc_map[62]='end-of-buffer' # ESC >
esc_map[68]='toggle-debug' # ESC D
esc_map[91]='funkey' # ESC [
esc_map[98]='backward-word' # ESC b
esc_map[100]='kill-word' # ESC d
esc_map[102]='forward-word' # ESC f
esc_map[108]='toggle-line-numbers' # ESC l
esc_map[109]='toggle-update-modeline' # ESC m
esc_map[119]='copy-region' # ESC w
esc_map[120]='execute-extended-command' # ESC x
esc_map[118]='scroll-down' # ESC v
esc_map[122]='down-a-line' # ESC z
# minibuf map
mini_map=(${global_map[*]})
mini_map[7]='minibuf-abort' # C-g
mini_map[9]='minibuf-complete' # C-i (Tab)
mini_map[10]='minibuf-accept' # C-j (Newline)
mini_map[13]='minibuf-accept' # C-m (Return)
mini_map[14]='minibuf-next-history' # C-n
unset mini_map[15] # C-o
mini_map[16]='minibuf-previous-history' # C-p
unset mini_map[18] # C-r
unset mini_map[19] # C-s
i=32
while [ $i -lt 126 ]
do
mini_map[$i]='minibuf-self-insert'
i=$[i + 1]
done
}
describe-key-briefly()
{
debug "$FUNCNAME"
local kmap
local prefix
prefix=
if [ $# = 1 ] ; then
kmap="$1"
case $kmap in
ctrl_x_map) prefix="^X " ;;
esc_map) prefix="ESC " ;;
esac
else
kmap=global_map
fi
message "Describe key briefly: "
IFS=: read -sr -d '' -n 1 c
char_to_int "$c"
local key=$?
local k="$key"
local desc=$(char_desc $key)
local binding=$(eval command echo '${'$kmap'[$key]}' )
if [ ! -z $binding ]; then
local f=$binding
local t=$(type -t $f)
if [ ! -z "$t" ] ; then
case "$t" in
function)
if [ -z ${f#*-map} ] ; then
## @@@ should fix to work for any keymap
describe-key-briefly "${f//-/_}"
return
fi
;;
esac
message "'${prefix}${desc}' runs the $t $f"
else
message "'${prefix}${desc}' should run the command $f"
fi
else
message "'${prefix}${desc}' is not bound"
fi
}
funkey()
{
message ""
IFS=: read -sr -d '' -n 1 c
char_to_int "$c"
key=$?
case "$c" in
A) previous-line ;;
B) next-line ;;
C) forward-char ;;
D) backward-char ;;
esac
}
esc-map()
{
local n=$SECONDS
IFS=: read -sr -d '' -n 1 -t 1 c
if [ $? != 0 ] ; then
if [ $[SECONDS - n] -ge 1 ]; then
message "ESC "
IFS=: read -sr -d '' -n 1 c
else
return
fi
fi
char_to_int "$c"
key=$?
if [ ! -z ${esc_map[$key]} ]; then
if [ ! -z $(type -t ${esc_map[$key]}) ] ; then
${esc_map[$key]}
else
message "${esc_map[$key]} is not defined"
fi
else
message "ESC $c is not bound"
fi
}
ctrl-x-map()
{
message "C-x "
IFS=: read -sr -d '' -n 1 c
char_to_int "$c"
key=$?
if [ ! -z ${ctrl_x_map[$key]} ]; then
if [ ! -z $(type -t ${ctrl_x_map[$key]}) ] ; then
${ctrl_x_map[$key]}
else
message "${ctrl_x_map[$key]} is not defined"
fi
else
message "C-x $c is not bound"
fi
}
execute-extended-command()
{
prompt "M-x "
command="$response"
# eval "$response"
typeout "$response"
}
###########################################################################
# Main event loop
###########################################################################
main()
{
initialize
trap 'interrupt' INT
trap 'winch' WINCH
parse_args "$@"
local n
local t
exit_flag=0
until [ $exit_flag = 1 ]; do
# Redraw if we have to
if [ $need_to_redraw = 1 ]; then
redraw
fi
n=$SECONDS
IFS=: read -sr -d '' -n 1 -t $timeout c
if [ $? != 0 ]; then
# eof, error or timeout
if [ $[SECONDS - n] -ge $timeout ]; then
garbage_collect
continue
fi
fi
char_to_int "$c"
key=$?
if [ ! -z ${global_map[$key]} ]; then
t=$(type -t ${global_map[$key]})
if [ ! -z "$t" ] ; then
if [ "$t" = 'function' ]; then
${global_map[$key]}
else
case "$t" in
alias|keyword|builtin) ${global_map[$key]} ;;
file)
reset_term
${global_map[$key]}
init_term
need_to_redraw=1
;;
esac
fi
else
message "${global_map[$key]} is not defined"
fi
else
message "$c is not bound"
fi
# Make sure the modeline is drawn @@@
if [ $update_modeline = 1 ]; then
draw_modeline
moveto $y $x
fi
done
}
main "$@"
exit 0
|
nibbula/bmacs
|
bmacs.bash
|
Shell
|
mit
| 61,269 |
#!/usr/bin/env bash
# Build and install cower and pacaur from the AUR in a throw-away
# build directory, which is removed even if a step fails.
set -euo pipefail

build=$(mktemp -d)
trap 'rm -rf "$build"' EXIT

pushd "$build"

git clone https://aur.archlinux.org/cower.git
git clone https://aur.archlinux.org/pacaur.git

pushd "cower"
# cower's PGP key is frequently absent from local keyrings.
makepkg -sri --skippgpcheck
popd

pushd "pacaur"
makepkg -sri
popd

popd
|
thornmir/dotfiles
|
archsetup/install_pacaur.sh
|
Shell
|
mit
| 281 |
#!/bin/bash -eu
# Post a "new blog version deployed" notification to Slack.
# Requires TRAVIS_COMMIT_MESSAGE, SLACK_CHANNEL, SLACK_USERNAME,
# SLACK_EMOJI and SLACK_WEBHOOK_URL in the environment.
# Strip quote characters that would break the hand-built JSON payload.
TEXT=$( echo "${TRAVIS_COMMIT_MESSAGE}" | tr -d "\"" | tr -d "'" )
PAYLOAD=$( printf '{"channel":"#%s","username":"%s","icon_emoji":":%s:","attachments":[{"color":"#36a64f","pretext":"Nouvelle version du blog en prod","title":"%s","title_link":"https://blog.eleven-labs.com/"}]}' "${SLACK_CHANNEL}" "${SLACK_USERNAME}" "${SLACK_EMOJI}" "${TEXT}" )
curl \
    -X POST \
    -H "Content-type: application/json" \
    --data "${PAYLOAD}" \
    "${SLACK_WEBHOOK_URL}"
|
eleven-labs/eleven-labs.github.io
|
bin/notify-slack.sh
|
Shell
|
mit
| 482 |
#!/bin/sh
# Initialize the application database and role.
# Expects POSTGRES_USER, POSTGRES_PASSWORD and POSTGRES_DB in the
# environment (as in the official postgres docker image).
psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" <<-EOSQL
    CREATE DATABASE $POSTGRES_DB;
    -- PASSWORD takes a string literal; the unquoted form was a SQL
    -- syntax error and aborted the script under ON_ERROR_STOP.
    CREATE USER $POSTGRES_USER WITH PASSWORD '$POSTGRES_PASSWORD';
    GRANT ALL PRIVILEGES ON DATABASE $POSTGRES_DB TO $POSTGRES_USER;
EOSQL
|
arturtamborski/wypok
|
images/db/init/init-db.sh
|
Shell
|
mit
| 237 |
#!/bin/tcsh
# Build AMBER topology/coordinate files for alanine dipeptide in
# explicit solvent with tleap, then generate a PDB from them.
# Name of system
setenv SYSTEM alanine-dipeptide
# Clean up old files, if present.
rm -f leap.log ${SYSTEM}.{crd,prmtop,pdb}
# Create prmtop/crd files.
tleap -f setup.leap.in
# Create PDB file.
#cat ${SYSTEM}.crd | ambpdb -p ${SYSTEM}.prmtop > ${SYSTEM}.pdb
python generate-pdb.py
|
choderalab/openmmtools
|
openmmtools/data/alanine-dipeptide-explicit/run.sh
|
Shell
|
mit
| 298 |
# Count the FASTA records in $TEST1 (one '>' header per record).
NB=$(grep -c '^>' $TEST1)
# For every record, verify that its sequence lines come out sorted after dsort.
for ((i = 0; i < NB; i++)); do
    ufasta dsort $TEST1 | ufasta extract -n "read$i" | tail -n +2 | sort -C
    EXPECT_SUCCESS "Sorted entry $i"
done
|
gpertea/stringtie
|
SuperReads_RNA/global-1/ufasta/tests/test_dsort.sh
|
Shell
|
mit
| 179 |
#!/bin/sh
# Build, sign and pack200-compress every jar needed for the JNLP
# (Java Web Start) distribution, then lay out the web directory under
# target/jnlp (jnlp descriptor, .htaccess, splash and icon images).

# clean up any old stuff
mvn clean

# compile and create jar from main src
cd ..
mvn install
cd -

# create directory structure I'm going to need
mkdir target/jnlp

echo "copying existing jars"
# copy all the jars I'm going to need
# copy our own jar
cp target/JGE_ARTIFACT_ID-1.0-SNAPSHOT.jar target/jnlp/
cp ~/.m2/repository/org/owenbutler/jGameEngine/1.0-SNAPSHOT/jGameEngine-1.0-SNAPSHOT.jar target/jnlp/
cp ~/.m2/repository/org/springframework/spring-core/2.5.2/spring-core-2.5.2.jar target/jnlp/
cp ~/.m2/repository/org/springframework/spring-beans/2.5.2/spring-beans-2.5.2.jar target/jnlp/
cp ~/.m2/repository/commons-lang/commons-lang/2.2/commons-lang-2.2.jar target/jnlp/
cp ~/.m2/repository/log4j/log4j/1.2.14/log4j-1.2.14.jar target/jnlp/
cp ~/.m2/repository/commons-logging/commons-logging-api/1.0.4/commons-logging-api-1.0.4.jar target/jnlp/
cp ~/.m2/repository/slick-util/jogg/0.0.7/jogg-0.0.7.jar target/jnlp/
cp ~/.m2/repository/slick-util/jorbis/0.0.15/jorbis-0.0.15.jar target/jnlp/
cp ~/.m2/repository/slick-util/slick-util/20081222/slick-util-20081222.jar target/jnlp/
cp ~/lib/lwjgl-2.0.1/jar/lwjgl.jar target/jnlp/
cp ~/lib/lwjgl-2.0.1/jar/lwjgl_util.jar target/jnlp/
cp ~/lib/lwjgl-2.0.1/jar/jinput.jar target/jnlp/
# copy jars for native libs
cp ~/lib/lwjgl-2.0.1/native/*.jar target/jnlp/

echo "adding manifest to jar"
# add our manifest file to our jar
jar ufm target/jnlp/JGE_ARTIFACT_ID-1.0-SNAPSHOT.jar manifest.txt

# keytool -genkey -keystore jGameEngineStore -alias engineStore
# Enter keystore password: 45rtfgvb
# NOTE(review): the keystore password is hard-coded here and below — move it
# to an environment variable or a secrets file.

echo "signing jars"
# sign jars: repack first so signing is stable, sign, then pack200-compress.
# ($(…) replaces the original backticks; "$file" is quoted.)
for file in $(find target/jnlp -name "*.jar");
do
    pack200 --modification-time=latest --deflate-hint=true --strip-debug --repack "$file"
    jarsigner -keystore jGameEngineStore -storepass 45rtfgvb -keypass 45rtfgvb "$file" engineStore
    pack200 --modification-time=latest --deflate-hint=true --strip-debug "${file}.pack.gz" "$file"
done

# create var files (Apache type-map entries so clients can pick the
# pack200-gzip variant when they support it)
cd target/jnlp
# glob instead of parsing `ls` output
for file in *.jar;
do
    # create var file
    # URI: packed/file.jar.pack.gz
    # Content-Type: x-java-archive
    # Content-Encoding: pack200-gzip
    # URI: unpacked/file.jar
    # Content-Type: x-java-archive
    echo "URI: ${file}" > "${file}.var"
    echo "" >> "${file}.var"
    echo "URI: packed/${file}.pack.gz" >> "${file}.var"
    echo "Content-Type: x-java-archive" >> "${file}.var"
    echo "Content-Encoding: pack200-gzip" >> "${file}.var"
    echo "" >> "${file}.var"
    echo "URI: unpacked/${file}" >> "${file}.var"
    echo "Content-Type: x-java-archive" >> "${file}.var"
done

mkdir packed
mv *.pack.gz packed/

mkdir unpacked
mv *.jar unpacked/

cd -

# copy jnlp file and .htaccess
cp src/main/jnlp/jnlp.xml target/jnlp/game.jnlp
cp src/main/jnlp/.htaccess target/jnlp/

# copy splash and icon file
cp gfxPSD/splash.gif target/jnlp/
cp gfxPSD/icon.gif target/jnlp/
|
owenbutler/gamedev
|
baseGame/assembleJNLP.sh
|
Shell
|
mit
| 2,830 |
#!/bin/sh
# Display the date of Easter for the current and the next year using
# ncal's -e option (which prints the date of Easter for a given year).
echo "Date of Easter"
# printf instead of echo -n: echo's -n flag is not portable under /bin/sh.
printf 'Current year: '
ncal -e "$(date +"%Y")"
printf 'Next year: '
# NOTE(review): date --date is a GNU extension; BSD date needs -v+1y instead.
ncal -e "$(date --date "next year" +"%Y")"
|
milosz/shell-octo-adventure
|
scripts/easter_date.sh
|
Shell
|
mit
| 168 |
#!/bin/bash
# ---
# Decreases the volume for the current sink by STEP percent.
# (setVolume clamps the result to the valid range.)
# ---
function decreaseVolume() {
    local currentSink
    local currentVolume
    currentSink=$(getSink)
    currentVolume=$(getVolume "$currentSink")
    setVolume $((currentVolume - STEP))
}
# ---
# Returns the name for the provided sink.
#
# @param $1 The sink index.
#
# @return The name for the sink.
# ---
function getName() {
    # Print the name column of the row whose first column is the given index.
    pactl list sinks short | awk -v sink="$1" '$1 == sink { print $2 }'
}
# ---
# Returns the current sink. It will show a notification and exit the script
# when pulseaudio returns an error.
#
# @return The current sink index.
# ---
function getSink() {
    local index=""

    # Only query pacmd when the daemon is actually running; the default sink
    # is the one marked with "* index:" in pacmd's output.
    pulseaudio --check && index=$(pacmd list-sinks | awk '/\* index:/{print $3}')

    if [[ -z "$index" ]]; then
        sendNotification "Volume" "Unable to get sink"
        exit
    fi

    echo $index
}
# ---
# Returns the list of sink inputs based on the current sink index.
#
# @param $1 The sink index.
#
# @return The list of sink input indices, one per line.
# ---
function getInputs() {
    # Find "sink: N " lines for the requested sink and print the "index:"
    # value that appears within the 4 preceding lines of each match.
    # NOTE(review): relies on pacmd's exact output layout (index within 4
    # lines of sink) — fragile across pulseaudio versions; confirm.
    pacmd list-sink-inputs | grep -B 4 "sink: $1 " | awk '/index:/{print $2}'
}
# ---
# Returns the volume for the provided sink.
#
# @param $1 The sink index.
#
# @return The volume of the sink as a bare number (percent, no '%' sign).
# ---
function getVolume() {
    # Scrape pacmd's block for this sink: take its "volume:" line (excluding
    # "base volume:"), grab the first percentage, then strip the '%' sign
    # and any padding spaces so only the number remains.
    pacmd list-sinks | grep -A 15 'index: '"$1"'' | grep 'volume:' \
        | grep -E -v 'base volume:' | awk -F : '{print $3; exit}' \
        | grep -o -P '.{0,3}%' | sed 's/.$//' | tr -d ' '
}
# ---
# Increases the volume for the current sink by STEP percent.
# (setVolume clamps the result to the valid range.)
# ---
function increaseVolume() {
    local sink=$(getSink)
    local volume=$(getVolume "$sink")
    # Reuse the value just read instead of querying the sink a second time:
    # the original called getVolume twice and ignored the first result.
    setVolume $((volume + STEP))
}
# ---
# Returns the muted status for the provided sink.
#
# @param $1 The sink index.
#
# @return The muted status as printed by pacmd ("yes"/"no").
# ---
function isMuted() {
    # Read the "muted:" field from the pacmd block for this sink.
    pacmd list-sinks | grep -A 15 "index: $1" | awk '/muted/ {print $2; exit}'
}
# ---
# Listens for changes and updates the output for the bar.
#
# Note: This is faster than having the script on an interval.
# ---
function listen() {
    local firstRun=0

    # Pipe pulseaudio's event stream into the loop; each relevant event
    # triggers a re-render via show.  Note: the braces after the pipe run in
    # a subshell, so firstRun changes do not leak out (and don't need to).
    pactl subscribe 2>/dev/null | {
        while true; do
            {
                # If this is the first time just continue and print the current
                # state. Otherwise wait for events. This is to prevent the
                # module being empty until an event occurs.
                if [ $firstRun -eq 0 ]; then
                    firstRun=1
                else
                    read -r event || break
                    # Avoid double events: only react to card/sink/server
                    # notifications, skip everything else.
                    if ! echo "$event" | grep -e "on card" -e "on sink" -e "on server"; then
                        continue
                    fi
                fi
            } &>/dev/null

            show
        done
    }
}
# ---
# Changes the mute/unmute volume for the current sink.
#
# @param $1 The action to execute (mute/unmute/toggle).
# ---
function mute() {
    # Locals for consistency with the other functions in this file
    # (the original leaked sink/muted as globals).
    local sink
    local muted
    sink=$(getSink)
    muted=$(isMuted "$sink")

    # "mute" always mutes; "toggle" mutes only when currently unmuted.
    if [[ "$1" == "mute" ]] || ([[ "$1" == "toggle" ]] && [[ "$muted" == "no" ]]); then
        pactl set-sink-mute "$sink" "yes"
        sendNotification "Volume" "Mute: on" "audio-volume-low"
        return
    fi

    # Otherwise ("unmute", or "toggle" while muted) unmute.
    pactl set-sink-mute "$sink" "no"
    sendNotification "Volume" "Mute: off" "audio-volume-high"
}
# ---
# Displays a desktop notification.
#
# @param $1 The notification title.
# @param $2 The notification text.
# @param $3 The notification icon.
# ---
function sendNotification() {
    local title="$1"
    local notify

    # Prefer dunstify so repeated volume changes replace a single
    # notification (fixed id) instead of stacking; fall back to notify-send.
    if command -v dunstify &>/dev/null; then
        notify="dunstify --replace 201839192"
    else
        notify="notify-send"
    fi

    # $notify is intentionally unquoted so it word-splits into command + args.
    # The icon argument is quoted: an empty value would otherwise disappear
    # and shift the following arguments.
    $notify "$title" "$2" --icon="$3" &
}
# ---
# Changes the volume to the specified value.
#
# @param $1 The volume value (percent).
# ---
function setVolume() {
    local targetSink
    local newVolume
    targetSink=$(getSink)
    newVolume=$(getVolume "$targetSink")

    # Accept the requested value only when it lies inside [0, MAX_VOL];
    # out-of-range requests keep the current volume.
    if [ "$1" -ge "0" ] && [ "$1" -le "$MAX_VOL" ]; then
        newVolume=$1
    fi

    pactl set-sink-volume "$targetSink" "$newVolume%"
    sendNotification "Volume" "$newVolume%" "audio-volume-high"
}
# ---
# Synchronizes the volume between sinks with the volume of the current sink.
# ---
function synchronize() {
    # Nothing to do unless auto-sync is enabled.
    if [[ "$AUTOSYNC" = "no" ]]; then
        return
    fi

    local activeSink
    local targetVolume
    activeSink=$(getSink)
    targetVolume=$(getVolume "$activeSink")

    # Push the sink's volume to every input currently attached to it.
    local input
    for input in $(getInputs "$activeSink"); do
        pactl set-sink-input-volume "$input" "$targetVolume%"
    done
}
# Prints the bar output: the muted icon (with optional colors) when muted,
# otherwise the volume icon for the current level.
function show() {
    sink=$(getSink)
    volume=$(getVolume "$sink")
    muted=$(isMuted "$sink")

    # Pick the icon whose volume bucket contains the current volume.
    local iconsLen=${#VOLUME_ICONS[@]}
    if [ "$iconsLen" -ne 0 ]; then
        # Round the bucket size UP.  With plain integer (floor) division the
        # top bucket ended below MAX_VOL (e.g. 3 icons, MAX_VOL=100 -> top
        # threshold 99), so a volume of exactly MAX_VOL matched no icon and
        # the output was empty.
        local volSplit=$(( (MAX_VOL + iconsLen - 1) / iconsLen ))
        for i in $(seq 1 "$iconsLen"); do
            if [ $((i * volSplit)) -ge "$volume" ]; then
                volIcon="${VOLUME_ICONS[$((i-1))]}"
                break
            fi

            volIcon=""
        done
    else
        volIcon=""
    fi

    # Showing the formatted message
    if [ "$muted" = "yes" ]; then
        echo "$MUTED_BACKGROUND_START$MUTED_FOREGROUND_START$MUTED_ICON$MUTED_FOREGROUND_END$MUTED_BACKGROUND_END"
    else
        echo "${volIcon}"
    fi
}
# ---
# Shows the help message and exits.
# ---
function help() {
    # Same text as before, emitted through one here-doc instead of many echos.
    cat <<EOF
Usage: $0 ACTION [OPTIONS]

 Defines actions to check and modify the current volume

Options:
 -b, --muted-bg Configure background color for muted status
 -f, --muted-fg Configure foreground color for muted status
 -s, --step Configure the step for up/down actions

Actions:
 help Show this help message
 listen Listen for PulseAudio and update the output
 down Decrease the default sink's volume
 mute Mute or unmute the default sink's audio
 show Show the volume icon
 toggle Toggle muted/unmuted status
 unmute Unmute the default sink's audio
 up Increase the default sink's volume
EOF
    exit
}
# ---
# Entry point: sets defaults, parses options, then dispatches to the
# requested action (changing the volume and sending a notification).
# ---
function main() {
    # Defaults; the -s/-v options below override STEP/VOLUME.
    STEP=5
    MAX_VOL=100
    VOLUME=0
    VOLUME_ICONS=( "" "" "" )
    MUTED_ICON="ﱝ"
    MUTED_BACKGROUND_START=
    MUTED_BACKGROUND_END=
    MUTED_FOREGROUND_START=
    MUTED_FOREGROUND_END=

    # Environment & global constants for the script
    while [[ $# -gt 0 ]]; do
        case "$1" in
            -b | --muted-bg )
                MUTED_BACKGROUND_START="%{B$2}"
                MUTED_BACKGROUND_END="%{B-}"
                shift
                ;;
            -f | --muted-fg )
                MUTED_FOREGROUND_START="%{F$2}"
                MUTED_FOREGROUND_END="%{F-}"
                shift
                ;;
            -s | --step )
                # Bug fix: the step value is the option's argument ($2); the
                # original assigned $1, i.e. the literal string "-s".
                STEP=$2
                shift
                ;;
            -v | --volume )
                VOLUME=$2
                shift
                ;;
            *)
                # First non-option word is the action; a second one is an error.
                if [[ "$action" != "" ]]; then
                    echo "Unrecognized action $1"
                    exit
                fi

                action=$1
                ;;
        esac
        shift
    done

    case "$action" in
        down)
            decreaseVolume
            synchronize
            ;;
        help)
            help
            ;;
        listen)
            listen
            ;;
        mute | toggle | unmute)
            mute $action
            synchronize
            ;;
        set)
            setVolume $VOLUME
            ;;
        show)
            show
            ;;
        up)
            increaseVolume
            synchronize
            ;;
    esac
}
# Quote "$@" so arguments containing spaces reach main() intact
# (the original unquoted $@ re-split them).
main "$@"
|
dbestevez/dotfiles
|
src/bin/volume.sh
|
Shell
|
mit
| 7,872 |
#!/bin/bash
# Vagrant provisioning script: prints a banner, then installs Ansible and
# runs the project playbook (see the commands after the banner).
cat << "ARTWORK"
                 :;'',
             `####''+###'
           ####```````.##
          #+`+,```.......,#
         #`:+```..........,#
         #`;;```...........`'#
        `+###``..`,.......`###.
       .#'`:#`..`#++......`..##
       #:;;;#...#.............#
       #,;;;#...,.............;
       #;;;;;.......`.......`..
       #;;;;.......``'...:..`#..
       ###+;,........:...#..+#.,
      ,#.`++'......'##....#.....
      #.+#`##.............:'...`
      #`..#................+:..
      #`...............``...#.;
      .+...............:...#:.#
       #:..............###;...#
        ###`.......`.+:...:#+.;
          #`........;.'##+#:.#
          #``..:.....'.......#
          +.`..,......####..#
           #`...+..........'#.
          ##.`...+........+#'#.
        ;#+'#,`...,#....+###;'#;
      ##;;;'#'#########,#';+;;#:
     ##;;;;'''#,';;;'##++;;;#'''#
    ##;;''''''''#;;;;;#;#';;#''+:
    #;;'''''''''';;;;;#'#;;;#''#
    #';''''''''''';;;;''``###+''#
    :#+###########;;+'#;@:''###;
ARTWORK
echo
echo "Updating APT sources."
echo
apt-get update > /dev/null

echo
echo "Installing Ansible."
echo
apt-get -y install software-properties-common
add-apt-repository -y ppa:ansible/ansible
apt-get update
apt-get -y install ansible

# Report the installed version (Version field from dpkg metadata).
# $(…) replaces the original backticks.
ansible_version=$(dpkg -s ansible 2>&1 | grep Version | cut -f2 -d' ')
echo
echo "Installed Ansible $ansible_version"

# Abort when ansible-playbook did not land on PATH.
# command -v replaces the non-portable `which`.
ANS_BIN=$(command -v ansible-playbook)
if [[ -z $ANS_BIN ]]
then
    echo "Error: Cannot locate Ansible. Aborting."
    echo
    exit
fi

echo
echo "Validating Ansible hostfile permissions."
echo
chmod 644 /vagrant/provision/hosts

# More continuous scroll of the Ansible standard output buffer
export PYTHONUNBUFFERED=1

# $ANS_BIN /vagrant/provision/playbook.yml -i /vagrant/provision/hosts
"$ANS_BIN" /vagrant/provision/playbook.yml -i'127.0.0.1,'
|
ericmann/itinerant-jenkins
|
bin/provision.sh
|
Shell
|
mit
| 1,782 |
# For each completed yearly extract (*.done), derive the year from the file
# name and append it as a trailing column, writing the result to <file>.Year.
for i in *.done
do
####
# Strip the "pdsd" prefix and ".txt.done" suffix from the file name;
# the remainder (presumably the 4-digit year — confirm against the data)
# is written to the scratch file "Year".
echo $i | sed 's/pdsd//g' | sed 's/.txt.done//g' > Year
#
TheYear=`cat Year`
#
# Append the year to every line of the file.
sed "s|$| ${TheYear}|" $i > $i.Year
####
done
#The above is crap
# Alternative kept by the author: take the last 4 characters of each line of
# MergePesticides (the year column appended above) and de-duplicate them.
cat MergePesticides | sed 's/.*\(.\{4\}$\).*/\1/' | uniq > Year
|
andrewdefries/CA_Pesticides_1991_2011
|
CaliforniaPesticideUse_1991_2014/CleanTheText/PasteYear.sh
|
Shell
|
mit
| 235 |
set -e

# Shared build configuration (defines $OUTDIR among other variables).
. scripts/include.sh

# installer: rebuild the NSIS setup package from a clean export of HEAD.
cd ~/deps/roxcoin
rm -rf nsis
mkdir nsis
git archive HEAD | tar -x -C nsis
cd nsis/src
mkdir ../release
cp ../../release/* ../release/
cp ../../src/*.exe .
makensis ../share/setup.nsi

# Stage the generated installer.  ${OUTDIR:?} aborts if OUTDIR is unset or
# empty — the original unquoted `rm -rf $OUTDIR/setup` would have targeted
# /setup in that case.
rm -rf "${OUTDIR:?}/setup"
mkdir "$OUTDIR/setup"
cp ../share/roxcoin-*-win32-setup.exe "$OUTDIR/setup/"

# results: bundle installer, client and daemon into one distributable zip.
cd "$OUTDIR"
rm -rf roxcoin-dist.zip
zip -r roxcoin-dist.zip setup client daemon

echo -e "\n\n"
echo "Results are in $OUTDIR/roxcoin-dist.zip"
echo -e "\n"
|
Roxcoin/roxcointor
|
contrib/build_win_on_linux/nsis.sh
|
Shell
|
mit
| 489 |
# Run 'teefifo note import'; rename the tmux window 'testing1' to "pass" on
# success or "FAIL" on failure, then wait for ENTER so the result stays open.
# NOTE(review): with `a && b || c`, c also runs when b itself fails — here
# a failed rename-to-pass would trigger the FAIL rename too.
teefifo note import && tmux renamew -t testing1 pass || tmux renamew -t testing1 FAIL; echo -n 'press ENTER to close'; read x
|
rubysuperhero/hero-notes
|
testing1.sh
|
Shell
|
mit
| 126 |
#!/usr/bin/env bash
# Shared helpers for the dokku test suite: generic assertion helpers plus
# functions to create/deploy/destroy a test app and manage TLS fixtures.

# constants
DOKKU_ROOT=${DOKKU_ROOT:=~dokku}     # dokku home; overridable via environment
TEST_APP=my-cool-guy-test-app        # name used for the throwaway test app

# test functions
# Report a test failure: print the arguments (or stdin when none are given)
# and return 1.
flunk() {
  if [ "$#" -eq 0 ]; then
    cat -
  else
    echo "$*"
  fi
  return 1
}
# Assert that the previous command exited 0; with an argument, additionally
# assert that its output equals $1.
# Globals: status, output — expected to be set by a bats-style `run`.
assert_success() {
  if [ "$status" -ne 0 ]; then
    flunk "command failed with exit status $status"
  elif [ "$#" -gt 0 ]; then
    assert_output "$1"
  fi
}
# Assert that the previous command exited non-zero; with an argument,
# additionally assert that its output equals $1.
# Globals: status, output — expected to be set by a bats-style `run`.
assert_failure() {
  if [ "$status" -eq 0 ]; then
    flunk "expected failed exit status"
  elif [ "$#" -gt 0 ]; then
    assert_output "$1"
  fi
}
# Assert that the two arguments are identical strings; on mismatch, report
# both values through flunk (which returns 1).
assert_equal() {
  if [ "$1" = "$2" ]; then
    return 0
  fi
  { echo "expected: $1"
    echo "actual:   $2"
  } | flunk
}
# Assert that $output (set by a bats-style `run`) equals $1, or equals
# stdin when no argument is given.
assert_output() {
  local expected
  if [ $# -eq 0 ]; then expected="$(cat -)"
  else expected="$1"
  fi
  assert_equal "$expected" "$output"
}
# Two modes, keyed on whether $1 is a non-negative integer:
#   assert_line N TEXT  — assert that output line N equals TEXT
#   assert_line TEXT    — assert that some output line equals TEXT
# Globals: lines — expected to be set by a bats-style `run`.
assert_line() {
  # The arithmetic test doubles as an "is $1 numeric" check; its stderr is
  # discarded when $1 is not a number.
  if [ "$1" -ge 0 ] 2>/dev/null; then
    assert_equal "$2" "${lines[$1]}"
  else
    local line
    for line in "${lines[@]}"; do
      if [ "$line" = "$1" ]; then return 0; fi
    done
    flunk "expected line \`$1'"
  fi
}
# Inverse of assert_line:
#   refute_line N     — assert that output has at most N lines
#                       (flunks when index N exists, i.e. N < line count)
#   refute_line TEXT  — assert that no output line equals TEXT
# Globals: lines — expected to be set by a bats-style `run`.
refute_line() {
  if [ "$1" -ge 0 ] 2>/dev/null; then
    local num_lines="${#lines[@]}"
    if [ "$1" -lt "$num_lines" ]; then
      flunk "output has $num_lines lines"
    fi
  else
    local line
    for line in "${lines[@]}"; do
      if [ "$line" = "$1" ]; then
        flunk "expected to not find line \`$line'"
      fi
    done
  fi
}
# Run the given command with its arguments; flunk when it fails.
# Bug fix: the original used "$*", which joins the command and all its
# arguments into a single word — any invocation with arguments (e.g.
# `assert test 1 = 1`) became a "command not found". "$@" preserves words.
assert() {
  if ! "$@"; then
    flunk "failed: $*"
  fi
}
# Assert that $status (set by a bats-style `run`) equals the expected code $1.
assert_exit_status() {
  assert_equal "$status" "$1"
}
# dokku functions

# Create the throwaway test application.
create_app() {
  dokku apps:create $TEST_APP
}

# Destroy the test application (echoing the name answers the confirm prompt).
destroy_app() {
  echo $TEST_APP | dokku apps:destroy $TEST_APP
}

# Deploy a fixture app from ./tests/apps by building a throwaway git repo
# and pushing it to the app's dokku remote; destroys the app if the push fails.
# Arguments: $1 - fixture directory name (defaults to nodejs-express).
deploy_app() {
  APP_TYPE="$1"; APP_TYPE=${APP_TYPE:="nodejs-express"}
  # NOTE(review): $TARGET appears to be unset here — the mktemp template then
  # starts with "."; confirm whether TARGET is exported by the test runner.
  TMP=$(mktemp -d -t "$TARGET.XXXXX")
  # mktemp made a directory; replace it with a copy of the fixture tree.
  rmdir $TMP && cp -r ./tests/apps/$APP_TYPE $TMP
  cd $TMP
  git init
  git config user.email "[email protected]"
  git config user.name "Test Robot"
  git remote add target [email protected]:$TEST_APP
  # Fixtures ship "gitignore" un-dotted so it survives packaging; restore it.
  [[ -f gitignore ]] && mv gitignore .gitignore
  git add .
  git commit -m 'initial commit'
  git push target master || destroy_app
}
# Build a local git repo from the nodejs-express fixture (no push) for tests
# that exercise the client side.
setup_client_repo() {
  TMP=$(mktemp -d -t "$TARGET.XXXXX")
  rmdir $TMP && cp -r ./tests/apps/nodejs-express $TMP
  cd $TMP
  git init
  git config user.email "[email protected]"
  git config user.name "Test Robot"
  [[ -f gitignore ]] && mv gitignore .gitignore
  git add .
  git commit -m 'initial commit'
}

# Install the per-app TLS fixture (server.crt/server.key) for TEST_APP.
setup_test_tls() {
  TLS="/home/dokku/$TEST_APP/tls"
  mkdir -p $TLS
  tar xf $BATS_TEST_DIRNAME/server_ssl.tar -C $TLS
  sudo chown -R dokku:dokku $TLS
}

# Same as setup_test_tls, but with a certificate carrying SubjectAltNames.
setup_test_tls_with_sans() {
  TLS="/home/dokku/$TEST_APP/tls"
  mkdir -p $TLS
  tar xf $BATS_TEST_DIRNAME/server_ssl_sans.tar -C $TLS
  sudo chown -R dokku:dokku $TLS
}

# Install a server-wide wildcard certificate and enable it by uncommenting
# the ssl_certificate lines in the nginx config, then reload nginx.
setup_test_tls_wildcard() {
  TLS="/home/dokku/tls"
  mkdir -p $TLS
  tar xf $BATS_TEST_DIRNAME/server_ssl_wildcard.tar -C $TLS
  sudo chown -R dokku:dokku $TLS
  sed -i -e "s:^# ssl_certificate $DOKKU_ROOT/tls/server.crt;:ssl_certificate $DOKKU_ROOT/tls/server.crt;:g" \
    -e "s:^# ssl_certificate_key $DOKKU_ROOT/tls/server.key;:ssl_certificate_key $DOKKU_ROOT/tls/server.key;:g" /etc/nginx/conf.d/dokku.conf
  # SIGHUP makes nginx reload its config; the sleep gives it time to settle.
  kill -HUP "$(< /var/run/nginx.pid)"; sleep 5
}

# Undo setup_test_tls_wildcard: remove the certs, re-comment the nginx
# ssl_certificate lines, and reload nginx.
disable_tls_wildcard() {
  TLS="/home/dokku/tls"
  rm -rf $TLS
  sed -i -e "s:^ssl_certificate $DOKKU_ROOT/tls/server.crt;:# ssl_certificate $DOKKU_ROOT/tls/server.crt;:g" \
    -e "s:^ssl_certificate_key $DOKKU_ROOT/tls/server.key;:# ssl_certificate_key $DOKKU_ROOT/tls/server.key;:g" /etc/nginx/conf.d/dokku.conf
  kill -HUP "$(< /var/run/nginx.pid)"; sleep 5
}
|
lmars/dokku
|
tests/unit/test_helper.bash
|
Shell
|
mit
| 3,535 |
#!/bin/sh
# SGE submission script: run blastn on the query FASTA ($1) against the
# TAIR8 chromosome database and write tabular hits (-m 8) to $2.
# The "#$" lines below are SGE directives, not ordinary comments.
# (Old absolute log paths, kept for reference:)
# -o /home/mcb/jogdeos/softwareAndscripts/scripts/qsubSample/qOut
# -e /home/mcb/jogdeos/softwareAndscripts/scripts/qsubSample/qErr
#$ -o ../qOut
#$ -e ../qErr
#$ -cwd
#$ -N blastout
#$ -S /bin/sh
#
# -e 1e-10 keeps only strong hits; -m 8 selects tabular output.
/local/cluster/bin/blastall -i $1 -o $2 -d ../blastDB/TAIR8_chromosomes.nfa -p blastn -m 8 -e 1e-10
|
sanjuroj/bioscripts
|
qsubSample/blast.sh
|
Shell
|
mit
| 310 |
# Watch views/main.jsx (treating .jsx files as modules) and rebuild
# public/js/bundle.js on every change; -v logs rebuilds, -d adds source maps.
watchify views/main.jsx -v -d --extension=jsx --o public/js/bundle.js
|
bbviana/laylamarques
|
scripts/watch-build.sh
|
Shell
|
mit
| 70 |
#!/bin/bash
# Installs LAMP on to an Ubuntu server.
# Each step first checks dpkg and skips the install when the package exists.

# Checks if Apache is installed
sudo dpkg -l | grep apache2 &> /dev/null && echo "Apache is already installed..." || (echo "Installing Apache now..." && sudo apt-get -y install apache2 &> /dev/null)

# Checks if the PHP module for Apache is installed (libapache2-mod-php5 on
# older releases, libapache2-mod-php on newer ones).
# Bug fix: the original line had unbalanced parentheses and was a syntax error.
(sudo dpkg -l | grep installed | grep libapache2-mod-php5 &> /dev/null || sudo dpkg --list | grep installed | grep libapache2-mod-php &> /dev/null) && echo "Apache has PHP enabled..." || (echo "Enabling PHP for Apache now..." && (sudo apt-get -y install libapache2-mod-php5 &> /dev/null || sudo apt-get -y install libapache2-mod-php &> /dev/null))

# Checks if MySQL is installed
sudo dpkg -l | grep mysql-server &> /dev/null && echo "MySQL server is already installed..." || (echo "Installing MySQL server now..." && sudo apt-get -y install mysql-server &>/dev/null)
sudo dpkg -l php5-mysql &> /dev/null && echo "MySQL PHP has already been installed..." || (echo "Installing MySQL PHP now..." && (sudo apt-get -y install php5-mysql &> /dev/null || sudo apt-get -y install php-mysql &> /dev/null))

# Checks if PHP is installed
# Bug fix: the php5 fallback was missing the "install" subcommand.
sudo dpkg -l | grep php &> /dev/null && echo "PHP is already installed..." || (echo "Installing PHP now..." && (sudo apt-get -y install php &> /dev/null || sudo apt-get -y install php5 &> /dev/null))
|
rhom001/evillious_web
|
scripts/install-ubuntu.sh
|
Shell
|
mit
| 1,303 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for RHSA-2015:1544
#
# Security announcement date: 2015-08-04 17:18:29 UTC
# Script generation date: 2017-01-01 21:16:31 UTC
#
# Operating System: Red Hat 5
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - java-1.5.0-ibm.i386:1.5.0.16.13-1jpp.3.el5
# - java-1.5.0-ibm-demo.i386:1.5.0.16.13-1jpp.3.el5
# - java-1.5.0-ibm-devel.i386:1.5.0.16.13-1jpp.3.el5
# - java-1.5.0-ibm-javacomm.i386:1.5.0.16.13-1jpp.3.el5
# - java-1.5.0-ibm-jdbc.i386:1.5.0.16.13-1jpp.3.el5
# - java-1.5.0-ibm-plugin.i386:1.5.0.16.13-1jpp.3.el5
# - java-1.5.0-ibm-src.i386:1.5.0.16.13-1jpp.3.el5
# - java-1.5.0-ibm.x86_64:1.5.0.16.13-1jpp.3.el5
# - java-1.5.0-ibm-accessibility.x86_64:1.5.0.16.13-1jpp.3.el5
# - java-1.5.0-ibm-demo.x86_64:1.5.0.16.13-1jpp.3.el5
# - java-1.5.0-ibm-devel.x86_64:1.5.0.16.13-1jpp.3.el5
# - java-1.5.0-ibm-javacomm.x86_64:1.5.0.16.13-1jpp.3.el5
# - java-1.5.0-ibm-src.x86_64:1.5.0.16.13-1jpp.3.el5
#
# Last versions recommanded by security team:
# - java-1.5.0-ibm.i386:1.5.0.16.14-1jpp.1.el5
# - java-1.5.0-ibm-demo.i386:1.5.0.16.14-1jpp.1.el5
# - java-1.5.0-ibm-devel.i386:1.5.0.16.14-1jpp.1.el5
# - java-1.5.0-ibm-javacomm.i386:1.5.0.16.14-1jpp.1.el5
# - java-1.5.0-ibm-jdbc.i386:1.5.0.16.14-1jpp.1.el5
# - java-1.5.0-ibm-plugin.i386:1.5.0.16.14-1jpp.1.el5
# - java-1.5.0-ibm-src.i386:1.5.0.16.14-1jpp.1.el5
# - java-1.5.0-ibm.x86_64:1.5.0.16.14-1jpp.1.el5
# - java-1.5.0-ibm-accessibility.x86_64:1.5.0.16.14-1jpp.1.el5
# - java-1.5.0-ibm-demo.x86_64:1.5.0.16.14-1jpp.1.el5
# - java-1.5.0-ibm-devel.x86_64:1.5.0.16.14-1jpp.1.el5
# - java-1.5.0-ibm-javacomm.x86_64:1.5.0.16.14-1jpp.1.el5
# - java-1.5.0-ibm-src.x86_64:1.5.0.16.14-1jpp.1.el5
#
# CVE List:
# - CVE-2015-1931
# - CVE-2015-2590
# - CVE-2015-2601
# - CVE-2015-2621
# - CVE-2015-2632
# - CVE-2015-2637
# - CVE-2015-2638
# - CVE-2015-2664
# - CVE-2015-4000
# - CVE-2015-4731
# - CVE-2015-4732
# - CVE-2015-4733
# - CVE-2015-4748
# - CVE-2015-4749
# - CVE-2015-4760
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
# Install the fixed package versions listed in the advisory header above.
# NOTE(review): "yum install name-version" normally needs the complete
# version-release string to resolve; these names truncate it at "…16.14" —
# verify yum actually pins the advisory versions (1.5.0.16.14-1jpp.1.el5).
sudo yum install java-1.5.0-ibm.i386-1.5.0.16.14 -y
sudo yum install java-1.5.0-ibm-demo.i386-1.5.0.16.14 -y
sudo yum install java-1.5.0-ibm-devel.i386-1.5.0.16.14 -y
sudo yum install java-1.5.0-ibm-javacomm.i386-1.5.0.16.14 -y
sudo yum install java-1.5.0-ibm-jdbc.i386-1.5.0.16.14 -y
sudo yum install java-1.5.0-ibm-plugin.i386-1.5.0.16.14 -y
sudo yum install java-1.5.0-ibm-src.i386-1.5.0.16.14 -y
sudo yum install java-1.5.0-ibm.x86_64-1.5.0.16.14 -y
sudo yum install java-1.5.0-ibm-accessibility.x86_64-1.5.0.16.14 -y
sudo yum install java-1.5.0-ibm-demo.x86_64-1.5.0.16.14 -y
sudo yum install java-1.5.0-ibm-devel.x86_64-1.5.0.16.14 -y
sudo yum install java-1.5.0-ibm-javacomm.x86_64-1.5.0.16.14 -y
sudo yum install java-1.5.0-ibm-src.x86_64-1.5.0.16.14 -y
|
Cyberwatch/cbw-security-fixes
|
Red_Hat_5/x86_64/2015/RHSA-2015:1544.sh
|
Shell
|
mit
| 2,991 |
#!/bin/bash
# Bootstrap this machine's shell environment: switch the login shell to zsh
# and install the dotfiles repo via its bootstrap script.

# Abort early when prerequisites are missing.
# command -v replaces the non-portable `which`.
if ! command -v zsh
then
  echo "zsh does not exist. Exiting."
  exit 1
fi

if ! command -v git
then
  echo "git does not exist. Exiting."
  exit 1
fi

cd "$HOME"

# Prefer a locally built / Homebrew zsh when one exists.
if command -v /usr/local/bin/zsh; then
  sudo chsh -s /usr/local/bin/zsh "$USER"
else
  sudo chsh -s /bin/zsh "$USER"
fi

mkdir -p "$HOME/.history-directory"

# Remove previous installs; ignore failures.
rm -rf .dotfiles &> /dev/null || true
rm -rf .oh-my-zsh &> /dev/null || true

git clone https://github.com/ankushagarwal/dotfiles.git .dotfiles
cd .dotfiles
# Answer "o" (overwrite) to every prompt from the bootstrap script.
yes o | script/bootstrap
|
ankushagarwal/dotfiles
|
script/setup.sh
|
Shell
|
mit
| 495 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.