code (stringlengths 2–1.05M) | repo_name (stringlengths 5–110) | path (stringlengths 3–922) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 2–1.05M)
---|---|---|---|---|---
#!/bin/sh
cat > sdksyms.c << EOF
/* This file is automatically generated by sdksyms.sh. */
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#ifdef HAVE_XORG_CONFIG_H
#include <xorg-config.h>
#endif
/* These must be included first */
#include "misc.h"
#include "miscstruct.h"
/* render/Makefile.am */
#include "picture.h"
#include "mipict.h"
#include "glyphstr.h"
#include "picturestr.h"
#include "renderedge.h"
/* fb/Makefile.am -- module */
/*
#include "fb.h"
#include "fbrop.h"
#include "fboverlay.h"
#include "wfbrename.h"
#include "fbpict.h"
*/
/* miext/shadow/Makefile.am -- module */
/*
#include "shadow.h"
*/
/* miext/damage/Makefile.am */
#include "damage.h"
#include "damagestr.h"
/* Xext/Makefile.am -- half is module, half is builtin */
/*
#include "xvdix.h"
#include "xvmcext.h"
*/
#include "geext.h"
#include "geint.h"
#include "shmint.h"
#if XINERAMA
# include "panoramiXsrv.h"
# include "panoramiX.h"
#endif
/* hw/xfree86/int10/Makefile.am -- module */
/*
#include "xf86int10.h"
*/
/* hw/xfree86/i2c/Makefile.am -- "mostly" modules */
#include "xf86i2c.h"
/*
#include "bt829.h"
#include "fi1236.h"
#include "msp3430.h"
#include "tda8425.h"
#include "tda9850.h"
#include "tda9885.h"
#include "uda1380.h"
#include "i2c_def.h"
*/
/* hw/xfree86/modes/Makefile.am */
#include "xf86Crtc.h"
#include "xf86Modes.h"
#include "xf86RandR12.h"
/* #include "xf86Rename.h" */
/* hw/xfree86/ddc/Makefile.am */
#include "edid.h"
#include "xf86DDC.h"
/* hw/xfree86/dri2/Makefile.am -- module */
/*
#if DRI2
# include "dri2.h"
#endif
*/
/* hw/xfree86/vgahw/Makefile.am -- module */
/*
#include "vgaHW.h"
*/
/* hw/xfree86/fbdevhw/Makefile.am -- module */
/*
#include "fbdevhw.h"
*/
/* hw/xfree86/common/Makefile.am */
#include "compiler.h"
#include "fourcc.h"
#include "xf86.h"
#include "xf86Module.h"
#include "xf86Opt.h"
#include "xf86PciInfo.h"
#include "xf86Priv.h"
#include "xf86Privstr.h"
#include "xf86cmap.h"
#include "xf86fbman.h"
#include "xf86str.h"
#include "xf86Xinput.h"
#include "xf86VGAarbiter.h"
#include "xisb.h"
#if XV
# include "xf86xv.h"
# include "xf86xvmc.h"
# include "xf86xvpriv.h"
#endif
/* XF86VidMode code is in libextmod module */
/*
#if XF86VIDMODE
# include "vidmodeproc.h"
#endif
*/
#include "xorgVersion.h"
#if defined(__sparc__) || defined(__sparc)
# include "xf86sbusBus.h"
#endif
/* hw/xfree86/ramdac/Makefile.am */
#include "BT.h"
#include "IBM.h"
#include "TI.h"
#include "xf86Cursor.h"
#include "xf86RamDac.h"
/* hw/xfree86/shadowfb/Makefile.am -- module */
/*
#include "shadowfb.h"
*/
/* hw/xfree86/os-support/solaris/Makefile.am */
#if defined(sun386)
# include "agpgart.h"
#endif
/* hw/xfree86/os-support/Makefile.am */
#include "xf86_OSproc.h"
#include "xf86_OSlib.h"
/* hw/xfree86/os-support/bus/Makefile.am */
#include "xf86Pci.h"
#if defined(__sparc__) || defined(__sparc)
# include "xf86Sbus.h"
#endif
/* hw/xfree86/xaa/Makefile.am -- module */
/*
#include "xaa.h"
#include "xaalocal.h"
#include "xaarop.h"
#include "xaaWrapper.h"
*/
/* hw/xfree86/dixmods/extmod/Makefile.am -- module */
/*
#include "dgaproc.h"
*/
/* hw/xfree86/parser/Makefile.am */
#include "xf86Parser.h"
#include "xf86Optrec.h"
/* hw/xfree86/vbe/Makefile.am -- module */
/*
#include "vbe.h"
#include "vbeModes.h"
*/
/* hw/xfree86/dri/Makefile.am -- module */
/*
#if XF86DRI
# include "dri.h"
# include "sarea.h"
# include "dristruct.h"
#endif
*/
/* hw/xfree86/xf8_16bpp/Makefile.am -- module */
/*
#include "cfb8_16.h"
*/
/* mi/Makefile.am */
#include "micmap.h"
#include "miline.h"
#include "mipointer.h"
#include "mi.h"
#include "mibstore.h"
#include "migc.h"
#include "mipointrst.h"
#include "mizerarc.h"
#include "micoord.h"
#include "mifillarc.h"
#include "mispans.h"
#include "miwideline.h"
#include "mistruct.h"
#include "mifpoly.h"
#include "mioverlay.h"
/* randr/Makefile.am */
#include "randrstr.h"
#include "rrtransform.h"
/* dbe/Makefile.am -- module */
/*
#include "dbestruct.h"
*/
/* exa/Makefile.am -- module */
/*
#include "exa.h"
*/
/* xfixes/Makefile.am */
#include "xfixes.h"
/* include/Makefile.am */
#include "XIstubs.h"
#include "bstore.h"
#include "bstorestr.h"
#include "closestr.h"
#include "closure.h"
#include "colormap.h"
#include "colormapst.h"
#include "hotplug.h"
#include "cursor.h"
#include "cursorstr.h"
#include "dix.h"
#include "dixaccess.h"
#include "dixevents.h"
#include "dixfont.h"
#include "dixfontstr.h"
#include "dixgrabs.h"
#include "dixstruct.h"
#include "exevents.h"
#include "extension.h"
#include "extinit.h"
#include "extnsionst.h"
#include "gc.h"
#include "gcstruct.h"
#include "globals.h"
#include "input.h"
#include "inputstr.h"
/* already included */
/*
#include "misc.h"
#include "miscstruct.h"
*/
#include "opaque.h"
#include "os.h"
#include "pixmap.h"
#include "pixmapstr.h"
#include "privates.h"
#include "property.h"
#include "propertyst.h"
#include "ptrveloc.h"
#include "region.h"
#include "regionstr.h"
#include "registry.h"
#include "resource.h"
#include "rgb.h"
#include "screenint.h"
#include "scrnintstr.h"
#include "selection.h"
#include "servermd.h"
#include "site.h"
#include "swaprep.h"
#include "swapreq.h"
#include "validate.h"
#include "window.h"
#include "windowstr.h"
#include "xace.h"
#include "xkbfile.h"
#include "xkbsrv.h"
#include "xkbstr.h"
#include "xkbrules.h"
#include "xserver-properties.h"
EOF
topdir=$1
shift
LC_ALL=C
export LC_ALL
${CPP:-cpp} "$@" -DXorgLoader sdksyms.c | ${AWK:-awk} -v topdir="$topdir" '
BEGIN {
sdk = 0;
print("/*");
print(" * These symbols are referenced to ensure they");
print(" * will be available in the X Server binary.");
print(" */");
printf("/* topdir=%s */\n", topdir);
print("_X_HIDDEN void *xorg_symbols[] = {");
printf("sdksyms.c:") > "sdksyms.dep";
}
/^# [0-9]+ "/ {
# Process linemarkers that name the file by a relative path, or whose
# absolute path lies under $topdir.
# Note that awk indexing starts at 1 (index() returning 0 means no
# match), and the filename starts with a quote, hence the value 2.
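# e.g. cpp emits linemarkers such as (hypothetical paths):
#   # 1 "misc.h" 1                    <- relative path: an SDK header
#   # 1 "/usr/include/stdio.h" 1 3 4  <- absolute, outside topdir: skipped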
sdk = $3 !~ /^"\// || index($3, topdir) == 2;
if (sdk && $3 ~ /\.h"$/) {
# remove quotes
gsub(/"/, "", $3);
if (! headers[$3]) {
printf(" \\\n %s", $3) >> "sdksyms.dep";
headers[$3] = 1;
}
}
}
/^extern[ ]/ {
if (sdk) {
n = 3;
# skip attribute, if any
while ($n ~ /^(__attribute__|__global)/ ||
# skip modifiers, if any
$n ~ /^\*?(unsigned|const|volatile|struct)$/ ||
# skip pointer
$n ~ /\*$/)
n++;
# type specifier may not be set, as in
# extern _X_EXPORT unsigned name(...)
if ($n !~ /[^a-zA-Z0-9_]/)
n++;
# match
# extern _X_EXPORT type (* name[])(...)
if ($n ~ /^[^a-zA-Z0-9_]+$/)
n++;
# match
# extern _X_EXPORT const name *const ...
if ($n ~ /^([^a-zA-Z0-9_]+)?const$/)
n++;
# actual name may be in the next line, as in
# extern _X_EXPORT type
# possibly ending with a *
# name(...)
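# hypothetical declarations this logic must cope with:
#   extern _X_EXPORT unsigned xf86Foo(int);
#   extern _X_EXPORT void *
#   xf86Bar(void);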
if ($n == "" || $n ~ /^\*+$/) {
getline;
n = 1;
}
# don't modify $0 or $n
symbol = $n;
# remove starting non word chars
sub(/^[^a-zA-Z0-9_]+/, "",symbol);
# remove from first non word to end of line
sub(/[^a-zA-Z0-9_].*/, "", symbol);
#print;
printf(" (void *) &%s,\n", symbol);
}
}
END {
print("};");
print("") >> "sdksyms.dep";
}' > _sdksyms.c
STATUS=$?
cat _sdksyms.c >> sdksyms.c
rm _sdksyms.c
rc=$?; [ $rc -ne 0 ] && exit $rc
exit $STATUS
|
twobob/buildroot-kindle
|
output/build/xserver_xorg-server-1.9.4/hw/xfree86/loader/sdksyms.sh
|
Shell
|
gpl-2.0
| 7,416 |
#! /bin/sh
# Copyright (C) 1998-2022 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# Test to make sure the C++ linker is used when appropriate.
# Matthew D. Langston <[email protected]>
. test-init.sh
cat >> configure.ac << 'END'
AC_PROG_CC
AC_PROG_CXX
END
cat > Makefile.am << 'END'
bin_PROGRAMS = lavalamp
lavalamp_SOURCES = lava.c lamp.cxx
END
$ACLOCAL
$AUTOMAKE
# We should only see the C++ linker in the rules of 'Makefile.in'.
# Look for this macro not at the beginning of any line; that will have
# to be good enough for now.
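# (a matching rule line in Makefile.in looks roughly like:
#   "<TAB>$(CXXLINK) $(lavalamp_OBJECTS) $(lavalamp_LDADD) $(LIBS)")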
grep '.\$(CXXLINK)' Makefile.in
# We should not see these patterns:
grep '.\$(FLINK)' Makefile.in && exit 1
grep '.\$(LINK)' Makefile.in && exit 1
exit 0
|
autotools-mirror/automake
|
t/link_c_cxx.sh
|
Shell
|
gpl-2.0
| 1,313 |
#!/bin/bash
# Copyright (c) 2011, Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
TNVME_CMD_LINE=$@
Usage() {
echo "usage...."
echo " $0 <tnvme cmd line options>"
echo ""
}
if [ -z "$TNVME_CMD_LINE" ]; then
Usage
exit
fi
rm -rf ./Logs
mkdir -m 0777 ./Logs
echo ./tnvme -k skiptest.cfg $TNVME_CMD_LINE 2>&1 | tee ./Logs/tnvme.out
#valgrind --tool=memcheck ./tnvme -k skiptest.cfg $TNVME_CMD_LINE 2>&1 | tee ./Logs/tnvme.out
valgrind --tool=memcheck --leak-check=full --track-origins=yes -v --show-reachable=yes ./tnvme -k skiptest.cfg $TNVME_CMD_LINE 2>&1 | tee -a ./Logs/tnvme.out
|
junqiang521/Test
|
dnvme/tnvme-master/dbgMemLeak.sh
|
Shell
|
gpl-2.0
| 1,123 |
#! /bin/sh
# Copyright (C) 2011-2022 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# TAP support:
# - non-directive comments in TAP results are kept verbatim
. test-init.sh
. tap-setup.sh
cat > all.test <<END
1..5
ok 1 # TDO
not ok 2 # SKP
ok #SIKP${tab}${sp}
not ok # TOD${tab}
ok 5 # ${tab}${tab}TOOD${tab}${sp}${sp}
END
run_make -O -e FAIL check
count_test_results total=5 pass=3 fail=2 xpass=0 xfail=0 skip=0 error=0
# Don't be too strict about trailing white space.
$FGREP ': all.test' stdout | sed "s/[$sp$tab]*$//" > got
cat > exp <<END
PASS: all.test 1 # TDO
FAIL: all.test 2 # SKP
PASS: all.test 3 #SIKP
FAIL: all.test 4 # TOD
PASS: all.test 5 # ${tab}${tab}TOOD
END
cat exp
cat got
diff exp got
:
|
autotools-mirror/automake
|
t/tap-result-comment.sh
|
Shell
|
gpl-2.0
| 1,330 |
#! /bin/sh
# Copyright (C) 2001-2018 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# Test to make sure Automake supports implicit rules with dot-less
# extensions. Also make sure that '.o' and '.obj' are handled like
# '.$(OBJEXT)'. See also related "semantic" tests 'suffix6b.sh'
# and 'suffix6c.sh'.
. test-init.sh
cat > Makefile.am << 'END'
SUFFIXES = a b .$(OBJEXT) c .o .obj
bin_PROGRAMS = foo
foo_SOURCES = fooa barc bazc
ab:
dummy action 1
b.$(OBJEXT):
dummy action 2
c.o:
dummy action C
c.obj:
dummy action C
END
$ACLOCAL
$AUTOMAKE
sed -n -e '/foo_OBJECTS *=.*\\$/ {
:loop
p
n
t clear
:clear
s/\\$/\\/
t loop
p
n
}' -e 's/$/ /' -e 's/^.*foo_OBJECTS *= */ /p' Makefile.in > foo-objects
cat foo-objects
# Automake must figure that fooa translates to foo.$(OBJEXT) and
# foo.$(OBJEXT) using the following rules:
# fooa --[ab]--> foob --[b.$(OBJEXT)]--> foo.$(OBJEXT)
$FGREP ' foo.$(OBJEXT) ' foo-objects
# barc --[c.o]--> bar.$(OBJEXT) ## This is really meant!
$FGREP ' bar.$(OBJEXT) ' foo-objects
# bazc --[c.obj]--> baz.$(OBJEXT) ## This is really meant!
$FGREP ' baz.$(OBJEXT) ' foo-objects
:
|
komh/automake-os2
|
t/suffix6.sh
|
Shell
|
gpl-2.0
| 1,746 |
#!/bin/sh
# hostname for Splice's rpm builder machine
# used by install scripts to setup up yum repo files
export BUILDER_ADDR=ec2-23-22-86-129.compute-1.amazonaws.com
function waitfor() {
if [ "$#" -ne 4 ]; then
echo "Incorrect usage of waitfor() function, only $# arguments passed when 4 were expected"
echo "Usage: retry CMD WAITING_MESSAGE NUM_ITERATIONS SLEEP_SECONDS_EACH_ITERATION"
exit 1
fi
CMD=$1
WAITING_MSG=$2
MAX_TESTS=$3
SLEEP_SECS=$4
TESTS=0
OVER=0
while [ $OVER != 1 ] && [ $TESTS -lt $MAX_TESTS ]; do
eval ${CMD} > /dev/null
if [ $? -eq 0 ]; then
OVER=1
else
TESTS=$((TESTS + 1))
echo $WAITING_MSG will wait for ${SLEEP_SECS} seconds this is attempt ${TESTS}/${MAX_TESTS} at `date`
sleep $SLEEP_SECS
fi
done
if [ $TESTS = $MAX_TESTS ]; then
echo ""
echo "**ERROR**:"
echo "Command: ${CMD}"
echo "Unsuccessful after ${MAX_TESTS} iterations with a sleep of ${SLEEP_SECS} seconds in between"
exit 1
fi
}
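# Example usage (hypothetical command and URL):
#   waitfor "curl -sf http://localhost/status" "waiting for web service" 30 10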
|
splice/splice-server
|
ec2/scripts/functions.sh
|
Shell
|
gpl-2.0
| 1,124 |
#!/bin/bash
. `dirname $0`/functions.sh
# First check if __thread is supported by ld.so/gcc/ld/as:
rm -f tlstest
echo '__thread int a; int main (void) { return a; }' \
| $CC -xc - -o tlstest > /dev/null 2>&1 || exit 77
( ./tlstest || { rm -f tlstest; exit 77; } ) 2>/dev/null || exit 77
rm -f tls4 tls4lib*.so tls4.log
rm -f prelink.cache
$CC -shared -O2 -fpic -o tls4lib1.so $srcdir/tls4lib1.c
$CC -shared -O2 -fpic -o tls4lib2.so $srcdir/tls4lib2.c \
tls4lib1.so 2>/dev/null
BINS="tls4"
LIBS="tls4lib1.so tls4lib2.so"
$CCLINK -o tls4 $srcdir/tls4.c -Wl,--rpath-link,. tls4lib2.so
savelibs
echo $PRELINK ${PRELINK_OPTS--vm} ./tls4 > tls4.log
$PRELINK ${PRELINK_OPTS--vm} ./tls4 >> tls4.log 2>&1 || exit 1
grep -q ^`echo $PRELINK | sed 's/ .*$/: /'` tls4.log && exit 2
LD_LIBRARY_PATH=. ./tls4 || exit 3
readelf -a ./tls4 >> tls4.log 2>&1 || exit 4
# So that it is not prelinked again
chmod -x ./tls4
comparelibs >> tls4.log 2>&1 || exit 5
|
ystk/debian-prelink
|
testsuite/tls4.sh
|
Shell
|
gpl-2.0
| 944 |
#!/bin/sh
ifconfig lo 127.0.0.1
if [ -f /lib/modules/r8101.ko ]; then
insmod /lib/modules/r8101.ko
else
insmod /lib/modules/sdp_mac.ko
fi
|
card2000/buildroot-sam
|
package/busybox/files/genoa/etc/Scripts/network_init.sh
|
Shell
|
gpl-2.0
| 145 |
#!/bin/sh
mount 192.168.0.1:/mnt/nfs
|
lumenosys/lumenosys-adi-buildroot
|
board/Lumenosys/blackfin/target_skeleton/root/mount.sh
|
Shell
|
gpl-2.0
| 37 |
#!/bin/sh
AM_VERSION="1.9"
if ! type aclocal-$AM_VERSION 1>/dev/null 2>&1; then
# automake-1.9 (recommended) is not available on Fedora 8
AUTOMAKE=automake
ACLOCAL=aclocal
else
ACLOCAL=aclocal-${AM_VERSION}
AUTOMAKE=automake-${AM_VERSION}
fi
echo "Generating build scripts in linphone..."
set -x
libtoolize --copy --force
autoheader
$ACLOCAL -I m4
$AUTOMAKE --force-missing --add-missing --copy
autoconf
rm -rf config.cache
echo "Generating build scripts in oRTP..."
cd oRTP && ./autogen.sh && cd -
echo "Generating build scripts in mediastreamer2..."
cd mediastreamer2 && ./autogen.sh && cd -
|
scs/uclinux
|
user/linphone/linphone-3.0.0/autogen.sh
|
Shell
|
gpl-2.0
| 603 |
#!/bin/sh
# quit on errors:
set -o errexit
# quit on unbound symbols:
set -o nounset
DIR=`dirname "$0"`
cd "$DIR"
export FLASK_APP=app.py
# Setup fixtures
flask db init
flask db create
|
krzysztof/invenio-pidrelations
|
examples/app-fixtures.sh
|
Shell
|
gpl-2.0
| 188 |
#!/bin/bash
PYTHONPATH=$QUARK_CROOT/common:$QUARK_CROOT/tab python $QUARK_CROOT/tab/tab.py $1 $2
|
UCSD-PL/kraken
|
reflex/coq/bench-quark/test/quark/tab/tab.sh
|
Shell
|
gpl-2.0
| 98 |
#!/system/bin/sh
SLEEP=500
if [ -e /data/.FM/calibrata ] ; then
exit 0
fi
(
while : ; do
#in case we might want to cancel the calibration by creating the file
if [ -e /data/.FM/calibrata ] ; then
exit 0
fi
LEVEL=$(cat /sys/class/power_supply/battery/capacity)
CURR_ADC=$(cat /sys/class/power_supply/battery/batt_current_adc)
STATUS=$(cat /sys/class/power_supply/battery/status)
BATTFULL=$(cat /sys/class/power_supply/battery/batt_full_check)
if [ "$LEVEL" == "100" ] && [ "$BATTFULL" == "1" ]; then
log -p i -t battery-calibration "*** LEVEL: $LEVEL CUR: $CURR_ADC***: calibrando..."
rm -f /data/system/batterystats.bin
mkdir /data/.FM
chmod 777 /data/.FM
touch /data/.FM/calibrata
exit 0
fi
# log -p i -t battery-calibration "*** LEVEL: $LEVEL CUR: $CURR_ADC ***: sleeping for $SLEEP s..."
sleep $SLEEP
done
) &
|
riversource/Galaxy-Note-Kernel
|
initramfs_own/sbin/boot/calibrate.sh
|
Shell
|
gpl-2.0
| 964 |
#!/bin/bash
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
die() { echo "$@" ; exit 1; }
# only one process can talk to the pump at a time
ls /tmp/openaps.lock >/dev/null 2>/dev/null && die "OpenAPS already running: exiting"
echo "No lockfile: continuing"
touch /tmp/openaps.lock
/home/pi/decocare/insert.sh 2>/dev/null >/dev/null
function finish {
rm /tmp/openaps.lock
}
trap finish EXIT
cd /home/pi/openaps-dev
#git fetch --all && git reset --hard origin/master && git pull
git fetch origin master && git merge -X theirs origin/master
echo "Querying CGM"
openaps report invoke glucose.json.new || openaps report invoke glucose.json.new
grep glucose glucose.json.new && cp glucose.json.new glucose.json && git commit -m"glucose.json has glucose data: committing" glucose.json
git fetch origin master && git merge -X ours origin/master && git push
#git pull && git push
#grep glucose glucose.json || git reset --hard origin/master
find glucose.json -mmin -10 | egrep '.*' && grep glucose glucose.json || die "Can't read from CGM"
head -15 glucose.json
#find *.json -mmin 15 -exec mv {} {}.old \;
numprocs=$(fuser -n file $(python -m decocare.scan) 2>&1 | wc -l)
if [[ $numprocs -gt 0 ]] ; then
die "Carelink USB already in use."
fi
echo "Checking pump status"
openaps status || openaps status || die "Can't get pump status"
grep status status.json.new && cp status.json.new status.json
git fetch origin master && git merge -X ours origin/master && git push
#git pull && git push
echo "Querying pump"
#openaps pumpquery || openaps pumpquery || die "Can't query pump" && git pull && git push
openaps pumpquery || openaps pumpquery
grep T clock.json.new && cp clock.json.new clock.json
grep temp currenttemp.json.new && cp currenttemp.json.new currenttemp.json
grep timestamp pumphistory.json.new && cp pumphistory.json.new pumphistory.json
git fetch origin master && git merge -X ours origin/master && git push
#git pull && git push
echo "Querying CGM"
openaps report invoke glucose.json.new || openaps report invoke glucose.json.new
grep glucose glucose.json.new && cp glucose.json.new glucose.json && git commit -m"glucose.json has glucose data: committing" glucose.json
git fetch origin master && git merge -X ours origin/master && git push
#git pull && git push
openaps suggest
grep sens profile.json.new && cp profile.json.new profile.json
grep iob iob.json.new && cp iob.json.new iob.json
grep temp requestedtemp.json.new && cp requestedtemp.json.new requestedtemp.json
git fetch origin master && git merge -X ours origin/master && git push
#git pull && git push
tail clock.json
tail currenttemp.json
head -20 pumphistory.json
echo "Querying pump settings"
openaps pumpsettings || openaps pumpsettings || die "Can't query pump settings" && git pull && git push
grep insulin_action_curve pump_settings.json.new && cp pump_settings.json.new pump_settings.json
grep "mg/dL" bg_targets.json.new && cp bg_targets.json.new bg_targets.json
grep sensitivity isf.json.new && cp isf.json.new isf.json
grep rate current_basal_profile.json.new && cp current_basal_profile.json.new current_basal_profile.json
grep grams carb_ratio.json.new && cp carb_ratio.json.new carb_ratio.json
openaps suggest || die "Can't calculate IOB or basal"
grep sens profile.json.new && cp profile.json.new profile.json
grep iob iob.json.new && cp iob.json.new iob.json
grep temp requestedtemp.json.new && cp requestedtemp.json.new requestedtemp.json
git fetch origin master && git merge -X ours origin/master && git push
#git pull && git push
tail profile.json
tail iob.json
tail requestedtemp.json
grep rate requestedtemp.json && ( openaps enact || openaps enact ) && tail enactedtemp.json
git fetch origin master && git merge -X ours origin/master && git push
#openaps report invoke enactedtemp.json
#if /usr/bin/curl -sk https://diyps.net/closedloop.txt | /bin/grep set; then
#echo "No lockfile: continuing"
#touch /tmp/carelink.lock
#/usr/bin/curl -sk https://diyps.net/closedloop.txt | while read x rate y dur op; do cat <<EOF
#{ "duration": $dur, "rate": $rate, "temp": "absolute" }
#EOF
#done | tee requestedtemp.json
#openaps report invoke enactedtemp.json
#fi
|
oskarpearson/betacb
|
old/loop.sh
|
Shell
|
gpl-2.0
| 4,231 |
#!/bin/sh
# (c) Copyright 2009 - 2010 Xilinx, Inc. All rights reserved.
#
# This file contains confidential and proprietary information
# of Xilinx, Inc. and is protected under U.S. and
# international copyright and other intellectual property
# laws.
#
# DISCLAIMER
# This disclaimer is not a license and does not grant any
# rights to the materials distributed herewith. Except as
# otherwise provided in a valid license issued to you by
# Xilinx, and to the maximum extent permitted by applicable
# law: (1) THESE MATERIALS ARE MADE AVAILABLE "AS IS" AND
# WITH ALL FAULTS, AND XILINX HEREBY DISCLAIMS ALL WARRANTIES
# AND CONDITIONS, EXPRESS, IMPLIED, OR STATUTORY, INCLUDING
# BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, NON-
# INFRINGEMENT, OR FITNESS FOR ANY PARTICULAR PURPOSE; and
# (2) Xilinx shall not be liable (whether in contract or tort,
# including negligence, or under any other theory of
# liability) for any loss or damage of any kind or nature
# related to, arising under or in connection with these
# materials, including for any direct, or any indirect,
# special, incidental, or consequential loss or damage
# (including loss of data, profits, goodwill, or any type of
# loss or damage suffered as a result of any action brought
# by a third party) even if such damage or loss was
# reasonably foreseeable or Xilinx had been advised of the
# possibility of the same.
#
# CRITICAL APPLICATIONS
# Xilinx products are not designed or intended to be fail-
# safe, or for use in any application requiring fail-safe
# performance, such as life-support or safety devices or
# systems, Class III medical devices, nuclear facilities,
# applications related to the deployment of airbags, or any
# other applications that could lead to death, personal
# injury, or severe property or environmental damage
# (individually and collectively, "Critical
# Applications"). Customer assumes the sole risk and
# liability of any use of Xilinx products in Critical
# Applications, subject only to applicable laws and
# regulations governing limitations on product liability.
#
# THIS COPYRIGHT NOTICE AND DISCLAIMER MUST BE RETAINED AS
# PART OF THIS FILE AT ALL TIMES.
#--------------------------------------------------------------------------------
cp ../../../instrmem.mif .
rm -rf simv* csrc DVEfiles AN.DB
echo "Compiling Core Verilog UNISIM/Behavioral model"
vlogan +v2k ../../../instrmem.v
vhdlan ../../example_design/instrmem_exdes.vhd
echo "Compiling Test Bench Files"
vhdlan ../bmg_tb_pkg.vhd
vhdlan ../addr_gen.vhd
vhdlan ../bmg_stim_gen.vhd
vhdlan ../instrmem_synth.vhd
vhdlan ../instrmem_tb.vhd
echo "Elaborating Design"
vlogan +v2k $XILINX/verilog/src/glbl.v
vcs +vcs+lic+wait -debug instrmem_tb glbl
echo "Simulating Design"
./simv -ucli -i ucli_commands.key
dve -session vcs_session.tcl
|
elegabriel/myzju
|
junior1/CA/mips_pipeline2/ipcore_dir/instrmem/simulation/functional/simulate_vcs.sh
|
Shell
|
gpl-2.0
| 2,907 |
#!/bin/bash --
set -e
set -x
EXTNAME="${1}"
NOTESTS="${2}"
# Install dependencies
composer update -n --prefer-dist --no-progress
# Prepare extension structure
mkdir -p ../phpBB3/phpBB/ext/"${EXTNAME}"
# Build extension package
vendor/bin/phing
# Copy extension files and directories
cp -a build/package/"${EXTNAME}"/* ../phpBB3/phpBB/ext/"${EXTNAME}"/
# Add required files for tests
if [[ "${NOTESTS}" != 1 ]]; then
cp -a {phpunit.xml.dist,tests/} ../phpBB3/phpBB/ext/"${EXTNAME}"/
fi
|
AlfredoRamos/phpbb-ext-simple-spoiler
|
.github/setup-extension.sh
|
Shell
|
gpl-2.0
| 493 |
#!/usr/bin/env bash
# clean.sh
#
# Author: Yann KOETH
# Created: Tue Jul 22 20:27:47 2014 (+0200)
# Last-Updated: Tue Jul 22 20:30:43 2014 (+0200)
# By: Yann KOETH
# Update #: 12
#
sudo rm -rvf *.egg *.pyc illum/ build/ *~
|
xsyann/detection
|
clean.sh
|
Shell
|
gpl-2.0
| 238 |
source "$variables"
heading SETUP
if ! dpkg -s "curl" >> /dev/null 2>&1
then
sudo apt-get -y install "curl" || exit 1
fi
subheading "on controller, get token from Keystone (as admin)"
ssh ubuntu@controller "source admin-openrc.sh; openstack token issue" | tee "$TMP_PATH/remote.log" | log -i -vvv
TOKEN=`grep " id" "$TMP_PATH/remote.log" | cut -s -d "|" -f3 | tr -d '[[:space:]]'`
echo "TOKEN=$TOKEN" >> "$variables"
log -vvv "TOKEN=$TOKEN"
|
MasterprojectOpenStack2015/sourcecode
|
experiments/02_memcache_flush/setup.sh
|
Shell
|
gpl-2.0
| 449 |
# prompt.sh sets up the look and feel of
# the command prompt.
# \u = user
# \h = hostname
# \W = current directory name only not full PWD
PS1='\u@\h:\W$ '
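# Example: user "jdoe" on host "web01" inside ~/projects/bashd (hypothetical
# values) would see the prompt: jdoe@web01:bashd$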
|
jwaterfaucett/bashd
|
config/prompt.sh
|
Shell
|
gpl-2.0
| 157 |
#!/bin/bash
# Make sure warpspeed environment vars are available before proceeding.
if [ -z "$WARPSPEED_ROOT" ] || [ -z "$WARPSPEED_USER" ]; then
echo "Error: It appears that this server was not provisioned with Warpspeed."
echo "WARPSPEED_ROOT and WARPSPEED_USER env vars were not found."
exit 1
fi
# Import the warpspeed functions.
source $WARPSPEED_ROOT/includes/installer-functions.sh
# Require that the root user be executing this script.
ws_require_root
ws_log_header "Installing redis."
# Install redis.
apt-get -y install redis-server
# Restart service.
service redis-server restart
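# Optional sanity check (assumes redis-cli was installed alongside redis-server):
# redis-cli ping   # should answer: PONG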
|
warpspeed/warpspeed
|
installers/redis.sh
|
Shell
|
gpl-2.0
| 609 |
#!/bin/sh
[ -n "$INCLUDE_ONLY" ] || {
NOT_INCLUDED=1
INCLUDE_ONLY=1
. ../netifd-proto.sh
. ./ppp.sh
init_proto "$@"
}
proto_3g_init_config() {
no_device=1
available=1
ppp_generic_init_config
proto_config_add_string "device:device"
proto_config_add_string "apn"
proto_config_add_string "service"
proto_config_add_string "pincode"
proto_config_add_string "dialnumber"
}
proto_3g_setup() {
local interface="$1"
local chat
json_get_var device device
json_get_var apn apn
json_get_var service service
json_get_var pincode pincode
json_get_var dialnumber dialnumber
[ -n "$dat_device" ] && device=$dat_device
[ -e "$device" ] || {
proto_set_available "$interface" 0
return 1
}
case "$service" in
cdma|evdo)
chat="/etc/chatscripts/evdo.chat"
;;
*)
chat="/etc/chatscripts/3g.chat"
cardinfo=$(gcom -d "$device" -s /etc/gcom/getcardinfo.gcom)
if echo "$cardinfo" | grep -q Novatel; then
case "$service" in
umts_only) CODE=2;;
gprs_only) CODE=1;;
*) CODE=0;;
esac
export MODE="AT\$NWRAT=${CODE},2"
elif echo "$cardinfo" | grep -q Option; then
case "$service" in
umts_only) CODE=1;;
gprs_only) CODE=0;;
*) CODE=3;;
esac
export MODE="AT_OPSYS=${CODE}"
elif echo "$cardinfo" | grep -q "Sierra Wireless"; then
SIERRA=1
elif echo "$cardinfo" | grep -qi huawei; then
case "$service" in
umts_only) CODE="14,2";;
gprs_only) CODE="13,1";;
*) CODE="2,2";;
esac
export MODE="AT^SYSCFG=${CODE},3FFFFFFF,2,4"
fi
if [ -n "$pincode" ]; then
PINCODE="$pincode" gcom -d "$device" -s /etc/gcom/setpin.gcom || {
proto_notify_error "$interface" PIN_FAILED
proto_block_restart "$interface"
return 1
}
fi
[ -n "$MODE" ] && gcom -d "$device" -s /etc/gcom/setmode.gcom
# wait for carrier to avoid firmware stability bugs
[ -n "$SIERRA" ] && {
gcom -d "$device" -s /etc/gcom/getcarrier.gcom || return 1
}
# autodetect APN
if [ -z "$apn" ]; then
mccmnc=`gcom -d "$device" -s /etc/gcom/getimsi.gcom | head -c 5`
apn=`awk "/$mccmnc/{print \\$2}" /etc/apnlist`
logger -t "$0" "MCCMNC: $mccmnc, APN: $apn"
fi
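# /etc/apnlist is assumed to hold "MCCMNC APN" pairs, one per line,
# e.g. (hypothetical entry): 26201 internet.t-mobile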
if [ -z "$dialnumber" ]; then
dialnumber="*99***1#"
fi
;;
esac
connect="${apn:+USE_APN=$apn }DIALNUMBER=$dialnumber /usr/sbin/chat -t5 -v -E -f $chat"
ppp_generic_setup "$interface" \
noaccomp \
nopcomp \
novj \
nobsdcomp \
noauth \
set EXTENDPREFIX=1 \
lock \
crtscts \
115200 "$device"
return 0
}
proto_3g_teardown() {
proto_kill_command "$interface"
}
[ -z "NOT_INCLUDED" ] || add_protocol 3g
|
LeeNCompanyInc/openwrt
|
package/network/utils/comgt/files/3g.sh
|
Shell
|
gpl-2.0
| 2,623 |
#!/bin/sh
expt=$1
if [ x"$expt" = x ]; then
>&2 echo "Usage: $0 EXPTID [DECODER]"
exit 1
fi
decode=${2:-../src/programs/pocketsphinx_batch}
# `dirname $decode`/../../libtool --mode=execute \
# valgrind --tool=massif \
# --alloc-fn=__ckd_calloc__ --alloc-fn=__ckd_calloc_2d__ --alloc-fn=__ckd_calloc_3d__ --alloc-fn=__ckd_malloc__ --alloc-fn=__listelem_malloc__ --alloc-fn=listelem_add_block --alloc-fn=__ckd_salloc__ --alloc-fn=__ckd_realloc__ \
# $decode -ctlcount 10 \
$decode \
-hmm ../model/hmm/wsj1 \
-dict bcb05cnp.dic \
-lm bcb05cnp.z.DMP \
-lw 9.5 -wip 0.5 \
-beam 1e-50 -wbeam 1e-30 \
-fwdflat no -bestpath no \
-cepdir . -cepext .sph \
-adcin yes -adchdr 1024 \
-ctl wsj_test.fileids \
-hyp $expt.hyp \
> $expt.log 2>&1
cat wsj_test.transcription | ./word_align.pl -i - $expt.hyp > $expt.align
grep -h ': AVERAGE' $expt.log
tail -n3 $expt.align
|
HomeBankCode/lena-its-tools
|
pocketsphinx-master/regression/wsj1_test5k_fast.sh
|
Shell
|
gpl-2.0
| 924 |
#! /bin/sh -e
# tup - A file-based build system
#
# Copyright (C) 2011-2020 Mike Shal <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# Verify that output does not have the "Active" bar if we are redirecting to
# a file.
. ./tup.sh
cat > Tupfile << HERE
: |> echo foo |>
: |> echo bar 1>&2 |>
HERE
tup upd > out.txt
if grep Active out.txt > /dev/null; then
echo "Error: Shouldn't see 'Active' in the output" 1>&2
exit 1
fi
eotup
|
ppannuto/tup
|
test/t8003-display.sh
|
Shell
|
gpl-2.0
| 1,049 |
#!/bin/bash
DIR="${BASH_SOURCE%/*}"
if [[ ! -d "$DIR" ]]; then DIR="$PWD"; fi
. "$DIR/Common.sh"
detect_os
export PATH=$PATH:/usr/local/bin
service subsonic stop
useradd --system syssubsonic
gpasswd --add syssubsonic audio
gpasswd --add syssubsonic video
if [ $os == "centos" ]
then
fileLoc="/etc/sysconfig/subsonic"
elif [ $os == "ubuntu" ]
then
fileLoc="/etc/default/subsonic"
else
echo "No possible OS found to create configuration!"
exit
fi
if /bin/grep -q "SUBSONIC_USER=root" "${fileLoc}"
then
$(/bin/sed -i -e 's/\SUBSONIC_USER=root/SUBSONIC_USER=syssubsonic/g' $fileLoc)
else
echo "Cannot change user. Open /etc/sysconfig/subsonic to manually change SUBSONIC_USER"
fi
if /bin/grep -q "SUBSONIC_ARGS=\"--max-memory=150\"" "${fileLoc}"
then
$(/bin/sed -i -e 's/\SUBSONIC_ARGS="--max-memory=150"/SUBSONIC_ARGS="--https-port=8404 --max-memory=150"/g' $fileLoc)
else
echo "Cannot change port number. Open /etc/sysconfig/subsonic to manually change SUBSONIC_ARGS=\"--max-memory=150\" to SUBSONIC_ARGS=\"--https-port=8404 --max-memory=150\""
fi
sudo iptables -I INPUT -p tcp --dport 8404 -j ACCEPT
service subsonic start
|
roddydairion/Subsonic
|
RunConfig.sh
|
Shell
|
gpl-3.0
| 1,196 |
#!/usr/bin/env bash
# Installs motion-notify and all required dependencies.
# Pre-requisites: Run script as root, ensure a motion.motion user exists, ensure that /etc/motion-notify does NOT exist
# Refer to the README for additional steps required for configuring Google Drive, email and etc (those steps aren't covered by this script)
# If you're upgrading motion-notify, move your existing motion-notify folder to a new location first and then copy your creds.p12 file across after running the install script and then update your new config file
# Update APT and install dependencies
apt-get update
apt-get install python-twisted-web
apt-get install python-pip
pip install --upgrade PyDrive
pip install --upgrade enum34
pip install --upgrade oauth2client
pip install google-api-python-client --upgrade --ignore-installed six
pip install requests
apt-get install python-openssl
# Install git and clone motion-notify into the destination directory
apt-get install git
git clone https://github.com/amdean/motion-notify.git /etc/motion-notify
chown -R motion.motion /etc/motion-notify
chmod +x /etc/motion-notify/motion-notify.py
chmod +x /etc/motion-notify/utils/ssdp_server.py
mv /etc/motion-notify/utils/startup-script/ssdp /etc/init.d/ssdp
chmod +x /etc/init.d/ssdp
# Create the log files and lock files and set ownership and permissions
touch /var/tmp/motion-notify.log
chown motion.motion /var/tmp/motion-notify.log
chmod 664 /var/tmp/motion-notify.log
touch /var/tmp/motion-notify.lock.pid
chmod 664 /var/tmp/motion-notify.lock.pid
chown motion.motion /var/tmp/motion-notify.lock.pid
|
amdean/motion-notify
|
install-motion-notify.sh
|
Shell
|
gpl-3.0
| 1,592 |
#!/bin/bash
#------------------------------------------------------------------
# Copyright (c) 2006-2011 Gluster, Inc. <http://www.gluster.com>
# This file is part of Gluster Management Console.
#
# Gluster Management Console is free software; you can redistribute
# it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# Gluster Management Console is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY; without even the
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see
# <http://www.gnu.org/licenses/>.
#------------------------------------------------------------------
set -e
GMC_TARGET_URL='[email protected]:gluster/gmc-target.git'
BUCKMINSTER_URL=http://download.eclipse.org/tools/buckminster/headless-3.7/
BUCKMINSTER_PRODUCT_NAME=org.eclipse.buckminster.cmdline.product
GMC_WEBSTART_PROJECT=org.gluster.storage.management.console.feature.webstart
GMC_CORE_PROJECT=org.gluster.storage.management.core
GMC_CONSOLE_PROJECT=org.gluster.storage.management.console
GMG_PROJECT=org.gluster.storage.management.gateway
startBold()
{
tput bold
}
stopBold()
{
tput sgr0
}
# Shows given text in bold
showBold()
{
startBold
echo ${1}
stopBold
}
# Get the director that can be used to install headless buckminster
get_director()
{
mkdir -p ${TOOLS_DIR}
cd ${TOOLS_DIR}
echo "Downloading buckminster director..."
wget -c http://ftp.daum.net/eclipse//tools/buckminster/products/director_latest.zip
if ! unzip -tqq director_latest.zip; then
rm -f director_latest.zip
wget http://ftp.daum.net/eclipse//tools/buckminster/products/director_latest.zip
fi
unzip -q director_latest.zip
cd -
}
install_buckminster()
{
mkdir -p ${BUCKMINSTER_HOME}
echo "Installing buckminster..."
cd ${TOOLS_DIR}/director
./director -r ${BUCKMINSTER_URL} -d ${BUCKMINSTER_HOME} -p Buckminster -i ${BUCKMINSTER_PRODUCT_NAME}
echo "Setting up buckminster..."
cd ${BUCKMINSTER_HOME}
echo " => core"
./buckminster install ${BUCKMINSTER_URL} org.eclipse.buckminster.core.headless.feature
echo " => pde"
./buckminster install ${BUCKMINSTER_URL} org.eclipse.buckminster.pde.headless.feature
echo " => git"
./buckminster install ${BUCKMINSTER_URL} org.eclipse.buckminster.git.headless.feature
echo " => emma"
./buckminster install ${BUCKMINSTER_URL} org.eclipse.buckminster.emma.headless.feature
cd -
}
# Create keystore for jar signing (self signed)
setup_keys()
{
mkdir -p ${KEYS_DIR}
cd ${KEYS_DIR}
keytool -genkeypair -keystore gluster.keystore -storepass gluster -alias gluster -keyalg RSA << EOF
Gluster Temp Build
Gluster
Gluster
Dummy
Dummy
US
yes
EOF
keytool -selfcert -alias gluster -keystore gluster.keystore << EOF
gluster
EOF
cd -
}
configure_workspace()
{
echo "Configuring the workspace..."
rm -rf ${WORKSPACE_DIR}
mkdir -p ${WORKSPACE_DIR}
cd ${WORKSPACE_DIR}
for f in $src_dir/*; do
ln -s $f
done
if [ ! -e gmc-target ]; then
ln -s $gmc_target_dir gmc-target
fi
echo "Importing target platform..."
${BUCKMINSTER_HOME}/buckminster importtarget -data ${WORKSPACE_DIR} --active gmc-target/org.gluster.storage.management.console.target/gmc.target
cd -
}
buckminster_perform()
{
${BUCKMINSTER_HOME}/buckminster perform --properties ${PROPERTIES_FILE} -Dbuckminster.output.root=${DIST_DIR} -data ${WORKSPACE_DIR} $*
}
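# e.g. (as used below): buckminster_perform ${GMC_CORE_PROJECT}#bundle.jar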
build_gmc()
{
os=${1}
ws=${2}
arch=${3}
cd ${WORKSPACE_DIR}
DIST_DIR=${DIST_BASE}/gmc/${os}.${ws}.${arch}
if [ ! -d ${DIST_DIR} ]; then
mkdir -p ${DIST_DIR}
fi
echo "Importing component query for glustermc..."
${BUCKMINSTER_HOME}/buckminster import -data ${WORKSPACE_DIR} build/org.gluster.storage.management.console.feature.webstart.cquery
echo "Building GMC for [${os}.${ws}.${arch}]"
buckminster_perform ${GMC_WEBSTART_PROJECT}#buckminster.clean
buckminster_perform -Dproduct.version=${VERSION} ${GMC_CONSOLE_PROJECT}#update.version
buckminster_perform -Dtarget.os=${os} -Dtarget.ws=${ws} -Dtarget.arch=${arch} ${GMC_WEBSTART_PROJECT}#create.eclipse.jnlp.product
buckminster_perform ${GMC_WEBSTART_PROJECT}#copy.root.files
buckminster_perform -Dproduct.version=${VERSION} ${GMC_WEBSTART_PROJECT}#update.version
# buckminster signs the jars using eclipse certificate - hence unsign and sign them again
echo "Signing product jars..."
buckminster_perform ${GMC_WEBSTART_PROJECT}#unsign.jars
buckminster_perform -Djar.signing.keystore=${KEYS_DIR}/gluster.keystore ${GMC_WEBSTART_PROJECT}#sign.jars
}
build_gmg()
{
cd ${WORKSPACE_DIR}
export DIST_DIR=${DIST_BASE}/gmg
if [ ! -d ${DIST_DIR} ]; then
mkdir -p ${DIST_DIR}
fi
echo "Importing component query for glustermg..."
${BUCKMINSTER_HOME}/buckminster import -data ${WORKSPACE_DIR} build/org.gluster.storage.management.core.cquery
${BUCKMINSTER_HOME}/buckminster import -data ${WORKSPACE_DIR} build/org.gluster.storage.management.gateway.cquery
echo "Building CORE..."
buckminster_perform ${GMC_CORE_PROJECT}#bundle.jar
echo "Building Gateway..."
buckminster_perform -Dproduct.version=${VERSION} ${GMG_PROJECT}#archive
echo "Packaging Gateway..."
${SCRIPT_DIR}/package-gateway.sh ${DIST_DIR} ${DIST_BASE}/gmc
}
package_backend()
{
cd ${WORKSPACE_DIR}
echo "Packaging backend scripts"
export DIST_DIR=${DIST_BASE}/gmg-backend
if [ ! -d ${DIST_DIR} ]; then
mkdir -p ${DIST_DIR}
fi
${SCRIPT_DIR}/package-backend.sh ${DIST_DIR}
}
build_gmc_all()
{
build_gmc linux gtk x86
build_gmc linux gtk x86_64
build_gmc win32 win32 x86
build_gmc win32 win32 x86_64
build_gmc macosx cocoa x86
build_gmc macosx cocoa x86_64
}
# Clean the workspace (class files, jar files created during previous build)
# and the dist directory
clean()
{
# Remove the core jar file created by previous build
/bin/rm -f ${WORKSPACE_DIR}/src/org.gluster.storage.management.gateway/WebContent/WEB-INF/lib/org.gluster.storage.management.core*jar
# Remove compiled class files
/bin/rm -rf ${WORKSPACE_DIR}/src/org.gluster.storage.management.gateway/WebContent/WEB-INF/classes/*
# Remove old build artifacts
/bin/rm -rf ${DIST_BASE}/*
}
build()
{
export VERSION=${VERSION:-1.0.0alpha}
clean
build_gmc_all
build_gmg
package_backend
}
#-----------------------------------
# Main Action Body
#-----------------------------------
ME=$(basename $0)
GMC_DIR=$(dirname $(dirname $(readlink -e $0)))
function show_help()
{
cat <<EOF
Usage: `startBold`$ME [-f] [-h] [GMC-TARGET-DIR] [BUILD-DIR]`stopBold`
Build Gluster Management Console from source.
GMC-TARGET-DIR -> Directory where gmc-target.git has been or should be cloned
BUILD-DIR -> Directory where build tasks will be performed and binaries will be created
If not passed, these two directories will be created parallel to ${GMC_DIR}
Options:
-f -> Force build (re-create build directory and perform build).
-h -> Display this help and exit
Examples:
$ME
$ME ~/gmc-target
$ME ~/gmc-target ~/gmc-build
EOF
}
function main()
{
# Parse command line arguments.
while getopts :fh OPT; do
case "$OPT" in
h)
show_help
exit 0
;;
f)
force=yes
;;
\?)
# getopts issues an error message
echo "Invalid option: -$OPTARG"
show_help
exit 1
;;
:)
echo "Option -$OPTARG requires an argument."
show_help
exit 1
;;
esac
done
# Remove the switches we parsed above.
shift `expr $OPTIND - 1`
# We want only one non-option argument.
if [ $# -gt 2 ]; then
show_help
exit 1
fi
src_dir=$(dirname $(dirname $(readlink -e $0)))
parent_dir=$(dirname $src_dir)
gmc_target_dir=$1
build_dir=$2
if [ -z "$gmc_target_dir" ]; then
gmc_target_dir=$parent_dir/gmc-target
fi
if [ -z "$build_dir" ]; then
build_dir=$parent_dir/gmc-build
fi
if [ ! -e "$gmc_target_dir" ]; then
echo "Getting gmc-target from $GMC_TARGET_URL"
git clone $GMC_TARGET_URL $gmc_target_dir
fi
if [ "$force" = "yes" ]; then
rm -fr $build_dir
fi
TOOLS_DIR=${build_dir}/tools
DIST_BASE=${build_dir}/dist
KEYS_DIR=${TOOLS_DIR}/keys
BUCKMINSTER_HOME=${TOOLS_DIR}/buckminster
WORKSPACE_DIR=${BUCKMINSTER_HOME}/workspace
PROPERTIES_FILE=${WORKSPACE_DIR}/build/glustermc_build.properties
SCRIPT_DIR=$src_dir/build
if [ ! -e $build_dir ]; then
mkdir -p $build_dir
if [ ! -e ${TOOLS_DIR} ]; then
get_director
fi
if [ ! -e ${BUCKMINSTER_HOME} ]; then
install_buckminster
fi
if [ ! -e ${KEYS_DIR} ]; then
setup_keys
fi
fi
configure_workspace
build
echo
echo "Build artifacts:"
showBold " ${DIST_BASE}/gmg/gmg-installer-$VERSION.tar.gz"
showBold " ${DIST_BASE}/gmg-backend/gmg-backend-installer-$VERSION.tar.gz"
echo
}
main "$@"
|
gluster/gmc
|
build/gmc-build.sh
|
Shell
|
gpl-3.0
| 9,309 |
#!/usr/bin/env bash
docker stop mysql
docker rm mysql
docker rmi cn/mysql
docker build -t cn/mysql .
|
cn100800/book
|
tools/NMRPG/mysql/build.sh
|
Shell
|
gpl-3.0
| 97 |
#!/bin/bash
# test many concurrent tcp connections
uname
if [ `uname` = "FreeBSD" ] ; then
echo "This test currently does not work on FreeBSD."
exit 77
fi
echo \[manytcp.sh\]: test concurrent tcp connections
uname
if [ `uname` = "SunOS" ] ; then
echo "Solaris: FIX ME"
exit 77
fi
. $srcdir/diag.sh init
. $srcdir/diag.sh startup manytcp.conf
# the config file specifies exactly 1100 connections
. $srcdir/diag.sh tcpflood -c-1100 -m40000
# the sleep below is needed to prevent too-early termination of the tcp listener
sleep 1
. $srcdir/diag.sh shutdown-when-empty # shut down rsyslogd when done processing messages
. $srcdir/diag.sh wait-shutdown # we need to wait until rsyslogd is finished!
. $srcdir/diag.sh seq-check 0 39999
. $srcdir/diag.sh exit
|
madedotcom/rsyslog
|
tests/manytcp.sh
|
Shell
|
gpl-3.0
| 769 |
#!/bin/bash
source "${SOURCEDIR}/custombuildscripts/android/prepare_sources.sh"
# mkdir "${TARGETDIR}/interfacestmp"
# cp -L -f -R "${SOURCEDIR}/src/interfaces" "${TARGETDIR}/interfacestmp"
# cp -L -f -R "${SOURCEDIR}/src/interfaces" "${TARGETDIR}"
# mkdir "${TARGETDIR}/scriptstmp"
# cp -L -f -R "${SOURCEDIR}/src/scripts" "${TARGETDIR}/scriptstmp"
# cp -L -f -R "${SOURCEDIR}/src/scripts" "${TARGETDIR}"
# mkdir "${TARGETDIR}/Platform"
## cp -L -f -R "${SOURCEDIR}/src/ORCA/utils/Platform" "${TARGETDIR}/Platform"
# cp -L -f -R "${SOURCEDIR}/src/ORCA/utils/Platform" "${TARGETDIR}"
|
thica/ORCA-Remote
|
custombuildscripts/debian/prepare_sources.sh
|
Shell
|
gpl-3.0
| 594 |
#!/bin/bash
#
# delete server file older than 7days
#sudo rclone --min-age 7d delete gdsecret:$(hostname -f)
# --drive-use-trash=false
sudo rclone sync --drive-use-trash=false /srv/backs/ gdsecret:$(hostname -f)
if [ $? -eq 0 ]
then
echo "Sync complete"
fi
|
PoppyPop/docker
|
system/backup-scripts/scripts/backup-sync.sh
|
Shell
|
gpl-3.0
| 263 |
#!/bin/bash
# DOCUMENTATION SECTION.
:<<'CHARACTERS'
Key characters in the game
==========================
0. Table of contents
---------------------
1. Bosses
2. Wise guys
3. Lone wolves
4. Merchants
5. Hookers
1. Bosses
----------
Tony Emacsio
The leader of the Gnumino family. Best known for his greed, he is nevertheless
a competent leader and has outlasted many others who have tried to depose him.
He and his organization are headquartered in the southwestern part of the
district.
Ed Vimmy
The boss of UniCS (Unified Crime Syndicate), popularly known as the 'Nixers. His
gang is nowhere as rich as the Gnumino family, but what they lack in firepower,
they make up with their dedication. He has a short temper, and tends to snap
back at anyone who doesn't do what he expects of them. His headquarters are in
the northeastern block of the neighborhood.
2. Wise guys
-------------
Grubby Fingers
A rather dim fellow, but one of if not the most loyal Gnumino family member.
3. Lone wolves
---------------
4. Merchants
-------------
Tari Gunzip's Problem Solutions
Principal weapon merchant for Bashside. Sells weapons ("solutions", as he calls
them) and ammunition for them. He runs his shop around the middle of Share St.
Lay Tech Special Supplies
High-end hardware retailer. Carries things such as holographic sights, dazzle
coats (used for countering laser sights) and etc. He resides at the east end
of Root Street.
Doc Groff
One of only a few doctors residing in Bashside, Doc Groff will treat the player
differently depending on his karma - the higher, the less the doctor will
charge for his services. He resides in the southeastern corner of
Root St/Share St intersection.
Fuser's Magical Powders
Runs the principal drug store in Bashside. Located in the upper part of Hoe St.
5. Pimps and hookers
---------------------
Joey Python
Starting out as a bartender, Joey quickly turned to procuring clients for his
newfound female friends. Widely known for his kind demeanor, a rare thing to
see around in Bashside.
Pearl
Though she's not the prettiest girl in town, Pearl is a veteran at her job.
Though she's guaranteed to satisfy, her experience has its drawbacks - better
make sure you're protected! Her standard rate is $100.
Rubi
Hailing from the Far East, Rubi is learned in many mysterious arts. Spending a
night with her is almost guaranteed to be educational. She's not cheap though;
her standard price is $500.
Looa
Another arrival from the more distant parts of the world. For some reason she
isn't particularly popular, but there's been little complaint from her clients.
She takes $200 for a night.
CHARACTERS
:<<'ITEMS'
ITEMS
# END DOCUMENTATION.
# DATA SECTION.
BANNER=' $$$$$$\ $$$$$$$$\ $$$$$$\
$$ __$$\ \__$$ __|$$ __$$\ (press any key to continue)
$$ / \__| $$ | $$ / $$ | $$\
$$ |$$$$\ $$ | $$$$$$$$ | \__|
$$ |\_$$ | $$ | $$ __$$ |
$$ | $$ | $$ | $$ | $$ | $$\
\$$$$$$ | $$ | $$ | $$ | \__|
\______/ \__| \__| \__|
$$$$$$$\ $$\ $$\ $$\
$$ __$$\ $$ | \__| $$ |
$$ | $$ | $$$$$$\ $$$$$$$\ $$$$$$$\ $$$$$$$\ $$\ $$$$$$$ | $$$$$$\
$$$$$$$\ | \____$$\ $$ _____|$$ __$$\ $$ _____|$$ |$$ __$$ |$$ __$$\
$$ __$$\ $$$$$$$ |\$$$$$$\ $$ | $$ |\$$$$$$\ $$ |$$ / $$ |$$$$$$$$ |
$$ | $$ |$$ __$$ | \____$$\ $$ | $$ | \____$$\ $$ |$$ | $$ |$$ ____|
$$$$$$$ |\$$$$$$$ |$$$$$$$ |$$ | $$ |$$$$$$$ |$$ |\$$$$$$$ |\$$$$$$$\
\_______/ \_______|\_______/ \__| \__|\_______/ \__| \_______| \_______|
$$$$$$\ $$\ $$\
$$ __$$\ $$ | \__|
$$ / \__|$$$$$$\ $$$$$$\ $$$$$$\ $$\ $$$$$$\ $$$$$$$\
\$$$$$$\ \_$$ _| $$ __$$\ $$ __$$\ $$ |$$ __$$\ $$ _____|
\____$$\ $$ | $$ / $$ |$$ | \__|$$ |$$$$$$$$ |\$$$$$$\
$$\ $$ | $$ |$$\ $$ | $$ |$$ | $$ |$$ ____| \____$$\
\$$$$$$ | \$$$$ |\$$$$$$ |$$ | $$ |\$$$$$$$\ $$$$$$$ |
\______/ \____/ \______/ \__| \__| \_______|\_______/'
MAP_BLANK=" | | | |
---'S '--------' '---
h Root Rd
---.a .---..---. H.---
|r | || | o|
|e | || | e|
| | || | |
---'S '---''---' S|
t Mount St t|
---. .--------. |
| | | |"
# END DATA.
# DECLARATION SECTION.
declare SAVE_FNAME
declare LOOP
declare -i STAT_Str STAT_Dex STAT_Int STAT_Lck
declare -i SKILL_Guns SKILL_Melee SKILL_Martial
function showHelp {
local -a lines=(
"i(nv): check your inventory."
"l(ook): take measure of your current situation."
"s(hoot) <target>: aim and fire your weapon at the specified target."
"t(alk) <target>: talk to the specified person."
"w(alk) N|E|S|W: move yo azz in the respective direction."
"map: show the map of Bashside and your current location."
"stat: check your statistics."
"help: display this list of commands."
)
local -i start=0
local -i end=${#lines[*]}
if (( $# == 2 )); then
start=$1
end=$2
fi
for (( i=$start ; $i < $end ; i += 1 )); do
echo " ${lines[$i]}"
done
}
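# e.g. "showHelp 0 3" lists only the first three commands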
function showMap {
clear
#local -i indent
local title="B a s h s i d e"
#indent=$(( ($(tput cols) - ${#title}) / 2 ))
#printf "%${indent}s" "$title"
echo " $title"
echo
echo "$MAP_BLANK"
tput cup $(($1 + 2)) $(($2 * 2))
echo -n "@"
read -n 1 -s
clear
}
function yesNo {
read -p "(y/any)> " -n 1
if [[ $REPLY =~ [Yy] ]]; then
return 0
else
return 1
fi
}
# END DECLARATION.
read -p "$BANNER" -n 1 -s
clear
if [[ -f $1 ]]; then
read -n 1 -p "Attempt to read save data from \"$1\"? (y/n) "
if [[ $REPLY =~ [Yy] ]]; then
SAVE_FNAME=$1
fi
echo
fi
if [[ -z $SAVE_FNAME && -f ~/.gta_bashside.save ]]; then
# Old player.
SAVE_FNAME=~/.gta_bashside.save
echo "'Sup, $USER?"
else
# New player.
echo "Yo man. Looks like you're new in Bashside?"
read -n 1 -s -p '> ...'
echo
echo "Yeah man, I got ya. Well I hate to tell ya, but bein' a new kid around here"
echo "ain't easy these days. You'd better learn the laws of the street before you"
echo "go runnin' anywhere. So here's the deal: you gotta know some vocabulary first!"
echo
showHelp
read -n 1 -s
fi
#showMap 2 10
# This is the main read-eval-print loop.
LOOP=true
while $LOOP; do
read -p "> "
case "$REPLY" in
"q" | "quit")
echo -n "Really quit? "
if yesNo; then
LOOP=false
else
echo
fi
;;
'')
:
;;
*)
echo "'$REPLY'? What's that supposed to mean?"
;;
esac
done
|
yaaun/gta-bash
|
gtab.sh
|
Shell
|
gpl-3.0
| 7,053 |
:
# script_response.sh
# -----------------
#
# -- Tim Riley
# -----------------
grep=/usr/xpg4/bin/grep
cat typescript |
$grep -e ": " -e '? ' |
grep '^[A-Z]' |
grep -v SQL
|
timhriley/appaserver
|
utility/script_response.sh
|
Shell
|
gpl-3.0
| 179 |
#!/bin/bash
set -ex
if rustc --version | grep '\(beta\|nightly\|dev\)'; then
echo "On non-stable rust version, no deploy."
exit 0
fi
# On stable!
# Create a "staging" directory
mkdir staging
# Copy the release binary to staging.
cp target/$TARGET/release/tfs staging
cd staging
# Prepare the release that will be uploaded.
tar czf ../${PROJECT_NAME}-${TRAVIS_TAG}-${TARGET}.tar.gz *
|
terminalcloud/tfs
|
ci/before_deploy.sh
|
Shell
|
gpl-3.0
| 399 |
LD_LIBRARY_PATH=../../target/release ./romkana_cli "$1"
|
odrevet/libkana
|
test/romaji_to_katakana_C_cli/run.sh
|
Shell
|
gpl-3.0
| 54 |
#!/bin/bash
set -e
pegasus_lite_version_major="4"
pegasus_lite_version_minor="7"
pegasus_lite_version_patch="0"
pegasus_lite_enforce_strict_wp_check="true"
pegasus_lite_version_allow_wp_auto_download="true"
. pegasus-lite-common.sh
pegasus_lite_init
# cleanup in case of failures
trap pegasus_lite_signal_int INT
trap pegasus_lite_signal_term TERM
trap pegasus_lite_exit EXIT
echo -e "\n################################ Setting up workdir ################################" 1>&2
# work dir
export pegasus_lite_work_dir=$PWD
pegasus_lite_setup_work_dir
echo -e "\n###################### figuring out the worker package to use ######################" 1>&2
# figure out the worker package to use
pegasus_lite_worker_package
echo -e "\n##################### setting the xbit for executables staged #####################" 1>&2
# set the xbit for any executables staged
/bin/chmod +x example_workflow-averageratioevent_3-1.0
echo -e "\n############################# executing the user tasks #############################" 1>&2
# execute the tasks
set +e
pegasus-kickstart -n example_workflow::averageratioevent_3:1.0 -N ID0000027 -R condorpool -L example_workflow -T 2016-12-02T01:43:15+00:00 ./example_workflow-averageratioevent_3-1.0
job_ec=$?
set -e
|
elainenaomi/sciwonc-dataflow-examples
|
dissertation2017/Experiment 1B/logs/w-11-B/20161202T014316+0000/00/00/averageratioevent_3_ID0000027.sh
|
Shell
|
gpl-3.0
| 1,261 |
#!/bin/bash
python RunBootstrap.py --paralog1 YDR418W --paralog2 YEL054C --bootnum 84 > YDR418W_YEL054C_Boot84_PrintScreen.txt
|
xjw1001001/IGCexpansion
|
Bootstrap/ShFiles/MG94_YDR418W_YEL054C_Boot84.sh
|
Shell
|
gpl-3.0
| 128 |
# If we're not running on a real cluster then we need a local copy of
# ctdb (and other stuff) in $PATH and we will use local daemons.
# Use in-tree binaries if running against local daemons.
# Otherwise CTDB need to be installed on all nodes.
if [ -n "$ctdb_dir" -a -d "${ctdb_dir}/bin" ] ; then
# ctdbd_wrapper is in config/ directory
PATH="${ctdb_dir}/bin:${ctdb_dir}/config:${PATH}"
hdir="${ctdb_dir}/bin"
export CTDB_EVENTD="${hdir}/ctdb_eventd"
export CTDB_EVENT_HELPER="${hdir}/ctdb_event"
export CTDB_LOCK_HELPER="${hdir}/ctdb_lock_helper"
export CTDB_RECOVERY_HELPER="${hdir}/ctdb_recovery_helper"
export CTDB_TAKEOVER_HELPER="${hdir}/ctdb_takeover_helper"
export CTDB_CLUSTER_MUTEX_HELPER="${hdir}/ctdb_mutex_fcntl_helper"
fi
if [ -n "$TEST_SOCKET_WRAPPER_SO_PATH" ] ; then
export LD_PRELOAD="$TEST_SOCKET_WRAPPER_SO_PATH"
export SOCKET_WRAPPER_DIR="${SIMPLE_TESTS_VAR_DIR}/sw"
mkdir -p "$SOCKET_WRAPPER_DIR"
fi
# onnode will execute this, which fakes ssh against local daemons
export ONNODE_SSH="${TEST_SUBDIR}/scripts/ssh_local_daemons.sh"
#######################################
# Succeed (return 0) if the given IP address is hosted on a local interface
have_ip ()
{
local addr="$1"
local bits t
case "$addr" in
*:*) bits=128 ;;
*) bits=32 ;;
esac
t=$(ip addr show to "${addr}/${bits}")
[ -n "$t" ]
}
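# e.g.: have_ip "127.0.0.11" || echo "address not hosted locally"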
setup_nodes ()
{
local have_all_ips=true
local i
for i in $(seq 1 $TEST_LOCAL_DAEMONS) ; do
if [ -n "$CTDB_USE_IPV6" ]; then
local j=$(printf "%02x" $i)
local node_ip="fd00::5357:5f${j}"
if have_ip "$node_ip" ; then
echo "$node_ip"
else
cat >&2 <<EOF
ERROR: ${node_ip} not on an interface, please add it
EOF
have_all_ips=false
fi
else
local j=$((i + 10))
echo "127.0.0.${j}"
fi
done
# Fail if we don't have all of the IPv6 addresses assigned
$have_all_ips
}
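# With TEST_LOCAL_DAEMONS=3 and IPv4, setup_nodes emits one address per line:
#   127.0.0.11
#   127.0.0.12
#   127.0.0.13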
setup_public_addresses ()
{
local pnn_no_ips="$1"
local i
for i in $(seq 1 $TEST_LOCAL_DAEMONS) ; do
if [ $((i - 1)) -eq $pnn_no_ips ] ; then
continue
fi
# 2 public addresses on most nodes, just to make
# things interesting
local j=$((i + TEST_LOCAL_DAEMONS))
if [ -n "$CTDB_USE_IPV6" ]; then
printf "fc00:10::1:%x/64 lo\n" "$i"
printf "fc00:10::1:%x/64 lo\n" "$j"
else
printf "192.168.234.%x/24 lo\n" "$i"
printf "192.168.234.%x/24 lo\n" "$j"
fi
done
}
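# e.g. with TEST_LOCAL_DAEMONS=3 (IPv4), node 1 is given 192.168.234.1/24 and
# 192.168.234.4/24 on lo, unless it is the node picked to have no public IPs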
setup_ctdb ()
{
local no_public_addresses=false
local no_event_scripts=false
case "$1" in
--no-public-addresses) no_public_addresses=true ;;
--no-event-scripts) no_event_scripts=true ;;
esac
nodes_file="${SIMPLE_TESTS_VAR_DIR}/nodes"
setup_nodes >"$nodes_file" || return 1
# If there are (strictly) greater than 2 nodes then we'll
# randomly choose a node to have no public addresses
local pnn_no_ips=-1
if [ $TEST_LOCAL_DAEMONS -gt 2 ] ; then
pnn_no_ips=$((RANDOM % TEST_LOCAL_DAEMONS))
fi
local public_addresses_all="${SIMPLE_TESTS_VAR_DIR}/public_addresses"
setup_public_addresses $pnn_no_ips >"$public_addresses_all"
local pnn
for pnn in $(seq 0 $(($TEST_LOCAL_DAEMONS - 1))) ; do
setup_ctdb_base "$SIMPLE_TESTS_VAR_DIR" "node.${pnn}" \
functions notify.sh
cp "$nodes_file" "${CTDB_BASE}/nodes"
local public_addresses="${CTDB_BASE}/public_addresses"
if $no_public_addresses || [ $pnn_no_ips -eq $pnn ] ; then
echo "Node ${pnn} will have no public IPs."
: >"$public_addresses"
else
cp "$public_addresses_all" "$public_addresses"
fi
local node_ip=$(sed -n -e "$(($pnn + 1))p" "$nodes_file")
local db_dir="${CTDB_BASE}/db"
local d
for d in "volatile" "persistent" "state" ; do
mkdir -p "${db_dir}/${d}"
done
if $no_event_scripts ; then
rm -vf "${CTDB_BASE}/events.d/"*
fi
cat >"${CTDB_BASE}/ctdb.conf" <<EOF
[logging]
location = file:${CTDB_BASE}/log.ctdb
log level = INFO
[cluster]
recovery lock = ${SIMPLE_TESTS_VAR_DIR}/rec.lock
node address = ${node_ip}
[database]
volatile database directory = ${db_dir}/volatile
persistent database directory = ${db_dir}/persistent
state database directory = ${db_dir}/state
EOF
done
}
start_ctdb_1 ()
{
local pnn="$1"
CTDBD="${VALGRIND} ctdbd" \
onnode "$pnn" ctdbd_wrapper start
}
daemons_start ()
{
echo "Starting $TEST_LOCAL_DAEMONS ctdb daemons..."
local pnn
for pnn in $(seq 0 $(($TEST_LOCAL_DAEMONS - 1))) ; do
start_ctdb_1 "$pnn"
done
}
stop_ctdb_1 ()
{
local pnn="$1"
onnode "$pnn" ctdbd_wrapper stop
}
daemons_stop ()
{
echo "Stopping $TEST_LOCAL_DAEMONS ctdb daemons..."
local pnn
for pnn in $(seq 0 $(($TEST_LOCAL_DAEMONS - 1))) ; do
stop_ctdb_1 "$pnn"
done
rm -rf "${SIMPLE_TESTS_VAR_DIR}/test.db"
}
restart_ctdb_1 ()
{
stop_ctdb_1 "$1"
start_ctdb_1 "$1"
}
maybe_stop_ctdb ()
{
daemons_stop
}
ctdb_stop_all ()
{
daemons_stop
}
_ctdb_start_all ()
{
daemons_start
}
ps_ctdbd ()
{
# If this fails to find processes then the test fails, so
# look at the full command line so this will work with valgrind.
# Note that the output could be generated with pgrep's -a
# option but it doesn't exist in older versions.
ps -p $(pgrep -f '\<ctdbd\>' | xargs | sed -e 's| |,|g') -o args ww
echo
}
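# With a pgrep that supports -a (newer procps, see the note above) the
# pipeline could be reduced to this illustrative one-liner:
#   pgrep -a -f '\<ctdbd\>'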
# onnode will use CTDB_BASES to help the ctdb tool connect to each
# daemon
export CTDB_BASES=""
for i in $(seq 0 $(($TEST_LOCAL_DAEMONS - 1))) ; do
b="${SIMPLE_TESTS_VAR_DIR}/node.${i}"
CTDB_BASES="${CTDB_BASES}${CTDB_BASES:+ }${b}"
done
# Need a default CTDB_BASE for onnode (to find the functions file).
# Any node will do, so pick the 1st...
export CTDB_BASE="${CTDB_BASES%% *}"
|
sathieu/samba
|
ctdb/tests/simple/scripts/local_daemons.bash
|
Shell
|
gpl-3.0
| 5,528 |
#!/bin/bash
# set -x
# setup a new tinderbox image
# $1:$2, eg. 3:5
function dice() {
[[ $(( $RANDOM%$2)) -lt $1 ]]
}
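# Usage sketch (illustrative): run a branch with probability 3/5
#   if dice 3 5; then
#       echo "hit, expected ~60% of the time"
#   fi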
# helper of ThrowImageUseFlags()
function ThrowUseFlags() {
local n=$1 # pass up to n-1
local m=${2:-4} # mask 1:m of them
shuf -n $(( $RANDOM%$n)) |\
sort |\
while read -r flag
do
if dice 1 $m; then
echo -n "-$flag "
else
echo -n "$flag "
fi
done
}
# helper of InitOptions()
function GetProfiles() {
(
eselect profile list |\
grep -F 'default/linux/amd64/17.1' |\
grep -v -F -e ' (exp)' -e '/x32' -e '/selinux' -e '/uclibc' -e '/musl' -e '/developer'
# by sam
eselect profile list |\
grep -e "default/linux/amd64/17\../musl" |\
grep -v -F -e '/selinux'
) |\
awk ' { print $2 } ' |\
cut -f4- -d'/' -s |\
sort -u
}
# helper of main()
function InitOptions() {
# 1 process in each of M running images is more efficient than *up to* n processes in N images
# (given 1 x M = n x N) and it is much easier to catch the error message
# but: the compile times are awful with -j1
jobs=4
profile=$(GetProfiles | shuf -n 1)
# a "y" activates "*/* ABI_X86: 32 64"
abi3264="n"
if [[ ! $profile =~ "/no-multilib" ]]; then
if dice 1 80; then
abi3264="y"
fi
fi
cflags_default="-pipe -march=native -fno-diagnostics-color"
# try to debug: mr-fox kernel: [361158.269973] conftest[14463]: segfault at 3496a3b0 ip 00007f1199e1c8da sp 00007fffaf7220c8 error 4 in libc-2.33.so[7f1199cef000+142000]
if dice 1 80; then
cflags_default+=" -Og -g"
else
cflags_default+=" -O2"
fi
cflags=$cflags_default
if dice 1 80; then
# 685160 colon-in-CFLAGS
cflags+=" -falign-functions=32:25:16"
fi
# stable image ?
keyword="~amd64"
if dice 1 160; then
keyword="amd64"
fi
testfeature="n"
if dice 1 80; then
testfeature="y"
fi
useflagfile=""
}
# helper of CheckOptions()
function checkBool() {
var=$1
val=$(eval echo \$${var})
if [[ $val != "y" && $val != "n" ]]; then
echo " wrong value for variable \$$var: >>$val<<"
return 1
fi
}
# helper of main()
function CheckOptions() {
checkBool "abi3264"
checkBool "testfeature"
if [[ -z $profile ]]; then
echo " profile empty!"
return 1
fi
if [[ ! -d $reposdir/gentoo/profiles/default/linux/amd64/$profile ]]; then
echo " wrong profile: >>$profile<<"
return 1
fi
if [[ $abi3264 = "y" ]]; then
if [[ $profile =~ "/no-multilib" ]]; then
echo " ABI_X86 mismatch: >>$profile<<"
return 1
fi
fi
if [[ ! $jobs =~ ^[0-9].*$ ]]; then
echo " jobs is wrong: >>${jobs}<<"
return 1
fi
if [[ $profile =~ "/musl" ]]; then
abi3264="n"
keyword="~amd64"
testfeature="n"
fi
}
# helper of UnpackStage3()
function CreateImageName() {
name="$(tr '/\-' '_' <<< $profile)"
name+="-j${jobs}"
[[ $keyword = '~amd64' ]] || name+="_stable"
[[ $abi3264 = "n" ]] || name+="_abi32+64"
[[ $testfeature = "n" ]] || name+="_test"
[[ $cflags =~ O2 ]] || name+="_debug"
name+="-$(date +%Y%m%d-%H%M%S)"
}
# download, verify and unpack the stage3 file
function UnpackStage3() {
local latest=$tbhome/distfiles/latest-stage3.txt
for mirror in $gentoo_mirrors
do
if wget --connect-timeout=10 --quiet $mirror/releases/amd64/autobuilds/latest-stage3.txt --output-document=$latest; then
echo
date
echo " using mirror $mirror"
break
fi
done
if [[ ! -s $latest ]]; then
echo " empty: $latest"
return 1
fi
echo
date
echo " get prefix for $profile"
local prefix="stage3-amd64-$(sed -e 's,17\../,,' -e 's,/plasma,,' -e 's,/gnome,,' <<< $profile | tr -d '-' | tr '/' '-')"
if [[ ! $profile =~ "/systemd" && ! $profile =~ "/musl" ]]; then
prefix+="-openrc"
fi
if [[ $profile =~ "/desktop" ]]; then
if dice 1 2; then
# setup from a basic stage3 image instead of a desktop stage3
prefix=$(sed -e 's,-desktop,,' <<< $prefix)
fi
fi
if [[ $profile =~ "17.1/no-multilib/hardened" ]]; then
prefix=$(sed -e 's,nomultilib-hardened,hardened-nomultilib,' <<< $prefix)
fi
echo " get stage3 name for $prefix"
local stage3
if ! stage3=$(grep -o "^20.*T.*Z/$prefix-20.*T.*Z\.tar\.\w*" $latest); then
echo " failed"
return 1
fi
if [[ -z $stage3 || $stage3 =~ ' ' ]]; then
echo " wrong grep result for $prefix: >>>$stage3<<<"
return 1
fi
local local_stage3
if ! local_stage3=$tbhome/distfiles/$(basename $stage3); then
return 1
fi
if [[ ! -s $local_stage3 || ! -f $local_stage3.asc ]]; then
echo
date
echo " downloading $stage3{,.asc} files ..."
local wgeturl="$mirror/releases/amd64/autobuilds"
if ! wget --connect-timeout=10 --quiet --no-clobber $wgeturl/$stage3{,.asc} --directory-prefix=$tbhome/distfiles; then
echo " failed"
return 1
fi
fi
echo
date
echo " updating signing keys ..."
for key in 13EBBDBEDE7A12775DFDB1BABB572E0E2D182910 D99EAC7379A850BCE47DA5F29E6438C817072058
do
if ! gpg --keyserver hkps://keys.gentoo.org --recv-keys $key; then
echo " notice: could not update gpg key $key"
fi
done
echo
date
echo " verifying the stage3 file ..."
if ! gpg --quiet --verify $local_stage3.asc; then
echo ' failed'
mv $local_stage3{,.asc} /tmp
return 1
fi
CreateImageName
echo
date
echo " new image: $name"
if ! mkdir ~tinderbox/img/$name; then
return 1
fi
echo
date
echo " untar'ing $local_stage3 ..."
if ! cd ~tinderbox/img/$name; then
return 1
fi
if ! tar -xpf $local_stage3 --same-owner --xattrs; then
echo -e " failed"
mv $local_stage3{,.asc} /tmp
return 1
fi
}
# only ::gentoo at the moment
function InitRepository() {
mkdir -p ./etc/portage/repos.conf/
cat << EOF >> ./etc/portage/repos.conf/all.conf
[DEFAULT]
main-repo = gentoo
auto-sync = yes
[gentoo]
location = $reposdir/gentoo
sync-uri = https://github.com/gentoo-mirror/gentoo.git
sync-type = git
EOF
echo
date
local ts=$(ls -t $tbhome/img/*${reposdir}/gentoo/metadata/timestamp.chk 2>/dev/null | head -n 1)
if [[ -z $ts ]]; then
# fallback is the build host
local refdir=$reposdir/gentoo
else
local refdir=$(sed -e 's,metadata/timestamp.chk,,' <<< $ts)
fi
echo " cloning ::gentoo at $(cat $refdir/metadata/timestamp.chk)"
# "git clone" is at a local machine much slower than a "cp --reflink"
cd .$reposdir
cp -ar --reflink=auto $refdir ./
rm -f ./gentoo/.git/refs/heads/stable.lock ./gentoo/.git/gc.log.lock
cd - 1>/dev/null
}
# compile make.conf
function CompileMakeConf() {
cat << EOF > ./etc/portage/make.conf
LC_MESSAGES=C
PORTAGE_TMPFS="/dev/shm"
CFLAGS="$cflags"
CXXFLAGS="\${CFLAGS}"
FCFLAGS="$cflags_default"
FFLAGS="\${FCFLAGS}"
# simply enables QA check for LDFLAGS being respected by build system.
LDFLAGS="\${LDFLAGS} -Wl,--defsym=__gentoo_check_ldflags__=0"
RUSTFLAGS="-Ctarget-cpu=native -v"
$([[ $profile =~ "/musl" ]] && echo 'RUSTFLAGS=" -C target-feature=-crt-static"')
$([[ $profile =~ "/hardened" ]] || echo 'PAX_MARKINGS="none"')
ACCEPT_KEYWORDS="$keyword"
# just tinderbox, no re-distribution nor any "usage"
ACCEPT_LICENSE="*"
# no manual interaction
ACCEPT_PROPERTIES="-interactive"
ACCEPT_RESTRICT="-fetch"
NOCOLOR="true"
PORTAGE_LOG_FILTER_FILE_CMD="bash -c 'ansifilter --ignore-clear; exec cat'"
FEATURES="cgroup protect-owned xattr -collision-protect -news"
EMERGE_DEFAULT_OPTS="--verbose --verbose-conflicts --nospinner --quiet-build --tree --color=n --ask=n"
CLEAN_DELAY=0
PKGSYSTEM_ENABLE_FSYNC=0
PORT_LOGDIR="/var/log/portage"
PORTAGE_ELOG_CLASSES="qa"
PORTAGE_ELOG_SYSTEM="save"
PORTAGE_ELOG_MAILURI="tinderbox@localhost"
PORTAGE_ELOG_MAILFROM="$name <tinderbox@localhost>"
GENTOO_MIRRORS="$gentoo_mirrors"
EOF
# requested by sam
if [[ $keyword = '~amd64' ]]; then
if dice 1 80; then
echo 'LIBTOOL="rdlibtool"' >> ./etc/portage/make.conf
echo 'MAKEFLAGS="LIBTOOL=${LIBTOOL}"' >> ./etc/portage/make.conf
fi
fi
# requested by mgorny in 822354
# Hint: this is unrelated to "test"
if dice 1 2; then
echo 'ALLOW_TEST="network"' >> ./etc/portage/make.conf
fi
chgrp portage ./etc/portage/make.conf
chmod g+w ./etc/portage/make.conf
}
# helper of CompilePortageFiles()
function cpconf() {
for f in $*
do
read -r dummy suffix filename <<<$(tr '.' ' ' <<< $(basename $f))
# eg.: .../package.unmask.??common -> package.unmask/??common
cp $f ./etc/portage/package.$suffix/$filename
done
}
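# Example (illustrative file name): cpconf /some/dir/package.use.50common
# copies that file to ./etc/portage/package.use/50common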
# create portage and tinderbox related directories + files
function CompilePortageFiles() {
mkdir -p ./mnt/tb/data ./var/tmp/{portage,tb,tb/logs} ./var/cache/distfiles
chgrp portage ./var/tmp/tb/{,logs}
chmod ug+rwx ./var/tmp/tb/{,logs}
echo $EPOCHSECONDS > ./var/tmp/tb/setup.timestamp
echo $name > ./var/tmp/tb/name
for d in profile package.{accept_keywords,env,mask,unmask,use} env
do
if [[ ! -d ./etc/portage/$d ]]; then
mkdir ./etc/portage/$d
fi
chgrp portage ./etc/portage/$d
chmod g+w ./etc/portage/$d
done
touch ./etc/portage/package.mask/self # gets failed packages
chmod a+rw ./etc/portage/package.mask/self
# for packages whose tests have setup or dep calculation issues or are just plain broken
echo 'FEATURES="-test"' > ./etc/portage/env/notest
# continue an expected failed test of a package while preserving the dependency tree
echo 'FEATURES="test-fail-continue"' > ./etc/portage/env/test-fail-continue
# retry w/o sandbox'ing
echo 'FEATURES="-sandbox -usersandbox"' > ./etc/portage/env/nosandbox
# retry with sane defaults
cat <<EOF > ./etc/portage/env/cflags_default
CFLAGS="$cflags_default"
CXXFLAGS="\${CFLAGS}"
FCFLAGS="\${CFLAGS}"
FFLAGS="\${CFLAGS}"
EOF
# max $jobs parallel jobs
for j in 1 $jobs
do
cat << EOF > ./etc/portage/env/j$j
EGO_BUILD_FLAGS="-p $j"
GO19CONCURRENTCOMPILATION=0
MAKEOPTS="-j$j"
OMP_DYNAMIC=FALSE
OMP_NESTED=FALSE
OMP_NUM_THREADS=$j
RUST_TEST_THREADS=$j
RUST_TEST_TASKS=$j
EOF
done
echo "*/* j${jobs}" >> ./etc/portage/package.env/00j${jobs}
if [[ $keyword = '~amd64' ]]; then
cpconf $tbhome/tb/data/package.*.??unstable
else
cpconf $tbhome/tb/data/package.*.??stable
fi
if [[ $profile =~ '/systemd' ]]; then
cpconf $tbhome/tb/data/package.*.??systemd
else
cpconf $tbhome/tb/data/package.*.??openrc
fi
cpconf $tbhome/tb/data/package.*.??common
if [[ $abi3264 = "y" ]]; then
cpconf $tbhome/tb/data/package.*.??abi32+64
fi
cpconf $tbhome/tb/data/package.*.??test-$testfeature
if [[ $profile =~ "/musl" ]]; then
cpconf $tbhome/tb/data/package.*.??musl
fi
echo "*/* $(cpuid2cpuflags)" > ./etc/portage/package.use/99cpuflags
for f in $tbhome/tb/data/{package.,}use.mask
do
cp $f ./etc/portage/profile/$(basename $f)
done
touch ./var/tmp/tb/task
chgrp portage ./etc/portage/package.*/* ./etc/portage/env/* ./var/tmp/tb/task
chmod a+r,g+w ./etc/portage/package.*/* ./etc/portage/env/* ./var/tmp/tb/task
}
function CompileMiscFiles() {
# use local host DNS resolver
cat << EOF > ./etc/resolv.conf
domain localdomain
nameserver 127.0.0.1
EOF
local image_hostname=$(echo $name | tr -d '\n' | tr '[:upper:]' '[:lower:]' | tr -c '[^a-z0-9\-]' '-' | cut -c-63)
echo $image_hostname > ./etc/conf.d/hostname
local host_hostname=$(hostname)
cat << EOF > ./etc/hosts
127.0.0.1 localhost $host_hostname $host_hostname.localdomain $image_hostname $image_hostname.localdomain
::1 localhost $host_hostname $host_hostname.localdomain $image_hostname $image_hostname.localdomain
EOF
# avoid interactive question of vim
cat << EOF > ./root/.vimrc
autocmd BufEnter *.txt set textwidth=0
cnoreabbrev X x
let g:session_autosave = 'no'
let g:tex_flavor = 'latex'
set softtabstop=2
set shiftwidth=2
set expandtab
EOF
# include the \n in pasting (sys-libs/readline de-activates that behaviour with v8.x)
echo "set enable-bracketed-paste off" >> ./root/.inputrc
}
# what                        filled once by     updated by
#
# /var/tmp/tb/backlog       : setup_img.sh
# /var/tmp/tb/backlog.1st   : setup_img.sh       job.sh, retest.sh
# /var/tmp/tb/backlog.upd   :                    job.sh
function CreateBacklogs() {
local bl=./var/tmp/tb/backlog
touch $bl{,.1st,.upd}
chown tinderbox:portage $bl{,.1st,.upd}
chmod 664 $bl{,.1st,.upd}
# requested by Whissi (a non-default virtual/mysql engine)
if dice 1 10; then
echo "dev-db/percona-server" >> $bl.1st
fi
cat << EOF >> $bl.1st
@world
%sed -i -e \\'s,--verbose,--deep --verbose,g\\' /etc/portage/make.conf
%emerge -uU sys-devel/gcc
EOF
}
function CreateSetupScript() {
if cat << EOF > ./var/tmp/tb/setup.sh; then
#!/bin/bash
# set -x
export LANG=C.utf8
set -euf
if [[ ! $profile =~ "/musl" ]]; then
date
echo "#setup locale" | tee /var/tmp/tb/task
echo -e "en_US ISO-8859-1" >> /etc/locale.gen
echo -e "en_US.UTF-8 UTF-8" >> /etc/locale.gen # especially for "test" needed
locale-gen
fi
date
echo "#setup timezone" | tee /var/tmp/tb/task
echo "Europe/Berlin" > /etc/timezone
emerge --config sys-libs/timezone-data
env-update
set +u; source /etc/profile; set -u
if [[ $profile =~ "/systemd" ]]; then
systemd-machine-id-setup
fi
groupadd -g $(id -g tinderbox) tinderbox
useradd -g $(id -g tinderbox) -u $(id -u tinderbox) tinderbox
date
echo "#setup git" | tee /var/tmp/tb/task
USE="-cgi -mediawiki -mediawiki-experimental -webdav" emerge -u dev-vcs/git
git config --global gc.auto 0 # might reduce COW profit of --reflink
emaint sync --auto 1>/dev/null
date
echo "#setup portage" | tee /var/tmp/tb/task
emerge -u app-text/ansifilter
emerge -u sys-apps/portage
if grep -q '^LIBTOOL="rdlibtool"' /etc/portage/make.conf; then
date
echo "#setup slibtool" | tee /var/tmp/tb/task
emerge -u sys-devel/slibtool
fi
date
echo "#setup Mail" | tee /var/tmp/tb/task
# emerge the MTA before the MUA b/c MUA+virtual/mta together would pull in an MTA other than sSMTP
emerge -u mail-mta/ssmtp
rm /etc/ssmtp/._cfg0000_ssmtp.conf # /etc/ssmtp/ssmtp.conf is bind mounted by bwrap.sh
emerge -u mail-client/s-nail
date
echo "#setup kernel" | tee /var/tmp/tb/task
emerge -u sys-kernel/gentoo-kernel-bin
# provides qatom
date
echo "#setup portage-utils" | tee /var/tmp/tb/task
emerge -u app-portage/portage-utils
date
echo "#setup profile, make.conf, backlog" | tee /var/tmp/tb/task
eselect profile set --force default/linux/amd64/$profile
if [[ $testfeature = "y" ]]; then
sed -i -e 's,FEATURES=",FEATURES="test ,g' /etc/portage/make.conf
fi
if [[ $name =~ "debug" ]]; then
sed -i -e 's,FEATURES=",FEATURES="splitdebug compressdebug ,g' /etc/portage/make.conf
fi
# sort -u is needed if a package is in several repositories
qsearch --all --nocolor --name-only --quiet | grep -v -F -f /mnt/tb/data/IGNORE_PACKAGES | sort -u | shuf > /var/tmp/tb/backlog
date
echo "#setup done" | tee /var/tmp/tb/task
EOF
chmod u+x ./var/tmp/tb/setup.sh
else
return 1
fi
}
function RunSetupScript() {
echo
date
echo " run setup script ..."
echo '/var/tmp/tb/setup.sh &> /var/tmp/tb/setup.sh.log' > ./var/tmp/tb/setup_wrapper.sh
if nice -n 1 $(dirname $0)/bwrap.sh -m $name -e ~tinderbox/img/$name/var/tmp/tb/setup_wrapper.sh; then
echo -e " OK"
else
echo -e "$(date)\n $FUNCNAME was NOT ok\n"
tail -v -n 100 ./var/tmp/tb/setup.sh.log
echo
return 1
fi
}
function RunDryrunWrapper() {
local message=$1
echo "$message" | tee ./var/tmp/tb/task
nice -n 1 sudo $(dirname $0)/bwrap.sh -m $name -e ~tinderbox/img/$name/var/tmp/tb/dryrun_wrapper.sh &> $drylog
local rc=$?
if [[ $rc -eq 0 ]]; then
echo " OK"
else
echo " NOT ok"
fi
chmod a+r $drylog
return $rc
}
function FixPossibleUseFlagIssues() {
if RunDryrunWrapper "#setup dryrun $attempt"; then
return 0
fi
for i in {1..9}
do
# kick off particular packages
local pkg=$(
grep -A 1 'The ebuild selected to satisfy .* has unmet requirements.' $drylog |\
awk ' /^- / { print $2 } ' |\
cut -f1 -d':' -s |\
xargs --no-run-if-empty qatom -F "%{CATEGORY}/%{PN}" |\
sed -e 's,/,\\/,'
)
if [[ -n $pkg ]]; then
local f=./etc/portage/package.use/24thrown_package_use_flags
local before=$(wc -l < $f)
sed -i -e "/$pkg /d" $f
local after=$(wc -l < $f)
if [[ $before != $after ]]; then
if RunDryrunWrapper "#setup dryrun $attempt-$i # solved unmet requirements"; then
return 0
fi
fi
fi
# try to solve a dep cycle
local fautocirc=./etc/portage/package.use/27-$attempt-$i-a-circ-dep
grep -A 10 "It might be possible to break this cycle" $drylog |\
grep -F ' (Change USE: ' |\
grep -v -F -e '+' -e 'This change might require ' |\
sed -e "s,^- ,,g" -e "s, (Change USE:,,g" |\
tr -d ')' |\
sort -u |\
grep -v ".*-.*/.* .*_.*" |\
while read -r p u
do
q=$(qatom -F "%{CATEGORY}/%{PN}" $p)
printf "%-30s %s\n" $q "$u"
done |\
sort -u > $fautocirc
if [[ -s $fautocirc ]]; then
if RunDryrunWrapper "#setup dryrun $attempt-$i # solved circ dep"; then
return 0
fi
else
rm $fautocirc
fi
# follow advice
local fautoflag=./etc/portage/package.use/27-$attempt-$i-b-necessary-use-flag
grep -A 100 'The following USE changes are necessary to proceed:' $drylog |\
grep "^>=" |\
grep -v -e '>=.* .*_' |\
sort -u > $fautoflag
if [[ -s $fautoflag ]]; then
if RunDryrunWrapper "#setup dryrun $attempt-$i # solved flag change"; then
return 0
fi
else
rm $fautoflag
fi
# if no change in this round was made then give up
if [[ -z $pkg && ! -s $fautocirc && ! -s $fautoflag ]]; then
break
fi
done
rm -f ./etc/portage/package.use/27-*-*
return 1
}
# vary USE flags till a dry run of @world succeeds
function ThrowImageUseFlags() {
echo "#setup dryrun $attempt # throw flags ..."
grep -v -e '^$' -e '^#' $reposdir/gentoo/profiles/desc/l10n.desc |\
cut -f1 -d' ' -s |\
shuf -n $(( $RANDOM%20 )) |\
sort |\
xargs |\
xargs -I {} --no-run-if-empty echo "*/* L10N: {}" > ./etc/portage/package.use/22thrown_l10n
grep -v -e '^$' -e '^#' -e 'internal use only' $reposdir/gentoo/profiles/use.desc |\
cut -f1 -d' ' -s |\
grep -v -w -f $tbhome/tb/data/IGNORE_USE_FLAGS |\
ThrowUseFlags 250 |\
xargs -s 73 |\
sed -e "s,^,*/* ,g" > ./etc/portage/package.use/23thrown_global_use_flags
grep -Hl 'flag name="' $reposdir/gentoo/*/*/metadata.xml |\
shuf -n $(( $RANDOM%3000)) |\
sort |\
while read -r file
do
pkg=$(cut -f6-7 -d'/' <<< $file)
grep 'flag name="' $file |\
grep -v -i -F -e 'UNSUPPORTED' -e 'UNSTABLE' -e '(requires' |\
cut -f2 -d'"' -s |\
grep -v -w -f $tbhome/tb/data/IGNORE_USE_FLAGS |\
ThrowUseFlags 15 3 |\
xargs |\
xargs -I {} --no-run-if-empty printf "%-40s %s\n" "$pkg" "{}"
done > ./etc/portage/package.use/24thrown_package_use_flags
}
function CompileUseFlagFiles() {
local attempt=0
echo 'emerge --update --changed-use --newuse --deep @world --pretend' > ./var/tmp/tb/dryrun_wrapper.sh
if [[ -e $useflagfile ]]; then
echo
date
echo "dryrun with given USE flag file ==========================================================="
cp $useflagfile ./etc/portage/package.use/28given_use_flags
local drylog=./var/tmp/tb/logs/dryrun.log
FixPossibleUseFlagIssues $attempt
return $?
else
while [[ $(( ++attempt )) -le 200 ]]
do
if [[ -f ./var/tmp/tb/STOP ]]; then
echo -e "\n found STOP file"
rm ./var/tmp/tb/STOP
return 1
fi
echo
date
echo "==========================================================="
local drylog=./var/tmp/tb/logs/dryrun.$(printf "%03i" $attempt).log
ThrowImageUseFlags
if FixPossibleUseFlagIssues $attempt; then
return 0
fi
done
echo -e "\n max attempts reached"
return 1
fi
}
function StartImage() {
cd $tbhome/run
ln -s ../img/$name
wc -l -w $name/etc/portage/package.use/2*
echo
date
su - tinderbox -c "$(dirname $0)/start_img.sh $name"
}
#############################################################################
#
# main
#
set -eu
export PATH="/usr/sbin:/usr/bin:/sbin:/bin:/opt/tb/bin"
export LANG=C.utf8
if [[ "$(whoami)" != "root" ]]; then
echo " you must be root"
exit 1
fi
echo
date
echo " $0 started"
if [[ $# -gt 0 ]]; then
echo " args: '${@}'"
fi
tbhome=~tinderbox
reposdir=/var/db/repos
gentoo_mirrors=$(grep "^GENTOO_MIRRORS=" /etc/portage/make.conf | cut -f2 -d'"' -s | xargs -n 1 | shuf | xargs)
InitOptions
while getopts a:c:j:k:p:t:u: opt
do
case $opt in
a) abi3264="$OPTARG" ;;
c) cflags="$OPTARG" ;;
j) jobs="$OPTARG" ;;
k) keyword="$OPTARG" ;;
p) profile="$OPTARG" ;;
t) testfeature="$OPTARG" ;;
u) useflagfile="$OPTARG" ;; # eg.: /dev/null
*) echo " '$opt' with '$OPTARG' not implemented"; exit 2 ;;
esac
done
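# Illustrative invocation (the option values are assumptions, not defaults):
#   ./setup_img.sh -p 17.1/desktop -k '~amd64' -j 4 -t y -u /dev/null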
CheckOptions
UnpackStage3
InitRepository
CompilePortageFiles
CompileMakeConf
CompileMiscFiles
CreateBacklogs
CreateSetupScript
RunSetupScript
CompileUseFlagFiles
chgrp portage ./etc/portage/package.use/*
chmod g+w,a+r ./etc/portage/package.use/*
echo -e "\n$(date)\n setup done\n"
StartImage
|
toralf/tinderbox
|
bin/setup_img.sh
|
Shell
|
gpl-3.0
| 21,582 |
#!/bin/bash
set +v
if [ ! -d sysrestore.z ]; then
mkdir sysrestore.z
fi
if [ ! -f sysrestore.conf ]; then
echo "No sysrestore.conf"
exit 1
fi
if [ "$1" = "create" ]; then
while read line
do
if [ "$line" = "" ]; then
continue
fi
SAVEFILE=${line////__}
cp -v -p $line "sysrestore.z/${SAVEFILE}"
done < sysrestore.conf
elif [ "$1" = "restore" ]; then
pushd sysrestore.z
for file in __*
do
SAVEFILE=${file//__//}
if [ -f "$SAVEFILE" ]; then
echo "Backing up $SAVEFILE"
cp -v -p "$SAVEFILE" "${SAVEFILE}.orig"
sleep 1
fi
echo "Installing $SAVEFILE"
cp -v -p "$file" "$SAVEFILE"
done
else
echo "$0 [create or restore]"
exit 1
fi
exit 0
|
ljay79/yoda
|
setup/sysrestore.sh
|
Shell
|
gpl-3.0
| 692 |
#!/bin/sh -e
# Run lxc-test.sh on LXD containers in parallel.
process_args() {
script=$(readlink -f "$0")
script_path=$(dirname "${script}")
while [ "$1" ]; do
case "$1" in
"-c" | "--container")
shift
containers="${containers} $1"
;;
"-f" | "--file")
shift
contents=$(grep -v '^#' "$1")
containers="${containers} ${contents}"
;;
"-h" | "--help")
help=1
;;
"--")
shift
options="$*"
break
;;
*)
echo "Unknown argument: $1"
exit 1
;;
esac
shift
done
if [ "${containers}" = "" ]; then
help=1
fi
container_pids="lxc-parallel-test.pid"
}
show_help() {
if [ ! ${help} ]; then return; fi
cat <<- EOF
Usage: lxc-parallel-test.sh source [...] -- [argument...] -- [option...]
Sources:
-c, --container name An LXD container to run tests against.
-f, --file list A file listing LXD containers, one per line.
Lines starting with a # are considered comments.
EOF
"${script_path}/lxc-test.sh" | grep -A 1000 '^Arguments:'
exit 0
}
test_container() {
echo "Starting test of ${container} in the background."
# shellcheck disable=SC2086
"${script_path}/lxc-test.sh" \
"${container}" \
${options} \
> "${container}.log" 2>&1 &
pid=$!
echo "${pid}:${container}" >> "${container_pids}"
pids="${pids} ${pid}"
}
run_tests() {
rm -f "${container_pids}"
for container in ${containers}; do
test_container
done
echo "Waiting for tests to complete."
# shellcheck disable=SC2086
wait_all ${pids}
}
wait_all() {
while [ $# -gt 0 ]; do
for pid in "$@"; do
shift
if kill -0 "${pid}" 2> /dev/null; then
set -- "$@" "${pid}"
elif wait "${pid}"; then
container=$(grep "^${pid}:" "${container_pids}" | sed "s/^${pid}://;")
echo "Container ${container} exited with zero exit status."
else
container=$(grep "^${pid}:" "${container_pids}" | sed "s/^${pid}://;")
echo "Container ${container} exited with non-zero exit status."
fi
done
sleep 1
done
}
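# wait_all polls each PID with `kill -0` and re-queues the ones still
# running via `set --`; run_tests above invokes it as:
#   wait_all ${pids}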
main() {
process_args "$@"
show_help
run_tests
}
main "$@"
exit 0
|
mattclay/ansible-hacking
|
lxc-parallel-test.sh
|
Shell
|
gpl-3.0
| 2,269 |
#!/bin/sh
echo 1..4
#########################################
./scc 1 < $srcdir/Matrix/scc.sms
./scc 2 < $srcdir/Matrix/scc2.sms
./scc 3 < $srcdir/Matrix/scc3.sms
./scc 4 < $srcdir/Matrix/mat364.sms
|
cbouilla/spasm
|
test/test_scc.sh
|
Shell
|
gpl-3.0
| 200 |
#!/bin/bash
Version=$(nvidia-settings -q openglversion -t)
Thermal0=$(nvclock -c 1 -T | grep GPU)°C
Thermal1=$(nvclock -c 2 -T | grep GPU)°C
echo "<openbox_pipe_menu>"
echo '<separator label="OpenGL / Version" />'
echo "<item label=\"$Version\" />"
echo '<separator label="Temperature GPU 0" />'
echo "<item label=\"$Thermal0\" />"
echo '<separator label="Temperature GPU 1" />'
echo "<item label=\"$Thermal1\" />"
echo "</openbox_pipe_menu>"
|
widowild/messcripts
|
bash/nvidiatemp.sh
|
Shell
|
gpl-3.0
| 448 |
#!/bin/sh
now="$(date): update all"
git add -A README.md
git add -A CMakeLists.txt
git add -A update-all.sh
git add -A src/*
git add -A include/*
#git add -A resources/*
git commit -m "$now"
git push origin master
|
duguyue100/unixprotege
|
update-all.sh
|
Shell
|
gpl-3.0
| 218 |
#!/bin/bash
script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
dataset_name="Ill_Br"
project_out_dir="/proj/b2011117/private/projects/out/db/cal_stat_$dataset_name"
vcf_tabix_file="/glob/jessada/private/master_data/CMM/CMM_Scilife_b2011158/vcf/Ill_Br_Fam242.vcf.gz"
patients_list="/glob/jessada/private/master_data/CMM/family/processed/Ill_Br/Ill_Br.lst"
project_code="b2011158"
jobs_setup_file=cal_mut_stat_"$dataset_name"_jobs_setup.txt
cmd="pyCMM-mutrep-create-job-setup-file"
cmd+=" -d $dataset_name"
cmd+=" -O $project_out_dir"
cmd+=" -i $vcf_tabix_file"
cmd+=" -c $patients_list"
cmd+=" -p $project_code"
cmd+=" -o $jobs_setup_file"
eval "$cmd"
|
bahamoot/CMM-AL-projects
|
scripts/mutstat/mut_stat_Ill_Br/create_jobs_setup.sh
|
Shell
|
gpl-3.0
| 667 |
#!/bin/bash
python AnalyzeSimulation.py --paralog1 YNL069C --paralog2 YIL133C --simnum 67 > YNL069C_YIL133C_MG94_nonclock_Sim67_PrintScreen.txt
|
xjw1001001/IGCexpansion
|
Simulation/ShFiles/MG94_YNL069C_YIL133C_sim67.sh
|
Shell
|
gpl-3.0
| 145 |
#!/bin/sh
#mode=online
mode=collect
pln_fitting_method=1
alpha=1
beta=1
min_inliers=1000
useWeight=1
total_frames=10
usePln=1
usePt=1
max_icp=10
max_lm=10
occluding=1
curvature=0
canny=0
mkdir data
mkdir data/depth
mkdir data/rgb
rm data/depth/*.png
rm data/rgb/*.png
rm data/*.txt
bin/Soledad -plnfit ${pln_fitting_method} -pln ${usePln} -pt ${usePt} -mi ${min_inliers} -alpha ${alpha} -beta ${beta} -frames ${total_frames} -icp ${max_icp} -lm ${max_lm} -occluding ${occluding} -curvature ${curvature} -canny ${canny} -useWeight ${useWeight} -mode ${mode}
mv data data0
|
ElinorSun/Soledad
|
build/run/run_online.sh
|
Shell
|
gpl-3.0
| 580 |
#! /bin/bash
# Source: https://raw.github.com/thenbrent/multisite-user-management/master/deploy.sh
# http://thereforei.am/2011/04/21/git-to-svn-automated-wordpress-plugin-deployment/
# A modification of Dean Clatworthy's deploy script as found here: https://github.com/deanc/wordpress-plugin-git-svn
# The difference is that this script lives in the plugin's git repo & doesn't require an existing SVN repo.
# main config
PLUGINSLUG=${PWD##*/} # returns basename of current directory
CURRENTDIR=`pwd`
MAINFILE="multiple-rich-editors.php" # this should be the name of your main php file in the wordpress plugin
SVNUSER="nickohrn" # your svn username
# git config
GITPATH="$CURRENTDIR/" # this file should be in the base of your git repository
# svn config
SVNPATH="/tmp/$PLUGINSLUG" # path to a temp SVN repo. No trailing slash required and don't add trunk.
SVNURL="http://plugins.svn.wordpress.org/$PLUGINSLUG" # Remote SVN repo on wordpress.org, with no trailing slash
# Let's begin...
echo ".........................................."
echo
echo "Preparing to deploy WordPress plugin"
echo
echo ".........................................."
echo
# Check that the version in readme.txt matches the one in the plugin file
NEWVERSION1=`grep "^Stable tag" "$GITPATH/readme.txt" | awk -F' ' '{print $3}' | sed 's/[[:space:]]//g'`
echo "readme version: $NEWVERSION1"
NEWVERSION2=`grep "^Version" "$GITPATH/$MAINFILE" | awk -F' ' '{print $2}' | sed 's/[[:space:]]//g'`
echo "$MAINFILE version: $NEWVERSION2"
if [ "$NEWVERSION1" != "$NEWVERSION2" ]; then echo "Versions don't match. Exiting...."; exit 1; fi
echo "Versions match in readme.txt and PHP file. Let's proceed..."
cd $GITPATH
echo -e "Enter a commit message for this new version: \c"
read COMMITMSG
# git commit -am "$COMMITMSG"
echo "Tagging new version in git"
git tag -a "$NEWVERSION1" -m "Version $NEWVERSION1"
echo "Pushing latest commit to origin, with tags"
git push origin master
git push origin master --tags
echo
echo "Creating local copy of SVN repo ..."
svn co $SVNURL $SVNPATH
echo "Exporting the HEAD of master from git to the trunk of SVN"
git checkout-index -a -f --prefix=$SVNPATH/trunk/
echo "Ignoring github specific files and deployment script"
svn propset svn:ignore "deploy.sh
README.md
.git
.gitignore" "$SVNPATH/trunk/"
echo "Changing directory to SVN and committing to trunk"
cd $SVNPATH/trunk/
# Add all new files that are not set to be ignored
svn status | grep -v "^.[ \t]*\..*" | grep "^?" | awk '{print $2}' | xargs svn add
svn commit --username=$SVNUSER -m "$COMMITMSG"
# echo "Creating new SVN tag & committing it"
cd $SVNPATH
svn copy trunk/ tags/$NEWVERSION1/
cd $SVNPATH/tags/$NEWVERSION1
svn commit --username=$SVNUSER -m "Tag $NEWVERSION1"
echo "Removing temporary directory $SVNPATH"
rm -fr $SVNPATH/
echo "*** FIN ***"
|
nickohrn/multiple-rich-editors
|
deploy.sh
|
Shell
|
gpl-3.0
| 2,810 |
#!/bin/bash
OLD_PATH=$(pwd)
echo $OLD_PATH
export PATH=$PATH:/c/Program\ Files/PostgreSQL/13/bin:/c/Program\ Files\ \(x86\)/WinSCP
cd src
sed -i 's/http:\/\/localhost:8000/https:\/\/firmadigital.solvosoft.com/g' client_fva/user_settings.py
sed -i 's/self.installation_path = None/self.installation_path = "C"/g' client_fva/user_settings.py
pyinstaller --clean --onefile -n client_fva -i client_fva/ui/ui_elements/images/icon.ico --noconfirm --log-level=WARN --windowed --noconsole --hidden-import pkcs11.defaults main.py
cd dist
mkdir workdir
EXE_HOMEDIR="workdir"
mkdir -p $EXE_HOMEDIR/
cp $OLD_PATH/src/os_libs/windows/asepkcs.dll $EXE_HOMEDIR/
cp $OLD_PATH/src/dist/client_fva.exe $EXE_HOMEDIR/
cp $OLD_PATH/src/client_fva/ui/ui_elements/images/icon.png $EXE_HOMEDIR/
cp $OLD_PATH/src/client_fva/ui/ui_elements/images/icon.ico $EXE_HOMEDIR/
cp $OLD_PATH/src/os_libs/windows/installer.iss $EXE_HOMEDIR/
cd $EXE_HOMEDIR/
iscc installer.iss
mkdir $OLD_PATH/release/
cp Output/mysetup.exe $OLD_PATH/release/client_fva_${TRAVIS_OS_NAME}_${TRAVIS_BUILD_NUMBER}.exe
exit 0
|
luisza/dfva_client
|
builder/ci/windows.sh
|
Shell
|
gpl-3.0
| 1,077 |
#!/usr/bin/env bash
# Author: Dmitri Popov, [email protected]
#######################################################################
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#######################################################################
WORKING_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
CONFIG="${WORKING_DIR}/config.cfg"
dos2unix "$CONFIG"
source "$CONFIG"
# Set the ACT LED to heartbeat
sudo sh -c "echo heartbeat > /sys/class/leds/led0/trigger"
# If display support is enabled, display the message
if [ $DISP = true ]; then
oled r
oled +a "Ready"
oled +b "Insert storage"
oled s
fi
# Wait for a USB storage device (e.g., a USB flash drive)
STORAGE=$(ls /dev/* | grep "$STORAGE_DEV" | cut -d"/" -f3)
while [ -z "${STORAGE}" ]; do
sleep 1
STORAGE=$(ls /dev/* | grep "$STORAGE_DEV" | cut -d"/" -f3)
done
# When the storage device is detected, mount it
mount /dev/"$STORAGE_DEV" "$STORAGE_MOUNT_POINT"
# Set the ACT LED to blink at 500ms to indicate that the storage device has been mounted
sudo sh -c "echo timer > /sys/class/leds/led0/trigger"
sudo sh -c "echo 500 > /sys/class/leds/led0/delay_on"
# If display support is enabled, notify that the storage device has been mounted
if [ $DISP = true ]; then
oled r
oled +a "Storage OK"
oled +b "Connect"
oled +c "iOS device"
oled s
fi
# Try to mount iOS device
ifuse $SOURCE_MOUNT_POINT -o allow_other
# Waiting for the iOS device to be mounted
until [ ! -z "$(ls -A $SOURCE_MOUNT_POINT)" ]; do
if [ $DISP = true ]; then
oled r
oled +a "No iOS device"
oled +b "Waiting ..."
oled s
sleep 5
fi
ifuse $SOURCE_MOUNT_POINT -o allow_other
done
# Define source and destination paths
SOURCE_PATH="$SOURCE_MOUNT_POINT/DCIM"
# Create a random .id identifier file if one doesn't exist
cd "$SOURCE_MOUNT_POINT"
if [ ! -f *.id ]; then
random=$(echo $RANDOM)
sudo touch $(date -d "today" +"%Y%m%d%H%M")-$random.id
fi
ID_FILE=$(ls -t *.id | head -n1)
ID="${ID_FILE%.*}"
cd
mkdir -p "$STORAGE_MOUNT_POINT/$ID"
BACKUP_PATH="$STORAGE_MOUNT_POINT/$ID"
# Run the progress.sh script
if [ $DISP = true ]; then
source ${WORKING_DIR}/progress.sh "${SOURCE_PATH}" "${BACKUP_PATH}" &
PID=$!
fi
# Set the ACT LED to blink at 1000ms to indicate that the iOS device has been mounted
sudo sh -c "echo timer > /sys/class/leds/led0/trigger"
sudo sh -c "echo 1000 > /sys/class/leds/led0/delay_on"
# Perform backup using rsync
sudo rm /var/log/little-backup-box.log
RSYNC_OUTPUT=$(rsync -avh --stats --exclude "*.id" --log-file=/var/log/little-backup-box.log "$SOURCE_PATH"/ "$BACKUP_PATH")
# Kill the progress.sh script
kill $PID
rm /tmp/progress
# If display support is enabled, notify that the backup is complete
if [ $DISP = true ]; then
oled r
oled +a "Backup completed"
oled +b "Power off"
oled s
fi
# Check internet connection and send
# a notification if the NOTIFY option is enabled
if [ $NOTIFY = true ] && wget -q --spider http://google.com/; then
curl --url 'smtps://'$SMTP_SERVER':'$SMTP_PORT --ssl-reqd \
--mail-from $MAIL_USER \
--mail-rcpt $MAIL_TO \
--user $MAIL_USER':'$MAIL_PASSWORD \
-T <(echo -e "From: ${MAIL_USER}\nTo: ${MAIL_TO}\nSubject: Little Backup Box: iOS backup completed\n\nBackup log:\n\n${RSYNC_OUTPUT}")
fi
# Power off
if [ $POWER_OFF = true ]; then
poweroff
fi
|
dmpop/little-backup-box
|
scripts/ios-backup.sh
|
Shell
|
gpl-3.0
| 3,947 |
kubectl patch petset web -p '{"spec":{"replicas":0}}'
|
hunering/demo-code
|
kubernates/petset/stop-petset.sh
|
Shell
|
gpl-3.0
| 54 |
#!/bin/bash
NUM_OF_REQUESTS=1000
NUM_OF_CONCURRENT_REQUESTS=100
SERVER="127.0.0.1"
PORT=54569
CHANNEL=liveshow
if [ -n "$1" ]; then
NUM_OF_REQUESTS="$1"
fi
if [ -n "$2" ]; then
NUM_OF_CONCURRENT_REQUESTS="$2"
fi
ab -k -n $NUM_OF_REQUESTS -c $NUM_OF_CONCURRENT_REQUESTS -p post_file -H "channel: $CHANNEL" http://$SERVER:$PORT/api/post
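# Usage sketch (illustrative): 5000 requests, 200 concurrent; assumes a
# post_file payload exists in the working directory, as `ab -p` requires:
#   ./post_benchmark.sh 5000 200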
|
shelmesky/message_service
|
benchmark/post_benchmark.sh
|
Shell
|
gpl-3.0
| 348 |
#! /bin/zsh
#set -x
TMP=${TMP:-/tmp}
P2XOPTS=($P2XOPTS -X -m -Vtimes,io)
infile=$TMP/p2x-speed-in
outfile=$TMP/p2x-out.xml
IN_KB=${IN_KB:-1000} # how many KB of input data
P2X=${P2X:-p2x}
yes | dd of=$infile ibs=16c cbs=2c count=$((64*$IN_KB)) obs=1c conv=block,sync
ls -lh $infile
szin=$(ls -l $infile | cut -d " " -f 5)
wc $infile
echo $P2X $P2XOPTS -p ../../examples/configs/default -S c $infile
time ($P2X $P2XOPTS -p ../../examples/configs/default -S c $infile > $outfile)
ls -lh $outfile
szout=$(ls -l $outfile | cut -d " " -f 5)
wc $outfile
echo "size ratio: $(( 1.0 * $szout / $szin))"
|
rainac/p2x
|
tests/sh/speed_test.sh
|
Shell
|
gpl-3.0
| 605 |
######################################
# INSTALL OPENCV ON UBUNTU OR DEBIAN #
######################################
# All credits: milq
# Avaliable on: https://github.com/milq/milq/blob/master/scripts/bash/install-opencv.sh
# | THIS SCRIPT IS TESTED CORRECTLY ON |
# |------------------------------------------------------|
# | OS | OpenCV | Test | Last test |
# |------------------|--------------|------|-------------|
# | Ubuntu 18.04 LTS | OpenCV 3.4.2 | OK | 18 Jul 2018 |
# | Debian 9.5 | OpenCV 3.4.2 | OK | 18 Jul 2018 |
# |----------------------------------------------------- |
# | Debian 9.0 | OpenCV 3.2.0 | OK | 25 Jun 2017 |
# | Debian 8.8 | OpenCV 3.2.0 | OK | 20 May 2017 |
# | Ubuntu 16.04 LTS | OpenCV 3.2.0 | OK | 20 May 2017 |
# VERSION TO BE INSTALLED
OPENCV_VERSION='3.4.2'
# 1. KEEP UBUNTU OR DEBIAN UP TO DATE
sudo apt-get -y update
sudo apt-get -y upgrade
# sudo apt-get -y dist-upgrade
sudo apt-get -y autoremove
# 2. INSTALL THE DEPENDENCIES
# Build tools:
sudo apt-get install -y build-essential cmake
# GUI (if you want to use GTK instead of Qt, replace 'qt5-default' with 'libgtkglext1-dev' and remove '-DWITH_QT=ON' option in CMake):
sudo apt-get install -y qt5-default libvtk6-dev
# Media I/O:
sudo apt-get install -y zlib1g-dev libjpeg-dev libwebp-dev libpng-dev libtiff5-dev libjasper-dev libopenexr-dev libgdal-dev
# Video I/O:
sudo apt-get install -y libdc1394-22-dev libavcodec-dev libavformat-dev libswscale-dev libtheora-dev libvorbis-dev libxvidcore-dev libx264-dev yasm libopencore-amrnb-dev libopencore-amrwb-dev libv4l-dev libxine2-dev
# Parallelism and linear algebra libraries:
sudo apt-get install -y libtbb-dev libeigen3-dev
# Python:
sudo apt-get install -y python-dev python-tk python-numpy python3-dev python3-tk python3-numpy
# Java:
sudo apt-get install -y ant default-jdk
# Documentation:
sudo apt-get install -y doxygen
# 3. INSTALL THE LIBRARY
sudo apt-get install -y unzip wget
wget https://github.com/opencv/opencv/archive/${OPENCV_VERSION}.zip
unzip ${OPENCV_VERSION}.zip
rm ${OPENCV_VERSION}.zip
mv opencv-${OPENCV_VERSION} OpenCV
cd OpenCV
mkdir build
cd build
cmake -DWITH_QT=ON -DWITH_OPENGL=ON -DFORCE_VTK=ON -DWITH_TBB=ON -DWITH_GDAL=ON -DWITH_XINE=ON -DBUILD_EXAMPLES=ON -DENABLE_PRECOMPILED_HEADERS=OFF ..
make -j4
sudo make install
sudo ldconfig
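# (Optional, illustrative) sanity-check the installation; assumes the
# pkg-config file and Python bindings were installed by `make install`:
pkg-config --modversion opencv
python3 -c "import cv2; print(cv2.__version__)"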
# 4. EXECUTE SOME OPENCV EXAMPLES AND COMPILE A DEMONSTRATION
# To complete this step, please visit 'http://milq.github.io/install-opencv-ubuntu-debian'.
|
ajunior/scripts
|
opencv.sh
|
Shell
|
gpl-3.0
| 2,564 |
#!/bin/bash
#
# Ezanvakti Configuration Manager Component 2.1
#
#
# --config --gui CSS file
if [[ -f ${EZANVAKTI_DIZINI}/ezanvakti-config.css ]]
then
EZV_CSS4="${EZANVAKTI_DIZINI}/ezanvakti-config.css"
else
EZV_CSS4="${VERI_DIZINI}/veriler/ezanvakti-config.css"
fi
yapilandirma() {
local d1 d2 d3 d4 d5 d6 d7 d8 d9 d0 ds ayr1 ayr2 ayr3 ayr4
local _GUNCELLEME_YAP _OYNATICI_DURAKLAT _EZAN_DUASI_OKU _RENK_KULLAN _GUNCELLEME_GEREKLI
local _SABAH_OKUNSUN_MU _OGLE_OKUNSUN_MU _IKINDI_OKUNSUN_MU _AKSAM_OKUNSUN_MU _YATSI_OKUNSUN_MU
local _ACILISTA_BASLAT _YENIDEN_BASLATMA_GEREKLI _SESLI_UYARI _CUMA_SELASI_OKUNSUN_MU
(( GUNCELLEME_YAP )) && d1=TRUE || d1=FALSE
(( OYNATICI_DURAKLAT )) && d2=TRUE || d2=FALSE
(( EZAN_DUASI_OKU )) && d3=TRUE || d3=FALSE
(( SABAH_EZANI_OKU )) && d4=TRUE || d4=FALSE
(( OGLE_EZANI_OKU )) && d5=TRUE || d5=FALSE
(( IKINDI_EZANI_OKU )) && d6=TRUE || d6=FALSE
(( AKSAM_EZANI_OKU )) && d7=TRUE || d7=FALSE
(( YATSI_EZANI_OKU )) && d8=TRUE || d8=FALSE
(( SESLI_UYARI )) && d0=TRUE || d0=FALSE
(( CUMA_SELASI_OKU )) && ds=TRUE || ds=FALSE
(( $(gawk -F'=' '/^RENK_KULLAN/{print($2);}' "${EZANVAKTI_AYAR}") )) && d9=TRUE || d9=FALSE
if (( ACILISTA_BASLAT == 0 ))
then
_CALISMA_KIPI='Kapalı'
elif (( ACILISTA_BASLAT == 1 ))
then
_CALISMA_KIPI='Beş vakit'
else
_CALISMA_KIPI='Ramazan'
fi
_GUNCELLEME_GEREKLI=0
_YENIDEN_BASLATMA_GEREKLI=0
ayr1=/tmp/${AD}_ayr1
ayr2=/tmp/${AD}_ayr2
ayr3=/tmp/${AD}_ayr3
ayr4=/tmp/${AD}_ayr4
#ayr4=$(mktemp --tmpdir tab4.XXXXXXXX)
temizlik() {
rm -f $ayr1 $ayr2 $ayr3 $ayr4 > /dev/null 2>&1
}
ipcrm -M 190707 > /dev/null 2>&1
yad --plug=190707 --tabnum=1 --form \
--field='Ezanveri Adı:' \
--field='Ülke:' \
--field='Şehir:' \
--field='İlçe:' \
--field='Otomatik Ezanveri Güncelleme:CHK' \
--field='Uygulama çalışma kipi:CB' \
--field='\nMeal ve Kuran okuyucu seçimleri:LBL' \
--field='Kullanılacak Meal:CB' \
--field='Kuran okuyan:CB' \
--field='\nEzan ve sela okuyan adları:LBL' \
--field='Ezan okuyan:' \
--field='Sela okuyan:' \
"${EZANVERI_ADI}" "${ULKE}" "${SEHIR}" \
"${ILCE}" "$d1" "^${_CALISMA_KIPI}!Beş vakit!Ramazan!Kapalı" " " \
"^${MEAL_SAHIBI}!diyanet!ozturk!ates!yazir" \
"^${KURAN_OKUYAN}!AlGhamdi!AsShatree!AlAjmy" " " \
"${EZAN_OKUYAN}" "${SELA_OKUYAN}" > $ayr1 &
yad --plug=190707 --tabnum=2 --form \
--field='Oynatıcı Duraklat:CHK' \
--field='Ezan Duası Oku:CHK' \
--field='Vakit anımsat için sesli uyarı:CHK' \
--field='Ezan Okunma Süresi Farkı (sn):NUM' \
--field='Sela Okunma Süresi Farkı (dk):NUM' \
--field='Vakit animsat:NUM' \
--field='Sabah Ezanı:FL' \
--field='Öğle Ezanı:FL' \
--field='İkindi Ezanı:FL' \
--field='Akşam Ezanı:FL' \
--field='Yatsı Ezanı:FL' \
--field='Ezan Duası:FL' \
--field='Cuma Selası:FL' \
--field='Uyarı sesi:FL' \
"$d2" "$d3" "$d0" -- "$EZAN_OKUNMA_SURESI_FARKI[!-600..600][!60]]" \
"$SELA_OKUNMA_SURESI_FARKI[!0..240][!10]]" "$VAKIT_ANIMSAT[!0..60[!1]]" \
"${SABAH_EZANI}" "${OGLE_EZANI}" "${IKINDI_EZANI}" "${AKSAM_EZANI}" \
"${YATSI_EZANI}" "${EZAN_DUASI}" "${CUMA_SELASI}" "${UYARI_SESI}" > $ayr2 &
yad --plug=190707 --tabnum=3 --form \
--field='Sabah ezanı okunsun:CHK' \
--field='Öğle ezanı okunsun:CHK' \
--field='İkindi ezanı okunsun:CHK' \
--field='Akşam ezanı okunsun:CHK' \
--field='Yatsı ezanı okunsun:CHK' \
--field='Cuma selası okunsun:CHK' \
--field='Oynatıcı ses seviyesi:SCL' \
--field='Dini gün anımsat (sn. 0 ise kapalı):NUM' \
--field='Ezan Bildirim Süresi (sn):NUM' \
--field='Ayet Bildirim Süresi (sn):NUM' \
--field='Hadis Bildirim Süresi (sn):NUM' \
--field='Bilgi Bildirim Süresi (sn):NUM' \
--field='Güncelleme Bildirim Süresi (sn):NUM' \
"$d4" "$d5" "$d6" "$d7" "$d8" "$ds" \
"$SES[!0..100[!1]]" "$GUN_ANIMSAT[!0..30[!1]]" \
"$EZAN_BILDIRIM_SURESI[!15..300[!15]]" \
"$AYET_BILDIRIM_SURESI[!10..30[!1]]" \
"$HADIS_BILDIRIM_SURESI[!10..30[!1]]" \
"$BILGI_BILDIRIM_SURESI[!10..30[!1]]" \
"$GUNCELLEME_BILDIRIM_SURESI[!10..30[!1]]" > $ayr3 &
yad --plug=190707 --tabnum=4 --form \
--field='Uçbirimde renk kullan:CHK' \
--field='\nGelişmiş arayüz için renk seçimleri\n:LBL' \
--field='Metin kutusu arka plan rengi:CLR' \
--field='Metin kutusu yazı rengi:CLR' \
--field='Arayüz tarih saat rengi:CLR' \
--field='Arayüz konum rengi:CLR' \
--field='Arayüz çizgi rengi:CLR' \
--field='Arayüz simdiki vakit rengi:CLR' \
--field='Arayüz vakitler rengi:CLR' \
--field='Arayüz seçili vakit rengi:CLR' \
--field='Arayüz kalan süre rengi:CLR' \
--field='Arayüz seçke adları rengi:CLR' \
"$d9" " " "${ARKAPLAN_RENGI}" \
"${YAZI_RENGI}" "${ARAYUZ_TARIH_SAAT_RENGI}" \
"${ARAYUZ_KONUM_RENGI}" "${ARAYUZ_CIZGI_RENGI}" \
"${ARAYUZ_SIMDIKI_VAKIT_RENGI}" "${ARAYUZ_VAKITLER_RENGI}" \
"${ARAYUZ_SECILI_VAKIT_RENGI}" "${ARAYUZ_KALAN_SURE_RENGI}" \
"${ARAYUZ_SECKE_ADLARI_RENGI}" > $ayr4 &
yad --notebook --key=190707 \
--title "${AD^} - Yapılandırma Yöneticisi" \
--tab="Ezanveri Ayarları" --tab="Ezan Ayarları" \
--tab="Bildirim Ayarları" --tab="Renk Ayarları" \
--fixed --center --tab-pos=top --buttons-layout='spread' \
--window-icon=${AD} --css="${EZV_CSS4}" \
--button=" Ana Sayfa!${VERI_DIZINI}/simgeler/anasayfa.png":171 \
--button='yad-open:172' --button='yad-save:174' --button='yad-quit:121'
case $? in
171)
temizlik; arayuz ;;
172)
xdg-open "${EZANVAKTI_AYAR}"
temizlik; arayuz ;;
174)
IFS="|"
# liste1: ezanveri settings
# liste2: ezan settings
# liste3: notification settings
# liste4: color settings
liste1=(); liste2=(); liste3=(); liste4=()
read -ra liste1 < "$ayr1"
read -ra liste2 < "$ayr2"
read -ra liste3 < "$ayr3"
read -ra liste4 < "$ayr4"
unset IFS
######################################################################
#                         LISTE 1 OPERATIONS                         #
######################################################################
if [[ ${liste1[0]} != ${EZANVERI_ADI} ]]
then
if [[ -n ${liste1[0]} ]]
then
sed -i "s:\(EZANVERI_ADI=\).*:\1\'${liste1[0]}\':" "${EZANVAKTI_AYAR}"
_GUNCELLEME_GEREKLI=1
_YENIDEN_BASLATMA_GEREKLI=1
fi
fi
if [[ ${liste1[1]} != ${ULKE} ]]
then
if [[ -n ${liste1[1]} ]]
then
sed -i "s:\(ULKE=\).*:\1\'${liste1[1]}\':" "${EZANVAKTI_AYAR}"
_GUNCELLEME_GEREKLI=1
_YENIDEN_BASLATMA_GEREKLI=1
fi
fi
if [[ ${liste1[2]} != ${SEHIR} ]]
then
if [[ -n ${liste1[2]} ]]
then
sed -i "s:\(SEHIR=\).*:\1\'${liste1[2]}\':" "${EZANVAKTI_AYAR}"
_GUNCELLEME_GEREKLI=1
_YENIDEN_BASLATMA_GEREKLI=1
fi
fi
if [[ ${liste1[3]} != ${ILCE} ]]
then
if [[ -n ${liste1[3]} ]]
then
sed -i "s:\(ILCE=\).*:\1\'${liste1[3]}\':" "${EZANVAKTI_AYAR}"
_GUNCELLEME_GEREKLI=1
_YENIDEN_BASLATMA_GEREKLI=1
fi
fi
if [[ ${liste1[4]} != TRUE ]]
then
_GUNCELLEME_YAP=0
else
_GUNCELLEME_YAP=1
fi
if (( GUNCELLEME_YAP != _GUNCELLEME_YAP ))
then
sed -i "s:\(GUNCELLEME_YAP=\).*:\1$_GUNCELLEME_YAP:" "${EZANVAKTI_AYAR}"
fi
if [[ "${liste1[5]}" = "Kapalı" ]]
then
_ACILISTA_BASLAT=0
elif [[ "${liste1[5]}" = "Beş vakit" ]]
then
_ACILISTA_BASLAT=1
else
_ACILISTA_BASLAT=2
fi
if (( ACILISTA_BASLAT != _ACILISTA_BASLAT ))
then
sed -i "s:\(ACILISTA_BASLAT=\).*:\1$_ACILISTA_BASLAT:" "${EZANVAKTI_AYAR}"
_YENIDEN_BASLATMA_GEREKLI=1
fi
if [[ ${liste1[7]} != ${MEAL_SAHIBI} ]]
then
sed -i "s:\(MEAL_SAHIBI=\).*:\1\'${liste1[7]}\':" "${EZANVAKTI_AYAR}"
fi
if [[ ${liste1[8]} != ${KURAN_OKUYAN} ]]
then
sed -i "s:\(^KURAN_OKUYAN=\).*:\1\'${liste1[8]}\':" "${EZANVAKTI_AYAR}"
fi
if [[ ${EZAN_OKUYAN} != ${liste1[10]} ]]
then
if [[ -n ${liste1[10]} ]]
then
sed -i "s:\(EZAN_OKUYAN=\).*:\1\'${liste1[10]}\':" "${EZANVAKTI_AYAR}"
fi
fi
if [[ ${SELA_OKUYAN} != ${liste1[11]} ]]
then
if [[ -n ${liste1[11]} ]]
then
sed -i "s:\(SELA_OKUYAN=\).*:\1\'${liste1[11]}\':" "${EZANVAKTI_AYAR}"
fi
fi
######################################################################
#                         LISTE 2 OPERATIONS                         #
######################################################################
if [[ ${liste2[0]} != TRUE ]]
then
_OYNATICI_DURAKLAT=0
else
_OYNATICI_DURAKLAT=1
fi
if (( OYNATICI_DURAKLAT != _OYNATICI_DURAKLAT ))
then
sed -i "s:\(OYNATICI_DURAKLAT=\).*:\1$_OYNATICI_DURAKLAT:" "${EZANVAKTI_AYAR}"
_YENIDEN_BASLATMA_GEREKLI=1
fi
if [[ ${liste2[1]} != TRUE ]]
then
_EZAN_DUASI_OKU=0
else
_EZAN_DUASI_OKU=1
fi
if (( EZAN_DUASI_OKU != _EZAN_DUASI_OKU ))
then
sed -i "s:\(EZAN_DUASI_OKU=\).*:\1$_EZAN_DUASI_OKU:" "${EZANVAKTI_AYAR}"
_YENIDEN_BASLATMA_GEREKLI=1
fi
if [[ ${liste2[2]} != TRUE ]]
then
_SESLI_UYARI=0
else
_SESLI_UYARI=1
fi
if (( SESLI_UYARI != _SESLI_UYARI ))
then
sed -i "s:\(SESLI_UYARI=\).*:\1$_SESLI_UYARI:" "${EZANVAKTI_AYAR}"
_YENIDEN_BASLATMA_GEREKLI=1
fi
if (( EZAN_OKUNMA_SURESI_FARKI != liste2[3] ))
then
sed -i "s:\(EZAN_OKUNMA_SURESI_FARKI=\).*:\1${liste2[3]}:" "${EZANVAKTI_AYAR}"
_YENIDEN_BASLATMA_GEREKLI=1
fi
if (( SELA_OKUNMA_SURESI_FARKI != liste2[4] ))
then
sed -i "s:\(SELA_OKUNMA_SURESI_FARKI=\).*:\1${liste2[4]}:" "${EZANVAKTI_AYAR}"
_YENIDEN_BASLATMA_GEREKLI=1
fi
if (( VAKIT_ANIMSAT != liste2[5] ))
then
sed -i "s:\(VAKIT_ANIMSAT=\).*:\1${liste2[5]}:" "${EZANVAKTI_AYAR}"
_YENIDEN_BASLATMA_GEREKLI=1
fi
if [[ ${SABAH_EZANI} != ${liste2[6]} ]]
then
sed -i "s:\(SABAH_EZANI=\).*:\1\'${liste2[6]}\':" "${EZANVAKTI_AYAR}"
_YENIDEN_BASLATMA_GEREKLI=1
fi
if [[ ${OGLE_EZANI} != ${liste2[7]} ]]
then
sed -i "s:\(OGLE_EZANI=\).*:\1\'${liste2[7]}\':" "${EZANVAKTI_AYAR}"
_YENIDEN_BASLATMA_GEREKLI=1
fi
if [[ ${IKINDI_EZANI} != ${liste2[8]} ]]
then
sed -i "s:\(IKINDI_EZANI=\).*:\1\'${liste2[8]}\':" "${EZANVAKTI_AYAR}"
_YENIDEN_BASLATMA_GEREKLI=1
fi
if [[ ${AKSAM_EZANI} != ${liste2[9]} ]]
then
sed -i "s:\(AKSAM_EZANI=\).*:\1\'${liste2[9]}\':" "${EZANVAKTI_AYAR}"
_YENIDEN_BASLATMA_GEREKLI=1
fi
if [[ ${YATSI_EZANI} != ${liste2[10]} ]]
then
sed -i "s:\(YATSI_EZANI=\).*:\1\'${liste2[10]}\':" "${EZANVAKTI_AYAR}"
_YENIDEN_BASLATMA_GEREKLI=1
fi
if [[ ${EZAN_DUASI} != ${liste2[11]} ]]
then
sed -i "s:\(EZAN_DUASI=\).*:\1\'${liste2[11]}\':" "${EZANVAKTI_AYAR}"
_YENIDEN_BASLATMA_GEREKLI=1
fi
if [[ ${CUMA_SELASI} != ${liste2[12]} ]]
then
sed -i "s:\(CUMA_SELASI=\).*:\1\'${liste2[12]}\':" "${EZANVAKTI_AYAR}"
_YENIDEN_BASLATMA_GEREKLI=1
fi
if [[ ${UYARI_SESI} != ${liste2[13]} ]]
then
sed -i "s:\(UYARI_SESI=\).*:\1\'${liste2[13]}\':" "${EZANVAKTI_AYAR}"
_YENIDEN_BASLATMA_GEREKLI=1
fi
######################################################################
#                         LISTE 3 OPERATIONS                         #
######################################################################
if [[ ${liste3[0]} != TRUE ]]
then
_SABAH_OKUNSUN_MU=0
else
_SABAH_OKUNSUN_MU=1
fi
if [[ ${liste3[1]} != TRUE ]]
then
_OGLE_OKUNSUN_MU=0
else
_OGLE_OKUNSUN_MU=1
fi
if [[ ${liste3[2]} != TRUE ]]
then
_IKINDI_OKUNSUN_MU=0
else
_IKINDI_OKUNSUN_MU=1
fi
if [[ ${liste3[3]} != TRUE ]]
then
_AKSAM_OKUNSUN_MU=0
else
_AKSAM_OKUNSUN_MU=1
fi
if [[ ${liste3[4]} != TRUE ]]
then
_YATSI_OKUNSUN_MU=0
else
_YATSI_OKUNSUN_MU=1
fi
if [[ ${liste3[5]} != TRUE ]]
then
_CUMA_SELASI_OKUNSUN_MU=0
else
_CUMA_SELASI_OKUNSUN_MU=1
fi
if (( SABAH_EZANI_OKU != _SABAH_OKUNSUN_MU ))
then
sed -i "s:\(SABAH_EZANI_OKU=\).*:\1$_SABAH_OKUNSUN_MU:" "${EZANVAKTI_AYAR}"
_YENIDEN_BASLATMA_GEREKLI=1
fi
if (( OGLE_EZANI_OKU != _OGLE_OKUNSUN_MU ))
then
sed -i "s:\(OGLE_EZANI_OKU=\).*:\1$_OGLE_OKUNSUN_MU:" "${EZANVAKTI_AYAR}"
_YENIDEN_BASLATMA_GEREKLI=1
fi
if (( IKINDI_EZANI_OKU != _IKINDI_OKUNSUN_MU ))
then
sed -i "s:\(IKINDI_EZANI_OKU=\).*:\1$_IKINDI_OKUNSUN_MU:" "${EZANVAKTI_AYAR}"
_YENIDEN_BASLATMA_GEREKLI=1
fi
if (( AKSAM_EZANI_OKU != _AKSAM_OKUNSUN_MU ))
then
sed -i "s:\(AKSAM_EZANI_OKU=\).*:\1$_AKSAM_OKUNSUN_MU:" "${EZANVAKTI_AYAR}"
_YENIDEN_BASLATMA_GEREKLI=1
fi
if (( YATSI_EZANI_OKU != _YATSI_OKUNSUN_MU ))
then
sed -i "s:\(YATSI_EZANI_OKU=\).*:\1$_YATSI_OKUNSUN_MU:" "${EZANVAKTI_AYAR}"
_YENIDEN_BASLATMA_GEREKLI=1
fi
if (( CUMA_SELASI_OKU != _CUMA_SELASI_OKUNSUN_MU ))
then
sed -i "s:\(CUMA_SELASI_OKU=\).*:\1$_CUMA_SELASI_OKUNSUN_MU:" "${EZANVAKTI_AYAR}"
_YENIDEN_BASLATMA_GEREKLI=1
fi
if (( SES != liste3[6] ))
then
sed -i "s:\(SES=\).*:\1\'${liste3[6]}\':" "${EZANVAKTI_AYAR}"
_YENIDEN_BASLATMA_GEREKLI=1
fi
if (( GUN_ANIMSAT != liste3[7] ))
then
sed -i "s:\(GUN_ANIMSAT=\).*:\1\'${liste3[7]}\':" "${EZANVAKTI_AYAR}"
_YENIDEN_BASLATMA_GEREKLI=1
fi
if (( EZAN_BILDIRIM_SURESI != liste3[8] ))
then
sed -i "s:\(EZAN_BILDIRIM_SURESI=\).*:\1\'${liste3[8]}\':" "${EZANVAKTI_AYAR}"
_YENIDEN_BASLATMA_GEREKLI=1
fi
if (( AYET_BILDIRIM_SURESI != liste3[9] ))
then
sed -i "s:\(AYET_BILDIRIM_SURESI=\).*:\1\'${liste3[9]}\':" "${EZANVAKTI_AYAR}"
fi
if (( HADIS_BILDIRIM_SURESI != liste3[10] ))
then
sed -i "s:\(HADIS_BILDIRIM_SURESI=\).*:\1\'${liste3[10]}\':" "${EZANVAKTI_AYAR}"
fi
if (( BILGI_BILDIRIM_SURESI != liste3[11] ))
then
sed -i "s:\(BILGI_BILDIRIM_SURESI=\).*:\1\'${liste3[11]}\':" "${EZANVAKTI_AYAR}"
fi
if (( GUNCELLEME_BILDIRIM_SURESI != liste3[12] ))
then
sed -i "s:\(GUNCELLEME_BILDIRIM_SURESI=\).*:\1\'${liste3[12]}\':" "${EZANVAKTI_AYAR}"
fi
######################################################################
#                         LISTE 4 OPERATIONS                         #
######################################################################
if [[ ${liste4[0]} != TRUE ]]
then
_RENK_KULLAN=0
else
_RENK_KULLAN=1
fi
if (( $(gawk -F'=' '/^RENK_KULLAN/{print($2);}' "${EZANVAKTI_AYAR}") != _RENK_KULLAN ))
then
sed -i "s:\(^RENK_KULLAN=\).*:\1$_RENK_KULLAN:" "${EZANVAKTI_AYAR}"
fi
if [[ ${liste4[2]} != ${ARKAPLAN_RENGI} ]]
then
sed -i "s:\(ARKAPLAN_RENGI=\).*:\1\'${liste4[2]}\':" "${EZANVAKTI_AYAR}"
fi
if [[ ${liste4[3]} != ${YAZI_RENGI} ]]
then
sed -i "s:\(YAZI_RENGI=\).*:\1\'${liste4[3]}\':" "${EZANVAKTI_AYAR}"
fi
if [[ ${liste4[4]} != ${ARAYUZ_TARIH_SAAT_RENGI} ]]
then
sed -i "s:\(ARAYUZ_TARIH_SAAT_RENGI=\).*:\1\'${liste4[4]}\':" "${EZANVAKTI_AYAR}"
fi
if [[ ${liste4[5]} != ${ARAYUZ_KONUM_RENGI} ]]
then
sed -i "s:\(ARAYUZ_KONUM_RENGI=\).*:\1\'${liste4[5]}\':" "${EZANVAKTI_AYAR}"
fi
if [[ ${liste4[6]} != ${ARAYUZ_CIZGI_RENGI} ]]
then
sed -i "s:\(ARAYUZ_CIZGI_RENGI=\).*:\1\'${liste4[6]}\':" "${EZANVAKTI_AYAR}"
fi
if [[ ${liste4[7]} != ${ARAYUZ_SIMDIKI_VAKIT_RENGI} ]]
then
sed -i "s:\(ARAYUZ_SIMDIKI_VAKIT_RENGI=\).*:\1\'${liste4[7]}\':" "${EZANVAKTI_AYAR}"
fi
if [[ ${liste4[8]} != ${ARAYUZ_VAKITLER_RENGI} ]]
then
sed -i "s:\(ARAYUZ_VAKITLER_RENGI=\).*:\1\'${liste4[8]}\':" "${EZANVAKTI_AYAR}"
fi
if [[ ${liste4[9]} != ${ARAYUZ_SECILI_VAKIT_RENGI} ]]
then
sed -i "s:\(ARAYUZ_SECILI_VAKIT_RENGI=\).*:\1\'${liste4[9]}\':" "${EZANVAKTI_AYAR}"
fi
if [[ ${liste4[10]} != ${ARAYUZ_KALAN_SURE_RENGI} ]]
then
sed -i "s:\(ARAYUZ_KALAN_SURE_RENGI=\).*:\1\'${liste4[10]}\':" "${EZANVAKTI_AYAR}"
fi
if [[ ${liste4[11]} != ${ARAYUZ_SECKE_ADLARI_RENGI} ]]
then
sed -i "s:\(ARAYUZ_SECKE_ADLARI_RENGI=\).*:\1\'${liste4[11]}\':" "${EZANVAKTI_AYAR}"
fi
# # source
. "${EZANVAKTI_AYAR}"
(( _GUNCELLEME_GEREKLI )) && {
bilesen_yukle guncelleyici
gorsel_guncelleme_yap
}
# # source
. "${EZANVAKTI_AYAR}"
(( _YENIDEN_BASLATMA_GEREKLI )) && {
if (( ! ACILISTA_BASLAT ))
then
if pgrep ${AD}-sleep
then
pkill ${AD}-sleep
fi
else
if pgrep ${AD}-sleep
then
pkill ${AD}-sleep
fi
${BILESEN_DIZINI}/${AD}-sleep &
disown
fi
}
temizlik; arayuz ;;
# _SES_SEVIYE=$(sed -n 's:[,|.].*::p' <<<${list[8]})
121)
temizlik
exit 0 ;;
esac
}
# vim: set ft=sh ts=2 sw=2 et:
|
fbostanci/ezanvakti
|
lib/yapilandirma_yoneticisi.bash
|
Shell
|
gpl-3.0
| 18,790 |
#!/bin/bash
set -ex
ostree pull --repo=$LUG_path --mirror flathub --depth=1
flatpak build-update-repo $LUG_path
|
sjtug/mirror-docker
|
lug/worker-script/flatpak.sh
|
Shell
|
agpl-3.0
| 114 |
#!/bin/bash
xyz()
{
echo "curr func:$FUNCNAME is executing..."
}
xyz
echo "curr func: $FUNCNAME"
|
cxsjabc/basic
|
bash/_basic/func.sh
|
Shell
|
agpl-3.0
| 103 |
#! /bin/bash
#
#----------------------------------------------------------------------------
#
# - Oh Jasmin Dynamic DNS -
#
# Copyright 2000 - 2015 by
# SwordLord - the coding crew - http://www.swordlord.com
# and contributing authors
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License
# for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#-----------------------------------------------------------------------------
#
# Original Author:
# [email protected]
# [email protected]
# [email protected]
#
# $Id:
#
#-----------------------------------------------------------------------------
# exit the script when the admin has not changed the configuration
echo ""
echo "************************"
echo "Hello from updatedns.sh"
echo "************************"
echo "I was not configured! "
echo "Please configure me "
echo "and remove this warning!"
echo "************************"
exit 0
TARGET_DIR="/etc/tinydns/root/"
BASE_DIR="/etc/tinydns/root/base_files/"
TARGET_FILE="/etc/tinydns/root/data"
# remove old dns-file
rm $TARGET_FILE
# generate / concat new from primary
for i in "$BASE_DIR"/primary/*
do
cat ${i} >> $TARGET_FILE
done
# concat new from dyndns
for i in "$BASE_DIR"/dyndns/*
do
cat ${i} >> $TARGET_FILE
done
# tinydns-data has to be executed in the same directory where the file data is
cd /etc/tinydns/root
# generate data.cdb from data for dbndns
/usr/bin/tinydns-data
# copy to secondary
#..
# EOF
|
LordEidi/ohjasmin.js
|
utilities/updatedns.sh
|
Shell
|
agpl-3.0
| 2,010 |
#!/usr/bin/env bash
set -eu
DB_HOST=localhost
DB_USER=$1
NEW_USER_PASSWORD=$2
DB_NAME=$3
DUMP_FILE="${4}"
export PGPASSWORD="${NEW_USER_PASSWORD}"
psql_settings=("--username=${DB_USER}" "--host=${DB_HOST}" "--dbname=${DB_NAME}" "--set" "ON_ERROR_STOP=on")
gunzip --stdout "history.${DUMP_FILE}" \
| sed -e "/ALTER DEFAULT PRIVILEGES FOR ROLE postgres/d" \
| sed -e '/COPY public.spatial_ref_sys/,+2d' \
| sed -e "/COPY public.spatial_ref_sys FROM stdin/d" \
| sed -e "/COMMENT ON SCHEMA history/d" \
| sed -e "/CREATE SCHEMA history/d" \
| psql "${psql_settings[@]}"
gunzip --stdout "${DUMP_FILE}" \
| sed -e "/ALTER DEFAULT PRIVILEGES FOR ROLE postgres/d" \
| sed -e '/COPY public.spatial_ref_sys/,+2d' \
| sed -e "/COPY public.spatial_ref_sys FROM stdin/d" \
| sed -e "/COMMENT ON SCHEMA public/d" \
| sed -e "/CREATE SCHEMA public/d" \
| psql "${psql_settings[@]}"
|
akvo/akvo-lumen
|
postgres/migration-from-elephantsql-to-google-sql/restore-one-db.sh
|
Shell
|
agpl-3.0
| 899 |
#!/usr/bin/env bash
. $builddir/tests/test_common.sh
set -e
set -o pipefail
function test_xccdf_results_arf_asset {
local DS="${srcdir}/test_xccdf_results_arf_no_oval.xccdf.xml"
local result="results.xml"
local stderr="error.log"
[ -f $result ] && rm -f $result
$OSCAP xccdf eval --results-arf $result $DS 2> $stderr
[ -f $stderr ]; [ ! -s $stderr ]; rm $stderr
[ -f $result ]
local asset='/arf:asset-report-collection/arf:assets/arf:asset[@id="asset0"]/ai:computing-device'
case $(uname) in
FreeBSD)
local name=`hostname`
local domain=`grep search /etc/resolv.conf | cut -d " " -f 2`
local fqdn=$name.$domain
;;
*)
local fqdn=`hostname --fqdn`
;;
esac
echo $asset'/ai:fqdn[text()="'$fqdn'"]'
assert_exists 1 $asset'/ai:fqdn[text()="'$fqdn'"]'
local hostname=`hostname`
echo $asset'/ai:hostname[text()="'$hostname'"]'
assert_exists 1 $asset'/ai:hostname[text()="'$hostname'"]'
local macs=`ifconfig -a | grep ether | uniq | awk -F ' ' '{print toupper($2);}'`
for mac in $macs; do
echo $asset'/ai:connections/ai:connection/ai:mac-address[text()="'${mac}'"]'
assert_exists 1 $asset'/ai:connections/ai:connection/ai:mac-address[text()="'$mac'"]'
done
local ip4s=`ifconfig -a | grep 'inet ' | uniq | awk -F ' ' '{print $2;}'`
for ip in $ip4s; do
echo $asset'/ai:connections/ai:connection/ai:ip-address/ai:ip-v4[text()="'$ip'"]'
assert_exists 1 $asset'/ai:connections/ai:connection/ai:ip-address/ai:ip-v4[text()="'$ip'"]'
done
if require ipcalc; then
local ip6s=`ifconfig -a | grep 'inet6' | uniq | awk -F ' ' '{print $2;}'`
for ip in $ip6s; do
local ipf=`ipcalc $ip | grep Full | awk -F ' ' '{print gensub(/(^|:)0+([0-9a-f])/,"\\\1\\\2","g",$3);}'`
echo $asset'/ai:connections/ai:connection/ai:ip-address/ai:ip-v6[text()="'$ipf'"]'
assert_exists 1 $asset'/ai:connections/ai:connection/ai:ip-address/ai:ip-v6[text()="'$ipf'"]'
done
fi
rm $result
}
test_xccdf_results_arf_asset
|
OpenSCAP/openscap
|
tests/API/XCCDF/unittests/test_xccdf_results_arf_asset.sh
|
Shell
|
lgpl-2.1
| 2,089 |
#!/bin/bash
set -e
set -x
apt-get update
apt-get install -y git python-pip python-dev libgeoip-dev
pip install virtualenv
SCRIPTS=`dirname $0`
if [ ! -d "/opt/hpfeeds-logger" ]
then
cd /opt/
virtualenv hpfeeds-logger
. hpfeeds-logger/bin/activate
pip install hpfeeds-logger==0.0.7.1
else
echo "It looks like hpfeeds-logger is already installed. Moving on to configuration."
fi
IDENT=hpfeeds-logger-splunk
SECRET=`python -c 'import uuid;print str(uuid.uuid4()).replace("-","")'`
CHANNELS='amun.events,dionaea.connections,dionaea.capture,glastopf.events,beeswarm.hive,kippo.sessions,conpot.events,snort.alerts,suricata.events,wordpot.events,shockpot.events,p0f.events,elastichoney.events'
cat > /opt/hpfeeds-logger/splunk.json <<EOF
{
"host": "localhost",
"port": 10000,
"ident": "${IDENT}",
"secret": "${SECRET}",
"channels": [
"amun.events",
"dionaea.connections",
"dionaea.capture",
"glastopf.events",
"beeswarm.hive",
"kippo.sessions",
"conpot.events",
"snort.alerts",
"suricata.events",
"wordpot.events",
"shockpot.events",
"p0f.events",
"elastichoney.events"
],
"log_file": "/var/log/mhn/mhn-splunk.log",
"formatter_name": "splunk"
}
EOF
. /opt/hpfeeds/env/bin/activate
python /opt/hpfeeds/broker/add_user.py "$IDENT" "$SECRET" "" "$CHANNELS"
mkdir -p /var/log/mhn
apt-get install -y supervisor
# use > (not >>) so re-runs do not duplicate the [program] section
cat > /etc/supervisor/conf.d/hpfeeds-logger-splunk.conf <<EOF
[program:hpfeeds-logger-splunk]
command=/opt/hpfeeds-logger/bin/hpfeeds-logger splunk.json
directory=/opt/hpfeeds-logger
stdout_logfile=/var/log/mhn/hpfeeds-logger-splunk.log
stderr_logfile=/var/log/mhn/hpfeeds-logger-splunk.err
autostart=true
autorestart=true
startsecs=1
EOF
supervisorctl update
|
Endika/mhn
|
scripts/install_hpfeeds-logger-splunk.sh
|
Shell
|
lgpl-2.1
| 1,829 |
#!/bin/bash
# make TARGET overrideable with env
: ${TARGET:=$HOME/miniconda}
function install_miniconda {
echo "installing miniconda to $TARGET"
wget http://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh -O mc.sh -o /dev/null
bash mc.sh -b -f -p $TARGET
}
install_miniconda
export PATH=$TARGET/bin:$PATH
|
trendelkampschroer/msmtools
|
tools/ci/travis/install_miniconda.sh
|
Shell
|
lgpl-3.0
| 324 |
#!/bin/bash -eux
wget -q http://pkg.jenkins-ci.org/debian-stable/jenkins-ci.org.key -O - | apt-key add -
cat > /etc/apt/sources.list.d/jenkins.list << MUFASA
deb http://pkg.jenkins-ci.org/debian-stable binary/
MUFASA
apt-get update
apt-get install -qq jenkins git
service jenkins start
curl -sSOL https://updates.jenkins-ci.org/latest/jquery.hpi
curl -sSOL https://updates.jenkins-ci.org/latest/simple-theme-plugin.hpi
curl -sSOL https://updates.jenkins-ci.org/latest/scm-api.hpi
curl -sSOL https://updates.jenkins-ci.org/latest/credentials.hpi
curl -sSOL https://updates.jenkins-ci.org/latest/p4.hpi
curl -sSOL https://updates.jenkins-ci.org/latest/git-client.hpi
curl -sSOL https://updates.jenkins-ci.org/latest/git.hpi
curl -sSOL https://updates.jenkins-ci.org/latest/scp.hpi
chown jenkins:jenkins *.hpi
mv *.hpi /var/lib/jenkins/plugins
mkdir -p /root/.ssh
ssh-keyscan -H github.com > /root/.ssh/known_hosts
(cd /var/lib/jenkins/userContent && \
git clone https://github.com/kevinburke/doony.git && \
chown jenkins:jenkins -R doony)
cat > /var/lib/jenkins/org.codefirst.SimpleThemeDecorator.xml << SIMBA
<org.codefirst.SimpleThemeDecorator plugin="[email protected]">
<cssUrl>http://jenkins.test:8081/userContent/doony/doony.css</cssUrl>
<jsUrl>http://jenkins.test:8081/userContent/doony/doony.js</jsUrl>
</org.codefirst.SimpleThemeDecorator>
SIMBA
chown jenkins:jenkins /var/lib/jenkins/org.codefirst.SimpleThemeDecorator.xml
service jenkins restart
|
gnawhleinad/mufasa
|
install/jenkins.sh
|
Shell
|
unlicense
| 1,454 |
sudo docker run -ti --rm \
-e DISPLAY=$DISPLAY \
--privileged \
-v /dev/bus/usb:/dev/bus/usb \
-v /tmp/.X11-unix:/tmp/.X11-unix \
42n4/android-studio
|
pwasiewi/dokerz
|
android-studio/android.sh
|
Shell
|
unlicense
| 163 |
sudo apt-get install python-pip
sudo apt-get install python-bs4
sudo apt-get install python-wget
|
Pensel/kikaloader
|
install.sh
|
Shell
|
unlicense
| 97 |
#!/usr/bin/env bash
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
set -e
set -x
MODE=${1:-"mkl"}
N_JOBS=$(grep -c ^processor /proc/cpuinfo)
echo ""
echo "Bazel will use ${N_JOBS} concurrent job(s)."
echo ""
# Run configure.
export TF_NEED_CUDA=0
export PYTHON_BIN_PATH=`which python2`
yes "" | $PYTHON_BIN_PATH configure.py
if [[ "$MODE" == "eigen" ]]; then
CONFIG=""
OMPTHREADS=""
else
CONFIG="--config=mkl"
  # Set OMP_NUM_THREADS for low-performing benchmarks.
  # The default value (= core count) degrades performance in some benchmark cases.
  # The optimal thread count is case specific.
  # An argument can be passed to the script, and its value is used if given.
  # Otherwise OMP_NUM_THREADS is set to 10.
if [[ -z $1 ]]; then
OMPTHREADS="--action_env=OMP_NUM_THREADS=10"
else
OMPTHREADS="--action_env=OMP_NUM_THREADS=$1"
fi
fi
# Run bazel test command. Double test timeouts to avoid flakes.
# Setting KMP_BLOCKTIME to 0 lets OpenMP threads to sleep right after parallel execution
# in an MKL primitive. This reduces the effects of an oversubscription of OpenMP threads
# caused by executing multiple tests concurrently.
bazel test --test_tag_filters=-no_oss,-no_oss_py2,-oss_serial,-gpu,-benchmark-test --test_lang_filters=cc,py -k \
--jobs=${N_JOBS} --test_timeout 300,450,1200,3600 --build_tests_only \
${CONFIG} --test_env=KMP_BLOCKTIME=0 ${OMPTHREADS} --config=opt --test_output=errors -- \
//tensorflow/... -//tensorflow/compiler/... -//tensorflow/contrib/... -//tensorflow/lite/...
|
chemelnucfin/tensorflow
|
tensorflow/tools/ci_build/linux/cpu/run_mkl.sh
|
Shell
|
apache-2.0
| 2,190 |
#!/bin/bash
function logit() {
echo "[php_build] $@" 1>&2
}
function build_from_tar()
{
tar="$2"
if [ `uname -s` = "Darwin" ]; then
src=`echo $tar | sed -E 's/^.*\/?(php-[0-9]+\.[0-9]+\.[0-9]+)\.tar\.bz2$/\1/'`
else
src=`echo $tar | sed 's/^.*\/\?\(php-[0-9]\+\.[0-9]\+\.[0-9]\+\)\.tar\.bz2$/\1/'`
fi
if [ -z "$src" ]; then
return 1
fi
# prepare normal
logit "extract tar ball"
rm -fr $src && tar jxf $tar
# build
build $1 $src
}
function build()
{
# init
prefix=$1
src=$2
# debug
param_debug=""
prefix_debug=""
if [ -n "$3" ]; then
param_debug="--enable-debug"
prefix_debug="-debug"
fi
# version related
if [ `uname -s` = "Darwin" ]; then
version=`grep ' PHP_VERSION ' $src/main/php_version.h | sed -E 's/^#define PHP_VERSION "([0-9a-zA-Z\.]+)".*$/\1/'`
else
version=`grep ' PHP_VERSION ' $src/main/php_version.h | sed 's/^#define PHP_VERSION "\([0-9a-zA-Z\.]\+\)".*$/\1/'`
fi
buildname="php-${version}${prefix_debug}"
logit "[$buildname] build"
# patch
selfpath=`dirname $0`
if [ -f $selfpath/php-${version}.patch ]; then
cp $selfpath/php-${version}.patch $src/trace.patch
fi
cd $src
if [ -f trace.patch ]; then
patch -p0 --verbose < trace.patch
logit "[$buildname] patch"
fi
# prepare
param_general="--disable-all $param_debug"
param_sapi="--enable-cli --enable-cgi"
# hack for PHP 5.2
if [ ${version:0:3} == "5.2" ]; then
# pcre: run-tests.php
# spl: spl_autoload_register in trace's tests
param_ext="--with-pcre-regex --enable-spl"
else
param_ext=""
fi
param_ext="$param_ext --with-bz2 --enable-sockets --with-curl --with-zlib --enable-zip --enable-json --enable-session --enable-pdo --with-mysql=mysqlnd --with-mysqli=mysqlnd --with-pdo-mysql=mysqlnd --with-pear --enable-xml --enable-libxml --enable-phar --enable-filter --enable-hash --with-iconv --with-openssl"
cmd="./configure --quiet --prefix=$prefix/$buildname $param_general $param_sapi $param_ext"
# configure
logit "[$buildname] configure"
logit "$cmd"
$cmd
# make and install
logit "[$buildname] make"
# no need for a meaningless "make clean" here; the tree was just extracted
make --quiet && \
make install
ret=$?
if [ $ret -eq 0 ]; then
logit "[$buildname] done"
else
logit "[$buildname] fail"
fi
}
# main
if [ $# -lt 2 ]; then
echo "usage: `basename $0` <prefix> <php-tarfile>"
exit 1
fi
# argument
prefix="$1"
if [ ! -d "$prefix" ]; then
logit "error: invalid prefix \"$prefix\""
exit 1
fi
logit "prefix: $prefix"
tarfile="$2"
if [ ! -f "$tarfile" ]; then
logit "error: invalid PHP tar file \"$tarfile\""
exit 1
fi
logit "tarfile: $tarfile"
# build
build_from_tar $prefix $tarfile
|
chuan-yun/Molten
|
.travis/php_build.sh
|
Shell
|
apache-2.0
| 2,922 |
#!/bin/bash
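# Optional hardening: abort on the first failed git command so a failed
# merge is never pushed.
set -e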
echo "Checkout deploy/frontend"
git checkout deploy/frontend
echo "Merge master into deploy/frontend"
git merge master
echo "Push to upstream"
git push
echo "Checkout master"
git checkout master
|
Thylossus/tud-movie-character-insights
|
deploy-client.sh
|
Shell
|
apache-2.0
| 207 |
#!/bin/sh
export CLASSPATH=$CLASSPATH:/c/users/ext-shambmi/spark-2.4.3-bin-hadoop2.7/jars
export JAVA_OPTS="$JAVA_OPTS -Dhttp.proxyHost=husproxy.hus.fi -Dhttp.proxyPort=8080 -Dhttps.proxyHost=husproxy.hus.fi -Dhttps.proxyPort=8080"
export HADOOP_HOME=/c/users/ext-shambmi/winutils
|
opme/SurgeonScorecard
|
scala/setenv.sh
|
Shell
|
apache-2.0
| 280 |
alter-install() {
if [ -d "$1" ]; then
sudo update-alternatives --install \
/usr/bin/java java "$1"/bin/java 200 \
--slave /usr/bin/jar jar "$1"/bin/jar \
--slave /usr/bin/javac javac "$1"/bin/javac \
--slave /usr/bin/javadoc javadoc "$1"/bin/javadoc \
--slave /usr/bin/javah javah "$1"/bin/javah \
--slave /usr/bin/javap javap "$1"/bin/javap
else
echo "invalid java home"
fi;
}
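# Example (path is illustrative): alter-install /usr/lib/jvm/jdk1.8.0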
alter-jdk() {
sudo update-alternatives --config java
if [ -e /opt/java ];then
sudo rm -f /opt/java
fi;
sudo ln -sf $(dirname $(dirname $(readlink -f /etc/alternatives/java))) /opt/java
}
export JAVA_HOME=/opt/java
export PATH=${JAVA_HOME}/bin:${PATH}
export JRE_HOME="$JAVA_HOME/jre"
export CLASSPATH=".:$JAVA_HOME/lib:$JRE_HOME/lib"
|
JoshuaJiangFD/bashrc.d
|
linux/java.path.bash
|
Shell
|
apache-2.0
| 732 |
#!/bin/bash
# Copyright 2015 Insight Data Science
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
if [ "$#" -ne 1 ]; then
echo "Please specify cluster name!" && exit 1
fi
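# BASH_SOURCE resolves to this script's own path even when it is sourced, unlike $0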
PEG_ROOT=$(dirname ${BASH_SOURCE})/../..
source ${PEG_ROOT}/util.sh
CLUSTER_NAME=$1
PUBLIC_DNS=$(fetch_cluster_public_dns ${CLUSTER_NAME})
MASTER_DNS=$(fetch_cluster_master_public_dns ${CLUSTER_NAME})
WORKER_DNS=$(fetch_cluster_worker_public_dns ${CLUSTER_NAME})
NUM_WORKERS=$(echo ${WORKER_DNS} | wc -w)
# Configure base Presto coordinator and workers
single_script="${PEG_ROOT}/config/presto/setup_single.sh"
args="$MASTER_DNS $NUM_WORKERS"
for dns in ${PUBLIC_DNS}; do
run_script_on_node ${dns} ${single_script} ${args} &
done
wait
# Configure Presto coordinator and workers
coordinator_script="${PEG_ROOT}/config/presto/config_coordinator.sh"
run_script_on_node ${MASTER_DNS} ${coordinator_script}
worker_script="${PEG_ROOT}/config/presto/config_worker.sh"
for dns in ${WORKER_DNS}; do
run_script_on_node ${dns} ${worker_script} &
done
wait
cli_script="${PEG_ROOT}/config/presto/setup_cli.sh"
run_script_on_node ${MASTER_DNS} ${cli_script}
echo "Presto configuration complete!"
|
InsightDataScience/pegasus
|
config/presto/setup_cluster.sh
|
Shell
|
apache-2.0
| 1,673 |
#!/bin/bash
CURRENT_DIRECTORY=`pwd`
cd "$(dirname "$0")"
source load_sysconfig.sh
echo "creating DB scheme"
${WIDGET_HOME}/bin/migrate_db.sh create
cd $CURRENT_DIRECTORY
|
CloudifySource/cloudify-widget
|
setup/utils/create_schema.sh
|
Shell
|
apache-2.0
| 161 |
#!/bin/bash
# Author: yeho <lj2007331 AT gmail.com>
# BLOG: https://linuxeye.com
#
# Notes: OneinStack for CentOS/RedHat 7+ Debian 8+ and Ubuntu 16+
#
# Project home page:
# https://oneinstack.com
# https://github.com/oneinstack/oneinstack
Install_ImageMagick() {
if [ -d "${imagick_install_dir}" ]; then
echo "${CWARNING}ImageMagick already installed! ${CEND}"
else
pushd ${oneinstack_dir}/src > /dev/null
tar xzf ImageMagick-${imagemagick_ver}.tar.gz
#if [ "${PM}" == 'yum' ]; then
# yum -y install libwebp-devel
#else if [ "${PM}" == 'apt-get' ]; then
# yum -y install libwebp-dev
#fi
pushd ImageMagick-${imagemagick_ver} > /dev/null
./configure --prefix=${imagick_install_dir} --enable-shared --enable-static
make -j ${THREAD} && make install
popd > /dev/null
rm -rf ImageMagick-${imagemagick_ver}
popd > /dev/null
fi
}
Uninstall_ImageMagick() {
if [ -d "${imagick_install_dir}" ]; then
rm -rf ${imagick_install_dir}
echo; echo "${CMSG}ImageMagick uninstall completed${CEND}"
fi
}
Install_pecl_imagick() {
if [ -e "${php_install_dir}/bin/phpize" ]; then
pushd ${oneinstack_dir}/src > /dev/null
PHP_detail_ver=$(${php_install_dir}/bin/php-config --version)
PHP_main_ver=${PHP_detail_ver%.*}
phpExtensionDir=`${php_install_dir}/bin/php-config --extension-dir`
if [[ "${PHP_main_ver}" =~ ^5.3$ ]]; then
tar xzf imagick-${imagick_oldver}.tgz
pushd imagick-${imagick_oldver} > /dev/null
else
tar xzf imagick-${imagick_ver}.tgz
pushd imagick-${imagick_ver} > /dev/null
fi
export PKG_CONFIG_PATH=/usr/local/lib/pkgconfig
${php_install_dir}/bin/phpize
./configure --with-php-config=${php_install_dir}/bin/php-config --with-imagick=${imagick_install_dir}
make -j ${THREAD} && make install
popd > /dev/null
if [ -f "${phpExtensionDir}/imagick.so" ]; then
echo 'extension=imagick.so' > ${php_install_dir}/etc/php.d/03-imagick.ini
echo "${CSUCCESS}PHP imagick module installed successfully! ${CEND}"
rm -rf imagick-${imagick_ver} imagick-${imagick_oldver}
else
echo "${CFAILURE}PHP imagick module install failed, Please contact the author! ${CEND}" && lsb_release -a
fi
popd > /dev/null
fi
}
Uninstall_pecl_imagick() {
if [ -e "${php_install_dir}/etc/php.d/03-imagick.ini" ]; then
rm -f ${php_install_dir}/etc/php.d/03-imagick.ini
echo; echo "${CMSG}PHP imagick module uninstall completed${CEND}"
else
echo; echo "${CWARNING}PHP imagick module does not exist! ${CEND}"
fi
}
|
kaneawk/oneinstack
|
include/ImageMagick.sh
|
Shell
|
apache-2.0
| 2,594 |
#!/usr/bin/env bash
set -e
source bosh-cpi-src/ci/tasks/utils.sh
source /etc/profile.d/chruby-with-ruby-2.1.2.sh
cpi_release_name="bosh-google-cpi"
semver=`cat version-semver/number`
pushd bosh-cpi-src
echo "Using BOSH CLI version..."
bosh version
echo "Creating CPI BOSH Release..."
bosh create release --name ${cpi_release_name} --version ${semver} --with-tarball
popd
mv bosh-cpi-src/dev_releases/${cpi_release_name}/${cpi_release_name}-${semver}.tgz candidate/
|
frodenas/bosh-google-cpi-boshrelease
|
ci/tasks/build-candidate.sh
|
Shell
|
apache-2.0
| 479 |
: '@(#)robocmd_768AS.sh 22.1 03/24/08 2003-2004 '
#
#
# Copyright (C) 2015 University of Oregon
#
# You may distribute under the terms of either the GNU General Public
# License or the Apache License, as specified in the LICENSE file.
#
# For more information, see the LICENSE file.
#
#
#!/bin/sh
PATH=${PATH}:/usr/ucb
export PATH
LD_LIBRARY_PATH=/vnmr/java:$vnmruser/psg:/vnmr/lib
export LD_LIBRARY_PATH
$vnmrsystem/jre/bin/java -mx128m -classpath $vnmrsystem/java/RoboCmd768AS.jar -Dsysdir=$vnmrsystem -Duserdir=$vnmruser RoboCmd >>/dev/null &
|
OpenVnmrJ/OpenVnmrJ
|
src/scripts/robocmd_768AS.sh
|
Shell
|
apache-2.0
| 796 |
#!/bin/bash
set -e
if [ $# -ne 1 ]; then
echo "Usage: `basename $0` <tag>"
exit 65
fi
TAG=$1
#
# Deploy the latest documentation
#
echo "Deploying documentation..."
mkdocs gh-deploy
#
# Tag & build master branch
#
echo "Tagging build..."
git checkout master
git tag ${TAG}
box build
#
# Copy executable file into GH pages
#
echo "Moving everything into place..."
#git checkout gh-downloads
cp csvutil.phar downloads/csvutil-${TAG}.phar
git add downloads/csvutil-${TAG}.phar
# openssl prints "SHA1(file)= <digest>"; keep only the digest field
SHA1=$(openssl sha1 csvutil.phar | awk '{print $2}')
JSON='name:"csvutil.phar"'
JSON="${JSON},sha1:\"${SHA1}\""
JSON="${JSON},url:\"http://theharvester.github.io/csv-util/downloads/csvutil-${TAG}.phar\""
JSON="${JSON},version:\"${TAG}\""
#
# Update manifest
#
cat manifest.json | jsawk -a "this.push({${JSON}})" | python -mjson.tool > manifest.json.tmp
mv manifest.json.tmp manifest.json
echo "Committing and pushing..."
git add manifest.json
git commit -m "Releasing version ${TAG}"
git push origin master
#
# Update gh-pages, then go back to master
#
git checkout gh-pages
git checkout master -- downloads
git checkout master -- manifest.json
git checkout master -- .gitignore
git add -A
git commit -m "Releasing version ${TAG}"
git push origin gh-pages
git push --tags
git checkout master
composer install
echo "New version ${TAG} created"
|
theHarvester/csv-util
|
release.sh
|
Shell
|
apache-2.0
| 1,293 |
# set -e
echo "Preparing clean slate test run ..."
rm -rf tmp dist coverage .nyc_output
echo "Bootstrapping Denali commands"
npm run bootstrap
echo "Linking Denali globally"
yarn link 2> /dev/null
echo "Full build"
denali build
echo "Running tests"
NODE_ENV=test nyc denali test --verbose "$@"
|
denali-js/denali
|
scripts/test.sh
|
Shell
|
apache-2.0
| 294 |
#! /bin/bash
./remove.sh
docker run --rm --name container-name huas/template command
|
noricohuas/docker
|
_template/run.sh
|
Shell
|
apache-2.0
| 87 |
#!/bin/bash
# a quoted "~" is never tilde-expanded, so use $HOME instead
KUBECTL=${KUBECTL:-"$HOME/kubernetes/cluster/kubectl.sh"}
[ -x "${KUBECTL}" ] || KUBECTL=/usr/bin/kubectl
RC_LIST="pulp-apache-rc pulp-worker-rc pulp-resource-manager-rc pulp-beat-rc
pulp-msg-rc pulp-db-rc"
#POD_LIST="pulp-db-init"
POD_LIST=""
SERVICE_LIST="pulp-web pulp-msg pulp-db"
#
# Reduce replicas to 0
#
for RC in ${RC_LIST}
do
${KUBECTL} scale --replicas=0 replicationcontrollers ${RC}
done
#
# Remove all replication controllers
#
for RC in ${RC_LIST}
do
${KUBECTL} delete replicationcontrollers ${RC}
done
for POD in ${POD_LIST}
do
${KUBECTL} delete pods ${POD}
done
#
# Remove services
#
for SERVICE in ${SERVICE_LIST}
do
${KUBECTL} delete services ${SERVICE}
done
|
markllama/atomicapp-examples
|
pulp/kubernetes/centos/stop_pulp.sh
|
Shell
|
apache-2.0
| 711 |
#!/bin/bash
set -o errexit -o nounset
cd target/doc
git init
git config user.email '[email protected]'
git config user.name 'Andrew Paseltiner'
git remote add upstream "https://${GH_TOKEN}@github.com/${TRAVIS_REPO_SLUG}.git"
touch .
git add -A .
git commit -qm "rebuild pages at ${TRAVIS_TAG}"
git push -q upstream HEAD:gh-pages --force
|
apasel422/eclectic
|
deploy-docs.sh
|
Shell
|
apache-2.0
| 346 |
#!/bin/bash
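# Usage: fiotest.sh <output-file> <size-in-GB>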
outputfile=$1
filesize=$2
# FIO Write test.
fio --name=writefile --size=2G --filesize=${filesize}G --filename=$outputfile --bs=1M \
--nrfiles=1 --direct=1 --sync=0 --randrepeat=0 --rw=write --refill_buffers --end_fsync=1 \
--iodepth=200 --ioengine=libaio
|
bdastur/cephutils
|
zabbix/scripts/fiotest.sh
|
Shell
|
apache-2.0
| 286 |
#!/bin/bash
## \cond
#HEADSTART##############################################################
#
#PROJECT: UnifiedTraceAndLogManager
#AUTHOR: Arno-Can Uestuensoez - [email protected]
#MAINTAINER: Arno-Can Uestuensoez - [email protected]
#SHORT: utalm-bash
#LICENSE: Apache-2.0 + CCL-BY-SA-3.0
#
#
########################################################################
#
# Copyright [2007,2008,2010,2013] Arno-Can Uestuensoez
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
########################################################################
#
# refer to source-package for unstripped sources
#
#HEADEND################################################################
#
#$Header$
#
##
## \endcond
## @file
## @brief record header format SUBSYSNUM SUBSYSSTR
##
## \cond
##
#
shopt -s nullglob
#
#Execution anchor
MYCALLPATHNAME=$0
MYCALLNAME=`basename $MYCALLPATHNAME`
MYCALLNAME=${MYCALLNAME%.sh}
MYCALLPATH=`dirname $MYCALLPATHNAME`
MYBOOTSTRAPFILE=$(getPathToBootstrapDir.sh)/bootstrap-03_03_001.sh
. ${MYBOOTSTRAPFILE}
if [ $? -ne 0 ];then
echo "ERROR:Missing bootstrap file:configuration: ${MYBOOTSTRAPFILE}">&2
exit 1
fi
setUTALMbash 1 $*
#
###
#
. $(getPathToLib.sh libutalmfileobjects.sh)
. $(getPathToLib.sh libutalmrefpersistency.sh)
#
r=$(utalm-bash-cli.sh -d 1,f:all%SUBSYSNUM%SUBSYSSTR,title 2>&1)
f="CALLNAME:RLOGINDNS:DATE:TIME:PID:PPID:FILENAME:LINENUMBER:SUBSYSSTR:LEVELSTR:CODE:SEVERITY:DATA"
assertWithExit $LINENO $BASH_SOURCE "[[ '$r' == '$f' ]]"
## \endcond
|
ArnoCan/utalm
|
src/utalm-bash/tests/utalm-bash/BETA/headerFormatDemo/SUBSYS/CASE001/CallCase.sh
|
Shell
|
apache-2.0
| 2,046 |
#!/bin/bash
# ---------
# -----------------------------------------------------
# Generation of the Dashboard PHP Initialisation script
# -----------------------------------------------------
echo "<?php" > dashboard.inc
function dashboard_service()
{
v=`co-resolver $1`
if [ -z "$v" ]; then
echo "\$$1=\"http://127.0.0.1:8086\";" >> dashboard.inc
e=fail
else
v=`echo $v | cut -f 2 -d =`
echo "\$$1=\"$v\";" >> dashboard.inc
fi
}
# ---------------------------
# the dashboard refresh timer
# ---------------------------
dashboard_timer=$1
dashboard_number=$2
log_timer=$3
if [ -z "$dashboard_timer" ]; then
dashboard_timer=30;
fi
if [ -z "$dashboard_number" ]; then
dashboard_number=1;
fi
if [ -z "$log_timer" ]; then
log_timer=15;
fi
echo "\$dashboard_timer=\"${dashboard_timer}\";" >> dashboard.inc
echo "\$log_timer=\"${log_timer}\";" >> dashboard.inc
echo "\$dashboard_number=\"${dashboard_number}\";" >> dashboard.inc
configdir=`pwd`
echo "\$configdir=\"$configdir\";" >> dashboard.inc
echo "export ACCORDS_BASE=$configdir" > dashboard.sh
# ---------------------------------------------------
# the publisher information for the parser and broker
# ---------------------------------------------------
var=`grep publisher accords.xml | tail -n 1 | cut -f 2 -d =`
echo "\$publisher=$var;" >> dashboard.inc
echo "/usr/local/bin/testcp --publisher $var \$1 \$2 \$3 \$4" > ./dashboard-parser
chmod uog+x dashboard-parser
echo "/usr/local/bin/testcb --publisher $var \$1 \$2 \$3 \$4" > ./dashboard-broker
chmod uog+x dashboard-broker
echo "/usr/local/bin/command --config accords.xml invoice \$1 " > ./dashboard-invoice
chmod uog+x dashboard-invoice
echo "/usr/local/bin/command --config accords.xml analyse co-log " > ./dashboard-analyse
chmod uog+x dashboard-analyse
echo "/usr/local/bin/command --config accords.xml \$1 \$2 \$3 \$4 " > ./dashboard-command
chmod uog+x dashboard-command
chmod uog+rw . rest service co-log
chmod uog+r security security/*
# ----------------------
# description categories
# ----------------------
dashboard_service manifest
dashboard_service node
dashboard_service infrastructure
dashboard_service image
dashboard_service compute
dashboard_service network
dashboard_service storage
dashboard_service port
dashboard_service system
dashboard_service package
dashboard_service configuration
dashboard_service action
dashboard_service release
dashboard_service interface
dashboard_service security
# -----------------------------
# platform operation categories
# -----------------------------
dashboard_service service
dashboard_service contract
dashboard_service instruction
dashboard_service firewall
dashboard_service placement
dashboard_service quantity
dashboard_service provider
dashboard_service consumer
dashboard_service quota
dashboard_service account
dashboard_service user
dashboard_service authorization
dashboard_service price
dashboard_service transaction
dashboard_service invoice
dashboard_service event
dashboard_service alert
dashboard_service probe
dashboard_service session
dashboard_service connection
dashboard_service monitor
dashboard_service agreement
dashboard_service penalty
dashboard_service reward
# --------------------
# provisioning proccis
# --------------------
dashboard_service openstack
dashboard_service opennebula
dashboard_service windowsazure
dashboard_service amazon
dashboard_service deltacloud
dashboard_service proactive
dashboard_service cloudfoundry
dashboard_service jpaasprocci
echo "?>" >> dashboard.inc
|
compatibleone/accords-platform
|
dashboard/dashboard-init-unsafe.sh
|
Shell
|
apache-2.0
| 3,566 |
#!/bin/bash
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Script which downloads and builds the thirdparty dependencies
# only if necessary.
#
# In a git repo, this uses git checksum information on the thirdparty
# tree. Otherwise, it uses a 'stamp file' approach.
set -e
set -o pipefail
DEPENDENCY_GROUPS=
case $1 in
"")
DEPENDENCY_GROUPS="common uninstrumented"
;;
"tsan")
DEPENDENCY_GROUPS="common tsan"
;;
"all")
DEPENDENCY_GROUPS="common uninstrumented tsan"
;;
*)
echo "Unknown build configuration: $1"
exit 1
;;
esac
TP_DIR=$(dirname $BASH_SOURCE)
cd $TP_DIR
NEEDS_BUILD=
NEEDS_REHASH=
IS_IN_GIT=$(test -e ../.git && echo true || :)
if [ -n "$IS_IN_GIT" ]; then
# Determine whether this subtree in the git repo has changed since thirdparty
# was last built.
CUR_THIRDPARTY_HASH=$(cd .. && git ls-tree -d HEAD thirdparty | awk '{print $3}')
for GROUP in $DEPENDENCY_GROUPS; do
LAST_BUILD_HASH=$(cat .build-hash.$GROUP || :)
if [ "$CUR_THIRDPARTY_HASH" != "$LAST_BUILD_HASH" ]; then
echo "Rebuilding thirdparty dependency group '$GROUP': the repository has changed since it was last built."
echo "Old git hash: $LAST_BUILD_HASH"
echo "New build hash: $CUR_THIRDPARTY_HASH"
NEEDS_BUILD="$NEEDS_BUILD $GROUP"
NEEDS_REHASH="$NEEDS_REHASH $GROUP"
fi
done
if [ -z "$NEEDS_BUILD" ]; then
# All the hashes matched. Determine whether the developer has any local changes.
if ! ( git diff --quiet . && git diff --cached --quiet . ) ; then
echo "Rebuilding thirdparty dependency groups '$DEPENDENCY_GROUPS': there are local changes in the repository."
NEEDS_BUILD="$DEPENDENCY_GROUPS"
fi
fi
else
# If we aren't running inside a git repository (e.g. we are
# part of a source distribution tarball) then we can't use git to find
# out whether the build is clean. Instead, look at the ctimes of special
# stamp files, and see if any files inside this directory have been
# modified since then.
for GROUP in $DEPENDENCY_GROUPS; do
STAMP_FILE=.build-stamp.$GROUP
if [ -f $STAMP_FILE ]; then
CHANGED_FILE_COUNT=$(find . -cnewer $STAMP_FILE | wc -l)
echo "$CHANGED_FILE_COUNT file(s) been modified since thirdparty dependency group '$GROUP' was last built."
if [ $CHANGED_FILE_COUNT -gt 0 ]; then
echo "Rebuilding."
NEEDS_BUILD="$NEEDS_BUILD $GROUP"
fi
else
echo "It appears that thirdparty dependency group '$GROUP' was never built. Building."
NEEDS_BUILD="$NEEDS_BUILD $GROUP"
fi
done
fi
if [ -z "$NEEDS_BUILD" ]; then
echo "Not rebuilding thirdparty. No changes since last build."
exit 0
fi
# Remove the old hashes/stamps before building so that if the build is aborted
# and the repository is taken backwards in time to the point where the old
# hashes/stamps matched, the repository would still be rebuilt.
for GROUP in $NEEDS_BUILD; do
rm -f .build-hash.$GROUP .build-stamp.$GROUP
done
# Download and build the necessary dependency groups.
./download-thirdparty.sh
./build-thirdparty.sh $NEEDS_BUILD
# The build succeeded. Update the appropriate hashes/stamps.
if [ -n "$IS_IN_GIT" ]; then
for GROUP in $NEEDS_REHASH; do
echo $CUR_THIRDPARTY_HASH > .build-hash.$GROUP
done
else
for GROUP in $NEEDS_BUILD; do
touch .build-stamp.$GROUP
done
fi
|
InspurUSA/kudu
|
thirdparty/build-if-necessary.sh
|
Shell
|
apache-2.0
| 4,129 |
#!/bin/bash
# -------------------------------------------------------------------------- #
# Copyright 2002-2009, Distributed Systems Architecture Group, Universidad #
# Complutense de Madrid (dsa-research.org) #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
if [ -f /mnt/context.sh ]
then
. /mnt/context.sh
fi
echo $HOSTNAME > /etc/hostname
hostname $HOSTNAME
sed -i "/127.0.1.1/s/ubuntu/$HOSTNAME/" /etc/hosts
if [ -n "$IP_PUBLIC" ]; then
ifconfig eth0 $IP_PUBLIC
fi
if [ -n "$NETMASK" ]; then
ifconfig eth0 netmask $NETMASK
fi
if [ -f /mnt/$ROOT_PUBKEY ]; then
mkdir -p /root/.ssh
cat /mnt/$ROOT_PUBKEY >> /root/.ssh/authorized_keys
# the directory needs the execute bit; only the key file should be 600
chmod 700 /root/.ssh
chmod 600 /root/.ssh/authorized_keys
fi
if [ -n "$USERNAME" ]; then
useradd -s /bin/bash -m $USERNAME
if [ -f /mnt/$USER_PUBKEY ]; then
mkdir -p /home/$USERNAME/.ssh/
cat /mnt/$USER_PUBKEY >> /home/$USERNAME/.ssh/authorized_keys
chown -R $USERNAME:$USERNAME /home/$USERNAME/.ssh
chmod -R 600 /home/$USERNAME/.ssh/authorized_keys
fi
fi
|
fairchild/open-nebula-mirror
|
share/scripts/ubuntu/context/init.sh
|
Shell
|
apache-2.0
| 2,004 |
#!/usr/bin/env bash
TMP=${BASH_SOURCE[0]}
TB_FW_PATH=${TMP%/*}
TB_PROBES_PATH="${TB_FW_PATH}/../Probes"
TB_SERVERS_PATH="${TB_FW_PATH}/../Servers"
TB_EXECUTORS_PATH="${TB_FW_PATH}/../Executors"
TB_NOTIFIERS_PATH="${TB_FW_PATH}/../Notifiers"
TB_COLOR_RED='\033[38;31m'
TB_COLOR_YELLOW='\033[38;33m'
TB_NO_COLOR='\033[0;00m'
if tput colors > /dev/null; then
TB_DISPLAY_ERROR_START=$TB_COLOR_RED
TB_DISPLAY_ERROR_END=$TB_NO_COLOR
TB_DISPLAY_INFO_START=$TB_COLOR_YELLOW
TB_DISPLAY_INFO_END=$TB_NO_COLOR
else
TB_DISPLAY_ERROR_START=""
TB_DISPLAY_ERROR_END=""
TB_DISPLAY_INFO_START=""
TB_DISPLAY_INFO_END=""
fi
declare -A tb_executors
export tb_executors
declare -A tb_notifiers
export tb_notifiers
tb_info() {
echo -e "${TB_DISPLAY_INFO_START}$@${TB_DISPLAY_INFO_END}"
}
tb_error() {
echo -e "${TB_DISPLAY_ERROR_START}$@${TB_DISPLAY_ERROR_END}"
}
tb_critical() {
tb_error "$@"
exit 1
}
tb_dump() {
if [ "$#" -eq 0 ]; then
tb_critical "Attempting to dump an empty array"
fi
local myArray=$1[@]
tb_info "Dump array '$1':\n${!myArray}"
}
tb_update_probe() {
echo "Loading probe: $1"
}
tb_update_server() {
echo "Loading server: $1"
}
tb_update_executor() {
if [ "$2" == "" ]; then
description="$1"
else
description="$2"
fi
tb_executors[$1]=$description
echo "Loading executor: $1"
}
tb_update_notifier() {
if [ "$2" == "" ]; then
description="$1"
else
description="$2"
fi
tb_notifiers[$1]=$description
echo "Loading notifier: $1"
}
ex() {
local mode="$1"
shift
for executor in ${!tb_executors[@]}
do
local ex_mode=${tb_executors[$executor]}
if [ $ex_mode == $mode ]; then
$executor "$@"
fi
done
}
alert() {
local mode="$1"
shift
for notifier in ${!tb_notifiers[@]}
do
local nt_mode=${tb_notifiers[$notifier]}
if [ $nt_mode == $mode ]; then
$notifier "$@"
fi
done
}
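# Illustrative usage (executor name is hypothetical): if tb_update_executor
# registered "deploy_exec" with mode "remote", then `ex remote host1` runs
# `deploy_exec host1`. alert dispatches over notifiers the same way.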
bash_version_str=$(bash --version)
if ! [[ "$bash_version_str" =~ .*version.4.* ]]; then
tb_critical "Please upgrade to Bash version >= 4\nIf you are on OS X: 'brew install bash'"
fi
for probe in ${TB_PROBES_PATH}/*.sh; do
. $probe
done
for server in ${TB_SERVERS_PATH}/*.sh; do
. $server
done
for executor in ${TB_EXECUTORS_PATH}/*.sh; do
. $executor
done
for notifier in ${TB_NOTIFIERS_PATH}/*.sh; do
. $notifier
done
|
Fusion/TightBeam
|
_fw/init.sh
|
Shell
|
apache-2.0
| 2,480 |
#!/bin/bash
DATE=""
if [ $1 = "today" ];then
DATE=`date +%Y-%m-%d`
elif [ $1 = "yesterday" ]; then
DATE=`date -d "yesterday" +%Y-%m-%d`
elif [ `echo $1|cut -c1-5` = "2014-" ]; then
DATE=`echo $1|cut -c1-10`
fi
if [ -f "member.sql" ]; then
rm member.sql
fi
if [ "$DATE" = "" ]; then
for i in $(seq $1 0 ); do
date=`date -d "$i days" +%Y-%m-%d`
echo "select concat('$date,',count(*)) from nova_user_vip_recharge where rechargedAt > '$date' and rechargedAt < '$date 99';" >> member.sql
done
else
date=$DATE
echo "select concat('$date,',count(*)) from nova_user_vip_recharge where rechargedAt > '$date' and rechargedAt < '$date 99';" >> member.sql
fi
|
liufeiit/shell
|
awk_shell/stats/member/gen.sh
|
Shell
|
apache-2.0
| 684 |
#!/bin/sh
if [ -z "$CI_IOS_TESTS" ]
then
echo no | android create avd --force -n test -t $ANDROID_TARGET --abi armeabi-v7a
emulator -avd test -no-skin -no-audio -no-window &
android-wait-for-emulator
adb shell input keyevent 82 &
./gradlew squidb-tests:installDebug squidb-tests:installDebugAndroidTest
else
wget https://github.com/google/j2objc/releases/download/$J2OBJC_VERSION/j2objc-$J2OBJC_VERSION.zip -O /tmp/j2objc-$J2OBJC_VERSION.zip
unzip -oq /tmp/j2objc-$J2OBJC_VERSION.zip -d /tmp
fi
|
jdkoren/squidb
|
scripts/before-test.sh
|
Shell
|
apache-2.0
| 523 |
#!/bin/bash
#
# This script updates all slaves to the latest snapshot of the
# WEC indexing package
#
# To update (and install) configuration scripts on the slaves
# run upload_slave_config.sh.
#
SCRIPT_DIR=`dirname $0`
source $SCRIPT_DIR/wec.config.sh
if [ "$1" == "" ]; then
echo -e "USAGE:"
echo -e "\tslaves.sh <command>\n"
echo -e "The following commands are available:\n"
echo -e "\tinit\t\tUpload basic configuration scripts to the slaves (you need to call update after this)"
echo -e "\tupdate\t\tUpdate the WEC distribution on the slaves"
echo -e "\tstart\t\tStart each slave as a WEC worker"
echo -e "\trun <command>\tExecute a command on each slave\n"
fi;
if [ "$1" == "update" ]; then
echo "Updating slaves"
for slave in $SLAVES
do
WECUSER=`echo $slave | sed -e "s/@.*//g"`
ssh $slave /home/$WECUSER/setup_slave.sh
done
fi;
if [ "$1" == "start" ]; then
echo "Starting slaves"
for slave in $SLAVES
do
WECUSER=`echo $slave | sed -e "s/@.*//g"`
START="/home/$WECUSER/wecindexer/bin/wec.sh worker"
echo "Running $START on $slave"
ssh -f $slave $START
done
fi;
if [ "$1" == "init" ]; then
echo "Initializing slaves"
echo "Generating setup_slave.sh"
cat $SCRIPT_DIR/wec.config.sh > $SCRIPT_DIR/setup_slave.sh
cat $SCRIPT_DIR/setup_slave.in >> $SCRIPT_DIR/setup_slave.sh
chmod a+x $SCRIPT_DIR/setup_slave.sh
for slave in $SLAVES
do
echo "Uploading setup_slave.sh to $slave"
scp -p $SCRIPT_DIR/setup_slave.sh $slave:~/
done
echo "Make sure you have a copy of the WEC distribution in public_html/$TARBALL before you run 'slaves.sh update'"
fi;
if [ "$1" == "run" ]; then
if [ "$2" == "" ]; then
echo -e "USAGE:\n\tslaves.sh run <command>"
echo -e "\n<command> must not be empty. Fx. 'killall java'."
elif [ "$2" != "" ]; then
echo -e "Running \"$2\" on all slaves"
for slave in $SLAVES
do
echo -e "Running \"$2\" on $slave"
ssh $slave $2
done
echo "Done"
fi;
fi;
|
statsbiblioteket/summa
|
Core/scripts/wec/slaves.sh
|
Shell
|
apache-2.0
| 2,098 |
#!/bin/bash
export GH_ORG=${GH_ORG:-davidehringer}
export GH_REPO=${GH_REPO:-cf-buildpack-management-plugin}
export DESCRIPTION=${DESCRIPTION:-"GA release"}
export PKG_DIR=${PKG_DIR:=out}
VERSION=1.0.0
if [[ "$(which github-release)X" == "X" ]]; then
echo "Please install github-release. Read https://github.com/aktau/github-release#readme"
exit 1
fi
echo "Creating tagged release v${VERSION} of $GH_ORG/$GH_REPO."
read -n1 -r -p "Ok to proceed? (Ctrl-C to cancel)..." key
github-release release \
--user $GH_ORG --repo $GH_REPO \
--tag v${VERSION} \
--name "v${VERSION}" \
--description "${DESCRIPTION}"
os_arches=( darwin_amd64 linux_386 linux_amd64 windows_386 windows_amd64 )
for os_arch in "${os_arches[@]}"; do
asset=$(ls ${PKG_DIR}/${GH_REPO}_${os_arch}* | head -n 1)
filename="${asset##*/}"
echo "Uploading $filename..."
github-release upload \
--user $GH_ORG --repo $GH_REPO \
--tag v${VERSION} \
--name $filename \
--file ${asset}
done
echo "Release complete: https://github.com/$GH_ORG/$GH_REPO/releases/tag/v$VERSION"
|
davidehringer/cf-buildpack-management-plugin
|
bin/release.sh
|
Shell
|
apache-2.0
| 1,082 |
#!/usr/bin/env sh
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o nounset
set -o errexit
# shellcheck disable=SC1091
. scripts/common.sh
# Doesn't follow symlinks, but it's likely expected for most users
SCRIPT_BASENAME="$(basename "${0}")"
GKE_CLUSTER_NAME_DESCRIPTION="GKE cluster name to deploy workloads to"
GKE_CLUSTER_REGION_DESCRIPTION="ID of the region of the GKE cluster"
GOOGLE_CLOUD_PROJECT_DESCRIPTION="ID of the Google Cloud Project where the cluster to deploy to resides"
usage() {
echo "${SCRIPT_BASENAME} - This script installs Istio in the target GKE cluster."
echo
echo "USAGE"
echo " ${SCRIPT_BASENAME} [options]"
echo
echo "OPTIONS"
echo " -h $(is_linux && echo "| --help"): ${HELP_DESCRIPTION}"
echo " -n $(is_linux && echo "| --cluster-name"): ${GKE_CLUSTER_NAME_DESCRIPTION}"
echo " -p $(is_linux && echo "| --google-cloud-project"): ${GOOGLE_CLOUD_PROJECT_DESCRIPTION}"
echo " -r $(is_linux && echo "| --cluster-region"): ${GKE_CLUSTER_REGION_DESCRIPTION}"
echo
echo "EXIT STATUS"
echo
echo " ${EXIT_OK} on correct execution."
echo " ${ERR_VARIABLE_NOT_DEFINED} when a parameter or a variable is not defined, or empty."
echo " ${ERR_MISSING_DEPENDENCY} when a required dependency is missing."
echo " ${ERR_ARGUMENT_EVAL_ERROR} when there was an error while evaluating the program options."
}
LONG_OPTIONS="cluster-name:,cluster-region:,google-cloud-project:,help"
SHORT_OPTIONS="ce:hn:p:r:s"
echo "Checking if the necessary dependencies are available..."
check_exec_dependency "envsubst"
check_exec_dependency "gcloud"
check_exec_dependency "getopt"
check_exec_dependency "kubectl"
check_exec_dependency "sleep"
# BSD getopt (bundled in MacOS) doesn't support long options, and has different parameters than GNU getopt
if is_linux; then
TEMP="$(getopt -o "${SHORT_OPTIONS}" --long "${LONG_OPTIONS}" -n "${SCRIPT_BASENAME}" -- "$@")"
elif is_macos; then
TEMP="$(getopt "${SHORT_OPTIONS} --" "$@")"
fi
RET_CODE=$?
if [ "${RET_CODE}" -ne 0 ]; then
echo "Error while evaluating command options. Terminating..."
# Ignoring SC2086 because those are defined in common.sh, and don't need quotes
# shellcheck disable=SC2086
exit ${ERR_ARGUMENT_EVAL_ERROR}
fi
eval set -- "${TEMP}"
GOOGLE_CLOUD_PROJECT=
GKE_CLUSTER_NAME=
GKE_CLUSTER_REGION=
while true; do
case "${1}" in
-n | --cluster-name)
GKE_CLUSTER_NAME="${2}"
shift 2
;;
-r | --cluster-region)
GKE_CLUSTER_REGION="${2}"
shift 2
;;
-p | --google-cloud-project)
GOOGLE_CLOUD_PROJECT="${2}"
shift 2
;;
--)
shift
break
;;
-h | --help | *)
usage
# Ignoring because those are defined in common.sh, and don't need quotes
# shellcheck disable=SC2086
exit $EXIT_OK
break
;;
esac
done
echo "Checking if the necessary parameters are set..."
check_argument "${GKE_CLUSTER_NAME}" "${GKE_CLUSTER_NAME_DESCRIPTION}"
check_argument "${GKE_CLUSTER_REGION}" "${GKE_CLUSTER_REGION_DESCRIPTION}"
check_argument "${GOOGLE_CLOUD_PROJECT}" "${GOOGLE_CLOUD_PROJECT_DESCRIPTION}"
echo "Setting the default Google Cloud project to ${GOOGLE_CLOUD_PROJECT}..."
gcloud config set project "${GOOGLE_CLOUD_PROJECT}"
ISTIO_ARCHIVE_NAME=istio-"${ISTIO_VERSION}"-linux-amd64.tar.gz
if [ ! -e "${ISTIO_PATH}" ]; then
echo "Downloading Istio ${ISTIO_VERSION} to ${ISTIO_PATH}"
wget --content-disposition https://github.com/istio/istio/releases/download/"${ISTIO_VERSION}"/"${ISTIO_ARCHIVE_NAME}"
tar -xvzf "${ISTIO_ARCHIVE_NAME}"
rm "${ISTIO_ARCHIVE_NAME}"
fi
echo "Initializing the GKE cluster credentials for ${GKE_CLUSTER_NAME}..."
gcloud container clusters get-credentials "${GKE_CLUSTER_NAME}" \
--region="${GKE_CLUSTER_REGION}"
echo "Installing Istio..."
"${ISTIO_BIN_PATH}"/istioctl install \
--filename "${TUTORIAL_KUBERNETES_DESCRIPTORS_PATH}"/mesh-expansion/istio-operator.yaml \
--set profile=demo \
--skip-confirmation
echo
wait_for_load_balancer_ip "istio-eastwestgateway" "istio-system"
wait_for_load_balancer_ip "istio-ingressgateway" "istio-system"
echo "Configuring the ingress gateway..."
kubectl apply -f "${TUTORIAL_KUBERNETES_DESCRIPTORS_PATH}"/gateway.yaml
echo "Configuring the Prometheus add-on..."
kubectl apply -f "${ISTIO_SAMPLES_PATH}"/addons/prometheus.yaml
echo "Configuring the Grafana add-on..."
GRAFANA_DESCRIPTORS_PATH="${TUTORIAL_KUBERNETES_DESCRIPTORS_PATH}"/grafana
cp "${ISTIO_SAMPLES_PATH}"/addons/grafana.yaml "${GRAFANA_DESCRIPTORS_PATH}"/
kubectl apply -k "${GRAFANA_DESCRIPTORS_PATH}"
echo "Configuring the Kiali add-on..."
KIALI_DESCRIPTOR_PATH="${ISTIO_SAMPLES_PATH}"/addons/kiali.yaml
if ! kubectl apply -f "${KIALI_DESCRIPTOR_PATH}"; then
echo "There were errors installing Kiali. Retrying..."
sleep 5
kubectl apply -f "${KIALI_DESCRIPTOR_PATH}"
fi
kubectl apply -f "${TUTORIAL_KUBERNETES_DESCRIPTORS_PATH}"/kiali/virtual-service.yaml
|
GoogleCloudPlatform/solutions-istio-mesh-expansion-migration
|
scripts/install-istio.sh
|
Shell
|
apache-2.0
| 5,430 |
#!/bin/sh
# Compile-time weaving - The following executes the HelloWorld woven application. This means aspectj aspects were woven in at compile time and
# are part of the classes. aspectjrt.jar needs to be in the classpath.
java -Dorg.automon=sysout -classpath ../automon/target/automon-1.0.1-SNAPSHOT.jar:../helloworld_woven/target/helloworld_woven-1.0.1-SNAPSHOT.jar:libs/aspectjrt.jar com.stevesouza.helloworld.HelloWorld
# to run the program in a loop for 100 times (allows time to look at automon jmx in jconsole)
# java -Dorg.automon=sysout -classpath ../automon/target/automon-1.0.1-SNAPSHOT.jar:../helloworld_woven/target/helloworld_woven-1.0.1-SNAPSHOT.jar:libs/aspectjrt.jar com.stevesouza.helloworld.HelloWorld 100
|
appbakers/automon_example
|
examples/hello-world-woven.sh
|
Shell
|
apache-2.0
| 733 |
echo "$((123+20))"
VALORE=$[123+20]
echo "$[123*$VALORE]"
|
becloudready/devopstraining
|
bash/arithmatic/arith-00.sh
|
Shell
|
apache-2.0
| 59 |
echo Deploying i2 IAP 3.0.11 example deployment
function changeString {
if [[ $# -ne 3 ]]; then
echo "$FUNCNAME ERROR: Wrong number of arguments. Requires FILE FROMSTRING TOSTRING."
return 1
fi
SED_FILE=$1
FROMSTRING=$2
TOSTRING=$3
TMPFILE=$SED_FILE.tmp
#escape to and from strings
FROMSTRINGESC=$(echo $FROMSTRING | sed -e 's/\\/\\\\/g' -e 's/\//\\\//g' -e 's/&/\\\&/g')
TOSTRINGESC=$(echo $TOSTRING | sed -e 's/\\/\\\\/g' -e 's/\//\\\//g' -e 's/&/\\\&/g')
sed -e "s/$FROMSTRINGESC/$TOSTRINGESC/g" $SED_FILE > $TMPFILE && mv $TMPFILE $SED_FILE
if [ ! -f $TMPFILE ]; then
return 0
else
echo "$FUNCNAME ERROR: Something went wrong."
return 2
fi
}
SCRIPTDIR=$(pwd)
export IAPBINARYFILE=I2_INT_ANA_PLTF_V3.0.11_MP_ML.zip
export INSTALLDIR=/iap/install/
export IAPREPOSITORY=/iap/install/iap-repository
export IAPDIR=/iap/3011
export IAPDEPLOYMENTTOOLKITDIR=$IAPDIR/IAP-Deployment-Toolkit
#get and unzip IAP 3.0.11 from file server
wget -q http://$FILESERVER_IP/$IAPBINARYFILE
mkdir -p $INSTALLDIR
mv $IAPBINARYFILE $INSTALLDIR
cd $INSTALLDIR
unzip $IAPBINARYFILE
#install Installation Manager that's included in IAP
cd $INSTALLDIR/installation-manager
unzip -q agent.installer.linux.gtk.x86_64_1.8.0.20140902_1503.zip
./installc -acceptLicense
#make IAP install dirs and bindmount them to
#another location, because default Linux image has limited space
#and with bind mount there's no need add additional disks
mkdir -p /opt/IBM/iap
mkdir -p /iap/3011
mount --bind /iap/3011 /opt/IBM/iap
#generate response file
/opt/IBM/InstallationManager/eclipse/tools/imcl -repositories $IAPREPOSITORY generateResponseFile > response.xml
#change install dir
changeString response.xml /opt/IBM/iap $IAPDIR
#add HOMEDRIVE env variable, because otherwise IM response file install fails
export HOMEDRIVE=/root
/opt/IBM/InstallationManager/eclipse/tools/imcl -acceptLicense input response.xml
echo IAP deployment toolkit installed in $IAPDIR
#copy example configuration
cp -r $IAPDEPLOYMENTTOOLKITDIR/configuration-example $IAPDEPLOYMENTTOOLKITDIR/configuration
#generate IAP defaults
cd $IAPDEPLOYMENTTOOLKITDIR/scripts
./setup -t generateDefaults
#modify configuration/environment/http-server.properties
changeString $IAPDEPLOYMENTTOOLKITDIR/configuration/environment/http-server.properties http.server.home.dir=/opt/IBM/HTTPServer http.server.home.dir=/opt/IBM/WebSphere/HTTPServer
#modify configuration/environment/iap/environment.properties
changeString $IAPDEPLOYMENTTOOLKITDIR/configuration/environment/iap/environment.properties db.database.location.dir.db2=/home/db2inst1 db.database.location.dir.db2=/db2inst1
#copy db2jcc4.jar to IAP deployment toolkit
cp /opt/ibm/db2/V10.5/java/db2jcc4.jar $IAPDEPLOYMENTTOOLKITDIR/configuration/environment/common/jdbc-drivers/
#set up credentials
CREDENTIALSFILE=$IAPDEPLOYMENTTOOLKITDIR/configuration/environment/credentials.properties
echo db.write1.user-name=$DBUSERNAME > $CREDENTIALSFILE
echo db.write1.password=$DBUSERPASSWORD >> $CREDENTIALSFILE
#set root in dbadmin group, otherwise IAP deployment fails
usermod -a -G db2iadm1 root
#deploy IAP
./setup -t deployExample
#start Liberty
./setup -t startLiberty
#restart HTTP Server
/opt/IBM/WebSphere/HTTPServer/bin/apachectl -k restart
echo i2 IAP example deployment done
|
samisalkosuo/pureapp
|
scriptpackages/i2iap3.0.11/setup.sh
|
Shell
|
apache-2.0
| 3,423 |
#!/bin/bash
readonly mongod="/c/Program Files/MongoDB/Server/3.0/bin/mongod.exe"
readonly dbpath='/c/data/db'
readonly port='9090'
mkdir -p "$dbpath" > /dev/null
"$mongod" --dbpath "$dbpath" --port $port
|
blstream/AugumentedSzczecin_java
|
mongo/mongoStart.sh
|
Shell
|
apache-2.0
| 203 |
#!/usr/bin/env bash
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
export TERM=${TERM:-dumb}
export PAGER=cat
export BUILDROOT=$(pwd)
export DEST_DIR=${BUILDROOT}/built-geode
export GEODE_BUILD=${DEST_DIR}/test
export CALLSTACKS_DIR=${GEODE_BUILD}/callstacks
#SLEEP_TIME is in seconds
SLEEP_TIME=${1}
COUNT=3
STACK_INTERVAL=5
mkdir -p ${CALLSTACKS_DIR}
sleep ${SLEEP_TIME}
echo "Capturing call stacks"
for (( h=0; h<${COUNT}; h++)); do
today=`date +%Y-%m-%d-%H-%M-%S`
logfile=${CALLSTACKS_DIR}/callstacks-${today}.txt
if [ -n "${PARALLEL_DUNIT}" ]; then
mapfile -t containers < <(docker ps --format '{{.Names}}')
for (( i=0; i<${#containers[@]}; i++ )); do
echo "Container: ${containers[i]}" | tee -a ${logfile};
mapfile -t processes < <(docker exec ${containers[i]} jps | grep ChildVM | cut -d ' ' -f 1)
echo "Got past processes."
for ((j=0; j<${#processes[@]}; j++ )); do
echo "********* Dumping stack for process ${processes[j]}:" | tee -a ${logfile}
docker exec ${containers[i]} jstack -l ${processes[j]} >> ${logfile}
done
done
else
mapfile -t processes < <(jps | grep ChildVM | cut -d ' ' -f 1)
echo "Got past processes."
for ((j=0; j<${#processes[@]}; j++ )); do
echo "********* Dumping stack for process ${processes[j]}:" | tee -a ${logfile}
jstack -l ${processes[j]} >> ${logfile}
done
fi
sleep ${STACK_INTERVAL}
done
echo "Checking progress files:"
if [ -n "${PARALLEL_DUNIT}" ]; then
mapfile -t progressfiles < <(find ${GEODE_BUILD} -name "*-progress.txt")
for (( i=0; i<${#progressfiles[@]}; i++)); do
echo "Checking progress file: ${progressfiles[i]}"
/usr/local/bin/dunit-progress hang ${progressfiles[i]} | tee -a ${CALLSTACKS_DIR}/dunit-hangs.txt
done
fi
|
smanvi-pivotal/geode
|
ci/scripts/capture-call-stacks.sh
|
Shell
|
apache-2.0
| 2,663 |
#!/bin/bash
export DATA_ROOT=/app/data/
export KALDI_ROOT=/kaldi_uproot/kams/kaldi
|
UFAL-DSG/kams
|
kams/docker_env.sh
|
Shell
|
apache-2.0
| 83 |
#!/bin/bash
rm -fr *.box
vagrant destroy
vagrant up
vagrant halt
vagrant package --output centos7-dev.box
|
dknoern/vagrant-centos67-dev
|
build.sh
|
Shell
|
apache-2.0
| 108 |
pkg_name=libarchive
_distname=$pkg_name
pkg_origin=core
pkg_version=3.3.2
pkg_maintainer="The Habitat Maintainers <[email protected]>"
pkg_description="Multi-format archive and compression library"
pkg_upstream_url="https://www.libarchive.org"
pkg_license=('BSD')
pkg_source="http://www.libarchive.org/downloads/${_distname}-${pkg_version}.tar.gz"
pkg_shasum="ed2dbd6954792b2c054ccf8ec4b330a54b85904a80cef477a1c74643ddafa0ce"
pkg_dirname="${_distname}-${pkg_version}"
pkg_deps=(
core/glibc
lilian/openssl
lilian/zlib
lilian/bzip2
lilian/xz
)
pkg_build_deps=(
lilian/gcc
lilian/coreutils
lilian/make
)
pkg_include_dirs=(include)
pkg_lib_dirs=(lib)
pkg_pconfig_dirs=(lib/pkgconfig)
do_build() {
./configure \
--prefix="$pkg_prefix" \
--without-xml2 \
--without-lzo2
make -j "$(nproc)"
}
do_check() {
make check
}
|
be-plans/be
|
libarchive/plan.sh
|
Shell
|
apache-2.0
| 847 |
#!/bin/bash
#
# update version here and run script
PREV_VERSION=0.6.6
NEW_VERSION=0.6.7
PREV_VERSION_SHORT=$(echo $PREV_VERSION | sed s/\\.//g)
NEW_VERSION_SHORT=$(echo $NEW_VERSION | sed s/\\.//g)
function replaceInFile {
cat "$1" | sed "s/$2/$3/g" > tmp.txt
mv tmp.txt "$1"
}
echo "Updating basic files"
# update MANIFEST.MF
replaceInFile META-INF/MANIFEST.MF $PREV_VERSION.qualifier $NEW_VERSION.qualifier
# update pom.xml
replaceInFile pom.xml $PREV_VERSION.qualifier $NEW_VERSION.qualifier
# update feature/pom.xml
replaceInFile feature/pom.xml $PREV_VERSION.qualifier $NEW_VERSION.qualifier
# update feature/feature.xml
replaceInFile feature/feature.xml $PREV_VERSION.qualifier $NEW_VERSION.qualifier
# create new updatesite directory
echo "Creating new updatesite directory $NEW_VERSION"
cp -r updatesite/$PREV_VERSION updatesite/$NEW_VERSION
# update updatesite files
echo "Updating new updatesite files"
replaceInFile updatesite/$NEW_VERSION/pom.xml $PREV_VERSION.qualifier $NEW_VERSION.qualifier
replaceInFile updatesite/$NEW_VERSION/pom.xml org.aludratest.eclipse.vde.site.version$PREV_VERSION_SHORT org.aludratest.eclipse.vde.site.version$NEW_VERSION_SHORT
replaceInFile updatesite/$NEW_VERSION/pom.xml "AludraTest VDE $PREV_VERSION Update Site" "AludraTest VDE $NEW_VERSION Update Site"
replaceInFile updatesite/$NEW_VERSION/category.xml $PREV_VERSION $NEW_VERSION
replaceInFile updatesite/pom.xml $PREV_VERSION.qualifier $NEW_VERSION.qualifier
# insert new resource XML in updatesite/pom.xml
echo "Inserting new resource XML in updatesite/pom.xml"
grep -B 500 "insert new folders here" updatesite/pom.xml | sed -n '$!p' > tmp.txt
echo " <resource>" >> tmp.txt
echo " <targetPath>$NEW_VERSION</targetPath>" >> tmp.txt
echo " <directory>$NEW_VERSION/target/repository</directory>" >> tmp.txt
echo " <filtering>false</filtering>" >> tmp.txt
echo " </resource>" >> tmp.txt
echo " <!-- insert new folders here -->" >> tmp.txt
grep -A 500 "insert new folders here" updatesite/pom.xml | tail -n "+2" >> tmp.txt
mv tmp.txt updatesite/pom.xml
# update compositeArtifacts.xml and compositeContent.xml
echo "Extending composite Repository XML files"
grep -B 500 "<child location='$PREV_VERSION'/>" updatesite/compositeContent.xml > tmp.txt
echo " <child location='$NEW_VERSION'/>" >> tmp.txt
grep -A 500 "<child location='$PREV_VERSION'/>" updatesite/compositeContent.xml | tail -n "+2" >> tmp.txt
mv tmp.txt updatesite/compositeContent.xml
grep -B 500 "<child location='$PREV_VERSION'/>" updatesite/compositeArtifacts.xml > tmp.txt
echo " <child location='$NEW_VERSION'/>" >> tmp.txt
grep -A 500 "<child location='$PREV_VERSION'/>" updatesite/compositeArtifacts.xml | tail -n "+2" >> tmp.txt
mv tmp.txt updatesite/compositeArtifacts.xml
# update child counts in both files
PREV_CHILDCOUNT=$(egrep "children size='([0-9]+)'" updatesite/compositeContent.xml | sed s/[^0-9]//g)
NEW_CHILDCOUNT=$(expr $PREV_CHILDCOUNT + 1)
echo "Updating child count to $NEW_CHILDCOUNT in XML files"
replaceInFile updatesite/compositeContent.xml "children size='$PREV_CHILDCOUNT'" "children size='$NEW_CHILDCOUNT'"
replaceInFile updatesite/compositeArtifacts.xml "children size='$PREV_CHILDCOUNT'" "children size='$NEW_CHILDCOUNT'"
echo "Done."
|
AludraTest/aludratest-eclipse-plugin
|
newversion.sh
|
Shell
|
apache-2.0
| 3,315 |
#!/bin/bash
java $JAVA_OPTS -jar build/libs/irgat-poc.jar
|
irgat-org/poc
|
run.sh
|
Shell
|
apache-2.0
| 58 |