| code stringlengths 2–1.05M | repo_name stringlengths 5–110 | path stringlengths 3–922 | language stringclasses 1 value | license stringclasses 15 values | size int64 2–1.05M |
|---|---|---|---|---|---|
#!/bin/bash
#Folders
_ALGORITHMS="Algorithms"
_UTILS="utils"
_BASH="bash"
_INSTANCE_MAKER="InstanceMaker"
_CONFIGURATION="Configuration"
_TESTS_INSTANCES="TestsInstances"
_RESULTS="Results"
_GRAPHS="Graphs"
_ALGORITHM_BIN="bin"
_F_BITMATRIX_COMPARATOR="Bitmatrix_Comparator"
#Text Files
_DDM_PARALLEL="DDM_Parallel"
_DDM_SEQUENTIAL="DDM_Sequential"
#Executable Files
_DDM_INSTANCE_MAKER="DDMInstanceMaker"
_BITMATRIX_COMPARATOR="BitmatrixComparator"
#Shell Files
_CONFIGURATION_SHELL="configure.sh"
_CREATE_INSTANCES_DEFAULT="create_instances.sh"
#Valgrind options
_VALGRIND_OUT_FILE="massif.txt"
_VALGRIND_FINAL_FILE="memoryusage.mem"
#Result Bitmatrix
_BITMATRIX_NAME="result_mat.bin"
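# Typical use (illustrative; this file only defines names): source it from a
# sibling script, e.g.
#   . "$(dirname "$0")/definitions.sh"
#   echo "Results are stored under the '$_RESULTS' folder"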
| nicholasricci/DDM_Framework | utils/bash/definitions.sh | Shell | gpl-3.0 | 693 |
#!/usr/bin/env bash
grep ' 1 ' $(pwd)/out/*.out > $(pwd)/tmp/data1.txt
grep ' 2 ' $(pwd)/out/*.out > $(pwd)/tmp/data2.txt
grep ' 3 ' $(pwd)/out/*.out > $(pwd)/tmp/data3.txt
grep ' 4 ' $(pwd)/out/*.out > $(pwd)/tmp/data4.txt
grep ' 5 ' $(pwd)/out/*.out > $(pwd)/tmp/data5.txt
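# Equivalent loop form (a sketch, assuming the same out/ and tmp/ layout):
# for i in 1 2 3 4 5; do
#   grep " $i " "$(pwd)"/out/*.out > "$(pwd)/tmp/data$i.txt"
# done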
| dywanik/APM | apm_search.sh | Shell | gpl-3.0 | 299 |
#!/bin/sh
ASSET_DIR=$(dirname $0)
EXT_VERSION="4.1.1a"
EXT_ARCHIVE="ext-$EXT_VERSION-gpl.zip"
EXT_DIR="ext-$EXT_VERSION"
ZF_VERSION="1.12.0-minimal"
ZF_ARCHIVE="ZendFramework-$ZF_VERSION.tar.gz"
ZF_DIR="ZendFramework-$ZF_VERSION"
PCHART_VERSION="2.1.3"
PCHART_ARCHIVE="pChart$PCHART_VERSION.tar"
PCHART_DIR="pChart$PCHART_VERSION"
OLE_VERSION="1.0.0RC2"
OLE_ARCHIVE="OLE-$OLE_VERSION.tgz"
OLE_DIR="OLE-$OLE_VERSION"
EXCEL_WRITER_VERSION="0.9.3"
EXCEL_WRITER_ARCHIVE="Spreadsheet_Excel_Writer-$EXCEL_WRITER_VERSION.tgz"
EXCEL_WRITER_DIR="Spreadsheet_Excel_Writer-$EXCEL_WRITER_VERSION"
cd $ASSET_DIR
unzip $EXT_ARCHIVE
cp -rf $EXT_DIR/ext-all.js ../html/js
rm -rf ../html/resources
cp -rf $EXT_DIR/resources ../html
rm -rf $EXT_DIR
find ../html/resources -type f | xargs chmod -x
tar zxvf $ZF_ARCHIVE
rm -rf ../lib/Zend
cp -rf $ZF_DIR/library/Zend ../lib
rm -rf $ZF_DIR
find ../lib/Zend -type f | xargs chmod -x
tar xvf $PCHART_ARCHIVE
rm -rf ../lib/pChart
mkdir ../lib/pChart
cp -rf $PCHART_DIR/class ../lib/pChart
cp -rf $PCHART_DIR/fonts ../lib/pChart
rm -rf $PCHART_DIR
find ../lib/pChart -type f | xargs chmod -x
tar xvf $OLE_ARCHIVE
rm -rf ../lib/OLE*
cp -rf $OLE_DIR/OLE* ../lib
rm -rf $OLE_DIR package.xml
chmod -x ../lib/OLE.php
find ../lib/OLE -type f | xargs chmod -x
tar xvf $EXCEL_WRITER_ARCHIVE
rm -rf ../lib/Spreadsheet
cp -rf $EXCEL_WRITER_DIR/Spreadsheet ../lib
rm -rf $EXCEL_WRITER_DIR package.xml
exit 0
| ubccr/ubmod | portal/assets/setup.sh | Shell | gpl-3.0 | 1,434 |
#Crypt - Entropy
#Resource: https://random-notes-of-a-sysadmin.blogspot.com/2016/04/is-raspberry-pi-suitable-and-safe-to.html?m=1
#Resource: https://blog.cloudflare.com/ensuring-randomness-with-linuxs-random-number-generator/
#Resource: https://www.2uo.de/myths-about-urandom/
#Resource: https://security.stackexchange.com/questions/47598/why-openssl-cant-use-dev-random-directly
#Resource: https://lwn.net/Articles/261804/
#Resource: https://security.stackexchange.com/questions/69423/add-a-file-as-entropy-source-for-dev-random
#Resource: http://vk5tu.livejournal.com/43059.html
#Assumption
#Since /dev/urandom is a hash chain seeded from /dev/random, you could actually predict the next numbers if you knew the seed. If you have enough control over the entropy pool, then from the output of /dev/urandom you might be able to guess this seed, which would enable you to predict all subsequent numbers from /dev/urandom, but only if you keep /dev/random exhausted; otherwise /dev/urandom will be reseeded.
####################################
#Setup
sudo apt-get update
sudo apt-get install -y netpbm
sudo apt-get install -y pv
sudo apt-get install -y rng-tools
#Check for hardware entropy service
#ps -e | grep rn #Look for rngd
#We just need the tools, not the service itself
sudo /etc/init.d/rng-tools stop
sudo update-rc.d rng-tools remove
sudo date -s '@99999'
sudo reboot
####################################
####################################
#Run Entropy Monitors
#Check the amount of entropy in the pool
while sleep 3; do cat /proc/sys/kernel/random/entropy_avail; done
#Start FIPS test. Note: a failure rate of about .01% is normal
sudo cat /dev/urandom | rngtest -t 2
####################################
####################################
#Try to empty the entropy pool using /dev/random. Do this in multiple instances of "screen"
sudo nice -n -20 dd if=/dev/random | pv | dd of=/dev/null
####################################
####################################
#Reduce Entropy Source by setting the date to a time close to epoch
sudo systemctl disable systemd-timesyncd.service
date
#date -d '@2270'
sudo date -s '@99999'
date
####################################
####################################
#Reduce Entropy by seeding low entropy data into the entropy pool using urandom itself.
#This should work if the pool remains low and /dev/random remains exhausted
#rngd --foreground --rng-device=/dev/zero --rng-quality=low --random-step=2500
sudo rngd --foreground --rng-device=/dev/urandom --rng-quality=low --random-step=2500
####################################
####################################
#Do FIPs test
sudo cat /dev/urandom | rngtest -c 1000
####################################
| danman10000/ics355_demos | Entropy.sh | Shell | gpl-3.0 | 2,734 |
#!/bin/sh
nweb 80 /srv/www
printf '\033[1m%s\033[0m\n' " 'nweb' has been started on port 80, serving '/srv/www'."
| ivandavidov/minimal | src/minimal_overlay/bundles/nweb/90_nweb.sh | Shell | gpl-3.0 | 112 |
#! /bin/sh
for NME in data*
do
7z a -t7z -mx9 "$NME.7z" "$NME"
done
| sialan-labs/Meikade | tools/zipper.sh | Shell | gpl-3.0 | 69 |
#!/bin/bash
set -eux
# shellcheck source=tests/lib/dirs.sh
. "$TESTSLIB/dirs.sh"
# shellcheck source=tests/lib/snaps.sh
. "$TESTSLIB/snaps.sh"
# shellcheck source=tests/lib/pkgdb.sh
. "$TESTSLIB/pkgdb.sh"
disable_kernel_rate_limiting() {
# kernel rate limiting hinders debugging security policy so turn it off
echo "Turning off kernel rate-limiting"
# TODO: we should be able to run the tests with rate limiting disabled so
# debug output is robust, but we currently can't :(
echo "SKIPPED: see https://forum.snapcraft.io/t/snapd-spread-tests-should-be-able-to-run-with-kernel-rate-limiting-disabled/424"
#sysctl -w kernel.printk_ratelimit=0
}
update_core_snap_for_classic_reexec() {
# it is possible to disable this to test that snapd (the deb) works
# fine with whatever is in the core snap
if [ "$MODIFY_CORE_SNAP_FOR_REEXEC" != "1" ]; then
echo "Not modifying the core snap as requested via MODIFY_CORE_SNAP_FOR_REEXEC"
return
fi
# We want to use the in-tree snap/snapd/snap-exec/snapctl, because
# we re-exec by default.
# To accomplish that, we'll just unpack the core we just grabbed,
# shove the new snap-exec and snapctl in there, and repack it.
# First of all, unmount the core
core="$(readlink -f "$SNAPMOUNTDIR/core/current" || readlink -f "$SNAPMOUNTDIR/ubuntu-core/current")"
snap="$(mount | grep " $core" | awk '{print $1}')"
umount --verbose "$core"
# Now unpack the core, inject the new snap-exec/snapctl into it
unsquashfs "$snap"
# clean the old snapd libexec binaries, just in case
rm squashfs-root/usr/lib/snapd/*
# and copy in the current ones
cp -a "$LIBEXECDIR"/snapd/* squashfs-root/usr/lib/snapd/
# also the binaries themselves
cp -a /usr/bin/{snap,snapctl} squashfs-root/usr/bin/
case "$SPREAD_SYSTEM" in
ubuntu-*|debian-*)
# and snap-confine's apparmor
if [ -e /etc/apparmor.d/usr.lib.snapd.snap-confine.real ]; then
cp -a /etc/apparmor.d/usr.lib.snapd.snap-confine.real squashfs-root/etc/apparmor.d/usr.lib.snapd.snap-confine.real
else
cp -a /etc/apparmor.d/usr.lib.snapd.snap-confine squashfs-root/etc/apparmor.d/usr.lib.snapd.snap-confine.real
fi
;;
esac
# repack, cheating to speed things up (4sec vs 1.5min)
mv "$snap" "${snap}.orig"
mksnap_fast "squashfs-root" "$snap"
rm -rf squashfs-root
# Now mount the new core snap, first discarding the old mount namespace
$LIBEXECDIR/snapd/snap-discard-ns core
mount "$snap" "$core"
check_file() {
if ! cmp "$1" "$2" ; then
echo "$1 in tree and $2 in core snap are unexpectedly not the same"
exit 1
fi
}
# Make sure we're running with the correct copied bits
for p in "$LIBEXECDIR/snapd/snap-exec" "$LIBEXECDIR/snapd/snap-confine" "$LIBEXECDIR/snapd/snap-discard-ns" "$LIBEXECDIR/snapd/snapd"; do
check_file "$p" "$core/usr/lib/snapd/$(basename "$p")"
done
for p in /usr/bin/snapctl /usr/bin/snap; do
check_file "$p" "$core$p"
done
}
prepare_each_classic() {
mkdir -p /etc/systemd/system/snapd.service.d
if [ -z "${SNAP_REEXEC:-}" ]; then
rm -f /etc/systemd/system/snapd.service.d/reexec.conf
else
cat <<EOF > /etc/systemd/system/snapd.service.d/reexec.conf
[Service]
Environment=SNAP_REEXEC=$SNAP_REEXEC
EOF
fi
if [ ! -f /etc/systemd/system/snapd.service.d/local.conf ]; then
echo "/etc/systemd/system/snapd.service.d/local.conf vanished!"
exit 1
fi
}
prepare_classic() {
distro_install_build_snapd
if snap --version | MATCH unknown; then
echo "Package build incorrect, 'snap --version' mentions 'unknown'"
snap --version
distro_query_package_info snapd
exit 1
fi
if "$LIBEXECDIR/snapd/snap-confine" --version | MATCH unknown; then
echo "Package build incorrect, 'snap-confine --version' mentions 'unknown'"
$LIBEXECDIR/snapd/snap-confine --version
case "$SPREAD_SYSTEM" in
ubuntu-*|debian-*)
apt-cache policy snapd
;;
fedora-*)
dnf info snapd
;;
esac
exit 1
fi
START_LIMIT_INTERVAL="StartLimitInterval=0"
if [[ "$SPREAD_SYSTEM" = opensuse-42.2-* ]]; then
# StartLimitInterval is not supported by the systemd version
# openSUSE 42.2 ships.
START_LIMIT_INTERVAL=""
fi
mkdir -p /etc/systemd/system/snapd.service.d
cat <<EOF > /etc/systemd/system/snapd.service.d/local.conf
[Unit]
$START_LIMIT_INTERVAL
[Service]
Environment=SNAPD_DEBUG_HTTP=7 SNAPD_DEBUG=1 SNAPPY_TESTING=1 SNAPD_CONFIGURE_HOOK_TIMEOUT=30s
EOF
mkdir -p /etc/systemd/system/snapd.socket.d
cat <<EOF > /etc/systemd/system/snapd.socket.d/local.conf
[Unit]
$START_LIMIT_INTERVAL
EOF
# We change the service configuration so reload and restart
# the snapd socket unit to get them applied
systemctl daemon-reload
systemctl restart snapd.socket
if [ "$REMOTE_STORE" = staging ]; then
# shellcheck source=tests/lib/store.sh
. "$TESTSLIB/store.sh"
setup_staging_store
fi
# Snapshot the state including core.
if [ ! -f "$SPREAD_PATH/snapd-state.tar.gz" ]; then
! snap list | grep core || exit 1
# use parameterized core channel (defaults to edge) instead
# of a fixed one and close to stable in order to detect defects
# earlier
snap install --"$CORE_CHANNEL" core
snap list | grep core
systemctl stop snapd.{service,socket}
update_core_snap_for_classic_reexec
systemctl start snapd.{service,socket}
# ensure no auto-refresh happens during the tests
if [ -e /snap/core/current/meta/hooks/configure ]; then
snap set core refresh.schedule="$(date +%a --date=2days)@12:00-14:00"
snap set core refresh.disabled=true
fi
GRUB_EDITENV=grub-editenv
case "$SPREAD_SYSTEM" in
fedora-*|opensuse-*)
GRUB_EDITENV=grub2-editenv
;;
esac
echo "Ensure that the grub-editenv list output does not contain any of the snap_* variables on classic"
output=$($GRUB_EDITENV list)
if echo $output | MATCH snap_ ; then
echo "Expected grub environment without snap_*, got:"
echo "$output"
exit 1
fi
systemctl stop snapd.{service,socket}
systemctl daemon-reload
escaped_snap_mount_dir="$(systemd-escape --path "$SNAPMOUNTDIR")"
mounts="$(systemctl list-unit-files --full | grep "^$escaped_snap_mount_dir[-.].*\.mount" | cut -f1 -d ' ')"
services="$(systemctl list-unit-files --full | grep "^$escaped_snap_mount_dir[-.].*\.service" | cut -f1 -d ' ')"
for unit in $services $mounts; do
systemctl stop "$unit"
done
snapd_env="/etc/environment /etc/systemd/system/snapd.service.d /etc/systemd/system/snapd.socket.d"
tar czf "$SPREAD_PATH"/snapd-state.tar.gz /var/lib/snapd "$SNAPMOUNTDIR" /etc/systemd/system/"$escaped_snap_mount_dir"-*core*.mount $snapd_env
systemctl daemon-reload # Workaround for http://paste.ubuntu.com/17735820/
core="$(readlink -f "$SNAPMOUNTDIR/core/current")"
# on 14.04 it is possible that the core snap is still mounted at this point, unmount
# to prevent errors starting the mount unit
if [[ "$SPREAD_SYSTEM" = ubuntu-14.04-* ]] && mount | grep -q "$core"; then
umount "$core" || true
fi
for unit in $mounts $services; do
systemctl start "$unit"
done
systemctl start snapd.socket
fi
if [[ "$SPREAD_SYSTEM" == debian-* || "$SPREAD_SYSTEM" == ubuntu-* ]]; then
if [[ "$SPREAD_SYSTEM" == ubuntu-* ]]; then
quiet apt install -y -q pollinate
pollinate
fi
# Improve entropy for the whole system quite a lot to get fast
# key generation during our test cycles
apt-get install -y -q rng-tools
echo "HRNGDEVICE=/dev/urandom" > /etc/default/rng-tools
/etc/init.d/rng-tools restart
mkdir -p /etc/systemd/system/rng-tools.service.d/
cat <<EOF > /etc/systemd/system/rng-tools.service.d/local.conf
[Service]
Restart=always
EOF
systemctl daemon-reload
fi
disable_kernel_rate_limiting
}
setup_reflash_magic() {
# install the stuff we need
distro_install_package kpartx busybox-static
distro_install_local_package "$GOHOME"/snapd_*.deb
distro_clean_package_cache
snap install "--${CORE_CHANNEL}" core
# install ubuntu-image
snap install --classic --edge ubuntu-image
# needs to be under /home because ubuntu-device-flash
# uses snap-confine and that will hide parts of the hostfs
IMAGE_HOME=/home/image
mkdir -p "$IMAGE_HOME"
# modify the core snap so that the current root-pw works there
# for spread to do the first login
UNPACKD="/tmp/core-snap"
unsquashfs -d "$UNPACKD" /var/lib/snapd/snaps/core_*.snap
# FIXME: netplan workaround
mkdir -p "$UNPACKD/etc/netplan"
# set root pw by concating root line from host and rest from core
want_pw="$(grep ^root /etc/shadow)"
echo "$want_pw" > /tmp/new-shadow
tail -n +2 /etc/shadow >> /tmp/new-shadow
cp -v /tmp/new-shadow "$UNPACKD/etc/shadow"
cp -v /etc/passwd "$UNPACKD/etc/passwd"
# ensure spread -reuse works in the core image as well
if [ -e /.spread.yaml ]; then
cp -av /.spread.yaml "$UNPACKD"
fi
# we need the test user in the image
# see the comment in spread.yaml about 12345
sed -i 's/^test.*$//' "$UNPACKD"/etc/{shadow,passwd}
chroot "$UNPACKD" addgroup --quiet --gid 12345 test
chroot "$UNPACKD" adduser --quiet --no-create-home --uid 12345 --gid 12345 --disabled-password --gecos '' test
echo 'test ALL=(ALL) NOPASSWD:ALL' >> "$UNPACKD/etc/sudoers.d/99-test-user"
echo 'ubuntu ALL=(ALL) NOPASSWD:ALL' >> "$UNPACKD/etc/sudoers.d/99-ubuntu-user"
# modify sshd so that we can connect as root
sed -i 's/\(PermitRootLogin\|PasswordAuthentication\)\>.*/\1 yes/' "$UNPACKD/etc/ssh/sshd_config"
# FIXME: install would be better but we don't have dpkg on
# the image
# unpack our freshly build snapd into the new core snap
dpkg-deb -x "$SPREAD_PATH"/../snapd_*.deb "$UNPACKD"
# ensure any new timer units are available
cp -a /etc/systemd/system/timers.target.wants/*.timer "$UNPACKD/etc/systemd/system/timers.target.wants"
# add gpio and iio slots
cat >> "$UNPACKD/meta/snap.yaml" <<-EOF
slots:
gpio-pin:
interface: gpio
number: 100
direction: out
iio0:
interface: iio
path: /dev/iio:device0
EOF
# build new core snap for the image
snapbuild "$UNPACKD" "$IMAGE_HOME"
# FIXME: fetch directly once its in the assertion service
cp "$TESTSLIB/assertions/pc-${REMOTE_STORE}.model" "$IMAGE_HOME/pc.model"
# FIXME: how to test store updated of ubuntu-core with sideloaded snap?
IMAGE=all-snap-amd64.img
# ensure that ubuntu-image is using our test-build of snapd with the
# test keys and not the bundled version of usr/bin/snap from the snap.
# Note that we can not put it into /usr/bin as '/usr' is different
# when the snap uses confinement.
cp /usr/bin/snap "$IMAGE_HOME"
export UBUNTU_IMAGE_SNAP_CMD="$IMAGE_HOME/snap"
EXTRA_FUNDAMENTAL=
IMAGE_CHANNEL=edge
if [ "$KERNEL_CHANNEL" = "$GADGET_CHANNEL" ]; then
IMAGE_CHANNEL="$KERNEL_CHANNEL"
else
# download pc-kernel snap for the specified channel and set ubuntu-image channel
# to gadget, so that we don't need to download it
snap download --channel="$KERNEL_CHANNEL" pc-kernel
EXTRA_FUNDAMENTAL="--extra-snaps $PWD/pc-kernel_*.snap"
IMAGE_CHANNEL="$GADGET_CHANNEL"
fi
/snap/bin/ubuntu-image -w "$IMAGE_HOME" "$IMAGE_HOME/pc.model" \
--channel "$IMAGE_CHANNEL" \
"$EXTRA_FUNDAMENTAL" \
--extra-snaps "$IMAGE_HOME"/core_*.snap \
--output "$IMAGE_HOME/$IMAGE"
rm -f ./pc-kernel_*.{snap,assert} ./pc_*.{snap,assert}
# mount fresh image and add all our SPREAD_PROJECT data
kpartx -avs "$IMAGE_HOME/$IMAGE"
# FIXME: hardcoded mapper location, parse from kpartx
mount /dev/mapper/loop2p3 /mnt
mkdir -p /mnt/user-data/
cp -ar /home/gopath /mnt/user-data/
# create test user home dir
mkdir -p /mnt/user-data/test
# using symbolic names requires test:test have the same ids
# inside and outside which is a pain (see 12345 above), but
# using the ids directly is the wrong kind of fragile
chown --verbose test:test /mnt/user-data/test
# we do what sync-dirs is normally doing on boot, but because
# we have subdirs/files in /etc/systemd/system (created below)
# the writeable-path sync-boot won't work
mkdir -p /mnt/system-data/etc/systemd
(cd /tmp ; unsquashfs -v "$IMAGE_HOME"/core_*.snap etc/systemd/system)
cp -avr /tmp/squashfs-root/etc/systemd/system /mnt/system-data/etc/systemd/
# FIXUP silly systemd
mkdir -p /mnt/system-data/etc/systemd/system/snapd.service.d
cat <<EOF > /mnt/system-data/etc/systemd/system/snapd.service.d/local.conf
[Unit]
StartLimitInterval=0
[Service]
Environment=SNAPD_DEBUG_HTTP=7 SNAPD_DEBUG=1 SNAPPY_TESTING=1 SNAPPY_USE_STAGING_STORE=$SNAPPY_USE_STAGING_STORE
ExecStartPre=/bin/touch /dev/iio:device0
EOF
mkdir -p /mnt/system-data/etc/systemd/system/snapd.socket.d
cat <<EOF > /mnt/system-data/etc/systemd/system/snapd.socket.d/local.conf
[Unit]
StartLimitInterval=0
EOF
umount /mnt
kpartx -d "$IMAGE_HOME/$IMAGE"
# the reflash magic
# FIXME: ideally in initrd, but this is good enough for now
cat > "$IMAGE_HOME/reflash.sh" << EOF
#!/bin/sh -ex
mount -t tmpfs none /tmp
cp /bin/busybox /tmp
cp $IMAGE_HOME/$IMAGE /tmp
sync
# blow away everything
/tmp/busybox dd if=/tmp/$IMAGE of=/dev/sda bs=4M
# and reboot
/tmp/busybox sync
/tmp/busybox echo b > /proc/sysrq-trigger
EOF
chmod +x "$IMAGE_HOME/reflash.sh"
# extract ROOT from /proc/cmdline
ROOT=$(sed -e 's/^.*root=//' -e 's/ .*$//' /proc/cmdline)
cat >/boot/grub/grub.cfg <<EOF
set default=0
set timeout=2
menuentry 'flash-all-snaps' {
linux /vmlinuz root=$ROOT ro init=$IMAGE_HOME/reflash.sh console=ttyS0
initrd /initrd.img
}
EOF
}
prepare_all_snap() {
# we are still a "classic" image, prepare the surgery
if [ -e /var/lib/dpkg/status ]; then
setup_reflash_magic
REBOOT
fi
# verify after the first reboot that we are now in the all-snap world
if [ "$SPREAD_REBOOT" = 1 ]; then
echo "Ensure we are now in an all-snap world"
if [ -e /var/lib/dpkg/status ]; then
echo "Rebooting into all-snap system did not work"
exit 1
fi
fi
echo "Wait for firstboot change to be ready"
while ! snap changes | grep "Done"; do
snap changes || true
snap change 1 || true
sleep 1
done
echo "Ensure fundamental snaps are still present"
# shellcheck source=tests/lib/names.sh
. "$TESTSLIB/names.sh"
for name in "$gadget_name" "$kernel_name" core; do
if ! snap list "$name"; then
echo "Not all fundamental snaps are available, all-snap image not valid"
echo "Currently installed snaps"
snap list
exit 1
fi
done
# ensure no auto-refresh happens during the tests
if [ -e /snap/core/current/meta/hooks/configure ]; then
snap set core refresh.schedule="$(date +%a --date=2days)@12:00-14:00"
snap set core refresh.disabled=true
fi
# Snapshot the fresh state (including boot/bootenv)
if [ ! -f "$SPREAD_PATH/snapd-state.tar.gz" ]; then
# we need to ensure that we also restore the boot environment
# fully for tests that break it
BOOT=""
if ls /boot/uboot/*; then
BOOT=/boot/uboot/
elif ls /boot/grub/*; then
BOOT=/boot/grub/
else
echo "Cannot determine bootdir in /boot:"
ls /boot
exit 1
fi
systemctl stop snapd.service snapd.socket
tar czf "$SPREAD_PATH/snapd-state.tar.gz" /var/lib/snapd $BOOT
systemctl start snapd.socket
fi
disable_kernel_rate_limiting
}
| morphis/snapd | tests/lib/prepare.sh | Shell | gpl-3.0 | 17,131 |
#!/bin/sh
##############################################################
# Do not edit. Managed by Puppet. Changes will be wiped out. #
##############################################################
# /etc/cron.daily/puppet-gitpull.sh
#
# Pull updates on daily basis from my GitHub dl380g7 repository
# (the remote repository is defined when git is configured)
##############################################################
# Only do anything if git is actually installed
if [ ! -x /usr/bin/git ]; then
exit 0
fi
# Only do anything if puppet server is actually installed
if [ -d "/etc/puppet" ]; then
cd /etc/puppet
/usr/bin/git pull
fi
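# Deployment note (illustrative): install as an executable under /etc/cron.daily/
# so run-parts executes it daily, e.g.
#   install -m 0755 puppet-gitpull.sh /etc/cron.daily/puppet-gitpull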
| berrak/dl380g7 | modules/hp_puppetize/files/puppet-gitpull.sh | Shell | gpl-3.0 | 646 |
#!/bin/bash
# A naive Bash script for building the executable: mki3dgame
# It is assumed that you are using bash shell and you have:
# - Go compiler installed, and
# - $GOPATH set properly, and
# - the Go packages 'github.com/go-gl/{gl,glfw,mathgl}' and 'golang.org/x' installed
while getopts ":u" opt; do
  case $opt in
    u)
      echo 'Updating Go packages ...'
      go get -u -v # update Go packages
      ;;
  esac
done
echo 'Compiling mki3dgame ...'
go build # build locally the executable 'mki3dgame'
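# Usage (illustrative):
#   ./make-mki3game.bash        # just build
#   ./make-mki3game.bash -u     # update Go packages first, then build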
| mki1967/mki3dgame | make-mki3game.bash | Shell | gpl-3.0 | 478 |
autoreconf -if || exit $?
test -n "$NOCONFIGURE" || ./configure "$@"
| ptomato/gt | autogen.sh | Shell | gpl-3.0 | 65 |
#!/usr/bin/bash
# By Fahad Alduraibi 2015
# http://www.fadvisor.net
# Get your API key from Google:
# https://developers.google.com/url-shortener/v1/getting_started#APIKey
API_key='PUT YOUR KEY HERE'
GoogleURL='https://www.googleapis.com/urlshortener/v1/url?key='$API_key
longURL=$1
# -f to suppress fail messages, -s (silent) to suppress progress reporting; remove
# them for debugging.
curl -f -s $GoogleURL -H 'Content-Type: application/json' -d "{'longUrl': '$longURL'}" | awk '/id/ { printf $2}' | awk -F\" '{ printf $2 }'
# If you want the output to be followed by a new line, replace the second 'printf' with 'print' in the awk statement
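# For example (same pipeline, newline-terminated output):
# curl -f -s $GoogleURL -H 'Content-Type: application/json' -d "{'longUrl': '$longURL'}" | awk '/id/ { printf $2}' | awk -F\" '{ print $2 }'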
| fduraibi/Google-URL-Shortener | gshort.sh | Shell | gpl-3.0 | 647 |
#!/bin/sh
# Ensure that cat -E produces same output as cat, modulo '$'s,
# even when applied to a file in /proc.
# Copyright (C) 2006-2019 Free Software Foundation, Inc.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
. "${srcdir=.}/tests/init.sh"; path_prepend_ ./src
print_ver_ cat
f=/proc/cpuinfo
test -f $f \
|| skip_ "no $f"
# Yes, parts of /proc/cpuinfo might change between cat runs.
# If that happens, consider choosing a file that's less likely to change,
# or just filter out the changing lines. The sed filter should help
# to avoid any spurious numeric differences.
cat -E $f | sed 's/[0-9][0-9]*/D/g' | tr -d '$' > out || fail=1
cat $f | sed 's/[0-9][0-9]*/D/g' | tr -d '$' > exp || fail=1
compare exp out || fail=1
Exit $fail
| komh/coreutils-os2 | tests/misc/cat-proc.sh | Shell | gpl-3.0 | 1,340 |
#!/bin/sh
# Enable the 'mdev' hotplug manager.
echo /sbin/mdev > /proc/sys/kernel/hotplug
# Initial execution of the 'mdev' hotplug manager.
/sbin/mdev -s
printf '\033[1m%s\033[0m\n' " The 'mdev' hotplug manager is active."
| ivandavidov/minimal | src/minimal_overlay/bundles/kernel_modules/10_modules.sh | Shell | gpl-3.0 | 222 |
#!/bin/bash -v
OPTIONS="-O3 -march=native"
CLANG_OPTIONS="-O3 -march=native"
g++ $OPTIONS card-raytracer.cpp -o card-raytracer-cpp.exe
g++ $OPTIONS card-raytracer-rwolf.cpp -o card-raytracer-rwolf-cpp.exe
g++ $OPTIONS card-raytracer-opt.cpp -o card-raytracer-opt-cpp.exe
clang++ $CLANG_OPTIONS card-raytracer.cpp -o card-raytracer-cpp.clang.exe
clang++ $CLANG_OPTIONS card-raytracer-rwolf.cpp -o card-raytracer-rwolf-cpp.clang.exe
clang++ $CLANG_OPTIONS card-raytracer-opt.cpp -o card-raytracer-opt-cpp.clang.exe
| Mark-Kovalyov/CardRaytracerBenchmark | cpp/make.sh | Shell | gpl-3.0 | 534 |
#!/bin/bash
# Let's make the user give us a target to work with.
# architecture is assumed universal if not specified, and is optional.
# if arch is defined, it we will store the .app bundle in the target arch build directory
if [ $# == 0 ] || [ $# -gt 2 ]; then
echo "Usage: $0 target <arch>"
echo "Example: $0 release x86"
echo "Valid targets are:"
echo " release"
echo " debug"
echo
echo "Optional architectures are:"
echo " x86"
echo " x86_64"
echo " ppc"
echo " arm64"
echo
exit 1
fi
# validate target name
if [ "$1" == "release" ]; then
TARGET_NAME="release"
elif [ "$1" == "debug" ]; then
TARGET_NAME="debug"
else
echo "Invalid target: $1"
echo "Valid targets are:"
echo " release"
echo " debug"
exit 1
fi
CURRENT_ARCH=""
# validate the architecture if it was specified
if [ "$2" != "" ]; then
if [ "$2" == "x86" ]; then
CURRENT_ARCH="x86"
elif [ "$2" == "x86_64" ]; then
CURRENT_ARCH="x86_64"
elif [ "$2" == "ppc" ]; then
CURRENT_ARCH="ppc"
elif [ "$2" == "arm64" ]; then
CURRENT_ARCH="arm64"
else
echo "Invalid architecture: $2"
echo "Valid architectures are:"
echo " x86"
echo " x86_64"
echo " ppc"
echo " arm64"
echo
exit 1
fi
fi
# symlinkArch() creates a symlink with the architecture suffix.
# meant for universal binaries, but also handles the way this script generates
# application bundles for a single architecture as well.
function symlinkArch()
{
EXT="dylib"
SEP="${3}"
SRCFILE="${1}"
DSTFILE="${2}${SEP}"
DSTPATH="${4}"
if [ ! -e "${DSTPATH}/${SRCFILE}.${EXT}" ]; then
echo "**** ERROR: missing ${SRCFILE}.${EXT} from ${MACOS}"
exit 1
fi
if [ ! -d "${DSTPATH}" ]; then
echo "**** ERROR: path not found ${DSTPATH}"
exit 1
fi
pushd "${DSTPATH}" > /dev/null
IS32=`file "${SRCFILE}.${EXT}" | grep "i386"`
IS64=`file "${SRCFILE}.${EXT}" | grep "x86_64"`
ISPPC=`file "${SRCFILE}.${EXT}" | grep "ppc"`
ISARM=`file "${SRCFILE}.${EXT}" | grep "arm64"`
if [ "${IS32}" != "" ]; then
if [ ! -L "${DSTFILE}i386.${EXT}" ]; then
ln -s "${SRCFILE}.${EXT}" "${DSTFILE}i386.${EXT}"
fi
elif [ -L "${DSTFILE}i386.${EXT}" ]; then
rm "${DSTFILE}i386.${EXT}"
fi
if [ "${IS64}" != "" ]; then
if [ ! -L "${DSTFILE}x86_64.${EXT}" ]; then
ln -s "${SRCFILE}.${EXT}" "${DSTFILE}x86_64.${EXT}"
fi
elif [ -L "${DSTFILE}x86_64.${EXT}" ]; then
rm "${DSTFILE}x86_64.${EXT}"
fi
if [ "${ISPPC}" != "" ]; then
if [ ! -L "${DSTFILE}ppc.${EXT}" ]; then
ln -s "${SRCFILE}.${EXT}" "${DSTFILE}ppc.${EXT}"
fi
elif [ -L "${DSTFILE}ppc.${EXT}" ]; then
rm "${DSTFILE}ppc.${EXT}"
fi
if [ "${ISARM}" != "" ]; then
if [ ! -L "${DSTFILE}arm64.${EXT}" ]; then
ln -s "${SRCFILE}.${EXT}" "${DSTFILE}arm64.${EXT}"
fi
elif [ -L "${DSTFILE}arm64.${EXT}" ]; then
rm "${DSTFILE}arm64.${EXT}"
fi
popd > /dev/null
}
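# Example (illustrative): after a renderer dylib has been copied into the
# bundle, create its per-architecture symlinks, e.g.
#   symlinkArch "renderer_sp_opengl1" "renderer_sp_opengl1" "_" "${BUNDLEBINDIR}"
# which, for a binary containing x86_64 code, yields
#   renderer_sp_opengl1_x86_64.dylib -> renderer_sp_opengl1.dylib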
SEARCH_ARCHS=" \
x86 \
x86_64 \
ppc \
arm64 \
"
HAS_LIPO=`command -v lipo`
HAS_CP=`command -v cp`
# if lipo is not available, we cannot make a universal binary, print a warning
if [ ! -x "${HAS_LIPO}" ] && [ "${CURRENT_ARCH}" == "" ]; then
CURRENT_ARCH=`uname -m`
if [ "${CURRENT_ARCH}" == "i386" ]; then CURRENT_ARCH="x86"; fi
echo "$0 cannot make a universal binary, falling back to architecture ${CURRENT_ARCH}"
fi
# if the optional arch parameter is used, replace SEARCH_ARCHS to only work with one
if [ "${CURRENT_ARCH}" != "" ]; then
SEARCH_ARCHS="${CURRENT_ARCH}"
fi
AVAILABLE_ARCHS=""
IORTCW_VERSION=`grep '^VERSION=' Makefile | sed -e 's/.*=\(.*\)/\1/'`
IORTCW_CLIENT_ARCHS=""
IORTCW_RENDERER_GL1_ARCHS=""
IORTCW_RENDERER_GL2_ARCHS=""
IORTCW_CGAME_ARCHS=""
IORTCW_GAME_ARCHS=""
IORTCW_UI_ARCHS=""
BASEDIR="main"
CGAME="cgame.sp"
GAME="qagame.sp"
UI="ui.sp"
RENDERER_OPENGL="renderer_sp_opengl1"
RENDERER_OPENGL2="renderer_sp_rend2"
CGAME_NAME="${CGAME}.dylib"
GAME_NAME="${GAME}.dylib"
UI_NAME="${UI}.dylib"
RENDERER_OPENGL1_NAME="renderer_sp_opengl1.dylib"
RENDERER_OPENGL2_NAME="renderer_sp_rend2.dylib"
ICNSDIR="misc"
ICNS="iortcw.icns"
PKGINFO="APPLIORTCW"
OBJROOT="build"
#BUILT_PRODUCTS_DIR="${OBJROOT}/${TARGET_NAME}-darwin-${CURRENT_ARCH}"
PRODUCT_NAME="iowolfsp"
WRAPPER_EXTENSION="app"
WRAPPER_NAME="${PRODUCT_NAME}.${WRAPPER_EXTENSION}"
CONTENTS_FOLDER_PATH="${WRAPPER_NAME}/Contents"
UNLOCALIZED_RESOURCES_FOLDER_PATH="${CONTENTS_FOLDER_PATH}/Resources"
EXECUTABLE_FOLDER_PATH="${CONTENTS_FOLDER_PATH}/MacOS"
EXECUTABLE_NAME="${PRODUCT_NAME}"
# loop through the architectures to build string lists for each universal binary
for ARCH in $SEARCH_ARCHS; do
CURRENT_ARCH=${ARCH}
if [ ${CURRENT_ARCH} == "x86" ]; then FILE_ARCH="i386"; fi
if [ ${CURRENT_ARCH} == "x86_64" ]; then FILE_ARCH="x86_64"; fi
if [ ${CURRENT_ARCH} == "ppc" ]; then FILE_ARCH="ppc"; fi
if [ ${CURRENT_ARCH} == "arm64" ]; then FILE_ARCH="arm64"; fi
BUILT_PRODUCTS_DIR="${OBJROOT}/${TARGET_NAME}-darwin-${CURRENT_ARCH}"
IORTCW_CLIENT="${EXECUTABLE_NAME}.${CURRENT_ARCH}"
IORTCW_RENDERER_GL1="${RENDERER_OPENGL}_${FILE_ARCH}.dylib"
IORTCW_RENDERER_GL2="${RENDERER_OPENGL2}_${FILE_ARCH}.dylib"
IORTCW_CGAME="${CGAME}.${FILE_ARCH}.dylib"
IORTCW_GAME="${GAME}.${FILE_ARCH}.dylib"
IORTCW_UI="${UI}.${FILE_ARCH}.dylib"
if [ ! -d ${BUILT_PRODUCTS_DIR} ]; then
CURRENT_ARCH=""
BUILT_PRODUCTS_DIR=""
continue
fi
# executables
if [ -e ${BUILT_PRODUCTS_DIR}/${IORTCW_CLIENT} ]; then
IORTCW_CLIENT_ARCHS="${BUILT_PRODUCTS_DIR}/${IORTCW_CLIENT} ${IORTCW_CLIENT_ARCHS}"
VALID_ARCHS="${ARCH} ${VALID_ARCHS}"
else
continue
fi
# renderers
if [ -e ${BUILT_PRODUCTS_DIR}/${IORTCW_RENDERER_GL1} ]; then
IORTCW_RENDERER_GL1_ARCHS="${BUILT_PRODUCTS_DIR}/${IORTCW_RENDERER_GL1} ${IORTCW_RENDERER_GL1_ARCHS}"
fi
if [ -e ${BUILT_PRODUCTS_DIR}/${IORTCW_RENDERER_GL2} ]; then
IORTCW_RENDERER_GL2_ARCHS="${BUILT_PRODUCTS_DIR}/${IORTCW_RENDERER_GL2} ${IORTCW_RENDERER_GL2_ARCHS}"
fi
# game
if [ -e ${BUILT_PRODUCTS_DIR}/${BASEDIR}/${IORTCW_CGAME} ]; then
IORTCW_CGAME_ARCHS="${BUILT_PRODUCTS_DIR}/${BASEDIR}/${IORTCW_CGAME} ${IORTCW_CGAME_ARCHS}"
fi
if [ -e ${BUILT_PRODUCTS_DIR}/${BASEDIR}/${IORTCW_GAME} ]; then
IORTCW_GAME_ARCHS="${BUILT_PRODUCTS_DIR}/${BASEDIR}/${IORTCW_GAME} ${IORTCW_GAME_ARCHS}"
fi
if [ -e ${BUILT_PRODUCTS_DIR}/${BASEDIR}/${IORTCW_UI} ]; then
IORTCW_UI_ARCHS="${BUILT_PRODUCTS_DIR}/${BASEDIR}/${IORTCW_UI} ${IORTCW_UI_ARCHS}"
fi
#echo "valid arch: ${ARCH}"
done
# final preparations and checks before attempting to make the application bundle
cd `dirname $0`
if [ ! -f Makefile ]; then
echo "$0 must be run from the iortcw build directory"
exit 1
fi
if [ "${IORTCW_CLIENT_ARCHS}" == "" ]; then
echo "$0: no iortcw binary architectures were found for target '${TARGET_NAME}'"
exit 1
fi
# set the final application bundle output directory
if [ "${2}" == "" ]; then
BUILT_PRODUCTS_DIR="${OBJROOT}/${TARGET_NAME}-darwin-universal"
if [ ! -d ${BUILT_PRODUCTS_DIR} ]; then
mkdir -p ${BUILT_PRODUCTS_DIR} || exit 1;
fi
else
BUILT_PRODUCTS_DIR="${OBJROOT}/${TARGET_NAME}-darwin-${CURRENT_ARCH}"
fi
BUNDLEBINDIR="${BUILT_PRODUCTS_DIR}/${EXECUTABLE_FOLDER_PATH}"
# here we go
echo "Creating bundle '${BUILT_PRODUCTS_DIR}/${WRAPPER_NAME}'"
echo "with architectures:"
for ARCH in ${VALID_ARCHS}; do
echo " ${ARCH}"
done
echo ""
# make the application bundle directories
if [ ! -d "${BUILT_PRODUCTS_DIR}/${EXECUTABLE_FOLDER_PATH}/$BASEDIR" ]; then
mkdir -p "${BUILT_PRODUCTS_DIR}/${EXECUTABLE_FOLDER_PATH}/$BASEDIR" || exit 1;
fi
if [ ! -d "${BUILT_PRODUCTS_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}" ]; then
mkdir -p "${BUILT_PRODUCTS_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}" || exit 1;
fi
# copy and generate some application bundle resources
cp code/libs/macosx/*.dylib "${BUILT_PRODUCTS_DIR}/${EXECUTABLE_FOLDER_PATH}"
cp ${ICNSDIR}/${ICNS} "${BUILT_PRODUCTS_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/$ICNS" || exit 1;
echo -n ${PKGINFO} > "${BUILT_PRODUCTS_DIR}/${CONTENTS_FOLDER_PATH}/PkgInfo" || exit 1;
# create Info.Plist
PLIST="<?xml version=\"1.0\" encoding=\"UTF-8\"?>
<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">
<plist version=\"1.0\">
<dict>
<key>CFBundleDevelopmentRegion</key>
<string>en</string>
<key>CFBundleExecutable</key>
<string>${EXECUTABLE_NAME}</string>
<key>CFBundleIconFile</key>
<string>iortcw</string>
<key>CFBundleIdentifier</key>
<string>org.iortcw.${PRODUCT_NAME}</string>
<key>CFBundleInfoDictionaryVersion</key>
<string>6.0</string>
<key>CFBundleName</key>
<string>${PRODUCT_NAME}</string>
<key>CFBundlePackageType</key>
<string>APPL</string>
<key>CFBundleShortVersionString</key>
<string>${IORTCW_VERSION}</string>
<key>CFBundleSignature</key>
<string>????</string>
<key>CFBundleVersion</key>
<string>${IORTCW_VERSION}</string>
<key>CGDisableCoalescedUpdates</key>
<true/>
<key>LSMinimumSystemVersion</key>
<string>${MACOSX_DEPLOYMENT_TARGET}</string>"
if [ -n "${MACOSX_DEPLOYMENT_TARGET_PPC}" ] || [ -n "${MACOSX_DEPLOYMENT_TARGET_X86}" ] || [ -n "${MACOSX_DEPLOYMENT_TARGET_X86_64}" ] || [ -n "${MACOSX_DEPLOYMENT_TARGET_ARM64}" ]; then
PLIST="${PLIST}
<key>LSMinimumSystemVersionByArchitecture</key>
<dict>"
if [ -n "${MACOSX_DEPLOYMENT_TARGET_PPC}" ]; then
PLIST="${PLIST}
<key>ppc</key>
<string>${MACOSX_DEPLOYMENT_TARGET_PPC}</string>"
fi
if [ -n "${MACOSX_DEPLOYMENT_TARGET_X86}" ]; then
PLIST="${PLIST}
<key>i386</key>
<string>${MACOSX_DEPLOYMENT_TARGET_X86}</string>"
fi
if [ -n "${MACOSX_DEPLOYMENT_TARGET_X86_64}" ]; then
PLIST="${PLIST}
<key>x86_64</key>
<string>${MACOSX_DEPLOYMENT_TARGET_X86_64}</string>"
fi
if [ -n "${MACOSX_DEPLOYMENT_TARGET_ARM64}" ]; then
PLIST="${PLIST}
<key>arm64</key>
<string>${MACOSX_DEPLOYMENT_TARGET_ARM64}</string>"
fi
PLIST="${PLIST}
</dict>"
fi
PLIST="${PLIST}
<key>NSHumanReadableCopyright</key>
<string>Return to Castle Wolfenstein single player Copyright (C) 1999-2010 id Software LLC, a ZeniMax Media company.</string>
<key>NSPrincipalClass</key>
<string>NSApplication</string>
<key>NSHighResolutionCapable</key>
<false/>
</dict>
</plist>
"
echo -e "${PLIST}" > "${BUILT_PRODUCTS_DIR}/${CONTENTS_FOLDER_PATH}/Info.plist"
# action takes care of generating universal binaries if lipo is available
# otherwise, it falls back to using a simple copy, expecting the first item in
# the second parameter list to be the desired architecture
function action()
{
COMMAND=""
if [ -x "${HAS_LIPO}" ]; then
COMMAND="${HAS_LIPO} -create -o"
$HAS_LIPO -create -o "${1}" ${2} # make sure $2 is treated as a list of files
elif [ -x "${HAS_CP}" ]; then
COMMAND="${HAS_CP}"
SRC="${2// */}" # in case there is a list here, use only the first item
$HAS_CP "${SRC}" "${1}"
else
"$0 cannot create an application bundle."
exit 1
fi
#echo "${COMMAND}" "${1}" "${2}"
}
#
# the meat of universal binary creation
# destination file names do not have architecture suffix.
# action will handle merging universal binaries if supported.
# symlink appropriate architecture names for universal (fat) binary support.
#
# executables
action "${BUNDLEBINDIR}/${EXECUTABLE_NAME}" "${IORTCW_CLIENT_ARCHS}"
# renderers
action "${BUNDLEBINDIR}/${RENDERER_OPENGL1_NAME}" "${IORTCW_RENDERER_GL1_ARCHS}"
action "${BUNDLEBINDIR}/${RENDERER_OPENGL2_NAME}" "${IORTCW_RENDERER_GL2_ARCHS}"
symlinkArch "${RENDERER_OPENGL}" "${RENDERER_OPENGL}" "_" "${BUNDLEBINDIR}"
symlinkArch "${RENDERER_OPENGL2}" "${RENDERER_OPENGL2}" "_" "${BUNDLEBINDIR}"
# game
action "${BUNDLEBINDIR}/${BASEDIR}/${CGAME_NAME}" "${IORTCW_CGAME_ARCHS}"
action "${BUNDLEBINDIR}/${BASEDIR}/${GAME_NAME}" "${IORTCW_GAME_ARCHS}"
action "${BUNDLEBINDIR}/${BASEDIR}/${UI_NAME}" "${IORTCW_UI_ARCHS}"
symlinkArch "${CGAME}" "${CGAME}." "" "${BUNDLEBINDIR}/${BASEDIR}"
symlinkArch "${GAME}" "${GAME}." "" "${BUNDLEBINDIR}/${BASEDIR}"
symlinkArch "${UI}" "${UI}." "" "${BUNDLEBINDIR}/${BASEDIR}"
| wolfetplayer/RealRTCW | make-macosx-app.sh | Shell | gpl-3.0 | 12,217 |
#!/bin/bash
# Copyright Mandriva 2009, 2010 all rights reserved
. '../functions.sh'
check_root
fw_lan=$1
fw_wan=$2
roundcube_db_conf_template="templates/db.inc.php.tpl"
roundcube_main_conf_template="templates/main.inc.php.tpl"
mbs_logo="templates/mbs_logo.png"
# copy the logo
cp -f $mbs_logo /usr/share/roundcubemail/skins/
# copy the db config
backup /etc/roundcubemail/db.inc.php
cat $roundcube_db_conf_template > /etc/roundcubemail/db.inc.php
# setup the roundcube db
if [ -f /var/lib/roundcubemail/sqlite.db ]; then
rm -f /var/lib/roundcubemail/sqlite.db
fi
mkdir -p /var/lib/roundcubemail
chown apache /var/lib/roundcubemail
sqlite /var/lib/roundcubemail/sqlite.db < /usr/share/doc/roundcubemail/SQL/sqlite.initial.sql
chown apache:apache /var/lib/roundcubemail/sqlite.db
chmod 0600 /var/lib/roundcubemail/sqlite.db
# set correct permissions
chmod 770 /var/log/roundcubemail
# http -> https redirection
https_redirect roundcubemail /etc/httpd/conf/webapps.d/roundcubemail.conf
restart_service httpd
# copy main config
backup /etc/roundcubemail/main.inc.php
cat $roundcube_main_conf_template > /etc/roundcubemail/main.inc.php
# configure the Firewall
[ $fw_lan == "on" ] && mss-add-shorewall-rule -a Web/ACCEPT -t lan
[ $fw_wan == "on" ] && mss-add-shorewall-rule -a Web/ACCEPT -t wan
restart_service shorewall
info_b $"Webmail Roundcube is installed on your server."
info $"You can access the webmail interface at https://@HOSTNAME@/roundcubemail/."
| pulse-project/mss | modules/mds_webmail/setup-webmail.sh | Shell | gpl-3.0 | 1,473 |
#!/bin/bash
#
# anonfiles.com module
# Copyright (c) 2012 Plowshare team
#
# This file is part of Plowshare.
#
# Plowshare is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Plowshare is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Plowshare. If not, see <http://www.gnu.org/licenses/>.
MODULE_ANONFILES_REGEXP_URL="https\?://\([[:alnum:]]\+\.\)\?anonfiles\.com/"
MODULE_ANONFILES_DOWNLOAD_OPTIONS=""
MODULE_ANONFILES_DOWNLOAD_RESUME=yes
MODULE_ANONFILES_DOWNLOAD_FINAL_LINK_NEEDS_COOKIE=no
MODULE_ANONFILES_DOWNLOAD_SUCCESSIVE_INTERVAL=
MODULE_ANONFILES_UPLOAD_OPTIONS=""
MODULE_ANONFILES_UPLOAD_REMOTE_SUPPORT=no
# Output an AnonFiles.com file download URL
# $1: cookie file (unused here)
# $2: anonfiles url
# stdout: real file download link
anonfiles_download() {
local -r URL=$2
local PAGE FILE_URL FILENAME
PAGE=$(curl "$URL") || return
FILE_URL=$(echo "$PAGE" | parse_attr_quiet 'download_button' href)
if [ -z "$FILE_URL" ]; then
FILE_URL=$(echo "$PAGE" | \
parse_attr_quiet 'image_preview' src) || return
fi
test "$CHECK_LINK" && return 0
FILENAME=$(echo "$PAGE" | parse_tag '<legend' b)
echo "$FILE_URL"
echo "$FILENAME"
}
# Upload a file to AnonFiles.com
# Use API: https://anonfiles.com/api/help
# $1: cookie file (unused here)
# $2: input file (with full path)
# $3: remote filename
# stdout: download link
anonfiles_upload() {
local -r FILE=$2
local -r DESTFILE=$3
local -r BASE_URL='https://anonfiles.com/api'
local JSON DL_URL ERR MSG
# Note 1: The set of accepted file types is very restrictive!
# Note 2: -F "file_publish=on" does not work!
JSON=$(curl_with_log \
-F "file=@$FILE;filename=$DESTFILE" "$BASE_URL") || return
DL_URL=$(echo "$JSON" | parse_json_quiet url)
if match_remote_url "$DL_URL"; then
echo "$DL_URL"
return 0
fi
ERR=$(echo "$JSON" | parse_json status)
MSG=$(echo "$JSON" | parse_json msg)
log_error "Unexpected status ($ERR): $MSG"
return $ERR_FATAL
}
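# Typical use (illustrative, via the Plowshare front-ends; the URL is made up):
#   plowdown 'https://anonfiles.com/abcd1234'
#   plowup anonfiles /path/to/file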
| mytskine/plowshare | src/modules/anonfiles.sh | Shell | gpl-3.0 | 2,470 |
#!/bin/sh
PID=`cat ~/workspace/rs-testbed/tmp/target/quagga-0.99.15/etc/bgpd.pid`
VALUE=`ps -p $PID -o rss=`
if [ "$VALUE" = "" ]; then
echo "NaN"
else
echo $VALUE
fi
| sspies8684/hoofprints | scripts/getQuaggaMemory.sh | Shell | gpl-3.0 | 173 |
#!/usr/bin/env bash
MAXTOOLCOUNT=20
################################################################################
#
# I need to work in the canonical location, because I want to access
# resources that are typically bundled with this script.
#
# I try GNU readlink first, if I don't find it I will rely on POSIX
# readlink. I think.
#
CANONICAL_SOURCE=$(readlink -f "$BASH_SOURCE" 2>/dev/null) || {
unsymlink() {
if [ -L "$1" ]; then
unsymlink $(readlink "$1")
elif [ "${1%/*}" == "$1" ]; then
echo "$1"
else
echo $(unsymlink "${1%/*}")/"${1##*/}"
fi
}
CANONICAL_SOURCE=$(unsymlink "$BASH_SOURCE")
}
cd ${CANONICAL_SOURCE%/*}
################################################################################
#
# Environment variables
#
# export CC_TYPE=${CC_TYPE:-gcc}
# export CC=${CC:-$CC_TYPE}
# export CXX=${CXX:-g++}
export LIBERTY_HOME=$(pwd)
export TARGET=${TARGET:-$LIBERTY_HOME/target}
export TMPDIR=${TMPDIR:-$TARGET/tmp}
export PATH=$TARGET/bin:$PATH
export plain=${plain:-FALSE}
export LOG=$TARGET/log/install$(date +'-%Y%m%d-%H%M%S').log
export PREREQUISITES="$CC $CXX" # gccxml
unset CDPATH
################################################################################
#
# Source the Liberty tools (they provide the progress bar)
#
. $LIBERTY_HOME/work/tools.sh
mkdir -p $TMPDIR
################################################################################
#
# Checking prerequisites
#
_check_libgc() {
title "Checking BDW GC"
cat > $TMPDIR/check_libgc.c <<EOF
#include <stdlib.h>
#include <stdio.h>
#include "gc/gc.h"
int main() {
unsigned major = 0;
unsigned minor = 0;
unsigned micro = 0;
unsigned alpha = 0;
#ifdef GC_VERSION_MICRO
major = GC_VERSION_MAJOR;
minor = GC_VERSION_MINOR;
micro = GC_VERSION_MICRO;
#else
unsigned version = GC_get_version();
major = (version & 0x00ff0000) >> 16;
minor = (version & 0x0000ff00) >> 8;
alpha = (version & 0x000000ff) != GC_NOT_ALPHA;
#endif
printf("BDW (libgc) Version %02d.%02d.%02d %s\n", major, minor, micro, alpha ? "alpha" : "");
if ( (major < 7)
|| (major == 7 && minor < 1)
|| (alpha)) {
exit(1);
}
exit(0);
}
EOF
${CC} -lgc $TMPDIR/check_libgc.c -o $TMPDIR/check_libgc >/dev/null 2>&1 || return 1
if $TMPDIR/check_libgc; then
return 0
else
return 1
fi
}
check_libgc() {
if _check_libgc; then
BDW_GC="-bdw_gc"
else
error_message "BDW too old or missing. On Debian based distribution install libgc and libgc-dev packages; on rpm-based distributions (Fedora) gc and gc-devel; on BSD systems install boehm-gc package"
BDW_GC="-no_gc"
fi
export BDW_GC
}
check_prerequisites() {
title "Checking required programs."
i=0
for PROGRAM in $PREREQUISITES; do
progress 30 $i 3 "Checking $PROGRAM..."
if which $PROGRAM >/dev/null; then
: # all right
else
error_message "$PROGRAM not found, cannot proceed"
exit 5
fi
i=$(($i + 1))
done
progress 30 3 3 "All programs present, proceeding."
echo
}
################################################################################
#
# The bootstrap function:
#
# Locally install Liberty Eiffel starting from the C germ
#
bootstrap() {
test -d $TARGET && rm -rf $TARGET
mkdir -p $TARGET
mkdir -p $TMPDIR
cd $TARGET
test -d log || mkdir log
check_libgc
check_prerequisites
if [ ! -d bin ]; then
title "Preparing $TARGET"
mkdir bin
cd bin
for ace in $LIBERTY_HOME/src/tools/*/*.ace; do
tool=$(basename $(dirname $ace))
if [[ $tool.ace == $(basename $ace) ]]; then
mkdir $TARGET/bin/$tool.d
ln -s $ace $TARGET/bin/$tool.d
fi
done
cd ..
fi
if [ ! -d liberty-eiffel ]; then
title "Preparing Liberty environment"
mkdir liberty-eiffel
cd liberty-eiffel
cat > liberty.se <<EOF
[General]
bin: $TARGET/bin
sys: $LIBERTY_HOME/sys
short: $LIBERTY_HOME/resources/short
os: ${OS}
flavor: ${flavor}
tag: 3
jobs: ${jobs}
[Environment]
path_liberty: $LIBERTY_HOME/
path_liberty_core: $LIBERTY_HOME/src/lib/
path_liberty_extra: $LIBERTY_HOME/src/wrappers/
path_liberty_staging: $LIBERTY_HOME/src/staging/
path_smarteiffel: $LIBERTY_HOME/src/smarteiffel/
path_tools: $LIBERTY_HOME/src/tools/
path_tutorial: $LIBERTY_HOME/tutorial/
hyphen: -
[Loadpath]
liberty_core: \${path_liberty_core}loadpath.se
liberty_extra: \${path_liberty_extra}loadpath.se
liberty_staging: \${path_liberty_staging}loadpath.se
test: \${path_liberty}test/loadpath.se
smarteiffel: \${path_smarteiffel}loadpath.se
tools: \${path_tools}loadpath.se
tutorial: \${path_tutorial}loadpath.se
[Tools]
ace_check: ace_check
c2c: compile_to_c
c: compile
class_check: class_check
clean: clean
doc: eiffeldoc
find: finder
make: se_make.sh
mock: mock
pretty: pretty
short: short
test: eiffeltest
test_ng: eiffeltest_ng
test_server: eiffeltest_server
wrap: wrappers_generator
x_int: extract_internals
[boost]
c_compiler_type: $CC_TYPE
c_compiler_path: $CC
c_compiler_options: -O2 ${CFLAGS}
c_linker_path: $CC
c_linker_options: ${LDFLAGS}
cpp_compiler_type: ${CXX_TYPE}
cpp_compiler_path: $CXX
cpp_compiler_options: -O2 ${CXXFLAGS}
cpp_linker_path: $CC
cpp_linker_options: ${LDFLAGS}
[no_check]
c_compiler_type: $CC_TYPE
c_compiler_path: $CC
c_compiler_options: -O1 ${CFLAGS}
c_linker_path: $CC
c_linker_options: ${LDFLAGS}
cpp_compiler_type: ${CXX_TYPE}
cpp_compiler_path: $CXX
cpp_compiler_options: -O1 ${CXXFLAGS}
cpp_linker_path: $CC
cpp_linker_options: ${LDFLAGS}
[require_check]
c_compiler_type: $CC_TYPE
c_compiler_path: $CC
c_compiler_options: -O1 ${CFLAGS}
c_linker_path: $CC
c_linker_options: ${LDFLAGS}
cpp_compiler_type: ${CXX_TYPE}
cpp_compiler_path: $CXX
cpp_compiler_options: ${CXXFLAGS}
cpp_linker_path: $CC
cpp_linker_options: ${LDFLAGS}
[ensure_check]
c_compiler_type: $CC_TYPE
c_compiler_path: $CC
c_compiler_options: -O1 ${CFLAGS}
c_linker_path: $CC
c_linker_options: ${LDFLAGS}
cpp_compiler_type: ${CXX_TYPE}
cpp_compiler_path: $CXX
cpp_compiler_options: ${CXXFLAGS}
cpp_linker_path: $CC
cpp_linker_options: ${LDFLAGS}
[invariant_check]
c_compiler_type: $CC_TYPE
c_compiler_path: $CC
c_compiler_options: -O1 ${CFLAGS}
c_linker_path: $CC
c_linker_options: ${LDFLAGS}
cpp_compiler_type: ${CXX_TYPE}
cpp_compiler_path: $CXX
cpp_compiler_options: ${CXXFLAGS}
cpp_linker_path: $CC
cpp_linker_options: ${LDFLAGS}
[loop_check]
c_compiler_type: $CC_TYPE
c_compiler_path: $CC
c_compiler_options: -O1 ${CFLAGS}
c_linker_path: $CC
c_linker_options: ${LDFLAGS}
cpp_compiler_type: ${CXX_TYPE}
cpp_compiler_path: $CXX
cpp_compiler_options: ${CXXFLAGS}
cpp_linker_path: $CC
cpp_linker_options: ${LDFLAGS}
[all_check]
c_compiler_type: $CC_TYPE
c_compiler_path: $CC
c_compiler_options: -O1 ${CFLAGS}
c_linker_path: $CC
c_linker_options: ${LDFLAGS}
cpp_compiler_type: ${CXX_TYPE}
cpp_compiler_path: $CXX
cpp_compiler_options: ${CXXFLAGS}
cpp_linker_path: $CC
cpp_linker_options: ${LDFLAGS}
[debug_check]
c_compiler_type: $CC_TYPE
c_compiler_path: $CC
c_compiler_options: -g -O1 ${CFLAGS}
c_linker_path: $CC
c_linker_options: ${LDFLAGS}
cpp_compiler_type: ${CXX_TYPE}
cpp_compiler_path: $CXX
cpp_compiler_options: -g ${CXXFLAGS}
cpp_linker_path: $CC
cpp_linker_options: ${LDFLAGS}
smarteiffel_options: -no_strip
EOF
cd ..
fi
export CONFIG_DIR=${HOME:-/home/$USER}/.config
test -d $CONFIG_DIR || mkdir -p $CONFIG_DIR
if [ -L $CONFIG_DIR/liberty-eiffel ]; then
rm $CONFIG_DIR/liberty-eiffel
elif [ -e $CONFIG_DIR/liberty-eiffel ]; then
mv $CONFIG_DIR/liberty-eiffel $CONFIG_DIR/liberty-eiffel~
fi
ln -s $TARGET/liberty-eiffel $CONFIG_DIR/
find $LIBERTY_HOME -name c -type d -print |
while read c; do
for run in $c/*.run; do
if [[ -x "$run" ]]; then
"$run" > ${run%.run}
fi
done
done
title "Bootstrapping Liberty Eiffel tools"
cd $LIBERTY_HOME/resources/smarteiffel-germ
if [ ! -d $TARGET/bin/compile_to_c.d ]; then
test -d $TARGET/bin/compile_to_c.d || mkdir $TARGET/bin/compile_to_c.d
rm -f compile_to_c*.o
for src in compile_to_c*.c ; do
cmd="${germ_cc} ${germ_cflags} ${src}"
progress 30 0 $MAXTOOLCOUNT "germ: $cmd"
run $cmd || exit 1
done
${CC} ${LDFLAGS} *.o
test -e a.exe && mv a.exe a.out
cp -a * $TARGET/bin/compile_to_c.d/
fi
cd $TARGET/bin/compile_to_c.d
mv a.out compile_to_c || exit 1
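# Bootstrap fixpoint check (T1..T3): compile_to_c recompiles itself and the
# generated makefile is inspected; as long as it still contains compiler
# invocations, the freshly built binary is promoted and the cycle repeats.
# If no fixpoint is reached after three generations, the bootstrap aborts.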
progress 30 1 $MAXTOOLCOUNT "T1: compile_to_c"
run ./compile_to_c -verbose -boost -no_gc compile_to_c -o compile_to_c.new || exit 1
grep -v '^#' compile_to_c.make |
while read cmd; do
progress 30 1 $MAXTOOLCOUNT "T1: $cmd"
run $cmd || exit 1
done
progress 30 1 $MAXTOOLCOUNT "T1: save"
mkdir T1
cp -a compile_to_c* T1/
progress 30 1 $MAXTOOLCOUNT "T1: check"
if grep "^$CC" compile_to_c.make >/dev/null 2>&1; then
rm compile_to_c.make
cp -a compile_to_c.new compile_to_c
progress 30 2 $MAXTOOLCOUNT "T2: compile_to_c"
run ./compile_to_c -verbose -boost -no_gc compile_to_c -o compile_to_c.new || exit 1
grep -v '^#' compile_to_c.make |
while read cmd; do
progress 30 2 $MAXTOOLCOUNT "T2: $cmd"
run $cmd || exit 1
done
progress 30 2 $MAXTOOLCOUNT "T2: save"
mkdir T2
cp -a compile_to_c* T2/
progress 30 2 $MAXTOOLCOUNT "T2: check"
if grep "$CC" compile_to_c.make >/dev/null 2>&1; then
rm compile_to_c.make
cp -a compile_to_c.new compile_to_c
progress 30 3 $MAXTOOLCOUNT "T3: compile_to_c"
run ./compile_to_c -verbose -boost -no_gc compile_to_c -o compile_to_c.new || exit 1
progress 30 3 $MAXTOOLCOUNT "T3: save"
mkdir T3
cp -a compile_to_c* T3/
progress 30 3 $MAXTOOLCOUNT "T3: check"
if grep "^$CC" compile_to_c.make >/dev/null 2>&1; then
echo
cat compile_to_c.make >> $LOG
error "The compiler is not stable."
exit 1
fi
else
rm -f compile_to_c.new
fi
else
rm -f compile_to_c.new
fi
cd .. && test -e compile_to_c || ln -s compile_to_c.d/compile_to_c .
progress 30 4 $MAXTOOLCOUNT "compile"
test -d compile.d || mkdir compile.d
cd compile.d
run ../compile_to_c -verbose -boost -no_gc -no_split compile -o compile || exit 1
grep ^$CC compile.make |
while read cmd; do
run $cmd || exit 1
done
cd .. && test -e compile || ln -s compile.d/compile .
{
grep -v '^#' |
while read i gc tool; do
progress 30 $i $MAXTOOLCOUNT "$tool"
test -d ${tool}.d || mkdir ${tool}.d
cd ${tool}.d
case $gc in
no) GC="-no_gc";;
bdw) GC="$BDW_GC";;
*) GC="";;
esac
run ../compile -verbose -boost $GC -no_split $tool -o $tool || exit 1
cd .. && test -e ${tool} || ln -s ${tool}.d/$tool .
done
} <<EOF
5 no se
6 bdw clean
7 bdw ace_check
8 bdw eiffeltest
9 no mock
9 bdw eiffeltest_ng
10 bdw eiffeltest_server
EOF
{
grep -v '^#' |
while read i gc tool; do
progress 30 $i $MAXTOOLCOUNT "$tool"
test -d ${tool}.d || mkdir ${tool}.d
cd ${tool}.d
case $gc in
no) GC="-no_gc";;
bdw) GC="$BDW_GC";;
*) GC="";;
esac
run ../compile -verbose -boost $GC $tool -o $tool || exit 1
cd .. && test -e ${tool} || ln -s ${tool}.d/$tool .
done
} <<EOF
11 no pretty
12 no short
13 no class_check
14 no finder
15 bdw eiffeldoc
16 no extract_internals
17 no wrappers_generator
EOF
{
grep -v '^#' |
while read i gc tool; do
progress 30 $i $MAXTOOLCOUNT "$tool"
test -d ${tool}.d || mkdir ${tool}.d
cd ${tool}.d
if [ -e $tool.ace ]; then
run ../se c -verbose $tool.ace
else
case $gc in
no) GC="-no_gc";;
bdw) GC="$BDW_GC";;
*) GC="";;
esac
run ../se c -verbose -boost $GC $tool -o $tool || exit 1
fi
cd .. && test -e ${tool} || ln -s ${tool}.d/$tool .
done
} <<EOF
#18 bdw mocker
EOF
progress 30 $(($MAXTOOLCOUNT - 1)) $MAXTOOLCOUNT "se_make.sh"
cp $LIBERTY_HOME/work/se_make.sh .
progress 30 $MAXTOOLCOUNT $MAXTOOLCOUNT "done."
echo
}
################################################################################
#
# The compile_plugins function:
#
# Generate "plugins" for the runner
#
compile_plugins() {
title "Compiling plugins"
check_prerequisites
cd $LIBERTY_HOME/work
./compile_plugins.sh
}
################################################################################
#
# The generate_wrappers function:
#
# Generate C library wrappers
#
generate_wrappers() {
title "Generating wrappers"
cd $TARGET/bin
cd wrappers_generator.d
n=$(ls $LIBERTY_HOME/src/wrappers/*/library/externals/Makefile | wc -l)
n=$((n+1))
progress 30 0 $n "Building the wrappers generator"
run ../se c -verbose wrappers_generator.ace
cd .. && test -e wrappers_generator || ln -s wrappers_generator.d/wrappers_generator .
i=1
for f in $(ls $LIBERTY_HOME/src/wrappers/*/library/externals/Makefile); do
cd ${f%/Makefile}
rm -f *.xml *.e plugin/c/plugin.[ch]
t=${f%/library/externals/Makefile}
t=${t#$LIBERTY_HOME/src/wrappers/}
progress 30 $i $n $t
run make
i=$((i+1))
done
progress 30 $n $n "done."
echo
}
################################################################################
#
# The compile_all function:
#
# Compile the Liberty Eiffel tools
#
compile_all() {
n=$(ls $LIBERTY_HOME/src/tools/main/*.ace 2>/dev/null | wc -l || echo 0)
if [ $n -gt 0 ]; then
i=0
for f in $LIBERTY_HOME/src/tools/main/*.ace; do
ace=${f##*/} && ace=${ace%.ace}
progress 30 $i $n $ace
cd $TARGET/bin/${ace}.d
run ../se c -verbose ${ace}.ace
cd .. && test -e "$ace" || ln -s ${ace}.d/$ace .
i=$((i+1))
done
progress 30 $n $n "done."
echo
fi
}
################################################################################
#
# The make_doc function:
#
# Invoke eiffeldoc to build the documentation
#
make_doc() {
export DOC_ROOT=$TARGET/doc/
export LOG=$TARGET/log/build_doc$(date +'-%Y%m%d-%H%M%S').log
test -d $DOC_ROOT && rm -rf $DOC_ROOT
mkdir -p $DOC_ROOT
$LIBERTY_HOME/work/build_doc.sh
}
################################################################################
#
# The packaging functions
#
do_pkg_tools() {
PUBLIC=$USRDIR/bin
PRIVATE=$USRDIR/lib/liberty-eiffel/bin
ETC=$ETCDIR/xdg/liberty-eiffel
SHORT=$USRDIR/share/liberty-eiffel/short
SYS=$USRDIR/share/liberty-eiffel/sys
SITE_LISP=$USRDIR/share/emacs/site-lisp/liberty-eiffel
install -d -m 0755 -o ${LE_USER} -g ${LE_GROUP} $PUBLIC $PRIVATE $ETC $SHORT $SYS $SITE_LISP
install -m 0755 -o ${LE_USER} -g ${LE_GROUP} $TARGET/bin/se $PUBLIC/
install -m 0644 -o ${LE_USER} -g ${LE_GROUP} $LIBERTY_HOME/work/eiffel.el $SITE_LISP/
for tool in compile compile_to_c clean pretty short finder ace_check class_check eiffeldoc eiffeltest extract_internals mock
do
bin=$TARGET/bin/${tool}.d/$tool
if test -e $bin; then
echo "$bin to $PRIVATE/"
install -m 0755 -o ${LE_USER} -g ${LE_GROUP} $bin $PRIVATE/
fi
done
cp -a $LIBERTY_HOME/resources/short/* $SHORT
cp -a $LIBERTY_HOME/sys/* $SYS
chown -R ${LE_USER}:${LE_GROUP} $SHORT $SYS
cat >$ETC/liberty.se <<EOF
[General]
bin: /usr/lib/liberty-eiffel/bin
sys: /usr/share/liberty-eiffel/sys
short: /usr/share/liberty-eiffel/short
os: ${OS}
flavor: ${flavor}
tag: 3
jobs: ${jobs}
[Environment]
hyphen: -
[Tools]
ace_check: ace_check
c2c: compile_to_c
c: compile
class_check: class_check
clean: clean
doc: eiffeldoc
find: finder
mock: mock
pretty: pretty
short: short
test: eiffeltest
test_ng: eiffeltest_ng
test_server: eiffeltest_server
wrap: wrappers_generator
x_int: extract_internals
[boost]
c_compiler_type: ${CC_TYPE}
c_compiler_options: ${CFLAGS} -O2
c_linker_options: ${LDFLAGS}
cpp_compiler_type: ${CXX_TYPE}
cpp_compiler_options: ${CXXFLAGS} -O2
cpp_linker_options: ${LDFLAGS}
[no_check]
c_compiler_type: ${CC_TYPE}
c_compiler_options: ${CFLAGS} -O1
c_linker_options: ${LDFLAGS}
cpp_compiler_type: ${CXX_TYPE}
cpp_compiler_options: ${CXXFLAGS} -O1
cpp_linker_options: ${LDFLAGS}
[require_check]
c_compiler_type: ${CC_TYPE}
c_compiler_options: ${CFLAGS}
c_linker_options: ${LDFLAGS}
cpp_compiler_type: ${CXX_TYPE}
cpp_compiler_options: ${CXXFLAGS}
cpp_linker_options: ${LDFLAGS}
[ensure_check]
c_compiler_type: ${CC_TYPE}
c_compiler_options: ${CFLAGS}
c_linker_options: ${LDFLAGS}
cpp_compiler_type: ${CXX_TYPE}
cpp_compiler_options: ${CXXFLAGS}
cpp_linker_options: ${LDFLAGS}
[invariant_check]
c_compiler_type: ${CC_TYPE}
c_compiler_options: ${CFLAGS}
c_linker_options: ${LDFLAGS}
cpp_compiler_type: ${CXX_TYPE}
cpp_compiler_options: ${CXXFLAGS}
cpp_linker_options: ${LDFLAGS}
[loop_check]
c_compiler_type: ${CC_TYPE}
c_compiler_options: ${CFLAGS}
c_linker_options: ${LDFLAGS}
cpp_compiler_type: ${CXX_TYPE}
cpp_compiler_options: ${CXXFLAGS}
cpp_linker_options: ${LDFLAGS}
[all_check]
c_compiler_type: ${CC_TYPE}
c_compiler_options: ${CFLAGS}
c_linker_options: ${LDFLAGS}
cpp_compiler_type: ${CXX_TYPE}
cpp_compiler_options: ${CXXFLAGS}
cpp_linker_options: ${LDFLAGS}
[debug_check]
c_compiler_type: ${CC_TYPE}
c_compiler_options: ${CFLAGS} -g
c_linker_options: ${LDFLAGS}
cpp_compiler_type: ${CXX_TYPE}
cpp_compiler_options: ${CXXFLAGS} -g
cpp_linker_options: ${LDFLAGS}
smarteiffel_options: -no_strip
EOF
chown ${LE_USER}:${LE_GROUP} $ETC/liberty.se
}
_do_pkg_src() {
local section=$1
shift
local src=("$@")
SRC=$USRDIR/share/liberty-eiffel/src/$section
ETC=$ETCDIR/xdg/liberty-eiffel
install -d -m 0755 -o ${LE_USER} -g ${LE_GROUP} $SRC $ETC
for s in "${src[@]}"; do
if [ -r $SRC/loadpath.se ]; then
mv $SRC/loadpath.se $SRC/loadpath.se.old
else
touch $SRC/loadpath.se.old
fi
cp -a $s/* $SRC/
if [ -r $SRC/loadpath.se ]; then
mv $SRC/loadpath.se $SRC/loadpath.se.new
else
touch $SRC/loadpath.se.new
fi
cat $SRC/loadpath.se.{old,new} | sort -u > $SRC/loadpath.se
rm $SRC/loadpath.se.{old,new}
done
find $SRC -type f -exec chmod a-x {} +
chown -R ${LE_USER}:${LE_GROUP} $SRC
cat > $ETC/liberty_${section}.se <<EOF
[Environment]
path_${section}: /usr/share/liberty-eiffel/src/${section}/
[Loadpath]
${section}: \${path_${section}}loadpath.se
EOF
chown ${LE_USER}:${LE_GROUP} $ETC/liberty_${section}.se
}
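# Usage sketch (illustrative section name and paths only): each call installs
# one source section and merges its loadpath.se entries, e.g.
#   _do_pkg_src mylibs $LIBERTY_HOME/src/mylib1 $LIBERTY_HOME/src/mylib2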
do_pkg_tools_src() {
_do_pkg_src tools $LIBERTY_HOME/src/smarteiffel $LIBERTY_HOME/src/tools
}
do_pkg_core_libs() {
_do_pkg_src core $LIBERTY_HOME/src/lib
}
do_pkg_extra_libs() {
_do_pkg_src liberty_extra $LIBERTY_HOME/src/wrappers
}
do_pkg_staging_libs() {
_do_pkg_src liberty_staging $LIBERTY_HOME/src/staging
}
do_pkg_tutorial() {
_do_pkg_src tutorial $LIBERTY_HOME/tutorial
}
do_pkg_tools_doc() {
DOC=$USRDIR/share/doc/liberty-eiffel
install -d -m 0755 -o ${LE_USER} -g ${LE_GROUP} $DOC/tools/{liberty,smarteiffel}
cp -a $TARGET/doc/api/smarteiffel/* $DOC/tools/smarteiffel/
cp -a $TARGET/doc/api/liberty/* $DOC/tools/liberty/
find $DOC -type f -exec chmod a-x {} +
chown -R ${LE_USER}:${LE_GROUP} $DOC
}
do_pkg_core_doc() {
DOC=$USRDIR/share/doc/liberty-eiffel
install -d -m 0755 -o ${LE_USER} -g ${LE_GROUP} $DOC/core
cp -a $TARGET/doc/api/libraries/* $DOC/core/
find $DOC -type f -exec chmod a-x {} +
chown -R ${LE_USER}:${LE_GROUP} $DOC
}
do_pkg_extra_doc() {
DOC=$USRDIR/share/doc/liberty-eiffel
install -d -m 0755 -o ${LE_USER} -g ${LE_GROUP} $DOC/extra
cp -a $TARGET/doc/api/wrappers/* $DOC/extra/
find $DOC -type f -exec chmod a-x {} +
chown -R ${LE_USER}:${LE_GROUP} $DOC
}
do_pkg_staging_doc() {
DOC=$USRDIR/share/doc/liberty-eiffel
install -d -m 0755 -o ${LE_USER} -g ${LE_GROUP} $DOC/staging
cp -a $TARGET/doc/api/staging/* $DOC/staging/
find $DOC -type f -exec chmod a-x {} +
chown -R ${LE_USER}:${LE_GROUP} $DOC
}
get_user_group_info() {
read -p "User to install as [$(id -un)]: " LE_USER
read -p "Group to install as [$(id -gn)]: " LE_GROUP
export LE_USER=${LE_USER:-$(id -un)}
export LE_GROUP=${LE_GROUP:-$(id -gn)}
}
check_target_dir_permissions() {
for d in $*
do
if test ! -x "${d}"
then
error_message "${d} does not exist. You may need to create it."
elif test ! -w "${d}"
then
error_message "You don't have write permission to ${d}." "Specify a writeable directory or run using sudo."
exit 1
fi
done
}
do_local_install() {
export USRDIR=${USRDIR:-/usr/local}
export ETCDIR=${ETCDIR:-/usr/local/etc}
check_target_dir_permissions "${USRDIR}" "${ETCDIR}"
get_user_group_info
do_pkg_tools
do_pkg_tools_src
do_pkg_tools_doc
do_pkg_core_libs
do_pkg_core_doc
do_pkg_extra_libs
do_pkg_extra_doc
do_pkg_staging_libs
do_pkg_staging_doc
do_pkg_tutorial
}
do_pkg() {
if [ x$DESTDIR == x ]; then
echo "No DESTDIR, please call from debian helper tools" >&2
exit 1
fi
echo do_pkg: DESTDIR= $DESTDIR
echo
export USRDIR=$DESTDIR/usr
export ETCDIR=$DESTDIR/etc
case "$1" in
tools) do_pkg_tools;;
tools_src) do_pkg_tools_src;;
tools_doc) do_pkg_tools_doc;;
core_libs) do_pkg_core_libs;;
core_doc) do_pkg_core_doc;;
extra_libs) do_pkg_extra_libs;;
extra_doc) do_pkg_extra_doc;;
tutorial) do_pkg_tutorial;;
staging_libs) do_pkg_staging_libs;;
staging_doc) do_pkg_staging_doc;;
*)
echo "Unknown pkg name: $1" >&2
exit 1
;;
esac
}
################################################################################
#
# Main
#
if [ $# = 0 ]; then
bootstrap
else
while [ $# -gt 0 ]; do
case x"$1" in
x-plugins)
compile_plugins
;;
x-wrappers)
generate_wrappers
;;
x-bootstrap)
bootstrap
;;
x-compile)
compile_all
;;
x-package)
shift
do_pkg "$1"
;;
x-plain)
plain=TRUE
;;
x-doc)
make_doc
;;
x-local_install)
do_local_install
;;
*)
echo "Unknown argument: $1"
cat >&2 <<EOF
Usage: $0 {-bootstrap|-plugins|-wrappers|-doc|-package}
-bootstrap Bootstraps Liberty Eiffel.
-plugins Compiles the plugins used by the Liberty interpreter.
-wrappers Generates the library wrappers; some are used by the
Liberty tools themselves (ffi, readline, llvm, ...)
-doc Generates the HTML documentation for all classes.
-package Generates the Debian packages into \$DESTDIR.
-local_install Installs Liberty Eiffel in /usr/local (config in /usr/local/etc)
If no argument is provided, -bootstrap is assumed.
EOF
exit 1
;;
esac
shift
done
fi
|
LibertyEiffel/Liberty
|
install.sh
|
Shell
|
gpl-3.0
| 24,624 |
#!/bin/bash
WORK_DIR=oldboy
create(){
    # cd once before the loop: repeating "cd $WORK_DIR" with a relative path
    # would fail from the second iteration on, so only one file was created.
    cd "$WORK_DIR" || exit 1
    i=1
    while(($i<11))
    do
        touch "$(tr -dc 'a-z' </dev/urandom | head -c 10)_oldboy.html"
        i=$(($i+1))
    done
}
check(){
if [ -d $WORK_DIR ];
then
create
else
mkdir $WORK_DIR
create
fi
}
check
|
auspbro/CodeSnippets
|
Shell/maintainer_pratice/ex2.sh
|
Shell
|
gpl-3.0
| 312 |
#!/bin/sh
stamp=`docker inspect -f '{{.Created}}' ebusd-buildenv 2>/dev/null`
if [ -z "$stamp" ] || ( rm -f .stamp && touch -d "$stamp" .stamp && [ buildenv/Dockerfile -nt .stamp ] ) ; then
echo "updating ebusd build environment..."
docker rmi -f ebusd-buildenv 2>/dev/null
(cd buildenv && docker build -t ebusd-buildenv .) || exit 1
fi
set -e
echo "creating debian image..."
docker run --rm -it -v `pwd`/../..:/build ebusd-buildenv ./make_debian.sh
export EBUSD_VERSION=`cat ../../VERSION`
export EBUSD_ARCH=`docker version|grep -i "Arch[^:]*:"|tail -n 1|sed -e 's#^.*/##'`
mv ../../ebusd-${EBUSD_VERSION}_${EBUSD_ARCH}_mqtt1.deb runtime/
echo "building docker image..."
(cd runtime && docker build -t ebusd .)
echo "docker image created."
|
mivola/ebusd
|
contrib/docker/build.sh
|
Shell
|
gpl-3.0
| 747 |
#!/bin/bash
# cd /tmp && yum install wget -y && wget https://github.com/munishgaurav5/ks/raw/master/us_noraid_1disk_centos7_minimal_install.sh && chmod 777 us_noraid_1disk_centos7_minimal_install.sh && ./us_noraid_1disk_centos7_minimal_install.sh
#export INSTALL_SRV="http://KICKSTART_SRV_FQDN/"
### NEW ###
yum -y install nano wget curl net-tools lsof bzip2 zip unzip rar unrar epel-release git sudo make cmake GeoIP sed at
NETWORK_INTERFACE_NAME="$(ip -o -4 route show to default | awk '{print $5}' | head -1)"
###########
export KSURL="https://github.com/munishgaurav5/ks/raw/master/us_noraid_1disk_centos7_minimal.cfg"
export DNS1=8.8.8.8
export DNS2=8.8.4.4
#export MIRROR="http://mirror.ircam.fr/pub/CentOS/7.2.1511/os/x86_64/"
#export MIRROR="http://mirror.nl.leaseweb.net/centos/7/os/x86_64/"
#export MIRROR="http://mirror.centos.org/centos/7/os/x86_64/"
#export MIRROR="http://mirror.imt-systems.com/centos/7/os/x86_64/"
export MIRROR="http://mirror.inode.at/centos/7.4.1708/os/x86_64/"
#export IPADDR=$(ip a s $NETWORK_INTERFACE_NAME |grep "inet "|awk '{print $2}'| awk -F '/' '{print $1}')
export IPADDR=$(hostname -I | awk -F ' ' '{print $1}' | head -1)
#export PREFIX=$(ip a s $NETWORK_INTERFACE_NAME |grep "inet "|awk '{print $2}'| awk -F '/' '{print $2}')
export MASK=255.255.255.0
export GW=$(ip route|grep default | awk '{print $3}' | head -1)
curl -o /boot/vmlinuz ${MIRROR}images/pxeboot/vmlinuz
curl -o /boot/initrd.img ${MIRROR}images/pxeboot/initrd.img
# linux /vmlinuz net.ifnames=0 biosdevname=0 ip=${IPADDR}::${GW}:${PREFIX}:$(hostname):eth0:off nameserver=$DNS1 nameserver=$DNS2 inst.repo=$MIRROR inst.ks=$KSURL
# inst.vncconnect=${IPADDR}:5500 # inst.vnc inst.vncpassword=changeme headless
# inst.vnc inst.vncpassword=changeme inst.headless inst.lang=en_US inst.keymap=us
echo ""
echo ""
root_value=`grep "set root=" /boot/grub2/grub.cfg | head -1`
echo "$root_value"
echo ""
echo ""
sleep 2
echo ""
Boot_device=${NETWORK_INTERFACE_NAME}
#Boot_device="eth0"
cat << EOF >> /etc/grub.d/40_custom
menuentry "reinstall" {
$root_value
linux /vmlinuz net.ifnames=0 biosdevname=0 ip=${IPADDR}::${GW}:${MASK}:$(hostname):$Boot_device:off nameserver=$DNS1 nameserver=$DNS2 inst.repo=$MIRROR inst.ks=$KSURL inst.vnc inst.vncconnect=${IPADDR}:1 inst.vncpassword=changeme inst.headless inst.lang=en_US inst.keymap=us
initrd /initrd.img
}
EOF
#sed -i -e "s/GRUB_DEFAULT.*/GRUB_DEFAULT=\"reinstall\"/g" /etc/default/grub
grub2-mkconfig
grub2-mkconfig --output=/boot/grub2/grub.cfg
grubby --info=ALL
echo ""
echo ""
echo "Setting Up default Grub Entry ..."
echo ""
sleep 2
echo ""
# install grub-customizer
### Permanent Boot Change
#grubby --default-index
#grub2-set-default 'reinstall'
#grubby --default-index
### Permanent Boot Change
#grubby --default-index
#grubby --set-default /boot/vmlinuz
#grubby --default-index
### One Time Boot Change
grubby --default-index
#grub-reboot 1
grub2-reboot "reinstall"
grubby --default-index
echo ""
echo ""
echo " >>> Manually update 'IP, Gateway & Hostname' in kickstart config file .. <<<"
echo "IP : $IPADDR"
echo "Gateway : $GW"
echo "Network Interface : $NETWORK_INTERFACE_NAME"
echo ""
echo "DONE!"
|
munishgaurav5/ks
|
us_noraid_1disk_centos7_minimal_install.sh
|
Shell
|
gpl-3.0
| 3,202 |
#!/usr/bin/env bash
#
# Common set of functions used by modules
# Copyright (c) 2010-2016 Plowshare team
#
# This file is part of Plowshare.
#
# Plowshare is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Plowshare is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Plowshare. If not, see <http://www.gnu.org/licenses/>.
# Make pipes fail on the first failed command (requires Bash 3+)
set -o pipefail
# Each time an API is updated, this value will be increased
declare -r PLOWSHARE_API_VERSION=6
# User configuration directory (contains plowshare.conf, exec/, storage/, modules.d/)
declare -r PLOWSHARE_CONFDIR="${XDG_CONFIG_HOME:-$HOME/.config}/plowshare"
# Dependencies
declare -r PLOWCORE_JS=${PLOWSHARE_JS:-js}
declare -r PLOWCORE_CURL=${PLOWSHARE_CURL:-curl}
# Global error codes
# 0 means success or link alive
declare -r ERR_FATAL=1 # Unexpected result (upstream site updated, etc)
declare -r ERR_NOMODULE=2 # No module found for processing request
declare -r ERR_NETWORK=3 # Generic network error (socket failure, curl, firewall, etc)
declare -r ERR_LOGIN_FAILED=4 # Correct login/password argument is required
declare -r ERR_MAX_WAIT_REACHED=5 # Wait timeout (see -t/--timeout command line option)
declare -r ERR_MAX_TRIES_REACHED=6 # Max tries reached (see -r/--max-retries command line option)
declare -r ERR_CAPTCHA=7 # Captcha solving failure
declare -r ERR_SYSTEM=8 # System failure (missing executable, local filesystem, wrong behavior, etc)
declare -r ERR_LINK_TEMP_UNAVAILABLE=10 # plowdown: Link alive but temporarily unavailable
# plowup: Feature (upload service) seems temporarily unavailable from upstream
# plowlist: Links are temporarily unavailable. Upload still pending?
declare -r ERR_LINK_PASSWORD_REQUIRED=11 # plowdown: Link alive but requires a password
# plowdel: Link requires an admin or removal code
# plowlist: Remote folder is password protected
declare -r ERR_LINK_NEED_PERMISSIONS=12 # plowdown: Link alive but requires some authentication (private or premium link)
# plowup, plowdel: Operation not allowed for anonymous users
declare -r ERR_LINK_DEAD=13 # plowdel: File not found or previously deleted
# plowlist: Remote folder does not exist or is empty
declare -r ERR_SIZE_LIMIT_EXCEEDED=14 # plowdown: Can't download link because file is too big (need permissions)
# plowup: Can't upload too big file (need permissions)
declare -r ERR_BAD_COMMAND_LINE=15 # Unknown command line parameter or incompatible options
declare -r ERR_ASYNC_REQUEST=16 # plowup: Asynchronous remote upload started (can't predict final status)
declare -r ERR_EXPIRED_SESSION=17 # Related to local storage module file, expired session
declare -r ERR_FATAL_MULTIPLE=100 # 100 + (n) with n = first error code (when multiple arguments)
# Global variables used (defined in plow* scripts):
# - VERBOSE Verbosity level (0=none, 1=error, 2=notice, 3=debug, 4=report)
# - INTERFACE (curl) Network interface
# - MAX_LIMIT_RATE (curl) Network maximum speed
# - MIN_LIMIT_RATE (curl) Network minimum speed
# - NO_CURLRC (curl) Do not read of use curlrc config
# - EXT_CURLRC (curl) Alternate curlrc config file
# - CAPTCHA_METHOD User-specified captcha method
# - CAPTCHA_ANTIGATE Antigate.com captcha key
# - CAPTCHA_9KWEU 9kw.eu captcha key
# - CAPTCHA_BHOOD Captcha Brotherhood account
# - CAPTCHA_COIN captchacoin.com captcha key
# - CAPTCHA_DEATHBY DeathByCaptcha account
# - CAPTCHA_PROGRAM External solver program/script
# - MODULE Module name (don't include .sh), used by storage API
# - TMPDIR Temporary directory
# - CACHE Storage API policy: none, session (default or empty), shared.
# - COLOR Display log_notice & log_error messages with colors
# Note: captchas are handled in plowdown, plowup and plowdel.
#
# Logs are sent to stderr stream.
# Policies:
# - error: core/modules error messages, latest curl call (plowdown, plowup)
# - notice: core messages (wait, timeout, retries), latest curl call (plowdown, plowup)
# - debug: all core/modules messages, curl calls
# - report: debug plus curl content (html pages, cookies)
#
# Global variables defined here (FIXME later):
# - PS_TIMEOUT (plowdown, plowup) Timeout (in seconds) for one item
# - CONT_SIGNAL SIGCONT signal received
# log_report for a file
# $1: filename
logcat_report() {
if test -s "$1"; then
test $VERBOSE -lt 4 || \
stderr "$(sed -e 's/^/rep:/' "$1")"
fi
return 0
}
# This should not be called within modules
log_report() {
test $VERBOSE -lt 4 || stderr 'rep:' "$@"
}
log_debug() {
test $VERBOSE -lt 3 || stderr 'dbg:' "$@"
}
# This should not be called within modules
log_notice() {
if [[ $COLOR ]]; then
# 33 is yellow
test $VERBOSE -lt 2 || echo -e "\033[0;33m$@\033[0m" >&2
else
test $VERBOSE -lt 2 || stderr "$@"
fi
}
log_error() {
if [[ $COLOR ]]; then
# 31 is red
test $VERBOSE -lt 1 || echo -e "\033[0;31m$@\033[0m" >&2
else
test $VERBOSE -lt 1 || stderr "$@"
fi
}
## ----------------------------------------------------------------------------
##
## All helper functions below can be called by modules
## (see documentation...)
##
# Wrapper for curl: debug and infinite loop control
# $1..$n are curl arguments
# Important note: -D/--dump-header or -o/--output temporary files are deleted in case of error
curl() {
local -a CURL_ARGS=("$@")
local -a OPTIONS=(--insecure --compressed --speed-time 600 --connect-timeout 240)
local DRETVAL=0
# Check if caller has specified a User-Agent, if so, don't put one
if ! find_in_array CURL_ARGS[@] '-A' '--user-agent'; then
OPTIONS+=(--user-agent \
'Mozilla/5.0 (X11; Linux x86_64; rv:6.0) Gecko/20100101 Firefox/6.0')
fi
# If caller has allowed redirection but did not limit it, do it now
if find_in_array CURL_ARGS[@] '-L' '--location'; then
find_in_array CURL_ARGS[@] '--max-redirs' || OPTIONS+=(--max-redirs 5)
fi
if [ -n "$NO_CURLRC" ]; then
OPTIONS=(-q "${OPTIONS[@]}")
elif [ -n "$EXT_CURLRC" ]; then
OPTIONS=(-q --config "$EXT_CURLRC" "${OPTIONS[@]}")
fi
# No verbose unless debug level; don't show progress meter for report level too
if [ "${FUNCNAME[1]}" = 'curl_with_log' ]; then
test $VERBOSE -eq 0 && OPTIONS[${#OPTIONS[@]}]='--silent'
else
test $VERBOSE -ne 3 && OPTIONS[${#OPTIONS[@]}]='--silent'
fi
if test -n "$INTERFACE"; then
OPTIONS+=(--interface $INTERFACE)
fi
if test -n "$MAX_LIMIT_RATE"; then
OPTIONS+=(--limit-rate $MAX_LIMIT_RATE)
fi
if test -n "$MIN_LIMIT_RATE"; then
OPTIONS+=(--speed-time 30 --speed-limit $MIN_LIMIT_RATE)
fi
if test $VERBOSE -lt 4; then
command "$PLOWCORE_CURL" "${OPTIONS[@]}" "${CURL_ARGS[@]}" || DRETVAL=$?
else
local FILESIZE TEMPCURL=$(create_tempfile)
log_report "${OPTIONS[@]}" "${CURL_ARGS[@]}"
command "$PLOWCORE_CURL" "${OPTIONS[@]}" "${CURL_ARGS[@]}" --show-error --silent >"$TEMPCURL" 2>&1 || DRETVAL=$?
FILESIZE=$(get_filesize "$TEMPCURL")
log_report "Received $FILESIZE bytes. DRETVAL=$DRETVAL"
log_report '=== CURL BEGIN ==='
logcat_report "$TEMPCURL"
log_report '=== CURL END ==='
cat "$TEMPCURL"
rm -f "$TEMPCURL"
fi
if [ "$DRETVAL" != 0 ]; then
local INDEX FILE
if INDEX=$(index_in_array CURL_ARGS[@] '-D' '--dump-header'); then
            FILE=${CURL_ARGS[$INDEX]}
if [ -f "$FILE" ]; then
log_debug "deleting temporary HTTP header file: $FILE"
rm -f "$FILE"
fi
fi
if INDEX=$(index_in_array CURL_ARGS[@] '-o' '--output'); then
            FILE=${CURL_ARGS[$INDEX]}
            # Test to reject "-o /dev/null" and final plowdown call
            if [ -f "$FILE" ] && ! find_in_array CURL_ARGS[@] '--globoff'; then
log_debug "deleting temporary output file: $FILE"
rm -f "$FILE"
fi
fi
case $DRETVAL in
# Failed to initialize.
2|27)
log_error "$FUNCNAME: out of memory?"
return $ERR_SYSTEM
;;
# Couldn't resolve host. The given remote host was not resolved.
6)
log_notice "$FUNCNAME: couldn't resolve host"
return $ERR_NETWORK
;;
# Failed to connect to host.
7)
log_notice "$FUNCNAME: couldn't connect to host"
return $ERR_NETWORK
;;
# Partial file
18)
return $ERR_LINK_TEMP_UNAVAILABLE
;;
# HTTP retrieve error / Operation timeout
22|28)
log_error "$FUNCNAME: HTTP retrieve error"
return $ERR_NETWORK
;;
# Write error
23)
log_error "$FUNCNAME: write failed, disk full?"
return $ERR_SYSTEM
;;
# Too many redirects
47)
if ! find_in_array CURL_ARGS[@] '--max-redirs'; then
log_error "$FUNCNAME: too many redirects"
return $ERR_FATAL
fi
;;
# Invalid LDAP URL. See command_not_found_handle()
62)
return $ERR_SYSTEM
;;
*)
log_error "$FUNCNAME: failed with exit code $DRETVAL"
return $ERR_NETWORK
;;
esac
fi
return 0
}
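# Usage sketch for the curl() wrapper above (hypothetical URL and cookie
# file): modules call it like plain curl; default options, logging and
# error-code mapping are added transparently:
#   PAGE=$(curl -b "$COOKIE_FILE" "$BASE_URL/file/abc123") || return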
# Force debug verbose level (unless -v0/-q specified)
curl_with_log() {
curl "$@"
}
# Substring replacement (replace all matches)
#
# stdin: input string
# $1: substring to find (this is not a regexp)
# $2: replacement string (this is not a regexp)
replace_all() {
# Using $(< /dev/stdin) gives same results
local S=$(cat)
# We must escape '\' character
local FROM=${1//\\/\\\\}
echo "${S//$FROM/$2}"
}
# Substring replacement (replace first match)
#
# stdin: input string
# $1: substring to find (this is not a regexp)
# $2: replacement string (this is not a regexp)
replace() {
local S=$(cat)
local FROM=${1//\\/\\\\}
echo "${S/$FROM/$2}"
}
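# Usage sketch (assumed inputs):
#   $ echo 'a.b.c' | replace_all '.' '-'    # prints a-b-c
#   $ echo 'a.b.c' | replace '.' '-'        # prints a-b.c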
# Return uppercase string
# $*: input string(s)
uppercase() {
echo "${*^^}"
}
# Return lowercase string
# $*: input string(s)
lowercase() {
echo "${*,,}"
}
# Grep first line of a text
# $1: (optional) How many head lines to take (default is 1)
# stdin: input string (multiline)
first_line() {
local -r N=${1:-1}
if (( N < 1 )); then
log_error "$FUNCNAME: negative index not expected"
return $ERR_FATAL
fi
# Equivalent to `sed -ne 1p` or `sed -e q` or `sed -e 1q` (N=1 here)
head -n$((N))
}
# Grep last line of a text
# $1: (optional) How many tail lines to take (default is 1)
# stdin: input string (multiline)
last_line() {
local -r N=${1:-1}
if (( N < 1 )); then
log_error "$FUNCNAME: negative index not expected"
return $ERR_FATAL
fi
# Equivalent to `sed -ne '$p'` or `sed -e '$!d'` (N=1 here)
tail -n$((N))
}
# Grep nth line of a text
# stdin: input string (multiline)
# $1: line number (start at index 1)
nth_line() {
local -r N=${1:-1}
if (( N < 1 )); then
log_error "$FUNCNAME: negative index not expected"
return $ERR_FATAL
fi
# Equivalent to `sed -e "${1}q;d"` or `sed -e "${1}!d"`
sed -ne "$((N))p"
}
# Delete fist line(s) of a buffer
# $1: (optional) How many head lines to delete (default is 1)
# stdin: input string (multiline)
delete_first_line() {
local -r N=${1:-1}
if (( N < 1 )); then
log_error "$FUNCNAME: negative index not expected"
return $ERR_FATAL
fi
# Equivalent to `tail -n +2` (if $1=1)
sed -ne "$((N+1)),\$p"
}
# Delete last line(s) of a text
# $1: (optional) How many tail lines to delete (default is 1)
# stdin: input string (multiline)
delete_last_line() {
local -r N=${1:-1}
if (( N < 1 )); then
log_error "$FUNCNAME: negative index not expected"
return $ERR_FATAL
fi
# Equivalent to `head -n -1` (if $1=1)
sed -ne :a -e "1,$N!{P;N;D;};N;ba"
}
# Delete lines up to regexp (included).
# In a nutshell: eat lines until regexp is met; if nothing matches,
# delete lines until the end of input data (not really useful).
# In vim, it would look like this: ":.,/regex/d" or ":.+5,/regex/+1d" ($3=5,$2=1).
#
# Examples:
# $ echo -e "aa\nbb\ncc\ndd\ee" >/tmp/f
# $ delete_filter_line 'cc' </tmp/f # Returns: dd\nee
# $ delete_filter_line 'cc' -1 </tmp/f # Returns: cc\ndd\nee
# $ delete_filter_line 'cc' -2 </tmp/f # Returns: bb\ncc\ndd\nee
# $ delete_filter_line 'cc' 1 </tmp/f # Returns: ee
#
# $1: stop condition regex
# This is non greedy, first occurrence is taken
# $2: (optional): offset, how many line to skip (default is 0) after matching regexp.
# Example ($2=-1): delete lines from line $3 to regexp (excluded)
# $3: (optional): start line number (start at index 1, default is 1)
# stdin: text data (multiline)
# stdout: result
delete_filter_line() {
local -i FUZZ=${2:-0}
local -i N=${3:-1}
local -r D=$'\001' # Change sed separator to allow '/' characters in regexp
local STR FILTER
if [[ ! $1 ]]; then
log_error "$FUNCNAME: invalid regexp, must not be empty"
return $ERR_FATAL
elif [ $N -le 0 ]; then
log_error "$FUNCNAME: wrong argument, start line must be strictly positive ($N given)"
return $ERR_FATAL
elif [ $N -gt 1 -a $FUZZ -lt -1 ]; then
        log_debug "$FUNCNAME: before context ($FUZZ) could duplicate lines (already printed before line $N), continue anyway"
fi
# Notes:
# - We need to be careful when regex matches the first line ($3)
# - This head lines skip ($3) makes things really more complicated
(( --N ))
FILTER="\\${D}$1${D}" # /$1/
if [ $FUZZ -eq 0 ]; then
if (( $N > 0 )); then
# Line $N must be displayed
STR=$(sed -e "${N}p" -e "$N,${FILTER}d")
else
# 0,/regexp/ is valid, match can occur on first line
STR=$(sed -e "$N,${FILTER}d")
fi
elif [ $FUZZ -eq 1 ]; then
if (( $N > 0 )); then
STR=$(sed -e "${N}p" -e "$N,${FILTER}{${FILTER}N;d}")
else
STR=$(sed -e "$N,${FILTER}{${FILTER}N;d}")
fi
elif [ $FUZZ -eq -1 ]; then
if (( $N > 0 )); then
# If regexp matches at line $N do not print it twice
STR=$(sed -e "${N}p" -e "$N,${FILTER}{${N}d;${FILTER}p;d}")
else
STR=$(sed -e "$N,${FILTER}{${FILTER}p;d}")
fi
else
local -i FUZZ_ABS=$(( FUZZ < 0 ? -FUZZ : FUZZ ))
[ $FUZZ_ABS -gt 10 ] &&
log_notice "$FUNCNAME: are you sure you want to skip $((N+1)) lines?"
if [ $FUZZ -gt 0 ]; then
local SKIPS='N'
# printf '=%.0s' {1..n}
while (( --FUZZ_ABS )); do
SKIPS+=';N'
done
if (( $N > 0 )); then
STR=$(sed -e "${N}p" -e "$N,${FILTER}{${FILTER}{${SKIPS}};d}")
else
STR=$(sed -e "$N,${FILTER}{${FILTER}{${SKIPS}};d}")
fi
else
local LINES='.*'
while (( --FUZZ_ABS )); do
LINES+='\n.*'
done
if (( $N > 0 )); then
# Notes: could display duplicated lines when fuzz is below $N
# This is not a bug, just a side effect...
STR=$(sed -e "${N}p" -e "1h;1!H;x;s/^.*\\n\\($LINES\)$/\\1/;x" \
-e "$N,${FILTER}{${N}d;${FILTER}{g;p};d}")
else
STR=$(sed -e "1h;1!H;x;s/^.*\\n\\($LINES\)$/\\1/;x" \
-e "$N,${FILTER}{${FILTER}{g;p};d}")
fi
fi
fi
if [ -z "$STR" ]; then
log_error "$FUNCNAME failed (sed): \"$N,/$1/d\" (skip $FUZZ)"
log_notice_stack
return $ERR_FATAL
fi
echo "$STR"
}
# Check if a string ($2) matches a regexp ($1)
# This is case sensitive.
#
# $?: 0 for success
match() {
if [ -z "$2" ]; then
log_debug "$FUNCNAME: input buffer is empty"
return $ERR_FATAL
else
grep -q -- "$1" <<< "$2"
fi
}
# Check if a string ($2) matches a regexp ($1)
# This is not case sensitive.
#
# $?: 0 for success
matchi() {
if [ -z "$2" ]; then
log_debug "$FUNCNAME: input buffer is empty"
return $ERR_FATAL
else
grep -iq -- "$1" <<< "$2"
fi
}
# Check if URL is suitable for remote upload
#
# $1: string (URL or anything)
# $2..$n: additional URI scheme names to match. For example: "ftp"
# $?: 0 for success
match_remote_url() {
local -r URL=$1
local RET=$ERR_FATAL
[[ $URL =~ ^[[:space:]]*[Hh][Tt][Tt][Pp][Ss]?:// ]] && return 0
shopt -s nocasematch
while [[ $# -gt 1 ]]; do
shift
if [[ $1 =~ ^[[:alpha:]][-.+[:alnum:]]*$ ]]; then
if [[ $URL =~ ^[[:space:]]*$1:// ]]; then
RET=0
break
fi
else
log_error "$FUNCNAME: invalid scheme \`$1'"
break
fi
done
shopt -u nocasematch
return $RET
}
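# Usage sketch (hypothetical URLs):
#   match_remote_url 'http://host/file'          # returns 0
#   match_remote_url 'ftp://host/file' 'ftp'     # returns 0 (extra scheme)
#   match_remote_url 'mailto:user@host' 'ftp'    # returns non-zero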
# Get lines that match filter+parse regular expressions and extract string from it.
#
# $1: regexp to filter (take lines matching $1 pattern; "." or "" disable filtering).
# $2: regexp to parse (must contain parentheses to capture text). Example: "url:'\(http.*\)'"
# $3: (optional) how many lines to skip (default is 0: filter and match regexp on same line).
# Note: $3 may only be used if line filtering is active ($1 != ".")
# Example ($3=1): get lines matching filter regexp, then apply parse regexp on the line after.
# Example ($3=-1): get lines matching filter regexp, then apply parse regexp on the line before.
# stdin: text data
# stdout: result
parse_all() {
local PARSE=$2
local -i N=${3:-0}
local -r D=$'\001' # Change sed separator to allow '/' characters in regexp
local STR FILTER
if [ -n "$1" -a "$1" != '.' ]; then
FILTER="\\${D}$1${D}" # /$1/
elif [ $N -ne 0 ]; then
log_error "$FUNCNAME: wrong argument, offset argument is $N and filter regexp is \"$1\""
return $ERR_FATAL
fi
[ '^' = "${PARSE:0:1}" ] || PARSE="^.*$PARSE"
[ '$' = "${PARSE:(-1):1}" ] || PARSE+='.*$'
PARSE="s${D}$PARSE${D}\1${D}p" # s/$PARSE/\1/p
if [ $N -eq 0 ]; then
# STR=$(sed -ne "/$1/ s/$2/\1/p")
STR=$(sed -ne "$FILTER $PARSE")
elif [ $N -eq 1 ]; then
# Note: Loop (with label) is required for consecutive matches
# STR=$(sed -ne ":a /$1/ {n;h; s/$2/\1/p; g;ba;}")
STR=$(sed -ne ":a $FILTER {n;h; $PARSE; g;ba;}")
elif [ $N -eq -1 ]; then
# STR=$(sed -ne "/$1/ {x; s/$2/\1/p; b;}" -e 'h')
STR=$(sed -ne "$FILTER {x; $PARSE; b;}" -e 'h')
else
local -r FIRST_LINE='^\([^\n]*\).*$'
local -r LAST_LINE='^.*\n\(.*\)$'
local N_ABS=$(( N < 0 ? -N : N ))
local I=$(( N_ABS - 2 )) # Note: N_ABS >= 2 due to "elif" above
local LINES='.*'
local INIT='N'
local FILTER_LINE PARSE_LINE
[ $N_ABS -gt 10 ] &&
log_notice "$FUNCNAME: are you sure you want to skip $N lines?"
while (( I-- )); do
INIT+=';N'
done
while (( N_ABS-- )); do
LINES+='\n.*'
done
if [ $N -gt 0 ]; then
FILTER_LINE=$FIRST_LINE
PARSE_LINE=$LAST_LINE
else
FILTER_LINE=$LAST_LINE
PARSE_LINE=$FIRST_LINE
fi
STR=$(sed -ne "1 {$INIT;h;n}" \
-e "H;g;s/^.*\\n\\($LINES\)$/\\1/;h" \
-e "s/$FILTER_LINE/\1/" \
-e "$FILTER {g;s/$PARSE_LINE/\1/;$PARSE }")
# Explanation: [1], [2] let hold space always contain the current line
# as well as the previous N lines
# [3] let pattern space contain only the line we test filter regex
# on (i.e. first buffered line on skip > 0, last line on skip < 0)
# [4] if filter regex matches, let pattern space contain the line to
# be parsed and apply parse command
fi
if [ -z "$STR" ]; then
log_error "$FUNCNAME failed (sed): \"/$1/ ${PARSE//$D//}\" (skip $N)"
log_notice_stack
return $ERR_FATAL
fi
echo "$STR"
}
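# Usage sketch (assumed two-line input; offset 1 parses the line after the
# one matching the filter regexp):
#   $ printf '<div id="dl">\nhttp://x/y\n' | parse_all 'id="dl"' '^\(http.*\)$' 1
#   http://x/y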
# Like parse_all, but hide possible error
parse_all_quiet() {
parse_all "$@" 2>/dev/null
return 0
}
# Like parse_all, but get only first match
parse() {
local PARSE=$2
local -i N=${3:-0}
local -r D=$'\001' # Change sed separator to allow '/' characters in regexp
local STR FILTER
if [ -n "$1" -a "$1" != '.' ]; then
FILTER="\\${D}$1${D}" # /$1/
elif [ $N -ne 0 ]; then
log_error "$FUNCNAME: wrong argument, offset argument is $N and filter regexp is \"$1\""
return $ERR_FATAL
fi
[ '^' = "${PARSE:0:1}" ] || PARSE="^.*$PARSE"
[ '$' = "${PARSE:(-1):1}" ] || PARSE+='.*$'
PARSE="s${D}$PARSE${D}\1${D}p" # s/$PARSE/\1/p
if [ $N -eq 0 ]; then
# Note: This requires GNU sed (which is assumed by Plowshare)
#STR=$(sed -ne "$FILTER {$PARSE;ta;b;:a;q;}")
STR=$(sed -ne "$FILTER {$PARSE;T;q;}")
elif [ $N -eq 1 ]; then
#STR=$(sed -ne ":a $FILTER {n;h;$PARSE;tb;ba;:b;q;}")
STR=$(sed -ne ":a $FILTER {n;$PARSE;Ta;q;}")
elif [ $N -eq -1 ]; then
#STR=$(sed -ne "$FILTER {g;$PARSE;ta;b;:a;q;}" -e 'h')
STR=$(sed -ne "$FILTER {g;$PARSE;T;q;}" -e 'h')
else
local -r FIRST_LINE='^\([^\n]*\).*$'
local -r LAST_LINE='^.*\n\(.*\)$'
local N_ABS=$(( N < 0 ? -N : N ))
local I=$(( N_ABS - 2 ))
local LINES='.*'
local INIT='N'
local FILTER_LINE PARSE_LINE
[ $N_ABS -gt 10 ] &&
log_notice "$FUNCNAME: are you sure you want to skip $N lines?"
while (( I-- )); do
INIT+=';N'
done
while (( N_ABS-- )); do
LINES+='\n.*'
done
if [ $N -gt 0 ]; then
FILTER_LINE=$FIRST_LINE
PARSE_LINE=$LAST_LINE
else
FILTER_LINE=$LAST_LINE
PARSE_LINE=$FIRST_LINE
fi
        # Note: Need to "clean" the conditional flag after s/$PARSE_LINE/\1/
STR=$(sed -ne "1 {$INIT;h;n}" \
-e "H;g;s/^.*\\n\\($LINES\)$/\\1/;h" \
-e "s/$FILTER_LINE/\1/" \
-e "$FILTER {g;s/$PARSE_LINE/\1/;ta;:a;$PARSE;T;q;}")
fi
if [ -z "$STR" ]; then
log_error "$FUNCNAME failed (sed): \"/$1/ ${PARSE//$D//}\" (skip $N)"
log_notice_stack
return $ERR_FATAL
fi
echo "$STR"
}
# Like parse, but hide possible error
parse_quiet() {
parse "$@" 2>/dev/null
return 0
}
# Simple and limited JSON parsing
#
# Notes:
# - Single line parsing oriented (user should strip newlines first): no tree model
# - Array and Object types: basic poor support (depth 1 without complex types)
# - String type: support for escaped unicode characters (\uXXXX) with bash >= 4.2 (and proper locale)
# - No non standard C/C++ comments handling (like in JSONP)
# - If several entries exist on same line: last occurrence is taken, but:
# consider precedence (order of priority): number, boolean/empty, string.
# - If several entries exist on different lines: all are returned (it's a parse_all_json)
#
# $1: variable name (string)
# $2: (optional) preprocess option. Accepted values are:
# - "join": make a single line of input stream.
# - "split": split input buffer on comma character (,).
# stdin: JSON data
# stdout: result
parse_json() {
local -r NAME="\"$1\"[[:space:]]*:[[:space:]]*"
local STR PRE
    # Note: Be nice with unicode chars and don't use $ (end-of-line),
    # because dot will not match everything.
local -r END='\([,}[:space:]].*\)\?'
if [ "$2" = 'join' ]; then
PRE="tr -d '\n\r'"
elif [ "$2" = 'split' ]; then
PRE=sed\ -e\ 's/,[[:space:]]*\(["{]\)/\n\1/g'
else
PRE='cat'
fi
    # Note: "ta;:a" is a trick for cleaning the conditional flag
STR=$($PRE | sed \
-ne "/$NAME\[/{s/^.*$NAME\(\[[^]]*\]\).*$/\1/;ta;:a;s/^\[.*\[//;t;p;q;}" \
-ne "/$NAME{/{s/^.*$NAME\({[^}]*}\).*$/\1/;ta;:a;s/^{.*{//;t;p;q;}" \
-ne "s/^.*$NAME\(-\?\(0\|[1-9][[:digit:]]*\)\(\.[[:digit:]]\+\)\?\([eE][-+]\?[[:digit:]]\+\)\?\)$END/\1/p" \
-ne "s/^.*$NAME\(true\|false\|null\)$END/\1/p" \
-ne "s/\\\\\"/\\\\q/g;s/^.*$NAME\"\([^\"]*\)\"$END/\1/p")
if [ -z "$STR" ]; then
log_error "$FUNCNAME failed (json): \"$1\""
log_notice_stack
return $ERR_FATAL
fi
# Translate two-character sequence escape representations
STR=${STR//\\\//\/}
STR=${STR//\\\\/\\}
STR=${STR//\\q/\"}
STR=${STR//\\b/$'\b'}
STR=${STR//\\f/$'\f'}
STR=${STR//\\n/$'\n'}
STR=${STR//\\r/$'\r'}
    STR=${STR//\\t/$'\t'}
echo -e "$STR"
}
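# Usage sketch (hypothetical JSON input):
#   $ echo '{"id":42,"name":"foo bar"}' | parse_json 'name'
#   foo bar
#   $ echo '{"ok":true}' | parse_json 'ok'
#   true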
# Like parse_json, but hide possible error
parse_json_quiet() {
parse_json "$@" 2>/dev/null
return 0
}
# Check if JSON variable is true
#
# $1: JSON variable name
# $2: JSON data
# $? is zero on success
match_json_true() {
grep -q "\"$1\"[[:space:]]*:[[:space:]]*true" <<< "$2"
}
# Grep "Xxx" HTTP header. Can be:
# - Location
# - Content-Location
# - Content-Type
# - Content-Length
#
# Notes:
# - This is using parse_all, so result can be multiline
# (rare usage is: curl -I -L ...).
# - Use [:cntrl:] instead of \r because Busybox sed <1.19
# does not support it.
#
# stdin: result of curl request (with -i/--include, -D/--dump-header
# or -I/--head flag)
# stdout: result
grep_http_header_location() {
parse_all '^[Ll]ocation:' 'n:[[:space:]]\+\(.*\)[[:cntrl:]]$'
}
grep_http_header_location_quiet() {
parse_all '^[Ll]ocation:' 'n:[[:space:]]\+\(.*\)[[:cntrl:]]$' 2>/dev/null
return 0
}
grep_http_header_content_location() {
parse_all '^[Cc]ontent-[Ll]ocation:' 'n:[[:space:]]\+\(.*\)[[:cntrl:]]$'
}
grep_http_header_content_type() {
parse_all '^[Cc]ontent-[Tt]ype:' 'e:[[:space:]]\+\(.*\)[[:cntrl:]]$'
}
grep_http_header_content_length() {
parse_all '^[Cc]ontent-[Ll]ength:' 'h:[[:space:]]\+\(.*\)[[:cntrl:]]$'
}
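# Usage sketch (hypothetical URL; -i keeps response headers in the output):
#   LOCATION=$(curl -i "$URL" | grep_http_header_location) || return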
# Grep "Content-Disposition" HTTP header
# See RFC5987 and RFC2183.
#
# stdin: HTTP response headers (see below)
# stdout: attachment filename
grep_http_header_content_disposition() {
parse_all '^[Cc]ontent-[Dd]isposition:' "filename\*\?=[\"']\?\([^\"'[:cntrl:]]*\)"
}
# Extract a named form from a HTML content.
# Notes:
# - if several forms (with same name) are available: return all of them
# - start marker <form> and end marker </form> must be on separate lines
# - HTML comments are just ignored
#
# $1: (X)HTML data
# $2: (optional) "name" attribute of <form> marker.
# If not specified: take forms having any "name" attribute (empty or not)
# stdout: result
grep_form_by_name() {
local -r A=${2:-'.*'}
local STR=$(sed -ne \
"/<[Ff][Oo][Rr][Mm][[:space:]].*name[[:space:]]*=[[:space:]]*[\"']\?$A[\"']\?[[:space:]/>]/,/<\/[Ff][Oo][Rr][Mm]>/p" <<< "$1")
if [ -z "$STR" ]; then
log_error "$FUNCNAME failed (sed): \"name=$A\""
return $ERR_FATAL
fi
echo "$STR"
}
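# Usage sketch (assumed $PAGE holds HTML with <form name="login"> and
# </form> on separate lines):
#   FORM=$(grep_form_by_name "$PAGE" 'login') || return
#   ACTION=$(echo "$FORM" | parse_form_action) || return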
# Extract a id-specified form from a HTML content.
# Notes:
# - if several forms (with same name) are available: return all of them
# - start marker <form> and end marker </form> must be on separate lines
# - HTML comments are just ignored
#
# $1: (X)HTML data
# $2: (optional) "id" attribute of <form> marker.
# If not specified: take forms having any "id" attribute (empty or not)
# stdout: result
grep_form_by_id() {
local -r A=${2:-'.*'}
local STR=$(sed -ne \
"/<[Ff][Oo][Rr][Mm][[:space:]].*id[[:space:]]*=[[:space:]]*[\"']\?$A[\"']\?[[:space:]/>]/,/<\/[Ff][Oo][Rr][Mm]>/p" <<< "$1")
if [ -z "$STR" ]; then
log_error "$FUNCNAME failed (sed): \"id=$A\""
return $ERR_FATAL
fi
echo "$STR"
}
# Extract a specific FORM block from a HTML content.
# $1: (X)HTML data
# $2: (optional) Nth <form> block
grep_form_by_order() {
grep_block_by_order '[Ff][Oo][Rr][Mm]' "$@"
}
# Extract a specific SCRIPT block from a HTML content.
# $1: (X)HTML data
# $2: (optional) Nth <script> block
grep_script_by_order() {
grep_block_by_order '[Ss][Cc][Rr][Ii][Pp][Tt]' "$@"
}
# Split into several lines html markers.
# Insert a new line after ending marker.
#
# stdin: (X)HTML data
# stdout: result
break_html_lines() {
sed -e 's/<\/[^>]*>/&\n/g'
}
# Split into several lines html markers.
# Insert a new line after each (beginning or ending) marker.
#
# stdin: (X)HTML data
# stdout: result
break_html_lines_alt() {
sed -e 's/<[^>]*>/&\n/g'
}
# Delete html comments
# Credits: http://sed.sourceforge.net/grabbag/scripts/strip_html_comments.sed
# stdin: (X)HTML data
# stdout: result
strip_html_comments() {
sed -e '/<!--/!b' -e ':a' -e '/-->/!{N;ba;}' -e 's/<!--.*-->//'
}
# Parse single named HTML marker content
# <tag>..</tag>
# <tag attr="x">..</tag>
# Notes:
# - beginning and ending tag are on the same line
# - this is non greedy, first occurrence is taken
# - marker is case sensitive, though it should not be
# - "parse_xxx tag" is a shortcut for "parse_xxx tag tag"
#
# $1: (optional) regexp to filter (take lines matching $1 pattern)
# $2: tag name. Example: "span"
# stdin: (X)HTML data
# stdout: result
parse_all_tag() {
local -r T=${2:-"$1"}
local -r D=$'\001'
local STR=$(sed -ne \
"\\${D}$1${D}"'!b; s/<\/'"$T>.*//; s/^.*<$T\(>\|[[:space:]][^>]*>\)//p")
if [ -z "$STR" ]; then
log_error "$FUNCNAME failed (sed): \"/$1/ <$T>\""
log_notice_stack
return $ERR_FATAL
fi
echo "$STR"
}
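# Usage sketch (opening and closing tag on the same line):
#   $ echo '<span class="x">42</span>' | parse_all_tag 'class="x"' 'span'
#   42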
# Like parse_all_tag, but hide possible error
parse_all_tag_quiet() {
parse_all_tag "$@" 2>/dev/null
return 0
}
# Parse single named HTML marker content (first match only)
# See parse_all_tag() for documentation.
parse_tag() {
local -r T=${2:-"$1"}
local -r D=$'\001'
local STR=$(sed -ne \
"\\${D}$1${D}"'!b; s/<\/'"$T>.*//; s/^.*<$T\(>\|[[:space:]][^>]*>\)//;ta;b;:a;p;q;")
if [ -z "$STR" ]; then
log_error "$FUNCNAME failed (sed): \"/$1/ <$T>\""
log_notice_stack
return $ERR_FATAL
fi
echo "$STR"
}
# Like parse_tag, but hide possible error
parse_tag_quiet() {
parse_tag "$@" 2>/dev/null
return 0
}
# Parse HTML attribute content
# http://www.w3.org/TR/html-markup/syntax.html#syntax-attributes
# Notes:
# - empty attribute syntax is not supported (ex: <input disabled>)
# - this is greedy, last occurrence is taken
# - attribute is case sensitive, though it should not be
# - "parse_xxx attr" is a shortcut for "parse_xxx attr attr"
#
# $1: (optional) regexp to filter (take lines matching $1 pattern)
# $2: attribute name. Examples: "href" or "b\|i\|u"
# stdin: (X)HTML data
# stdout: result
parse_all_attr() {
local -r A=${2:-"$1"}
local -r D=$'\001'
local STR=$(sed \
-ne "\\${D}$1${D}s${D}.*[[:space:]]\($A\)[[:space:]]*=[[:space:]]*\"\([^\">]*\).*${D}\2${D}p" \
-ne "\\${D}$1${D}s${D}.*[[:space:]]\($A\)[[:space:]]*=[[:space:]]*'\([^'>]*\).*${D}\2${D}p" \
-ne "\\${D}$1${D}s${D}.*[[:space:]]\($A\)[[:space:]]*=[[:space:]]*\([^[:space:]\"\`'<=>]\+\).*${D}\2${D}p")
if [ -z "$STR" ]; then
log_error "$FUNCNAME failed (sed): \"/$1/ $A=\""
log_notice_stack
return $ERR_FATAL
fi
echo "$STR"
}
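# Usage sketch:
#   $ echo '<a href="http://x/y">link</a>' | parse_all_attr '<a[[:space:]]' 'href'
#   http://x/y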
# Like parse_all_attr, but hide possible error
parse_all_attr_quiet() {
parse_all_attr "$@" 2>/dev/null
return 0
}
# Parse HTML attribute content (first match only)
# See parse_all_attr() for documentation.
parse_attr() {
local -r A=${2:-"$1"}
local -r D=$'\001'
local STR=$(sed \
-ne "\\${D}$1${D}s${D}.*[[:space:]]\($A\)[[:space:]]*=[[:space:]]*\"\([^\">]*\).*${D}\2${D}p;ta" \
-ne "\\${D}$1${D}s${D}.*[[:space:]]\($A\)[[:space:]]*=[[:space:]]*'\([^'>]*\).*${D}\2${D}p;ta" \
-ne "\\${D}$1${D}s${D}.*[[:space:]]\($A\)[[:space:]]*=[[:space:]]*\([^[:space:]\"\`'<=>]\+\).*${D}\2${D}p;ta" \
-ne 'b;:a;q;')
if [ -z "$STR" ]; then
log_error "$FUNCNAME failed (sed): \"/$1/ $A=\""
log_notice_stack
return $ERR_FATAL
fi
echo "$STR"
}
# Like parse_attr, but hide possible error
parse_attr_quiet() {
parse_attr "$@" 2>/dev/null
return 0
}
# Retrieve "action" attribute (URL) from a <form> marker
#
# stdin: (X)HTML data (ideally, call grep_form_by_xxx before)
# stdout: result
parse_form_action() {
parse_attr '<[Ff][Oo][Rr][Mm]' 'action'
}
# Retrieve "value" attribute from an <input> marker with "name" attribute
# Note: "value" attribute must be on same line as "name" attribute.
#
# $1: name attribute of <input> marker
# stdin: (X)HTML data
# stdout: result (can be null string if <input> has no value attribute)
parse_form_input_by_name() {
parse_attr "<[Ii][Nn][Pp][Uu][Tt][^>]*name[[:space:]]*=[[:space:]]*[\"']\?$1[\"']\?[[:space:]/>]" 'value'
}
# Like parse_form_input_by_name, but hide possible error
parse_form_input_by_name_quiet() {
parse_form_input_by_name "$@" 2>/dev/null
return 0
}
# Retrieve "value" attribute from an <input> marker with "type" attribute
# Note: "value" attribute must be on same line as "type" attribute.
#
# $1: type attribute of <input> marker (for example: "submit")
# stdin: (X)HTML data
# stdout: result (can be null string if <input> has no value attribute)
parse_form_input_by_type() {
parse_attr "<[Ii][Nn][Pp][Uu][Tt][^>]*type[[:space:]]*=[[:space:]]*[\"']\?$1[\"']\?[[:space:]/>]" 'value'
}
# Like parse_form_input_by_type, but hide possible error
parse_form_input_by_type_quiet() {
parse_form_input_by_type "$@" 2>/dev/null
return 0
}
# Retrieve "value" attribute from an <input> marker with "id" attribute
# Note: "value" attribute must be on same line as "id" attribute.
#
# $1: id attribute of <input> marker
# stdin: (X)HTML data
# stdout: result (can be null string if <input> has no value attribute)
parse_form_input_by_id() {
parse_attr "<[Ii][Nn][Pp][Uu][Tt][^>]*id[[:space:]]*=[[:space:]]*[\"']\?$1[\"']\?[[:space:]/>]" 'value'
}
# Like parse_form_input_by_id, but hide possible error
parse_form_input_by_id_quiet() {
parse_form_input_by_id "$@" 2>/dev/null
return 0
}
# Get specific entry (value) from cookie
#
# $1: entry name (example: "lang")
# stdin: cookie data (netscape/mozilla cookie file format)
# stdout: result (can be null string if no such entry exists)
parse_cookie() {
parse_all "\t$1\t[^\t]*\$" "\t$1\t\(.*\)"
}
parse_cookie_quiet() {
parse_all "\t$1\t[^\t]*\$" "\t$1\t\(.*\)" 2>/dev/null
return 0
}
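# Usage sketch (netscape-format cookie file, e.g. written by
# curl --cookie-jar):
#   SID=$(parse_cookie 'PHPSESSID' < "$COOKIE_FILE") || return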
# Return base of URL
# Examples:
# - http://www.host.com => http://www.host.com
# - http://www.host.com/a/b/c/d => http://www.host.com
# - http://www.host.com?sid=123 => http://www.host.com
# - ftp://hostme.net:1234/incoming/testfile.txt => ftp://hostme.net:1234
#
# $1: string (URL)
# stdout: URL composed of scheme + authority. No ending slash character.
basename_url() {
if [[ $1 =~ ([Hh][Tt][Tt][Pp][Ss]?|[Ff][Tt][Pp][Ss]?|[Ff][Ii][Ll][Ee])://[^/?#]* ]]; then
echo "${BASH_REMATCH[0]}"
else
echo "$1"
fi
}
# Return basename of file path
# Example: /usr/bin/foo.bar => foo.bar
#
# $1: filename
basename_file() {
basename -- "$1" || return $ERR_SYSTEM
}
# HTML entities will be translated
#
# stdin: data
# stdout: data (converted)
html_to_utf8() {
if check_exec 'recode'; then
log_report "$FUNCNAME: use recode"
recode html..utf8
elif check_exec 'perl'; then
log_report "$FUNCNAME: use perl"
perl -n -mHTML::Entities \
-e 'BEGIN { eval { binmode(STDOUT,q[:utf8]); }; } \
print HTML::Entities::decode_entities($_);' 2>/dev/null || { \
log_debug "$FUNCNAME failed (perl): HTML::Entities missing ?";
cat;
}
else
log_notice 'recode binary not found, pass-through'
cat
fi
}
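# Usage sketch (needs recode, or perl with HTML::Entities):
#   $ echo 'Tom &amp; Jerry' | html_to_utf8   # prints: Tom & Jerry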
# Encode a text to include into an url.
# - Reserved Characters (18): !*'();:@&=+$,/?#[]
# - Check for percent (%) and space character
#
# - Unreserved Characters: ALPHA / DIGIT / "-" / "." / "_" / "~"
# - Unsafe characters (RFC2396) should not be percent-encoded anymore: <>{}|\^`
#
# stdin: data (example: relative URL)
# stdout: data (should be compliant with RFC3986)
uri_encode_strict() {
sed -e '
s/\%/%25/g
s/ /%20/g
s/!/%21/g
s/#/%23/g
s/\$/%24/g
s/&/%26/g
s/'\''/%27/g
s/(/%28/g
s/)/%29/g
s/*/%2A/g
s/+/%2B/g
s/,/%2C/g
s|/|%2F|g
s/:/%3A/g
s/;/%3B/g
s/=/%3D/g
s/?/%3F/g
s/@/%40/g
s/\[/%5B/g
s/\]/%5D/g' #'# Help vim syntax highlighting
}
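# Usage sketch:
#   $ echo 'a&b c' | uri_encode_strict   # prints a%26b%20c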
# Encode a complete url.
# - check for space character and squares brackets
# - do not check for "reserved characters" (use "uri_encode_strict" for that)
#
# Bad encoded URL request can lead to HTTP error 400.
# curl doesn't do any checks, whereas wget convert provided url.
#
# stdin: data (example: absolute URL)
# stdout: data (nearly compliant with RFC3986)
uri_encode() {
sed -e '
s/ /%20/g
s/\[/%5B/g
s/\]/%5D/g'
}
# Decode a complete url.
# - Reserved characters (9): ():&=+,/[]
# - Check for space character
#
# stdin: data (example: absolute URL)
# stdout: data (nearly compliant with RFC3986)
uri_decode() {
sed -e '
s/%20/ /g
s/%21/!/g
s/%26/\&/g
s/%28/(/g
s/%29/)/g
s/%2B/+/g
s/%2C/,/g
s|%2F|/|g
s/%3A/:/g
s/%3D/=/g
s/%40/@/g
s/%5B/\[/g
s/%5D/\]/g'
}
# Retrieves size of file
#
# $1: filename
# stdout: file length (in bytes)
get_filesize() {
local FILE_SIZE=$(ls -l "$1" 2>/dev/null | sed -e 's/[[:space:]]\+/ /g' | cut -d' ' -f5)
if [ -z "$FILE_SIZE" ]; then
log_error "$FUNCNAME: error accessing \`$1'"
return $ERR_SYSTEM
fi
echo "$FILE_SIZE"
}
# Create a tempfile and return path
# Note for later: use mktemp (GNU coreutils)
#
# $1: (optional) filename suffix
create_tempfile() {
local -r SUFFIX=$1
local FILE="$TMPDIR/$(basename_file "$0").$$.$RANDOM$SUFFIX"
if touch "$FILE" && chmod 600 "$FILE"; then
echo "$FILE"
return 0
fi
return $ERR_SYSTEM
}
# User password entry
#
# stdout: entered password (can be null string)
# $? is non zero if no password
prompt_for_password() {
local PASSWORD
log_notice 'No password specified, enter it now (1 hour timeout)'
# Unset IFS to consider trailing and leading spaces
IFS= read -s -r -t 3600 -p 'Enter password: ' PASSWORD
# Add missing trailing newline (see read -p)
stderr
test -z "$PASSWORD" && return $ERR_LINK_PASSWORD_REQUIRED
echo "$PASSWORD"
}
# Login and return cookie.
# A non-empty cookie file does not mean that login was successful.
#
# $1: String 'username:password' (password can contain semicolons)
# $2: Cookie filename (see create_tempfile() modules)
# $3: Postdata string (ex: 'user=$USER&password=$PASSWORD')
# $4: URL to post
# $5, $6, ...: Additional curl arguments (optional)
# stdout: html result (can be null string)
# $? is zero on success
post_login() {
local -r AUTH=$1
local -r COOKIE=$2
local -r POSTDATA=$3
local -r LOGIN_URL=$4
shift 4
local -a CURL_ARGS=("$@")
local USER PASSWORD DATA RESULT
if [ -z "$AUTH" ]; then
log_error "$FUNCNAME: authentication string is empty"
return $ERR_LOGIN_FAILED
fi
if [ -z "$COOKIE" ]; then
log_error "$FUNCNAME: cookie file expected"
return $ERR_LOGIN_FAILED
fi
# Seem faster than
# IFS=':' read -r USER PASSWORD <<< "$AUTH"
USER=$(echo "${AUTH%%:*}" | uri_encode_strict)
PASSWORD=$(echo "${AUTH#*:}" | uri_encode_strict)
if [ -z "$PASSWORD" -o "$AUTH" = "${AUTH#*:}" ]; then
PASSWORD=$(prompt_for_password) || true
fi
log_notice "Starting login process: $USER/${PASSWORD//?/*}"
DATA=$(eval echo "${POSTDATA//&/\\&}")
RESULT=$(curl --cookie-jar "$COOKIE" --data "$DATA" "${CURL_ARGS[@]}" \
"$LOGIN_URL") || return
# $RESULT can be empty, this is not necessarily an error
if [ ! -s "$COOKIE" ]; then
log_debug "$FUNCNAME: no entry was set (empty cookie file)"
return $ERR_LOGIN_FAILED
fi
log_report '=== COOKIE BEGIN ==='
logcat_report "$COOKIE"
log_report '=== COOKIE END ==='
if ! find_in_array CURL_ARGS[@] '-o' '--output'; then
echo "$RESULT"
fi
}
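# Usage sketch (hypothetical site and form fields; note the single quotes:
# $USER and $PASSWORD are expanded inside post_login after URI-encoding):
#   post_login "$AUTH" "$COOKIE_FILE" 'user=$USER&pass=$PASSWORD' \
#       'http://example.com/login' -b 'lang=en' >/dev/null || return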
# Detect if a JavaScript interpreter is installed
# $? is zero on success
detect_javascript() {
if ! type -P "$PLOWCORE_JS" >/dev/null 2>&1; then
log_notice 'Javascript interpreter not found. Please install one!'
return $ERR_SYSTEM
fi
}
# Execute javascript code
#
# stdin: js script
# stdout: script result
javascript() {
local TEMPSCRIPT
detect_javascript || return
TEMPSCRIPT=$(create_tempfile '.js') || return
cat > "$TEMPSCRIPT"
    log_report "interpreter: $(type -P "$PLOWCORE_JS")"
log_report '=== JAVASCRIPT BEGIN ==='
logcat_report "$TEMPSCRIPT"
log_report '=== JAVASCRIPT END ==='
command "$PLOWCORE_JS" "$TEMPSCRIPT"
rm -f "$TEMPSCRIPT"
return 0
}
# Wait some time
# Related to -t/--timeout command line option
#
# $1: Sleep duration
# $2: Unit (seconds | minutes)
wait() {
local -r VALUE=$1
local -r UNIT=$2
local UNIT_STR TOTAL_SECS
if test "$VALUE" = '0'; then
log_debug 'wait called with null duration'
return 0
fi
if [ "$UNIT" = 'minutes' ]; then
UNIT_STR=minutes
TOTAL_SECS=$((VALUE * 60))
else
UNIT_STR=seconds
TOTAL_SECS=$((VALUE))
fi
timeout_update $TOTAL_SECS || return
local REMAINING=$TOTAL_SECS
local MSG="Waiting $VALUE $UNIT_STR..."
local CLEAR=" \b\b\b\b\b"
if test -t 2; then
local START_DATE=$(date +%s)
CONT_SIGNAL=
while [ "$REMAINING" -gt 0 ]; do
log_notice_norc "\r$MSG $(splitseconds $REMAINING) left$CLEAR"
sleep 1
if [[ $CONT_SIGNAL ]]; then
local -i TMP_SECS
(( TMP_SECS = TOTAL_SECS - (CONT_SIGNAL - START_DATE) ))
(( TMP_SECS >= 0 )) || TMP_SECS=0
CONT_SIGNAL=
log_debug "SIGCONT detected, adjust wait time ($REMAINING => $TMP_SECS)"
REMAINING=$TMP_SECS
else
(( --REMAINING ))
fi
done
log_notice_norc "\r$MSG done$CLEAR\n"
else
log_notice "$MSG"
sleep $TOTAL_SECS
fi
}
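# Usage sketch for wait() (unit is 'seconds' or 'minutes'; the global
# -t/--timeout limit is honored via timeout_update):
#   wait 45 seconds || return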
# $1: Image filename (local with full path or remote). No specific image format expected.
# $2: captcha type or hint. For example: digit, letter, alnum
# $3: (optional) minimal captcha length
# $4: (optional) maximal captcha length (unused)
# stdout: On 2 lines: <word> \n <transaction_id>
# nothing is echoed in case of error
#
# Important note: input image ($1) is deleted in case of error
captcha_process() {
local -r CAPTCHA_TYPE=$2
local METHOD_SOLVE METHOD_VIEW FILENAME RESPONSE WORD I FBDEV
local TID=0
if [ -f "$1" ]; then
FILENAME=$1
elif match_remote_url "$1"; then
FILENAME=$(create_tempfile '.captcha') || return
curl -o "$FILENAME" "$1" || return
else
log_error "$FUNCNAME: image file not found"
return $ERR_FATAL
fi
if [ ! -s "$FILENAME" ]; then
log_error "$FUNCNAME: empty image file"
return $ERR_FATAL
fi
# plowdown/plowup/plowdel --captchaprogram
if [ -n "$CAPTCHA_PROGRAM" ]; then
local RET=0
WORD=$(exec "$CAPTCHA_PROGRAM" "$MODULE" "$FILENAME" "${CAPTCHA_TYPE}${3:+-$3}") || RET=$?
if [ $RET -eq 0 ]; then
echo "$WORD"
echo $TID
return 0
elif [ $RET -ne $ERR_NOMODULE ]; then
log_error "captchaprogram exit with status $RET"
return $RET
fi
fi
# plowdown/plowup/plowdel --captchamethod
if [ -n "$CAPTCHA_METHOD" ]; then
captcha_method_translate "$CAPTCHA_METHOD" METHOD_SOLVE METHOD_VIEW
# Auto-guess mode (solve)
else
METHOD_SOLVE='online,prompt'
fi
if [[ $METHOD_SOLVE = *online* ]]; then
if service_antigate_ready "$CAPTCHA_ANTIGATE"; then
METHOD_SOLVE=antigate
: ${METHOD_VIEW:=log}
elif service_9kweu_ready "$CAPTCHA_9KWEU"; then
METHOD_SOLVE=9kweu
: ${METHOD_VIEW:=log}
elif service_captchabrotherhood_ready "${CAPTCHA_BHOOD%%:*}" "${CAPTCHA_BHOOD#*:}"; then
METHOD_SOLVE=captchabrotherhood
: ${METHOD_VIEW:=log}
elif service_captchacoin_ready "$CAPTCHA_COIN"; then
METHOD_SOLVE=captchacoin
: ${METHOD_VIEW:=log}
elif service_captchadeathby_ready "${CAPTCHA_DEATHBY%%:*}" "${CAPTCHA_DEATHBY#*:}"; then
METHOD_SOLVE=deathbycaptcha
: ${METHOD_VIEW:=log}
elif [ -n "$CAPTCHA_METHOD" ]; then
log_error 'No online recognition service found! Captcha solving request will fail.'
METHOD_SOLVE=none
fi
fi
# Fallback (solve)
if [ "$METHOD_SOLVE" != "${METHOD_SOLVE//,}" ]; then
METHOD_SOLVE=${METHOD_SOLVE##*,}
log_debug "no captcha solving method found: fallback to \`$METHOD_SOLVE'."
fi
# Auto-guess mode (view)
    # Is the current terminal a virtual console (not a pseudo tty)?
    # Framebuffer viewers need one.
if [[ "$(tty)" = /dev/tty* ]]; then
: ${METHOD_VIEW:=view-x,view-fb,view-aa,log}
else
: ${METHOD_VIEW:=view-x,view-aa,log}
fi
# 1) Probe for X11/Xorg viewers
if [[ $METHOD_VIEW = *view-x* ]]; then
if test -z "$DISPLAY"; then
log_notice 'DISPLAY variable not exported! Skip X11 viewers probing.'
elif check_exec 'display'; then
METHOD_VIEW=X-display
elif check_exec 'feh'; then
METHOD_VIEW=X-feh
elif check_exec 'sxiv'; then
METHOD_VIEW=X-sxiv
elif check_exec 'qiv'; then
METHOD_VIEW=X-qiv
else
log_notice 'No X11 image viewer found to display captcha image'
fi
fi
# 2) Probe for framebuffer viewers
if [[ $METHOD_VIEW = *view-fb* ]]; then
if test -n "$FRAMEBUFFER"; then
log_notice 'FRAMEBUFFER variable is not empty, use it.'
FBDEV=$FRAMEBUFFER
else
FBDEV=/dev/fb0
fi
if ! test -c "$FBDEV"; then
log_notice "$FBDEV not found! Skip FB viewers probing."
elif check_exec 'fbi'; then
METHOD_VIEW=fb-fbi
elif check_exec 'fim'; then
METHOD_VIEW=fb-fim
else
log_notice 'No FB image viewer found to display captcha image'
fi
fi
# 3) Probe for ascii viewers
# Try to maximize the image size on terminal
local MAX_OUTPUT_WIDTH MAX_OUTPUT_HEIGHT
if [[ $METHOD_VIEW = *view-aa* ]]; then
# libcaca
if check_exec img2txt; then
METHOD_VIEW=img2txt
# terminal image view (perl script using Image::Magick)
elif check_exec tiv; then
METHOD_VIEW=tiv
# libaa
elif check_exec aview && check_exec convert; then
METHOD_VIEW=aview
else
log_notice 'No ascii viewer found to display captcha image'
fi
if [[ $METHOD_VIEW != *view-aa* ]]; then
if check_exec tput; then
local TYPE
if [ -z "$TERM" -o "$TERM" = 'dumb' ]; then
log_notice 'Invalid $TERM value. Terminal type forced to vt100.'
TYPE='-Tvt100'
fi
MAX_OUTPUT_WIDTH=$(tput $TYPE cols)
MAX_OUTPUT_HEIGHT=$(tput $TYPE lines)
else
# Try environment variables
MAX_OUTPUT_WIDTH=${COLUMNS:-150}
MAX_OUTPUT_HEIGHT=${LINES:-57}
fi
if check_exec identify; then
local -i W H
read -r W H < <(identify -quiet -format '%w %h' "$FILENAME")
[ "$W" -lt "$MAX_OUTPUT_WIDTH" ] && MAX_OUTPUT_WIDTH=$W
[ "$H" -lt "$MAX_OUTPUT_HEIGHT" ] && MAX_OUTPUT_HEIGHT=$H
fi
fi
fi
# Fallback (view)
if [ "$METHOD_VIEW" != "${METHOD_VIEW//,}" ]; then
METHOD_VIEW=${METHOD_VIEW##*,}
log_debug "no captcha viewing method found: fallback to \`$METHOD_VIEW'."
fi
local IMG_HASH PRG_PID IMG_PNM
# How to display image
case $METHOD_VIEW in
log)
log_notice "Local image: $FILENAME"
;;
aview)
local -r FF=$'\f'
# aview can only display files in PNM file format
IMG_PNM=$(create_tempfile '.pnm') || return
convert "$FILENAME" -negate -depth 8 pnm:$IMG_PNM && \
aview -width $MAX_OUTPUT_WIDTH -height $MAX_OUTPUT_HEIGHT \
-kbddriver stdin -driver stdout "$IMG_PNM" 2>/dev/null <<<'q' | \
sed -e "1d;/$FF/,/$FF/d;/^[[:space:]]*$/d" 1>&2
rm -f "$IMG_PNM"
;;
tiv)
tiv -a -w $MAX_OUTPUT_WIDTH -h $MAX_OUTPUT_HEIGHT "$FILENAME" 1>&2
;;
img2txt)
img2txt -W $MAX_OUTPUT_WIDTH -H $MAX_OUTPUT_HEIGHT "$FILENAME" 1>&2
;;
X-display)
display "$FILENAME" &
PRG_PID=$!
;;
X-feh)
feh "$FILENAME" &
PRG_PID=$!
;;
X-qiv)
qiv "$FILENAME" &
PRG_PID=$!
;;
X-sxiv)
# open a 640x480 window (rescale to fit)
sxiv -q -s f "$FILENAME" &
            # $? of a backgrounded command is always 0; just record the PID
            PRG_PID=$!
;;
fb-fbi)
log_debug "fbi -autozoom -noverbose -d $FBDEV $FILENAME"
fbi -autozoom -noverbose -d "$FBDEV" "$FILENAME" &>/dev/null
PRG_PID=""
;;
fb-fim)
log_debug "fim --quiet --autozoom -d $FBDEV $FILENAME"
fim --quiet --autozoom -d "$FBDEV" "$FILENAME" 2>/dev/null
PRG_PID=""
;;
imgur)
IMG_HASH=$(image_upload_imgur "$FILENAME") || true
;;
*)
log_error "unknown view method: $METHOD_VIEW"
rm -f "$FILENAME"
return $ERR_FATAL
;;
esac
# How to solve captcha
case $METHOD_SOLVE in
none)
rm -f "$FILENAME"
return $ERR_CAPTCHA
;;
9kweu)
log_notice 'Using 9kw.eu captcha recognition system'
# Note for later: extra params can be supplied:
# min_len & max_len & phrase & numeric & prio & captchaperhour & confirm
RESPONSE=$(curl -F 'method=post' \
-F 'action=usercaptchaupload' \
-F "apikey=$CAPTCHA_9KWEU" \
-F 'source=plowshare' \
-F 'maxtimeout=235' \
-F "file-upload-01=@$FILENAME;filename=file.jpg" \
'http://www.9kw.eu/index.cgi') || return
if [ -z "$RESPONSE" ]; then
log_error '9kw.eu empty answer'
rm -f "$FILENAME"
return $ERR_NETWORK
# Error range: 0001..0028. German language.
elif [[ $RESPONSE = 00[012][[:digit:]][[:space:]]* ]]; then
log_error "9kw.eu error: ${RESPONSE:5}"
rm -f "$FILENAME"
return $ERR_FATAL
fi
TID=$RESPONSE
for I in 10 6 6 7 7 8 8 9 9 10 10 20 20 20 30 30 30; do
wait $I seconds
RESPONSE=$(curl --get --data 'action=usercaptchacorrectdata' \
--data "apikey=$CAPTCHA_9KWEU" \
--data "id=$TID" \
--data 'source=plowshare' --data 'info=1' \
'http://www.9kw.eu/index.cgi') || return
                if [ -z "$RESPONSE" -o 'NO DATA' = "$RESPONSE" ]; then
                    continue
                # Same error format as on upload (range: 0001..0028)
                elif [[ $RESPONSE = 00[012][[:digit:]][[:space:]]* ]]; then
                    log_error "9kw.eu error: ${RESPONSE:5}"
                    rm -f "$FILENAME"
                    return $ERR_FATAL
                else
                    WORD=$RESPONSE
                    break
                fi
done
if [ -z "$WORD" ]; then
log_error '9kw.eu error: service not available'
rm -f "$FILENAME"
return $ERR_CAPTCHA
fi
# Result on two lines
echo "$WORD"
echo "9$TID"
;;
antigate)
log_notice 'Using antigate captcha recognition system'
# Note for later: extra params can be supplied: min_len & max_len
RESPONSE=$(curl -F 'method=post' \
-F "file=@$FILENAME;filename=file.jpg" \
-F "key=$CAPTCHA_ANTIGATE" \
-F 'is_russian=0' \
'http://antigate.com/in.php') || return
if [ -z "$RESPONSE" ]; then
log_error 'antigate empty answer'
rm -f "$FILENAME"
return $ERR_NETWORK
elif [ 'ERROR_IP_NOT_ALLOWED' = "$RESPONSE" ]; then
log_error 'antigate error: IP not allowed'
rm -f "$FILENAME"
return $ERR_FATAL
elif [ 'ERROR_ZERO_BALANCE' = "$RESPONSE" ]; then
log_error 'antigate error: no credits'
rm -f "$FILENAME"
return $ERR_FATAL
elif [ 'ERROR_NO_SLOT_AVAILABLE' = "$RESPONSE" ]; then
log_error 'antigate error: no slot available'
rm -f "$FILENAME"
return $ERR_CAPTCHA
elif match 'ERROR_' "$RESPONSE"; then
log_error "antigate error: $RESPONSE"
rm -f "$FILENAME"
return $ERR_FATAL
fi
TID=$(echo "$RESPONSE" | parse_quiet . 'OK|\(.*\)')
for I in 8 5 5 6 6 7 7 8; do
wait $I seconds
RESPONSE=$(curl --get \
--data "key=${CAPTCHA_ANTIGATE}&action=get&id=$TID" \
'http://antigate.com/res.php') || return
if [ 'CAPCHA_NOT_READY' = "$RESPONSE" ]; then
continue
elif match '^OK|' "$RESPONSE"; then
WORD=$(echo "$RESPONSE" | parse_quiet . 'OK|\(.*\)')
break
else
log_error "antigate error: $RESPONSE"
rm -f "$FILENAME"
return $ERR_FATAL
fi
done
if [ -z "$WORD" ]; then
log_error 'antigate error: service not available'
rm -f "$FILENAME"
return $ERR_CAPTCHA
fi
# result on two lines
echo "$WORD"
echo "a$TID"
;;
captchabrotherhood)
local USERNAME=${CAPTCHA_BHOOD%%:*}
local PASSWORD=${CAPTCHA_BHOOD#*:}
log_notice "Using captcha brotherhood bypass service ($USERNAME)"
# Content-Type is mandatory.
# timeout parameter has no effect
RESPONSE=$(curl --data-binary "@$FILENAME" \
--header 'Content-Type: text/html' \
"http://www.captchabrotherhood.com/sendNewCaptcha.aspx?username=$USERNAME&password=$PASSWORD&captchaSource=plowshare&timeout=30&captchaSite=-1") || return
if [ "${RESPONSE:0:3}" = 'OK-' ]; then
TID=${RESPONSE:3}
if [ -n "$TID" ]; then
for I in 6 5 5 6 6 7 7 8; do
wait $I seconds
RESPONSE=$(curl --get -d "username=$USERNAME" \
-d "password=$PASSWORD" -d "captchaID=$TID" \
'http://www.captchabrotherhood.com/askCaptchaResult.aspx') || return
if [ "${RESPONSE:0:12}" = 'OK-answered-' ]; then
WORD=${RESPONSE:12}
if [ -n "$WORD" ]; then
# Result on two lines
echo "$WORD"
echo "b$TID"
return 0
else
RESPONSE='empty word?'
fi
break
elif [ "${RESPONSE:0:3}" != 'OK-' ]; then
break
fi
# Other 'OK-' replies (e.g. 'OK-on user-...') mean still pending, keep polling
done
else
RESPONSE='empty tid?'
fi
fi
log_error "Captcha Brotherhood error: ${RESPONSE#Error-}"
rm -f "$FILENAME"
return $ERR_FATAL
;;
captchacoin)
log_notice 'Using CaptchaCoin captcha recognition system'
RESPONSE=$(curl -F "file=@$FILENAME;filename=file.jpg" \
-F "api_key=$CAPTCHA_COIN" \
'http://www.captchacoin.com/api/submit') || return
if [ -z "$RESPONSE" ]; then
log_error 'CaptchaCoin empty answer'
rm -f "$FILENAME"
return $ERR_NETWORK
fi
if match_json_true 'success' "$RESPONSE"; then
TID=$(echo "$RESPONSE" | parse_json_quiet 'img_key')
if [ -z "$TID" ]; then
log_error 'CaptchaCoin error: no image key'
rm -f "$FILENAME"
return $ERR_FATAL
fi
else
log_error "CaptchaCoin error: $RESPONSE"
rm -f "$FILENAME"
return $ERR_FATAL
fi
for I in 10 5 5 6 6 7 7 8; do
wait $I seconds
RESPONSE=$(curl --get \
-d "api_key=$CAPTCHA_COIN" \
-d "img_key=$TID" \
'http://api.captchacoin.com/api/imginfo') || return
if match_json_true 'success' "$RESPONSE"; then
if match_json_true 'assigned' "$RESPONSE"; then
log_debug 'CaptchaCoin: someone is solving the captcha'
fi
CAPTCHA_AGE=$(echo "$RESPONSE" | parse_json_quiet 'age_seconds')
[ -n "$CAPTCHA_AGE" ] && log_debug "CaptchaCoin: captcha age $CAPTCHA_AGE"
WORD=$(echo "$RESPONSE" | parse_json_quiet 'solution')
[ -n "$WORD" ] && break
else
log_error "CaptchaCoin error: $RESPONSE"
rm -f "$FILENAME"
return $ERR_FATAL
fi
done
if [ -z "$WORD" ]; then
RESPONSE=$(curl --get \
-d "api_key=$CAPTCHA_COIN" \
-d "img_key=$TID" \
'http://www.captchacoin.com/api/withdraw') || return
if match_json_true 'success' "$RESPONSE"; then
log_error 'CaptchaCoin: recognition failed, withdraw successful'
else
log_error "CaptchaCoin: recognition failed, withdraw error ($RESPONSE)"
fi
rm -f "$FILENAME"
return $ERR_FATAL
fi
# result on two lines
echo "$WORD"
echo "c$TID"
;;
deathbycaptcha)
local HTTP_CODE POLL_URL
local USERNAME=${CAPTCHA_DEATHBY%%:*}
local PASSWORD=${CAPTCHA_DEATHBY#*:}
log_notice "Using DeathByCaptcha service ($USERNAME)"
# Check the HTTP headers, don't parse the JSON answer
RESPONSE=$(curl --include --header 'Expect: ' \
--header 'Accept: application/json' \
-F "username=$USERNAME" \
-F "password=$PASSWORD" \
-F "captchafile=@$FILENAME" \
'http://api.dbcapi.me/api/captcha') || return
if [ -z "$RESPONSE" ]; then
log_error 'DeathByCaptcha empty answer'
rm -f "$FILENAME"
return $ERR_NETWORK
fi
HTTP_CODE=$(echo "$RESPONSE" | first_line | \
parse . 'HTTP/1\.. \([[:digit:]]\+\) ')
if [ "$HTTP_CODE" = 303 ]; then
POLL_URL=$(echo "$RESPONSE" | grep_http_header_location) || return
for I in 4 3 3 4 4 5 5; do
wait $I seconds
# {"status": 0, "captcha": 661085218, "is_correct": true, "text": ""}
RESPONSE=$(curl --header 'Accept: application/json' \
"$POLL_URL") || return
if match_json_true 'is_correct' "$RESPONSE"; then
WORD=$(echo "$RESPONSE" | parse_json_quiet text)
if [ -n "$WORD" ]; then
TID=$(echo "$RESPONSE" | parse_json_quiet captcha)
echo "$WORD"
echo "d$TID"
return 0
fi
else
log_error "DeathByCaptcha unknown error: $RESPONSE"
rm -f "$FILENAME"
return $ERR_CAPTCHA
fi
done
log_error 'DeathByCaptcha timeout: give up!'
else
log_error "DeathByCaptcha wrong http answer ($HTTP_CODE)"
fi
rm -f "$FILENAME"
return $ERR_CAPTCHA
;;
prompt)
# Reload mechanism is not available for all types
if [ "$CAPTCHA_TYPE" = 'recaptcha' -o \
"$CAPTCHA_TYPE" = 'solvemedia' ]; then
log_notice 'Leave this field blank and hit enter to get another captcha image'
fi
read -r -p 'Enter captcha response (drop punctuation marks, case insensitive): ' RESPONSE
echo "$RESPONSE"
echo $TID
;;
*)
log_error "unknown solve method: $METHOD_SOLVE"
rm -f "$FILENAME"
return $ERR_FATAL
;;
esac
# Second pass for cleaning up
case $METHOD_VIEW in
X-*)
[[ $PRG_PID ]] && kill -HUP $PRG_PID >/dev/null 2>&1
;;
imgur)
image_delete_imgur "$IMG_HASH" || true
;;
esac
# if captcha URL provided, drop temporary image file
if [ "$1" != "$FILENAME" ]; then
rm -f "$FILENAME"
fi
}
# reCAPTCHA decoding function
# Main engine: http://api.recaptcha.net/js/recaptcha.js
#
# $1: reCAPTCHA site public key
# stdout: On 3 lines: <word> \n <challenge> \n <transaction_id>
recaptcha_process() {
local -r RECAPTCHA_SERVER='http://www.google.com/recaptcha/api/'
local URL="${RECAPTCHA_SERVER}challenge?k=${1}"
local VARS SERVER TRY CHALLENGE FILENAME WORDS TID
VARS=$(curl -L "$URL") || return
if [ -z "$VARS" ]; then
return $ERR_CAPTCHA
fi
# Load image
SERVER=$(echo "$VARS" | parse_quiet 'server' "server[[:space:]]\?:[[:space:]]\?'\([^']*\)'") || return
CHALLENGE=$(echo "$VARS" | parse_quiet 'challenge' "challenge[[:space:]]\?:[[:space:]]\?'\([^']*\)'") || return
# Result: Recaptcha.finish_reload('...', 'image');
VARS=$(curl "${SERVER}reload?k=${1}&c=${CHALLENGE}&reason=i&type=image&lang=en") || return
CHALLENGE=$(echo "$VARS" | parse 'finish_reload' "('\([^']*\)") || return
log_debug "reCaptcha server: $SERVER"
# Image dimension: 300x57
FILENAME=$(create_tempfile '.recaptcha.jpg') || return
TRY=0
# Arbitrary 100 limit is safer
while (( TRY++ < 100 )) || return $ERR_MAX_TRIES_REACHED; do
log_debug "reCaptcha loop $TRY"
log_debug "reCaptcha challenge: $CHALLENGE"
URL="${SERVER}image?c=${CHALLENGE}"
log_debug "reCaptcha image URL: $URL"
curl "$URL" -o "$FILENAME" || return
WORDS=$(captcha_process "$FILENAME" recaptcha) || return
rm -f "$FILENAME"
{ read WORDS; read TID; } <<< "$WORDS"
[ -n "$WORDS" ] && break
# Reload image
log_debug 'empty, request another image'
# Result: Recaptcha.finish_reload('...', 'image');
VARS=$(curl "${SERVER}reload?k=${1}&c=${CHALLENGE}&reason=r&type=image&lang=en") || return
CHALLENGE=$(echo "$VARS" | parse 'finish_reload' "('\([^']*\)") || return
done
WORDS=$(echo "$WORDS" | uri_encode)
echo "$WORDS"
echo "$CHALLENGE"
echo $TID
}
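# Example (sketch; $PUBKEY is a hypothetical site key, and module code
# would then submit WORD/CHALLENGE to the hoster before acknowledging):
#   RC=$(recaptcha_process "$PUBKEY") || return
#   { read WORD; read CHALLENGE; read ID; } <<< "$RC"
#   captcha_ack "$ID"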
# Process captcha from "Solve Media" (http://www.solvemedia.com/)
# $1: Solvemedia site public key
# stdout: On 2 lines: <verified_challenge> \n <transaction_id>
solvemedia_captcha_process() {
local -r PUB_KEY=$1
local -r BASE_URL='http://api.solvemedia.com/papi'
local URL="$BASE_URL/challenge.noscript?k=$PUB_KEY"
local HTML MAGIC CHALL IMG_FILE XY WI WORDS TID TRY
IMG_FILE=$(create_tempfile '.solvemedia.jpg') || return
TRY=0
# Arbitrary 100 limit is safer
while (( TRY++ < 100 )) || return $ERR_MAX_TRIES_REACHED; do
log_debug "SolveMedia loop $TRY"
XY=''
# Get + scrape captcha iframe
HTML=$(curl "$URL") || return
MAGIC=$(echo "$HTML" | parse_form_input_by_name 'magic') || return
CHALL=$(echo "$HTML" | parse_form_input_by_name \
'adcopy_challenge') || return
# Get actual captcha image
curl -o "$IMG_FILE" "$BASE_URL/media?c=$CHALL" || return
# Solve captcha
# Note: Image is a 300x150 gif file containing text strings
WI=$(captcha_process "$IMG_FILE" solvemedia) || return
{ read WORDS; read TID; } <<< "$WI"
rm -f "$IMG_FILE"
# Reload image?
if [ -z "$WORDS" ]; then
log_debug 'empty, request another image'
XY='-d t_img.x=23 -d t_img.y=7'
fi
# Verify solution/request new challenge
HTML=$(curl --referer "$URL" \
-d "adcopy_response=$WORDS" \
-d "k=$PUB_KEY" \
-d 'l=en' \
-d 't=img' \
-d 's=standard' \
-d "magic=$MAGIC" \
-d "adcopy_challenge=$CHALL" \
$XY \
"$BASE_URL/verify.noscript") || return
if ! match 'Redirecting\.\.\.' "$HTML" ||
match '&error=1&' "$HTML"; then
captcha_nack "$TID"
return $ERR_CAPTCHA
fi
URL=$(echo "$HTML" | parse 'META' 'URL=\(.\+\)">') || return
[ -n "$WORDS" ] && break
done
HTML=$(curl "$URL") || return
if ! match 'Please copy this gibberish:' "$HTML" || \
! match "$CHALL" "$HTML"; then
log_debug 'Unexpected content. Site updated?'
return $ERR_FATAL
fi
echo "$CHALL"
echo "$TID"
}
# Positive acknowledge of captcha answer
# $1: id (returned by captcha_process/recaptcha_process/solvemedia_captcha_process)
captcha_ack() {
[ "$1" = 0 ] && return
local -r M=${1:0:1}
local -r TID=${1:1}
local RESPONSE STR
if [ '9' = "$M" ]; then
if [ -n "$CAPTCHA_9KWEU" ]; then
RESPONSE=$(curl --get --data 'action=usercaptchacorrectback' \
--data "apikey=$CAPTCHA_9KWEU" --data "id=$TID" \
--data 'correct=1' 'http://www.9kw.eu/index.cgi') || return
[ 'OK' = "$RESPONSE" ] || \
log_error "9kw.eu error: $RESPONSE"
else
log_error "$FUNCNAME failed: 9kweu missing captcha key"
fi
elif [ c = "$M" ]; then
if [ -n "$CAPTCHA_COIN" ]; then
log_debug 'CaptchaCoin report ack'
RESPONSE=$(curl --get \
-d "api_key=$CAPTCHA_COIN" -d 'status=1' \
-d "img_key=$TID" \
'http://www.captchacoin.com/api/poststatus') || return
if ! match_json_true 'success' "$RESPONSE"; then
log_error "CaptchaCoin: report ack error ($RESPONSE)"
fi
else
log_error "$FUNCNAME failed: CaptchaCoin missing API key"
fi
elif [[ $M != [abd] ]]; then
log_error "$FUNCNAME failed: unknown transaction ID: $1"
fi
}
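# Example (sketch; $ID comes from captcha_process, $PAGE and the
# 'download ready' marker are hypothetical placeholders):
#   if match 'download ready' "$PAGE"; then
#   captcha_ack "$ID"
#   else
#   captcha_nack "$ID"
#   fi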
# Negative acknowledge of captcha answer
# $1: id (returned by captcha_process/recaptcha_process/solvemedia_captcha_process)
captcha_nack() {
[ "$1" = 0 ] && return
local -r M=${1:0:1}
local -r TID=${1:1}
local RESPONSE STR
if [ '9' = "$M" ]; then
if [ -n "$CAPTCHA_9KWEU" ]; then
RESPONSE=$(curl --get --data 'action=usercaptchacorrectback' \
--data "apikey=$CAPTCHA_9KWEU" --data "id=$TID" \
--data 'correct=2' 'http://www.9kw.eu/index.cgi') || return
[ 'OK' = "$RESPONSE" ] || \
log_error "9kw.eu error: $RESPONSE"
else
log_error "$FUNCNAME failed: 9kweu missing captcha key"
fi
elif [ a = "$M" ]; then
if [ -n "$CAPTCHA_ANTIGATE" ]; then
RESPONSE=$(curl --get \
--data "key=${CAPTCHA_ANTIGATE}&action=reportbad&id=$TID" \
'http://antigate.com/res.php') || return
[ 'OK_REPORT_RECORDED' = "$RESPONSE" ] || \
log_error "antigate error: $RESPONSE"
else
log_error "$FUNCNAME failed: antigate missing captcha key"
fi
elif [ b = "$M" ]; then
if [ -n "$CAPTCHA_BHOOD" ]; then
local USERNAME=${CAPTCHA_BHOOD%%:*}
local PASSWORD=${CAPTCHA_BHOOD#*:}
log_debug "captcha brotherhood report nack ($USERNAME)"
RESPONSE=$(curl --get \
-d "username=$USERNAME" -d "password=$PASSWORD" \
-d "captchaID=$TID" \
'http://www.captchabrotherhood.com/complainCaptcha.aspx') || return
[ 'OK-Complained' = "$RESPONSE" ] || \
log_error "$FUNCNAME FIXME cbh[$RESPONSE]"
else
log_error "$FUNCNAME failed: captcha brotherhood missing account data"
fi
elif [ c = "$M" ]; then
if [ -n "$CAPTCHA_COIN" ]; then
log_debug 'CaptchaCoin report nack'
RESPONSE=$(curl --get \
-d "api_key=$CAPTCHA_COIN" -d 'status=0' \
-d "img_key=$TID" \
'http://www.captchacoin.com/api/poststatus') || return
if ! match_json_true 'success' "$RESPONSE"; then
log_error "CaptchaCoin: report nack error ($RESPONSE)"
fi
else
log_error "$FUNCNAME failed: CaptchaCoin missing API key"
fi
elif [ d = "$M" ]; then
if [ -n "$CAPTCHA_DEATHBY" ]; then
local USERNAME=${CAPTCHA_DEATHBY%%:*}
local PASSWORD=${CAPTCHA_DEATHBY#*:}
log_debug "DeathByCaptcha report nack ($USERNAME)"
RESPONSE=$(curl -F "username=$USERNAME" -F "password=$PASSWORD" \
--header 'Accept: application/json' \
"http://api.dbcapi.me/api/captcha/$TID/report") || return
STR=$(echo "$RESPONSE" | parse_json_quiet 'status')
[ "$STR" = '0' ] || \
log_error "DeathByCaptcha: report nack error ($RESPONSE)"
else
log_error "$FUNCNAME failed: DeathByCaptcha missing account data"
fi
else
log_error "$FUNCNAME failed: unknown transaction ID: $1"
fi
}
# Generate a pseudo-random character sequence.
# Don't use /dev/urandom or $$ but $RANDOM (internal bash builtin,
# range 0-32767). Note: chr() is from Greg's Wiki (BashFAQ/071).
#
# $1: operation type (string)
# - "a": alpha [0-9a-z]. Param: length.
# - "d", "dec": positive decimal number. First digit is never 0.
# Param: number of digits.
# - "h", "hex": hexadecimal number. First digit is never 0. No '0x' prefix.
# Param: number of digits.
# - "H", "HEX": same as "h" but in uppercases
# - "js": Math.random() equivalent (>=0 and <1).
# It's a double (~15.9 decimal digits). No param.
# - "l": letters [a-z]. Param: length.
# - "L": letters [A-Z]. Param: length.
# - "ll", "LL": letters [A-Za-z]. Param: length.
# - "u16": unsigned short (decimal) number <=65535. Example: "352".
# stdout: random string/integer (\n-terminated)
random() {
local -i I=0
local LEN=${2:-8}
local -r SEED=$RANDOM
local RESULT N L
case $1 in
d|dec)
RESULT=$(( SEED % 9 + 1 ))
(( ++I ))
while (( I < $LEN )); do
printf -v N '%04u' $((RANDOM % 10000))
RESULT+=$N
(( I += 4 ))
done
;;
h|hex)
printf -v RESULT '%x' $(( SEED % 15 + 1 ))
(( ++I ))
while (( I < $LEN )); do
printf -v N '%04x' $((RANDOM & 65535))
RESULT+=$N
(( I += 4 ))
done
;;
H|HEX)
printf -v RESULT '%X' $(( SEED % 15 + 1 ))
(( ++I ))
while (( I < $LEN )); do
printf -v N '%04X' $((RANDOM & 65535))
RESULT+=$N
(( I += 4 ))
done
;;
l)
while (( I++ < $LEN )); do
N=$(( RANDOM % 26 + 16#61))
printf -v L \\$(($N/64*100+$N%64/8*10+$N%8))
RESULT+=$L
done
;;
L)
while (( I++ < $LEN )); do
N=$(( RANDOM % 26 + 16#41))
printf -v L \\$(($N/64*100+$N%64/8*10+$N%8))
RESULT+=$L
done
;;
[Ll][Ll])
while (( I++ < $LEN )); do
N=$(( RANDOM % 52 + 16#41))
[[ $N -gt 90 ]] && (( N += 6 ))
printf -v L \\$(($N/64*100+$N%64/8*10+$N%8))
RESULT+=$L
done
;;
a)
while (( I++ < $LEN )); do
N=$(( RANDOM % 36 + 16#30))
[[ $N -gt 57 ]] && (( N += 39 ))
printf -v L \\$(($N/64*100+$N%64/8*10+$N%8))
RESULT+=$L
done
;;
js)
LEN=$((SEED % 3 + 17))
RESULT='0.'$((RANDOM * 69069 & 16#ffffffff))
RESULT+=$((RANDOM * 69069 & 16#ffffffff))
;;
u16)
RESULT=$(( 256 * (SEED & 255) + (RANDOM & 255) ))
LEN=${#RESULT}
;;
*)
log_error "$FUNCNAME: unknown operation '$1'"
return $ERR_FATAL
;;
esac
echo ${RESULT:0:$LEN}
}
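# Examples (output shape only, values differ on every call):
#   random h 32   # 32 lowercase hexadecimal digits
#   random d 10   # 10-digit decimal number, first digit never 0
#   random js     # e.g. 0.1387713905962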
# Calculate MD5 hash (128-bit) of a string.
# See RFC1321.
#
# $1: input string
# stdout: message-digest fingerprint (32-digit hexadecimal number, lowercase letters)
# $?: 0 for success or $ERR_SYSTEM
md5() {
# GNU coreutils
if check_exec md5sum; then
echo -n "$1" | md5sum -b 2>/dev/null | cut -d' ' -f1
# BSD
elif check_exec md5; then
command md5 -qs "$1"
# OpenSSL
elif check_exec openssl; then
echo -n "$1" | openssl dgst -md5 | cut -d' ' -f2
# FIXME: use javascript if requested
else
log_error "$FUNCNAME: cannot find md5 calculator"
return $ERR_SYSTEM
fi
}
# Calculate SHA-1 hash (160-bit) of a string.
# See FIPS PUB 180-1.
#
# $1: input string
# stdout: message-digest fingerprint (40-digit hexadecimal number, lowercase letters)
# $?: 0 for success or $ERR_SYSTEM
sha1() {
# GNU coreutils
if check_exec sha1sum; then
echo -n "$1" | sha1sum -b 2>/dev/null | cut -d' ' -f1
# BSD
elif check_exec sha1; then
command sha1 -qs "$1"
# OpenSSL
elif check_exec openssl; then
echo -n "$1" | openssl dgst -sha1 | cut -d' ' -f2
# FIXME: use javascript if requested
else
log_error "$FUNCNAME: cannot find sha1 calculator"
return $ERR_SYSTEM
fi
}
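# Example (well-known digests, both printed lowercase on stdout):
#   md5 'hello'    # => 5d41402abc4b2a76b9719d911017c592
#   sha1 'hello'   # => aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d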
# Calculate MD5 hash (128-bit) of a file.
# $1: input file
# stdout: message-digest fingerprint (32-digit hexadecimal number, lowercase letters)
# $?: 0 for success or $ERR_SYSTEM
md5_file() {
if [ -f "$1" ]; then
# GNU coreutils
if check_exec md5sum; then
md5sum -b "$1" 2>/dev/null | cut -d' ' -f1
# BSD
elif check_exec md5; then
command md5 -q "$1"
# OpenSSL
elif check_exec openssl; then
openssl dgst -md5 "$1" | cut -d' ' -f2
else
log_error "$FUNCNAME: cannot find md5 calculator"
return $ERR_SYSTEM
fi
else
log_error "$FUNCNAME: cannot stat file"
return $ERR_SYSTEM
fi
}
# Calculate SHA-1 hash (160-bit) of a file.
# $1: input file
# stdout: message-digest fingerprint (40-digit hexadecimal number, lowercase letters)
# $?: 0 for success or $ERR_SYSTEM
sha1_file() {
if [ -f "$1" ]; then
# GNU coreutils
if check_exec sha1sum; then
sha1sum -b "$1" 2>/dev/null | cut -d' ' -f1
# BSD
elif check_exec sha1; then
command sha1 -q "$1"
# OpenSSL
elif check_exec openssl; then
openssl dgst -sha1 "$1" | cut -d' ' -f2
else
log_error "$FUNCNAME: cannot find sha1 calculator"
return $ERR_SYSTEM
fi
else
log_error "$FUNCNAME: cannot stat file"
return $ERR_SYSTEM
fi
}
# Split credentials
# $1: auth string (user:password)
# $2: variable name (user)
# $3: (optional) variable name (password)
# Note: $2 or $3 can't be named '__AUTH__' or '__STR__'
split_auth() {
local __AUTH__=$1
local __STR__
if [ -z "$__AUTH__" ]; then
log_error "$FUNCNAME: authentication string is empty"
return $ERR_LOGIN_FAILED
fi
__STR__=${__AUTH__%%:*}
if [ -z "$__STR__" ]; then
log_error "$FUNCNAME: empty string (user)"
return $ERR_LOGIN_FAILED
fi
[[ $2 ]] && unset "$2" && eval $2=\$__STR__
if [[ $3 ]]; then
# Sanity check
if [ "$2" = "$3" ]; then
log_error "$FUNCNAME: user and password varname must not be the same"
else
__STR__=${__AUTH__#*:}
if [ -z "$__STR__" -o "$__AUTH__" = "$__STR__" ]; then
__STR__=$(prompt_for_password) || return $ERR_LOGIN_FAILED
fi
unset "$3" && eval $3=\$__STR__
fi
fi
}
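# Example (sketch):
#   split_auth 'john:secret' USER PASSWORD
#   # => USER='john', PASSWORD='secret'. Without ':', a password prompt is shown.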
# Report list results. Only used by list module functions.
#
# $1: links list (one url per line)
# $2: (optional) filename or name list (one hoster name per line)
# $3: (optional) link prefix (gets prepended to every link)
# $4: (optional) link suffix (gets appended to every link)
# $?: 0 for success or $ERR_LINK_DEAD
list_submit() {
local LINE I
test "$1" || return $ERR_LINK_DEAD
if test "$2"; then
local -a LINKS NAMES
mapfile -t LINKS <<< "$1"
mapfile -t NAMES <<< "$2"
# One single name for all links
if [[ "${#NAMES[@]}" -eq 1 ]]; then
for I in "${!LINKS[@]}"; do
test "${LINKS[$I]}" || continue
echo "$3${LINKS[$I]}$4"
echo "${NAMES[0]}"
done
else
for I in "${!LINKS[@]}"; do
test "${LINKS[$I]}" || continue
echo "$3${LINKS[$I]}$4"
echo "${NAMES[$I]}"
done
fi
else
while IFS= read -r LINE; do
test "$LINE" || continue
echo "$3$LINE$4"
echo
done <<< "$1"
fi
}
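# Example (sketch, hypothetical data; prints each link then its name):
#   list_submit $'/f/1\n/f/2' $'a.zip\nb.zip' 'http://example.com'
#   # => http://example.com/f/1, a.zip, http://example.com/f/2, b.zip (one per line)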
# Return a numeric size (in bytes)
# $1: integer or floating point number (examples: "128" ; "4k" ; "5.34MiB")
# with optional suffix (K, kB, KiB, KB, MiB, MB, GiB, GB)
# stdout: fixed point number (in bytes)
translate_size() {
local N=${1// }
local S T
if [ -z "$N" ]; then
log_error "$FUNCNAME: non empty argument expected"
return $ERR_FATAL
fi
S=$(sed -ne '/[.,]/{s/^\(-\?[[:digit:]]*\)[.,]\([[:digit:]]\+\).*$/\1_\2/p;b;};
s/^\(-\?[[:digit:]]\+\).*$/\1_/p' <<< "$N") || return $ERR_SYSTEM
if [[ $S = '' || $S = '_' ]]; then
log_error "$FUNCNAME: invalid parsed number \`$N'"
return $ERR_FATAL
fi
local -i R=10#${S%_*}
local -i F=0
# Fractional part (consider 3 digits)
T=${S#*_}
if test "$T"; then
T="1${T}00"
F=10#${T:1:3}
T=$(( ${#S} ))
else
T=$(( ${#S} - 1 ))
fi
S=$(sed -e "s/^\.\?\([KkMmGg]i\?[Bb]\?\)$/\1/" <<< "${N:$T}") || return $ERR_SYSTEM
case $S in
# kilobyte (10^3 bytes)
k|kB)
echo $(( 1000 * R + F))
;;
# kibibyte (KiB)
KiB|Ki|K|KB)
echo $(( 1024 * R + 1024 * F / 1000))
;;
# megabyte (10^6)
M|MB)
echo $(( 1000000 * R + 1000 * F))
;;
# mebibyte (MiB)
MiB|Mi|m|mB)
echo $(( 1048576 * R + 1048576 * F / 1000))
;;
# gigabyte (10^9)
G|GB)
echo $(( 1000000000 * R + 1000000 * F))
;;
# gibibyte (GiB)
GiB|Gi)
echo $(( 1073741824 * R + 1073741824 * F / 1000))
;;
# bytes
B|'')
echo "$R"
;;
*b)
log_error "$FUNCNAME: unknown unit \`$S' (we don't deal with bits, use B for bytes)"
return $ERR_FATAL
;;
*)
log_error "$FUNCNAME: unknown unit \`$S'"
return $ERR_FATAL
;;
esac
}
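# Examples:
#   translate_size '4k'        # => 4000
#   translate_size '5.34MiB'   # => 5599395 (5.34 * 1048576, truncated)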
# Add/Update item (key-value pair) to local storage module file.
#
# $1: Key name. Will be 'default' for empty string.
# $2: (optional) String value to save. Don't mention this to delete key.
# $?: 0 for success (item saved correctly), error otherwise
storage_set() {
local -r KEY=${1:-default}
local CONFIG
local -A OBJ
if [ -z "$MODULE" ]; then
log_error "$FUNCNAME: \$MODULE is undefined, abort"
return $ERR_NOMODULE
fi
if [ "$CACHE" != 'shared' ]; then
CONFIG="$TMPDIR/$(basename_file "$0").$$.${MODULE}.txt"
else
CONFIG="$PLOWSHARE_CONFDIR/storage"
if [ ! -d "$CONFIG" ]; then
mkdir -p "$CONFIG"
chmod 700 "$CONFIG"
fi
if [ ! -w "$CONFIG" ]; then
log_error "$FUNCNAME: write permissions expected \`$CONFIG'"
return $ERR_SYSTEM
fi
CONFIG="$CONFIG/${MODULE}.txt"
fi
if [ -f "$CONFIG" ]; then
if [ ! -w "$CONFIG" ]; then
log_error "$FUNCNAME: write permissions expected \`$CONFIG'"
return $ERR_SYSTEM
fi
source "$CONFIG"
else
touch "$CONFIG"
chmod 600 "$CONFIG"
fi
# Unset parameter and empty string are different
if test "${2+isset}"; then
OBJ[$KEY]=$2
else
unset -v OBJ[$KEY]
fi
declare -p OBJ >"$CONFIG"
log_debug "$FUNCNAME: \`$KEY' set for module \`$MODULE'"
}
# Get item value from local storage module file.
#
# $1: (optional) Key name. Will be 'default' if unset
# $?: 0 for success, error otherwise
# stdout: value read from file
storage_get() {
local -r KEY=${1:-default}
local CONFIG
local -A OBJ
if [ -z "$MODULE" ]; then
log_error "$FUNCNAME: \$MODULE is undefined, abort"
return $ERR_NOMODULE
fi
if [ "$CACHE" != 'shared' ]; then
CONFIG="$TMPDIR/$(basename_file "$0").$$.${MODULE}.txt"
else
CONFIG="$PLOWSHARE_CONFDIR/storage"
[ -d "$CONFIG" ] || return $ERR_FATAL
CONFIG="$CONFIG/${MODULE}.txt"
fi
if [ -f "$CONFIG" ]; then
if [ ! -r "$CONFIG" ]; then
log_error "$FUNCNAME: read permissions expected \`$CONFIG'"
return $ERR_SYSTEM
fi
source "$CONFIG"
if test "${OBJ[$KEY]+isset}"; then
echo "${OBJ[$KEY]}"
return 0
fi
fi
return $ERR_FATAL
}
# Clear local storage module file, all entries will be lost.
storage_reset() {
local CONFIG
if [ -z "$MODULE" ]; then
log_error "$FUNCNAME: \$MODULE is undefined, abort"
return $ERR_NOMODULE
fi
if [ "$CACHE" != 'shared' ]; then
CONFIG="$TMPDIR/$(basename_file "$0").$$.${MODULE}.txt"
else
CONFIG="$PLOWSHARE_CONFDIR/storage"
[ -d "$CONFIG" ] || return $ERR_FATAL
CONFIG="$CONFIG/${MODULE}.txt"
fi
if [ -f "$CONFIG" ]; then
rm -f "$CONFIG"
log_debug "$FUNCNAME: delete file for module \`$MODULE'"
fi
}
# Save current date (Epoch) in local storage module file.
# $?: 0 for success, error otherwise
storage_timestamp_set() {
storage_set '__date__' "$(date -u +%s)"
}
# Get time difference from date save in local storage module file.
# $1: (optional) Touch flag. If defined, update saved timestamp.
# $?: 0 for success, error otherwise
# stdout: age value (in seconds)
storage_timestamp_diff() {
local CUR DATE
DATE=$(storage_get '__date__') || return
CUR=$(date -u +%s)
[ -z "$1" ] || storage_set '__date__' "$CUR"
echo "$((CUR - DATE))"
}
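# Example (sketch; must run in a module context where $MODULE is set):
#   storage_set 'token' 'abc123'    # persist a key/value pair
#   TOKEN=$(storage_get 'token')    # read it back later
#   storage_timestamp_set           # remember "now"
#   AGE=$(storage_timestamp_diff)   # seconds elapsed since then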
## ----------------------------------------------------------------------------
##
## Miscellaneous functions that can be called from core:
## download.sh, upload.sh, delete.sh, list.sh, probe.sh
##
# Delete leading and trailing whitespace.
# stdin: input string (can be multiline)
# stdout: result string
strip() {
# first translate non-breaking space to space
sed -e 's/\xC2\?\xA0/ /g' -e 's/^[[:space:]]*//; s/[[:space:]]*$//'
}
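# Example:
#   strip <<< '  foo bar  '   # => 'foo bar'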
# Initialize plowcore: check environment variables and install signal handlers
# $1: program name (used for error reporting only)
core_init() {
local -r NAME=${1:-ERROR}
if [ -z "$TMPDIR" ]; then
log_error "$NAME: \$TMPDIR is undefined, abort"
return $ERR_SYSTEM
elif [ ! -d "$TMPDIR" ]; then
log_error "$NAME: \$TMPDIR is not a directory, abort"
return $ERR_SYSTEM
fi
if [ -n "$PLOWSHARE_CURL" ]; then
if ! type -P "$PLOWSHARE_CURL" >/dev/null 2>&1; then
log_error "$NAME: \$PLOWSHARE_CURL is invalid, abort"
return $ERR_SYSTEM
fi
log_debug "using custom curl: $PLOWSHARE_CURL"
fi
if [ -n "$PLOWSHARE_JS" ]; then
if ! type -P "$PLOWSHARE_JS" >/dev/null 2>&1; then
log_error "$NAME: \$PLOWSHARE_JS is invalid, abort"
return $ERR_SYSTEM
fi
log_debug "using custom js: $PLOWSHARE_JS"
fi
# Shutdown cleanups:
# - Restore proper colors (just in case)
# - Remove temporary files created by create_tempfile
trap 'log_notice_norc ""; rm -f "$TMPDIR/$(basename_file $0).$$".*' EXIT TERM
# SIGCONT notification
trap 'CONT_SIGNAL=$(date +%s)' CONT
}
# Check existence of executable in $PATH
# Better than "which" (external) executable
#
# $1: Executable to check
# $?: one means not found
check_exec() {
command -v "$1" >/dev/null 2>&1
}
# Related to -t/--timeout command line option
timeout_init() {
PS_TIMEOUT=$1
}
# Show help info for options
#
# $1: options (one per line)
# $2: indent string
print_options() {
local -r INDENT=${2:-' '}
local STR VAR SHORT LONG TYPE MSG
while read -r; do
test "$REPLY" || continue
IFS=',' read -r VAR SHORT LONG TYPE MSG <<< "$REPLY"
if [ -n "$SHORT" ]; then
if test "$TYPE"; then
STR="-${SHORT} ${TYPE#*=}"
test -n "$LONG" && STR="-${SHORT}, --${LONG}=${TYPE#*=}"
else
STR="-${SHORT}"
test -n "$LONG" && STR="$STR, --${LONG}"
fi
# long option only
else
if test "$TYPE"; then
STR=" --${LONG}=${TYPE#*=}"
else
STR=" --${LONG}"
fi
fi
printf '%-35s%s\n' "$INDENT$STR" "$MSG"
done <<< "$1"
}
# Show usage info for modules
#
# $1: module list (array name)
# $2: option family name (string, example:UPLOAD)
print_module_options() {
local ELT OPTIONS
for ELT in "${!1}"; do
OPTIONS=$(get_module_options "$ELT" "$2")
if test "$OPTIONS"; then
echo
echo "Options for module <$ELT>:"
print_options "$OPTIONS"
fi
done
}
# Get all modules options with specified family name
#
# $1: module list (array name)
# $2: option family name (string, example:UPLOAD)
get_all_modules_options() {
local ELT
for ELT in "${!1}"; do
get_module_options "$ELT" "$2"
done
}
# Get module name from URL link
#
# $1: url
# $2: module list (array name)
get_module() {
local ELT
for ELT in "${!2}"; do
local -u VAR="MODULE_${ELT}_REGEXP_URL"
if match "${!VAR}" "$1"; then
echo "$ELT"
return 0
fi
done
return $ERR_NOMODULE
}
# $1: program name (used for error reporting only)
# $2: core option list (one per line)
# $3..$n: arguments
# Note: This is called two times: early plowX options and plowX options
process_core_options() {
local -r NAME=$1
local -r OPTIONS=$(strip_and_drop_empty_lines "$2")
shift 2
if [ -d "$PLOWSHARE_CONFDIR/exec" ]; then
VERBOSE=2 PATH="$PLOWSHARE_CONFDIR/exec:$PATH" process_options \
"$NAME" "$OPTIONS" -1 "$@"
else
VERBOSE=2 process_options "$NAME" "$OPTIONS" -1 "$@"
fi
}
# $1: program name (used for error reporting only)
# $2: all modules option list (one per line)
# $3..$n: arguments
process_all_modules_options() {
local -r NAME=$1
local -r OPTIONS=$2
shift 2
process_options "$NAME" "$OPTIONS" 0 "$@"
}
# $1: module name (used for error reporting only)
# $2: option family name (string, example:UPLOAD)
# $3..$n: arguments
process_module_options() {
local -r M=$1
local -r OPTIONS=$(get_module_options "$1" "$2")
shift 2
process_options "$M" "$OPTIONS" 1 "$@"
}
# Get list of all available modules (according to capability)
# Notes:
# - VERBOSE (log_debug) not initialised yet
# - Function is designed to be called in an eval statement (print array on stdout)
#
# $1: feature to grep (must not contain '|' char)
# $2 (optional): feature to subtract (must not contain '|' char)
# stdout: declare an associative array (MODULES_PATH)
get_all_modules_list() {
# Legacy locations are kept for compatibility
local -a SRCS=( "$LIBDIR/modules" "$PLOWSHARE_CONFDIR/modules" )
local -A MODULES_PATH=()
local D CONFIG
if [ -d "$PLOWSHARE_CONFDIR/modules.d/" ]; then
while read -r; do
D=$(dirname "$REPLY")
SRCS+=( "$D" )
done < <(find -L "$PLOWSHARE_CONFDIR/modules.d/" -mindepth 2 -maxdepth 2 -name config)
fi
for D in "${SRCS[@]}"; do
CONFIG="$D/config"
if [[ -d "$D" && -f "$CONFIG" ]]; then
while read -r; do
if [ -f "$D/$REPLY.sh" ]; then
# Silent override: modules installed in $HOME prevails over $LIBDIR
#if [[ ${MODULES_PATH["$REPLY"]} ]]; then
# stderr "INFO: $CONFIG: \`$REPLY\` module overwrite, this one is taken"
#fi
MODULES_PATH[$REPLY]="$D/$REPLY.sh"
else
stderr "ERROR: $CONFIG: \`$REPLY\` module not found, ignoring"
fi
done < <(if test "$2"; then sed -ne \
"/^[^#]/{/|[[:space:]]*$1/{/|[[:space:]]*$2/!s/^\([^[:space:]|]*\).*/\1/p}}" \
"$CONFIG"; else sed -ne \
"/^[^#]/{/|[[:space:]]*$1/s/^\([^[:space:]|]*\).*/\1/p}" "$CONFIG"; fi)
fi
done
declare -p MODULES_PATH
}
# $1: section name in ini-style file ("General" will be considered too)
# $2: command-line argument list
# $3 (optional): user specified configuration file
# Note: VERBOSE (log_debug) not initialised yet
process_configfile_options() {
local CONFIG OPTIONS SECTION NAME VALUE OPTION
if [ -z "$3" ]; then
CONFIG="$PLOWSHARE_CONFDIR/plowshare.conf"
test -f "$CONFIG" || CONFIG='/etc/plowshare.conf'
test -f "$CONFIG" || return 0
else
CONFIG=$3
fi
# Strip spaces in options
OPTIONS=$(strip_and_drop_empty_lines "$2")
# [General] section before [$1] section
SECTION=$(sed -ne "/\[$1\]/,/^\[/H; /\[[Gg]eneral\]/,/^\[/p; \${x;p}" \
"$CONFIG" | sed -e '/^\(#\|\[\|[[:space:]]*$\)/d') || true
if [ -n "$SECTION" -a -n "$OPTIONS" ]; then
while read -r; do
NAME=$(strip <<< "${REPLY%%=*}")
VALUE=$(strip <<< "${REPLY#*=}")
# If $NAME contains a '/' character, this is a module option, skip it
[[ $NAME = */* ]] && continue
# Look for optional double quote (protect leading/trailing spaces)
if [ ${#VALUE} -gt 1 ] && [ '"' = "${VALUE:0:1}" -a '"' = "${VALUE:(-1):1}" ]; then
VALUE=${VALUE%?}
VALUE=${VALUE:1}
fi
# Look for 'long_name' in options list
OPTION=$(sed -ne "/,.\?,${NAME},/{p;q}" <<< "$OPTIONS") || true
if [ -n "$OPTION" ]; then
local VAR=${OPTION%%,*}
eval "$VAR=\$VALUE"
fi
done <<< "$SECTION"
fi
}
# $1: section name in ini-style file ("General" will be considered too)
# $2: module name
# $3: option family name (string, example:DOWNLOAD)
# $4 (optional): user specified configuration file
process_configfile_module_options() {
local CONFIG OPTIONS SECTION OPTION LINE VALUE
if [ -z "$4" ]; then
CONFIG="$PLOWSHARE_CONFDIR/plowshare.conf"
test -f "$CONFIG" || CONFIG='/etc/plowshare.conf'
test -f "$CONFIG" || return 0
else
CONFIG=$4
fi
# Security check
if [ -f "$CONFIG" ]; then
if [ -O "$CONFIG" ]; then
# First 10 characters: access rights (human readable form)
local FILE_PERM=$(ls -l "$CONFIG" 2>/dev/null)
if [[ ${FILE_PERM:4:6} != '------' ]]; then
log_notice "WARNING: Wrong configuration file permissions. Fix it with: chmod 600 $CONFIG"
fi
else
log_notice "WARNING: Bad configuration file ownership. Fix it with: chown $USER $CONFIG"
fi
else
return 0
fi
log_report "use $CONFIG"
OPTIONS=$(get_module_options "$2" "$3")
# [General] section after [$1] section
SECTION=$(sed -ne "/\[[Gg]eneral\]/,/^\[/H; /\[$1\]/,/^\[/p; \${x;p}" \
"$CONFIG" | sed -e '/^\(#\|\[\|[[:space:]]*$\)/d') || true
if [ -n "$SECTION" -a -n "$OPTIONS" ]; then
local VAR SHORT LONG
local -lr M=$2
# For example:
# AUTH,a,auth,a=USER:PASSWORD,User account
while read -r; do
IFS=',' read -r VAR SHORT LONG _ <<< "$REPLY"
# Look for 'module/option_name' (short or long) in section list
LINE=$(sed -ne "/^[[:space:]]*$M\/\($SHORT\|$LONG\)[[:space:]]*=/{p;q}" <<< "$SECTION") || true
if [ -n "$LINE" ]; then
VALUE=$(strip <<< "${LINE#*=}")
# Look for optional double quote (protect leading/trailing spaces)
if [ ${#VALUE} -gt 1 ] && [ '"' = "${VALUE:0:1}" -a '"' = "${VALUE:(-1):1}" ]; then
VALUE=${VALUE%?}
VALUE=${VALUE:1}
fi
eval "$VAR=\$VALUE"
log_notice "$M: take --$LONG option from configuration file"
else
unset "$VAR"
fi
done <<< "$OPTIONS"
fi
}
# Get system information.
# $1: absolute path to plowshare's libdir
log_report_info() {
local -r LIBDIR1=$1
local G LIBDIR2
if test $VERBOSE -ge 4; then
log_report '=== SYSTEM INFO BEGIN ==='
log_report "[mach] $HOSTNAME $HOSTTYPE $OSTYPE $MACHTYPE"
log_report "[bash] $BASH_VERSION"
test "$http_proxy" && log_report "[env ] http_proxy=$http_proxy"
if check_exec 'curl'; then
log_report "[curl] $(command curl --version | first_line)"
[ -f "$HOME/.curlrc" ] && \
log_report '[curl] ~/.curlrc exists'
else
log_report '[curl] not found!'
fi
check_exec 'gsed' && G=g
log_report "[sed ] $(${G}sed --version | sed -ne '/version/p')"
log_report "[lib ] '$LIBDIR1'"
# Having several installations is usually a source of issues
for LIBDIR2 in '/usr/share/plowshare' '/usr/local/share/plowshare'; do
if [ "$LIBDIR2" != "$LIBDIR1" -a -f "$LIBDIR2/core.sh" ]; then
log_report "[lib2] '$LIBDIR2'"
fi
done
# Note: git -C <path> is available since v1.8.5
if git -C "$LIBDIR1" rev-parse --is-inside-work-tree &>/dev/null; then
local -r GIT_BRANCH=$(git -C "$LIBDIR1" rev-parse --quiet --abbrev-ref HEAD)
local -r GIT_REV=$(git -C "$LIBDIR1" describe --tags --always 2>/dev/null)
log_report "[git ] $GIT_REV ($GIT_BRANCH branch)"
fi
log_report '=== SYSTEM INFO END ==='
fi
}
# Translate plowdown/plowup --captchamethod argument
# to solve & view method (used by captcha_process)
# $1: method (string)
# $2 (optional): solve method (variable name)
# $3 (optional): display method (variable name)
captcha_method_translate() {
case $1 in
none)
[[ $2 ]] && unset "$2" && eval $2=none
[[ $3 ]] && unset "$3" && eval $3=log
;;
imgur)
[[ $2 ]] && unset "$2" && eval $2=prompt
[[ $3 ]] && unset "$3" && eval $3=imgur
;;
x11|xorg)
if [ -z "$DISPLAY" ]; then
log_error 'Cannot open display. Are you running on a X11/Xorg environment ?'
return $ERR_FATAL
fi
[[ $2 ]] && unset "$2" && eval $2=prompt
[[ $3 ]] && unset "$3" && eval $3=view-x,log
;;
nox)
[[ $2 ]] && unset "$2" && eval $2=prompt
[[ $3 ]] && unset "$3" && eval $3=view-aa,log
;;
online)
if [ -z "$CAPTCHA_ANTIGATE" -a -z "$CAPTCHA_9KWEU" -a \
-z "$CAPTCHA_BHOOD" -a -z "$CAPTCHA_COIN" -a -z "$CAPTCHA_DEATHBY" ]; then
log_error 'No captcha solver account provided. Consider using --9kweu, --antigate, --captchabhood, --captchacoin or --deathbycaptcha options.'
return $ERR_FATAL
fi
[[ $2 ]] && unset "$2" && eval $2=online
[[ $3 ]] && unset "$3" && eval $3=log
;;
fb|fb0)
if ! test -c '/dev/fb0'; then
log_error 'Cannot find framebuffer device /dev/fb0'
return $ERR_FATAL
fi
[[ $2 ]] && unset "$2" && eval $2=prompt
[[ $3 ]] && unset "$3" && eval $3=view-fb,log
;;
*)
log_error "ERROR: Unknown captcha method '$1'.${DISPLAY:+ Try with 'x11' for example.}"
return $ERR_FATAL
;;
esac
return 0
}
# $1: format string
# $2..$n : list of "token,value" (comma is the separator character)
# stdout: result string
# Note: don't use printf (coreutils).
handle_tokens() {
local S=$1
local -A FMT
local OUT K REGEX
shift
# Populate associative array
for K in "$@"; do
REGEX+="${K%%,*}|"
FMT[${K%%,*}]=${K#*,}
done
REGEX=${REGEX%|}
# Protect end-of-line (don't lose trailing newlines)
OUT='x'
# Parsing is greedy, so there must not be any "tail match": for example "%%i"
# Be careful about function arguments!
while [[ $S =~ ^(.*)($REGEX)(.*)$ ]]; do
S=${BASH_REMATCH[1]}
OUT="${FMT[${BASH_REMATCH[2]}]}${BASH_REMATCH[3]}$OUT"
done
OUT="$S$OUT"
echo -n "${OUT%x}"
}
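# Example (hypothetical tokens):
#   handle_tokens 'file: %f (%s)' '%f,a.zip' '%s,10k'
#   # => 'file: a.zip (10k)'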
# Format a string suitable for JSON (see www.json.org)
# Escaped characters: / " \
#
# $1: string
# stdout: JSON string
json_escape() {
local S=${1//\\/\\\\}
S=${S//\//\\/}
echo -n "${S//\"/\\\"}"
}
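# Example:
#   json_escape 'C:\tmp/"x"'   # => C:\\tmp\/\"x\"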
## ----------------------------------------------------------------------------
##
## Private ('static') functions
## Can be called from this script only.
##
stderr() {
echo "$@" >&2
return 0
}
# This function shell-quotes the argument ($1)
# Note: Taken from /etc/bash_completion
quote() {
echo \'${1//\'/\'\\\'\'}\' #'# Help vim syntax highlighting
}
# Append element to a "quoted" array.
# $1: quoted array (multiline).
# $2: new element (string) to add
# stdout: quoted items (one per line)
quote_multiple() {
if [[ $1 ]]; then
echo "${1%)}"
else
echo '('
fi
quote "$2"
echo ')'
}
# $1: input string (this is a comma separated list)
# stdout: quoted items (one per line)
quote_array() {
local -a ARR
local E
IFS=',' read -r -a ARR <<< "$1"
echo '('
for E in "${ARR[@]}"; do
quote "$(strip <<< "$E")"
done
echo ')'
}
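# Example:
#   quote_array 'a, b ,c'   # => (  'a'  'b'  'c'  )  -- one item per line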
# Get filepath from name (consider $PATH).
# $1: executable name (with or without path)
# $?: return 0 for success
# stdout: result
# Example: "mysolver" => "/usr/bin/mysolver"
# Note: We don't need absolute or canonical path, only a valid one.
translate_exec() {
local F=$1
#local F=${1/#\~\//$HOME/}
if test -x "$F"; then
[[ $F = /* ]] || F="$PWD/$F"
else
# Note: prefer "type -P" rather than "type -p" to override
# local definitions (function, alias, ...).
F=$(type -P "$F" 2>/dev/null)
fi
if [ -z "$F" ]; then
log_error "$FUNCNAME: failed ($1)"
return $ERR_FATAL
fi
echo "$F"
}
# Check for positive speed rate
# Ki is kibi (2^10 = 1024). Alias: K
# Mi is mebi (2^20 = 1024^2 = 1048576). Alias: m
# k is kilo (10^3 = 1000)
# M is mega (10^6 = 1000000)
#
# $1: integer number (with or without suffix)
check_transfer_speed() {
local N=${1// }
# Probe for unit
case $N in
*Ki|*Mi)
N=${N%??}
;;
*K|*m|*k|*M)
N=${N%?}
;;
*)
;;
esac
if [[ $N = *[![:digit:]]* || $N -eq 0 ]]; then
return 1
fi
}
# Check for disk size.
# Mi is mebi (2^20 = 1024^2 = 1048576). Alias:m
# Gi is gibi (2^30 = 1024^3 = 1073741824).
# M is mega (10^6 = 1000000). Alias:MB
# G is giga (10^9 = 1000000000). Alias:GB
#
# $1: integer number (with or without suffix)
check_disk_size() {
local N=${1// }
# Probe for unit
case $N in
*Mi|*Gi|*MB|*GB)
N=${N%??}
;;
*M|*m|*G)
N=${N%?}
;;
*)
N=err
;;
esac
if [[ $N = *[![:digit:]]* || $N -eq 0 ]]; then
return 1
fi
}
# Extract a specific block from a HTML content.
# Notes:
# - Use this function with leaf blocks (avoid <div>, <p>)
# - Two distinct blocks can't begin or end on the same line
# - HTML comments are just ignored (call strip_html_comments first)
#
# $1: Marker regex.
# $2: (X)HTML data
# $3: (optional) Nth <tag>. Index start at 1: first block of page.
# Negative index possible: -1 means last block of page and so on.
# Zero or empty value means 1.
# stdout: result
grep_block_by_order() {
local -r TAG=$1
local DATA=$2
local N=${3:-'1'}
local MAX NEW
# Check number of <tag> markers
MAX=$(grep -c "<$TAG[[:space:]>]" <<< "$DATA") || true
if (( $N < 0 )); then
N=$(( $MAX + 1 + N ))
if (( $N <= 0 )); then
log_error "${FUNCNAME[1]} failed: negative index is too big (detected $MAX forms)"
return $ERR_FATAL
fi
fi
NEW=${TAG//[}
NEW=${NEW//]}
while [ "$N" -gt "1" ]; do
(( --N ))
DATA=$(echo "$DATA" | sed -ne "/<\/$TAG>/,\$p" | \
sed -e "1s/<\/\?$TAG[[:space:]>]/<_${NEW}_>/g")
test -z "$DATA" && break
done
# Get first matching block only
local STR=$(sed -ne \
"/<$TAG[[:space:]>]/,/<\/$TAG>/{p;/<\/$TAG/q}" <<< "$DATA")
if [ -z "$STR" ]; then
log_error "${FUNCNAME[1]} failed (sed): \"n=$N\""
return $ERR_FATAL
fi
echo "$STR"
}
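# Example (sketch; $PAGE holds HTML): grab the second <form> block.
#   FORM2=$(grep_block_by_order 'form' "$PAGE" 2) || return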
# Check argument type
# $1: program name (used for error reporting only)
# $2: format (a, c, C, D, e, f, F, l, n, N, r, R, s, S, t)
# $3: option value (string)
# $4: option name (used for error reporting only)
# $?: return 0 for success
check_argument_type() {
local -r NAME=$1
local -r TYPE=$2
local -r VAL=$3
local -r OPT=$4
local RET=$ERR_BAD_COMMAND_LINE
# a: Authentication string (user or user:password)
if [[ $TYPE = 'a' && $VAL != *:* ]]; then
log_debug "$NAME ($OPT): missing password for credentials"
RET=0
# n: Positive integer (>0)
elif [[ $TYPE = 'n' && ( $VAL = *[![:digit:]]* || $VAL -le 0 ) ]]; then
log_error "$NAME ($OPT): positive integer expected"
# N: Positive integer or zero (>=0)
elif [[ $TYPE = 'N' && ( $VAL = *[![:digit:]]* || $VAL = '' ) ]]; then
log_error "$NAME ($OPT): positive or zero integer expected"
# s: Non empty string
# t: Non empty string (multiple command-line switch allowed)
elif [[ $TYPE = [st] && $VAL = '' ]]; then
log_error "$NAME ($OPT): empty string not expected"
# r: Speed rate (positive value, in bytes). Known suffixes: Ki/K/k/Mi/M/m
elif [ "$TYPE" = 'r' ] && ! check_transfer_speed "$VAL"; then
log_error "$NAME ($OPT): positive transfer rate expected"
# R: Disk size (positive value, suffix is mandatory). Known suffixes: Mi/m/M/MB/Gi/G/GB
elif [ "$TYPE" = 'R' ] && ! check_disk_size "$VAL"; then
log_error "$NAME ($OPT): wrong value, megabyte or gigabyte suffix is mandatory"
# e: E-mail string
elif [[ $TYPE = 'e' && "${VAL#*@*.}" = "$VAL" ]]; then
log_error "$NAME ($OPT): invalid email address"
# f: file (with read access)
elif [ $TYPE = 'f' ]; then
if test -z "$VAL"; then
log_error "$NAME ($OPT): filename expected"
elif test -f "$VAL"; then
if test -r "$VAL"; then
RET=0
else
log_error "$NAME ($OPT): read permissions expected \`$VAL'"
fi
else
log_error "$NAME ($OPT): cannot access file \`$VAL'"
fi
# F: executable file (consider $PATH)
elif [ $TYPE = 'F' ]; then
if test -z "$VAL"; then
log_error "$NAME ($OPT): executable filename expected"
elif test -f "$VAL"; then
if test -x "$VAL"; then
RET=0
else
log_error "$NAME ($OPT): executable permissions expected \`$VAL'"
fi
elif check_exec "$VAL"; then
# Sanity check (file in $PATH has no x perms)
local F=$(type -P "$VAL" 2>/dev/null)
if test -x "$F"; then
RET=0
else
log_error "$NAME ($OPT): executable permissions expected \`$F'"
fi
else
log_error "$NAME ($OPT): cannot access file \`$VAL'"
fi
# D: directory (with write access)
elif [ $TYPE = 'D' ]; then
if test -z "$VAL"; then
log_error "$NAME ($OPT): directory expected"
elif test -d "$VAL"; then
if test -w "$VAL"; then
RET=0
else
log_error "$NAME ($OPT): write permissions expected \`$VAL'"
fi
else
log_error "$NAME ($OPT): cannot access directory \`$VAL'"
fi
# l: List (comma-separated values), non empty
elif [[ $TYPE = 'l' && $VAL = '' ]]; then
log_error "$NAME ($OPT): comma-separated list expected"
# c: choice list
# C: choice list with empty string allowed. Long options only advised.
elif [[ $TYPE = [cC]* ]]; then
if [[ $TYPE = C* && $VAL = '' ]]; then
RET=0
else
local -a ITEMS
IFS='|' read -r -a ITEMS <<< "${TYPE:2}"
if find_in_array ITEMS[@] "$VAL"; then
RET=0
else
log_error "$NAME ($OPT): wrong value '$VAL'. Possible values are: ${ITEMS[*]}."
fi
fi
elif [[ $TYPE = [lsSt] ]]; then
RET=0
elif [[ $TYPE = [aenNrR] ]]; then
if [ "${VAL:0:1}" = '-' ]; then
log_error "$NAME ($OPT): missing parameter"
else
RET=0
fi
else
log_error "$NAME: unknown argument type '$TYPE'"
fi
[ $RET -eq 0 ] || echo false
return $RET
}
# Standalone argument parsing (don't use GNU getopt or builtin getopts Bash)
# Notes:
# - Function is designed to be called in an eval statement (prints arrays on stdout)
# - Stdin dash parameter "-" is handled
# - Double dash parameter "--" is handled
#
# $1: program name (used for error reporting only)
# $2: option list (one per line)
# $3: step number (-1, 0 or 1). Always declare UNUSED_ARGS & UNUSED_OPTS arrays.
# -1: check plow* args and declare readonly variables.
# CLOSE_OPT variable can contain a close match heuristic (possible command-line user typo)
# 0: check all module args
# 1: declare module_vars_set & module_vars_unset functions
# $4..$n: arguments
# stdout: (depending step number) declare two arrays (UNUSED_ARGS & UNUSED_OPTS), variables, functions
process_options() {
local -r NAME=$1
local -r OPTIONS=$2
local -r STEP=$3
local -A RES
local -a UNUSED_OPTS UNUSED_ARGS
local -a OPTS_VAR_LONG OPTS_NAME_LONG OPTS_TYPE_LONG
local -a OPTS_VAR_SHORT OPTS_NAME_SHORT OPTS_TYPE_SHORT
local ARG VAR SHORT LONG TYPE SKIP_ARG FOUND FUNC
shift 3
UNUSED_OPTS=()
UNUSED_ARGS=()
if [ -z "$OPTIONS" ]; then
if [ $STEP -gt 0 ]; then
echo "${NAME}_vars_set() { :; }"
echo "${NAME}_vars_unset() { :; }"
return 0
fi
else
# Populate OPTS_* vars
while read -r ARG; do
IFS=',' read -r VAR SHORT LONG TYPE _ <<< "$ARG"
if [ -n "$LONG" ]; then
OPTS_VAR_LONG[${#OPTS_VAR_LONG[@]}]=$VAR
OPTS_NAME_LONG[${#OPTS_NAME_LONG[@]}]="--$LONG"
OPTS_TYPE_LONG[${#OPTS_TYPE_LONG[@]}]=$TYPE
fi
if [ -n "$SHORT" ]; then
OPTS_VAR_SHORT[${#OPTS_VAR_SHORT[@]}]=$VAR
OPTS_NAME_SHORT[${#OPTS_NAME_SHORT[@]}]="-$SHORT"
OPTS_TYPE_SHORT[${#OPTS_TYPE_SHORT[@]}]=$TYPE
fi
done <<< "$OPTIONS"
fi
for ARG in "$@"; do
shift
if [ -n "$SKIP_ARG" ]; then
unset SKIP_ARG
[ $STEP -eq 0 ] && UNUSED_OPTS[${#UNUSED_OPTS[@]}]="$ARG"
continue
fi
if [ "$ARG" = '--' ]; then
UNUSED_ARGS=("${UNUSED_ARGS[@]}" "$@")
break
fi
unset FOUND
# Long option
if [ "${ARG:0:2}" = '--' ]; then
for I in "${!OPTS_NAME_LONG[@]}"; do
if [ "${OPTS_NAME_LONG[$I]}" = "${ARG%%=*}" ]; then
# Argument required?
TYPE=${OPTS_TYPE_LONG[$I]%%=*}
if [ "$TYPE" = 'l' ]; then
FUNC=quote_array
elif [[ $TYPE = [rR] ]]; then
FUNC=translate_size
elif [ "$TYPE" = 'F' ]; then
FUNC=translate_exec
elif [ "$TYPE" = 't' ]; then
FUNC=quote_multiple
else
FUNC=quote
fi
if [ -z "$TYPE" ]; then
[[ ${RES[${OPTS_VAR_LONG[$I]}]} ]] && \
log_notice "$NAME: useless duplicated option ${ARG%%=*}, ignoring"
RES[${OPTS_VAR_LONG[$I]}]=1
[ "${ARG%%=*}" != "$ARG" ] && \
log_notice "$NAME: unwanted argument \`${ARG#*=}' for ${ARG%%=*}, ignoring"
# Argument with equal (ex: --timeout=60)
elif [ "${ARG%%=*}" != "$ARG" ]; then
[ $STEP -gt 0 ] || check_argument_type "$NAME" \
"$TYPE" "${ARG#*=}" "${ARG%%=*}" || return
if [ "$TYPE" = 't' ]; then
RES[${OPTS_VAR_LONG[$I]}]="$($FUNC "${RES[${OPTS_VAR_LONG[$I]}]}" "${ARG#*=}")"
else
[[ ${RES[${OPTS_VAR_LONG[$I]}]} ]] && \
log_notice "$NAME: duplicated option ${ARG%%=*}, taking last one"
RES[${OPTS_VAR_LONG[$I]}]="$($FUNC "${ARG#*=}")"
fi
else
if [ $# -eq 0 ]; then
log_error "$NAME ($ARG): argument required"
echo "exit $ERR_BAD_COMMAND_LINE"
return $ERR_BAD_COMMAND_LINE
fi
[ $STEP -gt 0 ] || check_argument_type "$NAME" \
"$TYPE" "$1" "$ARG" || return
if [ "$TYPE" = 't' ]; then
RES[${OPTS_VAR_LONG[$I]}]="$($FUNC "${RES[${OPTS_VAR_LONG[$I]}]}" "$1")"
else
[[ ${RES[${OPTS_VAR_LONG[$I]}]} ]] && \
log_notice "$NAME: duplicated option $ARG, taking last one"
RES[${OPTS_VAR_LONG[$I]}]="$($FUNC "$1")"
fi
SKIP_ARG=1
fi
FOUND=1
break
fi
done
# Short option
elif [ "${ARG:0:1}" = '-' ]; then
for I in "${!OPTS_NAME_SHORT[@]}"; do
if [ "${OPTS_NAME_SHORT[$I]}" = "${ARG:0:2}" ]; then
# Argument required?
TYPE=${OPTS_TYPE_SHORT[$I]%%=*}
if [ "$TYPE" = 'l' ]; then
FUNC=quote_array
elif [[ $TYPE = [rR] ]]; then
FUNC=translate_size
elif [ "$TYPE" = 'F' ]; then
FUNC=translate_exec
elif [ "$TYPE" = 't' ]; then
FUNC=quote_multiple
else
FUNC=quote
fi
if [ -z "$TYPE" ]; then
[[ ${RES[${OPTS_VAR_SHORT[$I]}]} ]] && \
log_notice "$NAME: useless duplicated option ${ARG:0:2}, ignoring"
RES[${OPTS_VAR_SHORT[$I]}]=1
[[ ${#ARG} -gt 2 ]] && \
log_notice "$NAME: unwanted argument \`${ARG:2}' for ${ARG:0:2}, ignoring"
# Argument without whitespace (ex: -v3)
elif [ ${#ARG} -gt 2 ]; then
[ $STEP -gt 0 ] || check_argument_type "$NAME" \
"$TYPE" "${ARG:2}" "${ARG:0:2}" || return
if [ "$TYPE" = 't' ]; then
RES[${OPTS_VAR_SHORT[$I]}]="$($FUNC "${RES[${OPTS_VAR_SHORT[$I]}]}" "${ARG:2}")"
else
[[ ${RES[${OPTS_VAR_SHORT[$I]}]} ]] && \
log_notice "$NAME: duplicated option ${ARG:0:2}, taking last one"
RES[${OPTS_VAR_SHORT[$I]}]="$($FUNC "${ARG:2}")"
fi
else
if [ $# -eq 0 ]; then
log_error "$NAME ($ARG): argument required"
echo "exit $ERR_BAD_COMMAND_LINE"
return $ERR_BAD_COMMAND_LINE
fi
[ $STEP -gt 0 ] || check_argument_type "$NAME" \
"$TYPE" "$1" "$ARG" || return
if [ "$TYPE" = 't' ]; then
RES[${OPTS_VAR_SHORT[$I]}]="$($FUNC "${RES[${OPTS_VAR_SHORT[$I]}]}" "$1")"
else
[[ ${RES[${OPTS_VAR_SHORT[$I]}]} ]] && \
log_notice "$NAME: duplicated option $ARG, taking last one"
RES[${OPTS_VAR_SHORT[$I]}]="$($FUNC "$1")"
fi
SKIP_ARG=1
fi
FOUND=1
break
fi
done
fi
if [ -z "$FOUND" ]; then
# Check for user typo: -option instead of --option
# Note: COLOR may not be defined here (STEP < 0)
if [[ $ARG =~ ^-[[:alnum:]][[:alnum:]-]+ ]]; then
if find_in_array OPTS_NAME_LONG[@] "-${BASH_REMATCH[0]}"; then
log_error "$NAME: unknown command-line option \`$ARG\`, do you mean \`-${BASH_REMATCH[0]}\` (with two dashes)?"
echo "exit $ERR_BAD_COMMAND_LINE"
return $ERR_BAD_COMMAND_LINE
fi
fi
# Restrict to MAIN_OPTIONS & EARLY_OPTIONS because an unused module switch could be found
# Will detect: "--print" instead of "--printf" and "--noplowsharerc" instead of "--no-plowsharerc"
if [ $STEP -lt 0 ]; then
[[ $ARG = --??* ]] && \
CLOSE_OPT=$(close_match_in_array OPTS_NAME_LONG[@] "$ARG") && \
declare -p CLOSE_OPT
fi
if [ $STEP -eq 0 ]; then
# Accept '-' (stdin semantic) argument
if [[ $ARG = -?* ]]; then
if [[ $CLOSE_OPT ]]; then
log_error "$NAME: unknown command-line option \`$ARG\`, do you mean \`$CLOSE_OPT\` (close match)?"
else
log_error "$NAME: unknown command-line option \`$ARG\`"
fi
echo "exit $ERR_BAD_COMMAND_LINE"
return $ERR_BAD_COMMAND_LINE
fi
UNUSED_ARGS[${#UNUSED_ARGS[@]}]="$ARG"
else
UNUSED_OPTS[${#UNUSED_OPTS[@]}]="$ARG"
fi
elif [ $STEP -eq 0 ]; then
UNUSED_OPTS[${#UNUSED_OPTS[@]}]="$ARG"
fi
done
# Declare core options as readonly
if [ $STEP -lt 0 ]; then
for ARG in "${!RES[@]}"; do echo "declare -r ${ARG}=${RES[$ARG]}"; done
# Declare target module options: ${NAME}_vars_set/unset
elif [ $STEP -gt 0 ]; then
echo "${NAME}_vars_set() { :"
for ARG in "${!RES[@]}"; do echo "${ARG}=${RES[$ARG]}"; done
echo '}'
echo "${NAME}_vars_unset() { :"
for ARG in "${!RES[@]}"; do echo "${ARG%%=*}="; done
echo '}'
fi
declare -p UNUSED_ARGS
declare -p UNUSED_OPTS
}
# Delete leading and trailing whitespace & blank lines
# stdin: input (multiline) string
# stdout: result string
strip_and_drop_empty_lines() {
sed -e '/^[[:space:]]*$/d; s/^[[:space:]]*//; s/[[:space:]]*$//' <<< "$1"
}
# Look for a configuration module variable
# Example: MODULE_4SHARED_DOWNLOAD_OPTIONS (result can be multiline)
# $1: module name
# $2: option family name (string, example:UPLOAD)
# stdout: options list (one per line)
get_module_options() {
local -ur VAR="MODULE_${1}_${2}_OPTIONS"
strip_and_drop_empty_lines "${!VAR}"
}
# Example: 12345 => "3h25m45s"
# $1: duration (integer)
splitseconds() {
local DIV_H=$(( $1 / 3600 ))
local DIV_M=$(( ($1 % 3600) / 60 ))
local DIV_S=$(( $1 % 60 ))
[ "$DIV_H" -eq 0 ] || echo -n "${DIV_H}h"
[ "$DIV_M" -eq 0 ] || echo -n "${DIV_M}m"
[ "$DIV_S" -eq 0 ] && echo || echo "${DIV_S}s"
}
# Called by wait
# See also timeout_init()
timeout_update() {
local WAIT=$1
test -z "$PS_TIMEOUT" && return
log_debug "time left to timeout: $PS_TIMEOUT secs"
if [[ $PS_TIMEOUT -lt $WAIT ]]; then
log_notice "Timeout reached (asked to wait $WAIT seconds, but remaining time is $PS_TIMEOUT)"
return $ERR_MAX_WAIT_REACHED
fi
(( PS_TIMEOUT -= WAIT ))
}
# Look for one element in an array
# $1: array[@]
# $2: element to find
# $3: alternate element to find (can be null)
# $?: 0 for success (one element found), not found otherwise
find_in_array() {
local ELT
for ELT in "${!1}"; do
[ "$ELT" = "$2" -o "$ELT" = "$3" ] && return 0
done
return 1
}
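# Example:
#   FORMATS=('jpg' 'png' 'gif')
#   find_in_array FORMATS[@] 'png' && echo 'supported'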
# Look for a close match in an array (of command line options)
# $1: array[@]
# $2: element to find (string)
# $?: 0 for success (close element found), not found otherwise
# stdout: array element, undefined if not found.
close_match_in_array() {
local ELT
local S=${2/#--no/--no-?}
for ELT in "${!1}"; do
if [[ $ELT =~ ^$S.?$ ]]; then
echo "$ELT"
return 0
fi
done
return 1
}
# Find next array index of one element
# $1: array[@]
# $2: element to find
# $3: alternate element to find (can be null)
# $?: 0 for success (one element found), not found otherwise
# stdout: array index, undefined if not found.
index_in_array() {
local ELT I=0
for ELT in "${!1}"; do
(( ++I ))
if [ "$ELT" = "$2" -o "$ELT" = "$3" ]; then
# Note: echoes the next index, which assumes the match is not the last element
echo "$I"
return 0
fi
done
return 1
}
# Verify balance (9kw.eu)
# $1: 9kw.eu captcha key
# $?: 0 for success (enough credits)
service_9kweu_ready() {
local -r KEY=$1
local AMOUNT
if [ -z "$KEY" ]; then
return $ERR_FATAL
fi
AMOUNT=$(curl --get --data 'action=usercaptchaguthaben' \
--data "apikey=$CAPTCHA_9KWEU" 'http://www.9kw.eu/index.cgi') || { \
log_notice '9kweu: site seems to be down'
return $ERR_NETWORK
}
if [[ $AMOUNT = 00[012][[:digit:]][[:space:]]* ]]; then
# 0011 Balance insufficient
if [ "${AMOUNT:0:5}" = '0011 ' ]; then
log_notice '9kw.eu: no more credits'
else
log_error "9kw.eu remote error: ${AMOUNT:5}"
fi
# One solved captcha costs between 5 and 10
elif (( AMOUNT < 10 )); then
log_notice '9kw.eu: insufficient credits'
else
log_debug "9kw.eu credits: $AMOUNT"
return 0
fi
return $ERR_FATAL
}
# Verify balance (antigate)
# $1: antigate.com captcha key
# $?: 0 for success (enough credits)
service_antigate_ready() {
local -r KEY=$1
local AMOUNT LOAD
if [ -z "$KEY" ]; then
return $ERR_FATAL
fi
AMOUNT=$(curl --get --data "key=${CAPTCHA_ANTIGATE}&action=getbalance" \
'http://antigate.com/res.php') || { \
log_notice 'antigate: site seems to be down'
return $ERR_NETWORK
}
if match '500 Internal Server Error' "$AMOUNT"; then
log_error 'antigate: internal server error (HTTP 500)'
return $ERR_CAPTCHA
elif match '502 Bad Gateway' "$AMOUNT"; then
log_error 'antigate: bad gateway (HTTP 502)'
return $ERR_CAPTCHA
elif match '503 Service Unavailable' "$AMOUNT"; then
log_error 'antigate: service unavailable (HTTP 503)'
return $ERR_CAPTCHA
elif match '^ERROR' "$AMOUNT"; then
log_error "antigate error: $AMOUNT"
return $ERR_FATAL
elif [ '0.0000' = "$AMOUNT" -o '-' = "${AMOUNT:0:1}" ]; then
log_notice 'antigate: no more credits (or bad key)'
return $ERR_FATAL
else
log_debug "antigate credits: \$$AMOUNT"
fi
# Get real time system load info (XML results)
if LOAD=$(curl 'http://antigate.com/load.php'); then
local N P B T
N=$(parse_tag 'waiting' <<< "$LOAD")
P=$(parse_tag 'load' <<< "$LOAD")
B=$(parse_tag 'minbid' <<< "$LOAD")
T=$(parse_tag 'averageRecognitionTime' <<< "$LOAD")
log_debug "Available workers: $N (load: ${P}%)"
log_debug "Mininum bid: $B"
log_debug "Average recognition time (s): $T"
if [ "$N" = '0' -a "$P" = '100' ]; then
log_notice 'antigate: no slot available (all workers are busy)'
return $ERR_FATAL
fi
fi
}
# Verify balance (Captcha Brotherhood)
# $1: captcha brotherhood username
# $2: captcha brotherhood password
# $?: 0 for success (enough credits)
service_captchabrotherhood_ready() {
local RESPONSE AMOUNT ERROR
if [ -z "$1" -o -z "$2" ]; then
return $ERR_FATAL
fi
RESPONSE=$(curl --get -d "username=$1" -d "password=$2" \
'http://www.captchabrotherhood.com/askCredits.aspx') || return
if [ "${RESPONSE:0:3}" = 'OK-' ]; then
AMOUNT=${RESPONSE:3}
if (( AMOUNT < 10 )); then
log_notice "CaptchaBrotherHood: not enough credits ($1)"
return $ERR_FATAL
fi
else
ERROR=${RESPONSE#Error-}
log_error "CaptchaBrotherHood error: $ERROR"
return $ERR_FATAL
fi
log_debug "CaptchaBrotherhood credits: $AMOUNT"
}
# Verify balance (captchacoin)
# $1: captchacoin.com captcha key
# $?: 0 for success (enough credits)
service_captchacoin_ready() {
local -r KEY=$1
local JSON ERROR AMOUNT
if [ -z "$KEY" ]; then
return $ERR_FATAL
fi
JSON=$(curl --get -d "api_key=${KEY}" \
'http://api.captchacoin.com/api/details') || { \
log_notice 'CaptchaCoin: site seems to be down'
return $ERR_NETWORK
}
if match_json_true 'success' "$JSON"; then
AMOUNT=$(echo "$JSON" | parse_json 'balance')
if (( AMOUNT < 100 )); then
log_error 'CaptchaCoin: not enough credits'
return $ERR_FATAL
fi
else
ERROR=$(echo "$JSON" | parse_json_quiet 'error')
if [ -n "$ERROR" ]; then
log_error "CaptchaCoin error: $ERROR"
else
log_error "CaptchaCoin unknown error: $JSON"
fi
return $ERR_FATAL
fi
log_debug "CaptchaCoin credits: $AMOUNT"
}
# Verify balance (DeathByCaptcha)
# $1: death by captcha username
# $2: death by captcha password
# $?: 0 for success (enough credits)
service_captchadeathby_ready() {
local -r USER=$1
local JSON STATUS AMOUNT ERROR
if [ -z "$1" -o -z "$2" ]; then
return $ERR_FATAL
fi
JSON=$(curl -F "username=$USER" -F "password=$2" \
--header 'Accept: application/json' \
'http://api.dbcapi.me/api/user') || { \
log_notice 'DeathByCaptcha: site seems to be down'
return $ERR_NETWORK
}
STATUS=$(echo "$JSON" | parse_json_quiet 'status')
if [ "$STATUS" = 0 ]; then
AMOUNT=$(echo "$JSON" | parse_json 'balance')
if match_json_true 'is_banned' "$JSON"; then
log_error "DeathByCaptcha error: $USER is banned"
return $ERR_FATAL
fi
if [ "${AMOUNT%.*}" = 0 ]; then
log_notice "DeathByCaptcha: not enough credits ($USER)"
return $ERR_FATAL
fi
elif [ "$STATUS" = 255 ]; then
ERROR=$(echo "$JSON" | parse_json_quiet 'error')
log_error "DeathByCaptcha error: $ERROR"
return $ERR_FATAL
else
log_error "DeathByCaptcha unknown error: $JSON"
return $ERR_FATAL
fi
log_debug "DeathByCaptcha credits: $AMOUNT"
}
# Upload (captcha) image to Imgur (picture hosting service)
# Using official API: http://api.imgur.com/
# $1: image filename (with full path)
# stdout: delete url
# $?: 0 for success
image_upload_imgur() {
local -r IMG=$1
local -r BASE_API='https://api.imgur.com/3'
local RESPONSE DIRECT_URL FID DEL_HASH
log_debug 'uploading image to Imgur.com'
# Plowshare App for Imgur
RESPONSE=$(curl -F "image=@$IMG" \
-H 'Authorization: Client-ID 5926b561daf4510' \
--form-string 'type=file' \
--form-string 'title=Plowshare uploaded image' \
"$BASE_API/upload.json") || return
DIRECT_URL=$(parse_json_quiet link <<< "$RESPONSE")
FID=$(parse_json_quiet id <<< "$RESPONSE")
DEL_HASH=$(parse_json_quiet deletehash <<< "$RESPONSE")
if [ -z "$DIRECT_URL" -o -z "$FID" ]; then
log_debug "$FUNCNAME: $RESPONSE"
if match '504 Gateway Time-out' "$RESPONSE"; then
log_error "$FUNCNAME: upload error (Gateway Time-out)"
# <h1>Imgur is over capacity!</h1>
elif match 'Imgur is over capacity' "$RESPONSE"; then
log_error "$FUNCNAME: upload error (Service Unavailable)"
else
log_error "$FUNCNAME: upload error"
fi
return $ERR_FATAL
fi
log_error "Image: $DIRECT_URL"
log_error "Image: http://imgur.com/$FID"
echo "$DEL_HASH"
}
# Delete (captcha) image from Imgur (picture hosting service)
# $1: delete hash
image_delete_imgur() {
local -r HID=$1
local -r BASE_API='https://api.imgur.com/3'
local RESPONSE
log_debug 'deleting image from Imgur.com'
RESPONSE=$(curl -X DELETE \
-H 'Authorization: Client-ID 5926b561daf4510' \
"$BASE_API/image/$HID.json") || return
if ! match_json_true 'success' "$RESPONSE"; then
local MSG ERRNO
MSG=$(parse_json error <<< "$RESPONSE")
ERRNO=$(parse_json status <<< "$RESPONSE")
log_notice "$FUNCNAME: remote error, $MSG ($ERRNO)"
fi
}
# Some debug information
log_notice_stack() {
local N
for N in "${!FUNCNAME[@]}"; do
[ $N -le 1 ] && continue
log_notice "Failed inside ${FUNCNAME[$N]}(), line ${BASH_LINENO[$((N-1))]}, $(basename_file "${BASH_SOURCE[$N]}")"
# Exit if we go outside core.sh scope
[[ ${BASH_SOURCE[$N]} = */core.sh ]] || break
done
}
# log_notice without end of line
log_notice_norc() {
if [[ $COLOR ]]; then
test $VERBOSE -lt 2 || echo -ne "\033[0;33m$@\033[0m" >&2
else
test $VERBOSE -lt 2 || stderr -ne "$@"
fi
}
# Bash4 builtin error-handling function
# VERBOSE is not defined here.
command_not_found_handle() {
local -r CMD=$1
local ERR=$ERR_SYSTEM
# Missing module function
if [[ $CMD =~ _(delete|download|list|probe|upload)$ ]]; then
stderr "$MODULE module: \`$CMD' function was not found"
else
[ "$CMD" = 'curl' ] && ERR=62
stderr "$CMD: command not found"
fi
shift
stderr "called with arguments: $*"
return $ERR
}
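# Behavior sketch (assuming $ERR_SYSTEM is defined earlier in this file):
# an unknown command such as "curll http://x" prints "curll: command not found"
# plus its arguments and returns $ERR_SYSTEM (62 is used only for literal 'curl').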
|
arcresu/plowshare-debian
|
src/core.sh
|
Shell
|
gpl-3.0
| 132,226 |
#!/bin/sh
# operate on a very small (1-sector) "disk"
# Copyright (C) 2009-2014, 2019-2021 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
. "${srcdir=.}/init.sh"; path_prepend_ ../parted
ss=$sector_size_
dev=loop-file
for opt in '' -s; do
dd if=/dev/null of=$dev bs=1 seek=$ss || framework_failure
# create an msdos partition table:
# Before parted-2.1, without -s, this would fail with a bogus diagnostic:
# Error: Success during read on .../tests/loop-file
# Retry/Ignore/Cancel? ^C
parted $opt $dev mklabel msdos ---pretend-input-tty </dev/null > out 2>&1 \
|| fail=1
# expect no output
sed 's/.*WARNING: You are not superuser.*//;/^$/d' out > k && mv k out \
|| fail=1
# When run as root, there are just curses-related control chars. Remove them.
sed 's/^.\{1,12\}$//;/^$/d' out > k && mv k out \
|| fail=1
compare /dev/null out || fail=1
parted -s $dev p || fail=1
rm -f $dev
done
Exit $fail
|
bcl/parted
|
tests/t0001-tiny.sh
|
Shell
|
gpl-3.0
| 1,567 |
#!/bin/bash
function help() {
echo "Usage: buildpkg.sh BUILD_DIR INSTALLER_STUB QCOMPRESS_BIN RUNTIME_DEPS_DIR ARTEFACTS_DIR TARGET_DIR TARGET_NAME"
}
function build() {
checkDependencies
setupOrCleanBuildDir
copyRuntimeDependencies
setupRuntimeDependencies
copyArtefacts
fixLibraryNames
copyInstallerStub
mkPkg
copyTargetToTargetDir
deletePkgBuildDir
}
function setupOrCleanBuildDir() {
if [ -e $PKG_BUILD_DIR ]; then
echo -n "Delete old pkg build dir: $PKG_BUILD_DIR..."
rm -rf $PKG_BUILD_DIR
echo "Done"
fi
echo -n "Create pkg build dir..."
mkdir $PKG_BUILD_DIR
mkdir $PKG_BUILD_DIR/bin
echo "Done"
if [ ! -e $TARGET_DIR ]; then
echo -n "Create Target dir $TARGET_DIR..."
mkdir $TARGET_DIR
echo "Done"
fi
if [ -e $TARGET_DIR/$TARGET_NAME ]; then
echo "Delete old package $TARGET_DIR/$TARGET_NAME"
rm -f $TARGET_DIR/$TARGET_NAME
fi
}
function deletePkgBuildDir() {
echo -n "Deleting Package Build Dir..."
rm -rf $PKG_BUILD_DIR
echo "Done"
}
function copyInstallerStub() {
echo -n "Copy Installer stub..."
cp $INSTALLER_STUB $PKG_BUILD_DIR/$TARGET_NAME
echo "Done"
}
function checkDependencies() {
if [ ! -e $INSTALLER_STUB ]; then
echo "INSTALLER_STUB: $INSTALLER_STUB does not exist!"
exit 1
fi
if [ ! -e $QCOMPRESS_BIN ]; then
echo "QCOMPRESS_BIN: $QCOMPRESS_BIN does not exist!"
exit 1
fi
if [ ! -e $RUNTIME_DEPS_DIR ]; then
echo "RUNTIME_DEPS_DIR: $RUNTIME_DEPS_DIR does not exist!"
exit 1
fi
if [ ! -e $ARTEFACTS_DIR ]; then
echo "ARTEFACTS_DIR: $ARTEFACTS_DIR does not exist!"
exit 1
fi
}
function setupRuntimeDependencies() {
bash Setup/BuildPackage/SetupLinuxRuntimeDependencies.sh $ARTEFACTS_DIR/ $FULL_PKG_BUILD_DIR/bin
}
function copyRuntimeDependencies() {
echo -n "Copy runtime dependencies to build dir..."
cp -r $RUNTIME_DEPS_DIR/* $PKG_BUILD_DIR/bin
echo "Done"
}
function copyArtefacts() {
echo -n "Copy build artefacts to build dir..."
cp -r $ARTEFACTS_DIR/* $PKG_BUILD_DIR/bin
echo "Done"
}
function fixLibraryNames() {
#if [ `ls -la *.so.1.0.0 &> /dev/null` ]; then
OLDDIR=`pwd`
cd $PKG_BUILD_DIR/bin
for i in `ls *.so.1.0.0`; do
newName=`echo $i | cut -d'.' -f1`
newName="$newName.so.1"
mv $i $newName
done
cd $OLDDIR
#fi
}
function mkPkg() {
OLDDIR=`pwd`
echo "cd to $PKG_BUILD_DIR/bin"
cd $PKG_BUILD_DIR/bin
echo "Build Package"
$QCOMPRESS_BIN directory . . ../$TARGET_NAME
cd $OLDDIR
}
function copyTargetToTargetDir() {
echo "Move $PKG_BUILD_DIR/$TARGET_NAME to $TARGET_DIR/$TARGET_NAME"
mv $PKG_BUILD_DIR/$TARGET_NAME $TARGET_DIR/$TARGET_NAME
chmod +x $TARGET_DIR/$TARGET_NAME
}
# exec starts here
PKG_BUILD_DIR="./pkg_build_dir/"
CURRDIR=`pwd`
FULL_PKG_BUILD_DIR="$CURRDIR/pkg_build_dir/"
INSTALLER_STUB=$1
QCOMPRESS_BIN=$2
RUNTIME_DEPS_DIR=$3
ARTEFACTS_DIR=$4
TARGET_DIR=$5
TARGET_NAME=$6
if [ "$INSTALLER_STUB" == "" ]; then
help;
exit 1
else
echo "INSTALLER_STUB: $INSTALLER_STUB"
fi
if [ "$QCOMPRESS_BIN" == "" ]; then
help;
exit 1
else
echo "QCOMPRESS_BIN: $QCOMPRESS_BIN"
fi
if [ "$RUNTIME_DEPS_DIR" == "" ]; then
help;
exit 1
else
echo "RUNTIME_DEPS_DIR: $RUNTIME_DEPS_DIR"
fi
if [ "$ARTEFACTS_DIR" == "" ]; then
help;
exit 1
else
echo "ARTEFACTS_DIR: $ARTEFACTS_DIR"
fi
if [ "$TARGET_DIR" == "" ]; then
help;
exit 1
else
echo "TARGET_DIR: $TARGET_DIR"
fi
if [ "$TARGET_NAME" == "" ]; then
help;
exit 1
else
echo "TARGET_NAME: $TARGET_NAME"
fi
build
|
baoping/Red-Bull-Media-Player
|
Setup/BuildPackage/mkpkg.sh
|
Shell
|
gpl-3.0
| 3,532 |
#!/bin/sh
curl -X POST "http://localhost:8081/createItem?name=javastuff" --data-binary "@config.xml" -H "Content-Type: text/xml"
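# Assumes a Jenkins job definition exists in ./config.xml next to this script;
# adjust the host, port and job name ("javastuff") for your CI instance.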
|
pnmtjonahen/docker
|
ci/jobs/javastuff/setup.sh
|
Shell
|
gpl-3.0
| 129 |
#!/bin/bash
# bash voodoo to find absolute path of the directory this file is in without symlinks
# taken from stackoverflow, seems to work well
FIND_CONF_DIR="${BASH_SOURCE[0]}"
while [ -h "$FIND_CONF_DIR" ]; do # resolve $FIND_CONF_DIR until the file is no longer a symlink
githooksdir="$( cd -P "$( dirname "$FIND_CONF_DIR" )" && pwd )"
FIND_CONF_DIR="$(readlink "$FIND_CONF_DIR")"
[[ $FIND_CONF_DIR != /* ]] && FIND_CONF_DIR="$githooksdir/$FIND_CONF_DIR" # if $FIND_CONF_DIR was a relative symlink, we need to resolve it relative to the path where the symlink file was located
done
githooksdir="$( cd -P "$( dirname "$FIND_CONF_DIR" )" && pwd )"
all_githooks='applypatch-msg commit-msg post-update pre-applypatch pre-commit pre-push pre-rebase prepare-commit-msg update'
used_githooks="$(cd $githooksdir; ls $all_githooks 2>/dev/null)"
git_root="$(git rev-parse --show-toplevel)"
cd $git_root
for hook in $used_githooks;
do
ln -sf "${githooksdir}/${hook}" "${git_root}/.git/hooks/$hook"
done
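# Sanity check (sketch): the active hooks should now be symlinks into
# $githooksdir, e.g.:
#   ls -l "$git_root/.git/hooks"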
|
awesomefireduck/maillurgy
|
scripts/hooks/install.sh
|
Shell
|
gpl-3.0
| 1,001 |
#!/bin/sh -ex
DIR=$1; shift
SUFFIX=$1; shift
RELPATH=$1; shift
cd $DIR;
for f in *$SUFFIX; do
if [ -f $f -a ! -h $f ]; then
if file $f | grep -m1 Mach; then
chmod u+w $f
if [ -n "$RELPATH" ]; then
install_name_tool -id "@rpath/$RELPATH/$f" $f;
else
install_name_tool -id "@rpath/$f" $f;
fi;
chmod u-w $f
fi;
fi;
done;
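# Example invocation (sketch; arguments are DIR SUFFIX RELPATH):
#   ./fix_libname.sh build/lib .dylib ""       # install name becomes @rpath/<file>
#   ./fix_libname.sh build/lib .dylib vendor   # install name becomes @rpath/vendor/<file>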
|
polymake/polybundle
|
build_scripts/functions/fix_libname.sh
|
Shell
|
gpl-3.0
| 350 |
#!/bin/bash
FPID=`ps ux | grep '[d]issferretd.py' | awk '{print $2}'`
echo "killing pid ${FPID}"
kill -n 9 $FPID
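# Alternative (sketch): pgrep matches on the full command line and avoids
# catching the grep process itself:
#   kill -9 $(pgrep -f 'dissferretd.py')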
|
clayball/Dissembling-Ferret
|
servers/kill-server.sh
|
Shell
|
gpl-3.0
| 112 |
#!/bin/bash
#TODO:Fix this and give a little format, for Gods sake!
# Description: Program that randomly fills my USB stick with music; it has to be mounted as vfat
# The music collection layout is ALBUMS/<ARTIST>/<ALBUM>
# Version: 0-0-1
i=0
a=0
if [ $# -ne 4 ]
then
echo -e "Usage: $0 <directorio coleccion> <No de albums a copiar> <directorio destino> <Borrar todo?(S/N)>"
else
raiz="$1" #Reconocimiento de parametros
num="$2"
dest="$3"
if [ "$4" = "S" ]
then
cd "$dest"
rm -rf *
fi
cd "$raiz" #Cuento el numero de artistas
contador1=0;
for i in *
do
if [ -d "$i" ]
then
let contador1++
fi
done
echo "Hay $contador1 artistas en esa carpeta."
for ((a=0; a<$num; a++))
do
aleatorio1=0;
while [ $aleatorio1 -lt 100000000 ] #If it is below this number, let errors out for some reason
do
aleatorio1=`date +%N`
done
let aleatorio1=$aleatorio1/1000
let aleatorio1=$aleatorio1%$contador1
let aleatorio1=$aleatorio1+1
for i in *
do
if [ -d "$i" ] #Recorro la carpeta hasta llegar donde quiero
then
let aleatorio1--
fi
if [ $aleatorio1 -eq 0 ]
then
break
fi
done
echo -e "\nMe ha apetecido copiar algo de $i, que mola mazo :D"
cd "$i"
contador2=0;
for j in *
do
if [ -d "$j" ]
then
let contador2++
fi
done
echo "Hay $contador2 albums de $i."
aleatorio2=0;
while [ $aleatorio2 -lt 100000000 ]
do
aleatorio2=`date +%N`
done
let aleatorio2=$aleatorio2/1000
let aleatorio2=$aleatorio2%$contador2
let aleatorio2=$aleatorio2+1
for j in *
do
if [ -d "$j" ]
then
let aleatorio2--
fi
if [ $aleatorio2 -eq 0 ]
then
break
fi
done
echo -e "Me ha apetecido copiar el $j, que es un album muy guapo"
cp -r "$j" "$dest"
cd ..
done
sync
fi
|
debuti/fillmymp3
|
fillMP3player.sh
|
Shell
|
gpl-3.0
| 1,836 |
#!/bin/bash
if [ -x ./autogen.sh ]; then
./autogen.sh CFLAGS="-Wall -Wextra -ggdb -O0"
make
else
printf "Execute this script as 'utils/build-full-debug.sh' from the main directory.\n"
exit 1
fi
|
mbt/alltray
|
utils/build-full-debug.sh
|
Shell
|
gpl-3.0
| 211 |
#!/bin/bash
############################
##  Package a Directory   ##
############################
# Variables
rojo="\033[1;31m"
verde="\033[1;32m"
amarillo="\033[1;33m"
azul="\033[1;34m"
noColor="\033[1;00m"
function info(){
echo -e "$verde Es necesario un $amarillo Directorio $verde a empaquetar"
echo -e "$verde Ejecuta con la sintaxis:$rojo sh $0 $azul Directorio$noColor"
echo ""
}
# There must be exactly one parameter and it must be a directory
if [ $# -eq 1 ] && [ -d $1 ]; then
#Compress
tar -cvzf "`date +%y%b`-`date +%d`_$1.tar.gz" $1
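#Example (sketch): packaging a directory "docs" on 2016-10-05 yields
#"16Oct-05_docs.tar.gz" (the %b month abbreviation is locale-dependent)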
#echo "`date +%y%b`-`date +%d`_$1.tar.gz"
echo "Archivo Comprimido"
else
info
exit 1
fi
exit 0
|
fryntiz/ciclosuperior
|
Scripts_Bash/Nivel Bajo-Medio/9_empaqueta2.sh
|
Shell
|
gpl-3.0
| 653 |
#!/bin/bash
puppet config set autosign ${AUTOSIGN:-false} --section master
|
ageekymonk/docker-puppet
|
puppetserver/docker-entrypoint.d/09-configure-autosign.sh
|
Shell
|
gpl-3.0
| 76 |
#!/bin/bash
#/etc/init.d/bluetooth restart
# This script is free software. You can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the license or
# (at your option) any later version.
# If you make any modification to this application, you must always
# credit its original author.
# Author:
# Universidad Distrital Francisco Jose
# Physics and Informatics Group
# Dr Julian Andres Salamanca Bernal
# Diego Alberto Parra Garzón
# Colombia, Bogota D.C.
hcitool dev > log.txt && grep -e 'hci0' log.txt > mac.txt
cut -d "0" -f 2,3 mac.txt > MAC.txt
MAC=`cat MAC.txt`
echo "La mac del dispositivo es : $MAC"
hcitool scan > dispo.txt && grep -e "HC-06" dispo.txt > macd.txt
cut -d "H" -f 1 macd.txt > MACD.txt
MACD=`cat MACD.txt`
echo "dispositivo HC-06 encontrado MAC: $MACD"
/etc/init.d/bluetooth restart
rm log.txt mac.txt dispo.txt macd.txt
rfcomm connect 0 $MACD
|
Diego-debian/FREE_POPS_1.0
|
free_pops/c_Blu.sh
|
Shell
|
gpl-3.0
| 1,033 |
#! /bin/bash
##################################################
# Copy Relinux backups to '~/Backups' folder #
# (only after Relinux has backed-up stuff) #
##################################################
# requires: relinux (not yet available in PPA/repo)
notify-send -t 3000 -i /usr/share/icons/gnome/32x32/status/info.png "Relinux ISO Copy Started"
DISTRIB_ID=`cat /etc/lsb-release | grep DISTRIB_ID | cut -d '=' -f 2`
DISTRIB_CODENAME=`lsb_release -cs | sed "s/^./\u&/"`
if [ ! -d "$HOME/Backups" ]; then
mkdir "$HOME/Backups"
fi
if [ ! -d "$HOME/Backups/$DISTRIB_ID $DISTRIB_CODENAME Backup `date +-%e-%m-%Y`" ]; then
mkdir "$HOME/Backups/$DISTRIB_ID $DISTRIB_CODENAME Backup `date +-%e-%m-%Y`"
cd "$HOME/Backups/$DISTRIB_ID $DISTRIB_CODENAME Backup `date +-%e-%m-%Y`"
echo 'Password: password' >> Password.txt
cp /home/relinux/custom.iso "$HOME/Backups/$DISTRIB_ID $DISTRIB_CODENAME Backup `date +-%e-%m-%Y`"
cd ..
cd ..
notify-send -t 3000 -i /usr/share/icons/gnome/32x32/status/info.png "Relinux ISO Copy Finished"
else
mkdir "$HOME/Backups/$DISTRIB_ID $DISTRIB_CODENAME Backup `date +-%e-%m-%Y_%H%M`"
cd "$HOME/Backups/$DISTRIB_ID $DISTRIB_CODENAME Backup `date +-%e-%m-%Y_%H%M`"
echo 'Password: password' >> Password.txt
cp /home/relinux/custom.iso "$HOME/Backups/$DISTRIB_ID $DISTRIB_CODENAME Backup `date +-%e-%m-%Y_%H%M`"
cd ..
cd ..
notify-send -t 3000 -i /usr/share/icons/gnome/32x32/status/info.png "Relinux ISO Copy Finished"
fi
|
kernt/linuxtools
|
gnome3-shell/nautilus-scripts/System/Relinux-Copy.sh
|
Shell
|
gpl-3.0
| 1,505 |
#!/bin/sh
# Author: Valentin Popov
# Email: [email protected]
# Date: 2017-10-19
# Usage: /bin/sh build.sh
# Description: Build the final package for installation in Moodle.
# Updating the Environment
PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
export PATH="$PATH:/usr/local/scripts"
# Build the package
cd ..
mv "./moodle-webhooks" "./local_webhooks"
zip -9 -r "local_webhooks.zip" "local_webhooks" \
-x "local_webhooks/.git*" \
-x "local_webhooks/.travis.yml" \
-x "local_webhooks/build.sh"
# End of work
exit 0
|
valentineus/moodle-webhooks
|
build.sh
|
Shell
|
gpl-3.0
| 604 |
#!/bin/bash
# Modified for Yahoo weather by Peter Thorstenson
# Based on a script written by Peter Garceau
# Originally based on the RDF Feed Display Script by Hellf[i]re v0.1
#
# This script is designed for the Yahoo Weather XML RSS.
# Just substitute "BRXX0201" with the code for your location
#
# This script depends on curl.
# yum -y install curl
#
# Usage:
# .conkyrc: ${execi [time] /path/to/script/rss-weather.sh}
#RSS Setup
#the URI to the xml page
URI=http://xml.weather.yahoo.com/forecastrss?p=BRXX0099\&u=c
#number of lines to display
LINES=8
#Path to curl
EXEC="/usr/bin/curl -s"
#Work Start
$EXEC $URI | sed -n -e '/<!\[CDATA\[/,/\]\]>/p' | #print text between "<![CDATA[" and "]]>"
sed -e 's/<!\[CDATA\[//g' | #remove "<![CDATA[" to leave only clean html tags
sed -e 's/\]\]>//g' | #remove "]]>" for the same reason
sed -e 's/<[^>]*>//g' | #remove all html tags
head -n $(($LINES + 2)) | #trim off useless lines
tail -n $LINES
|
jmanoel7/my_shell_scripts
|
bin/get-weather-forecast.sh
|
Shell
|
gpl-3.0
| 1,073 |
#!/bin/sh
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/. */
# Update commit hash when you need to update shavar prod list
# Note: we can update this to use a tag / branch in future
SHAVAR_COMMIT_HASH="3910527004252af3aa9dd701566a2cb3b78e5c3a"
# Install Node.js dependencies and build user scripts
npm install
npm run build
# Clone shavar prod list
rm -rf shavar-prod-lists && git clone https://github.com/mozilla-services/shavar-prod-lists.git && git -C shavar-prod-lists checkout $SHAVAR_COMMIT_HASH
(cd content-blocker-lib-ios/ContentBlockerGen && swift run)
|
mozilla-mobile/firefox-ios
|
content_blocker_update.sh
|
Shell
|
mpl-2.0
| 720 |
# find directories
find . -type d | sort | awk '$0 !~ last "/" {print last} {last=$0} END {print last}'
## loop to do something whit they
find /big-directory -type d | sort | awk '$0 !~ last {print last} {last=$0} END {print last}' | while read dir
do
# Do something useful with $dir
echo $dir
done
## Find directories containing MD files and convert them to HTML
#!/bin/bash
for dir in `find /big-directory -type d`
do
if [ -e $dir/file.md ]; #test if dir contains file
then
echo "converting $dir/file.md to HTML..."
markdown $dir/file.md > $dir/file.html
fi
done
|
heliogabalo/The-side-of-the-source
|
Codigo/Scripts/Bash/mark2html.sh
|
Shell
|
mpl-2.0
| 592 |
#!/usr/bin/env bash
# @copyright Copyright (c) 2017, Daniel Calviño Sánchez ([email protected])
#
# @license GNU AGPL version 3 or any later version
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Helper script to run the acceptance tests, which test a running Nextcloud
# instance from the point of view of a real user.
#
# The acceptance tests are run in its own Docker container; the grandparent
# directory of the acceptance tests directory (that is, the root directory of
# the Nextcloud server) is copied to the container and the acceptance tests are
# run inside it. Once the tests end the container is stopped. The acceptance
# tests also use the Selenium server to control a web browser, so the Selenium
# server is also launched before the tests start in its own Docker container (it
# will be stopped automatically too once the tests end).
#
# To perform its job, the script requires the "docker" command to be available.
#
# The Docker Command Line Interface (the "docker" command) requires special
# permissions to talk to the Docker daemon, and those permissions are typically
# available only to the root user. Please see the Docker documentation to find
# out how to give access to a regular user to the Docker daemon:
# https://docs.docker.com/engine/installation/linux/linux-postinstall/
#
# Note, however, that being able to communicate with the Docker daemon is the
# same as being able to get root privileges for the system. Therefore, you must
# give access to the Docker daemon (and thus run this script as) ONLY to trusted
# and secure users:
# https://docs.docker.com/engine/security/security/#docker-daemon-attack-surface
#
# Finally, take into account that this script will automatically remove the
# Docker containers named "selenium-nextcloud-local-test-acceptance" and
# "nextcloud-local-test-acceptance", even if the script did not create them
# (probably you will not have containers nor images with those names, but just
# in case).
# Sets the variables that abstract the differences in command names and options
# between operating systems.
#
# Switches between timeout on GNU/Linux and gtimeout on macOS (same for mktemp
# and gmktemp).
function setOperatingSystemAbstractionVariables() {
case "$OSTYPE" in
darwin*)
if [ "$(which gtimeout)" == "" ]; then
echo "Please install coreutils (brew install coreutils)"
exit 1
fi
MKTEMP=gmktemp
TIMEOUT=gtimeout
DOCKER_OPTIONS="-e no_proxy=localhost "
;;
linux*)
MKTEMP=mktemp
TIMEOUT=timeout
DOCKER_OPTIONS=" "
;;
*)
echo "Operating system ($OSTYPE) not supported"
exit 1
;;
esac
}
# Launches the Selenium server in a Docker container.
#
# The acceptance tests use Firefox by default but, unfortunately, Firefox >= 48
# does not provide yet the same level of support as earlier versions for certain
# features related to automated testing. Therefore, the Docker image used is not
# the latest one, but an older version known to work.
#
# The acceptance tests expect the Selenium server to be accessible at
# "127.0.0.1:4444"; as the Selenium server container and the container in which
# the acceptance tests are run share the same network nothing else needs to be
# done for the acceptance tests to access the Selenium server and for the
# Selenium server to access the Nextcloud server. However, in order to ensure
# from this script that the Selenium server was started the 4444 port of its
# container is mapped to the 4444 port of the host.
#
# Besides the Selenium server, the Docker image also provides a VNC server, so
# the 5900 port of the container is also mapped to the 5900 port of the host.
#
# The Docker container started here will be automatically stopped when the
# script exits (see cleanUp). If the Selenium server can not be started then the
# script will be exited immediately with an error state; the most common cause
# for the Selenium server to fail to start is that another server is already
# using the mapped ports in the host.
#
# As the web browser is run inside the Docker container it is not visible by
# default. However, it can be viewed using VNC (for example,
# "vncviewer 127.0.0.1:5900"); when asked for the password use "secret".
function prepareSelenium() {
SELENIUM_CONTAINER=selenium-nextcloud-local-test-acceptance
echo "Starting Selenium server"
docker run --detach --name=$SELENIUM_CONTAINER --publish 4444:4444 --publish 5900:5900 $DOCKER_OPTIONS selenium/standalone-firefox-debug:2.53.1-beryllium
echo "Waiting for Selenium server to be ready"
if ! $TIMEOUT 10s bash -c "while ! curl 127.0.0.1:4444 >/dev/null 2>&1; do sleep 1; done"; then
echo "Could not start Selenium server; running" \
"\"docker run --rm --publish 4444:4444 --publish 5900:5900 $DOCKER_OPTIONS selenium/standalone-firefox-debug:2.53.1-beryllium\"" \
"could give you a hint of the problem"
exit 1
fi
}
# Creates a Docker container to run both the acceptance tests and the Nextcloud
# server used by them.
#
# This function starts a Docker container with a copy the Nextcloud code from
# the grandparent directory, although ignoring any configuration or data that it
# may provide (for example, if that directory was used directly to deploy a
# Nextcloud instance in a web server). As the Nextcloud code is copied to the
# container instead of referenced the original code can be modified while the
# acceptance tests are running without interfering in them.
function prepareDocker() {
NEXTCLOUD_LOCAL_CONTAINER=nextcloud-local-test-acceptance
echo "Starting the Nextcloud container"
# As the Nextcloud server container uses the network of the Selenium server
# container the Nextcloud server can be accessed at "127.0.0.1" from the
# Selenium server.
# The container exits immediately if no command is given, so a Bash session
# is created to prevent that.
docker run --detach --name=$NEXTCLOUD_LOCAL_CONTAINER --network=container:$SELENIUM_CONTAINER --interactive --tty nextcloudci/php7.0:php7.0-7 bash
# Use the $TMPDIR or, if not set, fall back to /tmp.
NEXTCLOUD_LOCAL_TAR="$($MKTEMP --tmpdir="${TMPDIR:-/tmp}" --suffix=.tar nextcloud-local-XXXXXXXXXX)"
# Setting the user and group of files in the tar would be superfluous, as
# "docker cp" does not take them into account (the extracted files are set
# to root).
echo "Copying local Git working directory of Nextcloud to the container"
tar --create --file="$NEXTCLOUD_LOCAL_TAR" --exclude=".git" --exclude="./build" --exclude="./config/config.php" --exclude="./data" --exclude="./data-autotest" --exclude="./tests" --directory=../../ .
tar --append --file="$NEXTCLOUD_LOCAL_TAR" --directory=../../ tests/acceptance/
docker exec $NEXTCLOUD_LOCAL_CONTAINER mkdir /nextcloud
docker cp - $NEXTCLOUD_LOCAL_CONTAINER:/nextcloud/ < "$NEXTCLOUD_LOCAL_TAR"
# run-local.sh expects a Git repository to be available in the root of the
# Nextcloud server, but it was excluded when the Git working directory was
# copied to the container to avoid copying the large and unneeded history of
# the repository.
docker exec $NEXTCLOUD_LOCAL_CONTAINER bash -c "cd nextcloud && git init"
}
# Removes/stops temporal elements created/started by this script.
function cleanUp() {
# Disable (yes, "+" disables) exiting immediately on errors to ensure that
# all the cleanup commands are executed (well, no errors should occur during
# the cleanup anyway, but just in case).
set +o errexit
echo "Cleaning up"
if [ -f "$NEXTCLOUD_LOCAL_TAR" ]; then
echo "Removing $NEXTCLOUD_LOCAL_TAR"
rm $NEXTCLOUD_LOCAL_TAR
fi
# The name filter must be specified as "^/XXX$" to get an exact match; using
# just "XXX" would match every name that contained "XXX".
if [ -n "$(docker ps --all --quiet --filter name="^/$NEXTCLOUD_LOCAL_CONTAINER$")" ]; then
echo "Removing Docker container $NEXTCLOUD_LOCAL_CONTAINER"
docker rm --volumes --force $NEXTCLOUD_LOCAL_CONTAINER
fi
if [ -n "$(docker ps --all --quiet --filter name="^/$SELENIUM_CONTAINER$")" ]; then
echo "Removing Docker container $SELENIUM_CONTAINER"
docker rm --volumes --force $SELENIUM_CONTAINER
fi
}
# Exit immediately on errors.
set -o errexit
# Execute cleanUp when the script exits, either normally or due to an error.
trap cleanUp EXIT
# Ensure working directory is script directory, as some actions (like copying
# the Git working directory to the container) expect that.
cd "$(dirname $0)"
# "--timeout-multiplier N" option can be provided before the specific scenario
# to run, if any, to set the timeout multiplier to be used in the acceptance
# tests.
TIMEOUT_MULTIPLIER_OPTION=""
if [ "$1" = "--timeout-multiplier" ]; then
if [[ ! "$2" =~ ^[0-9]+$ ]]; then
echo "--timeout-multiplier must be followed by a positive integer"
exit 1
fi
TIMEOUT_MULTIPLIER_OPTION="--timeout-multiplier $2"
shift 2
fi
# If no parameter is provided to this script all the acceptance tests are run.
SCENARIO_TO_RUN=$1
setOperatingSystemAbstractionVariables
prepareSelenium
prepareDocker
echo "Running tests"
docker exec $NEXTCLOUD_LOCAL_CONTAINER bash -c "cd nextcloud && tests/acceptance/run-local.sh $TIMEOUT_MULTIPLIER_OPTION allow-git-repository-modifications $SCENARIO_TO_RUN"
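# Usage examples (sketch; the scenario argument is passed through to run-local.sh):
#   ./run.sh                                        # run the full acceptance suite
#   ./run.sh --timeout-multiplier 2 login.feature   # hypothetical scenario file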
|
pixelipo/server
|
tests/acceptance/run.sh
|
Shell
|
agpl-3.0
| 9,823 |
#!/bin/sh
port=$1
node index.js "$port"
|
azadbolour/boardgame
|
express-trial/run.sh
|
Shell
|
agpl-3.0
| 36 |
#!/bin/bash
# counter for how many sites we have processed in this run
COUNT=0
# process at most LIMIT_NUM sites per run, each site at most once every LIMIT_MIN minutes
LIMIT_NUM=5
LIMIT_MIN=4
for RUN in `find /var/www/sites/*/sites/*/civicrm.settings.php -mmin +$LIMIT_MIN -printf "%C@ %p\n" | sort | awk '{ print $2 }'` ; do
NAME=${RUN//\/var\/www\/sites\//}
NAME=${NAME%%/*}
CONFPATH=${RUN%/*}
# exclude symbolic link directory
if [ -L $CONFPATH ]; then
continue
fi
TMP=${CONFPATH##*/}
SITE=""
if [ "$TMP" != "default" ]; then
SITE=$TMP
else
SITE=$NAME
fi
RUNNING=$(docker ps -q -f name=$NAME)
if [ -n "$RUNNING" ]; then
if [ $COUNT -gt $LIMIT_NUM ]; then
echo $COUNT
echo "Excceed $LIMIT_NUM of sites in each cron"
break
fi
touch $RUN
sleep $(( ( RANDOM % 3 ) + 1 ))
BATCH_RUN_OUTPUT=$(docker exec -i $NAME bash -c "cd /var/www/html && drush -l $SITE neticrm-batch-run" 2>&1)
if [[ $BATCH_RUN_OUTPUT = *"[ok]"* ]]; then
echo "$(date +"%Y-%m-%d %H:%M:%S") $NAME neticrm cron run" >> /var/log/neticrm_cron.log
COUNT=$(($COUNT+1))
fi
echo $COUNT
fi
done
|
NETivism/ansible-docker
|
roles/neticrm/files/neticrm_cron.sh
|
Shell
|
agpl-3.0
| 1,092 |
#!/bin/bash
curl https://raw.githubusercontent.com/fivethirtyeight/data/master/murder_2016/murder_2015_final.csv \
| sed -e 's/2014_murders/murders_2014/g' \
| sed -e 's/2015_murders/murders_2015/g'
|
akvo/learning-sessions
|
noms/fetch_example_data.sh
|
Shell
|
agpl-3.0
| 208 |
#!/bin/bash
# Tower git-flow Pull-Request tweak install script
sudo cp ./* /Applications/Tower.app/Contents/Resources/git-flow/
|
brandon-reeves/tower-git-flow-github-pull-requests
|
install.sh
|
Shell
|
lgpl-2.1
| 127 |
#!/bin/bash
#Pull latest code
git pull
git reset --hard
git log -1
#Set execute flag on perl and bash scripts
chmod u+x bgt-linux-pcie-drv/v4l/scripts/*.pl
chmod u+x bgt-linux-pcie-drv/v4l/scripts/*.sh
#Change v4l and make
cd bgt-linux-pcie-drv/v4l
make clean
make -j4 KERNELRELEASE=$(uname -r) kernver=$(uname -r)
#Check for clean exit
rc=$?; if [[ $rc != 0 ]]; then exit $rc; fi
#Install modules (requires root)
sudo make install
|
marcusbirkin/BGT3xxx
|
build.sh
|
Shell
|
lgpl-2.1
| 436 |
rm -rf ./data/crf ./data/folds ./data/logs ./data/models ./data/tag ./data/fold_eval
echo "Generating CRF vectors from documents..."
sh extract_vectors.sh
echo "Splitting vectors into folds..."
sh split_in_folds.sh
echo "Training models..."
sh train_models.sh
echo "Evaluation..."
sh eval_models.sh
|
petasis/irss2014-argument-extraction
|
run.sh
|
Shell
|
lgpl-2.1
| 299 |
#!/bin/sh -e
srcdir=`dirname "$0"`
test -z "$srcdir" && srcdir=.
ORIGDIR=`pwd`
cd "$srcdir"
autoreconf --verbose --install --force --warnings=all
cd "$ORIGDIR"
if test -z "$NOCONFIGURE"; then
exec "$srcdir/configure" "$@"
fi
|
bluetech/libXamine
|
autogen.sh
|
Shell
|
lgpl-2.1
| 233 |
#!/usr/bin/bash
# Build and deploy the testmodel webapp
# This script requires the standard InterMine dependencies:
# * psql (createdb, psql) - your user should have a postgres
# role with password authentication set up.
# * ant
# * a deployment container (tomcat).
set -e # Errors are fatal.
USERPROFILEDB=userprofile-demo
PRODDB=objectstore-demo
MINENAME=demomine
DIR="$(cd $(dirname "$0"); pwd)"
IMDIR=$HOME/.intermine
LOG=$DIR/build.log
PROP_FILE=$IMDIR/testmodel.properties.demo
# Inherit SERVER, PORT, PSQL_USER, PSQL_PWD, TOMCAT_USER and TOMCAT_PWD if in env.
if test -z $SERVER; then
SERVER=localhost
fi
if test -z $PORT; then
PORT=8080
fi
if test -z $PSQL_USER; then
PSQL_USER=$USER
fi
if test -z $PSQL_PWD; then
PSQL_PWD=$USER;
fi
if test -z $TOMCAT_USER; then
TOMCAT_USER=manager
fi
if test -z $TOMCAT_PWD; then
TOMCAT_PWD=manager
fi
cd $HOME
if test ! -d $IMDIR; then
echo Making .intermine configuration directory.
mkdir $IMDIR
fi
if test ! -f $PROP_FILE; then
echo $PROP_FILE not found. Providing default properties file.
cd $IMDIR
cp $DIR/testmodel.properties $PROP_FILE
sed -i "s/PSQL_USER/$PSQL_USER/g" $PROP_FILE
sed -i "s/PSQL_PWD/$PSQL_PWD/g" $PROP_FILE
sed -i "s/TOMCAT_USER/$TOMCAT_USER/g" $PROP_FILE
sed -i "s/TOMCAT_PWD/$TOMCAT_PWD/g" $PROP_FILE
sed -i "s/USERPROFILEDB/$USERPROFILEDB/g" $PROP_FILE
sed -i "s/PRODDB/$PRODDB/g" $PROP_FILE
sed -i "s/SERVER/$SERVER/g" $PROP_FILE
sed -i "s/8080/$PORT/g" $PROP_FILE
sed -i "s/USER/$USER/g" $PROP_FILE
fi
echo "------> Checking databases..."
for db in $USERPROFILEDB $PRODDB; do
if psql --list | egrep -q '\s'$db'\s'; then
echo $db exists.
else
echo Creating $db
createdb $db
fi
done
echo "------> Removing current webapp"
cd $DIR/webapp/main
ant -Drelease=demo -Ddont.minify=true remove-webapp >> $LOG
cd $DIR/dbmodel
echo "------> Loading demo data set..."
ant -Drelease=demo loadsadata >> $LOG
cd $DIR/webapp/main
echo "------> Building and releasing web-app..."
ant -Drelease=demo -Ddont.minify=true \
build-test-userprofile-withuser \
create-quicksearch-index \
default \
release-webapp | tee -a $LOG | grep tomcat-deploy
echo "------> All done. Build log is available in $LOG"
|
julie-sullivan/phytomine
|
testmodel/setup.sh
|
Shell
|
lgpl-2.1
| 2,315 |
#!/bin/bash
#
# Copyright (C) 2016 Colin Walters <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
set -euo pipefail
echo '1..2'
echo "Verifying all expected symbols are actually exported..."
grep ' ostree_[A-Za-z0-9_]*;' ${G_TEST_SRCDIR}/src/libostree/libostree.sym | sed -e 's,^ *\([A-Za-z0-9_]*\);,\1,' | sort -u > expected-symbols.txt
eu-readelf -a ${G_TEST_BUILDDIR}/.libs/libostree-1.so | grep 'FUNC.*GLOBAL.*DEFAULT.*@@LIBOSTREE_' | sed -e 's,^.* \(ostree_[A-Za-z0-9_]*\)@@LIBOSTREE_[0-9_.]*,\1,' |sort -u > found-symbols.txt
diff -u expected-symbols.txt found-symbols.txt
echo "ok exports"
# cmd__private__ is private. The fetcher symbol should not have been made public.
grep -E -v '(ostree_cmd__private__)|(ostree_fetcher_config_flags_get_type)' found-symbols.txt > expected-documented.txt
echo "Verifying all public symbols are documented:"
grep '^ostree_' ${G_TEST_SRCDIR}/apidoc/ostree-sections.txt |sort -u > found-documented.txt
diff -u expected-documented.txt found-documented.txt
echo 'ok documented symbols'
|
gatispaeglis/ostree
|
tests/test-symbols.sh
|
Shell
|
lgpl-2.1
| 1,730 |
#!/bin/sh
#
# /**-------------------------------------------------------------------**
# ** CLooG **
# **-------------------------------------------------------------------**
# ** check_strided.sh **
# **-------------------------------------------------------------------**
# ** First version: November 17th 2011 **
# **-------------------------------------------------------------------**/
#
#/*****************************************************************************
# * CLooG : the Chunky Loop Generator (experimental) *
# *****************************************************************************
# * *
# * Copyright (C) 2003 Cedric Bastoul *
# * *
# * This library is free software; you can redistribute it and/or *
# * modify it under the terms of the GNU Lesser General Public *
# * License as published by the Free Software Foundation; either *
# * version 2.1 of the License, or (at your option) any later version. *
# * *
# * This library is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU *
# * Lesser General Public License for more details. *
# * *
# * You should have received a copy of the GNU Lesser General Public *
# * License along with this library; if not, write to the Free Software *
# * Foundation, Inc., 51 Franklin Street, Fifth Floor, *
# * Boston, MA 02110-1301 USA *
# * *
# * CLooG, the Chunky Loop Generator *
# * Written by Cedric Bastoul, [email protected] *
# * *
# *****************************************************************************/
$CHECKER "STRIDED" "$CLOOGTEST_STRIDED" "-strides 1" "cloog" "c" "${1:-hybrid}"
|
periscop/cloog
|
test/check_strided.sh
|
Shell
|
lgpl-2.1
| 2,637 |
#!/bin/bash
#Version 0.1
VERBOSE=true #Change this to false if no screen output is required
if [ -z "$1" ]; then
echo "You must supply the address of the Zee PIIO board"
echo " "
echo "Usage: piio_init.sh address [port] [value]"
echo "Examples: piio_init.sh 0x20 - This will set all pins as outputs"
echo " piio_init.sh 0x20 A - This will set all pins on PortA (pins 1-8) as outputs"
echo " piio_init.sh 0x20 B - This will set PortB (pins 9-16) as outputs"
echo " piio_init.sh 0x20 A 0xFF - This will set all pins on PortA (pins 1-8) as inputs"
echo " piio_init.sh 0x20 A 0x00 - This will set all pins on PortA (pins 1-8) as outputs"
echo " piio_init.sh 0x20 A 0xF5 - This will set pins 1,3,5,6,7,8 as inputs and 2,4 as outputs"
echo " "
echo "Please read the Zee PIIO quick start guide for more information"
exit
fi
if ! [[ $1 =~ ^(0x20|0x21|0x22|0x23|0x24|0x25|0x26|0x27)$ ]]; then
echo "ERROR: An invalid Zee PIIO board address was given. The address must be between 0x20 to 0x27."
exit
fi
if [ -z "$2" ]; then
if [ "$VERBOSE" = true ]; then
echo "Setting all ports (pins 1-16) as outputs"
fi
/usr/sbin/i2cset -y 1 $1 0x00 0x00 #Set ports 1-8 as outputs
/usr/sbin/i2cset -y 1 $1 0x01 0x00 #Set ports 9-16 as outputs
exit
fi
if [ -z "$3" ]; then
VALUE=0x00
else
VALUE=$3
fi
if [[ $2 =~ ^(A|a|B|b)$ ]]; then
if [[ $2 =~ ^(A|a)$ ]]; then
if [ "$VERBOSE" = true ]; then
echo "Setting Port A (pins 1-8) as $VALUE"
fi
/usr/sbin/i2cset -y 1 $1 0x00 $VALUE
if [ "$?" == 1 ]; then
echo " "
echo "ERROR: An invalid value was given. The value must be between 0x00 and 0xFF."
exit;
fi
fi
if [[ $2 =~ ^(B|b)$ ]]; then
if [ "$VERBOSE" = true ]; then
echo "Setting Port B (pins 9-16) as $VALUE"
fi
/usr/sbin/i2cset -y 1 $1 0x01 $VALUE
if [ "$?" == 1 ]; then
echo " "
echo "ERROR: An invalid value was given. The value must be between 0x00 and 0xFF."
exit;
fi
fi
else
echo "Invalid Port. The Port must be either A or B"
fi
|
cactixxx/piio
|
piio_init.sh
|
Shell
|
lgpl-3.0
| 2,110 |
#!/bin/sh
# path to YII Framework
export YII_PATH=/var/www-software/yii/current/yii.php
# set this to application directory root
ROOTDIR=/var/users/blog
# set this to any temporary directory available for writing
TEMPDIR=$ROOTDIR/tmp
# pid file
PIDFILE=$ROOTDIR/abp1mail.pid
# log file
LOGFILE=$TEMPDIR/abp1mail.log
if [ -f $PIDFILE ]; then
PID=`cat $PIDFILE`
RUNNING=`/bin/ps h -ofname $PID`
if [ "$RUNNING" = "abp1mail" ]; then
exit
fi
fi
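# Note (sketch): the PID-file check above is not atomic; where flock(1) is
# available, it avoids the check-then-write race, e.g.:
#   flock -n "$PIDFILE" /usr/bin/php console.php mail -p >> "$LOGFILE" 2>&1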
echo -n $$ > $PIDFILE
cd $ROOTDIR/protected
/usr/bin/php console.php mail -p >> $LOGFILE 2>&1
rm -f $PIDFILE
|
artstyle/BoilingPot
|
scripts/abp1mail.sh
|
Shell
|
lgpl-3.0
| 576 |
sh tulip_run_test.sh layout_bubble_pack bubble_pack.tlp
|
tulip5/tulip
|
tests/gui/run_layout_bubble_pack_test.sh
|
Shell
|
lgpl-3.0
| 56 |
#!/bin/bash
ESC_P="$(echo "$1" | sed -e 's/[!()&|]/\\&/g')"
sh -c "./dpt --profile=test --search=$ESC_P"
|
BroSkylark/PictureTracker
|
find.sh
|
Shell
|
unlicense
| 106 |
#!/usr/bin/env bash
#
# The Alluxio Open Foundation licenses this work under the Apache License, version 2.0
# (the "License"). You may not use this work except in compliance with the License, which is
# available at www.apache.org/licenses/LICENSE-2.0
#
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied, as more fully set forth in the License.
#
# See the NOTICE file distributed with this work for information regarding copyright ownership.
#
LAUNCHER=
# If debugging is enabled propagate that through to sub-shells
if [[ "$-" == *x* ]]; then
LAUNCHER="bash -x"
fi
BIN=$(cd "$( dirname "$( readlink "$0" || echo "$0" )" )"; pwd)
#start up alluxio
USAGE="Usage: alluxio-start.sh [-hNwm] [-i backup] ACTION [MOPT] [-f]
Where ACTION is one of:
all [MOPT] \tStart all masters, proxies, and workers.
job_master \tStart the job_master on this node.
job_masters \tStart job_masters on master nodes.
job_worker \tStart a job_worker on this node.
job_workers \tStart job_workers on worker nodes.
local [MOPT] \tStart all processes locally.
master \tStart the local master on this node.
secondary_master \tStart the local secondary master on this node.
masters \tStart masters on master nodes.
proxy \tStart the proxy on this node.
proxies \tStart proxies on master and worker nodes.
safe \tScript will run continuously and start the master if it's not running.
worker [MOPT] \tStart a worker on this node.
workers [MOPT] \tStart workers on worker nodes.
logserver \tStart the logserver
restart_worker \tRestart a failed worker on this node.
restart_workers \tRestart any failed workers on worker nodes.
MOPT (Mount Option) is one of:
Mount \tMount the configured RamFS if it is not already mounted.
SudoMount\tMount the configured RamFS using sudo if it is not already mounted.
NoMount \tDo not mount the configured RamFS.
\tNotice: to avoid sudo requirement but using tmpFS in Linux,
set ALLUXIO_RAM_FOLDER=/dev/shm on each worker and use NoMount.
NoMount is assumed if MOPT is not specified.
-f format Journal, UnderFS Data and Workers Folder on master.
-h display this help.
-i backup a journal backup to restore the master from. The backup should be
a URI path within the root under filesystem, e.g.
hdfs://mycluster/alluxio_backups/alluxio-journal-YYYY-MM-DD-timestamp.gz.
-m launch monitor process to ensure the target processes come up.
-N do not try to kill previous running processes before starting new ones.
-w wait for processes to end before returning.
Supported environment variables:
ALLUXIO_JOB_WORKER_COUNT - identifies how many job workers to start per node (default = 1)"
ensure_dirs() {
if [[ ! -d "${ALLUXIO_LOGS_DIR}" ]]; then
echo "ALLUXIO_LOGS_DIR: ${ALLUXIO_LOGS_DIR}"
mkdir -p ${ALLUXIO_LOGS_DIR}
fi
}
# returns 1 if "$1" contains "$2", 0 otherwise.
contains() {
if [[ "$1" = *"$2"* ]]; then
return 1
fi
return 0
}
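# Note the inverted convention: an exit status of 0 means "not found", so
# callers append a default, e.g. (sketch mirroring the callers below):
#   contains "${ALLUXIO_MASTER_JAVA_OPTS}" "Xmx"
#   if [[ $? -eq 0 ]]; then ALLUXIO_MASTER_JAVA_OPTS+=" -Xmx8g "; fi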
get_env() {
DEFAULT_LIBEXEC_DIR="${BIN}"/../libexec
ALLUXIO_LIBEXEC_DIR=${ALLUXIO_LIBEXEC_DIR:-${DEFAULT_LIBEXEC_DIR}}
. ${ALLUXIO_LIBEXEC_DIR}/alluxio-config.sh
CLASSPATH=${ALLUXIO_SERVER_CLASSPATH}
}
# Pass ram folder to check as $1
# Return 0 if ram folder is mounted as tmpfs or ramfs, 1 otherwise
is_ram_folder_mounted() {
local mounted_fs=""
if [[ $(uname -s) == Darwin ]]; then
mounted_fs=$(mount -t "hfs" | grep '/Volumes/' | cut -d " " -f 3)
else
mounted_fs=$(mount -t "tmpfs,ramfs" | cut -d " " -f 3)
fi
for fs in ${mounted_fs}; do
if [[ "${1}" == "${fs}" || "${1}" =~ ^"${fs}"\/.* ]]; then
return 0
fi
done
return 1
}
check_mount_mode() {
case $1 in
Mount);;
SudoMount);;
NoMount)
local tier_alias=$(${BIN}/alluxio getConf alluxio.worker.tieredstore.level0.alias)
local tier_path=$(${BIN}/alluxio getConf alluxio.worker.tieredstore.level0.dirs.path)
if [[ ${tier_alias} != "MEM" ]]; then
# if the top tier is not MEM, skip check
return
fi
is_ram_folder_mounted "${tier_path}"
if [[ $? -ne 0 ]]; then
echo "ERROR: Ramdisk ${tier_path} is not mounted with mount option NoMount. Use alluxio-mount.sh to mount ramdisk." >&2
echo -e "${USAGE}" >&2
exit 1
fi
if [[ "${tier_path}" =~ ^"/dev/shm"\/{0,1}$ ]]; then
echo "WARNING: Using tmpFS does not guarantee data to be stored in memory."
echo "WARNING: Check vmstat for memory statistics (e.g. swapping)."
fi
;;
*)
if [[ -z $1 ]]; then
echo "This command requires a mount mode be specified" >&2
else
echo "Invalid mount mode: $1" >&2
fi
echo -e "${USAGE}" >&2
exit 1
esac
}
# pass mode as $1
do_mount() {
MOUNT_FAILED=0
case "$1" in
Mount|SudoMount)
local tier_alias=$(${BIN}/alluxio getConf alluxio.worker.tieredstore.level0.alias)
local tier_path=$(${BIN}/alluxio getConf alluxio.worker.tieredstore.level0.dirs.path)
if [[ ${tier_alias} != "MEM" ]]; then
echo "Can't Mount/SudoMount when alluxio.worker.tieredstore.level0.alias is not MEM"
exit 1
fi
is_ram_folder_mounted "${tier_path}" # Returns 0 if already mounted.
if [[ $? -eq 0 ]]; then
echo "Ramdisk already mounted. Skipping mounting procedure."
else
echo "Ramdisk not detected. Mounting..."
${LAUNCHER} "${BIN}/alluxio-mount.sh" $1
MOUNT_FAILED=$?
fi
;;
NoMount)
;;
*)
echo "This command requires a mount mode be specified" >&2
echo -e "${USAGE}" >&2
exit 1
esac
}
stop() {
${BIN}/alluxio-stop.sh $1
}
start_job_master() {
if [[ "$1" == "-f" ]]; then
${LAUNCHER} "${BIN}/alluxio" format
fi
if [[ ${ALLUXIO_MASTER_SECONDARY} != "true" ]]; then
if [[ -z ${ALLUXIO_JOB_MASTER_JAVA_OPTS} ]] ; then
ALLUXIO_JOB_MASTER_JAVA_OPTS=${ALLUXIO_JAVA_OPTS}
fi
echo "Starting job master @ $(hostname -f). Logging to ${ALLUXIO_LOGS_DIR}"
(nohup ${JAVA} -cp ${CLASSPATH} \
${ALLUXIO_JOB_MASTER_JAVA_OPTS} \
alluxio.master.AlluxioJobMaster > ${ALLUXIO_LOGS_DIR}/job_master.out 2>&1) &
fi
}
start_job_masters() {
${LAUNCHER} "${BIN}/alluxio-masters.sh" "${BIN}/alluxio-start.sh" "job_master"
}
start_job_worker() {
if [[ -z ${ALLUXIO_JOB_WORKER_JAVA_OPTS} ]] ; then
ALLUXIO_JOB_WORKER_JAVA_OPTS=${ALLUXIO_JAVA_OPTS}
fi
echo "Starting job worker @ $(hostname -f). Logging to ${ALLUXIO_LOGS_DIR}"
(nohup ${JAVA} -cp ${CLASSPATH} \
${ALLUXIO_JOB_WORKER_JAVA_OPTS} \
alluxio.worker.AlluxioJobWorker > ${ALLUXIO_LOGS_DIR}/job_worker.out 2>&1) &
ALLUXIO_JOB_WORKER_JAVA_OPTS+=" -Dalluxio.job.worker.rpc.port=0 -Dalluxio.job.worker.web.port=0"
local nworkers=${ALLUXIO_JOB_WORKER_COUNT:-1}
for (( c = 1; c < ${nworkers}; c++ )); do
echo "Starting job worker #$((c+1)) @ $(hostname -f). Logging to ${ALLUXIO_LOGS_DIR}"
(nohup ${JAVA} -cp ${CLASSPATH} \
${ALLUXIO_JOB_WORKER_JAVA_OPTS} \
alluxio.worker.AlluxioJobWorker > ${ALLUXIO_LOGS_DIR}/job_worker.out 2>&1) &
done
}
start_job_workers() {
${LAUNCHER} "${BIN}/alluxio-workers.sh" "${BIN}/alluxio-start.sh" "job_worker"
}
start_logserver() {
if [[ ! -d "${ALLUXIO_LOGSERVER_LOGS_DIR}" ]]; then
echo "ALLUXIO_LOGSERVER_LOGS_DIR: ${ALLUXIO_LOGSERVER_LOGS_DIR}"
mkdir -p ${ALLUXIO_LOGSERVER_LOGS_DIR}
fi
echo "Starting logserver @ $(hostname -f)."
(nohup "${JAVA}" -cp ${CLASSPATH} \
${ALLUXIO_LOGSERVER_JAVA_OPTS} \
alluxio.logserver.AlluxioLogServer "${ALLUXIO_LOGSERVER_LOGS_DIR}" > ${ALLUXIO_LOGS_DIR}/logserver.out 2>&1) &
# Wait for 1s before starting other Alluxio servers, otherwise may cause race condition
# leading to connection errors.
sleep 1
}
start_master() {
if [[ "$1" == "-f" ]]; then
${LAUNCHER} ${BIN}/alluxio format
fi
if [[ ${ALLUXIO_MASTER_SECONDARY} == "true" ]]; then
if [[ -z ${ALLUXIO_SECONDARY_MASTER_JAVA_OPTS} ]]; then
ALLUXIO_SECONDARY_MASTER_JAVA_OPTS=${ALLUXIO_JAVA_OPTS}
fi
# use a default Xmx value for the master
contains "${ALLUXIO_SECONDARY_MASTER_JAVA_OPTS}" "Xmx"
if [[ $? -eq 0 ]]; then
ALLUXIO_SECONDARY_MASTER_JAVA_OPTS+=" -Xmx8g "
fi
echo "Starting secondary master @ $(hostname -f). Logging to ${ALLUXIO_LOGS_DIR}"
(nohup "${JAVA}" -cp ${CLASSPATH} \
${ALLUXIO_SECONDARY_MASTER_JAVA_OPTS} \
alluxio.master.AlluxioSecondaryMaster > ${ALLUXIO_LOGS_DIR}/secondary_master.out 2>&1) &
else
if [[ -z ${ALLUXIO_MASTER_JAVA_OPTS} ]]; then
ALLUXIO_MASTER_JAVA_OPTS=${ALLUXIO_JAVA_OPTS}
fi
if [[ -n ${journal_backup} ]]; then
ALLUXIO_MASTER_JAVA_OPTS+=" -Dalluxio.master.journal.init.from.backup=${journal_backup}"
fi
# use a default Xmx value for the master
contains "${ALLUXIO_MASTER_JAVA_OPTS}" "Xmx"
if [[ $? -eq 0 ]]; then
ALLUXIO_MASTER_JAVA_OPTS+=" -Xmx8g "
fi
echo "Starting master @ $(hostname -f). Logging to ${ALLUXIO_LOGS_DIR}"
(nohup "${JAVA}" -cp ${CLASSPATH} \
${ALLUXIO_MASTER_JAVA_OPTS} \
alluxio.master.AlluxioMaster > ${ALLUXIO_LOGS_DIR}/master.out 2>&1) &
fi
}
start_masters() {
start_opts=""
if [[ -n ${journal_backup} ]]; then
start_opts="-i ${journal_backup}"
fi
${LAUNCHER} "${BIN}/alluxio-masters.sh" "${BIN}/alluxio-start.sh" ${start_opts} "master" $1
}
start_proxy() {
if [[ -z ${ALLUXIO_PROXY_JAVA_OPTS} ]]; then
ALLUXIO_PROXY_JAVA_OPTS=${ALLUXIO_JAVA_OPTS}
fi
echo "Starting proxy @ $(hostname -f). Logging to ${ALLUXIO_LOGS_DIR}"
(nohup "${JAVA}" -cp ${CLASSPATH} \
${ALLUXIO_PROXY_JAVA_OPTS} \
alluxio.proxy.AlluxioProxy > ${ALLUXIO_LOGS_DIR}/proxy.out 2>&1) &
}
start_proxies() {
${LAUNCHER} "${BIN}/alluxio-masters.sh" "${BIN}/alluxio-start.sh" "proxy"
${LAUNCHER} "${BIN}/alluxio-workers.sh" "${BIN}/alluxio-start.sh" "proxy"
}
start_worker() {
do_mount $1
if [ ${MOUNT_FAILED} -ne 0 ] ; then
echo "Mount failed, not starting worker" >&2
exit 1
fi
if [[ -z ${ALLUXIO_WORKER_JAVA_OPTS} ]]; then
ALLUXIO_WORKER_JAVA_OPTS=${ALLUXIO_JAVA_OPTS}
fi
# use a default Xmx value for the worker
contains "${ALLUXIO_WORKER_JAVA_OPTS}" "Xmx"
if [[ $? -eq 0 ]]; then
ALLUXIO_WORKER_JAVA_OPTS+=" -Xmx4g "
fi
# use a default MaxDirectMemorySize value for the worker
contains "${ALLUXIO_WORKER_JAVA_OPTS}" "XX:MaxDirectMemorySize"
if [[ $? -eq 0 ]]; then
ALLUXIO_WORKER_JAVA_OPTS+=" -XX:MaxDirectMemorySize=4g "
fi
echo "Starting worker @ $(hostname -f). Logging to ${ALLUXIO_LOGS_DIR}"
(nohup "${JAVA}" -cp ${CLASSPATH} \
${ALLUXIO_WORKER_JAVA_OPTS} \
alluxio.worker.AlluxioWorker > ${ALLUXIO_LOGS_DIR}/worker.out 2>&1 ) &
}
start_workers() {
${LAUNCHER} "${BIN}/alluxio-workers.sh" "${BIN}/alluxio-start.sh" "worker" $1
}
restart_worker() {
if [[ -z ${ALLUXIO_WORKER_JAVA_OPTS} ]]; then
ALLUXIO_WORKER_JAVA_OPTS=${ALLUXIO_JAVA_OPTS}
fi
RUN=$(ps -ef | grep "alluxio.worker.AlluxioWorker" | grep "java" | wc | awk '{ print $1; }')
if [[ ${RUN} -eq 0 ]]; then
echo "Restarting worker @ $(hostname -f). Logging to ${ALLUXIO_LOGS_DIR}"
(nohup "${JAVA}" -cp ${CLASSPATH} \
${ALLUXIO_WORKER_JAVA_OPTS} \
alluxio.worker.AlluxioWorker > ${ALLUXIO_LOGS_DIR}/worker.out 2>&1) &
fi
}
restart_workers() {
${LAUNCHER} "${BIN}/alluxio-workers.sh" "${BIN}/alluxio-start.sh" "restart_worker"
}
get_offline_worker() {
local run=
local result=""
run=$(ps -ef | grep "alluxio.worker.AlluxioWorker" | grep "java" | wc | awk '{ print $1; }')
if [[ ${run} -eq 0 ]]; then
result=$(hostname -f)
fi
echo "${result}"
}
get_offline_workers() {
local result=""
local run=
local i=0
local workers=$(cat "${ALLUXIO_CONF_DIR}/workers" | sed "s/#.*$//;/^$/d")
for worker in $(echo ${workers}); do
if [[ ${i} -gt 0 ]]; then
result+=","
fi
run=$(ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no -tt ${worker} \
ps -ef | grep "alluxio.worker.AlluxioWorker" | grep "java" | wc | awk '{ print $1; }')
if [[ ${run} -eq 0 ]]; then
result+="${worker}"
fi
i=$((i+1))
done
echo "${result}"
}
start_monitor() {
local action=$1
local nodes=$2
local run=
if [[ "${action}" == "restart_worker" ]]; then
action="worker"
if [[ -z "${nodes}" ]]; then
run="false"
fi
elif [[ "${action}" == "restart_workers" ]]; then
action="workers"
if [[ -z "${nodes}" ]]; then
run="false"
fi
elif [[ "${action}" == "logserver" || "${action}" == "safe" ]]; then
echo -e "Error: Invalid Monitor ACTION: ${action}" >&2
exit 1
fi
if [[ -z "${run}" ]]; then
${LAUNCHER} "${BIN}/alluxio-monitor.sh" "${action}" "${nodes}"
else
echo "Skipping the monitor checks..."
fi
}
run_safe() {
while [ 1 ]
do
RUN=$(ps -ef | grep "alluxio.master.AlluxioMaster" | grep "java" | wc | awk '{ print $1; }')
if [[ ${RUN} -eq 0 ]]; then
echo "Restarting the system master..."
start_master
fi
echo "Alluxio is running... "
sleep 2
done
}
main() {
# get environment
get_env
# ensure log/data dirs
ensure_dirs
while getopts "hNwmi:" o; do
case "${o}" in
h)
echo -e "${USAGE}"
exit 0
;;
i)
journal_backup=${OPTARG}
;;
m)
monitor="true"
;;
N)
killonstart="no"
;;
w)
wait="true"
;;
*)
echo -e "${USAGE}" >&2
exit 1
;;
esac
done
shift $((${OPTIND} - 1))
ACTION=$1
if [[ -z "${ACTION}" ]]; then
echo "Error: no ACTION specified" >&2
echo -e "${USAGE}" >&2
exit 1
fi
shift
MOPT=$1
# Set MOPT.
case "${ACTION}" in
all|worker|workers|local)
if [[ -z "${MOPT}" ]]; then
echo "Assuming NoMount by default."
MOPT="NoMount"
elif [[ "${MOPT}" == "-f" ]]; then
echo "Assuming SudoMount given -f option."
MOPT="SudoMount"
else
shift
fi
if [[ "${ACTION}" = "worker" ]] || [[ "${ACTION}" = "local" ]]; then
check_mount_mode "${MOPT}"
fi
;;
*)
MOPT=""
;;
esac
FORMAT=$1
if [[ ! -z "${FORMAT}" && "${FORMAT}" != "-f" ]]; then
echo -e "${USAGE}" >&2
exit 1
fi
MONITOR_NODES=
if [[ "${monitor}" ]]; then
case "${ACTION}" in
restart_worker)
MONITOR_NODES=$(get_offline_worker)
;;
restart_workers)
MONITOR_NODES=$(get_offline_workers)
;;
*)
MONITOR_NODES=""
;;
esac
fi
if [[ "${killonstart}" != "no" ]]; then
case "${ACTION}" in
all | local | master | masters | proxy | proxies | worker | workers | logserver)
stop ${ACTION}
sleep 1
;;
esac
fi
case "${ACTION}" in
all)
start_masters "${FORMAT}"
start_job_masters
sleep 2
start_workers "${MOPT}"
start_job_workers
start_proxies
;;
local)
start_master "${FORMAT}"
ALLUXIO_MASTER_SECONDARY=true
start_master
ALLUXIO_MASTER_SECONDARY=false
start_job_master
sleep 2
start_worker "${MOPT}"
start_job_worker
start_proxy
;;
job_master)
start_job_master
;;
job_masters)
start_job_masters
;;
job_worker)
start_job_worker
;;
job_workers)
start_job_workers
;;
master)
start_master "${FORMAT}"
;;
secondary_master)
ALLUXIO_MASTER_SECONDARY=true
start_master
ALLUXIO_MASTER_SECONDARY=false
;;
masters)
start_masters
;;
proxy)
start_proxy
;;
proxies)
start_proxies
;;
restart_worker)
restart_worker
;;
restart_workers)
restart_workers
;;
safe)
run_safe
;;
worker)
start_worker "${MOPT}"
;;
workers)
start_workers "${MOPT}"
;;
logserver)
start_logserver
;;
*)
echo "Error: Invalid ACTION: ${ACTION}" >&2
echo -e "${USAGE}" >&2
exit 1
esac
sleep 2
if [[ "${wait}" ]]; then
wait
fi
if [[ "${monitor}" ]]; then
start_monitor "${ACTION}" "${MONITOR_NODES}"
fi
}
main "$@"
|
aaudiber/alluxio
|
bin/alluxio-start.sh
|
Shell
|
apache-2.0
| 16,557 |
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# shellcheck source=scripts/in_container/_in_container_script_init.sh
. "$( dirname "${BASH_SOURCE[0]}" )/_in_container_script_init.sh"
setup_provider_packages
LIST_OF_DIRS_FILE=$(mktemp)
cd "${AIRFLOW_SOURCES}/airflow/providers" || exit 1
find . -type d | sed 's/.\///; s/\//\./g' | grep -E 'hooks|operators|sensors|secrets|utils' \
> "${LIST_OF_DIRS_FILE}"
cd "${AIRFLOW_SOURCES}/provider_packages" || exit 1
PREPARE_PROVIDER_PACKAGES_PY="${AIRFLOW_SOURCES}/dev/provider_packages/prepare_provider_packages.py"
REFACTOR_PROVIDER_PACKAGES_PY="${AIRFLOW_SOURCES}/dev/provider_packages/refactor_provider_packages.py"
verify_suffix_versions_for_package_preparation
if [[ -z "$*" ]]; then
PROVIDERS_PACKAGES=$(python3 "${PREPARE_PROVIDER_PACKAGES_PY}" list-providers-packages)
PACKAGE_ERROR="false"
# Check if all providers are included
for PACKAGE in ${PROVIDERS_PACKAGES}
do
if ! grep -E "^${PACKAGE}" <"${LIST_OF_DIRS_FILE}" >/dev/null; then
echo "The package ${PACKAGE} is not available in providers dir"
PACKAGE_ERROR="true"
fi
sed -i "/^${PACKAGE}.*/d" "${LIST_OF_DIRS_FILE}"
done
if [[ ${PACKAGE_ERROR} == "true" ]]; then
echo
echo "ERROR! Some packages from dev/provider_packages/prepare_provider_packages.py are missing in providers dir"
exit 1
fi
NUM_LINES=$(wc -l "${LIST_OF_DIRS_FILE}" | awk '{ print $1 }')
if [[ ${NUM_LINES} != "0" ]]; then
echo "ERROR! Some folders from providers package are not defined"
echo " Please add them to dev/provider_packages/prepare_provider_packages.py:"
echo
cat "${LIST_OF_DIRS_FILE}"
echo
exit 1
fi
PROVIDER_PACKAGES=$(python3 "${PREPARE_PROVIDER_PACKAGES_PY}" list-backportable-packages)
else
if [[ "$1" == "--help" ]]; then
echo
echo "Builds all provider packages."
echo
echo "You can provide list of packages to build out of:"
echo
python3 "${PREPARE_PROVIDER_PACKAGES_PY}" list-providers-packages | tr '\n ' ' ' | fold -w 100 -s
echo
echo
exit
fi
PROVIDER_PACKAGES="$*"
fi
if [[ ${BACKPORT_PACKAGES} == "true" ]]; then
echo "==================================================================================="
echo " Copying sources and refactoring code for backport provider packages"
echo "==================================================================================="
else
echo "==================================================================================="
echo " Copying sources for provider packages"
echo "==================================================================================="
fi
python3 "${REFACTOR_PROVIDER_PACKAGES_PY}"
rm -rf dist/*
for PROVIDER_PACKAGE in ${PROVIDER_PACKAGES}
do
rm -rf -- *.egg-info
rm -rf build/
LOG_FILE=$(mktemp)
python3 "${PREPARE_PROVIDER_PACKAGES_PY}" generate-setup-files "${PROVIDER_PACKAGE}"
echo "==================================================================================="
echo " Preparing ${PACKAGE_TYPE} package ${PROVIDER_PACKAGE} "
if [[ "${VERSION_SUFFIX_FOR_PYPI}" == '' && "${VERSION_SUFFIX_FOR_SVN}" == ''
&& ${FILE_VERSION_SUFFIX} == '' ]]; then
echo
echo "Preparing official version"
echo
elif [[ ${FILE_VERSION_SUFFIX} != '' ]]; then
echo
echo " Preparing release candidate with file version suffix only: ${FILE_VERSION_SUFFIX}"
echo
elif [[ "${VERSION_SUFFIX_FOR_PYPI}" == '' ]]; then
echo
echo " Package Version for SVN release candidate: ${TARGET_VERSION_SUFFIX}"
echo
elif [[ "${VERSION_SUFFIX_FOR_SVN}" == '' ]]; then
echo
echo " Package Version for PyPI release candidate: ${TARGET_VERSION_SUFFIX}"
echo
else
# Both SVN/PyPI are set to the same version here!
echo
echo " Pre-release version: ${TARGET_VERSION_SUFFIX}"
echo
fi
echo "-----------------------------------------------------------------------------------"
set +e
package_suffix=""
if [[ ${VERSION_SUFFIX_FOR_SVN} == "" && ${VERSION_SUFFIX_FOR_PYPI} != "" ]]; then
# only adds suffix to setup.py if version suffix for PyPI is set but the SVN one is not
package_suffix="${VERSION_SUFFIX_FOR_PYPI}"
fi
python3 "${PREPARE_PROVIDER_PACKAGES_PY}" --version-suffix "${package_suffix}" \
"${PROVIDER_PACKAGE}">"${LOG_FILE}" 2>&1
RES="${?}"
set -e
if [[ ${RES} != "0" ]]; then
cat "${LOG_FILE}"
exit "${RES}"
fi
echo " Prepared ${PACKAGE_TYPE} package ${PROVIDER_PACKAGE}"
echo "==================================================================================="
done
cd "${AIRFLOW_SOURCES}" || exit 1
pushd dist
if [[ ${FILE_VERSION_SUFFIX} != "" ]]; then
# In case we have FILE_VERSION_SUFFIX we rename prepared files
for FILE in *.tar.gz
do
mv "${FILE}" "${FILE//\.tar\.gz/${FILE_VERSION_SUFFIX}-bin.tar.gz}"
done
for FILE in *.whl
do
mv "${FILE}" "${FILE//\-py3/${FILE_VERSION_SUFFIX}-py3}"
done
fi
popd
AIRFLOW_PACKAGES_TGZ_FILE="/files/airflow-packages-$(date +"%Y%m%d-%H%M%S")-${TARGET_VERSION_SUFFIX}.tar.gz"
tar -cvzf "${AIRFLOW_PACKAGES_TGZ_FILE}" dist/*.whl dist/*.tar.gz
echo
echo "Airflow packages are in dist folder and tar-gzipped in ${AIRFLOW_PACKAGES_TGZ_FILE}"
echo
|
mrkm4ntr/incubator-airflow
|
scripts/in_container/run_prepare_provider_packages.sh
|
Shell
|
apache-2.0
| 6,308 |
#!/bin/bash
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# This scripts performs cloud training for a TensorFlow model.
set -v
echo "Training Cloud ML model"
DATE=$(date '+%Y%m%d_%H%M%S')
# JOB_NAME: the name of your job running on AI Platform.
JOB_NAME=census_$(date +%Y%m%d_%H%M%S)
# JOB_DIR: the output directory
JOB_DIR=gs://${BUCKET_NAME}/keras-job-dir # TODO Change BUCKET_NAME to your bucket name
# REGION: select a region from https://cloud.google.com/ai-platform/training/docs/regions
# or use the default '`us-central1`'. The region is where the model will be deployed.
REGION=us-central1
PYTHON_VERSION=3.7
RUNTIME_VERSION=2.1
TRAIN_STEPS=1000
EVAL_STEPS=100
CONFIG_FILE=hptuning_config.yaml # Add --config ${CONFIG_FILE} for Hyperparameter tuning
gcloud ai-platform jobs submit training "${JOB_NAME}" \
--package-path trainer/ \
--module-name trainer.task \
--region ${REGION} \
--python-version $PYTHON_VERSION \
--runtime-version $RUNTIME_VERSION \
--job-dir "${JOB_DIR}" \
--stream-logs -- \
--train-steps=${TRAIN_STEPS} \
  --eval-steps=${EVAL_STEPS}
|
GoogleCloudPlatform/ai-platform-samples
|
training/tensorflow/census/tf-keras/scripts/train-cloud.sh
|
Shell
|
apache-2.0
| 1,704 |
#!/usr/bin/env bash
params=("$@")
ulimit -H -n 131072 || true
ulimit -S -n 131072 || true
rm -rf cluster
if [ -d cluster-init ];then
echo "== creating cluster directory from existing cluster-init directory"
cp -a cluster-init cluster
else
echo "== creating fresh directory"
mkdir -p cluster || { echo "failed to create cluster directory"; exit 1; }
#if we want to restart we should probably store the parameters line wise
fi
case $OSTYPE in
darwin*)
lib="$PWD/scripts/cluster-run-common.sh"
;;
*)
lib="$(dirname $(readlink -f ${BASH_SOURCE[0]}))/cluster-run-common.sh"
;;
esac
if [[ -f "$lib" ]]; then
. "$lib"
else
echo "could not source $lib"
exit 1
fi
if [[ -f cluster/startup_parameters ]];then
string="$(< cluster/startup_parameters)"
if [[ -z "${params[@]}" ]]; then
params=( $string )
else
if ! [[ "$*" == "$string" ]]; then
echo "stored and given params do not match:"
echo "given: ${params[@]}"
echo "stored: $string"
fi
fi
else
#store parmeters
if [[ -n "${params[@]}" ]]; then
echo "${params[@]}" > cluster/startup_parameters
fi
fi
parse_args "${params[@]}"
if [ "$POOLSZ" == "" ] ; then
POOLSZ=$NRAGENTS
fi
STORAGE_ENGINE="--server.storage-engine=rocksdb"
if [ "$AUTOUPGRADE" == "1" ];then
echo "-- Using autoupgrade procedure"
fi
if [[ $NRAGENTS -le 0 ]]; then
echo "you need as least one agent currently you have $NRAGENTS"
exit 1
fi
printf "== Starting agency ... \n"
printf " # agents: %s," "$NRAGENTS"
printf " # db servers: %s," "$NRDBSERVERS"
printf " # coordinators: %s," "$NRCOORDINATORS"
printf " transport: %s\n" "$TRANSPORT"
if (( $NRAGENTS % 2 == 0)) ; then
echo "**ERROR: Number of agents must be odd! Bailing out."
exit 1
fi
SFRE=1.0
COMP=500
KEEP=2000
if [ -z "$ONGOING_PORTS" ] ; then
CO_BASE=$(( $PORT_OFFSET + 8530 ))
DB_BASE=$(( $PORT_OFFSET + 8629 ))
AG_BASE=$(( $PORT_OFFSET + 4001 ))
SE_BASE=$(( $PORT_OFFSET + 8729 ))
else
CO_BASE=$(( $PORT_OFFSET + 8530 ))
DB_BASE=$(( $PORT_OFFSET + 8530 + $NRCOORDINATORS ))
AG_BASE=$(( $PORT_OFFSET + 8530 + $NRCOORDINATORS + $NRDBSERVERS ))
SE_BASE=$(( $PORT_OFFSET + 8530 + $NRCOORDINATORS + $NRDBSERVERS + $NRAGENTS ))
fi
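# Example layout (values illustrative) with PORT_OFFSET=0, 2 coordinators,
# 3 DB servers, 3 agents and ONGOING_PORTS set: coordinators get 8530-8531,
# DB servers 8532-8534, agents 8535-8537, and single servers start at 8538.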
NATH=$(( $NRDBSERVERS + $NRCOORDINATORS + $NRAGENTS ))
ENDPOINT=[::]
ADDRESS=${ADDRESS:-[::1]}
if [ -z "$JWT_SECRET" ];then
AUTHENTICATION="--server.authentication false"
AUTHORIZATION_HEADER=""
else
AUTHENTICATION="--server.jwt-secret $JWT_SECRET"
AUTHORIZATION_HEADER="Authorization: bearer $(jwtgen -a HS256 -s $JWT_SECRET -c 'iss=arangodb' -c 'server_id=setup')"
fi
if [ -z "$ENCRYPTION_SECRET" ];then
ENCRYPTION=""
else
echo -n $ENCRYPTION_SECRET > cluster/encryption-secret.txt
ENCRYPTION="--rocksdb.encryption-keyfile cluster/encryption-secret.txt"
fi
if [ "$TRANSPORT" == "ssl" ]; then
SSLKEYFILE="--ssl.keyfile UnitTests/server.pem"
CURL="curl --insecure $CURL_AUTHENTICATION -s -f -X GET https:"
else
SSLKEYFILE=""
CURL="curl -s -f $CURL_AUTHENTICATION -X GET http:"
fi
if [ ! -z "$INTERACTIVE_MODE" ] ; then
if [ "$INTERACTIVE_MODE" == "C" ] ; then
ARANGOD="${BUILD}/bin/arangod "
CO_ARANGOD="$XTERM $XTERMOPTIONS ${BUILD}/bin/arangod --console"
echo "Starting one coordinator in terminal with --console"
elif [ "$INTERACTIVE_MODE" == "R" ] ; then
ARANGOD="rr ${BUILD}/bin/arangod"
CO_ARANGOD="$ARANGOD"
echo Running cluster in rr with --console.
fi
else
ARANGOD="${BUILD}/bin/arangod "
CO_ARANGOD=$ARANGOD
fi
echo == Starting agency ...
for aid in `seq 0 $(( $NRAGENTS - 1 ))`; do
[ "$INTERACTIVE_MODE" == "R" ] && sleep 1
PORT=$(( $AG_BASE + $aid ))
AGENCY_ENDPOINTS+="--cluster.agency-endpoint $TRANSPORT://$ADDRESS:$PORT "
if [ "$AUTOUPGRADE" == "1" ];then
$ARANGOD \
-c none \
--agency.activate true \
--agency.compaction-step-size $COMP \
--agency.compaction-keep-size $KEEP \
--agency.endpoint $TRANSPORT://$ENDPOINT:$AG_BASE \
--agency.my-address $TRANSPORT://$ADDRESS:$PORT \
--agency.pool-size $NRAGENTS \
--agency.size $NRAGENTS \
--agency.supervision true \
--agency.supervision-frequency $SFRE \
--agency.wait-for-sync false \
--database.directory cluster/data$PORT \
--javascript.enabled false \
--server.endpoint $TRANSPORT://$ENDPOINT:$PORT \
--log.role true \
--log.file cluster/$PORT.log \
--log.force-direct false \
--log.level $LOG_LEVEL_AGENCY \
--server.descriptors-minimum 0 \
$STORAGE_ENGINE \
$AUTHENTICATION \
$SSLKEYFILE \
$ENCRYPTION \
--database.auto-upgrade true \
2>&1 | tee cluster/$PORT.stdout
fi
$ARANGOD \
-c none \
--agency.activate true \
--agency.compaction-step-size $COMP \
--agency.compaction-keep-size $KEEP \
--agency.endpoint $TRANSPORT://$ENDPOINT:$AG_BASE \
--agency.my-address $TRANSPORT://$ADDRESS:$PORT \
--agency.pool-size $NRAGENTS \
--agency.size $NRAGENTS \
--agency.supervision true \
--agency.supervision-frequency $SFRE \
--agency.wait-for-sync false \
--database.directory cluster/data$PORT \
--javascript.enabled false \
--server.endpoint $TRANSPORT://$ENDPOINT:$PORT \
--log.role true \
--log.file cluster/$PORT.log \
--log.force-direct false \
--log.level $LOG_LEVEL_AGENCY \
--server.descriptors-minimum 0 \
$STORAGE_ENGINE \
$AUTHENTICATION \
$SSLKEYFILE \
$ENCRYPTION \
2>&1 | tee cluster/$PORT.stdout &
done
start() {
if [ "$NRDBSERVERS" == "1" ]; then
SYSTEM_REPLICATION_FACTOR="--cluster.system-replication-factor=1"
else
SYSTEM_REPLICATION_FACTOR=""
fi
if [ "$1" == "dbserver" ]; then
ROLE="DBSERVER"
elif [ "$1" == "coordinator" ]; then
ROLE="COORDINATOR"
fi
if [ "$1" == "coordinator" ]; then
CMD=$CO_ARANGOD
else
CMD=$ARANGOD
fi
if [ "$USE_RR" = "true" ]; then
if ! which rr > /dev/null; then
echo 'rr binary not found in PATH!' >&2
exit 1
fi
CMD="rr $CMD"
fi
TYPE=$1
PORT=$2
mkdir -p cluster/data$PORT cluster/apps$PORT
echo == Starting $TYPE on port $PORT
[ "$INTERACTIVE_MODE" == "R" ] && sleep 1
if [ "$AUTOUPGRADE" == "1" ];then
$CMD \
-c none \
--database.directory cluster/data$PORT \
--cluster.agency-endpoint $TRANSPORT://$ENDPOINT:$AG_BASE \
--cluster.my-address $TRANSPORT://$ADDRESS:$PORT \
--server.endpoint $TRANSPORT://$ENDPOINT:$PORT \
--cluster.my-role $ROLE \
--log.role true \
--log.file cluster/$PORT.log \
--log.level $LOG_LEVEL \
--server.statistics true \
--javascript.startup-directory $SRC_DIR/js \
--javascript.module-directory $SRC_DIR/enterprise/js \
--javascript.app-path cluster/apps$PORT \
--log.force-direct false \
--log.level $LOG_LEVEL_CLUSTER \
--server.descriptors-minimum 0 \
--javascript.allow-admin-execute true \
$SYSTEM_REPLICATION_FACTOR \
$STORAGE_ENGINE \
$AUTHENTICATION \
$SSLKEYFILE \
$ENCRYPTION \
--database.auto-upgrade true \
2>&1 | tee cluster/$PORT.stdout
fi
$CMD \
-c none \
--database.directory cluster/data$PORT \
--cluster.agency-endpoint $TRANSPORT://$ENDPOINT:$AG_BASE \
--cluster.my-address $TRANSPORT://$ADDRESS:$PORT \
--server.endpoint $TRANSPORT://$ENDPOINT:$PORT \
--cluster.my-role $ROLE \
--log.role true \
--log.file cluster/$PORT.log \
--log.level $LOG_LEVEL \
--server.statistics true \
--javascript.startup-directory $SRC_DIR/js \
--javascript.module-directory $SRC_DIR/enterprise/js \
--javascript.app-path cluster/apps$PORT \
--log.force-direct false \
--log.thread true \
--log.level $LOG_LEVEL_CLUSTER \
--server.descriptors-minimum 0 \
--javascript.allow-admin-execute true \
$SYSTEM_REPLICATION_FACTOR \
$STORAGE_ENGINE \
$AUTHENTICATION \
$SSLKEYFILE \
$ENCRYPTION \
2>&1 | tee cluster/$PORT.stdout &
}
PORTTOPDB=`expr $DB_BASE + $NRDBSERVERS - 1`
for p in `seq $DB_BASE $PORTTOPDB` ; do
start dbserver $p
done
PORTTOPCO=`expr $CO_BASE + $NRCOORDINATORS - 1`
for p in `seq $CO_BASE $PORTTOPCO` ; do
start coordinator $p
done
testServer() {
PORT=$1
while true ; do
if [ -z "$AUTHORIZATION_HEADER" ]; then
${CURL}//$ADDRESS:$PORT/_api/version > /dev/null 2>&1
else
${CURL}//$ADDRESS:$PORT/_api/version -H "$AUTHORIZATION_HEADER" > /dev/null 2>&1
fi
if [ "$?" != "0" ] ; then
echo Server on port $PORT does not answer yet.
else
echo Server on port $PORT is ready for business.
break
fi
sleep 1
done
}
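# testServer polls GET /_api/version once per second until the server on the
# given port answers; e.g. testServer $CO_BASE waits for the first coordinator.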
for p in `seq $DB_BASE $PORTTOPDB` ; do
testServer $p
done
for p in `seq $CO_BASE $PORTTOPCO` ; do
testServer $p
done
echo == Done, your cluster is ready at
for p in `seq $CO_BASE $PORTTOPCO` ; do
echo " ${BUILD}/bin/arangosh --server.endpoint $TRANSPORT://[::1]:$p"
done
|
Simran-B/arangodb
|
scripts/startLocalCluster.sh
|
Shell
|
apache-2.0
| 9,624 |
docker build -t wuchang/gogs:latest .
|
wuchang/Dockerfile
|
gogs/latest/build.sh
|
Shell
|
apache-2.0
| 38 |
#!/usr/bin/env bash
set -euo pipefail
EXAMPLE_DIR=$(dirname $0)/../../../examples/misc
# TODO move to util file
# retry function adapted from:
# https://unix.stackexchange.com/questions/82598/how-do-i-write-a-retry-logic-in-script-to-keep-retrying-to-run-it-upto-5-times/82610
function retry {
local n=1
local max=5
local delay=5
while true; do
"$@" && break || {
if [[ ${n} -lt ${max} ]]; then
((n++))
echo "Command '$@' failed. Attempt $n/$max:"
sleep ${delay};
else
>&2 echo "The command has failed after $n attempts."
exit 1;
fi
}
done
}
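# retry wraps any flaky command; it is used below as:
#   retry fission fn test --name dump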
cleanup() {
fission fn delete --name dump
fission fn delete --name fission-inputs
}
trap cleanup EXIT
fission env create --name binary --image fission/binary-env || true
fission fn create --name dump --env binary --deploy ${EXAMPLE_DIR}/dump.sh
retry fission fn test --name dump
fission fn create --name fission-inputs --env workflow --src ${EXAMPLE_DIR}/fission-inputs.wf.yaml
sleep 5 # TODO remove this once we can initiate synchronous commands
printf "[Test 1]: fission-inputs workflow"
OUTPUT=$(fission fn test --name fission-inputs -b "foobar\n" -H 'HEADER_KEY: HEADER_VAL' --method PUT)
printf "[Test 2]: body\n"
echo ${OUTPUT} | grep -q foobar
printf "[Test 3]: headers\n"
echo ${OUTPUT} | grep HTTP_HEADER_KEY
echo ${OUTPUT} | grep HEADER_VAL
printf "[Test 4]: method\n"
echo ${OUTPUT} | grep PUT
printf "[Test 5]: query\n"
# TODO add query parameters once supported in `fission test`
echo "not implemented"
|
fission/fission-workflows
|
test/e2e/tests/test_fission_params.sh
|
Shell
|
apache-2.0
| 1,553 |
#!/bin/bash
set -e
# initialization
initialize_migrator() {
# sets colors for use in output
GREEN='\e[32m'
BLUE='\e[34m'
YELLOW='\e[0;33m'
RED='\e[31m'
BOLD='\e[1m'
CLEAR='\e[0m'
# pre-configure ok, warning, and error output
OK="[${GREEN}OK${CLEAR}]"
INFO="[${BLUE}INFO${CLEAR}]"
NOTICE="[${YELLOW}!!${CLEAR}]"
ERROR="[${RED}ERROR${CLEAR}]"
# trap for ctrl+c
trap 'catch_error User exited' SIGINT
# set default error action to prompt if none provided
ERROR_ACTION=${ERROR_ACTION:-prompt}
# set default to prompt user for validation
USER_PROMPT=${USER_PROMPT:-true}
# set default to require docker login
NO_LOGIN=${NO_LOGIN:-false}
}
# verify requirements met for script to execute properly
verify_ready() {
# verify v1 registry variable has been passed
if [ -z "${V1_REGISTRY}" ]
then
catch_error "${BOLD}V1_REGISTRY${CLEAR} environment variable required"
fi
# verify v2 registry variable has been passed
if [ -z "${V2_REGISTRY}" ]
then
catch_error "${BOLD}V2_REGISTRY${CLEAR} environment variable required"
fi
# verify valid error action
if [ "${ERROR_ACTION}" != "prompt" ] && [ "${ERROR_ACTION}" != "retry" ] && [ "${ERROR_ACTION}" != "skip" ] && [ "${ERROR_ACTION}" != "abort" ]
then
catch_error "${BOLD}ERROR_ACTION${CLEAR} environment variable (${ERROR_ACTION}) invalid; must be one of the following: ${BOLD}prompt${CLEAR}, ${BOLD}retry${CLEAR}, ${BOLD}skip${CLEAR}, or ${BOLD}abort${CLEAR}"
fi
# verify valid user prompt variable
if [ "${USER_PROMPT}" != "true" ] && [ "${USER_PROMPT}" != "false" ]
then
catch_error "${BOLD}USER_PROMPT${CLEAR} environment variable (${USER_PROMPT}) invalid; must be either ${BOLD}true${CLEAR} or ${BOLD}false${CLEAR}"
fi
# verify docker daemon is accessible
    if ! docker info > /dev/null 2>&1
then
catch_error "Docker daemon not accessible. Is the Docker socket shared into the container as a volume?"
fi
}
# generic error catching
catch_error(){
echo -e "\n${ERROR} ${@}"
echo -e "${ERROR} Migration from v1 to v2 failed!"
exit 1
}
# catch push/pull error
catch_push_pull_error() {
# set environment variables to handle arguments
ACTION="${1}"
IMAGE="${2}"
TEMP_ERROR_ACTION=${3:-${ERROR_ACTION}}
# perform action based off of error action
case $TEMP_ERROR_ACTION in
prompt)
# prompt user for course of action
echo -e "${ERROR} Failed to ${ACTION} ${IMAGE}"
echo -en "\n${NOTICE} "
read -rp $"Retry, skip, or abort? {r|s|a} " -n1 RESPONSE; echo
# act based on user response
case ${RESPONSE} in
r|R)
# re-run function with retry
catch_push_pull_error "${ACTION}" "${IMAGE}" "retry"
;;
s|S)
# re-run function with skip
catch_push_pull_error "${ACTION}" "${IMAGE}" "skip"
;;
a|A)
# re-run function with abort
catch_push_pull_error "${ACTION}" "${IMAGE}" "abort"
;;
*)
# invalid user response; re-run function with prompt
echo -e "\n${ERROR} Invalid response"
catch_push_pull_error "${ACTION}" "${IMAGE}" "prompt"
;;
esac
;;
retry)
# run push or pull again
echo -e "${ERROR} Failed to ${ACTION} ${IMAGE}; retrying\n"
sleep 1
push_pull_image "${ACTION}" "${IMAGE}"
;;
skip)
# skip push or pull and proceeed
echo -e "${ERROR} Failed to ${ACTION} ${IMAGE}; skipping\n"
;;
abort)
# abort and exit migration
catch_error "Failed to ${ACTION} ${IMAGE}; aborting"
;;
esac
}
# catch retag error
catch_retag_error() {
# set environment variables to handle arguments
SOURCE_IMAGE="${1}"
DESTINATION_IMAGE="${2}"
TEMP_ERROR_ACTION=${3:-${ERROR_ACTION}}
# perform action based off of error action
case $TEMP_ERROR_ACTION in
prompt)
# prompt user for course of action
echo -e "${ERROR} Failed to retag ${SOURCE_IMAGE} > ${DESTINATION_IMAGE}"
echo -en "\n${NOTICE} "
read -rp $"Retry, skip, or abort? {r|s|a} " -n1 RESPONSE; echo
# act based on user response
case ${RESPONSE} in
r|R)
# re-run function with retry
catch_retag_error "${SOURCE_IMAGE}" "${DESTINATION_IMAGE}" "retry"
;;
s|S)
# re-run function with skip
catch_retag_error "${SOURCE_IMAGE}" "${DESTINATION_IMAGE}" "skip"
;;
a|A)
# re-run function with abort
catch_retag_error "${SOURCE_IMAGE}" "${DESTINATION_IMAGE}" "abort"
;;
*)
# invalid user response; re-run function with prompt
echo -e "\n${ERROR} Invalid response"
catch_retag_error "${SOURCE_IMAGE}" "${DESTINATION_IMAGE}" "prompt"
;;
esac
;;
retry)
# run retag again
echo -e "${ERROR} Failed to retag ${IMAGE}; retrying\n"
sleep 1
retag_image "${SOURCE_IMAGE}" "${DESTINATION_IMAGE}"
;;
skip)
# skip retag and proceed
echo -e "${ERROR} Failed to retag ${IMAGE}; skipping\n"
;;
abort)
# abort and exit migration
catch_error "Failed to retag ${IMAGE}; aborting"
;;
esac
}
# perform a docker login
docker_login() {
REGISTRY="${1}"
USERNAME="${2}"
PASSWORD="${3}"
EMAIL="${4}"
if [ -n "${USERNAME}" ] && [ -n "${PASSWORD}" ] && [ -n "${EMAIL}" ]
then
# docker login with credentials provided
docker login --username="${USERNAME}" --password="${PASSWORD}" --email="${EMAIL}" ${REGISTRY} || catch_error "Failed to login using provided credentials"
else
# prompt for credentials for docker login
echo -e "${NOTICE} Please login to ${REGISTRY}:"
LOGIN_SUCCESS="false"
# keep retrying docker login until successful
while [ "$LOGIN_SUCCESS" = "false" ]
do
docker login ${REGISTRY} && LOGIN_SUCCESS="true"
done
fi
}
# decode username/password for a registry to query the API
decode_auth() {
AUTH_CREDS="$(cat ~/.docker/config.json | jq -r '.auths."'${1}'".auth' | base64 --decode)"
}
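# Illustration (assumes the standard ~/.docker/config.json layout):
#   {"auths": {"registry.example.com": {"auth": "dXNlcjpwYXNz"}}}
# yields AUTH_CREDS="user:pass" for use in the curl calls below.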
# query the v1 registry for a list of all images
query_v1_images() {
echo -e "\n${INFO} Getting a list of images from ${V1_REGISTRY}"
# check to see if a filter pattern was provided
if [ -z "${V1_REPO_FILTER}" ]
then
# no filter pattern was defined, get all repos
IMAGE_LIST="$(curl -s https://${AUTH_CREDS}@${V1_REGISTRY}/v1/search?q= | jq -r '.results | .[] | .name')"
else
    # filter pattern defined, use grep to match repos w/regex capabilities
IMAGE_LIST="$(curl -s https://${AUTH_CREDS}@${V1_REGISTRY}/v1/search?q= | jq -r '.results | .[] | .name' | grep ${V1_REPO_FILTER})"
fi
# loop through all images in v1 registry to get tags for each
for i in ${IMAGE_LIST}
do
# get list of tags for image i
if [ -z "${TAG_FILTER}" ]
then
# get list of tags for image i
IMAGE_TAGS=$(curl -s https://${AUTH_CREDS}@${V1_REGISTRY}/v1/repositories/${i}/tags | jq -r 'keys | .[]')
else
IMAGE_TAGS=$(curl -s https://${AUTH_CREDS}@${V1_REGISTRY}/v1/repositories/${i}/tags | jq -r 'keys | .[]' | grep ${TAG_FILTER})
fi
# loop through tags to create list of full image names w/tags
for j in ${IMAGE_TAGS}
do
# check if an image is a 'library' image without a namespace
    if [ "${i:0:8}" = "library/" ]
then
# cut off 'library/' from beginning of image
i="${i:8}"
fi
# add image to list
FULL_IMAGE_LIST="${FULL_IMAGE_LIST} ${i}:${j}"
done
done
echo -e "${OK} Successfully retrieved list of Docker images from ${V1_REGISTRY}"
}
# show list of images from the v1 registry
show_v1_image_list() {
echo -e "\n${INFO} Full list of images from ${V1_REGISTRY} to be migrated:"
for i in ${FULL_IMAGE_LIST}
do
echo ${V1_REGISTRY}/${i}
done
echo -e "${OK} End full list of images from ${V1_REGISTRY}"
# check to see if user should be prompted
if ${USER_PROMPT}
then
# prompt user to press any key to begin migration
echo -en "\n${NOTICE} "
read -rsp $"Press any key to begin migration process [ctrl+c to abort]" -n1 key; echo
fi
}
# push/pull image
push_pull_image() {
# get action and image name passed
ACTION="${1}"
IMAGE="${2}"
# check the action and act accordingly
case ${ACTION} in
push)
# push image
echo -e "${INFO} Pushing ${IMAGE}"
(docker push ${IMAGE} && echo -e "${OK} Successfully ${ACTION}ed ${IMAGE}\n") || catch_push_pull_error "push" "${IMAGE}"
;;
pull)
# pull image
echo -e "${INFO} Pulling ${IMAGE}"
(docker pull ${IMAGE} && echo -e "${OK} Successfully ${ACTION}ed ${IMAGE}\n") || catch_push_pull_error "pull" "${IMAGE}"
;;
esac
}
# retag image
retag_image() {
# get source and destination image names passed
SOURCE_IMAGE="${1}"
DESTINATION_IMAGE="${2}"
# retag image
(docker tag -f ${SOURCE_IMAGE} ${DESTINATION_IMAGE} && echo -e "${OK} ${V1_REGISTRY}/${i} > ${V2_REGISTRY}/${i}") || catch_retag_error "${SOURCE_IMAGE}" "${DESTINATION_IMAGE}"
}
# pull all images to local system
pull_images_from_v1() {
echo -e "\n${INFO} Pulling all images from ${V1_REGISTRY} to your local system"
for i in ${FULL_IMAGE_LIST}
do
push_pull_image "pull" "${V1_REGISTRY}/${i}"
retag_image "${V1_REGISTRY}/${i}" "${V2_REGISTRY}/${i}"
push_pull_image "push" "${V2_REGISTRY}/${i}"
done
echo -e "${OK} Successully pulled all images from ${V1_REGISTRY} to your local system"
}
pull_retag_push_remove_image() {
GLOBIMAGE="${1}"
echo -e "\n${INFO} Processing ${GLOBIMAGE}"
IMAGE_NAME=$(echo ${GLOBIMAGE} | cut -d ':' -f1)
TAG=$(echo ${GLOBIMAGE} | cut -d ':' -f2)
STATUSCODE=$(curl -s -o /dev/null -w "%{http_code}" https://${AUTH_CREDS}@${V2_REGISTRY}/v2/${IMAGE_NAME}/manifests/${TAG})
if [[ $STATUSCODE -eq "200" ]]; then
echo -e "\n${INFO} Tag exists, skipping ${GLOBIMAGE}"
else
push_pull_image "pull" "${V1_REGISTRY}/${GLOBIMAGE}"
retag_image "${V1_REGISTRY}/${GLOBIMAGE}" "${V2_REGISTRY}/${GLOBIMAGE}"
push_pull_image "push" "${V2_REGISTRY}/${GLOBIMAGE}"
fi
}
push_pull_retag_images() {
for i in ${FULL_IMAGE_LIST}
do
pull_retag_push_remove_image "${i}"
done
}
# check to see if v1 and v2 registry share the same DNS name
check_registry_swap_or_retag() {
if [ "${V1_REGISTRY}" = "${V2_REGISTRY}" ]
then
# retagging not needed; re-using same DNS name for v2 registry
echo -e "${OK} Skipping re-tagging; same URL used for v1 and v2\n"
    # notify user to switch out their registry now
echo -en "${NOTICE} "
read -rsp $'Make the necessary changes to switch your v1 and v2 registries and then press any key to continue\n' -n1 key
else
# re-tag images; different DNS name used for v2 registry
echo -e "\n${INFO} Retagging all images from '${V1_REGISTRY}' to '${V2_REGISTRY}'"
for i in ${FULL_IMAGE_LIST}
do
retag_image "${V1_REGISTRY}/${i}" "${V2_REGISTRY}/${i}"
done
echo -e "${OK} Successfully retagged all images"
fi
}
# verify V2_REGISTRY is reporting as a v2 registry
verify_v2_ready() {
V2_READY="false"
while [ "${V2_READY}" = "false" ]
do
# check to see if V2_REGISTRY is returning the proper api version string
if $(curl -Is https://${V2_REGISTRY}/v2/ | grep ^'Docker-Distribution-Api-Version: registry/2' > /dev/null 2>&1)
then
# api version indicates v2; sets value to exit loop
V2_READY="true"
else
# api version either not returned or not showing proper version; will continue in loop
echo -e "\n${ERROR} v2 registry (${V2_REGISTRY}) is not available"
echo -en "${NOTICE} "
read -rsp $'Verify v2 registry is functioning as expected; press any key to continue to retry [ctrl+c to abort]\n' -n1 key
fi
done
# v2 registry verified as available
echo -e "\n${OK} Verified v2 registry (${V2_REGISTRY}) is available"
}
# push images to v2 registry
push_images_to_v2() {
echo -e "\n${INFO} Pushing all images to ${V2_REGISTRY}"
for i in ${FULL_IMAGE_LIST}
do
push_pull_image "push" "${V2_REGISTRY}/${i}"
done
echo -e "${OK} Successfully pushed all images to ${V2_REGISTRY}"
}
# cleanup images from local docker engine
cleanup_local_engine() {
echo -e "\n${INFO} Cleaning up images from local Docker engine"
# see if re-tagged images exist and remove accordingly
if [ "${V1_REGISTRY}" = "${V2_REGISTRY}" ]
then
for i in ${FULL_IMAGE_LIST}
do
# remove docker image/tags; allow failures here (in case image is actually in use)
docker rmi ${V1_REGISTRY}/${i} || true
done
else
for i in ${FULL_IMAGE_LIST}
do
# remove docker image/tags; allow failures here (in case image is actually in use)
docker rmi ${V1_REGISTRY}/${i} || true
docker rmi ${V2_REGISTRY}/${i} || true
done
fi
echo -e "${OK} Successfully cleaned up images from local Docker engine"
}
# migration complete
migration_complete() {
echo -e "\n${OK} Migration from v1 to v2 complete!"
}
# main function
main() {
initialize_migrator
verify_ready
if [ "${NO_LOGIN}" != "true" ]; then
docker_login ${V1_REGISTRY} ${V1_USERNAME} ${V1_PASSWORD} ${V1_EMAIL}
fi
decode_auth ${V1_REGISTRY}
if [ "${NO_LOGIN}" != "true" ]; then
docker_login ${V2_REGISTRY} ${V2_USERNAME} ${V2_PASSWORD} ${V2_EMAIL}
fi
query_v1_images
show_v1_image_list
push_pull_retag_images
cleanup_local_engine
migration_complete
}
main "$@"
|
pcorliss/migrator
|
migrator.sh
|
Shell
|
apache-2.0
| 13,529 |
#!/bin/sh
# (C) Copyright IBM Corporation 2016.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
startContainer () {
echo "Starting a container from the image $IMAGE$VERSION"
CONTAINER_ID=$(docker run -dt $IMAGE$VERSION)
    if [ -z "$CONTAINER_ID" ]; then
echo "ERROR: Could not start a container from the image $IMAGE$VERSION"
exit 1
fi
echo "Created Container: $CONTAINER_ID"
}
stopContainer () {
echo "Stopping Container: $CONTAINER_ID"
    docker stop $CONTAINER_ID > /dev/null 2>&1
    docker rm $CONTAINER_ID > /dev/null 2>&1
}
#Parse args
while [ $# -gt 1 ]
do
key="$1"
case $key in
-i|--image)
IMAGE="$2"
shift # past argument
;;
-t|--tag)
VERSION="$2"
shift # past argument
;;
-s|--search)
SEARCH="$2"
shift # past argument
;;
--default)
DEFAULT=YES
;;
*)
# unknown option
;;
esac
shift # past argument or value
done
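# Example invocation (image name hypothetical):
#   ./testUCDContainer.sh -i my-ucd-agent -t latest -s ibm-ucd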
#Read the args
if [ -z "$IMAGE" ]; then
    echo "Usage: ./testUCDContainer.sh -i IMAGE_NAME [-t IMAGE_TAG] [-s SEARCH_PROCESS]"
    exit 1;
fi
if [ -n "$VERSION" ]; then
    VERSION=":$VERSION"
fi
if [ -z "$SEARCH" ]; then
    SEARCH="ibm-ucd"
fi
EXIT_STATUS=0
#Start the container
startContainer
#Wait for the container to start up
sleep 2
echo "Testing if process is running"
#Test if a UCD product is running
CONTAINER_RUNNING=$(docker exec $CONTAINER_ID ps -ef | grep "$SEARCH" | grep -v grep)
EXEC_STATUS=$?
if [ -z "$CONTAINER_RUNNING" ]; then
echo "Container from image $IMAGE$VERSION did not start up correctly"
EXIT_STATUS=1
fi
#If things didn't go well with the container, exit with an error
if [ $EXEC_STATUS != 0 ]; then
echo "Container from image $IMAGE$VERSION did not start correctly"
EXIT_STATUS=2
fi
#Stop the container
stopContainer
if [ $EXIT_STATUS = 0 ]; then
echo "Test passed"
else
echo "Test failed"
fi
echo "Finished, exiting"
exit $EXIT_STATUS
|
IBM-UrbanCode/UCD-Docker-Images
|
tools/test/imageTestRunner.sh
|
Shell
|
apache-2.0
| 2,520 |
#!/bin/bash
# Copyright 2019 - 2022 Crunchy Data Solutions, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
CRUNCHY_DIR=${CRUNCHY_DIR:-'/opt/crunchy'}
source "${CRUNCHY_DIR}/bin/common_lib.sh"
export PGHOST="/tmp"
test_server "postgres" "${PGHOST?}" "${PGHA_PG_PORT}" "postgres"
VERSION=$(psql --port="${PG_PRIMARY_PORT}" -d postgres -qtAX -c "SELECT current_setting('server_version_num')")
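# server_version_num encodes the version as a single integer (e.g. 120005 for
# PostgreSQL 12.5), which keeps the range checks below purely numeric.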
if (( ${VERSION?} >= 90600 )) && (( ${VERSION?} < 100000 ))
then
function_file="${CRUNCHY_DIR}/bin/modules/pgexporter/pg96/setup.sql"
elif (( ${VERSION?} >= 100000 )) && (( ${VERSION?} < 110000 ))
then
function_file="${CRUNCHY_DIR}/bin/modules/pgexporter/pg10/setup.sql"
elif (( ${VERSION?} >= 110000 )) && (( ${VERSION?} < 120000 ))
then
function_file="${CRUNCHY_DIR}/bin/modules/pgexporter/pg11/setup.sql"
elif (( ${VERSION?} >= 120000 )) && (( ${VERSION?} < 130000 ))
then
function_file="${CRUNCHY_DIR}/bin/modules/pgexporter/pg12/setup.sql"
elif (( ${VERSION?} >= 130000 ))
then
function_file="${CRUNCHY_DIR}/bin/modules/pgexporter/pg13/setup.sql"
else
echo_err "Unknown or unsupported version of PostgreSQL. Exiting.."
exit 1
fi
echo_info "Using setup file '${function_file}' for pgMonitor"
cp "${function_file}" "/tmp/setup_pg.sql"
sed -i "s,/usr/bin/pgbackrest-info.sh,${CRUNCHY_DIR}/bin/postgres/pgbackrest_info.sh,g" "/tmp/setup_pg.sql"
psql -U postgres --port="${PG_PRIMARY_PORT}" -d postgres \
< "/tmp/setup_pg.sql" >> /tmp/pgmonitor-setup.stdout 2>> /tmp/pgmonitor-setup.stderr
psql -U postgres --port="${PG_PRIMARY_PORT}" -d postgres \
-c "CREATE EXTENSION IF NOT EXISTS pgnodemx WITH SCHEMA monitor;" >> /tmp/pgmonitor-setup.stdout 2>> /tmp/pgmonitor-setup.stderr
|
CrunchyData/crunchy-containers
|
bin/postgres_common/exporter/install.sh
|
Shell
|
apache-2.0
| 2,211 |
while [ "$1" != "" ]; do
case $1 in
-u ) shift
fname=$1
;;
-t ) shift
WAITTIME=$1
;;
-s ) shift
SAVENAME=$1
;;
-h | --help )
echo "-u <url>: set the url of the file"
echo "-t <time>: set the time in seconds until the script restart the connection. When you download a file you will see after x seconds a drop in download speed. This is the perfect time. Default time is 11 seconds"
echo "-s <savename>: the name the file should have after downloading. Default is 'downloadedFile'"
exit
;;
* ) exit 1
esac
shift
done
if [ -n "$fname" ]
then
	echo "Let's start"
else
	echo "use --help | -h"
	exit 1
fi
if [ ! $WAITTIME ]
then
echo "no wait time specified. We use 11 sec as default"
WAITTIME=11
fi
if [ ! $SAVENAME ]
then
echo "no wait time specified. We use 11 sec as default"
SAVENAME="downloadedFile"
fi
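# Resume loop: '-C -' tells curl to continue from the current size of
# $SAVENAME, while --max-time aborts the transfer after $WAITTIME seconds so
# the next iteration reconnects at full speed. curl exit code 33 (HTTP range
# error) means the server has no bytes left to serve, i.e. the download is done.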
while true;
do
curl -L -o $SAVENAME -C - $fname --max-time $WAITTIME
OUT=$?
echo $OUT
	if [ "$OUT" -eq 33 ]
then
exit 0
fi
done
|
BrandiATMuhkuh/downloadAccelerator
|
dlScript.sh
|
Shell
|
apache-2.0
| 1,281 |
#!/bin/bash
if [ -z "$P9MAMBO_PATH" ]; then
P9MAMBO_PATH=/opt/ibm/systemsim-p9/
fi
if [ -z "$P9MAMBO_BINARY" ]; then
P9MAMBO_BINARY="/run/p9/power9"
fi
if [ ! -x "$P9MAMBO_PATH/$P9MAMBO_BINARY" ]; then
echo "Could not find executable P9MAMBO_BINARY ($P9MAMBO_PATH/$MAMBO_BINARY). Skipping sreset_world test";
exit 0;
fi
if [ -n "$KERNEL" ]; then
echo 'Please rebuild skiboot without KERNEL set. Skipping sreset_world test';
exit 0;
fi
if [ ! `command -v expect` ]; then
echo 'Could not find expect binary. Skipping sreset_world test';
exit 0;
fi
export SKIBOOT_ZIMAGE=`pwd`/test/sreset_world/sreset_kernel/sreset_kernel
# Currently getting some core dumps from mambo, so disable them!
OLD_ULIMIT_C=`ulimit -c`
ulimit -c 0
t=$(mktemp) || exit 1
trap "rm -f -- '$t'" EXIT
( cd external/mambo;
cat <<EOF | expect
set timeout 30
spawn $P9MAMBO_PATH/$P9MAMBO_BINARY -n -f ../../test/sreset_world/run_sreset_world.tcl
expect {
timeout { send_user "\nTimeout waiting for hello world\n"; exit 1 }
eof { send_user "\nUnexpected EOF\n"; exit 1 }
"Machine Check Stop" { exit 1;}
"Hello World!"
}
expect {
timeout { send_user "\nTimeout waiting for Hello SRESET\n"; exit 1 }
eof { send_user "\nUnexpected EOF\n"; exit 1 }
"Machine Check Stop" { exit 1;}
"Hello SRESET!"
}
expect {
timeout { send_user "\nTimeout waiting for shutdown\n"; exit 1}
eof { send_user "\nUnexpected EOF\n"; exit 1}
"Machine Check Stop" { exit 1;}
"Execution stopped: Sim Support exit requested stop"
}
wait
exit 0
EOF
) > "$t" 2>&1
r=$?
if [ $r != 0 ]; then
cat $t
exit $r
fi
ulimit -c $OLD_ULIMIT_C
rm -f -- "$t"
trap - EXIT
exit 0;
|
mikey/skiboot
|
test/sreset_world/run_mambo_p9_sreset.sh
|
Shell
|
apache-2.0
| 1,650 |
#!/bin/bash -x
rosrun pr2_controller_manager pr2_controller_manager stop r_arm_controller
rosrun pr2_controller_manager pr2_controller_manager stop l_arm_controller
rosrun pr2_controller_manager pr2_controller_manager unload r_arm_controller
rosrun pr2_controller_manager pr2_controller_manager unload l_arm_controller
rosparam load pr2_arm_controllers_grasp.yaml
rosrun pr2_controller_manager pr2_controller_manager load r_arm_controller
rosrun pr2_controller_manager pr2_controller_manager load l_arm_controller
echo "Hit any key to start the controllers"
read inp
rosrun pr2_controller_manager pr2_controller_manager start r_arm_controller
rosrun pr2_controller_manager pr2_controller_manager start l_arm_controller
|
gt-ros-pkg/hrl-haptic-manip
|
hrl_haptic_mpc/change_gains_pr2.sh
|
Shell
|
apache-2.0
| 726 |
# add custom script in here
VM_INIT='http://172.16.2.254/Packer/qga/vm_init.sh'
QEMU_GA='http://172.16.2.254/Packer/qga/qemu-ga.deb8'
# add respawn script
cat <<'EOF' > /lib/systemd/system/qemu-guest-agent.service
[Unit]
Description=QEMU Guest Agent
BindsTo=dev-virtio\x2dports-org.qemu.guest_agent.0.device
After=dev-virtio\x2dports-org.qemu.guest_agent.0.device
[Service]
UMask=0077
ExecStart=/usr/bin/qemu-ga \
--method=virtio-serial \
--path=/dev/virtio-ports/org.qemu.guest_agent.0 \
--blacklist=guest-file-open,guest-file-close,guest-file-read,guest-file-write,guest-file-seek,guest-file-flush
StandardError=syslog
Restart=always
RestartSec=0
[Install]
WantedBy=multi-user.target
EOF
systemctl enable qemu-guest-agent.service
mkdir -p /usr/var/run/
# wget vm_init
cd /etc/ && wget $VM_INIT && chmod +x vm_init.sh
rm -rf /bin/sh && ln -s /bin/bash /bin/sh
# wget qemu_ga
cd /usr/bin && wget $QEMU_GA && mv qemu-ga.deb8 qemu-ga && chmod +x qemu-ga
# enable tty console
PATH=/sbin:/usr/sbin:/bin:/usr/bin
sed -i -e 's#GRUB_CMDLINE_LINUX=.*$#GRUB_CMDLINE_LINUX="text console=tty0 console=ttyS0,115200n8"#' \
-e 's/#GRUB_TERMINAL=console/GRUB_TERMINAL=console/' \
-e 's#GRUB_CMDLINE_LINUX_DEFAULT="quiet"#GRUB_CMDLINE_LINUX_DEFAULT=""#' /etc/default/grub
/usr/sbin/update-grub
# delete nic config
sed -i '/eth0/,$d' /etc/network/interfaces
# fix dhcp-client could not set hostname
wget http://ftp.cn.debian.org/debian/pool/main/i/isc-dhcp/isc-dhcp-client_4.3.3-6_amd64.deb \
-O /tmp/isc-dhcp-client_4.3.3-6_amd64.deb && \
dpkg -i /tmp/isc-dhcp-client_4.3.3-6_amd64.deb
wget http://ftp.cn.debian.org/debian/pool/main/i/isc-dhcp/isc-dhcp-common_4.3.3-6_amd64.deb \
-O /tmp/isc-dhcp-common_4.3.3-6_amd64.deb && \
dpkg -i /tmp/isc-dhcp-common_4.3.3-6_amd64.deb
rm -rf /etc/hostname
|
davidddw/imageBuilder
|
debian_8.x_kvm_livecloud/scripts/custom_kvm.sh
|
Shell
|
apache-2.0
| 1,796 |
#!/bin/sh
./jl_gen_mfcc_feat.jl -j 1 -m 7 -f 14 -e false \
--mp3-dir "../../data/test" \
--h5-mp3-file "testXmp3.h5" \
--h5-mp3-dset "lid/test/X/mp3" \
--h5-mfcc-file "testX_mfcc_7.h5" \
--h5-mfcc-dset "lid/test/X/mfcc"
|
WojciechMigda/TCO-SpokenLanguages2
|
src/sh_gen_test_mfcc_7.sh
|
Shell
|
apache-2.0
| 231 |
xctool test -workspace Pdef.xcworkspace/ -scheme "iOS Tests" -sdk iphonesimulator
|
pdef/pdef-objc
|
run-ios-tests.sh
|
Shell
|
apache-2.0
| 82 |
#!/bin/bash
#
# 12.04,4.0.4
# 1.
# <command-line>:0:0: error: "_FORTIFY_SOURCE" redefined [-Werror]
# build/core/combo/HOST_linux-x86.mk
# HOST_GLOBAL_CFLAGS += -D_FORTIFY_SOURCE=0
# HOST_GLOBAL_CFLAGS += -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=0
# 2.
# external/mesa3d/src/glsl/linker.cpp:1394:49: error: expected primary-expression before ‘,’ token
# vim external/mesa3d/src/glsl/linker.cpp
# add #include <cstddef>
# 3.
# external/oprofile/libpp/format_output.h:94:22: error: reference ‘counts’ cannot be declared ‘mutable’ [-fpermissive]
# replace "mutable counts_t & counts;" with "counts_t & counts;"
# 4.
# external/gtest/src/../include/gtest/internal/gtest-param-util.h:122:11: error: ‘ptrdiff_t’ does not name a type
# external/gtest/src/../include/gtest/internal/gtest-param-util.h
# add #include <cstddef>
# 5.
# host Executable: test-librsloader (out/host/linux-x86/obj/EXECUTABLES/test-librsloader_intermediates/test-librsloader)
# vim external/llvm/llvm-host-build.mk
# add LOCAL_LDLIBS := -lpthread -ldl
# 6.
# frameworks/compile/slang/slang_rs_export_foreach.cpp:249:23: error: variable ‘ParamName’ set but not used [-Werror=unused-but-set-variable]
# vim frameworks/compile/slang/Android.mk
# replace "local_cflags_for_slang := -Wno-sign-promo -Wall -Wno-unused-parameter -Werror" with "local_cflags_for_slang := -Wno-sign-promo -Wall -Wno-unused-parameter"
#
# 12.04,4.2.2
# make: *** [out/host/linux-x86/obj/EXECUTABLES/mkfs.ubifs_intermediates/mkfs.ubifs] Error 1
# “/usr/bin/ld: cannot find -luuid”
# sudo apt-get install uuid-dev:i386
# locate uuid
# sudo apt-get install uuid
# sudo ln -sf /lib/x86_64-linux-gnu/libuuid.so.1.3.0 /lib/x86_64-linux-gnu/libuuid.so
# sudo apt-get install uuid:i386
# sudo ln -sf /lib/i386-linux-gnu/libuuid.so.1.3.0 /lib/i386-linux-gnu/libuuid.so
#
# set if you want image boot from SDMMC
USE_SDMMC=1
ROOT_PATH=`pwd`
SCRIPT_PATH=${ROOT_PATH}/../script
#CCACHE_BIN=${ROOT_PATH}/prebuilts/misc/linux-x86/ccache/ccache
CCACHE_BIN=/usr/bin/ccache
PRODUCT=sabresd_6dq
OUT_BASE=${ROOT_PATH}/../a_out
OUT_PATH=${OUT_BASE}/${PRODUCT}
LOG_NAME=abuild.log
BLD_LOG=${OUT_BASE}/${LOG_NAME}
SET_NAME=abuild.set
BLD_SET=${OUT_BASE}/${SET_NAME}
FREESCALE_FSTAB_PATH=device/fsl/imx6/etc
RECOVERY_FSTAB_PATH=device/fsl/sabresd_6dq
VOLD_FSTAB_PATH=device/fsl/sabresd_6dq
FREESCALE_FSTAB_FILE=fstab.freescale
RECOVERY_FSTAB_FILE=recovery.fstab
VOLD_FSTAB_FILE=vold.fstab
SDMMC_FREESCALE_FSTAB_FILE=${FREESCALE_FSTAB_FILE}.sdmmc.422_110
EMMC_FREESCALE_FSTAB_FILE=${FREESCALE_FSTAB_FILE}.emmc.422_110
SDMMC_RECOVERY_FSTAB_FILE=${RECOVERY_FSTAB_FILE}.sdmmc.422_110
EMMC_RECOVERY_FSTAB_FILE=${RECOVERY_FSTAB_FILE}.emmc.422_110
SDMMC_VOLD_FSTAB_FILE=${VOLD_FSTAB_FILE}.sdmmc.422_110
EMMC_VOLD_FSTAB_FILE=${VOLD_FSTAB_FILE}.emmc.422_110
echo "#" > ${BLD_SET}
echo "#"
echo "# current path=${ROOT_PATH}"
echo "#"
echo "# current path=${ROOT_PATH}" >> ${BLD_SET}
CCACHE_BIN=`find prebuilts/ -name ccache -path "*linux-x86*" -type f`
if [ "${CCACHE_BIN}" == "" ]; then
CCACHE_BIN=/usr/bin/ccache
fi
if [ -f ${CCACHE_BIN} ]; then
echo "#"
echo "# ccache=${CCACHE_BIN}"
echo "# setup ccache -M 10G ..."
echo "#"
export USE_CCACHE=1
${CCACHE_BIN} -M 10G
fi
echo "# ccache=${CCACHE_BIN}" >> ${BLD_SET}
if [ -d ${OUT_PATH} ]; then
echo "#"
echo "# remove old out folder ..."
echo "#"
rm -rf ${OUT_PATH}
fi
mkdir -p ${OUT_PATH}
echo "#"
echo "# set output to ${OUT_PATH} ..."
echo "#"
###export OUT_DIR_COMMON_BASE=${OUT_PATH}
export OUT_DIR=${OUT_PATH}
echo "# output=${OUT_PATH}" >> ${BLD_SET}
echo "#"
echo "# current path=`pwd`"
echo "#"
echo "# setup build env ..."
echo "#"
. build/envsetup.sh
echo "#"
echo "# setup sabresd build with development configuration ..."
echo "#"
#lunch sabresd_6dq-user
#lunch sabresd_6dq-eng
export TARGET_PRODUCT=sabresd_6dq
export TARGET_BUILD_VARIANT=eng
#export TARGET_BUILD_VARIANT=user
export TARGET_BUILD_TYPE=release
echo "# TARGET_PRODUCT=${TARGET_PRODUCT}" >> ${BLD_SET}
echo "# TARGET_BUILD_VARIANT=${TARGET_BUILD_VARIANT}" >> ${BLD_SET}
echo "# TARGET_BUILD_TYPE=${TARGET_BUILD_TYPE}" >> ${BLD_SET}
if [ -f ${BLD_LOG} ]; then
echo "#"
echo "# remove old log file ..."
echo "#"
rm -rf ${BLD_LOG}
fi
if [ -f ./buildspec.mk ]; then
rm ./buildspec.mk
fi
cp ./build/buildspec.mk.default ./buildspec.mk
cat ${SCRIPT_PATH}/mybuildspec.mk >> ./buildspec.mk
cat ${SCRIPT_PATH}/mybuildspec.mk >> ${BLD_SET}
#JAVA_HOME=/usr/lib/jvm/jdk1.6.0_25
JAVA_HOME=/usr/lib/jvm/jdk1.6.0_45
export JAVA_HOME
PATH=$JAVA_HOME/bin:$PATH
export PATH
echo "#"
echo "# PATH=${PATH}"
echo "# JAVA_HOME=${JAVA_HOME}"
echo "#"
echo "# PATH=${PATH}" >> ${BLD_SET}
echo "# JAVA_HOME=${JAVA_HOME}" >> ${BLD_SET}
if [ "${USE_SDMMC}" == "1" ]; then
FFSTAB=${SCRIPT_PATH}/${SDMMC_FREESCALE_FSTAB_FILE}
RFSTAB=${SCRIPT_PATH}/${SDMMC_RECOVERY_FSTAB_FILE}
VFSTAB=${SCRIPT_PATH}/${SDMMC_VOLD_FSTAB_FILE}
else
FFSTAB=${SCRIPT_PATH}/${EMMC_FREESCALE_FSTAB_FILE}
RFSTAB=${SCRIPT_PATH}/${EMMC_RECOVERY_FSTAB_FILE}
VFSTAB=${SCRIPT_PATH}/${EMMC_VOLD_FSTAB_FILE}
fi
echo "#"
echo "# replace freescale fstab file =${FFSTAB}"
echo "# replace recovery fstab file =${RFSTAB}"
echo "# replace vold fstab file =${VFSTAB}"
echo "#"
echo "# replace freescale fstab file =${FFSTAB}" >> ${BLD_SET}
echo "# replace recovery fstab file =${RFSTAB}" >> ${BLD_SET}
echo "# replace vold fstab file =${VFSTAB}" >> ${BLD_SET}
if [ -f ${ROOT_PATH}/${FREESCALE_FSTAB_PATH}/${FREESCALE_FSTAB_FILE} ]; then
rm -rf ${ROOT_PATH}/${FREESCALE_FSTAB_PATH}/${FREESCALE_FSTAB_FILE}.bak
mv ${ROOT_PATH}/${FREESCALE_FSTAB_PATH}/${FREESCALE_FSTAB_FILE} ${ROOT_PATH}/${FREESCALE_FSTAB_PATH}/${FREESCALE_FSTAB_FILE}.bak
cp ${FFSTAB} ${ROOT_PATH}/${FREESCALE_FSTAB_PATH}/${FREESCALE_FSTAB_FILE}
fi
if [ -f ${ROOT_PATH}/${RECOVERY_FSTAB_PATH}/${RECOVERY_FSTAB_FILE} ]; then
rm -rf ${ROOT_PATH}/${RECOVERY_FSTAB_PATH}/${RECOVERY_FSTAB_FILE}.bak
mv ${ROOT_PATH}/${RECOVERY_FSTAB_PATH}/${RECOVERY_FSTAB_FILE} ${ROOT_PATH}/${RECOVERY_FSTAB_PATH}/${RECOVERY_FSTAB_FILE}.bak
cp ${RFSTAB} ${ROOT_PATH}/${RECOVERY_FSTAB_PATH}/${RECOVERY_FSTAB_FILE}
fi
if [ -f ${ROOT_PATH}/${VOLD_FSTAB_PATH}/${VOLD_FSTAB_FILE} ]; then
rm -rf ${ROOT_PATH}/${VOLD_FSTAB_PATH}/${VOLD_FSTAB_FILE}.bak
mv ${ROOT_PATH}/${VOLD_FSTAB_PATH}/${VOLD_FSTAB_FILE} ${ROOT_PATH}/${VOLD_FSTAB_PATH}/${VOLD_FSTAB_FILE}.bak
cp ${VFSTAB} ${ROOT_PATH}/${VOLD_FSTAB_PATH}/${VOLD_FSTAB_FILE}
fi
make clean
echo "#"
echo "# start source build ..."
echo "#"
_TIMEBUILDSTART=$(date +"%s")
#make -j8 bootloader
#make -j8 kernelimage
#make -j8 kernelmodules
#make -j8 files
#make -j8 systemimage
make -j8 2>&1 | tee ${BLD_LOG}
_TIMEBUILDEND=$(date +"%s")
_TIMEBUILD=$(($_TIMEBUILDEND-$_TIMEBUILDSTART))
cd ${ROOT_PATH}
echo "#"
echo "# current path=`pwd`"
echo "#"
echo "# build time=${_TIMEBUILD} seconds."
echo "#"
|
span999/build-scripts
|
imx/android-422/goandroid.sh
|
Shell
|
apache-2.0
| 6,833 |
#!/bin/sh
# Copyright 2005-2017 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
#
# In applying this licence, ECMWF does not waive the privileges and immunities granted to it by
# virtue of its status as an intergovernmental organisation nor does it submit to any jurisdiction.
#
. ./include.sh
REDIRECT=/dev/null
templog=${data_dir}/log.step.$$
rm -f ${templog} || true
for i in 0 10
do
for s in 0 1200 600 6000
do
for key in stepRange:s startStep endStep
do
      rm -f ${data_dir}/out.grib || true
${tools_dir}/grib_set -s ${key}=$s ${data_dir}/timeRangeIndicator_${i}.grib ${data_dir}/out.grib
# echo grib_set -s ${key}=$s ${data_dir}/timeRangeIndicator_${i}.grib ${data_dir}/out.grib
# grib_get -p step,startStep,endStep,P1,P2,timeRangeIndicator,indicatorOfUnitOfTimeRange ${data_dir}/timeRangeIndicator_${i}.grib ${data_dir}/out.grib
${tools_dir}/grib_get -p mars.step,stepRange,startStep,endStep,P1,P2,timeRangeIndicator,indicatorOfUnitOfTimeRange:l ${data_dir}/timeRangeIndicator_${i}.grib ${data_dir}/out.grib >> ${templog}
done
done
done
i=5
key=stepRange:s
for s in "0-24" "600-1200" "24-48" "36-66"
do
${tools_dir}/grib_set -s ${key}=$s ${data_dir}/timeRangeIndicator_${i}.grib ${data_dir}/out.grib
# echo grib_set -s ${key}=$s ${data_dir}/timeRangeIndicator_${i}.grib ${data_dir}/out.grib
# grib_ls -p step,startStep,endStep,P1,P2,timeRangeIndicator,indicatorOfUnitOfTimeRange ${data_dir}/timeRangeIndicator_${i}.grib ${data_dir}/out.grib
${tools_dir}/grib_get -p mars.step,stepRange,startStep,endStep,P1,P2,timeRangeIndicator,indicatorOfUnitOfTimeRange:l ${data_dir}/timeRangeIndicator_${i}.grib ${data_dir}/out.grib >> ${templog}
done
rm -f ${data_dir}/out.grib || true
# test added for ifs stepType=max,min
${tools_dir}/grib_set -s stepType=max,startStep=3,endStep=6 ${data_dir}/reduced_gaussian_model_level.grib1 ${data_dir}/out.grib
${tools_dir}/grib_get -p mars.step,stepRange,startStep,endStep,P1,P2,timeRangeIndicator,indicatorOfUnitOfTimeRange:l ${data_dir}/reduced_gaussian_model_level.grib1 ${data_dir}/out.grib >> ${templog}
rm -f ${data_dir}/out.grib || true
diff ${templog} ${data_dir}/step.log
(${tools_dir}/grib_filter ${data_dir}/step_grib1.filter ${data_dir}/timeRangeIndicator_0.grib > ${templog}) 2>$REDIRECT
diff ${templog} ${data_dir}/step_grib1.log
rm -f ${templog} || true
# GRIB-180
# Set PDT 4.8 where you can find the EndOfOverallTimeInterval keys
grib2File=${data_dir}/reduced_latlon_surface_constant.grib2
${tools_dir}/grib_set -sproductDefinitionTemplateNumber=8 $grib2File ${grib2File}.p8tmp
# 78 hours is 3 days and 6 hours
${tools_dir}/grib_set -s step=78 $grib2File.p8tmp ${grib2File}.tmp
set `${tools_dir}/grib_get -p hourOfEndOfOverallTimeInterval,dayOfEndOfOverallTimeInterval ${grib2File}.tmp`
hourEnd=$1; dayEnd=$2
[ "$hourEnd" = "18" ]
[ "$dayEnd" = "8" ]
${tools_dir}/grib_set -s step=12 $grib2File.p8tmp ${grib2File}.tmp
set `${tools_dir}/grib_get -p hourOfEndOfOverallTimeInterval,dayOfEndOfOverallTimeInterval ${grib2File}.tmp`
hourEnd=$1; dayEnd=$2
[ "$hourEnd" = "0" ]
[ "$dayEnd" = "6" ]
# ECC-134 case-sensitivity
grib1_sample=$ECCODES_SAMPLES_PATH/GRIB1.tmpl
grib2_sample=$ECCODES_SAMPLES_PATH/GRIB2.tmpl
temp=temp.step.$$.grib
# M is for Month (code 3)
${tools_dir}/grib_set -s indicatorOfUnitOfTimeRange=M $grib1_sample $temp
unit=`${tools_dir}/grib_get -p unitOfTimeRange $temp`
[ "$unit" = "3" ]
${tools_dir}/grib_set -s indicatorOfUnitOfTimeRange=M $grib2_sample $temp
unit=`${tools_dir}/grib_get -p indicatorOfUnitOfTimeRange $temp`
[ "$unit" = "3" ]
# m is for Minute (code 0)
${tools_dir}/grib_set -s indicatorOfUnitOfTimeRange=m $grib1_sample $temp
unit=`${tools_dir}/grib_get -p unitOfTimeRange $temp`
[ "$unit" = "0" ]
${tools_dir}/grib_set -s indicatorOfUnitOfTimeRange=m $grib2_sample $temp
unit=`${tools_dir}/grib_get -p indicatorOfUnitOfTimeRange $temp`
[ "$unit" = "0" ]
# ECC-457
input=${data_dir}/tp_ecmwf.grib
stepRange=`${tools_dir}/grib_get -w count=1 -p stepRange,startStep,endStep,stepType $input`
[ "$stepRange" = "12 12 12 instant" ]
rm -f $temp
rm -f $grib2File.p8tmp ${grib2File}.tmp x.grib
|
0x1mason/GribApi.XP
|
grib_api/tests/grib_step.sh
|
Shell
|
apache-2.0
| 4,254 |
#!/bin/bash
{# Copyright 2017 Cargill Incorporated
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. #}
# Move parquet data from /incr to /base
set -eu
hdfs dfs -mv {{ conf.staging_database.path }}/{{ table.destination.name }}/incr/* {{ conf.staging_database.path }}/{{ table.destination.name }}/base
|
Cargill/pipewrench
|
templates/sqoop-parquet-hdfs-hive-merge/move-1st-sqoop.sh
|
Shell
|
apache-2.0
| 813 |
#!/bin/bash -e
. /var/cache/build/packages-manual/common.sh
download_and_verify \
hartleys \
qorts \
1.3.0 \
036523a1b024121244f6a1aea5b41eb0f83d8619fb3b598fa125d2999b00fda8 \
https://github.com/hartleys/QoRTs/releases/download/\${version}/QoRTs.jar
cat <<'EOF' >QoRTs
#!/bin/bash
java "${1}" -jar /opt/hartleys/qorts/QoRTs.jar "${@:2}"
EOF
chmod +x QoRTs
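# The wrapper forwards its first argument to the JVM and the rest to QoRTs,
# so a typical call (arguments illustrative) looks like:
#   QoRTs -Xmx4G QC --generatePlots input.bam annotation.gtf.gz qc_out/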
add_binary_path \
hartleys \
qorts
|
genialis/resolwe-bio
|
resolwe_bio/docker_images/common/packages-manual/qorts.sh
|
Shell
|
apache-2.0
| 425 |
#!/bin/sh
#
_Tr=$(tput sgr0) # Text reset.
_Br=$(tput setab 1) # Red
NOWTIME=`date +%Y%m%d%02k%02M%02S`
AROOT=`pwd`
WSFOLDER=anadroid-emulator
WSPATH=${AROOT}/${WSFOLDER}
CPUS=12
AVER=6.0.0_r1
#AVER=5.0.2_r1
#AVER=4.4.3_r1
AFOLDER=android-${AVER}
BUBOARD=aosp_arm
BUMODE=eng
LUNCHTYPE=${BUBOARD}-${BUMODE}
#LUNCHTYPE=aosp_arm-eng
CCACHE_BIN=
TARPATH=/home/span/workshop/git/android/tarball
SRCTAR=android-${AVER}_src.tar.xz
if [ ! -d ${WSFOLDER} ]; then
mkdir -p ${WSPATH}
fi
cd ${WSPATH}
echo `pwd`
if [ "${AVER}" = "4.4.3_r1" ]; then
#export JAVA_HOME=/usr/lib/jvm/jdk1.6.0_45
export JAVA_HOME=/home/span/workshop/bin/jdk1.6.0_45
export PATH=/home/span/workshop/bin/jdk1.6.0_45/bin:$PATH
fi
echo "${_Br}#"
echo "# <<<JAVA_HOME=${JAVA_HOME}>>> "
echo "#${_Tr}"
echo "${_Br}#"
echo "**************************************************"
echo "ROOT=${AROOT}"
echo "AFOLDER=${AFOLDER}"
echo "AVER=${AVER}"
echo "LUNCHTYPE=${LUNCHTYPE}"
echo "**************************************************"
echo "#${_Tr}"
_TIMEDOWNLOAD=0
if [ ! -d ${AVER}_src ]; then
rm ${AFOLDER}
mkdir ${AVER}_src
_TIMEBUILDSTART=$(date +"%s")
tar xvf ${TARPATH}/${SRCTAR} -C ${AVER}_src/
ln -s ${AVER}_src/myandroid ${AFOLDER}
_TIMEBUILDEND=$(date +"%s")
_TIMEDOWNLOAD=$(($_TIMEBUILDEND-$_TIMEBUILDSTART))
fi
if [ "${AVER}" = "5.0.2_r1" ]; then
echo "${_Br}#"
echo "# <<<PATCH! replace commit_id.target.linux-arm.mk >>> "
echo "#${_Tr}"
cp -f ${TARPATH}/commit_id.target.linux-arm.mk ${AFOLDER}/external/chromium_org/third_party/angle/src/commit_id.target.linux-arm.mk
fi
cd ${AFOLDER}
echo `pwd`
export USE_CCACHE=1
#export CCACHE_DIR=~/.accache
#export CCACHE_DIR=~/.ccache
CCACHE_BIN=`find ./ -type f -path "*linux-x86*" -name ccache`
${CCACHE_BIN} -M 25G
. build/envsetup.sh
lunch ${LUNCHTYPE}
make clean
touch startTIME
_TIMEBUILDSTART=$(date +"%s")
#make -j8 2>&1 | tee ${AROOT}/logs/build-${NOWTIME}-[${LUNCHTYPE}]-log.txt
make -j${CPUS} 2>&1 | tee ${AROOT}/logs/build-${NOWTIME}-[${LUNCHTYPE}]-log.txt
touch endTIME
_TIMEBUILDEND=$(date +"%s")
_TIMEBUILD=$(($_TIMEBUILDEND-$_TIMEBUILDSTART))
cd ${AROOT}
echo `pwd`
echo "${_Br}#"
echo "# download time=${_TIMEDOWNLOAD} seconds."
echo "# build time=${_TIMEBUILD} seconds."
#echo "# mkubi time=${_TIMEMKUBI} seconds."
echo "#${_Tr}"
# _E_O_F_
|
span999/build-scripts
|
imx/bld-android-aMM600.sh
|
Shell
|
apache-2.0
| 2,305 |
#!/bin/bash
#
# (C) Copyright IBM Corporation 2015.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
HELP="--help"
if [[ "$1" == "$HELP" ]]
then
echo "USAGE"
echo " No args are required! "
echo "DESCRIPTION"
echo " "
echo " This test has been created to help test the Static-Topology scripts"
echo " This test runs in a docker enviroment so Docker must be installed on the"
echo " the host machine that this script is running on."
echo " This script will build and run two containers of WebSphere-Liberty coppying in the required files"
echo " It will then check to see when WebSphere-Liberty has started and run the GenPluginCfg.sh script to "
echo " generate the xml for each liberty instance. Another Liberty container is then spun up and the xml"
echo " is coppied into the new container and the merge script is then run to produce the final merged.xml"
else
cd ..
cd gen-plugin-cfg
docker build -t baseliberty .
cd ..
cd test
#Create docker bridge network
docker network create net1
docker build -t liberty .
docker run -d --name liberty1 -h liberty1 --net=net1 liberty
docker run -d --name liberty2 -h liberty2 --net=net1 liberty
docker run -d --name liberty3 -h liberty3 --net=net1 liberty
#This section waits for Liberty to start otherwise the GenPluginCfg.sh script fails
echo " "
echo "The test is waiting for all Liberty containers to start"
found=1
while [ $found != 0 ];
do
sleep 3s
docker logs liberty1 | grep "ready to run a smarter planet"
found=$?
done
docker exec liberty1 /opt/ibm/wlp/bin/GenPluginCfg.sh --installDir=/opt/ibm/wlp --userDir=/opt/ibm/wlp/usr --serverName=defaultServer
found2=1
while [ $found2 != 0 ];
do
sleep 3s
docker logs liberty2 | grep "ready to run a smarter planet"
found2=$?
done
docker exec liberty2 /opt/ibm/wlp/bin/GenPluginCfg.sh --installDir=/opt/ibm/wlp --userDir=/opt/ibm/wlp/usr --serverName=defaultServer
found3=1
while [ $found3 != 0 ];
do
sleep 3s
docker logs liberty3 | grep "ready to run a smarter planet"
found3=$?
done
docker exec liberty3 /opt/ibm/wlp/bin/GenPluginCfg.sh --installDir=/opt/ibm/wlp --userDir=/opt/ibm/wlp/usr --serverName=defaultServer
cd ..
cd get-plugin-cfg
./GetPluginCfg.sh liberty1 liberty1
mv plugin-cfg.xml plugin-cfg1.xml
./GetPluginCfg.sh liberty2 liberty2
mv plugin-cfg.xml plugin-cfg2.xml
./GetPluginCfg.sh liberty3 liberty3
mv plugin-cfg.xml plugin-cfg3.xml
cd ..
mv get-plugin-cfg/plugin-cfg1.xml merge-plugin-cfg/plugin-cfg1.xml
mv get-plugin-cfg/plugin-cfg2.xml merge-plugin-cfg/plugin-cfg2.xml
mv get-plugin-cfg/plugin-cfg3.xml merge-plugin-cfg/plugin-cfg3.xml
cd merge-plugin-cfg
echo " "
echo "Creating new liberty container with the required .jar file"
docker build -t libertytwo .
docker run -d --name=liberty4 libertytwo
echo "Copying xml to the container"
docker cp plugin-cfg1.xml liberty4:/tmp
docker cp plugin-cfg2.xml liberty4:/tmp
docker cp plugin-cfg3.xml liberty4:/tmp
docker cp pluginCfgMerge.sh liberty4:/tmp
found4=1
while [ $found4 != 0 ];
do
sleep 3s
docker logs liberty4 | grep "ready to run a smarter planet"
found4=$?
done
echo " "
echo "Executing merge script"
docker exec liberty4 /tmp/pluginCfgMerge.sh /tmp/plugin-cfg1.xml /tmp/plugin-cfg2.xml /tmp/plugin-cfg3.xml /tmp/merge-cfg.xml
cd ..
docker cp liberty4:/tmp/merge-cfg.xml merge-plugin-cfg/merge-cfg.xml
cd merge-plugin-cfg
#This is the new section to support IHS
echo "Pulling down and deploying the IHS image"
docker run -d -p 80:80 -h ihs --net=net1 --name=ihs ibmcom/ibm-http-server
sleep 5s
echo "Send the merged xml to the IHS Instance"
docker cp merge-cfg.xml ihs:/opt/IBM/WebSphere/Plugins/config/webserver1/plugin-cfg.xml
echo "Stopping and starting the ihs server"
docker exec ihs bash -c "/opt/IBM/HTTPServer/bin/apachectl stop"
echo "ihs has stopped"
sleep 3s
docker exec ihs bash -c "/opt/IBM/HTTPServer/bin/apachectl start"
echo "ihs has started"
sleep 3s
#Getting the port numbers of the liberty instances that have been routed too
echo "Starting comparisons"
wget http://0.0.0.0:80/ferret -q -O ferret1.txt
port1=$(head -75 ferret1.txt | tail -1 | cut -c 7-11) >> test.txt
wget http://0.0.0.0:80/ferret -q -O ferret2.txt
port2=$(head -75 ferret2.txt | tail -1 | cut -c 7-11) >> test.txt
wget http://0.0.0.0:80/ferret -q -O ferret3.txt
port3=$(head -75 ferret3.txt | tail -1 | cut -c 7-11) >> test.txt
echo $port1
echo $port2
echo $port3
wget http://0.0.0.0:80/ferret -q -O ferret11.txt
port11=$(head -75 ferret11.txt | tail -1 | cut -c 7-11) >> test.txt
wget http://0.0.0.0:80/ferret -q -O ferret22.txt
port22=$(head -75 ferret22.txt | tail -1 | cut -c 7-11) >> test.txt
wget http://0.0.0.0:80/ferret -q -O ferret33.txt
port33=$(head -75 ferret33.txt | tail -1 | cut -c 7-11) >> test.txt
echo $port11
echo $port22
echo $port33
#Comparing ports
echo "Comparing Ports"
if [[ $port1 == $port11 ]]
then
result="PASS"
else
result="FAIL"
fi
if [[ $port2 == $port22 ]]
then
result="PASS"
else
result="FAIL"
fi
if [[ $port3 == $port33 ]]
then
result="PASS"
else
result="FAIL"
fi
echo "Test Result: $result"
#Cleanup
rm test.txt
rm ferret1.txt
rm ferret11.txt
rm ferret2.txt
rm ferret22.txt
rm ferret3.txt
rm ferret33.txt
rm plugin-cfg1.xml
rm plugin-cfg2.xml
rm plugin-cfg3.xml
rm merge-cfg.xml
echo "Killing and removing the IHS container"
docker stop ihs
docker rm ihs
echo "Killing and removing each Liberty container"
docker stop liberty1
docker stop liberty2
docker stop liberty3
docker stop liberty4
docker rm liberty1
docker rm liberty2
docker rm liberty3
docker rm liberty4
fi
|
WASdev/ci.docker.ibm-http-server
|
static-topology/test/testMerge.sh
|
Shell
|
apache-2.0
| 6,752 |
#!/bin/sh
set -xe
cp_static () {
pkg-config $1 || exit 0
dir=$(pkg-config --static --libs-only-L $1)
if [ -z "$(echo $dir)" ]; then
        return 0
fi
dir=$(echo $dir | sed 's/^-L//')
if [ -f "${dir}/$2" ]; then
cp "${dir}/$2" "$OUT_DIR"
fi
}
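# Example (library name hypothetical): cp_static zlib libz.a would copy the
# static zlib archive into $OUT_DIR, provided pkg-config reports a -L path.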
if [ "$(uname -s)" = "Linux" ]; then
cp_static openssl libssl.a
cp_static libcrypto libcrypto.a
fi
|
alexcrichton/openssl-static-sys
|
build/cp_ssl.sh
|
Shell
|
apache-2.0
| 389 |
#!/bin/bash
if [ -z "$ANDROID_SDK" ]; then
. ./env.sh
fi
if [ ! -d "$ANDROID_SDK" ]; then
echo "Please install Android SDK into "$ANDROID_SDK
exit 1
fi
if [ ! -d "$ANDROID_NDK" ]; then
echo "Please install Android NDK into "$ANDROID_NDK
exit 1
fi
if [ "$1" = "clean" ]; then
echo "Cleaning dali..."
rm -rf ./dali-core
rm -rf ./dali-adaptor
rm -rf ./dali-toolkit
rm -rf ./dali-demo
exit 0
fi
if [ ! -z "$DEBUG" ]; then
export ENABLE_TRACE=ON
fi
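# Build each DALi module for Android API level 26 against the ABI selected via $TARGET; abort on the first failure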
ANDROID_PLATFORM=26 ANDROID_ABI=${TARGET} ./build_core.sh || exit 1
ANDROID_PLATFORM=26 ANDROID_ABI=${TARGET} ./build_adaptor.sh || exit 1
ANDROID_PLATFORM=26 ANDROID_ABI=${TARGET} ./build_toolkit.sh || exit 1
ANDROID_PLATFORM=26 ANDROID_ABI=${TARGET} ./build_demo.sh || exit 1
|
dalihub/dali-demo
|
build/android/dali/build.sh
|
Shell
|
apache-2.0
| 752 |
#!/bin/sh
set -e
cd $HOME/logs
flock -n ~/var/locks/approve-cleaner $HOME/services/java-wrappers/approve-cleaner $HOME/services/conf/approve-cleaner.properties "$@"
|
statsbiblioteket/digital-pligtaflevering-aviser-tools
|
tools/dpa-tools-deployment/for-deployment/bin/approve-cleaner.sh
|
Shell
|
apache-2.0
| 167 |
#!/usr/bin/env bash
# PLEASE NOTE: This script has been automatically generated by conda-smithy. Any changes here
# will be lost next time ``conda smithy rerender`` is run. If you would like to make permanent
# changes to this script, consider a proposal to conda-smithy so that other feedstocks can also
# benefit from the improvement.
set -xeo pipefail
build_dir=${1}
THISDIR="$( cd "$( dirname "$0" )" >/dev/null && pwd )"
ARROW_ROOT=$(cd "$THISDIR/../../.."; pwd;)
FEEDSTOCK_ROOT=$THISDIR
docker info
# In order for the conda-build process in the container to write to the mounted
# volumes, we need to run with the same id as the host machine, which is
# normally the owner of the mounted volumes, or at least has write permission
export HOST_USER_ID=$(id -u)
# Check if docker-machine is being used (normally on OSX) and get the uid from
# the VM
if hash docker-machine 2> /dev/null && docker-machine active > /dev/null; then
export HOST_USER_ID=$(docker-machine ssh $(docker-machine active) id -u)
fi
if [ -z "$CONFIG" ]; then
set +x
FILES=`ls .ci_support/linux_*`
CONFIGS=""
for file in $FILES; do
CONFIGS="${CONFIGS}'${file:12:-5}' or ";
done
echo "Need to set CONFIG env variable. Value can be one of ${CONFIGS:0:-4}"
exit 1
fi
if [ -z "${DOCKER_IMAGE}" ]; then
SHYAML_INSTALLED="$(shyaml --version || echo NO)"
if [ "${SHYAML_INSTALLED}" == "NO" ]; then
echo "WARNING: DOCKER_IMAGE variable not set and shyaml not installed. Falling back to condaforge/linux-anvil-comp7"
DOCKER_IMAGE="condaforge/linux-anvil-comp7"
else
DOCKER_IMAGE="$(cat "${FEEDSTOCK_ROOT}/.ci_support/${CONFIG}.yaml" | shyaml get-value docker_image.0 condaforge/linux-anvil-comp7 )"
fi
fi
mkdir -p "${build_dir}"
DONE_CANARY="${build_dir}/conda-forge-build-done-${CONFIG}"
rm -f "$DONE_CANARY"
if [ -z "${CI}" ]; then
DOCKER_RUN_ARGS="-it "
fi
export UPLOAD_PACKAGES="${UPLOAD_PACKAGES:-True}"
docker run ${DOCKER_RUN_ARGS} \
--shm-size=2G \
-v "${ARROW_ROOT}":/arrow:rw,z \
-v "${build_dir}":/build:rw \
-e FEEDSTOCK_ROOT="/arrow/dev/tasks/conda-recipes" \
-e CONFIG \
-e HOST_USER_ID \
-e UPLOAD_PACKAGES \
-e ARROW_VERSION \
-e CI \
$DOCKER_IMAGE \
bash /arrow/dev/tasks/conda-recipes/build_steps.sh /build
# verify that the end of the script was reached
test -f "$DONE_CANARY"
|
renesugar/arrow
|
dev/tasks/conda-recipes/run_docker_build.sh
|
Shell
|
apache-2.0
| 2,479 |
#!/usr/bin/env bash
# This script will use cli53 to export the zone file for each Hosted Zone domain in Route 53 for git version control
# Requires Python, pip, awscli, cli53
# For more info on cli53 see https://github.com/barnybug/cli53
# Download here: https://github.com/barnybug/cli53/releases/latest
# Functions
# Check required commands
function check_command {
type -P $1 &>/dev/null || fail "Unable to find $1, please install it and run this script again."
}
# Fail
function fail(){
tput setaf 1; echo "Failure: $*" && tput sgr0
exit 1
}
# Completed
function completed(){
echo
HorizontalRule
tput setaf 2; echo "Completed!" && tput sgr0
HorizontalRule
echo
}
function HorizontalRule(){
echo "============================================================"
}
# Verify AWS CLI Credentials are setup
# http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html
if ! grep -q aws_access_key_id ~/.aws/credentials; then
if ! grep -q aws_access_key_id ~/.aws/config; then
fail "AWS config not found or CLI not installed. Please run \"aws configure\"."
fi
fi
# Check for AWS CLI profile argument passed into the script
# http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-multiple-profiles
if [ $# -eq 0 ]; then
scriptname=`basename "$0"`
echo "Usage: ./$scriptname profile"
echo "Where profile is the AWS CLI profile name"
echo "Using default profile"
echo
profile=default
else
profile=$1
fi
check_command "cli53"
# # Test if cli53 already installed, else install it
# command -v cli53 >/dev/null 2>&1 || {
# echo "Installing cli53."
# sudo pip install cli53
# echo "cli53 installed."
# }
# Test for ~/.boto file
# if ! [ -f ~/.boto ]; then
# # read -rp "Attempt to configure cli53 using AWS CLI credentials? (y/n) " CONFIGURE
# # if [[ $CONFIGURE =~ ^([yY][eE][sS]|[yY])$ ]]; then
# # Look for AWS CLI credentials
# echo "Attempting to configure cli53 using AWS CLI credentials..."
# if grep -q aws_access_key_id ~/.aws/config; then
# export AWS_ACCESS_KEY_ID=$(grep aws_access_key_id ~/.aws/config | cut -d ' ' -f3)
# export AWS_SECRET_ACCESS_KEY=$(grep aws_secret_access_key ~/.aws/config | cut -d ' ' -f3)
# elif grep -q aws_access_key_id ~/.aws/credentials; then
# export AWS_ACCESS_KEY_ID=$(grep aws_access_key_id ~/.aws/credentials | cut -d ' ' -f3)
# export AWS_SECRET_ACCESS_KEY=$(grep aws_secret_access_key ~/.aws/credentials | cut -d ' ' -f3)
# else
# echo "AWS config not found or CLI not installed. Please run \"aws configure\"."
# exit 1
# fi
# echo "Found AWS_ACCESS_KEY_ID:" $AWS_ACCESS_KEY_ID
# echo "Found AWS_SECRET_ACCESS_KEY:" $AWS_SECRET_ACCESS_KEY
# echo "Building ~/.boto config file with these credentials..."
# # Build ~/.boto config file
# echo "[Credentials]" >> ~/.boto
# echo "aws_access_key_id = "$AWS_ACCESS_KEY_ID >> ~/.boto
# echo "aws_secret_access_key = "$AWS_SECRET_ACCESS_KEY >> ~/.boto
# fi
# Get list of Hosted Zones in Route 53
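# list-hosted-zones text output has the zone name in column 4; the rev|cut|rev pipeline strips the trailing dot from each name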
DOMAINLIST=$(aws route53 list-hosted-zones --output text --profile $profile | cut -f 4 | rev | cut -c 2- | rev | grep -v '^$')
if [ -z "$DOMAINLIST" ]; then
fail "No hosted zones found in Route 53!"
fi
# Count domains found
TOTALDOMAINS=$(echo "$DOMAINLIST" | wc -l)
echo
HorizontalRule
echo "Exporting Zone Files for Route 53 Hosted Zones"
echo "Total number of Hosted Zones: "$TOTALDOMAINS
HorizontalRule
echo "$DOMAINLIST"
echo
if ! [ -d route53zones/$profile/ ]; then
mkdir -p route53zones/$profile
fi
# Export Hosted Zones
START=1
for (( COUNT=$START; COUNT<=$TOTALDOMAINS; COUNT++ ))
do
HorizontalRule
echo \#$COUNT
DOMAIN_ID=$(echo "$DOMAINLIST" | sed -n "${COUNT}p")
cli53 export --full --profile $profile $DOMAIN_ID > route53zones/$profile/$DOMAIN_ID.zone
echo "Exported: "$DOMAIN_ID
done
# Remove any empty zone file created
if [ -f route53zones/$profile/.zone ]; then
rm route53zones/$profile/.zone
fi
completed
|
swoodford/aws
|
route53-export-zones.sh
|
Shell
|
apache-2.0
| 3,933 |
#args list
#1: Path to input compact-reads file
#2: Output file
function run_minia {
local INPUT=$1
local OUTPUT=$2
shift
shift
#minia doesn't support compact-reads, so convert to FASTA first
local TEMPFILE=`mktemp readsXXXX`
run_goby 4g compact-to-fasta --output-format fasta --fasta-line-length 10000 --input ${INPUT} --output ${TEMPFILE}
#run minia on converted file
#./minia input kmer_length min_abundance estimated_genome_size prefix
${RESOURCES_ARTIFACTS_MINIA_EXECUTABLE}/minia ${TEMPFILE} 25 3 3000000000 unused
# copy minia output file to specified location
cp unused.contigs.fa ${OUTPUT}
}
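# Example (hypothetical paths):
#   run_minia /data/reads.compact-reads /data/contigs.fa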
|
CampagneLaboratory/gobyweb2-plugins
|
plugins/resources/MINIA_2013_1.4961/script-minia.sh
|
Shell
|
apache-2.0
| 621 |
source mysql-commands/local-get-last-post-id.sh
source mysql-commands/remote-get-last-post-id.sh
source mysql-commands/compare-post-ids.sh
|
franciscof5/mysql-manager
|
mysql-operations/auto-sync.sh
|
Shell
|
apache-2.0
| 141 |
#!/usr/bin/env bash
# Arguments:
# 1: <srcDir> - project root dir
# 2: <dstDir> - output dir
# 3: <language> - optional
# Example usage:
# ./collect-cuba.sh ~/work/cuba ../../content/en/cuba/7.2
./CollectMessages.sh $1 $2 global com.haulmont.cuba $3
./CollectMessages.sh $1 $2 core com.haulmont.cuba.core $3
./CollectMessages.sh $1 $2 gui com.haulmont.cuba.gui $3
./CollectMessages.sh $1 $2 web com.haulmont.cuba.web $3
|
cuba-platform/translations
|
tools/collect/collect-cuba.sh
|
Shell
|
apache-2.0
| 422 |
#!/usr/bin/env bash
# Copyright 2018 The Knative Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
source $(dirname $0)/../vendor/knative.dev/hack/codegen-library.sh
# If we run with -mod=vendor here, then generate-groups.sh looks for vendor files in the wrong place.
export GOFLAGS=-mod=
echo "=== Update Codegen for $MODULE_NAME"
echo "GOPATH=$GOPATH"
group "Kubernetes Codegen"
# generate the code with:
# --output-base because this script should also be able to run inside the vendor dir of
# k8s.io/kubernetes. The output-base is needed for the generators to output into the vendor dir
# instead of the $GOPATH directly. For normal projects this can be dropped.
${CODEGEN_PKG}/generate-groups.sh "deepcopy,client,informer,lister" \
"knative.dev/eventing-couchdb/source/pkg/client" "knative.dev/eventing-couchdb/source/pkg/apis" \
"sources:v1alpha1" \
--go-header-file ${REPO_ROOT_DIR}/hack/boilerplate.go.txt
# Depends on generate-groups.sh to install bin/deepcopy-gen
${GOPATH}/bin/deepcopy-gen \
-O zz_generated.deepcopy \
--go-header-file ${REPO_ROOT_DIR}/hack/boilerplate.go.txt \
  -i knative.dev/eventing-couchdb/source/pkg/apis
group "Knative Codegen"
# Knative Injection
${KNATIVE_CODEGEN_PKG}/hack/generate-knative.sh "injection" \
"knative.dev/eventing-couchdb/source/pkg/client" "knative.dev/eventing-couchdb/source/pkg/apis" \
"sources:v1alpha1" \
--go-header-file ${REPO_ROOT_DIR}/hack/boilerplate.go.txt
group "Update deps post-codegen"
# Make sure our dependencies are up-to-date
${REPO_ROOT_DIR}/hack/update-deps.sh
|
knative-sandbox/eventing-couchdb
|
hack/update-codegen.sh
|
Shell
|
apache-2.0
| 2,143 |
#!/bin/bash
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
echo "hello" > /var/lib/hello
docker_code=0
i=1
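# Retry the pull up to 10 times; break on success, otherwise record the exit code for the error path below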
while [[ $i -le 10 ]]; do
echo "Pulling ubuntu container image... [${i}/10]"
docker pull ubuntu && break || docker_code="$?"
i=$((i+1))
done
if [[ $i -eq 11 ]]; then
echo "Pulling ubuntu failed."
echo "Docker journal logs:"
journalctl -u docker.service --no-pager
exit "${docker_code}"
fi
echo "Successfully pulled ubuntu container image."
|
GoogleCloudPlatform/cos-customizer
|
testing/smoke_test/preload.sh
|
Shell
|
apache-2.0
| 992 |
#!/bin/bash
#
# This script installs the Google App Engine artifacts into you local Maven repository.
#
# If you would like to avoid the need for this step, ask Google by voting for the following issue:
# http://code.google.com/p/googleappengine/issues/detail?id=1296
#
if [ "$1" == "" ]
then
echo "usage: $0 /path/to/appengine-java-sdk-1.3.1 [install|deploy]"
exit 1
fi
if [ "$2" == "" ]
then
echo "usage: $0 /path/to/appengine-java-sdk-1.3.1 [install|deploy]"
exit 1
fi
GAE_SDK_PATH="$1"
TASK="$2"
URL="svn:https://maven-gae-plugin.googlecode.com/svn/repository"
mvn $TASK:$TASK-file -Durl=$URL -Dfile=$GAE_SDK_PATH/lib/user/appengine-api-1.0-sdk-1.3.1.jar -DgroupId=com.google.appengine -DartifactId=appengine-api-1.0-sdk -Dversion=1.3.1 -DgeneratePom=true -Dpackaging=jar
mvn $TASK:$TASK-file -Durl=$URL -Dfile=$GAE_SDK_PATH/lib/user/appengine-api-labs-1.3.1.jar -DgroupId=com.google.appengine -DartifactId=appengine-api-labs -Dversion=1.3.1 -DgeneratePom=true -Dpackaging=jar
mvn $TASK:$TASK-file -Durl=$URL -Dfile=$GAE_SDK_PATH/lib/appengine-tools-api.jar -DgroupId=com.google.appengine -DartifactId=appengine-tools-api -Dversion=1.3.1 -DgeneratePom=true -Dpackaging=jar
mvn $TASK:$TASK-file -Durl=$URL -Dfile=$GAE_SDK_PATH/lib/impl/appengine-local-runtime.jar -DgroupId=com.google.appengine -DartifactId=appengine-local-runtime -Dversion=1.3.1 -DgeneratePom=true -Dpackaging=jar
mvn $TASK:$TASK-file -Durl=$URL -Dfile=$GAE_SDK_PATH/lib/impl/appengine-api-stubs.jar -DgroupId=com.google.appengine -DartifactId=appengine-api-stubs -Dversion=1.3.1 -DgeneratePom=true -Dpackaging=jar
mvn $TASK:$TASK-file -Durl=$URL -Dfile=$GAE_SDK_PATH/lib/testing/appengine-testing.jar -DgroupId=com.google.appengine -DartifactId=appengine-testing -Dversion=1.3.1 -DgeneratePom=true -Dpackaging=jar
mvn $TASK:$TASK-file -Durl=$URL -Dfile=$GAE_SDK_PATH/lib/user/orm/datanucleus-appengine-1.0.5.final.jar -DgroupId=com.google.appengine.orm -DartifactId=datanucleus-appengine -Dversion=1.0.5 -DgeneratePom=true -Dpackaging=jar
|
phuongnd08/maven-geaForTest-plugin
|
gae-deploy.sh
|
Shell
|
apache-2.0
| 2,043 |
#!/usr/bin/env bash
############################################################################
# Copyright 2015 Valerio Morsella #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
############################################################################
# These tests will be run concurrently.
# This is to showcase the use of mp plugin.
echo nose2 --plugin nose2.plugins.attrib --plugin nose2.plugins.mp --config nose2.cfg -A group=CONCURRENT
nose2 --plugin nose2.plugins.attrib --plugin nose2.plugins.mp --config nose2.cfg -A group=CONCURRENT
|
valermor/nose2-tests-recipes
|
scripts/run_concurrent_tests.sh
|
Shell
|
apache-2.0
| 1,469 |
#!/bin/bash
#
# Copyright (c) 2001-2016 Primeton Technologies, Ltd.
# All rights reserved.
#
# author: ZhongWen Li (mailto:[email protected])
#
if [ -z "${TOMCAT_HOME}" ]; then
echo "Error, TOMCAT_HOME environment variable not found."
exit 1
fi
#
# Java Remote Debug Enable
#
if [ -z "${USE_DEBUG_PORT}" ]; then
USE_DEBUG_PORT=false
fi
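# Remote debugging is off by default; set USE_DEBUG_PORT=true to expose JDWP on port 8888 (configured below)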
#
# auto-config if exists environment 'P_APP_ENV'
# {"key1": "value1", ..., "keyn": "valuen"}
#
${TOMCAT_HOME}/bin/autoconfig.sh ${TOMCAT_HOME}/webapps
#
# chidi custom settings
#
ulimit -n 65535
export LANG=zh_CN.GBK
if [ -d ${TOMCAT_HOME}/work/Catalina ]; then
rm -rf ${TOMCAT_HOME}/work/Catalina
fi
# Memory Limit
if [ -z "${MEM_MIN}" ]; then
MEM_MIN=512
fi
if [ -z "${MEM_MAX}" ]; then
MEM_MAX=2048
fi
JAVA_OPTS="${JAVA_OPTS} -Xms${MEM_MIN}m -Xmx${MEM_MAX}m"
JAVA_OPTS="${JAVA_OPTS} -Xss1m -XX:NewSize=384M -XX:MaxNewSize=384M -XX:MaxMetaspaceSize=512m -XX:SurvivorRatio=32"
JAVA_OPTS="${JAVA_OPTS} -XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=70 -XX:TargetSurvivorRatio=50"
if [ -n "${LD_LIBRARY_PATH}" ] && [ -d "${LD_LIBRARY_PATH}" ]; then
JAVA_OPTS="${JAVA_OPTS} -Dcom.sun.management.jmxremote -Djava.library.path=${CATALINA_HOME}/bin:${LD_LIBRARY_PATH}"
else
JAVA_OPTS="${JAVA_OPTS} -Dcom.sun.management.jmxremote -Djava.library.path=${CATALINA_HOME}/bin"
fi
JAVA_OPTS="${JAVA_OPTS} -DA8.datasource.properies.filepath=${A8_HOME}/base/conf/datasourceCtp.properties"
JAVA_OPTS="${JAVA_OPTS} -Dsun.rmi.dgc.client.gcInterval=21600000 -Dsun.rmi.dgc.server.gcInterval=21600000"
JAVA_OPTS="${JAVA_OPTS} -Djava.awt.headless=true -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=java_pid.hprof"
JAVA_OPTS="${JAVA_OPTS} -Djava.net.preferIPv4Stack=true -Dorg.apache.el.parser.COERCE_TO_ZERO=true"
JAVA_OPTS="${JAVA_OPTS} -Djgroups.bind_addr=0.0.0.0 -DDEE_HOME=${A8_HOME}/base/dee/ -Dfile.encoding=utf-8"
# Common JVM Settings
# JAVA_OPTS="${JAVA_OPTS} -XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=70 -XX:+CMSParallelRemarkEnabled"
# JAVA_OPTS="${JAVA_OPTS} -XX:SoftRefLRUPolicyMSPerMB=0 -XX:+CMSClassUnloadingEnabled -XX:SurvivorRatio=8"
# JAVA_OPTS="${JAVA_OPTS} -XX:+DisableExplicitGC -XX:-OmitStackTraceInFastThrow -Djava.net.preferIPv4Stack=true"
# JAVA_OPTS="${JAVA_OPTS} -Dfile.encoding=utf-8"
# Java Remote Debug Enabled
if [ "true" == "${USE_DEBUG_PORT}" ]; then
JAVA_OPTS="${JAVA_OPTS} -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8888"
fi
echo "JAVA_HOME=${JAVA_HOME}"
# JAVA_OPTS="${JAVA_OPTS} -Djava.util.logging.config.file=${TOMCAT_HOME}/conf/logging.properties"
# JAVA_OPTS="${JAVA_OPTS} -Djava.util.logging.manager=org.apache.juli.ClassLoaderLogManager"
# JAVA_OPTS="${JAVA_OPTS} -Djava.endorsed.dirs=${TOMCAT_HOME}/endorsed"
# JAVA_OPTS="${JAVA_OPTS} -Dcatalina.base=${TOMCAT_HOME}"
# JAVA_OPTS="${JAVA_OPTS} -Dcatalina.home=${TOMCAT_HOME}"
# JAVA_OPTS="${JAVA_OPTS} -Djava.io.tmpdir=${TOMCAT_HOME}/temp"
# JAVA_OPTS="${JAVA_OPTS} -classpath ${TOMCAT_HOME}/bin/bootstrap.jar:${TOMCAT_HOME}/bin/tomcat-juli.jar"
# ${JAVA_HOME}/bin/java -server ${JAVA_OPTS} org.apache.catalina.startup.Bootstrap start
export JAVA_OPTS
${TOMCAT_HOME}/bin/catalina.sh run "$@"
|
Primeton-External/euler-chidi
|
installer/docker/tomcat8/resources/entrypoint.sh
|
Shell
|
apache-2.0
| 3,186 |
#!/usr/bin/env sh
# generated from catkin/python/catkin/environment_cache.py
# based on a snapshot of the environment before and after calling the setup script
# it emulates the modifications of the setup script without recurring computations
# new environment variables
export CATKIN_TEST_RESULTS_DIR="/home/gautam/hearbo_2dnav/command_line_goals/build/test_results"
export ROS_TEST_RESULTS_DIR="/home/gautam/hearbo_2dnav/command_line_goals/build/test_results"
# modified environment variables
export CMAKE_PREFIX_PATH="/home/gautam/hearbo_2dnav/command_line_goals/build/devel:$CMAKE_PREFIX_PATH"
export CPATH="/home/gautam/hearbo_2dnav/command_line_goals/build/devel/include:$CPATH"
export LD_LIBRARY_PATH="/home/gautam/hearbo_2dnav/command_line_goals/build/devel/lib:/opt/ros/groovy/lib:/usr/local/lib/gazebo-1.8/plugins:/usr/local/lib/"
export PATH="/home/gautam/hearbo_2dnav/command_line_goals/build/devel/bin:$PATH"
export PKG_CONFIG_PATH="/home/gautam/hearbo_2dnav/command_line_goals/build/devel/lib/pkgconfig:$PKG_CONFIG_PATH"
export PYTHONPATH="/home/gautam/hearbo_2dnav/command_line_goals/build/devel/lib/python2.7/dist-packages:$PYTHONPATH"
export ROSLISP_PACKAGE_DIRECTORIES="/home/gautam/hearbo_2dnav/command_line_goals/build/devel/share/common-lisp"
export ROS_PACKAGE_PATH="/home/gautam/hearbo_2dnav/command_line_goals:/opt/ros/groovy/share:/opt/ros/groovy/stacks"
|
MRSDTeamI/bud-e
|
ROS/src/BUD-E/hearbo_2dnav/command_line_goals/build/catkin_generated/setup_cached.sh
|
Shell
|
apache-2.0
| 1,381 |
#!/bin/bash -xeu
PKG_NAME=${1:-${CI_REPO##*/}}
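# Branches named like v<major>.<minor> are releases: export <PKGNAME>_RELEASE_VERSION and record the version (leading "v" stripped) for the conda recipe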
if [[ "$CI_BRANCH" =~ ^v[0-9]+.[0-9]?* ]]; then
eval export ${PKG_NAME^^}_RELEASE_VERSION=\$CI_BRANCH
echo ${CI_BRANCH} | tail -c +2 > __conda_version__.txt
fi
# python2 setup.py sdist
# python2 -m pip install dist/*.tar.gz
# (cd /; python2 -m pytest --pyargs $PKG_NAME)
# python3 -m pip install dist/*.tar.gz
# (cd /; python3 -m pytest --pyargs $PKG_NAME)
PYTHONPATH=$(pwd) PYTHON=python3 ./scripts/run_tests.sh --cov $PKG_NAME --cov-report html
./scripts/coverage_badge.py htmlcov/ htmlcov/coverage.svg
! grep "DO-NOT-MERGE!" -R . --exclude ci.sh
|
bjodah/symvarsub
|
scripts/ci.sh
|
Shell
|
bsd-2-clause
| 604 |
#!/usr/bin/env bash
# Make sure the package information is up-to-date
apt-get update || exit 1
# Compilers
apt-get install -y g++-4.8 || exit 1
apt-get install -y gfortran-4.8 || exit 1
apt-get install -y clang-3.4 || exit 1
# Configuration
apt-get install -y cmake || exit 1
# Source control
apt-get install -y git || exit 1
# Utilities
apt-get install -y dos2unix || exit 1
# Anaconda Python (miniconda) with Python dependencies
echo Downloading Miniconda...
curl -O http://repo.continuum.io/miniconda/Miniconda3-3.3.0-Linux-x86.sh || exit 1
su -c 'bash Miniconda3-*.sh -b -p ~/anaconda' vagrant || exit 1
# Install dependencies
su -c '~/anaconda/bin/conda install --yes ipython cython numpy scipy pyyaml nose' vagrant || exit 1
# Add anaconda to the PATH
printf '\nexport PATH=~/anaconda/bin:~/bin:$PATH\n' >> .bashrc
chown vagrant .bashrc
export PATH=~/anaconda/bin:~/bin:$PATH
mkdir ~/bin
chown -R vagrant ~/bin
export CC=gcc
export CXX=g++
# Clone and install libdynd
git clone https://github.com/ContinuumIO/libdynd.git || exit 1
mkdir libdynd/build
chown -R vagrant libdynd
pushd libdynd/build
su -c 'cmake -DCMAKE_INSTALL_PREFIX=~/anaconda -DCMAKE_C_COMPILER=${CC} -DCMAKE_CXX_COMPILER=${CXX} ..' vagrant || exit 1
su -c 'make' vagrant || exit 1
su -c 'make install' vagrant || exit 1
ldconfig
popd
# Clone and install dynd-python
git clone https://github.com/ContinuumIO/dynd-python.git || exit 1
mkdir dynd-python/build
chown -R vagrant dynd-python
pushd dynd-python/build
su -c 'cmake -DDYND_ELWISE_MAX=5 -DPYTHON_EXECUTABLE=~/anaconda/bin/python -DCYTHON_EXECUTABLE=~/anaconda/bin/cython -DCMAKE_C_COMPILER=${CC} -DCMAKE_CXX_COMPILER=${CXX} ..' vagrant || exit 1
su -c 'make' vagrant || exit 1
su -c 'make install' vagrant || exit 1
popd
# Utility scripts
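# (assumes pull.sh, build.sh and refresh.sh were already copied into ~/bin, e.g. by a Vagrant file provisioner)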
for FILE in pull.sh build.sh refresh.sh
do
chown -R vagrant ~/bin/${FILE}
chmod u+x ~/bin/${FILE}
done
|
aterrel/dynd-python
|
vagrant/trusty32-py33/bootstrap.sh
|
Shell
|
bsd-2-clause
| 1,891 |
#!/bin/bash
./gradlew clean
./gradlew installDist
# Fix problem arising if eclipse is used jointly
mkdir -p build/xtend/test
mkdir -p build/blang/test
|
UBC-Stat-ML/inits
|
setup-cli.sh
|
Shell
|
bsd-2-clause
| 148 |
#!/usr/bin/env bash
LIB_VERSION=4.1.2
MASON_NAME=geojsonvt
MASON_VERSION=${LIB_VERSION}-cxx11abi
MASON_LIB_FILE=lib/libgeojsonvt.a
. ${MASON_DIR}/mason.sh
function mason_load_source {
mason_download \
https://github.com/mapbox/geojson-vt-cpp/archive/v${LIB_VERSION}.tar.gz \
a98f44ee9f059e41a90dfed7e4c256c76ea4076a
mason_extract_tar_gz
export MASON_BUILD_PATH=${MASON_ROOT}/.build/geojson-vt-cpp-${LIB_VERSION}
}
function mason_compile {
# setup mason
rm -rf .mason
ln -s ${MASON_DIR} .mason
# Force Linux Makefiles when cross-compiling for Android
if [[ ${MASON_PLATFORM:-} == 'android' ]] ; then
export GYP_FLAVOR_SUFFIX=-linux
fi
# build
INSTALL_PREFIX=${MASON_PREFIX} ./configure
CXXFLAGS="-fPIC ${CFLAGS:-} ${CXXFLAGS:-}" make install
}
function mason_cflags {
echo -I${MASON_PREFIX}/include
}
function mason_ldflags {
:
}
function mason_static_libs {
echo ${MASON_PREFIX}/lib/libgeojsonvt.a
}
mason_run "$@"
|
hydrays/osrm-backend
|
third_party/mason/scripts/geojsonvt/4.1.2-cxx11abi/script.sh
|
Shell
|
bsd-2-clause
| 1,012 |
#!/bin/bash
export PATH='/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin'
SHELLDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
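# Sample netstat's cumulative TCP counters and report the retransmission rate per interval.
# ("segments send out" is netstat's own wording, so the greps must match it verbatim.)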
function show_retran_rate()
{
netstat -s -t > /tmp/netstat_s 2>/dev/null
s_r=`cat /tmp/netstat_s | grep 'segments send out' | awk '{print $1}'`
s_re=`cat /tmp/netstat_s | grep 'segments retransmited' | awk '{print $1}'`
[ -e ${SHELLDIR}/s_r ] || touch ${SHELLDIR}/s_r
[ -e ${SHELLDIR}/s_re ] || touch ${SHELLDIR}/s_re
l_s_r=`cat ${SHELLDIR}/s_r`
l_s_re=`cat ${SHELLDIR}/s_re`
echo $s_r > ${SHELLDIR}/s_r
echo $s_re > ${SHELLDIR}/s_re
tcp_re_rate=`echo "$s_r $s_re $l_s_r $l_s_re" | awk '{printf("%.3f",($2-$4)/($1-$3)*100)}'`
echo -n `date -Isec` " "
echo -n `expr $s_r - $l_s_r` " "
echo -n `expr $s_re - $l_s_re` " "
echo $tcp_re_rate "%"
}
echo `date -Isec` " " "send " "retran " "rate"
while true ; do
show_retran_rate
sleep 5
done
|
thomaszhao/t_toolkit
|
linux/retransmit_rate.sh
|
Shell
|
bsd-3-clause
| 917 |
#!/bin/bash -e
rm -f ${ROOTFS_DIR}/usr/lib/os-release
install -m 644 files/os-release ${ROOTFS_DIR}/usr/lib/
|
brindosch/pi-gen
|
stage3/00-sys-tweaks/00-run.sh
|
Shell
|
bsd-3-clause
| 110 |
export HOME=/root
set -x
set -e
cat > requirements.txt <<EOT
numpy==1.16.0
Click==7.0
CppHeaderParser==2.7.4
dash==0.21.1
dash-core-components==0.23.0
dash-html-components==0.11.0
dash-renderer==0.13.0
deltasigma==0.2.2
Flask==1.1.1
Flask-Compress==1.4.0
imutils==0.5.3
ipywidgets==7.5.1
itsdangerous==1.1.0
Jinja2==2.10.1
json5==0.8.5
jsonschema==3.0.2
jupyter-contrib-core==0.3.3
jupyter-contrib-nbextensions==0.5.1
jupyter-highlight-selected-word==0.2.0
jupyter-latex-envs==1.4.6
jupyter-nbextensions-configurator==0.4.1
jupyterlab==1.2.0
jupyterlab-server==1.0.6
nbwavedrom==0.2.0
parsec==3.4
patsy==0.5.1
plotly==4.5.2
plotly-express==0.3.1
pyeda==0.28.0
pyrsistent==0.15.4
rise==5.2.0
sphinx-rtd-theme==0.4.3
statsmodels==0.9.0
tqdm==4.32.2
Werkzeug==0.15.6
widgetsnbextension==3.5.1
wurlitzer==1.0.3
cython==0.29.0
setproctitle==1.1.10
psutil==5.7.0
pybind11==2.5.0
EOT
python3.6 -m pip install -r requirements.txt
rm requirements.txt
|
yunqu/PYNQ
|
sdbuild/packages/python_packages_bionic/qemu.sh
|
Shell
|
bsd-3-clause
| 946 |
#!/usr/bin/env bash
. version.sh
. protodefs.sh
[ -d ${XML_DIR} ] && {
PATCHFILE=${PWD}/xmlpatch
pushd ${DOWNLOAD_PATH}/${XPROTO_PACKAGE} > /dev/null
diff -u -r src patched > ${PATCHFILE}
popd > /dev/null
}
|
aslatter/xhb
|
makeprotopatch.sh
|
Shell
|
bsd-3-clause
| 225 |
#!/usr/bin/env bash
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
xmllint --noblanks $DIR/../../doc/oui.xml | xsltproc $DIR/../../etc/oui.xsl - | php -r "require_once('$DIR/../../libs/vendor/jsbeautifier.class.php'); \$js = file_get_contents('php://stdin'); \$opts = new BeautifierOptions(); \$opts->indent_size = 4; print js_beautify(\$js, \$opts);"
|
octris/org.octris.oui
|
test/xsl/test.sh
|
Shell
|
bsd-3-clause
| 363 |
php54 -S localhost:31415 -t www/ > /dev/null 2>&1 & echo $! > test-php.pid
rm -f app/data/test.db
app/yiic migrate --connectionID=dbTest --interactive=0 > /dev/null
codecept.phar run unit
rm -f app/data/test.db
app/yiic migrate --connectionID=dbTest --interactive=0 > /dev/null
codecept.phar run functional
rm -f app/data/test.db
app/yiic migrate --connectionID=dbTest --interactive=0 > /dev/null
codecept.phar run acceptance
kill -9 `cat test-php.pid`
rm test-php.pid
|
motin/giic-gtc-hybrid-template-demo
|
test-dev.sh
|
Shell
|
bsd-3-clause
| 463 |
#!/bin/bash
#This bash file handles running the oscillator code.
#Example: chmod +x run_oscilator.sh; ./run_oscilator.sh 1000 4 mpi; will run the
#oscillator for 1000 rounds over 4 cores using MPI.
source activate intel
echo 'starting...'
rm -r output*
rm -r *src/__*
cd src/cython_files
#rm -rf build *so *c *html
python setup.py build_ext --inplace
cython -a cython_integrand.pyx
cd ../..
rm error_log
if [ "$3" == "mpi" ]
then
export MKL_NUM_THREADS=1
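# pin MKL to a single thread so the MPI ranks do not oversubscribe the cores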
echo "running with" 1 "MKL core and" $2 "MPI cores, for" $1 "rounds"
mpiexec -n $2 python -m mpi4py.futures src/oscillator.py $3 $1 $2 0
elif [ "$3" == "joblib" ]
then
export MKL_NUM_THREADS=1
echo "running with" 1 "MKL core and" $2 "Multiprocessing cores, for" $1 "rounds"
python src/oscillator.py $3 $1 $2 0
else
echo "running with MKL cores for" $1 "rounds"
python src/oscillator.py $3 $1 1 0
fi
|
ibegleris/Single-mode-FOPO
|
run_oscilator.sh
|
Shell
|
bsd-3-clause
| 873 |